From 1e6c93250172946eeb38e94a92a1fd12c9d3011e Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Nov 2018 13:22:44 +0100
Subject: Merging upstream version 1.11.0+dfsg.

Signed-off-by: Daniel Baumann
---
 .codacy.yml | 15 +-
 .codeclimate.yml | 19 +-
 .github/CODEOWNERS | 31 +
 .gitignore | 57 +-
 .lgtm.yml | 23 +
 .travis/README.md | 80 +
 .travis/containerized_build.sh | 7 +
 .travis/create_artifacts.sh | 33 +
 .travis/deploy-if-have-key | 8 +-
 .travis/firehol_create_artifacts.sh | 31 +
 .travis/generate_changelog.sh | 36 +
 .travis/images/Dockerfile.alpine | 5 +
 .travis/images/Dockerfile.centos6 | 5 +
 .travis/images/Dockerfile.centos7 | 5 +
 .travis/images/Dockerfile.ubuntu1804 | 6 +
 .travis/releaser.sh | 83 +
 CODE_OF_CONDUCT.md | 74 +
 CONTRIBUTORS.md | 123 +
 COPYING | 674 --
 ChangeLog | 655 --
 Dockerfile | 15 -
 Dockerfile.aarch64 | 19 -
 Dockerfile.alpine | 58 -
 Dockerfile.armv7hf | 19 -
 LICENSE | 10 +
 LICENSE-REDISTRIBUTED.md | 180 -
 LICENSE.md | 9 -
 Makefile.am | 465 +-
 Makefile.in | 2246 ++++-
 README.md | 132 +-
 REDISTRIBUTED.md | 209 +
 aclocal.m4 | 20 +-
 autogen.sh | 2 -
 backends/Makefile.am | 19 +
 backends/Makefile.in | 657 ++
 backends/README.md | 199 +
 backends/backends.c | 662 ++
 backends/backends.h | 52 +
 backends/graphite/Makefile.am | 4 +
 backends/graphite/Makefile.in | 457 +
 backends/graphite/graphite.c | 90 +
 backends/graphite/graphite.h | 35 +
 backends/json/Makefile.am | 4 +
 backends/json/Makefile.in | 457 +
 backends/json/json.c | 152 +
 backends/json/json.h | 34 +
 backends/nc-backend.sh | 158 +
 backends/opentsdb/Makefile.am | 4 +
 backends/opentsdb/Makefile.in | 457 +
 backends/opentsdb/opentsdb.c | 90 +
 backends/opentsdb/opentsdb.h | 35 +
 backends/prometheus/Makefile.am | 8 +
 backends/prometheus/Makefile.in | 464 +
 backends/prometheus/README.md | 376 +
 backends/prometheus/backend_prometheus.c | 566 ++
 backends/prometheus/backend_prometheus.h | 20 +
 build/Dockerfile | 8 +
 build/build.sh | 22 +
 build/m4/ax_c___atomic.m4 | 36 +
 build/m4/ax_c__generic.m4 | 28 +
 build/m4/ax_c_lto.m4 | 21 +
 build/m4/ax_c_mallinfo.m4 | 24 +
 build/m4/ax_c_mallopt.m4 | 20 +
 build/m4/ax_c_statement_expressions.m4 | 23 +
 build/m4/ax_check_compile_flag.m4 | 50 +
 build/m4/ax_check_enable_debug.m4 | 122 +
 build/m4/ax_gcc_func_attribute.m4 | 223 +
 build/m4/ax_pthread.m4 | 308 +
 build/m4/jemalloc.m4 | 75 +
 build/m4/tcmalloc.m4 | 45 +
 build/subst.inc | 3 +
 charts.d/Makefile.am | 31 -
 charts.d/Makefile.in | 575 --
 charts.d/README.md | 344 -
 charts.d/ap.chart.sh | 181 -
 charts.d/apache.chart.sh | 254 -
 charts.d/apcupsd.chart.sh | 198 -
 charts.d/cpu_apps.chart.sh | 71 -
 charts.d/cpufreq.chart.sh | 88 -
 charts.d/example.chart.sh | 119 -
 charts.d/exim.chart.sh | 47 -
 charts.d/hddtemp.chart.sh | 74 -
 charts.d/libreswan.chart.sh | 173 -
 charts.d/load_average.chart.sh | 70 -
 charts.d/mem_apps.chart.sh | 62 -
 charts.d/mysql.chart.sh | 522 --
 charts.d/nginx.chart.sh | 141 -
 charts.d/nut.chart.sh | 222 -
 charts.d/opensips.chart.sh | 322 -
 charts.d/phpfpm.chart.sh | 194 -
 charts.d/postfix.chart.sh | 86 -
 charts.d/sensors.chart.sh | 250 -
 charts.d/squid.chart.sh | 144 -
 charts.d/tomcat.chart.sh | 147 -
 collectors/Makefile.am | 28 +
 collectors/Makefile.in | 663 ++
 collectors/README.md | 118 +
 collectors/all.h | 318 +
 collectors/apps.plugin/Makefile.am | 13 +
 collectors/apps.plugin/Makefile.in | 521 ++
 collectors/apps.plugin/README.md | 372 +
 collectors/apps.plugin/apps_groups.conf | 286 +
 collectors/apps.plugin/apps_plugin.c | 3799 ++++++++
 collectors/cgroups.plugin/Makefile.am | 21 +
 collectors/cgroups.plugin/Makefile.in | 563 ++
 collectors/cgroups.plugin/README.md | 187 +
 collectors/cgroups.plugin/cgroup-name.sh | 196 +
 collectors/cgroups.plugin/cgroup-name.sh.in | 196 +
 collectors/cgroups.plugin/cgroup-network-helper.sh | 258 +
 collectors/cgroups.plugin/cgroup-network.c | 682 ++
 collectors/cgroups.plugin/sys_fs_cgroup.c | 2771 ++++++
 collectors/cgroups.plugin/sys_fs_cgroup.h | 31 +
 collectors/charts.d.plugin/Makefile.am | 62 +
 collectors/charts.d.plugin/Makefile.in | 953 ++
 collectors/charts.d.plugin/README.md | 193 +
 collectors/charts.d.plugin/ap/Makefile.inc | 13 +
 collectors/charts.d.plugin/ap/README.md | 84 +
 collectors/charts.d.plugin/ap/ap.chart.sh | 182 +
 collectors/charts.d.plugin/ap/ap.conf | 23 +
 collectors/charts.d.plugin/apache/Makefile.inc | 13 +
 collectors/charts.d.plugin/apache/README.md | 127 +
 collectors/charts.d.plugin/apache/apache.chart.sh | 258 +
 collectors/charts.d.plugin/apache/apache.conf | 30 +
 collectors/charts.d.plugin/apcupsd/Makefile.inc | 13 +
 collectors/charts.d.plugin/apcupsd/README.md | 0
 .../charts.d.plugin/apcupsd/apcupsd.chart.sh | 201 +
 collectors/charts.d.plugin/apcupsd/apcupsd.conf | 25 +
 collectors/charts.d.plugin/charts.d.conf | 63 +
 .../charts.d.plugin/charts.d.dryrun-helper.sh | 78 +
 collectors/charts.d.plugin/charts.d.plugin | 743 ++
 collectors/charts.d.plugin/charts.d.plugin.in | 743 ++
 collectors/charts.d.plugin/cpu_apps/Makefile.inc | 13 +
 collectors/charts.d.plugin/cpu_apps/README.md | 2 +
 .../charts.d.plugin/cpu_apps/cpu_apps.chart.sh | 72 +
 collectors/charts.d.plugin/cpu_apps/cpu_apps.conf | 19 +
 collectors/charts.d.plugin/cpufreq/Makefile.inc | 13 +
 collectors/charts.d.plugin/cpufreq/README.md | 2 +
 .../charts.d.plugin/cpufreq/cpufreq.chart.sh | 90 +
 collectors/charts.d.plugin/cpufreq/cpufreq.conf | 24 +
 collectors/charts.d.plugin/example/Makefile.inc | 13 +
 collectors/charts.d.plugin/example/README.md | 2 +
 .../charts.d.plugin/example/example.chart.sh | 126 +
 collectors/charts.d.plugin/example/example.conf | 21 +
 collectors/charts.d.plugin/exim/Makefile.inc | 13 +
 collectors/charts.d.plugin/exim/README.md | 2 +
 collectors/charts.d.plugin/exim/exim.chart.sh | 48 +
 collectors/charts.d.plugin/exim/exim.conf | 24 +
 collectors/charts.d.plugin/hddtemp/Makefile.inc | 13 +
 collectors/charts.d.plugin/hddtemp/README.md | 28 +
 .../charts.d.plugin/hddtemp/hddtemp.chart.sh | 77 +
 collectors/charts.d.plugin/hddtemp/hddtemp.conf | 23 +
 collectors/charts.d.plugin/libreswan/Makefile.inc | 13 +
 collectors/charts.d.plugin/libreswan/README.md | 42 +
 .../charts.d.plugin/libreswan/libreswan.chart.sh | 176 +
 .../charts.d.plugin/libreswan/libreswan.conf | 29 +
 .../charts.d.plugin/load_average/Makefile.inc | 13 +
 collectors/charts.d.plugin/load_average/README.md | 2 +
 .../load_average/load_average.chart.sh | 71 +
 .../charts.d.plugin/load_average/load_average.conf | 22 +
 collectors/charts.d.plugin/loopsleepms.sh.inc | 237 +
 collectors/charts.d.plugin/mem_apps/Makefile.inc | 13 +
 collectors/charts.d.plugin/mem_apps/README.md | 2 +
 .../charts.d.plugin/mem_apps/mem_apps.chart.sh | 63 +
 collectors/charts.d.plugin/mem_apps/mem_apps.conf | 19 +
 collectors/charts.d.plugin/mysql/Makefile.inc | 13 +
 collectors/charts.d.plugin/mysql/README.md | 81 +
 collectors/charts.d.plugin/mysql/mysql.chart.sh | 528 ++
 collectors/charts.d.plugin/mysql/mysql.conf | 23 +
 collectors/charts.d.plugin/nginx/Makefile.inc | 13 +
 collectors/charts.d.plugin/nginx/README.md | 2 +
 collectors/charts.d.plugin/nginx/nginx.chart.sh | 144 +
 collectors/charts.d.plugin/nginx/nginx.conf | 23 +
 collectors/charts.d.plugin/nut/Makefile.inc | 13 +
 collectors/charts.d.plugin/nut/README.md | 59 +
 collectors/charts.d.plugin/nut/nut.chart.sh | 241 +
 collectors/charts.d.plugin/nut/nut.conf | 33 +
 collectors/charts.d.plugin/opensips/Makefile.inc | 13 +
 collectors/charts.d.plugin/opensips/README.md | 0
 .../charts.d.plugin/opensips/opensips.chart.sh | 326 +
 collectors/charts.d.plugin/opensips/opensips.conf | 21 +
 collectors/charts.d.plugin/phpfpm/Makefile.inc | 13 +
 collectors/charts.d.plugin/phpfpm/README.md | 2 +
 collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh | 198 +
 collectors/charts.d.plugin/phpfpm/phpfpm.conf | 27 +
 collectors/charts.d.plugin/postfix/Makefile.inc | 13 +
 collectors/charts.d.plugin/postfix/README.md | 26 +
 .../charts.d.plugin/postfix/postfix.chart.sh | 89 +
 collectors/charts.d.plugin/postfix/postfix.conf | 25 +
 collectors/charts.d.plugin/sensors/Makefile.inc | 13 +
 collectors/charts.d.plugin/sensors/README.md | 52 +
 .../charts.d.plugin/sensors/sensors.chart.sh | 255 +
 collectors/charts.d.plugin/sensors/sensors.conf | 32 +
 collectors/charts.d.plugin/squid/Makefile.inc | 13 +
 collectors/charts.d.plugin/squid/README.md | 66 +
 collectors/charts.d.plugin/squid/squid.chart.sh | 147 +
 collectors/charts.d.plugin/squid/squid.conf | 26 +
 collectors/charts.d.plugin/tomcat/Makefile.inc | 13 +
 collectors/charts.d.plugin/tomcat/README.md | 2 +
 collectors/charts.d.plugin/tomcat/tomcat.chart.sh | 150 +
 collectors/charts.d.plugin/tomcat/tomcat.conf | 38 +
 collectors/checks.plugin/Makefile.am | 4 +
 collectors/checks.plugin/Makefile.in | 457 +
 collectors/checks.plugin/plugin_checks.c | 129 +
 collectors/checks.plugin/plugin_checks.h | 29 +
 collectors/diskspace.plugin/Makefile.am | 8 +
 collectors/diskspace.plugin/Makefile.in | 464 +
 collectors/diskspace.plugin/README.md | 5 +
 collectors/diskspace.plugin/plugin_diskspace.c | 465 +
 collectors/diskspace.plugin/plugin_diskspace.h | 34 +
 collectors/fping.plugin/Makefile.am | 24 +
 collectors/fping.plugin/Makefile.in | 591 ++
 collectors/fping.plugin/README.md | 96 +
 collectors/fping.plugin/fping.conf | 44 +
 collectors/fping.plugin/fping.plugin | 200 +
 collectors/fping.plugin/fping.plugin.in | 200 +
 collectors/freebsd.plugin/Makefile.am | 5 +
 collectors/freebsd.plugin/Makefile.in | 457 +
 collectors/freebsd.plugin/freebsd_devstat.c | 780 ++
 collectors/freebsd.plugin/freebsd_getifaddrs.c | 618 ++
 collectors/freebsd.plugin/freebsd_getmntinfo.c | 301 +
 collectors/freebsd.plugin/freebsd_ipfw.c | 372 +
 collectors/freebsd.plugin/freebsd_kstat_zfs.c | 300 +
 collectors/freebsd.plugin/freebsd_sysctl.c | 3188 +++++++
 collectors/freebsd.plugin/plugin_freebsd.c | 175 +
 collectors/freebsd.plugin/plugin_freebsd.h | 74 +
 collectors/freeipmi.plugin/Makefile.am | 8 +
 collectors/freeipmi.plugin/Makefile.in | 464 +
 collectors/freeipmi.plugin/README.md | 180 +
 collectors/freeipmi.plugin/freeipmi_plugin.c | 1760 ++++
 collectors/idlejitter.plugin/Makefile.am | 8 +
 collectors/idlejitter.plugin/Makefile.in | 464 +
 collectors/idlejitter.plugin/README.md | 13 +
 collectors/idlejitter.plugin/plugin_idlejitter.c | 92 +
 collectors/idlejitter.plugin/plugin_idlejitter.h | 21 +
 collectors/macos.plugin/Makefile.am | 4 +
 collectors/macos.plugin/Makefile.in | 457 +
 collectors/macos.plugin/macos_fw.c | 687 ++
 collectors/macos.plugin/macos_mach_smi.c | 241 +
 collectors/macos.plugin/macos_sysctl.c | 1492 +++
 collectors/macos.plugin/plugin_macos.c | 69 +
 collectors/macos.plugin/plugin_macos.h | 43 +
 collectors/nfacct.plugin/Makefile.am | 8 +
 collectors/nfacct.plugin/Makefile.in | 464 +
 collectors/nfacct.plugin/README.md | 10 +
 collectors/nfacct.plugin/plugin_nfacct.c | 822 ++
 collectors/nfacct.plugin/plugin_nfacct.h | 30 +
 collectors/node.d.plugin/Makefile.am | 59 +
 collectors/node.d.plugin/Makefile.in | 805 ++
 collectors/node.d.plugin/README.md | 218 +
 collectors/node.d.plugin/fronius/Makefile.inc | 13 +
 collectors/node.d.plugin/fronius/README.md | 120 +
 collectors/node.d.plugin/fronius/fronius.node.js | 400 +
 collectors/node.d.plugin/named/Makefile.inc | 13 +
 collectors/node.d.plugin/named/README.md | 342 +
 collectors/node.d.plugin/named/named.node.js | 610 ++
 collectors/node.d.plugin/node.d.conf | 39 +
 collectors/node.d.plugin/node.d.plugin | 303 +
 collectors/node.d.plugin/node.d.plugin.in | 303 +
 collectors/node.d.plugin/node_modules/asn1-ber.js | 7 +
 collectors/node.d.plugin/node_modules/extend.js | 88 +
 .../node.d.plugin/node_modules/lib/ber/errors.js | 10 +
 .../node.d.plugin/node_modules/lib/ber/index.js | 18 +
 .../node.d.plugin/node_modules/lib/ber/reader.js | 270 +
 .../node.d.plugin/node_modules/lib/ber/types.js | 35 +
 .../node.d.plugin/node_modules/lib/ber/writer.js | 318 +
 collectors/node.d.plugin/node_modules/net-snmp.js | 1465 +++
 collectors/node.d.plugin/node_modules/netdata.js | 654 ++
 collectors/node.d.plugin/node_modules/pixl-xml.js | 607 ++
 collectors/node.d.plugin/sma_webbox/Makefile.inc | 13 +
 collectors/node.d.plugin/sma_webbox/README.md | 25 +
 .../node.d.plugin/sma_webbox/sma_webbox.node.js | 238 +
 collectors/node.d.plugin/snmp/Makefile.inc | 13 +
 collectors/node.d.plugin/snmp/README.md | 357 +
 collectors/node.d.plugin/snmp/snmp.node.js | 516 ++
 .../node.d.plugin/stiebeleltron/Makefile.inc | 13 +
 collectors/node.d.plugin/stiebeleltron/README.md | 505 ++
 .../stiebeleltron/stiebeleltron.node.js | 197 +
 collectors/plugins.d/Makefile.am | 11 +
 collectors/plugins.d/Makefile.in | 647 ++
 collectors/plugins.d/README.md | 472 +
 collectors/plugins.d/plugins_d.c | 696 ++
 collectors/plugins.d/plugins_d.h | 73 +
 collectors/proc.plugin/Makefile.am | 8 +
 collectors/proc.plugin/Makefile.in | 464 +
 collectors/proc.plugin/README.md | 200 +
 collectors/proc.plugin/ipc.c | 263 +
 collectors/proc.plugin/plugin_proc.c | 217 +
 collectors/proc.plugin/plugin_proc.h | 74 +
 collectors/proc.plugin/proc_diskstats.c | 1649 ++++
 collectors/proc.plugin/proc_interrupts.c | 248 +
 collectors/proc.plugin/proc_loadavg.c | 124 +
 collectors/proc.plugin/proc_meminfo.c | 519 ++
 collectors/proc.plugin/proc_net_dev.c | 912 ++
 collectors/proc.plugin/proc_net_ip_vs_stats.c | 133 +
 collectors/proc.plugin/proc_net_netstat.c | 818 ++
 collectors/proc.plugin/proc_net_rpc_nfs.c | 454 +
 collectors/proc.plugin/proc_net_rpc_nfsd.c | 1006 +++
 collectors/proc.plugin/proc_net_sctp_snmp.c | 352 +
 collectors/proc.plugin/proc_net_snmp.c | 1085 +++
 collectors/proc.plugin/proc_net_snmp6.c | 1268 +++
 collectors/proc.plugin/proc_net_sockstat.c | 518 ++
 collectors/proc.plugin/proc_net_sockstat6.c | 273 +
 collectors/proc.plugin/proc_net_softnet_stat.c | 151 +
 collectors/proc.plugin/proc_net_stat_conntrack.c | 351 +
 collectors/proc.plugin/proc_net_stat_synproxy.c | 185 +
 collectors/proc.plugin/proc_self_mountinfo.c | 403 +
 collectors/proc.plugin/proc_self_mountinfo.h | 57 +
 collectors/proc.plugin/proc_softirqs.c | 242 +
 collectors/proc.plugin/proc_spl_kstat_zfs.c | 155 +
 collectors/proc.plugin/proc_stat.c | 570 ++
 .../proc_sys_kernel_random_entropy_avail.c | 49 +
 collectors/proc.plugin/proc_uptime.c | 105 +
 collectors/proc.plugin/proc_vmstat.c | 259 +
 .../proc.plugin/sys_devices_system_edac_mc.c | 206 +
 collectors/proc.plugin/sys_devices_system_node.c | 163 +
 collectors/proc.plugin/sys_fs_btrfs.c | 722 ++
 collectors/proc.plugin/sys_kernel_mm_ksm.c | 201 +
 collectors/proc.plugin/zfs_common.c | 714 ++
 collectors/proc.plugin/zfs_common.h | 115 +
 collectors/python.d.plugin/Makefile.am | 244 +
 collectors/python.d.plugin/Makefile.in | 1987 ++++
 collectors/python.d.plugin/README.md | 198 +
 .../python.d.plugin/adaptec_raid/Makefile.inc | 13 +
 collectors/python.d.plugin/adaptec_raid/README.md | 46 +
 .../adaptec_raid/adaptec_raid.chart.py | 247 +
 .../python.d.plugin/adaptec_raid/adaptec_raid.conf | 55 +
 collectors/python.d.plugin/apache/Makefile.inc | 13 +
 collectors/python.d.plugin/apache/README.md | 59 +
 collectors/python.d.plugin/apache/apache.chart.py | 132 +
 collectors/python.d.plugin/apache/apache.conf | 87 +
 collectors/python.d.plugin/beanstalk/Makefile.inc | 13 +
 collectors/python.d.plugin/beanstalk/README.md | 103 +
 .../python.d.plugin/beanstalk/beanstalk.chart.py | 247 +
 .../python.d.plugin/beanstalk/beanstalk.conf | 80 +
 collectors/python.d.plugin/bind_rndc/Makefile.inc | 13 +
 collectors/python.d.plugin/bind_rndc/README.md | 60 +
 .../python.d.plugin/bind_rndc/bind_rndc.chart.py | 240 +
 .../python.d.plugin/bind_rndc/bind_rndc.conf | 112 +
 collectors/python.d.plugin/boinc/Makefile.inc | 13 +
 collectors/python.d.plugin/boinc/README.md | 28 +
 collectors/python.d.plugin/boinc/boinc.chart.py | 162 +
 collectors/python.d.plugin/boinc/boinc.conf | 68 +
 collectors/python.d.plugin/ceph/Makefile.inc | 13 +
 collectors/python.d.plugin/ceph/README.md | 32 +
 collectors/python.d.plugin/ceph/ceph.chart.py | 345 +
 collectors/python.d.plugin/ceph/ceph.conf | 75 +
 collectors/python.d.plugin/chrony/Makefile.inc | 13 +
 collectors/python.d.plugin/chrony/README.md | 31 +
 collectors/python.d.plugin/chrony/chrony.chart.py | 110 +
 collectors/python.d.plugin/chrony/chrony.conf | 79 +
 collectors/python.d.plugin/couchdb/Makefile.inc | 13 +
 collectors/python.d.plugin/couchdb/README.md | 35 +
 .../python.d.plugin/couchdb/couchdb.chart.py | 411 +
 collectors/python.d.plugin/couchdb/couchdb.conf | 91 +
 collectors/python.d.plugin/cpufreq/Makefile.inc | 13 +
 collectors/python.d.plugin/cpufreq/README.md | 30 +
 .../python.d.plugin/cpufreq/cpufreq.chart.py | 115 +
 collectors/python.d.plugin/cpufreq/cpufreq.conf | 43 +
 collectors/python.d.plugin/cpuidle/Makefile.inc | 13 +
 collectors/python.d.plugin/cpuidle/README.md | 11 +
 .../python.d.plugin/cpuidle/cpuidle.chart.py | 148 +
 collectors/python.d.plugin/cpuidle/cpuidle.conf | 40 +
 .../python.d.plugin/dns_query_time/Makefile.inc | 13 +
 .../python.d.plugin/dns_query_time/README.md | 10 +
 .../dns_query_time/dns_query_time.chart.py | 145 +
 .../dns_query_time/dns_query_time.conf | 71 +
 collectors/python.d.plugin/dnsdist/Makefile.inc | 13 +
 collectors/python.d.plugin/dnsdist/README.md | 54 +
 .../python.d.plugin/dnsdist/dnsdist.chart.py | 133 +
 collectors/python.d.plugin/dnsdist/dnsdist.conf | 85 +
 collectors/python.d.plugin/dockerd/Makefile.inc | 13 +
 collectors/python.d.plugin/dockerd/README.md | 26 +
 .../python.d.plugin/dockerd/dockerd.chart.py | 77 +
 collectors/python.d.plugin/dockerd/dockerd.conf | 79 +
 collectors/python.d.plugin/dovecot/Makefile.inc | 13 +
 collectors/python.d.plugin/dovecot/README.md | 73 +
 .../python.d.plugin/dovecot/dovecot.chart.py | 147 +
 collectors/python.d.plugin/dovecot/dovecot.conf | 96 +
 .../python.d.plugin/elasticsearch/Makefile.inc | 13 +
 collectors/python.d.plugin/elasticsearch/README.md | 60 +
 .../elasticsearch/elasticsearch.chart.py | 644 ++
 .../elasticsearch/elasticsearch.conf | 83 +
 collectors/python.d.plugin/example/Makefile.inc | 13 +
 collectors/python.d.plugin/example/README.md | 1 +
 .../python.d.plugin/example/example.chart.py | 48 +
 collectors/python.d.plugin/example/example.conf | 70 +
 collectors/python.d.plugin/exim/Makefile.inc | 13 +
 collectors/python.d.plugin/exim/README.md | 13 +
 collectors/python.d.plugin/exim/exim.chart.py | 41 +
 collectors/python.d.plugin/exim/exim.conf | 93 +
 collectors/python.d.plugin/fail2ban/Makefile.inc | 13 +
 collectors/python.d.plugin/fail2ban/README.md | 23 +
 .../python.d.plugin/fail2ban/fail2ban.chart.py | 196 +
 collectors/python.d.plugin/fail2ban/fail2ban.conf | 70 +
 collectors/python.d.plugin/freeradius/Makefile.inc | 13 +
 collectors/python.d.plugin/freeradius/README.md | 70 +
 .../python.d.plugin/freeradius/freeradius.chart.py | 129 +
 .../python.d.plugin/freeradius/freeradius.conf | 82 +
 collectors/python.d.plugin/go_expvar/Makefile.inc | 13 +
 collectors/python.d.plugin/go_expvar/README.md | 276 +
 .../python.d.plugin/go_expvar/go_expvar.chart.py | 245 +
 .../python.d.plugin/go_expvar/go_expvar.conf | 110 +
 collectors/python.d.plugin/haproxy/Makefile.inc | 13 +
 collectors/python.d.plugin/haproxy/README.md | 49 +
 .../python.d.plugin/haproxy/haproxy.chart.py | 370 +
 collectors/python.d.plugin/haproxy/haproxy.conf | 85 +
 collectors/python.d.plugin/hddtemp/Makefile.inc | 13 +
 collectors/python.d.plugin/hddtemp/README.md | 22 +
 .../python.d.plugin/hddtemp/hddtemp.chart.py | 100 +
 collectors/python.d.plugin/hddtemp/hddtemp.conf | 97 +
 collectors/python.d.plugin/httpcheck/Makefile.inc | 13 +
 collectors/python.d.plugin/httpcheck/README.md | 41 +
 .../python.d.plugin/httpcheck/httpcheck.chart.py | 121 +
 .../python.d.plugin/httpcheck/httpcheck.conf | 100 +
 collectors/python.d.plugin/icecast/Makefile.inc | 13 +
 collectors/python.d.plugin/icecast/README.md | 26 +
 .../python.d.plugin/icecast/icecast.chart.py | 97 +
 collectors/python.d.plugin/icecast/icecast.conf | 83 +
 collectors/python.d.plugin/ipfs/Makefile.inc | 13 +
 collectors/python.d.plugin/ipfs/README.md | 25 +
 collectors/python.d.plugin/ipfs/ipfs.chart.py | 140 +
 collectors/python.d.plugin/ipfs/ipfs.conf | 79 +
 collectors/python.d.plugin/isc_dhcpd/Makefile.inc | 13 +
 collectors/python.d.plugin/isc_dhcpd/README.md | 34 +
 .../python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py | 195 +
 .../python.d.plugin/isc_dhcpd/isc_dhcpd.conf | 81 +
 .../linux_power_supply/Makefile.inc | 13 +
 .../python.d.plugin/linux_power_supply/README.md | 67 +
 .../linux_power_supply/linux_power_supply.chart.py | 160 +
 .../linux_power_supply/linux_power_supply.conf | 81 +
 collectors/python.d.plugin/litespeed/Makefile.inc | 13 +
 collectors/python.d.plugin/litespeed/README.md | 47 +
 .../python.d.plugin/litespeed/litespeed.chart.py | 186 +
 .../python.d.plugin/litespeed/litespeed.conf | 74 +
 collectors/python.d.plugin/logind/Makefile.inc | 13 +
 collectors/python.d.plugin/logind/README.md | 54 +
 collectors/python.d.plugin/logind/logind.chart.py | 79 +
 collectors/python.d.plugin/logind/logind.conf | 62 +
 collectors/python.d.plugin/mdstat/Makefile.inc | 13 +
 collectors/python.d.plugin/mdstat/README.md | 26 +
 collectors/python.d.plugin/mdstat/mdstat.chart.py | 205 +
 collectors/python.d.plugin/mdstat/mdstat.conf | 32 +
 collectors/python.d.plugin/megacli/Makefile.inc | 13 +
 collectors/python.d.plugin/megacli/README.md | 48 +
 .../python.d.plugin/megacli/megacli.chart.py | 279 +
 collectors/python.d.plugin/megacli/megacli.conf | 62 +
 collectors/python.d.plugin/memcached/Makefile.inc | 13 +
 collectors/python.d.plugin/memcached/README.md | 69 +
 .../python.d.plugin/memcached/memcached.chart.py | 198 +
 .../python.d.plugin/memcached/memcached.conf | 92 +
 collectors/python.d.plugin/mongodb/Makefile.inc | 13 +
 collectors/python.d.plugin/mongodb/README.md | 141 +
 .../python.d.plugin/mongodb/mongodb.chart.py | 731 ++
 collectors/python.d.plugin/mongodb/mongodb.conf | 84 +
 collectors/python.d.plugin/monit/Makefile.inc | 13 +
 collectors/python.d.plugin/monit/README.md | 33 +
 collectors/python.d.plugin/monit/monit.chart.py | 166 +
 collectors/python.d.plugin/monit/monit.conf | 88 +
 collectors/python.d.plugin/mysql/Makefile.inc | 13 +
 collectors/python.d.plugin/mysql/README.md | 90 +
 collectors/python.d.plugin/mysql/mysql.chart.py | 602 ++
 collectors/python.d.plugin/mysql/mysql.conf | 286 +
 collectors/python.d.plugin/nginx/Makefile.inc | 13 +
 collectors/python.d.plugin/nginx/README.md | 45 +
 collectors/python.d.plugin/nginx/nginx.chart.py | 80 +
 collectors/python.d.plugin/nginx/nginx.conf | 109 +
 collectors/python.d.plugin/nginx_plus/Makefile.inc | 13 +
 collectors/python.d.plugin/nginx_plus/README.md | 125 +
 .../python.d.plugin/nginx_plus/nginx_plus.chart.py | 492 +
 .../python.d.plugin/nginx_plus/nginx_plus.conf | 87 +
 collectors/python.d.plugin/nsd/Makefile.inc | 13 +
 collectors/python.d.plugin/nsd/README.md | 54 +
 collectors/python.d.plugin/nsd/nsd.chart.py | 100 +
 collectors/python.d.plugin/nsd/nsd.conf | 93 +
 collectors/python.d.plugin/ntpd/Makefile.inc | 13 +
 collectors/python.d.plugin/ntpd/README.md | 71 +
 collectors/python.d.plugin/ntpd/ntpd.chart.py | 390 +
 collectors/python.d.plugin/ntpd/ntpd.conf | 91 +
 .../python.d.plugin/ovpn_status_log/Makefile.inc | 13 +
 .../python.d.plugin/ovpn_status_log/README.md | 32 +
 .../ovpn_status_log/ovpn_status_log.chart.py | 129 +
 .../ovpn_status_log/ovpn_status_log.conf | 99 +
 collectors/python.d.plugin/phpfpm/Makefile.inc | 13 +
 collectors/python.d.plugin/phpfpm/README.md | 40 +
 collectors/python.d.plugin/phpfpm/phpfpm.chart.py | 177 +
 collectors/python.d.plugin/phpfpm/phpfpm.conf | 90 +
 collectors/python.d.plugin/portcheck/Makefile.inc | 13 +
 collectors/python.d.plugin/portcheck/README.md | 35 +
 .../python.d.plugin/portcheck/portcheck.chart.py | 161 +
 .../python.d.plugin/portcheck/portcheck.conf | 70 +
 collectors/python.d.plugin/postfix/Makefile.inc | 13 +
 collectors/python.d.plugin/postfix/README.md | 15 +
 .../python.d.plugin/postfix/postfix.chart.py | 53 +
 collectors/python.d.plugin/postfix/postfix.conf | 74 +
 collectors/python.d.plugin/postgres/Makefile.inc | 13 +
 collectors/python.d.plugin/postgres/README.md | 68 +
 .../python.d.plugin/postgres/postgres.chart.py | 823 ++
 collectors/python.d.plugin/postgres/postgres.conf | 124 +
 collectors/python.d.plugin/powerdns/Makefile.inc | 13 +
 collectors/python.d.plugin/powerdns/README.md | 77 +
 .../python.d.plugin/powerdns/powerdns.chart.py | 150 +
 collectors/python.d.plugin/powerdns/powerdns.conf | 78 +
 collectors/python.d.plugin/proxysql/Makefile.inc | 13 +
 collectors/python.d.plugin/proxysql/README.md | 62 +
 .../python.d.plugin/proxysql/proxysql.chart.py | 356 +
 collectors/python.d.plugin/proxysql/proxysql.conf | 118 +
 collectors/python.d.plugin/puppet/Makefile.inc | 13 +
 collectors/python.d.plugin/puppet/README.md | 48 +
 collectors/python.d.plugin/puppet/puppet.chart.py | 121 +
 collectors/python.d.plugin/puppet/puppet.conf | 98 +
 collectors/python.d.plugin/python.d.conf | 97 +
 collectors/python.d.plugin/python.d.plugin | 427 +
 collectors/python.d.plugin/python.d.plugin.in | 427 +
 .../python.d.plugin/python_modules/__init__.py | 0
 .../bases/FrameworkServices/ExecutableService.py | 89 +
 .../bases/FrameworkServices/LogService.py | 80 +
 .../bases/FrameworkServices/MySQLService.py | 159 +
 .../bases/FrameworkServices/SimpleService.py | 261 +
 .../bases/FrameworkServices/SocketService.py | 309 +
 .../bases/FrameworkServices/UrlService.py | 146 +
 .../bases/FrameworkServices/__init__.py | 0
 .../python_modules/bases/__init__.py | 0
 .../python.d.plugin/python_modules/bases/charts.py | 394 +
 .../python_modules/bases/collection.py | 145 +
 .../python_modules/bases/loaders.py | 83 +
 .../python_modules/bases/loggers.py | 206 +
 .../python_modules/pyyaml2/__init__.py | 316 +
 .../python_modules/pyyaml2/composer.py | 140 +
 .../python_modules/pyyaml2/constructor.py | 676 ++
 .../python_modules/pyyaml2/cyaml.py | 86 +
 .../python_modules/pyyaml2/dumper.py | 63 +
 .../python_modules/pyyaml2/emitter.py | 1141 +++
 .../python_modules/pyyaml2/error.py | 76 +
 .../python_modules/pyyaml2/events.py | 87 +
 .../python_modules/pyyaml2/loader.py | 41 +
 .../python_modules/pyyaml2/nodes.py | 50 +
 .../python_modules/pyyaml2/parser.py | 590 ++
 .../python_modules/pyyaml2/reader.py | 191 +
 .../python_modules/pyyaml2/representer.py | 485 +
 .../python_modules/pyyaml2/resolver.py | 225 +
 .../python_modules/pyyaml2/scanner.py | 1458 +++
 .../python_modules/pyyaml2/serializer.py | 112 +
 .../python_modules/pyyaml2/tokens.py | 105 +
 .../python_modules/pyyaml3/__init__.py | 313 +
 .../python_modules/pyyaml3/composer.py | 140 +
 .../python_modules/pyyaml3/constructor.py | 687 ++
 .../python_modules/pyyaml3/cyaml.py | 86 +
 .../python_modules/pyyaml3/dumper.py | 63 +
 .../python_modules/pyyaml3/emitter.py | 1138 +++
 .../python_modules/pyyaml3/error.py | 76 +
 .../python_modules/pyyaml3/events.py | 87 +
 .../python_modules/pyyaml3/loader.py | 41 +
 .../python_modules/pyyaml3/nodes.py | 50 +
 .../python_modules/pyyaml3/parser.py | 590 ++
 .../python_modules/pyyaml3/reader.py | 193 +
 .../python_modules/pyyaml3/representer.py | 375 +
 .../python_modules/pyyaml3/resolver.py | 225 +
 .../python_modules/pyyaml3/scanner.py | 1449 +++
 .../python_modules/pyyaml3/serializer.py | 112 +
 .../python_modules/pyyaml3/tokens.py | 105 +
 .../python_modules/third_party/__init__.py | 0
 .../python_modules/third_party/boinc_client.py | 515 ++
 .../python_modules/third_party/lm_sensors.py | 258 +
 .../python_modules/third_party/mcrcon.py | 74 +
 .../python_modules/third_party/monotonic.py | 171 +
 .../python_modules/third_party/ordereddict.py | 110 +
 .../python_modules/urllib3/__init__.py | 98 +
 .../python_modules/urllib3/_collections.py | 315 +
 .../python_modules/urllib3/connection.py | 374 +
 .../python_modules/urllib3/connectionpool.py | 900 ++
 .../python_modules/urllib3/contrib/__init__.py | 0
 .../urllib3/contrib/_securetransport/__init__.py | 0
 .../urllib3/contrib/_securetransport/bindings.py | 591 ++
 .../urllib3/contrib/_securetransport/low_level.py | 344 +
 .../python_modules/urllib3/contrib/appengine.py | 297 +
 .../python_modules/urllib3/contrib/ntlmpool.py | 113 +
 .../python_modules/urllib3/contrib/pyopenssl.py | 458 +
 .../urllib3/contrib/securetransport.py | 808 ++
 .../python_modules/urllib3/contrib/socks.py | 189 +
 .../python_modules/urllib3/exceptions.py | 247 +
 .../python_modules/urllib3/fields.py | 179 +
 .../python_modules/urllib3/filepost.py | 95 +
 .../python_modules/urllib3/packages/__init__.py | 5 +
 .../urllib3/packages/backports/__init__.py | 0
 .../urllib3/packages/backports/makefile.py | 54 +
 .../urllib3/packages/ordered_dict.py | 260 +
 .../python_modules/urllib3/packages/six.py | 852 ++
 .../packages/ssl_match_hostname/__init__.py | 20 +
 .../packages/ssl_match_hostname/_implementation.py | 156 +
 .../python_modules/urllib3/poolmanager.py | 441 +
 .../python_modules/urllib3/request.py | 149 +
 .../python_modules/urllib3/response.py | 623 ++
 .../python_modules/urllib3/util/__init__.py | 55 +
 .../python_modules/urllib3/util/connection.py | 131 +
 .../python_modules/urllib3/util/request.py | 119 +
 .../python_modules/urllib3/util/response.py | 82 +
 .../python_modules/urllib3/util/retry.py | 402 +
 .../python_modules/urllib3/util/selectors.py | 582 ++
 .../python_modules/urllib3/util/ssl_.py | 338 +
 .../python_modules/urllib3/util/timeout.py | 243 +
 .../python_modules/urllib3/util/url.py | 231 +
 .../python_modules/urllib3/util/wait.py | 41 +
 collectors/python.d.plugin/rabbitmq/Makefile.inc | 13 +
 collectors/python.d.plugin/rabbitmq/README.md | 56 +
 .../python.d.plugin/rabbitmq/rabbitmq.chart.py | 207 +
 collectors/python.d.plugin/rabbitmq/rabbitmq.conf | 82 +
 collectors/python.d.plugin/redis/Makefile.inc | 13 +
 collectors/python.d.plugin/redis/README.md | 42 +
 collectors/python.d.plugin/redis/redis.chart.py | 261 +
 collectors/python.d.plugin/redis/redis.conf | 112 +
 collectors/python.d.plugin/rethinkdbs/Makefile.inc | 13 +
 collectors/python.d.plugin/rethinkdbs/README.md | 34 +
 .../python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 235 +
 .../python.d.plugin/rethinkdbs/rethinkdbs.conf | 78 +
 collectors/python.d.plugin/retroshare/Makefile.inc | 13 +
 collectors/python.d.plugin/retroshare/README.md | 1 +
 .../python.d.plugin/retroshare/retroshare.chart.py | 80 +
 .../python.d.plugin/retroshare/retroshare.conf | 74 +
 collectors/python.d.plugin/samba/Makefile.inc | 13 +
 collectors/python.d.plugin/samba/README.md | 67 +
 collectors/python.d.plugin/samba/samba.chart.py | 138 +
 collectors/python.d.plugin/samba/samba.conf | 62 +
 collectors/python.d.plugin/sensors/Makefile.inc | 13 +
 collectors/python.d.plugin/sensors/README.md | 17 +
 .../python.d.plugin/sensors/sensors.chart.py | 146 +
 collectors/python.d.plugin/sensors/sensors.conf | 63 +
 collectors/python.d.plugin/smartd_log/Makefile.inc | 13 +
 collectors/python.d.plugin/smartd_log/README.md | 38 +
 .../python.d.plugin/smartd_log/smartd_log.chart.py | 353 +
 .../python.d.plugin/smartd_log/smartd_log.conf | 90 +
 collectors/python.d.plugin/spigotmc/Makefile.inc | 13 +
 collectors/python.d.plugin/spigotmc/README.md | 22 +
 .../python.d.plugin/spigotmc/spigotmc.chart.py | 120 +
 collectors/python.d.plugin/spigotmc/spigotmc.conf | 68 +
 collectors/python.d.plugin/springboot/Makefile.inc | 13 +
 collectors/python.d.plugin/springboot/README.md | 129 +
 .../python.d.plugin/springboot/springboot.chart.py | 159 +
 .../python.d.plugin/springboot/springboot.conf | 120 +
 collectors/python.d.plugin/squid/Makefile.inc | 13 +
 collectors/python.d.plugin/squid/README.md | 38 +
 collectors/python.d.plugin/squid/squid.chart.py | 125 +
 collectors/python.d.plugin/squid/squid.conf | 169 +
 collectors/python.d.plugin/tomcat/Makefile.inc | 13 +
 collectors/python.d.plugin/tomcat/README.md | 33 +
 collectors/python.d.plugin/tomcat/tomcat.chart.py | 163 +
 collectors/python.d.plugin/tomcat/tomcat.conf | 91 +
 collectors/python.d.plugin/traefik/Makefile.inc | 13 +
 collectors/python.d.plugin/traefik/README.md | 54 +
 .../python.d.plugin/traefik/traefik.chart.py | 195 +
 collectors/python.d.plugin/traefik/traefik.conf | 79 +
 collectors/python.d.plugin/unbound/Makefile.inc | 13 +
 collectors/python.d.plugin/unbound/README.md | 76 +
 .../python.d.plugin/unbound/unbound.chart.py | 275 +
 collectors/python.d.plugin/unbound/unbound.conf | 87 +
 collectors/python.d.plugin/uwsgi/Makefile.inc | 13 +
 collectors/python.d.plugin/uwsgi/README.md | 37 +
 collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 183 +
 collectors/python.d.plugin/uwsgi/uwsgi.conf | 94 +
 collectors/python.d.plugin/varnish/Makefile.inc | 13 +
 collectors/python.d.plugin/varnish/README.md | 69 +
 .../python.d.plugin/varnish/varnish.chart.py | 252 +
 collectors/python.d.plugin/varnish/varnish.conf | 64 +
 collectors/python.d.plugin/w1sensor/Makefile.inc | 13 +
 collectors/python.d.plugin/w1sensor/README.md | 13 +
 .../python.d.plugin/w1sensor/w1sensor.chart.py | 93 +
 collectors/python.d.plugin/w1sensor/w1sensor.conf | 74 +
 collectors/python.d.plugin/web_log/Makefile.inc | 13 +
 collectors/python.d.plugin/web_log/README.md | 64 +
 .../python.d.plugin/web_log/web_log.chart.py | 1194 +++
 collectors/python.d.plugin/web_log/web_log.conf | 206 +
 collectors/statsd.plugin/Makefile.am | 20 +
 collectors/statsd.plugin/Makefile.in | 556 ++
 collectors/statsd.plugin/README.md | 523 ++
 collectors/statsd.plugin/example.conf | 64 +
 collectors/statsd.plugin/statsd.c | 2556 ++++++
 collectors/statsd.plugin/statsd.h | 25 +
 collectors/tc.plugin/Makefile.am | 20 +
 collectors/tc.plugin/Makefile.in | 562 ++
 collectors/tc.plugin/README.md | 183 +
 collectors/tc.plugin/plugin_tc.c | 1168 +++
 collectors/tc.plugin/plugin_tc.h | 31 +
 collectors/tc.plugin/tc-qos-helper.sh | 315 +
 collectors/tc.plugin/tc-qos-helper.sh.in | 315 +
 conf.d/Makefile.am | 160 -
 conf.d/Makefile.in | 788 --
 conf.d/apps_groups.conf | 282 -
 conf.d/charts.d.conf | 63 -
 conf.d/charts.d/ap.conf | 23 -
 conf.d/charts.d/apache.conf | 30 -
 conf.d/charts.d/apcupsd.conf | 25 -
 conf.d/charts.d/cpu_apps.conf | 19 -
 conf.d/charts.d/cpufreq.conf | 24 -
 conf.d/charts.d/example.conf | 21 -
 conf.d/charts.d/exim.conf | 24 -
 conf.d/charts.d/hddtemp.conf | 23 -
 conf.d/charts.d/libreswan.conf | 29 -
 conf.d/charts.d/load_average.conf | 22 -
 conf.d/charts.d/mem_apps.conf | 19 -
 conf.d/charts.d/mysql.conf | 23 -
 conf.d/charts.d/nginx.conf | 23 -
 conf.d/charts.d/nut.conf | 28 -
 conf.d/charts.d/opensips.conf | 21 -
 conf.d/charts.d/phpfpm.conf | 27 -
 conf.d/charts.d/postfix.conf | 25 -
 conf.d/charts.d/sensors.conf | 32 -
 conf.d/charts.d/squid.conf | 26 -
 conf.d/charts.d/tomcat.conf | 38 -
 conf.d/fping.conf | 44 -
 conf.d/health.d/apache.conf | 14 -
 conf.d/health.d/backend.conf | 45 -
 conf.d/health.d/beanstalkd.conf | 36 -
 conf.d/health.d/bind_rndc.conf | 9 -
 conf.d/health.d/btrfs.conf | 57 -
 conf.d/health.d/ceph.conf | 13 -
 conf.d/health.d/couchdb.conf | 13 -
 conf.d/health.d/cpu.conf | 55 -
 conf.d/health.d/disks.conf | 167 -
 conf.d/health.d/elasticsearch.conf | 9 -
 conf.d/health.d/entropy.conf | 16 -
 conf.d/health.d/fping.conf | 53 -
 conf.d/health.d/fronius.conf | 11 -
 conf.d/health.d/haproxy.conf | 27 -
 conf.d/health.d/httpcheck.conf | 99 -
 conf.d/health.d/ipc.conf | 28 -
 conf.d/health.d/ipfs.conf | 11 -
 conf.d/health.d/ipmi.conf | 20 -
 conf.d/health.d/isc_dhcpd.conf | 10 -
 conf.d/health.d/lighttpd.conf | 14 -
 conf.d/health.d/mdstat.conf | 18 -
 conf.d/health.d/memcached.conf | 52 -
 conf.d/health.d/memory.conf | 38 -
 conf.d/health.d/mongodb.conf | 13 -
 conf.d/health.d/mysql.conf | 85 -
 conf.d/health.d/named.conf | 14 -
 conf.d/health.d/net.conf | 122 -
 conf.d/health.d/netfilter.conf | 29 -
 conf.d/health.d/nginx.conf | 14 -
 conf.d/health.d/nginx_plus.conf | 14 -
 conf.d/health.d/portcheck.conf | 48 -
 conf.d/health.d/postgres.conf | 13 -
 conf.d/health.d/qos.conf | 18 -
 conf.d/health.d/ram.conf | 64 -
 conf.d/health.d/redis.conf | 34 -
 conf.d/health.d/retroshare.conf | 25 -
 conf.d/health.d/softnet.conf | 40 -
 conf.d/health.d/squid.conf | 14 -
 conf.d/health.d/stiebeleltron.conf | 11 -
 conf.d/health.d/swap.conf | 43 -
 conf.d/health.d/tcp_conn.conf | 19 -
 conf.d/health.d/tcp_listen.conf | 27 -
 conf.d/health.d/tcp_mem.conf | 20 -
 conf.d/health.d/tcp_orphans.conf | 21 -
 conf.d/health.d/tcp_resets.conf | 67 -
 conf.d/health.d/udp_errors.conf | 49 -
 conf.d/health.d/varnish.conf | 9 -
 conf.d/health.d/web_log.conf | 163 -
 conf.d/health.d/zfs.conf | 10 -
 conf.d/health_alarm_notify.conf | 708 --
 conf.d/health_email_recipients.conf | 2 -
 conf.d/node.d.conf | 39 -
 conf.d/node.d/README.md | 7 -
 conf.d/node.d/fronius.conf.md | 67 -
 conf.d/node.d/named.conf.md | 344 -
 conf.d/node.d/sma_webbox.conf.md | 25 -
 conf.d/node.d/snmp.conf.md | 359 -
 conf.d/node.d/stiebeleltron.conf.md | 453 -
 conf.d/python.d.conf | 79 -
 conf.d/python.d/apache.conf | 87 -
 conf.d/python.d/beanstalk.conf | 80 -
 conf.d/python.d/bind_rndc.conf | 112 -
 conf.d/python.d/ceph.conf | 75 -
 conf.d/python.d/chrony.conf | 79 -
 conf.d/python.d/couchdb.conf | 91 -
 conf.d/python.d/cpufreq.conf | 43 -
 conf.d/python.d/dns_query_time.conf | 71 -
 conf.d/python.d/dnsdist.conf | 85 -
 conf.d/python.d/dovecot.conf | 96 -
 conf.d/python.d/elasticsearch.conf | 83 -
 conf.d/python.d/example.conf | 70 -
 conf.d/python.d/exim.conf | 93 -
 conf.d/python.d/fail2ban.conf | 70 -
 conf.d/python.d/freeradius.conf | 82 -
 conf.d/python.d/go_expvar.conf | 110 -
 conf.d/python.d/haproxy.conf | 85 -
 conf.d/python.d/hddtemp.conf | 97 -
 conf.d/python.d/httpcheck.conf | 99 -
 conf.d/python.d/icecast.conf | 83 -
 conf.d/python.d/ipfs.conf | 74 -
 conf.d/python.d/isc_dhcpd.conf | 81 -
 conf.d/python.d/mdstat.conf | 32 -
 conf.d/python.d/memcached.conf | 92 -
 conf.d/python.d/mongodb.conf | 84 -
 conf.d/python.d/mysql.conf | 286 -
 conf.d/python.d/nginx.conf | 109 -
 conf.d/python.d/nginx_plus.conf | 87 -
 conf.d/python.d/nsd.conf | 93 -
 conf.d/python.d/ntpd.conf | 91 -
 conf.d/python.d/ovpn_status_log.conf | 95 -
 conf.d/python.d/phpfpm.conf | 90 -
 conf.d/python.d/portcheck.conf | 70 -
 conf.d/python.d/postfix.conf | 74 -
 conf.d/python.d/postgres.conf | 124 -
 conf.d/python.d/powerdns.conf | 78 -
 conf.d/python.d/rabbitmq.conf | 82 -
 conf.d/python.d/redis.conf | 112 -
 conf.d/python.d/retroshare.conf | 74 -
 conf.d/python.d/samba.conf | 62 -
 conf.d/python.d/sensors.conf | 63 -
 conf.d/python.d/smartd_log.conf | 90 -
 conf.d/python.d/springboot.conf | 120 -
 conf.d/python.d/squid.conf | 169 -
 conf.d/python.d/tomcat.conf | 91 -
 conf.d/python.d/traefik.conf | 79 -
 conf.d/python.d/varnish.conf | 64 -
 conf.d/python.d/web_log.conf | 195 -
 conf.d/statsd.d/example.conf | 65 -
 conf.d/stream.conf | 179 -
 config.h.in | 24 +
 configs.signatures | 78 +
 configure | 217 +-
 configure.ac | 159 +-
 contrib/Makefile.am | 5 +-
 contrib/Makefile.in | 23 +-
 contrib/debian/changelog | 3 +
 contrib/debian/compat | 1 +
 contrib/debian/control | 25 +
 contrib/debian/control.wheezy | 25 +
 contrib/debian/copyright | 10 +
 contrib/debian/netdata.conf | 16 +
 contrib/debian/netdata.default | 5 +
 contrib/debian/netdata.docs | 1 +
 contrib/debian/netdata.init | 56 +
 contrib/debian/netdata.install | 1 +
 contrib/debian/netdata.lintian-overrides | 16 +
 contrib/debian/netdata.postinst.in | 41 +
 contrib/debian/netdata.postrm | 43 +
 contrib/debian/netdata.service | 14 +
 contrib/debian/rules | 87 +
 contrib/debian/source/format | 1 +
 contrib/nc-backend.sh | 151 -
 contrib/rhel/build-netdata-rpm.sh | 7 +-
 coverity-scan.sh | 62 +-
 cppcheck.sh | 9 +-
 daemon/Makefile.am | 8 +
 daemon/Makefile.in | 464 +
 daemon/README.md | 445 +
 daemon/common.c | 16 +
 daemon/common.h | 82 +
 daemon/daemon.c | 452 +
 daemon/daemon.h | 14 +
 daemon/global_statistics.c | 533 ++
 daemon/global_statistics.h | 23 +
 daemon/main.c | 1098 +++
 daemon/main.h | 47 +
 daemon/signals.c | 171 +
 daemon/signals.h | 12 +
 daemon/unit_test.c | 1412 +++
 daemon/unit_test.h | 12 +
 database/Makefile.am | 8 +
 database/Makefile.in | 464 +
 database/README.md | 206 +
 database/rrd.c | 150 +
 database/rrd.h | 886 ++
 database/rrdcalc.c | 429 +
 database/rrdcalc.h | 136 +
 database/rrdcalctemplate.c | 71 +
 database/rrdcalctemplate.h | 68 +
 database/rrddim.c | 397 +
 database/rrddimvar.c | 217 +
 database/rrddimvar.h | 56 +
 database/rrdfamily.c | 61 +
 database/rrdhost.c | 743 ++
 database/rrdset.c | 1621 ++++
 database/rrdsetvar.c | 189 +
 database/rrdsetvar.h | 44 +
 database/rrdvar.c | 285 +
 database/rrdvar.h | 66 +
 diagrams/Makefile.am | 1 +
 diagrams/Makefile.in | 22 +-
 diagrams/build.sh | 1 +
 diagrams/netdata-overview.xml | 2 +-
 docker-build.sh | 36 -
 docker/Dockerfile | 104 +
 docker/build.sh | 83 +
 docker/run.sh | 11 +
 health/Makefile.am | 82 +
 health/Makefile.in | 800 ++
 health/README.md | 657 ++
 health/health.c | 750 ++
 health/health.d/adaptec_raid.conf | 24 +
 health/health.d/apache.conf | 14 +
 health/health.d/apcupsd.conf | 40 +
 health/health.d/backend.conf | 45 +
 health/health.d/bcache.conf | 22 +
 health/health.d/beanstalkd.conf | 36 +
 health/health.d/bind_rndc.conf | 9 +
 health/health.d/boinc.conf | 62 +
 health/health.d/btrfs.conf | 57 +
 health/health.d/ceph.conf | 13 +
 health/health.d/couchdb.conf | 13 +
 health/health.d/cpu.conf | 55 +
 health/health.d/disks.conf | 167 +
 health/health.d/dockerd.conf | 8 +
 health/health.d/elasticsearch.conf | 9 +
 health/health.d/entropy.conf | 16 +
 health/health.d/fping.conf | 53 +
 health/health.d/fronius.conf | 11 +
 health/health.d/haproxy.conf | 27 +
 health/health.d/httpcheck.conf | 99 +
 health/health.d/ipc.conf | 28 +
 health/health.d/ipfs.conf | 11 +
 health/health.d/ipmi.conf | 20 +
 health/health.d/isc_dhcpd.conf | 10 +
 health/health.d/lighttpd.conf | 14 +
 health/health.d/linux_power_supply.conf | 12 +
 health/health.d/load.conf | 56 +
 health/health.d/mdstat.conf | 27 +
 health/health.d/megacli.conf | 48 +
 health/health.d/memcached.conf | 52 +
 health/health.d/memory.conf | 38 +
 health/health.d/mongodb.conf | 13 +
 health/health.d/mysql.conf | 100 +
 health/health.d/named.conf | 14 +
 health/health.d/net.conf | 155 +
 health/health.d/netfilter.conf | 29 +
 health/health.d/nginx.conf | 14 +
 health/health.d/nginx_plus.conf | 14 +
 health/health.d/portcheck.conf | 48 +
 health/health.d/postgres.conf | 13 +
 health/health.d/qos.conf | 18 +
 health/health.d/ram.conf | 64 +
 health/health.d/redis.conf | 34 +
 health/health.d/retroshare.conf | 25 +
 health/health.d/softnet.conf | 40 +
 health/health.d/squid.conf | 14 +
 health/health.d/stiebeleltron.conf | 11 +
 health/health.d/swap.conf | 43 +
 health/health.d/tcp_conn.conf | 19 +
 health/health.d/tcp_listen.conf | 82 +
 health/health.d/tcp_mem.conf | 20 +
 health/health.d/tcp_orphans.conf | 21 +
 health/health.d/tcp_resets.conf | 67 +
 health/health.d/udp_errors.conf | 49 +
 health/health.d/varnish.conf | 9 +
 health/health.d/web_log.conf | 163 +
 health/health.d/zfs.conf | 10 +
 health/health.h | 76 +
 health/health_config.c | 858 ++
 health/health_json.c | 261 +
 health/health_log.c | 464 +
 health/notifications/Makefile.am | 45 +
 health/notifications/Makefile.in | 754 ++
 health/notifications/README.md | 60 +
 health/notifications/alarm-email.sh | 7 +
 health/notifications/alarm-notify.sh | 2378 +++++
 health/notifications/alarm-notify.sh.in | 2378 +++++
 health/notifications/alarm-test.sh | 12 +
 health/notifications/alerta/Makefile.inc | 12 +
 health/notifications/alerta/README.md | 236 +
 health/notifications/awssns/Makefile.inc | 12 +
 health/notifications/awssns/README.md | 31 +
 health/notifications/discord/Makefile.inc | 12 +
 health/notifications/discord/README.md | 44 +
 health/notifications/email/Makefile.inc | 12 +
 health/notifications/email/README.md | 31 +
 health/notifications/flock/Makefile.inc | 12 +
 health/notifications/flock/README.md | 31 +
 health/notifications/health_alarm_notify.conf | 961 ++
 health/notifications/health_email_recipients.conf | 2 +
 health/notifications/irc/Makefile.inc | 12 +
 health/notifications/irc/README.md | 73 +
 health/notifications/kavenegar/Makefile.inc | 12 +
 health/notifications/kavenegar/README.md | 39 +
 health/notifications/messagebird/Makefile.inc | 12 +
 health/notifications/messagebird/README.md | 38 +
 health/notifications/pagerduty/Makefile.inc | 12 +
 health/notifications/pagerduty/README.md | 34 +
 health/notifications/pushbullet/Makefile.inc | 12 +
 health/notifications/pushbullet/README.md | 42 +
 health/notifications/pushover/Makefile.inc | 12 +
 health/notifications/pushover/README.md | 17 +
 health/notifications/rocketchat/Makefile.inc | 12 +
 health/notifications/rocketchat/README.md | 46 +
 health/notifications/slack/Makefile.inc | 12 +
 health/notifications/slack/README.md | 45 +
 health/notifications/syslog/Makefile.inc | 12 +
 health/notifications/syslog/README.md | 23 +
 health/notifications/telegram/Makefile.inc | 12 +
 health/notifications/telegram/README.md | 19 +
 health/notifications/twilio/Makefile.inc | 12 +
 health/notifications/twilio/README.md | 40 +
 health/notifications/web/Makefile.inc | 12 +
 health/notifications/web/README.md | 6 +
 installer/.keep | 0
 installer/functions.sh | 85 +-
 kickstart-static64.sh | 12 +-
 kickstart.sh | 11 +-
 libnetdata/Makefile.am | 28 +
 libnetdata/Makefile.in | 664 ++
 libnetdata/README.md | 6 +
 libnetdata/adaptive_resortable_list/Makefile.am | 9 +
 libnetdata/adaptive_resortable_list/Makefile.in | 464 +
 libnetdata/adaptive_resortable_list/README.md | 93 +
 .../adaptive_resortable_list.c | 280 +
 .../adaptive_resortable_list.h | 138 +
 libnetdata/avl/Makefile.am | 9 +
 libnetdata/avl/Makefile.in | 464 +
 libnetdata/avl/README.md | 11 +
 libnetdata/avl/avl.c | 404 +
 libnetdata/avl/avl.h | 89 +
 libnetdata/buffer/Makefile.am | 9 +
 libnetdata/buffer/Makefile.in | 464 +
 libnetdata/buffer/README.md | 11 +
 libnetdata/buffer/buffer.c | 401 +
 libnetdata/buffer/buffer.h | 85 +
 libnetdata/clocks/Makefile.am | 9 +
 libnetdata/clocks/Makefile.in | 464 +
 libnetdata/clocks/README.md | 0
 libnetdata/clocks/clocks.c | 204 +
 libnetdata/clocks/clocks.h | 133 +
 libnetdata/config/Makefile.am | 9 +
 libnetdata/config/Makefile.in | 464 +
 libnetdata/config/README.md | 46 +
 libnetdata/config/appconfig.c | 587 ++
 libnetdata/config/appconfig.h | 135 +
 libnetdata/dictionary/Makefile.am | 9 +
 libnetdata/dictionary/Makefile.in | 464 +
 libnetdata/dictionary/README.md | 0
 libnetdata/dictionary/dictionary.c | 294 +
 libnetdata/dictionary/dictionary.h | 48 +
 libnetdata/eval/Makefile.am | 9 +
 libnetdata/eval/Makefile.in | 464 +
 libnetdata/eval/README.md | 0
 libnetdata/eval/eval.c | 1190 +++
 libnetdata/eval/eval.h | 88 +
 libnetdata/inlined.h | 298 +
 libnetdata/libnetdata.c | 1447 +++
 libnetdata/libnetdata.h | 308 +
 libnetdata/locks/Makefile.am | 9 +
 libnetdata/locks/Makefile.in | 464 +
 libnetdata/locks/README.md | 0
 libnetdata/locks/locks.c | 332 +
 libnetdata/locks/locks.h | 75 +
 libnetdata/log/Makefile.am | 9 +
 libnetdata/log/Makefile.in | 464 +
 libnetdata/log/README.md | 0
 libnetdata/log/log.c | 436 +
 libnetdata/log/log.h | 94 +
 libnetdata/os.c | 218 +
 libnetdata/os.h | 70 +
 libnetdata/popen/Makefile.am | 9 +
 libnetdata/popen/Makefile.in | 464 +
 libnetdata/popen/README.md | 0
 libnetdata/popen/popen.c | 206 +
 libnetdata/popen/popen.h | 18 +
 libnetdata/procfile/Makefile.am | 9 +
 libnetdata/procfile/Makefile.in | 464 +
 libnetdata/procfile/README.md | 61 +
 libnetdata/procfile/procfile.c | 472 +
 libnetdata/procfile/procfile.h | 106 +
 libnetdata/simple_pattern/Makefile.am | 9 +
 libnetdata/simple_pattern/Makefile.in | 464 +
 libnetdata/simple_pattern/README.md | 36 +
 libnetdata/simple_pattern/simple_pattern.c | 262 +
 libnetdata/simple_pattern/simple_pattern.h | 33 +
 libnetdata/socket/Makefile.am | 9 +
 libnetdata/socket/Makefile.in | 464 +
 libnetdata/socket/README.md | 0
 libnetdata/socket/socket.c | 1532 ++++
 libnetdata/socket/socket.h | 167 +
 libnetdata/statistical/Makefile.am | 9 +
 libnetdata/statistical/Makefile.in | 464 +
 libnetdata/statistical/README.md | 0
 libnetdata/statistical/statistical.c | 452 +
 libnetdata/statistical/statistical.h | 30 +
 libnetdata/storage_number/Makefile.am | 9 +
 libnetdata/storage_number/Makefile.in | 464 +
 libnetdata/storage_number/README.md | 10 +
 libnetdata/storage_number/storage_number.c | 233 +
 libnetdata/storage_number/storage_number.h | 92 +
 libnetdata/threads/Makefile.am | 9 +
 libnetdata/threads/Makefile.in | 464 +
 libnetdata/threads/README.md | 0
 libnetdata/threads/threads.c | 183 +
 libnetdata/threads/threads.h | 37 +
 libnetdata/url/Makefile.am | 9 +
 libnetdata/url/Makefile.in | 464 +
 libnetdata/url/README.md | 0
 libnetdata/url/url.c | 81 +
 libnetdata/url/url.h | 28 +
 m4/ax_c___atomic.m4 | 36 -
 m4/ax_c__generic.m4 | 28 -
 m4/ax_c_lto.m4 | 21 -
 m4/ax_c_mallinfo.m4 | 24 -
 m4/ax_c_mallopt.m4 | 20 -
 m4/ax_c_statement_expressions.m4 | 23 -
 m4/ax_check_compile_flag.m4 | 74 -
 m4/ax_check_enable_debug.m4 | 124 -
 m4/ax_gcc_func_attribute.m4 | 226 -
 m4/ax_pthread.m4 | 332 -
 m4/jemalloc.m4 | 88 -
 m4/tcmalloc.m4 | 58 -
 makeself/Makefile.am | 5 +-
 makeself/Makefile.in | 26 +-
 makeself/build-x86_64-static.sh | 1 +
 makeself/build.sh | 3 +-
 makeself/functions.sh | 3 +-
 makeself/install-alpine-packages.sh | 1 +
 makeself/install-or-update.sh | 98 +-
 makeself/jobs/10-prepare-destination.install.sh | 1 +
 makeself/jobs/50-bash-4.4.18.install.sh | 54 +
 makeself/jobs/50-bash-4.4.install.sh | 50 -
 makeself/jobs/50-curl-7.53.1.install.sh | 33 -
 makeself/jobs/50-curl-7.60.0.install.sh | 34 +
 makeself/jobs/50-fping-4.0.install.sh | 1 +
 makeself/jobs/70-netdata-git.install.sh | 8 +-
 makeself/jobs/99-makeself.install.sh | 29 +-
 makeself/makeself-header.sh | 1 +
 makeself/makeself-help-header.txt | 4 +-
 makeself/makeself-license.txt | 4 +-
 makeself/makeself.sh | 1 +
 makeself/post-installer.sh | 1 +
 makeself/run-all-jobs.sh | 1 +
 netdata-installer.sh | 426 +-
 netdata.cppcheck | 1 -
 netdata.spec | 99 +-
 netdata.spec.in | 95 +-
 node.d/Makefile.am | 28 -
 node.d/Makefile.in | 583 --
 node.d/README.md | 118 -
 node.d/fronius.node.js | 399 -
 node.d/named.node.js | 609 --
 node.d/node_modules/asn1-ber.js | 6 -
 node.d/node_modules/extend.js | 87 -
 node.d/node_modules/lib/ber/errors.js | 9 -
 node.d/node_modules/lib/ber/index.js | 17 -
 node.d/node_modules/lib/ber/reader.js | 269 -
 node.d/node_modules/lib/ber/types.js | 34 -
 node.d/node_modules/lib/ber/writer.js | 317 -
 node.d/node_modules/net-snmp.js | 1457 ---
 node.d/node_modules/netdata.js | 654 --
 node.d/node_modules/pixl-xml.js | 606 --
 node.d/sma_webbox.node.js | 237 -
 node.d/snmp.node.js | 513 --
 node.d/stiebeleltron.node.js | 196 -
 plugins.d/Makefile.am | 23 -
 plugins.d/Makefile.in | 567 --
 plugins.d/README.md | 236 -
 plugins.d/alarm-email.sh | 6 -
 plugins.d/alarm-notify.sh | 1919 ----
 plugins.d/alarm-test.sh | 12 -
 plugins.d/cgroup-name.sh | 189 -
 plugins.d/cgroup-network-helper.sh | 251 -
 plugins.d/charts.d.dryrun-helper.sh | 73 -
 plugins.d/charts.d.plugin | 713 --
 plugins.d/fping.plugin | 188 -
 plugins.d/loopsleepms.sh.inc | 189 -
 plugins.d/node.d.plugin | 294 -
 plugins.d/python.d.plugin | 382 -
 plugins.d/tc-qos-helper.sh | 303 -
 python.d/Makefile.am | 204 -
 python.d/Makefile.in | 1107 ---
 python.d/README.md | 2363 -----
 python.d/apache.chart.py | 129 -
 python.d/beanstalk.chart.py | 250 -
 python.d/bind_rndc.chart.py | 243 -
 python.d/ceph.chart.py | 313 -
 python.d/chrony.chart.py | 105 -
 python.d/couchdb.chart.py | 410 -
 python.d/cpufreq.chart.py | 113 -
 python.d/cpuidle.chart.py | 148 -
 python.d/dns_query_time.chart.py | 134 -
 python.d/dnsdist.chart.py | 101 -
 python.d/dovecot.chart.py | 126 -
 python.d/elasticsearch.chart.py | 554 --
 python.d/example.chart.py | 47 -
 python.d/exim.chart.py | 39 -
 python.d/fail2ban.chart.py | 213 -
 python.d/freeradius.chart.py | 125 -
 python.d/go_expvar.chart.py | 237 -
 python.d/haproxy.chart.py | 339 -
 python.d/hddtemp.chart.py | 110 -
 python.d/httpcheck.chart.py | 117 -
 python.d/icecast.chart.py | 92 -
 python.d/ipfs.chart.py | 124 -
 python.d/isc_dhcpd.chart.py | 193 -
 python.d/mdstat.chart.py | 189 -
 python.d/memcached.chart.py | 183 -
 python.d/mongodb.chart.py | 673 --
 python.d/mysql.chart.py | 491 -
 python.d/nginx.chart.py | 75 -
 python.d/nginx_plus.chart.py | 491 -
 python.d/nsd.chart.py | 93 -
 python.d/ntpd.chart.py | 380 -
 python.d/ovpn_status_log.chart.py | 118 -
 python.d/phpfpm.chart.py | 169 -
 python.d/portcheck.chart.py | 159 -
 python.d/postfix.chart.py | 50 -
 python.d/postgres.chart.py | 666 --
 python.d/powerdns.chart.py | 58 -
 python.d/python-modules-installer.sh | 158 -
 python.d/python_modules/__init__.py | 1 -
 python.d/python_modules/base.py | 9 -
 .../bases/FrameworkServices/ExecutableService.py | 88 -
 .../bases/FrameworkServices/LogService.py | 78 -
 .../bases/FrameworkServices/MySQLService.py | 158 -
 .../bases/FrameworkServices/SimpleService.py | 262 -
 .../bases/FrameworkServices/SocketService.py | 261 -
 .../bases/FrameworkServices/UrlService.py | 126 -
 .../bases/FrameworkServices/__init__.py | 0
 python.d/python_modules/bases/__init__.py | 0
 python.d/python_modules/bases/charts.py | 382 -
 python.d/python_modules/bases/collection.py | 144 -
 python.d/python_modules/bases/loaders.py | 66 -
 python.d/python_modules/bases/loggers.py | 205 -
 python.d/python_modules/pyyaml2/__init__.py | 315 -
 python.d/python_modules/pyyaml2/composer.py | 139 -
 python.d/python_modules/pyyaml2/constructor.py | 675 --
 python.d/python_modules/pyyaml2/cyaml.py | 85 -
 python.d/python_modules/pyyaml2/dumper.py | 62 -
 python.d/python_modules/pyyaml2/emitter.py | 1140 ---
 python.d/python_modules/pyyaml2/error.py | 75 -
 python.d/python_modules/pyyaml2/events.py | 86 -
 python.d/python_modules/pyyaml2/loader.py | 40 -
 python.d/python_modules/pyyaml2/nodes.py | 49 -
 python.d/python_modules/pyyaml2/parser.py | 589 --
 python.d/python_modules/pyyaml2/reader.py | 190 -
 python.d/python_modules/pyyaml2/representer.py | 484 -
 python.d/python_modules/pyyaml2/resolver.py | 224 -
 python.d/python_modules/pyyaml2/scanner.py | 1457 ---
 python.d/python_modules/pyyaml2/serializer.py | 111 -
 python.d/python_modules/pyyaml2/tokens.py | 104 -
 python.d/python_modules/pyyaml3/__init__.py | 312 -
 python.d/python_modules/pyyaml3/composer.py | 139 -
 python.d/python_modules/pyyaml3/constructor.py | 686 --
 python.d/python_modules/pyyaml3/cyaml.py | 85 -
 python.d/python_modules/pyyaml3/dumper.py | 62 -
 python.d/python_modules/pyyaml3/emitter.py | 1137 ---
 python.d/python_modules/pyyaml3/error.py | 75 -
 python.d/python_modules/pyyaml3/events.py | 86 -
 python.d/python_modules/pyyaml3/loader.py | 40 -
 python.d/python_modules/pyyaml3/nodes.py | 49 -
 python.d/python_modules/pyyaml3/parser.py | 589 --
 python.d/python_modules/pyyaml3/reader.py | 192 -
 python.d/python_modules/pyyaml3/representer.py | 374 -
 python.d/python_modules/pyyaml3/resolver.py | 224 -
 python.d/python_modules/pyyaml3/scanner.py | 1448 ---
 python.d/python_modules/pyyaml3/serializer.py | 111 -
 python.d/python_modules/pyyaml3/tokens.py | 104 -
 python.d/python_modules/third_party/__init__.py | 0
 python.d/python_modules/third_party/lm_sensors.py | 257 -
 python.d/python_modules/third_party/ordereddict.py | 128 -
 python.d/python_modules/urllib3/__init__.py | 97 -
 python.d/python_modules/urllib3/_collections.py | 314 -
 python.d/python_modules/urllib3/connection.py | 373 -
 python.d/python_modules/urllib3/connectionpool.py | 899 --
 .../python_modules/urllib3/contrib/__init__.py | 0
 .../urllib3/contrib/_securetransport/__init__.py | 0
 .../urllib3/contrib/_securetransport/bindings.py | 590 --
 .../urllib3/contrib/_securetransport/low_level.py | 343 -
 .../python_modules/urllib3/contrib/appengine.py | 296 -
 .../python_modules/urllib3/contrib/ntlmpool.py | 112 -
 .../python_modules/urllib3/contrib/pyopenssl.py | 457 -
 .../urllib3/contrib/securetransport.py | 807 --
 python.d/python_modules/urllib3/contrib/socks.py | 188 -
 python.d/python_modules/urllib3/exceptions.py | 246 -
 python.d/python_modules/urllib3/fields.py | 178 -
 python.d/python_modules/urllib3/filepost.py | 94 -
 .../python_modules/urllib3/packages/__init__.py | 5 -
 .../urllib3/packages/backports/__init__.py | 0
 .../urllib3/packages/backports/makefile.py | 53 -
 .../urllib3/packages/ordered_dict.py | 259 -
 python.d/python_modules/urllib3/packages/six.py | 868 --
 .../packages/ssl_match_hostname/__init__.py | 19 -
 .../packages/ssl_match_hostname/_implementation.py | 157 -
 python.d/python_modules/urllib3/poolmanager.py | 440 -
 python.d/python_modules/urllib3/request.py | 148 -
 python.d/python_modules/urllib3/response.py | 622 --
 python.d/python_modules/urllib3/util/__init__.py | 54 -
 python.d/python_modules/urllib3/util/connection.py | 130 -
 python.d/python_modules/urllib3/util/request.py | 118 -
 python.d/python_modules/urllib3/util/response.py | 81 -
 python.d/python_modules/urllib3/util/retry.py | 401 -
 python.d/python_modules/urllib3/util/selectors.py | 581 --
 python.d/python_modules/urllib3/util/ssl_.py | 337 -
 python.d/python_modules/urllib3/util/timeout.py | 242 -
 python.d/python_modules/urllib3/util/url.py | 230 -
 python.d/python_modules/urllib3/util/wait.py | 40 -
 python.d/rabbitmq.chart.py | 193 -
 python.d/redis.chart.py | 200 -
 python.d/retroshare.chart.py | 76 -
 python.d/samba.chart.py | 126 -
 python.d/sensors.chart.py | 139 -
 python.d/smartd_log.chart.py | 346 -
 python.d/springboot.chart.py | 151 -
 python.d/squid.chart.py | 120 -
 python.d/tomcat.chart.py | 153 -
 python.d/traefik.chart.py | 181 -
 python.d/varnish.chart.py | 241 -
 python.d/web_log.chart.py | 1047 ---
 registry/Makefile.am | 9 +
 registry/Makefile.in | 464 +
 registry/README.md | 152 +
 registry/registry.c | 415 +
 registry/registry.h | 79 +
 registry/registry_db.c | 346 +
 registry/registry_init.c | 146 +
 registry/registry_internals.c | 325 +
 registry/registry_internals.h | 88 +
 registry/registry_log.c | 136 +
 registry/registry_machine.c | 104 +
 registry/registry_machine.h | 43 +
 registry/registry_person.c | 267 +
 registry/registry_person.h | 62 +
 registry/registry_url.c | 88 +
 registry/registry_url.h | 35 +
 src/.keep | 0
 src/Makefile.am | 307 -
 src/Makefile.in | 1238 ---
 src/adaptive_resortable_list.c | 269 -
 src/adaptive_resortable_list.h | 171 -
 src/appconfig.c | 606 --
 src/appconfig.h | 74 -
 src/apps_plugin.c | 3639 --------
 src/avl.c | 403 -
 src/avl.h | 86 -
 src/backend_prometheus.c | 479 -
 src/backend_prometheus.h | 11 -
 src/backends.c | 969 --
 src/backends.h | 30 -
 src/cgroup-network.c | 651 --
 src/clocks.c | 137 -
 src/clocks.h | 124 -
 src/common.c | 1361 ---
 src/common.h | 360 -
 src/daemon.c | 396 -
 src/daemon.h | 12 -
 src/dictionary.c | 292 -
 src/dictionary.h | 44 -
 src/eval.c | 1188 ---
 src/eval.h | 72 -
 src/freebsd_devstat.c | 778 --
 src/freebsd_getifaddrs.c | 506 --
 src/freebsd_getmntinfo.c | 299 -
 src/freebsd_ipfw.c | 370 -
 src/freebsd_kstat_zfs.c | 298 -
 src/freebsd_sysctl.c | 3171 -------
 src/freeipmi_plugin.c | 1680 ----
 src/global_statistics.c | 414 -
 src/global_statistics.h | 38 -
 src/health.c | 737 --
 src/health.h | 438 -
 src/health_config.c | 900 --
 src/health_json.c | 260 -
 src/health_log.c | 463 -
 src/inlined.h | 291 -
 src/ipc.c | 261 -
 src/ipc.h | 7 -
 src/locks.c | 319 -
 src/locks.h | 71 -
 src/log.c | 427 -
 src/log.h | 87 -
 src/macos_fw.c | 684 --
 src/macos_mach_smi.c | 238 -
 src/macos_sysctl.c | 1504 ----
 src/main.c | 1071 ---
 src/main.h | 41 -
 src/plugin_checks.c | 127 -
 src/plugin_checks.h | 8 -
 src/plugin_freebsd.c | 173 -
 src/plugin_freebsd.h | 130 -
 src/plugin_idlejitter.c | 90 -
 src/plugin_idlejitter.h | 6 -
 src/plugin_macos.c | 67 -
 src/plugin_macos.h | 14 -
 src/plugin_nfacct.c | 819 --
 src/plugin_nfacct.h | 7 -
 src/plugin_proc.c | 201 -
 src/plugin_proc.h | 42 -
 src/plugin_proc_diskspace.c | 460 -
 src/plugin_proc_diskspace.h | 6 -
 src/plugin_tc.c | 1164 ---
 src/plugin_tc.h | 7 -
 src/plugins_d.c | 694 --
 src/plugins_d.h | 57 -
 src/popen.c | 204 -
 src/popen.h | 11 -
 src/proc_diskstats.c | 1438 ---
 src/proc_interrupts.c | 253 -
 src/proc_loadavg.c | 119 -
 src/proc_meminfo.c | 514 --
 src/proc_net_dev.c | 894 --
 src/proc_net_ip_vs_stats.c | 129 -
 src/proc_net_netstat.c | 761 --
 src/proc_net_rpc_nfs.c | 449 -
 src/proc_net_rpc_nfsd.c | 1002 ---
 src/proc_net_snmp.c | 1020 ---
 src/proc_net_snmp6.c | 1265 ---
 src/proc_net_sockstat.c | 514 --
 src/proc_net_sockstat6.c | 269 -
 src/proc_net_softnet_stat.c | 147 -
 src/proc_net_stat_conntrack.c | 348 -
 src/proc_net_stat_synproxy.c | 181 -
 src/proc_self_mountinfo.c | 401 -
 src/proc_self_mountinfo.h | 55 -
 src/proc_softirqs.c | 249 -
 src/proc_spl_kstat_zfs.c | 153 -
 src/proc_stat.c | 557 --
 src/proc_sys_kernel_random_entropy_avail.c | 47 -
 src/proc_uptime.c | 103 -
 src/proc_vmstat.c | 255 -
 src/procfile.c | 468 -
 src/procfile.h | 124 -
 src/registry.c | 414 -
 src/registry.h | 77 -
 src/registry_db.c | 343 -
 src/registry_init.c | 143 -
 src/registry_internals.c | 322 -
 src/registry_internals.h | 86 -
 src/registry_log.c | 133 -
 src/registry_machine.c | 101 -
 src/registry_machine.h | 41 -
 src/registry_person.c | 264 -
src/registry_person.h | 60 -
src/registry_url.c | 85 -
src/registry_url.h | 33 -
src/rrd.c | 148 -
src/rrd.h | 747 --
src/rrd2json.c | 2059 -----
src/rrd2json.h | 82 -
src/rrd2json_api_old.c | 487 -
src/rrd2json_api_old.h | 14 -
src/rrdcalc.c | 424 -
src/rrdcalctemplate.c | 69 -
src/rrddim.c | 387 -
src/rrddimvar.c | 212 -
src/rrdfamily.c | 59 -
src/rrdhost.c | 735 --
src/rrdpush.c | 1160 ---
src/rrdpush.h | 19 -
src/rrdset.c | 1544 ----
src/rrdsetvar.c | 183 -
src/rrdvar.c | 275 -
src/signals.c | 168 -
src/signals.h | 10 -
src/simple_pattern.c | 260 -
src/simple_pattern.h | 28 -
src/socket.c | 1518 ----
src/socket.h | 162 -
src/statistical.c | 459 -
src/statistical.h | 19 -
src/statsd.c | 2499 -----
src/statsd.h | 9 -
src/storage_number.c | 231 -
src/storage_number.h | 88 -
src/sys_devices_system_edac_mc.c | 204 -
src/sys_devices_system_node.c | 161 -
src/sys_fs_btrfs.c | 714 --
src/sys_fs_cgroup.c | 2765 ------
src/sys_kernel_mm_ksm.c | 197 -
src/threads.c | 181 -
src/threads.h | 33 -
src/unit_test.c | 1368 ---
src/unit_test.h | 10 -
src/url.c | 77 -
src/url.h | 24 -
src/web_api_old.c | 237 -
src/web_api_old.h | 13 -
src/web_api_v1.c | 1017 ---
src/web_api_v1.h | 23 -
src/web_buffer.c | 400 -
src/web_buffer.h | 81 -
src/web_buffer_svg.c | 832 --
src/web_buffer_svg.h | 7 -
src/web_client.c | 1706 ----
src/web_client.h | 189 -
src/web_server.c | 1292 ---
src/web_server.h | 46 -
src/zfs_common.c | 713 --
src/zfs_common.h | 111 -
streaming/Makefile.am | 12 +
streaming/Makefile.in | 521 ++
streaming/README.md | 413 +
streaming/rrdpush.c | 1279 +++
streaming/rrdpush.h | 25 +
streaming/stream.conf | 191 +
system/Makefile.am | 11 +-
system/Makefile.in | 125 +-
system/edit-config | 96 +
system/edit-config.in | 96 +
system/netdata-freebsd.in | 6 +-
system/netdata-init-d.in | 5 +-
system/netdata-lsb.in | 8 +-
system/netdata-openrc.in | 86 +-
system/netdata.logrotate.in | 2 +-
system/netdata.plist.in | 15 +
system/netdata.service.in | 22 +-
tests/Makefile.am | 4 +-
tests/Makefile.in | 22 +-
tests/node.d/fronius.chart.spec.js | 3 +-
tests/node.d/fronius.parse.spec.js | 3 +-
tests/node.d/fronius.process.spec.js | 3 +-
tests/node.d/fronius.validation.spec.js | 3 +-
tests/stress.sh | 21 +-
tests/web/easypiechart.chart.spec.js | 2 +-
tests/web/easypiechart.percentage.spec.js | 2 +-
web/Makefile.am | 134 +-
web/Makefile.in | 644 +-
web/README.md | 0
web/api/Makefile.am | 20 +
web/api/Makefile.in | 709 ++
web/api/README.md | 18 +
web/api/badges/Makefile.am | 8 +
web/api/badges/Makefile.in | 464 +
web/api/badges/README.md | 324 +
web/api/badges/web_buffer_svg.c | 1142 +++
web/api/badges/web_buffer_svg.h | 16 +
web/api/exporters/Makefile.am | 13 +
web/api/exporters/Makefile.in | 649 ++
web/api/exporters/README.md | 0
web/api/exporters/allmetrics.c | 113 +
web/api/exporters/allmetrics.h | 11 +
web/api/exporters/prometheus/Makefile.am | 8 +
web/api/exporters/prometheus/Makefile.in | 464 +
web/api/exporters/prometheus/README.md | 3 +
web/api/exporters/shell/Makefile.am | 8 +
web/api/exporters/shell/Makefile.in | 464 +
web/api/exporters/shell/README.md | 64 +
web/api/exporters/shell/allmetrics_shell.c | 159 +
web/api/exporters/shell/allmetrics_shell.h | 21 +
web/api/formatters/Makefile.am | 15 +
web/api/formatters/Makefile.in | 651 ++
web/api/formatters/README.md | 72 +
web/api/formatters/charts2json.c | 103 +
web/api/formatters/charts2json.h | 10 +
web/api/formatters/csv/Makefile.am | 8 +
web/api/formatters/csv/Makefile.in | 464 +
web/api/formatters/csv/README.md | 139 +
web/api/formatters/csv/csv.c | 143 +
web/api/formatters/csv/csv.h | 12 +
web/api/formatters/json/Makefile.am | 8 +
web/api/formatters/json/Makefile.in | 464 +
web/api/formatters/json/README.md | 150 +
web/api/formatters/json/json.c | 246 +
web/api/formatters/json/json.h | 10 +
web/api/formatters/json_wrapper.c | 210 +
web/api/formatters/json_wrapper.h | 11 +
web/api/formatters/rrd2json.c | 298 +
web/api/formatters/rrd2json.h | 84 +
web/api/formatters/rrdset2json.c | 108 +
web/api/formatters/rrdset2json.h | 10 +
web/api/formatters/ssv/Makefile.am | 8 +
web/api/formatters/ssv/Makefile.in | 464 +
web/api/formatters/ssv/README.md | 52 +
web/api/formatters/ssv/ssv.c | 45 +
web/api/formatters/ssv/ssv.h | 10 +
web/api/formatters/value/Makefile.am | 8 +
web/api/formatters/value/Makefile.in | 464 +
web/api/formatters/value/README.md | 17 +
web/api/formatters/value/value.c | 94 +
web/api/formatters/value/value.h | 10 +
web/api/netdata-swagger.json | 777 ++
web/api/netdata-swagger.yaml | 512 ++
web/api/queries/Makefile.am | 20 +
web/api/queries/Makefile.in | 656 ++
web/api/queries/README.md | 128 +
web/api/queries/average/Makefile.am | 8 +
web/api/queries/average/Makefile.in | 464 +
web/api/queries/average/README.md | 39 +
web/api/queries/average/average.c | 59 +
web/api/queries/average/average.h | 15 +
web/api/queries/des/Makefile.am | 8 +
web/api/queries/des/Makefile.in | 464 +
web/api/queries/des/README.md | 66 +
web/api/queries/des/des.c | 139 +
web/api/queries/des/des.h | 17 +
web/api/queries/incremental_sum/Makefile.am | 8 +
web/api/queries/incremental_sum/Makefile.in | 464 +
web/api/queries/incremental_sum/README.md | 34 +
web/api/queries/incremental_sum/incremental_sum.c | 69 +
web/api/queries/incremental_sum/incremental_sum.h | 15 +
web/api/queries/max/Makefile.am | 8 +
web/api/queries/max/Makefile.in | 464 +
web/api/queries/max/README.md | 31 +
web/api/queries/max/max.c | 60 +
web/api/queries/max/max.h | 15 +
web/api/queries/median/Makefile.am | 8 +
web/api/queries/median/Makefile.in | 464 +
web/api/queries/median/README.md | 37 +
web/api/queries/median/median.c | 79 +
web/api/queries/median/median.h | 15 +
web/api/queries/min/Makefile.am | 8 +
web/api/queries/min/Makefile.in | 464 +
web/api/queries/min/README.md | 31 +
web/api/queries/min/min.c | 60 +
web/api/queries/min/min.h | 15 +
web/api/queries/query.c | 974 ++
web/api/queries/query.h | 24 +
web/api/queries/rrdr.c | 136 +
web/api/queries/rrdr.h | 106 +
web/api/queries/ses/Makefile.am | 8 +
web/api/queries/ses/Makefile.in | 464 +
web/api/queries/ses/README.md | 54 +
web/api/queries/ses/ses.c | 92 +
web/api/queries/ses/ses.h | 17 +
web/api/queries/stddev/Makefile.am | 8 +
web/api/queries/stddev/Makefile.in | 464 +
web/api/queries/stddev/README.md | 87 +
web/api/queries/stddev/stddev.c | 178 +
web/api/queries/stddev/stddev.h | 18 +
web/api/queries/sum/Makefile.am | 8 +
web/api/queries/sum/Makefile.in | 464 +
web/api/queries/sum/README.md | 34 +
web/api/queries/sum/sum.c | 58 +
web/api/queries/sum/sum.h | 15 +
web/api/web_api_v1.c | 665 ++
web/api/web_api_v1.h | 26 +
web/css/bootstrap-3.3.7.css | 6757 --------
web/css/bootstrap-slate-flat-3.3.7.css | 7100 ---------
web/css/bootstrap-slider-10.0.0.min.css | 41 -
web/css/bootstrap-theme-3.3.7.min.css | 6 -
web/css/bootstrap-toggle-2.2.2.min.css | 28 -
web/css/c3-0.4.18.min.css | 1 -
web/css/morris-0.5.1.css | 2 -
web/dashboard.css | 738 --
web/dashboard.html | 700 --
web/dashboard.js | 9457 -------------
web/dashboard.slate.css | 756 --
web/dashboard_info.js | 2070 -----
web/dashboard_info_custom_example.js | 58 -
web/favicon.ico | Bin 56875 -> 0 bytes
web/goto-host-from-alarm.html | 209 -
web/gui/Makefile.am | 123 +
web/gui/Makefile.in | 779 ++
web/gui/README.md | 0
web/gui/css/bootstrap-3.3.7.css | 6758 ++++++++++++++
web/gui/css/bootstrap-slate-flat-3.3.7.css | 7101 +++++++++++++++
web/gui/css/bootstrap-slider-10.0.0.min.css | 22 +
web/gui/css/bootstrap-theme-3.3.7.min.css | 7 +
web/gui/css/bootstrap-toggle-2.2.2.min.css | 29 +
web/gui/css/c3-0.4.18.min.css | 2 +
web/gui/css/morris-0.5.1.css | 3 +
web/gui/dashboard.css | 739 ++
web/gui/dashboard.html | 701 ++
web/gui/dashboard.js | 9512 ++++++++++++++++++
web/gui/dashboard.slate.css | 757 ++
web/gui/dashboard_info.js | 2321 +++++
web/gui/dashboard_info_custom_example.js | 59 +
web/gui/favicon.ico | Bin 0 -> 56875 bytes
web/gui/goto-host-from-alarm.html | 244 +
web/gui/images/alert-128-orange.png | Bin 0 -> 3477 bytes
web/gui/images/alert-128-red.png | Bin 0 -> 3743 bytes
web/gui/images/alert-multi-size-orange.ico | Bin 0 -> 112374 bytes
web/gui/images/alert-multi-size-red.ico | Bin 0 -> 112458 bytes
web/gui/images/animated.gif | Bin 0 -> 389597 bytes
web/gui/images/check-mark-2-128-green.png | Bin 0 -> 3771 bytes
web/gui/images/check-mark-2-multi-size-green.ico | Bin 0 -> 111893 bytes
web/gui/images/netdata.svg | 18 +
web/gui/images/post.png | Bin 0 -> 9043 bytes
web/gui/images/seo-performance-114.png | Bin 0 -> 3578 bytes
web/gui/images/seo-performance-128.png | Bin 0 -> 1828 bytes
web/gui/images/seo-performance-16.png | Bin 0 -> 287 bytes
web/gui/images/seo-performance-24.png | Bin 0 -> 528 bytes
web/gui/images/seo-performance-256.png | Bin 0 -> 3216 bytes
web/gui/images/seo-performance-32.png | Bin 0 -> 509 bytes
web/gui/images/seo-performance-48.png | Bin 0 -> 1116 bytes
web/gui/images/seo-performance-512.png | Bin 0 -> 6995 bytes
web/gui/images/seo-performance-64.png | Bin 0 -> 961 bytes
web/gui/images/seo-performance-72.png | Bin 0 -> 1609 bytes
web/gui/images/seo-performance-multi-size.icns | Bin 0 -> 80967 bytes
web/gui/images/seo-performance-multi-size.ico | Bin 0 -> 56875 bytes
web/gui/index.html | 5791 ++++++++++++
web/gui/infographic.html | 171 +
web/gui/lib/bootstrap-3.3.7.min.js | 8 +
web/gui/lib/bootstrap-slider-10.0.0.min.js | 6 +
web/gui/lib/bootstrap-table-1.11.0.min.js | 9 +
web/gui/lib/bootstrap-table-export-1.11.0.min.js | 8 +
web/gui/lib/bootstrap-toggle-2.2.2.min.js | 10 +
web/gui/lib/c3-0.4.18.min.js | 2 +
web/gui/lib/clipboard-polyfill-be05dad.js | 9 +
web/gui/lib/d3-4.12.2.min.js | 3 +
web/gui/lib/d3pie-0.2.1-netdata-3.js | 2124 +++++
web/gui/lib/dygraph-c91c859.min.js | 7 +
web/gui/lib/dygraph-smooth-plotter-c91c859.js | 141 +
web/gui/lib/fontawesome-all-5.0.1.min.js | 6 +
web/gui/lib/gauge-1.3.2.min.js | 2 +
web/gui/lib/jquery-2.2.4.min.js | 5 +
web/gui/lib/jquery.easypiechart-97b5824.min.js | 10 +
web/gui/lib/jquery.peity-3.2.0.min.js | 14 +
web/gui/lib/jquery.sparkline-2.1.2.min.js | 6 +
web/gui/lib/lz-string-1.4.4.min.js | 2 +
web/gui/lib/morris-0.5.1.min.js | 8 +
web/gui/lib/pako-1.0.6.min.js | 2 +
web/gui/lib/perfect-scrollbar-0.6.15.min.js | 3 +
web/gui/lib/raphael-2.2.4-min.js | 4 +
web/gui/lib/tableExport-1.6.0.min.js | 55 +
web/gui/refresh-badges.js | 98 +
web/gui/registry.html | 203 +
web/gui/robots.txt | 7 +
web/gui/sitemap.xml | 9 +
web/gui/tv.html | 279 +
web/gui/version.txt | 1 +
web/images/alert-128-orange.png | Bin 3477 -> 0 bytes
web/images/alert-128-red.png | Bin 3743 -> 0 bytes
web/images/alert-multi-size-orange.ico | Bin 112374 -> 0 bytes
web/images/alert-multi-size-red.ico | Bin 112458 -> 0 bytes
web/images/animated.gif | Bin 389597 -> 0 bytes
web/images/check-mark-2-128-green.png | Bin 3771 -> 0 bytes
web/images/check-mark-2-multi-size-green.ico | Bin 111893 -> 0 bytes
web/images/post.png | Bin 9043 -> 0 bytes
web/images/seo-performance-114.png | Bin 3578 -> 0 bytes
web/images/seo-performance-128.png | Bin 1828 -> 0 bytes
web/images/seo-performance-16.png | Bin 287 -> 0 bytes
web/images/seo-performance-24.png | Bin 528 -> 0 bytes
web/images/seo-performance-256.png | Bin 3216 -> 0 bytes
web/images/seo-performance-32.png | Bin 509 -> 0 bytes
web/images/seo-performance-48.png | Bin 1116 -> 0 bytes
web/images/seo-performance-512.png | Bin 6995 -> 0 bytes
web/images/seo-performance-64.png | Bin 961 -> 0 bytes
web/images/seo-performance-72.png | Bin 1609 -> 0 bytes
web/images/seo-performance-multi-size.icns | Bin 80967 -> 0 bytes
web/images/seo-performance-multi-size.ico | Bin 56875 -> 0 bytes
web/index.html | 5724 ------------
web/infographic.html | 170 -
web/lib/bootstrap-3.3.7.min.js | 7 -
web/lib/bootstrap-slider-10.0.0.min.js | 5 -
web/lib/bootstrap-table-1.11.0.min.js | 8 -
web/lib/bootstrap-table-export-1.11.0.min.js | 7 -
web/lib/bootstrap-toggle-2.2.2.min.js | 9 -
web/lib/c3-0.4.18.min.js | 1 -
web/lib/clipboard-polyfill-be05dad.js | 8 -
web/lib/d3-4.12.2.min.js | 2 -
web/lib/d3pie-0.2.1-netdata-3.js | 2123 -----
web/lib/dygraph-c91c859.min.js | 6 -
web/lib/dygraph-smooth-plotter-c91c859.js | 140 -
web/lib/fontawesome-all-5.0.1.min.js | 5 -
web/lib/gauge-1.3.2.min.js | 1 -
web/lib/jquery-2.2.4.min.js | 4 -
web/lib/jquery.easypiechart-97b5824.min.js | 9 -
web/lib/jquery.peity-3.2.0.min.js | 13 -
web/lib/jquery.sparkline-2.1.2.min.js | 5 -
web/lib/lz-string-1.4.4.min.js | 1 -
web/lib/morris-0.5.1.min.js | 7 -
web/lib/pako-1.0.6.min.js | 1 -
web/lib/perfect-scrollbar-0.6.15.min.js | 2 -
web/lib/raphael-2.2.4-min.js | 3 -
web/lib/tableExport-1.6.0.min.js | 53 -
web/netdata-swagger.json | 771 --
web/netdata-swagger.yaml | 510 --
web/refresh-badges.js | 97 -
web/registry.html | 202 -
web/robots.txt | 7 -
web/server/Makefile.am | 14 +
web/server/Makefile.in | 650 ++
web/server/README.md | 107 +
web/server/multi/Makefile.am | 11 +
web/server/multi/Makefile.in | 647 ++
web/server/multi/README.md | 8 +
web/server/multi/multi-threaded.c | 314 +
web/server/multi/multi-threaded.h | 10 +
web/server/single/Makefile.am | 11 +
web/server/single/Makefile.in | 647 ++
web/server/single/README.md | 6 +
web/server/single/single-threaded.c | 194 +
web/server/single/single-threaded.h | 10 +
web/server/static/Makefile.am | 11 +
web/server/static/Makefile.in | 647 ++
web/server/static/README.md | 9 +
web/server/static/static-threaded.c | 424 +
web/server/static/static-threaded.h | 10 +
web/server/web_client.c | 1665 ++++
web/server/web_client.h | 193 +
web/server/web_client_cache.c | 231 +
web/server/web_client_cache.h | 31 +
web/server/web_server.c | 146 +
web/server/web_server.h | 60 +
web/sitemap.xml | 8 -
web/tv.html | 278 -
web/version.txt | 1 -
1779 files changed, 244400 insertions(+), 179931 deletions(-)
create mode 100644 .github/CODEOWNERS
create mode 100644 .lgtm.yml
create mode 100644 .travis/README.md
create mode 100755 .travis/containerized_build.sh
create mode 100755 .travis/create_artifacts.sh
create mode 100755 .travis/firehol_create_artifacts.sh
create mode 100755 .travis/generate_changelog.sh
create mode 100644 .travis/images/Dockerfile.alpine
create mode 100644 .travis/images/Dockerfile.centos6
create mode 100644 .travis/images/Dockerfile.centos7
create mode 100644 .travis/images/Dockerfile.ubuntu1804
create mode 100755 .travis/releaser.sh
create mode 100644 CODE_OF_CONDUCT.md
create mode 100644 CONTRIBUTORS.md
delete mode 100644 COPYING
delete mode 100644 ChangeLog
delete mode 100644 Dockerfile
delete mode 100644 Dockerfile.aarch64
delete mode 100644 Dockerfile.alpine
delete mode 100644 Dockerfile.armv7hf
delete mode 100644 LICENSE-REDISTRIBUTED.md
delete mode 100644 LICENSE.md
create mode 100644 REDISTRIBUTED.md
delete mode 100755 autogen.sh
create mode 100644 backends/Makefile.am
create mode 100644 backends/Makefile.in
create mode 100644 backends/README.md
create mode 100644 backends/backends.c
create mode 100644 backends/backends.h
create mode 100644 backends/graphite/Makefile.am
create mode 100644 backends/graphite/Makefile.in
create mode 100644 backends/graphite/graphite.c
create mode 100644 backends/graphite/graphite.h
create mode 100644 backends/json/Makefile.am
create mode 100644 backends/json/Makefile.in
create mode 100644 backends/json/json.c
create mode 100644 backends/json/json.h
create mode 100755 backends/nc-backend.sh
create mode 100644 backends/opentsdb/Makefile.am
create mode 100644 backends/opentsdb/Makefile.in
create mode 100644 backends/opentsdb/opentsdb.c
create mode 100644 backends/opentsdb/opentsdb.h
create mode 100644 backends/prometheus/Makefile.am
create mode 100644 backends/prometheus/Makefile.in
create mode 100644 backends/prometheus/README.md
create mode 100644 backends/prometheus/backend_prometheus.c
create mode 100644 backends/prometheus/backend_prometheus.h
create mode 100644 build/Dockerfile
create mode 100755 build/build.sh
create mode 100644 build/m4/ax_c___atomic.m4
create mode 100644 build/m4/ax_c__generic.m4
create mode 100644 build/m4/ax_c_lto.m4
create mode 100644 build/m4/ax_c_mallinfo.m4
create mode 100644 build/m4/ax_c_mallopt.m4
create mode 100644 build/m4/ax_c_statement_expressions.m4
create mode 100644 build/m4/ax_check_compile_flag.m4
create mode 100644 build/m4/ax_check_enable_debug.m4
create mode 100644 build/m4/ax_gcc_func_attribute.m4
create mode 100644 build/m4/ax_pthread.m4
create mode 100644 build/m4/jemalloc.m4
create mode 100644 build/m4/tcmalloc.m4
delete mode 100644 charts.d/Makefile.am
delete mode 100644 charts.d/Makefile.in
delete mode 100644 charts.d/README.md
delete mode 100644 charts.d/ap.chart.sh
delete mode 100644 charts.d/apache.chart.sh
delete mode 100644 charts.d/apcupsd.chart.sh
delete mode 100644 charts.d/cpu_apps.chart.sh
delete mode 100644 charts.d/cpufreq.chart.sh
delete mode 100644 charts.d/example.chart.sh
delete mode 100644 charts.d/exim.chart.sh
delete mode 100644 charts.d/hddtemp.chart.sh
delete mode 100644 charts.d/libreswan.chart.sh
delete mode 100644 charts.d/load_average.chart.sh
delete mode 100644 charts.d/mem_apps.chart.sh
delete mode 100644 charts.d/mysql.chart.sh
delete mode 100644 charts.d/nginx.chart.sh
delete mode 100644 charts.d/nut.chart.sh
delete mode 100644 charts.d/opensips.chart.sh
delete mode 100644 charts.d/phpfpm.chart.sh
delete mode 100644 charts.d/postfix.chart.sh
delete mode 100644 charts.d/sensors.chart.sh
delete mode 100644 charts.d/squid.chart.sh
delete mode 100644 charts.d/tomcat.chart.sh
create mode 100644 collectors/Makefile.am
create mode 100644 collectors/Makefile.in
create mode 100644 collectors/README.md
create mode 100644 collectors/all.h
create mode 100644 collectors/apps.plugin/Makefile.am
create mode 100644 collectors/apps.plugin/Makefile.in
create mode 100644 collectors/apps.plugin/README.md
create mode 100644 collectors/apps.plugin/apps_groups.conf
create mode 100644 collectors/apps.plugin/apps_plugin.c
create mode 100644 collectors/cgroups.plugin/Makefile.am
create mode 100644 collectors/cgroups.plugin/Makefile.in
create mode 100644 collectors/cgroups.plugin/README.md
create mode 100644 collectors/cgroups.plugin/cgroup-name.sh
create mode 100755 collectors/cgroups.plugin/cgroup-name.sh.in
create mode 100755 collectors/cgroups.plugin/cgroup-network-helper.sh
create mode 100644 collectors/cgroups.plugin/cgroup-network.c
create mode 100644 collectors/cgroups.plugin/sys_fs_cgroup.c
create mode 100644 collectors/cgroups.plugin/sys_fs_cgroup.h
create mode 100644 collectors/charts.d.plugin/Makefile.am
create mode 100644 collectors/charts.d.plugin/Makefile.in
create mode 100644 collectors/charts.d.plugin/README.md
create mode 100644 collectors/charts.d.plugin/ap/Makefile.inc
create mode 100644 collectors/charts.d.plugin/ap/README.md
create mode 100644 collectors/charts.d.plugin/ap/ap.chart.sh
create mode 100644 collectors/charts.d.plugin/ap/ap.conf
create mode 100644 collectors/charts.d.plugin/apache/Makefile.inc
create mode 100644 collectors/charts.d.plugin/apache/README.md
create mode 100644 collectors/charts.d.plugin/apache/apache.chart.sh
create mode 100644 collectors/charts.d.plugin/apache/apache.conf
create mode 100644 collectors/charts.d.plugin/apcupsd/Makefile.inc
create mode 100644 collectors/charts.d.plugin/apcupsd/README.md
create mode 100644 collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
create mode 100644 collectors/charts.d.plugin/apcupsd/apcupsd.conf
create mode 100644 collectors/charts.d.plugin/charts.d.conf
create mode 100755 collectors/charts.d.plugin/charts.d.dryrun-helper.sh
create mode 100644 collectors/charts.d.plugin/charts.d.plugin
create mode 100755 collectors/charts.d.plugin/charts.d.plugin.in
create mode 100644 collectors/charts.d.plugin/cpu_apps/Makefile.inc
create mode 100644 collectors/charts.d.plugin/cpu_apps/README.md
create mode 100644 collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
create mode 100644 collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
create mode 100644 collectors/charts.d.plugin/cpufreq/Makefile.inc
create mode 100644 collectors/charts.d.plugin/cpufreq/README.md
create mode 100644 collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
create mode 100644 collectors/charts.d.plugin/cpufreq/cpufreq.conf
create mode 100644 collectors/charts.d.plugin/example/Makefile.inc
create mode 100644 collectors/charts.d.plugin/example/README.md
create mode 100644 collectors/charts.d.plugin/example/example.chart.sh
create mode 100644 collectors/charts.d.plugin/example/example.conf
create mode 100644 collectors/charts.d.plugin/exim/Makefile.inc
create mode 100644 collectors/charts.d.plugin/exim/README.md
create mode 100644 collectors/charts.d.plugin/exim/exim.chart.sh
create mode 100644 collectors/charts.d.plugin/exim/exim.conf
create mode 100644 collectors/charts.d.plugin/hddtemp/Makefile.inc
create mode 100644 collectors/charts.d.plugin/hddtemp/README.md
create mode 100644 collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
create mode 100644 collectors/charts.d.plugin/hddtemp/hddtemp.conf
create mode 100644 collectors/charts.d.plugin/libreswan/Makefile.inc
create mode 100644 collectors/charts.d.plugin/libreswan/README.md
create mode 100644 collectors/charts.d.plugin/libreswan/libreswan.chart.sh
create mode 100644 collectors/charts.d.plugin/libreswan/libreswan.conf
create mode 100644 collectors/charts.d.plugin/load_average/Makefile.inc
create mode 100644 collectors/charts.d.plugin/load_average/README.md
create mode 100644 collectors/charts.d.plugin/load_average/load_average.chart.sh
create mode 100644 collectors/charts.d.plugin/load_average/load_average.conf
create mode 100644 collectors/charts.d.plugin/loopsleepms.sh.inc
create mode 100644 collectors/charts.d.plugin/mem_apps/Makefile.inc
create mode 100644 collectors/charts.d.plugin/mem_apps/README.md
create mode 100644 collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
create mode 100644 collectors/charts.d.plugin/mem_apps/mem_apps.conf
create mode 100644 collectors/charts.d.plugin/mysql/Makefile.inc
create mode 100644 collectors/charts.d.plugin/mysql/README.md
create mode 100644 collectors/charts.d.plugin/mysql/mysql.chart.sh
create mode 100644 collectors/charts.d.plugin/mysql/mysql.conf
create mode 100644 collectors/charts.d.plugin/nginx/Makefile.inc
create mode 100644 collectors/charts.d.plugin/nginx/README.md
create mode 100644 collectors/charts.d.plugin/nginx/nginx.chart.sh
create mode 100644 collectors/charts.d.plugin/nginx/nginx.conf
create mode 100644 collectors/charts.d.plugin/nut/Makefile.inc
create mode 100644 collectors/charts.d.plugin/nut/README.md
create mode 100644 collectors/charts.d.plugin/nut/nut.chart.sh
create mode 100644 collectors/charts.d.plugin/nut/nut.conf
create mode 100644 collectors/charts.d.plugin/opensips/Makefile.inc
create mode 100644 collectors/charts.d.plugin/opensips/README.md
create mode 100644 collectors/charts.d.plugin/opensips/opensips.chart.sh
create mode 100644 collectors/charts.d.plugin/opensips/opensips.conf
create mode 100644 collectors/charts.d.plugin/phpfpm/Makefile.inc
create mode 100644 collectors/charts.d.plugin/phpfpm/README.md
create mode 100644 collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
create mode 100644 collectors/charts.d.plugin/phpfpm/phpfpm.conf
create mode 100644 collectors/charts.d.plugin/postfix/Makefile.inc
create mode 100644 collectors/charts.d.plugin/postfix/README.md
create mode 100644 collectors/charts.d.plugin/postfix/postfix.chart.sh
create mode 100644 collectors/charts.d.plugin/postfix/postfix.conf
create mode 100644 collectors/charts.d.plugin/sensors/Makefile.inc
create mode 100644 collectors/charts.d.plugin/sensors/README.md
create mode 100644 collectors/charts.d.plugin/sensors/sensors.chart.sh
create mode 100644 collectors/charts.d.plugin/sensors/sensors.conf
create mode 100644 collectors/charts.d.plugin/squid/Makefile.inc
create mode 100644 collectors/charts.d.plugin/squid/README.md
create mode 100644 collectors/charts.d.plugin/squid/squid.chart.sh
create mode 100644 collectors/charts.d.plugin/squid/squid.conf
create mode 100644 collectors/charts.d.plugin/tomcat/Makefile.inc
create mode 100644 collectors/charts.d.plugin/tomcat/README.md
create mode 100644 collectors/charts.d.plugin/tomcat/tomcat.chart.sh
create mode 100644 collectors/charts.d.plugin/tomcat/tomcat.conf
create mode 100644 collectors/checks.plugin/Makefile.am
create mode 100644 collectors/checks.plugin/Makefile.in
create mode 100644 collectors/checks.plugin/plugin_checks.c
create mode 100644 collectors/checks.plugin/plugin_checks.h
create mode 100644 collectors/diskspace.plugin/Makefile.am
create mode 100644 collectors/diskspace.plugin/Makefile.in
create mode 100644 collectors/diskspace.plugin/README.md
create mode 100644 collectors/diskspace.plugin/plugin_diskspace.c
create mode 100644 collectors/diskspace.plugin/plugin_diskspace.h
create mode 100644 collectors/fping.plugin/Makefile.am
create mode 100644 collectors/fping.plugin/Makefile.in
create mode 100644 collectors/fping.plugin/README.md
create mode 100644 collectors/fping.plugin/fping.conf
create mode 100644 collectors/fping.plugin/fping.plugin
create mode 100755 collectors/fping.plugin/fping.plugin.in
create mode 100644 collectors/freebsd.plugin/Makefile.am
create mode 100644 collectors/freebsd.plugin/Makefile.in
create mode 100644 collectors/freebsd.plugin/freebsd_devstat.c
create mode 100644 collectors/freebsd.plugin/freebsd_getifaddrs.c
create mode 100644 collectors/freebsd.plugin/freebsd_getmntinfo.c
create mode 100644 collectors/freebsd.plugin/freebsd_ipfw.c
create mode 100644 collectors/freebsd.plugin/freebsd_kstat_zfs.c
create mode 100644 collectors/freebsd.plugin/freebsd_sysctl.c
create mode 100644 collectors/freebsd.plugin/plugin_freebsd.c
create mode 100644 collectors/freebsd.plugin/plugin_freebsd.h
create mode 100644 collectors/freeipmi.plugin/Makefile.am
create mode 100644 collectors/freeipmi.plugin/Makefile.in
create mode 100644 collectors/freeipmi.plugin/README.md
create mode 100644 collectors/freeipmi.plugin/freeipmi_plugin.c
create mode 100644 collectors/idlejitter.plugin/Makefile.am
create mode 100644 collectors/idlejitter.plugin/Makefile.in
create mode 100644 collectors/idlejitter.plugin/README.md
create mode 100644 collectors/idlejitter.plugin/plugin_idlejitter.c
create mode 100644 collectors/idlejitter.plugin/plugin_idlejitter.h
create mode 100644 collectors/macos.plugin/Makefile.am
create mode 100644 collectors/macos.plugin/Makefile.in
create mode 100644 collectors/macos.plugin/macos_fw.c
create mode 100644 collectors/macos.plugin/macos_mach_smi.c
create mode 100644 collectors/macos.plugin/macos_sysctl.c
create mode 100644 collectors/macos.plugin/plugin_macos.c
create mode 100644 collectors/macos.plugin/plugin_macos.h
create mode 100644 collectors/nfacct.plugin/Makefile.am
create mode 100644 collectors/nfacct.plugin/Makefile.in
create mode 100644 collectors/nfacct.plugin/README.md
create mode 100644 collectors/nfacct.plugin/plugin_nfacct.c
create mode 100644 collectors/nfacct.plugin/plugin_nfacct.h
create mode 100644 collectors/node.d.plugin/Makefile.am
create mode 100644 collectors/node.d.plugin/Makefile.in
create mode 100644 collectors/node.d.plugin/README.md
create mode 100644 collectors/node.d.plugin/fronius/Makefile.inc
create mode 100644 collectors/node.d.plugin/fronius/README.md
create mode 100644 collectors/node.d.plugin/fronius/fronius.node.js
create mode 100644 collectors/node.d.plugin/named/Makefile.inc
create mode 100644 collectors/node.d.plugin/named/README.md
create mode 100644 collectors/node.d.plugin/named/named.node.js
create mode 100644 collectors/node.d.plugin/node.d.conf
create mode 100644 collectors/node.d.plugin/node.d.plugin
create mode 100755 collectors/node.d.plugin/node.d.plugin.in
create mode 100644 collectors/node.d.plugin/node_modules/asn1-ber.js
create mode 100644 collectors/node.d.plugin/node_modules/extend.js
create mode 100644 collectors/node.d.plugin/node_modules/lib/ber/errors.js
create mode 100644 collectors/node.d.plugin/node_modules/lib/ber/index.js
create mode 100644 collectors/node.d.plugin/node_modules/lib/ber/reader.js
create mode 100644 collectors/node.d.plugin/node_modules/lib/ber/types.js
create mode 100644 collectors/node.d.plugin/node_modules/lib/ber/writer.js
create mode 100644 collectors/node.d.plugin/node_modules/net-snmp.js
create mode 100644 collectors/node.d.plugin/node_modules/netdata.js
create mode 100644 collectors/node.d.plugin/node_modules/pixl-xml.js
create mode 100644 collectors/node.d.plugin/sma_webbox/Makefile.inc
create mode 100644 collectors/node.d.plugin/sma_webbox/README.md
create mode 100644 collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
create mode 100644 collectors/node.d.plugin/snmp/Makefile.inc
create mode 100644 collectors/node.d.plugin/snmp/README.md
create mode 100644 collectors/node.d.plugin/snmp/snmp.node.js
create mode 100644 collectors/node.d.plugin/stiebeleltron/Makefile.inc
create mode 100644 collectors/node.d.plugin/stiebeleltron/README.md
create mode 100644 collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
create mode 100644 collectors/plugins.d/Makefile.am
create mode 100644 collectors/plugins.d/Makefile.in
create mode 100644 collectors/plugins.d/README.md
create mode 100644 collectors/plugins.d/plugins_d.c
create mode 100644 collectors/plugins.d/plugins_d.h
create mode 100644 collectors/proc.plugin/Makefile.am
create mode 100644 collectors/proc.plugin/Makefile.in
create mode 100644 collectors/proc.plugin/README.md
create mode 100644 collectors/proc.plugin/ipc.c
create mode 100644 collectors/proc.plugin/plugin_proc.c
create mode 100644 collectors/proc.plugin/plugin_proc.h
create mode 100644 collectors/proc.plugin/proc_diskstats.c
create mode 100644 collectors/proc.plugin/proc_interrupts.c
create mode 100644 collectors/proc.plugin/proc_loadavg.c
create mode 100644 collectors/proc.plugin/proc_meminfo.c
create mode 100644 collectors/proc.plugin/proc_net_dev.c
create mode 100644 collectors/proc.plugin/proc_net_ip_vs_stats.c
create mode 100644 collectors/proc.plugin/proc_net_netstat.c
create mode 100644 collectors/proc.plugin/proc_net_rpc_nfs.c
create mode 100644 collectors/proc.plugin/proc_net_rpc_nfsd.c
create mode 100644 collectors/proc.plugin/proc_net_sctp_snmp.c
create mode 100644 collectors/proc.plugin/proc_net_snmp.c
create mode 100644 collectors/proc.plugin/proc_net_snmp6.c
create mode 100644 collectors/proc.plugin/proc_net_sockstat.c
create mode 100644 collectors/proc.plugin/proc_net_sockstat6.c
create mode 100644 collectors/proc.plugin/proc_net_softnet_stat.c
create mode 100644 collectors/proc.plugin/proc_net_stat_conntrack.c
create mode 100644 collectors/proc.plugin/proc_net_stat_synproxy.c
create mode 100644 collectors/proc.plugin/proc_self_mountinfo.c
create mode 100644 collectors/proc.plugin/proc_self_mountinfo.h
create mode 100644 collectors/proc.plugin/proc_softirqs.c
create mode 100644 collectors/proc.plugin/proc_spl_kstat_zfs.c
create mode 100644 collectors/proc.plugin/proc_stat.c
create mode 100644 collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
create mode 100644 collectors/proc.plugin/proc_uptime.c
create mode 100644 collectors/proc.plugin/proc_vmstat.c
create mode 100644 collectors/proc.plugin/sys_devices_system_edac_mc.c
create mode 100644 collectors/proc.plugin/sys_devices_system_node.c
create mode 100644 collectors/proc.plugin/sys_fs_btrfs.c
create mode 100644 collectors/proc.plugin/sys_kernel_mm_ksm.c
create mode 100644 collectors/proc.plugin/zfs_common.c
create mode 100644 collectors/proc.plugin/zfs_common.h
create mode 100644 collectors/python.d.plugin/Makefile.am
create mode 100644 collectors/python.d.plugin/Makefile.in
create mode 100644 collectors/python.d.plugin/README.md
create mode 100644 collectors/python.d.plugin/adaptec_raid/Makefile.inc
create mode 100644 collectors/python.d.plugin/adaptec_raid/README.md
create mode 100644 collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
create mode 100644 collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
create mode 100644 collectors/python.d.plugin/apache/Makefile.inc
create mode 100644 collectors/python.d.plugin/apache/README.md
create mode 100644 collectors/python.d.plugin/apache/apache.chart.py
create mode 100644 collectors/python.d.plugin/apache/apache.conf
create mode 100644 collectors/python.d.plugin/beanstalk/Makefile.inc
create mode 100644 collectors/python.d.plugin/beanstalk/README.md
create mode 100644 collectors/python.d.plugin/beanstalk/beanstalk.chart.py
create mode 100644 collectors/python.d.plugin/beanstalk/beanstalk.conf
create mode 100644 collectors/python.d.plugin/bind_rndc/Makefile.inc
create mode 100644 collectors/python.d.plugin/bind_rndc/README.md
create mode 100644 collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
create mode 100644 collectors/python.d.plugin/bind_rndc/bind_rndc.conf
create mode 100644 collectors/python.d.plugin/boinc/Makefile.inc
create mode 100644 collectors/python.d.plugin/boinc/README.md
create mode 100644 collectors/python.d.plugin/boinc/boinc.chart.py
create mode 100644 collectors/python.d.plugin/boinc/boinc.conf
create mode 100644 collectors/python.d.plugin/ceph/Makefile.inc
create mode 100644 collectors/python.d.plugin/ceph/README.md
create mode 100644 collectors/python.d.plugin/ceph/ceph.chart.py
create mode 100644 collectors/python.d.plugin/ceph/ceph.conf
create mode 100644 collectors/python.d.plugin/chrony/Makefile.inc
create mode 100644 collectors/python.d.plugin/chrony/README.md
create mode 100644 collectors/python.d.plugin/chrony/chrony.chart.py
create mode 100644 collectors/python.d.plugin/chrony/chrony.conf
create mode 100644 collectors/python.d.plugin/couchdb/Makefile.inc
create mode 100644 collectors/python.d.plugin/couchdb/README.md
create mode 100644 collectors/python.d.plugin/couchdb/couchdb.chart.py
create mode 100644 collectors/python.d.plugin/couchdb/couchdb.conf
create mode 100644 collectors/python.d.plugin/cpufreq/Makefile.inc
create mode 100644 collectors/python.d.plugin/cpufreq/README.md
create mode 100644 collectors/python.d.plugin/cpufreq/cpufreq.chart.py
create mode 100644 collectors/python.d.plugin/cpufreq/cpufreq.conf
create mode 100644 collectors/python.d.plugin/cpuidle/Makefile.inc
create mode 100644 collectors/python.d.plugin/cpuidle/README.md
create mode 100644 collectors/python.d.plugin/cpuidle/cpuidle.chart.py
create mode 100644 collectors/python.d.plugin/cpuidle/cpuidle.conf
create mode 100644 collectors/python.d.plugin/dns_query_time/Makefile.inc
create mode 100644 collectors/python.d.plugin/dns_query_time/README.md
create mode 100644 collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
create mode 100644 collectors/python.d.plugin/dns_query_time/dns_query_time.conf
create mode 100644 collectors/python.d.plugin/dnsdist/Makefile.inc
create mode 100644 collectors/python.d.plugin/dnsdist/README.md
create mode 100644 collectors/python.d.plugin/dnsdist/dnsdist.chart.py
create mode 100644 collectors/python.d.plugin/dnsdist/dnsdist.conf
create mode 100644 collectors/python.d.plugin/dockerd/Makefile.inc
create mode 100644 collectors/python.d.plugin/dockerd/README.md
create mode 100644 collectors/python.d.plugin/dockerd/dockerd.chart.py
create mode 100644 collectors/python.d.plugin/dockerd/dockerd.conf
create mode 100644 collectors/python.d.plugin/dovecot/Makefile.inc
create mode 100644 collectors/python.d.plugin/dovecot/README.md
create mode 100644 collectors/python.d.plugin/dovecot/dovecot.chart.py
create mode 100644 collectors/python.d.plugin/dovecot/dovecot.conf
create mode 100644 collectors/python.d.plugin/elasticsearch/Makefile.inc
create mode 100644 collectors/python.d.plugin/elasticsearch/README.md
create mode 100644 collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
create mode 100644 collectors/python.d.plugin/elasticsearch/elasticsearch.conf
create mode 100644 collectors/python.d.plugin/example/Makefile.inc
create mode 100644 collectors/python.d.plugin/example/README.md
create mode 100644 collectors/python.d.plugin/example/example.chart.py
create mode 100644 collectors/python.d.plugin/example/example.conf
create mode 100644 collectors/python.d.plugin/exim/Makefile.inc
create mode 100644 collectors/python.d.plugin/exim/README.md
create mode 100644 collectors/python.d.plugin/exim/exim.chart.py
create mode 100644 collectors/python.d.plugin/exim/exim.conf
create mode 100644 collectors/python.d.plugin/fail2ban/Makefile.inc
create mode 100644 collectors/python.d.plugin/fail2ban/README.md
create mode 100644 collectors/python.d.plugin/fail2ban/fail2ban.chart.py
create mode 100644 collectors/python.d.plugin/fail2ban/fail2ban.conf
create mode 100644 collectors/python.d.plugin/freeradius/Makefile.inc
create mode 100644 collectors/python.d.plugin/freeradius/README.md
create mode 100644 collectors/python.d.plugin/freeradius/freeradius.chart.py
create mode 100644 collectors/python.d.plugin/freeradius/freeradius.conf
create mode 100644 collectors/python.d.plugin/go_expvar/Makefile.inc
create mode 100644 collectors/python.d.plugin/go_expvar/README.md
create mode 100644 collectors/python.d.plugin/go_expvar/go_expvar.chart.py
create mode 100644 collectors/python.d.plugin/go_expvar/go_expvar.conf
create mode 100644 collectors/python.d.plugin/haproxy/Makefile.inc
create mode 100644 collectors/python.d.plugin/haproxy/README.md
create mode 100644 collectors/python.d.plugin/haproxy/haproxy.chart.py
create mode 100644 collectors/python.d.plugin/haproxy/haproxy.conf
create mode 100644 collectors/python.d.plugin/hddtemp/Makefile.inc
create mode 100644 collectors/python.d.plugin/hddtemp/README.md
create mode 100644 collectors/python.d.plugin/hddtemp/hddtemp.chart.py
create mode 100644 collectors/python.d.plugin/hddtemp/hddtemp.conf
create mode 100644 collectors/python.d.plugin/httpcheck/Makefile.inc
create mode 100644 collectors/python.d.plugin/httpcheck/README.md
create mode 100644 collectors/python.d.plugin/httpcheck/httpcheck.chart.py
create mode 100644 collectors/python.d.plugin/httpcheck/httpcheck.conf
create mode 100644 collectors/python.d.plugin/icecast/Makefile.inc
create mode 100644 collectors/python.d.plugin/icecast/README.md
create mode 100644 collectors/python.d.plugin/icecast/icecast.chart.py
create mode 100644 collectors/python.d.plugin/icecast/icecast.conf
create mode 100644 collectors/python.d.plugin/ipfs/Makefile.inc
create mode 100644 collectors/python.d.plugin/ipfs/README.md
create mode 100644 collectors/python.d.plugin/ipfs/ipfs.chart.py
create mode 100644 collectors/python.d.plugin/ipfs/ipfs.conf
create mode 100644 collectors/python.d.plugin/isc_dhcpd/Makefile.inc
create mode 100644 collectors/python.d.plugin/isc_dhcpd/README.md
create mode 100644 collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
create mode 100644 collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
create mode 100644 collectors/python.d.plugin/linux_power_supply/Makefile.inc
create mode 100644 collectors/python.d.plugin/linux_power_supply/README.md
create mode 100644 collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
create mode 100644 collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
create mode 100644 collectors/python.d.plugin/litespeed/Makefile.inc
create mode 100644 collectors/python.d.plugin/litespeed/README.md
create mode 100644 collectors/python.d.plugin/litespeed/litespeed.chart.py
create mode 100644 collectors/python.d.plugin/litespeed/litespeed.conf
create mode 100644 collectors/python.d.plugin/logind/Makefile.inc
create mode 100644 collectors/python.d.plugin/logind/README.md
create mode 100644 collectors/python.d.plugin/logind/logind.chart.py
create mode 100644 collectors/python.d.plugin/logind/logind.conf
create mode 100644 collectors/python.d.plugin/mdstat/Makefile.inc
create mode 100644 collectors/python.d.plugin/mdstat/README.md
create mode 100644 collectors/python.d.plugin/mdstat/mdstat.chart.py
create mode 100644 collectors/python.d.plugin/mdstat/mdstat.conf
create mode 100644 collectors/python.d.plugin/megacli/Makefile.inc
create mode 100644 collectors/python.d.plugin/megacli/README.md
create mode 100644 collectors/python.d.plugin/megacli/megacli.chart.py
create mode 100644 collectors/python.d.plugin/megacli/megacli.conf
create mode 100644 collectors/python.d.plugin/memcached/Makefile.inc
create mode 100644 collectors/python.d.plugin/memcached/README.md
create mode 100644 collectors/python.d.plugin/memcached/memcached.chart.py
create mode 100644 collectors/python.d.plugin/memcached/memcached.conf
create mode 100644 collectors/python.d.plugin/mongodb/Makefile.inc
create mode 100644 collectors/python.d.plugin/mongodb/README.md
create mode 100644 collectors/python.d.plugin/mongodb/mongodb.chart.py
create mode 100644 collectors/python.d.plugin/mongodb/mongodb.conf
create mode 100644 collectors/python.d.plugin/monit/Makefile.inc
create mode 100644 collectors/python.d.plugin/monit/README.md
create mode 100644 collectors/python.d.plugin/monit/monit.chart.py
create mode 100644 collectors/python.d.plugin/monit/monit.conf
create mode 100644 collectors/python.d.plugin/mysql/Makefile.inc
create mode 100644 collectors/python.d.plugin/mysql/README.md
create mode 100644 collectors/python.d.plugin/mysql/mysql.chart.py
create mode 100644 collectors/python.d.plugin/mysql/mysql.conf
create mode 100644 collectors/python.d.plugin/nginx/Makefile.inc
create mode 100644 collectors/python.d.plugin/nginx/README.md
create mode 100644 collectors/python.d.plugin/nginx/nginx.chart.py
create mode 100644 collectors/python.d.plugin/nginx/nginx.conf
create mode 100644 collectors/python.d.plugin/nginx_plus/Makefile.inc
create mode 100644 collectors/python.d.plugin/nginx_plus/README.md
create mode 100644 collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
create mode 100644 collectors/python.d.plugin/nginx_plus/nginx_plus.conf
create mode 100644 collectors/python.d.plugin/nsd/Makefile.inc
create mode 100644 collectors/python.d.plugin/nsd/README.md
create mode 100644 collectors/python.d.plugin/nsd/nsd.chart.py
create mode 100644 collectors/python.d.plugin/nsd/nsd.conf
create mode 100644 collectors/python.d.plugin/ntpd/Makefile.inc
create mode 100644 collectors/python.d.plugin/ntpd/README.md
create mode 100644 collectors/python.d.plugin/ntpd/ntpd.chart.py
create mode 100644 collectors/python.d.plugin/ntpd/ntpd.conf
create mode 100644 collectors/python.d.plugin/ovpn_status_log/Makefile.inc
create mode 100644 collectors/python.d.plugin/ovpn_status_log/README.md
create mode 100644 collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
create mode 100644 collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
create mode 100644 collectors/python.d.plugin/phpfpm/Makefile.inc
create mode 100644 collectors/python.d.plugin/phpfpm/README.md
create mode 100644 collectors/python.d.plugin/phpfpm/phpfpm.chart.py
create mode 100644 collectors/python.d.plugin/phpfpm/phpfpm.conf
create mode 100644 collectors/python.d.plugin/portcheck/Makefile.inc
create mode 100644 collectors/python.d.plugin/portcheck/README.md
create mode 100644 collectors/python.d.plugin/portcheck/portcheck.chart.py
create mode 100644 collectors/python.d.plugin/portcheck/portcheck.conf
create mode 100644 collectors/python.d.plugin/postfix/Makefile.inc
create mode 100644 collectors/python.d.plugin/postfix/README.md
create mode 100644 collectors/python.d.plugin/postfix/postfix.chart.py
create mode 100644 collectors/python.d.plugin/postfix/postfix.conf
create mode 100644 collectors/python.d.plugin/postgres/Makefile.inc
create mode 100644 collectors/python.d.plugin/postgres/README.md
create mode 100644 collectors/python.d.plugin/postgres/postgres.chart.py
create mode 100644 collectors/python.d.plugin/postgres/postgres.conf
create mode 100644 collectors/python.d.plugin/powerdns/Makefile.inc
create mode 100644 collectors/python.d.plugin/powerdns/README.md
create mode 100644 collectors/python.d.plugin/powerdns/powerdns.chart.py
create mode 100644 collectors/python.d.plugin/powerdns/powerdns.conf
create mode 100644 collectors/python.d.plugin/proxysql/Makefile.inc
create mode 100644 collectors/python.d.plugin/proxysql/README.md
create mode 100644 collectors/python.d.plugin/proxysql/proxysql.chart.py
create mode 100644 collectors/python.d.plugin/proxysql/proxysql.conf
create mode 100644 collectors/python.d.plugin/puppet/Makefile.inc
create mode 100644 collectors/python.d.plugin/puppet/README.md
create mode 100644 collectors/python.d.plugin/puppet/puppet.chart.py
create mode 100644 collectors/python.d.plugin/puppet/puppet.conf
create mode 100644 collectors/python.d.plugin/python.d.conf
create mode 100644 collectors/python.d.plugin/python.d.plugin
create mode 100755 collectors/python.d.plugin/python.d.plugin.in
create mode 100644 collectors/python.d.plugin/python_modules/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/charts.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/collection.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/loaders.py
create mode 100644 collectors/python.d.plugin/python_modules/bases/loggers.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/composer.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/error.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/events.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/loader.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/parser.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/reader.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/representer.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/composer.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/error.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/events.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/loader.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/parser.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/reader.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/representer.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
create mode 100644 collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
create mode 100644 collectors/python.d.plugin/python_modules/third_party/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/third_party/boinc_client.py
create mode 100644 collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
create mode 100644 collectors/python.d.plugin/python_modules/third_party/mcrcon.py
create mode 100644 collectors/python.d.plugin/python_modules/third_party/monotonic.py
create mode 100644 collectors/python.d.plugin/python_modules/third_party/ordereddict.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/_collections.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/connection.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/exceptions.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/fields.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/filepost.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/six.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/request.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/response.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/connection.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/request.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/response.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/retry.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/url.py
create mode 100644 collectors/python.d.plugin/python_modules/urllib3/util/wait.py
create mode 100644 collectors/python.d.plugin/rabbitmq/Makefile.inc
create mode 100644 collectors/python.d.plugin/rabbitmq/README.md
create mode 100644 collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
create mode 100644 collectors/python.d.plugin/rabbitmq/rabbitmq.conf
create mode 100644 collectors/python.d.plugin/redis/Makefile.inc
create mode 100644 collectors/python.d.plugin/redis/README.md
create mode 100644 collectors/python.d.plugin/redis/redis.chart.py
create mode 100644 collectors/python.d.plugin/redis/redis.conf
create mode 100644 collectors/python.d.plugin/rethinkdbs/Makefile.inc
create mode 100644 collectors/python.d.plugin/rethinkdbs/README.md
create mode 100644 collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
create mode 100644 collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
create mode 100644 collectors/python.d.plugin/retroshare/Makefile.inc
create mode 100644 collectors/python.d.plugin/retroshare/README.md
create mode 100644 collectors/python.d.plugin/retroshare/retroshare.chart.py
create mode 100644 collectors/python.d.plugin/retroshare/retroshare.conf
create mode 100644 collectors/python.d.plugin/samba/Makefile.inc
create mode 100644 collectors/python.d.plugin/samba/README.md
create mode 100644 collectors/python.d.plugin/samba/samba.chart.py
create mode 100644 collectors/python.d.plugin/samba/samba.conf
create mode 100644 collectors/python.d.plugin/sensors/Makefile.inc
create mode 100644 collectors/python.d.plugin/sensors/README.md
create mode 100644 collectors/python.d.plugin/sensors/sensors.chart.py
create mode 100644 collectors/python.d.plugin/sensors/sensors.conf
create mode 100644 collectors/python.d.plugin/smartd_log/Makefile.inc
create mode 100644 collectors/python.d.plugin/smartd_log/README.md
create mode 100644 collectors/python.d.plugin/smartd_log/smartd_log.chart.py
create mode 100644 collectors/python.d.plugin/smartd_log/smartd_log.conf
create mode 100644 collectors/python.d.plugin/spigotmc/Makefile.inc
create mode 100644 collectors/python.d.plugin/spigotmc/README.md
create mode 100644 collectors/python.d.plugin/spigotmc/spigotmc.chart.py
create mode 100644 collectors/python.d.plugin/spigotmc/spigotmc.conf
create mode 100644 collectors/python.d.plugin/springboot/Makefile.inc
create mode 100644 collectors/python.d.plugin/springboot/README.md
create mode 100644 collectors/python.d.plugin/springboot/springboot.chart.py
create mode 100644 collectors/python.d.plugin/springboot/springboot.conf
create mode 100644 collectors/python.d.plugin/squid/Makefile.inc
create mode 100644 collectors/python.d.plugin/squid/README.md
create mode 100644 collectors/python.d.plugin/squid/squid.chart.py
create mode 100644 collectors/python.d.plugin/squid/squid.conf
create mode 100644 collectors/python.d.plugin/tomcat/Makefile.inc
create mode 100644 collectors/python.d.plugin/tomcat/README.md
create mode 100644 collectors/python.d.plugin/tomcat/tomcat.chart.py
create mode 100644 collectors/python.d.plugin/tomcat/tomcat.conf
create mode 100644 collectors/python.d.plugin/traefik/Makefile.inc
create mode 100644 collectors/python.d.plugin/traefik/README.md
create mode 100644 collectors/python.d.plugin/traefik/traefik.chart.py
create mode 100644 collectors/python.d.plugin/traefik/traefik.conf
create mode 100644 collectors/python.d.plugin/unbound/Makefile.inc
create mode 100644 collectors/python.d.plugin/unbound/README.md
create mode 100644 collectors/python.d.plugin/unbound/unbound.chart.py
create mode 100644 collectors/python.d.plugin/unbound/unbound.conf
create mode 100644 collectors/python.d.plugin/uwsgi/Makefile.inc
create mode 100644 collectors/python.d.plugin/uwsgi/README.md
create mode 100644 collectors/python.d.plugin/uwsgi/uwsgi.chart.py
create mode 100644 collectors/python.d.plugin/uwsgi/uwsgi.conf
create mode 100644 collectors/python.d.plugin/varnish/Makefile.inc
create mode 100644 collectors/python.d.plugin/varnish/README.md
create mode 100644 collectors/python.d.plugin/varnish/varnish.chart.py
create mode 100644 collectors/python.d.plugin/varnish/varnish.conf
create mode 100644 collectors/python.d.plugin/w1sensor/Makefile.inc
create mode 100644 collectors/python.d.plugin/w1sensor/README.md
create mode 100644 collectors/python.d.plugin/w1sensor/w1sensor.chart.py
create mode 100644 collectors/python.d.plugin/w1sensor/w1sensor.conf
create mode 100644 collectors/python.d.plugin/web_log/Makefile.inc
create mode 100644 collectors/python.d.plugin/web_log/README.md
create mode 100644 collectors/python.d.plugin/web_log/web_log.chart.py
create mode 100644 collectors/python.d.plugin/web_log/web_log.conf
create mode 100644 collectors/statsd.plugin/Makefile.am
create mode 100644 collectors/statsd.plugin/Makefile.in
create mode 100644 collectors/statsd.plugin/README.md
create mode 100644 collectors/statsd.plugin/example.conf
create mode 100644 collectors/statsd.plugin/statsd.c
create mode 100644 collectors/statsd.plugin/statsd.h
create mode 100644 collectors/tc.plugin/Makefile.am
create mode 100644 collectors/tc.plugin/Makefile.in
create mode 100644 collectors/tc.plugin/README.md
create mode 100644 collectors/tc.plugin/plugin_tc.c
create mode 100644 collectors/tc.plugin/plugin_tc.h
create mode 100644 collectors/tc.plugin/tc-qos-helper.sh
create mode 100755 collectors/tc.plugin/tc-qos-helper.sh.in
delete mode 100644 conf.d/Makefile.am
delete mode 100644 conf.d/Makefile.in
delete mode 100644 conf.d/apps_groups.conf
delete mode 100644 conf.d/charts.d.conf
delete mode 100644 conf.d/charts.d/ap.conf
delete mode 100644 conf.d/charts.d/apache.conf
delete mode 100644 conf.d/charts.d/apcupsd.conf
delete mode 100644 conf.d/charts.d/cpu_apps.conf
delete mode 100644 conf.d/charts.d/cpufreq.conf
delete mode 100644 conf.d/charts.d/example.conf
delete mode 100644 conf.d/charts.d/exim.conf
delete mode 100644 conf.d/charts.d/hddtemp.conf
delete mode 100644 conf.d/charts.d/libreswan.conf
delete mode 100644 conf.d/charts.d/load_average.conf
delete mode 100644 conf.d/charts.d/mem_apps.conf
delete mode 100644 conf.d/charts.d/mysql.conf
delete mode 100644 conf.d/charts.d/nginx.conf
delete mode 100644 conf.d/charts.d/nut.conf
delete mode 100644 conf.d/charts.d/opensips.conf
delete mode 100644 conf.d/charts.d/phpfpm.conf
delete mode 100644 conf.d/charts.d/postfix.conf
delete mode 100644 conf.d/charts.d/sensors.conf
delete mode 100644 conf.d/charts.d/squid.conf
delete mode 100644 conf.d/charts.d/tomcat.conf
delete mode 100644 conf.d/fping.conf
delete mode 100644 conf.d/health.d/apache.conf
delete mode 100644 conf.d/health.d/backend.conf
delete mode 100644 conf.d/health.d/beanstalkd.conf
delete mode 100644 conf.d/health.d/bind_rndc.conf
delete mode 100644 conf.d/health.d/btrfs.conf
delete mode 100644 conf.d/health.d/ceph.conf
delete mode 100644 conf.d/health.d/couchdb.conf
delete mode 100644 conf.d/health.d/cpu.conf
delete mode 100644 conf.d/health.d/disks.conf
delete mode 100644 conf.d/health.d/elasticsearch.conf
delete mode 100644 conf.d/health.d/entropy.conf
delete mode 100644 conf.d/health.d/fping.conf
delete mode 100644 conf.d/health.d/fronius.conf
delete mode 100644 conf.d/health.d/haproxy.conf
delete mode 100644 conf.d/health.d/httpcheck.conf
delete mode 100644 conf.d/health.d/ipc.conf
delete mode 100644 conf.d/health.d/ipfs.conf
delete mode 100644 conf.d/health.d/ipmi.conf
delete mode 100644 conf.d/health.d/isc_dhcpd.conf
delete mode 100644 conf.d/health.d/lighttpd.conf
delete mode 100644 conf.d/health.d/mdstat.conf
delete mode 100644 conf.d/health.d/memcached.conf
delete mode 100644 conf.d/health.d/memory.conf
delete mode 100644 conf.d/health.d/mongodb.conf
delete mode 100644 conf.d/health.d/mysql.conf
delete mode 100644 conf.d/health.d/named.conf
delete mode 100644 conf.d/health.d/net.conf
delete mode 100644 conf.d/health.d/netfilter.conf
delete mode 100644 conf.d/health.d/nginx.conf
delete mode 100644 conf.d/health.d/nginx_plus.conf
mode 100644 conf.d/health.d/nginx_plus.conf delete mode 100644 conf.d/health.d/portcheck.conf delete mode 100644 conf.d/health.d/postgres.conf delete mode 100644 conf.d/health.d/qos.conf delete mode 100644 conf.d/health.d/ram.conf delete mode 100644 conf.d/health.d/redis.conf delete mode 100644 conf.d/health.d/retroshare.conf delete mode 100644 conf.d/health.d/softnet.conf delete mode 100644 conf.d/health.d/squid.conf delete mode 100644 conf.d/health.d/stiebeleltron.conf delete mode 100644 conf.d/health.d/swap.conf delete mode 100644 conf.d/health.d/tcp_conn.conf delete mode 100644 conf.d/health.d/tcp_listen.conf delete mode 100644 conf.d/health.d/tcp_mem.conf delete mode 100644 conf.d/health.d/tcp_orphans.conf delete mode 100644 conf.d/health.d/tcp_resets.conf delete mode 100644 conf.d/health.d/udp_errors.conf delete mode 100644 conf.d/health.d/varnish.conf delete mode 100644 conf.d/health.d/web_log.conf delete mode 100644 conf.d/health.d/zfs.conf delete mode 100755 conf.d/health_alarm_notify.conf delete mode 100644 conf.d/health_email_recipients.conf delete mode 100644 conf.d/node.d.conf delete mode 100644 conf.d/node.d/README.md delete mode 100644 conf.d/node.d/fronius.conf.md delete mode 100644 conf.d/node.d/named.conf.md delete mode 100644 conf.d/node.d/sma_webbox.conf.md delete mode 100644 conf.d/node.d/snmp.conf.md delete mode 100644 conf.d/node.d/stiebeleltron.conf.md delete mode 100644 conf.d/python.d.conf delete mode 100644 conf.d/python.d/apache.conf delete mode 100644 conf.d/python.d/beanstalk.conf delete mode 100644 conf.d/python.d/bind_rndc.conf delete mode 100644 conf.d/python.d/ceph.conf delete mode 100644 conf.d/python.d/chrony.conf delete mode 100644 conf.d/python.d/couchdb.conf delete mode 100644 conf.d/python.d/cpufreq.conf delete mode 100644 conf.d/python.d/dns_query_time.conf delete mode 100644 conf.d/python.d/dnsdist.conf delete mode 100644 conf.d/python.d/dovecot.conf delete mode 100644 conf.d/python.d/elasticsearch.conf delete mode 100644 conf.d/python.d/example.conf delete mode 100644 conf.d/python.d/exim.conf delete mode 100644 conf.d/python.d/fail2ban.conf delete mode 100644 conf.d/python.d/freeradius.conf delete mode 100644 conf.d/python.d/go_expvar.conf delete mode 100644 conf.d/python.d/haproxy.conf delete mode 100644 conf.d/python.d/hddtemp.conf delete mode 100644 conf.d/python.d/httpcheck.conf delete mode 100644 conf.d/python.d/icecast.conf delete mode 100644 conf.d/python.d/ipfs.conf delete mode 100644 conf.d/python.d/isc_dhcpd.conf delete mode 100644 conf.d/python.d/mdstat.conf delete mode 100644 conf.d/python.d/memcached.conf delete mode 100644 conf.d/python.d/mongodb.conf delete mode 100644 conf.d/python.d/mysql.conf delete mode 100644 conf.d/python.d/nginx.conf delete mode 100644 conf.d/python.d/nginx_plus.conf delete mode 100644 conf.d/python.d/nsd.conf delete mode 100644 conf.d/python.d/ntpd.conf delete mode 100644 conf.d/python.d/ovpn_status_log.conf delete mode 100644 conf.d/python.d/phpfpm.conf delete mode 100644 conf.d/python.d/portcheck.conf delete mode 100644 conf.d/python.d/postfix.conf delete mode 100644 conf.d/python.d/postgres.conf delete mode 100644 conf.d/python.d/powerdns.conf delete mode 100644 conf.d/python.d/rabbitmq.conf delete mode 100644 conf.d/python.d/redis.conf delete mode 100644 conf.d/python.d/retroshare.conf delete mode 100644 conf.d/python.d/samba.conf delete mode 100644 conf.d/python.d/sensors.conf delete mode 100644 conf.d/python.d/smartd_log.conf delete mode 100644 conf.d/python.d/springboot.conf delete mode 100644 
conf.d/python.d/squid.conf delete mode 100644 conf.d/python.d/tomcat.conf delete mode 100644 conf.d/python.d/traefik.conf delete mode 100644 conf.d/python.d/varnish.conf delete mode 100644 conf.d/python.d/web_log.conf delete mode 100644 conf.d/statsd.d/example.conf delete mode 100644 conf.d/stream.conf create mode 100644 contrib/debian/changelog create mode 100644 contrib/debian/compat create mode 100644 contrib/debian/control create mode 100644 contrib/debian/control.wheezy create mode 100644 contrib/debian/copyright create mode 100644 contrib/debian/netdata.conf create mode 100644 contrib/debian/netdata.default create mode 100644 contrib/debian/netdata.docs create mode 100755 contrib/debian/netdata.init create mode 100644 contrib/debian/netdata.install create mode 100644 contrib/debian/netdata.lintian-overrides create mode 100644 contrib/debian/netdata.postinst.in create mode 100644 contrib/debian/netdata.postrm create mode 100644 contrib/debian/netdata.service create mode 100755 contrib/debian/rules create mode 100644 contrib/debian/source/format delete mode 100755 contrib/nc-backend.sh create mode 100644 daemon/Makefile.am create mode 100644 daemon/Makefile.in create mode 100644 daemon/README.md create mode 100644 daemon/common.c create mode 100644 daemon/common.h create mode 100644 daemon/daemon.c create mode 100644 daemon/daemon.h create mode 100644 daemon/global_statistics.c create mode 100644 daemon/global_statistics.h create mode 100644 daemon/main.c create mode 100644 daemon/main.h create mode 100644 daemon/signals.c create mode 100644 daemon/signals.h create mode 100644 daemon/unit_test.c create mode 100644 daemon/unit_test.h create mode 100644 database/Makefile.am create mode 100644 database/Makefile.in create mode 100644 database/README.md create mode 100644 database/rrd.c create mode 100644 database/rrd.h create mode 100644 database/rrdcalc.c create mode 100644 database/rrdcalc.h create mode 100644 database/rrdcalctemplate.c create mode 100644 database/rrdcalctemplate.h create mode 100644 database/rrddim.c create mode 100644 database/rrddimvar.c create mode 100644 database/rrddimvar.h create mode 100644 database/rrdfamily.c create mode 100644 database/rrdhost.c create mode 100644 database/rrdset.c create mode 100644 database/rrdsetvar.c create mode 100644 database/rrdsetvar.h create mode 100644 database/rrdvar.c create mode 100644 database/rrdvar.h delete mode 100644 docker-build.sh create mode 100644 docker/Dockerfile create mode 100755 docker/build.sh create mode 100644 docker/run.sh create mode 100644 health/Makefile.am create mode 100644 health/Makefile.in create mode 100644 health/README.md create mode 100644 health/health.c create mode 100644 health/health.d/adaptec_raid.conf create mode 100644 health/health.d/apache.conf create mode 100644 health/health.d/apcupsd.conf create mode 100644 health/health.d/backend.conf create mode 100644 health/health.d/bcache.conf create mode 100644 health/health.d/beanstalkd.conf create mode 100644 health/health.d/bind_rndc.conf create mode 100644 health/health.d/boinc.conf create mode 100644 health/health.d/btrfs.conf create mode 100644 health/health.d/ceph.conf create mode 100644 health/health.d/couchdb.conf create mode 100644 health/health.d/cpu.conf create mode 100644 health/health.d/disks.conf create mode 100644 health/health.d/dockerd.conf create mode 100644 health/health.d/elasticsearch.conf create mode 100644 health/health.d/entropy.conf create mode 100644 health/health.d/fping.conf create mode 100644 health/health.d/fronius.conf 
create mode 100644 health/health.d/haproxy.conf create mode 100644 health/health.d/httpcheck.conf create mode 100644 health/health.d/ipc.conf create mode 100644 health/health.d/ipfs.conf create mode 100644 health/health.d/ipmi.conf create mode 100644 health/health.d/isc_dhcpd.conf create mode 100644 health/health.d/lighttpd.conf create mode 100644 health/health.d/linux_power_supply.conf create mode 100644 health/health.d/load.conf create mode 100644 health/health.d/mdstat.conf create mode 100644 health/health.d/megacli.conf create mode 100644 health/health.d/memcached.conf create mode 100644 health/health.d/memory.conf create mode 100644 health/health.d/mongodb.conf create mode 100644 health/health.d/mysql.conf create mode 100644 health/health.d/named.conf create mode 100644 health/health.d/net.conf create mode 100644 health/health.d/netfilter.conf create mode 100644 health/health.d/nginx.conf create mode 100644 health/health.d/nginx_plus.conf create mode 100644 health/health.d/portcheck.conf create mode 100644 health/health.d/postgres.conf create mode 100644 health/health.d/qos.conf create mode 100644 health/health.d/ram.conf create mode 100644 health/health.d/redis.conf create mode 100644 health/health.d/retroshare.conf create mode 100644 health/health.d/softnet.conf create mode 100644 health/health.d/squid.conf create mode 100644 health/health.d/stiebeleltron.conf create mode 100644 health/health.d/swap.conf create mode 100644 health/health.d/tcp_conn.conf create mode 100644 health/health.d/tcp_listen.conf create mode 100644 health/health.d/tcp_mem.conf create mode 100644 health/health.d/tcp_orphans.conf create mode 100644 health/health.d/tcp_resets.conf create mode 100644 health/health.d/udp_errors.conf create mode 100644 health/health.d/varnish.conf create mode 100644 health/health.d/web_log.conf create mode 100644 health/health.d/zfs.conf create mode 100644 health/health.h create mode 100644 health/health_config.c create mode 100644 health/health_json.c create mode 100644 health/health_log.c create mode 100644 health/notifications/Makefile.am create mode 100644 health/notifications/Makefile.in create mode 100644 health/notifications/README.md create mode 100755 health/notifications/alarm-email.sh create mode 100644 health/notifications/alarm-notify.sh create mode 100755 health/notifications/alarm-notify.sh.in create mode 100755 health/notifications/alarm-test.sh create mode 100644 health/notifications/alerta/Makefile.inc create mode 100644 health/notifications/alerta/README.md create mode 100644 health/notifications/awssns/Makefile.inc create mode 100644 health/notifications/awssns/README.md create mode 100644 health/notifications/discord/Makefile.inc create mode 100644 health/notifications/discord/README.md create mode 100644 health/notifications/email/Makefile.inc create mode 100644 health/notifications/email/README.md create mode 100644 health/notifications/flock/Makefile.inc create mode 100644 health/notifications/flock/README.md create mode 100755 health/notifications/health_alarm_notify.conf create mode 100644 health/notifications/health_email_recipients.conf create mode 100644 health/notifications/irc/Makefile.inc create mode 100644 health/notifications/irc/README.md create mode 100644 health/notifications/kavenegar/Makefile.inc create mode 100644 health/notifications/kavenegar/README.md create mode 100644 health/notifications/messagebird/Makefile.inc create mode 100644 health/notifications/messagebird/README.md create mode 100644 health/notifications/pagerduty/Makefile.inc 
create mode 100644 health/notifications/pagerduty/README.md create mode 100644 health/notifications/pushbullet/Makefile.inc create mode 100644 health/notifications/pushbullet/README.md create mode 100644 health/notifications/pushover/Makefile.inc create mode 100644 health/notifications/pushover/README.md create mode 100644 health/notifications/rocketchat/Makefile.inc create mode 100644 health/notifications/rocketchat/README.md create mode 100644 health/notifications/slack/Makefile.inc create mode 100644 health/notifications/slack/README.md create mode 100644 health/notifications/syslog/Makefile.inc create mode 100644 health/notifications/syslog/README.md create mode 100644 health/notifications/telegram/Makefile.inc create mode 100644 health/notifications/telegram/README.md create mode 100644 health/notifications/twilio/Makefile.inc create mode 100644 health/notifications/twilio/README.md create mode 100644 health/notifications/web/Makefile.inc create mode 100644 health/notifications/web/README.md create mode 100644 installer/.keep create mode 100644 libnetdata/Makefile.am create mode 100644 libnetdata/Makefile.in create mode 100644 libnetdata/README.md create mode 100644 libnetdata/adaptive_resortable_list/Makefile.am create mode 100644 libnetdata/adaptive_resortable_list/Makefile.in create mode 100644 libnetdata/adaptive_resortable_list/README.md create mode 100644 libnetdata/adaptive_resortable_list/adaptive_resortable_list.c create mode 100644 libnetdata/adaptive_resortable_list/adaptive_resortable_list.h create mode 100644 libnetdata/avl/Makefile.am create mode 100644 libnetdata/avl/Makefile.in create mode 100644 libnetdata/avl/README.md create mode 100644 libnetdata/avl/avl.c create mode 100644 libnetdata/avl/avl.h create mode 100644 libnetdata/buffer/Makefile.am create mode 100644 libnetdata/buffer/Makefile.in create mode 100644 libnetdata/buffer/README.md create mode 100644 libnetdata/buffer/buffer.c create mode 100644 libnetdata/buffer/buffer.h create mode 100644 libnetdata/clocks/Makefile.am create mode 100644 libnetdata/clocks/Makefile.in create mode 100644 libnetdata/clocks/README.md create mode 100644 libnetdata/clocks/clocks.c create mode 100644 libnetdata/clocks/clocks.h create mode 100644 libnetdata/config/Makefile.am create mode 100644 libnetdata/config/Makefile.in create mode 100644 libnetdata/config/README.md create mode 100644 libnetdata/config/appconfig.c create mode 100644 libnetdata/config/appconfig.h create mode 100644 libnetdata/dictionary/Makefile.am create mode 100644 libnetdata/dictionary/Makefile.in create mode 100644 libnetdata/dictionary/README.md create mode 100644 libnetdata/dictionary/dictionary.c create mode 100644 libnetdata/dictionary/dictionary.h create mode 100644 libnetdata/eval/Makefile.am create mode 100644 libnetdata/eval/Makefile.in create mode 100644 libnetdata/eval/README.md create mode 100644 libnetdata/eval/eval.c create mode 100644 libnetdata/eval/eval.h create mode 100644 libnetdata/inlined.h create mode 100644 libnetdata/libnetdata.c create mode 100644 libnetdata/libnetdata.h create mode 100644 libnetdata/locks/Makefile.am create mode 100644 libnetdata/locks/Makefile.in create mode 100644 libnetdata/locks/README.md create mode 100644 libnetdata/locks/locks.c create mode 100644 libnetdata/locks/locks.h create mode 100644 libnetdata/log/Makefile.am create mode 100644 libnetdata/log/Makefile.in create mode 100644 libnetdata/log/README.md create mode 100644 libnetdata/log/log.c create mode 100644 libnetdata/log/log.h create mode 100644 
libnetdata/os.c create mode 100644 libnetdata/os.h create mode 100644 libnetdata/popen/Makefile.am create mode 100644 libnetdata/popen/Makefile.in create mode 100644 libnetdata/popen/README.md create mode 100644 libnetdata/popen/popen.c create mode 100644 libnetdata/popen/popen.h create mode 100644 libnetdata/procfile/Makefile.am create mode 100644 libnetdata/procfile/Makefile.in create mode 100644 libnetdata/procfile/README.md create mode 100644 libnetdata/procfile/procfile.c create mode 100644 libnetdata/procfile/procfile.h create mode 100644 libnetdata/simple_pattern/Makefile.am create mode 100644 libnetdata/simple_pattern/Makefile.in create mode 100644 libnetdata/simple_pattern/README.md create mode 100644 libnetdata/simple_pattern/simple_pattern.c create mode 100644 libnetdata/simple_pattern/simple_pattern.h create mode 100644 libnetdata/socket/Makefile.am create mode 100644 libnetdata/socket/Makefile.in create mode 100644 libnetdata/socket/README.md create mode 100644 libnetdata/socket/socket.c create mode 100644 libnetdata/socket/socket.h create mode 100644 libnetdata/statistical/Makefile.am create mode 100644 libnetdata/statistical/Makefile.in create mode 100644 libnetdata/statistical/README.md create mode 100644 libnetdata/statistical/statistical.c create mode 100644 libnetdata/statistical/statistical.h create mode 100644 libnetdata/storage_number/Makefile.am create mode 100644 libnetdata/storage_number/Makefile.in create mode 100644 libnetdata/storage_number/README.md create mode 100644 libnetdata/storage_number/storage_number.c create mode 100644 libnetdata/storage_number/storage_number.h create mode 100644 libnetdata/threads/Makefile.am create mode 100644 libnetdata/threads/Makefile.in create mode 100644 libnetdata/threads/README.md create mode 100644 libnetdata/threads/threads.c create mode 100644 libnetdata/threads/threads.h create mode 100644 libnetdata/url/Makefile.am create mode 100644 libnetdata/url/Makefile.in create mode 100644 libnetdata/url/README.md create mode 100644 libnetdata/url/url.c create mode 100644 libnetdata/url/url.h delete mode 100644 m4/ax_c___atomic.m4 delete mode 100644 m4/ax_c__generic.m4 delete mode 100644 m4/ax_c_lto.m4 delete mode 100644 m4/ax_c_mallinfo.m4 delete mode 100644 m4/ax_c_mallopt.m4 delete mode 100644 m4/ax_c_statement_expressions.m4 delete mode 100644 m4/ax_check_compile_flag.m4 delete mode 100644 m4/ax_check_enable_debug.m4 delete mode 100644 m4/ax_gcc_func_attribute.m4 delete mode 100644 m4/ax_pthread.m4 delete mode 100644 m4/jemalloc.m4 delete mode 100644 m4/tcmalloc.m4 create mode 100755 makeself/jobs/50-bash-4.4.18.install.sh delete mode 100755 makeself/jobs/50-bash-4.4.install.sh delete mode 100755 makeself/jobs/50-curl-7.53.1.install.sh create mode 100755 makeself/jobs/50-curl-7.60.0.install.sh delete mode 100644 node.d/Makefile.am delete mode 100644 node.d/Makefile.in delete mode 100644 node.d/README.md delete mode 100644 node.d/fronius.node.js delete mode 100644 node.d/named.node.js delete mode 100644 node.d/node_modules/asn1-ber.js delete mode 100644 node.d/node_modules/extend.js delete mode 100644 node.d/node_modules/lib/ber/errors.js delete mode 100644 node.d/node_modules/lib/ber/index.js delete mode 100644 node.d/node_modules/lib/ber/reader.js delete mode 100644 node.d/node_modules/lib/ber/types.js delete mode 100644 node.d/node_modules/lib/ber/writer.js delete mode 100644 node.d/node_modules/net-snmp.js delete mode 100644 node.d/node_modules/netdata.js delete mode 100644 node.d/node_modules/pixl-xml.js delete mode 100644 
node.d/sma_webbox.node.js delete mode 100644 node.d/snmp.node.js delete mode 100644 node.d/stiebeleltron.node.js delete mode 100644 plugins.d/Makefile.am delete mode 100644 plugins.d/Makefile.in delete mode 100644 plugins.d/README.md delete mode 100755 plugins.d/alarm-email.sh delete mode 100755 plugins.d/alarm-notify.sh delete mode 100755 plugins.d/alarm-test.sh delete mode 100755 plugins.d/cgroup-name.sh delete mode 100755 plugins.d/cgroup-network-helper.sh delete mode 100755 plugins.d/charts.d.dryrun-helper.sh delete mode 100755 plugins.d/charts.d.plugin delete mode 100755 plugins.d/fping.plugin delete mode 100644 plugins.d/loopsleepms.sh.inc delete mode 100755 plugins.d/node.d.plugin delete mode 100755 plugins.d/python.d.plugin delete mode 100755 plugins.d/tc-qos-helper.sh delete mode 100644 python.d/Makefile.am delete mode 100644 python.d/Makefile.in delete mode 100644 python.d/README.md delete mode 100644 python.d/apache.chart.py delete mode 100644 python.d/beanstalk.chart.py delete mode 100644 python.d/bind_rndc.chart.py delete mode 100644 python.d/ceph.chart.py delete mode 100644 python.d/chrony.chart.py delete mode 100644 python.d/couchdb.chart.py delete mode 100644 python.d/cpufreq.chart.py delete mode 100644 python.d/cpuidle.chart.py delete mode 100644 python.d/dns_query_time.chart.py delete mode 100644 python.d/dnsdist.chart.py delete mode 100644 python.d/dovecot.chart.py delete mode 100644 python.d/elasticsearch.chart.py delete mode 100644 python.d/example.chart.py delete mode 100644 python.d/exim.chart.py delete mode 100644 python.d/fail2ban.chart.py delete mode 100644 python.d/freeradius.chart.py delete mode 100644 python.d/go_expvar.chart.py delete mode 100644 python.d/haproxy.chart.py delete mode 100644 python.d/hddtemp.chart.py delete mode 100644 python.d/httpcheck.chart.py delete mode 100644 python.d/icecast.chart.py delete mode 100644 python.d/ipfs.chart.py delete mode 100644 python.d/isc_dhcpd.chart.py delete mode 100644 python.d/mdstat.chart.py delete mode 100644 python.d/memcached.chart.py delete mode 100644 python.d/mongodb.chart.py delete mode 100644 python.d/mysql.chart.py delete mode 100644 python.d/nginx.chart.py delete mode 100644 python.d/nginx_plus.chart.py delete mode 100644 python.d/nsd.chart.py delete mode 100644 python.d/ntpd.chart.py delete mode 100644 python.d/ovpn_status_log.chart.py delete mode 100644 python.d/phpfpm.chart.py delete mode 100644 python.d/portcheck.chart.py delete mode 100644 python.d/postfix.chart.py delete mode 100644 python.d/postgres.chart.py delete mode 100644 python.d/powerdns.chart.py delete mode 100644 python.d/python-modules-installer.sh delete mode 100644 python.d/python_modules/__init__.py delete mode 100644 python.d/python_modules/base.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/ExecutableService.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/LogService.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/MySQLService.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/SimpleService.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/SocketService.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/UrlService.py delete mode 100644 python.d/python_modules/bases/FrameworkServices/__init__.py delete mode 100644 python.d/python_modules/bases/__init__.py delete mode 100644 python.d/python_modules/bases/charts.py delete mode 100644 python.d/python_modules/bases/collection.py delete mode 100644 
python.d/python_modules/bases/loaders.py delete mode 100644 python.d/python_modules/bases/loggers.py delete mode 100644 python.d/python_modules/pyyaml2/__init__.py delete mode 100644 python.d/python_modules/pyyaml2/composer.py delete mode 100644 python.d/python_modules/pyyaml2/constructor.py delete mode 100644 python.d/python_modules/pyyaml2/cyaml.py delete mode 100644 python.d/python_modules/pyyaml2/dumper.py delete mode 100644 python.d/python_modules/pyyaml2/emitter.py delete mode 100644 python.d/python_modules/pyyaml2/error.py delete mode 100644 python.d/python_modules/pyyaml2/events.py delete mode 100644 python.d/python_modules/pyyaml2/loader.py delete mode 100644 python.d/python_modules/pyyaml2/nodes.py delete mode 100644 python.d/python_modules/pyyaml2/parser.py delete mode 100644 python.d/python_modules/pyyaml2/reader.py delete mode 100644 python.d/python_modules/pyyaml2/representer.py delete mode 100644 python.d/python_modules/pyyaml2/resolver.py delete mode 100644 python.d/python_modules/pyyaml2/scanner.py delete mode 100644 python.d/python_modules/pyyaml2/serializer.py delete mode 100644 python.d/python_modules/pyyaml2/tokens.py delete mode 100644 python.d/python_modules/pyyaml3/__init__.py delete mode 100644 python.d/python_modules/pyyaml3/composer.py delete mode 100644 python.d/python_modules/pyyaml3/constructor.py delete mode 100644 python.d/python_modules/pyyaml3/cyaml.py delete mode 100644 python.d/python_modules/pyyaml3/dumper.py delete mode 100644 python.d/python_modules/pyyaml3/emitter.py delete mode 100644 python.d/python_modules/pyyaml3/error.py delete mode 100644 python.d/python_modules/pyyaml3/events.py delete mode 100644 python.d/python_modules/pyyaml3/loader.py delete mode 100644 python.d/python_modules/pyyaml3/nodes.py delete mode 100644 python.d/python_modules/pyyaml3/parser.py delete mode 100644 python.d/python_modules/pyyaml3/reader.py delete mode 100644 python.d/python_modules/pyyaml3/representer.py delete mode 100644 python.d/python_modules/pyyaml3/resolver.py delete mode 100644 python.d/python_modules/pyyaml3/scanner.py delete mode 100644 python.d/python_modules/pyyaml3/serializer.py delete mode 100644 python.d/python_modules/pyyaml3/tokens.py delete mode 100644 python.d/python_modules/third_party/__init__.py delete mode 100644 python.d/python_modules/third_party/lm_sensors.py delete mode 100644 python.d/python_modules/third_party/ordereddict.py delete mode 100644 python.d/python_modules/urllib3/__init__.py delete mode 100644 python.d/python_modules/urllib3/_collections.py delete mode 100644 python.d/python_modules/urllib3/connection.py delete mode 100644 python.d/python_modules/urllib3/connectionpool.py delete mode 100644 python.d/python_modules/urllib3/contrib/__init__.py delete mode 100644 python.d/python_modules/urllib3/contrib/_securetransport/__init__.py delete mode 100644 python.d/python_modules/urllib3/contrib/_securetransport/bindings.py delete mode 100644 python.d/python_modules/urllib3/contrib/_securetransport/low_level.py delete mode 100644 python.d/python_modules/urllib3/contrib/appengine.py delete mode 100644 python.d/python_modules/urllib3/contrib/ntlmpool.py delete mode 100644 python.d/python_modules/urllib3/contrib/pyopenssl.py delete mode 100644 python.d/python_modules/urllib3/contrib/securetransport.py delete mode 100644 python.d/python_modules/urllib3/contrib/socks.py delete mode 100644 python.d/python_modules/urllib3/exceptions.py delete mode 100644 python.d/python_modules/urllib3/fields.py delete mode 100644 
python.d/python_modules/urllib3/filepost.py delete mode 100644 python.d/python_modules/urllib3/packages/__init__.py delete mode 100644 python.d/python_modules/urllib3/packages/backports/__init__.py delete mode 100644 python.d/python_modules/urllib3/packages/backports/makefile.py delete mode 100644 python.d/python_modules/urllib3/packages/ordered_dict.py delete mode 100644 python.d/python_modules/urllib3/packages/six.py delete mode 100644 python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py delete mode 100644 python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py delete mode 100644 python.d/python_modules/urllib3/poolmanager.py delete mode 100644 python.d/python_modules/urllib3/request.py delete mode 100644 python.d/python_modules/urllib3/response.py delete mode 100644 python.d/python_modules/urllib3/util/__init__.py delete mode 100644 python.d/python_modules/urllib3/util/connection.py delete mode 100644 python.d/python_modules/urllib3/util/request.py delete mode 100644 python.d/python_modules/urllib3/util/response.py delete mode 100644 python.d/python_modules/urllib3/util/retry.py delete mode 100644 python.d/python_modules/urllib3/util/selectors.py delete mode 100644 python.d/python_modules/urllib3/util/ssl_.py delete mode 100644 python.d/python_modules/urllib3/util/timeout.py delete mode 100644 python.d/python_modules/urllib3/util/url.py delete mode 100644 python.d/python_modules/urllib3/util/wait.py delete mode 100644 python.d/rabbitmq.chart.py delete mode 100644 python.d/redis.chart.py delete mode 100644 python.d/retroshare.chart.py delete mode 100644 python.d/samba.chart.py delete mode 100644 python.d/sensors.chart.py delete mode 100644 python.d/smartd_log.chart.py delete mode 100644 python.d/springboot.chart.py delete mode 100644 python.d/squid.chart.py delete mode 100644 python.d/tomcat.chart.py delete mode 100644 python.d/traefik.chart.py delete mode 100644 python.d/varnish.chart.py delete mode 100644 python.d/web_log.chart.py create mode 100644 registry/Makefile.am create mode 100644 registry/Makefile.in create mode 100644 registry/README.md create mode 100644 registry/registry.c create mode 100644 registry/registry.h create mode 100644 registry/registry_db.c create mode 100644 registry/registry_init.c create mode 100644 registry/registry_internals.c create mode 100644 registry/registry_internals.h create mode 100644 registry/registry_log.c create mode 100644 registry/registry_machine.c create mode 100644 registry/registry_machine.h create mode 100644 registry/registry_person.c create mode 100644 registry/registry_person.h create mode 100644 registry/registry_url.c create mode 100644 registry/registry_url.h delete mode 100644 src/.keep delete mode 100644 src/Makefile.am delete mode 100644 src/Makefile.in delete mode 100644 src/adaptive_resortable_list.c delete mode 100644 src/adaptive_resortable_list.h delete mode 100644 src/appconfig.c delete mode 100644 src/appconfig.h delete mode 100644 src/apps_plugin.c delete mode 100644 src/avl.c delete mode 100644 src/avl.h delete mode 100644 src/backend_prometheus.c delete mode 100644 src/backend_prometheus.h delete mode 100644 src/backends.c delete mode 100644 src/backends.h delete mode 100644 src/cgroup-network.c delete mode 100644 src/clocks.c delete mode 100644 src/clocks.h delete mode 100644 src/common.c delete mode 100644 src/common.h delete mode 100644 src/daemon.c delete mode 100644 src/daemon.h delete mode 100644 src/dictionary.c delete mode 100644 src/dictionary.h delete mode 100644 
src/eval.c delete mode 100644 src/eval.h delete mode 100644 src/freebsd_devstat.c delete mode 100644 src/freebsd_getifaddrs.c delete mode 100644 src/freebsd_getmntinfo.c delete mode 100644 src/freebsd_ipfw.c delete mode 100644 src/freebsd_kstat_zfs.c delete mode 100644 src/freebsd_sysctl.c delete mode 100644 src/freeipmi_plugin.c delete mode 100644 src/global_statistics.c delete mode 100644 src/global_statistics.h delete mode 100644 src/health.c delete mode 100644 src/health.h delete mode 100644 src/health_config.c delete mode 100644 src/health_json.c delete mode 100644 src/health_log.c delete mode 100644 src/inlined.h delete mode 100644 src/ipc.c delete mode 100644 src/ipc.h delete mode 100644 src/locks.c delete mode 100644 src/locks.h delete mode 100644 src/log.c delete mode 100644 src/log.h delete mode 100644 src/macos_fw.c delete mode 100644 src/macos_mach_smi.c delete mode 100644 src/macos_sysctl.c delete mode 100644 src/main.c delete mode 100644 src/main.h delete mode 100644 src/plugin_checks.c delete mode 100644 src/plugin_checks.h delete mode 100644 src/plugin_freebsd.c delete mode 100644 src/plugin_freebsd.h delete mode 100644 src/plugin_idlejitter.c delete mode 100644 src/plugin_idlejitter.h delete mode 100644 src/plugin_macos.c delete mode 100644 src/plugin_macos.h delete mode 100644 src/plugin_nfacct.c delete mode 100644 src/plugin_nfacct.h delete mode 100644 src/plugin_proc.c delete mode 100644 src/plugin_proc.h delete mode 100644 src/plugin_proc_diskspace.c delete mode 100644 src/plugin_proc_diskspace.h delete mode 100644 src/plugin_tc.c delete mode 100644 src/plugin_tc.h delete mode 100644 src/plugins_d.c delete mode 100644 src/plugins_d.h delete mode 100644 src/popen.c delete mode 100644 src/popen.h delete mode 100644 src/proc_diskstats.c delete mode 100644 src/proc_interrupts.c delete mode 100644 src/proc_loadavg.c delete mode 100644 src/proc_meminfo.c delete mode 100644 src/proc_net_dev.c delete mode 100644 src/proc_net_ip_vs_stats.c delete mode 100644 src/proc_net_netstat.c delete mode 100644 src/proc_net_rpc_nfs.c delete mode 100644 src/proc_net_rpc_nfsd.c delete mode 100644 src/proc_net_snmp.c delete mode 100644 src/proc_net_snmp6.c delete mode 100644 src/proc_net_sockstat.c delete mode 100644 src/proc_net_sockstat6.c delete mode 100644 src/proc_net_softnet_stat.c delete mode 100644 src/proc_net_stat_conntrack.c delete mode 100644 src/proc_net_stat_synproxy.c delete mode 100644 src/proc_self_mountinfo.c delete mode 100644 src/proc_self_mountinfo.h delete mode 100644 src/proc_softirqs.c delete mode 100644 src/proc_spl_kstat_zfs.c delete mode 100644 src/proc_stat.c delete mode 100644 src/proc_sys_kernel_random_entropy_avail.c delete mode 100644 src/proc_uptime.c delete mode 100644 src/proc_vmstat.c delete mode 100644 src/procfile.c delete mode 100644 src/procfile.h delete mode 100644 src/registry.c delete mode 100644 src/registry.h delete mode 100644 src/registry_db.c delete mode 100644 src/registry_init.c delete mode 100644 src/registry_internals.c delete mode 100644 src/registry_internals.h delete mode 100644 src/registry_log.c delete mode 100644 src/registry_machine.c delete mode 100644 src/registry_machine.h delete mode 100644 src/registry_person.c delete mode 100644 src/registry_person.h delete mode 100644 src/registry_url.c delete mode 100644 src/registry_url.h delete mode 100644 src/rrd.c delete mode 100644 src/rrd.h delete mode 100644 src/rrd2json.c delete mode 100644 src/rrd2json.h delete mode 100644 src/rrd2json_api_old.c delete mode 100644 
src/rrd2json_api_old.h delete mode 100644 src/rrdcalc.c delete mode 100644 src/rrdcalctemplate.c delete mode 100644 src/rrddim.c delete mode 100644 src/rrddimvar.c delete mode 100644 src/rrdfamily.c delete mode 100644 src/rrdhost.c delete mode 100644 src/rrdpush.c delete mode 100644 src/rrdpush.h delete mode 100644 src/rrdset.c delete mode 100644 src/rrdsetvar.c delete mode 100644 src/rrdvar.c delete mode 100644 src/signals.c delete mode 100644 src/signals.h delete mode 100644 src/simple_pattern.c delete mode 100644 src/simple_pattern.h delete mode 100644 src/socket.c delete mode 100644 src/socket.h delete mode 100644 src/statistical.c delete mode 100644 src/statistical.h delete mode 100644 src/statsd.c delete mode 100644 src/statsd.h delete mode 100644 src/storage_number.c delete mode 100644 src/storage_number.h delete mode 100644 src/sys_devices_system_edac_mc.c delete mode 100644 src/sys_devices_system_node.c delete mode 100644 src/sys_fs_btrfs.c delete mode 100644 src/sys_fs_cgroup.c delete mode 100644 src/sys_kernel_mm_ksm.c delete mode 100644 src/threads.c delete mode 100644 src/threads.h delete mode 100644 src/unit_test.c delete mode 100644 src/unit_test.h delete mode 100644 src/url.c delete mode 100644 src/url.h delete mode 100644 src/web_api_old.c delete mode 100644 src/web_api_old.h delete mode 100644 src/web_api_v1.c delete mode 100644 src/web_api_v1.h delete mode 100644 src/web_buffer.c delete mode 100644 src/web_buffer.h delete mode 100644 src/web_buffer_svg.c delete mode 100644 src/web_buffer_svg.h delete mode 100644 src/web_client.c delete mode 100644 src/web_client.h delete mode 100644 src/web_server.c delete mode 100644 src/web_server.h delete mode 100644 src/zfs_common.c delete mode 100644 src/zfs_common.h create mode 100644 streaming/Makefile.am create mode 100644 streaming/Makefile.in create mode 100644 streaming/README.md create mode 100644 streaming/rrdpush.c create mode 100644 streaming/rrdpush.h create mode 100644 streaming/stream.conf create mode 100644 system/edit-config create mode 100755 system/edit-config.in create mode 100644 system/netdata.plist.in create mode 100644 web/README.md create mode 100644 web/api/Makefile.am create mode 100644 web/api/Makefile.in create mode 100644 web/api/README.md create mode 100644 web/api/badges/Makefile.am create mode 100644 web/api/badges/Makefile.in create mode 100644 web/api/badges/README.md create mode 100644 web/api/badges/web_buffer_svg.c create mode 100644 web/api/badges/web_buffer_svg.h create mode 100644 web/api/exporters/Makefile.am create mode 100644 web/api/exporters/Makefile.in create mode 100644 web/api/exporters/README.md create mode 100644 web/api/exporters/allmetrics.c create mode 100644 web/api/exporters/allmetrics.h create mode 100644 web/api/exporters/prometheus/Makefile.am create mode 100644 web/api/exporters/prometheus/Makefile.in create mode 100644 web/api/exporters/prometheus/README.md create mode 100644 web/api/exporters/shell/Makefile.am create mode 100644 web/api/exporters/shell/Makefile.in create mode 100644 web/api/exporters/shell/README.md create mode 100644 web/api/exporters/shell/allmetrics_shell.c create mode 100644 web/api/exporters/shell/allmetrics_shell.h create mode 100644 web/api/formatters/Makefile.am create mode 100644 web/api/formatters/Makefile.in create mode 100644 web/api/formatters/README.md create mode 100644 web/api/formatters/charts2json.c create mode 100644 web/api/formatters/charts2json.h create mode 100644 web/api/formatters/csv/Makefile.am create mode 100644 
web/api/formatters/csv/Makefile.in create mode 100644 web/api/formatters/csv/README.md create mode 100644 web/api/formatters/csv/csv.c create mode 100644 web/api/formatters/csv/csv.h create mode 100644 web/api/formatters/json/Makefile.am create mode 100644 web/api/formatters/json/Makefile.in create mode 100644 web/api/formatters/json/README.md create mode 100644 web/api/formatters/json/json.c create mode 100644 web/api/formatters/json/json.h create mode 100644 web/api/formatters/json_wrapper.c create mode 100644 web/api/formatters/json_wrapper.h create mode 100644 web/api/formatters/rrd2json.c create mode 100644 web/api/formatters/rrd2json.h create mode 100644 web/api/formatters/rrdset2json.c create mode 100644 web/api/formatters/rrdset2json.h create mode 100644 web/api/formatters/ssv/Makefile.am create mode 100644 web/api/formatters/ssv/Makefile.in create mode 100644 web/api/formatters/ssv/README.md create mode 100644 web/api/formatters/ssv/ssv.c create mode 100644 web/api/formatters/ssv/ssv.h create mode 100644 web/api/formatters/value/Makefile.am create mode 100644 web/api/formatters/value/Makefile.in create mode 100644 web/api/formatters/value/README.md create mode 100644 web/api/formatters/value/value.c create mode 100644 web/api/formatters/value/value.h create mode 100644 web/api/netdata-swagger.json create mode 100644 web/api/netdata-swagger.yaml create mode 100644 web/api/queries/Makefile.am create mode 100644 web/api/queries/Makefile.in create mode 100644 web/api/queries/README.md create mode 100644 web/api/queries/average/Makefile.am create mode 100644 web/api/queries/average/Makefile.in create mode 100644 web/api/queries/average/README.md create mode 100644 web/api/queries/average/average.c create mode 100644 web/api/queries/average/average.h create mode 100644 web/api/queries/des/Makefile.am create mode 100644 web/api/queries/des/Makefile.in create mode 100644 web/api/queries/des/README.md create mode 100644 web/api/queries/des/des.c create mode 100644 web/api/queries/des/des.h create mode 100644 web/api/queries/incremental_sum/Makefile.am create mode 100644 web/api/queries/incremental_sum/Makefile.in create mode 100644 web/api/queries/incremental_sum/README.md create mode 100644 web/api/queries/incremental_sum/incremental_sum.c create mode 100644 web/api/queries/incremental_sum/incremental_sum.h create mode 100644 web/api/queries/max/Makefile.am create mode 100644 web/api/queries/max/Makefile.in create mode 100644 web/api/queries/max/README.md create mode 100644 web/api/queries/max/max.c create mode 100644 web/api/queries/max/max.h create mode 100644 web/api/queries/median/Makefile.am create mode 100644 web/api/queries/median/Makefile.in create mode 100644 web/api/queries/median/README.md create mode 100644 web/api/queries/median/median.c create mode 100644 web/api/queries/median/median.h create mode 100644 web/api/queries/min/Makefile.am create mode 100644 web/api/queries/min/Makefile.in create mode 100644 web/api/queries/min/README.md create mode 100644 web/api/queries/min/min.c create mode 100644 web/api/queries/min/min.h create mode 100644 web/api/queries/query.c create mode 100644 web/api/queries/query.h create mode 100644 web/api/queries/rrdr.c create mode 100644 web/api/queries/rrdr.h create mode 100644 web/api/queries/ses/Makefile.am create mode 100644 web/api/queries/ses/Makefile.in create mode 100644 web/api/queries/ses/README.md create mode 100644 web/api/queries/ses/ses.c create mode 100644 web/api/queries/ses/ses.h create mode 100644 
web/api/queries/stddev/Makefile.am create mode 100644 web/api/queries/stddev/Makefile.in create mode 100644 web/api/queries/stddev/README.md create mode 100644 web/api/queries/stddev/stddev.c create mode 100644 web/api/queries/stddev/stddev.h create mode 100644 web/api/queries/sum/Makefile.am create mode 100644 web/api/queries/sum/Makefile.in create mode 100644 web/api/queries/sum/README.md create mode 100644 web/api/queries/sum/sum.c create mode 100644 web/api/queries/sum/sum.h create mode 100644 web/api/web_api_v1.c create mode 100644 web/api/web_api_v1.h delete mode 100644 web/css/bootstrap-3.3.7.css delete mode 100644 web/css/bootstrap-slate-flat-3.3.7.css delete mode 100644 web/css/bootstrap-slider-10.0.0.min.css delete mode 100644 web/css/bootstrap-theme-3.3.7.min.css delete mode 100644 web/css/bootstrap-toggle-2.2.2.min.css delete mode 100644 web/css/c3-0.4.18.min.css delete mode 100644 web/css/morris-0.5.1.css delete mode 100644 web/dashboard.css delete mode 100644 web/dashboard.html delete mode 100644 web/dashboard.js delete mode 100644 web/dashboard.slate.css delete mode 100644 web/dashboard_info.js delete mode 100644 web/dashboard_info_custom_example.js delete mode 100644 web/favicon.ico delete mode 100644 web/goto-host-from-alarm.html create mode 100644 web/gui/Makefile.am create mode 100644 web/gui/Makefile.in create mode 100644 web/gui/README.md create mode 100644 web/gui/css/bootstrap-3.3.7.css create mode 100644 web/gui/css/bootstrap-slate-flat-3.3.7.css create mode 100644 web/gui/css/bootstrap-slider-10.0.0.min.css create mode 100644 web/gui/css/bootstrap-theme-3.3.7.min.css create mode 100644 web/gui/css/bootstrap-toggle-2.2.2.min.css create mode 100644 web/gui/css/c3-0.4.18.min.css create mode 100644 web/gui/css/morris-0.5.1.css create mode 100644 web/gui/dashboard.css create mode 100644 web/gui/dashboard.html create mode 100644 web/gui/dashboard.js create mode 100644 web/gui/dashboard.slate.css create mode 100644 web/gui/dashboard_info.js create mode 100644 web/gui/dashboard_info_custom_example.js create mode 100644 web/gui/favicon.ico create mode 100644 web/gui/goto-host-from-alarm.html create mode 100644 web/gui/images/alert-128-orange.png create mode 100644 web/gui/images/alert-128-red.png create mode 100644 web/gui/images/alert-multi-size-orange.ico create mode 100644 web/gui/images/alert-multi-size-red.ico create mode 100644 web/gui/images/animated.gif create mode 100644 web/gui/images/check-mark-2-128-green.png create mode 100644 web/gui/images/check-mark-2-multi-size-green.ico create mode 100644 web/gui/images/netdata.svg create mode 100644 web/gui/images/post.png create mode 100644 web/gui/images/seo-performance-114.png create mode 100644 web/gui/images/seo-performance-128.png create mode 100644 web/gui/images/seo-performance-16.png create mode 100644 web/gui/images/seo-performance-24.png create mode 100644 web/gui/images/seo-performance-256.png create mode 100644 web/gui/images/seo-performance-32.png create mode 100644 web/gui/images/seo-performance-48.png create mode 100644 web/gui/images/seo-performance-512.png create mode 100644 web/gui/images/seo-performance-64.png create mode 100644 web/gui/images/seo-performance-72.png create mode 100644 web/gui/images/seo-performance-multi-size.icns create mode 100644 web/gui/images/seo-performance-multi-size.ico create mode 100644 web/gui/index.html create mode 100644 web/gui/infographic.html create mode 100644 web/gui/lib/bootstrap-3.3.7.min.js create mode 100644 web/gui/lib/bootstrap-slider-10.0.0.min.js create mode 
100644 web/gui/lib/bootstrap-table-1.11.0.min.js create mode 100644 web/gui/lib/bootstrap-table-export-1.11.0.min.js create mode 100644 web/gui/lib/bootstrap-toggle-2.2.2.min.js create mode 100644 web/gui/lib/c3-0.4.18.min.js create mode 100644 web/gui/lib/clipboard-polyfill-be05dad.js create mode 100644 web/gui/lib/d3-4.12.2.min.js create mode 100644 web/gui/lib/d3pie-0.2.1-netdata-3.js create mode 100644 web/gui/lib/dygraph-c91c859.min.js create mode 100644 web/gui/lib/dygraph-smooth-plotter-c91c859.js create mode 100644 web/gui/lib/fontawesome-all-5.0.1.min.js create mode 100644 web/gui/lib/gauge-1.3.2.min.js create mode 100644 web/gui/lib/jquery-2.2.4.min.js create mode 100644 web/gui/lib/jquery.easypiechart-97b5824.min.js create mode 100644 web/gui/lib/jquery.peity-3.2.0.min.js create mode 100644 web/gui/lib/jquery.sparkline-2.1.2.min.js create mode 100644 web/gui/lib/lz-string-1.4.4.min.js create mode 100644 web/gui/lib/morris-0.5.1.min.js create mode 100644 web/gui/lib/pako-1.0.6.min.js create mode 100644 web/gui/lib/perfect-scrollbar-0.6.15.min.js create mode 100644 web/gui/lib/raphael-2.2.4-min.js create mode 100644 web/gui/lib/tableExport-1.6.0.min.js create mode 100644 web/gui/refresh-badges.js create mode 100644 web/gui/registry.html create mode 100644 web/gui/robots.txt create mode 100644 web/gui/sitemap.xml create mode 100644 web/gui/tv.html create mode 100644 web/gui/version.txt delete mode 100644 web/images/alert-128-orange.png delete mode 100644 web/images/alert-128-red.png delete mode 100644 web/images/alert-multi-size-orange.ico delete mode 100644 web/images/alert-multi-size-red.ico delete mode 100644 web/images/animated.gif delete mode 100644 web/images/check-mark-2-128-green.png delete mode 100644 web/images/check-mark-2-multi-size-green.ico delete mode 100644 web/images/post.png delete mode 100644 web/images/seo-performance-114.png delete mode 100644 web/images/seo-performance-128.png delete mode 100644 web/images/seo-performance-16.png delete mode 100644 web/images/seo-performance-24.png delete mode 100644 web/images/seo-performance-256.png delete mode 100644 web/images/seo-performance-32.png delete mode 100644 web/images/seo-performance-48.png delete mode 100644 web/images/seo-performance-512.png delete mode 100644 web/images/seo-performance-64.png delete mode 100644 web/images/seo-performance-72.png delete mode 100644 web/images/seo-performance-multi-size.icns delete mode 100644 web/images/seo-performance-multi-size.ico delete mode 100644 web/index.html delete mode 100644 web/infographic.html delete mode 100644 web/lib/bootstrap-3.3.7.min.js delete mode 100644 web/lib/bootstrap-slider-10.0.0.min.js delete mode 100644 web/lib/bootstrap-table-1.11.0.min.js delete mode 100644 web/lib/bootstrap-table-export-1.11.0.min.js delete mode 100644 web/lib/bootstrap-toggle-2.2.2.min.js delete mode 100644 web/lib/c3-0.4.18.min.js delete mode 100644 web/lib/clipboard-polyfill-be05dad.js delete mode 100644 web/lib/d3-4.12.2.min.js delete mode 100644 web/lib/d3pie-0.2.1-netdata-3.js delete mode 100644 web/lib/dygraph-c91c859.min.js delete mode 100644 web/lib/dygraph-smooth-plotter-c91c859.js delete mode 100644 web/lib/fontawesome-all-5.0.1.min.js delete mode 100644 web/lib/gauge-1.3.2.min.js delete mode 100644 web/lib/jquery-2.2.4.min.js delete mode 100644 web/lib/jquery.easypiechart-97b5824.min.js delete mode 100644 web/lib/jquery.peity-3.2.0.min.js delete mode 100644 web/lib/jquery.sparkline-2.1.2.min.js delete mode 100644 web/lib/lz-string-1.4.4.min.js delete mode 100644 
web/lib/morris-0.5.1.min.js delete mode 100644 web/lib/pako-1.0.6.min.js delete mode 100644 web/lib/perfect-scrollbar-0.6.15.min.js delete mode 100644 web/lib/raphael-2.2.4-min.js delete mode 100644 web/lib/tableExport-1.6.0.min.js delete mode 100644 web/netdata-swagger.json delete mode 100644 web/netdata-swagger.yaml delete mode 100644 web/refresh-badges.js delete mode 100644 web/registry.html delete mode 100644 web/robots.txt create mode 100644 web/server/Makefile.am create mode 100644 web/server/Makefile.in create mode 100644 web/server/README.md create mode 100644 web/server/multi/Makefile.am create mode 100644 web/server/multi/Makefile.in create mode 100644 web/server/multi/README.md create mode 100644 web/server/multi/multi-threaded.c create mode 100644 web/server/multi/multi-threaded.h create mode 100644 web/server/single/Makefile.am create mode 100644 web/server/single/Makefile.in create mode 100644 web/server/single/README.md create mode 100644 web/server/single/single-threaded.c create mode 100644 web/server/single/single-threaded.h create mode 100644 web/server/static/Makefile.am create mode 100644 web/server/static/Makefile.in create mode 100644 web/server/static/README.md create mode 100644 web/server/static/static-threaded.c create mode 100644 web/server/static/static-threaded.h create mode 100644 web/server/web_client.c create mode 100644 web/server/web_client.h create mode 100644 web/server/web_client_cache.c create mode 100644 web/server/web_client_cache.h create mode 100644 web/server/web_server.c create mode 100644 web/server/web_server.h delete mode 100644 web/sitemap.xml delete mode 100644 web/tv.html delete mode 100644 web/version.txt diff --git a/.codacy.yml b/.codacy.yml index 0e3f44365..62f669ab3 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -1,15 +1,12 @@ --- exclude_paths: - - python.d/python_modules/pyyaml2/** - - python.d/python_modules/pyyaml3/** - - python.d/python_modules/urllib3/** - - python.d/python_modules/lm_sensors.py + - collectors/python.d.plugin/python_modules/pyyaml2/** + - collectors/python.d.plugin/python_modules/pyyaml3/** + - collectors/python.d.plugin/python_modules/urllib3/** + - collectors/python.d.plugin/python_modules/third_party/** - web/css/** - web/lib/** - web/old/** - - node.d/node_modules/lib/** - - node.d/node_modules/asn1-ber.js - - node.d/node_modules/net-snmp.js - - node.d/node_modules/pixl-xml.js - - node.d/node_modules/extend.js + - web/gui/src/** + - collectors/node.d.plugin/node_modules/** - tests/** diff --git a/.codeclimate.yml b/.codeclimate.yml index f53287ad7..8a11c84a6 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -81,20 +81,19 @@ plugins: enabled: false exclude_patterns: - ".gitignore" - - "conf.d/" - - "hooks/" + - ".githooks/" - "tests/" - "m4/" - "web/css/" - "web/lib/" - "web/fonts/" - "web/old/" - - "python.d/python_modules/pyyaml2/" - - "python.d/python_modules/pyyaml3/" - - "python.d/python_modules/urllib3/" - - "node.d/node_modules/lib/" - - "node.d/node_modules/asn1-ber.js" - - "node.d/node_modules/extend.js" - - "node.d/node_modules/pixl-xml.js" - - "node.d/node_modules/net-snmp.js" + - "collectors/python.d.plugin/python_modules/pyyaml2/" + - "collectors/python.d.plugin/python_modules/pyyaml3/" + - "collectors/python.d.plugin/python_modules/urllib3/" + - "collectors/node.d.plugin/node_modules/lib/" + - "collectors/node.d.plugin/node_modules/asn1-ber.js" + - "collectors/node.d.plugin/node_modules/extend.js" + - "collectors/node.d.plugin/node_modules/pixl-xml.js" + - 
"collectors/node.d.plugin/node_modules/net-snmp.js" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..f4e61be24 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,31 @@ +# Global (default) code owner +* @ktsaou + +# Ownership by directory structure +.travis/ @paulfantom +# backends/ +build/ @paulfantom +collectors/python.d.plugin/ @l2isbad @Ferroin +contrib/ @paulfantom +daemon/ @ktsaou +database/ @ktsaou +docker/ @paulfantom +# health/ +# installer/ +# libnetdata/ +# makeself/ +packaging/ @paulfantom +# registry/ +# streaming/ +# system/ +# tests/ +# web/ + +# Ownership of specific files +CHANGELOG.md @netdatabot +.travis.yml @paulfantom +.lgtm.yml @paulfantom + +# Ownership by filetype (overwrites ownership by directory) +*.am @paulfantom @ktsaou +*.c *.h @ktsaou @vlvkobal diff --git a/.gitignore b/.gitignore index cd69d7ea5..c3f327b37 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,15 @@ .deps .libs +.dirstamp +.project +.pydevproject *.o +*.a config.h.in Makefile.in *~ +.*.swp *.old *.log *.pyc @@ -25,12 +30,20 @@ ltmain.sh missing stamp-h1 netdata.spec +sha256sums.txt # netdata binaries netdata +!netdata/ + apps.plugin +!apps.plugin/ + freeipmi.plugin +!freeipmi.plugin/ + cgroup-network +!cgroup-network/ # netdata makeself archives *.tar.* @@ -52,15 +65,15 @@ netdata-coverity-analysis.tgz .settings/ README TODO.md -conf.d/netdata.conf -src/TODO.txt +netdata.conf +TODO.txt -web/chart-info/ -web/control.html -web/datasource.css -web/gadget.xml -web/index_new.html -web/version.txt +web/gui/chart-info/ +web/gui/control.html +web/gui/datasource.css +web/gui/gadget.xml +web/gui/index_new.html +web/gui/version.txt # related to karma/javascript/node /node_modules/ @@ -71,7 +84,17 @@ system/netdata-openrc system/netdata-init-d system/netdata.logrotate system/netdata.service +system/netdata.plist system/netdata-freebsd +system/edit-config + +health/notifications/alarm-notify.sh +collectors/cgroups.plugin/cgroup-name.sh +collectors/tc.plugin/tc-qos-helper.sh +collectors/charts.d.plugin/charts.d.plugin +collectors/node.d.plugin/node.d.plugin +collectors/python.d.plugin/python.d.plugin +collectors/fping.plugin/fping.plugin # installer generated files netdata-uninstaller.sh @@ -79,6 +102,7 @@ netdata-updater.sh # cmake files cmake-build-debug/ +cmake-build-release/ CMakeCache.txt CMakeFiles/ cmake_install.cmake @@ -97,16 +121,19 @@ diagrams/*.atxt diagrams/plantuml.jar # cppcheck -src/cppcheck-build/ +cppcheck-build/ + +venv/ # debugging / profiling makeself/debug/ -profile/benchmark-dictionary -profile/benchmark-registry -profile/test-eval -profile/benchmark-line-parsing -profile/benchmark-procfile-parser -profile/statsd-stress +tests/profile/benchmark-dictionary +tests/profile/benchmark-registry +tests/profile/test-eval +tests/profile/benchmark-line-parsing +tests/profile/benchmark-procfile-parser +tests/profile/benchmark-value-pairs +tests/profile/statsd-stress oprofile_data/ vgcore.* callgrind.out.* diff --git a/.lgtm.yml b/.lgtm.yml new file mode 100644 index 000000000..7b789e7ea --- /dev/null +++ b/.lgtm.yml @@ -0,0 +1,23 @@ +--- +# LGTM does a good job at classifying files, but sometimes it needs some help. 
+# To classify files which shouldn't be checked we need to define where such
+# files are located and manually assign them one of the possible categories:
+# docs, generated, library, template, test
+# More information can be found in the lgtm documentation:
+# https://help.semmle.com/lgtm-enterprise/user/help/file-classification.html#built-in-tags
+# https://lgtm.com/help/lgtm/lgtm.yml-configuration-file
+path_classifiers:
+  library:
+    - collectors/python.d.plugin/python_modules/third_party/
+    - collectors/python.d.plugin/python_modules/urllib3/
+    - collectors/python.d.plugin/python_modules/pyyaml2/
+    - collectors/python.d.plugin/python_modules/pyyaml3/
+    - collectors/node.d.plugin/node_modules/lib/
+    - collectors/node.d.plugin/node_modules/asn1-ber.js
+    - collectors/node.d.plugin/node_modules/extend.js
+    - collectors/node.d.plugin/node_modules/net-snmp.js
+    - collectors/node.d.plugin/node_modules/pixl-xml.js
+    - web/gui/lib/
+    - web/gui/css/
+  test:
+    - tests/
diff --git a/.travis/README.md b/.travis/README.md
new file mode 100644
index 000000000..5a51b2a7c
--- /dev/null
+++ b/.travis/README.md
@@ -0,0 +1,80 @@
+# Description of CI build configuration
+
+## Variables needed by Travis
+
+- GITHUB_TOKEN - GitHub token with push access to the repository
+- DOCKER_USERNAME - Username (netdatabot) with write access to the Docker Hub repository
+- DOCKER_PASSWORD - Password for Docker Hub
+- encrypted_decb6f6387c4_key - Something to do with package releasing (soon to be deprecated)
+- encrypted_decb6f6387c4_iv - Something to do with package releasing (soon to be deprecated)
+- OLD_DOCKER_USERNAME - Username used to push images to firehol/netdata # TODO: remove after deprecating that repo
+- OLD_DOCKER_PASSWORD - Password used to push images to firehol/netdata # TODO: remove after deprecating that repo
+
+## Stages
+
+### Test
+
+Unit tests and coverage tests are executed here. The stage consists of two parallel jobs:
+ - C tests - executed every time
+ - Coverity test - executed only when the pipeline is triggered from cron
+
+### Build
+
+This stage is executed every time and consists of six parallel jobs which perform containerized and
+non-containerized installations of netdata. Jobs are run on the following operating systems:
+ - OSX
+ - ubuntu 14.04
+ - ubuntu 16.04 (containerized)
+ - CentOS 6 (containerized)
+ - CentOS 7 (containerized)
+ - alpine (containerized)
+
+### Release
+
+This stage is executed only on the "master" branch and allows us to create a new tag just by looking at the
+git commit message. It also has an option to automatically generate a changelog based on GitHub labels and
+sync it with the GitHub release. For the sake of simplicity, and to use Travis features, this stage cannot
+be integrated with the next stage.
+
+Releases are generated by searching for a keyword in the last commit message. The keywords are:
+ - [patch] or [fix] to bump the patch number
+ - [minor], [feature] or [feat] to bump the minor number
+ - [major] or [breaking change] to bump the major number
+All keywords MUST be surrounded with square brackets.
+An alternative is to push a tag to the master branch.
+
+### Packaging
+
+This stage is executed only on the "master" branch and is separated into three jobs:
+ - Update Changelog/Create release
+ - Nightly tarball and self-extractor build
+ - Nightly docker images
+
+##### Update Changelog/Create release
+
+This job runs a single script, `releaser.sh`, which is responsible for a couple of things. First of all, it
+automatically updates our CHANGELOG.md file based on GitHub features (mostly labels and pull requests).
+Apart from that, it can also create a new git tag and a GitHub draft release connected to that tag.
+Releases are generated by searching for a keyword in the last commit message. The keywords are:
+ - `[netdata patch release]` to bump the patch number
+ - `[netdata minor release]` to bump the minor number
+ - `[netdata major release]` to bump the major number
+All keywords MUST be surrounded with square brackets.
+
+Alternatively, a new release can also be created by pushing a new tag to the master branch.
+
+##### Nightly tarball and self-extractor build AND Nightly docker images
+
+As the names suggest, these two jobs are responsible for the nightly netdata package creation and are run
+every day (in cron). Combined they produce:
+ - docker images
+ - tar.gz archive (soon to be removed)
+ - self-extracting package
+
+Currently "Nightly tarball and self-extractor build" is using the old firehol script and is planned to be
+replaced with a new design.
+
+##### Nightly changelog generation
+
+This job is responsible for regenerating the changelog every day by executing the `generate_changelog.sh`
+script. This is done only once a day due to the GitHub rate limit.
+
diff --git a/.travis/containerized_build.sh b/.travis/containerized_build.sh
new file mode 100755
index 000000000..314a2ec39
--- /dev/null
+++ b/.travis/containerized_build.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -e
+
+docker build -t dev-image -f ".travis/images/Dockerfile.$1" .
+
+docker run -it -w /code dev-image ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh
new file mode 100755
index 000000000..40ba9c85f
--- /dev/null
+++ b/.travis/create_artifacts.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# shellcheck disable=SC2230
+
+if [ ! -f .gitignore ]
+then
+  echo "Run as ./.travis/$(basename "$0") from top level directory of git repository"
+  exit 1
+fi
+
+# Make sure stdout is in blocking mode. If we don't, then conda create will barf during downloads.
+# See https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959 for details.
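+# (The fcntl one-liner below clears the O_NONBLOCK flag on stdout, so writes block instead of failing with EAGAIN.)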
+python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
+echo "--- Create tarball ---"
+autoreconf -ivf
+./configure
+make dist
+echo "--- Create self-extractor ---"
+./makeself/build-x86_64-static.sh
+
+echo "--- Create checksums ---"
+GIT_TAG=$(git tag --points-at)
+if [ "${GIT_TAG}" != "" ]; then
+  ln -s netdata-latest.gz.run "netdata-${GIT_TAG}.gz.run"
+  ln -s netdata-*.tar.gz "netdata-${GIT_TAG}.tar.gz"
+  sha256sum -b "netdata-${GIT_TAG}.gz.run" "netdata-${GIT_TAG}.tar.gz" > "sha256sums.txt"
+else
+  sha256sum -b ./*.tar.gz ./*.gz.run > "sha256sums.txt"
+fi
+
+echo "checksums:"
+cat sha256sums.txt
+
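+# The resulting sha256sums.txt can later be checked against the downloaded artifacts
+# with "sha256sum -c sha256sums.txt" (illustrative note; not executed by CI).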
+
diff --git a/.travis/deploy-if-have-key b/.travis/deploy-if-have-key
index 1933eeb2e..8b3b40f7e 100755
--- a/.travis/deploy-if-have-key
+++ b/.travis/deploy-if-have-key
@@ -46,6 +46,10 @@ fi
 ssh-keyscan -H firehol.org >> ~/.ssh/known_hosts
 ssh travis@firehol.org mkdir -p uploads/netdata/$TRAVIS_BRANCH/
-scp -p *.tar.* travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
-scp -p *.gz.run* travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.tar.gz travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.tar.gz.sha travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.tar.gz.asc travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.gz.run travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.gz.run.sha travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.gz.run.asc travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
 ssh travis@firehol.org touch uploads/netdata/$TRAVIS_BRANCH/complete.txt
diff --git a/.travis/firehol_create_artifacts.sh b/.travis/firehol_create_artifacts.sh
new file mode 100755
index 000000000..3fcb910e7
--- /dev/null
+++ b/.travis/firehol_create_artifacts.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# shellcheck disable=SC2230
+
+# WARNING: This script is deprecated and kept here until @paulfantom figures out how to fully replace it
+
+if [ ! -f .gitignore ]
+then
+  echo "Run as ./.travis/$(basename "$0") from top level directory of git repository"
+  exit 1
+fi
+
+eval "$(ssh-agent -s)"
+./.travis/decrypt-if-have-key decb6f6387c4
+export KEYSERVER=ipv4.pool.sks-keyservers.net
+./packaging/gpg-recv-key phil@firehol.org "0762 9FF7 89EA 6156 012F 9F50 C406 9602 1359 9237"
+./packaging/gpg-recv-key costa@tsaousis.gr "4DFF 624A E564 3B51 2872 1F40 29CA 3358 89B9 A863"
+# Run the commit hooks in case the developer didn't
+git diff 4b825dc642cb6eb9a060e54bf8d69288fbee4904 | ./packaging/check-files -
+fakeroot ./packaging/git-build
+# Make sure stdout is in blocking mode. If we don't, then conda create will barf during downloads.
+# See https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959 for details.
+python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
+echo "--- Create tarball ---"
+make dist
+echo "--- Create self-extractor ---"
+./makeself/build-x86_64-static.sh
+echo "--- Create checksums ---"
+for i in *.tar.gz; do sha512sum -b "$i" > "$i.sha"; done #FIXME remove?
+for i in *.gz.run; do sha512sum -b "$i" > "$i.sha"; done #FIXME remove?
+sha256sum -b ./*.tar.gz ./*.gz.run > "sha256sums.txt"
+./.travis/deploy-if-have-key
diff --git a/.travis/generate_changelog.sh b/.travis/generate_changelog.sh
new file mode 100755
index 000000000..bc8be1023
--- /dev/null
+++ b/.travis/generate_changelog.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+set -e
+
+if [ ! -f .gitignore ]
+then
+  echo "Run as ./.travis/$(basename "$0") from top level directory of git repository"
+  exit 1
+fi
+
+ORGANIZATION=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $1}')
+PROJECT=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $2}')
+GIT_MAIL="pawel+bot@netdata.cloud"
+GIT_USER="netdatabot"
+
+echo "--- Initialize git configuration ---"
+git config user.email "${GIT_MAIL}"
+git config user.name "${GIT_USER}"
+
+echo "--- Creating changelog ---"
+git checkout master
+git pull
+#docker run -it --rm -v "$(pwd)":/usr/local/src/your-app ferrarimarco/github-changelog-generator:1.14.3 \
+docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \
+    --user "${ORGANIZATION}" \
+    --project "${PROJECT}" \
+    --token "${GITHUB_TOKEN}" \
+    --since-tag "v1.10.0" \
+    --unreleased-label "**Next release**" \
+    --no-compare-link \
+    --exclude-labels duplicate,question,invalid,wontfix,discussion,documentation
+
+echo "--- Uploading changelog ---"
+git add CHANGELOG.md
+git commit -m '[ci skip] Automatic changelog update'
+git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')"
diff --git a/.travis/images/Dockerfile.alpine b/.travis/images/Dockerfile.alpine
new file mode 100644
index 000000000..15f527165
--- /dev/null
+++ b/.travis/images/Dockerfile.alpine
@@ -0,0 +1,5 @@
+FROM alpine:latest
+
+RUN apk add bash gcc make autoconf automake pkgconfig zlib-dev libuuid git libmnl-dev util-linux-dev build-base
+
+COPY . /code
diff --git a/.travis/images/Dockerfile.centos6 b/.travis/images/Dockerfile.centos6
new file mode 100644
index 000000000..c286fb95c
--- /dev/null
+++ b/.travis/images/Dockerfile.centos6
@@ -0,0 +1,5 @@
+FROM centos:6
+
+RUN yum install -y gcc make autoconf automake pkg-config zlib-devel libuuid-devel git
+
+COPY . /code
diff --git a/.travis/images/Dockerfile.centos7 b/.travis/images/Dockerfile.centos7
new file mode 100644
index 000000000..d94359dec
--- /dev/null
+++ b/.travis/images/Dockerfile.centos7
@@ -0,0 +1,5 @@
+FROM centos:7
+
+RUN yum install -y gcc make autoconf automake pkg-config zlib-devel libuuid-devel git
+
+COPY . /code
diff --git a/.travis/images/Dockerfile.ubuntu1804 b/.travis/images/Dockerfile.ubuntu1804
new file mode 100644
index 000000000..30ac7cf71
--- /dev/null
+++ b/.travis/images/Dockerfile.ubuntu1804
@@ -0,0 +1,6 @@
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+    apt-get install -y gcc make autoconf automake pkg-config zlib1g-dev uuid-dev git
+
+COPY . /code
diff --git a/.travis/releaser.sh b/.travis/releaser.sh
new file mode 100755
index 000000000..9f7ecd4ee
--- /dev/null
+++ b/.travis/releaser.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+# Copyright (C) 2018 Pawel Krupa (@paulfantom) - All Rights Reserved
+# Permission to copy and modify is granted under the MIT license
+#
+# Original script is available at https://github.com/paulfantom/travis-helper/blob/master/releasing/releaser.sh
+#
+# Script to automatically do a couple of things:
+#  - generate a new tag according to semver (https://semver.org/)
+#  - generate CHANGELOG.md by using https://github.com/skywinder/github-changelog-generator
+#  - create draft of GitHub releases by using https://github.com/github/hub
+#
+# Tags are generated by searching for a keyword in the last commit message. Keywords are:
+#  - [netdata patch release] to bump patch number
+#  - [netdata minor release] to bump minor number
+#  - [netdata major release] to bump major number
+# All keywords MUST be surrounded with square brackets.
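+#
+# Example (hypothetical commit message): pushing to master with
+#   "fix disk space chart [netdata patch release]"
+# makes this script bump the patch version, tag the commit and create a draft release.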
+#
+# The script uses git mechanisms for locking, so it can be used in parallel builds
+#
+# Requirements:
+#  - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
+#  - docker
+#  - git-semver python package (pip install git-semver)
+
+set -e
+
+if [ ! -f .gitignore ]
+then
+  echo "Run as ./.travis/$(basename "$0") from top level directory of git repository"
+  exit 1
+fi
+
+echo "---- GENERATING CHANGELOG -----"
+./.travis/generate_changelog.sh
+
+echo "---- FIGURING OUT TAGS ----"
+# Check if current commit is tagged or not
+GIT_TAG=$(git tag --points-at)
+if [ -z "${GIT_TAG}" ]; then
+  git semver
+  # Figure out next tag based on commit message
+  GIT_TAG=HEAD
+  echo "Last commit message: $TRAVIS_COMMIT_MESSAGE"
+  case "${TRAVIS_COMMIT_MESSAGE}" in
+    *"[netdata patch release]"* ) GIT_TAG="v$(git semver --next-patch)" ;;
+    *"[netdata minor release]"* ) GIT_TAG="v$(git semver --next-minor)" ;;
+    *"[netdata major release]"* ) GIT_TAG="v$(git semver --next-major)" ;;
+    *) echo "Keyword not detected. Doing nothing" ;;
+  esac
+
+  # Tag it!
+  if [ "$GIT_TAG" != "HEAD" ]; then
+    echo "Assigning a new tag: $GIT_TAG"
+    git tag "$GIT_TAG" -a -m "Automatic tag generation for travis build no. $TRAVIS_BUILD_NUMBER"
+    # git is able to push because its configuration was already initialized in the `generate_changelog.sh` script
+    git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" --tags
+  fi
+fi
+
+if [ "${GIT_TAG}" == "HEAD" ]; then
+  echo "Not creating a release since neither of the two conditions was met:"
+  echo "  - keyword in commit message"
+  echo "  - commit is tagged"
+  exit 0
+fi
+
+echo "---- CREATING TAGGED DOCKER CONTAINERS ----"
+export REPOSITORY="netdata/netdata"
+./docker/build.sh
+
+echo "---- CREATING RELEASE ARTIFACTS -----"
+./.travis/create_artifacts.sh
+
+echo "---- CREATING RELEASE DRAFT WITH ASSETS -----"
+# Download hub
+HUB_VERSION=${HUB_VERSION:-"2.5.1"}
+wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
+tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
+export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin"
+
+# Create a release draft
+hub release create --draft -a "netdata-${GIT_TAG}.tar.gz" -a "netdata-${GIT_TAG}.gz.run" -a "sha256sums.txt" -m "${GIT_TAG}" "${GIT_TAG}"
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..646a7d481
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team lead at costa@tsaousis.gr. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 000000000..fde3d0b4f --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,123 @@ + + +# netdata contributors license agreement + +**Thank you for contributing to netdata!** + +This agreement is part of the legal framework of the open-source ecosystem +that adds some red tape, but protects both the contributor and the project. + +To understand why this is needed, please read [a well-written chapter from +Karl Fogel’s Producing Open Source Software on CLAs](http://producingoss.com/en/copyright-assignment.html). + +By signing this agreement, you do not change your rights to use your own +contributions for any other purpose. 
+ +## copyright license + +The Contributor (*you*) grants netdata Inc. a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable copyright license to reproduce, +prepare derivative works of, publicly display, publicly perform, sublicense, +and distribute his contributions and such derivative works. + +## copyright transfer + +The Contributor (*you*) hereby assigns netdata Inc. copyright in his +contributions, to be licensed under the same terms as the rest of the code. + +> *Note: this means we may re-license netdata (your contributions included) +> any way we see fit, without asking your permission. +> Open-source licenses have significant differences and in our attempt to +> help netdata grow we may have to distribute it under a different license. +> For example, CNCF, the Cloud Native Computing Foundation, requires netdata +> to be licensed under Apache-2.0 for it to be accepted as a member of the +> Foundation. We want to be free to do it.* + +## original work + +The Contributor (*you*) represent that each of his contributions is his +original creation and that he is legally entitled to grant the above license. + +> *Note: if you are committing third party code, please make sure the third party +> license or any other restrictions are also included with your commits. +> netdata includes many third party libraries and tools and this is not a +> problem, provided that the license of the third party code is compatible with +> the one we use for netdata.* + +## signature + +Since Sep 17th 2018, we use https://cla-assistant.io/netdata/netdata for signing the CLA, on all pull requests. +Old contributors can sign the CLA at any time using this link. + +## HISTORICAL SIGNATURES +(they have been imported to https://cla-assistant.io/netdata/netdata already) + +The Contributor (*you*) signs this agreement by adding his personal data in +this document and committing it to the project repo +(the same way contributions are submitted to the project). + +By signing once, all contributions (past and future) of The Contributor (*you*), +are subject to this agreement. + +> *Note: so you have to:* +> 1. add your github username and name in this file +> 2. commit it to the repo with a PR, using the same github username, or include this change in your first PR. + +# netdata contributors + +This is the list of contributors that have signed this agreement: + +username|name|email (optional) +:--------:|:----:|:---------------- +@lets00|Luís Eduardo|leduardo@lsd.ufcg.edu.br +@ktsaou|Costa Tsaousis|costa@tsaousis.gr +@tycho|Steven Noonan|steven@uplinklabs.net +@philwhineray|Phil Whineray| +@paulfantom|Paweł Krupa|pawel@krupa.net.pl +@Ferroin|Austin S. 
Hemmelgarn|ahferroin7@gmail.com +@glensc|Elan Ruusamäe| +@l2isbad|Ilya Mashchenko|ilyamaschenko@gmail.com +@rlefevre|Rémi Lefèvre| +@vlvkobal|Vladimir Kobal|vlad@prokk.net +@simonnagl|Simon Nagl| +@manosf|Emmanouil Fokas|manosf@protonmail.com +@user501254|Ashesh Singh|user501254@gmail.com +@t-h-e|Stefan Forstenlechner| +@facetoe|Facetoe| +@ntlug|Christopher Cox|ccox@endlessnow.com +@alonbl|Alon Bar-Lev|alon.barlev@gmail.com +@Wing924|Wei He|weihe924stephen@gmail.com +@NeonSludge|Kirill Buev|kirill.buev@gmx.com +@kmlucy|Kyle Lucy|kmlucy@gmail.com +@RicardoSette|Ricardo Sette|ricardosette@freebsdbrasil.com.br +@383c57|Shinichi Tagashira| +@davidak|David Kleuker|netdata-contributors+vyff@davidak.de +@ccremer|Christian Cremer| +@jimcooley|Jim Cooley|jim.cooley@healthvana.com +@Chocobo1|Mike Tzou| +@cosmix|Dimosthenis Kaponis| +@shadycuz|Levi Blaney|shadycuz+spam@gmail.com +@Flums|Philip Gabrielsen|philip@digno.no +@domschl|Dominik Schlösser|dominik.schloesser@gmail.com +@tioumen|Guillaume Hospital| +@arch273|Jacob Ayres +@x4FF3|David Fuellgraf| +@jasonwbarnett|Jason Barnett| +@ecowed|Ed Wade| +@wungad|Rob Man| +@rda0|Sven Mäder|maeder@phys.ethz.ch +@alibo|Ali Borhani|aliborhani1@gmail.com +@Nani-o|Sofiane Medjkoune|sofiane@medjkoune.fr +@n0guest|Evgeniy K.|ask@osshelp.ru +@amichelic|Adalbert Michelic| +@abalabahaha|abalabahaha|hi@abal.moe +@illes|Illes S.| +@plasticrake|Patrick Seal +@jonfairbanks|Jon Fairbanks +@pjz|Paul Jimenez|pj@place.org +@jgrossiord|Julien Grossiord|julien@grossiord.net +@pohzipohzi|Poh Zi How +@vladmovchan|Vladyslav Movchan|vladislav.movchan@gmail.com +@gmosx|George Moschovitis diff --git a/COPYING b/COPYING deleted file mode 100644 index 94a9ed024..000000000 --- a/COPYING +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. 
You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. 
You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. 
In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/ChangeLog b/ChangeLog deleted file mode 100644 index c1980c802..000000000 --- a/ChangeLog +++ /dev/null @@ -1,655 +0,0 @@ -netdata (1.10.0) - 2018-03-27 - - Please check full changelog at github. - https://github.com/firehol/netdata/releases - - -netdata (1.9.0) - 2017-12-17 - - Please check full changelog at github. - https://github.com/firehol/netdata/releases - - -netdata (1.8.0) - 2017-09-17 - - This is mainly a bugfix release. - Please check full changelog at github. - - -netdata (1.7.0) - 2017-07-16 - - * netdata is still spreading fast - - we are at 320.000 users and 132.000 servers - - Almost 100k new users, 52k new installations and 800k docker pulls - since the previous release, 4 and a half months ago. - - netdata user base grows at about 1000 new users and 600 new servers - per day. Thank you. You are awesome. - - * The next release (v1.8) will be focused on providing a global health - monitoring service, for all netdata users, for free. 
- - * netdata is now a (very fast) fully featured statsd server and the - only one with automatic visualization: push a statsd metric and hit - F5 on the netdata dashboard: your metric visualized. It also supports - synthetic charts, defined by you, so that you can correlate and - visualize your application the way you like it. - - * netdata got new installation options - It is now easier than ever to install netdata - we also distribute a - statically linked netdata x86_64 binary, including key dependencies - (like bash, curl, etc) that can run everywhere a Linux kernel runs - (CoreOS, CirrOS, etc). - - * metrics streaming and replication has been improved significantly. - All known issues have been solved and key enhancements have been added. - Headless collectors and proxies can now send metrics to backends when - data source = as collected. - - * backends have got quite a few enhancements, including host tags and - metrics filtering at the netdata side; - prometheus support has been re-written to utilize more prometheus - features and provide more flexibility and integration options. - - * netdata now monitors ZFS (on Linux and FreeBSD), ElasticSearch, - RabbitMQ, Go applications (via expvar), ipfw (on FreeBSD 11), samba, - squid logs (with web_log plugin). - - * netdata dashboard loading times have been improved significantly - (hit F5 a few times on a netdata dashboard - it is now amazingly fast), - to support dashboards with thousands of charts. - - * netdata alarms now support custom hooks, so you can run whatever you - like in parallel with netdata alarms. - - * As usual, this release brings dozens of more improvements, enhancements - and compatibility fixes. - - -netdata (1.6.0) - 2017-03-20 - - * birthday release: 1 year netdata - - netdata was first published on March 30th, 2016. - It has been a crazy year since then: - - 225.000 unique netdata users - currently, at 1.000 new unique users per day - - 80.000 unique netdata installations - currently, at 500 new installation per day - - 610.000 docker pulls on docker hub - - 4.000.000 netdata sessions served - currently, at 15.000 sessions served per day - - 20.000 github stars - - Thank you! - You are awesome! - - * central netdata is here - - This is the first release that supports real-time streaming of - metrics between netdata servers. - - netdata can now be: - - - autonomous host monitoring - (like it always has been) - - - headless data collector - (collect and stream metrics in real-time to another netdata) - - - headless proxy - (collect metrics from multiple netdata and stream them to another netdata) - - - store and forward proxy - (like headless proxy, but with a local database) - - - central database - (metrics from multiple hosts are aggregated) - - metrics databases can be configured on all nodes and each node maintaining - a database may have a different retention policy and possibly run - (even different) alarms on them. - - * monitoring ephemeral nodes - - netdata now supports monitoring autoscaled ephemeral nodes, - that are started and stopped on demand (their IP is not known). - - When the ephemeral nodes start streaming metrics to the central - netdata, the central netdata will show register them at "my-netdata" - menu on the dashboard. 
- - * monitoring ephemeral containers and VM guests - - netdata now cleans up container, guest VM, network interfaces and mounted - disk metrics, automatically disabling their alarms too. - - For more information check: - https://github.com/firehol/netdata/wiki/monitoring-ephemeral-containers - - * apps.plugin ported for FreeBSD - - @vlvkobal has ported "apps.plugin" to FreeBSD. netdata can now provide - "Applications", "Users" and "User Groups" on FreeBSD. - - * web_log plugin - - @l2isbad has done a wonderful job creating a unified web log parsing plugin - for all kinds of web server logs. With it, netdata provides real-time - performance information and health monitoring alarms for web applications - and web sites! - - For more information check: - https://github.com/firehol/netdata/wiki/The-spectacles-of-a-web-server-log-file - - * backends - - netdata can now archive metrics to `JSON` backends - (both push, by @lfdominguez, and pull modes). - - * IPMI monitoring - - netdata now has an IPMI plugin (based on freeipmi) - for monitoring server hardware. - - The plugin creates (up to) 8 charts: - - 1. number of sensors by state - 2. number of events in SEL - 3. Temperatures CELSIUS - 4. Temperatures FAHRENHEIT - 5. Voltages - 6. Currents - 7. Power - 8. Fans - - It also supports alarms (including the number of sensors in critical state). - - For more information, check: - https://github.com/firehol/netdata/wiki/monitoring-IPMI - - * new plugins - - @l2isbad builds python data collection plugins for netdata at a wonderful - rate! He rocks! - - - **web_log** for monitoring in real-time all kinds of web server log files @l2isbad - - **freeipmi** for monitoring IPMI (server hardware) - - **nsd** (the [name server daemon](https://www.nlnetlabs.nl/projects/nsd/)) @383c57 - - **mongodb** @l2isbad - - **smartd_log** (monitoring disk S.M.A.R.T. values) @l2isbad - - * improved plugins - - - **nfacct** reworked and now collects connection tracker information using netlink. - - **ElasticSearch** re-worked @l2isbad - - **mysql** re-worked to allow faster development of custom mysql based plugins (MySQLService) @l2isbad - - **SNMP** - - **tomcat** @NMcCloud - - **ap** (monitoring hostapd access points) - - **php_fpm** @l2isbad - - **postgres** @l2isbad - - **isc_dhcpd** @l2isbad - - **bind_rndc** @l2isbad - - **numa** - - **apps.plugin** improvements and freebsd support @vlvkobal - - **fail2ban** @l2isbad - - **freeradius** @l2isbad - - **nut** (monitoring UPSes) - - **tc** (Linux QoS) now works on qdiscs instead of classes for the same result (a lot faster) @t-h-e - - **varnish** @l2isbad - - * new and improved alarms - - **web_log**, many alarms to detect common web site/API issues - - **fping**, alarms to detect packet loss, disconnects and unusually high latency - - **cpu**, cpu utilization alarm now ignores `nice` - - * new and improved alarm notification methods - - **HipChat** to allow hosted HipChat @frei-style - - **discordapp** @lowfive - - * dashboard improvements - - dashboard now works on HiDPI screens - - dashboard now shows the version of netdata - - dashboard now resets charts properly - - dashboard updated to use latest gauge.js release - - * other improvements - - thanks to @rlefevre netdata now uses a lot of different high resolution system clocks. - - netdata has received a lot more improvements from many more contributors! - - Thank you all!
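     (A quick illustration of the statsd server added in 1.7.0 above: a
     one-line push, assuming netdata's default statsd UDP port 8125 and a
     made-up metric name; netcat flags vary between implementations.)

        # send a counter to the local netdata statsd listener,
        # then refresh the dashboard to see the chart appear
        echo "myapp.requests:1|c" | nc -u -w 1 localhost 8125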
- - -netdata (1.5.0) - 2017-01-22 - - * yet another release that makes netdata the fastest - netdata ever! - - * netdata runs on FreeBSD, FreeNAS and MacOS! - - Vladimir Kobal (@vlvkobal) has done a magnificent job - porting netdata to FreeBSD and MacOS. - - Everything works: cpu, memory, disks performance, disks space, - network interfaces, interrupts, IPv4 metrics, IPv6 metrics, - processes, context switches, softnet, IPC queues, - IPC semaphores, IPC shared memory, uptime, etc. Wow! - - * netdata supports data archiving to backend databases: - - - Graphite - - OpenTSDB - - Prometheus - - and of course all the compatible ones - (KairosDB, InfluxDB, Blueflood, etc) - - * new plugins: - - Ilya Mashchenko (@l2isbad) has created most of the python - data collection plugins in this release! - - - systemd Services (using cgroups!) - - FPing (yes, network latency in netdata!) - - postgres databases @facetoe, @moumoul - - Varnish disk cache (v3 and v4) @l2isbad - - ElasticSearch @l2isbad - - HAproxy @l2isbad - - FreeRadius @l2isbad, @lgz - - mdstat (RAID) @l2isbad - - ISC bind (via rndc) @l2isbad - - ISC dhcpd @l2isbad, @lgz - - Fail2Ban @l2isbad - - OpenVPN status log @l2isbad, @lgz - - NUMA memory @tycho - - CPU Idle @tycho - - gunicorn log @deltaskelta - - ECC memory hardware errors - - IPC semaphores - - uptime plugin (with a nice badge too) - - * improved plugins: - - - netfilter conntrack - - mysql (replication) @l2isbad - - ipfs @pjz - - cpufreq @tycho - - hddtemp @l2isbad - - sensors @l2isbad - - nginx @leolovenet - - nginx_log @paulfantom - - phpfpm @leolovenet - - redis @leolovenet - - dovecot @justohall - - cgroups - - disk space - - apps.plugin - - /proc/interrupts @rlefevre - - /proc/softirqs @rlefevre - - /proc/vmstat (system memory charts) - - /proc/net/snmp6 (IPv6 charts) - - /proc/self/meminfo (system memory charts) - - /proc/net/dev (network interfaces) - - tc (linux QoS) - - * new/improved alarms: - - - MySQL / MariaDB alarms (incl. replication) - - IPFS alarms - - HAproxy alarms - - UDP buffer alarms - - TCP AttemptFails - - ECC memory alarms - - netfilter connections alarms - - SNMP - - * new alarm notifications: - - - messagebird.com @tech-no-logical - - pagerduty.com @jimcooley - - pushbullet.com @tperalta82 - - twilio.com @shadycuz - - HipChat - - kafka - - * shell integration - - - shell scripts can now query netdata easily! - - * dashboard improvements: - - dashboard is now faster on firefox, safari, opera, edge - (edge is still the slowest) - - dashboard now has slightly bigger fonts - - SHIFT + mouse wheel to zoom charts, works on all browsers - - perfect-scrollbar on the dashboard - - dashboard 4K resolution fixes - - dashboard compatibility fixes for embedding charts in - third party web sites - - charts on custom dashboards can have common min/max - even if they come from different netdata servers - - alarm log is now saved and loaded back so that - the alarm history is available at the dashboard - - * other improvements: - - python.d.plugin has received way too many improvements - from many contributors!
- - charts.d.plugin can now be forked to support - multiple independent instances - - registry has been re-factored to lower its memory - requirements (required for the public registry) - - simple patterns in cgroups, disks and alarms - - netdata-installer.sh can now correctly install - netdata in containers - - supplied logrotate script compatibility fixes - - spec cleanup @breed808 - - clocks and timers reworked @rlefevre - - netdata has received a lot more improvements from many more - contributors! - - Thank you all! - - -netdata (1.4.0) - 2016-10-04 - - At a glance: - - - the fastest netdata ever (with a better look too)! - - improved IoT and containers support! - - alarms improved in almost every way! - - - new plugins: - softnet netdev, - extended TCP metrics, - UDPLite - NFS v2, v3 client (server was there already), - NFS v4 server & client, - APCUPSd, - RetroShare - - - improved plugins: - mysql, - cgroups, - hddtemp, - sensors, - phpfpm, - tc (QoS) - - In detail: - - * improved alarms - - Many new alarms have been added to detect common kernel - configuration errors and old alarms have been re-worked - to avoid notification floods. - - Alarms now support notification hysteresis (both static - and dynamic), notification self-cancellation, dynamic - thresholds based on current alarm status - - * improved alarm notifications - - netdata now supports: - - - email notifications - - slack.com notifications on slack channels - - pushover.net notifications (mobile push notifications) - - telegram.org notifications - - For all the above methods, netdata supports role-based - notifications, with multiple recipients for each role - and severity filtering per recipient! - - Also, netdata supports HTML5 notifications, while the - dashboard is open in a browser window (no need to be - the active one). - - All notifications are now clickable to get to the chart - that raised the alarm. - - * improved IoT support! - - netdata builds with musl libc and runs on systems - based on busybox. - - * improved containers support! - - netdata runs on alpine linux (a low profile linux distribution - used in containers). - - * Dozens of other improvements and bugfixes - - -netdata (1.3.0) - 2016-08-28 - - At a glance: - - - netdata has health monitoring / alarms! - - netdata has badges that can be embedded anywhere! - - netdata plugins are now written in Python! - - new plugins: redis, memcached, nginx_log, ipfs, apache_cache - - IMPORTANT: - Since netdata now uses Python plugins, new packages are - required to be installed on a system to allow it to work. - For more information, please check the installation page: - - https://github.com/firehol/netdata/wiki/Installation - - In detail: - - * netdata has alarms! - - Based on the POLL we made on github - (https://github.com/firehol/netdata/issues/436), - health monitoring was the winner. So here it is! - - netdata now has a powerful health monitoring system embedded. - Please check the wiki page: - - https://github.com/firehol/netdata/wiki/health-monitoring - - * netdata has badges! - - netdata can generate badges with live information from the - collected metrics (see the curl example below). - Please check the wiki page: - - https://github.com/firehol/netdata/wiki/Generating-Badges - - * netdata plugins are now written in Python! - - Thanks to the great work of Paweł Krupa (@paulfantom), most BASH - plugins have been ported to Python. - - The new python.d.plugin supports both python2 and python3 and - data collection from multiple sources for all modules. - - The following pre-existing modules have been ported to Python: - - - apache - - cpufreq - - example - - exim - - hddtemp - - mysql - - nginx - - phpfpm - - postfix - - sensors - - squid - - tomcat - - The following new modules have been added: - - - apache_cache - - dovecot - - ipfs - - memcached - - nginx_log - - redis
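     For instance, the badge generator mentioned above needs nothing more
     than a chart name; a sketch with curl against a local netdata (the
     chart and the time window are only examples):

        # fetch a live SVG badge for the system.cpu chart (last 60 seconds)
        curl -o cpu.svg "http://localhost:19999/api/v1/badge.svg?chart=system.cpu&after=-60"

     The resulting cpu.svg can be embedded in any web page or README.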
- - * other data collectors: - - - Thanks to @simonnagl netdata now reports disk space usage. - - * dashboards now transfer certain settings from server to server - when changing servers via the my-netdata menu. - - The settings transferred are the dashboard theme, the online - help status and the current pan and zoom timeframe of the dashboard. - - * API improvements: - - - reduction functions now support 'min', 'sum' and 'incremental-sum'. - - - netdata now offers a multi-threaded and a single threaded - web server (single threaded is better for IoT). - - * apps.plugin improvements: - - - can now run with command line argument 'without-files' - to prevent it from enumerating all the open files/sockets/pipes - of all running processes. - - - apps.plugin now scales the collected values to match the - total system usage. - - - apps.plugin can now report guest CPU usage per process. - - - repeating errors are now logged once per process. - - * netdata now runs with IDLE process priority (lower than nice 19) - - * netdata now instructs the kernel to kill it first when it starves - for memory. - - * netdata listens for signals (see the examples below): - - - SIGHUP to netdata instructs it to re-open its log files - (new logrotate files added too). - - - SIGUSR1 to netdata saves the database - - - SIGUSR2 to netdata reloads health / alarms configuration - - * netdata can now bind to multiple IPs and ports. - - * netdata now has a new systemd service file (it starts as user - netdata and does not fork). - - * Dozens of other improvements and bugfixes - - -netdata (1.2.0) - 2016-05-16 - - At a glance: - - - netdata is now 30% faster - - netdata now has a registry (my-netdata dashboard menu) - - netdata now monitors Linux Containers (docker, lxc, etc) - - IMPORTANT: - This version requires libuuid. The package you need is: - - - uuid-dev (debian/ubuntu), or - - libuuid-devel (centos/fedora/redhat) - - In detail: - - * netdata is now 30% faster! - - - Patches submitted by @fredericopissarra improved overall - netdata performance by 10%. - - - A new improved search function in the internal indexes - made all searches faster by 50%, resulting in about - 20% better performance for the core of netdata. - - - More efficient thread locking in key components - contributed to the overall efficiency. - - * netdata now has a CENTRAL REGISTRY! - - The central registry tracks all your netdata servers - and bookmarks them for you at the 'my-netdata' menu - on all dashboards. - - Every netdata can act as a registry, but there is also - a global registry provided for free for all netdata users! - - * netdata now monitors CONTAINERS! - - docker, lxc, or anything else. For each container it monitors - CPU, RAM, DISK I/O (network interfaces were already monitored)
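     As a recap of the signal support added in 1.3.0 above, all three
     signals can be sent with plain kill (a sketch; pidof availability
     varies per distribution):

        kill -HUP  $(pidof netdata)   # re-open the log files (logrotate)
        kill -USR1 $(pidof netdata)   # save the database to disk
        kill -USR2 $(pidof netdata)   # reload health / alarms configuration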
- - * apps.plugin: now uses linux capabilities by default - without setuid to root - - * netdata now has an improved signal handler - thanks to @simonnagl - - * API: new improved CORS support - - * SNMP: counter64 support fixed - - * MYSQL: more charts, about QCache, MyISAM key cache, - InnoDB buffer pools, open files - - * DISK charts now show mount point when available - - * Dashboard: improved support for older web browsers - and mobile web browsers (thanks to @simonnagl) - - * Multi-server dashboards now allow de-coupled refreshes for - each chart, so that if one netdata has network latency - the other charts are not affected - - * Several other minor improvements and bugfixes - - -netdata (1.1.0) - 2016-04-20 - - Dozens of commits that improve netdata in several ways: - - - Data collection: added IPv6 monitoring - - Data collection: added SYNPROXY DDoS protection monitoring - - Data collection: apps.plugin: added charts for users and user groups - - Data collection: apps.plugin: grouping of processes now supports patterns - - Data collection: apps.plugin: now faster, even with the new features added - - Data collection: better auto-detection of partitions for disk monitoring - - Data collection: better fireqos integration for QoS monitoring - - Data collection: squid monitoring now uses squidclient - - Data collection: SNMP monitoring now supports 64bit counters - - API: fixed issues in CSV output generation - - API: netdata can now be restricted to listen on a specific IP - - Core and apps.plugin: error log flood protection - - Dashboard: better error handling when the netdata server is unreachable - - Dashboard: each chart now has a toolbox - - Dashboard: on-line help support - - Dashboard: check for netdata updates button - - Dashboard: added example /tv.html dashboard - - Packaging: now compiles with musl libc (alpine linux) - - Packaging: added debian packaging - - Packaging: support non-root installations - - Packaging: the installer generates an uninstall script - -netdata (1.0.0) - 2016-03-22 - - - first public release - -netdata (1.0.0-rc.1) - 2015-11-28 - - - initial packaging diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index cb1f75cb9..000000000 --- a/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -# author : titpetric -# original: https://github.com/titpetric/netdata - -FROM debian:stretch - -ADD . /netdata.git - -RUN cd ./netdata.git && chmod +x ./docker-build.sh && sync && sleep 1 && ./docker-build.sh - -WORKDIR / - -ENV NETDATA_PORT 19999 -EXPOSE $NETDATA_PORT - -CMD /usr/sbin/netdata -D -s /host -p ${NETDATA_PORT} diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 deleted file mode 100644 index 544269876..000000000 --- a/Dockerfile.aarch64 +++ /dev/null @@ -1,19 +0,0 @@ -# author : titpetric -# original: https://github.com/titpetric/netdata - -FROM resin/aarch64-debian:stretch - -RUN [ "cross-build-start"] - -ADD . /netdata.git
- -RUN cd ./netdata.git && chmod +x ./docker-build.sh && sync && sleep 1 && ./docker-build.sh - -WORKDIR / - -ENV NETDATA_PORT 19999 -EXPOSE $NETDATA_PORT - -CMD /usr/sbin/netdata -D -s /host -p ${NETDATA_PORT} - -RUN [ "cross-build-end"] diff --git a/Dockerfile.alpine b/Dockerfile.alpine deleted file mode 100644 index bd958116c..000000000 --- a/Dockerfile.alpine +++ /dev/null @@ -1,58 +0,0 @@ -FROM alpine:edge as builder - -# Install prerequisites -RUN apk --no-cache add alpine-sdk autoconf automake libmnl-dev build-base jq \ - lm_sensors nodejs pkgconfig py-mysqldb python libuuid \ - py-psycopg2 py-yaml util-linux-dev zlib-dev curl bash \ - netcat-openbsd - -# Copy source -COPY . /opt/netdata.git -WORKDIR /opt/netdata.git - -# Install source -RUN chmod +x ./netdata-installer.sh && \ - sync && sleep 1 && \ - ./netdata-installer.sh --dont-wait --dont-start-it - -################################################################################ -FROM alpine:edge - -# Reinstall some prerequisites -RUN apk --no-cache add lm_sensors nodejs libuuid python py-mysqldb \ - py-psycopg2 py-yaml netcat-openbsd jq curl fping - -# Copy files over -COPY --from=builder /usr/share/netdata /usr/share/netdata -COPY --from=builder /usr/libexec/netdata /usr/libexec/netdata -COPY --from=builder /var/cache/netdata /var/cache/netdata -COPY --from=builder /var/lib/netdata /var/lib/netdata -COPY --from=builder /usr/sbin/netdata /usr/sbin/netdata -COPY --from=builder /etc/netdata /etc/netdata - -ARG NETDATA_UID=101 -ARG NETDATA_GID=101 - -RUN \ - # fping from alpine apk is in a different location. Move it. - mv /usr/sbin/fping /usr/local/bin/fping && \ - chmod 4755 /usr/local/bin/fping && \ - mkdir -p /var/log/netdata && \ - # Add netdata user - addgroup -g ${NETDATA_GID} -S netdata && \ - adduser -S -H -s /bin/sh -u ${NETDATA_GID} -h /etc/netdata -G netdata netdata && \ - # Apply the permissions as described in - # https://github.com/firehol/netdata/wiki/netdata-security#netdata-directories - chown -R root:netdata /etc/netdata && \ - chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \ - chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin /usr/libexec/netdata/plugins.d/cgroup-network && \ - chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \ - chmod 0750 /var/lib/netdata /var/cache/netdata && \ - # Link log files to stdout - ln -sf /dev/stdout /var/log/netdata/access.log && \ - ln -sf /dev/stdout /var/log/netdata/debug.log && \ - ln -sf /dev/stderr /var/log/netdata/error.log - -EXPOSE 19999 - -CMD [ "/usr/sbin/netdata" , "-D", "-s", "/host", "-p", "19999"] diff --git a/Dockerfile.armv7hf b/Dockerfile.armv7hf deleted file mode 100644 index 278e45424..000000000 --- a/Dockerfile.armv7hf +++ /dev/null @@ -1,19 +0,0 @@ -# author : titpetric -# original: https://github.com/titpetric/netdata - -FROM resin/armv7hf-debian:stretch - -RUN [ "cross-build-start"] - -ADD . /netdata.git - -RUN cd ./netdata.git && chmod +x ./docker-build.sh && sync && sleep 1 && ./docker-build.sh - -WORKDIR / - -ENV NETDATA_PORT 19999 -EXPOSE $NETDATA_PORT - -CMD /usr/sbin/netdata -D -s /host -p ${NETDATA_PORT} - -RUN [ "cross-build-end"] diff --git a/LICENSE b/LICENSE index 9cecc1d46..1351f44ec 100644 --- a/LICENSE +++ b/LICENSE @@ -672,3 +672,13 @@ may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.
But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>. + +--------------------------------------------------------------------------- + +Note: +Individual files contain the following tag instead of the full license text. + + SPDX-License-Identifier: GPL-3.0-or-later + +This enables machine processing of license information based on the SPDX +License Identifiers that are available here: http://spdx.org/licenses/ diff --git a/LICENSE-REDISTRIBUTED.md b/LICENSE-REDISTRIBUTED.md deleted file mode 100644 index caf411b62..000000000 --- a/LICENSE-REDISTRIBUTED.md +++ /dev/null @@ -1,180 +0,0 @@ -# Netdata - -Copyright 2016-2017, Costa Tsaousis. -Released under [GPL v3 or later](http://www.gnu.org/licenses/gpl-3.0.en.html). - ---- - -## Re-distributed software - -Netdata re-distributes the following third party software. -We decided to re-distribute all these, instead of using them -through a CDN, to allow netdata to work in cases where internet -connectivity is not available. - - -- [Dygraphs](http://dygraphs.com/) - - Copyright 2009, Dan Vanderkam - [MIT License](http://dygraphs.com/legal.html) - - -- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/) - - Copyright 2009-2012, Splunk Inc. - [New BSD License](http://opensource.org/licenses/BSD-3-Clause) - - -- [Peity](http://benpickles.github.io/peity/) - - Copyright 2009-2015, Ben Pickles - [MIT License](https://github.com/benpickles/peity/blob/master/MIT-LICENCE) - - -- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/) - - Copyright 2013, Robert Fleischmann - [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE) - - -- [Gauge.js](http://bernii.github.io/gauge.js/) - - Copyright, Bernard Kobos - [MIT License](http://bernii.github.io/gauge.js/) - - -- [jQuery](https://jquery.org/) - - Copyright 2015, jQuery Foundation - [MIT License](https://jquery.org/license/) - - -- [Bootstrap](http://getbootstrap.com/getting-started/) - - Copyright 2015, Twitter - [MIT License](http://getbootstrap.com/getting-started/#license-faqs) - -- [Bootstrap-slider](http://seiyria.com/bootstrap-slider/) - - Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors - [MIT License](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md) - -- [Bootstrap Toggle](http://www.bootstraptoggle.com/) - - Copyright (c) 2011-2014 Min Hur, The New York Times Company - [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE) - - -- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/) - - Copyright (c) 2012-2016 Zhixin Wen - [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE) - - -- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin) - - Copyright (c) 2015,2016 hhurz - [MIT License](http://rawgit.com/hhurz/tableExport.jquery.plugin/master/tableExport.js) - - -- [perfect-scrollbar](https://jamesflorentino.github.io/nanoScrollerJS/) - - Copyright 2016, Hyunje Alex Jun and other contributors - [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE) - - -- [FontAwesome](https://fortawesome.github.io/Font-Awesome/) - - Created by Dave Gandy - Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL) - CSS license: [MIT License](http://opensource.org/licenses/mit-license.html) - - -- [IconsDB.com Icons](http://www.iconsdb.com/soylent-red-icons/seo-performance-icon.html) - - Icons provided as CC0 1.0 Universal (CC0 1.0) Public Domain Dedication - - -- [morris.js](http://morrisjs.github.io/morris.js/) - - Copyright 2013, Olly Smith - [Simplified BSD
License](http://morrisjs.github.io/morris.js/) - - -- [Raphaël](http://raphaeljs.com/) - - Copyright 2008, Dmitry Baranovskiy - [MIT License](http://raphaeljs.com/license.html) - - -- [C3](http://c3js.org/) - - Copyright 2013, Masayuki Tanaka - [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE) - - -- [D3](http://d3js.org/) - - Copyright 2015, Mike Bostock - [BSD License](http://opensource.org/licenses/BSD-3-Clause) - - -- [node-extend](https://github.com/justmoon/node-extend) - - Copyright 2014, Stefan Thomas - [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE) - - -- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp) - - Copyright 2013, Stephen Vickers - [MIT License](https://github.com/stephenwvickers/node-net-snmp) - - -- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber) - - Copyright 2017, Stephen Vickers - Copyright 2011, Mark Cavage - [MIT License](https://github.com/stephenwvickers/node-asn1-ber) - - -- [pixl-xml](https://github.com/jhuckaby/pixl-xml) - - Copyright 2015, Joseph Huckaby - [MIT License](https://github.com/jhuckaby/pixl-xml) - -- [sensors](https://github.com/paroj/sensors.py) - - Copyright 2014, Pavel Rojtberg - [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1) - -- [PyYAML](https://bitbucket.org/blackjack/pysensors) - - Copyright 2006, Kirill Simonov - [MIT License](http://pyyaml.org) - -- [urllib3](https://github.com/shazow/urllib3) - - Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt) - [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt) - -- [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html) - - Copyright 2013 Pieroxy - [WTFPL License](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10) - -- [pako](http://nodeca.github.io/pako/) - - Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn - [MIT License](https://github.com/nodeca/pako/blob/master/LICENSE) - -- [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill) - - Copyright (c) 2014 Lucas Garron - [MIT License](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md) - -- [d3pie](https://github.com/benkeen/d3pie) - - Copyright (c) 2014-2015 Benjamin Keen - [MIT License](https://github.com/benkeen/d3pie/blob/master/LICENSE) - \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index 37a09f485..000000000 --- a/LICENSE.md +++ /dev/null @@ -1,9 +0,0 @@ -**netdata**
-(C) Copyright 2017
-Costa Tsaousis <costa@tsaousis.gr> - -For license details refer to the following files: - -- [netdata license](LICENSE) (GPL v3+) -- [third party licenses](LICENSE-REDISTRIBUTED.md), for packages re-distributed with netdata - diff --git a/Makefile.am b/Makefile.am index f20cfc3da..c7fa48c6b 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,8 +1,7 @@ -# -# Copyright (C) 2015 Alon Bar-Lev -# -AUTOMAKE_OPTIONS=foreign dist-bzip2 dist-xz 1.10 -ACLOCAL_AMFLAGS = -I m4 +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS=foreign subdir-objects 1.10 +ACLOCAL_AMFLAGS = -I build/m4 MAINTAINERCLEANFILES= \ config.log config.status \ @@ -23,39 +22,34 @@ EXTRA_DIST = \ .csslintrc \ .eslintignore \ .eslintrc \ + .lgtm.yml \ .travis \ - m4/jemalloc.m4 \ - m4/ax_c___atomic.m4 \ - m4/ax_check_enable_debug.m4 \ - m4/ax_c_mallinfo.m4 \ - m4/ax_gcc_func_attribute.m4 \ - m4/ax_check_compile_flag.m4 \ - m4/ax_c_statement_expressions.m4 \ - m4/ax_pthread.m4 \ - m4/ax_c_lto.m4 \ - m4/ax_c_mallopt.m4 \ - m4/tcmalloc.m4 \ - m4/ax_c__generic.m4 \ - autogen.sh \ + .github/CODEOWNERS \ + build/build.sh \ + build/Dockerfile \ + build/m4/jemalloc.m4 \ + build/m4/ax_c___atomic.m4 \ + build/m4/ax_check_enable_debug.m4 \ + build/m4/ax_c_mallinfo.m4 \ + build/m4/ax_gcc_func_attribute.m4 \ + build/m4/ax_check_compile_flag.m4 \ + build/m4/ax_c_statement_expressions.m4 \ + build/m4/ax_pthread.m4 \ + build/m4/ax_c_lto.m4 \ + build/m4/ax_c_mallopt.m4 \ + build/m4/tcmalloc.m4 \ + build/m4/ax_c__generic.m4 \ README.md \ + CONTRIBUTORS.md \ + CODE_OF_CONDUCT.md \ LICENSE \ - LICENSE.md \ - LICENSE-REDISTRIBUTED.md \ - COPYING \ - autogen.sh \ + REDISTRIBUTED.md \ $(NULL) SUBDIRS = \ - charts.d \ - conf.d \ diagrams \ makeself \ - node.d \ - plugins.d \ - python.d \ - src \ system \ - web \ contrib \ tests \ $(NULL) @@ -63,10 +57,7 @@ dist_noinst_DATA= \ cppcheck.sh \ configs.signatures \ - Dockerfile \ - Dockerfile.alpine \ - Dockerfile.aarch64 \ - Dockerfile.armv7hf \ + docker \ netdata.cppcheck \ netdata.spec \ package.json \ @@ -76,9 +67,415 @@ # should be proper init.d/openrc/systemd usable dist_noinst_SCRIPTS= \ coverity-scan.sh \ - docker-build.sh \ kickstart.sh \ kickstart-static64.sh \ netdata-installer.sh \ installer/functions.sh \ $(NULL) + +# ----------------------------------------------------------------------------- +# Compile netdata binaries + +SUBDIRS += \ + backends \ + collectors \ + daemon \ + database \ + health \ + libnetdata \ + registry \ + streaming \ + web \ + $(NULL) + + +AM_CFLAGS = \ + $(OPTIONAL_MATH_CFLAGS) \ + $(OPTIONAL_NFACCT_CFLAGS) \ + $(OPTIONAL_ZLIB_CFLAGS) \ + $(OPTIONAL_UUID_CFLAGS) \ + $(OPTIONAL_LIBCAP_LIBS) \ + $(OPTIONAL_IPMIMONITORING_CFLAGS) \ + $(NULL) + +sbin_PROGRAMS = +dist_cache_DATA = installer/.keep +dist_varlib_DATA = installer/.keep +dist_registry_DATA = installer/.keep +dist_log_DATA = installer/.keep +plugins_PROGRAMS = + +LIBNETDATA_FILES = \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c \ + libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c \ + libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c \ + libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c \ + libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h \ + libnetdata/eval/eval.c \ + libnetdata/eval/eval.h \ + libnetdata/inlined.h \ + libnetdata/libnetdata.c \ + libnetdata/libnetdata.h \ +
libnetdata/locks/locks.c \ + libnetdata/locks/locks.h \ + libnetdata/log/log.c \ + libnetdata/log/log.h \ + libnetdata/popen/popen.c \ + libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c \ + libnetdata/procfile/procfile.h \ + libnetdata/os.c \ + libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c \ + libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c \ + libnetdata/threads/threads.h \ + libnetdata/url/url.c \ + libnetdata/url/url.h \ + $(NULL) + +APPS_PLUGIN_FILES = \ + collectors/apps.plugin/apps_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +CHECKS_PLUGIN_FILES = \ + collectors/checks.plugin/plugin_checks.c \ + collectors/checks.plugin/plugin_checks.h \ + $(NULL) + +FREEBSD_PLUGIN_FILES = \ + collectors/freebsd.plugin/plugin_freebsd.c \ + collectors/freebsd.plugin/plugin_freebsd.h \ + collectors/freebsd.plugin/freebsd_sysctl.c \ + collectors/freebsd.plugin/freebsd_getmntinfo.c \ + collectors/freebsd.plugin/freebsd_getifaddrs.c \ + collectors/freebsd.plugin/freebsd_devstat.c \ + collectors/freebsd.plugin/freebsd_kstat_zfs.c \ + collectors/freebsd.plugin/freebsd_ipfw.c \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + $(NULL) + +HEALTH_PLUGIN_FILES = \ + health/health.c \ + health/health.h \ + health/health_config.c \ + health/health_json.c \ + health/health_log.c \ + $(NULL) + +IDLEJITTER_PLUGIN_FILES = \ + collectors/idlejitter.plugin/plugin_idlejitter.c \ + collectors/idlejitter.plugin/plugin_idlejitter.h \ + $(NULL) + +CGROUPS_PLUGIN_FILES = \ + collectors/cgroups.plugin/sys_fs_cgroup.c \ + collectors/cgroups.plugin/sys_fs_cgroup.h \ + $(NULL) + +CGROUP_NETWORK_FILES = \ + collectors/cgroups.plugin/cgroup-network.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +DISKSPACE_PLUGIN_FILES = \ + collectors/diskspace.plugin/plugin_diskspace.h \ + collectors/diskspace.plugin/plugin_diskspace.c \ + $(NULL) + +FREEIPMI_PLUGIN_FILES = \ + collectors/freeipmi.plugin/freeipmi_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +NFACCT_PLUGIN_FILES = \ + collectors/nfacct.plugin/plugin_nfacct.c \ + collectors/nfacct.plugin/plugin_nfacct.h \ + $(NULL) + +PROC_PLUGIN_FILES = \ + collectors/proc.plugin/ipc.c \ + collectors/proc.plugin/plugin_proc.c \ + collectors/proc.plugin/plugin_proc.h \ + collectors/proc.plugin/proc_diskstats.c \ + collectors/proc.plugin/proc_interrupts.c \ + collectors/proc.plugin/proc_softirqs.c \ + collectors/proc.plugin/proc_loadavg.c \ + collectors/proc.plugin/proc_meminfo.c \ + collectors/proc.plugin/proc_net_dev.c \ + collectors/proc.plugin/proc_net_ip_vs_stats.c \ + collectors/proc.plugin/proc_net_netstat.c \ + collectors/proc.plugin/proc_net_rpc_nfs.c \ + collectors/proc.plugin/proc_net_rpc_nfsd.c \ + collectors/proc.plugin/proc_net_snmp.c \ + collectors/proc.plugin/proc_net_snmp6.c \ + collectors/proc.plugin/proc_net_sctp_snmp.c \ + collectors/proc.plugin/proc_net_sockstat.c \ + collectors/proc.plugin/proc_net_sockstat6.c \ + collectors/proc.plugin/proc_net_softnet_stat.c \ + collectors/proc.plugin/proc_net_stat_conntrack.c \ + collectors/proc.plugin/proc_net_stat_synproxy.c \ + collectors/proc.plugin/proc_self_mountinfo.c \ + collectors/proc.plugin/proc_self_mountinfo.h \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + 
collectors/proc.plugin/proc_spl_kstat_zfs.c \ + collectors/proc.plugin/proc_stat.c \ + collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \ + collectors/proc.plugin/proc_vmstat.c \ + collectors/proc.plugin/proc_uptime.c \ + collectors/proc.plugin/sys_kernel_mm_ksm.c \ + collectors/proc.plugin/sys_devices_system_edac_mc.c \ + collectors/proc.plugin/sys_devices_system_node.c \ + collectors/proc.plugin/sys_fs_btrfs.c \ + $(NULL) + +TC_PLUGIN_FILES = \ + collectors/tc.plugin/plugin_tc.c \ + collectors/tc.plugin/plugin_tc.h \ + $(NULL) + +MACOS_PLUGIN_FILES = \ + collectors/macos.plugin/plugin_macos.c \ + collectors/macos.plugin/plugin_macos.h \ + collectors/macos.plugin/macos_sysctl.c \ + collectors/macos.plugin/macos_mach_smi.c \ + collectors/macos.plugin/macos_fw.c \ + $(NULL) + +PLUGINSD_PLUGIN_FILES = \ + collectors/plugins.d/plugins_d.c \ + collectors/plugins.d/plugins_d.h \ + $(NULL) + +RRD_PLUGIN_FILES = \ + database/rrdcalc.c \ + database/rrdcalc.h \ + database/rrdcalctemplate.c \ + database/rrdcalctemplate.h \ + database/rrddim.c \ + database/rrddimvar.c \ + database/rrddimvar.h \ + database/rrdfamily.c \ + database/rrdhost.c \ + database/rrd.c \ + database/rrd.h \ + database/rrdset.c \ + database/rrdsetvar.c \ + database/rrdsetvar.h \ + database/rrdvar.c \ + database/rrdvar.h \ + $(NULL) + +API_PLUGIN_FILES = \ + web/api/badges/web_buffer_svg.c \ + web/api/badges/web_buffer_svg.h \ + web/api/exporters/allmetrics.c \ + web/api/exporters/allmetrics.h \ + web/api/exporters/shell/allmetrics_shell.c \ + web/api/exporters/shell/allmetrics_shell.h \ + web/api/queries/average/average.c \ + web/api/queries/average/average.h \ + web/api/queries/des/des.c \ + web/api/queries/des/des.h \ + web/api/queries/incremental_sum/incremental_sum.c \ + web/api/queries/incremental_sum/incremental_sum.h \ + web/api/queries/max/max.c \ + web/api/queries/max/max.h \ + web/api/queries/median/median.c \ + web/api/queries/median/median.h \ + web/api/queries/min/min.c \ + web/api/queries/min/min.h \ + web/api/queries/query.c \ + web/api/queries/query.h \ + web/api/queries/rrdr.c \ + web/api/queries/rrdr.h \ + web/api/queries/ses/ses.c \ + web/api/queries/ses/ses.h \ + web/api/queries/stddev/stddev.c \ + web/api/queries/stddev/stddev.h \ + web/api/queries/sum/sum.c \ + web/api/queries/sum/sum.h \ + web/api/formatters/rrd2json.c \ + web/api/formatters/rrd2json.h \ + web/api/formatters/csv/csv.c \ + web/api/formatters/csv/csv.h \ + web/api/formatters/json/json.c \ + web/api/formatters/json/json.h \ + web/api/formatters/ssv/ssv.c \ + web/api/formatters/ssv/ssv.h \ + web/api/formatters/value/value.c \ + web/api/formatters/value/value.h \ + web/api/formatters/json_wrapper.c \ + web/api/formatters/json_wrapper.h \ + web/api/formatters/charts2json.c \ + web/api/formatters/charts2json.h \ + web/api/formatters/rrdset2json.c \ + web/api/formatters/rrdset2json.h \ + web/api/web_api_v1.c \ + web/api/web_api_v1.h \ + $(NULL) + +STREAMING_PLUGIN_FILES = \ + streaming/rrdpush.c \ + streaming/rrdpush.h \ + $(NULL) + +REGISTRY_PLUGIN_FILES = \ + registry/registry.c \ + registry/registry.h \ + registry/registry_db.c \ + registry/registry_init.c \ + registry/registry_internals.c \ + registry/registry_internals.h \ + registry/registry_log.c \ + registry/registry_machine.c \ + registry/registry_machine.h \ + registry/registry_person.c \ + registry/registry_person.h \ + registry/registry_url.c \ + registry/registry_url.h \ + $(NULL) + +STATSD_PLUGIN_FILES = \ + collectors/statsd.plugin/statsd.c \ + 
collectors/statsd.plugin/statsd.h \ + $(NULL) + +WEB_PLUGIN_FILES = \ + web/server/web_client.c \ + web/server/web_client.h \ + web/server/web_server.c \ + web/server/web_server.h \ + web/server/web_client_cache.c \ + web/server/web_client_cache.h \ + web/server/single/single-threaded.c \ + web/server/single/single-threaded.h \ + web/server/multi/multi-threaded.c \ + web/server/multi/multi-threaded.h \ + web/server/static/static-threaded.c \ + web/server/static/static-threaded.h \ + $(NULL) + +BACKENDS_PLUGIN_FILES = \ + backends/backends.c \ + backends/backends.h \ + backends/graphite/graphite.c \ + backends/graphite/graphite.h \ + backends/json/json.c \ + backends/json/json.h \ + backends/opentsdb/opentsdb.c \ + backends/opentsdb/opentsdb.h \ + backends/prometheus/backend_prometheus.c \ + backends/prometheus/backend_prometheus.h \ + $(NULL) + +DAEMON_FILES = \ + daemon/common.c \ + daemon/common.h \ + daemon/daemon.c \ + daemon/daemon.h \ + daemon/global_statistics.c \ + daemon/global_statistics.h \ + daemon/main.c \ + daemon/main.h \ + daemon/signals.c \ + daemon/signals.h \ + daemon/unit_test.c \ + daemon/unit_test.h \ + $(NULL) + +NETDATA_FILES = \ + collectors/all.h \ + $(DAEMON_FILES) \ + $(LIBNETDATA_FILES) \ + $(API_PLUGIN_FILES) \ + $(BACKENDS_PLUGIN_FILES) \ + $(CHECKS_PLUGIN_FILES) \ + $(HEALTH_PLUGIN_FILES) \ + $(IDLEJITTER_PLUGIN_FILES) \ + $(PLUGINSD_PLUGIN_FILES) \ + $(REGISTRY_PLUGIN_FILES) \ + $(RRD_PLUGIN_FILES) \ + $(STREAMING_PLUGIN_FILES) \ + $(STATSD_PLUGIN_FILES) \ + $(WEB_PLUGIN_FILES) \ + $(NULL) + +if FREEBSD + NETDATA_FILES += \ + $(FREEBSD_PLUGIN_FILES) \ + $(NULL) +endif + +if MACOS + NETDATA_FILES += \ + $(MACOS_PLUGIN_FILES) \ + $(NULL) +endif + +if LINUX + NETDATA_FILES += \ + $(CGROUPS_PLUGIN_FILES) \ + $(DISKSPACE_PLUGIN_FILES) \ + $(NFACCT_PLUGIN_FILES) \ + $(PROC_PLUGIN_FILES) \ + $(TC_PLUGIN_FILES) \ + $(NULL) + +endif + +NETDATA_COMMON_LIBS = \ + $(OPTIONAL_MATH_LIBS) \ + $(OPTIONAL_ZLIB_LIBS) \ + $(OPTIONAL_UUID_LIBS) \ + $(NULL) + + +sbin_PROGRAMS += netdata +netdata_SOURCES = $(NETDATA_FILES) +netdata_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_NFACCT_LIBS) \ + $(NULL) + +if ENABLE_PLUGIN_APPS + plugins_PROGRAMS += apps.plugin + apps_plugin_SOURCES = $(APPS_PLUGIN_FILES) + apps_plugin_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_LIBCAP_LIBS) \ + $(NULL) +endif + +if ENABLE_PLUGIN_CGROUP_NETWORK + plugins_PROGRAMS += cgroup-network + cgroup_network_SOURCES = $(CGROUP_NETWORK_FILES) + cgroup_network_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(NULL) +endif + +if ENABLE_PLUGIN_FREEIPMI + plugins_PROGRAMS += freeipmi.plugin + freeipmi_plugin_SOURCES = $(FREEIPMI_PLUGIN_FILES) + freeipmi_plugin_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_IPMIMONITORING_LIBS) \ + $(NULL) +endif diff --git a/Makefile.in b/Makefile.in index 1bbf19aad..9b1c226b8 100644 --- a/Makefile.in +++ b/Makefile.in @@ -14,6 +14,9 @@ @SET_MAKE@ +# SPDX-License-Identifier: GPL-3.0-or-later + + VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' @@ -79,21 +82,46 @@ PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ +sbin_PROGRAMS = netdata$(EXEEXT) +plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3) +@FREEBSD_TRUE@am__append_1 = \ +@FREEBSD_TRUE@ $(FREEBSD_PLUGIN_FILES) \ +@FREEBSD_TRUE@ $(NULL) + +@MACOS_TRUE@am__append_2 = \ +@MACOS_TRUE@ $(MACOS_PLUGIN_FILES) \ +@MACOS_TRUE@ $(NULL) + +@LINUX_TRUE@am__append_3 = \ +@LINUX_TRUE@ $(CGROUPS_PLUGIN_FILES) \ +@LINUX_TRUE@ 
$(DISKSPACE_PLUGIN_FILES) \ +@LINUX_TRUE@ $(NFACCT_PLUGIN_FILES) \ +@LINUX_TRUE@ $(PROC_PLUGIN_FILES) \ +@LINUX_TRUE@ $(TC_PLUGIN_FILES) \ +@LINUX_TRUE@ $(NULL) + +@ENABLE_PLUGIN_APPS_TRUE@am__append_4 = apps.plugin +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__append_5 = cgroup-network +@ENABLE_PLUGIN_FREEIPMI_TRUE@am__append_6 = freeipmi.plugin subdir = . DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/configure $(am__configure_deps) \ $(srcdir)/config.h.in $(srcdir)/netdata.spec.in \ - $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) COPYING ChangeLog \ - compile config.guess config.sub install-sh missing + $(dist_noinst_SCRIPTS) depcomp $(dist_cache_DATA) \ + $(dist_log_DATA) $(dist_noinst_DATA) $(dist_registry_DATA) \ + $(dist_varlib_DATA) compile config.guess config.sub install-sh \ + missing ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ @@ -102,6 +130,405 @@ mkinstalldirs = $(install_sh) -d CONFIG_HEADER = config.h CONFIG_CLEAN_FILES = netdata.spec CONFIG_CLEAN_VPATH_FILES = +@ENABLE_PLUGIN_APPS_TRUE@am__EXEEXT_1 = apps.plugin$(EXEEXT) +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__EXEEXT_2 = \ +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ cgroup-network$(EXEEXT) +@ENABLE_PLUGIN_FREEIPMI_TRUE@am__EXEEXT_3 = freeipmi.plugin$(EXEEXT) +am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" \ + "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" \ + "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)" +PROGRAMS = $(plugins_PROGRAMS) $(sbin_PROGRAMS) +am__apps_plugin_SOURCES_DIST = collectors/apps.plugin/apps_plugin.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ + libnetdata/eval/eval.h libnetdata/inlined.h \ + libnetdata/libnetdata.c libnetdata/libnetdata.h \ + libnetdata/locks/locks.c libnetdata/locks/locks.h \ + libnetdata/log/log.c libnetdata/log/log.h \ + libnetdata/popen/popen.c libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ + libnetdata/os.c libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c 
libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c libnetdata/threads/threads.h \ + libnetdata/url/url.c libnetdata/url/url.h +am__dirstamp = $(am__leading_dot)dirstamp +am__objects_1 = libnetdata/adaptive_resortable_list/adaptive_resortable_list.$(OBJEXT) \ + libnetdata/config/appconfig.$(OBJEXT) \ + libnetdata/avl/avl.$(OBJEXT) \ + libnetdata/buffer/buffer.$(OBJEXT) \ + libnetdata/clocks/clocks.$(OBJEXT) \ + libnetdata/dictionary/dictionary.$(OBJEXT) \ + libnetdata/eval/eval.$(OBJEXT) libnetdata/libnetdata.$(OBJEXT) \ + libnetdata/locks/locks.$(OBJEXT) libnetdata/log/log.$(OBJEXT) \ + libnetdata/popen/popen.$(OBJEXT) \ + libnetdata/procfile/procfile.$(OBJEXT) libnetdata/os.$(OBJEXT) \ + libnetdata/simple_pattern/simple_pattern.$(OBJEXT) \ + libnetdata/socket/socket.$(OBJEXT) \ + libnetdata/statistical/statistical.$(OBJEXT) \ + libnetdata/storage_number/storage_number.$(OBJEXT) \ + libnetdata/threads/threads.$(OBJEXT) \ + libnetdata/url/url.$(OBJEXT) +am__objects_2 = collectors/apps.plugin/apps_plugin.$(OBJEXT) \ + $(am__objects_1) +@ENABLE_PLUGIN_APPS_TRUE@am_apps_plugin_OBJECTS = $(am__objects_2) +apps_plugin_OBJECTS = $(am_apps_plugin_OBJECTS) +am__DEPENDENCIES_1 = +am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ + $(am__DEPENDENCIES_1) +@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_DEPENDENCIES = \ +@ENABLE_PLUGIN_APPS_TRUE@ $(am__DEPENDENCIES_2) \ +@ENABLE_PLUGIN_APPS_TRUE@ $(am__DEPENDENCIES_1) +am__cgroup_network_SOURCES_DIST = \ + collectors/cgroups.plugin/cgroup-network.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ + libnetdata/eval/eval.h libnetdata/inlined.h \ + libnetdata/libnetdata.c libnetdata/libnetdata.h \ + libnetdata/locks/locks.c libnetdata/locks/locks.h \ + libnetdata/log/log.c libnetdata/log/log.h \ + libnetdata/popen/popen.c libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ + libnetdata/os.c libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c libnetdata/threads/threads.h \ + libnetdata/url/url.c libnetdata/url/url.h +am__objects_3 = collectors/cgroups.plugin/cgroup-network.$(OBJEXT) \ + $(am__objects_1) +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am_cgroup_network_OBJECTS = \ +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(am__objects_3) +cgroup_network_OBJECTS = $(am_cgroup_network_OBJECTS) +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_DEPENDENCIES = \ +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(am__DEPENDENCIES_2) +am__freeipmi_plugin_SOURCES_DIST = \ + collectors/freeipmi.plugin/freeipmi_plugin.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + 
libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ + libnetdata/eval/eval.h libnetdata/inlined.h \ + libnetdata/libnetdata.c libnetdata/libnetdata.h \ + libnetdata/locks/locks.c libnetdata/locks/locks.h \ + libnetdata/log/log.c libnetdata/log/log.h \ + libnetdata/popen/popen.c libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ + libnetdata/os.c libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c libnetdata/threads/threads.h \ + libnetdata/url/url.c libnetdata/url/url.h +am__objects_4 = collectors/freeipmi.plugin/freeipmi_plugin.$(OBJEXT) \ + $(am__objects_1) +@ENABLE_PLUGIN_FREEIPMI_TRUE@am_freeipmi_plugin_OBJECTS = \ +@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__objects_4) +freeipmi_plugin_OBJECTS = $(am_freeipmi_plugin_OBJECTS) +@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_DEPENDENCIES = \ +@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__DEPENDENCIES_2) \ +@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__DEPENDENCIES_1) +am__netdata_SOURCES_DIST = collectors/all.h daemon/common.c \ + daemon/common.h daemon/daemon.c daemon/daemon.h \ + daemon/global_statistics.c daemon/global_statistics.h \ + daemon/main.c daemon/main.h daemon/signals.c daemon/signals.h \ + daemon/unit_test.c daemon/unit_test.h \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ + libnetdata/eval/eval.h libnetdata/inlined.h \ + libnetdata/libnetdata.c libnetdata/libnetdata.h \ + libnetdata/locks/locks.c libnetdata/locks/locks.h \ + libnetdata/log/log.c libnetdata/log/log.h \ + libnetdata/popen/popen.c libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ + libnetdata/os.c libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c libnetdata/threads/threads.h \ + libnetdata/url/url.c libnetdata/url/url.h \ + web/api/badges/web_buffer_svg.c \ + web/api/badges/web_buffer_svg.h web/api/exporters/allmetrics.c \ + web/api/exporters/allmetrics.h \ + web/api/exporters/shell/allmetrics_shell.c \ + web/api/exporters/shell/allmetrics_shell.h \ + web/api/queries/average/average.c \ + web/api/queries/average/average.h web/api/queries/des/des.c \ + web/api/queries/des/des.h \ + 
web/api/queries/incremental_sum/incremental_sum.c \ + web/api/queries/incremental_sum/incremental_sum.h \ + web/api/queries/max/max.c web/api/queries/max/max.h \ + web/api/queries/median/median.c \ + web/api/queries/median/median.h web/api/queries/min/min.c \ + web/api/queries/min/min.h web/api/queries/query.c \ + web/api/queries/query.h web/api/queries/rrdr.c \ + web/api/queries/rrdr.h web/api/queries/ses/ses.c \ + web/api/queries/ses/ses.h web/api/queries/stddev/stddev.c \ + web/api/queries/stddev/stddev.h web/api/queries/sum/sum.c \ + web/api/queries/sum/sum.h web/api/formatters/rrd2json.c \ + web/api/formatters/rrd2json.h web/api/formatters/csv/csv.c \ + web/api/formatters/csv/csv.h web/api/formatters/json/json.c \ + web/api/formatters/json/json.h web/api/formatters/ssv/ssv.c \ + web/api/formatters/ssv/ssv.h web/api/formatters/value/value.c \ + web/api/formatters/value/value.h \ + web/api/formatters/json_wrapper.c \ + web/api/formatters/json_wrapper.h \ + web/api/formatters/charts2json.c \ + web/api/formatters/charts2json.h \ + web/api/formatters/rrdset2json.c \ + web/api/formatters/rrdset2json.h web/api/web_api_v1.c \ + web/api/web_api_v1.h backends/backends.c backends/backends.h \ + backends/graphite/graphite.c backends/graphite/graphite.h \ + backends/json/json.c backends/json/json.h \ + backends/opentsdb/opentsdb.c backends/opentsdb/opentsdb.h \ + backends/prometheus/backend_prometheus.c \ + backends/prometheus/backend_prometheus.h \ + collectors/checks.plugin/plugin_checks.c \ + collectors/checks.plugin/plugin_checks.h health/health.c \ + health/health.h health/health_config.c health/health_json.c \ + health/health_log.c \ + collectors/idlejitter.plugin/plugin_idlejitter.c \ + collectors/idlejitter.plugin/plugin_idlejitter.h \ + collectors/plugins.d/plugins_d.c \ + collectors/plugins.d/plugins_d.h registry/registry.c \ + registry/registry.h registry/registry_db.c \ + registry/registry_init.c registry/registry_internals.c \ + registry/registry_internals.h registry/registry_log.c \ + registry/registry_machine.c registry/registry_machine.h \ + registry/registry_person.c registry/registry_person.h \ + registry/registry_url.c registry/registry_url.h \ + database/rrdcalc.c database/rrdcalc.h \ + database/rrdcalctemplate.c database/rrdcalctemplate.h \ + database/rrddim.c database/rrddimvar.c database/rrddimvar.h \ + database/rrdfamily.c database/rrdhost.c database/rrd.c \ + database/rrd.h database/rrdset.c database/rrdsetvar.c \ + database/rrdsetvar.h database/rrdvar.c database/rrdvar.h \ + streaming/rrdpush.c streaming/rrdpush.h \ + collectors/statsd.plugin/statsd.c \ + collectors/statsd.plugin/statsd.h web/server/web_client.c \ + web/server/web_client.h web/server/web_server.c \ + web/server/web_server.h web/server/web_client_cache.c \ + web/server/web_client_cache.h \ + web/server/single/single-threaded.c \ + web/server/single/single-threaded.h \ + web/server/multi/multi-threaded.c \ + web/server/multi/multi-threaded.h \ + web/server/static/static-threaded.c \ + web/server/static/static-threaded.h \ + collectors/freebsd.plugin/plugin_freebsd.c \ + collectors/freebsd.plugin/plugin_freebsd.h \ + collectors/freebsd.plugin/freebsd_sysctl.c \ + collectors/freebsd.plugin/freebsd_getmntinfo.c \ + collectors/freebsd.plugin/freebsd_getifaddrs.c \ + collectors/freebsd.plugin/freebsd_devstat.c \ + collectors/freebsd.plugin/freebsd_kstat_zfs.c \ + collectors/freebsd.plugin/freebsd_ipfw.c \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + 
collectors/macos.plugin/plugin_macos.c \ + collectors/macos.plugin/plugin_macos.h \ + collectors/macos.plugin/macos_sysctl.c \ + collectors/macos.plugin/macos_mach_smi.c \ + collectors/macos.plugin/macos_fw.c \ + collectors/cgroups.plugin/sys_fs_cgroup.c \ + collectors/cgroups.plugin/sys_fs_cgroup.h \ + collectors/diskspace.plugin/plugin_diskspace.h \ + collectors/diskspace.plugin/plugin_diskspace.c \ + collectors/nfacct.plugin/plugin_nfacct.c \ + collectors/nfacct.plugin/plugin_nfacct.h \ + collectors/proc.plugin/ipc.c \ + collectors/proc.plugin/plugin_proc.c \ + collectors/proc.plugin/plugin_proc.h \ + collectors/proc.plugin/proc_diskstats.c \ + collectors/proc.plugin/proc_interrupts.c \ + collectors/proc.plugin/proc_softirqs.c \ + collectors/proc.plugin/proc_loadavg.c \ + collectors/proc.plugin/proc_meminfo.c \ + collectors/proc.plugin/proc_net_dev.c \ + collectors/proc.plugin/proc_net_ip_vs_stats.c \ + collectors/proc.plugin/proc_net_netstat.c \ + collectors/proc.plugin/proc_net_rpc_nfs.c \ + collectors/proc.plugin/proc_net_rpc_nfsd.c \ + collectors/proc.plugin/proc_net_snmp.c \ + collectors/proc.plugin/proc_net_snmp6.c \ + collectors/proc.plugin/proc_net_sctp_snmp.c \ + collectors/proc.plugin/proc_net_sockstat.c \ + collectors/proc.plugin/proc_net_sockstat6.c \ + collectors/proc.plugin/proc_net_softnet_stat.c \ + collectors/proc.plugin/proc_net_stat_conntrack.c \ + collectors/proc.plugin/proc_net_stat_synproxy.c \ + collectors/proc.plugin/proc_self_mountinfo.c \ + collectors/proc.plugin/proc_self_mountinfo.h \ + collectors/proc.plugin/proc_spl_kstat_zfs.c \ + collectors/proc.plugin/proc_stat.c \ + collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \ + collectors/proc.plugin/proc_vmstat.c \ + collectors/proc.plugin/proc_uptime.c \ + collectors/proc.plugin/sys_kernel_mm_ksm.c \ + collectors/proc.plugin/sys_devices_system_edac_mc.c \ + collectors/proc.plugin/sys_devices_system_node.c \ + collectors/proc.plugin/sys_fs_btrfs.c \ + collectors/tc.plugin/plugin_tc.c \ + collectors/tc.plugin/plugin_tc.h +am__objects_5 = daemon/common.$(OBJEXT) daemon/daemon.$(OBJEXT) \ + daemon/global_statistics.$(OBJEXT) daemon/main.$(OBJEXT) \ + daemon/signals.$(OBJEXT) daemon/unit_test.$(OBJEXT) +am__objects_6 = web/api/badges/web_buffer_svg.$(OBJEXT) \ + web/api/exporters/allmetrics.$(OBJEXT) \ + web/api/exporters/shell/allmetrics_shell.$(OBJEXT) \ + web/api/queries/average/average.$(OBJEXT) \ + web/api/queries/des/des.$(OBJEXT) \ + web/api/queries/incremental_sum/incremental_sum.$(OBJEXT) \ + web/api/queries/max/max.$(OBJEXT) \ + web/api/queries/median/median.$(OBJEXT) \ + web/api/queries/min/min.$(OBJEXT) \ + web/api/queries/query.$(OBJEXT) web/api/queries/rrdr.$(OBJEXT) \ + web/api/queries/ses/ses.$(OBJEXT) \ + web/api/queries/stddev/stddev.$(OBJEXT) \ + web/api/queries/sum/sum.$(OBJEXT) \ + web/api/formatters/rrd2json.$(OBJEXT) \ + web/api/formatters/csv/csv.$(OBJEXT) \ + web/api/formatters/json/json.$(OBJEXT) \ + web/api/formatters/ssv/ssv.$(OBJEXT) \ + web/api/formatters/value/value.$(OBJEXT) \ + web/api/formatters/json_wrapper.$(OBJEXT) \ + web/api/formatters/charts2json.$(OBJEXT) \ + web/api/formatters/rrdset2json.$(OBJEXT) \ + web/api/web_api_v1.$(OBJEXT) +am__objects_7 = backends/backends.$(OBJEXT) \ + backends/graphite/graphite.$(OBJEXT) \ + backends/json/json.$(OBJEXT) \ + backends/opentsdb/opentsdb.$(OBJEXT) \ + backends/prometheus/backend_prometheus.$(OBJEXT) +am__objects_8 = collectors/checks.plugin/plugin_checks.$(OBJEXT) +am__objects_9 = health/health.$(OBJEXT) 
health/health_config.$(OBJEXT) \ + health/health_json.$(OBJEXT) health/health_log.$(OBJEXT) +am__objects_10 = \ + collectors/idlejitter.plugin/plugin_idlejitter.$(OBJEXT) +am__objects_11 = collectors/plugins.d/plugins_d.$(OBJEXT) +am__objects_12 = registry/registry.$(OBJEXT) \ + registry/registry_db.$(OBJEXT) \ + registry/registry_init.$(OBJEXT) \ + registry/registry_internals.$(OBJEXT) \ + registry/registry_log.$(OBJEXT) \ + registry/registry_machine.$(OBJEXT) \ + registry/registry_person.$(OBJEXT) \ + registry/registry_url.$(OBJEXT) +am__objects_13 = database/rrdcalc.$(OBJEXT) \ + database/rrdcalctemplate.$(OBJEXT) database/rrddim.$(OBJEXT) \ + database/rrddimvar.$(OBJEXT) database/rrdfamily.$(OBJEXT) \ + database/rrdhost.$(OBJEXT) database/rrd.$(OBJEXT) \ + database/rrdset.$(OBJEXT) database/rrdsetvar.$(OBJEXT) \ + database/rrdvar.$(OBJEXT) +am__objects_14 = streaming/rrdpush.$(OBJEXT) +am__objects_15 = collectors/statsd.plugin/statsd.$(OBJEXT) +am__objects_16 = web/server/web_client.$(OBJEXT) \ + web/server/web_server.$(OBJEXT) \ + web/server/web_client_cache.$(OBJEXT) \ + web/server/single/single-threaded.$(OBJEXT) \ + web/server/multi/multi-threaded.$(OBJEXT) \ + web/server/static/static-threaded.$(OBJEXT) +am__objects_17 = collectors/freebsd.plugin/plugin_freebsd.$(OBJEXT) \ + collectors/freebsd.plugin/freebsd_sysctl.$(OBJEXT) \ + collectors/freebsd.plugin/freebsd_getmntinfo.$(OBJEXT) \ + collectors/freebsd.plugin/freebsd_getifaddrs.$(OBJEXT) \ + collectors/freebsd.plugin/freebsd_devstat.$(OBJEXT) \ + collectors/freebsd.plugin/freebsd_kstat_zfs.$(OBJEXT) \ + collectors/freebsd.plugin/freebsd_ipfw.$(OBJEXT) \ + collectors/proc.plugin/zfs_common.$(OBJEXT) +@FREEBSD_TRUE@am__objects_18 = $(am__objects_17) +am__objects_19 = collectors/macos.plugin/plugin_macos.$(OBJEXT) \ + collectors/macos.plugin/macos_sysctl.$(OBJEXT) \ + collectors/macos.plugin/macos_mach_smi.$(OBJEXT) \ + collectors/macos.plugin/macos_fw.$(OBJEXT) +@MACOS_TRUE@am__objects_20 = $(am__objects_19) +am__objects_21 = collectors/cgroups.plugin/sys_fs_cgroup.$(OBJEXT) +am__objects_22 = \ + collectors/diskspace.plugin/plugin_diskspace.$(OBJEXT) +am__objects_23 = collectors/nfacct.plugin/plugin_nfacct.$(OBJEXT) +am__objects_24 = collectors/proc.plugin/ipc.$(OBJEXT) \ + collectors/proc.plugin/plugin_proc.$(OBJEXT) \ + collectors/proc.plugin/proc_diskstats.$(OBJEXT) \ + collectors/proc.plugin/proc_interrupts.$(OBJEXT) \ + collectors/proc.plugin/proc_softirqs.$(OBJEXT) \ + collectors/proc.plugin/proc_loadavg.$(OBJEXT) \ + collectors/proc.plugin/proc_meminfo.$(OBJEXT) \ + collectors/proc.plugin/proc_net_dev.$(OBJEXT) \ + collectors/proc.plugin/proc_net_ip_vs_stats.$(OBJEXT) \ + collectors/proc.plugin/proc_net_netstat.$(OBJEXT) \ + collectors/proc.plugin/proc_net_rpc_nfs.$(OBJEXT) \ + collectors/proc.plugin/proc_net_rpc_nfsd.$(OBJEXT) \ + collectors/proc.plugin/proc_net_snmp.$(OBJEXT) \ + collectors/proc.plugin/proc_net_snmp6.$(OBJEXT) \ + collectors/proc.plugin/proc_net_sctp_snmp.$(OBJEXT) \ + collectors/proc.plugin/proc_net_sockstat.$(OBJEXT) \ + collectors/proc.plugin/proc_net_sockstat6.$(OBJEXT) \ + collectors/proc.plugin/proc_net_softnet_stat.$(OBJEXT) \ + collectors/proc.plugin/proc_net_stat_conntrack.$(OBJEXT) \ + collectors/proc.plugin/proc_net_stat_synproxy.$(OBJEXT) \ + collectors/proc.plugin/proc_self_mountinfo.$(OBJEXT) \ + collectors/proc.plugin/zfs_common.$(OBJEXT) \ + collectors/proc.plugin/proc_spl_kstat_zfs.$(OBJEXT) \ + collectors/proc.plugin/proc_stat.$(OBJEXT) \ + 
collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.$(OBJEXT) \ + collectors/proc.plugin/proc_vmstat.$(OBJEXT) \ + collectors/proc.plugin/proc_uptime.$(OBJEXT) \ + collectors/proc.plugin/sys_kernel_mm_ksm.$(OBJEXT) \ + collectors/proc.plugin/sys_devices_system_edac_mc.$(OBJEXT) \ + collectors/proc.plugin/sys_devices_system_node.$(OBJEXT) \ + collectors/proc.plugin/sys_fs_btrfs.$(OBJEXT) +am__objects_25 = collectors/tc.plugin/plugin_tc.$(OBJEXT) +@LINUX_TRUE@am__objects_26 = $(am__objects_21) $(am__objects_22) \ +@LINUX_TRUE@ $(am__objects_23) $(am__objects_24) \ +@LINUX_TRUE@ $(am__objects_25) +am__objects_27 = $(am__objects_5) $(am__objects_1) $(am__objects_6) \ + $(am__objects_7) $(am__objects_8) $(am__objects_9) \ + $(am__objects_10) $(am__objects_11) $(am__objects_12) \ + $(am__objects_13) $(am__objects_14) $(am__objects_15) \ + $(am__objects_16) $(am__objects_18) $(am__objects_20) \ + $(am__objects_26) +am_netdata_OBJECTS = $(am__objects_27) +netdata_OBJECTS = $(am_netdata_OBJECTS) +netdata_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) SCRIPTS = $(dist_noinst_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) @@ -115,8 +542,28 @@ AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = -SOURCES = -DIST_SOURCES = +DEFAULT_INCLUDES = -I.@am__isrc@ +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(apps_plugin_SOURCES) $(cgroup_network_SOURCES) \ + $(freeipmi_plugin_SOURCES) $(netdata_SOURCES) +DIST_SOURCES = $(am__apps_plugin_SOURCES_DIST) \ + $(am__cgroup_network_SOURCES_DIST) \ + $(am__freeipmi_plugin_SOURCES_DIST) \ + $(am__netdata_SOURCES_DIST) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ @@ -130,7 +577,35 @@ am__can_run_installinfo = \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac -DATA = $(dist_noinst_DATA) +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +DATA = $(dist_cache_DATA) $(dist_log_DATA) $(dist_noinst_DATA) \ + $(dist_registry_DATA) $(dist_varlib_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ @@ -196,9 +671,9 @@ am__relativize = \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" -DIST_ARCHIVES = $(distdir).tar.gz $(distdir).tar.bz2 $(distdir).tar.xz +DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best -DIST_TARGETS = dist-xz dist-bzip2 dist-gzip +DIST_TARGETS = dist-gzip distuninstallcheck_listfiles = find . -type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' @@ -300,6 +775,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -321,6 +797,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ @@ -347,12 +824,8 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ varlibdir = @varlibdir@ webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev -# -AUTOMAKE_OPTIONS = foreign dist-bzip2 dist-xz 1.10 -ACLOCAL_AMFLAGS = -I m4 +AUTOMAKE_OPTIONS = foreign subdir-objects 1.10 +ACLOCAL_AMFLAGS = -I build/m4 MAINTAINERCLEANFILES = \ config.log config.status \ $(srcdir)/Makefile.in \ @@ -372,50 +845,40 @@ EXTRA_DIST = \ .csslintrc \ .eslintignore \ .eslintrc \ + .lgtm.yml \ .travis \ - m4/jemalloc.m4 \ - m4/ax_c___atomic.m4 \ - m4/ax_check_enable_debug.m4 \ - m4/ax_c_mallinfo.m4 \ - m4/ax_gcc_func_attribute.m4 \ - m4/ax_check_compile_flag.m4 \ - m4/ax_c_statement_expressions.m4 \ - m4/ax_pthread.m4 \ - m4/ax_c_lto.m4 \ - m4/ax_c_mallopt.m4 \ - m4/tcmalloc.m4 \ - m4/ax_c__generic.m4 \ - autogen.sh \ + .github/CODEOWNERS \ + build/build.sh \ + build/Dockerfile \ + build/m4/jemalloc.m4 \ + build/m4/ax_c___atomic.m4 \ + build/m4/ax_check_enable_debug.m4 \ + build/m4/ax_c_mallinfo.m4 \ + build/m4/ax_gcc_func_attribute.m4 \ + build/m4/ax_check_compile_flag.m4 \ + build/m4/ax_c_statement_expressions.m4 \ + build/m4/ax_pthread.m4 \ + build/m4/ax_c_lto.m4 \ + build/m4/ax_c_mallopt.m4 \ + build/m4/tcmalloc.m4 \ + build/m4/ax_c__generic.m4 \ README.md \ + CONTRIBUTORS.md \ + CODE_OF_CONDUCT.md \ LICENSE \ - LICENSE.md \ - LICENSE-REDISTRIBUTED.md \ - COPYING \ - autogen.sh \ + REDISTRIBUTED.md \ $(NULL) -SUBDIRS = \ - charts.d \ - conf.d \ - diagrams \ - makeself \ - node.d \ - plugins.d \ - python.d \ - src \ - system \ - web \ - contrib \ - tests \ - $(NULL) +# ----------------------------------------------------------------------------- +# Compile netdata binaries +SUBDIRS = diagrams makeself system contrib tests $(NULL) backends \ + collectors daemon database health libnetdata registry \ + streaming web $(NULL) dist_noinst_DATA = \ cppcheck.sh \ configs.signatures \ - Dockerfile \ - Dockerfile.alpine \ - Dockerfile.aarch64 \ - Dockerfile.armv7hf \ + docker \ netdata.cppcheck \ netdata.spec \ package.json \ @@ -426,17 +889,362 @@ dist_noinst_DATA = \ # should be proper init.d/openrc/systemd usable dist_noinst_SCRIPTS = \ coverity-scan.sh \ - docker-build.sh \ kickstart.sh \ kickstart-static64.sh \ netdata-installer.sh \ installer/functions.sh 
\ $(NULL) +AM_CFLAGS = \ + $(OPTIONAL_MATH_CFLAGS) \ + $(OPTIONAL_NFACCT_CFLAGS) \ + $(OPTIONAL_ZLIB_CFLAGS) \ + $(OPTIONAL_UUID_CFLAGS) \ + $(OPTIONAL_LIBCAP_CFLAGS) \ + $(OPTIONAL_IPMIMONITORING_CFLAGS) \ + $(NULL) + +dist_cache_DATA = installer/.keep +dist_varlib_DATA = installer/.keep +dist_registry_DATA = installer/.keep +dist_log_DATA = installer/.keep +LIBNETDATA_FILES = \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c \ + libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c \ + libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c \ + libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c \ + libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h \ + libnetdata/eval/eval.c \ + libnetdata/eval/eval.h \ + libnetdata/inlined.h \ + libnetdata/libnetdata.c \ + libnetdata/libnetdata.h \ + libnetdata/locks/locks.c \ + libnetdata/locks/locks.h \ + libnetdata/log/log.c \ + libnetdata/log/log.h \ + libnetdata/popen/popen.c \ + libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c \ + libnetdata/procfile/procfile.h \ + libnetdata/os.c \ + libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c \ + libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c \ + libnetdata/threads/threads.h \ + libnetdata/url/url.c \ + libnetdata/url/url.h \ + $(NULL) + +APPS_PLUGIN_FILES = \ + collectors/apps.plugin/apps_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +CHECKS_PLUGIN_FILES = \ + collectors/checks.plugin/plugin_checks.c \ + collectors/checks.plugin/plugin_checks.h \ + $(NULL) + +FREEBSD_PLUGIN_FILES = \ + collectors/freebsd.plugin/plugin_freebsd.c \ + collectors/freebsd.plugin/plugin_freebsd.h \ + collectors/freebsd.plugin/freebsd_sysctl.c \ + collectors/freebsd.plugin/freebsd_getmntinfo.c \ + collectors/freebsd.plugin/freebsd_getifaddrs.c \ + collectors/freebsd.plugin/freebsd_devstat.c \ + collectors/freebsd.plugin/freebsd_kstat_zfs.c \ + collectors/freebsd.plugin/freebsd_ipfw.c \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + $(NULL) + +HEALTH_PLUGIN_FILES = \ + health/health.c \ + health/health.h \ + health/health_config.c \ + health/health_json.c \ + health/health_log.c \ + $(NULL) + +IDLEJITTER_PLUGIN_FILES = \ + collectors/idlejitter.plugin/plugin_idlejitter.c \ + collectors/idlejitter.plugin/plugin_idlejitter.h \ + $(NULL) + +CGROUPS_PLUGIN_FILES = \ + collectors/cgroups.plugin/sys_fs_cgroup.c \ + collectors/cgroups.plugin/sys_fs_cgroup.h \ + $(NULL) + +CGROUP_NETWORK_FILES = \ + collectors/cgroups.plugin/cgroup-network.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +DISKSPACE_PLUGIN_FILES = \ + collectors/diskspace.plugin/plugin_diskspace.h \ + collectors/diskspace.plugin/plugin_diskspace.c \ + $(NULL) + +FREEIPMI_PLUGIN_FILES = \ + collectors/freeipmi.plugin/freeipmi_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +NFACCT_PLUGIN_FILES = \ + collectors/nfacct.plugin/plugin_nfacct.c \ + collectors/nfacct.plugin/plugin_nfacct.h \ + $(NULL) + +PROC_PLUGIN_FILES = \ + collectors/proc.plugin/ipc.c \ + collectors/proc.plugin/plugin_proc.c \ + collectors/proc.plugin/plugin_proc.h \ + collectors/proc.plugin/proc_diskstats.c \ +
collectors/proc.plugin/proc_interrupts.c \ + collectors/proc.plugin/proc_softirqs.c \ + collectors/proc.plugin/proc_loadavg.c \ + collectors/proc.plugin/proc_meminfo.c \ + collectors/proc.plugin/proc_net_dev.c \ + collectors/proc.plugin/proc_net_ip_vs_stats.c \ + collectors/proc.plugin/proc_net_netstat.c \ + collectors/proc.plugin/proc_net_rpc_nfs.c \ + collectors/proc.plugin/proc_net_rpc_nfsd.c \ + collectors/proc.plugin/proc_net_snmp.c \ + collectors/proc.plugin/proc_net_snmp6.c \ + collectors/proc.plugin/proc_net_sctp_snmp.c \ + collectors/proc.plugin/proc_net_sockstat.c \ + collectors/proc.plugin/proc_net_sockstat6.c \ + collectors/proc.plugin/proc_net_softnet_stat.c \ + collectors/proc.plugin/proc_net_stat_conntrack.c \ + collectors/proc.plugin/proc_net_stat_synproxy.c \ + collectors/proc.plugin/proc_self_mountinfo.c \ + collectors/proc.plugin/proc_self_mountinfo.h \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + collectors/proc.plugin/proc_spl_kstat_zfs.c \ + collectors/proc.plugin/proc_stat.c \ + collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \ + collectors/proc.plugin/proc_vmstat.c \ + collectors/proc.plugin/proc_uptime.c \ + collectors/proc.plugin/sys_kernel_mm_ksm.c \ + collectors/proc.plugin/sys_devices_system_edac_mc.c \ + collectors/proc.plugin/sys_devices_system_node.c \ + collectors/proc.plugin/sys_fs_btrfs.c \ + $(NULL) + +TC_PLUGIN_FILES = \ + collectors/tc.plugin/plugin_tc.c \ + collectors/tc.plugin/plugin_tc.h \ + $(NULL) + +MACOS_PLUGIN_FILES = \ + collectors/macos.plugin/plugin_macos.c \ + collectors/macos.plugin/plugin_macos.h \ + collectors/macos.plugin/macos_sysctl.c \ + collectors/macos.plugin/macos_mach_smi.c \ + collectors/macos.plugin/macos_fw.c \ + $(NULL) + +PLUGINSD_PLUGIN_FILES = \ + collectors/plugins.d/plugins_d.c \ + collectors/plugins.d/plugins_d.h \ + $(NULL) + +RRD_PLUGIN_FILES = \ + database/rrdcalc.c \ + database/rrdcalc.h \ + database/rrdcalctemplate.c \ + database/rrdcalctemplate.h \ + database/rrddim.c \ + database/rrddimvar.c \ + database/rrddimvar.h \ + database/rrdfamily.c \ + database/rrdhost.c \ + database/rrd.c \ + database/rrd.h \ + database/rrdset.c \ + database/rrdsetvar.c \ + database/rrdsetvar.h \ + database/rrdvar.c \ + database/rrdvar.h \ + $(NULL) + +API_PLUGIN_FILES = \ + web/api/badges/web_buffer_svg.c \ + web/api/badges/web_buffer_svg.h \ + web/api/exporters/allmetrics.c \ + web/api/exporters/allmetrics.h \ + web/api/exporters/shell/allmetrics_shell.c \ + web/api/exporters/shell/allmetrics_shell.h \ + web/api/queries/average/average.c \ + web/api/queries/average/average.h \ + web/api/queries/des/des.c \ + web/api/queries/des/des.h \ + web/api/queries/incremental_sum/incremental_sum.c \ + web/api/queries/incremental_sum/incremental_sum.h \ + web/api/queries/max/max.c \ + web/api/queries/max/max.h \ + web/api/queries/median/median.c \ + web/api/queries/median/median.h \ + web/api/queries/min/min.c \ + web/api/queries/min/min.h \ + web/api/queries/query.c \ + web/api/queries/query.h \ + web/api/queries/rrdr.c \ + web/api/queries/rrdr.h \ + web/api/queries/ses/ses.c \ + web/api/queries/ses/ses.h \ + web/api/queries/stddev/stddev.c \ + web/api/queries/stddev/stddev.h \ + web/api/queries/sum/sum.c \ + web/api/queries/sum/sum.h \ + web/api/formatters/rrd2json.c \ + web/api/formatters/rrd2json.h \ + web/api/formatters/csv/csv.c \ + web/api/formatters/csv/csv.h \ + web/api/formatters/json/json.c \ + web/api/formatters/json/json.h \ + web/api/formatters/ssv/ssv.c \ + 
web/api/formatters/ssv/ssv.h \ + web/api/formatters/value/value.c \ + web/api/formatters/value/value.h \ + web/api/formatters/json_wrapper.c \ + web/api/formatters/json_wrapper.h \ + web/api/formatters/charts2json.c \ + web/api/formatters/charts2json.h \ + web/api/formatters/rrdset2json.c \ + web/api/formatters/rrdset2json.h \ + web/api/web_api_v1.c \ + web/api/web_api_v1.h \ + $(NULL) + +STREAMING_PLUGIN_FILES = \ + streaming/rrdpush.c \ + streaming/rrdpush.h \ + $(NULL) + +REGISTRY_PLUGIN_FILES = \ + registry/registry.c \ + registry/registry.h \ + registry/registry_db.c \ + registry/registry_init.c \ + registry/registry_internals.c \ + registry/registry_internals.h \ + registry/registry_log.c \ + registry/registry_machine.c \ + registry/registry_machine.h \ + registry/registry_person.c \ + registry/registry_person.h \ + registry/registry_url.c \ + registry/registry_url.h \ + $(NULL) + +STATSD_PLUGIN_FILES = \ + collectors/statsd.plugin/statsd.c \ + collectors/statsd.plugin/statsd.h \ + $(NULL) + +WEB_PLUGIN_FILES = \ + web/server/web_client.c \ + web/server/web_client.h \ + web/server/web_server.c \ + web/server/web_server.h \ + web/server/web_client_cache.c \ + web/server/web_client_cache.h \ + web/server/single/single-threaded.c \ + web/server/single/single-threaded.h \ + web/server/multi/multi-threaded.c \ + web/server/multi/multi-threaded.h \ + web/server/static/static-threaded.c \ + web/server/static/static-threaded.h \ + $(NULL) + +BACKENDS_PLUGIN_FILES = \ + backends/backends.c \ + backends/backends.h \ + backends/graphite/graphite.c \ + backends/graphite/graphite.h \ + backends/json/json.c \ + backends/json/json.h \ + backends/opentsdb/opentsdb.c \ + backends/opentsdb/opentsdb.h \ + backends/prometheus/backend_prometheus.c \ + backends/prometheus/backend_prometheus.h \ + $(NULL) + +DAEMON_FILES = \ + daemon/common.c \ + daemon/common.h \ + daemon/daemon.c \ + daemon/daemon.h \ + daemon/global_statistics.c \ + daemon/global_statistics.h \ + daemon/main.c \ + daemon/main.h \ + daemon/signals.c \ + daemon/signals.h \ + daemon/unit_test.c \ + daemon/unit_test.h \ + $(NULL) + +NETDATA_FILES = collectors/all.h $(DAEMON_FILES) $(LIBNETDATA_FILES) \ + $(API_PLUGIN_FILES) $(BACKENDS_PLUGIN_FILES) \ + $(CHECKS_PLUGIN_FILES) $(HEALTH_PLUGIN_FILES) \ + $(IDLEJITTER_PLUGIN_FILES) $(PLUGINSD_PLUGIN_FILES) \ + $(REGISTRY_PLUGIN_FILES) $(RRD_PLUGIN_FILES) \ + $(STREAMING_PLUGIN_FILES) $(STATSD_PLUGIN_FILES) \ + $(WEB_PLUGIN_FILES) $(NULL) $(am__append_1) $(am__append_2) \ + $(am__append_3) +NETDATA_COMMON_LIBS = \ + $(OPTIONAL_MATH_LIBS) \ + $(OPTIONAL_ZLIB_LIBS) \ + $(OPTIONAL_UUID_LIBS) \ + $(NULL) + +netdata_SOURCES = $(NETDATA_FILES) +netdata_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_NFACCT_LIBS) \ + $(NULL) + +@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_SOURCES = $(APPS_PLUGIN_FILES) +@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_LDADD = \ +@ENABLE_PLUGIN_APPS_TRUE@ $(NETDATA_COMMON_LIBS) \ +@ENABLE_PLUGIN_APPS_TRUE@ $(OPTIONAL_LIBCAP_LIBS) \ +@ENABLE_PLUGIN_APPS_TRUE@ $(NULL) + +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_SOURCES = $(CGROUP_NETWORK_FILES) +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_LDADD = \ +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(NETDATA_COMMON_LIBS) \ +@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(NULL) + +@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_SOURCES = $(FREEIPMI_PLUGIN_FILES) +@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_LDADD = \ +@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(NETDATA_COMMON_LIBS) \ +@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(OPTIONAL_IPMIMONITORING_LIBS) \ 
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(NULL) + all: config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: +.SUFFIXES: .c .o .obj am--refresh: Makefile @: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @@ -488,6 +1296,1153 @@ distclean-hdr: -rm -f config.h stamp-h1 netdata.spec: $(top_builddir)/config.status $(srcdir)/netdata.spec.in cd $(top_builddir) && $(SHELL) ./config.status $@ +install-pluginsPROGRAMS: $(plugins_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-pluginsPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(pluginsdir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(pluginsdir)" && rm -f $$files + +clean-pluginsPROGRAMS: + -test -z "$(plugins_PROGRAMS)" || rm -f $(plugins_PROGRAMS) +install-sbinPROGRAMS: $(sbin_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-sbinPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(sbindir)' && rm 
-f" $$files ")"; \ + cd "$(DESTDIR)$(sbindir)" && rm -f $$files + +clean-sbinPROGRAMS: + -test -z "$(sbin_PROGRAMS)" || rm -f $(sbin_PROGRAMS) +collectors/apps.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/apps.plugin + @: > collectors/apps.plugin/$(am__dirstamp) +collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/apps.plugin/$(DEPDIR) + @: > collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/apps.plugin/apps_plugin.$(OBJEXT): \ + collectors/apps.plugin/$(am__dirstamp) \ + collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp) +libnetdata/adaptive_resortable_list/$(am__dirstamp): + @$(MKDIR_P) libnetdata/adaptive_resortable_list + @: > libnetdata/adaptive_resortable_list/$(am__dirstamp) +libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/adaptive_resortable_list/$(DEPDIR) + @: > libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp) +libnetdata/adaptive_resortable_list/adaptive_resortable_list.$(OBJEXT): \ + libnetdata/adaptive_resortable_list/$(am__dirstamp) \ + libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp) +libnetdata/config/$(am__dirstamp): + @$(MKDIR_P) libnetdata/config + @: > libnetdata/config/$(am__dirstamp) +libnetdata/config/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/config/$(DEPDIR) + @: > libnetdata/config/$(DEPDIR)/$(am__dirstamp) +libnetdata/config/appconfig.$(OBJEXT): \ + libnetdata/config/$(am__dirstamp) \ + libnetdata/config/$(DEPDIR)/$(am__dirstamp) +libnetdata/avl/$(am__dirstamp): + @$(MKDIR_P) libnetdata/avl + @: > libnetdata/avl/$(am__dirstamp) +libnetdata/avl/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/avl/$(DEPDIR) + @: > libnetdata/avl/$(DEPDIR)/$(am__dirstamp) +libnetdata/avl/avl.$(OBJEXT): libnetdata/avl/$(am__dirstamp) \ + libnetdata/avl/$(DEPDIR)/$(am__dirstamp) +libnetdata/buffer/$(am__dirstamp): + @$(MKDIR_P) libnetdata/buffer + @: > libnetdata/buffer/$(am__dirstamp) +libnetdata/buffer/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/buffer/$(DEPDIR) + @: > libnetdata/buffer/$(DEPDIR)/$(am__dirstamp) +libnetdata/buffer/buffer.$(OBJEXT): libnetdata/buffer/$(am__dirstamp) \ + libnetdata/buffer/$(DEPDIR)/$(am__dirstamp) +libnetdata/clocks/$(am__dirstamp): + @$(MKDIR_P) libnetdata/clocks + @: > libnetdata/clocks/$(am__dirstamp) +libnetdata/clocks/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/clocks/$(DEPDIR) + @: > libnetdata/clocks/$(DEPDIR)/$(am__dirstamp) +libnetdata/clocks/clocks.$(OBJEXT): libnetdata/clocks/$(am__dirstamp) \ + libnetdata/clocks/$(DEPDIR)/$(am__dirstamp) +libnetdata/dictionary/$(am__dirstamp): + @$(MKDIR_P) libnetdata/dictionary + @: > libnetdata/dictionary/$(am__dirstamp) +libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/dictionary/$(DEPDIR) + @: > libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp) +libnetdata/dictionary/dictionary.$(OBJEXT): \ + libnetdata/dictionary/$(am__dirstamp) \ + libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp) +libnetdata/eval/$(am__dirstamp): + @$(MKDIR_P) libnetdata/eval + @: > libnetdata/eval/$(am__dirstamp) +libnetdata/eval/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/eval/$(DEPDIR) + @: > libnetdata/eval/$(DEPDIR)/$(am__dirstamp) +libnetdata/eval/eval.$(OBJEXT): libnetdata/eval/$(am__dirstamp) \ + libnetdata/eval/$(DEPDIR)/$(am__dirstamp) +libnetdata/$(am__dirstamp): + @$(MKDIR_P) libnetdata + @: > libnetdata/$(am__dirstamp) +libnetdata/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/$(DEPDIR) + @: > libnetdata/$(DEPDIR)/$(am__dirstamp) 
+libnetdata/libnetdata.$(OBJEXT): libnetdata/$(am__dirstamp) \ + libnetdata/$(DEPDIR)/$(am__dirstamp) +libnetdata/locks/$(am__dirstamp): + @$(MKDIR_P) libnetdata/locks + @: > libnetdata/locks/$(am__dirstamp) +libnetdata/locks/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/locks/$(DEPDIR) + @: > libnetdata/locks/$(DEPDIR)/$(am__dirstamp) +libnetdata/locks/locks.$(OBJEXT): libnetdata/locks/$(am__dirstamp) \ + libnetdata/locks/$(DEPDIR)/$(am__dirstamp) +libnetdata/log/$(am__dirstamp): + @$(MKDIR_P) libnetdata/log + @: > libnetdata/log/$(am__dirstamp) +libnetdata/log/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/log/$(DEPDIR) + @: > libnetdata/log/$(DEPDIR)/$(am__dirstamp) +libnetdata/log/log.$(OBJEXT): libnetdata/log/$(am__dirstamp) \ + libnetdata/log/$(DEPDIR)/$(am__dirstamp) +libnetdata/popen/$(am__dirstamp): + @$(MKDIR_P) libnetdata/popen + @: > libnetdata/popen/$(am__dirstamp) +libnetdata/popen/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/popen/$(DEPDIR) + @: > libnetdata/popen/$(DEPDIR)/$(am__dirstamp) +libnetdata/popen/popen.$(OBJEXT): libnetdata/popen/$(am__dirstamp) \ + libnetdata/popen/$(DEPDIR)/$(am__dirstamp) +libnetdata/procfile/$(am__dirstamp): + @$(MKDIR_P) libnetdata/procfile + @: > libnetdata/procfile/$(am__dirstamp) +libnetdata/procfile/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/procfile/$(DEPDIR) + @: > libnetdata/procfile/$(DEPDIR)/$(am__dirstamp) +libnetdata/procfile/procfile.$(OBJEXT): \ + libnetdata/procfile/$(am__dirstamp) \ + libnetdata/procfile/$(DEPDIR)/$(am__dirstamp) +libnetdata/os.$(OBJEXT): libnetdata/$(am__dirstamp) \ + libnetdata/$(DEPDIR)/$(am__dirstamp) +libnetdata/simple_pattern/$(am__dirstamp): + @$(MKDIR_P) libnetdata/simple_pattern + @: > libnetdata/simple_pattern/$(am__dirstamp) +libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/simple_pattern/$(DEPDIR) + @: > libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp) +libnetdata/simple_pattern/simple_pattern.$(OBJEXT): \ + libnetdata/simple_pattern/$(am__dirstamp) \ + libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp) +libnetdata/socket/$(am__dirstamp): + @$(MKDIR_P) libnetdata/socket + @: > libnetdata/socket/$(am__dirstamp) +libnetdata/socket/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/socket/$(DEPDIR) + @: > libnetdata/socket/$(DEPDIR)/$(am__dirstamp) +libnetdata/socket/socket.$(OBJEXT): libnetdata/socket/$(am__dirstamp) \ + libnetdata/socket/$(DEPDIR)/$(am__dirstamp) +libnetdata/statistical/$(am__dirstamp): + @$(MKDIR_P) libnetdata/statistical + @: > libnetdata/statistical/$(am__dirstamp) +libnetdata/statistical/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/statistical/$(DEPDIR) + @: > libnetdata/statistical/$(DEPDIR)/$(am__dirstamp) +libnetdata/statistical/statistical.$(OBJEXT): \ + libnetdata/statistical/$(am__dirstamp) \ + libnetdata/statistical/$(DEPDIR)/$(am__dirstamp) +libnetdata/storage_number/$(am__dirstamp): + @$(MKDIR_P) libnetdata/storage_number + @: > libnetdata/storage_number/$(am__dirstamp) +libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/storage_number/$(DEPDIR) + @: > libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp) +libnetdata/storage_number/storage_number.$(OBJEXT): \ + libnetdata/storage_number/$(am__dirstamp) \ + libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp) +libnetdata/threads/$(am__dirstamp): + @$(MKDIR_P) libnetdata/threads + @: > libnetdata/threads/$(am__dirstamp) +libnetdata/threads/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/threads/$(DEPDIR) + @: > 
libnetdata/threads/$(DEPDIR)/$(am__dirstamp) +libnetdata/threads/threads.$(OBJEXT): \ + libnetdata/threads/$(am__dirstamp) \ + libnetdata/threads/$(DEPDIR)/$(am__dirstamp) +libnetdata/url/$(am__dirstamp): + @$(MKDIR_P) libnetdata/url + @: > libnetdata/url/$(am__dirstamp) +libnetdata/url/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) libnetdata/url/$(DEPDIR) + @: > libnetdata/url/$(DEPDIR)/$(am__dirstamp) +libnetdata/url/url.$(OBJEXT): libnetdata/url/$(am__dirstamp) \ + libnetdata/url/$(DEPDIR)/$(am__dirstamp) + +apps.plugin$(EXEEXT): $(apps_plugin_OBJECTS) $(apps_plugin_DEPENDENCIES) $(EXTRA_apps_plugin_DEPENDENCIES) + @rm -f apps.plugin$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS) +collectors/cgroups.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/cgroups.plugin + @: > collectors/cgroups.plugin/$(am__dirstamp) +collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/cgroups.plugin/$(DEPDIR) + @: > collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/cgroups.plugin/cgroup-network.$(OBJEXT): \ + collectors/cgroups.plugin/$(am__dirstamp) \ + collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) + +cgroup-network$(EXEEXT): $(cgroup_network_OBJECTS) $(cgroup_network_DEPENDENCIES) $(EXTRA_cgroup_network_DEPENDENCIES) + @rm -f cgroup-network$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(cgroup_network_OBJECTS) $(cgroup_network_LDADD) $(LIBS) +collectors/freeipmi.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/freeipmi.plugin + @: > collectors/freeipmi.plugin/$(am__dirstamp) +collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/freeipmi.plugin/$(DEPDIR) + @: > collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freeipmi.plugin/freeipmi_plugin.$(OBJEXT): \ + collectors/freeipmi.plugin/$(am__dirstamp) \ + collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp) + +freeipmi.plugin$(EXEEXT): $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_DEPENDENCIES) $(EXTRA_freeipmi_plugin_DEPENDENCIES) + @rm -f freeipmi.plugin$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS) +daemon/$(am__dirstamp): + @$(MKDIR_P) daemon + @: > daemon/$(am__dirstamp) +daemon/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) daemon/$(DEPDIR) + @: > daemon/$(DEPDIR)/$(am__dirstamp) +daemon/common.$(OBJEXT): daemon/$(am__dirstamp) \ + daemon/$(DEPDIR)/$(am__dirstamp) +daemon/daemon.$(OBJEXT): daemon/$(am__dirstamp) \ + daemon/$(DEPDIR)/$(am__dirstamp) +daemon/global_statistics.$(OBJEXT): daemon/$(am__dirstamp) \ + daemon/$(DEPDIR)/$(am__dirstamp) +daemon/main.$(OBJEXT): daemon/$(am__dirstamp) \ + daemon/$(DEPDIR)/$(am__dirstamp) +daemon/signals.$(OBJEXT): daemon/$(am__dirstamp) \ + daemon/$(DEPDIR)/$(am__dirstamp) +daemon/unit_test.$(OBJEXT): daemon/$(am__dirstamp) \ + daemon/$(DEPDIR)/$(am__dirstamp) +web/api/badges/$(am__dirstamp): + @$(MKDIR_P) web/api/badges + @: > web/api/badges/$(am__dirstamp) +web/api/badges/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/badges/$(DEPDIR) + @: > web/api/badges/$(DEPDIR)/$(am__dirstamp) +web/api/badges/web_buffer_svg.$(OBJEXT): \ + web/api/badges/$(am__dirstamp) \ + web/api/badges/$(DEPDIR)/$(am__dirstamp) +web/api/exporters/$(am__dirstamp): + @$(MKDIR_P) web/api/exporters + @: > web/api/exporters/$(am__dirstamp) +web/api/exporters/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/exporters/$(DEPDIR) + @: > web/api/exporters/$(DEPDIR)/$(am__dirstamp) +web/api/exporters/allmetrics.$(OBJEXT): \ + web/api/exporters/$(am__dirstamp) \ + web/api/exporters/$(DEPDIR)/$(am__dirstamp) 
+web/api/exporters/shell/$(am__dirstamp): + @$(MKDIR_P) web/api/exporters/shell + @: > web/api/exporters/shell/$(am__dirstamp) +web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/exporters/shell/$(DEPDIR) + @: > web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp) +web/api/exporters/shell/allmetrics_shell.$(OBJEXT): \ + web/api/exporters/shell/$(am__dirstamp) \ + web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp) +web/api/queries/average/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/average + @: > web/api/queries/average/$(am__dirstamp) +web/api/queries/average/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/average/$(DEPDIR) + @: > web/api/queries/average/$(DEPDIR)/$(am__dirstamp) +web/api/queries/average/average.$(OBJEXT): \ + web/api/queries/average/$(am__dirstamp) \ + web/api/queries/average/$(DEPDIR)/$(am__dirstamp) +web/api/queries/des/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/des + @: > web/api/queries/des/$(am__dirstamp) +web/api/queries/des/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/des/$(DEPDIR) + @: > web/api/queries/des/$(DEPDIR)/$(am__dirstamp) +web/api/queries/des/des.$(OBJEXT): \ + web/api/queries/des/$(am__dirstamp) \ + web/api/queries/des/$(DEPDIR)/$(am__dirstamp) +web/api/queries/incremental_sum/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/incremental_sum + @: > web/api/queries/incremental_sum/$(am__dirstamp) +web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/incremental_sum/$(DEPDIR) + @: > web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp) +web/api/queries/incremental_sum/incremental_sum.$(OBJEXT): \ + web/api/queries/incremental_sum/$(am__dirstamp) \ + web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp) +web/api/queries/max/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/max + @: > web/api/queries/max/$(am__dirstamp) +web/api/queries/max/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/max/$(DEPDIR) + @: > web/api/queries/max/$(DEPDIR)/$(am__dirstamp) +web/api/queries/max/max.$(OBJEXT): \ + web/api/queries/max/$(am__dirstamp) \ + web/api/queries/max/$(DEPDIR)/$(am__dirstamp) +web/api/queries/median/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/median + @: > web/api/queries/median/$(am__dirstamp) +web/api/queries/median/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/median/$(DEPDIR) + @: > web/api/queries/median/$(DEPDIR)/$(am__dirstamp) +web/api/queries/median/median.$(OBJEXT): \ + web/api/queries/median/$(am__dirstamp) \ + web/api/queries/median/$(DEPDIR)/$(am__dirstamp) +web/api/queries/min/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/min + @: > web/api/queries/min/$(am__dirstamp) +web/api/queries/min/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/min/$(DEPDIR) + @: > web/api/queries/min/$(DEPDIR)/$(am__dirstamp) +web/api/queries/min/min.$(OBJEXT): \ + web/api/queries/min/$(am__dirstamp) \ + web/api/queries/min/$(DEPDIR)/$(am__dirstamp) +web/api/queries/$(am__dirstamp): + @$(MKDIR_P) web/api/queries + @: > web/api/queries/$(am__dirstamp) +web/api/queries/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/$(DEPDIR) + @: > web/api/queries/$(DEPDIR)/$(am__dirstamp) +web/api/queries/query.$(OBJEXT): web/api/queries/$(am__dirstamp) \ + web/api/queries/$(DEPDIR)/$(am__dirstamp) +web/api/queries/rrdr.$(OBJEXT): web/api/queries/$(am__dirstamp) \ + web/api/queries/$(DEPDIR)/$(am__dirstamp) +web/api/queries/ses/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/ses + @: > web/api/queries/ses/$(am__dirstamp) 
+web/api/queries/ses/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/ses/$(DEPDIR) + @: > web/api/queries/ses/$(DEPDIR)/$(am__dirstamp) +web/api/queries/ses/ses.$(OBJEXT): \ + web/api/queries/ses/$(am__dirstamp) \ + web/api/queries/ses/$(DEPDIR)/$(am__dirstamp) +web/api/queries/stddev/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/stddev + @: > web/api/queries/stddev/$(am__dirstamp) +web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/stddev/$(DEPDIR) + @: > web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp) +web/api/queries/stddev/stddev.$(OBJEXT): \ + web/api/queries/stddev/$(am__dirstamp) \ + web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp) +web/api/queries/sum/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/sum + @: > web/api/queries/sum/$(am__dirstamp) +web/api/queries/sum/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/queries/sum/$(DEPDIR) + @: > web/api/queries/sum/$(DEPDIR)/$(am__dirstamp) +web/api/queries/sum/sum.$(OBJEXT): \ + web/api/queries/sum/$(am__dirstamp) \ + web/api/queries/sum/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters + @: > web/api/formatters/$(am__dirstamp) +web/api/formatters/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/$(DEPDIR) + @: > web/api/formatters/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/rrd2json.$(OBJEXT): \ + web/api/formatters/$(am__dirstamp) \ + web/api/formatters/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/csv/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/csv + @: > web/api/formatters/csv/$(am__dirstamp) +web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/csv/$(DEPDIR) + @: > web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/csv/csv.$(OBJEXT): \ + web/api/formatters/csv/$(am__dirstamp) \ + web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/json/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/json + @: > web/api/formatters/json/$(am__dirstamp) +web/api/formatters/json/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/json/$(DEPDIR) + @: > web/api/formatters/json/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/json/json.$(OBJEXT): \ + web/api/formatters/json/$(am__dirstamp) \ + web/api/formatters/json/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/ssv/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/ssv + @: > web/api/formatters/ssv/$(am__dirstamp) +web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/ssv/$(DEPDIR) + @: > web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/ssv/ssv.$(OBJEXT): \ + web/api/formatters/ssv/$(am__dirstamp) \ + web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/value/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/value + @: > web/api/formatters/value/$(am__dirstamp) +web/api/formatters/value/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/formatters/value/$(DEPDIR) + @: > web/api/formatters/value/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/value/value.$(OBJEXT): \ + web/api/formatters/value/$(am__dirstamp) \ + web/api/formatters/value/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/json_wrapper.$(OBJEXT): \ + web/api/formatters/$(am__dirstamp) \ + web/api/formatters/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/charts2json.$(OBJEXT): \ + web/api/formatters/$(am__dirstamp) \ + web/api/formatters/$(DEPDIR)/$(am__dirstamp) +web/api/formatters/rrdset2json.$(OBJEXT): \ + web/api/formatters/$(am__dirstamp) \ + web/api/formatters/$(DEPDIR)/$(am__dirstamp) 
+web/api/$(am__dirstamp): + @$(MKDIR_P) web/api + @: > web/api/$(am__dirstamp) +web/api/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/api/$(DEPDIR) + @: > web/api/$(DEPDIR)/$(am__dirstamp) +web/api/web_api_v1.$(OBJEXT): web/api/$(am__dirstamp) \ + web/api/$(DEPDIR)/$(am__dirstamp) +backends/$(am__dirstamp): + @$(MKDIR_P) backends + @: > backends/$(am__dirstamp) +backends/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) backends/$(DEPDIR) + @: > backends/$(DEPDIR)/$(am__dirstamp) +backends/backends.$(OBJEXT): backends/$(am__dirstamp) \ + backends/$(DEPDIR)/$(am__dirstamp) +backends/graphite/$(am__dirstamp): + @$(MKDIR_P) backends/graphite + @: > backends/graphite/$(am__dirstamp) +backends/graphite/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) backends/graphite/$(DEPDIR) + @: > backends/graphite/$(DEPDIR)/$(am__dirstamp) +backends/graphite/graphite.$(OBJEXT): \ + backends/graphite/$(am__dirstamp) \ + backends/graphite/$(DEPDIR)/$(am__dirstamp) +backends/json/$(am__dirstamp): + @$(MKDIR_P) backends/json + @: > backends/json/$(am__dirstamp) +backends/json/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) backends/json/$(DEPDIR) + @: > backends/json/$(DEPDIR)/$(am__dirstamp) +backends/json/json.$(OBJEXT): backends/json/$(am__dirstamp) \ + backends/json/$(DEPDIR)/$(am__dirstamp) +backends/opentsdb/$(am__dirstamp): + @$(MKDIR_P) backends/opentsdb + @: > backends/opentsdb/$(am__dirstamp) +backends/opentsdb/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) backends/opentsdb/$(DEPDIR) + @: > backends/opentsdb/$(DEPDIR)/$(am__dirstamp) +backends/opentsdb/opentsdb.$(OBJEXT): \ + backends/opentsdb/$(am__dirstamp) \ + backends/opentsdb/$(DEPDIR)/$(am__dirstamp) +backends/prometheus/$(am__dirstamp): + @$(MKDIR_P) backends/prometheus + @: > backends/prometheus/$(am__dirstamp) +backends/prometheus/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) backends/prometheus/$(DEPDIR) + @: > backends/prometheus/$(DEPDIR)/$(am__dirstamp) +backends/prometheus/backend_prometheus.$(OBJEXT): \ + backends/prometheus/$(am__dirstamp) \ + backends/prometheus/$(DEPDIR)/$(am__dirstamp) +collectors/checks.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/checks.plugin + @: > collectors/checks.plugin/$(am__dirstamp) +collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/checks.plugin/$(DEPDIR) + @: > collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/checks.plugin/plugin_checks.$(OBJEXT): \ + collectors/checks.plugin/$(am__dirstamp) \ + collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp) +health/$(am__dirstamp): + @$(MKDIR_P) health + @: > health/$(am__dirstamp) +health/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) health/$(DEPDIR) + @: > health/$(DEPDIR)/$(am__dirstamp) +health/health.$(OBJEXT): health/$(am__dirstamp) \ + health/$(DEPDIR)/$(am__dirstamp) +health/health_config.$(OBJEXT): health/$(am__dirstamp) \ + health/$(DEPDIR)/$(am__dirstamp) +health/health_json.$(OBJEXT): health/$(am__dirstamp) \ + health/$(DEPDIR)/$(am__dirstamp) +health/health_log.$(OBJEXT): health/$(am__dirstamp) \ + health/$(DEPDIR)/$(am__dirstamp) +collectors/idlejitter.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/idlejitter.plugin + @: > collectors/idlejitter.plugin/$(am__dirstamp) +collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/idlejitter.plugin/$(DEPDIR) + @: > collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/idlejitter.plugin/plugin_idlejitter.$(OBJEXT): \ + collectors/idlejitter.plugin/$(am__dirstamp) \ + collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/plugins.d/$(am__dirstamp): + 
@$(MKDIR_P) collectors/plugins.d + @: > collectors/plugins.d/$(am__dirstamp) +collectors/plugins.d/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/plugins.d/$(DEPDIR) + @: > collectors/plugins.d/$(DEPDIR)/$(am__dirstamp) +collectors/plugins.d/plugins_d.$(OBJEXT): \ + collectors/plugins.d/$(am__dirstamp) \ + collectors/plugins.d/$(DEPDIR)/$(am__dirstamp) +registry/$(am__dirstamp): + @$(MKDIR_P) registry + @: > registry/$(am__dirstamp) +registry/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) registry/$(DEPDIR) + @: > registry/$(DEPDIR)/$(am__dirstamp) +registry/registry.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_db.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_init.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_internals.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_log.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_machine.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_person.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +registry/registry_url.$(OBJEXT): registry/$(am__dirstamp) \ + registry/$(DEPDIR)/$(am__dirstamp) +database/$(am__dirstamp): + @$(MKDIR_P) database + @: > database/$(am__dirstamp) +database/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) database/$(DEPDIR) + @: > database/$(DEPDIR)/$(am__dirstamp) +database/rrdcalc.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrdcalctemplate.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrddim.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrddimvar.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrdfamily.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrdhost.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrd.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrdset.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrdsetvar.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +database/rrdvar.$(OBJEXT): database/$(am__dirstamp) \ + database/$(DEPDIR)/$(am__dirstamp) +streaming/$(am__dirstamp): + @$(MKDIR_P) streaming + @: > streaming/$(am__dirstamp) +streaming/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) streaming/$(DEPDIR) + @: > streaming/$(DEPDIR)/$(am__dirstamp) +streaming/rrdpush.$(OBJEXT): streaming/$(am__dirstamp) \ + streaming/$(DEPDIR)/$(am__dirstamp) +collectors/statsd.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/statsd.plugin + @: > collectors/statsd.plugin/$(am__dirstamp) +collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/statsd.plugin/$(DEPDIR) + @: > collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/statsd.plugin/statsd.$(OBJEXT): \ + collectors/statsd.plugin/$(am__dirstamp) \ + collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp) +web/server/$(am__dirstamp): + @$(MKDIR_P) web/server + @: > web/server/$(am__dirstamp) +web/server/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/server/$(DEPDIR) + @: > web/server/$(DEPDIR)/$(am__dirstamp) +web/server/web_client.$(OBJEXT): web/server/$(am__dirstamp) \ + web/server/$(DEPDIR)/$(am__dirstamp) +web/server/web_server.$(OBJEXT): 
web/server/$(am__dirstamp) \ + web/server/$(DEPDIR)/$(am__dirstamp) +web/server/web_client_cache.$(OBJEXT): web/server/$(am__dirstamp) \ + web/server/$(DEPDIR)/$(am__dirstamp) +web/server/single/$(am__dirstamp): + @$(MKDIR_P) web/server/single + @: > web/server/single/$(am__dirstamp) +web/server/single/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/server/single/$(DEPDIR) + @: > web/server/single/$(DEPDIR)/$(am__dirstamp) +web/server/single/single-threaded.$(OBJEXT): \ + web/server/single/$(am__dirstamp) \ + web/server/single/$(DEPDIR)/$(am__dirstamp) +web/server/multi/$(am__dirstamp): + @$(MKDIR_P) web/server/multi + @: > web/server/multi/$(am__dirstamp) +web/server/multi/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/server/multi/$(DEPDIR) + @: > web/server/multi/$(DEPDIR)/$(am__dirstamp) +web/server/multi/multi-threaded.$(OBJEXT): \ + web/server/multi/$(am__dirstamp) \ + web/server/multi/$(DEPDIR)/$(am__dirstamp) +web/server/static/$(am__dirstamp): + @$(MKDIR_P) web/server/static + @: > web/server/static/$(am__dirstamp) +web/server/static/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) web/server/static/$(DEPDIR) + @: > web/server/static/$(DEPDIR)/$(am__dirstamp) +web/server/static/static-threaded.$(OBJEXT): \ + web/server/static/$(am__dirstamp) \ + web/server/static/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/freebsd.plugin + @: > collectors/freebsd.plugin/$(am__dirstamp) +collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/freebsd.plugin/$(DEPDIR) + @: > collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/plugin_freebsd.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/freebsd_sysctl.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/freebsd_getmntinfo.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/freebsd_getifaddrs.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/freebsd_devstat.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/freebsd_kstat_zfs.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/freebsd.plugin/freebsd_ipfw.$(OBJEXT): \ + collectors/freebsd.plugin/$(am__dirstamp) \ + collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/proc.plugin + @: > collectors/proc.plugin/$(am__dirstamp) +collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/proc.plugin/$(DEPDIR) + @: > collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/zfs_common.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/macos.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/macos.plugin + @: > collectors/macos.plugin/$(am__dirstamp) +collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/macos.plugin/$(DEPDIR) + @: > collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/macos.plugin/plugin_macos.$(OBJEXT): \ + collectors/macos.plugin/$(am__dirstamp) \ + collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) 
+collectors/macos.plugin/macos_sysctl.$(OBJEXT): \ + collectors/macos.plugin/$(am__dirstamp) \ + collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/macos.plugin/macos_mach_smi.$(OBJEXT): \ + collectors/macos.plugin/$(am__dirstamp) \ + collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/macos.plugin/macos_fw.$(OBJEXT): \ + collectors/macos.plugin/$(am__dirstamp) \ + collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/cgroups.plugin/sys_fs_cgroup.$(OBJEXT): \ + collectors/cgroups.plugin/$(am__dirstamp) \ + collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/diskspace.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/diskspace.plugin + @: > collectors/diskspace.plugin/$(am__dirstamp) +collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/diskspace.plugin/$(DEPDIR) + @: > collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/diskspace.plugin/plugin_diskspace.$(OBJEXT): \ + collectors/diskspace.plugin/$(am__dirstamp) \ + collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/nfacct.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/nfacct.plugin + @: > collectors/nfacct.plugin/$(am__dirstamp) +collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/nfacct.plugin/$(DEPDIR) + @: > collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/nfacct.plugin/plugin_nfacct.$(OBJEXT): \ + collectors/nfacct.plugin/$(am__dirstamp) \ + collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/ipc.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/plugin_proc.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_diskstats.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_interrupts.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_softirqs.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_loadavg.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_meminfo.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_dev.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_ip_vs_stats.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_netstat.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_rpc_nfs.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_rpc_nfsd.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_snmp.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_snmp6.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) 
+collectors/proc.plugin/proc_net_sctp_snmp.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_sockstat.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_sockstat6.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_softnet_stat.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_stat_conntrack.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_net_stat_synproxy.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_self_mountinfo.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_spl_kstat_zfs.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_stat.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_vmstat.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/proc_uptime.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/sys_kernel_mm_ksm.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/sys_devices_system_edac_mc.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/sys_devices_system_node.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/proc.plugin/sys_fs_btrfs.$(OBJEXT): \ + collectors/proc.plugin/$(am__dirstamp) \ + collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/tc.plugin/$(am__dirstamp): + @$(MKDIR_P) collectors/tc.plugin + @: > collectors/tc.plugin/$(am__dirstamp) +collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) collectors/tc.plugin/$(DEPDIR) + @: > collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp) +collectors/tc.plugin/plugin_tc.$(OBJEXT): \ + collectors/tc.plugin/$(am__dirstamp) \ + collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp) + +netdata$(EXEEXT): $(netdata_OBJECTS) $(netdata_DEPENDENCIES) $(EXTRA_netdata_DEPENDENCIES) + @rm -f netdata$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(netdata_OBJECTS) $(netdata_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f backends/*.$(OBJEXT) + -rm -f backends/graphite/*.$(OBJEXT) + -rm -f backends/json/*.$(OBJEXT) + -rm -f backends/opentsdb/*.$(OBJEXT) + -rm -f backends/prometheus/*.$(OBJEXT) + -rm -f collectors/apps.plugin/*.$(OBJEXT) + -rm -f collectors/cgroups.plugin/*.$(OBJEXT) + -rm -f collectors/checks.plugin/*.$(OBJEXT) + -rm -f collectors/diskspace.plugin/*.$(OBJEXT) + -rm -f collectors/freebsd.plugin/*.$(OBJEXT) + -rm -f collectors/freeipmi.plugin/*.$(OBJEXT) + -rm -f 
collectors/idlejitter.plugin/*.$(OBJEXT) + -rm -f collectors/macos.plugin/*.$(OBJEXT) + -rm -f collectors/nfacct.plugin/*.$(OBJEXT) + -rm -f collectors/plugins.d/*.$(OBJEXT) + -rm -f collectors/proc.plugin/*.$(OBJEXT) + -rm -f collectors/statsd.plugin/*.$(OBJEXT) + -rm -f collectors/tc.plugin/*.$(OBJEXT) + -rm -f daemon/*.$(OBJEXT) + -rm -f database/*.$(OBJEXT) + -rm -f health/*.$(OBJEXT) + -rm -f libnetdata/*.$(OBJEXT) + -rm -f libnetdata/adaptive_resortable_list/*.$(OBJEXT) + -rm -f libnetdata/avl/*.$(OBJEXT) + -rm -f libnetdata/buffer/*.$(OBJEXT) + -rm -f libnetdata/clocks/*.$(OBJEXT) + -rm -f libnetdata/config/*.$(OBJEXT) + -rm -f libnetdata/dictionary/*.$(OBJEXT) + -rm -f libnetdata/eval/*.$(OBJEXT) + -rm -f libnetdata/locks/*.$(OBJEXT) + -rm -f libnetdata/log/*.$(OBJEXT) + -rm -f libnetdata/popen/*.$(OBJEXT) + -rm -f libnetdata/procfile/*.$(OBJEXT) + -rm -f libnetdata/simple_pattern/*.$(OBJEXT) + -rm -f libnetdata/socket/*.$(OBJEXT) + -rm -f libnetdata/statistical/*.$(OBJEXT) + -rm -f libnetdata/storage_number/*.$(OBJEXT) + -rm -f libnetdata/threads/*.$(OBJEXT) + -rm -f libnetdata/url/*.$(OBJEXT) + -rm -f registry/*.$(OBJEXT) + -rm -f streaming/*.$(OBJEXT) + -rm -f web/api/*.$(OBJEXT) + -rm -f web/api/badges/*.$(OBJEXT) + -rm -f web/api/exporters/*.$(OBJEXT) + -rm -f web/api/exporters/shell/*.$(OBJEXT) + -rm -f web/api/formatters/*.$(OBJEXT) + -rm -f web/api/formatters/csv/*.$(OBJEXT) + -rm -f web/api/formatters/json/*.$(OBJEXT) + -rm -f web/api/formatters/ssv/*.$(OBJEXT) + -rm -f web/api/formatters/value/*.$(OBJEXT) + -rm -f web/api/queries/*.$(OBJEXT) + -rm -f web/api/queries/average/*.$(OBJEXT) + -rm -f web/api/queries/des/*.$(OBJEXT) + -rm -f web/api/queries/incremental_sum/*.$(OBJEXT) + -rm -f web/api/queries/max/*.$(OBJEXT) + -rm -f web/api/queries/median/*.$(OBJEXT) + -rm -f web/api/queries/min/*.$(OBJEXT) + -rm -f web/api/queries/ses/*.$(OBJEXT) + -rm -f web/api/queries/stddev/*.$(OBJEXT) + -rm -f web/api/queries/sum/*.$(OBJEXT) + -rm -f web/server/*.$(OBJEXT) + -rm -f web/server/multi/*.$(OBJEXT) + -rm -f web/server/single/*.$(OBJEXT) + -rm -f web/server/static/*.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@backends/$(DEPDIR)/backends.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@backends/graphite/$(DEPDIR)/graphite.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@backends/json/$(DEPDIR)/json.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@backends/opentsdb/$(DEPDIR)/opentsdb.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@backends/prometheus/$(DEPDIR)/backend_prometheus.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/apps.plugin/$(DEPDIR)/apps_plugin.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/cgroups.plugin/$(DEPDIR)/cgroup-network.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/cgroups.plugin/$(DEPDIR)/sys_fs_cgroup.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/checks.plugin/$(DEPDIR)/plugin_checks.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/diskspace.plugin/$(DEPDIR)/plugin_diskspace.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_devstat.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_getifaddrs.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_getmntinfo.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_ipfw.Po@am__quote@ 
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_kstat_zfs.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_sysctl.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/plugin_freebsd.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/freeipmi.plugin/$(DEPDIR)/freeipmi_plugin.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/idlejitter.plugin/$(DEPDIR)/plugin_idlejitter.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_fw.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_mach_smi.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_sysctl.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/plugin_macos.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/nfacct.plugin/$(DEPDIR)/plugin_nfacct.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/plugins.d/$(DEPDIR)/plugins_d.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/ipc.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/plugin_proc.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_diskstats.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_interrupts.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_loadavg.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_meminfo.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_dev.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_ip_vs_stats.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_netstat.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_rpc_nfs.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_rpc_nfsd.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sctp_snmp.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_snmp.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_snmp6.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sockstat.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sockstat6.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_softnet_stat.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_stat_conntrack.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_stat_synproxy.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_self_mountinfo.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_softirqs.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_spl_kstat_zfs.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_stat.Po@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@collectors/proc.plugin/$(DEPDIR)/proc_sys_kernel_random_entropy_avail.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_uptime.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_vmstat.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_devices_system_edac_mc.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_devices_system_node.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_fs_btrfs.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_kernel_mm_ksm.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/zfs_common.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/statsd.plugin/$(DEPDIR)/statsd.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@collectors/tc.plugin/$(DEPDIR)/plugin_tc.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/common.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/daemon.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/global_statistics.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/main.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/signals.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/unit_test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrd.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdcalc.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdcalctemplate.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrddim.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrddimvar.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdfamily.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdhost.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdset.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdsetvar.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdvar.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_config.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_json.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_log.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/$(DEPDIR)/libnetdata.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/$(DEPDIR)/os.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/adaptive_resortable_list/$(DEPDIR)/adaptive_resortable_list.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/avl/$(DEPDIR)/avl.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/buffer/$(DEPDIR)/buffer.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/clocks/$(DEPDIR)/clocks.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/config/$(DEPDIR)/appconfig.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/dictionary/$(DEPDIR)/dictionary.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/eval/$(DEPDIR)/eval.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/locks/$(DEPDIR)/locks.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/log/$(DEPDIR)/log.Po@am__quote@ 
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/popen/$(DEPDIR)/popen.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/procfile/$(DEPDIR)/procfile.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/simple_pattern/$(DEPDIR)/simple_pattern.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/socket/$(DEPDIR)/socket.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/statistical/$(DEPDIR)/statistical.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/storage_number/$(DEPDIR)/storage_number.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/threads/$(DEPDIR)/threads.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/url/$(DEPDIR)/url.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_db.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_init.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_internals.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_log.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_machine.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_person.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_url.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@streaming/$(DEPDIR)/rrdpush.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/$(DEPDIR)/web_api_v1.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/badges/$(DEPDIR)/web_buffer_svg.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/exporters/$(DEPDIR)/allmetrics.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/exporters/shell/$(DEPDIR)/allmetrics_shell.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/charts2json.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/json_wrapper.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/rrd2json.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/rrdset2json.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/csv/$(DEPDIR)/csv.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/json/$(DEPDIR)/json.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/ssv/$(DEPDIR)/ssv.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/value/$(DEPDIR)/value.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/$(DEPDIR)/query.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/$(DEPDIR)/rrdr.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/average/$(DEPDIR)/average.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/des/$(DEPDIR)/des.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/incremental_sum/$(DEPDIR)/incremental_sum.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/max/$(DEPDIR)/max.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/median/$(DEPDIR)/median.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/min/$(DEPDIR)/min.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/ses/$(DEPDIR)/ses.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/stddev/$(DEPDIR)/stddev.Po@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@web/api/queries/sum/$(DEPDIR)/sum.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_client.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_client_cache.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_server.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/server/multi/$(DEPDIR)/multi-threaded.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/server/single/$(DEPDIR)/single-threaded.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@web/server/static/$(DEPDIR)/static-threaded.Po@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< + +.c.obj: +@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` +install-dist_cacheDATA: $(dist_cache_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(cachedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(cachedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(cachedir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(cachedir)" || exit $$?; \ + done + +uninstall-dist_cacheDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(cachedir)'; $(am__uninstall_files_from_dir) +install-dist_logDATA: $(dist_log_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(logdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(logdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(logdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(logdir)" || exit $$?; \ + done + +uninstall-dist_logDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(logdir)'; $(am__uninstall_files_from_dir) +install-dist_registryDATA: $(dist_registry_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(registrydir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(registrydir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; 
else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(registrydir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(registrydir)" || exit $$?; \ + done + +uninstall-dist_registryDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(registrydir)'; $(am__uninstall_files_from_dir) +install-dist_varlibDATA: $(dist_varlib_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(varlibdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(varlibdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(varlibdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(varlibdir)" || exit $$?; \ + done + +uninstall-dist_varlibDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(varlibdir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. @@ -662,6 +2617,7 @@ distdir: $(DISTFILES) dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__post_remove_distdir) + dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 $(am__post_remove_distdir) @@ -669,6 +2625,7 @@ dist-bzip2: distdir dist-lzip: distdir tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__post_remove_distdir) + dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__post_remove_distdir) @@ -783,9 +2740,12 @@ distcleancheck: distclean exit 1; } >&2 check-am: all-am check: check-recursive -all-am: Makefile $(SCRIPTS) $(DATA) config.h +all-am: Makefile $(PROGRAMS) $(SCRIPTS) $(DATA) config.h installdirs: installdirs-recursive installdirs-am: + for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive @@ -812,6 +2772,134 @@ clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f backends/$(DEPDIR)/$(am__dirstamp) + -rm -f backends/$(am__dirstamp) + -rm -f backends/graphite/$(DEPDIR)/$(am__dirstamp) + -rm -f backends/graphite/$(am__dirstamp) + -rm -f backends/json/$(DEPDIR)/$(am__dirstamp) + -rm -f backends/json/$(am__dirstamp) + -rm -f backends/opentsdb/$(DEPDIR)/$(am__dirstamp) + -rm -f backends/opentsdb/$(am__dirstamp) + -rm -f backends/prometheus/$(DEPDIR)/$(am__dirstamp) + -rm -f backends/prometheus/$(am__dirstamp) + -rm -f collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/apps.plugin/$(am__dirstamp) + -rm -f collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/cgroups.plugin/$(am__dirstamp) + -rm -f collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/checks.plugin/$(am__dirstamp) + -rm -f collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/diskspace.plugin/$(am__dirstamp) + -rm -f collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/freebsd.plugin/$(am__dirstamp) + -rm -f collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/freeipmi.plugin/$(am__dirstamp) + -rm -f collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/idlejitter.plugin/$(am__dirstamp) + -rm -f collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/macos.plugin/$(am__dirstamp) + -rm -f collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/nfacct.plugin/$(am__dirstamp) + -rm -f collectors/plugins.d/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/plugins.d/$(am__dirstamp) + -rm -f collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/proc.plugin/$(am__dirstamp) + -rm -f collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/statsd.plugin/$(am__dirstamp) + -rm -f collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp) + -rm -f collectors/tc.plugin/$(am__dirstamp) + -rm -f daemon/$(DEPDIR)/$(am__dirstamp) + -rm -f daemon/$(am__dirstamp) + -rm -f database/$(DEPDIR)/$(am__dirstamp) + -rm -f database/$(am__dirstamp) + -rm -f health/$(DEPDIR)/$(am__dirstamp) + -rm -f health/$(am__dirstamp) + -rm -f libnetdata/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/$(am__dirstamp) + -rm -f libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/adaptive_resortable_list/$(am__dirstamp) + -rm -f libnetdata/avl/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/avl/$(am__dirstamp) + -rm -f libnetdata/buffer/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/buffer/$(am__dirstamp) + -rm -f libnetdata/clocks/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/clocks/$(am__dirstamp) + -rm -f libnetdata/config/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/config/$(am__dirstamp) + -rm -f libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/dictionary/$(am__dirstamp) + -rm -f libnetdata/eval/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/eval/$(am__dirstamp) + -rm -f libnetdata/locks/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/locks/$(am__dirstamp) + -rm -f libnetdata/log/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/log/$(am__dirstamp) + -rm -f libnetdata/popen/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/popen/$(am__dirstamp) + -rm -f libnetdata/procfile/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/procfile/$(am__dirstamp) + -rm -f libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/simple_pattern/$(am__dirstamp) + -rm -f libnetdata/socket/$(DEPDIR)/$(am__dirstamp) + -rm -f 
libnetdata/socket/$(am__dirstamp) + -rm -f libnetdata/statistical/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/statistical/$(am__dirstamp) + -rm -f libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/storage_number/$(am__dirstamp) + -rm -f libnetdata/threads/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/threads/$(am__dirstamp) + -rm -f libnetdata/url/$(DEPDIR)/$(am__dirstamp) + -rm -f libnetdata/url/$(am__dirstamp) + -rm -f registry/$(DEPDIR)/$(am__dirstamp) + -rm -f registry/$(am__dirstamp) + -rm -f streaming/$(DEPDIR)/$(am__dirstamp) + -rm -f streaming/$(am__dirstamp) + -rm -f web/api/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/$(am__dirstamp) + -rm -f web/api/badges/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/badges/$(am__dirstamp) + -rm -f web/api/exporters/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/exporters/$(am__dirstamp) + -rm -f web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/exporters/shell/$(am__dirstamp) + -rm -f web/api/formatters/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/formatters/$(am__dirstamp) + -rm -f web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/formatters/csv/$(am__dirstamp) + -rm -f web/api/formatters/json/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/formatters/json/$(am__dirstamp) + -rm -f web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/formatters/ssv/$(am__dirstamp) + -rm -f web/api/formatters/value/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/formatters/value/$(am__dirstamp) + -rm -f web/api/queries/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/$(am__dirstamp) + -rm -f web/api/queries/average/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/average/$(am__dirstamp) + -rm -f web/api/queries/des/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/des/$(am__dirstamp) + -rm -f web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/incremental_sum/$(am__dirstamp) + -rm -f web/api/queries/max/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/max/$(am__dirstamp) + -rm -f web/api/queries/median/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/median/$(am__dirstamp) + -rm -f web/api/queries/min/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/min/$(am__dirstamp) + -rm -f web/api/queries/ses/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/ses/$(am__dirstamp) + -rm -f web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/stddev/$(am__dirstamp) + -rm -f web/api/queries/sum/$(DEPDIR)/$(am__dirstamp) + -rm -f web/api/queries/sum/$(am__dirstamp) + -rm -f web/server/$(DEPDIR)/$(am__dirstamp) + -rm -f web/server/$(am__dirstamp) + -rm -f web/server/multi/$(DEPDIR)/$(am__dirstamp) + -rm -f web/server/multi/$(am__dirstamp) + -rm -f web/server/single/$(DEPDIR)/$(am__dirstamp) + -rm -f web/server/single/$(am__dirstamp) + -rm -f web/server/static/$(DEPDIR)/$(am__dirstamp) + -rm -f web/server/static/$(am__dirstamp) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @@ -819,12 +2907,15 @@ maintainer-clean-generic: -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive -clean-am: clean-generic mostlyclean-am +clean-am: clean-generic clean-pluginsPROGRAMS clean-sbinPROGRAMS \ + mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf backends/$(DEPDIR) backends/graphite/$(DEPDIR) backends/json/$(DEPDIR) backends/opentsdb/$(DEPDIR) backends/prometheus/$(DEPDIR) collectors/apps.plugin/$(DEPDIR) collectors/cgroups.plugin/$(DEPDIR) 
collectors/checks.plugin/$(DEPDIR) collectors/diskspace.plugin/$(DEPDIR) collectors/freebsd.plugin/$(DEPDIR) collectors/freeipmi.plugin/$(DEPDIR) collectors/idlejitter.plugin/$(DEPDIR) collectors/macos.plugin/$(DEPDIR) collectors/nfacct.plugin/$(DEPDIR) collectors/plugins.d/$(DEPDIR) collectors/proc.plugin/$(DEPDIR) collectors/statsd.plugin/$(DEPDIR) collectors/tc.plugin/$(DEPDIR) daemon/$(DEPDIR) database/$(DEPDIR) health/$(DEPDIR) libnetdata/$(DEPDIR) libnetdata/adaptive_resortable_list/$(DEPDIR) libnetdata/avl/$(DEPDIR) libnetdata/buffer/$(DEPDIR) libnetdata/clocks/$(DEPDIR) libnetdata/config/$(DEPDIR) libnetdata/dictionary/$(DEPDIR) libnetdata/eval/$(DEPDIR) libnetdata/locks/$(DEPDIR) libnetdata/log/$(DEPDIR) libnetdata/popen/$(DEPDIR) libnetdata/procfile/$(DEPDIR) libnetdata/simple_pattern/$(DEPDIR) libnetdata/socket/$(DEPDIR) libnetdata/statistical/$(DEPDIR) libnetdata/storage_number/$(DEPDIR) libnetdata/threads/$(DEPDIR) libnetdata/url/$(DEPDIR) registry/$(DEPDIR) streaming/$(DEPDIR) web/api/$(DEPDIR) web/api/badges/$(DEPDIR) web/api/exporters/$(DEPDIR) web/api/exporters/shell/$(DEPDIR) web/api/formatters/$(DEPDIR) web/api/formatters/csv/$(DEPDIR) web/api/formatters/json/$(DEPDIR) web/api/formatters/ssv/$(DEPDIR) web/api/formatters/value/$(DEPDIR) web/api/queries/$(DEPDIR) web/api/queries/average/$(DEPDIR) web/api/queries/des/$(DEPDIR) web/api/queries/incremental_sum/$(DEPDIR) web/api/queries/max/$(DEPDIR) web/api/queries/median/$(DEPDIR) web/api/queries/min/$(DEPDIR) web/api/queries/ses/$(DEPDIR) web/api/queries/stddev/$(DEPDIR) web/api/queries/sum/$(DEPDIR) web/server/$(DEPDIR) web/server/multi/$(DEPDIR) web/server/single/$(DEPDIR) web/server/static/$(DEPDIR) -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-hdr distclean-tags +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-tags dvi: dvi-recursive @@ -838,13 +2929,15 @@ info: info-recursive info-am: -install-data-am: +install-data-am: install-dist_cacheDATA install-dist_logDATA \ + install-dist_registryDATA install-dist_varlibDATA \ + install-pluginsPROGRAMS install-dvi: install-dvi-recursive install-dvi-am: -install-exec-am: +install-exec-am: install-sbinPROGRAMS install-html: install-html-recursive @@ -869,12 +2962,13 @@ installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache + -rm -rf backends/$(DEPDIR) backends/graphite/$(DEPDIR) backends/json/$(DEPDIR) backends/opentsdb/$(DEPDIR) backends/prometheus/$(DEPDIR) collectors/apps.plugin/$(DEPDIR) collectors/cgroups.plugin/$(DEPDIR) collectors/checks.plugin/$(DEPDIR) collectors/diskspace.plugin/$(DEPDIR) collectors/freebsd.plugin/$(DEPDIR) collectors/freeipmi.plugin/$(DEPDIR) collectors/idlejitter.plugin/$(DEPDIR) collectors/macos.plugin/$(DEPDIR) collectors/nfacct.plugin/$(DEPDIR) collectors/plugins.d/$(DEPDIR) collectors/proc.plugin/$(DEPDIR) collectors/statsd.plugin/$(DEPDIR) collectors/tc.plugin/$(DEPDIR) daemon/$(DEPDIR) database/$(DEPDIR) health/$(DEPDIR) libnetdata/$(DEPDIR) libnetdata/adaptive_resortable_list/$(DEPDIR) libnetdata/avl/$(DEPDIR) libnetdata/buffer/$(DEPDIR) libnetdata/clocks/$(DEPDIR) libnetdata/config/$(DEPDIR) libnetdata/dictionary/$(DEPDIR) libnetdata/eval/$(DEPDIR) libnetdata/locks/$(DEPDIR) libnetdata/log/$(DEPDIR) libnetdata/popen/$(DEPDIR) libnetdata/procfile/$(DEPDIR) libnetdata/simple_pattern/$(DEPDIR) libnetdata/socket/$(DEPDIR) libnetdata/statistical/$(DEPDIR) libnetdata/storage_number/$(DEPDIR) 
libnetdata/threads/$(DEPDIR) libnetdata/url/$(DEPDIR) registry/$(DEPDIR) streaming/$(DEPDIR) web/api/$(DEPDIR) web/api/badges/$(DEPDIR) web/api/exporters/$(DEPDIR) web/api/exporters/shell/$(DEPDIR) web/api/formatters/$(DEPDIR) web/api/formatters/csv/$(DEPDIR) web/api/formatters/json/$(DEPDIR) web/api/formatters/ssv/$(DEPDIR) web/api/formatters/value/$(DEPDIR) web/api/queries/$(DEPDIR) web/api/queries/average/$(DEPDIR) web/api/queries/des/$(DEPDIR) web/api/queries/incremental_sum/$(DEPDIR) web/api/queries/max/$(DEPDIR) web/api/queries/median/$(DEPDIR) web/api/queries/min/$(DEPDIR) web/api/queries/ses/$(DEPDIR) web/api/queries/stddev/$(DEPDIR) web/api/queries/sum/$(DEPDIR) web/server/$(DEPDIR) web/server/multi/$(DEPDIR) web/server/single/$(DEPDIR) web/server/static/$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive -mostlyclean-am: mostlyclean-generic +mostlyclean-am: mostlyclean-compile mostlyclean-generic pdf: pdf-recursive @@ -884,25 +2978,33 @@ ps: ps-recursive ps-am: -uninstall-am: +uninstall-am: uninstall-dist_cacheDATA uninstall-dist_logDATA \ + uninstall-dist_registryDATA uninstall-dist_varlibDATA \ + uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS .MAKE: $(am__recursive_targets) all install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--refresh check check-am clean clean-cscope clean-generic \ - cscope cscopelist-am ctags ctags-am dist dist-all dist-bzip2 \ - dist-gzip dist-lzip dist-shar dist-tarZ dist-xz dist-zip \ - distcheck distclean distclean-generic distclean-hdr \ + clean-pluginsPROGRAMS clean-sbinPROGRAMS cscope cscopelist-am \ + ctags ctags-am dist dist-all dist-bzip2 dist-gzip dist-lzip \ + dist-shar dist-tarZ dist-xz dist-zip distcheck distclean \ + distclean-compile distclean-generic distclean-hdr \ distclean-tags distcleancheck distdir distuninstallcheck dvi \ dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-dvi install-dvi-am \ + install-data install-data-am install-dist_cacheDATA \ + install-dist_logDATA install-dist_registryDATA \ + install-dist_varlibDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs installdirs-am \ - maintainer-clean maintainer-clean-generic mostlyclean \ + install-pdf-am install-pluginsPROGRAMS install-ps \ + install-ps-am install-sbinPROGRAMS install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \ - uninstall-am + uninstall-am uninstall-dist_cacheDATA uninstall-dist_logDATA \ + uninstall-dist_registryDATA uninstall-dist_varlibDATA \ + uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. 
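The `.c.o` suffix rule above is automake's side-effect dependency tracking: every compile also writes a makefile fragment (a `.Po` file) naming the headers the object depends on, and the long `@AMDEP_TRUE@@am__include@` block re-includes those fragments on the next run. A rough hand-expansion of the fast-dependency branch for one object touched by this patch, assuming a gcc-compatible compiler and the automake defaults `DEPDIR = .deps` and `$(am__mv)` = `mv -f`:

    # map the object path into $(DEPDIR): daemon/main.o -> daemon/.deps/main
    depbase=`echo daemon/main.o | sed 's|[^/]*$|.deps/&|;s|\.o$||'`
    # -MT names the make target, -MD -MP emit header dependencies, -MF selects the output file
    gcc $CFLAGS -MT daemon/main.o -MD -MP -MF "$depbase.Tpo" -c -o daemon/main.o daemon/main.c \
      && mv -f "$depbase.Tpo" "$depbase.Po"

The dependencies are written to a temporary `.Tpo` and renamed to `.Po` only after a successful compile, so an interrupted build never leaves a truncated dependency file behind for the next `make` to include.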
diff --git a/README.md b/README.md index 4fd40b497..6f11fd14a 100644 --- a/README.md +++ b/README.md @@ -1,48 +1,128 @@ -# netdata [![Build Status](https://travis-ci.org/firehol/netdata.svg?branch=master)](https://travis-ci.org/firehol/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=firehol/netdata&utm_campaign=Badge_Grade) [![Code Climate](https://codeclimate.com/github/firehol/netdata/badges/gpa.svg)](https://codeclimate.com/github/firehol/netdata) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) +# netdata [![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) + +[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade) +[![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) +[![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) +[![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python) + > *New to netdata? Here is a live demo: [http://my-netdata.io](http://my-netdata.io)* **netdata** is a system for **distributed real-time performance and health monitoring**. -It provides **unparalleled insights, in real-time**, of everything happening on the -system it runs (including applications such as web and database servers), using -**modern interactive web dashboards**. + +It provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including containers and applications such as web and database servers), using **modern interactive web dashboards**. + +### netdata core values + +we value |netdata... 
+:--------------------------:|:----
+high resolution metrics |collects all metrics **every single second**
+unlimited metrics |collects **thousands of metrics** per monitored node
+real-time visualization |dashboards run with **sub-second latency**, collection to visualization
+powerful anomaly detection |has a **distributed watchdog** embedded in it, running on all monitored nodes
+visual anomaly detection |dashboards are optimized for **spotting anomalies**, across all metrics
+meaningful presentation |dashboards present **all metrics in a structured, easy to understand way**
+zero configuration |**auto-detects** all metrics and comes with dozens of alarms
+resource utilization |core is **optimized C code**, using <1% utilization of a single CPU core
+
+netdata also supports:
+
+ - monitoring **ephemeral nodes** and **auto-scaled containers**,
+ - **integration** with existing monitoring infrastructure (time-series databases like `prometheus`, `graphite`, `opentsdb`) and third-party event notification methods
+(like `slack`, `pagerduty`, `pushover`, and dozens more),
+ - building hierarchies of monitored nodes via **real-time metrics streaming** between them,
+ - embedding charts and dashboards on third-party web sites and applications, such as [Atlassian's Confluence](https://github.com/netdata/netdata/wiki/Custom-Dashboard-with-Confluence).

 _netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._

-netdata runs on **Linux**, **FreeBSD**, and **MacOS**.
+netdata currently runs on **Linux**, **FreeBSD**, and **MacOS**.

 [![Twitter Follow](https://img.shields.io/twitter/follow/linuxnetdata.svg?style=social&label=New%20-%20stay%20in%20touch%20-%20follow%20netdata%20on%20twitter)](https://twitter.com/linuxnetdata)
-[![analytics](http://www.google-analytics.com/collect?v=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Ffirehol%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](http://www.google-analytics.com/collect?v=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()

 ---

 ## User base

 *Docker pulls*<br/>
-[![Docker titpetric/netdata Pulls](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=docker%20pulls%20titpetric/netdata)](https://hub.docker.com/r/titpetric/netdata/) [![Docker firehol/netdata Pulls](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=docker%20pulls%20firehol/netdata)](https://hub.docker.com/r/firehol/netdata/) +[![netdata/netdata (official)](https://img.shields.io/docker/pulls/netdata/netdata.svg?label=netdata/netdata+%28official%29)](https://hub.docker.com/r/netdata/netdata/) +[![firehol/netdata (deprecated)](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=firehol/netdata+%28deprecated%29)](https://hub.docker.com/r/firehol/netdata/) +[![titpetric/netdata (donated)](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=titpetric/netdata+%28third+party%29)](https://hub.docker.com/r/titpetric/netdata/) -*Since May 16th 2016 (the date the [global public netdata registry](https://github.com/firehol/netdata/wiki/mynetdata-menu-item) was released):*
-[![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) +*Since May 16th 2016 (the date the [global public netdata registry](https://github.com/netdata/netdata/wiki/mynetdata-menu-item) was released):*
+[![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) +[![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) +[![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) *in the last 24 hours:*
-[![New Users Today](http://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+[![New Users Today](http://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+[![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+[![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)

 ---

 ## News

-`Dec 17th, 2017` - **[netdata v1.9.0 released!](https://github.com/firehol/netdata/releases)**
+`Sep 18, 2018` - **netdata has its own organization**
+
+Netdata used to be a [firehol.org](https://firehol.org) project, accessible as `firehol/netdata`.
+
+Netdata now has its own github organization `netdata`, so all github URLs are now `netdata/netdata`. The old github URLs, repo clones, forks, etc. redirect automatically to the new repo.
+
+---
+
+![cncf](https://www.cncf.io/wp-content/uploads/2016/09/logo_cncf.png)
+
+`Jun 16, 2018` - **netdata in CNCF**
-A big release:
+Netdata is now at the [Cloud Native Computing Foundation (CNCF) landscape](https://landscape.cncf.io/grouping=no&sort=stars).
+
+Read the [netdata presentation](https://docs.google.com/presentation/d/18C8bCTbtgKDWqPa57GXIjB2PbjjpjsUNkLtZEz6YK8s/edit?usp=sharing) we gave at CNCF TOC on Sep 18, 2018.
+
+---
- - dashboard snapshots, for loading / saving selected time-frames
- - highlighted time-frames across all charts of the dashboard
- - IP access lists for filtering access to netdata
- - enhanced VMs and containers monitoring
- - auto-scaling of chart units
- - timezone conversion at the dashboard to allow comparing charts with server logs
- - python.d.plugin rewritten
- - dozens of more improvements, enhancements, features and compatibility fixes
+`Mar 27th, 2018` - **[netdata v1.10.0 released!](https://github.com/netdata/netdata/releases)**
+
+ - new web server, a lot faster and more secure
+ - updated all javascript libraries to their latest versions (fixed compatibility issues - netdata charts can now be embedded on **Atlassian Confluence** pages and remain fully interactive!)
+ - new plugins:
+   - **BTRFS** (visualize BTRFS allocation with alarms)
+   - **bcache** (monitor hybrid HDD + SSD setups)
+   - **ceph**
+   - **nginx plus**
+   - **libreswan** (monitor the traffic of IPSEC tunnels)
+   - **traefik**
+   - **icecast**
+   - **ntpd**
+   - **httpcheck** (monitor any remote web server)
+   - **portcheck** (monitor any remote TCP port)
+   - **spring-boot** (monitor Java spring-boot apps)
+   - **dnsdist**
+   - **Linux hugepages**
+ - improved plugins:
+   - **statsd**
+   - **web_log**
+   - **cgroups** for containers and VMs monitoring (netdata now supports **systemd-nspawn** and **kubernetes** - fixed security issue with `cgroup-network`)
+   - **Linux memory**
+   - **diskspace**
+   - **network interfaces**
+   - **postgres**
+   - **rabbitmq**
+   - **apps.plugin** (now it also tracks swap usage per process)
+   - **haproxy**
+   - **uptime**
+   - **ksm** (kernel memory deduper)
+   - **mdstat** (software RAID)
+   - **elasticsearch**
+   - **apcupsd**
+   - **dhcpd**
+   - **fronius**
+   - **stiebeletron**
+ - new alarm notification methods
+   - **alerta**
+   - **IRC** (post on IRC channels)
+ - and dozens more improvements, enhancements, features and compatibility fixes

 ---

@@ -285,7 +365,7 @@ This is a list of what it currently monitors:
   global and per tube monitoring

 - **statsd**<br/>
- [netdata is a fully featured statsd server](https://github.com/firehol/netdata/wiki/statsd) + [netdata is a fully featured statsd server](https://github.com/netdata/netdata/wiki/statsd) - **ceph**
  OSD usage, Pool usage, number of objects, etc.

@@ -299,13 +379,13 @@ And you can extend it, by writing plugins that collect data from any source, usi
 This is a high level overview of netdata feature set and architecture. Click it to interact with it (it has direct links to documentation).

-[![netdata-overview](https://user-images.githubusercontent.com/2662304/37909754-6c812a7c-3114-11e8-8673-0d1926a9793a.png)](https://my-netdata.io/infographic.html)
+[![image](https://user-images.githubusercontent.com/2662304/47672043-a47eb480-dbb9-11e8-92a4-fa422d053309.png)](https://my-netdata.io/infographic.html)

 ---

 ## Installation

-Use our **[automatic installer](https://github.com/firehol/netdata/wiki/Installation)** to build and install it on your system.
+Use our **[automatic installer](https://github.com/netdata/netdata/wiki/Installation)** to build and install it on your system.

 It should run on **any Linux** system (including IoT). It has been tested on:

@@ -325,17 +405,17 @@ It should run on **any Linux** system (including IoT). It has been tested on:

 ## Interaction with netdata

-After installation, you can interact with netdata using **[CLI](https://github.com/firehol/netdata/wiki/Command-Line-Options)** and web dashboards.
+After installation, you can interact with netdata using **[CLI](https://github.com/netdata/netdata/wiki/Command-Line-Options)** and web dashboards.
 The default port of the dashboard is 19999. To access the web dashboard on localhost, use: http://localhost:19999

 ---

 ## Documentation

-Check the **[netdata wiki](https://github.com/firehol/netdata/wiki)**.
+Check the **[netdata wiki](https://github.com/netdata/netdata/wiki)**.

 ## License

 netdata is [GPLv3+](LICENSE).

-It re-distributes other open-source tools and libraries. Please check the [third party licenses](https://github.com/firehol/netdata/blob/master/LICENSE-REDISTRIBUTED.md).
+Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md).
diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md
new file mode 100644
index 000000000..ed30624fb
--- /dev/null
+++ b/REDISTRIBUTED.md
@@ -0,0 +1,209 @@
+# Netdata
+
+Copyright 2016-2017, Costa Tsaousis.
+Copyright 2017-2018, Netdata Inc.
+Released under [GPL v3 or later](http://www.gnu.org/licenses/gpl-3.0.en.html).
+
+Netdata uses SPDX license tags to identify the license for its files.
+Individual licenses referenced in the tags are available on the [SPDX project site](http://spdx.org/licenses/).
+
+---
+
+## Re-distributed software
+
+Netdata re-distributes the following third-party software.
+We have decided to re-distribute all these, instead of using them
+through a CDN, to allow netdata to work in cases where Internet
+connectivity is not available.
+
+- [Dygraphs](http://dygraphs.com/)
+
+  Copyright 2009, Dan Vanderkam
+  [MIT License](http://dygraphs.com/legal.html)
+
+
+- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/)
+
+  Copyright 2013, Robert Fleischmann
+  [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE)
+
+
+- [Gauge.js](http://bernii.github.io/gauge.js/)
+
+  Copyright, Bernard Kobos
+  [MIT License](http://bernii.github.io/gauge.js/)
+
+
+- [d3pie](https://github.com/benkeen/d3pie)
+
+  Copyright (c) 2014-2015 Benjamin Keen
+  [MIT License](https://github.com/benkeen/d3pie/blob/master/LICENSE)
+
+
+- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/)
+
+  Copyright 2009-2012, Splunk Inc.
+  [New BSD License](http://opensource.org/licenses/BSD-3-Clause)
+
+
+- [Peity](http://benpickles.github.io/peity/)
+
+  Copyright 2009-2015, Ben Pickles
+  [MIT License](https://github.com/benpickles/peity/blob/master/LICENCE)
+
+
+- [morris.js](http://morrisjs.github.io/morris.js/)
+
+  Copyright 2013, Olly Smith
+  [Simplified BSD License](http://morrisjs.github.io/morris.js/)
+
+
+- [Raphaël](http://dmitrybaranovskiy.github.io/raphael/)
+
+  Copyright 2008, Dmitry Baranovskiy
+  [MIT License](http://dmitrybaranovskiy.github.io/raphael/license.html)
+
+
+- [C3](http://c3js.org/)
+
+  Copyright 2013, Masayuki Tanaka
+  [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE)
+
+
+- [D3](http://d3js.org/)
+
+  Copyright 2015, Mike Bostock
+  [BSD License](http://opensource.org/licenses/BSD-3-Clause)
+
+
+- [jQuery](https://jquery.org/)
+
+  Copyright 2015, jQuery Foundation
+  [MIT License](https://jquery.org/license/)
+
+
+- [Bootstrap](http://getbootstrap.com/getting-started/)
+
+  Copyright 2015, Twitter
+  [MIT License](http://getbootstrap.com/getting-started/#license-faqs)
+
+
+- [Bootstrap Toggle](http://www.bootstraptoggle.com/)
+
+  Copyright (c) 2011-2014 Min Hur, The New York Times Company
+  [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE)
+
+
+- [Bootstrap-slider](http://seiyria.com/bootstrap-slider/)
+
+  Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors
+  [MIT License](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md)
+
+
+- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/)
+
+  Copyright (c) 2012-2016 Zhixin Wen
+  [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE)
+
+
+- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin)
+
+  Copyright (c) 2015,2016 hhurz
+  [MIT License](http://rawgit.com/hhurz/tableExport.jquery.plugin/master/tableExport.js)
+
+
+- [perfect-scrollbar](https://github.com/noraesae/perfect-scrollbar)
+
+  Copyright 2016, Hyunje Alex Jun and other contributors
+  [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE)
+
+
+- [FontAwesome](https://fortawesome.github.io/Font-Awesome/)
+
+  Created by Dave Gandy
+  Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL)
+  Icon license: [Creative Commons Attribution 4.0 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/)
+  Code license: [MIT License](http://opensource.org/licenses/mit-license.html)
+
+
+- [IconsDB.com Icons](http://www.iconsdb.com/soylent-red-icons/seo-performance-icon.html)
+
+  Icons provided as CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+
+
+- [node-extend](https://github.com/justmoon/node-extend)
+
+  Copyright 2014, Stefan Thomas
+  [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE)
+
+
+- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp)
+
+  Copyright 2013, Stephen Vickers
+  [MIT License](https://github.com/stephenwvickers/node-net-snmp)
+
+
+- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber)
+
+  Copyright 2017, Stephen Vickers
+  Copyright 2011, Mark Cavage
+  [MIT License](https://github.com/stephenwvickers/node-asn1-ber)
+
+
+- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
+
+  Copyright 2015, Joseph Huckaby
+  [MIT License](https://github.com/jhuckaby/pixl-xml)
+
+
+- [sensors](https://github.com/paroj/sensors.py)
+
+  Copyright 2014, Pavel Rojtberg
+  [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1)
+
+
+- [PyYAML](https://github.com/yaml/pyyaml)
+
+  Copyright
2006, Kirill Simonov + [MIT License](https://github.com/yaml/pyyaml) + + +- [urllib3](https://github.com/shazow/urllib3) + + Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt) + [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt) + + +- [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html) + + Copyright 2013 Pieroxy + [WTFPL License](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10) + + +- [pako](http://nodeca.github.io/pako/) + + Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn + [MIT License](https://github.com/nodeca/pako/blob/master/LICENSE) + + +- [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill) + + Copyright (c) 2014 Lucas Garron + [MIT License](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md) + + +- [Utilities for writing code that runs on Python 2 and 3](https://github.com/netdata/netdata/blob/master/python.d/python_modules/urllib3/packages/six.py) + + Copyright (c) 2010-2015 Benjamin Peterson + [MIT License](https://github.com/netdata/netdata/blob/master/python.d/python_modules/urllib3/packages/six.py) + + +- [mcrcon](https://github.com/barneygale/MCRcon) + + Copyright (C) 2015 Barnaby Gale + [MIT License](https://raw.githubusercontent.com/barneygale/MCRcon/master/COPYING.txt) + +- [monotonic](https://github.com/atdt/monotonic) + + Copyright 2014, 2015, 2016 Ori Livneh + [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) diff --git a/aclocal.m4 b/aclocal.m4 index 58b64dc77..d072284de 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -1343,13 +1343,13 @@ AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR -m4_include([m4/ax_c___atomic.m4]) -m4_include([m4/ax_c__generic.m4]) -m4_include([m4/ax_c_lto.m4]) -m4_include([m4/ax_c_mallinfo.m4]) -m4_include([m4/ax_c_mallopt.m4]) -m4_include([m4/ax_check_compile_flag.m4]) -m4_include([m4/ax_gcc_func_attribute.m4]) -m4_include([m4/ax_pthread.m4]) -m4_include([m4/jemalloc.m4]) -m4_include([m4/tcmalloc.m4]) +m4_include([build/m4/ax_c___atomic.m4]) +m4_include([build/m4/ax_c__generic.m4]) +m4_include([build/m4/ax_c_lto.m4]) +m4_include([build/m4/ax_c_mallinfo.m4]) +m4_include([build/m4/ax_c_mallopt.m4]) +m4_include([build/m4/ax_check_compile_flag.m4]) +m4_include([build/m4/ax_gcc_func_attribute.m4]) +m4_include([build/m4/ax_pthread.m4]) +m4_include([build/m4/jemalloc.m4]) +m4_include([build/m4/tcmalloc.m4]) diff --git a/autogen.sh b/autogen.sh deleted file mode 100755 index 38e2ed159..000000000 --- a/autogen.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/usr/bin/env sh -autoreconf -ivf diff --git a/backends/Makefile.am b/backends/Makefile.am new file mode 100644 index 000000000..268259edd --- /dev/null +++ b/backends/Makefile.am @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + graphite \ + json \ + opentsdb \ + prometheus \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_noinst_SCRIPTS = \ + nc-backend.sh \ + $(NULL) diff --git a/backends/Makefile.in b/backends/Makefile.in new file mode 100644 index 000000000..c2484e96a --- /dev/null +++ b/backends/Makefile.in @@ -0,0 +1,657 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. 
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = backends +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +SCRIPTS = $(dist_noinst_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ 
+LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + graphite \ + json \ + opentsdb \ + prometheus \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_noinst_SCRIPTS = \ + nc-backend.sh \ + $(NULL) + 
+all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu backends/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-generic cscopelist-am ctags ctags-am \
+	distclean distclean-generic distclean-tags distdir dvi dvi-am \
+	html html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs installdirs-am maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/README.md b/backends/README.md
new file mode 100644
index 000000000..b449f060f
--- /dev/null
+++ b/backends/README.md
@@ -0,0 +1,199 @@
+
+# Metrics Long Term Archiving
+
+netdata supports backends for archiving the metrics or for providing long-term dashboards,
+using Grafana or other tools, like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20649711/29f182ba-b4ce-11e6-97c8-ab2c0ab59833.png)
+
+Since netdata collects thousands of metrics per server per second, which would easily congest any backend
+server when several netdata servers are sending data to it, netdata allows sending metrics at a lower
+frequency by resampling them.
+
+So, although netdata collects metrics every second, it can send to the backend servers averages or sums every
+X seconds (though it can send them per second if you need it to).
+
+## features
+
+1. Supported backends
+
+   - **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**,
+     **Blueflood**, **ElasticSearch** via logstash tcp input and the graphite codec, etc)
+
+     metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is
+     configured below, `hostname` is the hostname of the machine (can also be configured).
+
+   - **opentsdb** (`telnet interface`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)
+
+     metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.
+
+   - **json** document DBs
+
+     metrics are sent to a document DB, `JSON` formatted.
+
+   - **prometheus** is described at the [prometheus page](prometheus/), since it pulls data from netdata.
+
+2. Only one backend may be active at a time.
+
+3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics.
+
+4. Three modes of operation (for all backends):
+
+   - `as collected`: the latest collected value is sent to the backend. This means that if netdata
+     is configured to send data to the backend every 10 seconds, only 1 out of 10 values will appear
+     at the backend server. The values are sent exactly as collected, before any multipliers or
+     dividers are applied and before any interpolation. This mode emulates other data collectors,
+     such as `collectd` or `telegraf`.
+
+   - `average`: the average of the interpolated values shown on the netdata graphs is sent to the
+     backend. So, if netdata is configured to send data to the backend server every 10 seconds,
+     the average of the 10 values shown on the netdata charts will be used. **If you can't decide
+     which mode to use, use `average`.**
+
+   - `sum` or `volume`: the sum of the interpolated values shown on the netdata graphs is sent to
+     the backend. So, if netdata is configured to send data to the backend every 10 seconds, the
+     sum of the 10 values shown on the netdata charts will be used.
+
+5. This code is smart enough not to slow down netdata, regardless of the speed of the backend server.
+
+## configuration
+
+In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version
+of `netdata.conf` from your netdata):
+
+```
+[backend]
+    enabled = yes | no
+    type = graphite | opentsdb | json
+    host tags = list of TAG=VALUE
+    destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used
+    data source = average | sum | as collected
+    prefix = netdata
+    hostname = my-name
+    update every = 10
+    buffer on failures = 10
+    timeout ms = 20000
+    send charts matching = *
+    send hosts matching = localhost *
+    send names instead of ids = yes
+```
+
+- `enabled = yes | no`, enables or disables sending data to a backend
+
+- `type = graphite | opentsdb | json`, selects the backend type
+
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames,
+  IPs (IPv4 and IPv6) and ports to connect to.
+  Netdata will use the **first available** to send the metrics.
+
+  The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.
+
+  `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only one supported by the current backends.
+
+  `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6).
+  For IPv6 you need to enclose the IP in `[]`, to separate it from the port.
+
+  `PORT` can be a number or a service name. If omitted, the default port for the backend will be used
+  (graphite = 2003, opentsdb = 4242).
+
+  Example IPv4:
+
+```
+  destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+
+  Example IPv6 and IPv4 together:
+
+```
+  destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+
+  When multiple servers are defined, netdata will try the next one when the first one fails. This allows
+  you to load-balance different servers: list your backend servers in a different order on each netdata.
+
+  netdata also ships [`nc-backend.sh`](nc-backend.sh),
+  a script that can be used as a fallback backend to save the metrics to disk and push them to the
+  time-series database when it becomes available again. It can also be used to monitor / trace / debug
+  the metrics netdata generates.
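+
+  For a quick look at what netdata actually sends, you can also point `destination` to a plain
+  `netcat` listener (a debugging sketch only - `my-workstation` is a hypothetical host and the
+  exact `nc` flags depend on your netcat flavor):
+
+```
+# on the receiving host (BSD netcat; traditional netcat uses: nc -l -p 2003)
+nc -l 2003
+```
+
+  With `type = graphite` and `destination = my-workstation:2003`, every line the listener prints
+  should follow the graphite plaintext format, `prefix.hostname.chart.dimension value timestamp`.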
+
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of
+  data that will be sent to the backend.
+
+- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default
+  this is `[global].hostname`.
+
+- `prefix = netdata`, is the prefix to add to all metrics.
+
+- `update every = 10`, is the number of seconds between sending data to the backend. netdata will add
+  some randomness to this number, to prevent stressing the backend server when many netdata servers send
+  data to the same backend. This randomness does not affect the quality of the data, only the time they
+  are sent.
+
+- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds)
+  to buffer data, when the backend is not available. If the backend fails to receive the data after that
+  many failures, data loss on the backend is expected (netdata will also log it).
+
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data.
+  By default this is `2 * update_every * 1000`.
+
+- `send hosts matching = localhost *` includes one or more space separated patterns, using ` * ` as wildcard
+  (any number of times within each pattern). The patterns are checked against the hostname (the localhost
+  is always checked as `localhost`), allowing us to filter which hosts will be sent to the backend when
+  this netdata is a central netdata aggregating multiple hosts. A pattern starting with ` ! ` gives a
+  negative match. So to match all hosts named `*db*` except hosts containing `*slave*`, use
+  `!*slave* *db*` (the order is important: the first pattern matching the hostname will be used - positive
+  or negative).
+
+- `send charts matching = *` includes one or more space separated patterns, using ` * ` as wildcard (any
+  number of times within each pattern). The patterns are checked against both chart id and chart name.
+  A pattern starting with ` ! ` gives a negative match. So to match all charts named `apps.*`
+  except charts ending in `*reads`, use `!*reads apps.*` (the order is important: the first pattern
+  matching the chart id or the chart name will be used - positive or negative). A combined example of
+  both matching options is shown below.
+
+- `send names instead of ids = yes | no` controls the metric names netdata should send to the backend.
+  netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers, as read
+  by the system, and names are human-friendly labels (also unique). Most charts and metrics have the same
+  ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes,
+  statsd synthetic charts, etc.
+
+- `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host.
+  These are currently only sent to opentsdb and prometheus. Please use the appropriate format for each
+  time-series db. For example, opentsdb expects them like `TAG1=VALUE1 TAG2=VALUE2`, while prometheus
+  expects `tag1="value1",tag2="value2"`. Host tags are mirrored with database replication (streaming of
+  metrics between netdata servers).
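+
+Putting the two matching options together, here is a minimal sketch (the patterns are the examples
+from above, not defaults, and the destination is hypothetical):
+
+```
+[backend]
+    enabled = yes
+    type = graphite
+    destination = 10.11.14.2:2003
+    send hosts matching = !*slave* *db*
+    send charts matching = !*reads apps.*
+```
+
+With this, a host named `mysql-db3` matches (the first matching pattern wins), `mysql-db3-slave` is
+excluded (the negative pattern is checked first), and only the `apps.*` charts not ending in `reads`
+are sent.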
+
+## monitoring operation
+
+netdata provides 5 charts:
+
+1. **Buffered metrics**, the number of metrics netdata added to the buffer for dispatching them to the
+   backend server.
+
+2. **Buffered data size**, the amount of data (in KB) netdata added to the buffer.
+
+3. ~~**Backend latency**, the time the backend server needed to process the data netdata sent.
+   If there was a re-connection involved, this includes the connection time.~~
+   (this chart has been removed, because it only measures the time netdata needs to give the data
+   to the O/S - since the backend servers do not ack the reception, netdata does not have any means
+   to measure this properly).
+
+4. **Backend operations**, the number of operations performed by netdata.
+
+5. **Backend thread CPU usage**, the CPU resources consumed by the netdata thread that is responsible
+   for sending the metrics to the backend server.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
+
+## alarms
+
+The latest version of the alarms configuration for monitoring the backend is [here](../health/health.d/backend.conf).
+
+netdata adds 4 alarms:
+
+1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data
+2. `backend_metrics_sent`, percentage of metrics sent to the backend server
+3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
+4. ~~`backend_slow`, the percentage of time between iterations needed by the backend to process the data sent by netdata~~ (this was misleading and has been removed).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
+
diff --git a/backends/backends.c b/backends/backends.c
new file mode 100644
index 000000000..53a9a2395
--- /dev/null
+++ b/backends/backends.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "backends.h"
+
+// ----------------------------------------------------------------------------
+// How backends work in netdata:
+//
+// 1. There is an independent thread that runs at the required interval
+//    (for example, once every 10 seconds)
+//
+// 2. Every time it wakes, it calls the backend formatting functions to build
+//    a buffer of data. This is a very fast, memory only operation.
+//
+// 3. If the buffer already includes data, the new data are appended.
+//    If the buffer becomes too big, because the data cannot be sent, a
+//    log is written and the buffer is discarded.
+//
+// 4. Then it tries to send all the data. It blocks until all the data are sent
+//    or the socket returns an error.
+//    If the time required for this is above the interval, it starts skipping
+//    intervals, but the calculated values include the entire database, without
+//    gaps (it remembers the timestamps and continues from where it stopped).
+//
+// 5. Repeats the above forever.
+//
+
+const char *global_backend_prefix = "netdata";
+int global_backend_update_every = 10;
+BACKEND_OPTIONS global_backend_options = BACKEND_SOURCE_DATA_AVERAGE | BACKEND_OPTION_SEND_NAMES;
+
+// ----------------------------------------------------------------------------
+// helper functions for backends
+
+size_t backend_name_copy(char *d, const char *s, size_t usable) {
+    size_t n;
+
+    for(n = 0; *s && n < usable ; d++, s++, n++) {
+        char c = *s;
+
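+        // descriptive note: anything that is not alphanumeric and not '.' is
+        // mapped to '_', so the copied name is safe to embed in the
+        // graphite/opentsdb wire formats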
+        if(c != '.' && !isalnum(c)) *d = '_';
+        else *d = c;
+    }
+    *d = '\0';
+
+    return n;
+}
+
+// calculate the SUM or AVERAGE of a dimension, for any timeframe
+// may return NAN if the database does not have any value in the given timeframe
+
+calculated_number backend_calculate_value_from_stored_data(
+          RRDSET *st                        // the chart
+        , RRDDIM *rd                        // the dimension
+        , time_t after                      // the start timestamp
+        , time_t before                     // the end timestamp
+        , BACKEND_OPTIONS backend_options   // BACKEND_SOURCE_* bitmap
+        , time_t *first_timestamp           // the first point of the database used in this response
+        , time_t *last_timestamp            // the timestamp that should be reported to backend
+) {
+    RRDHOST *host = st->rrdhost;
+    (void)host;
+
+    // find the edges of the rrd database for this chart
+    time_t first_t = rrdset_first_entry_t(st);
+    time_t last_t = rrdset_last_entry_t(st);
+    time_t update_every = st->update_every;
+
+    // step back a little, to make sure we have complete data collection
+    // for all metrics
+    after -= update_every * 2;
+    before -= update_every * 2;
+
+    // align the time-frame
+    after = after - (after % update_every);
+    before = before - (before % update_every);
+
+    // for before, lose another iteration
+    // the latest point will be reported the next time
+    before -= update_every;
+
+    if(unlikely(after > before))
+        // this can happen when update_every > before - after
+        after = before;
+
+    if(unlikely(after < first_t))
+        after = first_t;
+
+    if(unlikely(before > last_t))
+        before = last_t;
+
+    if(unlikely(before < first_t || after > last_t)) {
+        // the chart has not been updated in the wanted timeframe
+        debug(D_BACKEND, "BACKEND: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
+              host->hostname, st->id, rd->id,
+              (unsigned long)after, (unsigned long)before,
+              (unsigned long)first_t, (unsigned long)last_t
+        );
+        return NAN;
+    }
+
+    *first_timestamp = after;
+    *last_timestamp = before;
+
+    size_t counter = 0;
+    calculated_number sum = 0;
+
+    long start_at_slot = rrdset_time2slot(st, before),
+         stop_at_slot = rrdset_time2slot(st, after),
+         slot, stop_now = 0;
+
+    for(slot = start_at_slot; !stop_now ; slot--) {
+
+        if(unlikely(slot < 0)) slot = st->entries - 1;
+        if(unlikely(slot == stop_at_slot)) stop_now = 1;
+
+        storage_number n = rd->values[slot];
+
+        if(unlikely(!does_storage_number_exist(n))) {
+            // not collected
+            continue;
+        }
+
+        calculated_number value = unpack_storage_number(n);
+        sum += value;
+
+        counter++;
+    }
+
+    if(unlikely(!counter)) {
+        debug(D_BACKEND, "BACKEND: %s.%s.%s: no values stored in database for range %lu to %lu",
+              host->hostname, st->id, rd->id,
+              (unsigned long)after, (unsigned long)before
+        );
+        return NAN;
+    }
+
+    if(unlikely(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM))
+        return sum;
+
+    return sum / (calculated_number)counter;
+}
+
+
+// discard a response received by a backend
+// after logging a sample of it to error.log
+
+int discard_response(BUFFER *b, const char *backend) {
+    char sample[1024];
+    const char *s = buffer_tostring(b);
+    char *d = sample, *e = &sample[sizeof(sample) - 1];
+
+    for(; *s && d < e ;s++) {
+        char c = *s;
+        if(unlikely(!isprint(c))) c = ' ';
+        *d++ = c;
+    }
+    *d = '\0';
+
+    info("BACKEND: received %zu bytes from %s backend. Ignoring them.
Sample: '%s'", buffer_strlen(b), backend, sample); + buffer_flush(b); + return 0; +} + + +// ---------------------------------------------------------------------------- +// the backend thread + +static SIMPLE_PATTERN *charts_pattern = NULL; +static SIMPLE_PATTERN *hosts_pattern = NULL; + +inline int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st) { + RRDHOST *host = st->rrdhost; + (void)host; + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE))) + return 0; + + if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) { + // we have not checked this chart + if(simple_pattern_matches(charts_pattern, st->id) || simple_pattern_matches(charts_pattern, st->name)) + rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND); + else { + rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE); + debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname); + return 0; + } + } + + if(unlikely(!rrdset_is_available_for_backends(st))) { + debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname); + return 0; + } + + if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED))) { + debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode)); + return 0; + } + + return 1; +} + +inline BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options) { + if(!strcmp(source, "raw") || !strcmp(source, "as collected") || !strcmp(source, "as-collected") || !strcmp(source, "as_collected") || !strcmp(source, "ascollected")) { + backend_options |= BACKEND_SOURCE_DATA_AS_COLLECTED; + backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AS_COLLECTED); + } + else if(!strcmp(source, "average")) { + backend_options |= BACKEND_SOURCE_DATA_AVERAGE; + backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AVERAGE); + } + else if(!strcmp(source, "sum") || !strcmp(source, "volume")) { + backend_options |= BACKEND_SOURCE_DATA_SUM; + backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_SUM); + } + else { + error("BACKEND: invalid data source method '%s'.", source); + } + + return backend_options; +} + +static void backends_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *backends_main(void *ptr) { + netdata_thread_cleanup_push(backends_main_cleanup, ptr); + + int default_port = 0; + int sock = -1; + BUFFER *b = buffer_create(1), *response = buffer_create(1); + int (*backend_request_formatter)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS) = NULL; + int (*backend_response_checker)(BUFFER *) = NULL; + + // ------------------------------------------------------------------------ + // collect configuration options + + struct timeval timeout = { + .tv_sec = 0, + .tv_usec = 0 + }; + int enabled = config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", 0); + const char *source = config_get(CONFIG_SECTION_BACKEND, "data source", "average"); + const char *type = config_get(CONFIG_SECTION_BACKEND, "type", 
"graphite"); + const char *destination = config_get(CONFIG_SECTION_BACKEND, "destination", "localhost"); + global_backend_prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata"); + const char *hostname = config_get(CONFIG_SECTION_BACKEND, "hostname", localhost->hostname); + global_backend_update_every = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", global_backend_update_every); + int buffer_on_failures = (int)config_get_number(CONFIG_SECTION_BACKEND, "buffer on failures", 10); + long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", global_backend_update_every * 2 * 1000); + + if(config_get_boolean(CONFIG_SECTION_BACKEND, "send names instead of ids", (global_backend_options & BACKEND_OPTION_SEND_NAMES))) + global_backend_options |= BACKEND_OPTION_SEND_NAMES; + else + global_backend_options &= ~BACKEND_OPTION_SEND_NAMES; + + charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT); + hosts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT); + + + // ------------------------------------------------------------------------ + // validate configuration options + // and prepare for sending data to our backend + + global_backend_options = backend_parse_data_source(source, global_backend_options); + + if(timeoutms < 1) { + error("BACKEND: invalid timeout %ld ms given. Assuming %d ms.", timeoutms, global_backend_update_every * 2 * 1000); + timeoutms = global_backend_update_every * 2 * 1000; + } + timeout.tv_sec = (timeoutms * 1000) / 1000000; + timeout.tv_usec = (timeoutms * 1000) % 1000000; + + if(!enabled || global_backend_update_every < 1) + goto cleanup; + + // ------------------------------------------------------------------------ + // select the backend type + + if(!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) { + + default_port = 2003; + backend_response_checker = process_graphite_response; + + if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) + backend_request_formatter = format_dimension_collected_graphite_plaintext; + else + backend_request_formatter = format_dimension_stored_graphite_plaintext; + + } + else if(!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) { + + default_port = 4242; + backend_response_checker = process_opentsdb_response; + + if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) + backend_request_formatter = format_dimension_collected_opentsdb_telnet; + else + backend_request_formatter = format_dimension_stored_opentsdb_telnet; + + } + else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) { + + default_port = 5448; + backend_response_checker = process_json_response; + + if (BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) + backend_request_formatter = format_dimension_collected_json_plaintext; + else + backend_request_formatter = format_dimension_stored_json_plaintext; + + } + else { + error("BACKEND: Unknown backend type '%s'", type); + goto cleanup; + } + + if(backend_request_formatter == NULL || backend_response_checker == NULL) { + error("BACKEND: backend is misconfigured - disabling it."); + goto cleanup; + } + + + // ------------------------------------------------------------------------ + // prepare the charts for monitoring the backend operation + + struct rusage thread; + + collected_number + 
chart_buffered_metrics = 0, + chart_lost_metrics = 0, + chart_sent_metrics = 0, + chart_buffered_bytes = 0, + chart_received_bytes = 0, + chart_sent_bytes = 0, + chart_receptions = 0, + chart_transmission_successes = 0, + chart_transmission_failures = 0, + chart_data_lost_events = 0, + chart_lost_bytes = 0, + chart_backend_reconnects = 0; + // chart_backend_latency = 0; + + RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", "backends", NULL, 130600, global_backend_update_every, RRDSET_TYPE_LINE); + rrddim_add(chart_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", "backends", NULL, 130610, global_backend_update_every, RRDSET_TYPE_AREA); + rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + + RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_LINE); + rrddim_add(chart_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(chart_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + /* + * this is misleading - we can only measure the time we need to send data + * this time is not related to the time required for the data to travel to + * the backend database and the time that server needed to process them + * + * issue #1432 and https://www.softlab.ntua.gr/facilities/documentation/unix/unix-socket-faq/unix-socket-faq-2.html + * + RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", "backends", NULL, 130620, global_backend_update_every, RRDSET_TYPE_AREA); + rrddim_add(chart_latency, "latency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + */ + + RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "NetData Backend Thread CPU usage", "milliseconds/s", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_STACKED); + rrddim_add(chart_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(chart_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + + + // ------------------------------------------------------------------------ + // prepare the backend main loop + + info("BACKEND: configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, global_backend_update_every, hostname, global_backend_prefix); + + usec_t step_ut = global_backend_update_every * USEC_PER_SEC; + time_t after = now_realtime_sec(); + int failures = 0; + heartbeat_t hb; + heartbeat_init(&hb); + + while(!netdata_exit) { + + // ------------------------------------------------------------------------ + // Wait for 
the next iteration point. + + heartbeat_next(&hb, step_ut); + time_t before = now_realtime_sec(); + debug(D_BACKEND, "BACKEND: preparing buffer for timeframe %lu to %lu", (unsigned long)after, (unsigned long)before); + + // ------------------------------------------------------------------------ + // add to the buffer the data we need to send to the backend + + netdata_thread_disable_cancelability(); + + size_t count_hosts = 0; + size_t count_charts_total = 0; + size_t count_dims_total = 0; + + rrd_rdlock(); + RRDHOST *host; + rrdhost_foreach_read(host) { + if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND|RRDHOST_FLAG_BACKEND_DONT_SEND))) { + char *name = (host == localhost)?"localhost":host->hostname; + if (!hosts_pattern || simple_pattern_matches(hosts_pattern, name)) { + rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_SEND); + info("enabled backend for host '%s'", name); + } + else { + rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_DONT_SEND); + info("disabled backend for host '%s'", name); + } + } + + if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND))) + continue; + + rrdhost_rdlock(host); + + count_hosts++; + size_t count_charts = 0; + size_t count_dims = 0; + size_t count_dims_skipped = 0; + + const char *__hostname = (host == localhost)?hostname:host->hostname; + + RRDSET *st; + rrdset_foreach_read(st, host) { + if(likely(backends_can_send_rrdset(global_backend_options, st))) { + rrdset_rdlock(st); + + count_charts++; + + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if (likely(rd->last_collected_time.tv_sec >= after)) { + chart_buffered_metrics += backend_request_formatter(b, global_backend_prefix, host, __hostname, st, rd, after, before, global_backend_options); + count_dims++; + } + else { + debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before); + count_dims_skipped++; + } + } + + rrdset_unlock(st); + } + } + + debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. 
Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped); + count_charts_total += count_charts; + count_dims_total += count_dims; + + rrdhost_unlock(host); + } + rrd_unlock(); + + netdata_thread_enable_cancelability(); + + debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts); + + // ------------------------------------------------------------------------ + + chart_buffered_bytes = (collected_number)buffer_strlen(b); + + // reset the monitoring chart counters + chart_received_bytes = + chart_sent_bytes = + chart_sent_metrics = + chart_lost_metrics = + chart_transmission_successes = + chart_transmission_failures = + chart_data_lost_events = + chart_lost_bytes = + chart_backend_reconnects = 0; + // chart_backend_latency = 0; + + if(unlikely(netdata_exit)) break; + + //fprintf(stderr, "\nBACKEND BEGIN:\n%s\nBACKEND END\n", buffer_tostring(b)); + //fprintf(stderr, "after = %lu, before = %lu\n", after, before); + + // prepare for the next iteration + // to add incrementally data to buffer + after = before; + + // ------------------------------------------------------------------------ + // if we are connected, receive a response, without blocking + + if(likely(sock != -1)) { + errno = 0; + + // loop through to collect all data + while(sock != -1 && errno != EWOULDBLOCK) { + buffer_need_bytes(response, 4096); + + ssize_t r = recv(sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT); + if(likely(r > 0)) { + // we received some data + response->len += r; + chart_received_bytes += r; + chart_receptions++; + } + else if(r == 0) { + error("BACKEND: '%s' closed the socket", destination); + close(sock); + sock = -1; + } + else { + // failed to receive data + if(errno != EAGAIN && errno != EWOULDBLOCK) { + error("BACKEND: cannot receive data from backend '%s'.", destination); + } + } + } + + // if we received data, process them + if(buffer_strlen(response)) + backend_response_checker(response); + } + + // ------------------------------------------------------------------------ + // if we are not connected, connect to a backend server + + if(unlikely(sock == -1)) { + // usec_t start_ut = now_monotonic_usec(); + size_t reconnects = 0; + + sock = connect_to_one_of(destination, default_port, &timeout, &reconnects, NULL, 0); + + chart_backend_reconnects += reconnects; + // chart_backend_latency += now_monotonic_usec() - start_ut; + } + + if(unlikely(netdata_exit)) break; + + // ------------------------------------------------------------------------ + // if we are connected, send our buffer to the backend server + + if(likely(sock != -1)) { + size_t len = buffer_strlen(b); + // usec_t start_ut = now_monotonic_usec(); + int flags = 0; +#ifdef MSG_NOSIGNAL + flags += MSG_NOSIGNAL; +#endif + + ssize_t written = send(sock, buffer_tostring(b), len, flags); + // chart_backend_latency += now_monotonic_usec() - start_ut; + if(written != -1 && (size_t)written == len) { + // we sent the data successfully + chart_transmission_successes++; + chart_sent_bytes += written; + chart_sent_metrics = chart_buffered_metrics; + + // reset the failures count + failures = 0; + + // empty the buffer + buffer_flush(b); + } + else { + // oops! we couldn't send (all or some of the) data + error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. 
Will re-connect.", destination, len, written); + chart_transmission_failures++; + + if(written != -1) + chart_sent_bytes += written; + + // increment the counter we check for data loss + failures++; + + // close the socket - we will re-open it next time + close(sock); + sock = -1; + } + } + else { + error("BACKEND: failed to update database backend '%s'", destination); + chart_transmission_failures++; + + // increment the counter we check for data loss + failures++; + } + + if(failures > buffer_on_failures) { + // too bad! we are going to lose data + chart_lost_bytes += buffer_strlen(b); + error("BACKEND: reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination); + buffer_flush(b); + failures = 0; + chart_data_lost_events++; + chart_lost_metrics = chart_buffered_metrics; + } + + if(unlikely(netdata_exit)) break; + + // ------------------------------------------------------------------------ + // update the monitoring charts + + if(likely(chart_ops->counter_done)) rrdset_next(chart_ops); + rrddim_set(chart_ops, "read", chart_receptions); + rrddim_set(chart_ops, "write", chart_transmission_successes); + rrddim_set(chart_ops, "discard", chart_data_lost_events); + rrddim_set(chart_ops, "failure", chart_transmission_failures); + rrddim_set(chart_ops, "reconnect", chart_backend_reconnects); + rrdset_done(chart_ops); + + if(likely(chart_metrics->counter_done)) rrdset_next(chart_metrics); + rrddim_set(chart_metrics, "buffered", chart_buffered_metrics); + rrddim_set(chart_metrics, "lost", chart_lost_metrics); + rrddim_set(chart_metrics, "sent", chart_sent_metrics); + rrdset_done(chart_metrics); + + if(likely(chart_bytes->counter_done)) rrdset_next(chart_bytes); + rrddim_set(chart_bytes, "buffered", chart_buffered_bytes); + rrddim_set(chart_bytes, "lost", chart_lost_bytes); + rrddim_set(chart_bytes, "sent", chart_sent_bytes); + rrddim_set(chart_bytes, "received", chart_received_bytes); + rrdset_done(chart_bytes); + + /* + if(likely(chart_latency->counter_done)) rrdset_next(chart_latency); + rrddim_set(chart_latency, "latency", chart_backend_latency); + rrdset_done(chart_latency); + */ + + getrusage(RUSAGE_THREAD, &thread); + if(likely(chart_rusage->counter_done)) rrdset_next(chart_rusage); + rrddim_set(chart_rusage, "user", thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); + rrddim_set(chart_rusage, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); + rrdset_done(chart_rusage); + + if(likely(buffer_strlen(b) == 0)) + chart_buffered_metrics = 0; + + if(unlikely(netdata_exit)) break; + } + +cleanup: + if(sock != -1) + close(sock); + + buffer_free(b); + buffer_free(response); + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/backends/backends.h b/backends/backends.h new file mode 100644 index 000000000..468e4fed8 --- /dev/null +++ b/backends/backends.h @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_BACKENDS_H +#define NETDATA_BACKENDS_H 1 + +#include "daemon/common.h" + +typedef enum backend_options { + BACKEND_OPTION_NONE = 0, + + BACKEND_SOURCE_DATA_AS_COLLECTED = (1 << 0), + BACKEND_SOURCE_DATA_AVERAGE = (1 << 1), + BACKEND_SOURCE_DATA_SUM = (1 << 2), + + BACKEND_OPTION_SEND_NAMES = (1 << 16) +} BACKEND_OPTIONS; + +#define BACKEND_OPTIONS_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM) +#define BACKEND_OPTIONS_DATA_SOURCE(backend_options) (backend_options & 
BACKEND_OPTIONS_SOURCE_BITS) + +extern int global_backend_update_every; +extern BACKEND_OPTIONS global_backend_options; +extern const char *global_backend_prefix; + +extern void *backends_main(void *ptr); + +extern BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options); + +#ifdef BACKENDS_INTERNALS + +extern int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st); +extern calculated_number backend_calculate_value_from_stored_data( + RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap + , time_t *first_timestamp // the timestamp of the first point used in this response + , time_t *last_timestamp // the timestamp that should be reported to backend +); + +extern size_t backend_name_copy(char *d, const char *s, size_t usable); +extern int discard_response(BUFFER *b, const char *backend); + +#endif // BACKENDS_INTERNALS + +#include "backends/prometheus/backend_prometheus.h" +#include "backends/graphite/graphite.h" +#include "backends/json/json.h" +#include "backends/opentsdb/opentsdb.h" + +#endif /* NETDATA_BACKENDS_H */ diff --git a/backends/graphite/Makefile.am b/backends/graphite/Makefile.am new file mode 100644 index 000000000..babdcf0df --- /dev/null +++ b/backends/graphite/Makefile.am @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/backends/graphite/Makefile.in b/backends/graphite/Makefile.in new file mode 100644 index 000000000..c214a0e75 --- /dev/null +++ b/backends/graphite/Makefile.in @@ -0,0 +1,457 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = backends/graphite +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ 
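A quick illustration of the BACKEND_OPTIONS bitmask declared in backends/backends.h above: the three BACKEND_SOURCE_DATA_* bits select how values are derived, and BACKEND_OPTION_SEND_NAMES (bit 16) switches metric names from ids to names. The sketch below is illustrative only and is not part of the patch; it just shows how the flags and the BACKEND_OPTIONS_DATA_SOURCE macro combine:

    // illustrative sketch, not part of this patch
    BACKEND_OPTIONS options = BACKEND_SOURCE_DATA_AVERAGE | BACKEND_OPTION_SEND_NAMES;

    // the macro masks out everything except the data-source bits
    if(BACKEND_OPTIONS_DATA_SOURCE(options) == BACKEND_SOURCE_DATA_AVERAGE) {
        // values are averaged from stored data before being sent
    }

    if(options & BACKEND_OPTION_SEND_NAMES) {
        // chart/dimension names (not ids) are used when formatting metrics
    }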
+ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ 
+registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/graphite/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu backends/graphite/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/backends/graphite/graphite.c b/backends/graphite/graphite.c new file mode 100644 index 000000000..805703893 --- /dev/null +++ b/backends/graphite/graphite.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define BACKENDS_INTERNALS +#include "graphite.h" + +// ---------------------------------------------------------------------------- +// graphite backend + +int format_dimension_collected_graphite_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +) { + (void)host; + (void)after; + (void)before; + + char chart_name[RRD_ID_LENGTH_MAX + 1]; + char dimension_name[RRD_ID_LENGTH_MAX + 1]; + backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); + backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); + + buffer_sprintf( + b + , "%s.%s.%s.%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n" + , prefix + , hostname + , chart_name + , dimension_name + , (host->tags)?";":"" + , (host->tags)?host->tags:"" + , rd->last_collected_value + , (unsigned long long)rd->last_collected_time.tv_sec + ); + + return 1; +} + +int format_dimension_stored_graphite_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +) { + (void)host; + + char chart_name[RRD_ID_LENGTH_MAX + 1]; + char dimension_name[RRD_ID_LENGTH_MAX + 1]; + backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); + backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); + + time_t first_t = after, last_t = before; + calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t); + + if(!isnan(value)) { + + buffer_sprintf( + b + , "%s.%s.%s.%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n" + , prefix + , hostname + , chart_name + , dimension_name + , (host->tags)?";":"" + , (host->tags)?host->tags:"" + , value + , (unsigned long long) last_t + ); + + return 1; + } + return 0; +} + +int process_graphite_response(BUFFER *b) { + return discard_response(b, "graphite"); +} + + diff --git a/backends/graphite/graphite.h b/backends/graphite/graphite.h new file mode 100644 index 000000000..b7b0930fa --- /dev/null +++ b/backends/graphite/graphite.h @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + + +#ifndef NETDATA_BACKEND_GRAPHITE_H +#define NETDATA_BACKEND_GRAPHITE_H + +#include "backends/backends.h" + +extern int format_dimension_collected_graphite_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to 
override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +); + +extern int format_dimension_stored_graphite_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +); + +extern int process_graphite_response(BUFFER *b); + +#endif //NETDATA_BACKEND_GRAPHITE_H diff --git a/backends/json/Makefile.am b/backends/json/Makefile.am new file mode 100644 index 000000000..babdcf0df --- /dev/null +++ b/backends/json/Makefile.am @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/backends/json/Makefile.in b/backends/json/Makefile.in new file mode 100644 index 000000000..88fd11a0c --- /dev/null +++ b/backends/json/Makefile.in @@ -0,0 +1,457 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = backends/json +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ 
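For reference, the buffer_sprintf format string in format_dimension_collected_graphite_plaintext (backends/graphite/graphite.c above) emits one graphite plaintext metric per dimension, in the form prefix.hostname.chart.dimension[;tags] value timestamp. With hypothetical values (prefix "netdata", host "myhost" tagged "env=prod", chart "system.cpu", dimension "user"), an emitted line would look like:

    netdata.myhost.system.cpu.user;env=prod 25 1541598164

The stored-data variant is identical except that the value is the calculated (floating point) number for the [after, before] window and the timestamp is the last point actually used.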
+ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ 
+registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/json/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu backends/json/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/backends/json/json.c b/backends/json/json.c new file mode 100644 index 000000000..a53c0f143 --- /dev/null +++ b/backends/json/json.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define BACKENDS_INTERNALS +#include "json.h" + +// ---------------------------------------------------------------------------- +// json backend + +int format_dimension_collected_json_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +) { + (void)host; + (void)after; + (void)before; + (void)backend_options; + + const char *tags_pre = "", *tags_post = "", *tags = host->tags; + if(!tags) tags = ""; + + if(*tags) { + if(*tags == '{' || *tags == '[' || *tags == '"') { + tags_pre = "\"host_tags\":"; + tags_post = ","; + } + else { + tags_pre = "\"host_tags\":\""; + tags_post = "\","; + } + } + + buffer_sprintf(b, "{" + "\"prefix\":\"%s\"," + "\"hostname\":\"%s\"," + "%s%s%s" + + "\"chart_id\":\"%s\"," + "\"chart_name\":\"%s\"," + "\"chart_family\":\"%s\"," + "\"chart_context\": \"%s\"," + "\"chart_type\":\"%s\"," + "\"units\": \"%s\"," + + "\"id\":\"%s\"," + "\"name\":\"%s\"," + "\"value\":" COLLECTED_NUMBER_FORMAT "," + + "\"timestamp\": %llu}\n", + prefix, + hostname, + tags_pre, tags, tags_post, + + st->id, + st->name, + st->family, + st->context, + st->type, + st->units, + + rd->id, + rd->name, + rd->last_collected_value, + + (unsigned long long) rd->last_collected_time.tv_sec + ); + + return 1; +} + +int format_dimension_stored_json_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +) { + (void)host; + + time_t first_t = after, last_t = before; + calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t); + + if(!isnan(value)) { + const char *tags_pre = "", *tags_post = "", *tags = host->tags; + if(!tags) tags = ""; + + if(*tags) { + if(*tags == '{' || *tags == '[' || *tags == '"') { + tags_pre = "\"host_tags\":"; + tags_post = ","; + } + else { + tags_pre = "\"host_tags\":\""; + tags_post = "\","; + } + } + + buffer_sprintf(b, "{" + "\"prefix\":\"%s\"," + "\"hostname\":\"%s\"," + "%s%s%s" + + "\"chart_id\":\"%s\"," + "\"chart_name\":\"%s\"," + "\"chart_family\":\"%s\"," + "\"chart_context\": \"%s\"," + "\"chart_type\":\"%s\"," + "\"units\": \"%s\"," + + "\"id\":\"%s\"," + "\"name\":\"%s\"," + "\"value\":" CALCULATED_NUMBER_FORMAT "," + + "\"timestamp\": %llu}\n", + prefix, + hostname, + tags_pre, tags, tags_post, + + st->id, + st->name, + st->family, + st->context, + st->type, + st->units, + + rd->id, + rd->name, + value, + + (unsigned long long) last_t + ); + + return 1; + } + return 0; +} + +int process_json_response(BUFFER *b) { + return discard_response(b, "json"); +} + + diff --git a/backends/json/json.h b/backends/json/json.h new file mode 100644 index 
000000000..11015652e --- /dev/null +++ b/backends/json/json.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_BACKEND_JSON_H +#define NETDATA_BACKEND_JSON_H + +#include "backends/backends.h" + +extern int format_dimension_collected_json_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +); + +extern int format_dimension_stored_json_plaintext( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +); + +extern int process_json_response(BUFFER *b); + +#endif //NETDATA_BACKEND_JSON_H diff --git a/backends/nc-backend.sh b/backends/nc-backend.sh new file mode 100755 index 000000000..7280f86a0 --- /dev/null +++ b/backends/nc-backend.sh @@ -0,0 +1,158 @@ +#!/usr/bin/env bash + +# SPDX-License-Identifier: GPL-3.0-or-later + +# This is a simple backend database proxy, written in BASH, using the nc command. +# Run the script without any parameters for help. + +MODE="${1}" +MY_PORT="${2}" +BACKEND_HOST="${3}" +BACKEND_PORT="${4}" +FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}" + +log() { + logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}" +} + +mync() { + local ret + + log "Running: nc ${*}" + nc "${@}" + ret=$? + + log "nc stopped with return code ${ret}." + + return ${ret} +} + +listen_save_replay_forever() { + local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended + + while true + do + log "Starting nc to listen on port ${port} and save metrics to ${file}" + + started=$(date +%s) + mync -l -p "${port}" | tee -a -p --output-error=exit "${file}" + ended=$(date +%s) + + if [ -s "${file}" ] + then + if [ ! -z "${real_backend_host}" ] && [ ! -z "${real_backend_port}" ] + then + log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}" + + mync "${real_backend_host}" "${real_backend_port}" <"${file}" + ret=$? + + if [ ${ret} -eq 0 ] + then + log "Successfully sent the metrics to ${real_backend_host}:${real_backend_port}" + mv "${file}" "${file}.old" + touch "${file}" + else + log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}" + fi + else + log "No backend configured - appending more data to ${file}" + fi + fi + + # prevent a CPU hungry infinite loop + # if nc cannot listen to port + if [ $((ended - started)) -lt 5 ] + then + log "nc has been stopped too fast." + delay=30 + else + delay=1 + fi + + log "Waiting ${delay} seconds before listening again for data." + sleep ${delay} + done +} + +if [ "${MODE}" = "start" ] + then + + # start the listener, in exclusive mode + # only one can use the same file/port at a time + { + flock -n 9 + # shellcheck disable=SC2181 + if [ $?
-ne 0 ] + then + log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?" + exit 2 + fi + + # save our PID to the lock file + echo "$$" >"${FILE}.lock" + + listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}" + ret=$? + + log "listener exited." + exit ${ret} + + } 9>>"${FILE}.lock" + + # we can only get here if ${FILE}.lock cannot be created + log "Cannot create file ${FILE}." + exit 3 + +elif [ "${MODE}" = "stop" ] + then + + { + flock -n 9 + # shellcheck disable=SC2181 + if [ $? -ne 0 ] + then + pid=$(<"${FILE}".lock) + log "Killing process ${pid}..." + kill -TERM "-${pid}" + exit 0 + fi + + log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?" + exit 4 + + } 9<"${FILE}.lock" + + log "File ${FILE}.lock does not exist. Is a collector running?" + exit 5 + +else + + cat <&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = backends/opentsdb +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo 
" GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ 
+docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/opentsdb/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu backends/opentsdb/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
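Usage note for backends/nc-backend.sh above: the script takes a mode ("start" or "stop"), the local port to listen on, and optionally the host and port of the real backend to which saved metrics are replayed. A hypothetical invocation (the graphite host name and ports here are examples only):

    # listen on local port 2003, save incoming metrics under /tmp,
    # and replay them to the real graphite server whenever it is reachable
    ./nc-backend.sh start 2003 graphite.example.com 2003

    # stop the listener started above
    ./nc-backend.sh stop 2003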
+.NOEXPORT: diff --git a/backends/opentsdb/opentsdb.c b/backends/opentsdb/opentsdb.c new file mode 100644 index 000000000..6e3a31ab6 --- /dev/null +++ b/backends/opentsdb/opentsdb.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define BACKENDS_INTERNALS +#include "opentsdb.h" + +// ---------------------------------------------------------------------------- +// opentsdb backend + +int format_dimension_collected_opentsdb_telnet( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +) { + (void)host; + (void)after; + (void)before; + + char chart_name[RRD_ID_LENGTH_MAX + 1]; + char dimension_name[RRD_ID_LENGTH_MAX + 1]; + backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); + backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); + + buffer_sprintf( + b + , "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s\n" + , prefix + , chart_name + , dimension_name + , (unsigned long long)rd->last_collected_time.tv_sec + , rd->last_collected_value + , hostname + , (host->tags)?" ":"" + , (host->tags)?host->tags:"" + ); + + return 1; +} + +int format_dimension_stored_opentsdb_telnet( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +) { + (void)host; + + time_t first_t = after, last_t = before; + calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t); + + char chart_name[RRD_ID_LENGTH_MAX + 1]; + char dimension_name[RRD_ID_LENGTH_MAX + 1]; + backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); + backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); + + if(!isnan(value)) { + + buffer_sprintf( + b + , "put %s.%s.%s %llu " CALCULATED_NUMBER_FORMAT " host=%s%s%s\n" + , prefix + , chart_name + , dimension_name + , (unsigned long long) last_t + , value + , hostname + , (host->tags)?" 
":"" + , (host->tags)?host->tags:"" + ); + + return 1; + } + return 0; +} + +int process_opentsdb_response(BUFFER *b) { + return discard_response(b, "opentsdb"); +} + + diff --git a/backends/opentsdb/opentsdb.h b/backends/opentsdb/opentsdb.h new file mode 100644 index 000000000..fc83b39ca --- /dev/null +++ b/backends/opentsdb/opentsdb.h @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_BACKEND_OPENTSDB_H +#define NETDATA_BACKEND_OPENTSDB_H + +#include "backends/backends.h" + +extern int format_dimension_collected_opentsdb_telnet( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +); + +extern int format_dimension_stored_opentsdb_telnet( + BUFFER *b // the buffer to write data to + , const char *prefix // the prefix to use + , RRDHOST *host // the host this chart comes from + , const char *hostname // the hostname (to override host->hostname) + , RRDSET *st // the chart + , RRDDIM *rd // the dimension + , time_t after // the start timestamp + , time_t before // the end timestamp + , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap +); + +extern int process_opentsdb_response(BUFFER *b); + + +#endif //NETDATA_BACKEND_OPENTSDB_H diff --git a/backends/prometheus/Makefile.am b/backends/prometheus/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/backends/prometheus/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/backends/prometheus/Makefile.in b/backends/prometheus/Makefile.in new file mode 100644 index 000000000..76813e758 --- /dev/null +++ b/backends/prometheus/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = backends/prometheus +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/prometheus/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu backends/prometheus/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md new file mode 100644 index 000000000..826cf051b --- /dev/null +++ b/backends/prometheus/README.md @@ -0,0 +1,376 @@ +> IMPORTANT: the format netdata sends metrics to prometheus has changed since netdata v1.7. The new prometheus backend for netdata supports a lot more features and is aligned to the development of the rest of the netdata backends. + +# Using netdata with Prometheus + +Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. 
Recently netdata added support for Prometheus. I'm going to quickly show you how to install both netdata and prometheus on the same server. We can then use Grafana, pointed at Prometheus, to obtain the long-term metrics netdata offers. I'm assuming we are starting from a fresh Ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).
+
+## Installing netdata and prometheus
+
+### Installing netdata
+There are a number of ways to install netdata according to [Installation](https://github.com/netdata/netdata/wiki/Installation).
+The suggested way is to install the latest netdata and keep it upgraded automatically, using the one-line installation:
+
+```
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+At this point we should have netdata listening on port 19999. Point your browser here:
+
+```
+http://your.netdata.ip:19999
+```
+
+*(replace `your.netdata.ip` with the IP or hostname of the server running netdata)*
+
+### Installing Prometheus
+In order to install prometheus we are going to introduce our own systemd startup script along with an example prometheus.yml configuration. Prometheus needs to be pointed at your server's specific target URL so it can scrape netdata's API. Prometheus uses a pull model, meaning netdata is the passive client within this architecture. Prometheus always initiates the connection with netdata.
+
+#### Download Prometheus
+
+```sh
+wget -O /tmp/prometheus-2.3.2.linux-amd64.tar.gz https://github.com/prometheus/prometheus/releases/download/v2.3.2/prometheus-2.3.2.linux-amd64.tar.gz
+```
+
+#### Create prometheus system user
+
+```sh
+sudo useradd -r prometheus
+```
+
+#### Create prometheus directory
+
+```sh
+sudo mkdir /opt/prometheus
+sudo chown prometheus:prometheus /opt/prometheus
+```
+
+#### Untar prometheus directory
+
+```sh
+sudo tar -xvf /tmp/prometheus-2.3.2.linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+#### Install prometheus.yml
+
+We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
+
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running netdata.
+
+```yaml
+# my global config
+global:
+  scrape_interval:     5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+  evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+  external_labels:
+      monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first.rules"
+  # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+      - targets: ['0.0.0.0:9090']
+
+  - job_name: 'netdata-scrape'
+
+    metrics_path: '/api/v1/allmetrics'
+    params:
+      # format: prometheus | prometheus_all_hosts
+      # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
+      format: [prometheus]
+      #
+      # sources: as-collected | raw | average | sum | volume
+      # default is: average
+      #source: [as-collected]
+      #
+      # server name for this prometheus - the default is the client IP
+      # for netdata to uniquely identify it
+      #server: ['prometheus1']
+    honor_labels: true
+
+    static_configs:
+      - targets: ['{your.netdata.ip}:19999']
+```
+#### Install nodes.yml
+
+The following is completely optional; it will enable Prometheus to generate alerts from some netdata sources. Tweak the values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a *- "nodes.yml"* entry under the *rule_files:* section in the example prometheus.yml file above.
+```
+groups:
+- name: nodes
+
+  rules:
+  - alert: node_high_cpu_usage_70
+    expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70
+    for: 1m
+    annotations:
+      description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+      summary: CPU alert for container node '{{ $labels.job }}'
+
+  - alert: node_high_memory_usage_70
+    expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+      * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+    for: 1m
+    annotations:
+      description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+      summary: Memory alert for container node '{{ $labels.job }}'
+
+  - alert: node_low_root_filesystem_space_20
+    expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+      * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+    for: 1m
+    annotations:
+      description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+      summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+  - alert: node_root_filesystem_fill_rate_6h
+    expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+    for: 1h
+    labels:
+      severity: critical
+    annotations:
+      description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+      summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+```
+
+#### Install prometheus.service
+
+Save this service file as `/etc/systemd/system/prometheus.service`:
+
+```
+[Unit]
+Description=Prometheus Server
+AssertPathExists=/opt/prometheus
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/prometheus
+User=prometheus
+Group=prometheus
+ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info
+ExecReload=/bin/kill -SIGHUP $MAINPID
+ExecStop=/bin/kill -SIGINT $MAINPID
+
+[Install]
+WantedBy=multi-user.target
+```
+
+#### Start Prometheus
+
+```
+sudo systemctl start prometheus
+sudo systemctl enable prometheus
+```
+
+Prometheus should now start and listen on port 9090. Point your browser there.
+
+If everything is working correctly, when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click it, then click on 'Targets'; we should see the netdata host as a scraped target.
+
+---
+
+## netdata support for prometheus
+
+> IMPORTANT: the format in which netdata sends metrics to prometheus has changed since netdata v1.6.
The new format allows easier queries for metrics and supports both `as collected` and normalized metrics.
+
+Before explaining the changes, we have to understand the key differences between netdata and prometheus.
+
+### understanding netdata metrics
+
+##### charts
+
+Each chart in netdata has several properties (common to all its metrics):
+
+- `chart_id` - uniquely identifies a chart.
+
+- `chart_name` - a more human friendly name for `chart_id`, also unique.
+
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
+
+- `family` - groups a set of charts together. It is used as the submenu of the dashboard.
+
+- `units` - the units for all the metrics attached to the chart.
+
+##### dimensions
+
+Then each netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of measurement, and are contextually in the same category (i.e. the metrics for disk bandwidth are `read` and `write` and they are both in the same chart).
+
+### netdata data source
+
+netdata can send metrics to prometheus from 3 data sources:
+
+- `as collected` or `raw` - this data source sends the metrics to prometheus as they are collected. No conversion is done by netdata. The latest value for each metric is simply given to prometheus. This is the method prometheus prefers, but it is also the hardest to work with. To work with this data source, you will need to understand how to get meaningful values out of these metrics.
+
+  The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+  If the metric is a counter (`incremental` in netdata lingo), `_total` is appended to the context.
+
+  Unlike prometheus, netdata allows each dimension of a chart to have a different algorithm and conversion constants (`multiplier` and `divisor`). In this case, when the dimensions of a chart are heterogeneous, netdata will use this format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+
+- `average` - this data source uses the netdata database to send the metrics to prometheus as they are presented on the netdata dashboard. So, all the metrics are sent as gauges, in the units they are presented in on the netdata dashboard charts. This is the easiest to work with.
+
+  The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+  When this source is used, netdata keeps track of the last access time for each prometheus server fetching the metrics. This last access time is used on subsequent queries by the same prometheus server to identify the time-frame over which the `average` will be calculated. So, no matter how frequently prometheus scrapes netdata, it will get all the database data. To identify each prometheus server, netdata uses by default the IP of the client fetching the metrics. If there are multiple prometheus servers fetching data from the same netdata, using the same IP, each prometheus server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the prometheus server.
+
+- `sum` or `volume` - like `average`, but instead of averaging the values, it sums them.
+
+  The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+  All the other operations are the same as with `average`.
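+
+To make the three naming schemes concrete, here is a sketch of how a single dimension (`user` of the `system.cpu` chart) would be exposed under each data source. The metric names follow the formats above; the `as collected` and `average` lines mirror the sample responses shown later in this page, while the `sum` line and all values are illustrative, not real output:
+
+```
+# as collected / raw - an incremental dimension, so `_total` is appended to the context
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488
+
+# average - a gauge, in the units shown on the dashboard (percentage)
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762
+
+# sum / volume - a gauge, summing the stored values instead of averaging them
+netdata_system_cpu_percentage_sum{chart="system.cpu",family="cpu",dimension="user"} 21.0265
+```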
+
+Keep in mind that early versions of netdata were sending the metrics as: `CHART_DIMENSION{}`.
+
+
+### Querying Metrics
+
+Fetch this URL with your web browser:
+
+`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
+
+*(replace `your.netdata.ip` with the IP or hostname of your netdata server)*
+
+netdata will respond with all the metrics it sends to prometheus.
+
+If you search that page for `"system.cpu"` you will find all the metrics netdata is exporting to prometheus for this chart. `system.cpu` is the chart name on the netdata dashboard (on the netdata dashboard all charts have a text heading, such as `Total CPU utilization (system.cpu)`; what we are interested in here is the chart name: `system.cpu`).
+
+Searching for `"system.cpu"` reveals:
+
+```sh
+# COMMENT homogeneus chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
+```
+*(netdata response for `system.cpu` with source=`average`)*
+
+In `average` or `sum` data sources, all values are normalized and are reported to prometheus as
gauges. Now, use the 'expression' text field in prometheus and begin typing the metric we are looking for: `netdata_system_cpu`. You should see that the field begins to auto-complete, as prometheus knows about this metric.
+
+If the data source was `as collected`, the response would be:
+
+```sh
+# COMMENT homogeneus chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
+```
+*(netdata response for `system.cpu` with source=`as-collected`)*
+
+For more information, check the prometheus documentation.
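+
+As a quick sketch of working with the `as collected` source: because these metrics are raw counters, in prometheus you would typically wrap them in `rate()` or `irate()` to get meaningful values. An illustrative query, assuming the default `netdata_` prefix:
+
+```
+# approximate per-second CPU time spent in user mode, averaged over the last minute
+rate(netdata_system_cpu_total{dimension="user"}[1m])
+```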
+
+### Streaming data from upstream hosts
+
+The `format=prometheus` parameter only exports the host's netdata metrics. If you are using the master/slave functionality of netdata, this ignores any upstream hosts, so you should consider using the following in your **prometheus.yml**:
+
+```
+    metrics_path: '/api/v1/allmetrics'
+    params:
+      format: [prometheus_all_hosts]
+    honor_labels: true
+```
+
+This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names provided.
+
+### timestamps
+
+To pass the metrics through the prometheus pushgateway, netdata supports the option `&timestamps=no` to send the metrics without timestamps.
+
+## netdata host variables
+
+netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default.
+
+To expose them, append `variables=yes` to the netdata URL.
+
+### TYPE and HELP
+
+To save bandwidth, and because prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If wanted, they can be re-enabled via `types=yes` and `help=yes`, e.g. `/api/v1/allmetrics?format=prometheus&types=yes&help=yes`
+
+### Names and IDs
+
+netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers, as read by the system, and names are human friendly labels (also unique).
+
+Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+The default is controlled in `netdata.conf`:
+
+```
+[backend]
+    send names instead of ids = yes | no
+```
+
+You can override it from prometheus, by appending to the URL:
+
+* `&names=no` to get IDs (the old behaviour)
+* `&names=yes` to get names
+
+### Filtering metrics sent to prometheus
+
+netdata can filter the metrics it sends to prometheus with this setting:
+
+```
+[backend]
+    send charts matching = *
+```
+
+This setting accepts a space-separated list of patterns to match the **charts** to be sent to prometheus. Each pattern can use `*` as a wildcard, any number of times (e.g. `*a*b*c*` is valid). Patterns starting with `!` give a negative match (e.g. `!*.bad users.* groups.*` will send all the users and groups except the `bad` user and `bad` group). The order is important: the first match (positive or negative), left to right, is used.
+
+### Changing the prefix of netdata metrics
+
+netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
+
+```
+[backend]
+    prefix = netdata
+```
+
+It can also be changed from the URL, by appending `&prefix=netdata`.
+
+### accuracy of `average` and `sum` data sources
+
+When the data source is set to `average` or `sum`, netdata remembers the last access of each client fetching prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since that last access. This means that prometheus servers do not lose data when they access netdata with data source = `average` or `sum`.
+
+To uniquely identify each prometheus server, netdata uses the IP of the client accessing the metrics. If, however, the IP is not enough to identify a single prometheus server (e.g. when prometheus servers are accessing netdata through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus server may append `&server=NAME` to the URL. This `NAME` is used by netdata to uniquely identify each prometheus server and keep track of its last access time.
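+
+As a quick sanity check, you can simulate what a named prometheus server would receive by fetching the endpoint yourself (illustrative; `prometheus1` is an arbitrary name, and `average` is the default source anyway):
+
+```sh
+curl 'http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=average&server=prometheus1'
+```
+
+Repeating the same request shows only the entries collected since the previous fetch for that `NAME`, which is exactly the behaviour described above.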
diff --git a/backends/prometheus/backend_prometheus.c b/backends/prometheus/backend_prometheus.c new file mode 100644 index 000000000..223b3f9f0 --- /dev/null +++ b/backends/prometheus/backend_prometheus.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define BACKENDS_INTERNALS +#include "backend_prometheus.h" + +// ---------------------------------------------------------------------------- +// PROMETHEUS +// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts + +static struct prometheus_server { + const char *server; + uint32_t hash; + RRDHOST *host; + time_t last_access; + struct prometheus_server *next; +} *prometheus_server_root = NULL; + +static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now) { + static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER; + + uint32_t hash = simple_hash(server); + + netdata_mutex_lock(&prometheus_server_root_mutex); + + struct prometheus_server *ps; + for(ps = prometheus_server_root; ps ;ps = ps->next) { + if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) { + time_t last = ps->last_access; + ps->last_access = now; + netdata_mutex_unlock(&prometheus_server_root_mutex); + return last; + } + } + + ps = callocz(1, sizeof(struct prometheus_server)); + ps->server = strdupz(server); + ps->hash = hash; + ps->host = host; + ps->last_access = now; + ps->next = prometheus_server_root; + prometheus_server_root = ps; + + netdata_mutex_unlock(&prometheus_server_root_mutex); + return 0; +} + +static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) { + size_t n; + + for(n = 0; *s && n < usable ; d++, s++, n++) { + register char c = *s; + + if(!isalnum(c)) *d = '_'; + else *d = c; + } + *d = '\0'; + + return n; +} + +static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) { + size_t n; + + // make sure we can escape one character without overflowing the buffer + usable--; + + for(n = 0; *s && n < usable ; d++, s++, n++) { + register char c = *s; + + if(unlikely(c == '"' || c == '\\' || c == '\n')) { + *d++ = '\\'; + n++; + } + *d = c; + } + *d = '\0'; + + return n; +} + +static inline char *prometheus_units_copy(char *d, const char *s, size_t usable) { + const char *sorig = s; + char *ret = d; + size_t n; + + *d++ = '_'; + for(n = 1; *s && n < usable ; d++, s++, n++) { + register char c = *s; + + if(!isalnum(c)) *d = '_'; + else *d = c; + } + + if(n == 2 && sorig[0] == '%') { + n = 0; + d = ret; + s = "_percent"; + for( ; *s && n < usable ; n++) *d++ = *s++; + } + else if(n > 3 && sorig[n-3] == '/' && sorig[n-2] == 's') { + n = n - 2; + d -= 2; + s = "_persec"; + for( ; *s && n < usable ; n++) *d++ = *s++; + } + + *d = '\0'; + + return ret; +} + + +#define PROMETHEUS_ELEMENT_MAX 256 +#define PROMETHEUS_LABELS_MAX 1024 +#define PROMETHEUS_VARIABLE_MAX 256 + +struct host_variables_callback_options { + RRDHOST *host; + BUFFER *wb; + BACKEND_OPTIONS backend_options; + PROMETHEUS_OUTPUT_OPTIONS output_options; + const char *prefix; + const char *labels; + time_t now; + int host_header_printed; + char name[PROMETHEUS_VARIABLE_MAX+1]; +}; + +static int print_host_variables(RRDVAR *rv, void *data) { + struct host_variables_callback_options *opts = data; + + if(rv->options & (RRDVAR_OPTION_CUSTOM_HOST_VAR|RRDVAR_OPTION_CUSTOM_CHART_VAR)) { + if(!opts->host_header_printed) { + opts->host_header_printed = 1; + + if(opts->output_options & PROMETHEUS_OUTPUT_HELP) { + 
buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n"); + } + } + + calculated_number value = rrdvar2number(rv); + if(isnan(value) || isinf(value)) { + if(opts->output_options & PROMETHEUS_OUTPUT_HELP) + buffer_sprintf(opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rv->name, (isnan(value))?"NAN":"INF"); + + return 0; + } + + char *label_pre = ""; + char *label_post = ""; + if(opts->labels && *opts->labels) { + label_pre = "{"; + label_post = "}"; + } + + prometheus_name_copy(opts->name, rv->name, sizeof(opts->name)); + + if(opts->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) + buffer_sprintf(opts->wb + , "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n" + , opts->prefix + , opts->name + , label_pre + , opts->labels + , label_post + , value + , ((rv->last_updated) ? rv->last_updated : opts->now) * 1000ULL + ); + else + buffer_sprintf(opts->wb, "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT "\n" + , opts->prefix + , opts->name + , label_pre + , opts->labels + , label_post + , value + ); + + return 1; + } + + return 0; +} + +static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb, const char *prefix, BACKEND_OPTIONS backend_options, time_t after, time_t before, int allhosts, PROMETHEUS_OUTPUT_OPTIONS output_options) { + rrdhost_rdlock(host); + + char hostname[PROMETHEUS_ELEMENT_MAX + 1]; + prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX); + + char labels[PROMETHEUS_LABELS_MAX + 1] = ""; + if(allhosts) { + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) + buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS); + else + buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version); + + if(host->tags && *(host->tags)) { + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) { + buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS); + + // deprecated, exists only for compatibility with older queries + buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS); + } + else { + buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, host->tags); + + // deprecated, exists only for compatibility with older queries + buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, host->tags); + } + + } + + snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname); + } + else { + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) + buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS); + else + buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version); + + if(host->tags && *(host->tags)) { + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) { + buffer_sprintf(wb, "netdata_host_tags_info{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS); + + // deprecated, exists only for compatibility with older queries + buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS); + } + else { + buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", host->tags); + + // deprecated, exists only for 
compatibility with older queries + buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", host->tags); + } + } + } + + // send custom variables set for the host + if(output_options & PROMETHEUS_OUTPUT_VARIABLES){ + struct host_variables_callback_options opts = { + .host = host, + .wb = wb, + .labels = (labels[0] == ',')?&labels[1]:labels, + .backend_options = backend_options, + .output_options = output_options, + .prefix = prefix, + .now = now_realtime_sec(), + .host_header_printed = 0 + }; + foreach_host_variable_callback(host, print_host_variables, &opts); + } + + // for each chart + RRDSET *st; + rrdset_foreach_read(st, host) { + char chart[PROMETHEUS_ELEMENT_MAX + 1]; + char context[PROMETHEUS_ELEMENT_MAX + 1]; + char family[PROMETHEUS_ELEMENT_MAX + 1]; + char units[PROMETHEUS_ELEMENT_MAX + 1] = ""; + + prometheus_label_copy(chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX); + prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX); + prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX); + + if(likely(backends_can_send_rrdset(backend_options, st))) { + rrdset_rdlock(st); + + int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED); + int homogeneus = 1; + if(as_collected) { + if(rrdset_flag_check(st, RRDSET_FLAG_HOMEGENEOUS_CHECK)) + rrdset_update_heterogeneous_flag(st); + + if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) + homogeneus = 0; + } + else { + if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE) + prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX); + } + + if(unlikely(output_options & PROMETHEUS_OUTPUT_HELP)) + buffer_sprintf(wb, "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n" + , (homogeneus)?"homogeneus":"heterogeneous" + , (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id + , st->context + , st->family + , st->units + ); + + // for each dimension + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rd->collections_counter) { + char dimension[PROMETHEUS_ELEMENT_MAX + 1]; + char *suffix = ""; + + if (as_collected) { + // we need as-collected / raw data + + const char *t = "gauge", *h = "gives"; + if(rd->algorithm == RRD_ALGORITHM_INCREMENTAL || + rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) { + t = "counter"; + h = "delta gives"; + suffix = "_total"; + } + + if(homogeneus) { + // all the dimensions of the chart, has the same algorithm, multiplier and divisor + // we add all dimensions as labels + + prometheus_label_copy(dimension, (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX); + + if(unlikely(output_options & PROMETHEUS_OUTPUT_HELP)) + buffer_sprintf(wb + , "# COMMENT %s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n" + , prefix + , context + , suffix + , (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id + , st->context + , st->family + , (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? 
rd->name : rd->id + , rd->multiplier + , rd->divisor + , h + , st->units + , t + ); + + if(unlikely(output_options & PROMETHEUS_OUTPUT_TYPES)) + buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s %s\n" + , prefix + , context + , suffix + , t + ); + + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) + buffer_sprintf(wb + , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n" + , prefix + , context + , suffix + , chart + , family + , dimension + , labels + , rd->last_collected_value + , timeval_msec(&rd->last_collected_time) + ); + else + buffer_sprintf(wb + , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n" + , prefix + , context + , suffix + , chart + , family + , dimension + , labels + , rd->last_collected_value + ); + } + else { + // the dimensions of the chart, do not have the same algorithm, multiplier or divisor + // we create a metric per dimension + + prometheus_name_copy(dimension, (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX); + + if(unlikely(output_options & PROMETHEUS_OUTPUT_HELP)) + buffer_sprintf(wb + , "# COMMENT %s_%s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n" + , prefix + , context + , dimension + , suffix + , (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id + , st->context + , st->family + , (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id + , rd->multiplier + , rd->divisor + , h + , st->units + , t + ); + + if(unlikely(output_options & PROMETHEUS_OUTPUT_TYPES)) + buffer_sprintf(wb, "# COMMENT TYPE %s_%s_%s%s %s\n" + , prefix + , context + , dimension + , suffix + , t + ); + + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) + buffer_sprintf(wb + , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n" + , prefix + , context + , dimension + , suffix + , chart + , family + , labels + , rd->last_collected_value + , timeval_msec(&rd->last_collected_time) + ); + else + buffer_sprintf(wb + , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n" + , prefix + , context + , dimension + , suffix + , chart + , family + , labels + , rd->last_collected_value + ); + } + } + else { + // we need average or sum of the data + + time_t first_t = after, last_t = before; + calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t); + + if(!isnan(value) && !isinf(value)) { + + if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE) + suffix = "_average"; + else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM) + suffix = "_sum"; + + prometheus_label_copy(dimension, (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX); + + if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP)) + buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n" + , prefix + , context + , units + , suffix + , (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? 
rd->name : rd->id + , st->units + , (unsigned long long)first_t + , (unsigned long long)last_t + ); + + if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES)) + buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s%s gauge\n" + , prefix + , context + , units + , suffix + ); + + if(output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) + buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT " %llu\n" + , prefix + , context + , units + , suffix + , chart + , family + , dimension + , labels + , value + , last_t * MSEC_PER_SEC + ); + else + buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT "\n" + , prefix + , context + , units + , suffix + , chart + , family + , dimension + , labels + , value + ); + } + } + } + } + + rrdset_unlock(st); + } + } + + rrdhost_unlock(host); +} + +static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, BACKEND_OPTIONS backend_options, const char *server, time_t now, PROMETHEUS_OUTPUT_OPTIONS output_options) { + if(!server || !*server) server = "default"; + + time_t after = prometheus_server_last_access(server, host, now); + + int first_seen = 0; + if(!after) { + after = now - global_backend_update_every; + first_seen = 1; + } + + if(after > now) { + // oops! this should never happen + after = now - global_backend_update_every; + } + + if(output_options & PROMETHEUS_OUTPUT_HELP) { + int show_range = 1; + char *mode; + if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) { + mode = "as collected"; + show_range = 0; + } + else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE) + mode = "average"; + else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM) + mode = "sum"; + else + mode = "unknown"; + + buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s" + , host->hostname + , (first_seen)?"FIRST SEEN ":"" + , server + , mode + , (unsigned long)((first_seen)?0:(now - after)) + , (first_seen)?"never":"seconds ago" + ); + + if(show_range) + buffer_sprintf(wb, ", time range %lu to %lu", (unsigned long)after, (unsigned long)now); + + buffer_strcat(wb, "\n\n"); + } + + return after; +} + +void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options) { + time_t before = now_realtime_sec(); + + // we start at the point we had stopped before + time_t after = prometheus_preparation(host, wb, backend_options, server, before, output_options); + + rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, backend_options, after, before, 0, output_options); +} + +void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options) { + time_t before = now_realtime_sec(); + + // we start at the point we had stopped before + time_t after = prometheus_preparation(host, wb, backend_options, server, before, output_options); + + rrd_rdlock(); + rrdhost_foreach_read(host) { + rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, backend_options, after, before, 1, output_options); + } + rrd_unlock(); +} diff --git a/backends/prometheus/backend_prometheus.h b/backends/prometheus/backend_prometheus.h new file mode 100644 index 000000000..dc4ec753f --- /dev/null +++ 
b/backends/prometheus/backend_prometheus.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_PROMETHEUS_H
+#define NETDATA_BACKEND_PROMETHEUS_H 1
+
+#include "backends/backends.h"
+
+typedef enum prometheus_output_flags {
+    PROMETHEUS_OUTPUT_NONE       = 0,
+    PROMETHEUS_OUTPUT_HELP       = (1 << 0),
+    PROMETHEUS_OUTPUT_TYPES      = (1 << 1),
+    PROMETHEUS_OUTPUT_NAMES      = (1 << 2),
+    PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
+    PROMETHEUS_OUTPUT_VARIABLES  = (1 << 4)
+} PROMETHEUS_OUTPUT_OPTIONS;
+
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+
+#endif //NETDATA_BACKEND_PROMETHEUS_H
diff --git a/build/Dockerfile b/build/Dockerfile
new file mode 100644
index 000000000..8a816b825
--- /dev/null
+++ b/build/Dockerfile
@@ -0,0 +1,8 @@
+FROM gcc:8
+
+RUN apt-get update && apt-get install -y \
+    autoconf-archive \
+    autogen \
+    libmnl-dev \
+    uuid-dev \
+    && rm -rf /var/lib/apt/lists/*
diff --git a/build/build.sh b/build/build.sh
new file mode 100755
index 000000000..ee087b98f
--- /dev/null
+++ b/build/build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+if [ -f build.sh ]; then
+    cd ../ || exit 1
+fi
+
+if [ "$IS_CONTAINER" != "" ]; then
+    autoreconf -ivf
+    ./configure --enable-maintainer-mode
+    make dist
+    rm -rf autom4te.cache
+else
+    if [[ "$(docker images -q netdata-gcc-builder:latest 2> /dev/null)" == "" ]]; then
+        docker build -t netdata-gcc-builder:latest -f build/Dockerfile .
+    fi
+    docker run --rm -it \
+        --env IS_CONTAINER=TRUE \
+        --volume "${PWD}:/project:Z" \
+        --workdir "/project" \
+        netdata-gcc-builder:latest \
+        ./.travis/build.sh
+fi
diff --git a/build/m4/ax_c___atomic.m4 b/build/m4/ax_c___atomic.m4
new file mode 100644
index 000000000..dd5ee3d1c
--- /dev/null
+++ b/build/m4/ax_c___atomic.m4
@@ -0,0 +1,36 @@
+# AC_C___ATOMIC
+# -------------
+# Define HAVE_C___ATOMIC if __atomic works.
+AN_IDENTIFIER([__atomic], [AC_C___ATOMIC]) +AC_DEFUN([AC_C___ATOMIC], +[AC_CACHE_CHECK([for __atomic], ac_cv_c___atomic, +[AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [[int + main (int argc, char **argv) + { + volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2; + __atomic_load_n(&ul1, __ATOMIC_SEQ_CST); + __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST); + __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST); + __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST); + __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST); + volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2; + __atomic_load_n(&ull1, __ATOMIC_SEQ_CST); + __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST); + __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST); + __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST); + __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST); + return 0; + } + ]])], + [ac_cv_c___atomic=yes], + [ac_cv_c___atomic=no])]) +if test $ac_cv_c___atomic = yes; then + AC_DEFINE([HAVE_C___ATOMIC], 1, + [Define to 1 if __atomic operations work.]) +fi +])# AC_C___ATOMIC + diff --git a/build/m4/ax_c__generic.m4 b/build/m4/ax_c__generic.m4 new file mode 100644 index 000000000..0c4dd52c6 --- /dev/null +++ b/build/m4/ax_c__generic.m4 @@ -0,0 +1,28 @@ +# https://lists.gnu.org/archive/html/autoconf-commit/2012-12/msg00004.html +# AC_C__GENERIC +# ------------- +# Define HAVE_C__GENERIC if _Generic works, a la C11. +AN_IDENTIFIER([_Generic], [AC_C__GENERIC]) +AC_DEFUN([AC_C__GENERIC], +[AC_CACHE_CHECK([for _Generic], ac_cv_c__Generic, +[AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[int + main (int argc, char **argv) + { + int a = _Generic (argc, int: argc = 1); + int *b = &_Generic (argc, default: argc); + char ***c = _Generic (argv, int: argc, default: argv ? &argv : 0); + _Generic (1 ? 0 : b, int: a, default: b) = &argc; + _Generic (a = 1, default: a) = 3; + return a + !b + !c; + } + ]])], + [ac_cv_c__Generic=yes], + [ac_cv_c__Generic=no])]) +if test $ac_cv_c__Generic = yes; then + AC_DEFINE([HAVE_C__GENERIC], 1, + [Define to 1 if C11-style _Generic works.]) +fi +])# AC_C__GENERIC + diff --git a/build/m4/ax_c_lto.m4 b/build/m4/ax_c_lto.m4 new file mode 100644 index 000000000..7e6bc0119 --- /dev/null +++ b/build/m4/ax_c_lto.m4 @@ -0,0 +1,21 @@ +# AC_C_LTO +# ------------- +# Define HAVE_LTO if -flto works. +AN_IDENTIFIER([lto], [AC_C_LTO]) +AC_DEFUN([AC_C_LTO], +[AC_CACHE_CHECK([if -flto builds executables], ac_cv_c_lto, +[AC_RUN_IFELSE( + [AC_LANG_SOURCE( + [[#include + int main(int argc, char **argv) { + return 0; + } + ]])], + [ac_cv_c_lto=yes], + [ac_cv_c_lto=no], + [ac_cv_c_lto=${ac_cv_c_lto_cross_compile}])]) +if test "${ac_cv_c_lto}" = "yes"; then + AC_DEFINE([HAVE_LTO], 1, + [Define to 1 if -flto works.]) +fi +])# AC_C_LTO diff --git a/build/m4/ax_c_mallinfo.m4 b/build/m4/ax_c_mallinfo.m4 new file mode 100644 index 000000000..af8d0481e --- /dev/null +++ b/build/m4/ax_c_mallinfo.m4 @@ -0,0 +1,24 @@ +# AC_C_MALLINFO +# ------------- +# Define HAVE_C_MALLINFO if mallinfo() works. 
+AN_IDENTIFIER([mallinfo], [AC_C_MALLINFO]) +AC_DEFUN([AC_C_MALLINFO], +[AC_CACHE_CHECK([for mallinfo], ac_cv_c_mallinfo, +[AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[#include ]], + [[ + struct mallinfo mi = mallinfo(); + /* make sure that fields exists */ + mi.uordblks = 0; + mi.hblkhd = 0; + mi.arena = 0; + ]] + )], + [ac_cv_c_mallinfo=yes], + [ac_cv_c_mallinfo=no])]) +if test $ac_cv_c_mallinfo = yes; then + AC_DEFINE([HAVE_C_MALLINFO], 1, + [Define to 1 if glibc mallinfo exists.]) +fi +])# AC_C_MALLINFO diff --git a/build/m4/ax_c_mallopt.m4 b/build/m4/ax_c_mallopt.m4 new file mode 100644 index 000000000..31c4fdc36 --- /dev/null +++ b/build/m4/ax_c_mallopt.m4 @@ -0,0 +1,20 @@ +# AC_C_MALLOPT +# ------------- +# Define HAVE_C_MALLOPT if mallopt() works. +AN_IDENTIFIER([mallopt], [AC_C_MALLOPT]) +AC_DEFUN([AC_C_MALLOPT], +[AC_CACHE_CHECK([for mallopt], ac_cv_c_mallopt, +[AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [[#include + int main(int argc, char **argv) { + mallopt(M_ARENA_MAX, 1); + } + ]])], + [ac_cv_c_mallopt=yes], + [ac_cv_c_mallopt=no])]) +if test $ac_cv_c_mallopt = yes; then + AC_DEFINE([HAVE_C_MALLOPT], 1, + [Define to 1 if glibc mallopt exists.]) +fi +])# AC_C_MALLOPT diff --git a/build/m4/ax_c_statement_expressions.m4 b/build/m4/ax_c_statement_expressions.m4 new file mode 100644 index 000000000..fb259e727 --- /dev/null +++ b/build/m4/ax_c_statement_expressions.m4 @@ -0,0 +1,23 @@ +# AC_C_STMT_EXPR +# ------------- +# Define HAVE_STMT_EXPR if compiler has statement expressions. +AN_IDENTIFIER([_Generic], [AC_C_STMT_EXPR]) +AC_DEFUN([AC_C_STMT_EXPR], +[AC_CACHE_CHECK([for statement expressions], ac_cv_c_stmt_expr, +[AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[int + main (int argc, char **argv) + { + int x = ({ int y = 1; y; }); + return x; + } + ]])], + [ac_cv_c_stmt_expr=yes], + [ac_cv_c_stmt_expr=no])]) +if test $ac_cv_c_stmt_expr = yes; then + AC_DEFINE([HAVE_STMT_EXPR], 1, + [Define to 1 if compiler supports statement expressions.]) +fi +])# AC_C_STMT_EXPR + diff --git a/build/m4/ax_check_compile_flag.m4 b/build/m4/ax_check_compile_flag.m4 new file mode 100644 index 000000000..c515602f0 --- /dev/null +++ b/build/m4/ax_check_compile_flag.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. 
Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# SPDX-License-Identifier: GPL-3.0 + +#serial 3 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/build/m4/ax_check_enable_debug.m4 b/build/m4/ax_check_enable_debug.m4 new file mode 100644 index 000000000..db5bab2f4 --- /dev/null +++ b/build/m4/ax_check_enable_debug.m4 @@ -0,0 +1,122 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_check_enable_debug.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_ENABLE_DEBUG([enable by default=yes/info/profile/no], [ENABLE DEBUG VARIABLES ...], [DISABLE DEBUG VARIABLES NDEBUG ...], [IS-RELEASE]) +# +# DESCRIPTION +# +# Check for the presence of an --enable-debug option to configure, with +# the specified default value used when the option is not present. Return +# the value in the variable $ax_enable_debug. +# +# Specifying 'yes' adds '-g -O0' to the compilation flags for all +# languages. Specifying 'info' adds '-g' to the compilation flags. +# Specifying 'profile' adds '-g -pg' to the compilation flags and '-pg' to +# the linking flags. Otherwise, nothing is added. +# +# Define the variables listed in the second argument if debug is enabled, +# defaulting to no variables. Defines the variables listed in the third +# argument if debug is disabled, defaulting to NDEBUG. All lists of +# variables should be space-separated. +# +# If debug is not enabled, ensure AC_PROG_* will not add debugging flags. +# Should be invoked prior to any AC_PROG_* compiler checks. +# +# IS-RELEASE can be used to change the default to 'no' when making a +# release. Set IS-RELEASE to 'yes' or 'no' as appropriate. By default, it +# uses the value of $ax_is_release, so if you are using the AX_IS_RELEASE +# macro, there is no need to pass this parameter. +# +# AX_IS_RELEASE([git-directory]) +# AX_CHECK_ENABLE_DEBUG() +# +# LICENSE +# +# Copyright (c) 2011 Rhys Ulerich +# Copyright (c) 2014, 2015 Philip Withnall +# +# SPDX-License-Identifier: FSFAP + +#serial 5 + +AC_DEFUN([AX_CHECK_ENABLE_DEBUG],[ + AC_BEFORE([$0],[AC_PROG_CC])dnl + AC_BEFORE([$0],[AC_PROG_CXX])dnl + AC_BEFORE([$0],[AC_PROG_F77])dnl + AC_BEFORE([$0],[AC_PROG_FC])dnl + + AC_MSG_CHECKING(whether to enable debugging) + + ax_enable_debug_default=m4_tolower(m4_normalize(ifelse([$1],,[no],[$1]))) + ax_enable_debug_is_release=m4_tolower(m4_normalize(ifelse([$4],, + [$ax_is_release], + [$4]))) + + # If this is a release, override the default. 
+  AS_IF([test "$ax_enable_debug_is_release" = "yes"],
+    [ax_enable_debug_default="no"])
+
+  m4_define(ax_enable_debug_vars,[m4_normalize(ifelse([$2],,,[$2]))])
+  m4_define(ax_disable_debug_vars,[m4_normalize(ifelse([$3],,[NDEBUG],[$3]))])
+
+  AC_ARG_ENABLE(debug,
+    [AS_HELP_STRING([--enable-debug=]@<:@yes/info/profile/no@:>@,[compile with debugging])],
+    [],enable_debug=$ax_enable_debug_default)
+
+  # an empty value means debug=yes
+  AS_IF([test "x$enable_debug" = "x"],
+    [enable_debug="yes"])
+
+  # act on the requested debug mode
+  AS_CASE([$enable_debug],
+    [yes],[
+      AC_MSG_RESULT(yes)
+      CFLAGS="${CFLAGS} -g -O0"
+      CXXFLAGS="${CXXFLAGS} -g -O0"
+      FFLAGS="${FFLAGS} -g -O0"
+      FCFLAGS="${FCFLAGS} -g -O0"
+      OBJCFLAGS="${OBJCFLAGS} -g -O0"
+    ],
+    [info],[
+      AC_MSG_RESULT(info)
+      CFLAGS="${CFLAGS} -g"
+      CXXFLAGS="${CXXFLAGS} -g"
+      FFLAGS="${FFLAGS} -g"
+      FCFLAGS="${FCFLAGS} -g"
+      OBJCFLAGS="${OBJCFLAGS} -g"
+    ],
+    [profile],[
+      AC_MSG_RESULT(profile)
+      CFLAGS="${CFLAGS} -g -pg"
+      CXXFLAGS="${CXXFLAGS} -g -pg"
+      FFLAGS="${FFLAGS} -g -pg"
+      FCFLAGS="${FCFLAGS} -g -pg"
+      OBJCFLAGS="${OBJCFLAGS} -g -pg"
+      LDFLAGS="${LDFLAGS} -pg"
+    ],
+    [
+      AC_MSG_RESULT(no)
+      dnl Ensure AC_PROG_CC/CXX/F77/FC/OBJC will not enable debug flags
+      dnl by setting any unset environment flag variables
+      AS_IF([test "x${CFLAGS+set}" != "xset"],
+        [CFLAGS=""])
+      AS_IF([test "x${CXXFLAGS+set}" != "xset"],
+        [CXXFLAGS=""])
+      AS_IF([test "x${FFLAGS+set}" != "xset"],
+        [FFLAGS=""])
+      AS_IF([test "x${FCFLAGS+set}" != "xset"],
+        [FCFLAGS=""])
+      AS_IF([test "x${OBJCFLAGS+set}" != "xset"],
+        [OBJCFLAGS=""])
+    ])
+
+  dnl Define various variables if debugging is disabled.
+  dnl assert.h is a NOP if NDEBUG is defined, so define it by default.
+  AS_IF([test "x$enable_debug" = "xyes"],
+    [m4_map_args_w(ax_enable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is enabled])])],
+    [m4_map_args_w(ax_disable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is disabled])])])
+  ax_enable_debug=$enable_debug
+])
diff --git a/build/m4/ax_gcc_func_attribute.m4 b/build/m4/ax_gcc_func_attribute.m4
new file mode 100644
index 000000000..6f1e1b051
--- /dev/null
+++ b/build/m4/ax_gcc_func_attribute.m4
@@ -0,0 +1,223 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_gcc_func_attribute.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_GCC_FUNC_ATTRIBUTE(ATTRIBUTE)
+#
+# DESCRIPTION
+#
+# This macro checks if the compiler supports one of GCC's function
+# attributes; many other compilers also provide function attributes with
+# the same syntax. Compiler warnings are used to detect supported
+# attributes, since unsupported ones are ignored by default, so quieting
+# warnings when using this macro will yield false positives.
+#
+# The ATTRIBUTE parameter holds the name of the attribute to be checked.
+#
+# If ATTRIBUTE is supported, HAVE_FUNC_ATTRIBUTE_<ATTRIBUTE> is defined.
+#
+# The macro caches its result in the ax_cv_have_func_attribute_<attribute>
+# variable.
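+#
+# A minimal usage sketch (a hypothetical configure.ac fragment, for
+# illustration only; the attribute name is just an example):
+#
+#   AX_GCC_FUNC_ATTRIBUTE([noreturn])
+#
+# On success this defines HAVE_FUNC_ATTRIBUTE_NORETURN, which C sources can
+# then consume (MY_NORETURN is a made-up name):
+#
+#   #ifdef HAVE_FUNC_ATTRIBUTE_NORETURN
+#   #define MY_NORETURN __attribute__((noreturn))
+#   #else
+#   #define MY_NORETURN
+#   #endif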
+#
+# The macro currently supports the following function attributes:
+#
+#   alias
+#   aligned
+#   alloc_size
+#   always_inline
+#   artificial
+#   cold
+#   const
+#   constructor
+#   constructor_priority for constructor attribute with priority
+#   deprecated
+#   destructor
+#   dllexport
+#   dllimport
+#   error
+#   externally_visible
+#   flatten
+#   format
+#   format_arg
+#   gnu_inline
+#   hot
+#   ifunc
+#   leaf
+#   malloc
+#   noclone
+#   noinline
+#   nonnull
+#   noreturn
+#   nothrow
+#   optimize
+#   pure
+#   unused
+#   used
+#   visibility
+#   warning
+#   warn_unused_result
+#   weak
+#   weakref
+#
+# Unsupported function attributes will be tested with a prototype returning
+# an int and not accepting any arguments; the result of the check might be
+# wrong or meaningless, so use with care.
+#
+# LICENSE
+#
+# Copyright (c) 2013 Gabriele Svelto
+#
+# SPDX-License-Identifier: FSFAP
+
+#serial 4
+
+AC_DEFUN([AX_GCC_FUNC_ATTRIBUTE], [
+    AS_VAR_PUSHDEF([ac_var], [ax_cv_have_func_attribute_$1])
+
+    AC_CACHE_CHECK([for __attribute__(($1))], [ac_var], [
+        AC_LINK_IFELSE([AC_LANG_PROGRAM([
+            m4_case([$1],
+                [alias], [
+                    int foo( void ) { return 0; }
+                    int bar( void ) __attribute__(($1("foo")));
+                ],
+                [aligned], [
+                    int foo( void ) __attribute__(($1(32)));
+                ],
+                [alloc_size], [
+                    void *foo(int a) __attribute__(($1(1)));
+                ],
+                [always_inline], [
+                    inline __attribute__(($1)) int foo( void ) { return 0; }
+                ],
+                [artificial], [
+                    inline __attribute__(($1)) int foo( void ) { return 0; }
+                ],
+                [cold], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [const], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [constructor_priority], [
+                    int foo( void ) __attribute__((__constructor__(65535/2)));
+                ],
+                [constructor], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [deprecated], [
+                    int foo( void ) __attribute__(($1("")));
+                ],
+                [destructor], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [dllexport], [
+                    __attribute__(($1)) int foo( void ) { return 0; }
+                ],
+                [dllimport], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [error], [
+                    int foo( void ) __attribute__(($1("")));
+                ],
+                [externally_visible], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [flatten], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [format], [
+                    int foo(const char *p, ...) __attribute__(($1(printf, 1, 2)));
+                ],
+                [format_arg], [
+                    char *foo(const char *p) __attribute__(($1(1)));
+                ],
+                [gnu_inline], [
+                    inline __attribute__(($1)) int foo( void ) { return 0; }
+                ],
+                [hot], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [ifunc], [
+                    int my_foo( void ) { return 0; }
+                    static int (*resolve_foo(void))(void) { return my_foo; }
+                    int foo( void ) __attribute__(($1("resolve_foo")));
+                ],
+                [leaf], [
+                    __attribute__(($1)) int foo( void ) { return 0; }
+                ],
+                [malloc], [
+                    void *foo( void ) __attribute__(($1));
+                ],
+                [noclone], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [noinline], [
+                    __attribute__(($1)) int foo( void ) { return 0; }
+                ],
+                [nonnull], [
+                    int foo(char *p) __attribute__(($1(1)));
+                ],
+                [noreturn], [
+                    void foo( void ) __attribute__(($1));
+                ],
+                [nothrow], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [optimize], [
+                    __attribute__(($1(3))) int foo( void ) { return 0; }
+                ],
+                [pure], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [returns_nonnull], [
+                    void *foo( void ) __attribute__(($1));
+                ],
+                [unused], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [used], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [visibility], [
+                    int foo_def( void ) __attribute__(($1("default")));
+                    int foo_hid( void ) __attribute__(($1("hidden")));
+                    int foo_int( void ) __attribute__(($1("internal")));
+                    int foo_pro( void ) __attribute__(($1("protected")));
+                ],
+                [warning], [
+                    int foo( void ) __attribute__(($1("")));
+                ],
+                [warn_unused_result], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [weak], [
+                    int foo( void ) __attribute__(($1));
+                ],
+                [weakref], [
+                    static int foo( void ) { return 0; }
+                    static int bar( void ) __attribute__(($1("foo")));
+                ],
+                [
+                    m4_warn([syntax], [Unsupported attribute $1, the test may fail])
+                    int foo( void ) __attribute__(($1));
+                ]
+            )], [])
+        ],
+        dnl GCC doesn't exit with an error if an unknown attribute is
+        dnl provided but only outputs a warning, so accept the attribute
+        dnl only if no warnings were issued.
+        [AS_IF([test -s conftest.err],
+            [AS_VAR_SET([ac_var], [no])],
+            [AS_VAR_SET([ac_var], [yes])])],
+        [AS_VAR_SET([ac_var], [no])])
+    ])
+
+    AS_IF([test yes = AS_VAR_GET([ac_var])],
+        [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_FUNC_ATTRIBUTE_$1), 1,
+            [Define to 1 if the system has the `$1' function attribute])], [])
+
+    AS_VAR_POPDEF([ac_var])
+])
diff --git a/build/m4/ax_pthread.m4 b/build/m4/ax_pthread.m4
new file mode 100644
index 000000000..ba9ac28a5
--- /dev/null
+++ b/build/m4/ax_pthread.m4
@@ -0,0 +1,308 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_pthread.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
+#
+# DESCRIPTION
+#
+# This macro figures out how to build C programs using POSIX threads. It
+# sets the PTHREAD_LIBS output variable to the threads library and linker
+# flags, and the PTHREAD_CFLAGS output variable to any special C compiler
+# flags that are needed. (The user can also force certain compiler
+# flags/libs to be tested by setting these environment variables.)
+#
+# Also sets PTHREAD_CC to any special C compiler that is needed for
+# multi-threaded programs (defaults to the value of CC otherwise). (This
+# is necessary on AIX to use the special cc_r compiler alias.)
+#
+# NOTE: You are assumed to not only compile your program with these flags,
+# but also to link with them as well. For example, you should link with
+# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
+#
+# If you are only building threads programs, you may wish to use these
+# variables in your default LIBS, CFLAGS, and CC:
+#
+# LIBS="$PTHREAD_LIBS $LIBS"
+# CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+# CC="$PTHREAD_CC"
+#
+# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant
+# has a nonstandard name, this macro defines PTHREAD_CREATE_JOINABLE to
+# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
+#
+# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the
+# PTHREAD_PRIO_INHERIT symbol is defined when compiling with
+# PTHREAD_CFLAGS.
+#
+# ACTION-IF-FOUND is a list of shell commands to run if a threads library
+# is found, and ACTION-IF-NOT-FOUND is a list of commands to run if it
+# is not found. If ACTION-IF-FOUND is not specified, the default action
+# will define HAVE_PTHREAD.
+#
+# Please let the authors know if this macro fails on any platform, or if
+# you have any other suggestions or comments. This macro was based on work
+# by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help
+# from M. Frigo), as well as ac_pthread and hb_pthread macros posted by
+# Alejandro Forero Cuervo to the autoconf macro repository. We are also
+# grateful for the helpful feedback of numerous users.
+#
+# Updated for Autoconf 2.68 by Daniel Richard G.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson
+# Copyright (c) 2011 Daniel Richard G.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#serial 21
+
+AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
+AC_DEFUN([AX_PTHREAD], [
+AC_REQUIRE([AC_CANONICAL_HOST])
+AC_LANG_PUSH([C])
+ax_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+        save_CFLAGS="$CFLAGS"
+        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+        save_LIBS="$LIBS"
+        LIBS="$PTHREAD_LIBS $LIBS"
+        AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS])
+        AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes])
+        AC_MSG_RESULT([$ax_pthread_ok])
+        if test x"$ax_pthread_ok" = xno; then
+                PTHREAD_LIBS=""
+                PTHREAD_CFLAGS=""
+        fi
+        LIBS="$save_LIBS"
+        CFLAGS="$save_CFLAGS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important.
Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case ${host_os} in + solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" + ;; + + darwin*) + ax_pthread_flags="-pthread $ax_pthread_flags" + ;; +esac + +# Clang doesn't consider unrecognized options an error unless we specify +# -Werror. We throw in some extra Clang-specific options to ensure that +# this doesn't happen for GCC, which also accepts -Werror. + +AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags]) +save_CFLAGS="$CFLAGS" +ax_pthread_extra_flags="-Werror" +CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])], + [AC_MSG_RESULT([yes])], + [ax_pthread_extra_flags= + AC_MSG_RESULT([no])]) +CFLAGS="$save_CFLAGS" + +if test x"$ax_pthread_ok" = xno; then +for flag in $ax_pthread_flags; do + + case $flag in + none) + AC_MSG_CHECKING([whether pthreads work without any flags]) + ;; + + -*) + AC_MSG_CHECKING([whether pthreads work with $flag]) + PTHREAD_CFLAGS="$flag" + ;; + + pthread-config) + AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no]) + if test x"$ax_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + AC_MSG_CHECKING([for the pthreads library -l$flag]) + PTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. 
+        AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+                        static void routine(void *a) { a = 0; }
+                        static void *start_routine(void *a) { return a; }],
+                       [pthread_t th; pthread_attr_t attr;
+                        pthread_create(&th, 0, start_routine, 0);
+                        pthread_join(th, 0);
+                        pthread_attr_init(&attr);
+                        pthread_cleanup_push(routine, 0);
+                        pthread_cleanup_pop(0) /* ; */])],
+                [ax_pthread_ok=yes],
+                [])
+
+        LIBS="$save_LIBS"
+        CFLAGS="$save_CFLAGS"
+
+        AC_MSG_RESULT([$ax_pthread_ok])
+        if test "x$ax_pthread_ok" = xyes; then
+                break;
+        fi
+
+        PTHREAD_LIBS=""
+        PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$ax_pthread_ok" = xyes; then
+        save_LIBS="$LIBS"
+        LIBS="$PTHREAD_LIBS $LIBS"
+        save_CFLAGS="$CFLAGS"
+        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+        # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+        AC_MSG_CHECKING([for joinable pthread attribute])
+        attr_name=unknown
+        for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+            AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
+                           [int attr = $attr; return attr /* ; */])],
+                [attr_name=$attr; break],
+                [])
+        done
+        AC_MSG_RESULT([$attr_name])
+        if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
+            AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name],
+                               [Define to necessary symbol if this constant
+                                uses a non-standard name on your system.])
+        fi
+
+        AC_MSG_CHECKING([if more special flags are required for pthreads])
+        flag=no
+        case ${host_os} in
+            aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";;
+            osf* | hpux*) flag="-D_REENTRANT";;
+            solaris*)
+            if test "$GCC" = "yes"; then
+                flag="-D_REENTRANT"
+            else
+                # TODO: What about Clang on Solaris?
+                flag="-mt -D_REENTRANT"
+            fi
+            ;;
+        esac
+        AC_MSG_RESULT([$flag])
+        if test "x$flag" != xno; then
+            PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+        fi
+
+        AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
+            [ax_cv_PTHREAD_PRIO_INHERIT], [
+                AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
+                               [[int i = PTHREAD_PRIO_INHERIT;]])],
+                    [ax_cv_PTHREAD_PRIO_INHERIT=yes],
+                    [ax_cv_PTHREAD_PRIO_INHERIT=no])
+            ])
+        AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"],
+            [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])])
+
+        LIBS="$save_LIBS"
+        CFLAGS="$save_CFLAGS"
+
+        # More AIX lossage: compile with *_r variant
+        if test "x$GCC" != xyes; then
+            case $host_os in
+                aix*)
+                AS_CASE(["x/$CC"],
+                    [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
+                    [#handle absolute path differently from PATH based program lookup
+                     AS_CASE(["x$CC"],
+                         [x/*],
+                         [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
+                         [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+                ;;
+            esac
+        fi
+fi
+
+test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+
+AC_SUBST([PTHREAD_LIBS])
+AC_SUBST([PTHREAD_CFLAGS])
+AC_SUBST([PTHREAD_CC])
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$ax_pthread_ok" = xyes; then
+        ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
+        :
+else
+        ax_pthread_ok=no
+        $2
+fi
+AC_LANG_POP
+])dnl AX_PTHREAD
diff --git a/build/m4/jemalloc.m4 b/build/m4/jemalloc.m4
new file mode 100644
index 000000000..c2008a8ea
--- /dev/null
+++ b/build/m4/jemalloc.m4
@@ -0,0 +1,75 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl SPDX-License-Identifier: Apache-2.0
+
+dnl
+dnl jemalloc.m4: Trafficserver's jemalloc autoconf macros
+dnl modified to skip other TS_ helpers
+dnl
+
+AC_DEFUN([TS_CHECK_JEMALLOC], [
+AC_ARG_WITH([jemalloc-prefix],
+  [AS_HELP_STRING([--with-jemalloc-prefix=PREFIX],[Specify the jemalloc prefix [default=""]])],
+  [
+  jemalloc_prefix="$withval"
+  ],[
+  if test "`uname -s`" = "Darwin"; then
+    jemalloc_prefix="je_"
+  else
+    jemalloc_prefix=""
+  fi
+  ]
+)
+AC_DEFINE_UNQUOTED([prefix_jemalloc], [${jemalloc_prefix}], [jemalloc prefix])
+
+enable_jemalloc=no
+AC_ARG_WITH([jemalloc], [AS_HELP_STRING([--with-jemalloc=DIR], [use a specific jemalloc library])],
+[
+  if test "$withval" != "no"; then
+    if test "x${enable_tcmalloc}" = "xyes"; then
+      AC_MSG_ERROR([Cannot compile with both jemalloc and tcmalloc])
+    fi
+    enable_jemalloc=yes
+    jemalloc_base_dir="$withval"
+    case "$withval" in
+      yes)
+        jemalloc_base_dir="/usr"
+        AC_MSG_CHECKING(for jemalloc includes in standard directories)
+      ;;
+      *":"*)
+        jemalloc_include="`echo $withval |sed -e 's/:.*$//'`"
+        jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`"
+        AC_MSG_CHECKING(for jemalloc includes in $jemalloc_include and libs in $jemalloc_ldflags)
+      ;;
+      *)
+        jemalloc_include="$withval/include"
+        jemalloc_ldflags="$withval/lib"
+        AC_MSG_CHECKING(for jemalloc includes in $withval)
+      ;;
+    esac
+  fi
+])
+
+has_jemalloc=0
+if test "$enable_jemalloc" != "no"; then
+  jemalloc_have_headers=0
+  jemalloc_have_libs=0
+  if test "$jemalloc_base_dir" != "/usr"; then
+    CFLAGS="${CFLAGS} -I${jemalloc_include}"
+    LDFLAGS="${LDFLAGS} -L${jemalloc_ldflags}"
+    LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -R${jemalloc_ldflags}"
+  fi
+  func="${jemalloc_prefix}malloc_stats_print"
+  AC_CHECK_LIB(jemalloc, ${func}, [jemalloc_have_libs=1])
+  if test "$jemalloc_have_libs" != "0"; then
+    AC_CHECK_HEADERS([jemalloc/jemalloc.h], [jemalloc_have_headers=1])
+  fi
+  if test "$jemalloc_have_headers" != "0"; then
+    has_jemalloc=1
+    LIBS="${LIBS} -ljemalloc"
+    AC_DEFINE(has_jemalloc, [1], [Link/compile against jemalloc])
+  else
+    AC_MSG_ERROR([Couldn't find a jemalloc installation])
+  fi
+fi
+AC_SUBST(has_jemalloc)
+])
diff --git a/build/m4/tcmalloc.m4 b/build/m4/tcmalloc.m4
new file mode 100644
index 000000000..765d2accc
--- /dev/null
+++ b/build/m4/tcmalloc.m4
@@ -0,0 +1,45 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl SPDX-License-Identifier: Apache-2.0
+
+dnl
+dnl tcmalloc.m4: Trafficserver's tcmalloc autoconf macros
+dnl modified to skip other TS_ helpers
+dnl
+
+dnl This is kinda fugly, but we need a way to both specify a directory and
+dnl which of the many tcmalloc libraries to use ...
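+dnl
+dnl A minimal usage sketch (a hypothetical configure invocation, for
+dnl illustration only; the installation path is an example):
+dnl
+dnl   ./configure --with-tcmalloc=/opt/gperftools --with-tcmalloc-lib=tcmalloc_minimal
+dnl
+dnl This adds -L/opt/gperftools/lib to LDFLAGS, links -ltcmalloc_minimal if
+dnl tc_cfree is found in it, and defines has_tcmalloc.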
+AC_DEFUN([TS_CHECK_TCMALLOC], [ +AC_ARG_WITH([tcmalloc-lib], + [AS_HELP_STRING([--with-tcmalloc-lib],[specify the tcmalloc library to use [default=tcmalloc]])], + [ + with_tcmalloc_lib="$withval" + ],[ + with_tcmalloc_lib="tcmalloc" + ] +) + +has_tcmalloc=0 +AC_ARG_WITH([tcmalloc], [AS_HELP_STRING([--with-tcmalloc=DIR], [use the tcmalloc library])], +[ + if test "$withval" != "no"; then + if test "x${enable_jemalloc}" = "xyes"; then + AC_MSG_ERROR([Cannot compile with both tcmalloc and jemalloc]) + fi + tcmalloc_have_lib=0 + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + tcmalloc_ldflags="$withval/lib" + LDFLAGS="${LDFLAGS} -L${tcmalloc_ldflags}" + LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -rpath ${tcmalloc_ldflags}" + fi + AC_CHECK_LIB(${with_tcmalloc_lib}, tc_cfree, [tcmalloc_have_lib=1]) + if test "$tcmalloc_have_lib" != "0"; then + LIBS="${LIBS} -l${with_tcmalloc_lib}" + has_tcmalloc=1 + AC_DEFINE(has_tcmalloc, [1], [Link/compile against tcmalloc]) + else + AC_MSG_ERROR([Couldn't find a tcmalloc installation]) + fi + fi +]) +AC_SUBST(has_tcmalloc) +]) diff --git a/build/subst.inc b/build/subst.inc index 9682cf882..8f9ac0551 100644 --- a/build/subst.inc +++ b/build/subst.inc @@ -4,6 +4,9 @@ -e 's#[@]sbindir_POST@#$(sbindir)#g' \ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ $< > $@.tmp; then \ mv "$@.tmp" "$@"; \ else \ diff --git a/charts.d/Makefile.am b/charts.d/Makefile.am deleted file mode 100644 index 104ba86af..000000000 --- a/charts.d/Makefile.am +++ /dev/null @@ -1,31 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_charts_SCRIPTS = \ - $(NULL) - -dist_charts_DATA = \ - README.md \ - ap.chart.sh \ - apcupsd.chart.sh \ - apache.chart.sh \ - cpu_apps.chart.sh \ - cpufreq.chart.sh \ - example.chart.sh \ - exim.chart.sh \ - hddtemp.chart.sh \ - libreswan.chart.sh \ - load_average.chart.sh \ - mem_apps.chart.sh \ - mysql.chart.sh \ - nginx.chart.sh \ - nut.chart.sh \ - opensips.chart.sh \ - phpfpm.chart.sh \ - postfix.chart.sh \ - sensors.chart.sh \ - squid.chart.sh \ - tomcat.chart.sh \ - $(NULL) diff --git a/charts.d/Makefile.in b/charts.d/Makefile.in deleted file mode 100644 index ebd1af2be..000000000 --- a/charts.d/Makefile.in +++ /dev/null @@ -1,575 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = charts.d -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_charts_SCRIPTS) $(dist_charts_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
-am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsdir)" -SCRIPTS = $(dist_charts_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_charts_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ 
-abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_charts_SCRIPTS = \ - $(NULL) - -dist_charts_DATA = \ - README.md \ - ap.chart.sh \ - apcupsd.chart.sh \ - apache.chart.sh \ - cpu_apps.chart.sh \ - cpufreq.chart.sh \ - example.chart.sh \ - exim.chart.sh \ - hddtemp.chart.sh \ - libreswan.chart.sh \ - load_average.chart.sh \ - mem_apps.chart.sh \ - mysql.chart.sh \ - nginx.chart.sh \ - nut.chart.sh \ - opensips.chart.sh \ - phpfpm.chart.sh \ - postfix.chart.sh \ - sensors.chart.sh \ - squid.chart.sh \ - tomcat.chart.sh \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu charts.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu charts.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_chartsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir) -install-dist_chartsDATA: $(dist_charts_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \ - done - -uninstall-dist_chartsDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test 
-f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_chartsDATA \ - install-dist_chartsSCRIPTS install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/charts.d/README.md b/charts.d/README.md deleted file mode 100644 index 748af08a1..000000000 --- a/charts.d/README.md +++ /dev/null @@ -1,344 +0,0 @@ -The following charts.d plugins are supported: - ---- - -# hddtemp - -The plugin will collect temperatures from disks - -It will create one chart with all active disks - -1. **temperature in Celsius** - -### configuration - -hddtemp needs to be running in daemonized mode - -```sh -# host with daemonized hddtemp -hddtemp_host="localhost" - -# port on which hddtemp is showing data -hddtemp_port="7634" - -# array of included disks -# the default is to include all -hddtemp_disks=() -``` - ---- - -# libreswan - -The plugin will collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels. - -The following charts are created, **per tunnel**: - -1. **Uptime** - - * the uptime of the tunnel - -2. **Traffic** - - * bytes in - * bytes out - -### configuration - -Its config file is `/etc/netdata/charts.d/libreswan.conf`. - -The plugin executes 2 commands to collect all the information it needs: - -```sh -ipsec whack --status -ipsec whack --trafficstatus -``` - -The first command is used to extract the currently established tunnels, their IDs and their names. -The second command is used to extract the current uptime and traffic. - -Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied. -The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics. 
- -To allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content: - -``` -netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status -netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus -``` - -Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path). - ---- - -# mysql - -The plugin will monitor one or more mysql servers - -It will produce the following charts: - -1. **Bandwidth** in kbps - * in - * out - -2. **Queries** in queries/sec - * queries - * questions - * slow queries - -3. **Operations** in operations/sec - * opened tables - * flush - * commit - * delete - * prepare - * read first - * read key - * read next - * read prev - * read random - * read random next - * rollback - * save point - * update - * write - -4. **Table Locks** in locks/sec - * immediate - * waited - -5. **Select Issues** in issues/sec - * full join - * full range join - * range - * range check - * scan - -6. **Sort Issues** in issues/sec - * merge passes - * range - * scan - -### configuration - -You can configure many database servers, like this: - -You can provide, per server, the following: - -1. a name, anything you like, but keep it short -2. the mysql command to connect to the server -3. the mysql command line options to be used for connecting to the server - -Here is an example for 2 servers: - -```sh -mysql_opts[server1]="-h server1.example.com" -mysql_opts[server2]="-h server2.example.com --connect_timeout 2" -``` - -The above will use the `mysql` command found in the system path. -You can also provide a custom mysql command per server, like this: - -```sh -mysql_cmds[server2]="/opt/mysql/bin/mysql" -``` - -The above sets the mysql command only for server2. server1 will use the system default. - -If no configuration is given, the plugin will attempt to connect to mysql server at localhost. - - ---- - -# nut - -The plugin will collect UPS data for all UPSes configured in the system. - -The following charts will be created: - -1. **UPS Charge** - - * percentage changed - -2. **UPS Battery Voltage** - - * current voltage - * high voltage - * low voltage - * nominal voltage - -3. **UPS Input Voltage** - - * current voltage - * fault voltage - * nominal voltage - -4. **UPS Input Current** - - * nominal current - -5. **UPS Input Frequency** - - * current frequency - * nominal frequency - -6. **UPS Output Voltage** - - * current voltage - -7. **UPS Load** - - * current load - -8. **UPS Temperature** - - * current temperature - - -### configuration - -This is the internal default for `/etc/netdata/nut.conf` - -```sh -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -nut_ups= - -# how frequently to collect UPS data -nut_update_every=2 -``` - ---- - -# postfix - -The plugin will collect the postfix queue size. - -It will create two charts: - -1. **queue size in emails** -2. **queue size in KB** - -### configuration - -This is the internal default for `/etc/netdata/postfix.conf` - -```sh -# the postqueue command -# if empty, it will use the one found in the system path -postfix_postqueue= - -# how frequently to collect queue size -postfix_update_every=15 -``` - ---- - -# sensors - -The plugin will provide charts for all configured system sensors - -> This plugin is reading sensors directly from the kernel. -> The `lm-sensors` package is able to perform calculations on the -> kernel provided values, this plugin will not perform. 
-> So, the values graphed, are the raw hardware values of the sensors. - -The plugin will create netdata charts for: - -1. **Temperature** -2. **Voltage** -3. **Current** -4. **Power** -5. **Fans Speed** -6. **Energy** -7. **Humidity** - -One chart for every sensor chip found and each of the above will be created. - -### configuration - -This is the internal default for `/etc/netdata/sensors.conf` - -```sh -# the directory the kernel keeps sensor data -sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" - -# how deep in the tree to check for sensor data -sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -sensors_source_update=1 - -# how frequently to collect sensor data -# the default is to collect it at every iteration of charts.d -sensors_update_every= - -# array of sensors which are excluded -# the default is to include all -sensors_excluded=() -``` - ---- - -# squid - -The plugin will monitor a squid server. - -It will produce 4 charts: - -1. **Squid Client Bandwidth** in kbps - - * in - * out - * hits - -2. **Squid Client Requests** in requests/sec - - * requests - * hits - * errors - -3. **Squid Server Bandwidth** in kbps - - * in - * out - -4. **Squid Server Requests** in requests/sec - - * requests - * errors - -### autoconfig - -The plugin will by itself detect squid servers running on -localhost, on ports 3128 or 8080. - -It will attempt to download URLs in the form: - -- `cache_object://HOST:PORT/counters` -- `/squid-internal-mgr/counters` - -If any succeeds, it will use this. - -### configuration - -If you need to configure it by hand, create the file -`/etc/netdata/squid.conf` with the following variables: - -- `squid_host=IP` the IP of the squid host -- `squid_port=PORT` the port the squid is listening -- `squid_url="URL"` the URL with the statistics to be fetched from squid -- `squid_timeout=SECONDS` how much time we should wait for squid to respond -- `squid_update_every=SECONDS` the frequency of the data collection - -Example `/etc/netdata/squid.conf`: - -```sh -squid_host=127.0.0.1 -squid_port=3128 -squid_url="cache_object://127.0.0.1:3128/counters" -squid_timeout=2 -squid_update_every=5 -``` diff --git a/charts.d/ap.chart.sh b/charts.d/ap.chart.sh deleted file mode 100644 index ce2eefc9f..000000000 --- a/charts.d/ap.chart.sh +++ /dev/null @@ -1,181 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2016 Costa Tsaousis -# GPL v3+ -# - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -ap_update_every= -ap_priority=6900 - -declare -A ap_devs=() - -# _check is called once, to find out if this chart should be enabled or not -ap_check() { - require_cmd iw || return 1 - - local ev=$(run iw dev | awk ' - BEGIN { - i = ""; - ssid = ""; - ap = 0; - } - /^[ \t]+Interface / { - if( ap == 1 ) { - print "ap_devs[" i "]=\"" ssid "\"" - } - - i = $2; - ssid = ""; - ap = 0; - } - /^[ \t]+ssid / { ssid = $2; } - /^[ \t]+type AP$/ { ap = 1; } - END { - if( ap == 1 ) { - print "ap_devs[" i "]=\"" ssid "\"" - } - } - ') - eval "${ev}" - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - [ ${#ap_devs[@]} -gt 0 ] && return 0 - error "no devices found in AP mode, with 'iw dev'" - return 1 -} - -# _create is called once, to create the charts -ap_create() { - local ssid dev - - for dev in "${!ap_devs[@]}" - do - ssid="${ap_devs[${dev}]}" - - # create the chart with 3 dimensions - cat < 0) { - print \"BEGIN ap_clients.\" dev; - print \"SET clients = \" c; - print \"END\"; - print \"BEGIN ap_bandwidth.\" dev; - print \"SET received = \" rb; - print \"SET sent = \" tb; - print \"END\"; - print \"BEGIN ap_packets.\" dev; - print \"SET received = \" rp; - print \"SET sent = \" tp; - print \"END\"; - print \"BEGIN ap_issues.\" dev; - print \"SET retries = \" tr; - print \"SET failures = \" tf; - print \"END\"; - - if( c == 0 ) c = 1; - print \"BEGIN ap_signal.\" dev; - print \"SET signal = \" int(s / c); - print \"END\"; - print \"BEGIN ap_bitrate.\" dev; - print \"SET receive = \" int(rt / c); - print \"SET transmit = \" int(tt / c); - print \"SET expected = \" int(e / c); - print \"END\"; - } - zero_data(); - } - BEGIN { - zero_data(); - } - /^DEVICE / { - print_device(); - dev = \$2; - } - /^Station/ { c++; } - /^[ \\t]+rx bytes:/ { rb += \$3; } - /^[ \\t]+tx bytes:/ { tb += \$3; } - /^[ \\t]+rx packets:/ { rp += \$3; } - /^[ \\t]+tx packets:/ { tp += \$3; } - /^[ \\t]+tx retries:/ { tr += \$3; } - /^[ \\t]+tx failed:/ { tf += \$3; } - /^[ \\t]+signal:/ { x = \$2; s += x * 1000; } - /^[ \\t]+rx bitrate:/ { x = \$3; rt += x * 1000; } - /^[ \\t]+tx bitrate:/ { x = \$3; tt += x * 1000; } - /^[ \\t]+expected throughput:(.*)Mbps/ { - x=\$3; - sub(/Mbps/, \"\", x); - e += x * 1000; - } - END { - print_device(); - } - " - - return 0 -} - diff --git a/charts.d/apache.chart.sh b/charts.d/apache.chart.sh deleted file mode 100644 index a8ac08014..000000000 --- a/charts.d/apache.chart.sh +++ /dev/null @@ -1,254 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2016 Costa Tsaousis -# GPL v3+ -# - -# the URL to download apache status info -apache_url="http://127.0.0.1:80/server-status?auto" -apache_curl_opts= - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -apache_update_every= - -apache_priority=60000 - -# convert apache floating point values -# to integer using this multiplier -# this only affects precision - the values -# will be in the proper units -apache_decimal_detail=1000000 - -declare -a apache_response=() -apache_accesses=0 -apache_kbytes=0 -apache_reqpersec=0 -apache_bytespersec=0 -apache_bytesperreq=0 -apache_busyworkers=0 -apache_idleworkers=0 -apache_connstotal=0 -apache_connsasyncwriting=0 -apache_connsasynckeepalive=0 -apache_connsasyncclosing=0 - -apache_keys_detected=0 -apache_has_conns=0 -apache_key_accesses= -apache_key_kbytes= -apache_key_reqpersec= -apache_key_bytespersec= -apache_key_bytesperreq= -apache_key_busyworkers= -apache_key_idleworkers= -apache_key_scoreboard= -apache_key_connstotal= -apache_key_connsasyncwriting= -apache_key_connsasynckeepalive= -apache_key_connsasyncclosing= -apache_detect() { - local i=0 - for x in "${@}" - do - case "${x}" in - 'Total Accesses') apache_key_accesses=$((i + 1)) ;; - 'Total kBytes') apache_key_kbytes=$((i + 1)) ;; - 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;; - 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;; - 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;; - 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;; - 'IdleWorkers') apache_key_idleworkers=$((i + 1));; - 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;; - 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;; - 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;; - 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;; - 'Scoreboard') apache_key_scoreboard=$((i)) ;; - esac - - i=$((i + 1)) - done - - # we will not check of the Conns* - # keys, since these are apache 2.4 specific - [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1 - [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1 - [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1 - [ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1 - [ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1 - [ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1 - [ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1 - [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1 - - if [ ! -z "${apache_key_connstotal}" \ - -a ! -z "${apache_key_connsasyncwriting}" \ - -a ! -z "${apache_key_connsasynckeepalive}" \ - -a ! -z "${apache_key_connsasyncclosing}" \ - ] - then - apache_has_conns=1 - else - apache_has_conns=0 - fi - - return 0 -} - -apache_get() { - local oIFS="${IFS}" ret - IFS=$':\n' apache_response=($(run curl -Ss ${apache_curl_opts} "${apache_url}")) - ret=$? 
- IFS="${oIFS}" - - [ $ret -ne 0 -o "${#apache_response[@]}" -eq 0 ] && return 1 - - # the last line on the apache output is "Scoreboard" - # we use this label to detect that the output has a new word count - if [ ${apache_keys_detected} -eq 0 -o "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ] - then - apache_detect "${apache_response[@]}" || return 1 - apache_keys_detected=1 - fi - - apache_accesses="${apache_response[${apache_key_accesses}]}" - apache_kbytes="${apache_response[${apache_key_kbytes}]}" - - float2int "${apache_response[${apache_key_reqpersec}]}" ${apache_decimal_detail} - apache_reqpersec=${FLOAT2INT_RESULT} - - float2int "${apache_response[${apache_key_bytespersec}]}" ${apache_decimal_detail} - apache_bytespersec=${FLOAT2INT_RESULT} - - float2int "${apache_response[${apache_key_bytesperreq}]}" ${apache_decimal_detail} - apache_bytesperreq=${FLOAT2INT_RESULT} - - apache_busyworkers="${apache_response[${apache_key_busyworkers}]}" - apache_idleworkers="${apache_response[${apache_key_idleworkers}]}" - - if [ -z "${apache_accesses}" \ - -o -z "${apache_kbytes}" \ - -o -z "${apache_reqpersec}" \ - -o -z "${apache_bytespersec}" \ - -o -z "${apache_bytesperreq}" \ - -o -z "${apache_busyworkers}" \ - -o -z "${apache_idleworkers}" \ - ] - then - error "empty values got from apache server: ${apache_response[*]}" - return 1 - fi - - if [ ${apache_has_conns} -eq 1 ] - then - apache_connstotal="${apache_response[${apache_key_connstotal}]}" - apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}" - apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}" - apache_connsasyncclosing="${apache_response[${apache_key_connsasyncclosing}]}" - fi - - return 0 -} - -# _check is called once, to find out if this chart should be enabled or not -apache_check() { - - apache_get - if [ $? -ne 0 ] - then - error "cannot find stub_status on URL '${apache_url}'. Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf" - return 1 - fi - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - return 0 -} - -# _create is called once, to create the charts -apache_create() { - cat < -# GPL v3+ -# - -apcupsd_ip= -apcupsd_port= - -declare -A apcupsd_sources=( - ["local"]="127.0.0.1:3551" -) - -# how frequently to collect UPS data -apcupsd_update_every=10 - -apcupsd_timeout=3 - -# the priority of apcupsd related to other charts -apcupsd_priority=90000 - -apcupsd_get() { - run -t $apcupsd_timeout apcaccess status "$1" -} - -apcupsd_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - require_cmd apcaccess || return 1 - - # backwards compatibility - if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ] - then - apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}" - fi - - local host working=0 failed=0 - for host in "${!apcupsd_sources[@]}" - do - run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null - if [ $? -ne 0 ] - then - error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}." - failed=$((failed + 1)) - elif [ $(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }') != "ONLINE" ] - then - error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online." - failed=$((failed + 1)) - else - working=$((working + 1)) - fi - done - - if [ ${working} -eq 0 ] - then - error "No APC UPSes found available." 
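-		# returning 1 tells charts.d.plugin to disable this module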
- return 1 - fi - - return 0 -} - -apcupsd_create() { - local host src - for host in "${!apcupsd_sources[@]}" - do - src=${apcupsd_sources[${host}]} - - # create the charts - cat < -# GPL v3+ -# -# THIS PLUGIN IS OBSOLETE -# USE apps.plugin INSTEAD - -# a space separated list of command to monitor -cpu_apps_apps= - -# these are required for computing memory in bytes and cpu in seconds -#cpu_apps_pagesize="`getconf PAGESIZE`" -cpu_apps_clockticks="$(getconf CLK_TCK)" - -cpu_apps_update_every=60 - -cpu_apps_check() { - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - if [ -z "$cpu_apps_apps" ] - then - error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in $confd/cpu_apps_apps.conf" - return 1 - fi - return 0 -} - -cpu_apps_bc_finalze= - -cpu_apps_create() { - - echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every" - - local x= - for x in $cpu_apps_apps - do - echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks" - - # this string is needed later in the update() function - # to finalize the instructions for the bc command - cpu_apps_bc_finalze="$cpu_apps_bc_finalze \"SET $x = \"; $x;" - done - return 0 -} - -cpu_apps_update() { - # do all the work to collect / calculate the values - # for each dimension - # remember: KEEP IT SIMPLE AND SHORT - - echo "BEGIN chartsd_apps.cpu" - ps -o pid,comm -C "$cpu_apps_apps" |\ - grep -v "COMMAND" |\ - ( - while read pid name - do - echo "$name+=`cat /proc/$pid/stat | cut -d ' ' -f 14-15`" - done - ) |\ - ( sed -e "s/ \+/ /g" -e "s/ /+/g"; - echo "$cpu_apps_bc_finalze" - ) | bc - echo "END" - - return 0 -} diff --git a/charts.d/cpufreq.chart.sh b/charts.d/cpufreq.chart.sh deleted file mode 100644 index 1c41c38f2..000000000 --- a/charts.d/cpufreq.chart.sh +++ /dev/null @@ -1,88 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2016 Costa Tsaousis -# GPL v3+ -# - -# if this chart is called X.chart.sh, then all functions and global variables -# must start with X_ - -cpufreq_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" -cpufreq_sys_depth=10 -cpufreq_source_update=1 - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -cpufreq_update_every= -cpufreq_priority=10000 - -cpufreq_find_all_files() { - find $1 -maxdepth $cpufreq_sys_depth -name scaling_cur_freq 2>/dev/null -} - -# _check is called once, to find out if this chart should be enabled or not -cpufreq_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - [ -z "$( cpufreq_find_all_files $cpufreq_sys_dir )" ] && return 1 - return 0 -} - -# _create is called once, to create the charts -cpufreq_create() { - local dir= file= id= i= - - # we create a script with the source of the - # cpufreq_update() function - # - the highest speed we can achieve - - [ $cpufreq_source_update -eq 1 ] && echo >$TMP_DIR/cpufreq.sh "cpufreq_update() {" - - echo "CHART cpu.cpufreq '' 'CPU Clock' 'MHz' 'cpufreq' '' line $((cpufreq_priority + 1)) $cpufreq_update_every" - echo >>$TMP_DIR/cpufreq.sh "echo \"BEGIN cpu.cpufreq \$1\"" - - i=0 - for file in $( cpufreq_find_all_files $cpufreq_sys_dir | sort -u ) - do - i=$(( i + 1 )) - dir=$( dirname $file ) - cpu= - - [ -f $dir/affected_cpus ] && cpu=$( cat $dir/affected_cpus ) - [ -z "$cpu" ] && cpu="$i.a" - - id="$( fixid "cpu$cpu" )" - - debug "file='$file', dir='$dir', cpu='$cpu', id='$id'" - - echo "DIMENSION $id '$id' absolute 1 1000" - echo >>$TMP_DIR/cpufreq.sh "echo \"SET $id = \"\$(< $file )" - done - echo >>$TMP_DIR/cpufreq.sh "echo END" - - [ $cpufreq_source_update -eq 1 ] && echo >>$TMP_DIR/cpufreq.sh "}" - - # ok, load the function cpufreq_update() we created - [ $cpufreq_source_update -eq 1 ] && . $TMP_DIR/cpufreq.sh - - return 0 -} - -# _update is called continuously, to collect the values -cpufreq_update() { - # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). - - # do all the work to collect / calculate the values - # for each dimension - # remember: KEEP IT SIMPLE AND SHORT - - [ $cpufreq_source_update -eq 0 ] && . $TMP_DIR/cpufreq.sh $1 - - return 0 -} - diff --git a/charts.d/example.chart.sh b/charts.d/example.chart.sh deleted file mode 100644 index ffc98712f..000000000 --- a/charts.d/example.chart.sh +++ /dev/null @@ -1,119 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2016 Costa Tsaousis -# GPL v3+ -# - -# if this chart is called X.chart.sh, then all functions and global variables -# must start with X_ - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -example_update_every= - -# the priority is used to sort the charts on the dashboard -# 1 = the first chart -example_priority=150000 - -# to enable this chart, you have to set this to 12345 -# (just a demonstration for something that needs to be checked) -example_magic_number= - -# global variables to store our collected data -# remember: they need to start with the module name example_ -example_value1= -example_value2= -example_value3= -example_value4= -example_last=0 -example_count=0 - -example_get() { - # do all the work to collect / calculate the values - # for each dimension - # - # Remember: - # 1. KEEP IT SIMPLE AND SHORT - # 2. AVOID FORKS (avoid piping commands) - # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS - # 4. USE LOCAL VARIABLES (global variables may overlap with other modules) - - example_value1=$RANDOM - example_value2=$RANDOM - example_value3=$RANDOM - example_value4=$((8192 + (RANDOM * 16383 / 32767) )) - - if [ $example_count -gt 0 ] - then - example_count=$((example_count - 1)) - - [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767))) - [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767))) - else - example_count=$((1 + (RANDOM * 5 / 32767) )) - - [ $example_last -gt 16383 -a $example_value4 -gt 16383 ] && example_value4=$((example_value4 - 16383)) - [ $example_last -le 16383 -a $example_value4 -lt 16383 ] && example_value4=$((example_value4 + 16383)) - fi - example_last=$example_value4 - - # this should return: - # - 0 to send the data to netdata - # - 1 to report a failure to collect the data - - return 0 -} - -# _check is called once, to find out if this chart should be enabled or not -example_check() { - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - # check something - [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." 
&& return 1 - - # check that we can collect data - example_get || return 1 - - return 0 -} - -# _create is called once, to create the charts -example_create() { - # create the chart with 3 dimensions - cat < -# GPL v3+ -# -# Contributed by @jsveiga with PR #480 - -# the exim command to run -exim_command= - -# how frequently to collect queue size -exim_update_every=5 - -exim_priority=60000 - -exim_check() { - if [ -z "${exim_command}" ] - then - require_cmd exim || return 1 - exim_command="${EXIM_CMD}" - fi - - if [ $(${exim_command} -bpc 2>&1 | grep -c denied) -ne 0 ] - then - error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file" - return 1 - fi - - return 0 -} - -exim_create() { - cat < -# GPL v3+ -# -# contributed by @paulfantom with PR #511 - -# if this chart is called X.chart.sh, then all functions and global variables -# must start with X_ -hddtemp_host="localhost" -hddtemp_port="7634" -declare -A hddtemp_disks=() - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -hddtemp_update_every=3 -hddtemp_priority=90000 - -# _check is called once, to find out if this chart should be enabled or not -hddtemp_check() { - require_cmd nc || return 1 - run nc $hddtemp_host $hddtemp_port && return 0 || return 1 -} - -# _create is called once, to create the charts -hddtemp_create() { - if [ ${#hddtemp_disks[@]} -eq 0 ]; then - local all - all=$(nc $hddtemp_host $hddtemp_port ) - unset hddtemp_disks - hddtemp_disks=( `grep -Po '/dev/[^|]+' <<< "$all" | cut -c 6-` ) - fi -# local disk_names -# disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`) - - echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every" - for i in `seq 0 $((${#hddtemp_disks[@]}-1))`; do -# echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1" - echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1" - done - return 0 -} - -# _update is called continuously, to collect the values -hddtemp_last=0 -hddtemp_count=0 -hddtemp_update() { -# local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` ) -# local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` ) - OLD_IFS=$IFS - set -f - IFS="|" all=( $(nc $hddtemp_host $hddtemp_port 2>/dev/null) ) - set +f - IFS=$OLD_IFS - - # check if there is some data - if [ -z "${all[3]}" ]; then - return 1 - fi - - # write the result of the work. 
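-	# one BEGIN/SET.../END block per chart on stdout - this is the
-	# protocol charts.d modules use to hand collected values to netdata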
- echo "BEGIN hddtemp.temperature $1" - end=${#hddtemp_disks[@]} - for ((i=0; i -# GPL v3+ -# - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -libreswan_update_every=1 - -# the priority is used to sort the charts on the dashboard -# 1 = the first chart -libreswan_priority=90000 - -# set to 1, to run ipsec with sudo -libreswan_sudo=1 - -# global variables to store our collected data - -# [TUNNELID] = TUNNELNAME -# here we track the *latest* established tunnels -# as detected by: ipsec whack --status -declare -A libreswan_connected_tunnels=() - -# [TUNNELID] = VALUE -# here we track values of all established tunnels (not only the latest) -# as detected by: ipsec whack --trafficstatus -declare -A libreswan_traffic_in=() -declare -A libreswan_traffic_out=() -declare -A libreswan_established_add_time=() - -# [TUNNELNAME] = CHARTID -# here we remember CHARTIDs of all tunnels -# we need this to avoid converting tunnel names to chart IDs on every iteration -declare -A libreswan_tunnel_charts=() - -# run the ipsec command -libreswan_ipsec() { - if [ ${libreswan_sudo} -ne 0 ] - then - sudo -n "${IPSEC_CMD}" "${@}" - return $? - else - "${IPSEC_CMD}" "${@}" - return $? - fi -} - -# fetch latest values - fill the arrays -libreswan_get() { - # do all the work to collect / calculate the values - # for each dimension - - # empty the variables - libreswan_traffic_in=() - libreswan_traffic_out=() - libreswan_established_add_time=() - libreswan_connected_tunnels=() - - # convert the ipsec command output to a shell script - # and source it to get the values - source <( - { - libreswan_ipsec whack --status; - libreswan_ipsec whack --trafficstatus; - } | sed -n \ - -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \ - -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p" - ) || return 1 - - # check we got some data - [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1 - - return 0 -} - -# _check is called once, to find out if this chart should be enabled or not -libreswan_check() { - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - require_cmd ipsec || return 1 - - # make sure it is libreswan - if [ -z "$(ipsec --version | grep -i libreswan)" ] - then - error "ipsec command is not Libreswan. Disabling Libreswan plugin." - return 1 - fi - - # check that we can collect data - libreswan_get || return 1 - - return 0 -} - -# create the charts for an ipsec tunnel -libreswan_create_one() { - local n="${1}" name - - name="${libreswan_connected_tunnels[${n}]}" - - [ ! -z "${libreswan_tunnel_charts[${name}]}" ] && return 0 - - libreswan_tunnel_charts[${name}]="$(fixid "${name}")" - - cat < -# GPL v3+ -# - -load_average_update_every=5 -load_priority=100 - -# this is an example charts.d collector -# it is disabled by default. -# there is no point to enable it, since netdata already -# collects this information using its internal plugins. 
-load_average_enabled=0 - -load_average_check() { - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - if [ ${load_average_update_every} -lt 5 ] - then - # there is no meaning for shorter than 5 seconds - # the kernel changes this value every 5 seconds - load_average_update_every=5 - fi - - [ ${load_average_enabled} -eq 0 ] && return 1 - return 0 -} - -load_average_create() { - # create a chart with 3 dimensions -cat < -# GPL v3+ -# - -mem_apps_apps= - -# these are required for computing memory in bytes and cpu in seconds -#mem_apps_pagesize="`getconf PAGESIZE`" -#mem_apps_clockticks="`getconf CLK_TCK`" - -mem_apps_update_every= - -mem_apps_check() { - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - if [ -z "$mem_apps_apps" ] - then - error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf" - return 1 - fi - return 0 -} - -mem_apps_bc_finalze= - -mem_apps_create() { - - echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every" - - local x= - for x in $mem_apps_apps - do - echo "DIMENSION $x $x absolute 1 1024" - - # this string is needed later in the update() function - # to finalize the instructions for the bc command - mem_apps_bc_finalze="$mem_apps_bc_finalze \"SET $x = \"; $x;" - done - return 0 -} - -mem_apps_update() { - # do all the work to collect / calculate the values - # for each dimension - # remember: KEEP IT SIMPLE AND SHORT - - echo "BEGIN chartsd_apps.mem" - ps -o comm,rss -C "$mem_apps_apps" |\ - grep -v "^COMMAND" |\ - ( sed -e "s/ \+/ /g" -e "s/ /+=/g"; - echo "$mem_apps_bc_finalze" - ) | bc - echo "END" - - return 0 -} diff --git a/charts.d/mysql.chart.sh b/charts.d/mysql.chart.sh deleted file mode 100644 index 1363d01f4..000000000 --- a/charts.d/mysql.chart.sh +++ /dev/null @@ -1,522 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2016 Costa Tsaousis -# GPL v3+ -# - -# http://dev.mysql.com/doc/refman/5.0/en/server-status-variables.html -# -# https://dev.mysql.com/doc/refman/5.1/en/show-status.html -# SHOW STATUS provides server status information (see Section 5.1.6, “Server Status Variables”). -# This statement does not require any privilege. -# It requires only the ability to connect to the server. 
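-
-# a rough sketch of the input this module parses (the variable names
-# below are real SHOW GLOBAL STATUS keys, the values are illustrative):
-#
-#   $ mysql -N -e "SHOW GLOBAL STATUS;"
-#   Aborted_clients	12
-#   Aborted_connects	3
-#   Bytes_received	1848430
-#
-# mysql_get() below filters these tab-separated name/value pairs and
-# loads them into the mysql_data[] associative array, keyed by name.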
- -mysql_update_every=2 -mysql_priority=60000 - -declare -A mysql_cmds=() mysql_opts=() mysql_ids=() mysql_data=() - -mysql_get() { - local arr - local oIFS="${IFS}" - mysql_data=() - IFS=$'\t'$'\n' - #arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" )) - #arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" )) - arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+" )) - IFS="${oIFS}" - - [ "${#arr[@]}" -lt 3 ] && return 1 - local end=${#arr[@]} - for ((i=2;i -# GPL v3+ -# - -# if this chart is called X.chart.sh, then all functions and global variables -# must start with X_ - -nginx_url="http://127.0.0.1:80/stub_status" -nginx_curl_opts="" - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -nginx_update_every= -nginx_priority=60000 - -declare -a nginx_response=() -nginx_active_connections=0 -nginx_accepts=0 -nginx_handled=0 -nginx_requests=0 -nginx_reading=0 -nginx_writing=0 -nginx_waiting=0 -nginx_get() { - nginx_response=($(run curl -Ss ${nginx_curl_opts} "${nginx_url}")) - [ $? -ne 0 -o "${#nginx_response[@]}" -eq 0 ] && return 1 - - if [ "${nginx_response[0]}" != "Active" \ - -o "${nginx_response[1]}" != "connections:" \ - -o "${nginx_response[3]}" != "server" \ - -o "${nginx_response[4]}" != "accepts" \ - -o "${nginx_response[5]}" != "handled" \ - -o "${nginx_response[6]}" != "requests" \ - -o "${nginx_response[10]}" != "Reading:" \ - -o "${nginx_response[12]}" != "Writing:" \ - -o "${nginx_response[14]}" != "Waiting:" \ - ] - then - error "Invalid response from nginx server: ${nginx_response[*]}" - return 1 - fi - - nginx_active_connections="${nginx_response[2]}" - nginx_accepts="${nginx_response[7]}" - nginx_handled="${nginx_response[8]}" - nginx_requests="${nginx_response[9]}" - nginx_reading="${nginx_response[11]}" - nginx_writing="${nginx_response[13]}" - nginx_waiting="${nginx_response[15]}" - - if [ -z "${nginx_active_connections}" \ - -o -z "${nginx_accepts}" \ - -o -z "${nginx_handled}" \ - -o -z "${nginx_requests}" \ - -o -z "${nginx_reading}" \ - -o -z "${nginx_writing}" \ - -o -z "${nginx_waiting}" \ - ] - then - error "empty values got from nginx server: ${nginx_response[*]}" - return 1 - fi - - return 0 -} - -# _check is called once, to find out if this chart should be enabled or not -nginx_check() { - - nginx_get - if [ $? -ne 0 ] - then - error "cannot find stub_status on URL '${nginx_url}'. 
Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf" - return 1 - fi - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - return 0 -} - -# _create is called once, to create the charts -nginx_create() { - cat < -# GPL v3+ -# - -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -nut_ups= - -# how frequently to collect UPS data -nut_update_every=2 - -# how much time in seconds, to wait for nut to respond -nut_timeout=2 - -# set this to 1, to enable another chart showing the number -# of UPS clients connected to upsd -nut_clients_chart=0 - -# the priority of nut related to other charts -nut_priority=90000 - -declare -A nut_ids=() - -nut_get_all() { - run -t $nut_timeout upsc -l -} - -nut_get() { - run -t $nut_timeout upsc "$1" - - if [ "${nut_clients_chart}" -eq "1" ] - then - printf "ups.connected_clients: " - run -t $nut_timeout upsc -c "$1" | wc -l - fi -} - -nut_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - local x - - require_cmd upsc || return 1 - - [ -z "$nut_ups" ] && nut_ups="$( nut_get_all )" - - for x in $nut_ups - do - nut_get "$x" >/dev/null - if [ $? -eq 0 ] - then - nut_ids[$x]="$( fixid "$x" )" - continue - fi - error "cannot get information for NUT UPS '$x'." - done - - if [ ${#nut_ids[@]} -eq 0 ] - then - error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf" - return 1 - fi - - return 0 -} - -nut_create() { - # create the charts - local x - - for x in "${nut_ids[@]}" - do - cat < -# GPL v3+ -# - -opensips_opts="fifo get_statistics all" -opensips_cmd= -opensips_update_every=5 -opensips_timeout=2 -opensips_priority=80000 - -opensips_get_stats() { - run -t $opensips_timeout "$opensips_cmd" $opensips_opts |\ - grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |\ - sed \ - -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \ - -e "s|[[:space:]:-]\+|_|g" \ - -e "s|^|opensips_|g" - - local ret=$? - [ $ret -ne 0 ] && echo "opensips_command_failed=1" - return $ret -} - -opensips_check() { - # if the user did not provide an opensips_cmd - # try to find it in the system - if [ -z "$opensips_cmd" ] - then - require_cmd opensipsctl || return 1 - fi - - # check once if the command works - local x="$(opensips_get_stats | grep "^opensips_core_")" - if [ ! $? -eq 0 -o -z "$x" ] - then - error "cannot get global status. 
Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf" - return 1 - fi - - return 0 -} - -opensips_create() { - # create the charts - cat < -# GPL v3+ -# -# Contributed by @safeie with PR #276 - -# first, you need open php-fpm status in php-fpm.conf -# second, you need add status location in nginx.conf -# you can see, https://easyengine.io/tutorials/php/fpm-status-page/ - -declare -A phpfpm_urls=() -declare -A phpfpm_curl_opts=() - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -phpfpm_update_every= -phpfpm_priority=60000 - -declare -a phpfpm_response=() -phpfpm_pool="" -phpfpm_start_time="" -phpfpm_start_since=0 -phpfpm_accepted_conn=0 -phpfpm_listen_queue=0 -phpfpm_max_listen_queue=0 -phpfpm_listen_queue_len=0 -phpfpm_idle_processes=0 -phpfpm_active_processes=0 -phpfpm_total_processes=0 -phpfpm_max_active_processes=0 -phpfpm_max_children_reached=0 -phpfpm_slow_requests=0 -phpfpm_get() { - local opts="${1}" url="${2}" - - phpfpm_response=($(run curl -Ss ${opts} "${url}")) - [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && return 1 - - if [[ "${phpfpm_response[0]}" != "pool:" \ - || "${phpfpm_response[2]}" != "process" \ - || "${phpfpm_response[5]}" != "start" \ - || "${phpfpm_response[12]}" != "accepted" \ - || "${phpfpm_response[15]}" != "listen" \ - || "${phpfpm_response[16]}" != "queue:" \ - || "${phpfpm_response[26]}" != "idle" \ - || "${phpfpm_response[29]}" != "active" \ - || "${phpfpm_response[32]}" != "total" \ - ]] - then - error "invalid response from phpfpm status server: ${phpfpm_response[*]}" - return 1 - fi - - phpfpm_pool="${phpfpm_response[1]}" - phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" - phpfpm_start_since="${phpfpm_response[11]}" - phpfpm_accepted_conn="${phpfpm_response[14]}" - phpfpm_listen_queue="${phpfpm_response[17]}" - phpfpm_max_listen_queue="${phpfpm_response[21]}" - phpfpm_listen_queue_len="${phpfpm_response[25]}" - phpfpm_idle_processes="${phpfpm_response[28]}" - phpfpm_active_processes="${phpfpm_response[31]}" - phpfpm_total_processes="${phpfpm_response[34]}" - phpfpm_max_active_processes="${phpfpm_response[38]}" - phpfpm_max_children_reached="${phpfpm_response[42]}" - if [ "${phpfpm_response[43]}" == "slow" ] - then - phpfpm_slow_requests="${phpfpm_response[45]}" - else - phpfpm_slow_requests="-1" - fi - - if [[ -z "${phpfpm_pool}" \ - || -z "${phpfpm_start_time}" \ - || -z "${phpfpm_start_since}" \ - || -z "${phpfpm_accepted_conn}" \ - || -z "${phpfpm_listen_queue}" \ - || -z "${phpfpm_max_listen_queue}" \ - || -z "${phpfpm_listen_queue_len}" \ - || -z "${phpfpm_idle_processes}" \ - || -z "${phpfpm_active_processes}" \ - || -z "${phpfpm_total_processes}" \ - || -z "${phpfpm_max_active_processes}" \ - || -z "${phpfpm_max_children_reached}" \ - ]] - then - error "empty values got from phpfpm status server: ${phpfpm_response[*]}" - return 1 - fi - - return 0 -} - -# _check is called once, to find out if this chart should be enabled or not -phpfpm_check() { - if [ ${#phpfpm_urls[@]} -eq 0 ]; then - phpfpm_urls[local]="http://localhost/status" - fi - - local m - for m in "${!phpfpm_urls[@]}" - do - phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}" - if [ $? -ne 0 ]; then - error "cannot find status on URL '${phpfpm_url[$m]}'. 
Please set phpfpm_urls[$m]='http://localhost/status' in $confd/phpfpm.conf" - unset phpfpm_urls[$m] - continue - fi - done - - if [ ${#phpfpm_urls[@]} -eq 0 ]; then - error "no phpfpm servers found. Please set phpfpm_urls[name]='url' to whatever needed to get status to the phpfpm server, in $confd/phpfpm.conf" - return 1 - fi - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - return 0 -} - -# _create is called once, to create the charts -phpfpm_create() { - local m - for m in "${!phpfpm_urls[@]}" - do - cat < -# GPL v3+ -# - -# the postqueue command -# if empty, it will use the one found in the system path -postfix_postqueue= - -# how frequently to collect queue size -postfix_update_every=15 - -postfix_priority=60000 - -postfix_check() { - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - # try to find the postqueue executable - if [ -z "$postfix_postqueue" -o ! -x "$postfix_postqueue" ] - then - postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)" - fi - - if [ -z "$postfix_postqueue" -o ! -x "$postfix_postqueue" ] - then - error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf" - return 1 - fi - - return 0 -} - -postfix_create() { -cat < -# GPL v3+ -# - -# sensors docs -# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface - -# if this chart is called X.chart.sh, then all functions and global variables -# must start with X_ - -# the directory the kernel keeps sensor data -sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" - -# how deep in the tree to check for sensor data -sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -sensors_source_update=1 - -# how frequently to collect sensor data -# the default is to collect it at every iteration of charts.d -sensors_update_every= - -sensors_priority=90000 - -declare -A sensors_excluded=() - -sensors_find_all_files() { - find $1 -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null -} - -sensors_find_all_dirs() { - sensors_find_all_files $1 | while read - do - dirname $REPLY - done | sort -u -} - -# _check is called once, to find out if this chart should be enabled or not -sensors_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - [ -z "$( sensors_find_all_files $sensors_sys_dir )" ] && error "no sensors found in '$sensors_sys_dir'." && return 1 - return 0 -} - -sensors_check_files() { - # we only need sensors that report a non-zero value - # also remove not needed sensors - - local f= v= excluded= - for f in $* - do - [ ! -f "$f" ] && continue - for ex in ${sensors_excluded[@]}; do - [[ $f =~ .*$ex$ ]] && excluded='1' && break - done - - [ "$excluded" != "1" ] && v="$( cat $f )" || v=0 - v=$(( v + 1 - 1 )) - [ $v -ne 0 ] && echo "$f" && continue - excluded= - - error "$f gives zero values" - done -} - -sensors_check_temp_type() { - # valid temp types are 1 to 6 - # disabled sensors have the value 0 - - local f= t= v= - for f in $* - do - t=$( echo $f | sed "s|_input$|_type|g" ) - [ "$f" = "$t" ] && echo "$f" && continue - [ ! 
-f "$t" ] && echo "$f" && continue - - v="$( cat $t )" - v=$(( v + 1 - 1 )) - [ $v -ne 0 ] && echo "$f" && continue - - error "$f is disabled" - done -} - -# _create is called once, to create the charts -sensors_create() { - local path= dir= name= x= file= lfile= labelname= labelid= device= subsystem= id= type= mode= files= multiplier= divisor= - - # we create a script with the source of the - # sensors_update() function - # - the highest speed we can achieve - - [ $sensors_source_update -eq 1 ] && echo >$TMP_DIR/sensors.sh "sensors_update() {" - - for path in $( sensors_find_all_dirs $sensors_sys_dir | sort -u ) - do - dir=$( basename $path ) - device= - subsystem= - id= - type= - name= - - [ -h $path/device ] && device=$( readlink -f $path/device ) - [ ! -z "$device" ] && device=$( basename $device ) - [ -z "$device" ] && device="$dir" - - [ -h $path/subsystem ] && subsystem=$( readlink -f $path/subsystem ) - [ ! -z "$subsystem" ] && subsystem=$( basename $subsystem ) - [ -z "$subsystem" ] && subsystem="$dir" - - [ -f $path/name ] && name=$( cat $path/name ) - [ -z "$name" ] && name="$dir" - - [ -f $path/type ] && type=$( cat $path/type ) - [ -z "$type" ] && type="$dir" - - id="$( fixid "$device.$subsystem.$dir" )" - - debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'" - - for mode in temperature voltage fans power current energy humidity - do - files= - multiplier=1 - divisor=1 - algorithm="absolute" - - case $mode in - temperature) - files="$( ls $path/temp*_input 2>/dev/null; ls $path/temp 2>/dev/null )" - files="$( sensors_check_files $files )" - files="$( sensors_check_temp_type $files )" - [ -z "$files" ] && continue - echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.temp_$id \$1\"" - divisor=1000 - ;; - - voltage) - files="$( ls $path/in*_input 2>/dev/null )" - files="$( sensors_check_files $files )" - [ -z "$files" ] && continue - echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.volt_$id \$1\"" - divisor=1000 - ;; - - current) - files="$( ls $path/curr*_input 2>/dev/null )" - files="$( sensors_check_files $files )" - [ -z "$files" ] && continue - echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.curr_$id \$1\"" - divisor=1000 - ;; - - power) - files="$( ls $path/power*_input 2>/dev/null )" - files="$( sensors_check_files $files )" - [ -z "$files" ] && continue - echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.power_$id \$1\"" - divisor=1000000 - ;; - - fans) - files="$( ls $path/fan*_input 2>/dev/null )" - files="$( sensors_check_files $files )" - [ -z "$files" ] && continue - echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.fan_$id \$1\"" - ;; - - energy) - files="$( ls $path/energy*_input 2>/dev/null )" - files="$( sensors_check_files $files )" - [ -z "$files" ] && continue - echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 
'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.energy_$id \$1\"" - algorithm="incremental" - divisor=1000000 - ;; - - humidity) - files="$( ls $path/humidity*_input 2>/dev/null )" - files="$( sensors_check_files $files )" - [ -z "$files" ] && continue - echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every" - echo >>$TMP_DIR/sensors.sh "echo \"BEGIN sensors.humidity_$id \$1\"" - divisor=1000 - ;; - - *) - continue - ;; - esac - - for x in $files - do - file="$x" - fid="$( fixid "$file" )" - lfile="$( basename $file | sed "s|_input$|_label|g" )" - labelname="$( basename $file | sed "s|_input$||g" )" - - if [ ! "$path/$lfile" = "$file" -a -f "$path/$lfile" ] - then - labelname="$( cat "$path/$lfile" )" - fi - - echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor" - echo >>$TMP_DIR/sensors.sh "echo \"SET $fid = \"\$(< $file )" - done - - echo >>$TMP_DIR/sensors.sh "echo END" - done - done - - [ $sensors_source_update -eq 1 ] && echo >>$TMP_DIR/sensors.sh "}" - - # ok, load the function sensors_update() we created - [ $sensors_source_update -eq 1 ] && . $TMP_DIR/sensors.sh - - return 0 -} - -# _update is called continuously, to collect the values -sensors_update() { - # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). - - # do all the work to collect / calculate the values - # for each dimension - # remember: KEEP IT SIMPLE AND SHORT - - [ $sensors_source_update -eq 0 ] && . $TMP_DIR/sensors.sh $1 - - return 0 -} - diff --git a/charts.d/squid.chart.sh b/charts.d/squid.chart.sh deleted file mode 100644 index 2c19c35d5..000000000 --- a/charts.d/squid.chart.sh +++ /dev/null @@ -1,144 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2016 Costa Tsaousis -# GPL v3+ -# - -squid_host= -squid_port= -squid_url= -squid_timeout=2 -squid_update_every=2 -squid_priority=60000 - -squid_get_stats_internal() { - local host="$1" port="$2" url="$3" - run squidclient -h $host -p $port $url -} - -squid_get_stats() { - squid_get_stats_internal "$squid_host" "$squid_port" "$squid_url" -} - -squid_autodetect() { - local host="127.0.0.1" port url x - - for port in 3128 8080 - do - for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters" - do - x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests) - if [ ! -z "$x" ] - then - squid_host="$host" - squid_port="$port" - squid_url="$url" - debug "found squid at '$host:$port' with url '$url'" - return 0 - fi - done - done - - error "cannot find squid running in localhost. Please set squid_url='url' and squid_host='IP' and squid_port='PORT' in $confd/squid.conf" - return 1 -} - -squid_check() { - require_cmd squidclient || return 1 - require_cmd sed || return 1 - require_cmd egrep || return 1 - - if [ -z "$squid_host" -o -z "$squid_port" -o -z "$squid_url" ] - then - squid_autodetect || return 1 - fi - - # check once if the url works - local x="$(squid_get_stats | grep client_http.requests)" - if [ ! $? -eq 0 -o -z "$x" ] - then - error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. 
Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf" - return 1 - fi - - return 0 -} - -squid_create() { - # create the charts - cat < -# GPL v3+ -# -# Contributed by @jgeromero with PR #277 - -# Description: Tomcat netdata charts.d plugin -# Author: Jorge Romero - -# the URL to download tomcat status info -# usually http://localhost:8080/manager/status?XML=true -tomcat_url="" -tomcat_curl_opts="" - -# set tomcat username/password here -tomcat_user="" -tomcat_password="" - -# _update_every is a special variable - it holds the number of seconds -# between the calls of the _update() function -tomcat_update_every= - -tomcat_priority=60000 - -# convert tomcat floating point values -# to integer using this multiplier -# this only affects precision - the values -# will be in the proper units -tomcat_decimal_detail=1000000 - -# used by volume chart to convert bytes to KB -tomcat_decimal_KB_detail=1000 - -tomcat_check() { - - require_cmd xmlstarlet || return 1 - - - # check if url, username, passwords are set - if [ -z "${tomcat_url}" ]; then - error "tomcat url is unset or set to the empty string" - return 1 - fi - if [ -z "${tomcat_user}" ]; then - # check backwards compatibility - if [ -z "${tomcatUser}" ]; then - error "tomcat user is unset or set to the empty string" - return 1 - else - tomcat_user="${tomcatUser}" - fi - fi - if [ -z "${tomcat_password}" ]; then - # check backwards compatibility - if [ -z "${tomcatPassword}" ]; then - error "tomcat password is unset or set to the empty string" - return 1 - else - tomcat_password="${tomcatPassword}" - fi - fi - - # check if we can get to tomcat's status page - tomcat_get - if [ $? -ne 0 ] - then - error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct." 
- return 1 - fi - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - return 0 -} - -tomcat_get() { - # collect tomcat values - tomcat_port="$(IFS=/ read -ra a <<< "$tomcat_url"; hostport=${a[2]}; echo "${hostport#*:}")" - mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |\ - run xmlstarlet sel \ - -t -m "/status/jvm/memory" -v @free \ - -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \ - -n -v @currentThreadsBusy \ - -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/requestInfo" -v @requestCount \ - -n -v @bytesSent -n -) - - tomcat_jvm_freememory="${lines[0]}" - tomcat_threads="${lines[1]}" - tomcat_threads_busy="${lines[2]}" - tomcat_accesses="${lines[3]}" - tomcat_volume="${lines[4]}" - - return 0 -} - -# _create is called once, to create the charts -tomcat_create() { - cat <&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = 
$(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = 
@LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + plugins.d \ + apps.plugin \ + cgroups.plugin \ + charts.d.plugin \ + checks.plugin \ + 
diskspace.plugin \ + fping.plugin \ + freebsd.plugin \ + freeipmi.plugin \ + idlejitter.plugin \ + macos.plugin \ + nfacct.plugin \ + node.d.plugin \ + proc.plugin \ + python.d.plugin \ + statsd.plugin \ + tc.plugin \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-generic cscopelist-am ctags ctags-am \
+	distclean distclean-generic distclean-tags distdir dvi dvi-am \
+	html html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs installdirs-am maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/README.md b/collectors/README.md
new file mode 100644
index 000000000..b7fc73286
--- /dev/null
+++ b/collectors/README.md
@@ -0,0 +1,118 @@
+# Data Collection Plugins
+
+netdata supports **internal** and **external** data collection plugins:
+
+- **internal** plugins are written in `C` and run as threads inside the netdata daemon.
+
+- **external** plugins may be written in any computer language and are spawned as independent long-running processes by the netdata daemon.
+  They communicate with the netdata daemon via `pipes` (`stdout` communication).
+
+To minimize the number of processes spawned for data collection, netdata also supports **plugin orchestrators**.
+
+- **plugin orchestrators** are external plugins that do not collect any data by themselves.
+  Instead, they support data collection **modules** written in the language of the orchestrator.
+  Usually the orchestrator provides a higher-level abstraction, making it ideal for writing new
+  data collection modules with a minimum of code.
+
+  Currently netdata provides the following plugin orchestrators:
+  BASH v4+ [charts.d.plugin](charts.d.plugin),
+  node.js [node.d.plugin](node.d.plugin) and
+  python v2+ (including v3) [python.d.plugin](python.d.plugin).
+
+## Netdata Plugins
+
+plugin|lang|O/S|runs as|modular|description
+:---:|:---:|:---:|:---:|:---:|:---
+[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
+[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems
+[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)
+[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points
+[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
+[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems
+[freeipmi.plugin](freeipmi.plugin/)|`C`|linux, freebsd|external|-|collects metrics from enterprise hardware sensors, on Linux and FreeBSD servers.
+[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems
+[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on macOS systems
+[nfacct.plugin](nfacct.plugin/)|`C`|linux|internal|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`
+[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`.
+[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins
+[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems
+[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
+[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for netdata
+[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces
+
+## Enabling and Disabling plugins
+
+Each plugin can be enabled or disabled via `netdata.conf`, section `[plugins]`.
+
+This section contains a list of all the plugins, each with a boolean setting to enable or disable it.
+
+The exception is `statsd.plugin`, which has its own `[statsd]` section.
+
+Once a plugin is enabled, consult its page for additional configuration options.
+
+All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options.
+
+### Internal Plugins
+
+Each of the internal plugins runs as a thread inside the netdata daemon.
+Once this thread has started, the plugin may spawn additional threads according to its design.
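+
+For example, internal plugins such as `proc` or `cgroups` can be switched on or off from the
+`[plugins]` section described above. A minimal sketch only - the option names shown are
+illustrative; check the generated `netdata.conf` on your system for the exact list:
+
+```
+[plugins]
+	# proc = yes
+	# cgroups = yes
+	apps = no
+```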
+
+#### Internal Plugins API
+
+The internal data collection API consists of the following calls:
+
+```c
+collect_data() {
+    // collect data here (one iteration)
+
+    collected_number collected_value = collect_a_value();
+
+    // give the metrics to netdata
+
+    static RRDSET *st = NULL; // the chart
+    static RRDDIM *rd = NULL; // a dimension attached to this chart
+
+    if(unlikely(!st)) {
+        // we haven't created this chart before
+        // create it now
+        st = rrdset_create_localhost(
+                "type"
+                , "id"
+                , "name"
+                , "family"
+                , "context"
+                , "Chart Title"
+                , "units"
+                , "plugin-name"
+                , "module-name"
+                , priority
+                , update_every
+                , chart_type
+        );
+
+        // attach a metric to it
+        rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm);
+    }
+    else {
+        // this chart is already created
+        // let netdata know we start a new iteration on it
+        rrdset_next(st);
+    }
+
+    // give the collected value(s) to the chart
+    rrddim_set_by_pointer(st, rd, collected_value);
+
+    // signal netdata we are done with this iteration
+    rrdset_done(st);
+}
+```
+
+Of course, netdata has a lot of libraries to help you collect the metrics.
+The best way to find your way through them is to examine what other similar plugins do.
+
+
+### External Plugins
+
+**External plugins** use the API and are managed by [plugins.d](plugins.d/).
+
diff --git a/collectors/all.h b/collectors/all.h
new file mode 100644
index 000000000..aa19bd5bd
--- /dev/null
+++ b/collectors/all.h
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ALL_H
+#define NETDATA_ALL_H 1
+
+#include "../daemon/common.h"
+
+// netdata internal data collection plugins
+
+#include "checks.plugin/plugin_checks.h"
+#include "freebsd.plugin/plugin_freebsd.h"
+#include "idlejitter.plugin/plugin_idlejitter.h"
+#include "cgroups.plugin/sys_fs_cgroup.h"
+#include "diskspace.plugin/plugin_diskspace.h"
+#include "nfacct.plugin/plugin_nfacct.h"
+#include "proc.plugin/plugin_proc.h"
+#include "tc.plugin/plugin_tc.h"
+#include "macos.plugin/plugin_macos.h"
+#include "statsd.plugin/statsd.h"
+
+#include "plugins.d/plugins_d.h"
+
+
+// ----------------------------------------------------------------------------
+// netdata chart priorities
+
+// This is a work in progress - the scope is to collect here all chart priorities.
+// These should be based on the CONTEXT of the charts + the chart id when needed +// - for each SECTION +1000 (or +X000 for big sections) +// - for each FAMILY +100 +// - for each CHART +10 + +#define NETDATA_CHART_PRIO_SYSTEM_CPU 100 +#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100 +#define NETDATA_CHART_PRIO_SYSTEM_IO 150 +#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151 +#define NETDATA_CHART_PRIO_SYSTEM_RAM 200 +#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201 +#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250 +#define NETDATA_CHART_PRIO_SYSTEM_NET 500 +#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_IP 501 +#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502 +#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600 +#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700 +#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750 +#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800 +#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800 +#define NETDATA_CHART_PRIO_SYSTEM_INTR 900 +#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950 +#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955 +#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000 +#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000 +#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 990 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1000 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1100 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1000 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1000 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1000 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1000 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only + + +// CPU per core + +#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core +#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only +#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only + +#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001 +#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002 + +// Interrupts per core + +#define NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE 1100 // +1 per core + +// Memory Section - 1xxx + +#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010 +#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1020 +#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1030 +#define NETDATA_CHART_PRIO_MEM_KERNEL 1100 +#define NETDATA_CHART_PRIO_MEM_SLAB 1200 +#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250 +#define NETDATA_CHART_PRIO_MEM_KSM 1300 +#define NETDATA_CHART_PRIO_MEM_KSM_SAVINGS 1301 +#define NETDATA_CHART_PRIO_MEM_KSM_RATIOS 1302 +#define NETDATA_CHART_PRIO_MEM_NUMA 1400 +#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410 +#define NETDATA_CHART_PRIO_MEM_HW 1500 +#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550 +#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560 + +// Disks + +#define NETDATA_CHART_PRIO_DISK_IO 2000 +#define NETDATA_CHART_PRIO_DISK_OPS 2001 +#define NETDATA_CHART_PRIO_DISK_QOPS 2002 +#define NETDATA_CHART_PRIO_DISK_BACKLOG 2003 +#define NETDATA_CHART_PRIO_DISK_UTIL 2004 +#define NETDATA_CHART_PRIO_DISK_AWAIT 2005 +#define NETDATA_CHART_PRIO_DISK_AVGSZ 2006 +#define NETDATA_CHART_PRIO_DISK_SVCTM 2007 +#define NETDATA_CHART_PRIO_DISK_MOPS 2021 +#define NETDATA_CHART_PRIO_DISK_IOTIME 2022 +#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120 +#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120 
+#define NETDATA_CHART_PRIO_BCACHE_RATES 2121 +#define NETDATA_CHART_PRIO_BCACHE_SIZE 2122 +#define NETDATA_CHART_PRIO_BCACHE_USAGE 2123 +#define NETDATA_CHART_PRIO_BCACHE_OPS 2124 +#define NETDATA_CHART_PRIO_BCACHE_BYPASS 2125 +#define NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES 2126 + +#define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023 +#define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024 + +// NFS (server) + +#define NETDATA_CHART_PRIO_NFSD_READCACHE 2100 +#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2101 +#define NETDATA_CHART_PRIO_NFSD_IO 2102 +#define NETDATA_CHART_PRIO_NFSD_THREADS 2103 +#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2104 +#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2105 +#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2105 +#define NETDATA_CHART_PRIO_NFSD_NET 2107 +#define NETDATA_CHART_PRIO_NFSD_RPC 2108 +#define NETDATA_CHART_PRIO_NFSD_PROC2 2109 +#define NETDATA_CHART_PRIO_NFSD_PROC3 2110 +#define NETDATA_CHART_PRIO_NFSD_PROC4 2111 +#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2112 + +// NFS (client) + +#define NETDATA_CHART_PRIO_NFS_NET 2207 +#define NETDATA_CHART_PRIO_NFS_RPC 2208 +#define NETDATA_CHART_PRIO_NFS_PROC2 2209 +#define NETDATA_CHART_PRIO_NFS_PROC3 2210 +#define NETDATA_CHART_PRIO_NFS_PROC4 2211 + +// BTRFS + +#define NETDATA_CHART_PRIO_BTRFS_DISK 2300 +#define NETDATA_CHART_PRIO_BTRFS_DATA 2301 +#define NETDATA_CHART_PRIO_BTRFS_METADATA 2302 +#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2303 + +// ZFS + +#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE 2500 +#define NETDATA_CHART_PRIO_ZFS_L2_SIZE 2500 +#define NETDATA_CHART_PRIO_ZFS_READS 2510 +#define NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS 2519 +#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN 2520 +#define NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS 2522 +#define NETDATA_CHART_PRIO_ZFS_MEMORY_OPS 2523 +#define NETDATA_CHART_PRIO_ZFS_IO 2700 +#define NETDATA_CHART_PRIO_ZFS_HITS 2520 +#define NETDATA_CHART_PRIO_ZFS_DHITS 2530 +#define NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS 2531 +#define NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS 2532 +#define NETDATA_CHART_PRIO_ZFS_PHITS 2540 +#define NETDATA_CHART_PRIO_ZFS_MHITS 2550 +#define NETDATA_CHART_PRIO_ZFS_L2HITS 2560 +#define NETDATA_CHART_PRIO_ZFS_LIST_HITS 2600 +#define NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS 2800 +#define NETDATA_CHART_PRIO_ZFS_HASH_CHAINS 2810 + + +// SOFTIRQs + +#define NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE 3000 // +1 per core + +// IPFW (freebsd) + +#define NETDATA_CHART_PRIO_IPFW_PACKETS 3001 +#define NETDATA_CHART_PRIO_IPFW_BYTES 3002 +#define NETDATA_CHART_PRIO_IPFW_ACTIVE 3003 +#define NETDATA_CHART_PRIO_IPFW_EXPIRED 3004 +#define NETDATA_CHART_PRIO_IPFW_MEM 3005 + + +// IPVS + +#define NETDATA_CHART_PRIO_IPVS_NET 3100 +#define NETDATA_CHART_PRIO_IPVS_SOCKETS 3101 +#define NETDATA_CHART_PRIO_IPVS_PACKETS 3102 + +// Softnet + +#define NETDATA_CHART_PRIO_SOFTNET_PER_CORE 4101 // +1 per core + +// IP STACK + +#define NETDATA_CHART_PRIO_IP_ERRORS 4100 +#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4210 +#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4215 +#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4216 +#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4220 +#define NETDATA_CHART_PRIO_IP_TCP_OFO 4250 +#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4260 +#define NETDATA_CHART_PRIO_IP_TCP_MEM 4290 +#define NETDATA_CHART_PRIO_IP_BCAST 4500 +#define NETDATA_CHART_PRIO_IP_BCAST_PACKETS 4510 +#define NETDATA_CHART_PRIO_IP_MCAST 4600 +#define NETDATA_CHART_PRIO_IP_MCAST_PACKETS 4610 +#define NETDATA_CHART_PRIO_IP_ECN 4700 + +// IPv4 + +#define NETDATA_CHART_PRIO_IPV4_SOCKETS 5100 
+#define NETDATA_CHART_PRIO_IPV4_PACKETS 5130 +#define NETDATA_CHART_PRIO_IPV4_ERRORS 5150 +#define NETDATA_CHART_PRIO_IPV4_ICMP 5170 +#define NETDATA_CHART_PRIO_IPV4_TCP 5200 +#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5201 +#define NETDATA_CHART_PRIO_IPV4_TCP_MEM 5290 +#define NETDATA_CHART_PRIO_IPV4_UDP 5300 +#define NETDATA_CHART_PRIO_IPV4_UDP_MEM 5390 +#define NETDATA_CHART_PRIO_IPV4_UDPLITE 5400 +#define NETDATA_CHART_PRIO_IPV4_RAW 5450 +#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS 5460 +#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM 5470 + +// IPv6 + +#define NETDATA_CHART_PRIO_IPV6_PACKETS 6200 +#define NETDATA_CHART_PRIO_IPV6_ECT 6210 +#define NETDATA_CHART_PRIO_IPV6_ERRORS 6300 +#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS 6400 +#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6401 +#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6402 +#define NETDATA_CHART_PRIO_IPV6_TCP 6500 +#define NETDATA_CHART_PRIO_IPV6_UDP 6600 +#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6601 +#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6610 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6700 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6701 +#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6710 +#define NETDATA_CHART_PRIO_IPV6_RAW 6800 +#define NETDATA_CHART_PRIO_IPV6_BCAST 6840 +#define NETDATA_CHART_PRIO_IPV6_MCAST 6850 +#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6851 +#define NETDATA_CHART_PRIO_IPV6_ICMP 6900 +#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6910 +#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6920 +#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6930 +#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6940 +#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6950 +#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6960 +#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6970 +#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6980 + + +// Network interfaces + +#define NETDATA_CHART_PRIO_FIRST_NET_IFACE 7000 // 6 charts per interface +#define NETDATA_CHART_PRIO_FIRST_NET_PACKETS 7001 +#define NETDATA_CHART_PRIO_FIRST_NET_ERRORS 7002 +#define NETDATA_CHART_PRIO_FIRST_NET_DROPS 7003 +#define NETDATA_CHART_PRIO_FIRST_NET_EVENTS 7006 +#define NETDATA_CHART_PRIO_CGROUP_NET_IFACE 43000 + +// SCTP + +#define NETDATA_CHART_PRIO_SCTP 7000 + +// QoS + +#define NETDATA_CHART_PRIO_TC_QOS 7000 +#define NETDATA_CHART_PRIO_TC_QOS_PACKETS 7010 +#define NETDATA_CHART_PRIO_TC_QOS_DROPPED 7020 +#define NETDATA_CHART_PRIO_TC_QOS_TOCKENS 7030 +#define NETDATA_CHART_PRIO_TC_QOS_CTOCKENS 7040 + + +// Netfilter + +#define NETDATA_CHART_PRIO_NETFILTER_SOCKETS 8700 +#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701 +#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702 +#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703 +#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705 +#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710 + +#define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906 +#define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907 + +// SYNPROXY + +#define NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED 8751 +#define NETDATA_CHART_PRIO_SYNPROXY_COOKIES 8752 +#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753 +#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754 + +// CGROUPS + +#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts +#define NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 // many charts + +// STATSD + +#define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts + +// INTERNAL NETDATA INFO + +#define NETDATA_CHART_PRIO_CHECKS 99999 + +#define NETDATA_CHART_PRIO_NETDATA_DISKSPACE 132020 +#define NETDATA_CHART_PRIO_NETDATA_TC_CPU 135000 +#define NETDATA_CHART_PRIO_NETDATA_TC_TIME 135001 + + 
+#endif //NETDATA_ALL_H diff --git a/collectors/apps.plugin/Makefile.am b/collectors/apps.plugin/Makefile.am new file mode 100644 index 000000000..be0306492 --- /dev/null +++ b/collectors/apps.plugin/Makefile.am @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_libconfig_DATA = \ + apps_groups.conf \ + $(NULL) diff --git a/collectors/apps.plugin/Makefile.in b/collectors/apps.plugin/Makefile.in new file mode 100644 index 000000000..38120c048 --- /dev/null +++ b/collectors/apps.plugin/Makefile.in @@ -0,0 +1,521 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/apps.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_libconfig_DATA) $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + 
$(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libconfigdir)" +DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = 
@has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_libconfig_DATA = \ + apps_groups.conf \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + 
list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(libconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dist_libconfigDATA install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+	uninstall-am uninstall-dist_libconfigDATA
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
new file mode 100644
index 000000000..05680efe8
--- /dev/null
+++ b/collectors/apps.plugin/README.md
@@ -0,0 +1,372 @@
+# apps.plugin
+
+`apps.plugin` breaks down system resource usage by **processes**, **users** and **user groups**.
+
+To achieve this task, it iterates through the whole process tree, collecting resource usage information
+for every process found running.
+
+Since netdata needs to present this information in charts and track it through time,
+instead of presenting a `top`-like list, `apps.plugin` uses a pre-defined list of **process groups**
+to which it assigns all running processes. This list is [customizable](apps_groups.conf) and netdata
+ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+
+So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
+processes together (evaluating both child and parent processes) so that the result is always a list with
+a predefined set of members (of course, only process groups found running are reported).
+
+> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
+> glad to accept pull requests improving the [defaults](apps_groups.conf) shipped with netdata.
+
+Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account for the resource
+utilization of exited processes. Their utilization is accounted to their currently running parents.
+So, `apps.plugin` is perfectly able to measure the resources used by shell scripts and other processes
+that fork/spawn other short-lived processes hundreds of times per second.
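+
+As a quick illustration of this grouping (hypothetical entries only - the
+[default configuration](apps_groups.conf) shipped with netdata is much richer), entries like the
+following would merge all matching web and database server processes, together with their
+children, into two dimensions on the charts:
+
+```txt
+# hypothetical apps_groups.conf entries
+webserver: nginx* apache* httpd
+databases: mysqld* postgres* mongod
+```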
+
+## Charts
+
+`apps.plugin` provides charts for 3 sections:
+
+1. Per application charts as **Applications** on the netdata dashboards
+2. Per user charts as **Users** on the netdata dashboards
+3. Per user group charts as **User Groups** on the netdata dashboards
+
+Each of these sections provides the same number of charts:
+
+- CPU Utilization
+  - Total CPU usage
+  - User / System CPU usage
+- Disk I/O
+  - Physical Reads / Writes
+  - Logical Reads / Writes
+  - Open Unique Files (if a file is found open multiple times, it is counted just once)
+- Memory
+  - Real Memory Used (non-shared)
+  - Virtual Memory Allocated
+  - Minor Page Faults (i.e. memory activity)
+- Processes
+  - Threads Running
+  - Processes Running
+  - Pipes Open
+- Swap Memory
+  - Swap Memory Used
+  - Major Page Faults (i.e. swap activity)
+- Network
+  - Sockets Open
+
+The above are reported:
+
+- For **Applications** per [target configured](apps_groups.conf).
+- For **Users** per username or UID (when the username is not available).
+- For **User Groups** per groupname or GID (when the groupname is not available).
+
+## Performance
+
+`apps.plugin` is a complex piece of software and has a lot of work to do.
+We are proud that `apps.plugin` is a lot faster than any other similar tool,
+while collecting a lot more information about the processes. However, the fact is that
+this plugin requires more CPU resources than the netdata daemon itself.
+
+Under Linux, `apps.plugin` reads several `/proc` files for each running process.
+Doing this work every second, especially on hosts with several thousand
+processes, may increase the CPU resources consumed by the plugin.
+
+In such cases, you may need to lower its data collection frequency.
+
+To do this, edit `/etc/netdata/netdata.conf` and find this section:
+
+```
+[plugin:apps]
+	# update every = 1
+	# command options =
+```
+
+Uncomment the line `update every` and set it to a higher number. If you just set it to ` 2 `,
+its CPU resources will be cut in half, and data collection will happen once every 2 seconds.
+
+## Configuration
+
+The configuration file is `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf)).
+To edit it on your system run `/etc/netdata/edit-config apps_groups.conf`.
+
+The configuration file accepts multiple lines, each having this format:
+
+```txt
+group: process1 process2 ...
+```
+
+Each group can be given multiple times, to add more processes to it.
+
+For the **Applications** section, only groups configured in this file are reported.
+All other processes will be reported as `other`.
+
+For each process given, its whole process tree will be grouped, not just the process matched.
+The plugin will include both parents and children.
+
+The process names are the ones returned by:
+
+ - `ps -e` or `cat /proc/PID/stat`
+ - in case of substring mode (see below): `/proc/PID/cmdline`
+
+To add process names with spaces, enclose them in quotes (single or double),
+for example: ` 'Plex Media Serv' ` or ` "my other process" `.
+
+You can add an asterisk ` * ` at the beginning and/or the end of a process:
+
+ - `*name`   *suffix* mode: will search for processes ending with `name` (at `/proc/PID/stat`)
+ - `name*`   *prefix* mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
+ - `*name*` *substring* mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
+
+If you enter even just one *name* (substring), `apps.plugin` will process
+`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen).
+
+To add processes with single quotes, enclose them in double quotes: ` "process with this ' single quote" `
+
+To add processes with double quotes, enclose them in single quotes: ` 'process with this " double quote' `
+
+If a group or process name starts with a ` - `, the dimension will be hidden from the chart (cpu chart only).
+
+If a process starts with a ` + `, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
+
+You can add any number of groups. Only the ones found running will affect the charts generated.
+However, producing charts with hundreds of dimensions may slow down your web browser.
+
+The order of the entries in this list is important: the first that matches a process is used, so put important
+ones at the top. Processes not matched by any row will inherit the group of their parents or children.
+
+The order also controls the order of the dimensions on the generated charts (although applications started
+after apps.plugin is started will be appended to the existing list of dimensions the netdata daemon maintains).
+
+## Permissions
+
+`apps.plugin` requires additional privileges to collect all the information it needs.
+The problem is described in issue #157.
+
+When netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
+If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
+
+#### linux capabilities in containers
+
+There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities
+are silently ignored (in `lxc` containers `setcap` fails).
+
+In these cases (`setcap` succeeds but the capabilities do not work), you will have to make
+`apps.plugin` setuid to `root`, by running these commands:
+
+```sh
+chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
+chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
+```
+
+You will have to run these every time you update netdata.
+
+## Security
+
+`apps.plugin` performs a hard-coded function of building the process tree in memory,
+iterating forever, collecting metrics for each running process and sending them to netdata.
+This is a one-way communication, from `apps.plugin` to netdata.
+
+So, since `apps.plugin` cannot be instructed by netdata for the actions it performs,
+we think it is pretty safe to allow it to have these increased privileges.
+
+Keep in mind that `apps.plugin` will still run without escalated permissions,
+but it will not be able to collect all the information.
+
+## Application Badges
+
+You can create badges that you can embed anywhere you like, with URLs like this:
+
+```
+https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp&value_color=green%3E0%7Cred
+```
+
+The color expression unescaped is this: `value_color=green>0|red`.
+
+Here is an example for the process group `sql` at `https://registry.my-netdata.io`:
+
+![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+
+Netdata is able to give you a lot more badges for your app.
+Examples below for process group `sql`:
+
+- CPU usage: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
+- Disk Physical Reads: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Physical Writes: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Reads: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Writes: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Open Files: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.files&dimensions=sql&value_color=green%3E30%7Cred)
+- Real Memory: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
+- Virtual Memory: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Swap Memory: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
+- Minor Page Faults: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Processes: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+- Threads: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
+- Major Faults (swap activity): ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
+- Open Pipes: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pipes&dimensions=sql&value_color=green=0%7Cred)
+- Open Sockets: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.sockets&dimensions=sql&value_color=green%3E=3%7Cred)
+
+
+For more information about badges, check [Generating Badges](../../web/api/badges).
+
+## Comparison with console tools
+
+SSH to a server running netdata and execute this:
+
+```sh
+while true; do ls -l /var/run >/dev/null; done
+```
+
+In most systems `/var/run` is a `tmpfs` device, so there is nothing that can stop this command
+from entirely consuming one of the CPU cores of the machine.
+
+As we will see below, **none** of the console performance monitoring tools can report that this
+command is using 100% CPU. They do, of course, report that the CPU is busy, but **they fail to
+identify the process that consumes so much CPU**.
+
+Here is what common Linux console monitoring tools report:
+
+#### top
+
+`top` reports that `bash` is using just 14%.
+
+If you check the total system CPU utilization, it says there is no idle CPU at all, but `top`
+fails to provide a breakdown of the CPU consumption in the system. The sum of the CPU utilization
+of all processes reported by `top` is 15.6%.
+
+```
+top - 18:46:28 up 3 days, 20:14,  2 users,  load average: 0.22, 0.05, 0.02
+Tasks:  76 total,   2 running,  74 sleeping,   0 stopped,   0 zombie
+%Cpu(s): 32.8 us, 65.6 sy,  0.0 ni,  0.0 id,  0.0 wa,  1.3 hi,  0.3 si,  0.0 st
+KiB Mem :  1016576 total,   244112 free,    52012 used,   720452 buff/cache
+KiB Swap:        0 total,        0 free,        0 used.   753712 avail Mem
+
+  PID USER      PR  NI    VIRT    RES    SHR S %CPU %MEM     TIME+ COMMAND
+12789 root      20   0   14980   4180   3020 S 14.0  0.4   0:02.82 bash
+    9 root      20   0       0      0      0 S  1.0  0.0   0:22.36 rcuos/0
+  642 netdata   20   0  132024  20112   2660 S  0.3  2.0  14:26.29 netdata
+12522 netdata   20   0    9508   2476   1828 S  0.3  0.2   0:02.26 apps.plugin
+    1 root      20   0   67196  10216   7500 S  0.0  1.0   0:04.83 systemd
+    2 root      20   0       0      0      0 S  0.0  0.0   0:00.00 kthreadd
+```
+
+#### htop
+
+Exactly like `top`, `htop` provides an incomplete breakdown of the system CPU utilization.
+
+```
+  CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running
+  Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
+  Swp[                         0K/0K] Uptime: 3 days, 21:37:03
+
+  PID USER     PRI  NI  VIRT   RES   SHR S CPU% MEM%   TIME+  Command
+12789 root      20   0 15104  4484  3208 S 14.0  0.4 10:57.15 -bash
+ 7024 netdata   20   0  9544  2480  1744 S  0.7  0.2  0:00.88 /usr/libexec/netd
+ 7009 netdata   20   0  138M 21016  2712 S  0.7  2.1  0:00.89 /usr/sbin/netdata
+ 7012 netdata   20   0  138M 21016  2712 S  0.0  2.1  0:00.31 /usr/sbin/netdata
+  563 root      20   0  308M  202M  202M S  0.0 20.4  1:00.81 /usr/lib/systemd/
+ 7019 netdata   20   0  138M 21016  2712 S  0.0  2.1  0:00.14 /usr/sbin/netdata
+```
+
+#### atop
+
+`atop` also fails to break down CPU usage.
+
+```
+ATOP - localhost              2016/12/10 20:11:27              ----------- 10s elapsed
+PRC | sys    1.13s | user   0.43s | #proc     75 | #zombie    0 | #exit   5383 |
+CPU | sys      67% | user     31% | irq       2% | idle      0% | wait      0% |
+CPL | avg1    1.34 | avg5    1.05 | avg15   0.96 | csw    51346 | intr   10508 |
+MEM | tot   992.8M | free  211.5M | cache 470.0M | buff   87.2M | slab  164.7M |
+SWP | tot     0.0M | free    0.0M |              | vmcom 207.6M | vmlim 496.4M |
+DSK |          vda | busy      0% | read       0 | write      4 | avio 1.50 ms |
+NET | transport    | tcpi      16 | tcpo      15 | udpi       0 | udpo       0 |
+NET | network      | ipi       16 | ipo       15 | ipfrw      0 | deliv     16 |
+NET | eth0    ---- | pcki      16 | pcko      15 | si    1 Kbps | so    4 Kbps |
+
+  PID SYSCPU USRCPU VGROW RGROW RDDSK WRDSK ST EXC S  CPU CMD       1/600
+12789  0.98s  0.40s    0K    0K    0K  336K --   - S  14% bash
+    9  0.08s  0.00s    0K    0K    0K    0K --   - S   1% rcuos/0
+ 7024  0.03s  0.00s    0K    0K    0K    0K --   - S   0% apps.plugin
+ 7009  0.01s  0.01s    0K    0K    0K    4K --   - S   0% netdata
+```
+
+#### glances
+
+And the same is true for `glances`. The system runs at 100%, but `glances` reports only 17%
+per-process utilization.
+
+Note also that, being a `python` program, `glances` itself uses 1.6% CPU while it runs.
+
+```
+localhost                                          Uptime: 3 days, 21:42:00
+
+CPU  [100.0%]   CPU     100.0%   MEM    23.7%   SWAP    0.0%   LOAD   1-core
+MEM  [ 23.7%]   user:    30.9%   total:   993M   total:     0   1 min:   1.18
+SWAP [  0.0%]   system:  67.8%   used:    236M   used:      0   5 min:   1.08
+                idle:     0.0%   free:    757M   free:      0   15 min:  1.00
+
+NETWORK     Rx/s   Tx/s   TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
+eth0        168b    2Kb
+eth1          0b     0b   CPU%  MEM%   PID USER        NI S Command
+lo            0b     0b   13.5   0.4 12789 root         0 S -bash
+                           1.6   2.2  7025 root         0 R /usr/bin/python /u
+DISK I/O     R/s    W/s    1.0   0.0     9 root         0 S rcuos/0
+vda1           0     4K    0.3   0.2  7024 netdata      0 S /usr/libexec/netda
+                           0.3   0.0     7 root         0 S rcu_sched
+FILE SYS    Used  Total    0.3   2.1  7009 netdata      0 S /usr/sbin/netdata
+/ (vda1)   1.56G  29.5G    0.0   0.0    17 root         0 S oom_reaper
+```
+
+#### why does this happen?
+
+All the console tools report usage based on the processes found running *at the moment they
+examine the process tree*. So, they see just one `ls` command, which is actually very quick
+with minor CPU utilization. But the shell is spawning hundreds of them, one after another
+(much like shell scripts do).
+
+#### what does netdata report?
+
+The total CPU utilization of the system:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png)
+_**Figure 1**: The system overview section at netdata, just a few seconds after the command was run_
+
+And in the **Applications** section, `apps.plugin` breaks down CPU usage per application:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png)
+_**Figure 2**: The Applications section at netdata, just a few seconds after the command was run_
+
+So, the `ssh` session is using 95% CPU time.
+
+Why `ssh`?
+
+`apps.plugin` groups all processes based on its configuration file
+[`/etc/netdata/apps_groups.conf`](apps_groups.conf)
+(to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+The default configuration has nothing for `bash`, but it has an entry for `sshd`, so netdata accumulates
+all ssh sessions into a dimension on the charts, called `ssh`. This includes all the processes in
+the process tree of `sshd`, **including the exited children**.
+
+> Distributions based on `systemd` provide another way to get CPU utilization per user session
+> or running service: control groups (cgroups), commonly used as part of containers.
+> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
+> on any Linux, `systemd`-based or not.
+
+#### a more technical description of how netdata works
+
+netdata reads `/proc/<pid>/stat` for all processes, once per second, and extracts `utime` and
+`stime` (user and system CPU utilization), much like all the console tools do.
+
+But it [also extracts `cutime` and `cstime`](https://github.com/netdata/netdata/blob/62596cc6b906b1564657510ca9135c08f6d4cdda/src/apps_plugin.c#L636-L642),
+which account for the user and system time of the exited children of each process. By keeping a map in
+memory of the whole process tree, it is capable of assigning the right time to every process,
+taking into account all its exited children.
+
+It is tricky, since a process may be running for 1 hour and, once it exits, its parent should not
+receive the whole 1 hour of CPU time in just 1 second - you have to subtract the CPU time that has
+been reported for it prior to this iteration.
+
+It is even trickier, because walking through the entire process tree takes some time itself. So,
+if you sum the CPU utilization of all processes, you might have more CPU time than the reported
+total CPU time of the system. netdata solves this by adapting the per-process CPU utilization to
+the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
new file mode 100644
index 000000000..c0d22fac9
--- /dev/null
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -0,0 +1,286 @@
+#
+# apps.plugin process grouping
+#
+# The apps.plugin displays charts with information about the processes running.
+# This config allows grouping processes together, so that several processes
+# will be reported as one.
+#
+# Only groups in this file are reported. All other processes will be reported
+# as 'other'.
+#
+# For each process given, its whole process tree will be grouped, not just
+# the process matched. The plugin will include both parents and children.
+#
+# The format is:
+#
+#   group: process1 process2 process3 ...
+#
+# Each group can be given multiple times, to add more processes to it.
+#
+# The process names are the ones returned by:
+#
+#  - ps -e or /proc/PID/stat
+#  - in case of substring mode (see below): /proc/PID/cmdline
+#
+# To add process names with spaces, enclose them in quotes (single or double)
+# example: 'Plex Media Serv' "my other process".
+# +# Wildcard support: +# You can add an asterisk (*) at the beginning and/or the end of a process: +# +# *name suffix mode: will search for processes ending with 'name' +# (/proc/PID/stat) +# +# name* prefix mode: will search for processes beginning with 'name' +# (/proc/PID/stat) +# +# *name* substring mode: will search for 'name' in the whole command line +# (/proc/PID/cmdline) +# +# If you enter even just one *name* (substring), apps.plugin will process +# /proc/PID/cmdline for all processes, just once (when they are first seen). +# +# To add processes with single quotes, enclose them in double quotes +# example: "process with this ' single quote" +# +# To add processes with double quotes, enclose them in single quotes: +# example: 'process with this " double quote' +# +# If a group or process name starts with a -, the dimension will be hidden +# (cpu chart only). +# +# If a process starts with a +, debugging will be enabled for it +# (debugging produces a lot of output - do not enable it in production systems) +# +# You can add any number of groups you like. Only the ones found running will +# affect the charts generated. However, producing charts with hundreds of +# dimensions may slow down your web browser. +# +# The order of the entries in this list is important: the first that matches +# a process is used, so put important ones at the top. Processes not matched +# by any row, will inherit it from their parents or children. +# +# The order also controls the order of the dimensions on the generated charts +# (although applications started after apps.plugin is started, will be appended +# to the existing list of dimensions the netdata daemon maintains). + +# ----------------------------------------------------------------------------- +# NETDATA processes accounting + +# netdata main process +netdata: netdata + +# netdata known plugins +# plugins not defined here will be accumulated in netdata, above +apps.plugin: apps.plugin +freeipmi.plugin: freeipmi.plugin +charts.d.plugin: *charts.d.plugin* +node.d.plugin: *node.d.plugin* +python.d.plugin: *python.d.plugin* +tc-qos-helper: *tc-qos-helper.sh* +fping: fping + +# ----------------------------------------------------------------------------- +# authentication/authorization related servers + +auth: radius* openldap* ldap* +fail2ban: fail2ban* + +# ----------------------------------------------------------------------------- +# web/ftp servers + +httpd: apache* httpd nginx* lighttpd +proxy: squid* c-icap squidGuard varnish* +php: php* +ftpd: proftpd in.tftpd vsftpd +uwsgi: uwsgi +unicorn: *unicorn* +puma: *puma* + +# ----------------------------------------------------------------------------- +# database servers + +sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* +nosql: mongod redis* memcached *couchdb* +timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* + +# ----------------------------------------------------------------------------- +# email servers + +email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam unbound tlsmgr postfwd2 postscreen postfix smtp* lmtp* sendmail + +# ----------------------------------------------------------------------------- +# network, routing, VPN + +ppp: ppp* +vpn: openvpn pptp* cjdroute gvpe tincd +wifi: hostapd wpa_supplicant NetworkManager +routing: ospfd* ospf6d* bgpd isisd ripd ripngd pimd ldpd zebra vtysh bird* +modem: ModemManager + +# 
----------------------------------------------------------------------------- +# high availability and balancers + +camo: *camo* +balancer: ipvs_* haproxy +ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd + +# ----------------------------------------------------------------------------- +# telephony + +pbx: asterisk safe_asterisk *vicidial* +sip: opensips* stund + +# ----------------------------------------------------------------------------- +# chat + +chat: irssi *vines* *prosody* murmurd + +# ----------------------------------------------------------------------------- +# monitoring + +logs: ulogd* syslog* rsyslog* logrotate systemd-journald rotatelogs +nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid watchdog tailon nrpe +splunk: splunkd +azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent* + +# ----------------------------------------------------------------------------- +# storage, file systems and file servers + +ceph: ceph-mds ceph-mgr ceph-mon ceph-osd radosgw* rbd-* +samba: smbd nmbd winbindd +nfs: rpcbind rpc.* nfs* +zfs: spl_* z_* txg_* zil_* arc_* l2arc* +btrfs: btrfs* +iscsi: iscsid iscsi_eh + +# ----------------------------------------------------------------------------- +# containers & virtual machines + +containers: lxc* docker* +VMs: vbox* VBox* qemu* + +# ----------------------------------------------------------------------------- +# ssh servers and clients + +ssh: ssh* scp dropbear + +# ----------------------------------------------------------------------------- +# print servers and clients + +print: cups* lpd lpq + +# ----------------------------------------------------------------------------- +# time servers and clients + +time: ntp* systemd-timesyncd chronyd + +# ----------------------------------------------------------------------------- +# dhcp servers and clients + +dhcp: *dhcp* + +# ----------------------------------------------------------------------------- +# name servers and clients + +named: named rncd dig +dnsdist: dnsdist + +# ----------------------------------------------------------------------------- +# installation / compilation / debugging + +build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf +build: git gdb valgrind* + +# ----------------------------------------------------------------------------- +# antivirus + +antivirus: clam* *clam + +# ----------------------------------------------------------------------------- +# torrent clients + +torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent* + +# ----------------------------------------------------------------------------- +# backup servers and clients + +backup: rsync bacula* + +# ----------------------------------------------------------------------------- +# cron + +cron: cron* atd anacron systemd-cron* + +# ----------------------------------------------------------------------------- +# UPS + +ups: upsmon upsd */nut/* + +# ----------------------------------------------------------------------------- +# media players, servers, clients + +media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd +media: mpd minidlnad mt-daapd avahi* Plex* + +# ----------------------------------------------------------------------------- +# java applications + +hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode* +hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode* +hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode* +hdfszkfc: 
*org.apache.hadoop.hdfs.tools.DFSZKFailoverController* + +yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager* +yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager* +yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer* + +sparkworker: *org.apache.spark.deploy.worker.Worker* +sparkmaster: *org.apache.spark.deploy.master.Master* + +hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer* +hbaserest: *org.apache.hadoop.hbase.rest.RESTServer* +hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer* +hbasemaster: *org.apache.hadoop.hbase.master.HMaster* + +zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain* + +hive2: *org.apache.hive.service.server.HiveServer2* +hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore* + +solr: *solr.install.dir* + +airflow: *airflow* + +# ----------------------------------------------------------------------------- +# X + +X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar +X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim +X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit* +X: '*systemd --user*' chrome *chrome-sandbox* *google-chrome* *chromium* *firefox* + +# ----------------------------------------------------------------------------- +# Kernel / System + +ksmd: ksmd + +system: systemd-* udisks* udevd* *udevd connmand ipv6_addrconf dbus-* rtkit* +system: inetd xinetd mdadm polkitd acpid uuidd packagekitd upowerd colord +system: accounts-daemon rngd haveged + +kernel: kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod +kernel: fsnotify_mark kthrotld deferwq scsi_* + +# ----------------------------------------------------------------------------- +# other application servers + +kafka: *kafka.Kafka* + +rabbitmq: *rabbitmq* + +sidekiq: *sidekiq* +java: java +ipfs: ipfs + +node: node diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c new file mode 100644 index 000000000..f592e9fc8 --- /dev/null +++ b/collectors/apps.plugin/apps_plugin.c @@ -0,0 +1,3799 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +/* + * netdata apps.plugin + * (C) Copyright 2016-2017 Costa Tsaousis + * Released under GPL v3+ + */ + +#include "../../libnetdata/libnetdata.h" + +// ---------------------------------------------------------------------------- + +// callback required by fatal() +void netdata_cleanup_and_exit(int ret) { + exit(ret); +} + +// callbacks required by popen() +void signals_block(void) {}; +void signals_unblock(void) {}; +void signals_reset(void) {}; + +// callback required by eval() +int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) { + (void)variable; + (void)hash; + (void)rc; + (void)result; + return 0; +}; + +// required by get_system_cpus() +char *netdata_configured_host_prefix = ""; + + +// ---------------------------------------------------------------------------- +// debugging + +static int debug_enabled = 0; +static inline void debug_log_int(const char *fmt, ... ) { + va_list args; + + fprintf( stderr, "apps.plugin: "); + va_start( args, fmt ); + vfprintf( stderr, fmt, args ); + va_end( args ); + + fputc('\n', stderr); +} + +#ifdef NETDATA_INTERNAL_CHECKS + +#define debug_log(fmt, args...) do { if(unlikely(debug_enabled)) debug_log_int(fmt, ##args); } while(0) + +#else + +static inline void debug_log_dummy(void) {} +#define debug_log(fmt, args...) 
debug_log_dummy()
+
+#endif
+
+
+// ----------------------------------------------------------------------------
+
+#ifdef __FreeBSD__
+#include <sys/user.h>
+#endif
+
+// ----------------------------------------------------------------------------
+// per O/S configuration
+
+// the minimum PID of the system
+// this is also the pid of the init process
+#define INIT_PID 1
+
+// if apps.plugin can read the entire process list, including the resource
+// utilization of each process, instantly, set this to 1
+// when set to 0, apps.plugin builds a sorted list of processes, in order
+// to process child processes before parent processes
+#ifdef __FreeBSD__
+#define ALL_PIDS_ARE_READ_INSTANTLY 1
+#else
+#define ALL_PIDS_ARE_READ_INSTANTLY 0
+#endif
+
+// ----------------------------------------------------------------------------
+// string lengths
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+#define MAX_CMDLINE 16384
+
+// ----------------------------------------------------------------------------
+// the rates we are going to send to netdata will have this detail; a value of:
+// - 1 will send just integer parts to netdata
+// - 100 will send 2 decimal points
+// - 1000 will send 3 decimal points
+// etc.
+#define RATES_DETAIL 10000ULL
+
+
+// ----------------------------------------------------------------------------
+// to avoid reallocating too frequently, we can increase the number of spare
+// file descriptors used by processes.
+// IMPORTANT:
+// having a lot of spares increases the CPU utilization of the plugin.
+#define MAX_SPARE_FDS 1
+
+
+// ----------------------------------------------------------------------------
+// command line options
+
+static int
+    update_every = 1,
+    enable_guest_charts = 0,
+#ifdef __FreeBSD__
+    enable_file_charts = 0,
+#else
+    enable_file_charts = 1,
+    max_fds_cache_seconds = 60,
+#endif
+    enable_users_charts = 1,
+    enable_groups_charts = 1,
+    include_exited_childs = 1;
+
+// will be changed to getenv(NETDATA_USER_CONFIG_DIR) if it exists
+static char *user_config_dir = CONFIG_DIR;
+static char *stock_config_dir = LIBCONFIG_DIR;
+
+// ----------------------------------------------------------------------------
+// internal flags
+// handled in code (automatically set)
+
+static int
+    show_guest_time = 0,            // 1 when guest values are collected
+    show_guest_time_old = 0,
+    proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/<pid>/cmdline
+
+
+// ----------------------------------------------------------------------------
+// internal counters
+
+static size_t
+    global_iterations_counter = 1,
+    calls_counter = 0,
+    file_counter = 0,
+    filenames_allocated_counter = 0,
+    inodes_changed_counter = 0,
+    links_changed_counter = 0,
+    targets_assignment_counter = 0;
+
+
+// ----------------------------------------------------------------------------
+// Normalization
+//
+// With normalization we lower the collected metrics by a factor to make them
+// match the total utilization of the system.
+// The discrepancy exists because apps.plugin needs some time to collect all
+// the metrics. This results in utilization that exceeds the total utilization
+// of the system.
+//
+// With normalization we align the per-process utilization to the total of
+// the system. We first consume the exited children utilization and if the
+// collected total is still above the system total, we proportionally scale
+// each reported metric.
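+//
+// As an illustration only - this is a sketch of the idea, not the plugin's
+// actual code - the scaling factor for one metric would be computed like:
+//
+//     static double fix_ratio(kernel_uint_t collected, kernel_uint_t total) {
+//         if(!collected || collected <= total) return 1.0; // nothing to fix
+//         return (double)total / (double)collected;        // scale down
+//     }
+//
+// normalize_utilization() applies this idea per metric, producing the
+// *_fix_ratio values declared below.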
+ +// the total system time, as reported by /proc/stat +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) +static kernel_uint_t + global_utime = 0, + global_stime = 0, + global_gtime = 0; +#endif + +// the normalization ratios, as calculated by normalize_utilization() +double utime_fix_ratio = 1.0, + stime_fix_ratio = 1.0, + gtime_fix_ratio = 1.0, + minflt_fix_ratio = 1.0, + majflt_fix_ratio = 1.0, + cutime_fix_ratio = 1.0, + cstime_fix_ratio = 1.0, + cgtime_fix_ratio = 1.0, + cminflt_fix_ratio = 1.0, + cmajflt_fix_ratio = 1.0; + +// ---------------------------------------------------------------------------- +// target +// +// target is the structure that processes are aggregated to be reported +// to netdata. +// +// - Each entry in /etc/apps_groups.conf creates a target. +// - Each user and group used by a process in the system, creates a target. + +struct target { + char compare[MAX_COMPARE_NAME + 1]; + uint32_t comparehash; + size_t comparelen; + + char id[MAX_NAME + 1]; + uint32_t idhash; + + char name[MAX_NAME + 1]; + + uid_t uid; + gid_t gid; + + kernel_uint_t minflt; + kernel_uint_t cminflt; + kernel_uint_t majflt; + kernel_uint_t cmajflt; + kernel_uint_t utime; + kernel_uint_t stime; + kernel_uint_t gtime; + kernel_uint_t cutime; + kernel_uint_t cstime; + kernel_uint_t cgtime; + kernel_uint_t num_threads; + // kernel_uint_t rss; + + kernel_uint_t status_vmsize; + kernel_uint_t status_vmrss; + kernel_uint_t status_vmshared; + kernel_uint_t status_rssfile; + kernel_uint_t status_rssshmem; + kernel_uint_t status_vmswap; + + kernel_uint_t io_logical_bytes_read; + kernel_uint_t io_logical_bytes_written; + // kernel_uint_t io_read_calls; + // kernel_uint_t io_write_calls; + kernel_uint_t io_storage_bytes_read; + kernel_uint_t io_storage_bytes_written; + // kernel_uint_t io_cancelled_write_bytes; + + int *target_fds; + int target_fds_size; + + kernel_uint_t openfiles; + kernel_uint_t openpipes; + kernel_uint_t opensockets; + kernel_uint_t openinotifies; + kernel_uint_t openeventfds; + kernel_uint_t opentimerfds; + kernel_uint_t opensignalfds; + kernel_uint_t openeventpolls; + kernel_uint_t openother; + + unsigned int processes; // how many processes have been merged to this + int exposed; // if set, we have sent this to netdata + int hidden; // if set, we set the hidden flag on the dimension + int debug_enabled; + int ends_with; + int starts_with; // if set, the compare string matches only the + // beginning of the command + + struct target *target; // the one that will be reported to netdata + struct target *next; +}; + +struct target + *apps_groups_default_target = NULL, // the default target + *apps_groups_root_target = NULL, // apps_groups.conf defined + *users_root_target = NULL, // users + *groups_root_target = NULL; // user groups + +size_t + apps_groups_targets_count = 0; // # of apps_groups.conf targets + + +// ---------------------------------------------------------------------------- +// pid_stat +// +// structure to store data for each process running +// see: man proc for the description of the fields + +struct pid_fd { + int fd; + +#ifndef __FreeBSD__ + ino_t inode; + char *filename; + uint32_t link_hash; + size_t cache_iterations_counter; + size_t cache_iterations_reset; +#endif +}; + +struct pid_stat { + int32_t pid; + char comm[MAX_COMPARE_NAME + 1]; + char *cmdline; + + uint32_t log_thrown; + + // char state; + int32_t ppid; + // int32_t pgrp; + // int32_t session; + // int32_t tty_nr; + // int32_t tpgid; + // uint64_t flags; + + // these are raw values collected + kernel_uint_t 
minflt_raw; + kernel_uint_t cminflt_raw; + kernel_uint_t majflt_raw; + kernel_uint_t cmajflt_raw; + kernel_uint_t utime_raw; + kernel_uint_t stime_raw; + kernel_uint_t gtime_raw; // guest_time + kernel_uint_t cutime_raw; + kernel_uint_t cstime_raw; + kernel_uint_t cgtime_raw; // cguest_time + + // these are rates + kernel_uint_t minflt; + kernel_uint_t cminflt; + kernel_uint_t majflt; + kernel_uint_t cmajflt; + kernel_uint_t utime; + kernel_uint_t stime; + kernel_uint_t gtime; + kernel_uint_t cutime; + kernel_uint_t cstime; + kernel_uint_t cgtime; + + // int64_t priority; + // int64_t nice; + int32_t num_threads; + // int64_t itrealvalue; + // kernel_uint_t starttime; + // kernel_uint_t vsize; + // kernel_uint_t rss; + // kernel_uint_t rsslim; + // kernel_uint_t starcode; + // kernel_uint_t endcode; + // kernel_uint_t startstack; + // kernel_uint_t kstkesp; + // kernel_uint_t kstkeip; + // uint64_t signal; + // uint64_t blocked; + // uint64_t sigignore; + // uint64_t sigcatch; + // uint64_t wchan; + // uint64_t nswap; + // uint64_t cnswap; + // int32_t exit_signal; + // int32_t processor; + // uint32_t rt_priority; + // uint32_t policy; + // kernel_uint_t delayacct_blkio_ticks; + + uid_t uid; + gid_t gid; + + kernel_uint_t status_vmsize; + kernel_uint_t status_vmrss; + kernel_uint_t status_vmshared; + kernel_uint_t status_rssfile; + kernel_uint_t status_rssshmem; + kernel_uint_t status_vmswap; +#ifndef __FreeBSD__ + ARL_BASE *status_arl; +#endif + + kernel_uint_t io_logical_bytes_read_raw; + kernel_uint_t io_logical_bytes_written_raw; + // kernel_uint_t io_read_calls_raw; + // kernel_uint_t io_write_calls_raw; + kernel_uint_t io_storage_bytes_read_raw; + kernel_uint_t io_storage_bytes_written_raw; + // kernel_uint_t io_cancelled_write_bytes_raw; + + kernel_uint_t io_logical_bytes_read; + kernel_uint_t io_logical_bytes_written; + // kernel_uint_t io_read_calls; + // kernel_uint_t io_write_calls; + kernel_uint_t io_storage_bytes_read; + kernel_uint_t io_storage_bytes_written; + // kernel_uint_t io_cancelled_write_bytes; + + struct pid_fd *fds; // array of fds it uses + size_t fds_size; // the size of the fds array + + int children_count; // number of processes directly referencing this + unsigned char keep:1; // 1 when we need to keep this process in memory even after it exited + int keeploops; // increases by 1 every time keep is 1 and updated 0 + unsigned char updated:1; // 1 when the process is currently running + unsigned char merged:1; // 1 when it has been merged to its parent + unsigned char read:1; // 1 when we have already read this process for this iteration + + int sortlist; // higher numbers = top on the process tree + // each process gets a unique number + + struct target *target; // app_groups.conf targets + struct target *user_target; // uid based targets + struct target *group_target; // gid based targets + + usec_t stat_collected_usec; + usec_t last_stat_collected_usec; + + usec_t io_collected_usec; + usec_t last_io_collected_usec; + + char *fds_dirname; // the full directory name in /proc/PID/fd + + char *stat_filename; + char *status_filename; + char *io_filename; + char *cmdline_filename; + + struct pid_stat *parent; + struct pid_stat *prev; + struct pid_stat *next; +}; + +size_t pagesize; + +// log each problem once per process +// log flood protection flags (log_thrown) +#define PID_LOG_IO 0x00000001 +#define PID_LOG_STATUS 0x00000002 +#define PID_LOG_CMDLINE 0x00000004 +#define PID_LOG_FDS 0x00000008 +#define PID_LOG_STAT 0x00000010 + +static struct pid_stat + 
*root_of_pids = NULL, // global list of all processes running + **all_pids = NULL; // to avoid allocations, we pre-allocate the + // the entire pid space. + +static size_t + all_pids_count = 0; // the number of processes running + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) +// Another pre-allocated list of all possible pids. +// We need it to pids and assign them a unique sortlist id, so that we +// read parents before children. This is needed to prevent a situation where +// a child is found running, but until we read its parent, it has exited and +// its parent has accumulated its resources. +static pid_t + *all_pids_sortlist = NULL; +#endif + +// ---------------------------------------------------------------------------- +// file descriptor +// +// this is used to keep a global list of all open files of the system. +// it is needed in order to calculate the unique files processes have open. + +#define FILE_DESCRIPTORS_INCREASE_STEP 100 + +// types for struct file_descriptor->type +typedef enum fd_filetype { + FILETYPE_OTHER, + FILETYPE_FILE, + FILETYPE_PIPE, + FILETYPE_SOCKET, + FILETYPE_INOTIFY, + FILETYPE_EVENTFD, + FILETYPE_EVENTPOLL, + FILETYPE_TIMERFD, + FILETYPE_SIGNALFD +} FD_FILETYPE; + +struct file_descriptor { + avl avl; + +#ifdef NETDATA_INTERNAL_CHECKS + uint32_t magic; +#endif /* NETDATA_INTERNAL_CHECKS */ + + const char *name; + uint32_t hash; + + FD_FILETYPE type; + int count; + int pos; +} *all_files = NULL; + +static int + all_files_len = 0, + all_files_size = 0; + +// ---------------------------------------------------------------------------- +// apps_groups.conf +// aggregate all processes in groups, to have a limited number of dimensions + +static struct target *get_users_target(uid_t uid) { + struct target *w; + for(w = users_root_target ; w ; w = w->next) + if(w->uid == uid) return w; + + w = callocz(sizeof(struct target), 1); + snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid); + w->comparehash = simple_hash(w->compare); + w->comparelen = strlen(w->compare); + + snprintfz(w->id, MAX_NAME, "%u", uid); + w->idhash = simple_hash(w->id); + + struct passwd *pw = getpwuid(uid); + if(!pw || !pw->pw_name || !*pw->pw_name) + snprintfz(w->name, MAX_NAME, "%u", uid); + else + snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); + + netdata_fix_chart_name(w->name); + + w->uid = uid; + + w->next = users_root_target; + users_root_target = w; + + debug_log("added uid %u ('%s') target", w->uid, w->name); + + return w; +} + +struct target *get_groups_target(gid_t gid) +{ + struct target *w; + for(w = groups_root_target ; w ; w = w->next) + if(w->gid == gid) return w; + + w = callocz(sizeof(struct target), 1); + snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid); + w->comparehash = simple_hash(w->compare); + w->comparelen = strlen(w->compare); + + snprintfz(w->id, MAX_NAME, "%u", gid); + w->idhash = simple_hash(w->id); + + struct group *gr = getgrgid(gid); + if(!gr || !gr->gr_name || !*gr->gr_name) + snprintfz(w->name, MAX_NAME, "%u", gid); + else + snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); + + netdata_fix_chart_name(w->name); + + w->gid = gid; + + w->next = groups_root_target; + groups_root_target = w; + + debug_log("added gid %u ('%s') target", w->gid, w->name); + + return w; +} + +// find or create a new target +// there are targets that are just aggregated to other target (the second argument) +static struct target *get_apps_groups_target(const char *id, struct target *target, const char *name) { + int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0; + const 
char *nid = id; + + // extract the options + while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') { + if(nid[0] == '-') thidden = 1; + if(nid[0] == '+') tdebug = 1; + if(nid[0] == '*') ends_with = 1; + nid++; + } + uint32_t hash = simple_hash(id); + + // find if it already exists + struct target *w, *last = apps_groups_root_target; + for(w = apps_groups_root_target ; w ; w = w->next) { + if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0) + return w; + + last = w; + } + + // find an existing target + if(unlikely(!target)) { + while(*name == '-') { + if(*name == '-') thidden = 1; + name++; + } + + for(target = apps_groups_root_target ; target != NULL ; target = target->next) { + if(!target->target && strcmp(name, target->name) == 0) + break; + } + + if(unlikely(debug_enabled)) { + if(unlikely(target)) + debug_log("REUSING TARGET NAME '%s' on ID '%s'", target->name, target->id); + else + debug_log("NEW TARGET NAME '%s' on ID '%s'", name, id); + } + } + + if(target && target->target) + fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id); + + w = callocz(sizeof(struct target), 1); + strncpyz(w->id, nid, MAX_NAME); + w->idhash = simple_hash(w->id); + + if(unlikely(!target)) + // copy the name + strncpyz(w->name, name, MAX_NAME); + else + // copy the id + strncpyz(w->name, nid, MAX_NAME); + + strncpyz(w->compare, nid, MAX_COMPARE_NAME); + size_t len = strlen(w->compare); + if(w->compare[len - 1] == '*') { + w->compare[len - 1] = '\0'; + w->starts_with = 1; + } + w->ends_with = ends_with; + + if(w->starts_with && w->ends_with) + proc_pid_cmdline_is_needed = 1; + + w->comparehash = simple_hash(w->compare); + w->comparelen = strlen(w->compare); + + w->hidden = thidden; +#ifdef NETDATA_INTERNAL_CHECKS + w->debug_enabled = tdebug; +#else + if(tdebug) + fprintf(stderr, "apps.plugin has been compiled without debugging\n"); +#endif + w->target = target; + + // append it, to maintain the order in apps_groups.conf + if(last) last->next = w; + else apps_groups_root_target = w; + + debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s" + , w->id + , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact")) + , w->target?w->target->name:w->name + , (w->hidden)?"hidden":"-" + , (w->debug_enabled)?"debug":"-" + ); + + return w; +} + +// read the apps_groups.conf file +static int read_apps_groups_conf(const char *path, const char *file) +{ + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file); + + debug_log("process groups file: '%s'", filename); + + // ---------------------------------------- + + procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT); + if(!ff) return 1; + + procfile_set_quotes(ff, "'\""); + + ff = procfile_readall(ff); + if(!ff) + return 1; + + size_t line, lines = procfile_lines(ff); + + for(line = 0; line < lines ;line++) { + size_t word, words = procfile_linewords(ff, line); + if(!words) continue; + + char *name = procfile_lineword(ff, line, 0); + if(!name || !*name) continue; + + // find a possibly existing target + struct target *w = NULL; + + // loop through all words, skipping the first one (the name) + for(word = 0; word < words ;word++) { + char *s = procfile_lineword(ff, line, word); + if(!s || !*s) continue; + if(*s == '#') break; + + // is this the first word? 
skip it + if(s == name) continue; + + // add this target + struct target *n = get_apps_groups_target(s, w, name); + if(!n) { + error("Cannot create target '%s' (line %zu, word %zu)", s, line, word); + continue; + } + + // just some optimization + // to avoid searching for a target for each process + if(!w) w = n->target?n->target:n; + } + } + + procfile_close(ff); + + apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing + if(!apps_groups_default_target) + fatal("Cannot create default target"); + + // allow the user to override group 'other' + if(apps_groups_default_target->target) + apps_groups_default_target = apps_groups_default_target->target; + + return 0; +} + + +// ---------------------------------------------------------------------------- +// struct pid_stat management +static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size); + +static inline struct pid_stat *get_pid_entry(pid_t pid) { + if(unlikely(all_pids[pid])) + return all_pids[pid]; + + struct pid_stat *p = callocz(sizeof(struct pid_stat), 1); + p->fds = mallocz(sizeof(struct pid_fd) * MAX_SPARE_FDS); + p->fds_size = MAX_SPARE_FDS; + init_pid_fds(p, 0, p->fds_size); + + if(likely(root_of_pids)) + root_of_pids->prev = p; + + p->next = root_of_pids; + root_of_pids = p; + + p->pid = pid; + + all_pids[pid] = p; + all_pids_count++; + + return p; +} + +static inline void del_pid_entry(pid_t pid) { + struct pid_stat *p = all_pids[pid]; + + if(unlikely(!p)) { + error("attempted to free pid %d that is not allocated.", pid); + return; + } + + debug_log("process %d %s exited, deleting it.", pid, p->comm); + + if(root_of_pids == p) + root_of_pids = p->next; + + if(p->next) p->next->prev = p->prev; + if(p->prev) p->prev->next = p->next; + + // free the filename +#ifndef __FreeBSD__ + { + size_t i; + for(i = 0; i < p->fds_size; i++) + if(p->fds[i].filename) + freez(p->fds[i].filename); + } +#endif + freez(p->fds); + + freez(p->fds_dirname); + freez(p->stat_filename); + freez(p->status_filename); +#ifndef __FreeBSD__ + arl_free(p->status_arl); +#endif + freez(p->io_filename); + freez(p->cmdline_filename); + freez(p->cmdline); + freez(p); + + all_pids[pid] = NULL; + all_pids_count--; +} + +// ---------------------------------------------------------------------------- + +static inline int managed_log(struct pid_stat *p, uint32_t log, int status) { + if(unlikely(!status)) { + // error("command failed log %u, errno %d", log, errno); + + if(unlikely(debug_enabled || errno != ENOENT)) { + if(unlikely(debug_enabled || !(p->log_thrown & log))) { + p->log_thrown |= log; + switch(log) { + case PID_LOG_IO: + #ifdef __FreeBSD__ + error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm); + #else + error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + #endif + break; + + case PID_LOG_STATUS: + #ifdef __FreeBSD__ + error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm); + #else + error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + #endif + break; + + case PID_LOG_CMDLINE: + #ifdef __FreeBSD__ + error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm); + #else + error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + #endif + break; + + case PID_LOG_FDS: + #ifdef __FreeBSD__ + error("Cannot fetch process %d files (command '%s')", p->pid, p->comm); + #else 
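+                    // Linux: the PID_LOG_FDS bit was already raised above, so
+                    // the message below is logged once per process, until the
+                    // fd directory becomes readable again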
+ error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + #endif + break; + + case PID_LOG_STAT: + break; + + default: + error("unhandled error for pid %d, command '%s'", p->pid, p->comm); + break; + } + } + } + errno = 0; + } + else if(unlikely(p->log_thrown & log)) { + // error("unsetting log %u on pid %d", log, p->pid); + p->log_thrown &= ~log; + } + + return status; +} + +static inline void assign_target_to_pid(struct pid_stat *p) { + targets_assignment_counter++; + + uint32_t hash = simple_hash(p->comm); + size_t pclen = strlen(p->comm); + + struct target *w; + for(w = apps_groups_root_target; w ; w = w->next) { + // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm); + + // find it - 4 cases: + // 1. the target is not a pattern + // 2. the target has the prefix + // 3. the target has the suffix + // 4. the target is something inside cmdline + + if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm)) + || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen)) + || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen])) + || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare)) + ))) { + + if(w->target) p->target = w->target; + else p->target = w; + + if(debug_enabled || (p->target && p->target->debug_enabled)) + debug_log_int("%s linked to target %s", p->comm, p->target->name); + + break; + } + } +} + + +// ---------------------------------------------------------------------------- +// update pids from proc + +static inline int read_proc_pid_cmdline(struct pid_stat *p) { + static char cmdline[MAX_CMDLINE + 1]; + +#ifdef __FreeBSD__ + size_t i, bytes = MAX_CMDLINE; + int mib[4]; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_ARGS; + mib[3] = p->pid; + if (unlikely(sysctl(mib, 4, cmdline, &bytes, NULL, 0))) + goto cleanup; +#else + if(unlikely(!p->cmdline_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid); + p->cmdline_filename = strdupz(filename); + } + + int fd = open(p->cmdline_filename, procfile_open_flags, 0666); + if(unlikely(fd == -1)) goto cleanup; + + ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE); + close(fd); + + if(unlikely(bytes < 0)) goto cleanup; +#endif + + cmdline[bytes] = '\0'; + for(i = 0; i < bytes ; i++) { + if(unlikely(!cmdline[i])) cmdline[i] = ' '; + } + + if(p->cmdline) freez(p->cmdline); + p->cmdline = strdupz(cmdline); + + debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline); + + return 1; + +cleanup: + // copy the command to the command line + if(p->cmdline) freez(p->cmdline); + p->cmdline = strdupz(p->comm); + return 0; +} + +// ---------------------------------------------------------------------------- +// macro to calculate the incremental rate of a value +// each parameter is accessed only ONCE - so it is safe to pass function calls +// or other macros as parameters + +#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) { \ + kernel_uint_t _new_tmp = new_kernel_value; \ + (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \ + (last_kernel_variable) 
= _new_tmp; \ + } + +// the same macro for struct pid members +#define pid_incremental_rate(type, var, value) \ + incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec) + + +// ---------------------------------------------------------------------------- + +#ifndef __FreeBSD__ +struct arl_callback_ptr { + struct pid_stat *p; + procfile *ff; + size_t line; +}; + +void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; + + //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1); + const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2); + //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3); + //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4); + + if(likely(effective_uid && *effective_uid)) + aptr->p->uid = (uid_t)str2l(effective_uid); +} + +void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; + + //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1); + const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2); + //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3); + //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4); + + if(likely(effective_gid && *effective_gid)) + aptr->p->gid = (uid_t)str2l(effective_gid); +} + +void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); +} + +void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); +} + +void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); +} + +void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); +} + +void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->status_rssshmem = 
str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); +} +#endif // !__FreeBSD__ + +static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) { + p->status_vmsize = 0; + p->status_vmrss = 0; + p->status_vmshared = 0; + p->status_rssfile = 0; + p->status_rssshmem = 0; + p->status_vmswap = 0; + +#ifdef __FreeBSD__ + struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; + + p->uid = proc_info->ki_uid; + p->gid = proc_info->ki_groups[0]; + p->status_vmsize = proc_info->ki_size / 1024; // in kB + p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in kB + // TODO: what about shared and swap memory on FreeBSD? + return 1; +#else + (void)ptr; + + static struct arl_callback_ptr arl_ptr; + static procfile *ff = NULL; + + if(unlikely(!p->status_arl)) { + p->status_arl = arl_create("/proc/pid/status", NULL, 60); + arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr); + arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr); + arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr); + arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr); + arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr); + arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr); + arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr); + } + + if(unlikely(!p->status_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid); + p->status_filename = strdupz(filename); + } + + ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(unlikely(!ff)) return 0; + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; + + calls_counter++; + + // let ARL use this pid + arl_ptr.p = p; + arl_ptr.ff = ff; + + size_t lines = procfile_lines(ff), l; + arl_begin(p->status_arl); + + for(l = 0; l < lines ;l++) { + // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1)); + arl_ptr.line = l; + if(unlikely(arl_check(p->status_arl, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + p->status_vmshared = p->status_rssfile + p->status_rssshmem; + + // debug_log("%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared); + + return 1; +#endif +} + + +// ---------------------------------------------------------------------------- + +static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr) { + (void)ptr; + +#ifdef __FreeBSD__ + struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; + + if (unlikely(proc_info->ki_tdflags & TDF_IDLETD)) + goto cleanup; +#else + static procfile *ff = NULL; + + if(unlikely(!p->stat_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid); + p->stat_filename = strdupz(filename); + } + + int set_quotes = (!ff)?1:0; + + ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(unlikely(!ff)) goto cleanup; + + // if(set_quotes) procfile_set_quotes(ff, "()"); + if(unlikely(set_quotes)) + procfile_set_open_close(ff, "(", ")"); + + ff = procfile_readall(ff); + if(unlikely(!ff)) goto cleanup; +#endif + + 
p->last_stat_collected_usec = p->stat_collected_usec; + p->stat_collected_usec = now_monotonic_usec(); + calls_counter++; + +#ifdef __FreeBSD__ + char *comm = proc_info->ki_comm; + p->ppid = proc_info->ki_ppid; +#else + // p->pid = str2pid_t(procfile_lineword(ff, 0, 0)); + char *comm = procfile_lineword(ff, 0, 1); + // p->state = *(procfile_lineword(ff, 0, 2)); + p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3)); + // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4)); + // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5)); + // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6)); + // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7)); + // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8)); +#endif + + if(strcmp(p->comm, comm) != 0) { + if(unlikely(debug_enabled)) { + if(p->comm[0]) + debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm); + else + debug_log("\tJust added %d (%s)", p->pid, comm); + } + + strncpyz(p->comm, comm, MAX_COMPARE_NAME); + + // /proc//cmdline + if(likely(proc_pid_cmdline_is_needed)) + managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p)); + + assign_target_to_pid(p); + } + +#ifdef __FreeBSD__ + pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt); + pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt); + pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt); + pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt); + pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000); + pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000); + pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000); + pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000); + + p->num_threads = proc_info->ki_numthreads; + + if(enable_guest_charts) { + enable_guest_charts = 0; + info("Guest charts aren't supported by FreeBSD"); + } +#else + pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9))); + pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10))); + pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11))); + pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12))); + pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13))); + pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14))); + pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15))); + pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16))); + // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17)); + // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18)); + p->num_threads = (int32_t)str2uint32_t(procfile_lineword(ff, 0, 19)); + // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20)); + // p->starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)); + // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22)); + // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23)); + // p->rsslim = 
str2kernel_uint_t(procfile_lineword(ff, 0, 24)); + // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25)); + // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26)); + // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27)); + // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28)); + // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29)); + // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30)); + // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31)); + // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32)); + // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33)); + // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34)); + // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35)); + // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36)); + // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37)); + // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38)); + // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39)); + // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40)); + // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41)); + + if(enable_guest_charts) { + + pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42))); + pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43))); + + if (show_guest_time || p->gtime || p->cgtime) { + p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime; + p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime; + show_guest_time = 1; + } + } +#endif + + if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) + debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads); + + if(unlikely(global_iterations_counter == 1)) { + p->minflt = 0; + p->cminflt = 0; + p->majflt = 0; + p->cmajflt = 0; + p->utime = 0; + p->stime = 0; + p->gtime = 0; + p->cutime = 0; + p->cstime = 0; + p->cgtime = 0; + } + + return 1; + +cleanup: + p->minflt = 0; + p->cminflt = 0; + p->majflt = 0; + p->cmajflt = 0; + p->utime = 0; + p->stime = 0; + p->gtime = 0; + p->cutime = 0; + p->cstime = 0; + p->cgtime = 0; + p->num_threads = 0; + // p->rss = 0; + return 0; +} + +static inline int read_proc_pid_io(struct pid_stat *p, void *ptr) { + (void)ptr; +#ifdef __FreeBSD__ + struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; +#else + static procfile *ff = NULL; + + if(unlikely(!p->io_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid); + p->io_filename = strdupz(filename); + } + + // open the file + ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(unlikely(!ff)) goto cleanup; + + ff = procfile_readall(ff); + if(unlikely(!ff)) goto cleanup; +#endif + + calls_counter++; + + p->last_io_collected_usec = p->io_collected_usec; + p->io_collected_usec = now_monotonic_usec(); + +#ifdef 
__FreeBSD__ + pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock); + pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock); +#else + pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1))); + pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1))); + // pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1))); + // pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1))); + pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1))); + pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1))); + // pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1))); +#endif + + if(unlikely(global_iterations_counter == 1)) { + p->io_logical_bytes_read = 0; + p->io_logical_bytes_written = 0; + // p->io_read_calls = 0; + // p->io_write_calls = 0; + p->io_storage_bytes_read = 0; + p->io_storage_bytes_written = 0; + // p->io_cancelled_write_bytes = 0; + } + + return 1; + +#ifndef __FreeBSD__ +cleanup: + p->io_logical_bytes_read = 0; + p->io_logical_bytes_written = 0; + // p->io_read_calls = 0; + // p->io_write_calls = 0; + p->io_storage_bytes_read = 0; + p->io_storage_bytes_written = 0; + // p->io_cancelled_write_bytes = 0; + return 0; +#endif +} + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) +static inline int read_proc_stat() { + static char filename[FILENAME_MAX + 1] = ""; + static procfile *ff = NULL; + static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0; + static usec_t collected_usec = 0, last_collected_usec = 0; + + if(unlikely(!ff)) { + snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix); + ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) goto cleanup; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) goto cleanup; + + last_collected_usec = collected_usec; + collected_usec = now_monotonic_usec(); + + calls_counter++; + + // temporary - it is added global_ntime; + kernel_uint_t global_ntime = 0; + + incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec); + incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec); + incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec); + incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec); + + global_utime += global_ntime; + + if(enable_guest_charts) { + // temporary - it is added global_ntime; + kernel_uint_t global_gntime = 0; + + // guest nice time, on guest time + incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec); + + global_gtime += global_gntime; + + // remove guest time from user time + global_utime -= (global_utime > global_gtime) ? 
global_gtime : global_utime; + } + + if(unlikely(global_iterations_counter == 1)) { + global_utime = 0; + global_stime = 0; + global_gtime = 0; + } + + return 1; + +cleanup: + global_utime = 0; + global_stime = 0; + global_gtime = 0; + return 0; +} +#else +static inline int read_proc_stat() { + return 0; +} +#endif + +// ---------------------------------------------------------------------------- + +int file_descriptor_compare(void* a, void* b) { +#ifdef NETDATA_INTERNAL_CHECKS + if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE) + error("Corrupted index data detected. Please report this."); +#endif /* NETDATA_INTERNAL_CHECKS */ + + if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash) + return -1; + + else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash) + return 1; + + else + return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name); +} + +// int file_descriptor_iterator(avl *a) { if(a) {}; return 0; } + +avl_tree all_files_index = { + NULL, + file_descriptor_compare +}; + +static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) { + struct file_descriptor tmp; + tmp.hash = (hash)?hash:simple_hash(name); + tmp.name = name; + tmp.count = 0; + tmp.pos = 0; +#ifdef NETDATA_INTERNAL_CHECKS + tmp.magic = 0x0BADCAFE; +#endif /* NETDATA_INTERNAL_CHECKS */ + + return (struct file_descriptor *)avl_search(&all_files_index, (avl *) &tmp); +} + +#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl *)(fd)) +#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl *)(fd)) + +// ---------------------------------------------------------------------------- + +static inline void file_descriptor_not_used(int id) +{ + if(id > 0 && id < all_files_size) { + +#ifdef NETDATA_INTERNAL_CHECKS + if(all_files[id].magic != 0x0BADCAFE) { + error("Ignoring request to remove empty file id %d.", id); + return; + } +#endif /* NETDATA_INTERNAL_CHECKS */ + + debug_log("decreasing slot %d (count = %d).", id, all_files[id].count); + + if(all_files[id].count > 0) { + all_files[id].count--; + + if(!all_files[id].count) { + debug_log(" >> slot %d is empty.", id); + + if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id])) + error("INTERNAL ERROR: removal of unused fd from index, removed a different fd"); + +#ifdef NETDATA_INTERNAL_CHECKS + all_files[id].magic = 0x00000000; +#endif /* NETDATA_INTERNAL_CHECKS */ + all_files_len--; + } + } + else + error("Request to decrease counter of fd %d (%s), while the use counter is 0", id, all_files[id].name); + } + else error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", id, all_files_size); +} + +static inline void all_files_grow() { + void *old = all_files; + int i; + + // there is no empty slot + debug_log("extending fd array to %d entries", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); + + all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor)); + + // if the address changed, we have to rebuild the index + // since all pointers are now invalid + + if(unlikely(old && old != (void *)all_files)) { + debug_log(" >> re-indexing."); + + all_files_index.root = NULL; + for(i = 0; i < all_files_size; i++) { + if(!all_files[i].count) continue; + if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i])) + error("INTERNAL ERROR: duplicate indexing of fd during 
realloc."); + } + + debug_log(" >> re-indexing done."); + } + + // initialize the newly added entries + + for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) { + all_files[i].count = 0; + all_files[i].name = NULL; +#ifdef NETDATA_INTERNAL_CHECKS + all_files[i].magic = 0x00000000; +#endif /* NETDATA_INTERNAL_CHECKS */ + all_files[i].pos = i; + } + + if(unlikely(!all_files_size)) all_files_len = 1; + all_files_size += FILE_DESCRIPTORS_INCREASE_STEP; +} + +static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) { + // check we have enough memory to add it + if(!all_files || all_files_len == all_files_size) + all_files_grow(); + + debug_log(" >> searching for empty slot."); + + // search for an empty slot + + static int last_pos = 0; + int i, c; + for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) { + if(c >= all_files_size) c = 0; + if(c == 0) continue; + + if(!all_files[c].count) { + debug_log(" >> Examining slot %d.", c); + +#ifdef NETDATA_INTERNAL_CHECKS + if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash)) + error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name); +#endif /* NETDATA_INTERNAL_CHECKS */ + + debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name); + + freez((void *)all_files[c].name); + all_files[c].name = NULL; + last_pos = c; + break; + } + } + + all_files_len++; + + if(i == all_files_size) { + fatal("We should find an empty slot, but there isn't any"); + exit(1); + } + // else we have an empty slot in 'c' + + debug_log(" >> updating slot %d.", c); + + all_files[c].name = strdupz(name); + all_files[c].hash = hash; + all_files[c].type = type; + all_files[c].pos = c; + all_files[c].count = 1; +#ifdef NETDATA_INTERNAL_CHECKS + all_files[c].magic = 0x0BADCAFE; +#endif /* NETDATA_INTERNAL_CHECKS */ + if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c])) + error("INTERNAL ERROR: duplicate indexing of fd."); + + debug_log("using fd position %d (name: %s)", c, all_files[c].name); + + return c; +} + +static inline int file_descriptor_find_or_add(const char *name, uint32_t hash) { + if(unlikely(!hash)) + hash = simple_hash(name); + + debug_log("adding or finding name '%s' with hash %u", name, hash); + + struct file_descriptor *fd = file_descriptor_find(name, hash); + if(fd) { + // found + debug_log(" >> found on slot %d", fd->pos); + + fd->count++; + return fd->pos; + } + // not found + + FD_FILETYPE type; + if(likely(name[0] == '/')) type = FILETYPE_FILE; + else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE; + else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET; + else if(likely(strncmp(name, "anon_inode:", 11) == 0)) { + const char *t = &name[11]; + + if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY; + else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD; + else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL; + else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD; + else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD; + else { + debug_log("UNKNOWN anonymous inode: %s", name); + type = FILETYPE_OTHER; + } + } + else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY; + else { + debug_log("UNKNOWN linkname: %s", name); + type = FILETYPE_OTHER; + } + + return file_descriptor_set_on_empty_slot(name, hash, type); 
+} + +static inline void clear_pid_fd(struct pid_fd *pfd) { + pfd->fd = 0; + + #ifndef __FreeBSD__ + pfd->link_hash = 0; + pfd->inode = 0; + pfd->cache_iterations_counter = 0; + pfd->cache_iterations_reset = 0; +#endif +} + +static inline void make_all_pid_fds_negative(struct pid_stat *p) { + struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size]; + while(pfd < pfdend) { + pfd->fd = -(pfd->fd); + pfd++; + } +} + +static inline void cleanup_negative_pid_fds(struct pid_stat *p) { + struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size]; + + while(pfd < pfdend) { + int fd = pfd->fd; + + if(unlikely(fd < 0)) { + file_descriptor_not_used(-(fd)); + clear_pid_fd(pfd); + } + + pfd++; + } +} + +static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size) { + struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size]; + size_t i = first; + + while(pfd < pfdend) { +#ifndef __FreeBSD__ + pfd->filename = NULL; +#endif + clear_pid_fd(pfd); + pfd++; + i++; + } +} + +static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) { + (void)ptr; +#ifdef __FreeBSD__ + int mib[4]; + size_t size; + struct kinfo_file *fds; + static char *fdsbuf; + char *bfdsbuf, *efdsbuf; + char fdsname[FILENAME_MAX + 1]; + + // we make all pid fds negative, so that + // we can detect unused file descriptors + // at the end, to free them + make_all_pid_fds_negative(p); + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_FILEDESC; + mib[3] = p->pid; + + if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) { + error("sysctl error: Can't get file descriptors data size for pid %d", p->pid); + return 0; + } + if (likely(size > 0)) + fdsbuf = reallocz(fdsbuf, size); + if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) { + error("sysctl error: Can't get file descriptors data for pid %d", p->pid); + return 0; + } + + bfdsbuf = fdsbuf; + efdsbuf = fdsbuf + size; + while (bfdsbuf < efdsbuf) { + fds = (struct kinfo_file *)(uintptr_t)bfdsbuf; + if (unlikely(fds->kf_structsize == 0)) + break; + + // do not process file descriptors for current working directory, root directory, + // jail directory, ktrace vnode, text vnode and controlling terminal + if (unlikely(fds->kf_fd < 0)) { + bfdsbuf += fds->kf_structsize; + continue; + } + + // get file descriptors array index + int fdid = fds->kf_fd; + + // check if the fds array is small + if (unlikely(fdid >= p->fds_size)) { + // it is small, extend it + + debug_log("extending fd memory slots for %s from %d to %d", p->comm, p->fds_size, fdid + MAX_SPARE_FDS); + + p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd)); + + // and initialize it + init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size); + p->fds_size = fdid + MAX_SPARE_FDS; + } + + if (unlikely(p->fds[fdid].fd == 0)) { + // we don't know this fd, get it + + switch (fds->kf_type) { + case KF_TYPE_FIFO: + case KF_TYPE_VNODE: + if (unlikely(!fds->kf_path[0])) { + sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid); + break; + } + sprintf(fdsname, "%s", fds->kf_path); + break; + case KF_TYPE_SOCKET: + switch (fds->kf_sock_domain) { + case AF_INET: + case AF_INET6: + if (fds->kf_sock_protocol == IPPROTO_TCP) + sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb); + else + sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb); + break; + case AF_UNIX: + /* print address of pcb and connected pcb */ + sprintf(fdsname, "socket: %lx %lx", 
fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn); + break; + default: + /* print protocol number and socket address */ +#if __FreeBSD_version < 1200031 + sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2); +#else + sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2); +#endif + } + break; + case KF_TYPE_PIPE: + sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer); + break; + case KF_TYPE_PTS: +#if __FreeBSD_version < 1200031 + sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev); +#else + sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev); +#endif + break; + case KF_TYPE_SHM: + sprintf(fdsname, "other: shm: %s size: %lu", fds->kf_path, fds->kf_un.kf_file.kf_file_size); + break; + case KF_TYPE_SEM: + sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value); + break; + default: + sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd); + } + + // if another process already has this, we will get + // the same id + p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0); + } + + // else make it positive again, we need it + // of course, the actual file may have changed + + else + p->fds[fdid].fd = -p->fds[fdid].fd; + + bfdsbuf += fds->kf_structsize; + } +#else + if(unlikely(!p->fds_dirname)) { + char dirname[FILENAME_MAX+1]; + snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid); + p->fds_dirname = strdupz(dirname); + } + + DIR *fds = opendir(p->fds_dirname); + if(unlikely(!fds)) return 0; + + struct dirent *de; + char linkname[FILENAME_MAX + 1]; + + // we make all pid fds negative, so that + // we can detect unused file descriptors + // at the end, to free them + make_all_pid_fds_negative(p); + + while((de = readdir(fds))) { + // we need only files with numeric names + + if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9')) + continue; + + // get its number + int fdid = (int) str2l(de->d_name); + if(unlikely(fdid < 0)) continue; + + // check if the fds array is small + if(unlikely((size_t)fdid >= p->fds_size)) { + // it is small, extend it + + debug_log("extending fd memory slots for %s from %d to %d" + , p->comm + , p->fds_size + , fdid + MAX_SPARE_FDS + ); + + p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd)); + + // and initialize it + init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size); + p->fds_size = (size_t)fdid + MAX_SPARE_FDS; + } + + if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) { + // inodes do not match, clear the previous entry + inodes_changed_counter++; + file_descriptor_not_used(-p->fds[fdid].fd); + clear_pid_fd(&p->fds[fdid]); + } + + if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) { + p->fds[fdid].fd = -p->fds[fdid].fd; + p->fds[fdid].cache_iterations_counter--; + continue; + } + + if(unlikely(!p->fds[fdid].filename)) { + filenames_allocated_counter++; + char fdname[FILENAME_MAX + 1]; + snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name); + p->fds[fdid].filename = strdupz(fdname); + } + + file_counter++; + ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX); + if(unlikely(l == -1)) { + // cannot read the link + + if(debug_enabled || (p->target && p->target->debug_enabled)) + error("Cannot read 
link %s", p->fds[fdid].filename); + + if(unlikely(p->fds[fdid].fd < 0)) { + file_descriptor_not_used(-p->fds[fdid].fd); + clear_pid_fd(&p->fds[fdid]); + } + + continue; + } + else + linkname[l] = '\0'; + + uint32_t link_hash = simple_hash(linkname); + + if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) { + // the link changed + links_changed_counter++; + file_descriptor_not_used(-p->fds[fdid].fd); + clear_pid_fd(&p->fds[fdid]); + } + + if(unlikely(p->fds[fdid].fd == 0)) { + // we don't know this fd, get it + + // if another process already has this, we will get + // the same id + p->fds[fdid].fd = file_descriptor_find_or_add(linkname, link_hash); + p->fds[fdid].inode = de->d_ino; + p->fds[fdid].link_hash = link_hash; + } + else { + // else make it positive again, we need it + p->fds[fdid].fd = -p->fds[fdid].fd; + } + + // caching control + // without this we read all the files on every iteration + if(max_fds_cache_seconds > 0) { + size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds; + + // cache it for a few iterations + size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every; + p->fds[fdid].cache_iterations_reset++; + + if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread)) + p->fds[fdid].cache_iterations_reset++; + + if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) || + p->fds[fdid].cache_iterations_reset > max)) { + // for stdin, stdout, stderr (fdid <= 2) we have checked a few times, or if it goes above the max, goto max + p->fds[fdid].cache_iterations_reset = max; + } + + p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset; + } + } + + closedir(fds); +#endif + cleanup_negative_pid_fds(p); + + return 1; +} + +// ---------------------------------------------------------------------------- + +static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) { + char *prefix = "\\_ "; + int indent = 0; + + if(p->parent) + indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec); + else + prefix = " > "; + + char buffer[indent + 1]; + int i; + + for(i = 0; i < indent ;i++) buffer[i] = ' '; + buffer[i] = '\0'; + + fprintf(stderr, " %s %s%s (%d %s %llu" + , buffer + , prefix + , p->comm + , p->pid + , p->updated?"running":"exited" + , p->stat_collected_usec - time + ); + + if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime); + if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime); + if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime); + if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime); + if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime); + if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime); + if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt); + if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt); + if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt); + if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt); + fprintf(stderr, ")\n"); + + return indent + 1; +} + +static inline void debug_print_process_tree(struct pid_stat *p, char *msg) { + debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited"); + debug_print_process_and_parents(p, p->stat_collected_usec); +} + +static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) 
{ + int found = 0; + struct pid_stat *p = NULL; + + for(p = root_of_pids; p ; p = p->next) { + if(p == pe) continue; + + switch(type) { + case 1: + if(p->cminflt > lost) { + fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); + found++; + } + break; + + case 2: + if(p->cmajflt > lost) { + fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); + found++; + } + break; + + case 3: + if(p->cutime > lost) { + fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); + found++; + } + break; + + case 4: + if(p->cstime > lost) { + fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); + found++; + } + break; + + case 5: + if(p->cgtime > lost) { + fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); + found++; + } + break; + } + } + + if(!found) { + switch(type) { + case 1: + fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); + break; + + case 2: + fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); + break; + + case 3: + fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); + break; + + case 4: + fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); + break; + + case 5: + fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); + break; + } + } +} + +static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) { + kernel_uint_t absorbed = 0; + + if(*field > *pfield) { + absorbed += *pfield; + *field -= *pfield; + *pfield = 0; + } + else { + absorbed += *field; + *pfield -= *field; + *field = 0; + } + + return absorbed; +} + +static inline void process_exited_processes() { + struct pid_stat *p; + + for(p = root_of_pids; p ; p = p->next) { + if(p->updated || !p->stat_collected_usec) + continue; + + kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); + kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); + kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); + kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); + kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); + + if(utime + stime + gtime + minflt + majflt == 0) + continue; + + if(unlikely(debug_enabled)) { + debug_log("Absorb %s (%d %s 
total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+                , p->comm
+                , p->pid
+                , p->updated?"running":"exited"
+                , utime
+                , stime
+                , gtime
+                , minflt
+                , majflt
+                );
+            debug_print_process_tree(p, "Searching parents");
+        }
+
+        struct pid_stat *pp;
+        for(pp = p->parent; pp ; pp = pp->parent) {
+            if(!pp->updated) continue;
+
+            kernel_uint_t absorbed;
+            absorbed = remove_exited_child_from_parent(&utime, &pp->cutime);
+            if(unlikely(debug_enabled && absorbed))
+                debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime);
+
+            absorbed = remove_exited_child_from_parent(&stime, &pp->cstime);
+            if(unlikely(debug_enabled && absorbed))
+                debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime);
+
+            absorbed = remove_exited_child_from_parent(&gtime, &pp->cgtime);
+            if(unlikely(debug_enabled && absorbed))
+                debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime);
+
+            absorbed = remove_exited_child_from_parent(&minflt, &pp->cminflt);
+            if(unlikely(debug_enabled && absorbed))
+                debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt);
+
+            absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt);
+            if(unlikely(debug_enabled && absorbed))
+                debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt);
+        }
+
+        if(unlikely(utime + stime + gtime + minflt + majflt > 0)) {
+            if(unlikely(debug_enabled)) {
+                if(utime) debug_find_lost_child(p, utime, 3);
+                if(stime) debug_find_lost_child(p, stime, 4);
+                if(gtime) debug_find_lost_child(p, gtime, 5);
+                if(minflt) debug_find_lost_child(p, minflt, 1);
+                if(majflt) debug_find_lost_child(p, majflt, 2);
+            }
+
+            p->keep = 1;
+
+            debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+                , p->comm
+                , p->pid
+                , p->updated?"running":"exited"
+                , utime
+                , stime
+                , gtime
+                , minflt
+                , majflt
+                );
+
+            for(pp = p->parent; pp ; pp = pp->parent) {
+                if(pp->updated) break;
+                pp->keep = 1;
+
+                debug_log(" > - KEEP - parent for another loop: %s (%d %s)"
+                    , pp->comm
+                    , pp->pid
+                    , pp->updated?"running":"exited"
+                    );
+            }
+
+            p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+            p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+            p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+            p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+            p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+            p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0;
+
+            debug_log(" ");
+        }
+        else
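+            // everything was absorbed by running ancestors;
+            // p->keep stays 0, so cleanup_exited_pids() will
+            // remove this process on the next cleanup pass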
+ debug_log(" > totally absorbed - DONE - %s (%d %s)" + , p->comm + , p->pid + , p->updated?"running":"exited" + ); + } +} + +static inline void link_all_processes_to_their_parents(void) { + struct pid_stat *p, *pp; + + // link all children to their parents + // and update children count on parents + for(p = root_of_pids; p ; p = p->next) { + // for each process found + + p->sortlist = 0; + p->parent = NULL; + + if(unlikely(!p->ppid)) { + p->parent = NULL; + continue; + } + + pp = all_pids[p->ppid]; + if(likely(pp)) { + p->parent = pp; + pp->children_count++; + + if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) + debug_log_int("child %d (%s, %s) on target '%s' has parent %d (%s, %s). Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt); + } + else { + p->parent = NULL; + error("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid); + } + } +} + +// ---------------------------------------------------------------------------- + +// 1. read all files in /proc +// 2. for each numeric directory: +// i. read /proc/pid/stat +// ii. read /proc/pid/status +// iii. read /proc/pid/io (requires root access) +// iii. read the entries in directory /proc/pid/fd (requires root access) +// for each entry: +// a. find or create a struct file_descriptor +// b. cleanup any old/unused file_descriptors + +// after all these, some pids may be linked to targets, while others may not + +// in case of errors, only 1 every 1000 errors is printed +// to avoid filling up all disk space +// if debug is enabled, all errors are printed + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) +static int compar_pid(const void *pid1, const void *pid2) { + + struct pid_stat *p1 = all_pids[*((pid_t *)pid1)]; + struct pid_stat *p2 = all_pids[*((pid_t *)pid2)]; + + if(p1->sortlist > p2->sortlist) + return -1; + else + return 1; +} +#endif + +static inline int collect_data_for_pid(pid_t pid, void *ptr) { + if(unlikely(pid < 0 || pid > pid_max)) { + error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); + return 0; + } + + struct pid_stat *p = get_pid_entry(pid); + if(unlikely(!p || p->read)) return 0; + p->read = 1; + + // debug_log("Reading process %d (%s), sortlist %d", p->pid, p->comm, p->sortlist); + + // -------------------------------------------------------------------- + // /proc//stat + + if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr)))) + // there is no reason to proceed if we cannot get its status + return 0; + + // check its parent pid + if(unlikely(p->ppid < 0 || p->ppid > pid_max)) { + error("Pid %d (command '%s') states invalid parent pid %d. 
Using 0.", pid, p->comm, p->ppid); + p->ppid = 0; + } + + // -------------------------------------------------------------------- + // /proc//io + + managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr)); + + // -------------------------------------------------------------------- + // /proc//status + + if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr)))) + // there is no reason to proceed if we cannot get its status + return 0; + + // -------------------------------------------------------------------- + // /proc//fd + + if(enable_file_charts) + managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr)); + + // -------------------------------------------------------------------- + // done! + + if(unlikely(debug_enabled && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read)) + debug_log("Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist); + + // mark it as updated + p->updated = 1; + p->keep = 0; + p->keeploops = 0; + + return 1; +} + +static int collect_data_for_all_processes(void) { + struct pid_stat *p = NULL; + +#ifdef __FreeBSD__ + int i, procnum; + + static size_t procbase_size = 0; + static struct kinfo_proc *procbase = NULL; + + size_t new_procbase_size; + + int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC }; + if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) { + error("sysctl error: Can't get processes data size"); + return 0; + } + + // give it some air for processes that may be started + // during this little time. + new_procbase_size += 100 * sizeof(struct kinfo_proc); + + // increase the buffer if needed + if(new_procbase_size > procbase_size) { + procbase_size = new_procbase_size; + procbase = reallocz(procbase, procbase_size); + } + + // sysctl() gets from new_procbase_size the buffer size + // and also returns to it the amount of data filled in + new_procbase_size = procbase_size; + + // get the processes from the system + if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) { + error("sysctl error: Can't get processes data"); + return 0; + } + + // based on the amount of data filled in + // calculate the number of processes we got + procnum = new_procbase_size / sizeof(struct kinfo_proc); + +#endif + + if(all_pids_count) { +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + size_t slc = 0; +#endif + for(p = root_of_pids; p ; p = p->next) { + p->read = 0; // mark it as not read, so that collect_data_for_pid() will read it + p->updated = 0; + p->merged = 0; + p->children_count = 0; + p->parent = NULL; + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + all_pids_sortlist[slc++] = p->pid; +#endif + } + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + if(unlikely(slc != all_pids_count)) { + error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc); + all_pids_count = slc; + } + + if(include_exited_childs) { + // Read parents before childs + // This is needed to prevent a situation where + // a child is found running, but until we read + // its parent, it has exited and its parent + // has accumulated its resources. 
+
+            qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid);
+
+            // we read all running processes in this order;
+            // collect_data_for_pid() is smart enough
+            // not to read the same pid twice per iteration
+            for(slc = 0; slc < all_pids_count; slc++)
+                collect_data_for_pid(all_pids_sortlist[slc], NULL);
+        }
+#endif
+    }
+
+#ifdef __FreeBSD__
+    for (i = 0 ; i < procnum ; ++i) {
+        pid_t pid = procbase[i].ki_pid;
+        collect_data_for_pid(pid, &procbase[i]);
+    }
+#else
+    char dirname[FILENAME_MAX + 1];
+
+    snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+    DIR *dir = opendir(dirname);
+    if(!dir) return 0;
+
+    struct dirent *de = NULL;
+
+    while((de = readdir(dir))) {
+        char *endptr = de->d_name;
+
+        if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
+            continue;
+
+        pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10);
+
+        // make sure we read a valid number
+        if(unlikely(endptr == de->d_name || *endptr != '\0'))
+            continue;
+
+        collect_data_for_pid(pid, NULL);
+    }
+    closedir(dir);
+#endif
+
+    if(!all_pids_count)
+        return 0;
+
+    // we need /proc/stat to normalize the cpu consumption of the exited children
+    read_proc_stat();
+
+    // build the process tree
+    link_all_processes_to_their_parents();
+
+    // normally all processes are read by now;
+    // however, some may have exited while we were collecting values,
+    // so let's find the exited ones;
+    // we do this by collecting the ownership of each process:
+    // if we manage to get the ownership, the process is still running
+    process_exited_processes();
+
+    return 1;
+}
+
+// ----------------------------------------------------------------------------
+// update statistics on the targets
+
+// 1. link all children to their parents
+// 2. go from bottom to top, marking as merged all children to their parents;
+//    this step links all parents without a target to the child's target, if any
+// 3. link all top level processes (the ones not merged) to the default target
+// 4. go from top to bottom, linking all children without a target to their parent's target;
+//    after this step, all processes have a target
+// [5. for each killed pid (updated = 0), remove its usage from its target]
+// 6. zero all apps_groups_targets
+// 7. concentrate all values on the apps_groups_targets
+// 8. remove all killed processes
+// 9. 
find the unique file count for each target
+// check: update_apps_groups_statistics()
+
+static void cleanup_exited_pids(void) {
+    size_t c;
+    struct pid_stat *p = NULL;
+
+    for(p = root_of_pids; p ;) {
+        if(!p->updated && (!p->keep || p->keeploops > 0)) {
+            if(unlikely(debug_enabled && (p->keep || p->keeploops)))
+                debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
+
+            for(c = 0; c < p->fds_size; c++)
+                if(p->fds[c].fd > 0) {
+                    file_descriptor_not_used(p->fds[c].fd);
+                    clear_pid_fd(&p->fds[c]);
+                }
+
+            pid_t r = p->pid;
+            p = p->next;
+            del_pid_entry(r);
+        }
+        else {
+            if(unlikely(p->keep)) p->keeploops++;
+            p->keep = 0;
+            p = p->next;
+        }
+    }
+}
+
+static void apply_apps_groups_targets_inheritance(void) {
+    struct pid_stat *p = NULL;
+
+    // children that do not have a target
+    // inherit their target from their parent
+    int found = 1, loops = 0;
+    while(found) {
+        if(unlikely(debug_enabled)) loops++;
+        found = 0;
+        for(p = root_of_pids; p ; p = p->next) {
+            // if this process does not have a target
+            // and it has a parent
+            // and its parent has a target
+            // then let this process inherit its parent's target
+            if(unlikely(!p->target && p->parent && p->parent->target)) {
+                p->target = p->parent->target;
+                found++;
+
+                if(debug_enabled || (p->target && p->target->debug_enabled))
+                    debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+            }
+        }
+    }
+
+    // find all the procs with no children and merge them to their parents
+    // repeat, until nothing more can be done.
+    int sortlist = 1;
+    found = 1;
+    while(found) {
+        if(unlikely(debug_enabled)) loops++;
+        found = 0;
+
+        for(p = root_of_pids; p ; p = p->next) {
+            if(unlikely(!p->sortlist && !p->children_count))
+                p->sortlist = sortlist++;
+
+            if(unlikely(
+                    !p->children_count              // if this process does not have any children
+                    && !p->merged                   // and is not already merged
+                    && p->parent                    // and has a parent
+                    && p->parent->children_count    // and its parent has children
+                                                    // and the target of this process and its parent is the same,
+                                                    // or the parent does not have a target
+                    && (p->target == p->parent->target || !p->parent->target)
+                    && p->ppid != INIT_PID          // and its parent is not init
+                )) {
+                // mark it as merged
+                p->parent->children_count--;
+                p->merged = 1;
+
+                // the parent inherits the child's target, if it does not have a target itself
+                if(unlikely(p->target && !p->parent->target)) {
+                    p->parent->target = p->target;
+
+                    if(debug_enabled || (p->target && p->target->debug_enabled))
+                        debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm);
+                }
+
+                found++;
+            }
+        }
+
+        debug_log("TARGET INHERITANCE: merged %d processes", found);
+    }
+
+    // init always goes to the default target
+    if(all_pids[INIT_PID])
+        all_pids[INIT_PID]->target = apps_groups_default_target;
+
+    // pid 0 always goes to the default target
+    if(all_pids[0])
+        all_pids[0]->target = apps_groups_default_target;
+
+    // give a default target to all top level processes
+    if(unlikely(debug_enabled)) loops++;
+    for(p = root_of_pids; p ; p = p->next) {
+        // if the process is not merged itself
+        // then it is a top level process
+        if(unlikely(!p->merged && !p->target))
+            p->target = apps_groups_default_target;
+
+        // make sure all processes have a sortlist
+        if(unlikely(!p->sortlist))
+            p->sortlist = sortlist++;
+    }
+
+    if(all_pids[1])
all_pids[1]->sortlist = sortlist++; + + // give a target to all merged child processes + found = 1; + while(found) { + if(unlikely(debug_enabled)) loops++; + found = 0; + for(p = root_of_pids; p ; p = p->next) { + if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) { + p->target = p->parent->target; + found++; + + if(debug_enabled || (p->target && p->target->debug_enabled)) + debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm); + } + } + } + + debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops); +} + +static size_t zero_all_targets(struct target *root) { + struct target *w; + size_t count = 0; + + for (w = root; w ; w = w->next) { + count++; + + w->minflt = 0; + w->majflt = 0; + w->utime = 0; + w->stime = 0; + w->gtime = 0; + w->cminflt = 0; + w->cmajflt = 0; + w->cutime = 0; + w->cstime = 0; + w->cgtime = 0; + w->num_threads = 0; + // w->rss = 0; + w->processes = 0; + + w->status_vmsize = 0; + w->status_vmrss = 0; + w->status_vmshared = 0; + w->status_rssfile = 0; + w->status_rssshmem = 0; + w->status_vmswap = 0; + + w->io_logical_bytes_read = 0; + w->io_logical_bytes_written = 0; + // w->io_read_calls = 0; + // w->io_write_calls = 0; + w->io_storage_bytes_read = 0; + w->io_storage_bytes_written = 0; + // w->io_cancelled_write_bytes = 0; + + // zero file counters + if(w->target_fds) { + memset(w->target_fds, 0, sizeof(int) * w->target_fds_size); + w->openfiles = 0; + w->openpipes = 0; + w->opensockets = 0; + w->openinotifies = 0; + w->openeventfds = 0; + w->opentimerfds = 0; + w->opensignalfds = 0; + w->openeventpolls = 0; + w->openother = 0; + } + } + + return count; +} + +static inline void reallocate_target_fds(struct target *w) { + if(unlikely(!w)) + return; + + if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) { + w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size); + memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size)); + w->target_fds_size = all_files_size; + } +} + +static inline void aggregate_fd_on_target(int fd, struct target *w) { + if(unlikely(!w)) + return; + + if(unlikely(w->target_fds[fd])) { + // it is already aggregated + // just increase its usage counter + w->target_fds[fd]++; + return; + } + + // increase its usage counter + // so that we will not add it again + w->target_fds[fd]++; + + switch(all_files[fd].type) { + case FILETYPE_FILE: + w->openfiles++; + break; + + case FILETYPE_PIPE: + w->openpipes++; + break; + + case FILETYPE_SOCKET: + w->opensockets++; + break; + + case FILETYPE_INOTIFY: + w->openinotifies++; + break; + + case FILETYPE_EVENTFD: + w->openeventfds++; + break; + + case FILETYPE_TIMERFD: + w->opentimerfds++; + break; + + case FILETYPE_SIGNALFD: + w->opensignalfds++; + break; + + case FILETYPE_EVENTPOLL: + w->openeventpolls++; + break; + + case FILETYPE_OTHER: + w->openother++; + break; + } +} + +static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) { + + if(unlikely(!p->updated)) { + // the process is not running + return; + } + + struct target *w = p->target, *u = p->user_target, *g = p->group_target; + + reallocate_target_fds(w); + reallocate_target_fds(u); + reallocate_target_fds(g); + + size_t c, size = p->fds_size; + struct pid_fd *fds = p->fds; + for(c = 0; c < size ;c++) { + int fd = fds[c].fd; + + if(likely(fd <= 0 || fd >= all_files_size)) + continue; + + 
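+        // the same fd slot may be referenced by many processes of a
+        // target; aggregate_fd_on_target() counts references per slot
+        // in w->target_fds[fd], so each distinct descriptor is added
+        // to the per-target open files/sockets/pipes totals only once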
aggregate_fd_on_target(fd, w); + aggregate_fd_on_target(fd, u); + aggregate_fd_on_target(fd, g); + } +} + +static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) { + (void)o; + + if(unlikely(!p->updated)) { + // the process is not running + return; + } + + if(unlikely(!w)) { + error("pid %d %s was left without a target!", p->pid, p->comm); + return; + } + + w->cutime += p->cutime; + w->cstime += p->cstime; + w->cgtime += p->cgtime; + w->cminflt += p->cminflt; + w->cmajflt += p->cmajflt; + + w->utime += p->utime; + w->stime += p->stime; + w->gtime += p->gtime; + w->minflt += p->minflt; + w->majflt += p->majflt; + + // w->rss += p->rss; + + w->status_vmsize += p->status_vmsize; + w->status_vmrss += p->status_vmrss; + w->status_vmshared += p->status_vmshared; + w->status_rssfile += p->status_rssfile; + w->status_rssshmem += p->status_rssshmem; + w->status_vmswap += p->status_vmswap; + + w->io_logical_bytes_read += p->io_logical_bytes_read; + w->io_logical_bytes_written += p->io_logical_bytes_written; + // w->io_read_calls += p->io_read_calls; + // w->io_write_calls += p->io_write_calls; + w->io_storage_bytes_read += p->io_storage_bytes_read; + w->io_storage_bytes_written += p->io_storage_bytes_written; + // w->io_cancelled_write_bytes += p->io_cancelled_write_bytes; + + w->processes++; + w->num_threads += p->num_threads; + + if(unlikely(debug_enabled || w->debug_enabled)) + debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt); +} + +static void calculate_netdata_statistics(void) { + + apply_apps_groups_targets_inheritance(); + + zero_all_targets(users_root_target); + zero_all_targets(groups_root_target); + apps_groups_targets_count = zero_all_targets(apps_groups_root_target); + + // this has to be done, before the cleanup + struct pid_stat *p = NULL; + struct target *w = NULL, *o = NULL; + + // concentrate everything on the targets + for(p = root_of_pids; p ; p = p->next) { + + // -------------------------------------------------------------------- + // apps_groups target + + aggregate_pid_on_target(p->target, p, NULL); + + + // -------------------------------------------------------------------- + // user target + + o = p->user_target; + if(likely(p->user_target && p->user_target->uid == p->uid)) + w = p->user_target; + else { + if(unlikely(debug_enabled && p->user_target)) + debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid); + + w = p->user_target = get_users_target(p->uid); + } + + aggregate_pid_on_target(w, p, o); + + + // -------------------------------------------------------------------- + // user group target + + o = p->group_target; + if(likely(p->group_target && p->group_target->gid == p->gid)) + w = p->group_target; + else { + if(unlikely(debug_enabled && p->group_target)) + debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid); + + w = p->group_target = get_groups_target(p->gid); + } + + aggregate_pid_on_target(w, p, o); + + + // 
-------------------------------------------------------------------- + // aggregate all file descriptors + + if(enable_file_charts) + aggregate_pid_fds_on_targets(p); + } + + cleanup_exited_pids(); +} + +// ---------------------------------------------------------------------------- +// update chart dimensions + +static inline void send_BEGIN(const char *type, const char *id, usec_t usec) { + fprintf(stdout, "BEGIN %s.%s %llu\n", type, id, usec); +} + +static inline void send_SET(const char *name, kernel_uint_t value) { + fprintf(stdout, "SET %s = " KERNEL_UINT_FORMAT "\n", name, value); +} + +static inline void send_END(void) { + fprintf(stdout, "END\n"); +} + +void send_resource_usage_to_netdata(usec_t dt) { + static struct timeval last = { 0, 0 }; + static struct rusage me_last; + + struct timeval now; + struct rusage me; + + usec_t cpuuser; + usec_t cpusyst; + + if(!last.tv_sec) { + now_monotonic_timeval(&last); + getrusage(RUSAGE_SELF, &me_last); + + cpuuser = 0; + cpusyst = 0; + } + else { + now_monotonic_timeval(&now); + getrusage(RUSAGE_SELF, &me); + + cpuuser = me.ru_utime.tv_sec * USEC_PER_SEC + me.ru_utime.tv_usec; + cpusyst = me.ru_stime.tv_sec * USEC_PER_SEC + me.ru_stime.tv_usec; + + memmove(&last, &now, sizeof(struct timeval)); + memmove(&me_last, &me, sizeof(struct rusage)); + } + + static char created_charts = 0; + if(unlikely(!created_charts)) { + created_charts = 1; + + fprintf(stdout, + "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n" + "DIMENSION user '' incremental 1 1000\n" + "DIMENSION system '' incremental 1 1000\n" + "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n" + "DIMENSION calls '' incremental 1 1\n" + "DIMENSION files '' incremental 1 1\n" + "DIMENSION filenames '' incremental 1 1\n" + "DIMENSION inode_changes '' incremental 1 1\n" + "DIMENSION link_changes '' incremental 1 1\n" + "DIMENSION pids '' absolute 1 1\n" + "DIMENSION fds '' absolute 1 1\n" + "DIMENSION targets '' absolute 1 1\n" + "DIMENSION new_pids 'new pids' incremental 1 1\n" + , update_every + ); + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + fprintf(stdout, + "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n" + "DIMENSION utime '' absolute 1 %2$llu\n" + "DIMENSION stime '' absolute 1 %2$llu\n" + "DIMENSION gtime '' absolute 1 %2$llu\n" + "DIMENSION minflt '' absolute 1 %2$llu\n" + "DIMENSION majflt '' absolute 1 %2$llu\n" + , update_every + , RATES_DETAIL + ); + + if(include_exited_childs) + fprintf(stdout, + "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n" + "DIMENSION cutime '' absolute 1 %2$llu\n" + "DIMENSION cstime '' absolute 1 %2$llu\n" + "DIMENSION cgtime '' absolute 1 %2$llu\n" + "DIMENSION cminflt '' absolute 1 %2$llu\n" + "DIMENSION cmajflt '' absolute 1 %2$llu\n" + , update_every + , RATES_DETAIL + ); +#endif + + } + + fprintf(stdout, + "BEGIN netdata.apps_cpu %llu\n" + "SET user = %llu\n" + "SET system = %llu\n" + "END\n" + "BEGIN netdata.apps_sizes %llu\n" + "SET calls = %zu\n" + "SET files = %zu\n" + "SET filenames = %zu\n" + "SET inode_changes = %zu\n" + "SET link_changes = %zu\n" + "SET pids = %zu\n" + "SET fds = %d\n" + "SET targets = %zu\n" + "SET new_pids = %zu\n" + "END\n" + , dt + , cpuuser + , cpusyst + , dt + , calls_counter + , file_counter + , 
filenames_allocated_counter
+        , inodes_changed_counter
+        , links_changed_counter
+        , all_pids_count
+        , all_files_len
+        , apps_groups_targets_count
+        , targets_assignment_counter
+        );
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+    fprintf(stdout,
+        "BEGIN netdata.apps_fix %llu\n"
+        "SET utime = %u\n"
+        "SET stime = %u\n"
+        "SET gtime = %u\n"
+        "SET minflt = %u\n"
+        "SET majflt = %u\n"
+        "END\n"
+        , dt
+        , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL)
+        , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL)
+        , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL)
+        , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL)
+        , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL)
+        );
+
+    if(include_exited_childs)
+        fprintf(stdout,
+            "BEGIN netdata.apps_children_fix %llu\n"
+            "SET cutime = %u\n"
+            "SET cstime = %u\n"
+            "SET cgtime = %u\n"
+            "SET cminflt = %u\n"
+            "SET cmajflt = %u\n"
+            "END\n"
+            , dt
+            , (unsigned int)(cutime_fix_ratio * 100 * RATES_DETAIL)
+            , (unsigned int)(cstime_fix_ratio * 100 * RATES_DETAIL)
+            , (unsigned int)(cgtime_fix_ratio * 100 * RATES_DETAIL)
+            , (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL)
+            , (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL)
+            );
+#endif
+}
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+static void normalize_utilization(struct target *root) {
+    struct target *w;
+
+    // processing of exited children introduces spikes;
+    // here we try to eliminate them by disabling children processing,
+    // either for specific dimensions or entirely.
+    // either way, we disable it only for a single iteration.
+
+    kernel_uint_t max_time = processors * system_hz * RATES_DETAIL;
+    kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0;
+
+    if(global_utime > max_time) global_utime = max_time;
+    if(global_stime > max_time) global_stime = max_time;
+    if(global_gtime > max_time) global_gtime = max_time;
+
+    for(w = root; w ; w = w->next) {
+        if(w->target || (!w->processes && !w->exposed)) continue;
+
+        utime += w->utime;
+        stime += w->stime;
+        gtime += w->gtime;
+        cutime += w->cutime;
+        cstime += w->cstime;
+        cgtime += w->cgtime;
+
+        minflt += w->minflt;
+        majflt += w->majflt;
+        cminflt += w->cminflt;
+        cmajflt += w->cmajflt;
+    }
+
+    if((global_utime || global_stime || global_gtime) && (utime || stime || gtime)) {
+        if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) {
+            // everything we collected fits
+            utime_fix_ratio =
+            stime_fix_ratio =
+            gtime_fix_ratio =
+            cutime_fix_ratio =
+            cstime_fix_ratio =
+            cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime);
+        }
+        else if(global_utime + global_stime > utime + stime) {
+            // the children's resources are too high;
+            // lower only the children's resources
+            utime_fix_ratio =
+            stime_fix_ratio =
+            gtime_fix_ratio = 1.0;
+            cutime_fix_ratio =
+            cstime_fix_ratio =
+            cgtime_fix_ratio = (double)((global_utime + global_stime) - (utime + stime)) / (double)(cutime + cstime);
+        }
+        else {
+            // even the running processes are unrealistic;
+            // zero the children's resources and
+            // lower the running processes' resources
+            utime_fix_ratio =
+            stime_fix_ratio =
+            gtime_fix_ratio = (double)(global_utime + global_stime) / (double)(utime + stime);
+            cutime_fix_ratio =
+            cstime_fix_ratio =
+            cgtime_fix_ratio = 0.0;
+        }
+    }
+    else {
+        utime_fix_ratio =
+        stime_fix_ratio =
+        gtime_fix_ratio =
+        cutime_fix_ratio =
+        cstime_fix_ratio =
+        cgtime_fix_ratio = 0.0;
+    }
+
+    if(utime_fix_ratio > 
1.0) utime_fix_ratio = 1.0;
+    if(cutime_fix_ratio > 1.0) cutime_fix_ratio = 1.0;
+    if(stime_fix_ratio > 1.0) stime_fix_ratio = 1.0;
+    if(cstime_fix_ratio > 1.0) cstime_fix_ratio = 1.0;
+    if(gtime_fix_ratio > 1.0) gtime_fix_ratio = 1.0;
+    if(cgtime_fix_ratio > 1.0) cgtime_fix_ratio = 1.0;
+
+    // if(utime_fix_ratio < 0.0) utime_fix_ratio = 0.0;
+    // if(cutime_fix_ratio < 0.0) cutime_fix_ratio = 0.0;
+    // if(stime_fix_ratio < 0.0) stime_fix_ratio = 0.0;
+    // if(cstime_fix_ratio < 0.0) cstime_fix_ratio = 0.0;
+    // if(gtime_fix_ratio < 0.0) gtime_fix_ratio = 0.0;
+    // if(cgtime_fix_ratio < 0.0) cgtime_fix_ratio = 0.0;
+
+    // TODO
+    // we use cpu time to normalize page faults;
+    // the problem is that to find the proper max values
+    // for page faults we would have to parse /proc/vmstat,
+    // which is too expensive to parse again (netdata already does it)
+    //
+    // a better solution could be to somehow have netdata
+    // do this normalization for us
+
+    if(utime || stime || gtime)
+        majflt_fix_ratio =
+        minflt_fix_ratio = (double)(utime * utime_fix_ratio + stime * stime_fix_ratio + gtime * gtime_fix_ratio) / (double)(utime + stime + gtime);
+    else
+        minflt_fix_ratio =
+        majflt_fix_ratio = 1.0;
+
+    if(cutime || cstime || cgtime)
+        cmajflt_fix_ratio =
+        cminflt_fix_ratio = (double)(cutime * cutime_fix_ratio + cstime * cstime_fix_ratio + cgtime * cgtime_fix_ratio) / (double)(cutime + cstime + cgtime);
+    else
+        cminflt_fix_ratio =
+        cmajflt_fix_ratio = 1.0;
+
+    // the report
+
+    debug_log(
+        "SYSTEM: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
+        "COLLECTED: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
+        "DELTA: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
+        "FIX: u=%0.2f s=%0.2f g=%0.2f cu=%0.2f cs=%0.2f cg=%0.2f "
+        "FINALLY: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
+        , global_utime
+        , global_stime
+        , global_gtime
+        , utime
+        , stime
+        , gtime
+        , cutime
+        , cstime
+        , cgtime
+        , utime + cutime - global_utime
+        , stime + cstime - global_stime
+        , gtime + cgtime - global_gtime
+        , utime_fix_ratio
+        , stime_fix_ratio
+        , gtime_fix_ratio
+        , cutime_fix_ratio
+        , cstime_fix_ratio
+        , cgtime_fix_ratio
+        , (kernel_uint_t)(utime * utime_fix_ratio)
+        , (kernel_uint_t)(stime * stime_fix_ratio)
+        , (kernel_uint_t)(gtime * gtime_fix_ratio)
+        , (kernel_uint_t)(cutime * cutime_fix_ratio)
+        , (kernel_uint_t)(cstime * cstime_fix_ratio)
+        , (kernel_uint_t)(cgtime * cgtime_fix_ratio)
+        );
+}
+#else // ALL_PIDS_ARE_READ_INSTANTLY == 1
+static void normalize_utilization(struct target *root) {
+    (void)root;
+}
+#endif // ALL_PIDS_ARE_READ_INSTANTLY
+
+static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) {
+    struct target *w;
+
+    send_BEGIN(type, "cpu", dt);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (kernel_uint_t)(w->stime * stime_fix_ratio) + (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio) + (kernel_uint_t)(w->cstime * cstime_fix_ratio) + (kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
+    }
+    send_END();
+
+    send_BEGIN(type, "cpu_user", dt);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            send_SET(w->name, (kernel_uint_t)(w->utime 
* utime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio)):0ULL)); + } + send_END(); + + send_BEGIN(type, "cpu_system", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cstime * cstime_fix_ratio)):0ULL)); + } + send_END(); + + if(show_guest_time) { + send_BEGIN(type, "cpu_guest", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL)); + } + send_END(); + } + + send_BEGIN(type, "threads", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->num_threads); + } + send_END(); + + send_BEGIN(type, "processes", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->processes); + } + send_END(); + + send_BEGIN(type, "mem", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared):0ULL); + } + send_END(); + + send_BEGIN(type, "vmem", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->status_vmsize); + } + send_END(); + +#ifndef __FreeBSD__ + send_BEGIN(type, "swap", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->status_vmswap); + } + send_END(); +#endif + + send_BEGIN(type, "minor_faults", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)):0ULL)); + } + send_END(); + + send_BEGIN(type, "major_faults", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)):0ULL)); + } + send_END(); + +#ifndef __FreeBSD__ + send_BEGIN(type, "lreads", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->io_logical_bytes_read); + } + send_END(); + + send_BEGIN(type, "lwrites", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->io_logical_bytes_written); + } + send_END(); +#endif + + send_BEGIN(type, "preads", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->io_storage_bytes_read); + } + send_END(); + + send_BEGIN(type, "pwrites", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + send_SET(w->name, w->io_storage_bytes_written); + } + send_END(); + + if(enable_file_charts) { + send_BEGIN(type, "files", dt); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed)) + send_SET(w->name, w->openfiles); + } + send_END(); + + send_BEGIN(type, "sockets", dt); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed)) + send_SET(w->name, w->opensockets); + } + send_END(); + + send_BEGIN(type, "pipes", dt); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed)) + send_SET(w->name, w->openpipes); + } + send_END(); + } +} + + +// ---------------------------------------------------------------------------- +// generate the charts + +static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *title) +{ + struct target *w; + int newly_added = 0; + + for(w = root ; w ; w 
= w->next) {
+        if (w->target) continue;
+
+        if (!w->exposed && w->processes) {
+            newly_added++;
+            w->exposed = 1;
+            if (debug_enabled || w->debug_enabled)
+                debug_log_int("%s just added - regenerating charts.", w->name);
+        }
+    }
+
+    // nothing more to show
+    if(!newly_added && show_guest_time == show_guest_time_old) return;
+
+    // we have something new to show
+    // update the charts
+    fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, system_hz * RATES_DETAIL / 100, w->hidden ? "hidden" : "");
+    }
+
+    fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+    }
+
+    fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+    }
+
+    fprintf(stdout, "CHART %s.threads '' '%s Threads' 'threads' processes %s.threads stacked 20006 %d\n", type, title, type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+    }
+
+    fprintf(stdout, "CHART %s.processes '' '%s Processes' 'processes' processes %s.processes stacked 20007 %d\n", type, title, type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+    }
+
+    fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+    }
+
+    fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+    for (w = root; w ; w = w->next) {
+        if(unlikely(w->exposed))
+            fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+    }
+
+    if(show_guest_time) {
+        fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_guest stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? 
"s" : "", type, update_every); + for (w = root; w; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU); + } + } + +#ifndef __FreeBSD__ + fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); + } +#endif + + fprintf(stdout, "CHART %s.major_faults '' '%s Major Page Faults (swap read)' 'page faults/s' swap %s.major_faults stacked 20012 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); + } + + fprintf(stdout, "CHART %s.minor_faults '' '%s Minor Page Faults' 'page faults/s' mem %s.minor_faults stacked 20011 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); + } + +#ifdef __FreeBSD__ + fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'blocks/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); + } + + fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'blocks/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); + } +#else + fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'kilobytes/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); + } + + fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'kilobytes/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); + } + + fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'kilobytes/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); + } + + fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'kilobytes/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); + } +#endif + + if(enable_file_charts) { + fprintf(stdout, "CHART %s.files '' '%s Open Files' 'open files' disk %s.files stacked 20050 %d\n", type, + title, type, update_every); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + } + + fprintf(stdout, "CHART %s.sockets '' '%s Open Sockets' 'open sockets' net %s.sockets stacked 20051 %d\n", + type, title, type, update_every); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + } + + fprintf(stdout, "CHART %s.pipes '' '%s Pipes' 
'open pipes' processes %s.pipes stacked 20053 %d\n", type, + title, type, update_every); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + } + } +} + + +// ---------------------------------------------------------------------------- +// parse command line arguments + +int check_proc_1_io() { + int ret = 0; + + procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(!ff) goto cleanup; + + ff = procfile_readall(ff); + if(!ff) goto cleanup; + + ret = 1; + +cleanup: + procfile_close(ff); + return ret; +} + +static void parse_args(int argc, char **argv) +{ + int i, freq = 0; + + for(i = 1; i < argc; i++) { + if(!freq) { + int n = (int)str2l(argv[i]); + if(n > 0) { + freq = n; + continue; + } + } + + if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) { + printf("apps.plugin %s\n", VERSION); + exit(0); + } + + if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) { + if(!check_proc_1_io()) { + perror("Tried to read /proc/1/io and it failed"); + exit(1); + } + printf("OK\n"); + exit(0); + } + + if(strcmp("debug", argv[i]) == 0) { +#ifdef NETDATA_INTERNAL_CHECKS + debug_enabled = 1; +#else + fprintf(stderr, "apps.plugin has been compiled without debugging\n"); +#endif + continue; + } + +#ifndef __FreeBSD__ + if(strcmp("fds-cache-secs", argv[i]) == 0) { + if(argc <= i + 1) { + fprintf(stderr, "Parameter 'fds-cache-secs' requires a number as argument.\n"); + exit(1); + } + i++; + max_fds_cache_seconds = str2i(argv[i]); + if(max_fds_cache_seconds < 0) max_fds_cache_seconds = 0; + continue; + } +#endif + + if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) { + include_exited_childs = 0; + continue; + } + + if(strcmp("with-childs", argv[i]) == 0) { + include_exited_childs = 1; + continue; + } + + if(strcmp("with-guest", argv[i]) == 0) { + enable_guest_charts = 1; + continue; + } + + if(strcmp("no-guest", argv[i]) == 0 || strcmp("without-guest", argv[i]) == 0) { + enable_guest_charts = 0; + continue; + } + + if(strcmp("with-files", argv[i]) == 0) { + enable_file_charts = 1; + continue; + } + + if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) { + enable_file_charts = 0; + continue; + } + + if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) { + enable_users_charts = 0; + continue; + } + + if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) { + enable_groups_charts = 0; + continue; + } + + if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) { + fprintf(stderr, + "\n" + " netdata apps.plugin %s\n" + " Copyright (C) 2016-2017 Costa Tsaousis \n" + " Released under GNU General Public License v3 or later.\n" + " All rights reserved.\n" + "\n" + " This program is a data collector plugin for netdata.\n" + "\n" + " Available command line options:\n" + "\n" + " SECONDS set the data collection frequency\n" + "\n" + " debug enable debugging (lot of output)\n" + "\n" + " with-childs\n" + " without-childs enable / disable aggregating exited\n" + " children resources into parents\n" + " (default is enabled)\n" + "\n" + " with-guest\n" + " without-guest enable / disable reporting guest charts\n" + " (default is disabled)\n" + "\n" + " with-files\n" + " without-files enable / disable reporting files, sockets, pipes\n" + " (default is enabled)\n" + "\n" 
+#ifndef __FreeBSD__ + " fds-cache-secs N cache the files of processed for N seconds\n" + " caching is adaptive per file (when a file\n" + " is found, it starts at 0 and while the file\n" + " remains open, it is incremented up to the\n" + " max given)\n" + " (default is %d seconds)\n" + "\n" +#endif + " version or -v or -V print program version and exit\n" + "\n" + , VERSION +#ifndef __FreeBSD__ + , max_fds_cache_seconds +#endif + ); + exit(1); + } + + error("Cannot understand option %s", argv[i]); + exit(1); + } + + if(freq > 0) update_every = freq; + + if(read_apps_groups_conf(user_config_dir, "groups")) { + info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'", user_config_dir, stock_config_dir); + + if(read_apps_groups_conf(stock_config_dir, "groups")) { + error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", stock_config_dir); + exit(1); + } + else + info("Loaded config file '%s/apps_groups.conf'", stock_config_dir); + } + else + info("Loaded config file '%s/apps_groups.conf'", user_config_dir); +} + +static int am_i_running_as_root() { + uid_t uid = getuid(), euid = geteuid(); + + if(uid == 0 || euid == 0) { + if(debug_enabled) info("I am running with escalated privileges, uid = %u, euid = %u.", uid, euid); + return 1; + } + + if(debug_enabled) info("I am not running with escalated privileges, uid = %u, euid = %u.", uid, euid); + return 0; +} + +#ifdef HAVE_CAPABILITY +static int check_capabilities() { + cap_t caps = cap_get_proc(); + if(!caps) { + error("Cannot get current capabilities."); + return 0; + } + else if(debug_enabled) + info("Received my capabilities from the system."); + + int ret = 1; + + cap_flag_value_t cfv = CAP_CLEAR; + if(cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) { + error("Cannot find if CAP_DAC_READ_SEARCH is effective."); + ret = 0; + } + else { + if(cfv != CAP_SET) { + error("apps.plugin should run with CAP_DAC_READ_SEARCH."); + ret = 0; + } + else if(debug_enabled) + info("apps.plugin runs with CAP_DAC_READ_SEARCH."); + } + + cfv = CAP_CLEAR; + if(cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &cfv) == -1) { + error("Cannot find if CAP_SYS_PTRACE is effective."); + ret = 0; + } + else { + if(cfv != CAP_SET) { + error("apps.plugin should run with CAP_SYS_PTRACE."); + ret = 0; + } + else if(debug_enabled) + info("apps.plugin runs with CAP_SYS_PTRACE."); + } + + cap_free(caps); + + return ret; +} +#else +static int check_capabilities() { + return 0; +} +#endif + +int main(int argc, char **argv) { + // debug_flags = D_PROCFILE; + + pagesize = (size_t)sysconf(_SC_PAGESIZE); + + // set the name for logging + program_name = "apps.plugin"; + + // disable syslog for apps.plugin + error_log_syslog = 0; + + // set errors flood protection to 100 logs per hour + error_log_errors_per_period = 100; + error_log_throttle_period = 3600; + + // since apps.plugin runs as root, prevent it from opening symbolic links + procfile_open_flags = O_RDONLY|O_NOFOLLOW; + + netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); + if(verify_netdata_host_prefix() == -1) exit(1); + + user_config_dir = getenv("NETDATA_USER_CONFIG_DIR"); + if(user_config_dir == NULL) { + // info("NETDATA_CONFIG_DIR is not passed from netdata"); + user_config_dir = CONFIG_DIR; + } + // else info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir); + + stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR"); + if(stock_config_dir == NULL) { + // info("NETDATA_CONFIG_DIR is not 
passed from netdata"); + stock_config_dir = LIBCONFIG_DIR; + } + // else info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir); + +#ifdef NETDATA_INTERNAL_CHECKS + if(debug_flags != 0) { + struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; + if(setrlimit(RLIMIT_CORE, &rl) != 0) + info("Cannot request unlimited core dumps for debugging... Proceeding anyway..."); +#ifdef HAVE_SYS_PRCTL_H + prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); +#endif + } +#endif /* NETDATA_INTERNAL_CHECKS */ + + procfile_adaptive_initial_allocation = 1; + + time_t started_t = now_monotonic_sec(); + get_system_HZ(); + get_system_pid_max(); + get_system_cpus(); + + parse_args(argc, argv); + + if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) { + uid_t uid = getuid(), euid = geteuid(); +#ifdef HAVE_CAPABILITY + error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " + "Without these, apps.plugin cannot report disk I/O utilization of other processes. " + "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; " + "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " + , uid, euid, argv[0], argv[0], argv[0] + ); +#else + error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " + "Without these, apps.plugin cannot report disk I/O utilization of other processes. " + "Your system does not support capabilities. " + "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " + , uid, euid, argv[0], argv[0] + ); +#endif + } + + info("started on pid %d", getpid()); + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max); +#endif + + all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max); + + usec_t step = update_every * USEC_PER_SEC; + global_iterations_counter = 1; + heartbeat_t hb; + heartbeat_init(&hb); + for(;1; global_iterations_counter++) { + +#ifdef NETDATA_PROFILING +#warning "compiling for profiling" + static int profiling_count=0; + profiling_count++; + if(unlikely(profiling_count > 2000)) exit(0); + usec_t dt = update_every * USEC_PER_SEC; +#else + usec_t dt = heartbeat_next(&hb, step); +#endif + + if(!collect_data_for_all_processes()) { + error("Cannot collect /proc data for running processes. 
Disabling apps.plugin..."); + printf("DISABLE\n"); + exit(1); + } + + calculate_netdata_statistics(); + normalize_utilization(apps_groups_root_target); + + send_resource_usage_to_netdata(dt); + + // this is smart enough to show only newly added apps, when needed + send_charts_updates_to_netdata(apps_groups_root_target, "apps", "Apps"); + + if(likely(enable_users_charts)) + send_charts_updates_to_netdata(users_root_target, "users", "Users"); + + if(likely(enable_groups_charts)) + send_charts_updates_to_netdata(groups_root_target, "groups", "User Groups"); + + send_collected_data_to_netdata(apps_groups_root_target, "apps", dt); + + if(likely(enable_users_charts)) + send_collected_data_to_netdata(users_root_target, "users", dt); + + if(likely(enable_groups_charts)) + send_collected_data_to_netdata(groups_root_target, "groups", dt); + + fflush(stdout); + + show_guest_time_old = show_guest_time; + + debug_log("done Loop No %zu", global_iterations_counter); + + // restart check (14400 seconds) + if(now_monotonic_sec() - started_t > 14400) exit(0); + } +} diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am new file mode 100644 index 000000000..eb3214ab2 --- /dev/null +++ b/collectors/cgroups.plugin/Makefile.am @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + cgroup-name.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_plugins_SCRIPTS = \ + cgroup-name.sh \ + cgroup-network-helper.sh \ + $(NULL) + +dist_noinst_DATA = \ + cgroup-name.sh.in \ + README.md \ + $(NULL) diff --git a/collectors/cgroups.plugin/Makefile.in b/collectors/cgroups.plugin/Makefile.in new file mode 100644 index 000000000..49c3c9834 --- /dev/null +++ b/collectors/cgroups.plugin/Makefile.in @@ -0,0 +1,563 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ + $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ + $(dist_noinst_DATA) +subdir = collectors/cgroups.plugin +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list 
= \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pluginsdir)" +SCRIPTS = $(dist_plugins_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ 
+abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + cgroup-name.sh \ + $(NULL) + +SUFFIXES = .in +dist_plugins_SCRIPTS = \ + cgroup-name.sh \ + cgroup-network-helper.sh \ + $(NULL) + +dist_noinst_DATA = \ + cgroup-name.sh.in \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pluginsdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_pluginsSCRIPTS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_pluginsSCRIPTS + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_pluginsSCRIPTS install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-dist_pluginsSCRIPTS + +.in: + if sed \ + -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ + -e 's#[@]sbindir_POST@#$(sbindir)#g' \ + -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ + -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ + $< > $@.tmp; then \ + mv "$@.tmp" "$@"; \ + else \ + rm -f "$@.tmp"; \ + false; \ + fi + +# Tell 
versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
new file mode 100644
index 000000000..e78aa0440
--- /dev/null
+++ b/collectors/cgroups.plugin/README.md
@@ -0,0 +1,187 @@
+# cgroups.plugin
+
+You can monitor containers and virtual machines using **cgroups**.
+
+cgroups (or control groups) are a Linux kernel feature that provides accounting and resource usage limiting for processes. When cgroups are bundled with namespaces (i.e. isolation), they form what we usually call **containers**.
+
+cgroups are hierarchical, meaning that cgroups can contain child cgroups, which can contain more cgroups, etc. All accounting is reported (and resource usage limits are applied) hierarchically as well.
+
+To visualize cgroup metrics, netdata provides configuration for cherry-picking the cgroups of interest. By default (without any configuration) netdata should pick **systemd services**, all kinds of **containers** (lxc, docker, etc) and **virtual machines** spawned by managers that register them with cgroups (qemu, libvirt, etc).
+
+## configuring netdata for cgroups
+
+For each cgroup available in the system, netdata provides this configuration:
+
+```
+[plugin:cgroups]
+    enable cgroup XXX = yes | no
+```
+
+But it also provides a few patterns to provide a sane default (`yes` or `no`).
+
+Below we see how this works.
+
+### how netdata finds the available cgroups
+
+Linux exposes resource usage reporting and provides dynamic configuration for cgroups, using virtual files (usually) under `/sys/fs/cgroup`. netdata reads `/proc/self/mountinfo` to detect the exact mount point of cgroups. netdata also allows manual configuration of this mount point, using these settings:
+
+```
+[plugin:cgroups]
+    check for new cgroups every = 10
+    path to /sys/fs/cgroup/cpuacct = /sys/fs/cgroup/cpuacct
+    path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio
+    path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
+    path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
+```
+
+netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
+
+### hierarchical search for cgroups
+
+Since cgroups are hierarchical, for each of the directories shown above, netdata walks through the subdirectories recursively searching for cgroups (each subdirectory is another cgroup).
+
+For each of the directories found, netdata provides a configuration variable:
+
+```
+[plugin:cgroups]
+    search for cgroups under PATH = yes | no
+```
+
+To provide a sane default for this setting, netdata uses the following pattern list (patterns starting with `!` give a negative match and their order is important: the first matching a path will be used):
+
+```
+[plugin:cgroups]
+    search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
+```
+
+So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by netdata](https://github.com/netdata/netdata/wiki/monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled.
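+
+To make the first-match-wins behavior concrete, here is a rough shell sketch (illustration only: `match_path` and its output format are invented for this example; netdata evaluates these pattern lists internally, not via a script):
+
+```sh
+#!/usr/bin/env bash
+# approximate netdata's simple patterns with bash globs (illustration only)
+patterns=( '!*/init.scope' '!*-qemu' '!/init.scope' '!/system' '!/systemd' '!/user' '!/user.slice' '*' )
+
+match_path() {
+    local path="${1}" pattern
+    for pattern in "${patterns[@]}"
+    do
+        if [ "${pattern:0:1}" = "!" ]
+        then
+            # negative pattern: if it is the first to match, the path is excluded
+            [[ "${path}" == ${pattern:1} ]] && { echo "'${path}' not searched (matched '${pattern}')"; return 1; }
+        else
+            [[ "${path}" == ${pattern} ]] && { echo "'${path}' searched (matched '${pattern}')"; return 0; }
+        fi
+    done
+}
+
+match_path "/system"        # not searched (matched '!/system')
+match_path "/docker"        # searched (matched '*')
+match_path "/user.slice"    # not searched (matched '!/user.slice')
+```
+
+The same first-match-wins logic applies to the other pattern lists on this page.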
+
+
+### enabled cgroups
+
+To check if the cgroup is enabled, netdata uses this setting:
+
+```
+[plugin:cgroups]
+    enable cgroup NAME = yes | no
+```
+
+To provide a sane default, netdata uses the following pattern list (it checks the pattern against the path of the cgroup):
+
+```
+[plugin:cgroups]
+    enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user *
+```
+
+The above provides the default `yes` or `no` setting for the cgroup. However, there is an additional step. In many cases the cgroups found in the `/sys/fs/cgroup` hierarchy are just random numbers, and these numbers are often ephemeral: they change across reboots or sessions.
+
+So, we need to somehow map the paths of the cgroups to names, to provide consistent netdata configuration (i.e. there is no point in saying `enable cgroup 1234 = yes | no` if `1234` is a random number that changes over time - we need a name for the cgroup first, so that `enable cgroup NAME = yes | no` will be consistent).
+
+For this mapping, netdata provides two configuration options:
+
+```
+[plugin:cgroups]
+    run script to rename cgroups matching = *.scope *docker* *lxc* *qemu* !/ !*.mount !*.partition !*.service !*.slice !*.swap !*.user *
+    script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh
+```
+
+The whole point of the additional pattern list is to limit the number of times the script will be called. Without this pattern list, the script might be called thousands of times, depending on the number of cgroups available in the system.
+
+The above pattern list is matched against the path of the cgroup. For matched cgroups, netdata calls the script [cgroup-name.sh](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/cgroup-name.sh.in) to get its name. This script queries `docker` or applies heuristics to give a name to the cgroup.
+
+## Monitoring systemd services
+
+netdata monitors **systemd services**. Example:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21964372/20cd7b84-db53-11e6-98a2-b9c986b082c0.png)
+
+Support per distribution:
+
+system|systemd services charts shown|`tree` `/sys/fs/cgroup`|comments
+:-------:|:-------:|:-------:|:------------
+Arch Linux|YES| |
+Gentoo|NO| |can be enabled, see below
+Ubuntu 16.04 LTS|YES| |
+Ubuntu 16.10|YES|[here](http://pastebin.com/PiWbQEXy)|
+Fedora 25|YES|[here](http://pastebin.com/ax0373wF)|
+Debian 8|NO| |can be enabled, see below
+AMI|NO|[here](http://pastebin.com/FrxmptjL)|not a systemd system
+CentOS 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below
+
+#### how to enable cgroup accounting on systemd systems where it is disabled by default
+
+You can verify that no accounting is enabled by running `systemd-cgtop`. The program will show resources only for cgroup ` / `, while all services will show nothing.
+
+To enable cgroup accounting, execute this:
+
+```sh
+sed -e 's|^#Default\(.*\)Accounting=.*$|Default\1Accounting=yes|g' /etc/systemd/system.conf >/tmp/system.conf
+```
+
+To see the changes it made, run this:
+
+```
+# diff /etc/systemd/system.conf /tmp/system.conf
+40,44c40,44
+< #DefaultCPUAccounting=no
+< #DefaultIOAccounting=no
+< #DefaultBlockIOAccounting=no
+< #DefaultMemoryAccounting=no
+< #DefaultTasksAccounting=yes
+---
+> DefaultCPUAccounting=yes
+> DefaultIOAccounting=yes
+> DefaultBlockIOAccounting=yes
+> DefaultMemoryAccounting=yes
+> DefaultTasksAccounting=yes
+```
+
+If you are happy with the changes, run:
+
+```sh
+# copy the file to the right location
+sudo cp /tmp/system.conf /etc/systemd/system.conf
+
+# restart systemd to take it into account
+sudo systemctl daemon-reexec
+```
+
+(`systemctl daemon-reload` does not reload the configuration of the server - so you have to execute `systemctl daemon-reexec`).
+
+Now, when you run `systemd-cgtop`, services will start reporting usage (if they do not, restart a service - any service - to wake them up). Refresh your netdata dashboard, and you will have the charts too.
+
+In case memory accounting is missing, you will need to enable it in your kernel by appending the following kernel boot options and rebooting:
+
+```
+cgroup_enable=memory swapaccount=1
+```
+
+You can add the above directly to the `linux` line in your `/boot/grub/grub.cfg`, or append them to `GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting). On DigitalOcean Debian images you may have to set it in `/etc/default/grub.d/50-cloudimg-settings.cfg`.
+
+---
+
+## Monitoring ephemeral containers
+
+netdata monitors containers automatically when it is installed on the host, or when it is installed in a container that has access to the `/proc` and `/sys` filesystems of the host.
+
+netdata prior to v1.6 had two issues when such containers were monitored:
+
+1. network interface alarms were triggering when containers were stopped
+
+2. charts were never cleaned up, so after some time dozens of containers were showing up on the dashboard, and they were occupying memory.
+
+
+### the current netdata
+
+network interfaces and cgroups (containers) are now self-cleaned.
+
+So, when a network interface or container stops, netdata might log a few errors in error.log complaining about files it cannot find, but immediately:
+
+1. it will detect this is a removed container or network interface
+2. it will freeze/pause all alarms for them
+3. it will mark their charts as obsolete
+4. obsolete charts are not offered on new dashboard sessions (so hit F5 and the charts are gone)
+5. existing dashboard sessions will continue to see them, but of course they will not refresh
+6.
obsolete charts will be removed from memory, 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` (at netdata.conf). +7. when obsolete charts are removed from memory they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`) + diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh new file mode 100644 index 000000000..6bf8b8b03 --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-name.sh @@ -0,0 +1,196 @@ +#!/usr/bin/env bash + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Script to find a better name for cgroups +# + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + +# ----------------------------------------------------------------------------- + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=0 +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- + +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" + +DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}" +CGROUP="${1}" +NAME= + +# ----------------------------------------------------------------------------- + +if [ -z "${CGROUP}" ] + then + fatal "called without a cgroup name. Nothing to do." +fi + +for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf" +do + if [ -f "${CONFIG}" ] + then + NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)" + if [ -z "${NAME}" ] + then + info "cannot find cgroup '${CGROUP}' in '${CONFIG}'." + else + break + fi + #else + # info "configuration file '${CONFIG}' is not available." + fi +done + +function docker_get_name_classic { + local id="${1}" + info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\"" + NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )" + return 0 +} + +function docker_get_name_api { + local id="${1}" + if [ ! -S "${DOCKER_HOST}" ] + then + warning "Can't find ${DOCKER_HOST}" + return 1 + fi + info "Running API command: /containers/${id}/json" + JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*') + NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') + return 0 +} + +function docker_get_name { + local id="${1}" + if hash docker 2>/dev/null + then + docker_get_name_classic "${id}" + else + docker_get_name_api "${id}" || docker_get_name_classic "${id}" + fi + if [ -z "${NAME}" ] + then + warning "cannot find the name of docker container '${id}'" + NAME="${id:0:12}" + else + info "docker container '${id}' is named '${NAME}'" + fi +} + +if [ -z "${NAME}" ] + then + if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]] + then + # docker containers + + DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )" + # echo "DOCKERID=${DOCKERID}" + + if [ ! 
-z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] + then + docker_get_name "${DOCKERID}" + else + error "a docker id cannot be extracted from docker cgroup '${CGROUP}'." + fi + elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]] + then + # kubernetes + + DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )" + # echo "DOCKERID=${DOCKERID}" + + if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] + then + docker_get_name "${DOCKERID}" + else + error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'." + fi + elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]] + then + # systemd-nspawn + + NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')" + + elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]] + then + # libvirtd / qemu virtual machines + + # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')" + NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')" + + elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]] + then + # libvirtd / qemu virtual machines + NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')" + + elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]] + then + # Proxmox VMs + + FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" + if [[ -f $FILENAME && -r $FILENAME ]] + then + NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')" + else + error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." + fi + elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]] + then + # Proxmox Containers (LXC) + + FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf" + if [[ -f ${FILENAME} && -r ${FILENAME} ]] + then + NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p') + else + error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." + fi + fi + + [ -z "${NAME}" ] && NAME="${CGROUP}" + [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}" +fi + +info "cgroup '${CGROUP}' is called '${NAME}'" +echo "${NAME}" diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in new file mode 100755 index 000000000..53696a4bf --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-name.sh.in @@ -0,0 +1,196 @@ +#!/usr/bin/env bash + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2016 Costa Tsaousis +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Script to find a better name for cgroups +# + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + +# ----------------------------------------------------------------------------- + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=0 +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- + +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" + +DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}" +CGROUP="${1}" +NAME= + +# ----------------------------------------------------------------------------- + +if [ -z "${CGROUP}" ] + then + fatal "called without a cgroup name. Nothing to do." +fi + +for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf" +do + if [ -f "${CONFIG}" ] + then + NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)" + if [ -z "${NAME}" ] + then + info "cannot find cgroup '${CGROUP}' in '${CONFIG}'." + else + break + fi + #else + # info "configuration file '${CONFIG}' is not available." + fi +done + +function docker_get_name_classic { + local id="${1}" + info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\"" + NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )" + return 0 +} + +function docker_get_name_api { + local id="${1}" + if [ ! -S "${DOCKER_HOST}" ] + then + warning "Can't find ${DOCKER_HOST}" + return 1 + fi + info "Running API command: /containers/${id}/json" + JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*') + NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') + return 0 +} + +function docker_get_name { + local id="${1}" + if hash docker 2>/dev/null + then + docker_get_name_classic "${id}" + else + docker_get_name_api "${id}" || docker_get_name_classic "${id}" + fi + if [ -z "${NAME}" ] + then + warning "cannot find the name of docker container '${id}'" + NAME="${id:0:12}" + else + info "docker container '${id}' is named '${NAME}'" + fi +} + +if [ -z "${NAME}" ] + then + if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]] + then + # docker containers + + DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )" + # echo "DOCKERID=${DOCKERID}" + + if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] + then + docker_get_name "${DOCKERID}" + else + error "a docker id cannot be extracted from docker cgroup '${CGROUP}'." + fi + elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]] + then + # kubernetes + + DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )" + # echo "DOCKERID=${DOCKERID}" + + if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] + then + docker_get_name "${DOCKERID}" + else + error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'." 
+ fi + elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]] + then + # systemd-nspawn + + NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')" + + elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]] + then + # libvirtd / qemu virtual machines + + # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')" + NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')" + + elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]] + then + # libvirtd / qemu virtual machines + NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')" + + elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]] + then + # Proxmox VMs + + FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" + if [[ -f $FILENAME && -r $FILENAME ]] + then + NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')" + else + error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." + fi + elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]] + then + # Proxmox Containers (LXC) + + FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf" + if [[ -f ${FILENAME} && -r ${FILENAME} ]] + then + NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p') + else + error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." + fi + fi + + [ -z "${NAME}" ] && NAME="${CGROUP}" + [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}" +fi + +info "cgroup '${CGROUP}' is called '${NAME}'" +echo "${NAME}" diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh new file mode 100755 index 000000000..666f02fc8 --- /dev/null +++ b/collectors/cgroups.plugin/cgroup-network-helper.sh @@ -0,0 +1,258 @@ +#!/usr/bin/env bash +# shellcheck disable=SC1117 + +# cgroup-network-helper.sh +# detect container and virtual machine interfaces +# +# (C) 2017 Costa Tsaousis +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This script is called as root (by cgroup-network), with either a pid, or a cgroup path. +# It tries to find all the network interfaces that belong to the same cgroup. +# +# It supports several method for this detection: +# +# 1. cgroup-network (the binary father of this script) detects veth network interfaces, +# by examining iflink and ifindex IDs and switching namespaces +# (it also detects the interface name as it is used by the container). +# +# 2. this script, uses /proc/PID/fdinfo to find tun/tap network interfaces. +# +# 3. this script, calls virsh to find libvirt network interfaces. 
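+#
+# Illustrative invocations (the pid and the cgroup path below are made up):
+#
+#    cgroup-network-helper.sh --pid 1234
+#    cgroup-network-helper.sh --cgroup /sys/fs/cgroup/memory/machine.slice/machine-qemu\x2d1\x2dweb.scope
+#
+# Detected interface names are printed to stdout; log messages go to stderr.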
+# + +# ----------------------------------------------------------------------------- + +# the system path is cleared by cgroup-network +# shellcheck source=/dev/null +[ -f /etc/profile ] && source /etc/profile + +export LC_ALL=C + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=0 +debug() { + [ "${debug}" = "1" ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- +# check for BASH v4+ (required for associative arrays) + +[ $(( BASH_VERSINFO[0] )) -lt 4 ] && \ + fatal "BASH version 4 or later is required (this is ${BASH_VERSION})." + +# ----------------------------------------------------------------------------- +# parse the arguments + +pid= +cgroup= +while [ ! -z "${1}" ] +do + case "${1}" in + --cgroup) cgroup="${2}"; shift 1;; + --pid|-p) pid="${2}"; shift 1;; + --debug|debug) debug=1;; + *) fatal "Cannot understand argument '${1}'";; + esac + + shift +done + +if [ -z "${pid}" ] && [ -z "${cgroup}" ] +then + fatal "Either --pid or --cgroup is required" +fi + +# ----------------------------------------------------------------------------- + +set_source() { + [ ${debug} -eq 1 ] && echo "SRC ${*}" +} + + +# ----------------------------------------------------------------------------- +# veth interfaces via cgroup + +# cgroup-network can detect veth interfaces by itself (written in C). +# If you seek for a shell version of what it does, check this: +# https://github.com/netdata/netdata/issues/474#issuecomment-317866709 + + +# ----------------------------------------------------------------------------- +# tun/tap interfaces via /proc/PID/fdinfo + +# find any tun/tap devices linked to a pid +proc_pid_fdinfo_iff() { + local p="${1}" # the pid + + debug "Searching for tun/tap interfaces for pid ${p}..." + set_source "fdinfo" + grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2 +} + +find_tun_tap_interfaces_for_cgroup() { + local c="${1}" # the cgroup path + + # for each pid of the cgroup + # find any tun/tap devices linked to the pid + if [ -f "${c}/emulator/cgroup.procs" ] + then + local p + for p in $(< "${c}/emulator/cgroup.procs" ) + do + proc_pid_fdinfo_iff "${p}" + done + fi +} + + +# ----------------------------------------------------------------------------- +# virsh domain network interfaces + +virsh_cgroup_to_domain_name() { + local c="${1}" # the cgroup path + + debug "extracting a possible virsh domain from cgroup ${c}..." 
+ + # extract for the cgroup path + sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \ + -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \ + < +#endif + +char environment_variable2[FILENAME_MAX + 50] = ""; +char *environment[] = { + "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin", + environment_variable2, + NULL +}; + + +// ---------------------------------------------------------------------------- + +// callback required by fatal() +void netdata_cleanup_and_exit(int ret) { + exit(ret); +} + +// callbacks required by popen() +void signals_block(void) {}; +void signals_unblock(void) {}; +void signals_reset(void) {}; + +// callback required by eval() +int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) { + (void)variable; + (void)hash; + (void)rc; + (void)result; + return 0; +}; + +// required by get_system_cpus() +char *netdata_configured_host_prefix = ""; + +// ---------------------------------------------------------------------------- + +struct iface { + const char *device; + uint32_t hash; + + unsigned int ifindex; + unsigned int iflink; + + struct iface *next; +}; + +unsigned int read_iface_iflink(const char *prefix, const char *iface) { + if(!prefix) prefix = ""; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix, iface); + + unsigned long long iflink = 0; + int ret = read_single_number_file(filename, &iflink); + if(ret) error("Cannot read '%s'.", filename); + + return (unsigned int)iflink; +} + +unsigned int read_iface_ifindex(const char *prefix, const char *iface) { + if(!prefix) prefix = ""; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix, iface); + + unsigned long long ifindex = 0; + int ret = read_single_number_file(filename, &ifindex); + if(ret) error("Cannot read '%s'.", filename); + + return (unsigned int)ifindex; +} + +struct iface *read_proc_net_dev(const char *prefix) { + if(!prefix) prefix = ""; + + procfile *ff = NULL; + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev"); + ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + error("Cannot open file '%s'", filename); + return NULL; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) { + error("Cannot read file '%s'", filename); + return NULL; + } + + size_t lines = procfile_lines(ff), l; + struct iface *root = NULL; + for(l = 2; l < lines ;l++) { + if (unlikely(procfile_linewords(ff, l) < 1)) continue; + + struct iface *t = callocz(1, sizeof(struct iface)); + t->device = strdupz(procfile_lineword(ff, l, 0)); + t->hash = simple_hash(t->device); + t->ifindex = read_iface_ifindex(prefix, t->device); + t->iflink = read_iface_iflink(prefix, t->device); + t->next = root; + root = t; + } + + procfile_close(ff); + + return root; +} + +void free_iface(struct iface *iface) { + freez((void *)iface->device); + freez(iface); +} + +void free_host_ifaces(struct iface *iface) { + while(iface) { + struct iface *t = iface->next; + free_iface(iface); + iface = t; + } +} + +int iface_is_eligible(struct iface *iface) { + if(iface->iflink != iface->ifindex) + return 1; + + return 0; +} + +int eligible_ifaces(struct iface *root) { + int eligible = 0; + + struct iface *t; + for(t = root; t ; t = t->next) + if(iface_is_eligible(t)) + eligible++; + + return eligible; +} + +static void continue_as_child(void) { + 
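+    // setns(CLONE_NEWPID) does not move the calling process into the target
+    // pid namespace; only children created after the call enter it. So this
+    // helper forks, and the parent simply mirrors the child's stop/continue
+    // state, its exit code, or the signal that killed it.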
pid_t child = fork(); + int status; + pid_t ret; + + if (child < 0) + error("fork() failed"); + + /* Only the child returns */ + if (child == 0) + return; + + for (;;) { + ret = waitpid(child, &status, WUNTRACED); + if ((ret == child) && (WIFSTOPPED(status))) { + /* The child suspended so suspend us as well */ + kill(getpid(), SIGSTOP); + kill(child, SIGCONT); + } else { + break; + } + } + + /* Return the child's exit code if possible */ + if (WIFEXITED(status)) { + exit(WEXITSTATUS(status)); + } else if (WIFSIGNALED(status)) { + kill(getpid(), WTERMSIG(status)); + } + + exit(EXIT_FAILURE); +} + +int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) { + if(!prefix) prefix = ""; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix, (int)pid, ns); + int fd = open(filename, O_RDONLY); + + if(fd == -1) + error("Cannot open proc_pid_fd() file '%s'", filename); + + return fd; +} + +static struct ns { + int nstype; + int fd; + int status; + const char *name; + const char *path; +} all_ns[] = { + // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" }, + // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" }, + // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" }, + // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" }, + { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" }, + { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" }, + { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" }, + + // terminator + { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL } +}; + +int switch_namespace(const char *prefix, pid_t pid) { + if(!prefix) prefix = ""; + +#ifdef HAVE_SETNS + + int i; + for(i = 0; all_ns[i].name ; i++) + all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid); + + int root_fd = proc_pid_fd(prefix, "root", pid); + int cwd_fd = proc_pid_fd(prefix, "cwd", pid); + + setgroups(0, NULL); + + // 2 passes - found it at nsenter source code + // this is related CLONE_NEWUSER functionality + + // This code cannot switch user namespace (it can all the other namespaces) + // Fortunately, we don't need to switch user namespaces. 
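+    //
+    // A setns() that fails in the first pass may succeed in the second pass,
+    // after other namespaces have been joined; so errors are reported only
+    // when the second pass also fails.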
+ + int pass, errors = 0; + for(pass = 0; pass < 2 ;pass++) { + for(i = 0; all_ns[i].name ; i++) { + if (all_ns[i].fd != -1 && all_ns[i].status == -1) { + if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) { + if(pass == 1) { + all_ns[i].status = 0; + error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid); + errors++; + } + } + else + all_ns[i].status = 1; + } + } + } + + setgroups(0, NULL); + + if(root_fd != -1) { + if(fchdir(root_fd) < 0) + error("Cannot fchdir() to pid %d root directory", (int)pid); + + if(chroot(".") < 0) + error("Cannot chroot() to pid %d root directory", (int)pid); + + close(root_fd); + } + + if(cwd_fd != -1) { + if(fchdir(cwd_fd) < 0) + error("Cannot fchdir() to pid %d current working directory", (int)pid); + + close(cwd_fd); + } + + int do_fork = 0; + for(i = 0; all_ns[i].name ; i++) + if(all_ns[i].fd != -1) { + + // CLONE_NEWPID requires a fork() to become effective + if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status) + do_fork = 1; + + close(all_ns[i].fd); + } + + if(do_fork) + continue_as_child(); + + return 0; + +#else + + errno = ENOSYS; + error("setns() is missing on this system."); + return 1; + +#endif +} + +pid_t read_pid_from_cgroup_file(const char *filename) { + int fd = open(filename, procfile_open_flags); + if(fd == -1) { + error("Cannot open pid_from_cgroup() file '%s'.", filename); + return 0; + } + + FILE *fp = fdopen(fd, "r"); + if(!fp) { + error("Cannot upgrade fd to fp for file '%s'.", filename); + return 0; + } + + char buffer[100 + 1]; + pid_t pid = 0; + char *s; + while((s = fgets(buffer, 100, fp))) { + buffer[100] = '\0'; + pid = atoi(s); + if(pid > 0) break; + } + + fclose(fp); + return pid; +} + +pid_t read_pid_from_cgroup_files(const char *path) { + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path); + pid_t pid = read_pid_from_cgroup_file(filename); + if(pid > 0) return pid; + + snprintfz(filename, FILENAME_MAX, "%s/tasks", path); + return read_pid_from_cgroup_file(filename); +} + +pid_t read_pid_from_cgroup(const char *path) { + pid_t pid = read_pid_from_cgroup_files(path); + if (pid > 0) return pid; + + DIR *dir = opendir(path); + if (!dir) { + error("cannot read directory '%s'", path); + return 0; + } + + struct dirent *de = NULL; + while ((de = readdir(dir))) { + if (de->d_type == DT_DIR + && ( + (de->d_name[0] == '.' && de->d_name[1] == '\0') + || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0')
+            ))
+            continue;
+
+        if (de->d_type == DT_DIR) {
+            char filename[FILENAME_MAX + 1];
+            snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+            pid = read_pid_from_cgroup(filename);
+            if(pid > 0) break;
+        }
+    }
+    closedir(dir);
+    return pid;
+}
+
+// ----------------------------------------------------------------------------
+// send the result to netdata
+
+struct found_device {
+    const char *host_device;
+    const char *guest_device;
+
+    uint32_t host_device_hash;
+
+    struct found_device *next;
+} *detected_devices = NULL;
+
+void add_device(const char *host, const char *guest) {
+    uint32_t hash = simple_hash(host);
+
+    if(guest && (!*guest || strcmp(host, guest) == 0))
+        guest = NULL;
+
+    struct found_device *f;
+    for(f = detected_devices; f ; f = f->next) {
+        if(f->host_device_hash == hash && strcmp(host, f->host_device) == 0) {
+
+            if(guest && !f->guest_device)
+                f->guest_device = strdupz(guest);
+
+            return;
+        }
+    }
+
+    f = mallocz(sizeof(struct found_device));
+    f->host_device = strdupz(host);
+    f->host_device_hash = hash;
+    f->guest_device = (guest)?strdupz(guest):NULL;
+    f->next = detected_devices;
+    detected_devices = f;
+}
+
+int send_devices(void) {
+    int found = 0;
+
+    struct found_device *f;
+    for(f = detected_devices; f ; f = f->next) {
+        found++;
+        printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device);
+    }
+
+    return found;
+}
+
+// ----------------------------------------------------------------------------
+// this function should be called only **ONCE**, and it has to be the **LAST**
+// one called, since it switches namespaces: after this call, everything is
+// different!
+
+void detect_veth_interfaces(pid_t pid) {
+    struct iface *host = NULL, *cgroup = NULL, *h, *c;
+
+    host = read_proc_net_dev(netdata_configured_host_prefix);
+    if(!host) {
+        errno = 0;
+        error("cannot read host interface list.");
+        goto cleanup;
+    }
+
+    if(!eligible_ifaces(host)) {
+        errno = 0;
+        error("there are no double-linked host interfaces available.");
+        goto cleanup;
+    }
+
+    if(switch_namespace(netdata_configured_host_prefix, pid)) {
+        errno = 0;
+        error("cannot switch to the namespace of pid %u", (unsigned int) pid);
+        goto cleanup;
+    }
+
+    cgroup = read_proc_net_dev(NULL);
+    if(!cgroup) {
+        errno = 0;
+        error("cannot read cgroup interface list.");
+        goto cleanup;
+    }
+
+    if(!eligible_ifaces(cgroup)) {
+        errno = 0;
+        error("there are no double-linked cgroup interfaces available.");
+        goto cleanup;
+    }
+
+    for(h = host; h ; h = h->next) {
+        if(iface_is_eligible(h)) {
+            for (c = cgroup; c; c = c->next) {
+                if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
+                    add_device(h->device, c->device);
+                }
+            }
+        }
+    }
+
+cleanup:
+    free_host_ifaces(cgroup);
+    free_host_ifaces(host);
+}
+
+// ----------------------------------------------------------------------------
+// call the external helper
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+void call_the_helper(pid_t pid, const char *cgroup) {
+    if(setresuid(0, 0, 0) == -1)
+        error("setresuid(0, 0, 0) failed.");
+
+    char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+    if(cgroup)
+        snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
+    else
+        snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
+
+    info("running: %s", command);
+
+    pid_t cgroup_pid;
+    FILE *fp = mypopene(command, &cgroup_pid, environment);
+    if(fp) {
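The matching rule used by detect_veth_interfaces() above is worth spelling out: the two ends of a veth pair cross-reference each other through ifindex/iflink. A sketch of just that check, using a hypothetical minimal interface type (the plugin's real struct iface is filled in by read_proc_net_dev(), defined earlier in this file):

/* hypothetical stand-in for the plugin's struct iface */
struct iface_example {
    const char *device;
    unsigned int ifindex;   /* this end's own interface index */
    unsigned int iflink;    /* interface index of its peer */
};

/* each side's iflink must point at the other side's ifindex, in both directions */
static int is_veth_pair(const struct iface_example *host, const struct iface_example *guest) {
    return host->ifindex == guest->iflink
        && host->iflink  == guest->ifindex;
}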
+        char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+        char *s;
+        while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+            trim(s);
+
+            if(*s && *s != '\n') {
+                char *t = s;
+                while(*t && *t != ' ') t++;
+                if(*t == ' ') {
+                    *t = '\0';
+                    t++;
+                }
+
+                if(!*s || !*t) continue;
+                add_device(s, t);
+            }
+        }
+
+        mypclose(fp, cgroup_pid);
+    }
+    else
+        error("cannot execute cgroup-network helper script: %s", command);
+}
+
+int is_valid_path_symbol(char c) {
+    switch(c) {
+        case '/':   // path separators
+        case '\\':  // needed for virsh domains \x2d1\x2dname
+        case ' ':   // space
+        case '-':   // hyphen
+        case '_':   // underscore
+        case '.':   // dot
+        case ',':   // comma
+            return 1;
+
+        default:
+            return 0;
+    }
+}
+
+// we will pass this path to a shell script running as root,
+// so we need to make sure the path is valid and does not
+// include anything that could allow the caller to use shell
+// expansion to gain escalated privileges.
+int verify_path(const char *path) {
+    struct stat sb;
+
+    char c;
+    const char *s = path;
+    while((c = *s++)) {
+        if(!( isalnum(c) || is_valid_path_symbol(c) )) {
+            error("invalid character in path '%s'", path);
+            return -1;
+        }
+    }
+
+    if(strstr(path, "\\") && !strstr(path, "\\x")) {
+        error("invalid escape sequence in path '%s'", path);
+        return -1;
+    }
+
+    if(strstr(path, "/../")) {
+        error("invalid parent path sequence detected in '%s'", path);
+        return -1;
+    }
+
+    if(path[0] != '/') {
+        error("only absolute path names are supported - invalid path '%s'", path);
+        return -1;
+    }
+
+    if (stat(path, &sb) == -1) {
+        error("cannot stat() path '%s'", path);
+        return -1;
+    }
+
+    if((sb.st_mode & S_IFMT) != S_IFDIR) {
+        error("path '%s' is not a directory", path);
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+char *fix_path_variable(void) {
+    const char *path = getenv("PATH");
+    if(!path || !*path) return 0;
+
+    char *p = strdupz(path);
+    char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1);
+    strcpy(safe_path, "PATH=");
+
+    int added = 0;
+    char *ptr = p;
+    while(ptr && *ptr) {
+        char *s = strsep(&ptr, ":");
+        if(s && *s) {
+            if(verify_path(s) == -1) {
+                error("the PATH variable includes an invalid path '%s' - removed it.", s);
+            }
+            else {
+                info("the PATH variable includes a valid path '%s'.", s);
+                if(added) strcat(safe_path, ":");
+                strcat(safe_path, s);
+                added++;
+            }
+        }
+    }
+
+    info("unsafe PATH: '%s'.", path);
+    info("  safe PATH: '%s'.", safe_path);
+
+    freez(p);
+    return safe_path;
+}
+*/
+
+// ----------------------------------------------------------------------------
+// main
+
+void usage(void) {
+    fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
+    exit(1);
+}
+
+int main(int argc, char **argv) {
+    pid_t pid = 0;
+
+    program_name = argv[0];
+    program_version = VERSION;
+    error_log_syslog = 0;
+
+    // since cgroup-network runs as root, prevent it from opening symbolic links
+    procfile_open_flags = O_RDONLY|O_NOFOLLOW;
+
+    // ------------------------------------------------------------------------
+    // make sure NETDATA_HOST_PREFIX is safe
+
+    netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+    if(verify_netdata_host_prefix() == -1) exit(1);
+
+    if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
+        fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
+
+    // ------------------------------------------------------------------------
+    // build a safe environment for our script
+
+    // the first environment variable is a fixed
PATH= + snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix); + + // ------------------------------------------------------------------------ + + if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) { + fprintf(stderr, "cgroup-network %s\n", VERSION); + exit(0); + } + + if(argc != 3) + usage(); + + if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) { + pid = atoi(argv[2]); + + if(pid <= 0) { + errno = 0; + error("Invalid pid %d given", (int) pid); + return 2; + } + + call_the_helper(pid, NULL); + } + else if(!strcmp(argv[1], "--cgroup")) { + char *cgroup = argv[2]; + if(verify_path(cgroup) == -1) + fatal("cgroup '%s' does not exist or is not valid.", cgroup); + + pid = read_pid_from_cgroup(cgroup); + call_the_helper(pid, cgroup); + + if(pid <= 0 && !detected_devices) { + errno = 0; + error("Cannot find a cgroup PID from cgroup '%s'", cgroup); + } + } + else + usage(); + + if(pid > 0) + detect_veth_interfaces(pid); + + int found = send_devices(); + if(found <= 0) return 1; + return 0; +} diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c new file mode 100644 index 000000000..9c0fd7f43 --- /dev/null +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -0,0 +1,2771 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sys_fs_cgroup.h" + +#define PLUGIN_CGROUPS_NAME "cgroups.plugin" +#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd" +#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup" + +// ---------------------------------------------------------------------------- +// cgroup globals + +static long system_page_size = 4096; // system will be queried via sysconf() in configuration() + +static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO; +static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO; + +static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES; +static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO; +static int cgroup_used_memory_without_cache = CONFIG_BOOLEAN_YES; + +static int cgroup_search_in_devices = 1; + +static int cgroup_enable_new_cgroups_detected_at_runtime = 1; +static int cgroup_check_for_new_every = 10; +static int cgroup_update_every = 1; + +static int cgroup_recheck_zero_blkio_every_iterations = 10; +static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10; +static int cgroup_recheck_zero_mem_detailed_every_iterations = 10; + +static char *cgroup_cpuacct_base = NULL; +static char *cgroup_blkio_base = NULL; +static char *cgroup_memory_base = NULL; +static char *cgroup_devices_base = NULL; + +static int cgroup_root_count = 0; +static int cgroup_root_max = 1000; +static int cgroup_max_depth = 0; + +static SIMPLE_PATTERN *enabled_cgroup_patterns = NULL; +static 
SIMPLE_PATTERN *enabled_cgroup_paths = NULL; +static SIMPLE_PATTERN *enabled_cgroup_renames = NULL; +static SIMPLE_PATTERN *systemd_services_cgroups = NULL; + +static char *cgroups_rename_script = NULL; +static char *cgroups_network_interface_script = NULL; + +static int cgroups_check = 0; + +static uint32_t Read_hash = 0; +static uint32_t Write_hash = 0; +static uint32_t user_hash = 0; +static uint32_t system_hash = 0; + +void read_cgroup_plugin_configuration() { + system_page_size = sysconf(_SC_PAGESIZE); + + Read_hash = simple_hash("Read"); + Write_hash = simple_hash("Write"); + user_hash = simple_hash("user"); + system_hash = simple_hash("system"); + + cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every); + if(cgroup_update_every < localhost->rrd_update_every) + cgroup_update_every = localhost->rrd_update_every; + + cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", (long long)cgroup_check_for_new_every * (long long)cgroup_update_every); + if(cgroup_check_for_new_every < cgroup_update_every) + cgroup_check_for_new_every = cgroup_update_every; + + cgroup_enable_cpuacct_stat = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct stat (total CPU)", cgroup_enable_cpuacct_stat); + cgroup_enable_cpuacct_usage = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct usage (per core CPU)", cgroup_enable_cpuacct_usage); + + cgroup_enable_memory = config_get_boolean_ondemand("plugin:cgroups", "enable memory (used mem including cache)", cgroup_enable_memory); + cgroup_enable_detailed_memory = config_get_boolean_ondemand("plugin:cgroups", "enable detailed memory", cgroup_enable_detailed_memory); + cgroup_enable_memory_failcnt = config_get_boolean_ondemand("plugin:cgroups", "enable memory limits fail count", cgroup_enable_memory_failcnt); + cgroup_enable_swap = config_get_boolean_ondemand("plugin:cgroups", "enable swap memory", cgroup_enable_swap); + + cgroup_enable_blkio_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio bandwidth", cgroup_enable_blkio_io); + cgroup_enable_blkio_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio operations", cgroup_enable_blkio_ops); + cgroup_enable_blkio_throttle_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle bandwidth", cgroup_enable_blkio_throttle_io); + cgroup_enable_blkio_throttle_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle operations", cgroup_enable_blkio_throttle_ops); + cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops); + cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops); + + cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations); + cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations); + cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations); + + cgroup_enable_systemd_services = config_get_boolean("plugin:cgroups", "enable systemd services", cgroup_enable_systemd_services); + 
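The *_ondemand settings read above are tri-state: yes, no, or auto, where auto keeps a metric disabled until the first nonzero sample is seen. A sketch of that latch with illustrative constants (the real CONFIG_BOOLEAN_* values live in netdata's config headers):

enum example_boolean { EX_BOOLEAN_NO = 0, EX_BOOLEAN_YES = 1, EX_BOOLEAN_AUTO = 2 }; /* illustrative values */

static void latch_on_first_sample(int *enabled, unsigned long long value) {
    /* 'auto' flips to 'yes' on the first nonzero sample and stays there,
       which is how the cgroup_read_*() readers below behave */
    if(*enabled == EX_BOOLEAN_AUTO && value)
        *enabled = EX_BOOLEAN_YES;
}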
cgroup_enable_systemd_services_detailed_memory = config_get_boolean("plugin:cgroups", "enable systemd services detailed memory", cgroup_enable_systemd_services_detailed_memory); + cgroup_used_memory_without_cache = config_get_boolean("plugin:cgroups", "report used memory without cache", cgroup_used_memory_without_cache); + + char filename[FILENAME_MAX + 1], *s; + struct mountinfo *mi, *root = mountinfo_read(0); + + mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct"); + if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct"); + if(!mi) { + error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct"); + s = "/sys/fs/cgroup/cpuacct"; + } + else s = mi->mount_point; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); + cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename); + + mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio"); + if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio"); + if(!mi) { + error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio"); + s = "/sys/fs/cgroup/blkio"; + } + else s = mi->mount_point; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); + cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename); + + mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory"); + if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory"); + if(!mi) { + error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory"); + s = "/sys/fs/cgroup/memory"; + } + else s = mi->mount_point; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); + cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename); + + mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices"); + if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices"); + if(!mi) { + error("CGROUP: cannot find devices mountinfo. 
Assuming default: /sys/fs/cgroup/devices"); + s = "/sys/fs/cgroup/devices"; + } + else s = mi->mount_point; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); + cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename); + + cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max); + cgroup_max_depth = (int)config_get_number("plugin:cgroups", "max cgroups depth to monitor", cgroup_max_depth); + + cgroup_enable_new_cgroups_detected_at_runtime = config_get_boolean("plugin:cgroups", "enable new cgroups detected at run time", cgroup_enable_new_cgroups_detected_at_runtime); + + enabled_cgroup_patterns = simple_pattern_create( + config_get("plugin:cgroups", "enable by default cgroups matching", + // ---------------------------------------------------------------- + + " !*/init.scope " // ignore init.scope + " !/system.slice/run-*.scope " // ignore system.slice/run-XXXX.scope + " *.scope " // we need all other *.scope for sure + + // ---------------------------------------------------------------- + + " /machine.slice/*.service " // #3367 systemd-nspawn + + // ---------------------------------------------------------------- + + " !*/vcpu* " // libvirtd adds these sub-cgroups + " !*/emulator " // libvirtd adds these sub-cgroups + " !*.mount " + " !*.partition " + " !*.service " + " !*.socket " + " !*.slice " + " !*.swap " + " !*.user " + " !/ " + " !/docker " + " !/libvirt " + " !/lxc " + " !/lxc/*/* " // #1397 #2649 + " !/machine " + " !/qemu " + " !/system " + " !/systemd " + " !/user " + " * " // enable anything else + ), NULL, SIMPLE_PATTERN_EXACT); + + enabled_cgroup_paths = simple_pattern_create( + config_get("plugin:cgroups", "search for cgroups in subpaths matching", + " !*/init.scope " // ignore init.scope + " !*-qemu " // #345 + " !*.libvirt-qemu " // #3010 + " !/init.scope " + " !/system " + " !/systemd " + " !/user " + " !/user.slice " + " !/lxc/*/* " // #2161 #2649 + " * " + ), NULL, SIMPLE_PATTERN_EXACT); + + snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_plugins_dir); + cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename); + + snprintfz(filename, FILENAME_MAX, "%s/cgroup-network", netdata_configured_plugins_dir); + cgroups_network_interface_script = config_get("plugin:cgroups", "script to get cgroup network interfaces", filename); + + enabled_cgroup_renames = simple_pattern_create( + config_get("plugin:cgroups", "run script to rename cgroups matching", + " !/ " + " !*.mount " + " !*.socket " + " !*.partition " + " /machine.slice/*.service " // #3367 systemd-nspawn + " !*.service " + " !*.slice " + " !*.swap " + " !*.user " + " !init.scope " + " !*.scope/vcpu* " // libvirtd adds these sub-cgroups + " !*.scope/emulator " // libvirtd adds these sub-cgroups + " *.scope " + " *docker* " + " *lxc* " + " *qemu* " + " *kubepods* " // #3396 kubernetes + " *.libvirt-qemu " // #3010 + " * " + ), NULL, SIMPLE_PATTERN_EXACT); + + if(cgroup_enable_systemd_services) { + systemd_services_cgroups = simple_pattern_create( + config_get("plugin:cgroups", "cgroups to match as systemd services", + " !/system.slice/*/*.service " + " /system.slice/*.service " + ), NULL, SIMPLE_PATTERN_EXACT); + } + + mountinfo_free_all(root); +} + +// ---------------------------------------------------------------------------- +// cgroup objects + +struct blkio { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int 
delay_counter; + + char *filename; + + unsigned long long Read; + unsigned long long Write; +/* + unsigned long long Sync; + unsigned long long Async; + unsigned long long Total; +*/ +}; + +// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt +struct memory { + ARL_BASE *arl_base; + ARL_ENTRY *arl_dirty; + ARL_ENTRY *arl_swap; + + int updated_detailed; + int updated_usage_in_bytes; + int updated_msw_usage_in_bytes; + int updated_failcnt; + + int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + int delay_counter_detailed; + int delay_counter_failcnt; + + char *filename_detailed; + char *filename_usage_in_bytes; + char *filename_msw_usage_in_bytes; + char *filename_failcnt; + + int detailed_has_dirty; + int detailed_has_swap; + + // detailed metrics + unsigned long long cache; + unsigned long long rss; + unsigned long long rss_huge; + unsigned long long mapped_file; + unsigned long long writeback; + unsigned long long dirty; + unsigned long long swap; + unsigned long long pgpgin; + unsigned long long pgpgout; + unsigned long long pgfault; + unsigned long long pgmajfault; +/* + unsigned long long inactive_anon; + unsigned long long active_anon; + unsigned long long inactive_file; + unsigned long long active_file; + unsigned long long unevictable; + unsigned long long hierarchical_memory_limit; + unsigned long long total_cache; + unsigned long long total_rss; + unsigned long long total_rss_huge; + unsigned long long total_mapped_file; + unsigned long long total_writeback; + unsigned long long total_dirty; + unsigned long long total_swap; + unsigned long long total_pgpgin; + unsigned long long total_pgpgout; + unsigned long long total_pgfault; + unsigned long long total_pgmajfault; + unsigned long long total_inactive_anon; + unsigned long long total_active_anon; + unsigned long long total_inactive_file; + unsigned long long total_active_file; + unsigned long long total_unevictable; +*/ + + // single file metrics + unsigned long long usage_in_bytes; + unsigned long long msw_usage_in_bytes; + unsigned long long failcnt; +}; + +// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt +struct cpuacct_stat { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + char *filename; + + unsigned long long user; + unsigned long long system; +}; + +// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt +struct cpuacct_usage { + int updated; + int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO + + char *filename; + + unsigned int cpus; + unsigned long long *cpu_percpu; +}; + +struct cgroup_network_interface { + const char *host_device; + const char *container_device; + struct cgroup_network_interface *next; +}; + +#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001 +#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002 + +struct cgroup { + uint32_t options; + + char available; // found in the filesystem + char enabled; // enabled in the config + + char *id; + uint32_t hash; + + char *chart_id; + uint32_t hash_chart; + + char *chart_title; + + struct cpuacct_stat cpuacct_stat; + struct cpuacct_usage cpuacct_usage; + + struct memory memory; + + struct blkio io_service_bytes; // bytes + struct blkio io_serviced; // operations + + struct blkio throttle_io_service_bytes; // bytes + struct blkio 
throttle_io_serviced; // operations + + struct blkio io_merged; // operations + struct blkio io_queued; // operations + + struct cgroup_network_interface *interfaces; + + // per cgroup charts + RRDSET *st_cpu; + RRDSET *st_cpu_per_core; + RRDSET *st_mem; + RRDSET *st_writeback; + RRDSET *st_mem_activity; + RRDSET *st_pgfaults; + RRDSET *st_mem_usage; + RRDSET *st_mem_failcnt; + RRDSET *st_io; + RRDSET *st_serviced_ops; + RRDSET *st_throttle_io; + RRDSET *st_throttle_serviced_ops; + RRDSET *st_queued_ops; + RRDSET *st_merged_ops; + + // services + RRDDIM *rd_cpu; + RRDDIM *rd_mem_usage; + RRDDIM *rd_mem_failcnt; + RRDDIM *rd_swap_usage; + + RRDDIM *rd_mem_detailed_cache; + RRDDIM *rd_mem_detailed_rss; + RRDDIM *rd_mem_detailed_mapped; + RRDDIM *rd_mem_detailed_writeback; + RRDDIM *rd_mem_detailed_pgpgin; + RRDDIM *rd_mem_detailed_pgpgout; + RRDDIM *rd_mem_detailed_pgfault; + RRDDIM *rd_mem_detailed_pgmajfault; + + RRDDIM *rd_io_service_bytes_read; + RRDDIM *rd_io_serviced_read; + RRDDIM *rd_throttle_io_read; + RRDDIM *rd_throttle_io_serviced_read; + RRDDIM *rd_io_queued_read; + RRDDIM *rd_io_merged_read; + + RRDDIM *rd_io_service_bytes_write; + RRDDIM *rd_io_serviced_write; + RRDDIM *rd_throttle_io_write; + RRDDIM *rd_throttle_io_serviced_write; + RRDDIM *rd_io_queued_write; + RRDDIM *rd_io_merged_write; + + struct cgroup *next; + +} *cgroup_root = NULL; + +// ---------------------------------------------------------------------------- +// read values from /sys + +static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) { + static procfile *ff = NULL; + + if(likely(cp->filename)) { + ff = procfile_reopen(ff, cp->filename, NULL, PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + cp->updated = 0; + cgroups_check = 1; + return; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) { + cp->updated = 0; + cgroups_check = 1; + return; + } + + unsigned long i, lines = procfile_lines(ff); + + if(unlikely(lines < 1)) { + error("CGROUP: file '%s' should have 1+ lines.", cp->filename); + cp->updated = 0; + return; + } + + for(i = 0; i < lines ; i++) { + char *s = procfile_lineword(ff, i, 0); + uint32_t hash = simple_hash(s); + + if(unlikely(hash == user_hash && !strcmp(s, "user"))) + cp->user = str2ull(procfile_lineword(ff, i, 1)); + + else if(unlikely(hash == system_hash && !strcmp(s, "system"))) + cp->system = str2ull(procfile_lineword(ff, i, 1)); + } + + cp->updated = 1; + + if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system))) + cp->enabled = CONFIG_BOOLEAN_YES; + } +} + +static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) { + static procfile *ff = NULL; + + if(likely(ca->filename)) { + ff = procfile_reopen(ff, ca->filename, NULL, PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + ca->updated = 0; + cgroups_check = 1; + return; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) { + ca->updated = 0; + cgroups_check = 1; + return; + } + + if(unlikely(procfile_lines(ff) < 1)) { + error("CGROUP: file '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff)); + ca->updated = 0; + return; + } + + unsigned long i = procfile_linewords(ff, 0); + if(unlikely(i == 0)) { + ca->updated = 0; + return; + } + + // we may have 1 more CPU reported + while(i > 0) { + char *s = procfile_lineword(ff, 0, i - 1); + if(!*s) i--; + else break; + } + + if(unlikely(i != ca->cpus)) { + freez(ca->cpu_percpu); + ca->cpu_percpu = mallocz(sizeof(unsigned long long) * i); + ca->cpus = (unsigned int)i; + } + + unsigned long long total = 0; + for(i = 0; i < 
ca->cpus ;i++) { + unsigned long long n = str2ull(procfile_lineword(ff, 0, i)); + ca->cpu_percpu[i] = n; + total += n; + } + + ca->updated = 1; + + if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && total)) + ca->enabled = CONFIG_BOOLEAN_YES; + } +} + +static inline void cgroup_read_blkio(struct blkio *io) { + if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) { + io->delay_counter--; + return; + } + + if(likely(io->filename)) { + static procfile *ff = NULL; + + ff = procfile_reopen(ff, io->filename, NULL, PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + io->updated = 0; + cgroups_check = 1; + return; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) { + io->updated = 0; + cgroups_check = 1; + return; + } + + unsigned long i, lines = procfile_lines(ff); + + if(unlikely(lines < 1)) { + error("CGROUP: file '%s' should have 1+ lines.", io->filename); + io->updated = 0; + return; + } + + io->Read = 0; + io->Write = 0; +/* + io->Sync = 0; + io->Async = 0; + io->Total = 0; +*/ + + for(i = 0; i < lines ; i++) { + char *s = procfile_lineword(ff, i, 1); + uint32_t hash = simple_hash(s); + + if(unlikely(hash == Read_hash && !strcmp(s, "Read"))) + io->Read += str2ull(procfile_lineword(ff, i, 2)); + + else if(unlikely(hash == Write_hash && !strcmp(s, "Write"))) + io->Write += str2ull(procfile_lineword(ff, i, 2)); + +/* + else if(unlikely(hash == Sync_hash && !strcmp(s, "Sync"))) + io->Sync += str2ull(procfile_lineword(ff, i, 2)); + + else if(unlikely(hash == Async_hash && !strcmp(s, "Async"))) + io->Async += str2ull(procfile_lineword(ff, i, 2)); + + else if(unlikely(hash == Total_hash && !strcmp(s, "Total"))) + io->Total += str2ull(procfile_lineword(ff, i, 2)); +*/ + } + + io->updated = 1; + + if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) { + if(unlikely(io->Read || io->Write)) + io->enabled = CONFIG_BOOLEAN_YES; + else + io->delay_counter = cgroup_recheck_zero_blkio_every_iterations; + } + } +} + +static inline void cgroup_read_memory(struct memory *mem) { + static procfile *ff = NULL; + + // read detailed ram usage + if(likely(mem->filename_detailed)) { + if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO && mem->delay_counter_detailed > 0)) { + mem->delay_counter_detailed--; + goto memory_next; + } + + ff = procfile_reopen(ff, mem->filename_detailed, NULL, PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + mem->updated_detailed = 0; + cgroups_check = 1; + goto memory_next; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) { + mem->updated_detailed = 0; + cgroups_check = 1; + goto memory_next; + } + + unsigned long i, lines = procfile_lines(ff); + + if(unlikely(lines < 1)) { + error("CGROUP: file '%s' should have 1+ lines.", mem->filename_detailed); + mem->updated_detailed = 0; + goto memory_next; + } + + if(unlikely(!mem->arl_base)) { + mem->arl_base = arl_create("cgroup/memory", NULL, 60); + + arl_expect(mem->arl_base, "cache", &mem->cache); + arl_expect(mem->arl_base, "rss", &mem->rss); + arl_expect(mem->arl_base, "rss_huge", &mem->rss_huge); + arl_expect(mem->arl_base, "mapped_file", &mem->mapped_file); + arl_expect(mem->arl_base, "writeback", &mem->writeback); + mem->arl_dirty = arl_expect(mem->arl_base, "dirty", &mem->dirty); + mem->arl_swap = arl_expect(mem->arl_base, "swap", &mem->swap); + arl_expect(mem->arl_base, "pgpgin", &mem->pgpgin); + arl_expect(mem->arl_base, "pgpgout", &mem->pgpgout); + arl_expect(mem->arl_base, "pgfault", &mem->pgfault); + arl_expect(mem->arl_base, "pgmajfault", &mem->pgmajfault); + } + + arl_begin(mem->arl_base); + + for(i = 0; 
i < lines ; i++) { + if(arl_check(mem->arl_base, + procfile_lineword(ff, i, 0), + procfile_lineword(ff, i, 1))) break; + } + + if(unlikely(mem->arl_dirty->flags & ARL_ENTRY_FLAG_FOUND)) + mem->detailed_has_dirty = 1; + + if(unlikely(mem->arl_swap->flags & ARL_ENTRY_FLAG_FOUND)) + mem->detailed_has_swap = 1; + + // fprintf(stderr, "READ: '%s', cache: %llu, rss: %llu, rss_huge: %llu, mapped_file: %llu, writeback: %llu, dirty: %llu, swap: %llu, pgpgin: %llu, pgpgout: %llu, pgfault: %llu, pgmajfault: %llu, inactive_anon: %llu, active_anon: %llu, inactive_file: %llu, active_file: %llu, unevictable: %llu, hierarchical_memory_limit: %llu, total_cache: %llu, total_rss: %llu, total_rss_huge: %llu, total_mapped_file: %llu, total_writeback: %llu, total_dirty: %llu, total_swap: %llu, total_pgpgin: %llu, total_pgpgout: %llu, total_pgfault: %llu, total_pgmajfault: %llu, total_inactive_anon: %llu, total_active_anon: %llu, total_inactive_file: %llu, total_active_file: %llu, total_unevictable: %llu\n", mem->filename, mem->cache, mem->rss, mem->rss_huge, mem->mapped_file, mem->writeback, mem->dirty, mem->swap, mem->pgpgin, mem->pgpgout, mem->pgfault, mem->pgmajfault, mem->inactive_anon, mem->active_anon, mem->inactive_file, mem->active_file, mem->unevictable, mem->hierarchical_memory_limit, mem->total_cache, mem->total_rss, mem->total_rss_huge, mem->total_mapped_file, mem->total_writeback, mem->total_dirty, mem->total_swap, mem->total_pgpgin, mem->total_pgpgout, mem->total_pgfault, mem->total_pgmajfault, mem->total_inactive_anon, mem->total_active_anon, mem->total_inactive_file, mem->total_active_file, mem->total_unevictable); + + mem->updated_detailed = 1; + + if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO)) { + if(mem->cache || mem->dirty || mem->rss || mem->rss_huge || mem->mapped_file || mem->writeback || mem->swap || mem->pgpgin || mem->pgpgout || mem->pgfault || mem->pgmajfault) + mem->enabled_detailed = CONFIG_BOOLEAN_YES; + else + mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations; + } + } + +memory_next: + + // read usage_in_bytes + if(likely(mem->filename_usage_in_bytes)) { + mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes); + if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->usage_in_bytes)) + mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES; + } + + // read msw_usage_in_bytes + if(likely(mem->filename_msw_usage_in_bytes)) { + mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes); + if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->msw_usage_in_bytes)) + mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES; + } + + // read failcnt + if(likely(mem->filename_failcnt)) { + if(unlikely(mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO && mem->delay_counter_failcnt > 0)) { + mem->updated_failcnt = 0; + mem->delay_counter_failcnt--; + } + else { + mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt); + if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) { + if(unlikely(!mem->failcnt)) + mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations; + else + mem->enabled_failcnt = CONFIG_BOOLEAN_YES; + } + } + } +} + +static inline void cgroup_read(struct cgroup *cg) { + debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id); + + 
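cgroup_read_memory() above leans on netdata's ARL (Adaptive Re-sortable List) to pick named fields out of memory.stat with very few string comparisons; stripped of that machinery, the parse is plain key/value scanning. A self-contained sketch reading just two of the fields documented in Documentation/cgroup-v1/memory.txt:

#include <stdio.h>
#include <string.h>

/* read the 'cache' and 'rss' fields of a cgroup v1 memory.stat; 0 on success */
static int read_memory_stat(const char *path, unsigned long long *cache, unsigned long long *rss) {
    FILE *fp = fopen(path, "r");
    if(!fp)
        return -1;

    char key[64];
    unsigned long long value;
    while(fscanf(fp, "%63s %llu", key, &value) == 2) {
        if(!strcmp(key, "cache"))    *cache = value;
        else if(!strcmp(key, "rss")) *rss   = value;
    }

    fclose(fp);
    return 0;
}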
cgroup_read_cpuacct_stat(&cg->cpuacct_stat); + cgroup_read_cpuacct_usage(&cg->cpuacct_usage); + cgroup_read_memory(&cg->memory); + cgroup_read_blkio(&cg->io_service_bytes); + cgroup_read_blkio(&cg->io_serviced); + cgroup_read_blkio(&cg->throttle_io_service_bytes); + cgroup_read_blkio(&cg->throttle_io_serviced); + cgroup_read_blkio(&cg->io_merged); + cgroup_read_blkio(&cg->io_queued); +} + +static inline void read_all_cgroups(struct cgroup *root) { + debug(D_CGROUP, "reading metrics for all cgroups"); + + struct cgroup *cg; + + for(cg = root; cg ; cg = cg->next) + if(cg->enabled && cg->available) + cgroup_read(cg); +} + +// ---------------------------------------------------------------------------- +// cgroup network interfaces + +#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 +static inline void read_cgroup_network_interfaces(struct cgroup *cg) { + debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title); + + pid_t cgroup_pid; + char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; + + snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec %s --cgroup '%s%s'", cgroups_network_interface_script, cgroup_cpuacct_base, cg->id); + + debug(D_CGROUP, "executing command '%s' for cgroup '%s'", command, cg->id); + FILE *fp = mypopen(command, &cgroup_pid); + if(!fp) { + error("CGROUP: cannot popen(\"%s\", \"r\").", command); + return; + } + + char *s; + char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; + while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) { + trim(s); + + if(*s && *s != '\n') { + char *t = s; + while(*t && *t != ' ') t++; + if(*t == ' ') { + *t = '\0'; + t++; + } + + if(!*s) { + error("CGROUP: empty host interface returned by script"); + continue; + } + + if(!*t) { + error("CGROUP: empty guest interface returned by script"); + continue; + } + + struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface)); + i->host_device = strdupz(s); + i->container_device = strdupz(t); + i->next = cg->interfaces; + cg->interfaces = i; + + info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device); + + // register a device rename to proc_net_dev.c + netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id); + } + } + + mypclose(fp, cgroup_pid); + // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id); +} + +static inline void free_cgroup_network_interfaces(struct cgroup *cg) { + while(cg->interfaces) { + struct cgroup_network_interface *i = cg->interfaces; + cg->interfaces = i->next; + + // delete the registration of proc_net_dev rename + netdev_rename_device_del(i->host_device); + + freez((void *)i->host_device); + freez((void *)i->container_device); + freez((void *)i); + } +} + +// ---------------------------------------------------------------------------- +// add/remove/find cgroup objects + +#define CGROUP_CHARTID_LINE_MAX 1024 + +static inline char *cgroup_title_strdupz(const char *s) { + if(!s || !*s) s = "/"; + + if(*s == '/' && s[1] != '\0') s++; + + char *r = strdupz(s); + netdata_fix_chart_name(r); + + return r; +} + +static inline char *cgroup_chart_id_strdupz(const char *s) { + if(!s || !*s) s = "/"; + + if(*s == '/' && s[1] != '\0') s++; + + char *r = strdupz(s); + netdata_fix_chart_id(r); + + return r; +} + +static inline void cgroup_get_chart_name(struct cgroup *cg) { + debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, 
cg->chart_title);
+
+    pid_t cgroup_pid;
+    char command[CGROUP_CHARTID_LINE_MAX + 1];
+
+    snprintfz(command, CGROUP_CHARTID_LINE_MAX, "exec %s '%s' '%s'", cgroups_rename_script, cg->chart_id, cg->id);
+
+    debug(D_CGROUP, "executing command \"%s\" for cgroup '%s'", command, cg->id);
+    FILE *fp = mypopen(command, &cgroup_pid);
+    if(fp) {
+        // debug(D_CGROUP, "reading from command '%s' for cgroup '%s'", command, cg->id);
+        char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+        char *s = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp);
+        // debug(D_CGROUP, "closing command for cgroup '%s'", cg->id);
+        mypclose(fp, cgroup_pid);
+        // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);
+
+        if(s && *s && *s != '\n') {
+            debug(D_CGROUP, "cgroup '%s' should be renamed to '%s'", cg->id, s);
+
+            trim(s);
+
+            freez(cg->chart_title);
+            cg->chart_title = cgroup_title_strdupz(s);
+
+            freez(cg->chart_id);
+            cg->chart_id = cgroup_chart_id_strdupz(s);
+            cg->hash_chart = simple_hash(cg->chart_id);
+        }
+    }
+    else
+        error("CGROUP: cannot popen(\"%s\", \"r\").", command);
+}
+
+static inline struct cgroup *cgroup_add(const char *id) {
+    if(!id || !*id) id = "/";
+    debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
+
+    if(cgroup_root_count >= cgroup_root_max) {
+        info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id);
+        return NULL;
+    }
+
+    int def = simple_pattern_matches(enabled_cgroup_patterns, id)?cgroup_enable_new_cgroups_detected_at_runtime:0;
+    struct cgroup *cg = callocz(1, sizeof(struct cgroup));
+
+    cg->id = strdupz(id);
+    cg->hash = simple_hash(cg->id);
+
+    cg->chart_title = cgroup_title_strdupz(id);
+
+    cg->chart_id = cgroup_chart_id_strdupz(id);
+    cg->hash_chart = simple_hash(cg->chart_id);
+
+    if(!cgroup_root)
+        cgroup_root = cg;
+    else {
+        // append it
+        struct cgroup *e;
+        for(e = cgroup_root; e->next ;e = e->next) ;
+        e->next = cg;
+    }
+
+    cgroup_root_count++;
+
+    // fix the chart_id and title by calling the external script
+    if(simple_pattern_matches(enabled_cgroup_renames, cg->id)) {
+
+        cgroup_get_chart_name(cg);
+
+        debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+    }
+    else
+        debug(D_CGROUP, "cgroup '%s' will not be renamed - it matches the list of disabled cgroup renames (will be shown as '%s')", cg->id, cg->chart_id);
+
+    int user_configurable = 1;
+
+    // check if this cgroup should be a systemd service
+    if(cgroup_enable_systemd_services) {
+        if(simple_pattern_matches(systemd_services_cgroups, cg->id) ||
+           simple_pattern_matches(systemd_services_cgroups, cg->chart_id)) {
+            debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') matches systemd services cgroups", cg->id, cg->chart_id, cg->chart_title);
+
+            char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+            cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
+
+            strncpy(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);
+            buffer[CGROUP_CHARTID_LINE_MAX] = '\0'; // strncpy() does not terminate when the source is too long
+            char *s = buffer;
+
+            //freez(cg->chart_id);
+            //cg->chart_id = cgroup_chart_id_strdupz(s);
+            //cg->hash_chart = simple_hash(cg->chart_id);
+
+            // skip to the last slash
+            size_t len = strlen(s);
+            while(len--) if(unlikely(s[len] == '/')) break;
+            if(len) s = &s[len + 1];
+
+            // remove extension
+            len = strlen(s);
+            while(len--) if(unlikely(s[len] == '.')) break;
+            if(len) s[len] = '\0';
+
+            freez(cg->chart_title);
+            cg->chart_title = cgroup_title_strdupz(s);
+
+            cg->enabled = 1;
+            user_configurable = 0;
+
+            debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+        }
+        else
+            debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') does not match systemd services cgroups", cg->id, cg->chart_id, cg->chart_title);
+    }
+
+    if(user_configurable) {
+        // allow the user to enable/disable this cgroup individually
+        char option[FILENAME_MAX + 1];
+        snprintfz(option, FILENAME_MAX, "enable cgroup %s", cg->chart_title);
+        cg->enabled = (char) config_get_boolean("plugin:cgroups", option, def);
+    }
+
+    // detect duplicate cgroups
+    if(cg->enabled) {
+        struct cgroup *t;
+        for (t = cgroup_root; t; t = t->next) {
+            if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+                if (!strncmp(t->chart_id, "/system.slice/", 14) && !strncmp(cg->chart_id, "/init.scope/system.slice/", 25)) {
+                    error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+                          cg->chart_id, t->id, cg->id, t->id);
+                    debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+                          cg->chart_id, t->id, cg->id, t->id);
+                    t->enabled = 0;
+                    t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+                }
+                else {
+                    error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+                          cg->chart_id, t->id, cg->id);
+                    debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+                          cg->chart_id, t->id, cg->id);
+                    cg->enabled = 0;
+                    cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+                }
+
+                break;
+            }
+        }
+    }
+
+    if(cg->enabled && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))
+        read_cgroup_network_interfaces(cg);
+
+    debug(D_CGROUP, "ADDED CGROUP: '%s' with chart id '%s' and title '%s' as %s (default was %s)", cg->id, cg->chart_id, cg->chart_title, (cg->enabled)?"enabled":"disabled", (def)?"enabled":"disabled");
+
+    return cg;
+}
+
+static inline void cgroup_free(struct cgroup *cg) {
+    debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
+
+    if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu);
+    if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core);
+    if(cg->st_mem) rrdset_is_obsolete(cg->st_mem);
+    if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback);
+    if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity);
+    if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults);
+    if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage);
+    if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt);
+    if(cg->st_io) rrdset_is_obsolete(cg->st_io);
+    if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops);
+    if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io);
+    if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops);
+    if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops);
+    if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops);
+
+    free_cgroup_network_interfaces(cg);
+
+    freez(cg->cpuacct_usage.cpu_percpu);
+
+    freez(cg->cpuacct_stat.filename);
+    freez(cg->cpuacct_usage.filename);
+
+    arl_free(cg->memory.arl_base);
+    freez(cg->memory.filename_detailed);
+    freez(cg->memory.filename_failcnt);
+    freez(cg->memory.filename_usage_in_bytes);
+    freez(cg->memory.filename_msw_usage_in_bytes);
+
+    freez(cg->io_service_bytes.filename);
+    freez(cg->io_serviced.filename);
freez(cg->throttle_io_service_bytes.filename); + freez(cg->throttle_io_serviced.filename); + + freez(cg->io_merged.filename); + freez(cg->io_queued.filename); + + freez(cg->id); + freez(cg->chart_id); + freez(cg->chart_title); + + freez(cg); + + cgroup_root_count--; +} + +// find if a given cgroup exists +static inline struct cgroup *cgroup_find(const char *id) { + debug(D_CGROUP, "searching for cgroup '%s'", id); + + uint32_t hash = simple_hash(id); + + struct cgroup *cg; + for(cg = cgroup_root; cg ; cg = cg->next) { + if(hash == cg->hash && strcmp(id, cg->id) == 0) + break; + } + + debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found"); + return cg; +} + +// ---------------------------------------------------------------------------- +// detect running cgroups + +// callback for find_file_in_subdirs() +static inline void found_subdir_in_dir(const char *dir) { + debug(D_CGROUP, "examining cgroup dir '%s'", dir); + + struct cgroup *cg = cgroup_find(dir); + if(!cg) { + if(*dir && cgroup_max_depth > 0) { + int depth = 0; + const char *s; + + for(s = dir; *s ;s++) + if(unlikely(*s == '/')) + depth++; + + if(depth > cgroup_max_depth) { + info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth); + return; + } + } + // debug(D_CGROUP, "will add dir '%s' as cgroup", dir); + cg = cgroup_add(dir); + } + + if(cg) cg->available = 1; +} + +static inline int find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) { + if(!this) this = base; + debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base); + + size_t dirlen = strlen(this), baselen = strlen(base); + + int ret = -1; + int enabled = -1; + + const char *relative_path = &this[baselen]; + if(!*relative_path) relative_path = "/"; + + DIR *dir = opendir(this); + if(!dir) { + error("CGROUP: cannot read directory '%s'", base); + return ret; + } + ret = 1; + + callback(relative_path); + + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR + && ( + (de->d_name[0] == '.' && de->d_name[1] == '\0') + || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0')
+            ))
+            continue;
+
+        if(de->d_type == DT_DIR) {
+            if(enabled == -1) {
+                const char *r = relative_path;
+                if(*r == '\0') r = "/";
+
+                // do not descend into directories we are not interested in
+                int def = simple_pattern_matches(enabled_cgroup_paths, r);
+
+                // we check for this option here, so that the config
+                // will not have settings for leaf directories
+                char option[FILENAME_MAX + 1];
+                snprintfz(option, FILENAME_MAX, "search for cgroups under %s", r);
+                option[FILENAME_MAX] = '\0';
+                enabled = config_get_boolean("plugin:cgroups", option, def);
+            }
+
+            if(enabled) {
+                char *s = mallocz(dirlen + strlen(de->d_name) + 2);
+                strcpy(s, this);
+                strcat(s, "/");
+                strcat(s, de->d_name);
+                int ret2 = find_dir_in_subdirs(base, s, callback);
+                if(ret2 > 0) ret += ret2;
+                freez(s);
+            }
+        }
+    }
+
+    closedir(dir);
+    return ret;
+}
+
+static inline void mark_all_cgroups_as_not_available() {
+    debug(D_CGROUP, "marking all cgroups as not available");
+
+    struct cgroup *cg;
+
+    // mark all as not available
+    for(cg = cgroup_root; cg ; cg = cg->next) {
+        cg->available = 0;
+    }
+}
+
+static inline void cleanup_all_cgroups() {
+    struct cgroup *cg = cgroup_root, *last = NULL;
+
+    for(; cg ;) {
+        if(!cg->available) {
+            // enable the first duplicate cgroup
+            {
+                struct cgroup *t;
+                for(t = cgroup_root; t ; t = t->next) {
+                    if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+                        debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
+                        t->enabled = 1;
+                        t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
+                        break;
+                    }
+                }
+            }
+
+            if(!last)
+                cgroup_root = cg->next;
+            else
+                last->next = cg->next;
+
+            cgroup_free(cg);
+
+            if(!last)
+                cg = cgroup_root;
+            else
+                cg = last->next;
+        }
+        else {
+            last = cg;
+            cg = cg->next;
+        }
+    }
+}
+
+static inline void find_all_cgroups() {
+    debug(D_CGROUP, "searching for cgroups");
+
+    mark_all_cgroups_as_not_available();
+
+    if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
+        if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
+            cgroup_enable_cpuacct_stat =
+            cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
+            error("CGROUP: disabled cpu statistics.");
+        }
+    }
+
+    if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
+        if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) {
+            cgroup_enable_blkio_io =
+            cgroup_enable_blkio_ops =
+            cgroup_enable_blkio_throttle_io =
+            cgroup_enable_blkio_throttle_ops =
+            cgroup_enable_blkio_merged_ops =
+            cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
+            error("CGROUP: disabled blkio statistics.");
+        }
+    }
+
+    if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
+        if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) {
+            cgroup_enable_memory =
+            cgroup_enable_detailed_memory =
+            cgroup_enable_swap =
+            cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
+            error("CGROUP: disabled memory statistics.");
+        }
+    }
+
+    if(cgroup_search_in_devices) {
+        if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
+            cgroup_search_in_devices = 0;
+            error("CGROUP: disabled devices statistics.");
+        }
+    }
+
+    // remove any non-existing
cgroups + cleanup_all_cgroups(); + + struct cgroup *cg; + struct stat buf; + for(cg = cgroup_root; cg ; cg = cg->next) { + // fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name); + + if(unlikely(!cg->available)) + continue; + + debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id); + + // check for newly added cgroups + // and update the filenames they read + char filename[FILENAME_MAX + 1]; + if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->cpuacct_stat.filename = strdupz(filename); + cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat; + debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename); + } + else + debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) { + snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->cpuacct_usage.filename = strdupz(filename); + cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage; + debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename); + } + else + debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory_without_cache) && !cg->memory.filename_detailed && (cgroup_used_memory_without_cache || cgroup_enable_systemd_services_detailed_memory || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->memory.filename_detailed = strdupz(filename); + cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO; + debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed); + } + else + debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->memory.filename_usage_in_bytes = strdupz(filename); + cg->memory.enabled_usage_in_bytes = cgroup_enable_memory; + debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes); + } + else + debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.msw_usage_in_bytes", cgroup_memory_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->memory.filename_msw_usage_in_bytes = strdupz(filename); + cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap; + debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes); + } + else + debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, 
filename); + } + + if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) { + snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->memory.filename_failcnt = strdupz(filename); + cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt; + debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt); + } + else + debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->io_service_bytes.filename = strdupz(filename); + cg->io_service_bytes.enabled = cgroup_enable_blkio_io; + debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename); + } + else + debug(D_CGROUP, "io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->io_serviced.filename = strdupz(filename); + cg->io_serviced.enabled = cgroup_enable_blkio_ops; + debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename); + } + else + debug(D_CGROUP, "io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->throttle_io_service_bytes.filename = strdupz(filename); + cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io; + debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename); + } + else + debug(D_CGROUP, "throttle_io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->throttle_io_serviced.filename = strdupz(filename); + cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops; + debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename); + } + else + debug(D_CGROUP, "throttle_io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id); + if(likely(stat(filename, &buf) != -1)) { + cg->io_merged.filename = strdupz(filename); + cg->io_merged.enabled = cgroup_enable_blkio_merged_ops; + debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename); + } + else + debug(D_CGROUP, "io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + + if(unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) { + snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id); + 
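Every controller file above and below is discovered with the same probe-once pattern: build the candidate path, stat(2) it, and cache the path only if the file exists. A condensed standalone sketch of that pattern (the function name and buffer size are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* return a heap copy of "<base><id>/<file>" if it exists, else NULL */
static char *probe_controller_file(const char *base, const char *id, const char *file) {
    char path[4096];
    struct stat sb;

    snprintf(path, sizeof(path), "%s%s/%s", base, id, file);
    if(stat(path, &sb) == -1)
        return NULL;        /* not provided by this kernel or cgroup */

    return strdup(path);    /* cache it, so it is probed only once */
}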
if(likely(stat(filename, &buf) != -1)) { + cg->io_queued.filename = strdupz(filename); + cg->io_queued.enabled = cgroup_enable_blkio_queued_ops; + debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename); + } + else + debug(D_CGROUP, "io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename); + } + } + + debug(D_CGROUP, "done searching for cgroups"); +} + +// ---------------------------------------------------------------------------- +// generate charts + +#define CHART_TITLE_MAX 300 + +void update_systemd_services_charts( + int update_every + , int do_cpu + , int do_mem_usage + , int do_mem_detailed + , int do_mem_failcnt + , int do_swap_usage + , int do_io + , int do_io_ops + , int do_throttle_io + , int do_throttle_ops + , int do_queued_ops + , int do_merged_ops +) { + static RRDSET + *st_cpu = NULL, + *st_mem_usage = NULL, + *st_mem_failcnt = NULL, + *st_swap_usage = NULL, + + *st_mem_detailed_cache = NULL, + *st_mem_detailed_rss = NULL, + *st_mem_detailed_mapped = NULL, + *st_mem_detailed_writeback = NULL, + *st_mem_detailed_pgfault = NULL, + *st_mem_detailed_pgmajfault = NULL, + *st_mem_detailed_pgpgin = NULL, + *st_mem_detailed_pgpgout = NULL, + + *st_io_read = NULL, + *st_io_serviced_read = NULL, + *st_throttle_io_read = NULL, + *st_throttle_ops_read = NULL, + *st_queued_ops_read = NULL, + *st_merged_ops_read = NULL, + + *st_io_write = NULL, + *st_io_serviced_write = NULL, + *st_throttle_io_write = NULL, + *st_throttle_ops_write = NULL, + *st_queued_ops_write = NULL, + *st_merged_ops_write = NULL; + + // create the charts + + if(likely(do_cpu)) { + if(unlikely(!st_cpu)) { + char title[CHART_TITLE_MAX + 1]; + snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (%d%% = %d core%s)", (processors * 100), processors, (processors > 1) ? "s" : ""); + + st_cpu = rrdset_create_localhost( + "services" + , "cpu" + , NULL + , "cpu" + , "services.cpu" + , title + , "%" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_cpu); + } + + if(likely(do_mem_usage)) { + if(unlikely(!st_mem_usage)) { + + st_mem_usage = rrdset_create_localhost( + "services" + , "mem_usage" + , NULL + , "mem" + , "services.mem_usage" + , (cgroup_used_memory_without_cache) ? 
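+                    /* the title tracks the "used memory without cache"
+                     * configuration option: */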
"Systemd Services Used Memory without Cache" + : "Systemd Services Used Memory" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_usage); + } + + if(likely(do_mem_detailed)) { + if(unlikely(!st_mem_detailed_rss)) { + + st_mem_detailed_rss = rrdset_create_localhost( + "services" + , "mem_rss" + , NULL + , "mem" + , "services.mem_rss" + , "Systemd Services RSS Memory" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_rss); + + if(unlikely(!st_mem_detailed_mapped)) { + + st_mem_detailed_mapped = rrdset_create_localhost( + "services" + , "mem_mapped" + , NULL + , "mem" + , "services.mem_mapped" + , "Systemd Services Mapped Memory" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_mapped); + + if(unlikely(!st_mem_detailed_cache)) { + + st_mem_detailed_cache = rrdset_create_localhost( + "services" + , "mem_cache" + , NULL + , "mem" + , "services.mem_cache" + , "Systemd Services Cache Memory" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_cache); + + if(unlikely(!st_mem_detailed_writeback)) { + + st_mem_detailed_writeback = rrdset_create_localhost( + "services" + , "mem_writeback" + , NULL + , "mem" + , "services.mem_writeback" + , "Systemd Services Writeback Memory" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_writeback); + + if(unlikely(!st_mem_detailed_pgfault)) { + + st_mem_detailed_pgfault = rrdset_create_localhost( + "services" + , "mem_pgfault" + , NULL + , "mem" + , "services.mem_pgfault" + , "Systemd Services Memory Minor Page Faults" + , "MB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else + rrdset_next(st_mem_detailed_pgfault); + + if(unlikely(!st_mem_detailed_pgmajfault)) { + + st_mem_detailed_pgmajfault = rrdset_create_localhost( + "services" + , "mem_pgmajfault" + , NULL + , "mem" + , "services.mem_pgmajfault" + , "Systemd Services Memory Major Page Faults" + , "MB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_pgmajfault); + + if(unlikely(!st_mem_detailed_pgpgin)) { + + st_mem_detailed_pgpgin = rrdset_create_localhost( + "services" + , "mem_pgpgin" + , NULL + , "mem" + , "services.mem_pgpgin" + , "Systemd Services Memory Charging Activity" + , "MB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_pgpgin); + + if(unlikely(!st_mem_detailed_pgpgout)) { + + st_mem_detailed_pgpgout = rrdset_create_localhost( + "services" + , "mem_pgpgout" + , NULL + , "mem" + , "services.mem_pgpgout" + , "Systemd Services Memory 
Uncharging Activity" + , "MB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_detailed_pgpgout); + } + + if(likely(do_mem_failcnt)) { + if(unlikely(!st_mem_failcnt)) { + + st_mem_failcnt = rrdset_create_localhost( + "services" + , "mem_failcnt" + , NULL + , "mem" + , "services.mem_failcnt" + , "Systemd Services Memory Limit Failures" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_mem_failcnt); + } + + if(likely(do_swap_usage)) { + if(unlikely(!st_swap_usage)) { + + st_swap_usage = rrdset_create_localhost( + "services" + , "swap_usage" + , NULL + , "swap" + , "services.swap_usage" + , "Systemd Services Swap Memory Used" + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_swap_usage); + } + + if(likely(do_io)) { + if(unlikely(!st_io_read)) { + + st_io_read = rrdset_create_localhost( + "services" + , "io_read" + , NULL + , "disk" + , "services.io_read" + , "Systemd Services Disk Read Bandwidth" + , "KB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_io_read); + + if(unlikely(!st_io_write)) { + + st_io_write = rrdset_create_localhost( + "services" + , "io_write" + , NULL + , "disk" + , "services.io_write" + , "Systemd Services Disk Write Bandwidth" + , "KB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_io_write); + } + + if(likely(do_io_ops)) { + if(unlikely(!st_io_serviced_read)) { + + st_io_serviced_read = rrdset_create_localhost( + "services" + , "io_ops_read" + , NULL + , "disk" + , "services.io_ops_read" + , "Systemd Services Disk Read Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 140 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_io_serviced_read); + + if(unlikely(!st_io_serviced_write)) { + + st_io_serviced_write = rrdset_create_localhost( + "services" + , "io_ops_write" + , NULL + , "disk" + , "services.io_ops_write" + , "Systemd Services Disk Write Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 150 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_io_serviced_write); + } + + if(likely(do_throttle_io)) { + if(unlikely(!st_throttle_io_read)) { + + st_throttle_io_read = rrdset_create_localhost( + "services" + , "throttle_io_read" + , NULL + , "disk" + , "services.throttle_io_read" + , "Systemd Services Throttle Disk Read Bandwidth" + , "KB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_throttle_io_read); + + if(unlikely(!st_throttle_io_write)) { + + st_throttle_io_write = rrdset_create_localhost( + "services" + , "throttle_io_write" + , NULL + , "disk" + , "services.throttle_io_write" + , "Systemd Services Throttle Disk Write 
Bandwidth" + , "KB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_throttle_io_write); + } + + if(likely(do_throttle_ops)) { + if(unlikely(!st_throttle_ops_read)) { + + st_throttle_ops_read = rrdset_create_localhost( + "services" + , "throttle_io_ops_read" + , NULL + , "disk" + , "services.throttle_io_ops_read" + , "Systemd Services Throttle Disk Read Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 180 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_throttle_ops_read); + + if(unlikely(!st_throttle_ops_write)) { + + st_throttle_ops_write = rrdset_create_localhost( + "services" + , "throttle_io_ops_write" + , NULL + , "disk" + , "services.throttle_io_ops_write" + , "Systemd Services Throttle Disk Write Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 190 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_throttle_ops_write); + } + + if(likely(do_queued_ops)) { + if(unlikely(!st_queued_ops_read)) { + + st_queued_ops_read = rrdset_create_localhost( + "services" + , "queued_io_ops_read" + , NULL + , "disk" + , "services.queued_io_ops_read" + , "Systemd Services Queued Disk Read Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 200 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_queued_ops_read); + + if(unlikely(!st_queued_ops_write)) { + + st_queued_ops_write = rrdset_create_localhost( + "services" + , "queued_io_ops_write" + , NULL + , "disk" + , "services.queued_io_ops_write" + , "Systemd Services Queued Disk Write Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 210 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_queued_ops_write); + } + + if(likely(do_merged_ops)) { + if(unlikely(!st_merged_ops_read)) { + + st_merged_ops_read = rrdset_create_localhost( + "services" + , "merged_io_ops_read" + , NULL + , "disk" + , "services.merged_io_ops_read" + , "Systemd Services Merged Disk Read Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 220 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_merged_ops_read); + + if(unlikely(!st_merged_ops_write)) { + + st_merged_ops_write = rrdset_create_localhost( + "services" + , "merged_io_ops_write" + , NULL + , "disk" + , "services.merged_io_ops_write" + , "Systemd Services Merged Disk Write Operations" + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 230 + , update_every + , RRDSET_TYPE_STACKED + ); + + } + else + rrdset_next(st_merged_ops_write); + } + + // update the values + struct cgroup *cg; + for(cg = cgroup_root; cg ; cg = cg->next) { + if(unlikely(!cg->available || !cg->enabled || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) + continue; + + if(likely(do_cpu && cg->cpuacct_stat.updated)) { + if(unlikely(!cg->rd_cpu)) + cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_cpu, cg->rd_cpu, 
cg->cpuacct_stat.user + cg->cpuacct_stat.system); + } + + if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) { + if(unlikely(!cg->rd_mem_usage)) + cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0)); + } + + if(likely(do_mem_detailed && cg->memory.updated_detailed)) { + if(unlikely(!cg->rd_mem_detailed_rss)) + cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.rss + cg->memory.rss_huge); + + if(unlikely(!cg->rd_mem_detailed_mapped)) + cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.mapped_file); + + if(unlikely(!cg->rd_mem_detailed_cache)) + cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.cache); + + if(unlikely(!cg->rd_mem_detailed_writeback)) + cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.writeback); + + if(unlikely(!cg->rd_mem_detailed_pgfault)) + cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.pgfault); + + if(unlikely(!cg->rd_mem_detailed_pgmajfault)) + cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.pgmajfault); + + if(unlikely(!cg->rd_mem_detailed_pgpgin)) + cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.pgpgin); + + if(unlikely(!cg->rd_mem_detailed_pgpgout)) + cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.pgpgout); + } + + if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) { + if(unlikely(!cg->rd_mem_failcnt)) + cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt); + } + + if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) { + if(unlikely(!cg->rd_swap_usage)) + cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes); + } + + if(likely(do_io && cg->io_service_bytes.updated)) { 
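+            /* editor's note: unlike the per-cgroup charts further below, which
+             * draw read as positive and write as negative on a single chart,
+             * the services view splits every blkio metric into separate _read
+             * and _write charts, because each chart already carries one
+             * dimension per service. */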
+ if(unlikely(!cg->rd_io_service_bytes_read)) + cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read); + + if(unlikely(!cg->rd_io_service_bytes_write)) + cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write); + } + + if(likely(do_io_ops && cg->io_serviced.updated)) { + if(unlikely(!cg->rd_io_serviced_read)) + cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read); + + if(unlikely(!cg->rd_io_serviced_write)) + cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write); + } + + if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) { + if(unlikely(!cg->rd_throttle_io_read)) + cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read); + + if(unlikely(!cg->rd_throttle_io_write)) + cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write); + } + + if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) { + if(unlikely(!cg->rd_throttle_io_serviced_read)) + cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read); + + if(unlikely(!cg->rd_throttle_io_serviced_write)) + cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write); + } + + if(likely(do_queued_ops && cg->io_queued.updated)) { + if(unlikely(!cg->rd_io_queued_read)) + cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read); + + if(unlikely(!cg->rd_io_queued_write)) + cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write); + } + + if(likely(do_merged_ops && cg->io_merged.updated)) { + if(unlikely(!cg->rd_io_merged_read)) + cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read); + + if(unlikely(!cg->rd_io_merged_write)) + cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_merged_ops_write, 
cg->rd_io_merged_write, cg->io_merged.Write); + } + } + + // complete the iteration + if(likely(do_cpu)) + rrdset_done(st_cpu); + + if(likely(do_mem_usage)) + rrdset_done(st_mem_usage); + + if(unlikely(do_mem_detailed)) { + rrdset_done(st_mem_detailed_cache); + rrdset_done(st_mem_detailed_rss); + rrdset_done(st_mem_detailed_mapped); + rrdset_done(st_mem_detailed_writeback); + rrdset_done(st_mem_detailed_pgfault); + rrdset_done(st_mem_detailed_pgmajfault); + rrdset_done(st_mem_detailed_pgpgin); + rrdset_done(st_mem_detailed_pgpgout); + } + + if(likely(do_mem_failcnt)) + rrdset_done(st_mem_failcnt); + + if(likely(do_swap_usage)) + rrdset_done(st_swap_usage); + + if(likely(do_io)) { + rrdset_done(st_io_read); + rrdset_done(st_io_write); + } + + if(likely(do_io_ops)) { + rrdset_done(st_io_serviced_read); + rrdset_done(st_io_serviced_write); + } + + if(likely(do_throttle_io)) { + rrdset_done(st_throttle_io_read); + rrdset_done(st_throttle_io_write); + } + + if(likely(do_throttle_ops)) { + rrdset_done(st_throttle_ops_read); + rrdset_done(st_throttle_ops_write); + } + + if(likely(do_queued_ops)) { + rrdset_done(st_queued_ops_read); + rrdset_done(st_queued_ops_write); + } + + if(likely(do_merged_ops)) { + rrdset_done(st_merged_ops_read); + rrdset_done(st_merged_ops_write); + } +} + +static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) { + if(buffer[0]) return buffer; + + if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0')) + strncpy(buffer, "cgroup_root", len); + else + snprintfz(buffer, len, "cgroup_%s", id); + + netdata_fix_chart_id(buffer); + return buffer; +} + +void update_cgroup_charts(int update_every) { + debug(D_CGROUP, "updating cgroups charts"); + + char type[RRD_ID_LENGTH_MAX + 1]; + char title[CHART_TITLE_MAX + 1]; + + int services_do_cpu = 0, + services_do_mem_usage = 0, + services_do_mem_detailed = 0, + services_do_mem_failcnt = 0, + services_do_swap_usage = 0, + services_do_io = 0, + services_do_io_ops = 0, + services_do_throttle_io = 0, + services_do_throttle_ops = 0, + services_do_queued_ops = 0, + services_do_merged_ops = 0; + + struct cgroup *cg; + for(cg = cgroup_root; cg ; cg = cg->next) { + if(unlikely(!cg->available || !cg->enabled)) + continue; + + if(likely(cgroup_enable_systemd_services && cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) { + if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++; + + if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++; + if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++; + if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++; + if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++; + + if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++; + if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++; + if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++; + if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++; + if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++; + if(cg->io_merged.updated && 
cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++; + continue; + } + + type[0] = '\0'; + + if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_cpu)) { + snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title); + + cg->st_cpu = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "cpu" + , NULL + , "cpu" + , "cgroup.cpu" + , title + , "%" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_cpu); + + rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user); + rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system); + rrdset_done(cg->st_cpu); + } + + if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) { + char id[RRD_ID_LENGTH_MAX + 1]; + unsigned int i; + + if(unlikely(!cg->st_cpu_per_core)) { + snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) Per Core for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title); + + cg->st_cpu_per_core = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "cpu_per_core" + , NULL + , "cpu" + , "cgroup.cpu_per_core" + , title + , "%" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 100 + , update_every + , RRDSET_TYPE_STACKED + ); + + for(i = 0; i < cg->cpuacct_usage.cpus; i++) { + snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); + rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL); + } + } + else + rrdset_next(cg->st_cpu_per_core); + + for(i = 0; i < cg->cpuacct_usage.cpus ;i++) { + snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); + rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]); + } + rrdset_done(cg->st_cpu_per_core); + } + + if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_mem)) { + snprintfz(title, CHART_TITLE_MAX, "Memory Usage for cgroup %s", cg->chart_title); + + cg->st_mem = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "mem" + , NULL + , "mem" + , "cgroup.mem" + , title + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 210 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + if(cg->memory.detailed_has_swap) + rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(cg->st_mem); + + rrddim_set(cg->st_mem, "cache", cg->memory.cache); + rrddim_set(cg->st_mem, "rss", (cg->memory.rss > cg->memory.rss_huge)?(cg->memory.rss - cg->memory.rss_huge):0); + + if(cg->memory.detailed_has_swap) + rrddim_set(cg->st_mem, "swap", cg->memory.swap); + + rrddim_set(cg->st_mem, "rss_huge", 
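+            /* editor's note: memory.stat reports rss inclusive of transparent
+             * huge pages, so rss_huge was subtracted (clamped at zero) from the
+             * "rss" dimension above and is charted as its own dimension here,
+             * keeping the stacked total honest. */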
cg->memory.rss_huge); + rrddim_set(cg->st_mem, "mapped_file", cg->memory.mapped_file); + rrdset_done(cg->st_mem); + + if(unlikely(!cg->st_writeback)) { + snprintfz(title, CHART_TITLE_MAX, "Writeback Memory for cgroup %s", cg->chart_title); + + cg->st_writeback = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "writeback" + , NULL + , "mem" + , "cgroup.writeback" + , title + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 300 + , update_every + , RRDSET_TYPE_AREA + ); + + if(cg->memory.detailed_has_dirty) + rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + + rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(cg->st_writeback); + + if(cg->memory.detailed_has_dirty) + rrddim_set(cg->st_writeback, "dirty", cg->memory.dirty); + + rrddim_set(cg->st_writeback, "writeback", cg->memory.writeback); + rrdset_done(cg->st_writeback); + + if(unlikely(!cg->st_mem_activity)) { + snprintfz(title, CHART_TITLE_MAX, "Memory Activity for cgroup %s", cg->chart_title); + + cg->st_mem_activity = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "mem_activity" + , NULL + , "mem" + , "cgroup.mem_activity" + , title + , "MB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 400 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_mem_activity); + + rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.pgpgin); + rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.pgpgout); + rrdset_done(cg->st_mem_activity); + + if(unlikely(!cg->st_pgfaults)) { + snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults for cgroup %s", cg->chart_title); + + cg->st_pgfaults = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "pgfaults" + , NULL + , "mem" + , "cgroup.pgfaults" + , title + , "MB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 500 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_pgfaults); + + rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.pgfault); + rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.pgmajfault); + rrdset_done(cg->st_pgfaults); + } + + if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_mem_usage)) { + snprintfz(title, CHART_TITLE_MAX, "Used Memory %sfor cgroup %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"", cg->chart_title); + + cg->st_mem_usage = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "mem_usage" + , NULL + , "mem" + , "cgroup.mem_usage" + , title + , "MB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 200 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(cg->st_mem_usage, "ram", NULL, 
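+                /* byte counters; the 1024*1024 divisor renders them
+                 * in the chart's MB units: */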
1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(cg->st_mem_usage); + + rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0)); + rrddim_set(cg->st_mem_usage, "swap", (cg->memory.msw_usage_in_bytes > cg->memory.usage_in_bytes)?cg->memory.msw_usage_in_bytes - cg->memory.usage_in_bytes:0); + rrdset_done(cg->st_mem_usage); + } + + if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_mem_failcnt)) { + snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures for cgroup %s", cg->chart_title); + + cg->st_mem_failcnt = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "mem_failcnt" + , NULL + , "mem" + , "cgroup.mem_failcnt" + , title + , "count" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 250 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_mem_failcnt); + + rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt); + rrdset_done(cg->st_mem_failcnt); + } + + if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_io)) { + snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks) for cgroup %s", cg->chart_title); + + cg->st_io = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "io" + , NULL + , "disk" + , "cgroup.io" + , title + , "KB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200 + , update_every + , RRDSET_TYPE_AREA + ); + + rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_io); + + rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read); + rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write); + rrdset_done(cg->st_io); + } + + if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_serviced_ops)) { + snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title); + + cg->st_serviced_ops = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "serviced_ops" + , NULL + , "disk" + , "cgroup.serviced_ops" + , title + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_serviced_ops); + + rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read); + rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write); + rrdset_done(cg->st_serviced_ops); + } + + if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_throttle_io)) { + snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks) for cgroup %s", cg->chart_title); + + cg->st_throttle_io = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, 
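+                /* type[] is reset to "" once per cgroup; cgroup_chart_type()
+                 * fills it on first use (prefixing "cgroup_" and sanitizing via
+                 * netdata_fix_chart_id()) and returns it unchanged afterwards,
+                 * so every chart of this cgroup shares one chart type id: */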
RRD_ID_LENGTH_MAX) + , "throttle_io" + , NULL + , "disk" + , "cgroup.throttle_io" + , title + , "KB/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200 + , update_every + , RRDSET_TYPE_AREA + ); + + rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_throttle_io); + + rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read); + rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write); + rrdset_done(cg->st_throttle_io); + } + + if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_throttle_serviced_ops)) { + snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title); + + cg->st_throttle_serviced_ops = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "throttle_serviced_ops" + , NULL + , "disk" + , "cgroup.throttle_serviced_ops" + , title + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_throttle_serviced_ops); + + rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read); + rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write); + rrdset_done(cg->st_throttle_serviced_ops); + } + + if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_queued_ops)) { + snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks) for cgroup %s", cg->chart_title); + + cg->st_queued_ops = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "queued_ops" + , NULL + , "disk" + , "cgroup.queued_ops" + , title + , "operations" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 2000 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(cg->st_queued_ops); + + rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read); + rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write); + rrdset_done(cg->st_queued_ops); + } + + if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) { + if(unlikely(!cg->st_merged_ops)) { + snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks) for cgroup %s", cg->chart_title); + + cg->st_merged_ops = rrdset_create_localhost( + cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) + , "merged_ops" + , NULL + , "disk" + , "cgroup.merged_ops" + , title + , "operations/s" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME + , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 2100 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(cg->st_merged_ops); + + 
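+            /* editor's note: every chart in this plugin follows the same
+             * librrd lifecycle, sketched here with a hypothetical RRDSET *st:
+             *
+             *     if(unlikely(!st)) st = rrdset_create_localhost(...); // first pass
+             *     else              rrdset_next(st);                   // new iteration
+             *     rrddim_set(st, "dim", collected_value);              // feed values
+             *     rrdset_done(st);                                     // commit
+             */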
rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read); + rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write); + rrdset_done(cg->st_merged_ops); + } + } + + if(likely(cgroup_enable_systemd_services)) + update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed + , services_do_mem_failcnt, services_do_swap_usage, services_do_io + , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops + , services_do_queued_ops, services_do_merged_ops + ); + + debug(D_CGROUP, "done updating cgroups charts"); +} + +// ---------------------------------------------------------------------------- +// cgroups main + +static void cgroup_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *cgroups_main(void *ptr) { + netdata_thread_cleanup_push(cgroup_main_cleanup, ptr); + + struct rusage thread; + + // when ZERO, attempt to do it + int vdo_cpu_netdata = config_get_boolean("plugin:cgroups", "cgroups plugin resource charts", 1); + + read_cgroup_plugin_configuration(); + + RRDSET *stcpu_thread = NULL; + + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = cgroup_update_every * USEC_PER_SEC; + usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0; + + while(!netdata_exit) { + usec_t hb_dt = heartbeat_next(&hb, step); + if(unlikely(netdata_exit)) break; + + // BEGIN -- the job to be done + + find_dt += hb_dt; + if(unlikely(find_dt >= find_every || cgroups_check)) { + find_all_cgroups(); + find_dt = 0; + cgroups_check = 0; + } + + read_all_cgroups(cgroup_root); + update_cgroup_charts(cgroup_update_every); + + // END -- the job is done + + // -------------------------------------------------------------------- + + if(vdo_cpu_netdata) { + getrusage(RUSAGE_THREAD, &thread); + + if(unlikely(!stcpu_thread)) { + + stcpu_thread = rrdset_create_localhost( + "netdata" + , "plugin_cgroups_cpu" + , NULL + , "cgroups" + , NULL + , "NetData CGroups Plugin CPU usage" + , "milliseconds/s" + , PLUGIN_CGROUPS_NAME + , "stats" + , 132000 + , cgroup_update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(stcpu_thread); + + rrddim_set(stcpu_thread, "user" , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); + rrddim_set(stcpu_thread, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); + rrdset_done(stcpu_thread); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h new file mode 100644 index 000000000..09ce5e3fb --- /dev/null +++ b/collectors/cgroups.plugin/sys_fs_cgroup.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYS_FS_CGROUP_H +#define NETDATA_SYS_FS_CGROUP_H 1 + +#include "../../daemon/common.h" + +#if (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS \ + { \ + .name = "PLUGIN[cgroups]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "cgroups", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = cgroups_main \ + }, + +extern void *cgroups_main(void *ptr); + +#include "../proc.plugin/plugin_proc.h" + +#else // 
(TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS + +#endif // (TARGET_OS == OS_LINUX) + +#endif //NETDATA_SYS_FS_CGROUP_H diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am new file mode 100644 index 000000000..e2e00258f --- /dev/null +++ b/collectors/charts.d.plugin/Makefile.am @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + charts.d.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + charts.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + charts.d.dryrun-helper.sh \ + charts.d.plugin \ + loopsleepms.sh.inc \ + $(NULL) + +dist_noinst_DATA = \ + charts.d.plugin.in \ + README.md \ + $(NULL) + +dist_charts_SCRIPTS = \ + $(NULL) + +dist_charts_DATA = \ + $(NULL) + +userchartsconfigdir=$(configdir)/charts.d +dist_userchartsconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +chartsconfigdir=$(libconfigdir)/charts.d +dist_chartsconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +include ap/Makefile.inc +include apache/Makefile.inc +include apcupsd/Makefile.inc +include cpu_apps/Makefile.inc +include cpufreq/Makefile.inc +include example/Makefile.inc +include exim/Makefile.inc +include hddtemp/Makefile.inc +include libreswan/Makefile.inc +include load_average/Makefile.inc +include mem_apps/Makefile.inc +include mysql/Makefile.inc +include nginx/Makefile.inc +include nut/Makefile.inc +include opensips/Makefile.inc +include phpfpm/Makefile.inc +include postfix/Makefile.inc +include sensors/Makefile.inc +include squid/Makefile.inc +include tomcat/Makefile.inc diff --git a/collectors/charts.d.plugin/Makefile.in b/collectors/charts.d.plugin/Makefile.in new file mode 100644 index 000000000..23e2edebb --- /dev/null +++ b/collectors/charts.d.plugin/Makefile.in @@ -0,0 +1,953 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
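+# editor's note: automake inlines each module's Makefile.inc when it generates
+# this file, so the SPDX / "THIS IS NOT A COMPLETE Makefile" header carried by
+# those includes is repeated throughout the generated output, once per module
+# (ap, apache, apcupsd, ..., tomcat).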
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc \ + $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc \ + $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \ + $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \ + $(srcdir)/hddtemp/Makefile.inc \ + $(srcdir)/libreswan/Makefile.inc \ + $(srcdir)/load_average/Makefile.inc \ + $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc \ + $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc \ + $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \ + $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc \ + $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc \ + $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS) \ + $(dist_charts_DATA) $(dist_chartsconfig_DATA) \ + $(dist_libconfig_DATA) $(dist_noinst_DATA) \ + $(dist_userchartsconfig_DATA) +subdir = collectors/charts.d.plugin +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" \ + "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" \ + "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)" +SCRIPTS = $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_charts_DATA) $(dist_chartsconfig_DATA) \ + $(dist_libconfig_DATA) $(dist_noinst_DATA) \ + $(dist_userchartsconfig_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = 
@LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + charts.d.plugin \ + $(NULL) + +SUFFIXES = .in +dist_libconfig_DATA = \ + charts.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + charts.d.dryrun-helper.sh \ + 
charts.d.plugin \
+	loopsleepms.sh.inc \
+	$(NULL)
+
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = charts.d.plugin.in README.md $(NULL) ap/README.md \
+	ap/Makefile.inc apache/README.md apache/Makefile.inc \
+	apcupsd/README.md apcupsd/Makefile.inc cpu_apps/README.md \
+	cpu_apps/Makefile.inc cpufreq/README.md cpufreq/Makefile.inc \
+	example/README.md example/Makefile.inc exim/README.md \
+	exim/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
+	libreswan/README.md libreswan/Makefile.inc \
+	load_average/README.md load_average/Makefile.inc \
+	mem_apps/README.md mem_apps/Makefile.inc mysql/README.md \
+	mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
+	nut/README.md nut/Makefile.inc opensips/README.md \
+	opensips/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
+	postfix/README.md postfix/Makefile.inc sensors/README.md \
+	sensors/Makefile.inc squid/README.md squid/Makefile.inc \
+	tomcat/README.md tomcat/Makefile.inc
+dist_charts_SCRIPTS = \
+	$(NULL)
+
+
+# install these files
+dist_charts_DATA = $(NULL) ap/ap.chart.sh apache/apache.chart.sh \
+	apcupsd/apcupsd.chart.sh cpu_apps/cpu_apps.chart.sh \
+	cpufreq/cpufreq.chart.sh example/example.chart.sh \
+	exim/exim.chart.sh hddtemp/hddtemp.chart.sh \
+	libreswan/libreswan.chart.sh \
+	load_average/load_average.chart.sh mem_apps/mem_apps.chart.sh \
+	mysql/mysql.chart.sh nginx/nginx.chart.sh nut/nut.chart.sh \
+	opensips/opensips.chart.sh phpfpm/phpfpm.chart.sh \
+	postfix/postfix.chart.sh sensors/sensors.chart.sh \
+	squid/squid.chart.sh tomcat/tomcat.chart.sh
+userchartsconfigdir = $(configdir)/charts.d
+dist_userchartsconfig_DATA = \
+	$(top_srcdir)/installer/.keep \
+	$(NULL)
+
+chartsconfigdir = 
$(libconfigdir)/charts.d +dist_chartsconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \ + ap/ap.conf apache/apache.conf apcupsd/apcupsd.conf \ + cpu_apps/cpu_apps.conf cpufreq/cpufreq.conf \ + example/example.conf exim/exim.conf hddtemp/hddtemp.conf \ + libreswan/libreswan.conf load_average/load_average.conf \ + mem_apps/mem_apps.conf mysql/mysql.conf nginx/nginx.conf \ + nut/nut.conf opensips/opensips.conf phpfpm/phpfpm.conf \ + postfix/postfix.conf sensors/sensors.conf squid/squid.conf \ + tomcat/tomcat.conf +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_chartsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir) +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == 
$(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) +install-dist_chartsDATA: $(dist_charts_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \ + done + +uninstall-dist_chartsDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir) +install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \ + done + +uninstall-dist_chartsconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_userchartsconfigDATA: $(dist_userchartsconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(userchartsconfigdir)'"; \ + $(MKDIR_P) 
"$(DESTDIR)$(userchartsconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userchartsconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(userchartsconfigdir)" || exit $$?; \ + done + +uninstall-dist_userchartsconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(userchartsconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS \ + install-dist_chartsconfigDATA install-dist_libconfigDATA \ + install-dist_pluginsSCRIPTS install-dist_userchartsconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \ + uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \ + uninstall-dist_pluginsSCRIPTS \ + uninstall-dist_userchartsconfigDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_chartsDATA \ + install-dist_chartsSCRIPTS install-dist_chartsconfigDATA \ + install-dist_libconfigDATA install-dist_pluginsSCRIPTS \ + install-dist_userchartsconfigDATA install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \ + uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \ + uninstall-dist_pluginsSCRIPTS \ + uninstall-dist_userchartsconfigDATA + +.in: + if sed \ + -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ + -e 's#[@]sbindir_POST@#$(sbindir)#g' \ + -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ + -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ + $< > $@.tmp; then \ + mv "$@.tmp" "$@"; \ + else \ + rm -f "$@.tmp"; \ + false; \ + fi + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md new file mode 100644 index 000000000..b224bffe3 --- /dev/null +++ b/collectors/charts.d.plugin/README.md @@ -0,0 +1,193 @@ +# charts.d.plugin + +`charts.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+. + +1. It runs as an independent process `ps fax` shows it +2. It is started and stopped automatically by netdata +3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon) +4. 
Supports any number of data collection **modules**
+
+`charts.d.plugin` has been designed so that the actual script that does the data collection stays permanently in
+memory, collecting data with as little overhead as possible
+(i.e. initialize once, repeatedly collect values with minimal overhead).
+
+`charts.d.plugin` looks for scripts in `/usr/libexec/netdata/charts.d`.
+The scripts should have the filename suffix: `.chart.sh`.
+
+## Configuration
+
+`charts.d.plugin` itself can be configured using the configuration file `/etc/netdata/charts.d.conf`
+(to edit it on your system run `/etc/netdata/edit-config charts.d.conf`). This file is also a BASH script.
+
+In this file, you can place statements like this:
+
+```
+enable_all_charts="yes"
+X="yes"
+Y="no"
+```
+
+where `X` and `Y` are the names of individual charts.d collector scripts.
+When set to `yes`, charts.d will evaluate the collector script (see below).
+When set to `no`, charts.d will ignore the collector script.
+
+The variable `enable_all_charts` sets the default enable/disable state for all charts.
+
+## A charts.d module
+
+A `charts.d.plugin` module is a BASH script defining a few functions.
+
+For a module called `X`, the following criteria must be met:
+
+1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
+
+2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
+   The configuration file `X.conf` is itself a BASH script.
+   To edit the default files supplied by netdata run `/etc/netdata/edit-config charts.d/X.conf`,
+   where `X` is the name of the module.
+
+3. All functions and global variables defined in the script and its configuration must begin with `X_`.
+
+4. The following functions must be defined:
+
+   - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
+     (following the standard Linux command line return codes: 0 = OK, the collector can operate and 1 = FAILED,
+     the collector cannot be used).
+
+   - `X_create()` - creates the netdata charts, following the standard netdata plugin guides as described in
+     **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
+     The return value matters: 0 = OK, 1 = FAILED.
+
+   - `X_update()` - collects the values for the defined charts, following the standard netdata plugin guides
+     as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
+     The return value also matters: 0 = OK, 1 = FAILED.
+
+5. The following global variables are available to be set:
+
+   - `X_update_every` - the data collection frequency for the module script, in seconds.
+
+The module script may use more functions or variables, but all of them must begin with `X_`.
+
+The standard netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
+
+### X_check()
+
+The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config).
+
+For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to
+connect to a local mysql database to find out if it can read the values it needs.
+
+`X_check()` is run only once for the lifetime of the module.
+
+### X_create()
+
+The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard netdata
+plugin guides (**[External Plugins](../plugins.d/)**).
+
+`X_create()` will be called just once and only after `X_check()` was successful.
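+
+As a rough sketch (illustrative only - the module name `example`, the chart and dimension names, and the
+`example_priority` / `example_update_every` variables are assumptions here; the exact `CHART` and `DIMENSION`
+fields are documented in **[External Plugins](../plugins.d/)**), a minimal `X_create()` could look like this:
+
+```sh
+example_create() {
+	# emit one chart definition with a single dimension
+	cat <<EOF
+CHART example.random '' "A random number" "value" random random line $((example_priority + 1)) $example_update_every
+DIMENSION random '' absolute 1 1
+EOF
+	return 0
+}
+```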
+
+You can, however, also call `X_create()` yourself when there is a need for it (for example, to add a new dimension to an existing chart).
+
+A non-zero return value will disable the collector.
+
+### X_update()
+
+`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to netdata,
+following the netdata plugin guides (**[External Plugins](../plugins.d/)**).
+
+The function will be called with one parameter: microseconds since the last time it was run. This value should be
+appended to the `BEGIN` statement of every chart updated by the collector script.
+
+A non-zero return value will disable the collector.
+
+### Useful functions charts.d provides
+
+Module scripts can use the following charts.d functions:
+
+#### require_cmd command
+
+`require_cmd()` will check if a command is available in the running system.
+
+For example, your `X_check()` function may use it like this:
+
+```sh
+mysql_check() {
+	require_cmd mysql || return 1
+	return 0
+}
+```
+
+Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled.
+
+#### fixid "string"
+
+`fixid()` will get a string and return a properly formatted id for a chart or dimension.
+
+This is an expensive function that should not be used in `X_update()`.
+You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this:
+
+```sh
+declare -A X_ids=()
+X_create() {
+	local name="a very bad name for id"
+
+	X_ids[$name]="$(fixid "$name")"
+}
+
+X_update() {
+	local microseconds="$1"
+
+	...
+	local name="a very bad name for id"
+	...
+
+	echo "BEGIN ${X_ids[$name]} $microseconds"
+	...
+}
+```
+
+### Debugging your collectors
+
+You can run `charts.d.plugin` by hand with something like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z
+```
+
+Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts
+`X`, `Y` and `Z`. You can define zero or more module scripts. If none is defined, charts.d will evaluate all
+module scripts available.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running
+`charts.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+Also, remember that netdata runs `charts.d.plugin` as user `netdata` (or any other user netdata is configured to run as).
+
+
+## Running multiple instances of charts.d.plugin
+
+`charts.d.plugin` calls the modules' `X_update()` functions one after another. This means that a delay in collector `X`
+will also delay the collection of `Y` and `Z`.
+
+You can run multiple instances of `charts.d.plugin` to overcome this problem.
+
+This is what you need to do:
+
+1. Decide a name for the new charts.d instance, for example `charts2.d`.
+
+2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable/disable the
+   modules you want each to run. Remember to set `enable_all_charts="no"` in both of them, and enable the individual
+   modules for each.
+
+3. Link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`,
+   as shown in the example below. Netdata will spawn a new charts.d process.
+
+Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are
+created in `/usr/libexec/netdata/plugins.d/`.
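+
+For example (a sketch - the paths assume netdata is installed under the default prefix), step 3 amounts to:
+
+```sh
+cd /usr/libexec/netdata/plugins.d
+ln -s charts.d.plugin charts2.d.plugin
+```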
+
diff --git a/collectors/charts.d.plugin/ap/Makefile.inc b/collectors/charts.d.plugin/ap/Makefile.inc
new file mode 100644
index 000000000..a2dd375ac
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += ap/ap.chart.sh
+dist_chartsconfig_DATA += ap/ap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ap/README.md ap/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
new file mode 100644
index 000000000..eb4e80707
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -0,0 +1,84 @@
+# Access Point Plugin (ap)
+
+The `ap` collector visualizes data related to access points.
+
+## Example netdata charts
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png)
+
+## How it works
+
+It does the following:
+
+1. Runs `iw dev` searching for interfaces that have `type AP`.
+
+   From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+
+   Example:
+```sh
+# iw dev
+phy#0
+	Interface wlan0
+		ifindex 3
+		wdev 0x1
+		addr 7c:dd:90:77:34:2a
+		ssid TSAOUSIS
+		type AP
+		channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz
+```
+
+
+2. For each interface found, it runs `iw INTERFACE station dump`.
+
+   From the output it collects:
+
+   - rx/tx bytes
+   - rx/tx packets
+   - tx retries
+   - tx failed
+   - signal strength
+   - rx/tx bitrate
+   - expected throughput
+
+   Example:
+
+```sh
+# iw wlan0 station dump
+Station 40:b8:37:5a:ed:5e (on wlan0)
+	inactive time:	910 ms
+	rx bytes:	15588897
+	rx packets:	127772
+	tx bytes:	52257763
+	tx packets:	95802
+	tx retries:	2162
+	tx failed:	28
+	signal:  	-43 dBm
+	signal avg:	-43 dBm
+	tx bitrate:	65.0 MBit/s MCS 7
+	rx bitrate:	1.0 MBit/s
+	expected throughput:	32.125Mbps
+	authorized:	yes
+	authenticated:	yes
+	preamble:	long
+	WMM/WME:	yes
+	MFP:	no
+	TDLS peer:	no
+```
+
+3. For each interface found, it creates 6 charts:
+
+   - Number of Connected clients
+   - Bandwidth for all clients
+   - Packets for all clients
+   - Transmit Issues for all clients
+   - Average Signal among all clients
+   - Average Bitrate (including average expected throughput) among all clients
+
+## Configuration
+
+You can only set `ap_update_every=NUMBER` in `/etc/netdata/charts.d/ap.conf`, to set the data collection frequency.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`.
+
+## Auto-detection
+
+The plugin is able to auto-detect if you are running access points on your Linux box.
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
new file mode 100644
index 000000000..ccc36120c
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
@@ -0,0 +1,182 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis +# + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +ap_update_every= +ap_priority=6900 + +declare -A ap_devs=() + +# _check is called once, to find out if this chart should be enabled or not +ap_check() { + require_cmd iw || return 1 + local ev + ev=$(run iw dev | awk ' + BEGIN { + i = ""; + ssid = ""; + ap = 0; + } + /^[ \t]+Interface / { + if( ap == 1 ) { + print "ap_devs[" i "]=\"" ssid "\"" + } + + i = $2; + ssid = ""; + ap = 0; + } + /^[ \t]+ssid / { ssid = $2; } + /^[ \t]+type AP$/ { ap = 1; } + END { + if( ap == 1 ) { + print "ap_devs[" i "]=\"" ssid "\"" + } + } + ') + eval "${ev}" + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + [ ${#ap_devs[@]} -gt 0 ] && return 0 + error "no devices found in AP mode, with 'iw dev'" + return 1 +} + +# _create is called once, to create the charts +ap_create() { + local ssid dev + + for dev in "${!ap_devs[@]}" + do + ssid="${ap_devs[${dev}]}" + + # create the chart with 3 dimensions + cat < 0) { + print \"BEGIN ap_clients.\" dev; + print \"SET clients = \" c; + print \"END\"; + print \"BEGIN ap_bandwidth.\" dev; + print \"SET received = \" rb; + print \"SET sent = \" tb; + print \"END\"; + print \"BEGIN ap_packets.\" dev; + print \"SET received = \" rp; + print \"SET sent = \" tp; + print \"END\"; + print \"BEGIN ap_issues.\" dev; + print \"SET retries = \" tr; + print \"SET failures = \" tf; + print \"END\"; + + if( c == 0 ) c = 1; + print \"BEGIN ap_signal.\" dev; + print \"SET signal = \" int(s / c); + print \"END\"; + print \"BEGIN ap_bitrate.\" dev; + print \"SET receive = \" int(rt / c); + print \"SET transmit = \" int(tt / c); + print \"SET expected = \" int(e / c); + print \"END\"; + } + zero_data(); + } + BEGIN { + zero_data(); + } + /^DEVICE / { + print_device(); + dev = \$2; + } + /^Station/ { c++; } + /^[ \\t]+rx bytes:/ { rb += \$3; } + /^[ \\t]+tx bytes:/ { tb += \$3; } + /^[ \\t]+rx packets:/ { rp += \$3; } + /^[ \\t]+tx packets:/ { tp += \$3; } + /^[ \\t]+tx retries:/ { tr += \$3; } + /^[ \\t]+tx failed:/ { tf += \$3; } + /^[ \\t]+signal:/ { x = \$2; s += x * 1000; } + /^[ \\t]+rx bitrate:/ { x = \$3; rt += x * 1000; } + /^[ \\t]+tx bitrate:/ { x = \$3; tt += x * 1000; } + /^[ \\t]+expected throughput:(.*)Mbps/ { + x=\$3; + sub(/Mbps/, \"\", x); + e += x * 1000; + } + END { + print_device(); + } + " + + return 0 +} + diff --git a/collectors/charts.d.plugin/ap/ap.conf b/collectors/charts.d.plugin/ap/ap.conf new file mode 100644 index 000000000..38fc157ce --- /dev/null +++ b/collectors/charts.d.plugin/ap/ap.conf @@ -0,0 +1,23 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# GPL v3+ + +# nothing fancy to configure. 
+# this module will run
+# iw dev - to find wireless devices in AP mode
+# iw ${dev} station dump - to get connected clients
+# based on the above, it generates several charts
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#ap_update_every=
+
+# the charts priority on the dashboard
+#ap_priority=6900
+
+# the number of retries to do in case of failure
+# before disabling the module
+#ap_retries=10
diff --git a/collectors/charts.d.plugin/apache/Makefile.inc b/collectors/charts.d.plugin/apache/Makefile.inc
new file mode 100644
index 000000000..4b360eae0
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += apache/apache.chart.sh
+dist_chartsconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
new file mode 100644
index 000000000..890cee984
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/README.md
@@ -0,0 +1,127 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+---
+
+# Apache Plugin (apache)
+
+The `apache` collector visualizes key performance data for an apache web server.
+
+## Example netdata charts
+
+For apache 2.2:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12530273/421c4d14-c1e2-11e5-9fb6-ca6d6dd3b1dd.png)
+
+For apache 2.4:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12530376/29ec26de-c1e6-11e5-9af1-e48aaf781795.png)
+
+## How it works
+
+It runs `curl "http://apache.host/server-status?auto"` to fetch the current status of apache.
+
+It has been tested with apache 2.2 and apache 2.4. The latter also provides connections information (total and breakdown by status).
+
+Apache 2.2 response:
+
+```sh
+$ curl "http://127.0.0.1/server-status?auto"
+Total Accesses: 80057
+Total kBytes: 223017
+CPULoad: .018287
+Uptime: 64472
+ReqPerSec: 1.24173
+BytesPerSec: 3542.15
+BytesPerReq: 2852.59
+BusyWorkers: 1
+IdleWorkers: 49
+Scoreboard: _________________________......................................._W_______________________....................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
+```
+
+Apache 2.4 response:
+
+```sh
+$ curl "http://127.0.0.1/server-status?auto"
+127.0.0.1
+ServerVersion: Apache/2.4.18 (Unix)
+ServerMPM: event
+Server Built: Dec 14 2015 08:05:54
+CurrentTime: Saturday, 23-Jan-2016 14:42:06 EET
+RestartTime: Saturday, 23-Jan-2016 04:57:13 EET
+ParentServerConfigGeneration: 2
+ParentServerMPMGeneration: 1
+ServerUptimeSeconds: 35092
+ServerUptime: 9 hours 44 minutes 52 seconds
+Load1: 0.32
+Load5: 0.32
+Load15: 0.27
+Total Accesses: 32403
+Total kBytes: 34464
+CPUUser: 30.37
+CPUSystem: 29.55
+CPUChildrenUser: 0
+CPUChildrenSystem: 0
+CPULoad: .170751
+Uptime: 35092
+ReqPerSec: .923373
+BytesPerSec: 1005.67
+BytesPerReq: 1089.13
+BusyWorkers: 1
+IdleWorkers: 99
+ConnsTotal: 0
+ConnsAsyncWriting: 0
+ConnsAsyncKeepAlive: 0
+ConnsAsyncClosing: 0
+Scoreboard: __________________________________________________________________________________________W_________............................................................................................................................................................................................................................................................................................................
+```
+
+From the apache status output it collects:
+
+ - total accesses (incremental value, rendered as requests/s)
+ - total bandwidth (incremental value, rendered as bandwidth/s)
+ - requests per second (this appears to be calculated by apache as an average for its lifetime, while the one calculated by netdata using the total accesses counter is real-time)
+ - bytes per second (average for the lifetime of the apache server)
+ - bytes per request (average for the lifetime of the apache server)
+ - workers by status (`busy` and `idle`)
+ - total connections (currently active connections - offered by apache 2.4+)
+ - async connections per status (`keepalive`, `writing`, `closing` - offered by apache 2.4+)
+
+## Configuration
+
+The configuration is stored in `/etc/netdata/charts.d/apache.conf`.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/apache.conf`.
+
+The internal default is:
+
+```sh
+# the URL your apache server is responding with mod_status information.
+apache_url="http://127.0.0.1:80/server-status?auto"
+
+# use this to set custom curl options you may need
+apache_curl_opts=
+
+# set this to a NUMBER to overwrite the update frequency
+# it is in seconds
+apache_update_every=
+```
+
+The default `apache_update_every` is configured in netdata.
+
+## Auto-detection
+
+If you have configured your apache server to offer server-status information to localhost clients, the defaults should work fine.
+
+## Apache Configuration
+
+Apache configuration differs between distributions. Please check your distribution's documentation for information on enabling apache's `mod_status` module.
+
+If you are able to run this command successfully by hand:
+
+```sh
+curl "http://127.0.0.1:80/server-status?auto"
+```
+
+netdata will be able to do it too.
+
+Notice: You may need to have the default `000-default.conf` website enabled in order for the status mod to work.
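+
+As a quick sanity check (illustrative only), you can filter the status output down to a few of the fields the collector parses:
+
+```sh
+curl -Ss "http://127.0.0.1:80/server-status?auto" | \
+	grep -E '^(Total Accesses|Total kBytes|ReqPerSec|BusyWorkers|IdleWorkers):'
+```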
diff --git a/collectors/charts.d.plugin/apache/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh
new file mode 100644
index 000000000..95876432f
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/apache.chart.sh
@@ -0,0 +1,258 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis
+#
+
+# the URL to download apache status info
+apache_url="http://127.0.0.1:80/server-status?auto"
+apache_curl_opts=
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+apache_update_every=
+
+apache_priority=60000
+
+# convert apache floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+apache_decimal_detail=1000000
+
+declare -a apache_response=()
+apache_accesses=0
+apache_kbytes=0
+apache_reqpersec=0
+apache_bytespersec=0
+apache_bytesperreq=0
+apache_busyworkers=0
+apache_idleworkers=0
+apache_connstotal=0
+apache_connsasyncwriting=0
+apache_connsasynckeepalive=0
+apache_connsasyncclosing=0
+
+apache_keys_detected=0
+apache_has_conns=0
+apache_key_accesses=
+apache_key_kbytes=
+apache_key_reqpersec=
+apache_key_bytespersec=
+apache_key_bytesperreq=
+apache_key_busyworkers=
+apache_key_idleworkers=
+apache_key_scoreboard=
+apache_key_connstotal=
+apache_key_connsasyncwriting=
+apache_key_connsasynckeepalive=
+apache_key_connsasyncclosing=
+apache_detect() {
+	local i=0
+	for x in "${@}"
+	do
+		case "${x}" in
+			'Total Accesses') apache_key_accesses=$((i + 1)) ;;
+			'Total kBytes') apache_key_kbytes=$((i + 1)) ;;
+			'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;;
+			'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;;
+			'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;;
+			'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;;
+			'IdleWorkers') apache_key_idleworkers=$((i + 1)) ;;
+			'ConnsTotal') apache_key_connstotal=$((i + 1)) ;;
+			'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;;
+			'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;;
+			'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;;
+			'Scoreboard') apache_key_scoreboard=$((i)) ;;
+		esac
+
+		i=$((i + 1))
+	done
+
+	# we will not check for the Conns*
+	# keys, since these are apache 2.4 specific
+	[ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1
+	[ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1
+	[ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1
+	[ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1
+	[ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1
+	[ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1
+	[ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1
+	[ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1
+
+	if [ ! -z "${apache_key_connstotal}" ] && \
+		[ ! -z "${apache_key_connsasyncwriting}" ] && \
+		[ ! -z "${apache_key_connsasynckeepalive}" ] && \
+		[ !
-z "${apache_key_connsasyncclosing}" ] + then + apache_has_conns=1 + else + apache_has_conns=0 + fi + + return 0 +} + +apache_get() { + local oIFS="${IFS}" ret + # shellcheck disable=2207 + IFS=$':\n' apache_response=($(run curl -Ss ${apache_curl_opts} "${apache_url}")) + ret=$? + IFS="${oIFS}" + + if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ] + then + return 1 + fi + + # the last line on the apache output is "Scoreboard" + # we use this label to detect that the output has a new word count + if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ] + then + apache_detect "${apache_response[@]}" || return 1 + apache_keys_detected=1 + fi + + apache_accesses="${apache_response[${apache_key_accesses}]}" + apache_kbytes="${apache_response[${apache_key_kbytes}]}" + + float2int "${apache_response[${apache_key_reqpersec}]}" ${apache_decimal_detail} + apache_reqpersec=${FLOAT2INT_RESULT} + + float2int "${apache_response[${apache_key_bytespersec}]}" ${apache_decimal_detail} + apache_bytespersec=${FLOAT2INT_RESULT} + + float2int "${apache_response[${apache_key_bytesperreq}]}" ${apache_decimal_detail} + apache_bytesperreq=${FLOAT2INT_RESULT} + + apache_busyworkers="${apache_response[${apache_key_busyworkers}]}" + apache_idleworkers="${apache_response[${apache_key_idleworkers}]}" + + if [ -z "${apache_accesses}" ] || \ + [ -z "${apache_kbytes}" ] || \ + [ -z "${apache_reqpersec}" ] || \ + [ -z "${apache_bytespersec}" ] || \ + [ -z "${apache_bytesperreq}" ] || \ + [ -z "${apache_busyworkers}" ] + [ -z "${apache_idleworkers}" ] + then + error "empty values got from apache server: ${apache_response[*]}" + return 1 + fi + + if [ ${apache_has_conns} -eq 1 ] + then + apache_connstotal="${apache_response[${apache_key_connstotal}]}" + apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}" + apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}" + apache_connsasyncclosing="${apache_response[${apache_key_connsasyncclosing}]}" + fi + + return 0 +} + +# _check is called once, to find out if this chart should be enabled or not +apache_check() { + + apache_get + # shellcheck disable=2181 + if [ $? -ne 0 ] + then + # shellcheck disable=2154 + error "cannot find stub_status on URL '${apache_url}'. 
Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf" + return 1 + fi + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + return 0 +} + +# _create is called once, to create the charts +apache_create() { + cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +# the URL to download apache status info +#apache_url="http://127.0.0.1:80/server-status?auto" +#apache_curl_opts= + +# convert apache floating point values +# to integer using this multiplier +# this only affects precision - the values +# will be in the proper units +#apache_decimal_detail=1000000 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#apache_update_every= + +# the charts priority on the dashboard +#apache_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#apache_retries=10 diff --git a/collectors/charts.d.plugin/apcupsd/Makefile.inc b/collectors/charts.d.plugin/apcupsd/Makefile.inc new file mode 100644 index 000000000..19cb9cad7 --- /dev/null +++ b/collectors/charts.d.plugin/apcupsd/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += apcupsd/apcupsd.chart.sh +dist_chartsconfig_DATA += apcupsd/apcupsd.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += apcupsd/README.md apcupsd/Makefile.inc + diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh new file mode 100644 index 000000000..e26ef566a --- /dev/null +++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh @@ -0,0 +1,201 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +apcupsd_ip= +apcupsd_port= + +declare -A apcupsd_sources=( + ["local"]="127.0.0.1:3551" +) + +# how frequently to collect UPS data +apcupsd_update_every=10 + +apcupsd_timeout=3 + +# the priority of apcupsd related to other charts +apcupsd_priority=90000 + +apcupsd_get() { + run -t $apcupsd_timeout apcaccess status "$1" +} + +apcupsd_check() { + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + require_cmd apcaccess || return 1 + + # backwards compatibility + if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ] + then + apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}" + fi + + local host working=0 failed=0 + for host in "${!apcupsd_sources[@]}" + do + run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null + # shellcheck disable=2181 + if [ $? -ne 0 ] + then + error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}." + failed=$((failed + 1)) + elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ] + then + error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online." + failed=$((failed + 1)) + else + working=$((working + 1)) + fi + done + + if [ ${working} -eq 0 ] + then + error "No APC UPSes found available." 
+		return 1
+	fi
+
+	return 0
+}
+
+apcupsd_create() {
+	local host src
+	for host in "${!apcupsd_sources[@]}"
+	do
+		src=${apcupsd_sources[${host}]}
+
+		# create the charts
+		cat < 
+# GPL v3+
+
+# add all your APC UPSes in this array - uncomment it too
+#declare -A apcupsd_sources=(
+#	["local"]="127.0.0.1:3551"
+#)
+
+# how long to wait for apcupsd to respond
+#apcupsd_timeout=3
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#apcupsd_update_every=10
+
+# the charts priority on the dashboard
+#apcupsd_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#apcupsd_retries=10
diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
new file mode 100644
index 000000000..acb2a6fae
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.conf
@@ -0,0 +1,63 @@
+# This is the configuration for charts.d.plugin
+
+# Each of its collectors can read configuration either from this file
+# or a NAME.conf file (where NAME is the collector name).
+# The collector-specific file has higher precedence.
+
+# This file is a shell script too.
+
+# -----------------------------------------------------------------------------
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it, but a small gap
+# will appear in the charts.d.plugin charts.
+#restart_timeout=$[3600 * 4]
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time the collected data is equal or
+# above their update_every.
+#time_divisor=50
+
+# -----------------------------------------------------------------------------
+
+# the default enable/disable for all charts.d collectors
+# the default is "yes"
+# enable_all_charts="yes"
+
+# BY DEFAULT ENABLED MODULES
+# ap=yes
+# nut=yes
+# opensips=yes
+
+# -----------------------------------------------------------------------------
+# THESE NEED TO BE SET TO "force" TO BE ENABLED
+
+# Nothing useful.
+# Just an example charts.d plugin you can use as a template.
+# example=force
+
+# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
+# apache=force
+# cpufreq=force
+# exim=force
+# hddtemp=force
+# mysql=force
+# nginx=force
+# phpfpm=force
+# postfix=force
+# sensors=force
+# squid=force
+# tomcat=force
+
+# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
+# cpu_apps=force
+# mem_apps=force
+# load_average=force
diff --git a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
new file mode 100755
index 000000000..67496c1bd
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck disable=SC2181
+
+# will stop the script for any error
+set -e
+
+me="$0"
+name="$1"
+chart="$2"
+conf="$3"
+
+can_diff=1
+
+tmp1="$(mktemp)"
+tmp2="$(mktemp)"
+
+myset() {
+	set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
+}
+
+# save 2 'set' snapshots
+myset >"$tmp1"
+myset >"$tmp2"
+
+# make sure they don't differ
+diff "$tmp1" "$tmp2" >/dev/null 2>&1
+if [ $?
-ne 0 ] +then + # they differ, we cannot do the check + echo >&2 "$me: cannot check with diff." + can_diff=0 +fi + +# do it again, now including the script +myset >"$tmp1" + +# include the plugin and its config +if [ -f "$conf" ] +then + # shellcheck source=/dev/null + . "$conf" + if [ $? -ne 0 ] + then + echo >&2 "$me: cannot load config file $conf" + rm "$tmp1" "$tmp2" + exit 1 + fi +fi + +# shellcheck source=/dev/null +. "$chart" +if [ $? -ne 0 ] +then + echo >&2 "$me: cannot load chart file $chart" + rm "$tmp1" "$tmp2" + exit 1 +fi + +# remove all variables starting with the plugin name +myset | grep -v "^$name" >"$tmp2" + +if [ $can_diff -eq 1 ] +then + # check if they are different + # make sure they don't differ + diff "$tmp1" "$tmp2" >&2 + if [ $? -ne 0 ] + then + # they differ + rm "$tmp1" "$tmp2" + exit 1 + fi +fi + +rm "$tmp1" "$tmp2" +exit 0 diff --git a/collectors/charts.d.plugin/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin new file mode 100644 index 000000000..1c6e8c5c9 --- /dev/null +++ b/collectors/charts.d.plugin/charts.d.plugin @@ -0,0 +1,743 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis +# GPL v3+ +# +# charts.d.plugin allows easy development of BASH plugins +# +# if you need to run parallel charts.d processes, link this file to a different name +# in the same directory, with a .plugin suffix and netdata will start both of them, +# each will have a different config file and modules configuration directory. +# + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" + +PROGRAM_FILE="$0" +PROGRAM_NAME="$(basename $0)" +PROGRAM_NAME="${PROGRAM_NAME/.plugin}" +MODULE_NAME="main" + +# ----------------------------------------------------------------------------- +# create temp dir + +debug=0 +TMP_DIR= +chartsd_cleanup() { + trap '' EXIT QUIT HUP INT TERM + + if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ] + then + [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..." + rm -rf "$TMP_DIR" + fi + exit 0 +} +trap chartsd_cleanup EXIT QUIT HUP INT TERM + +if [ $UID = "0" ] +then + TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )" +else + TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )" +fi + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + echo "DISABLE" + exit 1 +} + +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- +# check a few commands + +require_cmd() { + local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null) + if [ -z "${x}" -o ! -x "${x}" ] + then + warning "command '${1}' is not found in ${PATH}." 
+ eval "${1^^}_CMD=\"\"" + return 1 + fi + + eval "${1^^}_CMD=\"${x}\"" + return 0 +} + +require_cmd date || exit 1 +require_cmd sed || exit 1 +require_cmd basename || exit 1 +require_cmd dirname || exit 1 +require_cmd cat || exit 1 +require_cmd grep || exit 1 +require_cmd egrep || exit 1 +require_cmd mktemp || exit 1 +require_cmd awk || exit 1 +require_cmd timeout || exit 1 +require_cmd curl || exit 1 + +# ----------------------------------------------------------------------------- + +[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade." + +info "started from '$PROGRAM_FILE' with options: $*" + +# ----------------------------------------------------------------------------- +# internal defaults +# netdata exposes a few environment variables for us + +[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" + +pluginsd="${NETDATA_PLUGINS_DIR}" +stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}" +userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}" +olduserconfd="${NETDATA_USER_CONFIG_DIR}" +chartsd="$pluginsd/../charts.d" + +minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}" +update_every=${minimum_update_frequency} # this will be overwritten by the command line + +# work around for non BASH shells +charts_create="_create" +charts_update="_update" +charts_check="_check" +charts_undescore="_" + +# when making iterations, charts.d can loop more frequently +# to prevent plugins missing iterations. +# this is a percentage relative to update_every to align its +# iterations. +# The minimum is 10%, the maximum 100%. +# So, if update_every is 1 second and time_divisor is 50, +# charts.d will iterate every 500ms. +# Charts will be called to collect data only if the time +# passed since the last time the collected data is equal or +# above their update_every. +time_divisor=50 + +# number of seconds to run without restart +# after this time, charts.d.plugin will exit +# netdata will restart it +restart_timeout=$((3600 * 4)) + +# check if the charts.d plugins are using global variables +# they should not. +# It does not currently support BASH v4 arrays, so it is +# disabled +dryrunner=0 + +# check for timeout command +check_for_timeout=1 + +# the default enable/disable value for all charts +enable_all_charts="yes" + +# ----------------------------------------------------------------------------- +# parse parameters + +check=0 +chart_only= +while [ ! -z "$1" ] +do + if [ "$1" = "check" ] + then + check=1 + shift + continue + fi + + if [ "$1" = "debug" -o "$1" = "all" ] + then + debug=1 + shift + continue + fi + + if [ -f "$chartsd/$1.chart.sh" ] + then + debug=1 + chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )" + shift + continue + fi + + if [ -f "$chartsd/$1" ] + then + debug=1 + chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )" + shift + continue + fi + + # number check + n="$1" + x=$(( n )) + if [ "$x" = "$n" ] + then + shift + update_every=$x + [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency + continue + fi + + fatal "Cannot understand parameter $1. Aborting." 
+done + + +# ----------------------------------------------------------------------------- +# loop control + +# default sleep function +LOOPSLEEPMS_HIGHRES=0 +now_ms= +current_time_ms_default() { + now_ms="$(date +'%s')000" +} +current_time_ms="current_time_ms_default" +current_time_ms_accuracy=1 +mysleep="sleep" + +# if found and included, this file overwrites loopsleepms() +# and current_time_ms() with a high resolution timer function +# for precise looping. +source "$pluginsd/loopsleepms.sh.inc" +[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'." + +# ----------------------------------------------------------------------------- +# load my configuration + +for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf" +do + if [ -f "$myconfig" ] + then + source "$myconfig" + if [ $? -ne 0 ] + then + error "Config file '$myconfig' loaded with errors." + else + info "Configuration file '$myconfig' loaded." + fi + else + warning "Configuration file '$myconfig' not found." + fi +done + +# make sure time_divisor is right +time_divisor=$((time_divisor)) +[ $time_divisor -lt 10 ] && time_divisor=10 +[ $time_divisor -gt 100 ] && time_divisor=100 + + +# we check for the timeout command, after we load our +# configuration, so that the user may overwrite the +# timeout command we use, providing a function that +# can emulate the timeout command we need: +# > timeout SECONDS command ... +if [ $check_for_timeout -eq 1 ] + then + require_cmd timeout || exit 1 +fi + +# ----------------------------------------------------------------------------- +# internal checks + +# netdata passes the requested update frequency as the first argument +update_every=$(( update_every + 1 - 1)) # makes sure it is a number +test $update_every -eq 0 && update_every=1 # if it is zero, make it 1 + +# check the charts.d directory +[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'" + +# ----------------------------------------------------------------------------- +# library functions + +fixid() { + echo "$*" |\ + tr -c "[A-Z][a-z][0-9]" "_" |\ + sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\ + tr "[A-Z]" "[a-z]" +} + +run() { + local ret pid="${BASHPID}" t + + if [ "z${1}" = "z-t" -a "${2}" != "0" ] + then + t="${2}" + shift 2 + timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}" + ret=$? + else + "${@}" 2>"${TMP_DIR}/run.${pid}" + ret=$? 
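`run()`, which this hunk opens, captures the command's stderr in a per-process temp file and logs it (with the full command, printf-%q quoted) only on failure; with `-t SECONDS` it also wraps the command in `timeout`. Hypothetical module calls:

```sh
# stderr is shown only if the command fails
run nc -z localhost 7634 || return 1

# same, but killed after 5 seconds; "-t 0" would disable the timeout
run -t 5 curl --silent --output /dev/null "http://127.0.0.1/status" || return 1
```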
+ fi + + if [ ${ret} -ne 0 ] + then + { + printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '" + printf "%q " "${@}" + printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n" + cat "${TMP_DIR}/run.${pid}" + printf " --- END TRACE ---\n" + } >&2 + fi + rm "${TMP_DIR}/run.${pid}" + + return ${ret} +} + +# convert any floating point number +# to integer, give a multiplier +# the result is stored in ${FLOAT2INT_RESULT} +# so that no fork is necessary +# the multiplier must be a power of 10 +float2int() { + local f m="$2" a b l v=($1) + f=${v[0]} + + # the length of the multiplier - 1 + l=$(( ${#m} - 1 )) + + # check if the number is in scientific notation + if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]] + then + # convert it to decimal + # unfortunately, this fork cannot be avoided + # if you know of a way to avoid it, please let me know + f=$(printf "%0.${l}f" ${f}) + fi + + # split the floating point number + # in integer (a) and decimal (b) + a=${f/.*/} + b=${f/*./} + + # if the integer part is missing + # set it to zero + [ -z "${a}" ] && a="0" + + # strip leading zeros from the integer part + # base 10 convertion + a=$((10#$a)) + + # check the length of the decimal part + # against the length of the multiplier + if [ ${#b} -gt ${l} ] + then + # too many digits - take the most significant + b=${b:0:${l}} + + elif [ ${#b} -lt ${l} ] + then + # too few digits - pad with zero on the right + local z="00000000000000000000000" r=$((l - ${#b})) + b="${b}${z:0:${r}}" + fi + + # strip leading zeros from the decimal part + # base 10 convertion + b=$((10#$b)) + + # store the result + FLOAT2INT_RESULT=$(( (a * m) + b )) +} + + +# ----------------------------------------------------------------------------- +# charts check functions + +all_charts() { + cd "$chartsd" + [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1 + + ls *.chart.sh | sed "s/\.chart\.sh$//g" +} + +declare -A charts_enable_keyword=( + ['apache']="force" + ['cpu_apps']="force" + ['cpufreq']="force" + ['example']="force" + ['exim']="force" + ['hddtemp']="force" + ['load_average']="force" + ['mem_apps']="force" + ['mysql']="force" + ['nginx']="force" + ['phpfpm']="force" + ['postfix']="force" + ['sensors']="force" + ['squid']="force" + ['tomcat']="force" + ) + +all_enabled_charts() { + local charts= enabled= required= + + # find all enabled charts + + for chart in $( all_charts ) + do + MODULE_NAME="${chart}" + + eval "enabled=\$$chart" + if [ -z "${enabled}" ] + then + enabled="${enable_all_charts}" + fi + + required="${charts_enable_keyword[${chart}]}" + [ -z "${required}" ] && required="yes" + + if [ ! "${enabled}" = "${required}" ] + then + info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)." + else + debug "is enabled for auto-detection." + local charts="$charts $chart" + fi + done + MODULE_NAME="main" + + local charts2= + for chart in $charts + do + MODULE_NAME="${chart}" + + # check the enabled charts + local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )" + if [ -z "$check" ] + then + error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it." + continue + fi + + local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )" + if [ -z "$create" ] + then + error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it." 
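`float2int` above deserves a worked example, since it exists purely to avoid forking `bc` or `awk` on every sample. The multiplier must be a power of 10 and decides how many decimal digits survive:

```sh
float2int "1.5" 1000        # a=1, b="5" padded right to "500"
echo ${FLOAT2INT_RESULT}    # prints 1500

float2int "1.5e-2" 1000     # scientific notation forks printf once: 0.015
echo ${FLOAT2INT_RESULT}    # prints 15
```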
+ continue + fi + + local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )" + if [ -z "$update" ] + then + error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it." + continue + fi + + # check its config + #if [ -f "$userconfd/$chart.conf" ] + #then + # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ] + # then + # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it." + # continue + # fi + #fi + + #if [ $dryrunner -eq 1 ] + # then + # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null + # if [ $? -ne 0 ] + # then + # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it." + # continue + # fi + #fi + + local charts2="$charts2 $chart" + done + MODULE_NAME="main" + + echo $charts2 + debug "enabled charts: $charts2" +} + +# ----------------------------------------------------------------------------- +# load the charts + +suffix_retries="_retries" +suffix_update_every="_update_every" +active_charts= +for chart in $( all_enabled_charts ) +do + MODULE_NAME="${chart}" + + debug "loading module: '$chartsd/$chart.chart.sh'" + + source "$chartsd/$chart.chart.sh" + [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors." + + # first load the stock config + if [ -f "$stockconfd/$chart.conf" ] + then + debug "loading module configuration: '$stockconfd/$chart.conf'" + source "$stockconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors." + else + debug "not found module configuration: '$stockconfd/$chart.conf'" + fi + + # then load the user config (it overwrites the stock) + if [ -f "$userconfd/$chart.conf" ] + then + debug "loading module configuration: '$userconfd/$chart.conf'" + source "$userconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors." + else + debug "not found module configuration: '$userconfd/$chart.conf'" + + if [ -f "$olduserconfd/$chart.conf" ] + then + # support for very old netdata that had the charts.d module configs in /etc/netdata + info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'" + source "$olduserconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors." + fi + fi + + eval "dt=\$$chart$suffix_update_every" + dt=$(( dt + 1 - 1 )) # make sure it is a number + if [ $dt -lt $update_every ] + then + eval "$chart$suffix_update_every=$update_every" + fi + + $chart$charts_check + if [ $? -eq 0 ] + then + debug "module '$chart' activated" + active_charts="$active_charts $chart" + else + error "module's '$chart' check() function reports failure." + fi +done +MODULE_NAME="main" +debug "activated modules: $active_charts" + + +# ----------------------------------------------------------------------------- +# check overwrites + +# enable work time reporting +debug_time= +test $debug -eq 1 && debug_time=tellwork + +# if we only need a specific chart, remove all the others +if [ ! 
-z "${chart_only}" ] +then + debug "requested to run only for: '${chart_only}'" + check_charts= + for chart in $active_charts + do + if [ "$chart" = "$chart_only" ] + then + check_charts="$chart" + break + fi + done + active_charts="$check_charts" +fi +debug "activated charts: $active_charts" + +# stop if we just need a pre-check +if [ $check -eq 1 ] +then + info "CHECK RESULT" + info "Will run the charts: $active_charts" + exit 0 +fi + +# ----------------------------------------------------------------------------- + +cd "${TMP_DIR}" || exit 1 + +# ----------------------------------------------------------------------------- +# create charts + +run_charts= +for chart in $active_charts +do + MODULE_NAME="${chart}" + + debug "calling '$chart$charts_create()'..." + $chart$charts_create + if [ $? -eq 0 ] + then + run_charts="$run_charts $chart" + debug "'$chart' initialized." + else + error "module's '$chart' function '$chart$charts_create()' reports failure." + fi +done +MODULE_NAME="main" +debug "run_charts='$run_charts'" + + +# ----------------------------------------------------------------------------- +# update dimensions + +[ -z "$run_charts" ] && fatal "No charts to collect data from." + +declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=() +global_update() { + local exit_at \ + c=0 dt ret last_ms exec_start_ms exec_end_ms \ + chart now_charts=() next_charts=($run_charts) \ + next_ms x seconds millis + + # return the current time in ms in $now_ms + ${current_time_ms} + + exit_at=$(( now_ms + (restart_timeout * 1000) )) + + for chart in $run_charts + do + eval "charts_update_every[$chart]=\$$chart$suffix_update_every" + test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every + + eval "charts_retries[$chart]=\$$chart$suffix_retries" + test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10 + + charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) )) + charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) )) + charts_run_counter[$chart]=0 + charts_serial_failures[$chart]=0 + + echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" + echo "DIMENSION run_time 'run time' absolute 1 1" + done + + # the main loop + while [ "${#next_charts[@]}" -gt 0 ] + do + c=$((c + 1)) + now_charts=("${next_charts[@]}") + next_charts=() + + # return the current time in ms in $now_ms + ${current_time_ms} + + for chart in "${now_charts[@]}" + do + MODULE_NAME="${chart}" + + if [ ${now_ms} -ge ${charts_next_update[$chart]} ] + then + last_ms=${charts_last_update[$chart]} + dt=$(( (now_ms - last_ms) )) + + charts_last_update[$chart]=${now_ms} + + while [ ${charts_next_update[$chart]} -lt ${now_ms} ] + do + charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) )) + done + + # the first call should not give a duration + # so that netdata calibrates to current time + dt=$(( dt * 1000 )) + charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 )) + if [ ${charts_run_counter[$chart]} -eq 1 ] + then + dt= + fi + + exec_start_ms=$now_ms + $chart$charts_update $dt + ret=$? 
+ + # return the current time in ms in $now_ms + ${current_time_ms}; exec_end_ms=$now_ms + + echo "BEGIN netdata.plugin_chartsd_$chart $dt" + echo "SET run_time = $(( exec_end_ms - exec_start_ms ))" + echo "END" + + if [ $ret -eq 0 ] + then + charts_serial_failures[$chart]=0 + next_charts+=($chart) + else + charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 )) + + if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ] + then + error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it." + else + error "module's '$chart' update() function reports failure. Will keep trying for a while." + next_charts+=($chart) + fi + fi + else + next_charts+=($chart) + fi + done + MODULE_NAME="${chart}" + + # wait the time you are required to + next_ms=$((now_ms + (update_every * 1000 * 100) )) + for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done + next_ms=$((next_ms - now_ms)) + + if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ] + then + next_ms=$(( next_ms + current_time_ms_accuracy )) + seconds=$(( next_ms / 1000 )) + millis=$(( next_ms % 1000 )) + if [ ${millis} -lt 10 ] + then + millis="00${millis}" + elif [ ${millis} -lt 100 ] + then + millis="0${millis}" + fi + + debug "sleeping for ${seconds}.${millis} seconds." + ${mysleep} ${seconds}.${millis} + else + debug "sleeping for ${update_every} seconds." + ${mysleep} $update_every + fi + + test ${now_ms} -ge ${exit_at} && exit 0 + done + + fatal "nothing left to do, exiting..." +} + +global_update diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in new file mode 100755 index 000000000..3477894d8 --- /dev/null +++ b/collectors/charts.d.plugin/charts.d.plugin.in @@ -0,0 +1,743 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis +# GPL v3+ +# +# charts.d.plugin allows easy development of BASH plugins +# +# if you need to run parallel charts.d processes, link this file to a different name +# in the same directory, with a .plugin suffix and netdata will start both of them, +# each will have a different config file and modules configuration directory. +# + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" + +PROGRAM_FILE="$0" +PROGRAM_NAME="$(basename $0)" +PROGRAM_NAME="${PROGRAM_NAME/.plugin}" +MODULE_NAME="main" + +# ----------------------------------------------------------------------------- +# create temp dir + +debug=0 +TMP_DIR= +chartsd_cleanup() { + trap '' EXIT QUIT HUP INT TERM + + if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ] + then + [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..." 
+ rm -rf "$TMP_DIR" + fi + exit 0 +} +trap chartsd_cleanup EXIT QUIT HUP INT TERM + +if [ $UID = "0" ] +then + TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )" +else + TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )" +fi + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + echo "DISABLE" + exit 1 +} + +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- +# check a few commands + +require_cmd() { + local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null) + if [ -z "${x}" -o ! -x "${x}" ] + then + warning "command '${1}' is not found in ${PATH}." + eval "${1^^}_CMD=\"\"" + return 1 + fi + + eval "${1^^}_CMD=\"${x}\"" + return 0 +} + +require_cmd date || exit 1 +require_cmd sed || exit 1 +require_cmd basename || exit 1 +require_cmd dirname || exit 1 +require_cmd cat || exit 1 +require_cmd grep || exit 1 +require_cmd egrep || exit 1 +require_cmd mktemp || exit 1 +require_cmd awk || exit 1 +require_cmd timeout || exit 1 +require_cmd curl || exit 1 + +# ----------------------------------------------------------------------------- + +[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade." + +info "started from '$PROGRAM_FILE' with options: $*" + +# ----------------------------------------------------------------------------- +# internal defaults +# netdata exposes a few environment variables for us + +[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" + +pluginsd="${NETDATA_PLUGINS_DIR}" +stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}" +userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}" +olduserconfd="${NETDATA_USER_CONFIG_DIR}" +chartsd="$pluginsd/../charts.d" + +minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}" +update_every=${minimum_update_frequency} # this will be overwritten by the command line + +# work around for non BASH shells +charts_create="_create" +charts_update="_update" +charts_check="_check" +charts_undescore="_" + +# when making iterations, charts.d can loop more frequently +# to prevent plugins missing iterations. +# this is a percentage relative to update_every to align its +# iterations. +# The minimum is 10%, the maximum 100%. +# So, if update_every is 1 second and time_divisor is 50, +# charts.d will iterate every 500ms. +# Charts will be called to collect data only if the time +# passed since the last time the collected data is equal or +# above their update_every. +time_divisor=50 + +# number of seconds to run without restart +# after this time, charts.d.plugin will exit +# netdata will restart it +restart_timeout=$((3600 * 4)) + +# check if the charts.d plugins are using global variables +# they should not. 
+# It does not currently support BASH v4 arrays, so it is +# disabled +dryrunner=0 + +# check for timeout command +check_for_timeout=1 + +# the default enable/disable value for all charts +enable_all_charts="yes" + +# ----------------------------------------------------------------------------- +# parse parameters + +check=0 +chart_only= +while [ ! -z "$1" ] +do + if [ "$1" = "check" ] + then + check=1 + shift + continue + fi + + if [ "$1" = "debug" -o "$1" = "all" ] + then + debug=1 + shift + continue + fi + + if [ -f "$chartsd/$1.chart.sh" ] + then + debug=1 + chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )" + shift + continue + fi + + if [ -f "$chartsd/$1" ] + then + debug=1 + chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )" + shift + continue + fi + + # number check + n="$1" + x=$(( n )) + if [ "$x" = "$n" ] + then + shift + update_every=$x + [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency + continue + fi + + fatal "Cannot understand parameter $1. Aborting." +done + + +# ----------------------------------------------------------------------------- +# loop control + +# default sleep function +LOOPSLEEPMS_HIGHRES=0 +now_ms= +current_time_ms_default() { + now_ms="$(date +'%s')000" +} +current_time_ms="current_time_ms_default" +current_time_ms_accuracy=1 +mysleep="sleep" + +# if found and included, this file overwrites loopsleepms() +# and current_time_ms() with a high resolution timer function +# for precise looping. +source "$pluginsd/loopsleepms.sh.inc" +[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'." + +# ----------------------------------------------------------------------------- +# load my configuration + +for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf" +do + if [ -f "$myconfig" ] + then + source "$myconfig" + if [ $? -ne 0 ] + then + error "Config file '$myconfig' loaded with errors." + else + info "Configuration file '$myconfig' loaded." + fi + else + warning "Configuration file '$myconfig' not found." + fi +done + +# make sure time_divisor is right +time_divisor=$((time_divisor)) +[ $time_divisor -lt 10 ] && time_divisor=10 +[ $time_divisor -gt 100 ] && time_divisor=100 + + +# we check for the timeout command, after we load our +# configuration, so that the user may overwrite the +# timeout command we use, providing a function that +# can emulate the timeout command we need: +# > timeout SECONDS command ... +if [ $check_for_timeout -eq 1 ] + then + require_cmd timeout || exit 1 +fi + +# ----------------------------------------------------------------------------- +# internal checks + +# netdata passes the requested update frequency as the first argument +update_every=$(( update_every + 1 - 1)) # makes sure it is a number +test $update_every -eq 0 && update_every=1 # if it is zero, make it 1 + +# check the charts.d directory +[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'" + +# ----------------------------------------------------------------------------- +# library functions + +fixid() { + echo "$*" |\ + tr -c "[A-Z][a-z][0-9]" "_" |\ + sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\ + tr "[A-Z]" "[a-z]" +} + +run() { + local ret pid="${BASHPID}" t + + if [ "z${1}" = "z-t" -a "${2}" != "0" ] + then + t="${2}" + shift 2 + timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}" + ret=$? + else + "${@}" 2>"${TMP_DIR}/run.${pid}" + ret=$? 
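The config files sourced above may, as the comment above notes, replace the `timeout` command with a shell function that follows the same `timeout SECONDS command ...` calling convention. A hypothetical override for systems without a `timeout` binary (simplified; a production version would also reap the watchdog subshell):

```sh
# hypothetical charts.d.conf snippet: emulate `timeout SECONDS command ...`
timeout() {
    local t="${1}"; shift
    "${@}" &
    local pid=$!
    ( sleep "${t}"; kill ${pid} 2>/dev/null ) &   # watchdog
    wait ${pid}
}
```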
+ fi + + if [ ${ret} -ne 0 ] + then + { + printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '" + printf "%q " "${@}" + printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n" + cat "${TMP_DIR}/run.${pid}" + printf " --- END TRACE ---\n" + } >&2 + fi + rm "${TMP_DIR}/run.${pid}" + + return ${ret} +} + +# convert any floating point number +# to integer, give a multiplier +# the result is stored in ${FLOAT2INT_RESULT} +# so that no fork is necessary +# the multiplier must be a power of 10 +float2int() { + local f m="$2" a b l v=($1) + f=${v[0]} + + # the length of the multiplier - 1 + l=$(( ${#m} - 1 )) + + # check if the number is in scientific notation + if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]] + then + # convert it to decimal + # unfortunately, this fork cannot be avoided + # if you know of a way to avoid it, please let me know + f=$(printf "%0.${l}f" ${f}) + fi + + # split the floating point number + # in integer (a) and decimal (b) + a=${f/.*/} + b=${f/*./} + + # if the integer part is missing + # set it to zero + [ -z "${a}" ] && a="0" + + # strip leading zeros from the integer part + # base 10 convertion + a=$((10#$a)) + + # check the length of the decimal part + # against the length of the multiplier + if [ ${#b} -gt ${l} ] + then + # too many digits - take the most significant + b=${b:0:${l}} + + elif [ ${#b} -lt ${l} ] + then + # too few digits - pad with zero on the right + local z="00000000000000000000000" r=$((l - ${#b})) + b="${b}${z:0:${r}}" + fi + + # strip leading zeros from the decimal part + # base 10 convertion + b=$((10#$b)) + + # store the result + FLOAT2INT_RESULT=$(( (a * m) + b )) +} + + +# ----------------------------------------------------------------------------- +# charts check functions + +all_charts() { + cd "$chartsd" + [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1 + + ls *.chart.sh | sed "s/\.chart\.sh$//g" +} + +declare -A charts_enable_keyword=( + ['apache']="force" + ['cpu_apps']="force" + ['cpufreq']="force" + ['example']="force" + ['exim']="force" + ['hddtemp']="force" + ['load_average']="force" + ['mem_apps']="force" + ['mysql']="force" + ['nginx']="force" + ['phpfpm']="force" + ['postfix']="force" + ['sensors']="force" + ['squid']="force" + ['tomcat']="force" + ) + +all_enabled_charts() { + local charts= enabled= required= + + # find all enabled charts + + for chart in $( all_charts ) + do + MODULE_NAME="${chart}" + + eval "enabled=\$$chart" + if [ -z "${enabled}" ] + then + enabled="${enable_all_charts}" + fi + + required="${charts_enable_keyword[${chart}]}" + [ -z "${required}" ] && required="yes" + + if [ ! "${enabled}" = "${required}" ] + then + info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)." + else + debug "is enabled for auto-detection." + local charts="$charts $chart" + fi + done + MODULE_NAME="main" + + local charts2= + for chart in $charts + do + MODULE_NAME="${chart}" + + # check the enabled charts + local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )" + if [ -z "$check" ] + then + error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it." + continue + fi + + local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )" + if [ -z "$create" ] + then + error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it." 
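`all_enabled_charts()` above (repeated here for the `.in` template) keeps a module only if grepping its source finds `X_check()`, `X_create()` and `X_update()` defined at the start of a line. The smallest module that passes those checks looks roughly like this (hypothetical `mymod/mymod.chart.sh`):

```sh
# mymod.chart.sh - all globals and functions carry the module prefix
mymod_update_every=

mymod_check() {
    return 0                      # 0 enables the module
}

mymod_create() {
    echo "CHART mymod.example '' 'Example chart' 'units' example '' line 150000 $mymod_update_every"
    echo "DIMENSION value '' absolute 1 1"
    return 0
}

mymod_update() {
    # $1 is the microseconds since the last run
    echo "BEGIN mymod.example $1"
    echo "SET value = ${RANDOM}"
    echo "END"
    return 0
}
```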
+ continue + fi + + local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )" + if [ -z "$update" ] + then + error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it." + continue + fi + + # check its config + #if [ -f "$userconfd/$chart.conf" ] + #then + # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ] + # then + # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it." + # continue + # fi + #fi + + #if [ $dryrunner -eq 1 ] + # then + # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null + # if [ $? -ne 0 ] + # then + # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it." + # continue + # fi + #fi + + local charts2="$charts2 $chart" + done + MODULE_NAME="main" + + echo $charts2 + debug "enabled charts: $charts2" +} + +# ----------------------------------------------------------------------------- +# load the charts + +suffix_retries="_retries" +suffix_update_every="_update_every" +active_charts= +for chart in $( all_enabled_charts ) +do + MODULE_NAME="${chart}" + + debug "loading module: '$chartsd/$chart.chart.sh'" + + source "$chartsd/$chart.chart.sh" + [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors." + + # first load the stock config + if [ -f "$stockconfd/$chart.conf" ] + then + debug "loading module configuration: '$stockconfd/$chart.conf'" + source "$stockconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors." + else + debug "not found module configuration: '$stockconfd/$chart.conf'" + fi + + # then load the user config (it overwrites the stock) + if [ -f "$userconfd/$chart.conf" ] + then + debug "loading module configuration: '$userconfd/$chart.conf'" + source "$userconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors." + else + debug "not found module configuration: '$userconfd/$chart.conf'" + + if [ -f "$olduserconfd/$chart.conf" ] + then + # support for very old netdata that had the charts.d module configs in /etc/netdata + info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'" + source "$olduserconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors." + fi + fi + + eval "dt=\$$chart$suffix_update_every" + dt=$(( dt + 1 - 1 )) # make sure it is a number + if [ $dt -lt $update_every ] + then + eval "$chart$suffix_update_every=$update_every" + fi + + $chart$charts_check + if [ $? -eq 0 ] + then + debug "module '$chart' activated" + active_charts="$active_charts $chart" + else + error "module's '$chart' check() function reports failure." + fi +done +MODULE_NAME="main" +debug "activated modules: $active_charts" + + +# ----------------------------------------------------------------------------- +# check overwrites + +# enable work time reporting +debug_time= +test $debug -eq 1 && debug_time=tellwork + +# if we only need a specific chart, remove all the others +if [ ! 
-z "${chart_only}" ] +then + debug "requested to run only for: '${chart_only}'" + check_charts= + for chart in $active_charts + do + if [ "$chart" = "$chart_only" ] + then + check_charts="$chart" + break + fi + done + active_charts="$check_charts" +fi +debug "activated charts: $active_charts" + +# stop if we just need a pre-check +if [ $check -eq 1 ] +then + info "CHECK RESULT" + info "Will run the charts: $active_charts" + exit 0 +fi + +# ----------------------------------------------------------------------------- + +cd "${TMP_DIR}" || exit 1 + +# ----------------------------------------------------------------------------- +# create charts + +run_charts= +for chart in $active_charts +do + MODULE_NAME="${chart}" + + debug "calling '$chart$charts_create()'..." + $chart$charts_create + if [ $? -eq 0 ] + then + run_charts="$run_charts $chart" + debug "'$chart' initialized." + else + error "module's '$chart' function '$chart$charts_create()' reports failure." + fi +done +MODULE_NAME="main" +debug "run_charts='$run_charts'" + + +# ----------------------------------------------------------------------------- +# update dimensions + +[ -z "$run_charts" ] && fatal "No charts to collect data from." + +declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=() +global_update() { + local exit_at \ + c=0 dt ret last_ms exec_start_ms exec_end_ms \ + chart now_charts=() next_charts=($run_charts) \ + next_ms x seconds millis + + # return the current time in ms in $now_ms + ${current_time_ms} + + exit_at=$(( now_ms + (restart_timeout * 1000) )) + + for chart in $run_charts + do + eval "charts_update_every[$chart]=\$$chart$suffix_update_every" + test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every + + eval "charts_retries[$chart]=\$$chart$suffix_retries" + test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10 + + charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) )) + charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) )) + charts_run_counter[$chart]=0 + charts_serial_failures[$chart]=0 + + echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" + echo "DIMENSION run_time 'run time' absolute 1 1" + done + + # the main loop + while [ "${#next_charts[@]}" -gt 0 ] + do + c=$((c + 1)) + now_charts=("${next_charts[@]}") + next_charts=() + + # return the current time in ms in $now_ms + ${current_time_ms} + + for chart in "${now_charts[@]}" + do + MODULE_NAME="${chart}" + + if [ ${now_ms} -ge ${charts_next_update[$chart]} ] + then + last_ms=${charts_last_update[$chart]} + dt=$(( (now_ms - last_ms) )) + + charts_last_update[$chart]=${now_ms} + + while [ ${charts_next_update[$chart]} -lt ${now_ms} ] + do + charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) )) + done + + # the first call should not give a duration + # so that netdata calibrates to current time + dt=$(( dt * 1000 )) + charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 )) + if [ ${charts_run_counter[$chart]} -eq 1 ] + then + dt= + fi + + exec_start_ms=$now_ms + $chart$charts_update $dt + ret=$? 
+ + # return the current time in ms in $now_ms + ${current_time_ms}; exec_end_ms=$now_ms + + echo "BEGIN netdata.plugin_chartsd_$chart $dt" + echo "SET run_time = $(( exec_end_ms - exec_start_ms ))" + echo "END" + + if [ $ret -eq 0 ] + then + charts_serial_failures[$chart]=0 + next_charts+=($chart) + else + charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 )) + + if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ] + then + error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it." + else + error "module's '$chart' update() function reports failure. Will keep trying for a while." + next_charts+=($chart) + fi + fi + else + next_charts+=($chart) + fi + done + MODULE_NAME="${chart}" + + # wait the time you are required to + next_ms=$((now_ms + (update_every * 1000 * 100) )) + for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done + next_ms=$((next_ms - now_ms)) + + if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ] + then + next_ms=$(( next_ms + current_time_ms_accuracy )) + seconds=$(( next_ms / 1000 )) + millis=$(( next_ms % 1000 )) + if [ ${millis} -lt 10 ] + then + millis="00${millis}" + elif [ ${millis} -lt 100 ] + then + millis="0${millis}" + fi + + debug "sleeping for ${seconds}.${millis} seconds." + ${mysleep} ${seconds}.${millis} + else + debug "sleeping for ${update_every} seconds." + ${mysleep} $update_every + fi + + test ${now_ms} -ge ${exit_at} && exit 0 + done + + fatal "nothing left to do, exiting..." +} + +global_update diff --git a/collectors/charts.d.plugin/cpu_apps/Makefile.inc b/collectors/charts.d.plugin/cpu_apps/Makefile.inc new file mode 100644 index 000000000..a35f82837 --- /dev/null +++ b/collectors/charts.d.plugin/cpu_apps/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += cpu_apps/cpu_apps.chart.sh +dist_chartsconfig_DATA += cpu_apps/cpu_apps.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += cpu_apps/README.md cpu_apps/Makefile.inc + diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md new file mode 100644 index 000000000..cd8adf0a2 --- /dev/null +++ b/collectors/charts.d.plugin/cpu_apps/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE APPS.PLUGIN. diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh new file mode 100644 index 000000000..869464afe --- /dev/null +++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh @@ -0,0 +1,72 @@ +# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2016 Costa Tsaousis +# +# THIS PLUGIN IS OBSOLETE +# USE apps.plugin INSTEAD + +# a space separated list of command to monitor +cpu_apps_apps= + +# these are required for computing memory in bytes and cpu in seconds +#cpu_apps_pagesize="`getconf PAGESIZE`" +cpu_apps_clockticks="$(getconf CLK_TCK)" + +cpu_apps_update_every=60 + +cpu_apps_check() { + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + if [ -z "$cpu_apps_apps" ] + then + error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in $confd/cpu_apps_apps.conf" + return 1 + fi + return 0 +} + +cpu_apps_bc_finalze= + +cpu_apps_create() { + + echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every" + + local x= + for x in $cpu_apps_apps + do + echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks" + + # this string is needed later in the update() function + # to finalize the instructions for the bc command + cpu_apps_bc_finalze="$cpu_apps_bc_finalze \"SET $x = \"; $x;" + done + return 0 +} + +cpu_apps_update() { + # do all the work to collect / calculate the values + # for each dimension + # remember: KEEP IT SIMPLE AND SHORT + + echo "BEGIN chartsd_apps.cpu" + ps -o pid,comm -C "$cpu_apps_apps" |\ + grep -v "COMMAND" |\ + ( + while read pid name + do + echo "$name+=`cat /proc/$pid/stat | cut -d ' ' -f 14-15`" + done + ) |\ + ( sed -e "s/ \+/ /g" -e "s/ /+/g"; + echo "$cpu_apps_bc_finalze" + ) | bc + echo "END" + + return 0 +} diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf new file mode 100644 index 000000000..850cd0c6f --- /dev/null +++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf @@ -0,0 +1,19 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# app.plugin can do better + +#cpu_apps_apps= + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#cpu_apps_update_every=2 + +# the number of retries to do in case of failure +# before disabling the module +#cpu_apps_retries=10 diff --git a/collectors/charts.d.plugin/cpufreq/Makefile.inc b/collectors/charts.d.plugin/cpufreq/Makefile.inc new file mode 100644 index 000000000..682379133 --- /dev/null +++ b/collectors/charts.d.plugin/cpufreq/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += cpufreq/cpufreq.chart.sh +dist_chartsconfig_DATA += cpufreq/cpufreq.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc + diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md new file mode 100644 index 000000000..d82951aac --- /dev/null +++ b/collectors/charts.d.plugin/cpufreq/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. 
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh new file mode 100644 index 000000000..1fc6caabf --- /dev/null +++ b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh @@ -0,0 +1,90 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +# if this chart is called X.chart.sh, then all functions and global variables +# must start with X_ + +cpufreq_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" +cpufreq_sys_depth=10 +cpufreq_source_update=1 + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +cpufreq_update_every= +cpufreq_priority=10000 + +cpufreq_find_all_files() { + find "$1" -maxdepth $cpufreq_sys_depth -name scaling_cur_freq 2>/dev/null +} + +# _check is called once, to find out if this chart should be enabled or not +cpufreq_check() { + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + [ -z "$( cpufreq_find_all_files "$cpufreq_sys_dir" )" ] && return 1 + return 0 +} + +# _create is called once, to create the charts +cpufreq_create() { + local dir file id i + + # we create a script with the source of the + # cpufreq_update() function + # - the highest speed we can achieve - + [ $cpufreq_source_update -eq 1 ] && echo >"$TMP_DIR/cpufreq.sh" "cpufreq_update() {" + + echo "CHART cpu.cpufreq '' 'CPU Clock' 'MHz' 'cpufreq' '' line $((cpufreq_priority + 1)) $cpufreq_update_every" + echo >>"$TMP_DIR/cpufreq.sh" "echo \"BEGIN cpu.cpufreq \$1\"" + + i=0 + for file in $( cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u ) + do + i=$(( i + 1 )) + dir=$( dirname "$file" ) + cpu= + + [ -f "$dir/affected_cpus" ] && cpu=$( cat "$dir/affected_cpus" ) + [ -z "$cpu" ] && cpu="$i.a" + + id="$( fixid "cpu$cpu" )" + + debug "file='$file', dir='$dir', cpu='$cpu', id='$id'" + + echo "DIMENSION $id '$id' absolute 1 1000" + echo >>"$TMP_DIR/cpufreq.sh" "echo \"SET $id = \"\$(< $file )" + done + echo >>"$TMP_DIR/cpufreq.sh" "echo END" + + [ $cpufreq_source_update -eq 1 ] && echo >>"$TMP_DIR/cpufreq.sh" "}" + + # ok, load the function cpufreq_update() we created + # shellcheck disable=SC1090 + [ $cpufreq_source_update -eq 1 ] && . "$TMP_DIR/cpufreq.sh" + + return 0 +} + +# _update is called continuously, to collect the values +cpufreq_update() { + # the first argument to this function is the microseconds since last update + # pass this parameter to the BEGIN statement (see bellow). + + # do all the work to collect / calculate the values + # for each dimension + # remember: KEEP IT SIMPLE AND SHORT + # shellcheck disable=SC1090 + [ $cpufreq_source_update -eq 0 ] && . "$TMP_DIR/cpufreq.sh" "$1" + + return 0 +} + diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.conf b/collectors/charts.d.plugin/cpufreq/cpufreq.conf new file mode 100644 index 000000000..7130555af --- /dev/null +++ b/collectors/charts.d.plugin/cpufreq/cpufreq.conf @@ -0,0 +1,24 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2018 Costa Tsaousis +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +#cpufreq_sys_dir="/sys/devices" +#cpufreq_sys_depth=10 +#cpufreq_source_update=1 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#cpufreq_update_every= + +# the charts priority on the dashboard +#cpufreq_priority=10000 + +# the number of retries to do in case of failure +# before disabling the module +#cpufreq_retries=10 diff --git a/collectors/charts.d.plugin/example/Makefile.inc b/collectors/charts.d.plugin/example/Makefile.inc new file mode 100644 index 000000000..e6838fbbe --- /dev/null +++ b/collectors/charts.d.plugin/example/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += example/example.chart.sh +dist_chartsconfig_DATA += example/example.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += example/README.md example/Makefile.inc + diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md new file mode 100644 index 000000000..bfd5e210a --- /dev/null +++ b/collectors/charts.d.plugin/example/README.md @@ -0,0 +1,2 @@ +This is just an example charts.d data collector. + diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh new file mode 100644 index 000000000..1562c597a --- /dev/null +++ b/collectors/charts.d.plugin/example/example.chart.sh @@ -0,0 +1,126 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +# if this chart is called X.chart.sh, then all functions and global variables +# must start with X_ + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +example_update_every= + +# the priority is used to sort the charts on the dashboard +# 1 = the first chart +example_priority=150000 + +# to enable this chart, you have to set this to 12345 +# (just a demonstration for something that needs to be checked) +example_magic_number= + +# global variables to store our collected data +# remember: they need to start with the module name example_ +example_value1= +example_value2= +example_value3= +example_value4= +example_last=0 +example_count=0 + +example_get() { + # do all the work to collect / calculate the values + # for each dimension + # + # Remember: + # 1. KEEP IT SIMPLE AND SHORT + # 2. AVOID FORKS (avoid piping commands) + # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS + # 4. 
USE LOCAL VARIABLES (global variables may overlap with other modules) + + example_value1=$RANDOM + example_value2=$RANDOM + example_value3=$RANDOM + example_value4=$((8192 + (RANDOM * 16383 / 32767) )) + + if [ $example_count -gt 0 ] + then + example_count=$((example_count - 1)) + + [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767))) + [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767))) + else + example_count=$((1 + (RANDOM * 5 / 32767) )) + + if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ] + then + example_value4=$((example_value4 - 16383)) + fi + if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ] + then + example_value4=$((example_value4 + 16383)) + fi + fi + example_last=$example_value4 + + # this should return: + # - 0 to send the data to netdata + # - 1 to report a failure to collect the data + + return 0 +} + +# _check is called once, to find out if this chart should be enabled or not +example_check() { + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + # check something + [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1 + + # check that we can collect data + example_get || return 1 + + return 0 +} + +# _create is called once, to create the charts +example_create() { + # create the chart with 3 dimensions + cat < +# GPL v3+ + +# to enable this chart, you have to set this to 12345 +# (just a demonstration for something that needs to be checked) +#example_magic_number=12345 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#example_update_every= + +# the charts priority on the dashboard +#example_priority=150000 + +# the number of retries to do in case of failure +# before disabling the module +#example_retries=10 diff --git a/collectors/charts.d.plugin/exim/Makefile.inc b/collectors/charts.d.plugin/exim/Makefile.inc new file mode 100644 index 000000000..ca2112a80 --- /dev/null +++ b/collectors/charts.d.plugin/exim/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += exim/exim.chart.sh +dist_chartsconfig_DATA += exim/exim.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += exim/README.md exim/Makefile.inc + diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md new file mode 100644 index 000000000..d82951aac --- /dev/null +++ b/collectors/charts.d.plugin/exim/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/collectors/charts.d.plugin/exim/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh new file mode 100644 index 000000000..8099a7249 --- /dev/null +++ b/collectors/charts.d.plugin/exim/exim.chart.sh @@ -0,0 +1,48 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2016 Costa Tsaousis +# +# Contributed by @jsveiga with PR #480 + +# the exim command to run +exim_command= + +# how frequently to collect queue size +exim_update_every=5 + +exim_priority=60000 + +exim_check() { + if [ -z "${exim_command}" ] + then + require_cmd exim || return 1 + exim_command="${EXIM_CMD}" + fi + + if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ] + then + error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file" + return 1 + fi + + return 0 +} + +exim_create() { + cat <<EOF +CHART exim.qemails '' "Exim Queue Emails" "emails" queue exim.queued.emails line $((exim_priority + 1)) $exim_update_every +DIMENSION emails '' absolute 1 1 +EOF + return 0 +} + +exim_update() { + echo "BEGIN exim.qemails $1" + echo "SET emails = $(run "${exim_command}" -bpc)" + echo "END" + return 0 +} diff --git a/collectors/charts.d.plugin/exim/exim.conf b/collectors/charts.d.plugin/exim/exim.conf new file mode 100644 --- /dev/null +++ b/collectors/charts.d.plugin/exim/exim.conf @@ -0,0 +1,24 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +# the exim command to run +# if empty, it will use the one found in the system path +#exim_command= + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#exim_update_every=5 + +# the charts priority on the dashboard +#exim_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#exim_retries=10 diff --git a/collectors/charts.d.plugin/hddtemp/Makefile.inc b/collectors/charts.d.plugin/hddtemp/Makefile.inc new file mode 100644 index 000000000..2bd29e5b1 --- /dev/null +++ b/collectors/charts.d.plugin/hddtemp/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += hddtemp/hddtemp.chart.sh +dist_chartsconfig_DATA += hddtemp/hddtemp.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc + diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md new file mode 100644 index 000000000..98f18900c --- /dev/null +++ b/collectors/charts.d.plugin/hddtemp/README.md @@ -0,0 +1,28 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +# hddtemp + +The plugin collects disk temperatures. + +It creates one chart with all active disks: + +1. **temperature in Celsius** + +### configuration + +hddtemp needs to be running in daemonized mode. + +```sh +# host with daemonized hddtemp +hddtemp_host="localhost" + +# port on which hddtemp is showing data +hddtemp_port="7634" + +# array of included disks +# the default is to include all +hddtemp_disks=() +``` + +--- diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh new file mode 100644 index 000000000..e90310981 --- /dev/null +++ b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh @@ -0,0 +1,77 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis +# +# contributed by @paulfantom with PR #511 + +# if this chart is called X.chart.sh, then all functions and global variables +# must start with X_ +hddtemp_host="localhost" +hddtemp_port="7634" +declare -A hddtemp_disks=() + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +hddtemp_update_every=3 +hddtemp_priority=90000 + +# _check is called once, to find out if this chart should be enabled or not +hddtemp_check() { + require_cmd nc || return 1 + run nc $hddtemp_host $hddtemp_port && return 0 || return 1 +} + +# _create is called once, to create the charts +hddtemp_create() { + if [ ${#hddtemp_disks[@]} -eq 0 ]; then + local all + all=$(nc $hddtemp_host $hddtemp_port ) + unset hddtemp_disks + # shellcheck disable=SC2190,SC2207 + hddtemp_disks=( $(grep -Po '/dev/[^|]+' <<< "$all" | cut -c 6-) ) + fi +# local disk_names +# disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`) + + echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every" + for i in $(seq 0 $((${#hddtemp_disks[@]}-1))); do +# echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1" + echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1" + done + return 0 +} + +# _update is called continuously, to collect the values +#hddtemp_last=0 +#hddtemp_count=0 +hddtemp_update() { +# local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` ) +# local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` ) + OLD_IFS=$IFS + set -f + # shellcheck disable=SC2207 + IFS="|" all=( $(nc $hddtemp_host $hddtemp_port 2>/dev/null) ) + set +f + IFS=$OLD_IFS + + # check if there is some data + if [ -z "${all[3]}" ]; then + return 1 + fi + + # write the result of the work. + echo "BEGIN hddtemp.temperature $1" + end=${#hddtemp_disks[@]} + for ((i=0; i < end; i++)); do + # temperature - this will turn SLP to zero + echo "SET ${hddtemp_disks[$i]} = $(( all[ $((i * 5 + 3)) ] ))" + done + echo "END" + + return 0 +} diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.conf b/collectors/charts.d.plugin/hddtemp/hddtemp.conf new file mode 100644 --- /dev/null +++ b/collectors/charts.d.plugin/hddtemp/hddtemp.conf @@ -0,0 +1,23 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +#hddtemp_host="localhost" +#hddtemp_port="7634" + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#hddtemp_update_every=3 + +# the charts priority on the dashboard +#hddtemp_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#hddtemp_retries=10 diff --git a/collectors/charts.d.plugin/libreswan/Makefile.inc b/collectors/charts.d.plugin/libreswan/Makefile.inc new file mode 100644 index 000000000..af767d0dd --- /dev/null +++ b/collectors/charts.d.plugin/libreswan/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += libreswan/libreswan.chart.sh +dist_chartsconfig_DATA += libreswan/libreswan.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += libreswan/README.md libreswan/Makefile.inc + diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md new file mode 100644 index 000000000..41026cf72 --- /dev/null +++ b/collectors/charts.d.plugin/libreswan/README.md @@ -0,0 +1,42 @@ +# libreswan + +The plugin collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels.
+ +The following charts are created, **per tunnel**: + +1. **Uptime** + + * the uptime of the tunnel + +2. **Traffic** + + * bytes in + * bytes out + +### configuration + +Its config file is `/etc/netdata/charts.d/libreswan.conf`. + +The plugin executes 2 commands to collect all the information it needs: + +```sh +ipsec whack --status +ipsec whack --trafficstatus +``` + +The first command is used to extract the currently established tunnels, their IDs and their names. +The second command is used to extract the current uptime and traffic. + +Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied. +The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics. + +To allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content: + +``` +netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status +netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus +``` + +Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path). + +--- diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh new file mode 100644 index 000000000..6e29f8473 --- /dev/null +++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh @@ -0,0 +1,176 @@ +# shellcheck shell=bash disable=SC1117 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +libreswan_update_every=1 + +# the priority is used to sort the charts on the dashboard +# 1 = the first chart +libreswan_priority=90000 + +# set to 1, to run ipsec with sudo +libreswan_sudo=1 + +# global variables to store our collected data + +# [TUNNELID] = TUNNELNAME +# here we track the *latest* established tunnels +# as detected by: ipsec whack --status +declare -A libreswan_connected_tunnels=() + +# [TUNNELID] = VALUE +# here we track values of all established tunnels (not only the latest) +# as detected by: ipsec whack --trafficstatus +declare -A libreswan_traffic_in=() +declare -A libreswan_traffic_out=() +declare -A libreswan_established_add_time=() + +# [TUNNELNAME] = CHARTID +# here we remember CHARTIDs of all tunnels +# we need this to avoid converting tunnel names to chart IDs on every iteration +declare -A libreswan_tunnel_charts=() + +# run the ipsec command +libreswan_ipsec() { + if [ ${libreswan_sudo} -ne 0 ] + then + sudo -n "${IPSEC_CMD}" "${@}" + return $? + else + "${IPSEC_CMD}" "${@}" + return $? 
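`libreswan_get()` just below turns the `ipsec whack --trafficstatus` output into shell assignments with sed and sources the result, so no per-field forking is needed. A sample status line and what the second sed rule makes of it (the input line is illustrative, shaped after the regex):

```sh
# input:  006 #2: "tunnel-a", ... add_time=1541500000, ... inBytes=1234, ... outBytes=5678 ...
# output sourced into the plugin:
libreswan_traffic_in["2"]="1234"
libreswan_traffic_out["2"]="5678"
libreswan_established_add_time["2"]="1541500000"
```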
+ fi +} + +# fetch latest values - fill the arrays +libreswan_get() { + # do all the work to collect / calculate the values + # for each dimension + + # empty the variables + libreswan_traffic_in=() + libreswan_traffic_out=() + libreswan_established_add_time=() + libreswan_connected_tunnels=() + + # convert the ipsec command output to a shell script + # and source it to get the values + # shellcheck disable=SC1090 + source <( + { + libreswan_ipsec whack --status; + libreswan_ipsec whack --trafficstatus; + } | sed -n \ + -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \ + -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p" + ) || return 1 + + # check we got some data + [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1 + + return 0 +} + +# _check is called once, to find out if this chart should be enabled or not +libreswan_check() { + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + require_cmd ipsec || return 1 + + # make sure it is libreswan + # shellcheck disable=SC2143 + if [ -z "$(ipsec --version | grep -i libreswan)" ] + then + error "ipsec command is not Libreswan. Disabling Libreswan plugin." + return 1 + fi + + # check that we can collect data + libreswan_get || return 1 + + return 0 +} + +# create the charts for an ipsec tunnel +libreswan_create_one() { + local n="${1}" name + + name="${libreswan_connected_tunnels[${n}]}" + + [ ! -z "${libreswan_tunnel_charts[${name}]}" ] && return 0 + + libreswan_tunnel_charts[${name}]="$(fixid "${name}")" + + cat < +# GPL v3+ +# + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#libreswan_update_every=1 + +# the charts priority on the dashboard +#libreswan_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#libreswan_retries=10 + +# set to 1, to run ipsec with sudo (the default) +# set to 0, to run ipsec without sudo +#libreswan_sudo=1 + +# TO ALLOW NETDATA RUN ipsec AS ROOT +# CREATE THE FILE: /etc/sudoers.d/netdata +# WITH THESE 2 LINES (uncommented of course): +# +# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status +# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus diff --git a/collectors/charts.d.plugin/load_average/Makefile.inc b/collectors/charts.d.plugin/load_average/Makefile.inc new file mode 100644 index 000000000..e5a481bf4 --- /dev/null +++ b/collectors/charts.d.plugin/load_average/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += load_average/load_average.chart.sh +dist_chartsconfig_DATA += load_average/load_average.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += load_average/README.md load_average/Makefile.inc + diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md new file mode 100644 index 000000000..39d3b8189 --- /dev/null +++ b/collectors/charts.d.plugin/load_average/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. 
+> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF diff --git a/collectors/charts.d.plugin/load_average/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh new file mode 100644 index 000000000..b30cb850f --- /dev/null +++ b/collectors/charts.d.plugin/load_average/load_average.chart.sh @@ -0,0 +1,71 @@ +# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +load_average_update_every=5 +load_priority=100 + +# this is an example charts.d collector +# it is disabled by default. +# there is no point to enable it, since netdata already +# collects this information using its internal plugins. +load_average_enabled=0 + +load_average_check() { + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + if [ ${load_average_update_every} -lt 5 ] + then + # there is no meaning for shorter than 5 seconds + # the kernel changes this value every 5 seconds + load_average_update_every=5 + fi + + [ ${load_average_enabled} -eq 0 ] && return 1 + return 0 +} + +load_average_create() { + # create a chart with 3 dimensions +cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# netdata can collect this metric already + +#load_average_enabled=0 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#load_average_update_every=5 + +# the charts priority on the dashboard +#load_average_priority=100 + +# the number of retries to do in case of failure +# before disabling the module +#load_average_retries=10 diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc new file mode 100644 index 000000000..bdc032b99 --- /dev/null +++ b/collectors/charts.d.plugin/loopsleepms.sh.inc @@ -0,0 +1,237 @@ +# no need for shebang - this file is included from other scripts +# SPDX-License-Identifier: GPL-3.0-or-later + +LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)" +if [ -z "$LOOPSLEEP_DATE" ] + then + echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path." + exit 1 +fi + +# ----------------------------------------------------------------------------- +# use the date command as a high resolution timer + +now_ms= +LOOPSLEEPMS_HIGHRES=1 +test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0 +test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0 +current_time_ms_from_date() { + if [ $LOOPSLEEPMS_HIGHRES -eq 0 ] + then + now_ms="$($LOOPSLEEP_DATE +'%s')000" + else + now_ms="$(( $( $LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000' ) ))" + fi +} + +# ----------------------------------------------------------------------------- +# use /proc/uptime as a high resolution timer + +current_time_ms_from_date +current_time_ms_from_uptime_started="${now_ms}" +current_time_ms_from_uptime_last="${now_ms}" +current_time_ms_from_uptime_first=0 +current_time_ms_from_uptime() { + local up rest arr=() n + + read up rest &2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()." 
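+        # permanently switch this run over to the date-based timer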
+ current_time_ms="current_time_ms_from_date" + current_time_ms_from_date + current_time_ms_accuracy=1 + return + fi + + arr=(${up//./ }) + + if [ ${#arr[1]} -lt 1 ] + then + n="${arr[0]}000" + elif [ ${#arr[1]} -lt 2 ] + then + n="${arr[0]}${arr[1]}00" + elif [ ${#arr[1]} -lt 3 ] + then + n="${arr[0]}${arr[1]}0" + else + n="${arr[0]}${arr[1]}" + fi + + now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n)) + + if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ] + then + echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()." + current_time_ms="current_time_ms_from_date" + current_time_ms_from_date + current_time_ms_accuracy=1 + fi + + current_time_ms_from_uptime_last="${now_ms}" +} +current_time_ms_from_uptime +current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))" +current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}" +current_time_ms="current_time_ms_from_uptime" +current_time_ms_accuracy=10 +if [ "${current_time_ms_from_uptime_first}" -eq 0 ] + then + echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()." + current_time_ms="current_time_ms_from_date" + current_time_ms_accuracy=1 +fi + +# ----------------------------------------------------------------------------- +# use read with timeout for sleep + +mysleep="" + +mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo" +[ -f "${mysleep_fifo}" ] && rm "${mysleep_fifo}" +[ ! -p "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}" +[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read" + +mysleep_read() { + read -t "${1}" <>"${mysleep_fifo}" + ret=$? + if [ $ret -le 128 ] + then + echo >&2 "$0: Cannot use read for sleeping (return code ${ret})." + mysleep="sleep" + ${mysleep} "${1}" + fi +} + +# ----------------------------------------------------------------------------- +# use bash loadable module for sleep + +mysleep_builtin() { + builtin sleep "${1}" + ret=$? + if [ $ret -ne 0 ] + then + echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})." + mysleep="sleep" + ${mysleep} "${1}" + fi +} + +if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] +0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ] + then + # enable modules only for bash version 3+ + + for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash" + do + [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue + + # check for sleep + for bash_module_sleep in "sleep" "sleep.so" + do + if [ -f "${bash_modules_path}/${bash_module_sleep}" ] + then + if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null + then + mysleep="mysleep_builtin" + # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep" + break + fi + fi + + done + + [ ! 
-z "${mysleep}" ] && break + done +fi + +# ----------------------------------------------------------------------------- +# fallback to external sleep + +[ -z "${mysleep}" ] && mysleep="sleep" + + +# ----------------------------------------------------------------------------- +# this function is used to sleep a fraction of a second +# it calculates the difference between every time is called +# and tries to align the sleep time to give you exactly the +# loop you need. + +LOOPSLEEPMS_LASTRUN=0 +LOOPSLEEPMS_NEXTRUN=0 +LOOPSLEEPMS_LASTSLEEP=0 +LOOPSLEEPMS_LASTWORK=0 + +loopsleepms() { + local tellwork=0 t="${1}" div s m now mstosleep + + if [ "${t}" = "tellwork" ] + then + tellwork=1 + shift + t="${1}" + fi + + # $t = the time in seconds to wait + + # if high resolution is not supported + # just sleep the time requested, in seconds + if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ] + then + sleep ${t} + return + fi + + # get the current time, in ms in ${now_ms} + ${current_time_ms} + + # calculate ms since last run + [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && \ + LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy)) + # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms" + + # remember this run + LOOPSLEEPMS_LASTRUN=${now_ms} + + # calculate the next run + LOOPSLEEPMS_NEXTRUN=$(( ( now_ms - ( now_ms % ( t * 1000 ) ) ) + ( t * 1000 ) )) + + # calculate ms to sleep + mstosleep=$(( LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy )) + # echo "# mstosleep is $mstosleep ms" + + # if we are too slow, sleep some time + test ${mstosleep} -lt 200 && mstosleep=200 + + s=$(( mstosleep / 1000 )) + m=$(( mstosleep - (s * 1000) )) + [ "${m}" -lt 100 ] && m="0${m}" + [ "${m}" -lt 10 ] && m="0${m}" + + test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms" + + # echo "# sleeping ${s}.${m}" + # echo + ${mysleep} ${s}.${m} + + # keep the values we need + # for our next run + LOOPSLEEPMS_LASTSLEEP=$mstosleep +} + +# test it +#while [ 1 ] +#do +# r=$(( (RANDOM * 2000 / 32767) )) +# s=$((r / 1000)) +# m=$((r - (s * 1000))) +# [ "${m}" -lt 100 ] && m="0${m}" +# [ "${m}" -lt 10 ] && m="0${m}" +# echo "${r} = ${s}.${m}" +# +# # the work +# ${mysleep} ${s}.${m} +# +# # the alignment loop +# loopsleepms tellwork 1 +#done diff --git a/collectors/charts.d.plugin/mem_apps/Makefile.inc b/collectors/charts.d.plugin/mem_apps/Makefile.inc new file mode 100644 index 000000000..ea546fb69 --- /dev/null +++ b/collectors/charts.d.plugin/mem_apps/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += mem_apps/mem_apps.chart.sh +dist_chartsconfig_DATA += mem_apps/mem_apps.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += mem_apps/README.md mem_apps/Makefile.inc + diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md new file mode 100644 index 000000000..cd8adf0a2 --- /dev/null +++ b/collectors/charts.d.plugin/mem_apps/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE APPS.PLUGIN. 
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh new file mode 100644 index 000000000..a13dc71f1 --- /dev/null +++ b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh @@ -0,0 +1,63 @@ +# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +mem_apps_apps= + +# these are required for computing memory in bytes and cpu in seconds +#mem_apps_pagesize="`getconf PAGESIZE`" +#mem_apps_clockticks="`getconf CLK_TCK`" + +mem_apps_update_every= + +mem_apps_check() { + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + if [ -z "$mem_apps_apps" ] + then + error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf" + return 1 + fi + return 0 +} + +mem_apps_bc_finalze= + +mem_apps_create() { + + echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every" + + local x= + for x in $mem_apps_apps + do + echo "DIMENSION $x $x absolute 1 1024" + + # this string is needed later in the update() function + # to finalize the instructions for the bc command + mem_apps_bc_finalze="$mem_apps_bc_finalze \"SET $x = \"; $x;" + done + return 0 +} + +mem_apps_update() { + # do all the work to collect / calculate the values + # for each dimension + # remember: KEEP IT SIMPLE AND SHORT + + echo "BEGIN chartsd_apps.mem" + ps -o comm,rss -C "$mem_apps_apps" |\ + grep -v "^COMMAND" |\ + ( sed -e "s/ \+/ /g" -e "s/ /+=/g"; + echo "$mem_apps_bc_finalze" + ) | bc + echo "END" + + return 0 +} diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.conf b/collectors/charts.d.plugin/mem_apps/mem_apps.conf new file mode 100644 index 000000000..75d24dc3e --- /dev/null +++ b/collectors/charts.d.plugin/mem_apps/mem_apps.conf @@ -0,0 +1,19 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# app.plugin can do better + +#mem_apps_apps= + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#mem_apps_update_every=2 + +# the number of retries to do in case of failure +# before disabling the module +#mem_apps_retries=10 diff --git a/collectors/charts.d.plugin/mysql/Makefile.inc b/collectors/charts.d.plugin/mysql/Makefile.inc new file mode 100644 index 000000000..ca02fd078 --- /dev/null +++ b/collectors/charts.d.plugin/mysql/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += mysql/mysql.chart.sh +dist_chartsconfig_DATA += mysql/mysql.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += mysql/README.md mysql/Makefile.inc + diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md new file mode 100644 index 000000000..6765b53ab --- /dev/null +++ b/collectors/charts.d.plugin/mysql/README.md @@ -0,0 +1,81 @@ +> THIS MODULE IS OBSOLETE. 
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +# mysql + +The plugin will monitor one or more mysql servers + +It will produce the following charts: + +1. **Bandwidth** in kbps + * in + * out + +2. **Queries** in queries/sec + * queries + * questions + * slow queries + +3. **Operations** in operations/sec + * opened tables + * flush + * commit + * delete + * prepare + * read first + * read key + * read next + * read prev + * read random + * read random next + * rollback + * save point + * update + * write + +4. **Table Locks** in locks/sec + * immediate + * waited + +5. **Select Issues** in issues/sec + * full join + * full range join + * range + * range check + * scan + +6. **Sort Issues** in issues/sec + * merge passes + * range + * scan + +### configuration + +You can configure many database servers, like this: + +You can provide, per server, the following: + +1. a name, anything you like, but keep it short +2. the mysql command to connect to the server +3. the mysql command line options to be used for connecting to the server + +Here is an example for 2 servers: + +```sh +mysql_opts[server1]="-h server1.example.com" +mysql_opts[server2]="-h server2.example.com --connect_timeout 2" +``` + +The above will use the `mysql` command found in the system path. +You can also provide a custom mysql command per server, like this: + +```sh +mysql_cmds[server2]="/opt/mysql/bin/mysql" +``` + +The above sets the mysql command only for server2. server1 will use the system default. + +If no configuration is given, the plugin will attempt to connect to mysql server at localhost. + + +--- diff --git a/collectors/charts.d.plugin/mysql/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh new file mode 100644 index 000000000..37e8e2a7c --- /dev/null +++ b/collectors/charts.d.plugin/mysql/mysql.chart.sh @@ -0,0 +1,528 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +# http://dev.mysql.com/doc/refman/5.0/en/server-status-variables.html +# +# https://dev.mysql.com/doc/refman/5.1/en/show-status.html +# SHOW STATUS provides server status information (see Section 5.1.6, “Server Status Variables”). +# This statement does not require any privilege. +# It requires only the ability to connect to the server. 
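+#
+# For reference, `mysql -N -e "SHOW GLOBAL STATUS;"` prints one tab-separated
+# name/value pair per row; mysql_get() below keeps only the numeric rows whose
+# names match the prefixes charted here. Illustrative sample (made-up values):
+#
+#   Bytes_received    123456789
+#   Bytes_sent        987654321
+#   Queries           4242
+#   Slow_queries      7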
+ +mysql_update_every=2 +mysql_priority=60000 + +declare -A mysql_cmds=() mysql_opts=() mysql_ids=() mysql_data=() + +mysql_get() { + local arr + local oIFS="${IFS}" + mysql_data=() + IFS=$'\t'$'\n' + #arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" )) + #arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" )) + # shellcheck disable=SC2207 + arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+" )) + IFS="${oIFS}" + + [ "${#arr[@]}" -lt 3 ] && return 1 + local end=${#arr[@]} + for ((i=2;i +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +#mysql_cmds[name]="" +#mysql_opts[name]="" + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#mysql_update_every=2 + +# the charts priority on the dashboard +#mysql_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#mysql_retries=10 diff --git a/collectors/charts.d.plugin/nginx/Makefile.inc b/collectors/charts.d.plugin/nginx/Makefile.inc new file mode 100644 index 000000000..c9d31aada --- /dev/null +++ b/collectors/charts.d.plugin/nginx/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += nginx/nginx.chart.sh +dist_chartsconfig_DATA += nginx/nginx.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += nginx/README.md nginx/Makefile.inc + diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md new file mode 100644 index 000000000..d82951aac --- /dev/null +++ b/collectors/charts.d.plugin/nginx/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/collectors/charts.d.plugin/nginx/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh new file mode 100644 index 000000000..14dda0832 --- /dev/null +++ b/collectors/charts.d.plugin/nginx/nginx.chart.sh @@ -0,0 +1,144 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +# if this chart is called X.chart.sh, then all functions and global variables +# must start with X_ + +nginx_url="http://127.0.0.1:80/stub_status" +nginx_curl_opts="" + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +nginx_update_every= +nginx_priority=60000 + +declare -a nginx_response=() +nginx_active_connections=0 +nginx_accepts=0 +nginx_handled=0 +nginx_requests=0 +nginx_reading=0 +nginx_writing=0 +nginx_waiting=0 +nginx_get() { + # shellcheck disable=SC2207 + nginx_response=($(run curl -Ss ${nginx_curl_opts} "${nginx_url}")) + # shellcheck disable=SC2181 + if [ $? 
-ne 0 ] || [ "${#nginx_response[@]}" -eq 0 ]; then return 1; fi + + if [ "${nginx_response[0]}" != "Active" ] ||\ + [ "${nginx_response[1]}" != "connections:" ] ||\ + [ "${nginx_response[3]}" != "server" ] ||\ + [ "${nginx_response[4]}" != "accepts" ] ||\ + [ "${nginx_response[5]}" != "handled" ] ||\ + [ "${nginx_response[6]}" != "requests" ] ||\ + [ "${nginx_response[10]}" != "Reading:" ] ||\ + [ "${nginx_response[12]}" != "Writing:" ] ||\ + [ "${nginx_response[14]}" != "Waiting:" ] + then + error "Invalid response from nginx server: ${nginx_response[*]}" + return 1 + fi + + nginx_active_connections="${nginx_response[2]}" + nginx_accepts="${nginx_response[7]}" + nginx_handled="${nginx_response[8]}" + nginx_requests="${nginx_response[9]}" + nginx_reading="${nginx_response[11]}" + nginx_writing="${nginx_response[13]}" + nginx_waiting="${nginx_response[15]}" + + if [ -z "${nginx_active_connections}" ] ||\ + [ -z "${nginx_accepts}" ] ||\ + [ -z "${nginx_handled}" ] ||\ + [ -z "${nginx_requests}" ] ||\ + [ -z "${nginx_reading}" ] ||\ + [ -z "${nginx_writing}" ] ||\ + [ -z "${nginx_waiting}" ] + then + error "empty values got from nginx server: ${nginx_response[*]}" + return 1 + fi + + return 0 +} + +# _check is called once, to find out if this chart should be enabled or not +nginx_check() { + + nginx_get + # shellcheck disable=2181 + if [ $? -ne 0 ] + then + # shellcheck disable=SC2154 + error "cannot find stub_status on URL '${nginx_url}'. Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf" + return 1 + fi + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + return 0 +} + +# _create is called once, to create the charts +nginx_create() { + cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +#nginx_url="http://127.0.0.1:80/stub_status" +#nginx_curl_opts="" + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#nginx_update_every= + +# the charts priority on the dashboard +#nginx_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#nginx_retries=10 diff --git a/collectors/charts.d.plugin/nut/Makefile.inc b/collectors/charts.d.plugin/nut/Makefile.inc new file mode 100644 index 000000000..4fb47145d --- /dev/null +++ b/collectors/charts.d.plugin/nut/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += nut/nut.chart.sh +dist_chartsconfig_DATA += nut/nut.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += nut/README.md nut/Makefile.inc + diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md new file mode 100644 index 000000000..71906f55a --- /dev/null +++ b/collectors/charts.d.plugin/nut/README.md @@ -0,0 +1,59 @@ +# nut + +The plugin will collect UPS data for all UPSes configured in the system. + +The following charts will be created: + +1. **UPS Charge** + + * percentage changed + +2. **UPS Battery Voltage** + + * current voltage + * high voltage + * low voltage + * nominal voltage + +3. **UPS Input Voltage** + + * current voltage + * fault voltage + * nominal voltage + +4. **UPS Input Current** + + * nominal current + +5. **UPS Input Frequency** + + * current frequency + * nominal frequency + +6. 
**UPS Output Voltage** + + * current voltage + +7. **UPS Load** + + * current load + +8. **UPS Temperature** + + * current temperature + + +### configuration + +This is the internal default for `/etc/netdata/nut.conf` + +```sh +# a space separated list of UPS names +# if empty, the list returned by 'upsc -l' will be used +nut_ups= + +# how frequently to collect UPS data +nut_update_every=2 +``` + +--- diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh new file mode 100644 index 000000000..7e252f325 --- /dev/null +++ b/collectors/charts.d.plugin/nut/nut.chart.sh @@ -0,0 +1,241 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016-2017 Costa Tsaousis +# + +# a space separated list of UPS names +# if empty, the list returned by 'upsc -l' will be used +nut_ups= + +# how frequently to collect UPS data +nut_update_every=2 + +# how much time in seconds, to wait for nut to respond +nut_timeout=2 + +# set this to 1, to enable another chart showing the number +# of UPS clients connected to upsd +nut_clients_chart=0 + +# the priority of nut related to other charts +nut_priority=90000 + +declare -A nut_ids=() +declare -A nut_names=() + +nut_get_all() { + run -t $nut_timeout upsc -l +} + +nut_get() { + run -t $nut_timeout upsc "$1" + + if [ "${nut_clients_chart}" -eq "1" ] + then + printf "ups.connected_clients: " + run -t $nut_timeout upsc -c "$1" | wc -l + fi +} + +nut_check() { + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + local x + + require_cmd upsc || return 1 + + [ -z "$nut_ups" ] && nut_ups="$( nut_get_all )" + + for x in $nut_ups + do + nut_get "$x" >/dev/null + # shellcheck disable=SC2181 + if [ $? -eq 0 ] + then + if [ ! -z "${nut_names[${x}]}" ] + then + nut_ids[$x]="$( fixid "${nut_names[${x}]}" )" + else + nut_ids[$x]="$( fixid "$x" )" + fi + continue + fi + error "cannot get information for NUT UPS '$x'." 
+ done + + if [ ${#nut_ids[@]} -eq 0 ] + then + # shellcheck disable=SC2154 + error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf" + return 1 + fi + + return 0 +} + +nut_create() { + # create the charts + local x + + for x in "${nut_ids[@]}" + do + cat < +# GPL v3+ + +# a space separated list of UPS names +# if empty, the list returned by 'upsc -l' will be used +#nut_ups= + +# each line represents an alias for one UPS +# if empty, the FQDN will be used +#nut_names["FQDN1"]="alias" +#nut_names["FQDN2"]="alias" + +# how much time in seconds, to wait for nut to respond +#nut_timeout=2 + +# set this to 1, to enable another chart showing the number +# of UPS clients connected to upsd +#nut_clients_chart=1 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#nut_update_every=2 + +# the charts priority on the dashboard +#nut_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#nut_retries=10 diff --git a/collectors/charts.d.plugin/opensips/Makefile.inc b/collectors/charts.d.plugin/opensips/Makefile.inc new file mode 100644 index 000000000..a7b5d3a92 --- /dev/null +++ b/collectors/charts.d.plugin/opensips/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += opensips/opensips.chart.sh +dist_chartsconfig_DATA += opensips/opensips.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += opensips/README.md opensips/Makefile.inc + diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh new file mode 100644 index 000000000..c227bd4f2 --- /dev/null +++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh @@ -0,0 +1,326 @@ +# shellcheck shell=bash disable=SC1117,SC2154,SC2086 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +opensips_opts="fifo get_statistics all" +opensips_cmd= +opensips_update_every=5 +opensips_timeout=2 +opensips_priority=80000 + +opensips_get_stats() { + run -t $opensips_timeout "$opensips_cmd" $opensips_opts |\ + grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |\ + sed \ + -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \ + -e "s|[[:space:]:-]\+|_|g" \ + -e "s|^|opensips_|g" + + local ret=$? + [ $ret -ne 0 ] && echo "opensips_command_failed=1" + return $ret +} + +opensips_check() { + # if the user did not provide an opensips_cmd + # try to find it in the system + if [ -z "$opensips_cmd" ] + then + require_cmd opensipsctl || return 1 + fi + + # check once if the command works + local x + x="$(opensips_get_stats | grep "^opensips_core_")" + # shellcheck disable=SC2181 + if [ ! $? -eq 0 ] || [ -z "$x" ] + then + error "cannot get global status. 
Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf" + return 1 + fi + + return 0 +} + +opensips_create() { + # create the charts + cat < +# GPL v3+ + +#opensips_opts="fifo get_statistics all" +#opensips_cmd= +#opensips_timeout=2 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#opensips_update_every=5 + +# the charts priority on the dashboard +#opensips_priority=80000 + +# the number of retries to do in case of failure +# before disabling the module +#opensips_retries=10 diff --git a/collectors/charts.d.plugin/phpfpm/Makefile.inc b/collectors/charts.d.plugin/phpfpm/Makefile.inc new file mode 100644 index 000000000..56bff6102 --- /dev/null +++ b/collectors/charts.d.plugin/phpfpm/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += phpfpm/phpfpm.chart.sh +dist_chartsconfig_DATA += phpfpm/phpfpm.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc + diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md new file mode 100644 index 000000000..d82951aac --- /dev/null +++ b/collectors/charts.d.plugin/phpfpm/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh new file mode 100644 index 000000000..1af7910bc --- /dev/null +++ b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh @@ -0,0 +1,198 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# +# Contributed by @safeie with PR #276 + +# first, you need open php-fpm status in php-fpm.conf +# second, you need add status location in nginx.conf +# you can see, https://easyengine.io/tutorials/php/fpm-status-page/ + +declare -A phpfpm_urls=() +declare -A phpfpm_curl_opts=() + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +phpfpm_update_every= +phpfpm_priority=60000 + +declare -a phpfpm_response=() +phpfpm_pool="" +phpfpm_start_time="" +phpfpm_start_since=0 +phpfpm_accepted_conn=0 +phpfpm_listen_queue=0 +phpfpm_max_listen_queue=0 +phpfpm_listen_queue_len=0 +phpfpm_idle_processes=0 +phpfpm_active_processes=0 +phpfpm_total_processes=0 +phpfpm_max_active_processes=0 +phpfpm_max_children_reached=0 +phpfpm_slow_requests=0 +phpfpm_get() { + local opts="${1}" url="${2}" + + # shellcheck disable=SC2207,2086 + phpfpm_response=($(run curl -Ss ${opts} "${url}")) + # shellcheck disable=SC2181 + if [ $? 
-ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ]; then + return 1 + fi + + if [[ "${phpfpm_response[0]}" != "pool:" \ + || "${phpfpm_response[2]}" != "process" \ + || "${phpfpm_response[5]}" != "start" \ + || "${phpfpm_response[12]}" != "accepted" \ + || "${phpfpm_response[15]}" != "listen" \ + || "${phpfpm_response[16]}" != "queue:" \ + || "${phpfpm_response[26]}" != "idle" \ + || "${phpfpm_response[29]}" != "active" \ + || "${phpfpm_response[32]}" != "total" \ + ]] + then + error "invalid response from phpfpm status server: ${phpfpm_response[*]}" + return 1 + fi + + phpfpm_pool="${phpfpm_response[1]}" + phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" + phpfpm_start_since="${phpfpm_response[11]}" + phpfpm_accepted_conn="${phpfpm_response[14]}" + phpfpm_listen_queue="${phpfpm_response[17]}" + phpfpm_max_listen_queue="${phpfpm_response[21]}" + phpfpm_listen_queue_len="${phpfpm_response[25]}" + phpfpm_idle_processes="${phpfpm_response[28]}" + phpfpm_active_processes="${phpfpm_response[31]}" + phpfpm_total_processes="${phpfpm_response[34]}" + phpfpm_max_active_processes="${phpfpm_response[38]}" + phpfpm_max_children_reached="${phpfpm_response[42]}" + if [ "${phpfpm_response[43]}" == "slow" ] + then + phpfpm_slow_requests="${phpfpm_response[45]}" + else + phpfpm_slow_requests="-1" + fi + + if [[ -z "${phpfpm_pool}" \ + || -z "${phpfpm_start_time}" \ + || -z "${phpfpm_start_since}" \ + || -z "${phpfpm_accepted_conn}" \ + || -z "${phpfpm_listen_queue}" \ + || -z "${phpfpm_max_listen_queue}" \ + || -z "${phpfpm_listen_queue_len}" \ + || -z "${phpfpm_idle_processes}" \ + || -z "${phpfpm_active_processes}" \ + || -z "${phpfpm_total_processes}" \ + || -z "${phpfpm_max_active_processes}" \ + || -z "${phpfpm_max_children_reached}" \ + ]] + then + error "empty values got from phpfpm status server: ${phpfpm_response[*]}" + return 1 + fi + + return 0 +} + +# _check is called once, to find out if this chart should be enabled or not +phpfpm_check() { + if [ ${#phpfpm_urls[@]} -eq 0 ]; then + phpfpm_urls[local]="http://localhost/status" + fi + + local m + for m in "${!phpfpm_urls[@]}" + do + phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}" + # shellcheck disable=SC2181 + if [ $? -ne 0 ]; then + # shellcheck disable=SC2154 + error "cannot find status on URL '${phpfpm_urls[$m]}'. Please set phpfpm_urls[$m]='http://localhost/status' in $confd/phpfpm.conf" + unset "phpfpm_urls[$m]" + continue + fi + done + + if [ ${#phpfpm_urls[@]} -eq 0 ]; then + error "no phpfpm servers found. 
Please set phpfpm_urls[name]='url' to whatever needed to get status to the phpfpm server, in $confd/phpfpm.conf" + return 1 + fi + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + return 0 +} + +# _create is called once, to create the charts +phpfpm_create() { + local m + for m in "${!phpfpm_urls[@]}" + do + cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +# first, you need open php-fpm status in php-fpm.conf +# second, you need add status location in nginx.conf +# you can see, https://easyengine.io/tutorials/php/fpm-status-page/ +#phpfpm_urls[name]="" +#phpfpm_curl_opts[name]="" + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#phpfpm_update_every= + +# the charts priority on the dashboard +#phpfpm_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#phpfpm_retries=10 + diff --git a/collectors/charts.d.plugin/postfix/Makefile.inc b/collectors/charts.d.plugin/postfix/Makefile.inc new file mode 100644 index 000000000..6e148352d --- /dev/null +++ b/collectors/charts.d.plugin/postfix/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += postfix/postfix.chart.sh +dist_chartsconfig_DATA += postfix/postfix.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += postfix/README.md postfix/Makefile.inc + diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md new file mode 100644 index 000000000..5fc265d56 --- /dev/null +++ b/collectors/charts.d.plugin/postfix/README.md @@ -0,0 +1,26 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +# postfix + +The plugin will collect the postfix queue size. + +It will create two charts: + +1. **queue size in emails** +2. **queue size in KB** + +### configuration + +This is the internal default for `/etc/netdata/postfix.conf` + +```sh +# the postqueue command +# if empty, it will use the one found in the system path +postfix_postqueue= + +# how frequently to collect queue size +postfix_update_every=15 +``` + +--- diff --git a/collectors/charts.d.plugin/postfix/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh new file mode 100644 index 000000000..8cb938ce1 --- /dev/null +++ b/collectors/charts.d.plugin/postfix/postfix.chart.sh @@ -0,0 +1,89 @@ +# shellcheck shell=bash disable=SC1117 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +# the postqueue command +# if empty, it will use the one found in the system path +postfix_postqueue= + +# how frequently to collect queue size +postfix_update_every=15 + +postfix_priority=60000 + +postfix_check() { + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + # try to find the postqueue executable + if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ] + then + # shellcheck disable=SC2230 + postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)" + fi + + if [ -z "$postfix_postqueue" ] || [ ! 
-x "$postfix_postqueue" ] + then + # shellcheck disable=SC2154 + error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf" + return 1 + fi + + return 0 +} + +postfix_create() { +cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +# the postqueue command +# if empty, it will use the one found in the system path +#postfix_postqueue= + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#postfix_update_every=15 + +# the charts priority on the dashboard +#postfix_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#postfix_retries=10 + diff --git a/collectors/charts.d.plugin/sensors/Makefile.inc b/collectors/charts.d.plugin/sensors/Makefile.inc new file mode 100644 index 000000000..f466a1b62 --- /dev/null +++ b/collectors/charts.d.plugin/sensors/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += sensors/sensors.chart.sh +dist_chartsconfig_DATA += sensors/sensors.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += sensors/README.md sensors/Makefile.inc + diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md new file mode 100644 index 000000000..ddc3650d6 --- /dev/null +++ b/collectors/charts.d.plugin/sensors/README.md @@ -0,0 +1,52 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +> Unlike the python one, this module can collect temperature on RPi. + +# sensors + +The plugin will provide charts for all configured system sensors + +> This plugin is reading sensors directly from the kernel. +> The `lm-sensors` package is able to perform calculations on the +> kernel provided values, this plugin will not perform. +> So, the values graphed, are the raw hardware values of the sensors. + +The plugin will create netdata charts for: + +1. **Temperature** +2. **Voltage** +3. **Current** +4. **Power** +5. **Fans Speed** +6. **Energy** +7. **Humidity** + +One chart for every sensor chip found and each of the above will be created. + +### configuration + +This is the internal default for `/etc/netdata/sensors.conf` + +```sh +# the directory the kernel keeps sensor data +sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" + +# how deep in the tree to check for sensor data +sensors_sys_depth=10 + +# if set to 1, the script will overwrite internal +# script functions with code generated ones +# leave to 1, is faster +sensors_source_update=1 + +# how frequently to collect sensor data +# the default is to collect it at every iteration of charts.d +sensors_update_every= + +# array of sensors which are excluded +# the default is to include all +sensors_excluded=() +``` + +--- diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh new file mode 100644 index 000000000..54368f1e0 --- /dev/null +++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh @@ -0,0 +1,255 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2016 Costa Tsaousis +# + +# sensors docs +# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface + +# if this chart is called X.chart.sh, then all functions and global variables +# must start with X_ + +# the directory the kernel keeps sensor data +sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" + +# how deep in the tree to check for sensor data +sensors_sys_depth=10 + +# if set to 1, the script will overwrite internal +# script functions with code generated ones +# leave to 1, is faster +sensors_source_update=1 + +# how frequently to collect sensor data +# the default is to collect it at every iteration of charts.d +sensors_update_every= + +sensors_priority=90000 + +declare -A sensors_excluded=() + +sensors_find_all_files() { + find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null +} + +sensors_find_all_dirs() { + # shellcheck disable=SC2162 + sensors_find_all_files "$1" | while read + do + dirname "$REPLY" + done | sort -u +} + +# _check is called once, to find out if this chart should be enabled or not +sensors_check() { + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + [ -z "$( sensors_find_all_files "$sensors_sys_dir" )" ] && error "no sensors found in '$sensors_sys_dir'." && return 1 + return 0 +} + +sensors_check_files() { + # we only need sensors that report a non-zero value + # also remove not needed sensors + + local f v excluded + for f in "$@" + do + [ ! -f "$f" ] && continue + for ex in "${sensors_excluded[@]}"; do + [[ $f =~ .*$ex$ ]] && excluded='1' && break + done + + [ "$excluded" != "1" ] && v="$( cat "$f" )" || v=0 + v=$(( v + 1 - 1 )) + [ $v -ne 0 ] && echo "$f" && continue + excluded= + + error "$f gives zero values" + done +} + +sensors_check_temp_type() { + # valid temp types are 1 to 6 + # disabled sensors have the value 0 + + local f t v + for f in "$@" + do + # shellcheck disable=SC2001 + t=$( echo "$f" | sed "s|_input$|_type|g" ) + [ "$f" = "$t" ] && echo "$f" && continue + [ ! -f "$t" ] && echo "$f" && continue + + v="$( cat "$t" )" + v=$(( v + 1 - 1 )) + [ $v -ne 0 ] && echo "$f" && continue + + error "$f is disabled" + done +} + +# _create is called once, to create the charts +sensors_create() { + local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor + + # we create a script with the source of the + # sensors_update() function + # - the highest speed we can achieve - + [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {" + + for path in $( sensors_find_all_dirs "$sensors_sys_dir" | sort -u ) + do + dir=$( basename "$path" ) + device= + subsystem= + id= + type= + name= + + [ -h "$path/device" ] && device=$( readlink -f "$path/device" ) + [ ! -z "$device" ] && device=$( basename "$device" ) + [ -z "$device" ] && device="$dir" + + [ -h "$path/subsystem" ] && subsystem=$( readlink -f "$path/subsystem" ) + [ ! 
-z "$subsystem" ] && subsystem=$( basename "$subsystem" ) + [ -z "$subsystem" ] && subsystem="$dir" + + [ -f "$path/name" ] && name=$( cat "$path/name" ) + [ -z "$name" ] && name="$dir" + + [ -f "$path/type" ] && type=$( cat "$path/type" ) + [ -z "$type" ] && type="$dir" + + id="$( fixid "$device.$subsystem.$dir" )" + + debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'" + + for mode in temperature voltage fans power current energy humidity + do + files= + multiplier=1 + divisor=1 + algorithm="absolute" + + case $mode in + temperature) + files="$( ls "$path"/temp*_input 2>/dev/null; ls "$path/temp" 2>/dev/null )" + files="$( sensors_check_files "$files" )" + files="$( sensors_check_temp_type "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\"" + divisor=1000 + ;; + + voltage) + files="$( ls "$path"/in*_input 2>/dev/null )" + files="$( sensors_check_files "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\"" + divisor=1000 + ;; + + current) + files="$( ls "$path"/curr*_input 2>/dev/null )" + files="$( sensors_check_files "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\"" + divisor=1000 + ;; + + power) + files="$( ls "$path"/power*_input 2>/dev/null )" + files="$( sensors_check_files "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\"" + divisor=1000000 + ;; + + fans) + files="$( ls "$path"/fan*_input 2>/dev/null )" + files="$( sensors_check_files "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\"" + ;; + + energy) + files="$( ls "$path"/energy*_input 2>/dev/null )" + files="$( sensors_check_files "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\"" + algorithm="incremental" + divisor=1000000 + ;; + + humidity) + files="$( ls "$path"/humidity*_input 2>/dev/null )" + files="$( sensors_check_files "$files" )" + [ -z "$files" ] && continue + echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\"" + divisor=1000 + ;; + + *) + continue + ;; + esac + + for x in $files + do + file="$x" + fid="$( fixid "$file" )" + lfile="$( basename "$file" | sed "s|_input$|_label|g" )" + labelname="$( basename "$file" | sed "s|_input$||g" )" + + if [ ! 
"$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ] + then + labelname="$( cat "$path/$lfile" )" + fi + + echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor" + echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )" + done + + echo >>"$TMP_DIR/sensors.sh" "echo END" + done + done + + [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}" + + # ok, load the function sensors_update() we created + # shellcheck source=/dev/null + [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh" + + return 0 +} + +# _update is called continuously, to collect the values +sensors_update() { + # the first argument to this function is the microseconds since last update + # pass this parameter to the BEGIN statement (see bellow). + + # do all the work to collect / calculate the values + # for each dimension + # remember: KEEP IT SIMPLE AND SHORT + + # shellcheck source=/dev/null + [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1" + + return 0 +} + diff --git a/collectors/charts.d.plugin/sensors/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf new file mode 100644 index 000000000..bcb28807d --- /dev/null +++ b/collectors/charts.d.plugin/sensors/sensors.conf @@ -0,0 +1,32 @@ +# no need for shebang - this file is loaded from charts.d.plugin + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2018 Costa Tsaousis +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +# the directory the kernel keeps sensor data +#sensors_sys_dir="/sys/devices" + +# how deep in the tree to check for sensor data +#sensors_sys_depth=10 + +# if set to 1, the script will overwrite internal +# script functions with code generated ones +# leave to 1, is faster +#sensors_source_update=1 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#sensors_update_every= + +# the charts priority on the dashboard +#sensors_priority=90000 + +# the number of retries to do in case of failure +# before disabling the module +#sensors_retries=10 + diff --git a/collectors/charts.d.plugin/squid/Makefile.inc b/collectors/charts.d.plugin/squid/Makefile.inc new file mode 100644 index 000000000..ad470d88c --- /dev/null +++ b/collectors/charts.d.plugin/squid/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += squid/squid.chart.sh +dist_chartsconfig_DATA += squid/squid.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += squid/README.md squid/Makefile.inc + diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md new file mode 100644 index 000000000..0934ccfcf --- /dev/null +++ b/collectors/charts.d.plugin/squid/README.md @@ -0,0 +1,66 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + + +# squid + +The plugin will monitor a squid server. + +It will produce 4 charts: + +1. **Squid Client Bandwidth** in kbps + + * in + * out + * hits + +2. **Squid Client Requests** in requests/sec + + * requests + * hits + * errors + +3. **Squid Server Bandwidth** in kbps + + * in + * out + +4. **Squid Server Requests** in requests/sec + + * requests + * errors + +### autoconfig + +The plugin will by itself detect squid servers running on +localhost, on ports 3128 or 8080. 
+ +It will attempt to download URLs in the form: + +- `cache_object://HOST:PORT/counters` +- `/squid-internal-mgr/counters` + +If any succeeds, it will use this. + +### configuration + +If you need to configure it by hand, create the file +`/etc/netdata/squid.conf` with the following variables: + +- `squid_host=IP` the IP of the squid host +- `squid_port=PORT` the port the squid is listening +- `squid_url="URL"` the URL with the statistics to be fetched from squid +- `squid_timeout=SECONDS` how much time we should wait for squid to respond +- `squid_update_every=SECONDS` the frequency of the data collection + +Example `/etc/netdata/squid.conf`: + +```sh +squid_host=127.0.0.1 +squid_port=3128 +squid_url="cache_object://127.0.0.1:3128/counters" +squid_timeout=2 +squid_update_every=5 +``` + +--- diff --git a/collectors/charts.d.plugin/squid/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh new file mode 100644 index 000000000..cf5d1d78a --- /dev/null +++ b/collectors/charts.d.plugin/squid/squid.chart.sh @@ -0,0 +1,147 @@ +# shellcheck shell=bash disable=SC2154 +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# + +squid_host= +squid_port= +squid_url= +squid_update_every=2 +squid_priority=60000 + +squid_get_stats_internal() { + local host="$1" port="$2" url="$3" + run squidclient -h "$host" -p "$port" "$url" +} + +squid_get_stats() { + squid_get_stats_internal "$squid_host" "$squid_port" "$squid_url" +} + +squid_autodetect() { + local host="127.0.0.1" port url x + + for port in 3128 8080 + do + for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters" + do + x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests) + if [ ! -z "$x" ] + then + squid_host="$host" + squid_port="$port" + squid_url="$url" + debug "found squid at '$host:$port' with url '$url'" + return 0 + fi + done + done + + error "cannot find squid running in localhost. Please set squid_url='url' and squid_host='IP' and squid_port='PORT' in $confd/squid.conf" + return 1 +} + +squid_check() { + require_cmd squidclient || return 1 + require_cmd sed || return 1 + require_cmd egrep || return 1 + + if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ] + then + squid_autodetect || return 1 + fi + + # check once if the url works + local x + x="$(squid_get_stats | grep client_http.requests)" + # shellcheck disable=SC2181 + if [ ! $? -eq 0 ] || [ -z "$x" ] + then + error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. 
Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf" + return 1 + fi + + return 0 +} + +squid_create() { + # create the charts + cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +#squid_host= +#squid_port= +#squid_url= +#squid_timeout=2 + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#squid_update_every=2 + +# the charts priority on the dashboard +#squid_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#squid_retries=10 + diff --git a/collectors/charts.d.plugin/tomcat/Makefile.inc b/collectors/charts.d.plugin/tomcat/Makefile.inc new file mode 100644 index 000000000..ef05b1953 --- /dev/null +++ b/collectors/charts.d.plugin/tomcat/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_charts_DATA += tomcat/tomcat.chart.sh +dist_chartsconfig_DATA += tomcat/tomcat.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc + diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md new file mode 100644 index 000000000..d82951aac --- /dev/null +++ b/collectors/charts.d.plugin/tomcat/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh new file mode 100644 index 000000000..294487b8b --- /dev/null +++ b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh @@ -0,0 +1,150 @@ +# shellcheck shell=bash +# no need for shebang - this file is loaded from charts.d.plugin +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2016 Costa Tsaousis +# +# Contributed by @jgeromero with PR #277 + +# Description: Tomcat netdata charts.d plugin +# Author: Jorge Romero + +# the URL to download tomcat status info +# usually http://localhost:8080/manager/status?XML=true +tomcat_url="" +tomcat_curl_opts="" + +# set tomcat username/password here +tomcat_user="" +tomcat_password="" + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +tomcat_update_every= + +tomcat_priority=60000 + +# convert tomcat floating point values +# to integer using this multiplier +# this only affects precision - the values +# will be in the proper units +tomcat_decimal_detail=1000000 + +# used by volume chart to convert bytes to KB +tomcat_decimal_KB_detail=1000 + +tomcat_check() { + + require_cmd xmlstarlet || return 1 + + + # check if url, username, passwords are set + if [ -z "${tomcat_url}" ]; then + error "tomcat url is unset or set to the empty string" + return 1 + fi + if [ -z "${tomcat_user}" ]; then + # check backwards compatibility + # shellcheck disable=SC2154 + if [ -z "${tomcatUser}" ]; then + error "tomcat user is unset or set to the empty string" + return 1 + else + tomcat_user="${tomcatUser}" + fi + fi + if [ -z "${tomcat_password}" ]; then + # check backwards compatibility + # shellcheck disable=SC2154 + if [ -z "${tomcatPassword}" ]; then + error "tomcat password is unset or set to the empty string" + return 1 + else + tomcat_password="${tomcatPassword}" + fi + fi + + # check if we can get to tomcat's status page + tomcat_get + # shellcheck disable=2181 + if [ $? -ne 0 ] + then + error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct." + return 1 + fi + + # this should return: + # - 0 to enable the chart + # - 1 to disable the chart + + return 0 +} + +tomcat_get() { + # collect tomcat values + tomcat_port="$(IFS=/ read -ra a <<< "$tomcat_url"; hostport=${a[2]}; echo "${hostport#*:}")" + mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |\ + run xmlstarlet sel \ + -t -m "/status/jvm/memory" -v @free \ + -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \ + -n -v @currentThreadsBusy \ + -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/requestInfo" -v @requestCount \ + -n -v @bytesSent -n -) + + tomcat_jvm_freememory="${lines[0]}" + tomcat_threads="${lines[1]}" + tomcat_threads_busy="${lines[2]}" + tomcat_accesses="${lines[3]}" + tomcat_volume="${lines[4]}" + + return 0 +} + +# _create is called once, to create the charts +tomcat_create() { + cat < +# GPL v3+ + +# THIS PLUGIN IS DEPRECATED +# USE THE PYTHON.D ONE + +# the URL to download tomcat status info +# usually http://localhost:8080/manager/status?XML=true +#tomcat_url="" +#tomcat_curl_opts="" + +# set tomcat username/password here +#tomcat_user="" +#tomcat_password="" + +# the data collection frequency +# if unset, will inherit the netdata update frequency +#tomcat_update_every=1 + +# the charts priority on the dashboard +#tomcat_priority=60000 + +# the number of retries to do in case of failure +# before disabling the module +#tomcat_retries=10 + +# convert tomcat floating point values +# to integer using this multiplier +# this only affects precision - the values +# will be in the proper units +#tomcat_decimal_detail=1000000 + +# used by volume chart to convert bytes to KB +#tomcat_decimal_KB_detail=1000 diff --git 
a/collectors/checks.plugin/Makefile.am b/collectors/checks.plugin/Makefile.am new file mode 100644 index 000000000..babdcf0df --- /dev/null +++ b/collectors/checks.plugin/Makefile.am @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/collectors/checks.plugin/Makefile.in b/collectors/checks.plugin/Makefile.in new file mode 100644 index 000000000..632125466 --- /dev/null +++ b/collectors/checks.plugin/Makefile.in @@ -0,0 +1,457 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/checks.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + 
$(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = 
@abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/checks.plugin/plugin_checks.c b/collectors/checks.plugin/plugin_checks.c new file mode 100644 index 000000000..f8a2008a8 --- /dev/null +++ b/collectors/checks.plugin/plugin_checks.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_checks.h" + +#ifdef NETDATA_INTERNAL_CHECKS + +static void checks_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *checks_main(void *ptr) { + netdata_thread_cleanup_push(checks_main_cleanup, ptr); + + usec_t usec = 0, susec = localhost->rrd_update_every * USEC_PER_SEC, loop_usec = 0, total_susec = 0; + struct timeval now, last, loop; + + RRDSET *check1, *check2, *check3, *apps_cpu = NULL; + + check1 = rrdset_create_localhost( + "netdata" + , "check1" + , NULL + , "netdata" + , NULL + , "Caller gives microseconds" + , "a million !" + , "checks.plugin" + , "" + , NETDATA_CHART_PRIO_CHECKS + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(check1, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(check1, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + check2 = rrdset_create_localhost( + "netdata" + , "check2" + , NULL + , "netdata" + , NULL + , "Netdata calcs microseconds" + , "a million !" 
+ , "checks.plugin" + , "" + , NETDATA_CHART_PRIO_CHECKS + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + rrddim_add(check2, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(check2, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + check3 = rrdset_create_localhost( + "netdata" + , "checkdt" + , NULL + , "netdata" + , NULL + , "Clock difference" + , "microseconds diff" + , "checks.plugin" + , "" + , NETDATA_CHART_PRIO_CHECKS + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + rrddim_add(check3, "caller", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(check3, "netdata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(check3, "apps.plugin", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + now_realtime_timeval(&last); + while(!netdata_exit) { + usleep(susec); + + // find the time to sleep in order to wait exactly update_every seconds + now_realtime_timeval(&now); + loop_usec = dt_usec(&now, &last); + usec = loop_usec - susec; + debug(D_PROCNETDEV_LOOP, "CHECK: last loop took %llu usec (worked for %llu, sleeped for %llu).", loop_usec, usec, susec); + + if(usec < (localhost->rrd_update_every * USEC_PER_SEC / 2ULL)) susec = (localhost->rrd_update_every * USEC_PER_SEC) - usec; + else susec = localhost->rrd_update_every * USEC_PER_SEC / 2ULL; + + // -------------------------------------------------------------------- + // Calculate loop time + + last.tv_sec = now.tv_sec; + last.tv_usec = now.tv_usec; + total_susec += loop_usec; + + // -------------------------------------------------------------------- + // check chart 1 + + if(check1->counter_done) rrdset_next_usec(check1, loop_usec); + rrddim_set(check1, "absolute", 1000000); + rrddim_set(check1, "incremental", total_susec); + rrdset_done(check1); + + // -------------------------------------------------------------------- + // check chart 2 + + if(check2->counter_done) rrdset_next(check2); + rrddim_set(check2, "absolute", 1000000); + rrddim_set(check2, "incremental", total_susec); + rrdset_done(check2); + + // -------------------------------------------------------------------- + // check chart 3 + + if(!apps_cpu) apps_cpu = rrdset_find_localhost("apps.cpu"); + if(check3->counter_done) rrdset_next_usec(check3, loop_usec); + now_realtime_timeval(&loop); + rrddim_set(check3, "caller", (long long) dt_usec(&loop, &check1->last_collected_time)); + rrddim_set(check3, "netdata", (long long) dt_usec(&loop, &check2->last_collected_time)); + if(apps_cpu) rrddim_set(check3, "apps.plugin", (long long) dt_usec(&loop, &apps_cpu->last_collected_time)); + rrdset_done(check3); + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + +#endif // NETDATA_INTERNAL_CHECKS diff --git a/collectors/checks.plugin/plugin_checks.h b/collectors/checks.plugin/plugin_checks.h new file mode 100644 index 000000000..93494765d --- /dev/null +++ b/collectors/checks.plugin/plugin_checks.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGIN_CHECKS_H +#define NETDATA_PLUGIN_CHECKS_H 1 + +#include "../../daemon/common.h" + +#ifdef NETDATA_INTERNAL_CHECKS + +#define NETDATA_PLUGIN_HOOK_CHECKS \ + { \ + .name = "PLUGIN[check]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "checks", \ + .enabled = 0, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = checks_main \ + }, + +extern void *checks_main(void *ptr); + +#else // !NETDATA_INTERNAL_CHECKS + +#define NETDATA_PLUGIN_HOOK_CHECKS + +#endif // NETDATA_INTERNAL_CHECKS + +#endif // NETDATA_PLUGIN_CHECKS_H diff --git 
a/collectors/diskspace.plugin/Makefile.am b/collectors/diskspace.plugin/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/collectors/diskspace.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/diskspace.plugin/Makefile.in b/collectors/diskspace.plugin/Makefile.in new file mode 100644 index 000000000..ceebc5455 --- /dev/null +++ b/collectors/diskspace.plugin/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/diskspace.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ 
+VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md new file mode 100644 index 000000000..74d6cde3c --- /dev/null +++ b/collectors/diskspace.plugin/README.md @@ -0,0 +1,5 @@ +> for disks performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks-performance-with-netdata) + +# diskspace.plugin + +This plugin monitors the disk space usage of mounted disks, under Linux. diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c new file mode 100644 index 000000000..dca7c9076 --- /dev/null +++ b/collectors/diskspace.plugin/plugin_diskspace.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_diskspace.h" + +#define PLUGIN_DISKSPACE_NAME "diskspace.plugin" + +#define DELAULT_EXCLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*" +#define DEFAULT_EXCLUDED_FILESYSTEMS "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl" +#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace" + +static struct mountinfo *disk_mountinfo_root = NULL; +static int check_for_new_mountpoints_every = 15; +static int cleanup_mount_points = 1; + +static inline void mountinfo_reload(int force) { + static time_t last_loaded = 0; + time_t now = now_realtime_sec(); + + if(force || now - last_loaded >= check_for_new_mountpoints_every) { + // mountinfo_free_all() can be called with NULL disk_mountinfo_root + mountinfo_free_all(disk_mountinfo_root); + + // re-read mountinfo in case something changed + disk_mountinfo_root = mountinfo_read(0); + + last_loaded = now; + } +} + +// Data to be stored in DICTIONARY dict_mountpoints used by do_disk_space_stats(). +// This DICTIONARY is used to lookup the settings of the mount point on each iteration. 
+struct mount_point_metadata {
+    int do_space;
+    int do_inodes;
+    int shown_error;
+    int updated;
+
+    size_t collected; // the number of times this has been collected
+
+    RRDSET *st_space;
+    RRDDIM *rd_space_used;
+    RRDDIM *rd_space_avail;
+    RRDDIM *rd_space_reserved;
+
+    RRDSET *st_inodes;
+    RRDDIM *rd_inodes_used;
+    RRDDIM *rd_inodes_avail;
+    RRDDIM *rd_inodes_reserved;
+};
+
+static DICTIONARY *dict_mountpoints = NULL;
+
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+
+int mount_point_cleanup(void *entry, void *data) {
+    (void)data;
+
+    struct mount_point_metadata *mp = (struct mount_point_metadata *)entry;
+    if(!mp) return 0;
+
+    if(likely(mp->updated)) {
+        mp->updated = 0;
+        return 0;
+    }
+
+    if(likely(cleanup_mount_points && mp->collected)) {
+        mp->collected = 0;
+        mp->updated = 0;
+        mp->shown_error = 0;
+
+        mp->rd_space_avail = NULL;
+        mp->rd_space_used = NULL;
+        mp->rd_space_reserved = NULL;
+
+        mp->rd_inodes_avail = NULL;
+        mp->rd_inodes_used = NULL;
+        mp->rd_inodes_reserved = NULL;
+
+        rrdset_obsolete_and_pointer_null(mp->st_space);
+        rrdset_obsolete_and_pointer_null(mp->st_inodes);
+    }
+
+    return 0;
+}
+
+static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
+    const char *family = mi->mount_point;
+    const char *disk = mi->persistent_id;
+
+    static SIMPLE_PATTERN *excluded_mountpoints = NULL;
+    static SIMPLE_PATTERN *excluded_filesystems = NULL;
+    int do_space, do_inodes;
+
+    if(unlikely(!dict_mountpoints)) {
+        SIMPLE_PREFIX_MODE mode = SIMPLE_PATTERN_EXACT;
+
+        if(config_move("plugin:proc:/proc/diskstats", "exclude space metrics on paths", CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths") != -1) {
+            // old configuration, enable backwards compatibility
+            mode = SIMPLE_PATTERN_PREFIX;
+        }
+
+        excluded_mountpoints = simple_pattern_create(
+                config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths", DEFAULT_EXCLUDED_PATHS)
+                , NULL
+                , mode
+        );
+
+        excluded_filesystems = simple_pattern_create(
+                config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS)
+                , NULL
+                , SIMPLE_PATTERN_EXACT
+        );
+
+        dict_mountpoints = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+    }
+
+    struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point);
+    if(unlikely(!m)) {
+        char var_name[4096 + 1];
+        snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point);
+
+        int def_space = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
+        int def_inodes = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
+
+        if(unlikely(simple_pattern_matches(excluded_mountpoints, mi->mount_point))) {
+            def_space = CONFIG_BOOLEAN_NO;
+            def_inodes = CONFIG_BOOLEAN_NO;
+        }
+
+        if(unlikely(simple_pattern_matches(excluded_filesystems, mi->filesystem))) {
+            def_space = CONFIG_BOOLEAN_NO;
+            def_inodes = CONFIG_BOOLEAN_NO;
+        }
+
+        // check if the mount point is a directory #2407
+        // but only when it is enabled by default #4491
+        if(def_space != CONFIG_BOOLEAN_NO || def_inodes != CONFIG_BOOLEAN_NO) {
+            struct stat bs;
+            if(stat(mi->mount_point, &bs) == -1) {
+                error("DISKSPACE: Cannot stat() mount point '%s' (disk '%s', filesystem '%s', root '%s')."
+ , mi->mount_point + , disk + , mi->filesystem?mi->filesystem:"" + , mi->root?mi->root:"" + ); + def_space = CONFIG_BOOLEAN_NO; + def_inodes = CONFIG_BOOLEAN_NO; + } + else { + if((bs.st_mode & S_IFMT) != S_IFDIR) { + error("DISKSPACE: Mount point '%s' (disk '%s', filesystem '%s', root '%s') is not a directory." + , mi->mount_point + , disk + , mi->filesystem?mi->filesystem:"" + , mi->root?mi->root:"" + ); + def_space = CONFIG_BOOLEAN_NO; + def_inodes = CONFIG_BOOLEAN_NO; + } + } + } + + do_space = config_get_boolean_ondemand(var_name, "space usage", def_space); + do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes); + + struct mount_point_metadata mp = { + .do_space = do_space, + .do_inodes = do_inodes, + .shown_error = 0, + .updated = 0, + + .collected = 0, + + .st_space = NULL, + .rd_space_avail = NULL, + .rd_space_used = NULL, + .rd_space_reserved = NULL, + + .st_inodes = NULL, + .rd_inodes_avail = NULL, + .rd_inodes_used = NULL, + .rd_inodes_reserved = NULL + }; + + m = dictionary_set(dict_mountpoints, mi->mount_point, &mp, sizeof(struct mount_point_metadata)); + } + + m->updated = 1; + + if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO)) + return; + + if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected)) + return; + + struct statvfs buff_statvfs; + if (statvfs(mi->mount_point, &buff_statvfs) < 0) { + if(!m->shown_error) { + error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')" + , mi->mount_point + , disk + , mi->filesystem?mi->filesystem:"" + , mi->root?mi->root:"" + ); + m->shown_error = 1; + } + return; + } + m->shown_error = 0; + + // logic found at get_fs_usage() in coreutils + unsigned long bsize = (buff_statvfs.f_frsize) ? buff_statvfs.f_frsize : buff_statvfs.f_bsize; + + fsblkcnt_t bavail = buff_statvfs.f_bavail; + fsblkcnt_t btotal = buff_statvfs.f_blocks; + fsblkcnt_t bavail_root = buff_statvfs.f_bfree; + fsblkcnt_t breserved_root = bavail_root - bavail; + fsblkcnt_t bused; + if(likely(btotal >= bavail_root)) + bused = btotal - bavail_root; + else + bused = bavail_root - btotal; + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(btotal != bavail + breserved_root + bused)) + error("DISKSPACE: disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused); +#endif + + // -------------------------------------------------------------------------- + + fsfilcnt_t favail = buff_statvfs.f_favail; + fsfilcnt_t ftotal = buff_statvfs.f_files; + fsfilcnt_t favail_root = buff_statvfs.f_ffree; + fsfilcnt_t freserved_root = favail_root - favail; + fsfilcnt_t fused = ftotal - favail_root; + + if(m->do_inodes == CONFIG_BOOLEAN_AUTO && favail == (fsfilcnt_t)-1) { + // this file system does not support inodes reporting + // eg. 
cephfs
+        m->do_inodes = CONFIG_BOOLEAN_NO;
+    }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    if(unlikely(ftotal != favail + freserved_root + fused))
+        error("DISKSPACE: disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
+#endif
+
+    // --------------------------------------------------------------------------
+
+    int rendered = 0;
+
+    if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (bavail || breserved_root || bused))) {
+        if(unlikely(!m->st_space)) {
+            m->do_space = CONFIG_BOOLEAN_YES;
+            m->st_space = rrdset_find_bytype_localhost("disk_space", disk);
+            if(unlikely(!m->st_space)) {
+                char title[4096 + 1];
+                snprintfz(title, 4096, "Disk Space Usage for %s [%s]", family, mi->mount_source);
+                m->st_space = rrdset_create_localhost(
+                        "disk_space"
+                        , disk
+                        , NULL
+                        , family
+                        , "disk.space"
+                        , title
+                        , "GB"
+                        , PLUGIN_DISKSPACE_NAME
+                        , NULL
+                        , NETDATA_CHART_PRIO_DISKSPACE_SPACE
+                        , update_every
+                        , RRDSET_TYPE_STACKED
+                );
+            }
+
+            m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            m->rd_space_used = rrddim_add(m->st_space, "used", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        }
+        else
+            rrdset_next(m->st_space);
+
+        rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number)bavail);
+        rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number)bused);
+        rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number)breserved_root);
+        rrdset_done(m->st_space);
+
+        rendered++;
+    }
+
+    // --------------------------------------------------------------------------
+
+    if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (favail || freserved_root || fused))) {
+        if(unlikely(!m->st_inodes)) {
+            m->do_inodes = CONFIG_BOOLEAN_YES;
+            m->st_inodes = rrdset_find_bytype_localhost("disk_inodes", disk);
+            if(unlikely(!m->st_inodes)) {
+                char title[4096 + 1];
+                snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", family, mi->mount_source);
+                m->st_inodes = rrdset_create_localhost(
+                        "disk_inodes"
+                        , disk
+                        , NULL
+                        , family
+                        , "disk.inodes"
+                        , title
+                        , "Inodes"
+                        , PLUGIN_DISKSPACE_NAME
+                        , NULL
+                        , NETDATA_CHART_PRIO_DISKSPACE_INODES
+                        , update_every
+                        , RRDSET_TYPE_STACKED
+                );
+            }
+
+            m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+            m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+            m->rd_inodes_reserved = rrddim_add(m->st_inodes, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        }
+        else
+            rrdset_next(m->st_inodes);
+
+        rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number)favail);
+        rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number)fused);
+        rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_reserved, (collected_number)freserved_root);
+        rrdset_done(m->st_inodes);
+
+        rendered++;
+    }
+
+    // --------------------------------------------------------------------------
+
+    if(likely(rendered))
+        m->collected++;
+}
+
+static void diskspace_main_cleanup(void *ptr) {
+    struct
netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *diskspace_main(void *ptr) { + netdata_thread_cleanup_push(diskspace_main_cleanup, ptr); + + int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1); + + cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points); + + int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every); + if(update_every < localhost->rrd_update_every) + update_every = localhost->rrd_update_every; + + check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every); + if(check_for_new_mountpoints_every < update_every) + check_for_new_mountpoints_every = update_every; + + struct rusage thread; + + usec_t duration = 0; + usec_t step = update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + while(!netdata_exit) { + duration = heartbeat_monotonic_dt_to_now_usec(&hb); + /* usec_t hb_dt = */ heartbeat_next(&hb, step); + + if(unlikely(netdata_exit)) break; + + + // -------------------------------------------------------------------------- + // this is smart enough not to reload it every time + + mountinfo_reload(0); + + + // -------------------------------------------------------------------------- + // disk space metrics + + struct mountinfo *mi; + for(mi = disk_mountinfo_root; mi; mi = mi->next) { + + if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND))) + continue; + + do_disk_space_stats(mi, update_every); + if(unlikely(netdata_exit)) break; + } + + if(unlikely(netdata_exit)) break; + + if(dict_mountpoints) + dictionary_get_all(dict_mountpoints, mount_point_cleanup, NULL); + + if(vdo_cpu_netdata) { + static RRDSET *stcpu_thread = NULL, *st_duration = NULL; + static RRDDIM *rd_user = NULL, *rd_system = NULL, *rd_duration = NULL; + + // ---------------------------------------------------------------- + + getrusage(RUSAGE_THREAD, &thread); + + if(unlikely(!stcpu_thread)) { + stcpu_thread = rrdset_create_localhost( + "netdata" + , "plugin_diskspace" + , NULL + , "diskspace" + , NULL + , "NetData Disk Space Plugin CPU usage" + , "milliseconds/s" + , PLUGIN_DISKSPACE_NAME + , NULL + , NETDATA_CHART_PRIO_NETDATA_DISKSPACE + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(stcpu_thread); + + rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); + rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); + rrdset_done(stcpu_thread); + + // ---------------------------------------------------------------- + + if(unlikely(!st_duration)) { + st_duration = rrdset_create_localhost( + "netdata" + , "plugin_diskspace_dt" + , NULL + , "diskspace" + , NULL + , "NetData Disk Space Plugin Duration" + , "milliseconds/run" + , PLUGIN_DISKSPACE_NAME + , NULL + , 132021 + , update_every + , RRDSET_TYPE_AREA + ); + + rd_duration = rrddim_add(st_duration, "duration", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_duration); + + 
rrddim_set_by_pointer(st_duration, rd_duration, duration); + rrdset_done(st_duration); + + // ---------------------------------------------------------------- + + if(unlikely(netdata_exit)) break; + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/diskspace.plugin/plugin_diskspace.h b/collectors/diskspace.plugin/plugin_diskspace.h new file mode 100644 index 000000000..7c9df9d13 --- /dev/null +++ b/collectors/diskspace.plugin/plugin_diskspace.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H +#define NETDATA_PLUGIN_PROC_DISKSPACE_H + +#include "../../daemon/common.h" + + +#if (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE \ + { \ + .name = "PLUGIN[diskspace]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "diskspace", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = diskspace_main \ + }, + +extern void *diskspace_main(void *ptr); + +#include "../proc.plugin/plugin_proc.h" + +#else // (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE + +#endif // (TARGET_OS == OS_LINUX) + + + +#endif //NETDATA_PLUGIN_PROC_DISKSPACE_H diff --git a/collectors/fping.plugin/Makefile.am b/collectors/fping.plugin/Makefile.am new file mode 100644 index 000000000..4395394db --- /dev/null +++ b/collectors/fping.plugin/Makefile.am @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + fping.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_plugins_SCRIPTS = \ + fping.plugin \ + $(NULL) + +dist_noinst_DATA = \ + fping.plugin.in \ + README.md \ + $(NULL) + +dist_libconfig_DATA = \ + fping.conf \ + $(NULL) diff --git a/collectors/fping.plugin/Makefile.in b/collectors/fping.plugin/Makefile.in new file mode 100644 index 000000000..67b9699b7 --- /dev/null +++ b/collectors/fping.plugin/Makefile.in @@ -0,0 +1,591 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ + $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ + $(dist_libconfig_DATA) $(dist_noinst_DATA) +subdir = collectors/fping.plugin +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, 
files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pluginsdir)" \ + "$(DESTDIR)$(libconfigdir)" +SCRIPTS = $(dist_plugins_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ 
+ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + fping.plugin \ + $(NULL) + +SUFFIXES = .in +dist_plugins_SCRIPTS = \ + fping.plugin \ + $(NULL) + +dist_noinst_DATA = \ + fping.plugin.in \ + README.md \ + $(NULL) + +dist_libconfig_DATA = \ + fping.conf \ + $(NULL) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed 
'/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_libconfigDATA \ + install-dist_pluginsSCRIPTS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_libconfigDATA \ + uninstall-dist_pluginsSCRIPTS + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_libconfigDATA \ + install-dist_pluginsSCRIPTS install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS + +.in: + if sed \ + -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ + -e 's#[@]sbindir_POST@#$(sbindir)#g' \ + -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ + -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ + $< > $@.tmp; then \ + mv "$@.tmp" "$@"; \ + else \ + rm -f "$@.tmp"; \ + false; \ + fi + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md new file mode 100644 index 000000000..0554a7edc --- /dev/null +++ b/collectors/fping.plugin/README.md @@ -0,0 +1,96 @@ +# fping.plugin + +The fping plugin supports monitoring latency, packet loss and uptime of any number of network end points, +by pinging them with `fping`. + +A recent version of `fping` is required (one that supports option ` -N `). +The supplied plugin can install it, by running: + +```sh +/usr/libexec/netdata/plugins.d/fping.plugin install +``` + +The above will download, build and install the right version as `/usr/local/bin/fping`. 
+
+Then you need to edit `/etc/netdata/fping.conf` (to edit it on your system run
+`/etc/netdata/edit-config fping.conf`) like this:
+
+```sh
+# uncomment the following line - it should already be there
+fping="/usr/local/bin/fping"
+
+# set here all the hosts you need to ping
+# I suggest using hostnames and putting their IPs in /etc/hosts
+hosts="host1 host2 host3"
+
+# override the chart update frequency - the default is inherited from netdata
+update_every=1
+
+# time in milliseconds (1 sec = 1000 ms) to ping the hosts
+# 200 = 5 pings per second
+ping_every=200
+
+# other fping options - these are the defaults
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+```
+
+## Alarms
+
+netdata will automatically attach a few alarms for each host.
+Check the [latest versions of the fping alarms](https://github.com/netdata/netdata/blob/master/health/health.d/fping.conf).
+
+## Additional Tips
+
+### Customizing Amount of Pings Per Second
+
+For example, to update the chart every 10 seconds and send 2 pings every 10 seconds, use this:
+
+```sh
+# Chart Update Frequency (Time in Seconds)
+update_every=10
+
+# Time in Milliseconds (1 sec = 1000 ms) to Ping the Hosts
+# The Following Example Sends 1 Ping Every 5000 ms
+# Calculation Formula: ping_every = (update_every * 1000) / 2
+ping_every=5000
+```
+
+### Multiple fping Plugins With Different Settings
+
+You may need to run multiple fping plugins with different settings for different endpoints.
+For example, you may need to ping a few hosts 10 times per second, and others once per second.
+
+netdata allows you to add as many `fping` plugins as you like.
+
+Follow this procedure:
+
+**1. Create New fping Configuration File**
+
+
+```sh
+# Step Into Configuration Directory
+cd /etc/netdata
+
+# Copy Original fping Configuration File To New Configuration File
+cp fping.conf fping2.conf
+```
+
+Edit `fping2.conf` and set the options and the hosts you need for the second instance.
+
+**2. Soft Link Original fping Plugin to New Plugin File**
+
+```sh
+# Become root (If The Following Steps Are Performed As A Non-Root User)
+sudo su
+
+# Step Into The Plugins Directory
+cd /usr/libexec/netdata/plugins.d
+
+# Link fping.plugin to fping2.plugin
+ln -s fping.plugin fping2.plugin
+```
+
+That's it. netdata will detect the new plugin and start it.
+
+You can name the new plugin any name you like.
+Just make sure the plugin and the configuration file have the same name.
diff --git a/collectors/fping.plugin/fping.conf b/collectors/fping.plugin/fping.conf
new file mode 100644
index 000000000..63a7f7acd
--- /dev/null
+++ b/collectors/fping.plugin/fping.conf
@@ -0,0 +1,44 @@
+# no need for shebang - this file is sourced from fping.plugin
+
+# fping.plugin requires a recent version of fping.
+#
+# You can get it on your system by running:
+#
+# /usr/libexec/netdata/plugins.d/fping.plugin install
+
+# -----------------------------------------------------------------------------
+# configuration options
+
+# The fping binary to use. We need one that can output netdata friendly info
+# (supporting: -N).
If you have multiple versions, put here the full filename +# of the right one + +#fping="/usr/local/bin/fping" + + +# a space separated list of hosts to fping +# we suggest to put names here and the IPs of these names in /etc/hosts + +hosts="" + + +# The update frequency of the chart - the default is inherited from netdata + +#update_every=2 + + +# The time in milliseconds (1 sec = 1000 ms) to ping the hosts +# by default 5 pings per host per iteration +# fping will not allow this to be below 20ms + +#ping_every="200" + + +# other fping options - defaults: +# -R = send packets with random data +# -b 56 = the number of bytes per packet +# -i 1 = 1 ms when sending packets to others hosts (switching hosts) +# -r 0 = never retry packets +# -t 5000 = per packet timeout at 5000 ms + +#fping_opts="-R -b 56 -i 1 -r 0 -t 5000" diff --git a/collectors/fping.plugin/fping.plugin b/collectors/fping.plugin/fping.plugin new file mode 100644 index 000000000..cf8f17e9a --- /dev/null +++ b/collectors/fping.plugin/fping.plugin @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis +# GPL v3+ +# +# This plugin requires a latest version of fping. +# You can compile it from source, by running me with option: install + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + +if [ "${1}" = "install" ] + then + [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1 + + run() { + printf >&2 " > " + printf >&2 "%q " "${@}" + printf >&2 "\n" + "${@}" || exit 1 + } + + download() { + local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" + [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0 + + local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)" + [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0 + + echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1 + } + + [ ! -d /usr/src ] && run mkdir -p /usr/src + [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin + + run cd /usr/src + + if [ -d fping-4.0 ] + then + run rm -rf fping-4.0 || exit 1 + fi + + download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf - + [ $? -ne 0 ] && exit 1 + run cd fping-4.0 || exit 1 + + run ./configure --prefix=/usr/local + run make clean + run make + if [ -f /usr/local/bin/fping ] + then + run mv -f /usr/local/bin/fping /usr/local/bin/fping.old + fi + run mv src/fping /usr/local/bin/fping + run chown root:root /usr/local/bin/fping + run chmod 4755 /usr/local/bin/fping + echo >&2 + echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping." + echo >&2 + + fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)" + if [ "${fping}" != "/usr/local/bin/fping" ] + then + echo >&2 "You have another fping installed at: ${fping}." 
+ echo >&2 "Please set:" + echo >&2 + echo >&2 " fping=\"/usr/local/bin/fping\"" + echo >&2 + echo >&2 "at /etc/netdata/fping.conf" + echo >&2 + fi + exit 0 +fi + +# ----------------------------------------------------------------------------- + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + echo "DISABLE" + exit 1 +} + +debug=0 +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- + +# store in ${plugin} the name we run under +# this allows us to copy/link fping.plugin under a different name +# to have multiple fping plugins running with different settings +plugin="${PROGRAM_NAME/.plugin/}" + + +# ----------------------------------------------------------------------------- + +# the frequency to send info to netdata +# passed by netdata as the first parameter +update_every="${1-1}" + +# the netdata configuration directory +# passed by netdata as an environment variable +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" + +# ----------------------------------------------------------------------------- +# configuration options +# can be overwritten at /etc/netdata/fping.conf + +# the fping binary to use +# we need one that can output netdata friendly info (supporting: -N) +# if you have multiple versions, put here the full filename of the right one +fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )" + +# a space separated list of hosts to fping +# we suggest to put names here and the IPs of these names in /etc/hosts +hosts="" + +# the time in milliseconds (1 sec = 1000 ms) +# to ping the hosts - by default 5 pings per host per iteration +ping_every="$((update_every * 1000 / 5))" + +# fping options +fping_opts="-R -b 56 -i 1 -r 0 -t 5000" + +# ----------------------------------------------------------------------------- +# load the configuration files + +for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf" +do + if [ -f "${CONFIG}" ] + then + info "Loading config file '${CONFIG}'..." + source "${CONFIG}" + [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi +done + +if [ -z "${hosts}" ] +then + fatal "no hosts configured - nothing to do." +fi + +if [ -z "${fping}" ] +then + fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'" +fi + +if [ ! -x "${fping}" ] +then + fatal "fping command '${fping}' is not executable - cannot proceed." +fi + +if [ ${ping_every} -lt 20 ] + then + warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms." + ping_every=20 +fi + +# the fping options we will use +options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} ) + +# execute fping +info "starting fping: ${fping} ${options[*]}" +exec "${fping}" "${options[@]}" + +# if we cannot execute fping, stop +fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)." 
diff --git a/collectors/fping.plugin/fping.plugin.in b/collectors/fping.plugin/fping.plugin.in new file mode 100755 index 000000000..2c03e418e --- /dev/null +++ b/collectors/fping.plugin/fping.plugin.in @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis +# GPL v3+ +# +# This plugin requires a latest version of fping. +# You can compile it from source, by running me with option: install + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + +if [ "${1}" = "install" ] + then + [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1 + + run() { + printf >&2 " > " + printf >&2 "%q " "${@}" + printf >&2 "\n" + "${@}" || exit 1 + } + + download() { + local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" + [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0 + + local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)" + [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0 + + echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1 + } + + [ ! -d /usr/src ] && run mkdir -p /usr/src + [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin + + run cd /usr/src + + if [ -d fping-4.0 ] + then + run rm -rf fping-4.0 || exit 1 + fi + + download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf - + [ $? -ne 0 ] && exit 1 + run cd fping-4.0 || exit 1 + + run ./configure --prefix=/usr/local + run make clean + run make + if [ -f /usr/local/bin/fping ] + then + run mv -f /usr/local/bin/fping /usr/local/bin/fping.old + fi + run mv src/fping /usr/local/bin/fping + run chown root:root /usr/local/bin/fping + run chmod 4755 /usr/local/bin/fping + echo >&2 + echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping." + echo >&2 + + fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)" + if [ "${fping}" != "/usr/local/bin/fping" ] + then + echo >&2 "You have another fping installed at: ${fping}." 
+ echo >&2 "Please set:" + echo >&2 + echo >&2 " fping=\"/usr/local/bin/fping\"" + echo >&2 + echo >&2 "at /etc/netdata/fping.conf" + echo >&2 + fi + exit 0 +fi + +# ----------------------------------------------------------------------------- + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + echo "DISABLE" + exit 1 +} + +debug=0 +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- + +# store in ${plugin} the name we run under +# this allows us to copy/link fping.plugin under a different name +# to have multiple fping plugins running with different settings +plugin="${PROGRAM_NAME/.plugin/}" + + +# ----------------------------------------------------------------------------- + +# the frequency to send info to netdata +# passed by netdata as the first parameter +update_every="${1-1}" + +# the netdata configuration directory +# passed by netdata as an environment variable +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" + +# ----------------------------------------------------------------------------- +# configuration options +# can be overwritten at /etc/netdata/fping.conf + +# the fping binary to use +# we need one that can output netdata friendly info (supporting: -N) +# if you have multiple versions, put here the full filename of the right one +fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )" + +# a space separated list of hosts to fping +# we suggest to put names here and the IPs of these names in /etc/hosts +hosts="" + +# the time in milliseconds (1 sec = 1000 ms) +# to ping the hosts - by default 5 pings per host per iteration +ping_every="$((update_every * 1000 / 5))" + +# fping options +fping_opts="-R -b 56 -i 1 -r 0 -t 5000" + +# ----------------------------------------------------------------------------- +# load the configuration files + +for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf" +do + if [ -f "${CONFIG}" ] + then + info "Loading config file '${CONFIG}'..." + source "${CONFIG}" + [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi +done + +if [ -z "${hosts}" ] +then + fatal "no hosts configured - nothing to do." +fi + +if [ -z "${fping}" ] +then + fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'" +fi + +if [ ! -x "${fping}" ] +then + fatal "fping command '${fping}' is not executable - cannot proceed." +fi + +if [ ${ping_every} -lt 20 ] + then + warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms." + ping_every=20 +fi + +# the fping options we will use +options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} ) + +# execute fping +info "starting fping: ${fping} ${options[*]}" +exec "${fping}" "${options[@]}" + +# if we cannot execute fping, stop +fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)." 
diff --git a/collectors/freebsd.plugin/Makefile.am b/collectors/freebsd.plugin/Makefile.am new file mode 100644 index 000000000..e80ec702d --- /dev/null +++ b/collectors/freebsd.plugin/Makefile.am @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/collectors/freebsd.plugin/Makefile.in b/collectors/freebsd.plugin/Makefile.in new file mode 100644 index 000000000..c88b3d755 --- /dev/null +++ b/collectors/freebsd.plugin/Makefile.in @@ -0,0 +1,457 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/freebsd.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + 
$(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = 
@abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c new file mode 100644 index 000000000..10279aabc --- /dev/null +++ b/collectors/freebsd.plugin/freebsd_devstat.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" + +#include + +struct disk { + char *name; + uint32_t hash; + size_t len; + + // flags + int configured; + int enabled; + int updated; + + int do_io; + int do_ops; + int do_qops; + int do_util; + int do_iotime; + int do_await; + int do_avagsz; + int do_svctm; + + + // data for differential charts + + struct prev_dstat { + collected_number bytes_read; + collected_number bytes_write; + collected_number bytes_free; + collected_number operations_read; + collected_number operations_write; + collected_number operations_other; + collected_number operations_free; + collected_number duration_read_ms; + collected_number duration_write_ms; + collected_number duration_other_ms; + collected_number duration_free_ms; + collected_number busy_time_ms; + } prev_dstat; + + // charts and dimensions + + RRDSET *st_io; + RRDDIM *rd_io_in; + RRDDIM *rd_io_out; + RRDDIM *rd_io_free; + + RRDSET *st_ops; + RRDDIM *rd_ops_in; + RRDDIM *rd_ops_out; + RRDDIM *rd_ops_other; + RRDDIM *rd_ops_free; + + RRDSET *st_qops; + RRDDIM *rd_qops; + + RRDSET *st_util; + RRDDIM *rd_util; + + RRDSET *st_iotime; + RRDDIM *rd_iotime_in; + RRDDIM *rd_iotime_out; + RRDDIM *rd_iotime_other; + RRDDIM *rd_iotime_free; + + RRDSET *st_await; + RRDDIM *rd_await_in; + RRDDIM *rd_await_out; + RRDDIM *rd_await_other; + RRDDIM *rd_await_free; + + RRDSET *st_avagsz; + RRDDIM *rd_avagsz_in; + RRDDIM *rd_avagsz_out; + RRDDIM *rd_avagsz_free; + + RRDSET *st_svctm; + RRDDIM *rd_svctm; + + struct disk *next; +}; + +static struct disk *disks_root = NULL, *disks_last_used = NULL; + +static size_t disks_added = 0, 
disks_found = 0; + +static void disk_free(struct disk *dm) { + if (likely(dm->st_io)) + rrdset_is_obsolete(dm->st_io); + if (likely(dm->st_ops)) + rrdset_is_obsolete(dm->st_ops); + if (likely(dm->st_qops)) + rrdset_is_obsolete(dm->st_qops); + if (likely(dm->st_util)) + rrdset_is_obsolete(dm->st_util); + if (likely(dm->st_iotime)) + rrdset_is_obsolete(dm->st_iotime); + if (likely(dm->st_await)) + rrdset_is_obsolete(dm->st_await); + if (likely(dm->st_avagsz)) + rrdset_is_obsolete(dm->st_avagsz); + if (likely(dm->st_svctm)) + rrdset_is_obsolete(dm->st_svctm); + + disks_added--; + freez(dm->name); + freez(dm); +} + +static void disks_cleanup() { + if (likely(disks_found == disks_added)) return; + + struct disk *dm = disks_root, *last = NULL; + while(dm) { + if (unlikely(!dm->updated)) { + // info("Removing disk '%s', linked after '%s'", dm->name, last?last->name:"ROOT"); + + if (disks_last_used == dm) + disks_last_used = last; + + struct disk *t = dm; + + if (dm == disks_root || !last) + disks_root = dm = dm->next; + + else + last->next = dm = dm->next; + + t->next = NULL; + disk_free(t); + } + else { + last = dm; + dm->updated = 0; + dm = dm->next; + } + } +} + +static struct disk *get_disk(const char *name) { + struct disk *dm; + + uint32_t hash = simple_hash(name); + + // search it, from the last position to the end + for(dm = disks_last_used ; dm ; dm = dm->next) { + if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) { + disks_last_used = dm->next; + return dm; + } + } + + // search it from the beginning to the last position we used + for(dm = disks_root ; dm != disks_last_used ; dm = dm->next) { + if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) { + disks_last_used = dm->next; + return dm; + } + } + + // create a new one + dm = callocz(1, sizeof(struct disk)); + dm->name = strdupz(name); + dm->hash = simple_hash(dm->name); + dm->len = strlen(dm->name); + disks_added++; + + // link it to the end + if (disks_root) { + struct disk *e; + for(e = disks_root; e->next ; e = e->next) ; + e->next = dm; + } + else + disks_root = dm; + + return dm; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kern.devstat + +int do_kern_devstat(int update_every, usec_t dt) { + +#define DELAULT_EXLUDED_DISKS "" +#define CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat" +#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64 + + static int enable_new_disks = -1; + static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1, + do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1; + static SIMPLE_PATTERN *excluded_disks = NULL; + + if (unlikely(enable_new_disks == -1)) { + enable_new_disks = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, + "enable new disks detected at runtime", CONFIG_BOOLEAN_AUTO); + + enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, + "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO); + + do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks", + CONFIG_BOOLEAN_YES); + + do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks", + CONFIG_BOOLEAN_AUTO); + do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks", + CONFIG_BOOLEAN_AUTO); + do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all 
disks", + CONFIG_BOOLEAN_AUTO); + do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks", + CONFIG_BOOLEAN_AUTO); + do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks", + CONFIG_BOOLEAN_AUTO); + do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks", + CONFIG_BOOLEAN_AUTO); + do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks", + CONFIG_BOOLEAN_AUTO); + do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks", + CONFIG_BOOLEAN_AUTO); + + excluded_disks = simple_pattern_create( + config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching", DELAULT_EXLUDED_DISKS) + , NULL + , SIMPLE_PATTERN_EXACT + ); + } + + if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) { + static int mib_numdevs[3] = {0, 0, 0}; + int numdevs; + int common_error = 0; + + if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) { + common_error = 1; + } else { + static int mib_devstat[3] = {0, 0, 0}; + static void *devstat_data = NULL; + static int old_numdevs = 0; + + if (unlikely(numdevs != old_numdevs)) { + devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) * + numdevs); // there is generation number before devstat structures + old_numdevs = numdevs; + } + if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data, + sizeof(long) + sizeof(struct devstat) * numdevs))) { + common_error = 1; + } else { + struct devstat *dstat; + int i; + collected_number total_disk_kbytes_read = 0; + collected_number total_disk_kbytes_write = 0; + + disks_found = 0; + + dstat = devstat_data + sizeof(long); // skip generation number + + for (i = 0; i < numdevs; i++) { + if (likely(do_system_io)) { + if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || + ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) { + total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR; + total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR; + } + } + + if (unlikely(!enable_pass_devices)) + if ((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS) + continue; + + if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || + ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) { + char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1]; + struct cur_dstat { + collected_number duration_read_ms; + collected_number duration_write_ms; + collected_number duration_other_ms; + collected_number duration_free_ms; + collected_number busy_time_ms; + } cur_dstat; + + sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number); + + struct disk *dm = get_disk(disk); + dm->updated = 1; + disks_found++; + + if(unlikely(!dm->configured)) { + char var_name[4096 + 1]; + + // this is the first time we see this disk + + // remember we configured it + dm->configured = 1; + + dm->enabled = enable_new_disks; + + if (likely(dm->enabled)) + dm->enabled = !simple_pattern_matches(excluded_disks, disk); + + snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk); + dm->enabled = config_get_boolean_ondemand(var_name, "enabled", dm->enabled); + + dm->do_io = config_get_boolean_ondemand(var_name, "bandwidth", do_io); + dm->do_ops = 
config_get_boolean_ondemand(var_name, "operations", do_ops); + dm->do_qops = config_get_boolean_ondemand(var_name, "queued operations", do_qops); + dm->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", do_util); + dm->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", do_iotime); + dm->do_await = config_get_boolean_ondemand(var_name, "average completed i/o time", + do_await); + dm->do_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth", + do_avagsz); + dm->do_svctm = config_get_boolean_ondemand(var_name, "average service time", do_svctm); + + // initialise data for differential charts + + dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ]; + dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE]; + dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE]; + dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ]; + dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE]; + dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA]; + dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE]; + dm->prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000 + + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE; + dm->prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000 + + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE; + dm->prev_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000 + + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE; + dm->prev_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000 + + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE; + dm->prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + + dstat[i].busy_time.frac * BINTIME_SCALE; + } + + cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000 + + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE; + cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000 + + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE; + cur_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000 + + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE; + cur_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000 + + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE; + + cur_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE; + + // -------------------------------------------------------------------- + + if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO && + (dstat[i].bytes[DEVSTAT_READ] || + dstat[i].bytes[DEVSTAT_WRITE] || + dstat[i].bytes[DEVSTAT_FREE]))) { + if (unlikely(!dm->st_io)) { + dm->st_io = rrdset_create_localhost("disk", + disk, + NULL, + disk, + "disk.io", + "Disk I/O Bandwidth", + "kilobytes/s", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_IO, + update_every, + RRDSET_TYPE_AREA + ); + + dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_io_free = rrddim_add(dm->st_io, "frees", NULL, -1, KILO_FACTOR, + RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(dm->st_io); + + rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]); + rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]); + rrddim_set_by_pointer(dm->st_io, dm->rd_io_free, dstat[i].bytes[DEVSTAT_FREE]); + 
rrdset_done(dm->st_io); + } + + // -------------------------------------------------------------------- + + if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO && + (dstat[i].operations[DEVSTAT_READ] || + dstat[i].operations[DEVSTAT_WRITE] || + dstat[i].operations[DEVSTAT_NO_DATA] || + dstat[i].operations[DEVSTAT_FREE]))) { + if (unlikely(!dm->st_ops)) { + dm->st_ops = rrdset_create_localhost("disk_ops", + disk, + NULL, + disk, + "disk.ops", + "Disk Completed I/O Operations", + "operations/s", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_OPS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL); + + dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_ops_other = rrddim_add(dm->st_ops, "other", NULL, 1, 1, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_ops_free = rrddim_add(dm->st_ops, "frees", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(dm->st_ops); + + rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]); + rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]); + rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_other, dstat[i].operations[DEVSTAT_NO_DATA]); + rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_free, dstat[i].operations[DEVSTAT_FREE]); + rrdset_done(dm->st_ops); + } + + // -------------------------------------------------------------------- + + if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO && + (dstat[i].start_count || dstat[i].end_count))) { + if (unlikely(!dm->st_qops)) { + dm->st_qops = rrdset_create_localhost("disk_qops", + disk, + NULL, + disk, + "disk.qops", + "Disk Current I/O Operations", + "operations", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_QOPS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL); + + dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(dm->st_qops); + + rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count); + rrdset_done(dm->st_qops); + } + + // -------------------------------------------------------------------- + + if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO && + cur_dstat.busy_time_ms)) { + if (unlikely(!dm->st_util)) { + dm->st_util = rrdset_create_localhost("disk_util", + disk, + NULL, + disk, + "disk.util", + "Disk Utilization Time", + "% of time working", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_UTIL, + update_every, + RRDSET_TYPE_AREA + ); + + rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL); + + dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10, + RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(dm->st_util); + + rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms); + rrdset_done(dm->st_util); + } + + // -------------------------------------------------------------------- + + if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO && + (cur_dstat.duration_read_ms || + cur_dstat.duration_write_ms || + cur_dstat.duration_other_ms || + cur_dstat.duration_free_ms))) { + if (unlikely(!dm->st_iotime)) { + dm->st_iotime = rrdset_create_localhost("disk_iotime", + disk, + NULL, + disk, + "disk.iotime", + "Disk Total I/O Time", + "milliseconds/s", + "freebsd.plugin", + 
"devstat", + NETDATA_CHART_PRIO_DISK_IOTIME, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL); + + dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_iotime_other = rrddim_add(dm->st_iotime, "other", NULL, 1, 1, + RRD_ALGORITHM_INCREMENTAL); + dm->rd_iotime_free = rrddim_add(dm->st_iotime, "frees", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(dm->st_iotime); + + rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms); + rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms); + rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_other, cur_dstat.duration_other_ms); + rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_free, cur_dstat.duration_free_ms); + rrdset_done(dm->st_iotime); + } + + // -------------------------------------------------------------------- + // calculate differential charts + // only if this is not the first time we run + + if (likely(dt)) { + + // -------------------------------------------------------------------- + + if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO && + (dstat[i].operations[DEVSTAT_READ] || + dstat[i].operations[DEVSTAT_WRITE] || + dstat[i].operations[DEVSTAT_NO_DATA] || + dstat[i].operations[DEVSTAT_FREE]))) { + if (unlikely(!dm->st_await)) { + dm->st_await = rrdset_create_localhost("disk_await", + disk, + NULL, + disk, + "disk.await", + "Average Completed I/O Operation Time", + "ms per operation", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_AWAIT, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL); + + dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1, + RRD_ALGORITHM_ABSOLUTE); + dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1, + RRD_ALGORITHM_ABSOLUTE); + dm->rd_await_other = rrddim_add(dm->st_await, "other", NULL, 1, 1, + RRD_ALGORITHM_ABSOLUTE); + dm->rd_await_free = rrddim_add(dm->st_await, "frees", NULL, -1, 1, + RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(dm->st_await); + + rrddim_set_by_pointer(dm->st_await, dm->rd_await_in, + (dstat[i].operations[DEVSTAT_READ] - + dm->prev_dstat.operations_read) ? + (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) / + (dstat[i].operations[DEVSTAT_READ] - + dm->prev_dstat.operations_read) : + 0); + rrddim_set_by_pointer(dm->st_await, dm->rd_await_out, + (dstat[i].operations[DEVSTAT_WRITE] - + dm->prev_dstat.operations_write) ? + (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) / + (dstat[i].operations[DEVSTAT_WRITE] - + dm->prev_dstat.operations_write) : + 0); + rrddim_set_by_pointer(dm->st_await, dm->rd_await_other, + (dstat[i].operations[DEVSTAT_NO_DATA] - + dm->prev_dstat.operations_other) ? + (cur_dstat.duration_other_ms - dm->prev_dstat.duration_other_ms) / + (dstat[i].operations[DEVSTAT_NO_DATA] - + dm->prev_dstat.operations_other) : + 0); + rrddim_set_by_pointer(dm->st_await, dm->rd_await_free, + (dstat[i].operations[DEVSTAT_FREE] - + dm->prev_dstat.operations_free) ? 
+ (cur_dstat.duration_free_ms - dm->prev_dstat.duration_free_ms) / + (dstat[i].operations[DEVSTAT_FREE] - + dm->prev_dstat.operations_free) : + 0); + rrdset_done(dm->st_await); + } + + // -------------------------------------------------------------------- + + if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO && + (dstat[i].operations[DEVSTAT_READ] || + dstat[i].operations[DEVSTAT_WRITE] || + dstat[i].operations[DEVSTAT_FREE]))) { + if (unlikely(!dm->st_avagsz)) { + dm->st_avagsz = rrdset_create_localhost("disk_avgsz", + disk, + NULL, + disk, + "disk.avgsz", + "Average Completed I/O Operation Bandwidth", + "kilobytes per operation", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_AVGSZ, + update_every, + RRDSET_TYPE_AREA + ); + + rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL); + + dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR, + RRD_ALGORITHM_ABSOLUTE); + dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR, + RRD_ALGORITHM_ABSOLUTE); + dm->rd_avagsz_free = rrddim_add(dm->st_avagsz, "frees", NULL, -1, KILO_FACTOR, + RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(dm->st_avagsz); + + rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in, + (dstat[i].operations[DEVSTAT_READ] - + dm->prev_dstat.operations_read) ? + (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) / + (dstat[i].operations[DEVSTAT_READ] - + dm->prev_dstat.operations_read) : + 0); + rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out, + (dstat[i].operations[DEVSTAT_WRITE] - + dm->prev_dstat.operations_write) ? + (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) / + (dstat[i].operations[DEVSTAT_WRITE] - + dm->prev_dstat.operations_write) : + 0); + rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_free, + (dstat[i].operations[DEVSTAT_FREE] - + dm->prev_dstat.operations_free) ? + (dstat[i].bytes[DEVSTAT_FREE] - dm->prev_dstat.bytes_free) / + (dstat[i].operations[DEVSTAT_FREE] - + dm->prev_dstat.operations_free) : + 0); + rrdset_done(dm->st_avagsz); + } + + // -------------------------------------------------------------------- + + if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO && + (dstat[i].operations[DEVSTAT_READ] || + dstat[i].operations[DEVSTAT_WRITE] || + dstat[i].operations[DEVSTAT_NO_DATA] || + dstat[i].operations[DEVSTAT_FREE]))) { + if (unlikely(!dm->st_svctm)) { + dm->st_svctm = rrdset_create_localhost("disk_svctm", + disk, + NULL, + disk, + "disk.svctm", + "Average Service Time", + "ms per operation", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_SVCTM, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL); + + dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1, + RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(dm->st_svctm); + + rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm, + ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) + + (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) + + (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) + + (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) ? 
+ (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) / + ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) + + (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) + + (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) + + (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) : + 0); + rrdset_done(dm->st_svctm); + } + + // -------------------------------------------------------------------- + + dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ]; + dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE]; + dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE]; + dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ]; + dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE]; + dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA]; + dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE]; + dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms; + dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms; + dm->prev_dstat.duration_other_ms = cur_dstat.duration_other_ms; + dm->prev_dstat.duration_free_ms = cur_dstat.duration_free_ms; + dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms; + } + } + } + + // -------------------------------------------------------------------- + + if (likely(do_system_io)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost("system", + "io", + NULL, + "disk", + NULL, + "Disk I/O", + "kilobytes/s", + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_SYSTEM_IO, + update_every, + RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read); + rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write); + rrdset_done(st); + } + } + } + if (unlikely(common_error)) { + do_system_io = 0; + error("DISABLED: system.io chart"); + do_io = 0; + error("DISABLED: disk.* charts"); + do_ops = 0; + error("DISABLED: disk_ops.* charts"); + do_qops = 0; + error("DISABLED: disk_qops.* charts"); + do_util = 0; + error("DISABLED: disk_util.* charts"); + do_iotime = 0; + error("DISABLED: disk_iotime.* charts"); + do_await = 0; + error("DISABLED: disk_await.* charts"); + do_avagsz = 0; + error("DISABLED: disk_avgsz.* charts"); + do_svctm = 0; + error("DISABLED: disk_svctm.* charts"); + error("DISABLED: kern.devstat module"); + return 1; + } + } else { + error("DISABLED: kern.devstat module"); + return 1; + } + + disks_cleanup(); + + return 0; +} diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c new file mode 100644 index 000000000..e15845857 --- /dev/null +++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c @@ -0,0 +1,618 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" + +#include <ifaddrs.h> + +struct cgroup_network_interface { + char *name; + uint32_t hash; + size_t len; + + // flags + int configured; + int enabled; + int updated; + + int do_bandwidth; + int do_packets; + int do_errors; + int do_drops; + int do_events; + + // charts and dimensions + + RRDSET *st_bandwidth; + RRDDIM *rd_bandwidth_in; + RRDDIM *rd_bandwidth_out; + + RRDSET *st_packets; + RRDDIM *rd_packets_in; + RRDDIM *rd_packets_out; + RRDDIM *rd_packets_m_in; + RRDDIM *rd_packets_m_out; + + RRDSET *st_errors; +
RRDDIM *rd_errors_in; + RRDDIM *rd_errors_out; + + RRDSET *st_drops; + RRDDIM *rd_drops_in; + RRDDIM *rd_drops_out; + + RRDSET *st_events; + RRDDIM *rd_events_coll; + + struct cgroup_network_interface *next; +}; + +static struct cgroup_network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL; + +static size_t network_interfaces_added = 0, network_interfaces_found = 0; + +static void network_interface_free(struct cgroup_network_interface *ifm) { + if (likely(ifm->st_bandwidth)) + rrdset_is_obsolete(ifm->st_bandwidth); + if (likely(ifm->st_packets)) + rrdset_is_obsolete(ifm->st_packets); + if (likely(ifm->st_errors)) + rrdset_is_obsolete(ifm->st_errors); + if (likely(ifm->st_drops)) + rrdset_is_obsolete(ifm->st_drops); + if (likely(ifm->st_events)) + rrdset_is_obsolete(ifm->st_events); + + network_interfaces_added--; + freez(ifm->name); + freez(ifm); +} + +static void network_interfaces_cleanup() { + if (likely(network_interfaces_found == network_interfaces_added)) return; + + struct cgroup_network_interface *ifm = network_interfaces_root, *last = NULL; + while(ifm) { + if (unlikely(!ifm->updated)) { + // info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT"); + + if (network_interfaces_last_used == ifm) + network_interfaces_last_used = last; + + struct cgroup_network_interface *t = ifm; + + if (ifm == network_interfaces_root || !last) + network_interfaces_root = ifm = ifm->next; + + else + last->next = ifm = ifm->next; + + t->next = NULL; + network_interface_free(t); + } + else { + last = ifm; + ifm->updated = 0; + ifm = ifm->next; + } + } +} + +static struct cgroup_network_interface *get_network_interface(const char *name) { + struct cgroup_network_interface *ifm; + + uint32_t hash = simple_hash(name); + + // search it, from the last position to the end + for(ifm = network_interfaces_last_used ; ifm ; ifm = ifm->next) { + if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) { + network_interfaces_last_used = ifm->next; + return ifm; + } + } + + // search it from the beginning to the last position we used + for(ifm = network_interfaces_root ; ifm != network_interfaces_last_used ; ifm = ifm->next) { + if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) { + network_interfaces_last_used = ifm->next; + return ifm; + } + } + + // create a new one + ifm = callocz(1, sizeof(struct cgroup_network_interface)); + ifm->name = strdupz(name); + ifm->hash = simple_hash(ifm->name); + ifm->len = strlen(ifm->name); + network_interfaces_added++; + + // link it to the end + if (network_interfaces_root) { + struct cgroup_network_interface *e; + for(e = network_interfaces_root; e->next ; e = e->next) ; + e->next = ifm; + } + else + network_interfaces_root = ifm; + + return ifm; +} + +// -------------------------------------------------------------------------------------------------------------------- +// getifaddrs + +int do_getifaddrs(int update_every, usec_t dt) { + (void)dt; + +#define DEFAULT_EXCLUDED_INTERFACES "lo*" +#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe*" +#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs" + + static int enable_new_interfaces = -1; + static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1, do_bandwidth_net = -1, do_packets_net = -1, + do_errors = -1, do_drops = -1, do_events = -1; + static SIMPLE_PATTERN *excluded_interfaces = NULL, *physical_interfaces = NULL; + + if (unlikely(enable_new_interfaces == -1)) { +
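// first iteration only: read the configuration section once; the static variables keep the values for all later calls +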
enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, + "enable new interfaces detected at runtime", + CONFIG_BOOLEAN_AUTO); + + do_bandwidth_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for physical interfaces", + CONFIG_BOOLEAN_AUTO); + do_packets_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total packets for physical interfaces", + CONFIG_BOOLEAN_AUTO); + do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces", + CONFIG_BOOLEAN_AUTO); + do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces", + CONFIG_BOOLEAN_AUTO); + do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces", + CONFIG_BOOLEAN_AUTO); + do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces", + CONFIG_BOOLEAN_AUTO); + do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces", + CONFIG_BOOLEAN_AUTO); + do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces", + CONFIG_BOOLEAN_AUTO); + do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces", + CONFIG_BOOLEAN_AUTO); + + excluded_interfaces = simple_pattern_create( + config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching", DEFAULT_EXCLUDED_INTERFACES) + , NULL + , SIMPLE_PATTERN_EXACT + ); + physical_interfaces = simple_pattern_create( + config_get(CONFIG_SECTION_GETIFADDRS, "set physical interfaces for system.net", DEFAULT_PHYSICAL_INTERFACES) + , NULL + , SIMPLE_PATTERN_EXACT + ); + } + + if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors || do_bandwidth_net || do_packets_net || + do_drops || do_events)) { + struct ifaddrs *ifap; + + if (unlikely(getifaddrs(&ifap))) { + error("FREEBSD: getifaddrs() failed"); + do_bandwidth_net = 0; + error("DISABLED: system.net chart"); + do_packets_net = 0; + error("DISABLED: system.packets chart"); + do_bandwidth_ipv4 = 0; + error("DISABLED: system.ipv4 chart"); + do_bandwidth_ipv6 = 0; + error("DISABLED: system.ipv6 chart"); + do_bandwidth = 0; + error("DISABLED: net.* charts"); + do_packets = 0; + error("DISABLED: net_packets.* charts"); + do_errors = 0; + error("DISABLED: net_errors.* charts"); + do_drops = 0; + error("DISABLED: net_drops.* charts"); + do_events = 0; + error("DISABLED: net_events.* charts"); + error("DISABLED: getifaddrs module"); + return 1; + } else { +#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s) + struct ifaddrs *ifa; + struct iftot { + u_long ift_ibytes; + u_long ift_obytes; + u_long ift_ipackets; + u_long ift_opackets; + u_long ift_imcasts; + u_long ift_omcasts; + } iftot = {0, 0, 0, 0, 0, 0}; + + // -------------------------------------------------------------------- + + if (likely(do_bandwidth_net)) { + + iftot.ift_ibytes = iftot.ift_obytes = 0; + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family != AF_LINK) + continue; + if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name)) + continue; + iftot.ift_ibytes += IFA_DATA(ibytes); + iftot.ift_obytes += IFA_DATA(obytes); + } + + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost("system", + "net", + NULL, + "network", + NULL, + "Network Traffic", + "kilobits/s", + "freebsd.plugin", +
"getifaddrs", + NETDATA_CHART_PRIO_SYSTEM_NET, + update_every, + RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes); + rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_packets_net)) { + + iftot.ift_ipackets = iftot.ift_opackets = iftot.ift_imcasts = iftot.ift_omcasts = 0; + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family != AF_LINK) + continue; + if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name)) + continue; + iftot.ift_ipackets += IFA_DATA(ipackets); + iftot.ift_opackets += IFA_DATA(opackets); + iftot.ift_imcasts += IFA_DATA(imcasts); + iftot.ift_omcasts += IFA_DATA(omcasts); + } + + static RRDSET *st = NULL; + static RRDDIM *rd_packets_in = NULL, *rd_packets_out = NULL, *rd_packets_m_in = NULL, *rd_packets_m_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost("system", + "packets", + NULL, + "network", + NULL, + "Network Packets", + "packets/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_SYSTEM_PACKETS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_packets_in = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_packets_out = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_packets_m_in = rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_packets_m_out = rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_packets_in, iftot.ift_ipackets); + rrddim_set_by_pointer(st, rd_packets_out, iftot.ift_opackets); + rrddim_set_by_pointer(st, rd_packets_m_in, iftot.ift_imcasts); + rrddim_set_by_pointer(st, rd_packets_m_out, iftot.ift_omcasts); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_bandwidth_ipv4)) { + iftot.ift_ibytes = iftot.ift_obytes = 0; + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family != AF_INET) + continue; + iftot.ift_ibytes += IFA_DATA(ibytes); + iftot.ift_obytes += IFA_DATA(obytes); + } + + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost("system", + "ipv4", + NULL, + "network", + NULL, + "IPv4 Bandwidth", + "kilobits/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_SYSTEM_IPV4, + update_every, + RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes); + rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_bandwidth_ipv6)) { + iftot.ift_ibytes = iftot.ift_obytes = 0; + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + iftot.ift_ibytes += IFA_DATA(ibytes); + iftot.ift_obytes += IFA_DATA(obytes); + } + + static RRDSET *st = NULL; + static RRDDIM *rd_in = 
NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost("system", + "ipv6", + NULL, + "network", + NULL, + "IPv6 Bandwidth", + "kilobits/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_SYSTEM_IPV6, + update_every, + RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes); + rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + network_interfaces_found = 0; + + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family != AF_LINK) + continue; + + struct cgroup_network_interface *ifm = get_network_interface(ifa->ifa_name); + ifm->updated = 1; + network_interfaces_found++; + + if (unlikely(!ifm->configured)) { + char var_name[4096 + 1]; + + // this is the first time we see this network interface + + // remember we configured it + ifm->configured = 1; + + ifm->enabled = enable_new_interfaces; + + if (likely(ifm->enabled)) + ifm->enabled = !simple_pattern_matches(excluded_interfaces, ifa->ifa_name); + + snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name); + ifm->enabled = config_get_boolean_ondemand(var_name, "enabled", ifm->enabled); + + if (unlikely(ifm->enabled == CONFIG_BOOLEAN_NO)) + continue; + + ifm->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth); + ifm->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets); + ifm->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors); + ifm->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops); + ifm->do_events = config_get_boolean_ondemand(var_name, "events", do_events); + } + + if (unlikely(!ifm->enabled)) + continue; + + // -------------------------------------------------------------------- + + if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO && + (IFA_DATA(ibytes) || IFA_DATA(obytes)))) { + if (unlikely(!ifm->st_bandwidth)) { + ifm->st_bandwidth = rrdset_create_localhost("net", + ifa->ifa_name, + NULL, + ifa->ifa_name, + "net.net", + "Bandwidth", + "kilobits/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_FIRST_NET_IFACE, + update_every, + RRDSET_TYPE_AREA + ); + + ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(ifm->st_bandwidth); + + rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes)); + rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes)); + rrdset_done(ifm->st_bandwidth); + } + + // -------------------------------------------------------------------- + + if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO && + (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) { + if (unlikely(!ifm->st_packets)) { + ifm->st_packets = rrdset_create_localhost("net_packets", + ifa->ifa_name, + NULL, + ifa->ifa_name, + "net.packets", + "Packets", + "packets/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_FIRST_NET_PACKETS, + update_every, + RRDSET_TYPE_LINE + ); + + 
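// packets, errors, drops and events are flagged as detail charts, unlike the main per-interface bandwidth chart +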
rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL); + + ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1, + RRD_ALGORITHM_INCREMENTAL); + ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1, + RRD_ALGORITHM_INCREMENTAL); + ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(ifm->st_packets); + + rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets)); + rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets)); + rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts)); + rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts)); + rrdset_done(ifm->st_packets); + } + + // -------------------------------------------------------------------- + + if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO && + (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) { + if (unlikely(!ifm->st_errors)) { + ifm->st_errors = rrdset_create_localhost("net_errors", + ifa->ifa_name, + NULL, + ifa->ifa_name, + "net.errors", + "Interface Errors", + "errors/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_FIRST_NET_ERRORS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL); + + ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(ifm->st_errors); + + rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors)); + rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors)); + rrdset_done(ifm->st_errors); + } + // -------------------------------------------------------------------- + + if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO && + (IFA_DATA(iqdrops) + #if __FreeBSD__ >= 11 + || IFA_DATA(oqdrops) +#endif + ))) { + if (unlikely(!ifm->st_drops)) { + ifm->st_drops = rrdset_create_localhost("net_drops", + ifa->ifa_name, + NULL, + ifa->ifa_name, + "net.drops", + "Interface Drops", + "drops/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_FIRST_NET_DROPS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL); + + ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); +#if __FreeBSD__ >= 11 + ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); +#endif + } else + rrdset_next(ifm->st_drops); + + rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops)); +#if __FreeBSD__ >= 11 + rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops)); +#endif + rrdset_done(ifm->st_drops); + } + + // -------------------------------------------------------------------- + + if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO && + IFA_DATA(collisions))) { + if (unlikely(!ifm->st_events)) { + ifm->st_events = rrdset_create_localhost("net_events", + ifa->ifa_name, + NULL, + ifa->ifa_name, + "net.events", + "Network Interface Events", + "events/s", + "freebsd.plugin", + "getifaddrs", + NETDATA_CHART_PRIO_FIRST_NET_EVENTS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(ifm->st_events, 
RRDSET_FLAG_DETAIL); + + ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1, + RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(ifm->st_events); + + rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions)); + rrdset_done(ifm->st_events); + } + } + + freeifaddrs(ifap); + } + } else { + error("DISABLED: getifaddrs module"); + return 1; + } + + network_interfaces_cleanup(); + + return 0; +} diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c new file mode 100644 index 000000000..c86f23166 --- /dev/null +++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" + +#include <sys/mount.h> + +struct mount_point { + char *name; + uint32_t hash; + size_t len; + + // flags + int configured; + int enabled; + int updated; + + int do_space; + int do_inodes; + + size_t collected; // the number of times this has been collected + + // charts and dimensions + + RRDSET *st_space; + RRDDIM *rd_space_used; + RRDDIM *rd_space_avail; + RRDDIM *rd_space_reserved; + + RRDSET *st_inodes; + RRDDIM *rd_inodes_used; + RRDDIM *rd_inodes_avail; + + struct mount_point *next; +}; + +static struct mount_point *mount_points_root = NULL, *mount_points_last_used = NULL; + +static size_t mount_points_added = 0, mount_points_found = 0; + +static void mount_point_free(struct mount_point *m) { + if (likely(m->st_space)) + rrdset_is_obsolete(m->st_space); + if (likely(m->st_inodes)) + rrdset_is_obsolete(m->st_inodes); + + mount_points_added--; + freez(m->name); + freez(m); +} + +static void mount_points_cleanup() { + if (likely(mount_points_found == mount_points_added)) return; + + struct mount_point *m = mount_points_root, *last = NULL; + while(m) { + if (unlikely(!m->updated)) { + // info("Removing mount point '%s', linked after '%s'", m->name, last?last->name:"ROOT"); + + if (mount_points_last_used == m) + mount_points_last_used = last; + + struct mount_point *t = m; + + if (m == mount_points_root || !last) + mount_points_root = m = m->next; + + else + last->next = m = m->next; + + t->next = NULL; + mount_point_free(t); + } + else { + last = m; + m->updated = 0; + m = m->next; + } + } +} + +static struct mount_point *get_mount_point(const char *name) { + struct mount_point *m; + + uint32_t hash = simple_hash(name); + + // search it, from the last position to the end + for(m = mount_points_last_used ; m ; m = m->next) { + if (unlikely(hash == m->hash && !strcmp(name, m->name))) { + mount_points_last_used = m->next; + return m; + } + } + + // search it from the beginning to the last position we used + for(m = mount_points_root ; m != mount_points_last_used ; m = m->next) { + if (unlikely(hash == m->hash && !strcmp(name, m->name))) { + mount_points_last_used = m->next; + return m; + } + } + + // create a new one + m = callocz(1, sizeof(struct mount_point)); + m->name = strdupz(name); + m->hash = simple_hash(m->name); + m->len = strlen(m->name); + mount_points_added++; + + // link it to the end + if (mount_points_root) { + struct mount_point *e; + for(e = mount_points_root; e->next ; e = e->next) ; + e->next = m; + } + else + mount_points_root = m; + + return m; +} + +// -------------------------------------------------------------------------------------------------------------------- +// getmntinfo + +int do_getmntinfo(int update_every, usec_t dt) { + (void)dt; + +#define DEFAULT_EXCLUDED_PATHS "/proc/*" +// taken from gnulib/mountlist.c and
shortened to FreeBSD related fstypes +#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none" +#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo" + + static int enable_new_mount_points = -1; + static int do_space = -1, do_inodes = -1; + static SIMPLE_PATTERN *excluded_mountpoints = NULL; + static SIMPLE_PATTERN *excluded_filesystems = NULL; + + if (unlikely(enable_new_mount_points == -1)) { + enable_new_mount_points = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, + "enable new mount points detected at runtime", + CONFIG_BOOLEAN_AUTO); + + do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO); + do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO); + + excluded_mountpoints = simple_pattern_create( + config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths", + DEFAULT_EXCLUDED_PATHS) + , NULL + , SIMPLE_PATTERN_EXACT + ); + + excluded_filesystems = simple_pattern_create( + config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems", + DEFAULT_EXCLUDED_FILESYSTEMS) + , NULL + , SIMPLE_PATTERN_EXACT + ); + } + + if (likely(do_space || do_inodes)) { + struct statfs *mntbuf; + int mntsize; + + // there is no mount info in sysctl MIBs + if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) { + error("FREEBSD: getmntinfo() failed"); + do_space = 0; + error("DISABLED: disk_space.* charts"); + do_inodes = 0; + error("DISABLED: disk_inodes.* charts"); + error("DISABLED: getmntinfo module"); + return 1; + } else { + int i; + + mount_points_found = 0; + + for (i = 0; i < mntsize; i++) { + char title[4096 + 1]; + + struct mount_point *m = get_mount_point(mntbuf[i].f_mntonname); + m->updated = 1; + mount_points_found++; + + if (unlikely(!m->configured)) { + char var_name[4096 + 1]; + + // this is the first time we see this filesystem + + // remember we configured it + m->configured = 1; + + m->enabled = enable_new_mount_points; + + if (likely(m->enabled)) + m->enabled = !(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname) + || simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename)); + + snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname); + m->enabled = config_get_boolean_ondemand(var_name, "enabled", m->enabled); + + if (unlikely(m->enabled == CONFIG_BOOLEAN_NO)) + continue; + + m->do_space = config_get_boolean_ondemand(var_name, "space usage", do_space); + m->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", do_inodes); + } + + if (unlikely(!m->enabled)) + continue; + + if (unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected)) + continue; + + // -------------------------------------------------------------------------- + + int rendered = 0; + + if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_blocks > 2))) { + if (unlikely(!m->st_space)) { + snprintfz(title, 4096, "Disk Space Usage for %s [%s]", + mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); + m->st_space = rrdset_create_localhost("disk_space", + mntbuf[i].f_mntonname, + NULL, + mntbuf[i].f_mntonname, + "disk.space", + title, + "GB", + "freebsd.plugin", + "getmntinfo", + NETDATA_CHART_PRIO_DISKSPACE_SPACE, + update_every, + RRDSET_TYPE_STACKED + ); + + m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, + mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + m->rd_space_used = rrddim_add(m->st_space, "used", NULL, +
mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", + mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(m->st_space); + + rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail); + rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks - + mntbuf[i].f_bfree)); + rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree - + mntbuf[i].f_bavail)); + rrdset_done(m->st_space); + + rendered++; + } + + // -------------------------------------------------------------------------- + + if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) { + if (unlikely(!m->st_inodes)) { + snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", + mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); + m->st_inodes = rrdset_create_localhost("disk_inodes", + mntbuf[i].f_mntonname, + NULL, + mntbuf[i].f_mntonname, + "disk.inodes", + title, + "Inodes", + "freebsd.plugin", + "getmntinfo", + NETDATA_CHART_PRIO_DISKSPACE_INODES, + update_every, + RRDSET_TYPE_STACKED + ); + + m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(m->st_inodes); + + rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree); + rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files - + mntbuf[i].f_ffree)); + rrdset_done(m->st_inodes); + + rendered++; + } + + if (likely(rendered)) + m->collected++; + } + } + } else { + error("DISABLED: getmntinfo module"); + return 1; + } + + mount_points_cleanup(); + + return 0; +} diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c new file mode 100644 index 000000000..c256da8b3 --- /dev/null +++ b/collectors/freebsd.plugin/freebsd_ipfw.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" + +#include <netinet/ip_fw.h> + +#define FREE_MEM_THRESHOLD 10000 // number of unused chunks that trigger memory freeing + +#define COMMON_IPFW_ERROR() error("DISABLED: ipfw.packets chart"); \ error("DISABLED: ipfw.bytes chart"); \ error("DISABLED: ipfw.dyn_active chart"); \ error("DISABLED: ipfw.dyn_expired chart"); \ error("DISABLED: ipfw.mem chart"); + +// -------------------------------------------------------------------------------------------------------------------- +// ipfw + +int do_ipfw(int update_every, usec_t dt) { + (void)dt; +#if __FreeBSD__ >= 11 + + static int do_static = -1, do_dynamic = -1, do_mem = -1; + + if (unlikely(do_static == -1)) { + do_static = config_get_boolean("plugin:freebsd:ipfw", "counters for static rules", 1); + do_dynamic = config_get_boolean("plugin:freebsd:ipfw", "number of dynamic rules", 1); + do_mem = config_get_boolean("plugin:freebsd:ipfw", "allocated memory", 1); + } + + // variables for getting ipfw configuration + + int error; + static int ipfw_socket = -1; + static ipfw_cfg_lheader *cfg = NULL; + ip_fw3_opheader *op3 = NULL; + static socklen_t *optlen = NULL, cfg_size = 0; + + // variables for static rules handling + + ipfw_obj_ctlv *ctlv = NULL; + ipfw_obj_tlv *rbase = NULL; + int rcnt = 0; + + int n, seen; + struct ip_fw_rule *rule; + struct ip_fw_bcounter *cntr; + int c = 0; + + char
rule_num_str[12]; + + // variables for dynamic rules handling + + caddr_t dynbase = NULL; + size_t dynsz = 0; + size_t readsz = sizeof(*cfg); + int ttype = 0; + ipfw_obj_tlv *tlv; + ipfw_dyn_rule *dyn_rule; + uint16_t rulenum, prev_rulenum = IPFW_DEFAULT_RULE; + unsigned srn, static_rules_num = 0; + static size_t dyn_rules_num_size = 0; + + static struct dyn_rule_num { + uint16_t rule_num; + uint32_t active_rules; + uint32_t expired_rules; + } *dyn_rules_num = NULL; + + uint32_t *dyn_rules_counter; + + if (likely(do_static | do_dynamic | do_mem)) { + + // initialize the smallest ipfw_cfg_lheader possible + + if (unlikely((optlen == NULL) || (cfg == NULL))) { + optlen = reallocz(optlen, sizeof(socklen_t)); + *optlen = cfg_size = 32; + cfg = reallocz(cfg, *optlen); + } + + // get socket descriptor and initialize ipfw_cfg_lheader structure + + if (unlikely(ipfw_socket == -1)) + ipfw_socket = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); + if (unlikely(ipfw_socket == -1)) { + error("FREEBSD: can't get socket for ipfw configuration"); + error("FREEBSD: run netdata as root to get access to ipfw data"); + COMMON_IPFW_ERROR(); + return 1; + } + + bzero(cfg, 32); + cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES; + op3 = &cfg->opheader; + op3->opcode = IP_FW_XGET; + + // get the ipfw configuration size first, then get the configuration itself + + *optlen = cfg_size; + error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen); + if (error) + if (errno != ENOMEM) { + error("FREEBSD: ipfw socket reading error"); + COMMON_IPFW_ERROR(); + return 1; + } + // ENOMEM from the probe only means the buffer was too small; the kernel reports the required size in cfg->size + if ((cfg->size > cfg_size) || ((cfg_size - cfg->size) > sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) { + *optlen = cfg_size = cfg->size; + cfg = reallocz(cfg, *optlen); + bzero(cfg, 32); + cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES; + op3 = &cfg->opheader; + op3->opcode = IP_FW_XGET; + error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen); + if (error) { + error("FREEBSD: ipfw socket reading error"); + COMMON_IPFW_ERROR(); + return 1; + } + } + + // go through static rules configuration structures + + ctlv = (ipfw_obj_ctlv *) (cfg + 1); + + if (cfg->flags & IPFW_CFG_GET_STATIC) { + /* We've requested static rules */ + if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) { + readsz += ctlv->head.length; + ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + + ctlv->head.length); + } + + if (ctlv->head.type == IPFW_TLV_RULE_LIST) { + rbase = (ipfw_obj_tlv *) (ctlv + 1); + rcnt = ctlv->count; + readsz += ctlv->head.length; + ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + ctlv->head.length); + } + } + + if ((cfg->flags & IPFW_CFG_GET_STATES) && (readsz != *optlen)) { + /* We may have some dynamic states */ + dynsz = *optlen - readsz; + /* Skip empty header */ + if (dynsz != sizeof(ipfw_obj_ctlv)) + dynbase = (caddr_t) ctlv; + else + dynsz = 0; + } + + // -------------------------------------------------------------------- + + if (likely(do_mem)) { + static RRDSET *st_mem = NULL; + static RRDDIM *rd_dyn_mem = NULL; + static RRDDIM *rd_stat_mem = NULL; + + if (unlikely(!st_mem)) { + st_mem = rrdset_create_localhost("ipfw", + "mem", + NULL, + "memory allocated", + NULL, + "Memory allocated by rules", + "bytes", + "freebsd.plugin", + "ipfw", + NETDATA_CHART_PRIO_IPFW_MEM, + update_every, + RRDSET_TYPE_STACKED + ); + rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL); + + rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1,
RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st_mem); + + rrddim_set_by_pointer(st_mem, rd_dyn_mem, dynsz); + rrddim_set_by_pointer(st_mem, rd_stat_mem, *optlen - dynsz); + rrdset_done(st_mem); + } + + // -------------------------------------------------------------------- + + static RRDSET *st_packets = NULL, *st_bytes = NULL; + RRDDIM *rd_packets = NULL, *rd_bytes = NULL; + + if (likely(do_static || do_dynamic)) { + if (likely(do_static)) { + if (unlikely(!st_packets)) + st_packets = rrdset_create_localhost("ipfw", + "packets", + NULL, + "static rules", + NULL, + "Packets", + "packets/s", + "freebsd.plugin", + "ipfw", + NETDATA_CHART_PRIO_IPFW_PACKETS, + update_every, + RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_packets); + + if (unlikely(!st_bytes)) + st_bytes = rrdset_create_localhost("ipfw", + "bytes", + NULL, + "static rules", + NULL, + "Bytes", + "bytes/s", + "freebsd.plugin", + "ipfw", + NETDATA_CHART_PRIO_IPFW_BYTES, + update_every, + RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_bytes); + } + + for (n = seen = 0; n < rcnt; n++, rbase = (ipfw_obj_tlv *) ((caddr_t) rbase + rbase->length)) { + cntr = (struct ip_fw_bcounter *) (rbase + 1); + rule = (struct ip_fw_rule *) ((caddr_t) cntr + cntr->size); + if (rule->rulenum != prev_rulenum) + static_rules_num++; + if (rule->rulenum > IPFW_DEFAULT_RULE) + break; + + if (likely(do_static)) { + sprintf(rule_num_str, "%d_%d", rule->rulenum, rule->id); + + rd_packets = rrddim_find(st_packets, rule_num_str); + if (unlikely(!rd_packets)) + rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt); + + rd_bytes = rrddim_find(st_bytes, rule_num_str); + if (unlikely(!rd_bytes)) + rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt); + } + + c += rbase->length; + seen++; + } + + if (likely(do_static)) { + rrdset_done(st_packets); + rrdset_done(st_bytes); + } + } + + // -------------------------------------------------------------------- + + // go through dynamic rules configuration structures + + if (likely(do_dynamic && (dynsz > 0))) { + if ((dyn_rules_num_size < sizeof(struct dyn_rule_num) * static_rules_num) || + ((dyn_rules_num_size - sizeof(struct dyn_rule_num) * static_rules_num) > + sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) { + dyn_rules_num_size = sizeof(struct dyn_rule_num) * static_rules_num; + dyn_rules_num = reallocz(dyn_rules_num, dyn_rules_num_size); + } + bzero(dyn_rules_num, sizeof(struct dyn_rule_num) * static_rules_num); + dyn_rules_num->rule_num = IPFW_DEFAULT_RULE; + + if (dynsz > 0 && ctlv->head.type == IPFW_TLV_DYNSTATE_LIST) { + dynbase += sizeof(*ctlv); + dynsz -= sizeof(*ctlv); + ttype = IPFW_TLV_DYN_ENT; + } + + while (dynsz > 0) { + tlv = (ipfw_obj_tlv *) dynbase; + if (tlv->type != ttype) + break; + + dyn_rule = (ipfw_dyn_rule *) (tlv + 1); + bcopy(&dyn_rule->rule, &rulenum, sizeof(rulenum)); + + for (srn = 0; srn < (static_rules_num - 1); srn++) { + if (dyn_rule->expire > 0) + dyn_rules_counter = &dyn_rules_num[srn].active_rules; + else + dyn_rules_counter = &dyn_rules_num[srn].expired_rules; + if (dyn_rules_num[srn].rule_num == rulenum) { + (*dyn_rules_counter)++; + break; + } + if (dyn_rules_num[srn].rule_num == IPFW_DEFAULT_RULE) { + dyn_rules_num[srn].rule_num = rulenum; + dyn_rules_num[srn + 1].rule_num = IPFW_DEFAULT_RULE; + (*dyn_rules_counter)++; + break; + } + } + + dynsz -= tlv->length; + dynbase += tlv->length; 
+ } + + // -------------------------------------------------------------------- + + static RRDSET *st_active = NULL, *st_expired = NULL; + RRDDIM *rd_active = NULL, *rd_expired = NULL; + + if (unlikely(!st_active)) + st_active = rrdset_create_localhost("ipfw", + "active", + NULL, + "dynamic_rules", + NULL, + "Active rules", + "rules", + "freebsd.plugin", + "ipfw", + NETDATA_CHART_PRIO_IPFW_ACTIVE, + update_every, + RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_active); + + if (unlikely(!st_expired)) + st_expired = rrdset_create_localhost("ipfw", + "expired", + NULL, + "dynamic_rules", + NULL, + "Expired rules", + "rules", + "freebsd.plugin", + "ipfw", + NETDATA_CHART_PRIO_IPFW_EXPIRED, + update_every, + RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_expired); + + for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) { + sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num); + + rd_active = rrddim_find(st_active, rule_num_str); + if (unlikely(!rd_active)) + rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules); + + rd_expired = rrddim_find(st_expired, rule_num_str); + if (unlikely(!rd_expired)) + rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules); + } + + rrdset_done(st_active); + rrdset_done(st_expired); + } + } + + return 0; +#else + error("FREEBSD: ipfw charts supported for FreeBSD 11.0 and newer releases only"); + COMMON_IPFW_ERROR(); + return 1; +#endif +} diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c new file mode 100644 index 000000000..93dfc320b --- /dev/null +++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" +#include "collectors/proc.plugin/zfs_common.h" + +extern struct arcstats arcstats; + +// -------------------------------------------------------------------------------------------------------------------- +// kstat.zfs.misc.arcstats + +int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) { + (void)dt; + + unsigned long long l2_size; + size_t uint64_t_size = sizeof(uint64_t); + static struct mibs { + int hits[5]; + int misses[5]; + int demand_data_hits[5]; + int demand_data_misses[5]; + int demand_metadata_hits[5]; + int demand_metadata_misses[5]; + int prefetch_data_hits[5]; + int prefetch_data_misses[5]; + int prefetch_metadata_hits[5]; + int prefetch_metadata_misses[5]; + int mru_hits[5]; + int mru_ghost_hits[5]; + int mfu_hits[5]; + int mfu_ghost_hits[5]; + int deleted[5]; + int mutex_miss[5]; + int evict_skip[5]; + int evict_not_enough[5]; + int evict_l2_cached[5]; + int evict_l2_eligible[5]; + int evict_l2_ineligible[5]; + int evict_l2_skip[5]; + int hash_elements[5]; + int hash_elements_max[5]; + int hash_collisions[5]; + int hash_chains[5]; + int hash_chain_max[5]; + int p[5]; + int c[5]; + int c_min[5]; + int c_max[5]; + int size[5]; + int hdr_size[5]; + int data_size[5]; + int metadata_size[5]; + int other_size[5]; + int anon_size[5]; + int anon_evictable_data[5]; + int anon_evictable_metadata[5]; + int mru_size[5]; + int mru_evictable_data[5]; + int mru_evictable_metadata[5]; + int mru_ghost_size[5]; + int mru_ghost_evictable_data[5]; + int mru_ghost_evictable_metadata[5]; + int mfu_size[5]; + int mfu_evictable_data[5]; + 
int mfu_evictable_metadata[5]; + int mfu_ghost_size[5]; + int mfu_ghost_evictable_data[5]; + int mfu_ghost_evictable_metadata[5]; + int l2_hits[5]; + int l2_misses[5]; + int l2_feeds[5]; + int l2_rw_clash[5]; + int l2_read_bytes[5]; + int l2_write_bytes[5]; + int l2_writes_sent[5]; + int l2_writes_done[5]; + int l2_writes_error[5]; + int l2_writes_lock_retry[5]; + int l2_evict_lock_retry[5]; + int l2_evict_reading[5]; + int l2_evict_l1cached[5]; + int l2_free_on_write[5]; + int l2_cdata_free_on_write[5]; + int l2_abort_lowmem[5]; + int l2_cksum_bad[5]; + int l2_io_error[5]; + int l2_size[5]; + int l2_asize[5]; + int l2_hdr_size[5]; + int l2_compress_successes[5]; + int l2_compress_zeros[5]; + int l2_compress_failures[5]; + int memory_throttle_count[5]; + int duplicate_buffers[5]; + int duplicate_buffers_size[5]; + int duplicate_reads[5]; + int memory_direct_count[5]; + int memory_indirect_count[5]; + int arc_no_grow[5]; + int arc_tempreserve[5]; + int arc_loaned_bytes[5]; + int arc_prune[5]; + int arc_meta_used[5]; + int arc_meta_limit[5]; + int arc_meta_max[5]; + int arc_meta_min[5]; + int arc_need_free[5]; + int arc_sys_free[5]; + } mibs; + + arcstats.l2exist = -1; + + if(unlikely(sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &uint64_t_size, NULL, 0))) + return 0; + + if(likely(l2_size)) + arcstats.l2exist = 1; + else + arcstats.l2exist = 0; + + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hits", mibs.hits, arcstats.hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.misses", mibs.misses, arcstats.misses); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_hits", mibs.demand_data_hits, arcstats.demand_data_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_misses", mibs.demand_data_misses, arcstats.demand_data_misses); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_hits", mibs.demand_metadata_hits, arcstats.demand_metadata_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_misses", mibs.demand_metadata_misses, arcstats.demand_metadata_misses); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_hits", mibs.prefetch_data_hits, arcstats.prefetch_data_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_misses", mibs.prefetch_data_misses, arcstats.prefetch_data_misses); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_hits", mibs.prefetch_metadata_hits, arcstats.prefetch_metadata_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_misses", mibs.prefetch_metadata_misses, arcstats.prefetch_metadata_misses); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_hits", mibs.mru_hits, arcstats.mru_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_hits", mibs.mru_ghost_hits, arcstats.mru_ghost_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_hits", mibs.mfu_hits, arcstats.mfu_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_hits", mibs.mfu_ghost_hits, arcstats.mfu_ghost_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.deleted", mibs.deleted, arcstats.deleted); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mutex_miss", mibs.mutex_miss, arcstats.mutex_miss); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_skip", mibs.evict_skip, arcstats.evict_skip); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_not_enough", mibs.evict_not_enough, arcstats.evict_not_enough); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_cached", mibs.evict_l2_cached, arcstats.evict_l2_cached); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_eligible", 
mibs.evict_l2_eligible, arcstats.evict_l2_eligible); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_ineligible", mibs.evict_l2_ineligible, arcstats.evict_l2_ineligible); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_skip", mibs.evict_l2_skip, arcstats.evict_l2_skip); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements", mibs.hash_elements, arcstats.hash_elements); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements_max", mibs.hash_elements_max, arcstats.hash_elements_max); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_collisions", mibs.hash_collisions, arcstats.hash_collisions); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chains", mibs.hash_chains, arcstats.hash_chains); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chain_max", mibs.hash_chain_max, arcstats.hash_chain_max); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.p", mibs.p, arcstats.p); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c", mibs.c, arcstats.c); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_min", mibs.c_min, arcstats.c_min); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_max", mibs.c_max, arcstats.c_max); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.size", mibs.size, arcstats.size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hdr_size", mibs.hdr_size, arcstats.hdr_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.data_size", mibs.data_size, arcstats.data_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.metadata_size", mibs.metadata_size, arcstats.metadata_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.other_size", mibs.other_size, arcstats.other_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_size", mibs.anon_size, arcstats.anon_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_data", mibs.anon_evictable_data, arcstats.anon_evictable_data); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_metadata", mibs.anon_evictable_metadata, arcstats.anon_evictable_metadata); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_size", mibs.mru_size, arcstats.mru_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_data", mibs.mru_evictable_data, arcstats.mru_evictable_data); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_metadata", mibs.mru_evictable_metadata, arcstats.mru_evictable_metadata); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_size", mibs.mru_ghost_size, arcstats.mru_ghost_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_data", mibs.mru_ghost_evictable_data, arcstats.mru_ghost_evictable_data); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata", mibs.mru_ghost_evictable_metadata, arcstats.mru_ghost_evictable_metadata); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_size", mibs.mfu_size, arcstats.mfu_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_data", mibs.mfu_evictable_data, arcstats.mfu_evictable_data); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_metadata", mibs.mfu_evictable_metadata, arcstats.mfu_evictable_metadata); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_size", mibs.mfu_ghost_size, arcstats.mfu_ghost_size); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_data", mibs.mfu_ghost_evictable_data, arcstats.mfu_ghost_evictable_data); + // not used: 
GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata", mibs.mfu_ghost_evictable_metadata, arcstats.mfu_ghost_evictable_metadata); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hits", mibs.l2_hits, arcstats.l2_hits); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_misses", mibs.l2_misses, arcstats.l2_misses); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_feeds", mibs.l2_feeds, arcstats.l2_feeds); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_rw_clash", mibs.l2_rw_clash, arcstats.l2_rw_clash); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_read_bytes", mibs.l2_read_bytes, arcstats.l2_read_bytes); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_write_bytes", mibs.l2_write_bytes, arcstats.l2_write_bytes); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_sent", mibs.l2_writes_sent, arcstats.l2_writes_sent); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_done", mibs.l2_writes_done, arcstats.l2_writes_done); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_error", mibs.l2_writes_error, arcstats.l2_writes_error); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_lock_retry", mibs.l2_writes_lock_retry, arcstats.l2_writes_lock_retry); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_lock_retry", mibs.l2_evict_lock_retry, arcstats.l2_evict_lock_retry); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_reading", mibs.l2_evict_reading, arcstats.l2_evict_reading); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_l1cached", mibs.l2_evict_l1cached, arcstats.l2_evict_l1cached); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_free_on_write", mibs.l2_free_on_write, arcstats.l2_free_on_write); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cdata_free_on_write", mibs.l2_cdata_free_on_write, arcstats.l2_cdata_free_on_write); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_abort_lowmem", mibs.l2_abort_lowmem, arcstats.l2_abort_lowmem); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cksum_bad", mibs.l2_cksum_bad, arcstats.l2_cksum_bad); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_io_error", mibs.l2_io_error, arcstats.l2_io_error); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_size", mibs.l2_size, arcstats.l2_size); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_asize", mibs.l2_asize, arcstats.l2_asize); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hdr_size", mibs.l2_hdr_size, arcstats.l2_hdr_size); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_successes", mibs.l2_compress_successes, arcstats.l2_compress_successes); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_zeros", mibs.l2_compress_zeros, arcstats.l2_compress_zeros); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_failures", mibs.l2_compress_failures, arcstats.l2_compress_failures); + GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_throttle_count", mibs.memory_throttle_count, arcstats.memory_throttle_count); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers", mibs.duplicate_buffers, arcstats.duplicate_buffers); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers_size", mibs.duplicate_buffers_size, arcstats.duplicate_buffers_size); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_reads", mibs.duplicate_reads, arcstats.duplicate_reads); + // missing mib: 
GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_direct_count", mibs.memory_direct_count, arcstats.memory_direct_count); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_indirect_count", mibs.memory_indirect_count, arcstats.memory_indirect_count); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_no_grow", mibs.arc_no_grow, arcstats.arc_no_grow); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_tempreserve", mibs.arc_tempreserve, arcstats.arc_tempreserve); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_loaned_bytes", mibs.arc_loaned_bytes, arcstats.arc_loaned_bytes); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_prune", mibs.arc_prune, arcstats.arc_prune); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_used", mibs.arc_meta_used, arcstats.arc_meta_used); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_limit", mibs.arc_meta_limit, arcstats.arc_meta_limit); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_max", mibs.arc_meta_max, arcstats.arc_meta_max); + // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_min", mibs.arc_meta_min, arcstats.arc_meta_min); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free); + // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free); + + generate_charts_arcstats("freebsd", "zfs", update_every); + generate_charts_arc_summary("freebsd", "zfs", update_every); + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kstat.zfs.misc.zio_trim + +int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { + (void)dt; + static int mib_bytes[5] = {0, 0, 0, 0, 0}, mib_success[5] = {0, 0, 0, 0, 0}, + mib_failed[5] = {0, 0, 0, 0, 0}, mib_unsupported[5] = {0, 0, 0, 0, 0}; + uint64_t bytes, success, failed, unsupported; + + if (unlikely(GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.bytes", mib_bytes, bytes) || + GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.success", mib_success, success) || + GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.failed", mib_failed, failed) || + GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.unsupported", mib_unsupported, unsupported))) { + error("DISABLED: zfs.trim_bytes chart"); + error("DISABLED: zfs.trim_success chart"); + error("DISABLED: kstat.zfs.misc.zio_trim module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st_bytes = NULL; + static RRDDIM *rd_bytes = NULL; + + if (unlikely(!st_bytes)) { + st_bytes = rrdset_create_localhost( + "zfs", + "trim_bytes", + NULL, + "trim", + NULL, + "Successfully TRIMmed bytes", + "bytes", + "freebsd", + "zfs", + 2320, + update_every, + RRDSET_TYPE_LINE + ); + + rd_bytes = rrddim_add(st_bytes, "TRIMmed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_bytes); + + rrddim_set_by_pointer(st_bytes, rd_bytes, bytes); + rrdset_done(st_bytes); + + // -------------------------------------------------------------------- + + static RRDSET *st_requests = NULL; + static RRDDIM *rd_successful = NULL, *rd_failed = NULL, *rd_unsupported = NULL; + + if (unlikely(!st_requests)) { + st_requests = rrdset_create_localhost( + "zfs", + "trim_requests", + NULL, + "trim", + NULL, + "TRIM requests", + "requests", + "freebsd", + "zfs", + 2321, + update_every, + RRDSET_TYPE_STACKED + ); + + rd_successful = 
rrddim_add(st_requests, "successful", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st_requests, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_unsupported = rrddim_add(st_requests, "unsupported", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_requests); + + rrddim_set_by_pointer(st_requests, rd_successful, success); + rrddim_set_by_pointer(st_requests, rd_failed, failed); + rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported); + rrdset_done(st_requests); + + } + + return 0; +} \ No newline at end of file diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c new file mode 100644 index 000000000..da5a351de --- /dev/null +++ b/collectors/freebsd.plugin/freebsd_sysctl.c @@ -0,0 +1,3188 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" + +#include +#include + +#define _KERNEL +#include +#include +#include +#undef _KERNEL + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// -------------------------------------------------------------------------------------------------------------------- +// common definitions and variables + +int system_pagesize = PAGE_SIZE; +int number_of_cpus = 1; +#if __FreeBSD_version >= 1200029 +struct __vmmeter { + uint64_t v_swtch; + uint64_t v_trap; + uint64_t v_syscall; + uint64_t v_intr; + uint64_t v_soft; + uint64_t v_vm_faults; + uint64_t v_io_faults; + uint64_t v_cow_faults; + uint64_t v_cow_optim; + uint64_t v_zfod; + uint64_t v_ozfod; + uint64_t v_swapin; + uint64_t v_swapout; + uint64_t v_swappgsin; + uint64_t v_swappgsout; + uint64_t v_vnodein; + uint64_t v_vnodeout; + uint64_t v_vnodepgsin; + uint64_t v_vnodepgsout; + uint64_t v_intrans; + uint64_t v_reactivated; + uint64_t v_pdwakeups; + uint64_t v_pdpages; + uint64_t v_pdshortfalls; + uint64_t v_dfree; + uint64_t v_pfree; + uint64_t v_tfree; + uint64_t v_forks; + uint64_t v_vforks; + uint64_t v_rforks; + uint64_t v_kthreads; + uint64_t v_forkpages; + uint64_t v_vforkpages; + uint64_t v_rforkpages; + uint64_t v_kthreadpages; + u_int v_page_size; + u_int v_page_count; + u_int v_free_reserved; + u_int v_free_target; + u_int v_free_min; + u_int v_free_count; + u_int v_wire_count; + u_int v_active_count; + u_int v_inactive_target; + u_int v_inactive_count; + u_int v_laundry_count; + u_int v_pageout_free_min; + u_int v_interrupt_free_min; + u_int v_free_severe; +}; +typedef struct __vmmeter vmmeter_t; +#else +typedef struct vmmeter vmmeter_t; +#endif + +#if (__FreeBSD_version >= 1101516 && __FreeBSD_version < 1200000) || __FreeBSD_version >= 1200015 +#define NETDATA_COLLECT_LAUNDRY 1 +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// FreeBSD plugin initialization + +int freebsd_plugin_init() +{ + system_pagesize = getpagesize(); + if (system_pagesize <= 0) { + error("FREEBSD: can't get system page size"); + return 1; + } + + if (unlikely(GETSYSCTL_BY_NAME("kern.smp.cpus", number_of_cpus))) { + error("FREEBSD: can't get number of cpus"); + return 1; + } + + if (unlikely(!number_of_cpus)) { + error("FREEBSD: wrong number of cpus"); + return 1; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.loadavg + +// FreeBSD calculates load averages once every 5 seconds +#define MIN_LOADAVG_UPDATE_EVERY 5 + +int do_vm_loadavg(int update_every, 
usec_t dt){ + static usec_t next_loadavg_dt = 0; + + if (next_loadavg_dt <= dt) { + static int mib[2] = {0, 0}; + struct loadavg sysload; + + if (unlikely(GETSYSCTL_SIMPLE("vm.loadavg", mib, sysload))) { + error("DISABLED: system.load chart"); + error("DISABLED: vm.loadavg module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd_load1 = NULL, *rd_load2 = NULL, *rd_load3 = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "load", + NULL, + "load", + NULL, + "System Load Average", + "load", + "freebsd.plugin", + "vm.loadavg", + NETDATA_CHART_PRIO_SYSTEM_LOAD, + (update_every < MIN_LOADAVG_UPDATE_EVERY) ? + MIN_LOADAVG_UPDATE_EVERY : update_every, RRDSET_TYPE_LINE + ); + rd_load1 = rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_load2 = rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_load3 = rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_load1, (collected_number) ((double) sysload.ldavg[0] / sysload.fscale * 1000)); + rrddim_set_by_pointer(st, rd_load2, (collected_number) ((double) sysload.ldavg[1] / sysload.fscale * 1000)); + rrddim_set_by_pointer(st, rd_load3, (collected_number) ((double) sysload.ldavg[2] / sysload.fscale * 1000)); + rrdset_done(st); + + next_loadavg_dt = st->update_every * USEC_PER_SEC; + } + } + else + next_loadavg_dt -= dt; + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.vmtotal + +int do_vm_vmtotal(int update_every, usec_t dt) { + (void)dt; + static int do_all_processes = -1, do_processes = -1, do_committed = -1; + + if (unlikely(do_all_processes == -1)) { + do_all_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "enable total processes", 1); + do_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "processes running", 1); + do_committed = config_get_boolean("plugin:freebsd:vm.vmtotal", "committed memory", 1); + } + + if (likely(do_all_processes | do_processes | do_committed)) { + static int mib[2] = {0, 0}; + struct vmtotal vmtotal_data; + + if (unlikely(GETSYSCTL_SIMPLE("vm.vmtotal", mib, vmtotal_data))) { + do_all_processes = 0; + error("DISABLED: system.active_processes chart"); + do_processes = 0; + error("DISABLED: system.processes chart"); + do_committed = 0; + error("DISABLED: mem.committed chart"); + error("DISABLED: vm.vmtotal module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + if (likely(do_all_processes)) { + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "active_processes", + NULL, + "processes", + NULL, + "System Active Processes", + "processes", + "freebsd.plugin", + "vm.vmtotal", + NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES, + update_every, + RRDSET_TYPE_LINE + ); + rd = rrddim_add(st, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, (vmtotal_data.t_rq + vmtotal_data.t_dw + vmtotal_data.t_pw + vmtotal_data.t_sl + vmtotal_data.t_sw)); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_processes)) { + static RRDSET *st = NULL; + static RRDDIM *rd_running = NULL, *rd_blocked = NULL; + + if (unlikely(!st)) { + st = 
rrdset_create_localhost( + "system", + "processes", + NULL, + "processes", + NULL, + "System Processes", + "processes", + "freebsd.plugin", + "vm.vmtotal", + NETDATA_CHART_PRIO_SYSTEM_PROCESSES, + update_every, + RRDSET_TYPE_LINE + ); + + rd_running = rrddim_add(st, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_blocked = rrddim_add(st, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_running, vmtotal_data.t_rq); + rrddim_set_by_pointer(st, rd_blocked, (vmtotal_data.t_dw + vmtotal_data.t_pw)); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_committed)) { + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "mem", + "committed", + NULL, + "system", + NULL, + "Committed (Allocated) Memory", + "MB", + "freebsd.plugin", + "vm.vmtotal", + NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED, + update_every, + RRDSET_TYPE_AREA + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd = rrddim_add(st, "Committed_AS", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, vmtotal_data.t_rm); + rrdset_done(st); + } + } + } else { + error("DISABLED: vm.vmtotal module"); + return 1; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kern.cp_time + +int do_kern_cp_time(int update_every, usec_t dt) { + (void)dt; + + if (unlikely(CPUSTATES != 5)) { + error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES); + error("DISABLED: system.cpu chart"); + error("DISABLED: kern.cp_time module"); + return 1; + } else { + static int mib[2] = {0, 0}; + long cp_time[CPUSTATES]; + + if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) { + error("DISABLED: system.cpu chart"); + error("DISABLED: kern.cp_time module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd_nice = NULL, *rd_system = NULL, *rd_user = NULL, *rd_interrupt = NULL, *rd_idle = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "cpu", + NULL, + "cpu", + "system.cpu", + "Total CPU utilization", + "percentage", + "freebsd.plugin", + "kern.cp_time", + NETDATA_CHART_PRIO_SYSTEM_CPU, + update_every, + RRDSET_TYPE_STACKED + ); + + rd_nice = rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_system = rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_user = rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_interrupt = rrddim_add(st, "interrupt", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_idle = rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_hide(st, "idle"); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_nice, cp_time[1]); + rrddim_set_by_pointer(st, rd_system, cp_time[2]); + rrddim_set_by_pointer(st, rd_user, cp_time[0]); + rrddim_set_by_pointer(st, rd_interrupt, cp_time[3]); + rrddim_set_by_pointer(st, rd_idle, cp_time[4]); + rrdset_done(st); + } + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kern.cp_times + +int do_kern_cp_times(int update_every, usec_t dt) { + (void)dt; + + if (unlikely(CPUSTATES != 5)) { + error("FREEBSD: 
There are %d CPU states (5 was expected)", CPUSTATES); + error("DISABLED: cpu.cpuXX charts"); + error("DISABLED: kern.cp_times module"); + return 1; + } else { + static int mib[2] = {0, 0}; + long cp_time[CPUSTATES]; + static long *pcpu_cp_time = NULL; + static int old_number_of_cpus = 0; + + if(unlikely(number_of_cpus != old_number_of_cpus)) + pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus); + if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) { + error("DISABLED: cpu.cpuXX charts"); + error("DISABLED: kern.cp_times module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + int i; + static struct cpu_chart { + char cpuid[MAX_INT_DIGITS + 4]; + RRDSET *st; + RRDDIM *rd_user; + RRDDIM *rd_nice; + RRDDIM *rd_system; + RRDDIM *rd_interrupt; + RRDDIM *rd_idle; + } *all_cpu_charts = NULL; + + if(unlikely(number_of_cpus > old_number_of_cpus)) { + all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus); + memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus)); + } + + for (i = 0; i < number_of_cpus; i++) { + if (unlikely(!all_cpu_charts[i].st)) { + snprintfz(all_cpu_charts[i].cpuid, MAX_INT_DIGITS, "cpu%d", i); + all_cpu_charts[i].st = rrdset_create_localhost( + "cpu", + all_cpu_charts[i].cpuid, + NULL, + "utilization", + "cpu.cpu", + "Core utilization", + "percentage", + "freebsd.plugin", + "kern.cp_times", + NETDATA_CHART_PRIO_CPU_PER_CORE, + update_every, + RRDSET_TYPE_STACKED + ); + + all_cpu_charts[i].rd_nice = rrddim_add(all_cpu_charts[i].st, "nice", NULL, 1, 1, + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + all_cpu_charts[i].rd_system = rrddim_add(all_cpu_charts[i].st, "system", NULL, 1, 1, + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + all_cpu_charts[i].rd_user = rrddim_add(all_cpu_charts[i].st, "user", NULL, 1, 1, + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + all_cpu_charts[i].rd_interrupt = rrddim_add(all_cpu_charts[i].st, "interrupt", NULL, 1, 1, + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + all_cpu_charts[i].rd_idle = rrddim_add(all_cpu_charts[i].st, "idle", NULL, 1, 1, + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_hide(all_cpu_charts[i].st, "idle"); + } else rrdset_next(all_cpu_charts[i].st); + + rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_nice, pcpu_cp_time[i * 5 + 1]); + rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_system, pcpu_cp_time[i * 5 + 2]); + rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_user, pcpu_cp_time[i * 5 + 0]); + rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_interrupt, pcpu_cp_time[i * 5 + 3]); + rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_idle, pcpu_cp_time[i * 5 + 4]); + rrdset_done(all_cpu_charts[i].st); + } + } + + old_number_of_cpus = number_of_cpus; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// dev.cpu.temperature + +int do_dev_cpu_temperature(int update_every, usec_t dt) { + (void)dt; + + int i; + static int *mib = NULL; + static int *pcpu_temperature = NULL; + static int old_number_of_cpus = 0; + char char_mib[MAX_INT_DIGITS + 21]; + char char_rd[MAX_INT_DIGITS + 9]; + + if (unlikely(number_of_cpus != old_number_of_cpus)) { + pcpu_temperature = reallocz(pcpu_temperature, sizeof(int) * number_of_cpus); + mib = reallocz(mib, sizeof(int) * number_of_cpus * 4); + if 
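+    // dev.cpu.N.temperature reports tenths of a kelvin as an int; the
+    // conversion further below turns it into Celsius. Worked example with an
+    // assumed raw reading of 3257:
+    //
+    //     int raw = 3257;                                // dev.cpu.0.temperature
+    //     double celsius = (double)raw / 10.0 - 273.15;  // 52.55 C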
(unlikely(number_of_cpus > old_number_of_cpus)) + memset(&mib[old_number_of_cpus * 4], 0, sizeof(int) * 4 * (number_of_cpus - old_number_of_cpus)); + } + for (i = 0; i < number_of_cpus; i++) { + if (unlikely(!(mib[i * 4]))) + sprintf(char_mib, "dev.cpu.%d.temperature", i); + if (unlikely(getsysctl_simple(char_mib, &mib[i * 4], 4, &pcpu_temperature[i], sizeof(int)))) { + error("DISABLED: cpu.temperature chart"); + error("DISABLED: dev.cpu.temperature module"); + return 1; + } + } + + // -------------------------------------------------------------------- + + static RRDSET *st; + static RRDDIM **rd_pcpu_temperature; + + if (unlikely(number_of_cpus != old_number_of_cpus)) { + rd_pcpu_temperature = reallocz(rd_pcpu_temperature, sizeof(RRDDIM *) * number_of_cpus); + if (unlikely(number_of_cpus > old_number_of_cpus)) + memset(&rd_pcpu_temperature[old_number_of_cpus], 0, sizeof(RRDDIM *) * (number_of_cpus - old_number_of_cpus)); + } + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "cpu", + "temperature", + NULL, + "temperature", + "cpu.temperature", + "Core temperature", + "Celsius", + "freebsd.plugin", + "dev.cpu.temperature", + NETDATA_CHART_PRIO_CPU_TEMPERATURE, + update_every, + RRDSET_TYPE_LINE + ); + } + else rrdset_next(st); + + for (i = 0; i < number_of_cpus; i++) { + if (unlikely(!rd_pcpu_temperature[i])) { + sprintf(char_rd, "cpu%d.temp", i); + rd_pcpu_temperature[i] = rrddim_add(st, char_rd, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st, rd_pcpu_temperature[i], (collected_number) ((double)pcpu_temperature[i] / 10 - 273.15)); + } + + rrdset_done(st); + + old_number_of_cpus = number_of_cpus; + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// dev.cpu.0.freq + +int do_dev_cpu_0_freq(int update_every, usec_t dt) { + (void)dt; + static int mib[4] = {0, 0, 0, 0}; + int cpufreq; + + if (unlikely(GETSYSCTL_SIMPLE("dev.cpu.0.freq", mib, cpufreq))) { + error("DISABLED: cpu.scaling_cur_freq chart"); + error("DISABLED: dev.cpu.0.freq module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "cpu", + "scaling_cur_freq", + NULL, + "cpufreq", + NULL, + "Current CPU Scaling Frequency", + "MHz", + "freebsd.plugin", + "dev.cpu.0.freq", + NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ, + update_every, + RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "frequency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, cpufreq); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// hw.intrcnt + +int do_hw_intcnt(int update_every, usec_t dt) { + (void)dt; + static int mib_hw_intrcnt[2] = {0, 0}; + size_t intrcnt_size = 0; + unsigned long i; + + if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) { + error("DISABLED: system.intr chart"); + error("DISABLED: system.interrupts chart"); + error("DISABLED: hw.intrcnt module"); + return 1; + } else { + unsigned long nintr = 0; + static unsigned long old_nintr = 0; + static unsigned long *intrcnt = NULL; + unsigned long long totalintr = 0; + + nintr = intrcnt_size / sizeof(u_long); + if (unlikely(nintr != old_nintr)) + intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
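+
+        // hw.intrcnt is variable length, so it is read in two steps: probe the
+        // current size, grow the buffer when it changed, then fetch the
+        // payload. The same pattern written against plain sysctlbyname(3)
+        // (a sketch; the GETSYSCTL_SIZE and GETSYSCTL_WSIZE macros
+        // additionally cache the MIB):
+        //
+        //     size_t size = 0;
+        //     if (sysctlbyname("hw.intrcnt", NULL, &size, NULL, 0) == -1)
+        //         return 1;                                 // probe the size
+        //     unsigned long *counters = mallocz(size);      // reallocz() on resize
+        //     if (sysctlbyname("hw.intrcnt", counters, &size, NULL, 0) == -1)
+        //         return 1;                                 // fetch the counters
+        //     size_t nintr = size / sizeof(unsigned long);
+
+        if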
(unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) { + error("DISABLED: system.intr chart"); + error("DISABLED: system.interrupts chart"); + error("DISABLED: hw.intrcnt module"); + return 1; + } else { + for (i = 0; i < nintr; i++) + totalintr += intrcnt[i]; + + // -------------------------------------------------------------------- + + static RRDSET *st_intr = NULL; + static RRDDIM *rd_intr = NULL; + + if (unlikely(!st_intr)) { + st_intr = rrdset_create_localhost( + "system", + "intr", + NULL, + "interrupts", + NULL, + "Total Hardware Interrupts", + "interrupts/s", + "freebsd.plugin", + "hw.intrcnt", + NETDATA_CHART_PRIO_SYSTEM_INTR, + update_every, + RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL); + + rd_intr = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st_intr); + + rrddim_set_by_pointer(st_intr, rd_intr, totalintr); + rrdset_done(st_intr); + + // -------------------------------------------------------------------- + + size_t size; + static int mib_hw_intrnames[2] = {0, 0}; + static char *intrnames = NULL; + + size = nintr * (MAXCOMLEN + 1); + if (unlikely(nintr != old_nintr)) + intrnames = reallocz(intrnames, size); + if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) { + error("DISABLED: system.intr chart"); + error("DISABLED: system.interrupts chart"); + error("DISABLED: hw.intrcnt module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st_interrupts = NULL; + void *p; + + if (unlikely(!st_interrupts)) + st_interrupts = rrdset_create_localhost( + "system", + "interrupts", + NULL, + "interrupts", + NULL, + "System interrupts", + "interrupts/s", + "freebsd.plugin", + "hw.intrcnt", + NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS, + update_every, + RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_interrupts); + + for (i = 0; i < nintr; i++) { + p = intrnames + i * (MAXCOMLEN + 1); + if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) { + RRDDIM *rd_interrupts = rrddim_find(st_interrupts, p); + + if (unlikely(!rd_interrupts)) + rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]); + } + } + rrdset_done(st_interrupts); + } + } + + old_nintr = nintr; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.stats.sys.v_intr + +int do_vm_stats_sys_v_intr(int update_every, usec_t dt) { + (void)dt; + static int mib[4] = {0, 0, 0, 0}; + u_int int_number; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_intr", mib, int_number))) { + error("DISABLED: system.dev_intr chart"); + error("DISABLED: vm.stats.sys.v_intr module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "dev_intr", + NULL, + "interrupts", + NULL, + "Device Interrupts", + "interrupts/s", + "freebsd.plugin", + "vm.stats.sys.v_intr", + NETDATA_CHART_PRIO_SYSTEM_DEV_INTR, + update_every, + RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, int_number); + rrdset_done(st); + } + + return 0; +} + +// 
-------------------------------------------------------------------------------------------------------------------- +// vm.stats.sys.v_soft + +int do_vm_stats_sys_v_soft(int update_every, usec_t dt) { + (void)dt; + static int mib[4] = {0, 0, 0, 0}; + u_int soft_intr_number; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_soft", mib, soft_intr_number))) { + error("DISABLED: system.soft_intr chart"); + error("DISABLED: vm.stats.sys.v_soft module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "soft_intr", + NULL, + "interrupts", + NULL, + "Software Interrupts", + "interrupts/s", + "freebsd.plugin", + "vm.stats.sys.v_soft", + NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR, + update_every, + RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, soft_intr_number); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.stats.sys.v_swtch + +int do_vm_stats_sys_v_swtch(int update_every, usec_t dt) { + (void)dt; + static int mib[4] = {0, 0, 0, 0}; + u_int ctxt_number; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_swtch", mib, ctxt_number))) { + error("DISABLED: system.ctxt chart"); + error("DISABLED: vm.stats.sys.v_swtch module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "ctxt", + NULL, + "processes", + NULL, + "CPU Context Switches", + "context switches/s", + "freebsd.plugin", + "vm.stats.sys.v_swtch", + NETDATA_CHART_PRIO_SYSTEM_CTXT, + update_every, + RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, ctxt_number); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.stats.vm.v_forks + +int do_vm_stats_sys_v_forks(int update_every, usec_t dt) { + (void)dt; + static int mib[4] = {0, 0, 0, 0}; + u_int forks_number; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_forks", mib, forks_number))) { + error("DISABLED: system.forks chart"); + error("DISABLED: vm.stats.vm.v_forks module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "forks", + NULL, + "processes", + NULL, + "Started Processes", + "processes/s", + "freebsd.plugin", + "vm.stats.vm.v_forks", + NETDATA_CHART_PRIO_SYSTEM_FORKS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, forks_number); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.swap_info + +int do_vm_swap_info(int update_every, usec_t dt) { + (void)dt; + static int mib[3] = {0, 0, 0};
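+
+    // vm.swap_info is an indexed sysctl: the swap device number goes into the
+    // third MIB element and the kernel answers ENOENT once the index runs past
+    // the last device, which is what terminates the loop below. One probe
+    // looks like this (struct xswdev counts pages, hence the system_pagesize
+    // multiplier on the chart dimensions):
+    //
+    //     mib[2] = i;                                   // device index
+    //     size_t size = sizeof(struct xswdev);
+    //     if (sysctl(mib, 3, &xsw, &size, NULL, 0) == -1 && errno == ENOENT)
+    //         /* past the last swap device: stop iterating */;
+
+    if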
(unlikely(getsysctl_mib("vm.swap_info", mib, 2))) { + error("DISABLED: system.swap chart"); + error("DISABLED: vm.swap_info module"); + return 1; + } else { + int i; + struct xswdev xsw; + struct total_xsw { + collected_number bytes_used; + collected_number bytes_total; + } total_xsw = {0, 0}; + + for (i = 0; ; i++) { + size_t size; + + mib[2] = i; + size = sizeof(xsw); + if (unlikely(sysctl(mib, 3, &xsw, &size, NULL, 0) == -1 )) { + if (unlikely(errno != ENOENT)) { + error("FREEBSD: sysctl(%s...) failed: %s", "vm.swap_info", strerror(errno)); + error("DISABLED: system.swap chart"); + error("DISABLED: vm.swap_info module"); + return 1; + } else { + if (unlikely(size != sizeof(xsw))) { + error("FREEBSD: sysctl(%s...) expected %lu, got %lu", "vm.swap_info", (unsigned long)sizeof(xsw), (unsigned long)size); + error("DISABLED: system.swap chart"); + error("DISABLED: vm.swap_info module"); + return 1; + } else break; + } + } + total_xsw.bytes_used += xsw.xsw_used; + total_xsw.bytes_total += xsw.xsw_nblks; + } + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd_free = NULL, *rd_used = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "swap", + NULL, + "swap", + NULL, + "System Swap", + "MB", + "freebsd.plugin", + "vm.swap_info", + NETDATA_CHART_PRIO_SYSTEM_SWAP, + update_every, + RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_free, total_xsw.bytes_total - total_xsw.bytes_used); + rrddim_set_by_pointer(st, rd_used, total_xsw.bytes_used); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// system.ram + +int do_system_ram(int update_every, usec_t dt) { + (void)dt; + static int mib_active_count[4] = {0, 0, 0, 0}, mib_inactive_count[4] = {0, 0, 0, 0}, mib_wire_count[4] = {0, 0, 0, 0}, + mib_cache_count[4] = {0, 0, 0, 0}, mib_laundry_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0}, + mib_free_count[4] = {0, 0, 0, 0}; + vmmeter_t vmmeter_data; + int vfs_bufspace_count; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) || +#if __FreeBSD_version < 1200016 + GETSYSCTL_SIMPLE("vm.stats.vm.v_cache_count", mib_cache_count, vmmeter_data.v_cache_count) || +#endif +#if defined(NETDATA_COLLECT_LAUNDRY) + GETSYSCTL_SIMPLE("vm.stats.vm.v_laundry_count", mib_laundry_count, vmmeter_data.v_laundry_count) || +#endif + GETSYSCTL_SIMPLE("vfs.bufspace", mib_vfs_bufspace, vfs_bufspace_count) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_free_count", mib_free_count, vmmeter_data.v_free_count))) { + error("DISABLED: system.ram chart"); + error("DISABLED: system.ram module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, *rd_wired = NULL, + *rd_cache = NULL, *rd_laundry = NULL, *rd_buffers = NULL; + + if (unlikely(!st)) { + st = 
rrdset_create_localhost( + "system", + "ram", + NULL, + "ram", + NULL, + "System RAM", + "MB", + "freebsd.plugin", + "system.ram", + NETDATA_CHART_PRIO_SYSTEM_RAM, + update_every, + RRDSET_TYPE_STACKED + ); + + rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + rd_active = rrddim_add(st, "active", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + rd_inactive = rrddim_add(st, "inactive", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + rd_wired = rrddim_add(st, "wired", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); +#if __FreeBSD_version < 1200016 + rd_cache = rrddim_add(st, "cache", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); +#endif +#if defined(NETDATA_COLLECT_LAUNDRY) + rd_laundry = rrddim_add(st, "laundry", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); +#endif + rd_buffers = rrddim_add(st, "buffers", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_free, vmmeter_data.v_free_count); + rrddim_set_by_pointer(st, rd_active, vmmeter_data.v_active_count); + rrddim_set_by_pointer(st, rd_inactive, vmmeter_data.v_inactive_count); + rrddim_set_by_pointer(st, rd_wired, vmmeter_data.v_wire_count); +#if __FreeBSD_version < 1200016 + rrddim_set_by_pointer(st, rd_cache, vmmeter_data.v_cache_count); +#endif +#if defined(NETDATA_COLLECT_LAUNDRY) + rrddim_set_by_pointer(st, rd_laundry, vmmeter_data.v_laundry_count); +#endif + rrddim_set_by_pointer(st, rd_buffers, vfs_bufspace_count); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.stats.vm.v_swappgs + +int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) { + (void)dt; + static int mib_swappgsin[4] = {0, 0, 0, 0}, mib_swappgsout[4] = {0, 0, 0, 0}; + vmmeter_t vmmeter_data; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsin", mib_swappgsin, vmmeter_data.v_swappgsin) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsout", mib_swappgsout, vmmeter_data.v_swappgsout))) { + error("DISABLED: system.swapio chart"); + error("DISABLED: vm.stats.vm.v_swappgs module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "swapio", + NULL, + "swap", + NULL, + "Swap I/O", + "kilobytes/s", + "freebsd.plugin", + "vm.stats.vm.v_swappgs", + NETDATA_CHART_PRIO_SYSTEM_SWAPIO, + update_every, + RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st, "in", NULL, system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "out", NULL, -system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, vmmeter_data.v_swappgsin); + rrddim_set_by_pointer(st, rd_out, vmmeter_data.v_swappgsout); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// vm.stats.vm.v_pgfaults + +int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) { + (void)dt; + static int mib_vm_faults[4] = {0, 0, 0, 0}, mib_io_faults[4] = {0, 0, 0, 0}, mib_cow_faults[4] = {0, 0, 0, 0}, + mib_cow_optim[4] = {0, 0, 0, 0}, mib_intrans[4] = {0, 0, 0, 0}; + vmmeter_t vmmeter_data; + + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_vm_faults", 
mib_vm_faults, vmmeter_data.v_vm_faults) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_io_faults", mib_io_faults, vmmeter_data.v_io_faults) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_faults", mib_cow_faults, vmmeter_data.v_cow_faults) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_optim", mib_cow_optim, vmmeter_data.v_cow_optim) || + GETSYSCTL_SIMPLE("vm.stats.vm.v_intrans", mib_intrans, vmmeter_data.v_intrans))) { + error("DISABLED: mem.pgfaults chart"); + error("DISABLED: vm.stats.vm.v_pgfaults module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd_memory = NULL, *rd_io_requiring = NULL, *rd_cow = NULL, + *rd_cow_optimized = NULL, *rd_in_transit = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "mem", + "pgfaults", + NULL, + "system", + NULL, + "Memory Page Faults", + "page faults/s", + "freebsd.plugin", + "vm.stats.vm.v_pgfaults", + NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_cow = rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_cow_optimized = rrddim_add(st, "cow_optimized", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_transit = rrddim_add(st, "in_transit", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_memory, vmmeter_data.v_vm_faults); + rrddim_set_by_pointer(st, rd_io_requiring, vmmeter_data.v_io_faults); + rrddim_set_by_pointer(st, rd_cow, vmmeter_data.v_cow_faults); + rrddim_set_by_pointer(st, rd_cow_optimized, vmmeter_data.v_cow_optim); + rrddim_set_by_pointer(st, rd_in_transit, vmmeter_data.v_intrans); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kern.ipc.sem + +int do_kern_ipc_sem(int update_every, usec_t dt) { + (void)dt; + static int mib_semmni[3] = {0, 0, 0}, mib_sema[3] = {0, 0, 0}; + struct ipc_sem { + int semmni; + collected_number sets; + collected_number semaphores; + } ipc_sem = {0, 0, 0}; + + if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.semmni", mib_semmni, ipc_sem.semmni))) { + error("DISABLED: system.ipc_semaphores chart"); + error("DISABLED: system.ipc_semaphore_arrays chart"); + error("DISABLED: kern.ipc.sem module"); + return 1; + } else { + static struct semid_kernel *ipc_sem_data = NULL; + static int old_semmni = 0; + + if (unlikely(ipc_sem.semmni != old_semmni)) { + ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni); + old_semmni = ipc_sem.semmni; + } + if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) { + error("DISABLED: system.ipc_semaphores chart"); + error("DISABLED: system.ipc_semaphore_arrays chart"); + error("DISABLED: kern.ipc.sem module"); + return 1; + } else { + int i; + + for (i = 0; i < ipc_sem.semmni; i++) { + if (unlikely(ipc_sem_data[i].u.sem_perm.mode & SEM_ALLOC)) { + ipc_sem.sets += 1; + ipc_sem.semaphores += ipc_sem_data[i].u.sem_nsems; + } + } + + // -------------------------------------------------------------------- + + static RRDSET *st_semaphores = NULL, *st_semaphore_arrays = NULL; + static RRDDIM *rd_semaphores = NULL, *rd_semaphore_arrays = NULL; + + if (unlikely(!st_semaphores)) { + 
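+
+            // The three SysV IPC collectors (semaphores here, shared memory
+            // and message queues below) share one recipe: read the table limit
+            // (semmni / shmmni / msgmni), fetch the whole kernel descriptor
+            // table with a single sysctl, then count only the allocated slots:
+            // SEM_ALLOC here, mode bit 0x0800 for shared memory segments,
+            // msg_qbytes != 0 for message queues. Condensed form of the loop
+            // above:
+            //
+            //     for (i = 0; i < limit; i++)
+            //         if (table[i].u.sem_perm.mode & SEM_ALLOC) {  // allocated?
+            //             sets       += 1;
+            //             semaphores += table[i].u.sem_nsems;
+            //         }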
st_semaphores = rrdset_create_localhost( + "system", + "ipc_semaphores", + NULL, + "ipc semaphores", + NULL, + "IPC Semaphores", + "semaphores", + "freebsd.plugin", + "kern.ipc.sem", + NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES, + update_every, + RRDSET_TYPE_AREA + ); + + rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_semaphores); + + rrddim_set_by_pointer(st_semaphores, rd_semaphores, ipc_sem.semaphores); + rrdset_done(st_semaphores); + + // -------------------------------------------------------------------- + + if (unlikely(!st_semaphore_arrays)) { + st_semaphore_arrays = rrdset_create_localhost( + "system", + "ipc_semaphore_arrays", + NULL, + "ipc semaphores", + NULL, + "IPC Semaphore Arrays", + "arrays", + "freebsd.plugin", + "kern.ipc.sem", + NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS, + update_every, + RRDSET_TYPE_AREA + ); + + rd_semaphore_arrays = rrddim_add(st_semaphore_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_semaphore_arrays); + + rrddim_set_by_pointer(st_semaphore_arrays, rd_semaphore_arrays, ipc_sem.sets); + rrdset_done(st_semaphore_arrays); + } + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kern.ipc.shm + +int do_kern_ipc_shm(int update_every, usec_t dt) { + (void)dt; + static int mib_shmmni[3] = {0, 0, 0}, mib_shmsegs[3] = {0, 0, 0}; + struct ipc_shm { + u_long shmmni; + collected_number segs; + collected_number segsize; + } ipc_shm = {0, 0, 0}; + + if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.shmmni", mib_shmmni, ipc_shm.shmmni))) { + error("DISABLED: system.ipc_shared_mem_segs chart"); + error("DISABLED: system.ipc_shared_mem_size chart"); + error("DISABLED: kern.ipc.shm module"); + return 1; + } else { + static struct shmid_kernel *ipc_shm_data = NULL; + static u_long old_shmmni = 0; + + if (unlikely(ipc_shm.shmmni != old_shmmni)) { + ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni); + old_shmmni = ipc_shm.shmmni; + } + if (unlikely( + GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) { + error("DISABLED: system.ipc_shared_mem_segs chart"); + error("DISABLED: system.ipc_shared_mem_size chart"); + error("DISABLED: kern.ipc.shm module"); + return 1; + } else { + unsigned long i; + + for (i = 0; i < ipc_shm.shmmni; i++) { + if (unlikely(ipc_shm_data[i].u.shm_perm.mode & 0x0800)) { + ipc_shm.segs += 1; + ipc_shm.segsize += ipc_shm_data[i].u.shm_segsz; + } + } + + // -------------------------------------------------------------------- + + static RRDSET *st_segs = NULL, *st_size = NULL; + static RRDDIM *rd_segments = NULL, *rd_allocated = NULL; + + if (unlikely(!st_segs)) { + st_segs = rrdset_create_localhost( + "system", + "ipc_shared_mem_segs", + NULL, + "ipc shared memory", + NULL, + "IPC Shared Memory Segments", + "segments", + "freebsd.plugin", + "kern.ipc.shm", + NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS, + update_every, + RRDSET_TYPE_AREA + ); + + rd_segments = rrddim_add(st_segs, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_segs); + + rrddim_set_by_pointer(st_segs, rd_segments, ipc_shm.segs); + rrdset_done(st_segs); + + // -------------------------------------------------------------------- + + if (unlikely(!st_size)) { + st_size = rrdset_create_localhost( + "system", + "ipc_shared_mem_size", + NULL, + "ipc shared memory", + NULL, + "IPC
Shared Memory Segments Size", + "kilobytes", + "freebsd.plugin", + "kern.ipc.shm", + NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE, + update_every, + RRDSET_TYPE_AREA + ); + + rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, KILO_FACTOR, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_size); + + rrddim_set_by_pointer(st_size, rd_allocated, ipc_shm.segsize); + rrdset_done(st_size); + } + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// kern.ipc.msq + +int do_kern_ipc_msq(int update_every, usec_t dt) { + (void)dt; + static int mib_msgmni[3] = {0, 0, 0}, mib_msqids[3] = {0, 0, 0}; + struct ipc_msq { + int msgmni; + collected_number queues; + collected_number messages; + collected_number usedsize; + collected_number allocsize; + } ipc_msq = {0, 0, 0, 0, 0}; + + if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.msgmni", mib_msgmni, ipc_msq.msgmni))) { + error("DISABLED: system.ipc_msq_queues chart"); + error("DISABLED: system.ipc_msq_messages chart"); + error("DISABLED: system.ipc_msq_size chart"); + error("DISABLED: kern.ipc.msg module"); + return 1; + } else { + static struct msqid_kernel *ipc_msq_data = NULL; + static int old_msgmni = 0; + + if (unlikely(ipc_msq.msgmni != old_msgmni)) { + ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni); + old_msgmni = ipc_msq.msgmni; + } + if (unlikely( + GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) { + error("DISABLED: system.ipc_msq_queues chart"); + error("DISABLED: system.ipc_msq_messages chart"); + error("DISABLED: system.ipc_msq_size chart"); + error("DISABLED: kern.ipc.msg module"); + return 1; + } else { + int i; + + for (i = 0; i < ipc_msq.msgmni; i++) { + if (unlikely(ipc_msq_data[i].u.msg_qbytes != 0)) { + ipc_msq.queues += 1; + ipc_msq.messages += ipc_msq_data[i].u.msg_qnum; + ipc_msq.usedsize += ipc_msq_data[i].u.msg_cbytes; + ipc_msq.allocsize += ipc_msq_data[i].u.msg_qbytes; + } + } + + // -------------------------------------------------------------------- + + static RRDSET *st_queues = NULL, *st_messages = NULL, *st_size = NULL; + static RRDDIM *rd_queues = NULL, *rd_messages = NULL, *rd_allocated = NULL, *rd_used = NULL; + + if (unlikely(!st_queues)) { + st_queues = rrdset_create_localhost( + "system", + "ipc_msq_queues", + NULL, + "ipc message queues", + NULL, + "Number of IPC Message Queues", + "queues", + "freebsd.plugin", + "kern.ipc.msq", + NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES, + update_every, + RRDSET_TYPE_AREA + ); + + rd_queues = rrddim_add(st_queues, "queues", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_queues); + + rrddim_set_by_pointer(st_queues, rd_queues, ipc_msq.queues); + rrdset_done(st_queues); + + // -------------------------------------------------------------------- + + if (unlikely(!st_messages)) { + st_messages = rrdset_create_localhost( + "system", + "ipc_msq_messages", + NULL, + "ipc message queues", + NULL, + "Number of Messages in IPC Message Queues", + "messages", + "freebsd.plugin", + "kern.ipc.msq", + NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES, + update_every, + RRDSET_TYPE_AREA + ); + + rd_messages = rrddim_add(st_messages, "messages", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_messages); + + rrddim_set_by_pointer(st_messages, rd_messages, ipc_msq.messages); + rrdset_done(st_messages); + + // -------------------------------------------------------------------- + + if 
(unlikely(!st_size)) { + st_size = rrdset_create_localhost( + "system", + "ipc_msq_size", + NULL, + "ipc message queues", + NULL, + "Size of IPC Message Queues", + "bytes", + "freebsd.plugin", + "kern.ipc.msq", + NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE, + update_every, + RRDSET_TYPE_LINE + ); + + rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_size, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_size); + + rrddim_set_by_pointer(st_size, rd_allocated, ipc_msq.allocsize); + rrddim_set_by_pointer(st_size, rd_used, ipc_msq.usedsize); + rrdset_done(st_size); + } + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// uptime + +int do_uptime(int update_every, usec_t dt) { + (void)dt; + struct timespec up_time; + + clock_gettime(CLOCK_UPTIME, &up_time); + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "uptime", + NULL, + "uptime", + NULL, + "System Uptime", + "seconds", + "freebsd.plugin", + "uptime", + NETDATA_CHART_PRIO_SYSTEM_UPTIME, + update_every, + RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, up_time.tv_sec); + rrdset_done(st); + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.isr + +int do_net_isr(int update_every, usec_t dt) { + (void)dt; + static int do_netisr = -1, do_netisr_per_core = -1; + + if (unlikely(do_netisr == -1)) { + do_netisr = config_get_boolean("plugin:freebsd:net.isr", "netisr", 1); + do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1); + } + + static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0}; + int common_error = 0; + size_t netisr_workstream_size = 0, netisr_work_size = 0; + unsigned long num_netisr_workstreams = 0, num_netisr_works = 0; + static struct sysctl_netisr_workstream *netisr_workstream = NULL; + static struct sysctl_netisr_work *netisr_work = NULL; + static struct netisr_stats { + collected_number dispatched; + collected_number hybrid_dispatched; + collected_number qdrops; + collected_number queued; + } *netisr_stats = NULL; + + if (likely(do_netisr || do_netisr_per_core)) { + if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) { + common_error = 1; + } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) { + common_error = 1; + } else { + static size_t old_netisr_workstream_size = 0; + + num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream); + if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) { + netisr_workstream = reallocz(netisr_workstream, + num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)); + old_netisr_workstream_size = netisr_workstream_size; + } + if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream, + num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){ + common_error = 1; + } else { + static size_t old_netisr_work_size = 0; + + num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work); + if (unlikely(netisr_work_size != old_netisr_work_size)) { + netisr_work = 
reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work)); + old_netisr_work_size = netisr_work_size; + } + if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work, + num_netisr_works * sizeof(struct sysctl_netisr_work)))){ + common_error = 1; + } + } + } + if (unlikely(common_error)) { + do_netisr = 0; + error("DISABLED: system.softnet_stat chart"); + do_netisr_per_core = 0; + error("DISABLED: system.cpuX_softnet_stat chart"); + common_error = 0; + error("DISABLED: net.isr module"); + return 1; + } else { + unsigned long i, n; + int j; + static int old_number_of_cpus = 0; + + if (unlikely(number_of_cpus != old_number_of_cpus)) { + netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats)); + old_number_of_cpus = number_of_cpus; + } + memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats)); + for (i = 0; i < num_netisr_workstreams; i++) { + for (n = 0; n < num_netisr_works; n++) { + if (netisr_workstream[i].snws_wsid == netisr_work[n].snw_wsid) { + netisr_stats[netisr_workstream[i].snws_cpu].dispatched += netisr_work[n].snw_dispatched; + netisr_stats[netisr_workstream[i].snws_cpu].hybrid_dispatched += netisr_work[n].snw_hybrid_dispatched; + netisr_stats[netisr_workstream[i].snws_cpu].qdrops += netisr_work[n].snw_qdrops; + netisr_stats[netisr_workstream[i].snws_cpu].queued += netisr_work[n].snw_queued; + } + } + } + for (j = 0; j < number_of_cpus; j++) { + netisr_stats[number_of_cpus].dispatched += netisr_stats[j].dispatched; + netisr_stats[number_of_cpus].hybrid_dispatched += netisr_stats[j].hybrid_dispatched; + netisr_stats[number_of_cpus].qdrops += netisr_stats[j].qdrops; + netisr_stats[number_of_cpus].queued += netisr_stats[j].queued; + } + } + } else { + error("DISABLED: net.isr module"); + return 1; + } + + // -------------------------------------------------------------------- + + if (likely(do_netisr)) { + static RRDSET *st = NULL; + static RRDDIM *rd_dispatched = NULL, *rd_hybrid_dispatched = NULL, *rd_qdrops = NULL, *rd_queued = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "softnet_stat", + NULL, + "softnet_stat", + NULL, + "System softnet_stat", + "events/s", + "freebsd.plugin", + "net.isr", + NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT, + update_every, + RRDSET_TYPE_LINE + ); + + rd_dispatched = rrddim_add(st, "dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_hybrid_dispatched = rrddim_add(st, "hybrid_dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_qdrops = rrddim_add(st, "qdrops", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_queued = rrddim_add(st, "queued", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_dispatched, netisr_stats[number_of_cpus].dispatched); + rrddim_set_by_pointer(st, rd_hybrid_dispatched, netisr_stats[number_of_cpus].hybrid_dispatched); + rrddim_set_by_pointer(st, rd_qdrops, netisr_stats[number_of_cpus].qdrops); + rrddim_set_by_pointer(st, rd_queued, netisr_stats[number_of_cpus].queued); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_netisr_per_core)) { + static struct softnet_chart { + char netisr_cpuid[MAX_INT_DIGITS + 17]; + RRDSET *st; + RRDDIM *rd_dispatched; + RRDDIM *rd_hybrid_dispatched; + RRDDIM *rd_qdrops; + RRDDIM *rd_queued; + } *all_softnet_charts = NULL; + static int old_number_of_cpus = 0; + int i; + + if(unlikely(number_of_cpus > old_number_of_cpus)) { + all_softnet_charts = 
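+
+            // netisr_stats[] is sized number_of_cpus + 1: one row per CPU,
+            // built by joining workstreams to work entries on the workstream
+            // id, plus a final row at index number_of_cpus that the loop above
+            // fills with system-wide totals, e.g.:
+            //
+            //     for (j = 0; j < number_of_cpus; j++)
+            //         netisr_stats[number_of_cpus].qdrops += netisr_stats[j].qdrops;
+            //
+            // The totals row feeds system.softnet_stat; the per-CPU rows feed
+            // the cpu*_softnet_stat charts below.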
reallocz(all_softnet_charts, sizeof(struct softnet_chart) * number_of_cpus); + memset(&all_softnet_charts[old_number_of_cpus], 0, sizeof(struct softnet_chart) * (number_of_cpus - old_number_of_cpus)); + old_number_of_cpus = number_of_cpus; + } + + for (i = 0; i < number_of_cpus ;i++) { + snprintfz(all_softnet_charts[i].netisr_cpuid, MAX_INT_DIGITS + 17, "cpu%d_softnet_stat", i); + + if (unlikely(!all_softnet_charts[i].st)) { + all_softnet_charts[i].st = rrdset_create_localhost( + "cpu", + all_softnet_charts[i].netisr_cpuid, + NULL, + "softnet_stat", + NULL, + "Per CPU netisr statistics", + "events/s", + "freebsd.plugin", + "net.isr", + NETDATA_CHART_PRIO_SOFTNET_PER_CORE + i, + update_every, + RRDSET_TYPE_LINE + ); + + all_softnet_charts[i].rd_dispatched = rrddim_add(all_softnet_charts[i].st, "dispatched", + NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + all_softnet_charts[i].rd_hybrid_dispatched = rrddim_add(all_softnet_charts[i].st, "hybrid_dispatched", + NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + all_softnet_charts[i].rd_qdrops = rrddim_add(all_softnet_charts[i].st, "qdrops", + NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + all_softnet_charts[i].rd_queued = rrddim_add(all_softnet_charts[i].st, "queued", + NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(all_softnet_charts[i].st); + + rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_dispatched, + netisr_stats[i].dispatched); + rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_hybrid_dispatched, + netisr_stats[i].hybrid_dispatched); + rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_qdrops, + netisr_stats[i].qdrops); + rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_queued, + netisr_stats[i].queued); + rrdset_done(all_softnet_charts[i].st); + } + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.inet.tcp.states + +int do_net_inet_tcp_states(int update_every, usec_t dt) { + (void)dt; + static int mib[4] = {0, 0, 0, 0}; + uint64_t tcps_states[TCP_NSTATES]; + + // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html + if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.states", mib, tcps_states))) { + error("DISABLED: ipv4.tcpsock chart"); + error("DISABLED: net.inet.tcp.states module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "tcpsock", + NULL, + "tcp", + NULL, + "IPv4 TCP Connections", + "active connections", + "freebsd.plugin", + "net.inet.tcp.states", + 2500, + update_every, + RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd, tcps_states[TCPS_ESTABLISHED]); + rrdset_done(st); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.inet.tcp.stats + +int do_net_inet_tcp_stats(int update_every, usec_t dt) { + (void)dt; + static int do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcpext_connaborts = -1, do_tcpext_ofo = -1, + do_tcpext_syncookies = -1, do_tcpext_listen = -1, do_ecn = -1; + + if (unlikely(do_tcp_packets == -1)) { + do_tcp_packets = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP packets", 1); + do_tcp_errors = 
config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP errors", 1); + do_tcp_handshake = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP handshake issues", 1); + do_tcpext_connaborts = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP connection aborts", + CONFIG_BOOLEAN_AUTO); + do_tcpext_ofo = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP out-of-order queue", + CONFIG_BOOLEAN_AUTO); + do_tcpext_syncookies = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP SYN cookies", + CONFIG_BOOLEAN_AUTO); + do_tcpext_listen = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP listen issues", + CONFIG_BOOLEAN_AUTO); + do_ecn = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "ECN packets", + CONFIG_BOOLEAN_AUTO); + } + + // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html + if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || + do_tcpext_syncookies || do_tcpext_listen || do_ecn)) { + static int mib[4] = {0, 0, 0, 0}; + struct tcpstat tcpstat; + + if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.stats", mib, tcpstat))) { + do_tcp_packets = 0; + error("DISABLED: ipv4.tcppackets chart"); + do_tcp_errors = 0; + error("DISABLED: ipv4.tcperrors chart"); + do_tcp_handshake = 0; + error("DISABLED: ipv4.tcphandshake chart"); + do_tcpext_connaborts = 0; + error("DISABLED: ipv4.tcpconnaborts chart"); + do_tcpext_ofo = 0; + error("DISABLED: ipv4.tcpofo chart"); + do_tcpext_syncookies = 0; + error("DISABLED: ipv4.tcpsyncookies chart"); + do_tcpext_listen = 0; + error("DISABLED: ipv4.tcplistenissues chart"); + do_ecn = 0; + error("DISABLED: ipv4.ecnpkts chart"); + error("DISABLED: net.inet.tcp.stats module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + if (likely(do_tcp_packets)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in_segs = NULL, *rd_out_segs = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "tcppackets", + NULL, + "tcp", + NULL, + "IPv4 TCP Packets", + "packets/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 2600, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in_segs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_segs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_segs, tcpstat.tcps_rcvtotal); + rrddim_set_by_pointer(st, rd_out_segs, tcpstat.tcps_sndtotal); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_tcp_errors)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in_errs = NULL, *rd_in_csum_errs = NULL, *rd_retrans_segs = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "tcperrors", + NULL, + "tcp", + NULL, + "IPv4 TCP Errors", + "packets/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 2700, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + +#if __FreeBSD__ >= 11 + rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvreassfull + + tcpstat.tcps_rcvshort); +#else + 
rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort); +#endif + rrddim_set_by_pointer(st, rd_in_csum_errs, tcpstat.tcps_rcvbadsum); + rrddim_set_by_pointer(st, rd_retrans_segs, tcpstat.tcps_sndrexmitpack); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_tcp_handshake)) { + static RRDSET *st = NULL; + static RRDDIM *rd_estab_resets = NULL, *rd_active_opens = NULL, *rd_passive_opens = NULL, + *rd_attempt_fails = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "tcphandshake", + NULL, + "tcp", + NULL, + "IPv4 TCP Handshake Issues", + "events/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 2900, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_attempt_fails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_estab_resets, tcpstat.tcps_drops); + rrddim_set_by_pointer(st, rd_active_opens, tcpstat.tcps_connattempt); + rrddim_set_by_pointer(st, rd_passive_opens, tcpstat.tcps_accepts); + rrddim_set_by_pointer(st, rd_attempt_fails, tcpstat.tcps_conndrops); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop || tcpstat.tcps_finwait2_drops))) { + do_tcpext_connaborts = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_on_data = NULL, *rd_on_close = NULL, *rd_on_memory = NULL, + *rd_on_timeout = NULL, *rd_on_linger = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "tcpconnaborts", + NULL, + "tcp", + NULL, + "TCP Connection Aborts", + "connections/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 3010, + update_every, + RRDSET_TYPE_LINE + ); + + rd_on_data = rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_on_close = rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_on_memory = rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_on_timeout = rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_on_linger = rrddim_add(st, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_on_data, tcpstat.tcps_rcvpackafterwin); + rrddim_set_by_pointer(st, rd_on_close, tcpstat.tcps_rcvafterclose); + rrddim_set_by_pointer(st, rd_on_memory, tcpstat.tcps_rcvmemdrop); + rrddim_set_by_pointer(st, rd_on_timeout, tcpstat.tcps_persistdrop); + rrddim_set_by_pointer(st, rd_on_linger, tcpstat.tcps_finwait2_drops); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { + do_tcpext_ofo = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ofo_queue = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + 
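The #if __FreeBSD__ >= 11 guard above exists because the tcps_rcvreassfull counter only appears in struct tcpstat from FreeBSD 11 on; __FreeBSD__ expands to the major release number, so the field can be gated at compile time:

    #include <stdio.h>

    int main(void) {
    #if defined(__FreeBSD__) && __FreeBSD__ >= 11
        /* reassembly-queue overflows are part of the InErrs sum */
        puts("InErrs = rcvbadoff + rcvreassfull + rcvshort");
    #else
        /* pre-11 struct tcpstat has no tcps_rcvreassfull */
        puts("InErrs = rcvbadoff + rcvshort");
    #endif
        return 0;
    }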
"tcpofo", + NULL, + "tcp", + NULL, + "TCP Out-Of-Order Queue", + "packets/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 3050, + update_every, + RRDSET_TYPE_LINE + ); + + rd_ofo_queue = rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ofo_queue, tcpstat.tcps_rcvoopack); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { + do_tcpext_syncookies = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_recv = NULL, *rd_send = NULL, *rd_failed = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "tcpsyncookies", + NULL, + "tcp", + NULL, + "TCP SYN Cookies", + "packets/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 3100, + update_every, + RRDSET_TYPE_LINE + ); + + rd_recv = rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_send = rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_recv, tcpstat.tcps_sc_recvcookie); + rrddim_set_by_pointer(st, rd_send, tcpstat.tcps_sc_sendcookie); + rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) { + do_tcpext_listen = CONFIG_BOOLEAN_YES; + + static RRDSET *st_listen = NULL; + static RRDDIM *rd_overflows = NULL; + + if(unlikely(!st_listen)) { + + st_listen = rrdset_create_localhost( + "ipv4", + "tcplistenissues", + NULL, + "tcp", + NULL, + "TCP Listen Socket Issues", + "packets/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 3015, + update_every, + RRDSET_TYPE_LINE + ); + + rd_overflows = rrddim_add(st_listen, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_listen); + + rrddim_set_by_pointer(st_listen, rd_overflows, tcpstat.tcps_listendrop); + + rrdset_done(st_listen); + } + + // -------------------------------------------------------------------- + + if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_ce || tcpstat.tcps_ecn_ect0 || tcpstat.tcps_ecn_ect1))) { + do_ecn = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ce = NULL, *rd_no_ect = NULL, *rd_ect0 = NULL, *rd_ect1 = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "ecnpkts", + NULL, + "ecn", + NULL, + "IPv4 ECN Statistics", + "packets/s", + "freebsd.plugin", + "net.inet.tcp.stats", + 8700, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ce = rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_no_ect = rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ect0 = rrddim_add(st, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ect1 = rrddim_add(st, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ce, tcpstat.tcps_ecn_ce); + rrddim_set_by_pointer(st, rd_no_ect, tcpstat.tcps_ecn_ce - (tcpstat.tcps_ecn_ect0 
+ + tcpstat.tcps_ecn_ect1)); + rrddim_set_by_pointer(st, rd_ect0, tcpstat.tcps_ecn_ect0); + rrddim_set_by_pointer(st, rd_ect1, tcpstat.tcps_ecn_ect1); + rrdset_done(st); + } + + } + } else { + error("DISABLED: net.inet.tcp.stats module"); + return 1; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.inet.udp.stats + +int do_net_inet_udp_stats(int update_every, usec_t dt) { + (void)dt; + static int do_udp_packets = -1, do_udp_errors = -1; + + if (unlikely(do_udp_packets == -1)) { + do_udp_packets = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP packets", 1); + do_udp_errors = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP errors", 1); + } + + // see http://net-snmp.sourceforge.net/docs/mibs/udp.html + if (likely(do_udp_packets || do_udp_errors)) { + static int mib[4] = {0, 0, 0, 0}; + struct udpstat udpstat; + + if (unlikely(GETSYSCTL_SIMPLE("net.inet.udp.stats", mib, udpstat))) { + do_udp_packets = 0; + error("DISABLED: ipv4.udppackets chart"); + do_udp_errors = 0; + error("DISABLED: ipv4.udperrors chart"); + error("DISABLED: net.inet.udp.stats module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + if (likely(do_udp_packets)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "udppackets", + NULL, + "udp", + NULL, + "IPv4 UDP Packets", + "packets/s", + "freebsd.plugin", + "net.inet.udp.stats", + 2601, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, udpstat.udps_ipackets); + rrddim_set_by_pointer(st, rd_out, udpstat.udps_opackets); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_udp_errors)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in_errors = NULL, *rd_no_ports = NULL, *rd_recv_buf_errors = NULL, + *rd_in_csum_errors = NULL, *rd_ignored_multi = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "udperrors", + NULL, + "udp", + NULL, + "IPv4 UDP Errors", + "events/s", + "freebsd.plugin", + "net.inet.udp.stats", + 2701, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ignored_multi = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_errors, udpstat.udps_hdrops + udpstat.udps_badlen); + rrddim_set_by_pointer(st, rd_no_ports, udpstat.udps_noport); + rrddim_set_by_pointer(st, rd_recv_buf_errors, udpstat.udps_fullsock); + rrddim_set_by_pointer(st, rd_in_csum_errors, udpstat.udps_badsum + udpstat.udps_nosum); + rrddim_set_by_pointer(st, rd_ignored_multi, udpstat.udps_filtermcast); + rrdset_done(st); + } + } + } else { + error("DISABLED: net.inet.udp.stats module"); + return 1; + } + + return 0; +} + +// 
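Every collector in this file repeats the same chart lifecycle: create the RRDSET and its dimensions once, call rrdset_next() on every later pass, then set each dimension and close the iteration with rrdset_done(). The pattern condensed to a skeleton, using only calls that appear in this patch; the chart names and metric source are placeholders, and it compiles only inside the netdata tree:

    int do_example_module(int update_every, usec_t dt) {
        (void)dt;
        static RRDSET *st = NULL;       /* cached across iterations */
        static RRDDIM *rd = NULL;

        unsigned long long value = 0;   /* read from a sysctl here */

        if (unlikely(!st)) {
            st = rrdset_create_localhost(
                "example", "my_metric", NULL, "family", NULL,
                "Example Chart", "events/s", "freebsd.plugin",
                "example.module", 1000, update_every, RRDSET_TYPE_LINE);
            rd = rrddim_add(st, "value", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
        }
        else
            rrdset_next(st);            /* begin a new data collection pass */

        rrddim_set_by_pointer(st, rd, value);
        rrdset_done(st);
        return 0;                       /* non-zero disables the module */
    }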
-------------------------------------------------------------------------------------------------------------------- +// net.inet.icmp.stats + +int do_net_inet_icmp_stats(int update_every, usec_t dt) { + (void)dt; + static int do_icmp_packets = -1, do_icmp_errors = -1, do_icmpmsg = -1; + + if (unlikely(do_icmp_packets == -1)) { + do_icmp_packets = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP packets", 1); + do_icmp_errors = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP errors", 1); + do_icmpmsg = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP messages", 1); + } + + if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) { + static int mib[4] = {0, 0, 0, 0}; + struct icmpstat icmpstat; + int i; + struct icmp_total { + u_long msgs_in; + u_long msgs_out; + } icmp_total = {0, 0}; + + if (unlikely(GETSYSCTL_SIMPLE("net.inet.icmp.stats", mib, icmpstat))) { + do_icmp_packets = 0; + error("DISABLED: ipv4.icmp chart"); + do_icmp_errors = 0; + error("DISABLED: ipv4.icmp_errors chart"); + do_icmpmsg = 0; + error("DISABLED: ipv4.icmpmsg chart"); + error("DISABLED: net.inet.icmp.stats module"); + return 1; + } else { + for (i = 0; i <= ICMP_MAXTYPE; i++) { + icmp_total.msgs_in += icmpstat.icps_inhist[i]; + icmp_total.msgs_out += icmpstat.icps_outhist[i]; + } + icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort; + + // -------------------------------------------------------------------- + + if (likely(do_icmp_packets)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "icmp" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Packets" + , "packets/s" + , "freebsd.plugin" + , "net.inet.icmp.stats" + , 2602 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_in = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, icmp_total.msgs_in); + rrddim_set_by_pointer(st, rd_out, icmp_total.msgs_out); + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_icmp_errors)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_csum = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "icmp_errors" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Errors" + , "packets/s" + , "freebsd.plugin" + , "net.inet.icmp.stats" + , 2603 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_in = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_csum = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, icmpstat.icps_badcode + icmpstat.icps_badlen + + icmpstat.icps_checksum + icmpstat.icps_tooshort); + rrddim_set_by_pointer(st, rd_out, icmpstat.icps_error); + rrddim_set_by_pointer(st, rd_in_csum, icmpstat.icps_checksum); + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_icmpmsg)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in_reps = NULL, *rd_out_reps = NULL, *rd_in = NULL, *rd_out = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "icmpmsg" + , NULL + , "icmp" 
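The ICMP totals above come from summing the per-type histograms, then folding the malformed-input counters into the inbound total, since packets rejected as bad never reach icps_inhist[]. The same computation as a standalone helper (ICMP_MAXTYPE mirrors the value 40 from <netinet/ip_icmp.h>, defined locally to keep the sketch self-contained):

    #include <sys/types.h>

    #define ICMP_MAXTYPE 40

    struct icmp_totals { u_long msgs_in, msgs_out; };

    static struct icmp_totals
    sum_icmp(const u_long inhist[ICMP_MAXTYPE + 1],
             const u_long outhist[ICMP_MAXTYPE + 1],
             u_long badcode, u_long badlen, u_long checksum, u_long tooshort) {
        struct icmp_totals t = { 0, 0 };
        int i;
        for (i = 0; i <= ICMP_MAXTYPE; i++) {
            t.msgs_in  += inhist[i];
            t.msgs_out += outhist[i];
        }
        /* the kernel counts rejected input separately, so add it to "in" */
        t.msgs_in += badcode + badlen + checksum + tooshort;
        return t;
    }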
+ , NULL + , "IPv4 ICMP Messages" + , "packets/s" + , "freebsd.plugin" + , "net.inet.icmp.stats" + , 2604 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_in_reps = rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_reps = rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_reps, icmpstat.icps_inhist[ICMP_ECHOREPLY]); + rrddim_set_by_pointer(st, rd_out_reps, icmpstat.icps_outhist[ICMP_ECHOREPLY]); + rrddim_set_by_pointer(st, rd_in, icmpstat.icps_inhist[ICMP_ECHO]); + rrddim_set_by_pointer(st, rd_out, icmpstat.icps_outhist[ICMP_ECHO]); + + rrdset_done(st); + } + } + } else { + error("DISABLED: net.inet.icmp.stats module"); + return 1; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.inet.ip.stats + +int do_net_inet_ip_stats(int update_every, usec_t dt) { + (void)dt; + static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1; + + if (unlikely(do_ip_packets == -1)) { + do_ip_packets = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 packets", 1); + do_ip_fragsout = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments sent", 1); + do_ip_fragsin = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments assembly", 1); + do_ip_errors = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 errors", 1); + } + + // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html + if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) { + static int mib[4] = {0, 0, 0, 0}; + struct ipstat ipstat; + + if (unlikely(GETSYSCTL_SIMPLE("net.inet.ip.stats", mib, ipstat))) { + do_ip_packets = 0; + error("DISABLED: ipv4.packets chart"); + do_ip_fragsout = 0; + error("DISABLED: ipv4.fragsout chart"); + do_ip_fragsin = 0; + error("DISABLED: ipv4.fragsin chart"); + do_ip_errors = 0; + error("DISABLED: ipv4.errors chart"); + error("DISABLED: net.inet.ip.stats module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + if (likely(do_ip_packets)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in_receives = NULL, *rd_out_requests = NULL, *rd_forward_datagrams = NULL, + *rd_in_delivers = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "packets", + NULL, + "packets", + NULL, + "IPv4 Packets", + "packets/s", + "freebsd.plugin", + "net.inet.ip.stats", + 3000, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in_receives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_requests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_forward_datagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_delivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_receives, ipstat.ips_total); + rrddim_set_by_pointer(st, rd_out_requests, ipstat.ips_localout); + rrddim_set_by_pointer(st, rd_forward_datagrams, ipstat.ips_forward); + rrddim_set_by_pointer(st, rd_in_delivers, ipstat.ips_delivered); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if 
(likely(do_ip_fragsout)) { + static RRDSET *st = NULL; + static RRDDIM *rd_ok = NULL, *rd_fails = NULL, *rd_created = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "fragsout", + NULL, + "fragments", + NULL, + "IPv4 Fragments Sent", + "packets/s", + "freebsd.plugin", + "net.inet.ip.stats", + 3010, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragmented); + rrddim_set_by_pointer(st, rd_fails, ipstat.ips_cantfrag); + rrddim_set_by_pointer(st, rd_created, ipstat.ips_ofragments); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_ip_fragsin)) { + static RRDSET *st = NULL; + static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "fragsin", + NULL, + "fragments", + NULL, + "IPv4 Fragments Reassembly", + "packets/s", + "freebsd.plugin", + "net.inet.ip.stats", + 3011, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ok, ipstat.ips_reassembled); + rrddim_set_by_pointer(st, rd_failed, ipstat.ips_fragdropped); + rrddim_set_by_pointer(st, rd_all, ipstat.ips_fragments); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_ip_errors)) { + static RRDSET *st = NULL; + static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL, + *rd_in_hdr_errors = NULL, *rd_out_no_routes = NULL, + *rd_in_addr_errors = NULL, *rd_in_unknown_protos = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4", + "errors", + NULL, + "errors", + NULL, + "IPv4 Errors", + "packets/s", + "freebsd.plugin", + "net.inet.ip.stats", + 3002, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_unknown_protos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_discards, ipstat.ips_badsum + ipstat.ips_tooshort + + ipstat.ips_toosmall + ipstat.ips_toolong); + rrddim_set_by_pointer(st, rd_out_discards, ipstat.ips_odropped); + rrddim_set_by_pointer(st, rd_in_hdr_errors, ipstat.ips_badhlen + ipstat.ips_badlen + + ipstat.ips_badoptions + ipstat.ips_badvers); + rrddim_set_by_pointer(st, rd_out_no_routes, ipstat.ips_noroute); + rrddim_set_by_pointer(st, rd_in_addr_errors, ipstat.ips_badaddr); + rrddim_set_by_pointer(st, 
rd_in_unknown_protos, ipstat.ips_noproto); + rrdset_done(st); + } + } + } else { + error("DISABLED: net.inet.ip.stats module"); + return 1; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.inet6.ip6.stats + +int do_net_inet6_ip6_stats(int update_every, usec_t dt) { + (void)dt; + static int do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1; + + if (unlikely(do_ip6_packets == -1)) { + do_ip6_packets = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 packets", + CONFIG_BOOLEAN_AUTO); + do_ip6_fragsout = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments sent", + CONFIG_BOOLEAN_AUTO); + do_ip6_fragsin = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments assembly", + CONFIG_BOOLEAN_AUTO); + do_ip6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 errors", + CONFIG_BOOLEAN_AUTO); + } + + if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) { + static int mib[4] = {0, 0, 0, 0}; + struct ip6stat ip6stat; + + if (unlikely(GETSYSCTL_SIMPLE("net.inet6.ip6.stats", mib, ip6stat))) { + do_ip6_packets = 0; + error("DISABLED: ipv6.packets chart"); + do_ip6_fragsout = 0; + error("DISABLED: ipv6.fragsout chart"); + do_ip6_fragsin = 0; + error("DISABLED: ipv6.fragsin chart"); + do_ip6_errors = 0; + error("DISABLED: ipv6.errors chart"); + error("DISABLED: net.inet6.ip6.stats module"); + return 1; + } else { + + // -------------------------------------------------------------------- + + if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_localout || ip6stat.ip6s_total || + ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { + do_ip6_packets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_forwarded = NULL, *rd_delivers = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "packets", + NULL, + "packets", + NULL, + "IPv6 Packets", + "packets/s", + "freebsd.plugin", + "net.inet6.ip6.stats", + 3000, + update_every, + RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_forwarded = rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_delivers = rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_sent, ip6stat.ip6s_localout); + rrddim_set_by_pointer(st, rd_received, ip6stat.ip6s_total); + rrddim_set_by_pointer(st, rd_forwarded, ip6stat.ip6s_forward); + rrddim_set_by_pointer(st, rd_delivers, ip6stat.ip6s_delivered); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || + ip6stat.ip6s_ofragments))) { + do_ip6_fragsout = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "fragsout", + NULL, + "fragments", + NULL, + "IPv6 Fragments Sent", + "packets/s", + "freebsd.plugin", + "net.inet6.ip6.stats", + 3010, + update_every, + RRDSET_TYPE_LINE + ); + + 
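The on-demand charts above use a tri-state flag: AUTO creates the chart only once the relevant counters turn non-zero and is then latched to YES, so a chart that has ever shown activity keeps updating even after the counters go quiet. The latch in isolation (the numeric enum values are assumptions; only the constant names come from this patch):

    enum { CONFIG_BOOLEAN_NO = 0, CONFIG_BOOLEAN_YES = 1, CONFIG_BOOLEAN_AUTO = 2 };

    static int should_plot(int *state, unsigned long long activity) {
        if (*state == CONFIG_BOOLEAN_YES ||
            (*state == CONFIG_BOOLEAN_AUTO && activity)) {
            *state = CONFIG_BOOLEAN_YES;    /* latch: once seen, keep charting */
            return 1;
        }
        return 0;
    }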
rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_fragmented); + rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_cantfrag); + rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_ofragments); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || + ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { + do_ip6_fragsin = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_timeout = NULL, *rd_all = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "fragsin", + NULL, + "fragments", + NULL, + "IPv6 Fragments Reassembly", + "packets/s", + "freebsd.plugin", + "net.inet6.ip6.stats", + 3011, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_reassembled); + rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_fragdropped); + rrddim_set_by_pointer(st, rd_timeout, ip6stat.ip6s_fragtimeout); + rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_fragments); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( + ip6stat.ip6s_toosmall || + ip6stat.ip6s_odropped || + ip6stat.ip6s_badoptions || + ip6stat.ip6s_badvers || + ip6stat.ip6s_exthdrtoolong || + ip6stat.ip6s_sources_none || + ip6stat.ip6s_tooshort || + ip6stat.ip6s_cantforward || + ip6stat.ip6s_noroute))) { + do_ip6_errors = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL, + *rd_in_hdr_errors = NULL, *rd_in_addr_errors = NULL, *rd_in_truncated_pkts = NULL, + *rd_in_no_routes = NULL, *rd_out_no_routes = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "errors", + NULL, + "errors", + NULL, + "IPv6 Errors", + "packets/s", + "freebsd.plugin", + "net.inet6.ip6.stats", + 3002, + update_every, + RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_truncated_pkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_no_routes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_discards, 
ip6stat.ip6s_toosmall); + rrddim_set_by_pointer(st, rd_out_discards, ip6stat.ip6s_odropped); + rrddim_set_by_pointer(st, rd_in_hdr_errors, ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + + ip6stat.ip6s_exthdrtoolong); + rrddim_set_by_pointer(st, rd_in_addr_errors, ip6stat.ip6s_sources_none); + rrddim_set_by_pointer(st, rd_in_truncated_pkts, ip6stat.ip6s_tooshort); + rrddim_set_by_pointer(st, rd_in_no_routes, ip6stat.ip6s_cantforward); + rrddim_set_by_pointer(st, rd_out_no_routes, ip6stat.ip6s_noroute); + rrdset_done(st); + } + } + } else { + error("DISABLED: net.inet6.ip6.stats module"); + return 1; + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// net.inet6.icmp6.stats + +int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { + (void)dt; + static int do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1, do_icmp6_router = -1, + do_icmp6_neighbor = -1, do_icmp6_types = -1; + + if (unlikely(do_icmp6 == -1)) { + do_icmp6 = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp", + CONFIG_BOOLEAN_AUTO); + do_icmp6_redir = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp redirects", + CONFIG_BOOLEAN_AUTO); + do_icmp6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp errors", + CONFIG_BOOLEAN_AUTO); + do_icmp6_echos = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp echos", + CONFIG_BOOLEAN_AUTO); + do_icmp6_router = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp router", + CONFIG_BOOLEAN_AUTO); + do_icmp6_neighbor = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp neighbor", + CONFIG_BOOLEAN_AUTO); + do_icmp6_types = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp types", + CONFIG_BOOLEAN_AUTO); + } + + if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) { + static int mib[4] = {0, 0, 0, 0}; + struct icmp6stat icmp6stat; + + if (unlikely(GETSYSCTL_SIMPLE("net.inet6.icmp6.stats", mib, icmp6stat))) { + do_icmp6 = 0; + error("DISABLED: ipv6.icmp chart"); + do_icmp6_redir = 0; + error("DISABLED: ipv6.icmpredir chart"); + do_icmp6_errors = 0; + error("DISABLED: ipv6.icmperrors chart"); + do_icmp6_echos = 0; + error("DISABLED: ipv6.icmpechos chart"); + do_icmp6_router = 0; + error("DISABLED: ipv6.icmprouter chart"); + do_icmp6_neighbor = 0; + error("DISABLED: ipv6.icmpneighbor chart"); + do_icmp6_types = 0; + error("DISABLED: ipv6.icmptypes chart"); + error("DISABLED: net.inet6.icmp6.stats module"); + return 1; + } else { + int i; + struct icmp6_total { + u_long msgs_in; + u_long msgs_out; + } icmp6_total = {0, 0}; + + for (i = 0; i <= ICMP6_MAXTYPE; i++) { + icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i]; + icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i]; + } + icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort; + + // -------------------------------------------------------------------- + + if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) { + do_icmp6 = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, *rd_sent = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmp", + NULL, + "icmp", + NULL, + "IPv6 
ICMP Messages", + "messages/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10000, + update_every, + RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, icmp6_total.msgs_in); + rrddim_set_by_pointer(st, rd_sent, icmp6_total.msgs_out); + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { + do_icmp6_redir = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, *rd_sent = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmpredir", + NULL, + "icmp", + NULL, + "IPv6 ICMP Redirects", + "redirects/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10050, + update_every, + RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, icmp6stat.icp6s_inhist[ND_REDIRECT]); + rrddim_set_by_pointer(st, rd_sent, icmp6stat.icp6s_outhist[ND_REDIRECT]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_badcode || + icmp6stat.icp6s_badlen || + icmp6stat.icp6s_checksum || + icmp6stat.icp6s_tooshort || + icmp6stat.icp6s_error || + icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || + icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { + do_icmp6_errors = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_in_errors = NULL, *rd_out_errors = NULL, *rd_in_csum_errors = NULL, + *rd_in_dest_unreachs = NULL, *rd_in_pkt_too_bigs = NULL, *rd_in_time_excds = NULL, + *rd_in_parm_problems = NULL, *rd_out_dest_unreachs = NULL, *rd_out_time_excds = NULL, + *rd_out_parm_problems = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmperrors", + NULL, "icmp", + NULL, + "IPv6 ICMP Errors", + "errors/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10100, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_errors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_dest_unreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_pkt_too_bigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_time_excds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_parm_problems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_dest_unreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_time_excds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_parm_problems = rrddim_add(st, 
"OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_errors, icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort); + rrddim_set_by_pointer(st, rd_out_errors, icmp6stat.icp6s_error); + rrddim_set_by_pointer(st, rd_in_csum_errors, icmp6stat.icp6s_checksum); + rrddim_set_by_pointer(st, rd_in_dest_unreachs, icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]); + rrddim_set_by_pointer(st, rd_in_pkt_too_bigs, icmp6stat.icp6s_badlen); + rrddim_set_by_pointer(st, rd_in_time_excds, icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]); + rrddim_set_by_pointer(st, rd_in_parm_problems, icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]); + rrddim_set_by_pointer(st, rd_out_dest_unreachs, icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]); + rrddim_set_by_pointer(st, rd_out_time_excds, icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]); + rrddim_set_by_pointer(st, rd_out_parm_problems, icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { + do_icmp6_echos = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_replies = NULL, *rd_out_replies = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmpechos", + NULL, + "icmp", + NULL, + "IPv6 ICMP Echo", + "messages/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10200, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_replies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_replies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in, icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]); + rrddim_set_by_pointer(st, rd_out, icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]); + rrddim_set_by_pointer(st, rd_in_replies, icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]); + rrddim_set_by_pointer(st, rd_out_replies, icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || + icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { + do_icmp6_router = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL, + *rd_in_advertisements = NULL, *rd_out_advertisements = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmprouter", + NULL, + "icmp", + NULL, + "IPv6 Router Messages", + "messages/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10400, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_advertisements = rrddim_add(st, 
"InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]); + rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]); + rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]); + rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { + do_icmp6_neighbor = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL, + *rd_in_advertisements = NULL, *rd_out_advertisements = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmpneighbor", + NULL, + "icmp", + NULL, + "IPv6 Neighbor Messages", + "messages/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10500, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]); + rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]); + rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]); + rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[1] || + icmp6stat.icp6s_inhist[128] || + icmp6stat.icp6s_inhist[129] || + icmp6stat.icp6s_inhist[136] || + icmp6stat.icp6s_outhist[1] || + icmp6stat.icp6s_outhist[128] || + icmp6stat.icp6s_outhist[129] || + icmp6stat.icp6s_outhist[133] || + icmp6stat.icp6s_outhist[135] || + icmp6stat.icp6s_outhist[136]))) { + do_icmp6_types = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_in_1 = NULL, *rd_in_128 = NULL, *rd_in_129 = NULL, *rd_in_136 = NULL, + *rd_out_1 = NULL, *rd_out_128 = NULL, *rd_out_129 = NULL, *rd_out_133 = NULL, + *rd_out_135 = NULL, *rd_out_143 = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6", + "icmptypes", + NULL, + "icmp", + NULL, + "IPv6 ICMP Types", + "messages/s", + "freebsd.plugin", + "net.inet6.icmp6.stats", + 10700, + update_every, + RRDSET_TYPE_LINE + ); + + rd_in_1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_in_136 = rrddim_add(st, "InType136", NULL, 1, 1, 
RRD_ALGORITHM_INCREMENTAL); + rd_out_1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out_143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_in_1, icmp6stat.icp6s_inhist[1]); + rrddim_set_by_pointer(st, rd_in_128, icmp6stat.icp6s_inhist[128]); + rrddim_set_by_pointer(st, rd_in_129, icmp6stat.icp6s_inhist[129]); + rrddim_set_by_pointer(st, rd_in_136, icmp6stat.icp6s_inhist[136]); + rrddim_set_by_pointer(st, rd_out_1, icmp6stat.icp6s_outhist[1]); + rrddim_set_by_pointer(st, rd_out_128, icmp6stat.icp6s_outhist[128]); + rrddim_set_by_pointer(st, rd_out_129, icmp6stat.icp6s_outhist[129]); + rrddim_set_by_pointer(st, rd_out_133, icmp6stat.icp6s_outhist[133]); + rrddim_set_by_pointer(st, rd_out_135, icmp6stat.icp6s_outhist[135]); + rrddim_set_by_pointer(st, rd_out_143, icmp6stat.icp6s_outhist[143]); + rrdset_done(st); + } + } + } else { + error("DISABLED: net.inet6.icmp6.stats module"); + return 1; + } + + return 0; +} diff --git a/collectors/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c new file mode 100644 index 000000000..5cde37113 --- /dev/null +++ b/collectors/freebsd.plugin/plugin_freebsd.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_freebsd.h" + +static struct freebsd_module { + const char *name; + const char *dim; + + int enabled; + + int (*func)(int update_every, usec_t dt); + usec_t duration; + + RRDDIM *rd; + +} freebsd_modules[] = { + + // system metrics + { .name = "kern.cp_time", .dim = "cp_time", .enabled = 1, .func = do_kern_cp_time }, + { .name = "vm.loadavg", .dim = "loadavg", .enabled = 1, .func = do_vm_loadavg }, + { .name = "system.ram", .dim = "system_ram", .enabled = 1, .func = do_system_ram }, + { .name = "vm.swap_info", .dim = "swap", .enabled = 1, .func = do_vm_swap_info }, + { .name = "vm.stats.vm.v_swappgs", .dim = "swap_io", .enabled = 1, .func = do_vm_stats_sys_v_swappgs }, + { .name = "vm.vmtotal", .dim = "vmtotal", .enabled = 1, .func = do_vm_vmtotal }, + { .name = "vm.stats.vm.v_forks", .dim = "forks", .enabled = 1, .func = do_vm_stats_sys_v_forks }, + { .name = "vm.stats.sys.v_swtch", .dim = "context_swtch", .enabled = 1, .func = do_vm_stats_sys_v_swtch }, + { .name = "hw.intrcnt", .dim = "hw_intr", .enabled = 1, .func = do_hw_intcnt }, + { .name = "vm.stats.sys.v_intr", .dim = "dev_intr", .enabled = 1, .func = do_vm_stats_sys_v_intr }, + { .name = "vm.stats.sys.v_soft", .dim = "soft_intr", .enabled = 1, .func = do_vm_stats_sys_v_soft }, + { .name = "net.isr", .dim = "net_isr", .enabled = 1, .func = do_net_isr }, + { .name = "kern.ipc.sem", .dim = "semaphores", .enabled = 1, .func = do_kern_ipc_sem }, + { .name = "kern.ipc.shm", .dim = "shared_memory", .enabled = 1, .func = do_kern_ipc_shm }, + { .name = "kern.ipc.msq", .dim = "message_queues", .enabled = 1, .func = do_kern_ipc_msq }, + { .name = "uptime", .dim = "uptime", .enabled = 1, .func = do_uptime }, + + // memory metrics + { .name = "vm.stats.vm.v_pgfaults", .dim = "pgfaults", .enabled = 1, .func = do_vm_stats_sys_v_pgfaults }, + + // CPU metrics + { .name = "kern.cp_times", .dim = 
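freebsd_modules[] above is a NULL-terminated dispatch table: each entry binds a sysctl-style name to a do_*() collector through a function pointer, and the main loop walks entries until the .name == NULL sentinel. The pattern in miniature, with invented module names for illustration:

    #include <stdio.h>

    struct module {
        const char *name;
        int enabled;
        int (*func)(int update_every);
    };

    static int do_a(int ue) { (void)ue; return 0; }
    static int do_b(int ue) { (void)ue; return 1; }   /* a failing module */

    static struct module modules[] = {
        { "module_a", 1, do_a },
        { "module_b", 1, do_b },
        { NULL, 0, NULL }        /* terminator, as in freebsd_modules[] */
    };

    int main(void) {
        int i;
        for (i = 0; modules[i].name; i++) {
            if (!modules[i].enabled) continue;
            /* a non-zero return permanently disables the module, matching
             * pm->enabled = !pm->func(...) in freebsd_main() */
            modules[i].enabled = !modules[i].func(1);
            if (!modules[i].enabled)
                fprintf(stderr, "disabled: %s\n", modules[i].name);
        }
        return 0;
    }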
"cp_times", .enabled = 1, .func = do_kern_cp_times }, + { .name = "dev.cpu.temperature", .dim = "cpu_temperature", .enabled = 1, .func = do_dev_cpu_temperature }, + { .name = "dev.cpu.0.freq", .dim = "cpu_frequency", .enabled = 1, .func = do_dev_cpu_0_freq }, + + // disk metrics + { .name = "kern.devstat", .dim = "kern_devstat", .enabled = 1, .func = do_kern_devstat }, + { .name = "getmntinfo", .dim = "getmntinfo", .enabled = 1, .func = do_getmntinfo }, + + // network metrics + { .name = "net.inet.tcp.states", .dim = "tcp_states", .enabled = 1, .func = do_net_inet_tcp_states }, + { .name = "net.inet.tcp.stats", .dim = "tcp_stats", .enabled = 1, .func = do_net_inet_tcp_stats }, + { .name = "net.inet.udp.stats", .dim = "udp_stats", .enabled = 1, .func = do_net_inet_udp_stats }, + { .name = "net.inet.icmp.stats", .dim = "icmp_stats", .enabled = 1, .func = do_net_inet_icmp_stats }, + { .name = "net.inet.ip.stats", .dim = "ip_stats", .enabled = 1, .func = do_net_inet_ip_stats }, + { .name = "net.inet6.ip6.stats", .dim = "ip6_stats", .enabled = 1, .func = do_net_inet6_ip6_stats }, + { .name = "net.inet6.icmp6.stats", .dim = "icmp6_stats", .enabled = 1, .func = do_net_inet6_icmp6_stats }, + + // network interfaces metrics + { .name = "getifaddrs", .dim = "getifaddrs", .enabled = 1, .func = do_getifaddrs }, + + // ZFS metrics + { .name = "kstat.zfs.misc.arcstats", .dim = "arcstats", .enabled = 1, .func = do_kstat_zfs_misc_arcstats }, + { .name = "kstat.zfs.misc.zio_trim", .dim = "trim", .enabled = 1, .func = do_kstat_zfs_misc_zio_trim }, + + // ipfw metrics + { .name = "ipfw", .dim = "ipfw", .enabled = 1, .func = do_ipfw }, + + // the terminator of this array + { .name = NULL, .dim = NULL, .enabled = 0, .func = NULL } +}; + +static void freebsd_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *freebsd_main(void *ptr) { + netdata_thread_cleanup_push(freebsd_main_cleanup, ptr); + + int vdo_cpu_netdata = config_get_boolean("plugin:freebsd", "netdata server resources", 1); + + // initialize FreeBSD plugin + if (freebsd_plugin_init()) + netdata_cleanup_and_exit(1); + + // check the enabled status for each module + int i; + for(i = 0 ; freebsd_modules[i].name ;i++) { + struct freebsd_module *pm = &freebsd_modules[i]; + + pm->enabled = config_get_boolean("plugin:freebsd", pm->name, pm->enabled); + pm->duration = 0ULL; + pm->rd = NULL; + } + + usec_t step = localhost->rrd_update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + + while(!netdata_exit) { + usec_t hb_dt = heartbeat_next(&hb, step); + usec_t duration = 0ULL; + + if(unlikely(netdata_exit)) break; + + // BEGIN -- the job to be done + + for(i = 0 ; freebsd_modules[i].name ;i++) { + struct freebsd_module *pm = &freebsd_modules[i]; + if(unlikely(!pm->enabled)) continue; + + debug(D_PROCNETDEV_LOOP, "FREEBSD calling %s.", pm->name); + + pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt); + pm->duration = heartbeat_monotonic_dt_to_now_usec(&hb) - duration; + duration += pm->duration; + + if(unlikely(netdata_exit)) break; + } + + // END -- the job is done + + // -------------------------------------------------------------------- + + if(vdo_cpu_netdata) { + static RRDSET *st = NULL; + + if(unlikely(!st)) { + st = rrdset_find_bytype_localhost("netdata", "plugin_freebsd_modules"); + + if(!st) { + st = 
rrdset_create_localhost( + "netdata" + , "plugin_freebsd_modules" + , NULL + , "freebsd" + , NULL + , "NetData FreeBSD Plugin Modules Durations" + , "milliseconds/run" + , "netdata" + , "stats" + , 132001 + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); + + for(i = 0 ; freebsd_modules[i].name ;i++) { + struct freebsd_module *pm = &freebsd_modules[i]; + if(unlikely(!pm->enabled)) continue; + + pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + } + } + else rrdset_next(st); + + for(i = 0 ; freebsd_modules[i].name ;i++) { + struct freebsd_module *pm = &freebsd_modules[i]; + if(unlikely(!pm->enabled)) continue; + + rrddim_set_by_pointer(st, pm->rd, pm->duration); + } + rrdset_done(st); + + global_statistics_charts(); + registry_statistics(); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/freebsd.plugin/plugin_freebsd.h b/collectors/freebsd.plugin/plugin_freebsd.h new file mode 100644 index 000000000..ab46080be --- /dev/null +++ b/collectors/freebsd.plugin/plugin_freebsd.h @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGIN_FREEBSD_H +#define NETDATA_PLUGIN_FREEBSD_H 1 + +#include "daemon/common.h" + +#if (TARGET_OS == OS_FREEBSD) + +#define NETDATA_PLUGIN_HOOK_FREEBSD \ + { \ + .name = "PLUGIN[freebsd]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "freebsd", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = freebsd_main \ + }, + + +#include + +#define KILO_FACTOR 1024 +#define MEGA_FACTOR 1048576 // 1024 * 1024 +#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024 + +#define MAX_INT_DIGITS 10 // maximum number of digits for int + +void *freebsd_main(void *ptr); + +extern int freebsd_plugin_init(); + +extern int do_vm_loadavg(int update_every, usec_t dt); +extern int do_vm_vmtotal(int update_every, usec_t dt); +extern int do_kern_cp_time(int update_every, usec_t dt); +extern int do_kern_cp_times(int update_every, usec_t dt); +extern int do_dev_cpu_temperature(int update_every, usec_t dt); +extern int do_dev_cpu_0_freq(int update_every, usec_t dt); +extern int do_hw_intcnt(int update_every, usec_t dt); +extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt); +extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt); +extern int do_vm_stats_sys_v_swtch(int update_every, usec_t dt); +extern int do_vm_stats_sys_v_forks(int update_every, usec_t dt); +extern int do_vm_swap_info(int update_every, usec_t dt); +extern int do_system_ram(int update_every, usec_t dt); +extern int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt); +extern int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt); +extern int do_kern_ipc_sem(int update_every, usec_t dt); +extern int do_kern_ipc_shm(int update_every, usec_t dt); +extern int do_kern_ipc_msq(int update_every, usec_t dt); +extern int do_uptime(int update_every, usec_t dt); +extern int do_net_isr(int update_every, usec_t dt); +extern int do_net_inet_tcp_states(int update_every, usec_t dt); +extern int do_net_inet_tcp_stats(int update_every, usec_t dt); +extern int do_net_inet_udp_stats(int update_every, usec_t dt); +extern int do_net_inet_icmp_stats(int update_every, usec_t dt); +extern int do_net_inet_ip_stats(int update_every, usec_t dt); +extern int do_net_inet6_ip6_stats(int update_every, usec_t dt); +extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt); +extern int do_getifaddrs(int update_every, usec_t dt); +extern int do_getmntinfo(int update_every, usec_t 
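NETDATA_PLUGIN_HOOK_FREEBSD above expands to a struct initializer, trailing comma included, that the daemon can splice into its static-thread table when TARGET_OS is FreeBSD, and to nothing on every other platform. The mechanism reduced to essentials (the table layout here is illustrative, not netdata's actual struct):

    #include <stdio.h>

    struct static_thread { const char *name; void *(*start_routine)(void *); };

    static void *freebsd_main_stub(void *ptr) { return ptr; }

    /* on FreeBSD the hook contributes one table entry ... */
    #define PLUGIN_HOOK_FREEBSD { "PLUGIN[freebsd]", freebsd_main_stub },
    /* ... elsewhere it would be defined empty:
     * #define PLUGIN_HOOK_FREEBSD */

    static struct static_thread threads[] = {
        PLUGIN_HOOK_FREEBSD
        { NULL, NULL }           /* terminator */
    };

    int main(void) {
        int i;
        for (i = 0; threads[i].name; i++)
            printf("registered: %s\n", threads[i].name);
        return 0;
    }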
dt); +extern int do_kern_devstat(int update_every, usec_t dt); +extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt); +extern int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt); +extern int do_ipfw(int update_every, usec_t dt); + +#else // (TARGET_OS == OS_FREEBSD) + +#define NETDATA_PLUGIN_HOOK_FREEBSD + +#endif // (TARGET_OS == OS_FREEBSD) + +#endif /* NETDATA_PLUGIN_FREEBSD_H */ diff --git a/collectors/freeipmi.plugin/Makefile.am b/collectors/freeipmi.plugin/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/collectors/freeipmi.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/freeipmi.plugin/Makefile.in b/collectors/freeipmi.plugin/Makefile.in new file mode 100644 index 000000000..54a0035c6 --- /dev/null +++ b/collectors/freeipmi.plugin/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/freeipmi.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md new file mode 100644 index 000000000..f7c5cc148 --- /dev/null +++ b/collectors/freeipmi.plugin/README.md @@ -0,0 +1,180 @@ +netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin. + +> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. The IPMI specification defines a set of interfaces for platform management and is implemented by a number vendors for system management. 
The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+
+## compile `freeipmi.plugin`
+
+1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL-based systems) using the package manager of your system.
+
+2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`.
+
+Keep in mind IPMI requires root access, so the plugin is setuid to root.
+
+If you just installed the required IPMI tools, please run the command `ipmimonitoring` at least once and verify it returns sensor information. This command initialises the IPMI configuration, so that the netdata plugin will be able to work.
+
+## netdata use
+
+The plugin creates (up to) 8 charts, based on the information collected from IPMI:
+
+1. number of sensors by state
+2. number of events in SEL
+3. Temperatures (Celsius)
+4. Temperatures (Fahrenheit)
+5. Voltages
+6. Currents
+7. Power
+8. Fans
+
+It also adds 2 alarms:
+
+1. Sensors in non-nominal state (i.e. warning and critical)
+2. SEL is non-empty
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23674138/88926a20-037d-11e7-89c0-20e74ee10cd1.png)
+
+The plugin does a speed test when it starts, to find out how long the IPMI processor needs to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.
+
+## `freeipmi.plugin` configuration
+
+The plugin supports a few options. To see them, run:
+
+```sh
+# /usr/libexec/netdata/plugins.d/freeipmi.plugin -h
+
+ netdata freeipmi.plugin 1.8.0-546-g72ce5d6b_rolling
+ Copyright (C) 2016-2017 Costa Tsaousis
+ Released under GNU General Public License v3 or later.
+ All rights reserved.
+
+ This program is a data collector plugin for netdata.
+
+ Available command line options:
+
+  SECONDS                 data collection frequency
+                          minimum: 5
+
+  debug                   enable verbose output
+                          default: disabled
+
+  sel
+  no-sel                  enable/disable SEL collection
+                          default: enabled
+
+  hostname HOST
+  username USER
+  password PASS           connect to remote IPMI host
+                          default: local IPMI processor
+
+  sdr-cache-dir PATH      directory for SDR cache files
+                          default: /tmp
+
+  sensor-config-file FILE filename to read sensor configuration
+                          default: system default
+
+  ignore N1,N2,N3,...     sensor IDs to ignore
+                          default: none
+
+  -v
+  -V
+  version                 print version and exit
+
+ Linux kernel module for IPMI is CPU hungry.
+ On Linux run this to lower kipmiN CPU utilization:
+ # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+
+ or create: /etc/modprobe.d/ipmi.conf with these contents:
+ options ipmi_si kipmid_max_busy_us=10
+
+ For more information:
+ https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin
+
+```
+
+You can set these options in `/etc/netdata/netdata.conf`, in this section:
+
+```
+[plugin:freeipmi]
+    update every = 5
+    command options =
+```
+
+Append the settings you need to `command options =`. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry, so collecting once every 5 seconds is quite acceptable.
+
+## ignoring specific sensors
+
+Specific sensor IDs can be excluded from the freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However, this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`).
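+
+For reference, such an exclusion in `/etc/freeipmi/freeipmi.conf` looks roughly like the sketch below (illustrative only; check `freeipmi.conf(5)` for the exact value syntax). Remember, it affects the freeipmi command-line tools, not this plugin:
+
+```
+ipmi-sensors-exclude-record-ids 2 3 4
+```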
+
+So, `freeipmi.plugin` supports the option `ignore` that accepts a comma-separated list of sensor IDs to ignore. To configure it, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugin:freeipmi]
+    command options = ignore 1,2,3,4,...
+```
+
+To find the IDs to ignore, run the command `ipmimonitoring`. The IDs you need are in the first column:
+
+```
+ID | Name         | Type                     | State   | Reading | Units | Event
+1  | Ambient Temp | Temperature              | Nominal | 26.00   | C     | 'OK'
+2  | Altitude     | Other Units Based Sensor | Nominal | 480.00  | ft    | 'OK'
+3  | Avg Power    | Current                  | Nominal | 100.00  | W     | 'OK'
+4  | Planar 3.3V  | Voltage                  | Nominal | 3.29    | V     | 'OK'
+5  | Planar 5V    | Voltage                  | Nominal | 4.90    | V     | 'OK'
+6  | Planar 12V   | Voltage                  | Nominal | 11.99   | V     | 'OK'
+7  | Planar VBAT  | Voltage                  | Nominal | 2.95    | V     | 'OK'
+8  | Fan 1A Tach  | Fan                      | Nominal | 3132.00 | RPM   | 'OK'
+9  | Fan 1B Tach  | Fan                      | Nominal | 2150.00 | RPM   | 'OK'
+10 | Fan 2A Tach  | Fan                      | Nominal | 2494.00 | RPM   | 'OK'
+11 | Fan 2B Tach  | Fan                      | Nominal | 1825.00 | RPM   | 'OK'
+12 | Fan 3A Tach  | Fan                      | Nominal | 3538.00 | RPM   | 'OK'
+13 | Fan 3B Tach  | Fan                      | Nominal | 2625.00 | RPM   | 'OK'
+14 | Fan 1        | Entity Presence          | Nominal | N/A     | N/A   | 'Entity Present'
+15 | Fan 2        | Entity Presence          | Nominal | N/A     | N/A   | 'Entity Present'
+...
+```
+
+## debugging
+
+You can run the plugin by hand:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/freeipmi.plugin 5 debug
+```
+
+You will get verbose output on what the plugin does.
+
+## kipmi0 CPU usage
+
+There have been reports that `kipmi` shows increased CPU usage when IPMI is queried.
+
+[IBM has given a few explanations](http://www-01.ibm.com/support/docview.wss?uid=nas7d580df3d15874988862575fa0050f604).
+
+Also check [this stackexchange post](http://unix.stackexchange.com/questions/74900/kipmi0-eating-up-to-99-8-cpu-on-centos-6-4).
+
+To lower the system's CPU consumption, you can issue this command:
+
+```sh
+echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+```
+
+You can make this setting permanent by creating the file `/etc/modprobe.d/ipmi.conf` with this content:
+
+```sh
+# prevent kipmi from consuming 100% CPU
+options ipmi_si kipmid_max_busy_us=10
+```
+
+This instructs the kernel IPMI module to pause for a tick between IPMI checks. Querying IPMI will be a lot slower now (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick).
+
+If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugins]
+    freeipmi = no
+```
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
new file mode 100644
index 000000000..a1cff3af0
--- /dev/null
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -0,0 +1,1760 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * netdata freeipmi.plugin
+ * Copyright (C) 2017 Costa Tsaousis
+ * GPL v3+
+ *
+ * Based on:
+ * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ *
+ * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2006-2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Albert Chu
+ * UCRL-CODE-222073
+ */
+
+#include "../../libnetdata/libnetdata.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <ctype.h>
+#include <assert.h>
+#include <errno.h>
+#include <unistd.h>
+
+#ifdef HAVE_FREEIPMI
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+    exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+    (void)variable;
+    (void)hash;
+    (void)rc;
+    (void)result;
+    return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+#include <ipmi_monitoring.h>
+#include <ipmi_monitoring_bitmasks.h>
+
+/* Communication Configuration - Initialize accordingly */
+
+/* Hostname, NULL for In-band communication, non-null for a hostname */
+char *hostname = NULL;
+
+/* In-band Communication Configuration */
+int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS; /* or -1 for default */
+int disable_auto_probe = 0;        /* probe for in-band device */
+unsigned int driver_address = 0;   /* not used if probing */
+unsigned int register_spacing = 0; /* not used if probing */
+char *driver_device = NULL;        /* not used if probing */
+
+/* Out-of-band Communication Configuration */
+int protocol_version = -1; //IPMI_MONITORING_PROTOCOL_VERSION_1_5; /* or -1 for default */
+char *username = "foousername";
+char *password = "foopassword";
+unsigned char *ipmi_k_g = NULL;
+unsigned int ipmi_k_g_len = 0;
+int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER; /* or -1 for default */
+int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5; /* or -1 for default */
+int cipher_suite_id = 0;        /* or -1 for default */
+int session_timeout = 0;        /* 0 for default */
+int retransmission_timeout = 0; /* 0 for default */
+
+/* Workarounds - specify workaround flags if necessary */
+unsigned int workaround_flags = 0;
+
+/* Initialize w/ record id numbers to only monitor specific record ids */
+unsigned int record_ids[] = {0};
+unsigned int record_ids_length = 0;
+
+/* Initialize w/ sensor types to only monitor specific sensor types
+ * see ipmi_monitoring.h sensor types list.
+ */
+unsigned int sensor_types[] = {0};
+unsigned int sensor_types_length = 0;
+
+/* Set to an appropriate alternate if desired */
+char *sdr_cache_directory = "/tmp";
+char *sensor_config_file = NULL;
+
+/* Set to 1 or 0 to enable these sensor reading flags
+ * - See ipmi_monitoring.h for descriptions of these flags.
+ */
+int reread_sdr_cache = 0;
+int ignore_non_interpretable_sensors = 1;
+int bridge_sensors = 0;
+int interpret_oem_data = 0;
+int shared_sensors = 0;
+int discrete_reading = 0;
+int ignore_scanning_disabled = 0;
+int assume_bmc_owner = 0;
+int entity_sensor_names = 0;
+
+/* Initialization flags
+ *
+ * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or
+ * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging
+ * information.
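+ *
+ * For example, to enable both debug flags you could set:
+ *   ipmimonitoring_init_flags = IPMI_MONITORING_FLAGS_DEBUG | IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS;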
+ */ +unsigned int ipmimonitoring_init_flags = 0; + +int errnum; + +// ---------------------------------------------------------------------------- +// SEL only variables + +/* Initialize w/ date range to only monitoring specific date range */ +char *date_begin = NULL; /* use MM/DD/YYYY format */ +char *date_end = NULL; /* use MM/DD/YYYY format */ + +int assume_system_event_record = 0; + +char *sel_config_file = NULL; + + +// ---------------------------------------------------------------------------- +// functions common to sensors and SEL + +static void +_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config) +{ + assert (ipmi_config); + + ipmi_config->driver_type = driver_type; + ipmi_config->disable_auto_probe = disable_auto_probe; + ipmi_config->driver_address = driver_address; + ipmi_config->register_spacing = register_spacing; + ipmi_config->driver_device = driver_device; + + ipmi_config->protocol_version = protocol_version; + ipmi_config->username = username; + ipmi_config->password = password; + ipmi_config->k_g = ipmi_k_g; + ipmi_config->k_g_len = ipmi_k_g_len; + ipmi_config->privilege_level = privilege_level; + ipmi_config->authentication_type = authentication_type; + ipmi_config->cipher_suite_id = cipher_suite_id; + ipmi_config->session_timeout_len = session_timeout; + ipmi_config->retransmission_timeout_len = retransmission_timeout; + + ipmi_config->workaround_flags = workaround_flags; +} + +#ifdef NETDATA_COMMENTED +static const char * +_get_sensor_type_string (int sensor_type) +{ + switch (sensor_type) + { + case IPMI_MONITORING_SENSOR_TYPE_RESERVED: + return ("Reserved"); + case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE: + return ("Temperature"); + case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE: + return ("Voltage"); + case IPMI_MONITORING_SENSOR_TYPE_CURRENT: + return ("Current"); + case IPMI_MONITORING_SENSOR_TYPE_FAN: + return ("Fan"); + case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY: + return ("Physical Security"); + case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT: + return ("Platform Security Violation Attempt"); + case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR: + return ("Processor"); + case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY: + return ("Power Supply"); + case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT: + return ("Power Unit"); + case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE: + return ("Cooling Device"); + case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR: + return ("Other Units Based Sensor"); + case IPMI_MONITORING_SENSOR_TYPE_MEMORY: + return ("Memory"); + case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT: + return ("Drive Slot"); + case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE: + return ("POST Memory Resize"); + case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS: + return ("System Firmware Progress"); + case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED: + return ("Event Logging Disabled"); + case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1: + return ("Watchdog 1"); + case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT: + return ("System Event"); + case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT: + return ("Critical Interrupt"); + case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH: + return ("Button/Switch"); + case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD: + return ("Module/Board"); + case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR: + return ("Microcontroller/Coprocessor"); + case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD: + return ("Add In Card"); + case IPMI_MONITORING_SENSOR_TYPE_CHASSIS: + return ("Chassis"); + 
case IPMI_MONITORING_SENSOR_TYPE_CHIP_SET: + return ("Chip Set"); + case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU: + return ("Other Fru"); + case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT: + return ("Cable/Interconnect"); + case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR: + return ("Terminator"); + case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED: + return ("System Boot Initiated"); + case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR: + return ("Boot Error"); + case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT: + return ("OS Boot"); + case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP: + return ("OS Critical Stop"); + case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR: + return ("Slot/Connector"); + case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE: + return ("System ACPI Power State"); + case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2: + return ("Watchdog 2"); + case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT: + return ("Platform Alert"); + case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE: + return ("Entity Presence"); + case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC: + return ("Monitor ASIC/IC"); + case IPMI_MONITORING_SENSOR_TYPE_LAN: + return ("LAN"); + case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH: + return ("Management Subsystem Health"); + case IPMI_MONITORING_SENSOR_TYPE_BATTERY: + return ("Battery"); + case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT: + return ("Session Audit"); + case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE: + return ("Version Change"); + case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE: + return ("FRU State"); + } + + return ("Unrecognized"); +} +#endif // NETDATA_COMMENTED + + +// ---------------------------------------------------------------------------- +// BEGIN NETDATA CODE + +static int debug = 0; + +static int netdata_update_every = 5; // this is the minimum update frequency +static int netdata_priority = 90000; +static int netdata_do_sel = 1; + +static size_t netdata_sensors_updated = 0; +static size_t netdata_sensors_collected = 0; +static size_t netdata_sel_events = 0; +static size_t netdata_sensors_states_nominal = 0; +static size_t netdata_sensors_states_warning = 0; +static size_t netdata_sensors_states_critical = 0; + +struct sensor { + int record_id; + int sensor_number; + int sensor_type; + int sensor_state; + int sensor_units; + char *sensor_name; + + int sensor_reading_type; + union { + uint8_t bool_value; + uint32_t uint32_value; + double double_value; + } sensor_reading; + + int sent; + int ignore; + int exposed; + int updated; + struct sensor *next; +} *sensors_root = NULL; + +static void netdata_mark_as_not_updated() { + struct sensor *sn; + for(sn = sensors_root; sn ;sn = sn->next) + sn->updated = sn->sent = 0; + + netdata_sensors_updated = 0; + netdata_sensors_collected = 0; + netdata_sel_events = 0; + + netdata_sensors_states_nominal = 0; + netdata_sensors_states_warning = 0; + netdata_sensors_states_critical = 0; +} + +static void send_chart_to_netdata_for_units(int units) { + struct sensor *sn; + + switch(units) { + case IPMI_MONITORING_SENSOR_UNITS_CELSIUS: + printf("CHART ipmi.temperatures_c '' 'System Celcius Temperatures read by IPMI' 'Celcius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n" + , netdata_priority + 10 + , netdata_update_every + ); + break; + + case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT: + printf("CHART ipmi.temperatures_f '' 'System Fahrenheit Temperatures read by IPMI' 'Fahrenheit' 'temperatures' 'ipmi.temperatures_f' 'line' %d %d\n" + , netdata_priority + 11 + , netdata_update_every + ); + break; + + case 
IPMI_MONITORING_SENSOR_UNITS_VOLTS: + printf("CHART ipmi.volts '' 'System Voltages read by IPMI' 'Volts' 'voltages' 'ipmi.voltages' 'line' %d %d\n" + , netdata_priority + 12 + , netdata_update_every + ); + break; + + case IPMI_MONITORING_SENSOR_UNITS_AMPS: + printf("CHART ipmi.amps '' 'System Current read by IPMI' 'Amps' 'current' 'ipmi.amps' 'line' %d %d\n" + , netdata_priority + 13 + , netdata_update_every + ); + break; + + case IPMI_MONITORING_SENSOR_UNITS_RPM: + printf("CHART ipmi.rpm '' 'System Fans read by IPMI' 'RPM' 'fans' 'ipmi.rpm' 'line' %d %d\n" + , netdata_priority + 14 + , netdata_update_every + ); + break; + + case IPMI_MONITORING_SENSOR_UNITS_WATTS: + printf("CHART ipmi.watts '' 'System Power read by IPMI' 'Watts' 'power' 'ipmi.watts' 'line' %d %d\n" + , netdata_priority + 5 + , netdata_update_every + ); + break; + + case IPMI_MONITORING_SENSOR_UNITS_PERCENT: + printf("CHART ipmi.percent '' 'System Metrics read by IPMI' '%%' 'other' 'ipmi.percent' 'line' %d %d\n" + , netdata_priority + 15 + , netdata_update_every + ); + break; + + default: + for(sn = sensors_root; sn; sn = sn->next) + if(sn->sensor_units == units) + sn->ignore = 1; + return; + } + + for(sn = sensors_root; sn; sn = sn->next) { + if(sn->sensor_units == units && sn->updated && !sn->ignore) { + sn->exposed = 1; + + switch(sn->sensor_reading_type) { + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: + printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1\n" + , sn->sensor_number + , sn->record_id + , sn->sensor_reading_type + , sn->sensor_name + , sn->sensor_number + ); + break; + + case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: + printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1000\n" + , sn->sensor_number + , sn->record_id + , sn->sensor_reading_type + , sn->sensor_name + , sn->sensor_number + ); + break; + + default: + sn->ignore = 1; + break; + } + } + } +} + +static void send_metrics_to_netdata_for_units(int units) { + struct sensor *sn; + + switch(units) { + case IPMI_MONITORING_SENSOR_UNITS_CELSIUS: + printf("BEGIN ipmi.temperatures_c\n"); + break; + + case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT: + printf("BEGIN ipmi.temperatures_f\n"); + break; + + case IPMI_MONITORING_SENSOR_UNITS_VOLTS: + printf("BEGIN ipmi.volts\n"); + break; + + case IPMI_MONITORING_SENSOR_UNITS_AMPS: + printf("BEGIN ipmi.amps\n"); + break; + + case IPMI_MONITORING_SENSOR_UNITS_RPM: + printf("BEGIN ipmi.rpm\n"); + break; + + case IPMI_MONITORING_SENSOR_UNITS_WATTS: + printf("BEGIN ipmi.watts\n"); + break; + + case IPMI_MONITORING_SENSOR_UNITS_PERCENT: + printf("BEGIN ipmi.percent\n"); + break; + + default: + for(sn = sensors_root; sn; sn = sn->next) + if(sn->sensor_units == units) + sn->ignore = 1; + return; + } + + for(sn = sensors_root; sn; sn = sn->next) { + if(sn->sensor_units == units && sn->updated && !sn->sent && !sn->ignore) { + netdata_sensors_updated++; + + sn->sent = 1; + + switch(sn->sensor_reading_type) { + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: + printf("SET i%d_n%d_r%d = %u\n" + , sn->sensor_number + , sn->record_id + , sn->sensor_reading_type + , sn->sensor_reading.bool_value + ); + break; + + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: + printf("SET i%d_n%d_r%d = %u\n" + , sn->sensor_number + , sn->record_id + , sn->sensor_reading_type + , sn->sensor_reading.uint32_value + ); + break; + + case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: + printf("SET i%d_n%d_r%d = %lld\n" + , sn->sensor_number 
+ , sn->record_id + , sn->sensor_reading_type + , (long long int)(sn->sensor_reading.double_value * 1000) + ); + break; + + default: + sn->ignore = 1; + break; + } + } + } + + printf("END\n"); +} + +static void send_metrics_to_netdata() { + static int sel_chart_generated = 0, sensors_states_chart_generated = 0; + struct sensor *sn; + + if(netdata_do_sel && !sel_chart_generated) { + sel_chart_generated = 1; + printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d\n" + , netdata_priority + 2 + , netdata_update_every + ); + printf("DIMENSION events '' absolute 1 1\n"); + } + + if(!sensors_states_chart_generated) { + sensors_states_chart_generated = 1; + printf("CHART ipmi.sensors_states '' 'IPMI Sensors State' 'sensors' 'states' ipmi.sensors_states line %d %d\n" + , netdata_priority + 1 + , netdata_update_every + ); + printf("DIMENSION nominal '' absolute 1 1\n"); + printf("DIMENSION critical '' absolute 1 1\n"); + printf("DIMENSION warning '' absolute 1 1\n"); + } + + // generate the CHART/DIMENSION lines, if we have to + for(sn = sensors_root; sn; sn = sn->next) + if(sn->updated && !sn->exposed && !sn->ignore) + send_chart_to_netdata_for_units(sn->sensor_units); + + if(netdata_do_sel) { + printf( + "BEGIN ipmi.events\n" + "SET events = %zu\n" + "END\n" + , netdata_sel_events + ); + } + + printf( + "BEGIN ipmi.sensors_states\n" + "SET nominal = %zu\n" + "SET warning = %zu\n" + "SET critical = %zu\n" + "END\n" + , netdata_sensors_states_nominal + , netdata_sensors_states_warning + , netdata_sensors_states_critical + ); + + // send metrics to netdata + for(sn = sensors_root; sn; sn = sn->next) + if(sn->updated && sn->exposed && !sn->sent && !sn->ignore) + send_metrics_to_netdata_for_units(sn->sensor_units); + +} + +static int *excluded_record_ids = NULL; +size_t excluded_record_ids_length = 0; + +static void excluded_record_ids_parse(const char *s) { + if(!s) return; + + while(*s) { + while(*s && !isdigit(*s)) s++; + + if(isdigit(*s)) { + char *e; + unsigned long n = strtoul(s, &e, 10); + s = e; + + if(n != 0) { + excluded_record_ids = realloc(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int)); + if(!excluded_record_ids) { + fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting."); + exit(1); + } + excluded_record_ids[excluded_record_ids_length++] = (int)n; + } + } + } + + if(debug) { + fprintf(stderr, "freeipmi.plugin: excluded record ids:"); + size_t i; + for(i = 0; i < excluded_record_ids_length; i++) { + fprintf(stderr, " %d", excluded_record_ids[i]); + } + fprintf(stderr, "\n"); + } +} + +static int *excluded_status_record_ids = NULL; +size_t excluded_status_record_ids_length = 0; + +static void excluded_status_record_ids_parse(const char *s) { + if(!s) return; + + while(*s) { + while(*s && !isdigit(*s)) s++; + + if(isdigit(*s)) { + char *e; + unsigned long n = strtoul(s, &e, 10); + s = e; + + if(n != 0) { + excluded_status_record_ids = realloc(excluded_status_record_ids, (excluded_status_record_ids_length + 1) * sizeof(int)); + if(!excluded_status_record_ids) { + fprintf(stderr, "freeipmi.plugin: failed to allocate memory. 
Exiting."); + exit(1); + } + excluded_status_record_ids[excluded_status_record_ids_length++] = (int)n; + } + } + } + + if(debug) { + fprintf(stderr, "freeipmi.plugin: excluded status record ids:"); + size_t i; + for(i = 0; i < excluded_status_record_ids_length; i++) { + fprintf(stderr, " %d", excluded_status_record_ids[i]); + } + fprintf(stderr, "\n"); + } +} + + +static int excluded_record_ids_check(int record_id) { + size_t i; + + for(i = 0; i < excluded_record_ids_length; i++) { + if(excluded_record_ids[i] == record_id) + return 1; + } + + return 0; +} + +static int excluded_status_record_ids_check(int record_id) { + size_t i; + + for(i = 0; i < excluded_status_record_ids_length; i++) { + if(excluded_status_record_ids[i] == record_id) + return 1; + } + + return 0; +} + +static void netdata_get_sensor( + int record_id + , int sensor_number + , int sensor_type + , int sensor_state + , int sensor_units + , int sensor_reading_type + , char *sensor_name + , void *sensor_reading +) { + // find the sensor record + struct sensor *sn; + for(sn = sensors_root; sn ;sn = sn->next) + if( sn->record_id == record_id && + sn->sensor_number == sensor_number && + sn->sensor_reading_type == sensor_reading_type && + sn->sensor_units == sensor_units && + !strcmp(sn->sensor_name, sensor_name) + ) + break; + + if(!sn) { + // not found, create it + + // check if it is excluded + if(excluded_record_ids_check(record_id)) + return; + + sn = calloc(1, sizeof(struct sensor)); + if(!sn) { + fatal("cannot allocate %zu bytes of memory.", sizeof(struct sensor)); + } + + sn->record_id = record_id; + sn->sensor_number = sensor_number; + sn->sensor_type = sensor_type; + sn->sensor_state = sensor_state; + sn->sensor_units = sensor_units; + sn->sensor_reading_type = sensor_reading_type; + sn->sensor_name = strdup(sensor_name); + if(!sn->sensor_name) { + fatal("cannot allocate %zu bytes of memory.", strlen(sensor_name)); + } + + sn->next = sensors_root; + sensors_root = sn; + } + + switch(sensor_reading_type) { + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: + sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading); + sn->updated = 1; + netdata_sensors_collected++; + break; + + case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: + sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading); + sn->updated = 1; + netdata_sensors_collected++; + break; + + case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: + sn->sensor_reading.double_value = *((double *)sensor_reading); + sn->updated = 1; + netdata_sensors_collected++; + break; + + default: + sn->ignore = 1; + break; + } + + // check if it is excluded + if(excluded_status_record_ids_check(record_id)) + return; + + switch(sensor_state) { + case IPMI_MONITORING_STATE_NOMINAL: + netdata_sensors_states_nominal++; + break; + + case IPMI_MONITORING_STATE_WARNING: + netdata_sensors_states_warning++; + break; + + case IPMI_MONITORING_STATE_CRITICAL: + netdata_sensors_states_critical++; + break; + + default: + break; + } +} + +static void netdata_get_sel( + int record_id + , int record_type_class + , int sel_state +) { + (void)record_id; + (void)record_type_class; + (void)sel_state; + + netdata_sel_events++; +} + + +// END NETDATA CODE +// ---------------------------------------------------------------------------- + + +static int +_ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config) +{ + ipmi_monitoring_ctx_t ctx = NULL; + unsigned int sensor_reading_flags = 0; + int i; + int sensor_count; + int rv = -1; + + if (!(ctx = 
ipmi_monitoring_ctx_create ())) { + error("ipmi_monitoring_ctx_create()"); + goto cleanup; + } + + if (sdr_cache_directory) + { + if (ipmi_monitoring_ctx_sdr_cache_directory (ctx, + sdr_cache_directory) < 0) + { + error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + + /* Must call otherwise only default interpretations ever used */ + if (sensor_config_file) + { + if (ipmi_monitoring_ctx_sensor_config_file (ctx, + sensor_config_file) < 0) + { + error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + else + { + if (ipmi_monitoring_ctx_sensor_config_file (ctx, NULL) < 0) + { + error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + + if (reread_sdr_cache) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE; + + if (ignore_non_interpretable_sensors) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS; + + if (bridge_sensors) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS; + + if (interpret_oem_data) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA; + + if (shared_sensors) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS; + + if (discrete_reading) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING; + + if (ignore_scanning_disabled) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED; + + if (assume_bmc_owner) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER; + +#ifdef IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES + if (entity_sensor_names) + sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES; +#endif // IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES + + if (!record_ids_length && !sensor_types_length) + { + if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx, + hostname, + ipmi_config, + sensor_reading_flags, + NULL, + 0, + NULL, + NULL)) < 0) + { + error( "ipmi_monitoring_sensor_readings_by_record_id(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + else if (record_ids_length) + { + if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx, + hostname, + ipmi_config, + sensor_reading_flags, + record_ids, + record_ids_length, + NULL, + NULL)) < 0) + { + error( "ipmi_monitoring_sensor_readings_by_record_id(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + else + { + if ((sensor_count = ipmi_monitoring_sensor_readings_by_sensor_type (ctx, + hostname, + ipmi_config, + sensor_reading_flags, + sensor_types, + sensor_types_length, + NULL, + NULL)) < 0) + { + error( "ipmi_monitoring_sensor_readings_by_sensor_type(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + +#ifdef NETDATA_COMMENTED + printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n", + "Record ID", + "Sensor Name", + "Sensor Number", + "Sensor Type", + "Sensor State", + "Sensor Reading", + "Sensor Units", + "Sensor Event/Reading Type Code", + "Sensor Event Bitmask", + "Sensor Event String"); +#endif // NETDATA_COMMENTED + + for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx)) + { + int record_id, sensor_number, sensor_type, sensor_state, sensor_units, + sensor_reading_type; + +#ifdef NETDATA_COMMENTED + int 
sensor_bitmask_type, sensor_bitmask, event_reading_type_code; + char **sensor_bitmask_strings = NULL; + const char *sensor_type_str; + const char *sensor_state_str; +#endif // NETDATA_COMMENTED + + char *sensor_name = NULL; + void *sensor_reading; + + if ((record_id = ipmi_monitoring_sensor_read_record_id (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_record_id(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + if ((sensor_number = ipmi_monitoring_sensor_read_sensor_number (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_sensor_number(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + if ((sensor_type = ipmi_monitoring_sensor_read_sensor_type (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_sensor_type(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + if (!(sensor_name = ipmi_monitoring_sensor_read_sensor_name (ctx))) + { + error( "ipmi_monitoring_sensor_read_sensor_name(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + if ((sensor_state = ipmi_monitoring_sensor_read_sensor_state (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_sensor_state(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_sensor_units(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + +#ifdef NETDATA_COMMENTED + if ((sensor_bitmask_type = ipmi_monitoring_sensor_read_sensor_bitmask_type (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_sensor_bitmask_type(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + if ((sensor_bitmask = ipmi_monitoring_sensor_read_sensor_bitmask (ctx)) < 0) + { + error( + "ipmi_monitoring_sensor_read_sensor_bitmask(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx))) + { + error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } +#endif // NETDATA_COMMENTED + + if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_sensor_reading_type(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + sensor_reading = ipmi_monitoring_sensor_read_sensor_reading (ctx); + +#ifdef NETDATA_COMMENTED + if ((event_reading_type_code = ipmi_monitoring_sensor_read_event_reading_type_code (ctx)) < 0) + { + error( "ipmi_monitoring_sensor_read_event_reading_type_code(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } +#endif // NETDATA_COMMENTED + + netdata_get_sensor( + record_id + , sensor_number + , sensor_type + , sensor_state + , sensor_units + , sensor_reading_type + , sensor_name + , sensor_reading + ); + +#ifdef NETDATA_COMMENTED + if (!strlen (sensor_name)) + sensor_name = "N/A"; + + sensor_type_str = _get_sensor_type_string (sensor_type); + + printf ("%d, %s, %d, %s", + record_id, + sensor_name, + sensor_number, + sensor_type_str); + + if (sensor_state == IPMI_MONITORING_STATE_NOMINAL) + sensor_state_str = "Nominal"; + else if (sensor_state == IPMI_MONITORING_STATE_WARNING) + sensor_state_str = "Warning"; + else if (sensor_state == IPMI_MONITORING_STATE_CRITICAL) + sensor_state_str = "Critical"; + else + sensor_state_str = "N/A"; + + printf (", %s", sensor_state_str); + + if (sensor_reading) + { + const char *sensor_units_str; + + if (sensor_reading_type == 
IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL) + printf (", %s", + (*((uint8_t *)sensor_reading) ? "true" : "false")); + else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32) + printf (", %u", + *((uint32_t *)sensor_reading)); + else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE) + printf (", %.2f", + *((double *)sensor_reading)); + else + printf (", N/A"); + + if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_CELSIUS) + sensor_units_str = "C"; + else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT) + sensor_units_str = "F"; + else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_VOLTS) + sensor_units_str = "V"; + else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_AMPS) + sensor_units_str = "A"; + else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_RPM) + sensor_units_str = "RPM"; + else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_WATTS) + sensor_units_str = "W"; + else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_PERCENT) + sensor_units_str = "%"; + else + sensor_units_str = "N/A"; + + printf (", %s", sensor_units_str); + } + else + printf (", N/A, N/A"); + + printf (", %Xh", event_reading_type_code); + + /* It is possible you may want to monitor specific event + * conditions that may occur. If that is the case, you may want + * to check out what specific bitmask type and bitmask events + * occurred. See ipmi_monitoring_bitmasks.h for a list of + * bitmasks and types. + */ + + if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN) + printf (", %Xh", sensor_bitmask); + else + printf (", N/A"); + + if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN) + { + unsigned int i = 0; + + printf (","); + + while (sensor_bitmask_strings[i]) + { + printf (" "); + + printf ("'%s'", + sensor_bitmask_strings[i]); + + i++; + } + } + else + printf (", N/A"); + + printf ("\n"); +#endif // NETDATA_COMMENTED + } + + rv = 0; + cleanup: + if (ctx) + ipmi_monitoring_ctx_destroy (ctx); + return (rv); +} + + +static int +_ipmimonitoring_sel (struct ipmi_monitoring_ipmi_config *ipmi_config) +{ + ipmi_monitoring_ctx_t ctx = NULL; + unsigned int sel_flags = 0; + int i; + int sel_count; + int rv = -1; + + if (!(ctx = ipmi_monitoring_ctx_create ())) + { + error("ipmi_monitoring_ctx_create()"); + goto cleanup; + } + + if (sdr_cache_directory) + { + if (ipmi_monitoring_ctx_sdr_cache_directory (ctx, + sdr_cache_directory) < 0) + { + error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + + /* Must call otherwise only default interpretations ever used */ + if (sel_config_file) + { + if (ipmi_monitoring_ctx_sel_config_file (ctx, + sel_config_file) < 0) + { + error( "ipmi_monitoring_ctx_sel_config_file(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + else + { + if (ipmi_monitoring_ctx_sel_config_file (ctx, NULL) < 0) + { + error( "ipmi_monitoring_ctx_sel_config_file(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + } + + if (reread_sdr_cache) + sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE; + + if (interpret_oem_data) + sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA; + + if (assume_system_event_record) + sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD; + +#ifdef IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES + if (entity_sensor_names) + sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES; +#endif // IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES 
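+
+    /* Exactly one of the SEL queries below runs per pass: explicit record
+     * ids take precedence, then sensor types, then a date range; with none
+     * of them configured, all SEL records are read. */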
+
+    if (record_ids_length)
+    {
+        if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+                                                           hostname,
+                                                           ipmi_config,
+                                                           sel_flags,
+                                                           record_ids,
+                                                           record_ids_length,
+                                                           NULL,
+                                                           NULL)) < 0)
+        {
+            error( "ipmi_monitoring_sel_by_record_id(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+    }
+    else if (sensor_types_length)
+    {
+        if ((sel_count = ipmi_monitoring_sel_by_sensor_type (ctx,
+                                                             hostname,
+                                                             ipmi_config,
+                                                             sel_flags,
+                                                             sensor_types,
+                                                             sensor_types_length,
+                                                             NULL,
+                                                             NULL)) < 0)
+        {
+            error( "ipmi_monitoring_sel_by_sensor_type(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+    }
+    else if (date_begin
+             || date_end)
+    {
+        if ((sel_count = ipmi_monitoring_sel_by_date_range (ctx,
+                                                            hostname,
+                                                            ipmi_config,
+                                                            sel_flags,
+                                                            date_begin,
+                                                            date_end,
+                                                            NULL,
+                                                            NULL)) < 0)
+        {
+            error( "ipmi_monitoring_sel_by_date_range(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+    }
+    else
+    {
+        if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+                                                           hostname,
+                                                           ipmi_config,
+                                                           sel_flags,
+                                                           NULL,
+                                                           0,
+                                                           NULL,
+                                                           NULL)) < 0)
+        {
+            error( "ipmi_monitoring_sel_by_record_id(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+    }
+
+#ifdef NETDATA_COMMENTED
+    printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+            "Record ID",
+            "Record Type",
+            "SEL State",
+            "Timestamp",
+            "Sensor Name",
+            "Sensor Type",
+            "Event Direction",
+            "Event Type Code",
+            "Event Data",
+            "Event Offset",
+            "Event Offset String");
+#endif // NETDATA_COMMENTED
+
+    for (i = 0; i < sel_count; i++, ipmi_monitoring_sel_iterator_next (ctx))
+    {
+        int record_id, record_type, sel_state, record_type_class;
+#ifdef NETDATA_COMMENTED
+        int sensor_type, sensor_number, event_direction,
+            event_offset_type, event_offset, event_type_code, manufacturer_id;
+        unsigned int timestamp, event_data1, event_data2, event_data3;
+        char *event_offset_string = NULL;
+        const char *sensor_type_str;
+        const char *event_direction_str;
+        const char *sel_state_str;
+        char *sensor_name = NULL;
+        unsigned char oem_data[64];
+        int oem_data_len;
+        unsigned int j;
+#endif // NETDATA_COMMENTED
+
+        if ((record_id = ipmi_monitoring_sel_read_record_id (ctx)) < 0)
+        {
+            error( "ipmi_monitoring_sel_read_record_id(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+
+        if ((record_type = ipmi_monitoring_sel_read_record_type (ctx)) < 0)
+        {
+            error( "ipmi_monitoring_sel_read_record_type(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+
+        if ((record_type_class = ipmi_monitoring_sel_read_record_type_class (ctx)) < 0)
+        {
+            error( "ipmi_monitoring_sel_read_record_type_class(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+
+        if ((sel_state = ipmi_monitoring_sel_read_sel_state (ctx)) < 0)
+        {
+            error( "ipmi_monitoring_sel_read_sel_state(): %s",
+                    ipmi_monitoring_ctx_errormsg (ctx));
+            goto cleanup;
+        }
+
+        netdata_get_sel(
+                  record_id
+                , record_type_class
+                , sel_state
+        );
+
+#ifdef NETDATA_COMMENTED
+        if (sel_state == IPMI_MONITORING_STATE_NOMINAL)
+            sel_state_str = "Nominal";
+        else if (sel_state == IPMI_MONITORING_STATE_WARNING)
+            sel_state_str = "Warning";
+        else if (sel_state == IPMI_MONITORING_STATE_CRITICAL)
+            sel_state_str = "Critical";
+        else
+            sel_state_str = "N/A";
+
+        printf ("%d, %d, %s",
+                record_id,
+                record_type,
+                sel_state_str);
+
+        if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD
+            || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+        {
+
+            if (ipmi_monitoring_sel_read_timestamp (ctx, &timestamp) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_timestamp(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            /* XXX: This should be converted to a nice date output using
+             * your favorite timestamp -> string conversion functions.
+             */
+            printf (", %u", timestamp);
+        }
+        else
+            printf (", N/A");
+
+        if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD)
+        {
+            /* If you are integrating ipmimonitoring SEL into a monitoring application,
+             * you may wish to count the number of times a specific error occurred
+             * and report that to the monitoring application.
+             *
+             * In this particular case, you'll probably want to check out
+             * what sensor type each SEL event is reporting, the
+             * event offset type, and the specific event offset that occurred.
+             *
+             * See ipmi_monitoring_offsets.h for a list of event offsets
+             * and types.
+             */
+
+            if (!(sensor_name = ipmi_monitoring_sel_read_sensor_name (ctx)))
+            {
+                error( "ipmi_monitoring_sel_read_sensor_name(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if ((sensor_type = ipmi_monitoring_sel_read_sensor_type (ctx)) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_sensor_type(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if ((sensor_number = ipmi_monitoring_sel_read_sensor_number (ctx)) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_sensor_number(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if ((event_direction = ipmi_monitoring_sel_read_event_direction (ctx)) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_event_direction(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if ((event_type_code = ipmi_monitoring_sel_read_event_type_code (ctx)) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_event_type_code(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if (ipmi_monitoring_sel_read_event_data (ctx,
+                                                     &event_data1,
+                                                     &event_data2,
+                                                     &event_data3) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_event_data(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if ((event_offset_type = ipmi_monitoring_sel_read_event_offset_type (ctx)) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_event_offset_type(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if ((event_offset = ipmi_monitoring_sel_read_event_offset (ctx)) < 0)
+            {
+                error( "ipmi_monitoring_sel_read_event_offset(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if (!(event_offset_string = ipmi_monitoring_sel_read_event_offset_string (ctx)))
+            {
+                error( "ipmi_monitoring_sel_read_event_offset_string(): %s",
+                        ipmi_monitoring_ctx_errormsg (ctx));
+                goto cleanup;
+            }
+
+            if (!strlen (sensor_name))
+                sensor_name = "N/A";
+
+            sensor_type_str = _get_sensor_type_string (sensor_type);
+
+            if (event_direction == IPMI_MONITORING_SEL_EVENT_DIRECTION_ASSERTION)
+                event_direction_str = "Assertion";
+            else
+                event_direction_str = "Deassertion";
+
+            printf (", %s, %s, %d, %s, %Xh, %Xh-%Xh-%Xh",
+                    sensor_name,
+                    sensor_type_str,
+                    sensor_number,
+                    event_direction_str,
+                    event_type_code,
+                    event_data1,
+                    event_data2,
+                    event_data3);
+
+            if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+                printf (", %Xh", event_offset);
+            else
+                printf (", N/A");
+
+            if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+                printf (", %s", event_offset_string);
+            else
+                printf (", N/A");
+        }
+        else if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD
+                 || record_type_class
== IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_NON_TIMESTAMPED_OEM_RECORD) + { + if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD) + { + if ((manufacturer_id = ipmi_monitoring_sel_read_manufacturer_id (ctx)) < 0) + { + error( "ipmi_monitoring_sel_read_manufacturer_id(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + printf (", Manufacturer ID = %Xh", manufacturer_id); + } + + if ((oem_data_len = ipmi_monitoring_sel_read_oem_data (ctx, oem_data, 1024)) < 0) + { + error( "ipmi_monitoring_sel_read_oem_data(): %s", + ipmi_monitoring_ctx_errormsg (ctx)); + goto cleanup; + } + + printf (", OEM Data = "); + + for (j = 0; j < oem_data_len; j++) + printf ("%02Xh ", oem_data[j]); + } + else + printf (", N/A, N/A, N/A, N/A, N/A, N/A, N/A"); + + printf ("\n"); +#endif // NETDATA_COMMENTED + } + + rv = 0; + cleanup: + if (ctx) + ipmi_monitoring_ctx_destroy (ctx); + return (rv); +} + +// ---------------------------------------------------------------------------- +// MAIN PROGRAM FOR NETDATA PLUGIN + +int ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config) { + errno = 0; + + if (_ipmimonitoring_sensors(ipmi_config) < 0) return -1; + + if(netdata_do_sel) { + if(_ipmimonitoring_sel(ipmi_config) < 0) return -2; + } + + return 0; +} + +int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) { + int i, checks = 10; + unsigned long long total = 0; + + for(i = 0 ; i < checks ; i++) { + if(debug) fprintf(stderr, "freeipmi.plugin: checking data collection speed iteration %d of %d\n", i+1, checks); + + // measure the time a data collection needs + unsigned long long start = now_realtime_usec(); + if(ipmi_collect_data(ipmi_config) < 0) + fatal("freeipmi.plugin: data collection failed."); + + unsigned long long end = now_realtime_usec(); + + if(debug) fprintf(stderr, "freeipmi.plugin: data collection speed was %llu usec\n", end - start); + + // add it to our total + total += end - start; + + // wait the same time + // to avoid flooding the IPMI processor with requests + sleep_usec(end - start); + } + + // so, we assume it needed 2x the time + // we find the average in microseconds + // and we round-up to the closest second + + return (int)(( total * 2 / checks / 1000000 ) + 1); +} + +int main (int argc, char **argv) { + + // ------------------------------------------------------------------------ + // initialization of netdata plugin + + program_name = "freeipmi.plugin"; + + // disable syslog + error_log_syslog = 0; + + // set errors flood protection to 100 logs per hour + error_log_errors_per_period = 100; + error_log_throttle_period = 3600; + + + // ------------------------------------------------------------------------ + // parse command line parameters + + int i, freq = 0; + for(i = 1; i < argc ; i++) { + if(isdigit(*argv[i]) && !freq) { + int n = str2i(argv[i]); + if(n > 0 && n < 86400) { + freq = n; + continue; + } + } + else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) { + printf("freeipmi.plugin %s\n", VERSION); + exit(0); + } + else if(strcmp("debug", argv[i]) == 0) { + debug = 1; + continue; + } + else if(strcmp("sel", argv[i]) == 0) { + netdata_do_sel = 1; + continue; + } + else if(strcmp("no-sel", argv[i]) == 0) { + netdata_do_sel = 0; + continue; + } + else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) { + fprintf(stderr, + "\n" + " netdata freeipmi.plugin %s\n" + " Copyright 
(C) 2016-2017 Costa Tsaousis \n" + " Released under GNU General Public License v3 or later.\n" + " All rights reserved.\n" + "\n" + " This program is a data collector plugin for netdata.\n" + "\n" + " Available command line options:\n" + "\n" + " SECONDS data collection frequency\n" + " minimum: %d\n" + "\n" + " debug enable verbose output\n" + " default: disabled\n" + "\n" + " sel\n" + " no-sel enable/disable SEL collection\n" + " default: %s\n" + "\n" + " hostname HOST\n" + " username USER\n" + " password PASS connect to remote IPMI host\n" + " default: local IPMI processor\n" + "\n" + " sdr-cache-dir PATH directory for SDR cache files\n" + " default: %s\n" + "\n" + " sensor-config-file FILE file to read the sensor configuration from\n" + " default: %s\n" + "\n" + " ignore N1,N2,N3,... sensor IDs to ignore\n" + " default: none\n" + "\n" + " ignore-status N1,N2,N3,... sensor IDs whose status (nominal/warning/critical) to ignore\n" + " default: none\n" + "\n" + " -v\n" + " -V\n" + " version print version and exit\n" + "\n" + " The Linux kernel IPMI module is CPU hungry.\n" + " On Linux, run this to lower kipmiN CPU utilization:\n" + " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n" + "\n" + " or create: /etc/modprobe.d/ipmi.conf with these contents:\n" + " options ipmi_si kipmid_max_busy_us=10\n" + "\n" + " For more information:\n" + " https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin\n" + "\n" + , VERSION + , netdata_update_every + , netdata_do_sel?"enabled":"disabled" + , sdr_cache_directory?sdr_cache_directory:"system default" + , sensor_config_file?sensor_config_file:"system default" + ); + exit(1); + } + else if(i+1 < argc && strcmp("hostname", argv[i]) == 0) { + hostname = strdupz(argv[++i]); + char *s = argv[i]; + // mask it to be hidden from the process tree + while(*s) *s++ = 'x'; + if(debug) fprintf(stderr, "freeipmi.plugin: hostname set to '%s'\n", hostname); + continue; + } + else if(i+1 < argc && strcmp("username", argv[i]) == 0) { + username = strdupz(argv[++i]); + char *s = argv[i]; + // mask it to be hidden from the process tree + while(*s) *s++ = 'x'; + if(debug) fprintf(stderr, "freeipmi.plugin: username set to '%s'\n", username); + continue; + } + else if(i+1 < argc && strcmp("password", argv[i]) == 0) { + password = strdupz(argv[++i]); + char *s = argv[i]; + // mask it to be hidden from the process tree + while(*s) *s++ = 'x'; + if(debug) fprintf(stderr, "freeipmi.plugin: password set to '%s'\n", password); + continue; + } + else if(i+1 < argc && strcmp("sdr-cache-dir", argv[i]) == 0) { + sdr_cache_directory = argv[++i]; + if(debug) fprintf(stderr, "freeipmi.plugin: SDR cache directory set to '%s'\n", sdr_cache_directory); + continue; + } + else if(i+1 < argc && strcmp("sensor-config-file", argv[i]) == 0) { + sensor_config_file = argv[++i]; + if(debug) fprintf(stderr, "freeipmi.plugin: sensor config file set to '%s'\n", sensor_config_file); + continue; + } + else if(i+1 < argc && strcmp("ignore", argv[i]) == 0) { + excluded_record_ids_parse(argv[++i]); + continue; + } + else if(i+1 < argc && strcmp("ignore-status", argv[i]) == 0) { + excluded_status_record_ids_parse(argv[++i]); + continue; + } + + error("freeipmi.plugin: ignoring parameter '%s'", argv[i]); + } + + errno = 0; + + if(freq > netdata_update_every) + netdata_update_every = freq; + + else if(freq) + error("update frequency %d seconds is too small for IPMI.
Using %d.", freq, netdata_update_every); + + + // ------------------------------------------------------------------------ + // initialize IPMI + + struct ipmi_monitoring_ipmi_config ipmi_config; + + if(debug) fprintf(stderr, "freeipmi.plugin: calling _init_ipmi_config()\n"); + + _init_ipmi_config(&ipmi_config); + + if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_monitoring_init()\n"); + + if(ipmi_monitoring_init(ipmimonitoring_init_flags, &errnum) < 0) + fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(errnum)); + + if(debug) fprintf(stderr, "freeipmi.plugin: detecting IPMI minimum update frequency...\n"); + freq = ipmi_detect_speed_secs(&ipmi_config); + if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated to %d seconds.\n", freq); + + if(freq > netdata_update_every) { + info("enforcing minimum data collection frequency, calculated to %d seconds.", freq); + netdata_update_every = freq; + } + + + // ------------------------------------------------------------------------ + // the main loop + + if(debug) fprintf(stderr, "freeipmi.plugin: starting data collection\n"); + + time_t started_t = now_monotonic_sec(); + + size_t iteration = 0; + usec_t step = netdata_update_every * USEC_PER_SEC; + + heartbeat_t hb; + heartbeat_init(&hb); + for(iteration = 0; 1 ; iteration++) { + usec_t dt = heartbeat_next(&hb, step); + + if(debug && iteration) + fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n" + , iteration + , dt + , netdata_sensors_collected + , netdata_sensors_updated + ); + + netdata_mark_as_not_updated(); + + if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_collect_data()\n"); + if(ipmi_collect_data(&ipmi_config) < 0) + fatal("data collection failed."); + + if(debug) fprintf(stderr, "freeipmi.plugin: calling send_metrics_to_netdata()\n"); + send_metrics_to_netdata(); + fflush(stdout); + + // restart check (14400 seconds) + if(now_monotonic_sec() - started_t > 14400) exit(0); + } +} + +#else // !HAVE_FREEIPMI + +int main(int argc, char **argv) { + fatal("freeipmi.plugin is not compiled."); +} + +#endif // !HAVE_FREEIPMI diff --git a/collectors/idlejitter.plugin/Makefile.am b/collectors/idlejitter.plugin/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/collectors/idlejitter.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/idlejitter.plugin/Makefile.in b/collectors/idlejitter.plugin/Makefile.in new file mode 100644 index 000000000..973a3bef7 --- /dev/null +++ b/collectors/idlejitter.plugin/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/idlejitter.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ 
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = 
@localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md new file mode 100644 index 000000000..3c2080536 --- /dev/null +++ b/collectors/idlejitter.plugin/README.md @@ -0,0 +1,13 @@ +## idlejitter.plugin + +It works like this: + +A thread is spawned that requests to sleep for 20000 microseconds (20ms). +When the system wakes it up, it measures how many microseconds have passed. +The difference between the requested and the actual duration of the sleep is the idle jitter (see the sketch below).
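+
+For illustration only, a minimal sketch of a single measurement using plain
+POSIX calls (the plugin itself uses netdata's internal helpers such as
+sleep_usec() and now_monotonic_timeval(), but the idea is the same):
+
+```c
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+static long long now_usec(void) {
+    struct timespec ts;
+    clock_gettime(CLOCK_MONOTONIC, &ts);
+    return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
+}
+
+int main(void) {
+    const long long requested = 20000;  /* 20ms, as above */
+    long long before = now_usec();
+    usleep(requested);                  /* request the sleep */
+    long long actual = now_usec() - before;
+    /* the idle jitter: how much longer the sleep actually took */
+    printf("idle jitter: %lld usec\n", actual - requested);
+    return 0;
+}
+```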
+This is done at most 50 times per second, to ensure we have a good average. + +This number is useful: + + 1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways). + 2. in cloud infrastructure, that can pause the VM or container for a small duration to perform operations at the host. diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c new file mode 100644 index 000000000..3fe3b0306 --- /dev/null +++ b/collectors/idlejitter.plugin/plugin_idlejitter.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_idlejitter.h" + +#define CPU_IDLEJITTER_SLEEP_TIME_MS 20 + +static void cpuidlejitter_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *cpuidlejitter_main(void *ptr) { + netdata_thread_cleanup_push(cpuidlejitter_main_cleanup, ptr); + + usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS; + if(sleep_ut <= 0) { + // invalid value in the config - restore the default + config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS); + sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS; + } + + RRDSET *st = rrdset_create_localhost( + "system" + , "idlejitter" + , NULL + , "idlejitter" + , NULL + , "CPU Idle Jitter" + , "microseconds lost/s" + , "idlejitter.plugin" + , NULL + , NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER + , localhost->rrd_update_every + , RRDSET_TYPE_AREA + ); + RRDDIM *rd_min = rrddim_add(st, "min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_avg = rrddim_add(st, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + usec_t update_every_ut = localhost->rrd_update_every * USEC_PER_SEC; + struct timeval before, after; + unsigned long long counter; + + for(counter = 0; 1 ;counter++) { + int iterations = 0; + usec_t error_total = 0, + error_min = 0, + error_max = 0, + elapsed = 0; + + if(netdata_exit) break; + + // sleep repeatedly for one full update period, measuring each sleep + while(elapsed < update_every_ut) { + now_monotonic_timeval(&before); + sleep_usec(sleep_ut); + now_monotonic_timeval(&after); + + usec_t dt = dt_usec(&after, &before); + elapsed += dt; + + // the jitter: actual minus requested sleep duration + usec_t error = dt - sleep_ut; + error_total += error; + + if(unlikely(!iterations)) + error_min = error; + else if(error < error_min) + error_min = error; + + if(error > error_max) + error_max = error; + + iterations++; + } + + if(netdata_exit) break; + + if(iterations) { + if (likely(counter)) rrdset_next(st); + rrddim_set_by_pointer(st, rd_min, error_min); + rrddim_set_by_pointer(st, rd_max, error_max); + rrddim_set_by_pointer(st, rd_avg, error_total / iterations); + rrdset_done(st); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.h b/collectors/idlejitter.plugin/plugin_idlejitter.h new file mode 100644 index 000000000..62fabea16 --- /dev/null +++ b/collectors/idlejitter.plugin/plugin_idlejitter.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGIN_IDLEJITTER_H +#define NETDATA_PLUGIN_IDLEJITTER_H 1 + +#include "../../daemon/common.h" + +#define NETDATA_PLUGIN_HOOK_IDLEJITTER \ + { \ + .name = "PLUGIN[idlejitter]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "idlejitter", \ + .enabled = 1, \
+ .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = cpuidlejitter_main \ + }, + +extern void *cpuidlejitter_main(void *ptr); + +#endif /* NETDATA_PLUGIN_IDLEJITTER_H */ diff --git a/collectors/macos.plugin/Makefile.am b/collectors/macos.plugin/Makefile.am new file mode 100644 index 000000000..babdcf0df --- /dev/null +++ b/collectors/macos.plugin/Makefile.am @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/collectors/macos.plugin/Makefile.in b/collectors/macos.plugin/Makefile.in new file mode 100644 index 000000000..6247dda70 --- /dev/null +++ b/collectors/macos.plugin/Makefile.in @@ -0,0 +1,457 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/macos.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + 
$(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ 
+UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c new file mode 100644 index 000000000..5d0ba929e --- /dev/null +++ b/collectors/macos.plugin/macos_fw.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_macos.h" + +#include +#include +#include +#include +// NEEDED BY do_space, do_inodes +#include +// NEEDED BY: struct ifaddrs, getifaddrs() +#include +#include + +// NEEDED BY: do_bandwidth +#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s) + +#define MAXDRIVENAME 31 + +#define KILO_FACTOR 1024 +#define MEGA_FACTOR 1048576 // 1024 * 1024 +#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024 + +int do_macos_iokit(int update_every, usec_t dt) { + (void)dt; + + static int do_io = -1, do_space = -1, do_inodes = -1, do_bandwidth = -1; + + if (unlikely(do_io == -1)) { + do_io = config_get_boolean("plugin:macos:iokit", "disk i/o", 1); + do_space = config_get_boolean("plugin:macos:sysctl", "space usage for all disks", 1); + do_inodes = config_get_boolean("plugin:macos:sysctl", "inodes usage for all disks", 1); + do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1); + } + + RRDSET *st; + + mach_port_t master_port; + io_registry_entry_t drive, drive_media; + io_iterator_t drive_list; + CFDictionaryRef properties, statistics; + CFStringRef name; + CFNumberRef number; + kern_return_t status; + collected_number total_disk_reads = 0; + collected_number total_disk_writes = 0; + struct diskstat { + char name[MAXDRIVENAME]; + collected_number bytes_read; + collected_number bytes_write; + collected_number reads; + collected_number writes; + collected_number time_read; + collected_number time_write; + collected_number latency_read; + collected_number latency_write; + } diskstat; + struct cur_diskstat { + collected_number duration_read_ns; + collected_number 
duration_write_ns; + collected_number busy_time_ns; + } cur_diskstat; + struct prev_diskstat { + collected_number bytes_read; + collected_number bytes_write; + collected_number operations_read; + collected_number operations_write; + collected_number duration_read_ns; + collected_number duration_write_ns; + collected_number busy_time_ns; + } prev_diskstat; + + // NEEDED BY: do_space, do_inodes + struct statfs *mntbuf; + int mntsize, i; + char mntonname[MNAMELEN + 1]; + char title[4096 + 1]; + + // NEEDED BY: do_bandwidth + struct ifaddrs *ifa, *ifap; + + /* Get ports and services for drive statistics. */ + if (unlikely(IOMasterPort(bootstrap_port, &master_port))) { + error("MACOS: IOMasterPort() failed"); + do_io = 0; + error("DISABLED: system.io"); + /* Get the list of all drive objects. */ + } else if (unlikely(IOServiceGetMatchingServices(master_port, IOServiceMatching("IOBlockStorageDriver"), &drive_list))) { + error("MACOS: IOServiceGetMatchingServices() failed"); + do_io = 0; + error("DISABLED: system.io"); + } else { + while ((drive = IOIteratorNext(drive_list)) != 0) { + properties = 0; + statistics = 0; + number = 0; + bzero(&diskstat, sizeof(diskstat)); + + /* Get drive media object. */ + status = IORegistryEntryGetChildEntry(drive, kIOServicePlane, &drive_media); + if (unlikely(status != KERN_SUCCESS)) { + IOObjectRelease(drive); + continue; + } + + /* Get drive media properties. */ + if (likely(!IORegistryEntryCreateCFProperties(drive_media, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) { + /* Get disk name. */ + if (likely(name = (CFStringRef)CFDictionaryGetValue(properties, CFSTR(kIOBSDNameKey)))) { + CFStringGetCString(name, diskstat.name, MAXDRIVENAME, kCFStringEncodingUTF8); + } + } + + /* Release. */ + CFRelease(properties); + IOObjectRelease(drive_media); + + if(unlikely(!diskstat.name || !*diskstat.name)) { + IOObjectRelease(drive); + continue; + } + + /* Obtain the properties for this drive object. */ + if (unlikely(IORegistryEntryCreateCFProperties(drive, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) { + IOObjectRelease(drive); + error("MACOS: IORegistryEntryCreateCFProperties() failed"); + do_io = 0; + error("DISABLED: system.io"); + break; + } else if (likely(properties)) { + /* Obtain the statistics from the drive properties. */ + if (likely(statistics = (CFDictionaryRef)CFDictionaryGetValue(properties, CFSTR(kIOBlockStorageDriverStatisticsKey)))) { + + // -------------------------------------------------------------------- + + /* Get bytes read. */ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_read); + total_disk_reads += diskstat.bytes_read; + } + + /* Get bytes written. 
*/ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_write); + total_disk_writes += diskstat.bytes_write; + } + + st = rrdset_find_bytype_localhost("disk", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk" + , diskstat.name + , NULL + , diskstat.name + , "disk.io" + , "Disk I/O Bandwidth" + , "kilobytes/s" + , "macos" + , "iokit" + , 2000 + , update_every + , RRDSET_TYPE_AREA + ); + + rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + prev_diskstat.bytes_read = rrddim_set(st, "reads", diskstat.bytes_read); + prev_diskstat.bytes_write = rrddim_set(st, "writes", diskstat.bytes_write); + rrdset_done(st); + + // -------------------------------------------------------------------- + + /* Get number of reads. */ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsReadsKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.reads); + } + + /* Get number of writes. */ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsWritesKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.writes); + } + + st = rrdset_find_bytype_localhost("disk_ops", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk_ops" + , diskstat.name + , NULL + , diskstat.name + , "disk.ops" + , "Disk Completed I/O Operations" + , "operations/s" + , "macos" + , "iokit" + , 2001 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + prev_diskstat.operations_read = rrddim_set(st, "reads", diskstat.reads); + prev_diskstat.operations_write = rrddim_set(st, "writes", diskstat.writes); + rrdset_done(st); + + // -------------------------------------------------------------------- + + /* Get reads time. */ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_read); + } + + /* Get writes time. */ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_write); + } + + st = rrdset_find_bytype_localhost("disk_util", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk_util" + , diskstat.name + , NULL + , diskstat.name + , "disk.util" + , "Disk Utilization Time" + , "% of time working" + , "macos" + , "iokit" + , 2004 + , update_every + , RRDSET_TYPE_AREA + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + cur_diskstat.busy_time_ns = (diskstat.time_read + diskstat.time_write); + prev_diskstat.busy_time_ns = rrddim_set(st, "utilization", cur_diskstat.busy_time_ns); + rrdset_done(st); + + // -------------------------------------------------------------------- + + /* Get reads latency. 
*/ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentReadTimeKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_read); + } + + /* Get writes latency. */ + if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentWriteTimeKey)))) { + CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_write); + } + + st = rrdset_find_bytype_localhost("disk_iotime", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk_iotime" + , diskstat.name + , NULL + , diskstat.name + , "disk.iotime" + , "Disk Total I/O Time" + , "milliseconds/s" + , "macos" + , "iokit" + , 2022 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + cur_diskstat.duration_read_ns = diskstat.time_read + diskstat.latency_read; + cur_diskstat.duration_write_ns = diskstat.time_write + diskstat.latency_write; + prev_diskstat.duration_read_ns = rrddim_set(st, "reads", cur_diskstat.duration_read_ns); + prev_diskstat.duration_write_ns = rrddim_set(st, "writes", cur_diskstat.duration_write_ns); + rrdset_done(st); + + // -------------------------------------------------------------------- + // calculate differential charts + // only if this is not the first time we run + + if (likely(dt)) { + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("disk_await", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk_await" + , diskstat.name + , NULL + , diskstat.name + , "disk.await" + , "Average Completed I/O Operation Time" + , "ms per operation" + , "macos" + , "iokit" + , 2005 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ? + (cur_diskstat.duration_read_ns - prev_diskstat.duration_read_ns) / (diskstat.reads - prev_diskstat.operations_read) : 0); + rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ? + (cur_diskstat.duration_write_ns - prev_diskstat.duration_write_ns) / (diskstat.writes - prev_diskstat.operations_write) : 0); + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("disk_avgsz", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk_avgsz" + , diskstat.name + , NULL + , diskstat.name + , "disk.avgsz" + , "Average Completed I/O Operation Bandwidth" + , "kilobytes per operation" + , "macos" + , "iokit" + , 2006 + , update_every + , RRDSET_TYPE_AREA + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ? + (diskstat.bytes_read - prev_diskstat.bytes_read) / (diskstat.reads - prev_diskstat.operations_read) : 0); + rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ? 
+ (diskstat.bytes_write - prev_diskstat.bytes_write) / (diskstat.writes - prev_diskstat.operations_write) : 0); + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("disk_svctm", diskstat.name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "disk_svctm" + , diskstat.name + , NULL + , diskstat.name + , "disk.svctm" + , "Average Service Time" + , "ms per operation" + , "macos" + , "iokit" + , 2007 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "svctm", ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) ? + (cur_diskstat.busy_time_ns - prev_diskstat.busy_time_ns) / ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) : 0); + rrdset_done(st); + } + } + + /* Release. */ + CFRelease(properties); + } + + /* Release. */ + IOObjectRelease(drive); + } + IOIteratorReset(drive_list); + + /* Release. */ + IOObjectRelease(drive_list); + } + + if (likely(do_io)) { + st = rrdset_find_bytype_localhost("system", "io"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "io" + , NULL + , "disk" + , NULL + , "Disk I/O" + , "kilobytes/s" + , "macos" + , "iokit" + , 150 + , update_every + , RRDSET_TYPE_AREA + ); + rrddim_add(st, "in", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "out", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "in", total_disk_reads); + rrddim_set(st, "out", total_disk_writes); + rrdset_done(st); + } + + // Can be merged with FreeBSD plugin + // -------------------------------------------------------------------------- + + if (likely(do_space || do_inodes)) { + // there is no mount info in sysctl MIBs + if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) { + error("MACOS: getmntinfo() failed"); + do_space = 0; + error("DISABLED: disk_space.X"); + do_inodes = 0; + error("DISABLED: disk_inodes.X"); + } else { + for (i = 0; i < mntsize; i++) { + if (mntbuf[i].f_flags == MNT_RDONLY || + mntbuf[i].f_blocks == 0 || + // taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes + strcmp(mntbuf[i].f_fstypename, "autofs") == 0 || + strcmp(mntbuf[i].f_fstypename, "procfs") == 0 || + strcmp(mntbuf[i].f_fstypename, "subfs") == 0 || + strcmp(mntbuf[i].f_fstypename, "devfs") == 0 || + strcmp(mntbuf[i].f_fstypename, "none") == 0) + continue; + + // -------------------------------------------------------------------------- + + if (likely(do_space)) { + st = rrdset_find_bytype_localhost("disk_space", mntbuf[i].f_mntonname); + if (unlikely(!st)) { + snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); + st = rrdset_create_localhost( + "disk_space" + , mntbuf[i].f_mntonname + , NULL + , mntbuf[i].f_mntonname + , "disk.space" + , title + , "GB" + , "macos" + , "iokit" + , 2023 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(st, "avail", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "used", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "reserved_for_root", "reserved for root", mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st); + + rrddim_set(st, "avail", (collected_number) mntbuf[i].f_bavail); + 
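// f_bavail counts blocks available to unprivileged users, while f_bfree
+ // counts all free blocks; so used = f_blocks - f_bfree, and the
+ // root-reserved area charted below is f_bfree - f_bavail.
+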
rrddim_set(st, "used", (collected_number) (mntbuf[i].f_blocks - mntbuf[i].f_bfree)); + rrddim_set(st, "reserved_for_root", (collected_number) (mntbuf[i].f_bfree - mntbuf[i].f_bavail)); + rrdset_done(st); + } + + // -------------------------------------------------------------------------- + + if (likely(do_inodes)) { + st = rrdset_find_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname); + if (unlikely(!st)) { + snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); + st = rrdset_create_localhost( + "disk_inodes" + , mntbuf[i].f_mntonname + , NULL + , mntbuf[i].f_mntonname + , "disk.inodes" + , title + , "Inodes" + , "macos" + , "iokit" + , 2024 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(st, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st); + + rrddim_set(st, "avail", (collected_number) mntbuf[i].f_ffree); + rrddim_set(st, "used", (collected_number) (mntbuf[i].f_files - mntbuf[i].f_ffree)); + rrdset_done(st); + } + } + } + } + + // Can be merged with FreeBSD plugin + // -------------------------------------------------------------------- + + if (likely(do_bandwidth)) { + if (unlikely(getifaddrs(&ifap))) { + error("MACOS: getifaddrs()"); + do_bandwidth = 0; + error("DISABLED: system.ipv4"); + } else { + for (ifa = ifap; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family != AF_LINK) + continue; + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("net", ifa->ifa_name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "net" + , ifa->ifa_name + , NULL + , ifa->ifa_name + , "net.net" + , "Bandwidth" + , "kilobits/s" + , "macos" + , "iokit" + , 7000 + , update_every + , RRDSET_TYPE_AREA + ); + + rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "received", IFA_DATA(ibytes)); + rrddim_set(st, "sent", IFA_DATA(obytes)); + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("net_packets", ifa->ifa_name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "net_packets" + , ifa->ifa_name + , NULL + , ifa->ifa_name + , "net.packets" + , "Packets" + , "packets/s" + , "macos" + , "iokit" + , 7001 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "received", IFA_DATA(ipackets)); + rrddim_set(st, "sent", IFA_DATA(opackets)); + rrddim_set(st, "multicast_received", IFA_DATA(imcasts)); + rrddim_set(st, "multicast_sent", IFA_DATA(omcasts)); + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("net_errors", ifa->ifa_name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "net_errors" + , ifa->ifa_name + , NULL + , ifa->ifa_name + , "net.errors" + , "Interface Errors" + , "errors/s" + , 
"macos" + , "iokit" + , 7002 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "inbound", IFA_DATA(ierrors)); + rrddim_set(st, "outbound", IFA_DATA(oerrors)); + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("net_drops", ifa->ifa_name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "net_drops" + , ifa->ifa_name + , NULL + , ifa->ifa_name + , "net.drops" + , "Interface Drops" + , "drops/s" + , "macos" + , "iokit" + , 7003 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "inbound", IFA_DATA(iqdrops)); + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("net_events", ifa->ifa_name); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "net_events" + , ifa->ifa_name + , NULL + , ifa->ifa_name + , "net.events" + , "Network Interface Events" + , "events/s" + , "macos" + , "iokit" + , 7006 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "collisions", IFA_DATA(collisions)); + rrdset_done(st); + } + + freeifaddrs(ifap); + } + } + + + return 0; +} diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c new file mode 100644 index 000000000..1c43d624c --- /dev/null +++ b/collectors/macos.plugin/macos_mach_smi.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_macos.h" + +#include + +int do_macos_mach_smi(int update_every, usec_t dt) { + (void)dt; + + static int do_cpu = -1, do_ram = - 1, do_swapio = -1, do_pgfaults = -1; + + if (unlikely(do_cpu == -1)) { + do_cpu = config_get_boolean("plugin:macos:mach_smi", "cpu utilization", 1); + do_ram = config_get_boolean("plugin:macos:mach_smi", "system ram", 1); + do_swapio = config_get_boolean("plugin:macos:mach_smi", "swap i/o", 1); + do_pgfaults = config_get_boolean("plugin:macos:mach_smi", "memory page faults", 1); + } + + RRDSET *st; + + kern_return_t kr; + mach_msg_type_number_t count; + host_t host; + vm_size_t system_pagesize; + + + // NEEDED BY: do_cpu + natural_t cp_time[CPU_STATE_MAX]; + + // NEEDED BY: do_ram, do_swapio, do_pgfaults +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060) + vm_statistics64_data_t vm_statistics; +#else + vm_statistics_data_t vm_statistics; +#endif + + host = mach_host_self(); + kr = host_page_size(host, &system_pagesize); + if (unlikely(kr != KERN_SUCCESS)) + return -1; + + // -------------------------------------------------------------------- + + if (likely(do_cpu)) { + if (unlikely(HOST_CPU_LOAD_INFO_COUNT != 4)) { + error("MACOS: There are %d CPU states (4 was expected)", HOST_CPU_LOAD_INFO_COUNT); + do_cpu = 0; + error("DISABLED: system.cpu"); + } else { + count = HOST_CPU_LOAD_INFO_COUNT; + kr = host_statistics(host, HOST_CPU_LOAD_INFO, (host_info_t)cp_time, &count); + 
if (unlikely(kr != KERN_SUCCESS)) { + error("MACOS: host_statistics() failed: %s", mach_error_string(kr)); + do_cpu = 0; + error("DISABLED: system.cpu"); + } else { + + st = rrdset_find_bytype_localhost("system", "cpu"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "cpu" + , NULL + , "cpu" + , "system.cpu" + , "Total CPU utilization" + , "percentage" + , "macos" + , "mach_smi" + , 100 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_hide(st, "idle"); + } + else rrdset_next(st); + + rrddim_set(st, "user", cp_time[CPU_STATE_USER]); + rrddim_set(st, "nice", cp_time[CPU_STATE_NICE]); + rrddim_set(st, "system", cp_time[CPU_STATE_SYSTEM]); + rrddim_set(st, "idle", cp_time[CPU_STATE_IDLE]); + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + if (likely(do_ram || do_swapio || do_pgfaults)) { +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060) + count = sizeof(vm_statistics64_data_t); + kr = host_statistics64(host, HOST_VM_INFO64, (host_info64_t)&vm_statistics, &count); +#else + count = sizeof(vm_statistics_data_t); + kr = host_statistics(host, HOST_VM_INFO, (host_info_t)&vm_statistics, &count); +#endif + if (unlikely(kr != KERN_SUCCESS)) { + error("MACOS: host_statistics64() failed: %s", mach_error_string(kr)); + do_ram = 0; + error("DISABLED: system.ram"); + do_swapio = 0; + error("DISABLED: system.swapio"); + do_pgfaults = 0; + error("DISABLED: mem.pgfaults"); + } else { + if (likely(do_ram)) { + st = rrdset_find_localhost("system.ram"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "ram" + , NULL + , "ram" + , NULL + , "System RAM" + , "MB" + , "macos" + , "mach_smi" + , 200 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(st, "active", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "wired", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090) + rrddim_add(st, "throttled", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "compressor", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); +#endif + rrddim_add(st, "inactive", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "purgeable", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "speculative", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "free", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "active", vm_statistics.active_count); + rrddim_set(st, "wired", vm_statistics.wire_count); +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090) + rrddim_set(st, "throttled", vm_statistics.throttled_count); + rrddim_set(st, "compressor", vm_statistics.compressor_page_count); +#endif + rrddim_set(st, "inactive", vm_statistics.inactive_count); + rrddim_set(st, "purgeable", vm_statistics.purgeable_count); + rrddim_set(st, "speculative", vm_statistics.speculative_count); + rrddim_set(st, "free", (vm_statistics.free_count - vm_statistics.speculative_count)); + rrdset_done(st); + } + 
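+            // Illustrative scaling sketch for the RAM chart above: every sampled
+            // value is a page count, so each dimension is added with multiplier =
+            // system_pagesize and divider = 1048576 to render MB, i.e.
+            //   MB = pages * pagesize / 2^20   (e.g. 262144 pages * 4096 B = 1024 MB).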
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+            // --------------------------------------------------------------------
+
+            if (likely(do_swapio)) {
+                st = rrdset_find_localhost("system.swapio");
+                if (unlikely(!st)) {
+                    st = rrdset_create_localhost(
+                            "system"
+                            , "swapio"
+                            , NULL
+                            , "swap"
+                            , NULL
+                            , "Swap I/O"
+                            , "kilobytes/s"
+                            , "macos"
+                            , "mach_smi"
+                            , 250
+                            , update_every
+                            , RRDSET_TYPE_AREA
+                    );
+
+                    rrddim_add(st, "in", NULL, system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "out", NULL, -system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
+                }
+                else rrdset_next(st);
+
+                rrddim_set(st, "in", vm_statistics.swapins);
+                rrddim_set(st, "out", vm_statistics.swapouts);
+                rrdset_done(st);
+            }
+#endif
+
+            // --------------------------------------------------------------------
+
+            if (likely(do_pgfaults)) {
+                st = rrdset_find_localhost("mem.pgfaults");
+                if (unlikely(!st)) {
+                    st = rrdset_create_localhost(
+                            "mem"
+                            , "pgfaults"
+                            , NULL
+                            , "system"
+                            , NULL
+                            , "Memory Page Faults"
+                            , "page faults/s"
+                            , "macos"
+                            , "mach_smi"
+                            , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
+                            , update_every
+                            , RRDSET_TYPE_LINE
+                    );
+                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+                    rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "pagein", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "pageout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+                    rrddim_add(st, "compress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "decompress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+                    rrddim_add(st, "zero_fill", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "reactivate", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "purge", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                }
+                else rrdset_next(st);
+
+                rrddim_set(st, "memory", vm_statistics.faults);
+                rrddim_set(st, "cow", vm_statistics.cow_faults);
+                rrddim_set(st, "pagein", vm_statistics.pageins);
+                rrddim_set(st, "pageout", vm_statistics.pageouts);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+                rrddim_set(st, "compress", vm_statistics.compressions);
+                rrddim_set(st, "decompress", vm_statistics.decompressions);
+#endif
+                rrddim_set(st, "zero_fill", vm_statistics.zero_fill_count);
+                rrddim_set(st, "reactivate", vm_statistics.reactivations);
+                rrddim_set(st, "purge", vm_statistics.purges);
+                rrdset_done(st);
+            }
+        }
+    }
+
+    // --------------------------------------------------------------------
+
+    return 0;
+}
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
new file mode 100644
index 000000000..6b443c04a
--- /dev/null
+++ b/collectors/macos.plugin/macos_sysctl.c
@@ -0,0 +1,1492 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <Availability.h>
+// NEEDED BY: do_bandwidth
+#include <net/route.h>
+// NEEDED BY do_tcp...
+#include <sys/socketvar.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp_fsm.h>
+// NEEDED BY do_udp..., do_ip...
+#include <netinet/ip_var.h>
+// NEEDED BY do_udp...
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+// NEEDED BY do_icmp...
+#include <netinet/ip.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+// NEEDED BY do_icmp6...
+#include <netinet/icmp6.h>
+// NEEDED BY do_uptime
+#include <time.h>
+
+// MacOS calculates load averages once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
+
+int do_macos_sysctl(int update_every, usec_t dt) {
+    static int do_loadavg = -1, do_swap = -1, do_bandwidth = -1,
+               do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_ecn = -1,
+               do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1,
+               do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1,
+               do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
+               do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1,
+               do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1,
+               do_icmp6_router = -1, do_icmp6_neighbor = -1, do_icmp6_types = -1, do_uptime = -1;
+
+
+    if (unlikely(do_loadavg == -1)) {
+        do_loadavg = config_get_boolean("plugin:macos:sysctl", "enable load average", 1);
+        do_swap = config_get_boolean("plugin:macos:sysctl", "system swap", 1);
+        do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
+        do_tcp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP packets", 1);
+        do_tcp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP errors", 1);
+        do_tcp_handshake = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP handshake issues", 1);
+        do_ecn = config_get_boolean_ondemand("plugin:macos:sysctl", "ECN packets", CONFIG_BOOLEAN_AUTO);
+        do_tcpext_syscookies = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
+        do_tcpext_ofo = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
+        do_tcpext_connaborts = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
+        do_udp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP packets", 1);
+        do_udp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP errors", 1);
+        do_icmp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP packets", 1);
+        do_icmpmsg = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP messages", 1);
+        do_ip_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 packets", 1);
+        do_ip_fragsout = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments sent", 1);
+        do_ip_fragsin = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments assembly", 1);
+        do_ip_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 errors", 1);
+        do_ip6_packets = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
+        do_ip6_fragsout = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
+        do_ip6_fragsin = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
+        do_ip6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
+        do_icmp6 = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp", CONFIG_BOOLEAN_AUTO);
+        do_icmp6_redir = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp redirects", CONFIG_BOOLEAN_AUTO);
+        do_icmp6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp errors", CONFIG_BOOLEAN_AUTO);
+        do_icmp6_echos = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp echos", CONFIG_BOOLEAN_AUTO);
+        do_icmp6_router = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp router", CONFIG_BOOLEAN_AUTO);
+        do_icmp6_neighbor = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
neighbor", CONFIG_BOOLEAN_AUTO); + do_icmp6_types = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp types", CONFIG_BOOLEAN_AUTO); + do_uptime = config_get_boolean("plugin:macos:sysctl", "system uptime", 1); + } + + RRDSET *st; + + int system_pagesize = getpagesize(); // wouldn't it be better to get value directly from hw.pagesize? + int i, n; + int common_error = 0; + size_t size; + + // NEEDED BY: do_loadavg + static usec_t next_loadavg_dt = 0; + struct loadavg sysload; + + // NEEDED BY: do_swap + struct xsw_usage swap_usage; + + // NEEDED BY: do_bandwidth + int mib[6]; + static char *ifstatdata = NULL; + char *lim, *next; + struct if_msghdr *ifm; + struct iftot { + u_long ift_ibytes; + u_long ift_obytes; + } iftot = {0, 0}; + + // NEEDED BY: do_tcp... + struct tcpstat tcpstat; + uint64_t tcps_states[TCP_NSTATES]; + + // NEEDED BY: do_udp... + struct udpstat udpstat; + + // NEEDED BY: do_icmp... + struct icmpstat icmpstat; + struct icmp_total { + u_long msgs_in; + u_long msgs_out; + } icmp_total = {0, 0}; + + // NEEDED BY: do_ip... + struct ipstat ipstat; + + // NEEDED BY: do_ip6... + /* + * Dirty workaround for /usr/include/netinet6/ip6_var.h absence. + * Struct ip6stat was copied from bsd/netinet6/ip6_var.h from xnu sources. + * Do the same for previously missing scope6_var.h on OS X < 10.11. + */ +#define IP6S_SRCRULE_COUNT 16 + +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#ifndef _NETINET6_SCOPE6_VAR_H_ +#define _NETINET6_SCOPE6_VAR_H_ +#include + +#define SCOPE6_ID_MAX 16 +#endif +#else +#include +#endif + + struct ip6stat { + u_quad_t ip6s_total; /* total packets received */ + u_quad_t ip6s_tooshort; /* packet too short */ + u_quad_t ip6s_toosmall; /* not enough data */ + u_quad_t ip6s_fragments; /* fragments received */ + u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */ + u_quad_t ip6s_fragtimeout; /* fragments timed out */ + u_quad_t ip6s_fragoverflow; /* fragments that exceeded limit */ + u_quad_t ip6s_forward; /* packets forwarded */ + u_quad_t ip6s_cantforward; /* packets rcvd for unreachable dest */ + u_quad_t ip6s_redirectsent; /* packets forwarded on same net */ + u_quad_t ip6s_delivered; /* datagrams delivered to upper level */ + u_quad_t ip6s_localout; /* total ip packets generated here */ + u_quad_t ip6s_odropped; /* lost packets due to nobufs, etc. */ + u_quad_t ip6s_reassembled; /* total packets reassembled ok */ + u_quad_t ip6s_atmfrag_rcvd; /* atomic fragments received */ + u_quad_t ip6s_fragmented; /* datagrams successfully fragmented */ + u_quad_t ip6s_ofragments; /* output fragments created */ + u_quad_t ip6s_cantfrag; /* don't fragment flag was set, etc. 
*/ + u_quad_t ip6s_badoptions; /* error in option processing */ + u_quad_t ip6s_noroute; /* packets discarded due to no route */ + u_quad_t ip6s_badvers; /* ip6 version != 6 */ + u_quad_t ip6s_rawout; /* total raw ip packets generated */ + u_quad_t ip6s_badscope; /* scope error */ + u_quad_t ip6s_notmember; /* don't join this multicast group */ + u_quad_t ip6s_nxthist[256]; /* next header history */ + u_quad_t ip6s_m1; /* one mbuf */ + u_quad_t ip6s_m2m[32]; /* two or more mbuf */ + u_quad_t ip6s_mext1; /* one ext mbuf */ + u_quad_t ip6s_mext2m; /* two or more ext mbuf */ + u_quad_t ip6s_exthdrtoolong; /* ext hdr are not continuous */ + u_quad_t ip6s_nogif; /* no match gif found */ + u_quad_t ip6s_toomanyhdr; /* discarded due to too many headers */ + + /* + * statistics for improvement of the source address selection + * algorithm: + */ + /* number of times that address selection fails */ + u_quad_t ip6s_sources_none; + /* number of times that an address on the outgoing I/F is chosen */ + u_quad_t ip6s_sources_sameif[SCOPE6_ID_MAX]; + /* number of times that an address on a non-outgoing I/F is chosen */ + u_quad_t ip6s_sources_otherif[SCOPE6_ID_MAX]; + /* + * number of times that an address that has the same scope + * from the destination is chosen. + */ + u_quad_t ip6s_sources_samescope[SCOPE6_ID_MAX]; + /* + * number of times that an address that has a different scope + * from the destination is chosen. + */ + u_quad_t ip6s_sources_otherscope[SCOPE6_ID_MAX]; + /* number of times that a deprecated address is chosen */ + u_quad_t ip6s_sources_deprecated[SCOPE6_ID_MAX]; + + u_quad_t ip6s_forward_cachehit; + u_quad_t ip6s_forward_cachemiss; + + /* number of times that each rule of source selection is applied. */ + u_quad_t ip6s_sources_rule[IP6S_SRCRULE_COUNT]; + + /* number of times we ignored address on expensive secondary interfaces */ + u_quad_t ip6s_sources_skip_expensive_secondary_if; + + /* pkt dropped, no mbufs for control data */ + u_quad_t ip6s_pktdropcntrl; + + /* total packets trimmed/adjusted */ + u_quad_t ip6s_adj; + /* hwcksum info discarded during adjustment */ + u_quad_t ip6s_adj_hwcsum_clr; + + /* duplicate address detection collisions */ + u_quad_t ip6s_dad_collide; + + /* DAD NS looped back */ + u_quad_t ip6s_dad_loopcount; + } ip6stat; + + // NEEDED BY: do_icmp6... + struct icmp6stat icmp6stat; + struct icmp6_total { + u_long msgs_in; + u_long msgs_out; + } icmp6_total = {0, 0}; + + // NEEDED BY: do_uptime + struct timespec boot_time, cur_time; + + // -------------------------------------------------------------------- + + if (next_loadavg_dt <= dt) { + if (likely(do_loadavg)) { + if (unlikely(GETSYSCTL_BY_NAME("vm.loadavg", sysload))) { + do_loadavg = 0; + error("DISABLED: system.load"); + } else { + + st = rrdset_find_bytype_localhost("system", "load"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "load" + , NULL + , "load" + , NULL + , "System Load Average" + , "load" + , "macos" + , "sysctl" + , 100 + , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? 
MIN_LOADAVG_UPDATE_EVERY : update_every + , RRDSET_TYPE_LINE + ); + rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "load1", (collected_number) ((double)sysload.ldavg[0] / sysload.fscale * 1000)); + rrddim_set(st, "load5", (collected_number) ((double)sysload.ldavg[1] / sysload.fscale * 1000)); + rrddim_set(st, "load15", (collected_number) ((double)sysload.ldavg[2] / sysload.fscale * 1000)); + rrdset_done(st); + } + } + + next_loadavg_dt = st->update_every * USEC_PER_SEC; + } + else next_loadavg_dt -= dt; + + // -------------------------------------------------------------------- + + if (likely(do_swap)) { + if (unlikely(GETSYSCTL_BY_NAME("vm.swapusage", swap_usage))) { + do_swap = 0; + error("DISABLED: system.swap"); + } else { + st = rrdset_find_localhost("system.swap"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "swap" + , NULL + , "swap" + , NULL + , "System Swap" + , "MB" + , "macos" + , "sysctl" + , 201 + , update_every + , RRDSET_TYPE_STACKED + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "free", swap_usage.xsu_avail); + rrddim_set(st, "used", swap_usage.xsu_used); + rrdset_done(st); + } + } + + // -------------------------------------------------------------------- + + if (likely(do_bandwidth)) { + mib[0] = CTL_NET; + mib[1] = PF_ROUTE; + mib[2] = 0; + mib[3] = AF_INET; + mib[4] = NET_RT_IFLIST2; + mib[5] = 0; + if (unlikely(sysctl(mib, 6, NULL, &size, NULL, 0))) { + error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno)); + do_bandwidth = 0; + error("DISABLED: system.ipv4"); + } else { + ifstatdata = reallocz(ifstatdata, size); + if (unlikely(sysctl(mib, 6, ifstatdata, &size, NULL, 0) < 0)) { + error("MACOS: sysctl(%s...) 
failed: %s", "net interfaces", strerror(errno)); + do_bandwidth = 0; + error("DISABLED: system.ipv4"); + } else { + lim = ifstatdata + size; + iftot.ift_ibytes = iftot.ift_obytes = 0; + for (next = ifstatdata; next < lim; ) { + ifm = (struct if_msghdr *)next; + next += ifm->ifm_msglen; + + if (ifm->ifm_type == RTM_IFINFO2) { + struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm; + + iftot.ift_ibytes += if2m->ifm_data.ifi_ibytes; + iftot.ift_obytes += if2m->ifm_data.ifi_obytes; + } + } + st = rrdset_find_localhost("system.ipv4"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "ipv4" + , NULL + , "network" + , NULL + , "IPv4 Bandwidth" + , "kilobits/s" + , "macos" + , "sysctl" + , 500 + , update_every + , RRDSET_TYPE_AREA + ); + + rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "InOctets", iftot.ift_ibytes); + rrddim_set(st, "OutOctets", iftot.ift_obytes); + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html + if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || do_tcpext_syscookies || do_ecn)) { + if (unlikely(GETSYSCTL_BY_NAME("net.inet.tcp.stats", tcpstat))){ + do_tcp_packets = 0; + error("DISABLED: ipv4.tcppackets"); + do_tcp_errors = 0; + error("DISABLED: ipv4.tcperrors"); + do_tcp_handshake = 0; + error("DISABLED: ipv4.tcphandshake"); + do_tcpext_connaborts = 0; + error("DISABLED: ipv4.tcpconnaborts"); + do_tcpext_ofo = 0; + error("DISABLED: ipv4.tcpofo"); + do_tcpext_syscookies = 0; + error("DISABLED: ipv4.tcpsyncookies"); + do_ecn = 0; + error("DISABLED: ipv4.ecnpkts"); + } else { + if (likely(do_tcp_packets)) { + st = rrdset_find_localhost("ipv4.tcppackets"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "tcppackets" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Packets" + , "packets/s" + , "macos" + , "sysctl" + , 2600 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InSegs", tcpstat.tcps_rcvtotal); + rrddim_set(st, "OutSegs", tcpstat.tcps_sndtotal); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_tcp_errors)) { + st = rrdset_find_localhost("ipv4.tcperrors"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "tcperrors" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Errors" + , "packets/s" + , "macos" + , "sysctl" + , 2700 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InErrs", tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort); + rrddim_set(st, "InCsumErrors", tcpstat.tcps_rcvbadsum); + rrddim_set(st, "RetransSegs", tcpstat.tcps_sndrexmitpack); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_tcp_handshake)) { + st = rrdset_find_localhost("ipv4.tcphandshake"); + if 
(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "tcphandshake" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Handshake Issues" + , "events/s" + , "macos" + , "sysctl" + , 2900 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "EstabResets", tcpstat.tcps_drops); + rrddim_set(st, "ActiveOpens", tcpstat.tcps_connattempt); + rrddim_set(st, "PassiveOpens", tcpstat.tcps_accepts); + rrddim_set(st, "AttemptFails", tcpstat.tcps_conndrops); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop))) { + do_tcpext_connaborts = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv4.tcpconnaborts"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "tcpconnaborts" + , NULL + , "tcp" + , NULL + , "TCP Connection Aborts" + , "connections/s" + , "macos" + , "sysctl" + , 3010 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "TCPAbortOnData", tcpstat.tcps_rcvpackafterwin); + rrddim_set(st, "TCPAbortOnClose", tcpstat.tcps_rcvafterclose); + rrddim_set(st, "TCPAbortOnMemory", tcpstat.tcps_rcvmemdrop); + rrddim_set(st, "TCPAbortOnTimeout", tcpstat.tcps_persistdrop); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { + do_tcpext_ofo = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv4.tcpofo"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "tcpofo" + , NULL + , "tcp" + , NULL + , "TCP Out-Of-Order Queue" + , "packets/s" + , "macos" + , "sysctl" + , 3050 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "TCPOFOQueue", tcpstat.tcps_rcvoopack); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { + do_tcpext_syscookies = CONFIG_BOOLEAN_YES; + + st = rrdset_find_localhost("ipv4.tcpsyncookies"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "tcpsyncookies" + , NULL + , "tcp" + , NULL + , "TCP SYN Cookies" + , "packets/s" + , "macos" + , "sysctl" + , 3100 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "SyncookiesSent", "sent", -1, 1, 
RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "SyncookiesRecv", tcpstat.tcps_sc_recvcookie); + rrddim_set(st, "SyncookiesSent", tcpstat.tcps_sc_sendcookie); + rrddim_set(st, "SyncookiesFailed", tcpstat.tcps_sc_zonefail); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100) + if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_recv_ce || tcpstat.tcps_ecn_not_supported))) { + do_ecn = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv4.ecnpkts"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "ecnpkts" + , NULL + , "ecn" + , NULL + , "IPv4 ECN Statistics" + , "packets/s" + , "macos" + , "sysctl" + , 8700 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "InCEPkts", tcpstat.tcps_ecn_recv_ce); + rrddim_set(st, "InNoECTPkts", tcpstat.tcps_ecn_not_supported); + rrdset_done(st); + } +#endif + + } + } + + // -------------------------------------------------------------------- + + // see http://net-snmp.sourceforge.net/docs/mibs/udp.html + if (likely(do_udp_packets || do_udp_errors)) { + if (unlikely(GETSYSCTL_BY_NAME("net.inet.udp.stats", udpstat))) { + do_udp_packets = 0; + error("DISABLED: ipv4.udppackets"); + do_udp_errors = 0; + error("DISABLED: ipv4.udperrors"); + } else { + if (likely(do_udp_packets)) { + st = rrdset_find_localhost("ipv4.udppackets"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "udppackets" + , NULL + , "udp" + , NULL + , "IPv4 UDP Packets" + , "packets/s" + , "macos" + , "sysctl" + , 2601 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InDatagrams", udpstat.udps_ipackets); + rrddim_set(st, "OutDatagrams", udpstat.udps_opackets); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_udp_errors)) { + st = rrdset_find_localhost("ipv4.udperrors"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "udperrors" + , NULL + , "udp" + , NULL + , "IPv4 UDP Errors" + , "events/s" + , "macos" + , "sysctl" + , 2701 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090) + rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); +#endif + } else + rrdset_next(st); + + rrddim_set(st, "InErrors", udpstat.udps_hdrops + udpstat.udps_badlen); + rrddim_set(st, "NoPorts", udpstat.udps_noport); + rrddim_set(st, "RcvbufErrors", udpstat.udps_fullsock); +#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090) + 
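+                    // Illustrative note: on SDKs >= 10.9 the checksum dimension below also
+                    // counts udps_nosum (datagrams that arrived carrying no checksum) on
+                    // top of udps_badsum; the #else branch falls back to udps_badsum alone.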
rrddim_set(st, "InCsumErrors", udpstat.udps_badsum + udpstat.udps_nosum); + rrddim_set(st, "IgnoredMulti", udpstat.udps_filtermcast); +#else + rrddim_set(st, "InCsumErrors", udpstat.udps_badsum); +#endif + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + if (likely(do_icmp_packets || do_icmpmsg)) { + if (unlikely(GETSYSCTL_BY_NAME("net.inet.icmp.stats", icmpstat))) { + do_icmp_packets = 0; + error("DISABLED: ipv4.icmp"); + error("DISABLED: ipv4.icmp_errors"); + do_icmpmsg = 0; + error("DISABLED: ipv4.icmpmsg"); + } else { + for (i = 0; i <= ICMP_MAXTYPE; i++) { + icmp_total.msgs_in += icmpstat.icps_inhist[i]; + icmp_total.msgs_out += icmpstat.icps_outhist[i]; + } + icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort; + + // -------------------------------------------------------------------- + + if (likely(do_icmp_packets)) { + st = rrdset_find_localhost("ipv4.icmp"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "icmp" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Packets" + , "packets/s" + , "macos" + , "sysctl" + , 2602 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InMsgs", icmp_total.msgs_in); + rrddim_set(st, "OutMsgs", icmp_total.msgs_out); + + rrdset_done(st); + + // -------------------------------------------------------------------- + + st = rrdset_find_localhost("ipv4.icmp_errors"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "icmp_errors" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Errors" + , "packets/s" + , "macos" + , "sysctl" + , 2603 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InErrors", icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort); + rrddim_set(st, "OutErrors", icmpstat.icps_error); + rrddim_set(st, "InCsumErrors", icmpstat.icps_checksum); + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (likely(do_icmpmsg)) { + st = rrdset_find_localhost("ipv4.icmpmsg"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "icmpmsg" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Messages" + , "packets/s" + , "macos" + , "sysctl" + , 2604 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InEchoReps", icmpstat.icps_inhist[ICMP_ECHOREPLY]); + rrddim_set(st, "OutEchoReps", icmpstat.icps_outhist[ICMP_ECHOREPLY]); + rrddim_set(st, "InEchos", icmpstat.icps_inhist[ICMP_ECHO]); + rrddim_set(st, "OutEchos", icmpstat.icps_outhist[ICMP_ECHO]); + + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html + if (likely(do_ip_packets || 
do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
+        if (unlikely(GETSYSCTL_BY_NAME("net.inet.ip.stats", ipstat))) {
+            do_ip_packets = 0;
+            error("DISABLED: ipv4.packets");
+            do_ip_fragsout = 0;
+            error("DISABLED: ipv4.fragsout");
+            do_ip_fragsin = 0;
+            error("DISABLED: ipv4.fragsin");
+            do_ip_errors = 0;
+            error("DISABLED: ipv4.errors");
+        } else {
+            if (likely(do_ip_packets)) {
+                st = rrdset_find_localhost("ipv4.packets");
+                if (unlikely(!st)) {
+                    st = rrdset_create_localhost(
+                            "ipv4"
+                            , "packets"
+                            , NULL
+                            , "packets"
+                            , NULL
+                            , "IPv4 Packets"
+                            , "packets/s"
+                            , "macos"
+                            , "sysctl"
+                            , 3000
+                            , update_every
+                            , RRDSET_TYPE_LINE
+                    );
+
+                    rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                } else
+                    rrdset_next(st);
+
+                rrddim_set(st, "OutRequests", ipstat.ips_localout);
+                rrddim_set(st, "InReceives", ipstat.ips_total);
+                rrddim_set(st, "ForwDatagrams", ipstat.ips_forward);
+                rrddim_set(st, "InDelivers", ipstat.ips_delivered);
+                rrdset_done(st);
+            }
+
+            // --------------------------------------------------------------------
+
+            if (likely(do_ip_fragsout)) {
+                st = rrdset_find_localhost("ipv4.fragsout");
+                if (unlikely(!st)) {
+                    st = rrdset_create_localhost(
+                            "ipv4"
+                            , "fragsout"
+                            , NULL
+                            , "fragments"
+                            , NULL
+                            , "IPv4 Fragments Sent"
+                            , "packets/s"
+                            , "macos"
+                            , "sysctl"
+                            , 3010
+                            , update_every
+                            , RRDSET_TYPE_LINE
+                    );
+                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+                    rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                } else
+                    rrdset_next(st);
+
+                rrddim_set(st, "FragOKs", ipstat.ips_fragmented);
+                rrddim_set(st, "FragFails", ipstat.ips_cantfrag);
+                rrddim_set(st, "FragCreates", ipstat.ips_ofragments);
+                rrdset_done(st);
+            }
+
+            // --------------------------------------------------------------------
+
+            if (likely(do_ip_fragsin)) {
+                st = rrdset_find_localhost("ipv4.fragsin");
+                if (unlikely(!st)) {
+                    st = rrdset_create_localhost(
+                            "ipv4"
+                            , "fragsin"
+                            , NULL
+                            , "fragments"
+                            , NULL
+                            , "IPv4 Fragments Reassembly"
+                            , "packets/s"
+                            , "macos"
+                            , "sysctl"
+                            , 3011
+                            , update_every
+                            , RRDSET_TYPE_LINE
+                    );
+                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+                    rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                } else
+                    rrdset_next(st);
+
+                rrddim_set(st, "ReasmOKs", ipstat.ips_reassembled);
+                rrddim_set(st, "ReasmFails", ipstat.ips_fragdropped);
+                rrddim_set(st, "ReasmReqds", ipstat.ips_fragments);
+                rrdset_done(st);
+            }
+
+            // --------------------------------------------------------------------
+
+            if (likely(do_ip_errors)) {
+                st = rrdset_find_localhost("ipv4.errors");
+                if (unlikely(!st)) {
+                    st = rrdset_create_localhost(
+                            "ipv4"
+                            , "errors"
+                            , NULL
+                            , "errors"
+                            , NULL
+                            , "IPv4 Errors"
+                            , "packets/s"
+                            , "macos"
+                            , "sysctl"
+                            , 3002
+                            , update_every
+                            , RRDSET_TYPE_LINE
+                    );
+                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+                    rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                    rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                    rrddim_add(st,
"InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InDiscards", ipstat.ips_badsum + ipstat.ips_tooshort + ipstat.ips_toosmall + ipstat.ips_toolong); + rrddim_set(st, "OutDiscards", ipstat.ips_odropped); + rrddim_set(st, "InHdrErrors", ipstat.ips_badhlen + ipstat.ips_badlen + ipstat.ips_badoptions + ipstat.ips_badvers); + rrddim_set(st, "InAddrErrors", ipstat.ips_badaddr); + rrddim_set(st, "InUnknownProtos", ipstat.ips_noproto); + rrddim_set(st, "OutNoRoutes", ipstat.ips_noroute); + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) { + if (unlikely(GETSYSCTL_BY_NAME("net.inet6.ip6.stats", ip6stat))) { + do_ip6_packets = 0; + error("DISABLED: ipv6.packets"); + do_ip6_fragsout = 0; + error("DISABLED: ipv6.fragsout"); + do_ip6_fragsin = 0; + error("DISABLED: ipv6.fragsin"); + do_ip6_errors = 0; + error("DISABLED: ipv6.errors"); + } else { + if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_localout || ip6stat.ip6s_total || + ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { + do_ip6_packets = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.packets"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "packets" + , NULL + , "packets" + , NULL + , "IPv6 Packets" + , "packets/s" + , "macos" + , "sysctl" + , 3000 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "sent", ip6stat.ip6s_localout); + rrddim_set(st, "received", ip6stat.ip6s_total); + rrddim_set(st, "forwarded", ip6stat.ip6s_forward); + rrddim_set(st, "delivers", ip6stat.ip6s_delivered); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || + ip6stat.ip6s_ofragments))) { + do_ip6_fragsout = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.fragsout"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "fragsout" + , NULL + , "fragments" + , NULL + , "IPv6 Fragments Sent" + , "packets/s" + , "macos" + , "sysctl" + , 3010 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "ok", ip6stat.ip6s_fragmented); + rrddim_set(st, "failed", ip6stat.ip6s_cantfrag); + rrddim_set(st, "all", ip6stat.ip6s_ofragments); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && + (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || + 
ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { + do_ip6_fragsin = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.fragsin"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "fragsin" + , NULL + , "fragments" + , NULL + , "IPv6 Fragments Reassembly" + , "packets/s" + , "macos" + , "sysctl" + , 3011 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "ok", ip6stat.ip6s_reassembled); + rrddim_set(st, "failed", ip6stat.ip6s_fragdropped); + rrddim_set(st, "timeout", ip6stat.ip6s_fragtimeout); + rrddim_set(st, "all", ip6stat.ip6s_fragments); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( + ip6stat.ip6s_toosmall || + ip6stat.ip6s_odropped || + ip6stat.ip6s_badoptions || + ip6stat.ip6s_badvers || + ip6stat.ip6s_exthdrtoolong || + ip6stat.ip6s_sources_none || + ip6stat.ip6s_tooshort || + ip6stat.ip6s_cantforward || + ip6stat.ip6s_noroute))) { + do_ip6_errors = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.errors"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "errors" + , NULL + , "errors" + , NULL + , "IPv6 Errors" + , "packets/s" + , "macos" + , "sysctl" + , 3002 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InDiscards", ip6stat.ip6s_toosmall); + rrddim_set(st, "OutDiscards", ip6stat.ip6s_odropped); + + rrddim_set(st, "InHdrErrors", + ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + ip6stat.ip6s_exthdrtoolong); + rrddim_set(st, "InAddrErrors", ip6stat.ip6s_sources_none); + rrddim_set(st, "InTruncatedPkts", ip6stat.ip6s_tooshort); + rrddim_set(st, "InNoRoutes", ip6stat.ip6s_cantforward); + + rrddim_set(st, "OutNoRoutes", ip6stat.ip6s_noroute); + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) { + if (unlikely(GETSYSCTL_BY_NAME("net.inet6.icmp6.stats", icmp6stat))) { + do_icmp6 = 0; + error("DISABLED: ipv6.icmp"); + } else { + for (i = 0; i <= ICMP6_MAXTYPE; i++) { + icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i]; + icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i]; + } + icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort; + if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) { + do_icmp6 = CONFIG_BOOLEAN_YES; + st = 
rrdset_find_localhost("ipv6.icmp"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "icmp" + , NULL + , "icmp" + , NULL + , "IPv6 ICMP Messages" + , "messages/s" + , "macos" + , "sysctl" + , 10000 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "sent", icmp6_total.msgs_in); + rrddim_set(st, "received", icmp6_total.msgs_out); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { + do_icmp6_redir = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.icmpredir"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "icmpredir" + , NULL + , "icmp" + , NULL + , "IPv6 ICMP Redirects" + , "redirects/s" + , "macos" + , "sysctl" + , 10050 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "sent", icmp6stat.icp6s_inhist[ND_REDIRECT]); + rrddim_set(st, "received", icmp6stat.icp6s_outhist[ND_REDIRECT]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_badcode || + icmp6stat.icp6s_badlen || + icmp6stat.icp6s_checksum || + icmp6stat.icp6s_tooshort || + icmp6stat.icp6s_error || + icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || + icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || + icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || + icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { + do_icmp6_errors = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.icmperrors"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "icmperrors" + , NULL + , "icmp" + , NULL + , "IPv6 ICMP Errors" + , "errors/s" + , "macos" + , "sysctl" + , 10100 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InErrors", icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort); + rrddim_set(st, "OutErrors", icmp6stat.icp6s_error); + rrddim_set(st, "InCsumErrors", icmp6stat.icp6s_checksum); + rrddim_set(st, "InDestUnreachs", icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]); + rrddim_set(st, "InPktTooBigs", icmp6stat.icp6s_badlen); + rrddim_set(st, "InTimeExcds", 
icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]); + rrddim_set(st, "InParmProblems", icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]); + rrddim_set(st, "OutDestUnreachs", icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]); + rrddim_set(st, "OutTimeExcds", icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]); + rrddim_set(st, "OutParmProblems", icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || + icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || + icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { + do_icmp6_echos = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.icmpechos"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "icmpechos" + , NULL + , "icmp" + , NULL + , "IPv6 ICMP Echo" + , "messages/s" + , "macos" + , "sysctl" + , 10200 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InEchos", icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]); + rrddim_set(st, "OutEchos", icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]); + rrddim_set(st, "InEchoReplies", icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]); + rrddim_set(st, "OutEchoReplies", icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || + icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || + icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { + do_icmp6_router = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.icmprouter"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "icmprouter" + , NULL + , "icmp" + , NULL + , "IPv6 Router Messages" + , "messages/s" + , "macos" + , "sysctl" + , 10400 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]); + rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]); + rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]); + rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || + icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { + do_icmp6_neighbor = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.icmpneighbor"); + if (unlikely(!st)) { + st = 
rrdset_create_localhost( + "ipv6" + , "icmpneighbor" + , NULL + , "icmp" + , NULL + , "IPv6 Neighbor Messages" + , "messages/s" + , "macos" + , "sysctl" + , 10500 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]); + rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]); + rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]); + rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( + icmp6stat.icp6s_inhist[1] || + icmp6stat.icp6s_inhist[128] || + icmp6stat.icp6s_inhist[129] || + icmp6stat.icp6s_inhist[136] || + icmp6stat.icp6s_outhist[1] || + icmp6stat.icp6s_outhist[128] || + icmp6stat.icp6s_outhist[129] || + icmp6stat.icp6s_outhist[133] || + icmp6stat.icp6s_outhist[135] || + icmp6stat.icp6s_outhist[136]))) { + do_icmp6_types = CONFIG_BOOLEAN_YES; + st = rrdset_find_localhost("ipv6.icmptypes"); + if (unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "icmptypes" + , NULL + , "icmp" + , NULL + , "IPv6 ICMP Types" + , "messages/s" + , "macos" + , "sysctl" + , 10700 + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } else + rrdset_next(st); + + rrddim_set(st, "InType1", icmp6stat.icp6s_inhist[1]); + rrddim_set(st, "InType128", icmp6stat.icp6s_inhist[128]); + rrddim_set(st, "InType129", icmp6stat.icp6s_inhist[129]); + rrddim_set(st, "InType136", icmp6stat.icp6s_inhist[136]); + rrddim_set(st, "OutType1", icmp6stat.icp6s_outhist[1]); + rrddim_set(st, "OutType128", icmp6stat.icp6s_outhist[128]); + rrddim_set(st, "OutType129", icmp6stat.icp6s_outhist[129]); + rrddim_set(st, "OutType133", icmp6stat.icp6s_outhist[133]); + rrddim_set(st, "OutType135", icmp6stat.icp6s_outhist[135]); + rrddim_set(st, "OutType143", icmp6stat.icp6s_outhist[143]); + rrdset_done(st); + } + } + } + + // -------------------------------------------------------------------- + + if (likely(do_uptime)) { + if (unlikely(GETSYSCTL_BY_NAME("kern.boottime", boot_time))) { + do_uptime = 0; + error("DISABLED: system.uptime"); + } else { + clock_gettime(CLOCK_REALTIME, &cur_time); + st = rrdset_find_localhost("system.uptime"); + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "uptime" + , NULL + , "uptime" + , NULL + , "System Uptime" + , "seconds" + , "macos" 
+ , "sysctl" + , 1000 + , update_every + , RRDSET_TYPE_LINE + ); + rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "uptime", cur_time.tv_sec - boot_time.tv_sec); + rrdset_done(st); + } + } + + return 0; +} + diff --git a/collectors/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c new file mode 100644 index 000000000..628a5b10d --- /dev/null +++ b/collectors/macos.plugin/plugin_macos.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_macos.h" + +static void macos_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *macos_main(void *ptr) { + netdata_thread_cleanup_push(macos_main_cleanup, ptr); + + // when ZERO, attempt to do it + int vdo_cpu_netdata = !config_get_boolean("plugin:macos", "netdata server resources", 1); + int vdo_macos_sysctl = !config_get_boolean("plugin:macos", "sysctl", 1); + int vdo_macos_mach_smi = !config_get_boolean("plugin:macos", "mach system management interface", 1); + int vdo_macos_iokit = !config_get_boolean("plugin:macos", "iokit", 1); + + // keep track of the time each module was called + unsigned long long sutime_macos_sysctl = 0ULL; + unsigned long long sutime_macos_mach_smi = 0ULL; + unsigned long long sutime_macos_iokit = 0ULL; + + usec_t step = localhost->rrd_update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + + while(!netdata_exit) { + usec_t hb_dt = heartbeat_next(&hb, step); + + if(unlikely(netdata_exit)) break; + + // BEGIN -- the job to be done + + if(!vdo_macos_sysctl) { + debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_sysctl()."); + vdo_macos_sysctl = do_macos_sysctl(localhost->rrd_update_every, hb_dt); + } + if(unlikely(netdata_exit)) break; + + if(!vdo_macos_mach_smi) { + debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_mach_smi()."); + vdo_macos_mach_smi = do_macos_mach_smi(localhost->rrd_update_every, hb_dt); + } + if(unlikely(netdata_exit)) break; + + if(!vdo_macos_iokit) { + debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_iokit()."); + vdo_macos_iokit = do_macos_iokit(localhost->rrd_update_every, hb_dt); + } + if(unlikely(netdata_exit)) break; + + // END -- the job is done + + // -------------------------------------------------------------------- + + if(!vdo_cpu_netdata) { + global_statistics_charts(); + registry_statistics(); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h new file mode 100644 index 000000000..0815c59c3 --- /dev/null +++ b/collectors/macos.plugin/plugin_macos.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + + +#ifndef NETDATA_PLUGIN_MACOS_H +#define NETDATA_PLUGIN_MACOS_H 1 + +#include "../../daemon/common.h" + +#if (TARGET_OS == OS_MACOS) + +#define NETDATA_PLUGIN_HOOK_MACOS \ + { \ + .name = "PLUGIN[macos]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "macos", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = macos_main \ + }, + +void *macos_main(void *ptr); + +#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var)) + +extern int getsysctl_by_name(const char *name, void *ptr, size_t len); + +extern int do_macos_sysctl(int update_every, usec_t dt); +extern int 
do_macos_mach_smi(int update_every, usec_t dt); +extern int do_macos_iokit(int update_every, usec_t dt); + + +#else // (TARGET_OS == OS_MACOS) + +#define NETDATA_PLUGIN_HOOK_MACOS + +#endif // (TARGET_OS == OS_MACOS) + + + + + +#endif /* NETDATA_PLUGIN_MACOS_H */ diff --git a/collectors/nfacct.plugin/Makefile.am b/collectors/nfacct.plugin/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/collectors/nfacct.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/nfacct.plugin/Makefile.in b/collectors/nfacct.plugin/Makefile.in new file mode 100644 index 000000000..2a1d001de --- /dev/null +++ b/collectors/nfacct.plugin/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/nfacct.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + 
$(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = 
@PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md new file mode 100644 index 000000000..814b47915 --- /dev/null +++ b/collectors/nfacct.plugin/README.md @@ -0,0 +1,10 @@ +# nfacct.plugin + +This plugin that collects NFACCT statistics. + +It is currently disabled by default, because it requires root access. +We have to move the code to an external plugin to setuid just the plugin not the whole netdata server. + +You can build netdata with it to test it though. +Just run `./configure` (or `netdata-installer.sh`) with the option `--enable-plugin-nfacct` (and any other options you may need). +Remember, you have to tell netdata you want it to run as `root` for this plugin to work. 
diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
new file mode 100644
index 000000000..7d42dd189
--- /dev/null
+++ b/collectors/nfacct.plugin/plugin_nfacct.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_nfacct.h"
+
+#if defined(INTERNAL_PLUGIN_NFACCT)
+
+#define PLUGIN_NFACCT_NAME "nfacct.plugin"
+
+#ifdef HAVE_LIBMNL
+#include <libmnl/libmnl.h>
+
+static inline size_t mnl_buffer_size() {
+    long s = MNL_SOCKET_BUFFER_SIZE;
+    if(s <= 0) return 8192;
+    return (size_t)s;
+}
+
+// ----------------------------------------------------------------------------
+// DO_NFSTAT - collect netfilter connection tracker statistics via netlink
+// example: https://github.com/formorer/pkg-conntrack-tools/blob/master/src/conntrack.c
+
+#ifdef HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
+#define DO_NFSTAT 1
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_CONNTRACK "netlink"
+
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+static struct {
+    int update_every;
+    char *buf;
+    size_t buf_size;
+    struct mnl_socket *mnl;
+    struct nlmsghdr *nlh;
+    struct nfgenmsg *nfh;
+    unsigned int seq;
+    uint32_t portid;
+
+    struct nlattr *tb[CTA_STATS_MAX+1];
+    const char *attr2name[CTA_STATS_MAX+1];
+    kernel_uint_t metrics[CTA_STATS_MAX+1];
+
+    struct nlattr *tb_exp[CTA_STATS_EXP_MAX+1];
+    const char *attr2name_exp[CTA_STATS_EXP_MAX+1];
+    kernel_uint_t metrics_exp[CTA_STATS_EXP_MAX+1];
+} nfstat_root = {
+    .update_every = 1,
+    .buf = NULL,
+    .buf_size = 0,
+    .mnl = NULL,
+    .nlh = NULL,
+    .nfh = NULL,
+    .seq = 0,
+    .portid = 0,
+    .tb = {},
+    .attr2name = {
+        [CTA_STATS_SEARCHED] = "searched",
+        [CTA_STATS_FOUND] = "found",
+        [CTA_STATS_NEW] = "new",
+        [CTA_STATS_INVALID] = "invalid",
+        [CTA_STATS_IGNORE] = "ignore",
+        [CTA_STATS_DELETE] = "delete",
+        [CTA_STATS_DELETE_LIST] = "delete_list",
+        [CTA_STATS_INSERT] = "insert",
+        [CTA_STATS_INSERT_FAILED] = "insert_failed",
+        [CTA_STATS_DROP] = "drop",
+        [CTA_STATS_EARLY_DROP] = "early_drop",
+        [CTA_STATS_ERROR] = "icmp_error",
+        [CTA_STATS_SEARCH_RESTART] = "search_restart",
+    },
+    .metrics = {},
+    .tb_exp = {},
+    .attr2name_exp = {
+        [CTA_STATS_EXP_NEW] = "new",
+        [CTA_STATS_EXP_CREATE] = "created",
+        [CTA_STATS_EXP_DELETE] = "deleted",
+    },
+    .metrics_exp = {}
+};
+
+
+static int nfstat_init(int update_every) {
+    nfstat_root.update_every = update_every;
+
+    nfstat_root.buf_size = mnl_buffer_size();
+    nfstat_root.buf = mallocz(nfstat_root.buf_size);
+
+    nfstat_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
+    if(!nfstat_root.mnl) {
+        error("NFSTAT: mnl_socket_open() failed");
+        return 1;
+    }
+
+    nfstat_root.seq = (unsigned int)now_realtime_sec() - 1;
+
+    if(mnl_socket_bind(nfstat_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
+        error("NFSTAT: mnl_socket_bind() failed");
+        return 1;
+    }
+    nfstat_root.portid = mnl_socket_get_portid(nfstat_root.mnl);
+
+    return 0;
+}
+
+static void nfstat_cleanup() {
+    if(nfstat_root.mnl) {
+        mnl_socket_close(nfstat_root.mnl);
+        nfstat_root.mnl = NULL;
+    }
+
+    freez(nfstat_root.buf);
+    nfstat_root.buf = NULL;
+    nfstat_root.buf_size = 0;
+}
+
+// fill a netlink + nfnetlink header for a NLM_F_DUMP request to the given
+// nfnetlink subsystem and message type
+static struct nlmsghdr * nfct_mnl_nlmsghdr_put(char *buf, uint16_t subsys, uint16_t type, uint8_t family, uint32_t seq) {
+    struct nlmsghdr *nlh;
+    struct nfgenmsg *nfh;
+
+    nlh = mnl_nlmsg_put_header(buf);
+    nlh->nlmsg_type = (subsys << 8) | type;
+    nlh->nlmsg_flags = NLM_F_REQUEST|NLM_F_DUMP;
+    nlh->nlmsg_seq = seq;
+
+    nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
+    nfh->nfgen_family = family;
+    nfh->version = NFNETLINK_V0;
+    nfh->res_id
= 0; + + return nlh; +} + +static int nfct_stats_attr_cb(const struct nlattr *attr, void *data) { + const struct nlattr **tb = data; + int type = mnl_attr_get_type(attr); + + if (mnl_attr_type_valid(attr, CTA_STATS_MAX) < 0) + return MNL_CB_OK; + + if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) { + error("NFSTAT: mnl_attr_validate() failed"); + return MNL_CB_ERROR; + } + + tb[type] = attr; + return MNL_CB_OK; +} + +static int nfstat_callback(const struct nlmsghdr *nlh, void *data) { + (void)data; + + struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh); + + mnl_attr_parse(nlh, sizeof(*nfg), nfct_stats_attr_cb, nfstat_root.tb); + + // printf("cpu=%-4u\t", ntohs(nfg->res_id)); + + int i; + // add the metrics of this CPU into the metrics + for (i = 0; i < CTA_STATS_MAX+1; i++) { + if (nfstat_root.tb[i]) { + // printf("%s=%u ", nfstat_root.attr2name[i], ntohl(mnl_attr_get_u32(nfstat_root.tb[i]))); + nfstat_root.metrics[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb[i])); + } + } + // printf("\n"); + + return MNL_CB_OK; +} + +static int nfstat_collect_conntrack() { + // zero all metrics - we will sum the metrics of all CPUs later + int i; + for (i = 0; i < CTA_STATS_MAX+1; i++) + nfstat_root.metrics[i] = 0; + + // prepare the request + nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq); + + // send the request + if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) { + error("NFSTAT: mnl_socket_sendto() failed"); + return 1; + } + + // get the reply + ssize_t ret; + while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) { + if(mnl_cb_run( + nfstat_root.buf + , (size_t)ret + , nfstat_root.nlh->nlmsg_seq + , nfstat_root.portid + , nfstat_callback + , NULL + ) <= MNL_CB_STOP) + break; + } + + // verify we run without issues + if (ret == -1) { + error("NFSTAT: error communicating with kernel. 
This plugin can only work when netdata runs as root."); + return 1; + } + + return 0; +} + +static int nfexp_stats_attr_cb(const struct nlattr *attr, void *data) +{ + const struct nlattr **tb = data; + int type = mnl_attr_get_type(attr); + + if (mnl_attr_type_valid(attr, CTA_STATS_EXP_MAX) < 0) + return MNL_CB_OK; + + if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) { + error("NFSTAT EXP: mnl_attr_validate() failed"); + return MNL_CB_ERROR; + } + + tb[type] = attr; + return MNL_CB_OK; +} + +static int nfstat_callback_exp(const struct nlmsghdr *nlh, void *data) { + (void)data; + + struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh); + + mnl_attr_parse(nlh, sizeof(*nfg), nfexp_stats_attr_cb, nfstat_root.tb_exp); + + int i; + for (i = 0; i < CTA_STATS_EXP_MAX+1; i++) { + if (nfstat_root.tb_exp[i]) { + nfstat_root.metrics_exp[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb_exp[i])); + } + } + + return MNL_CB_OK; +} + +static int nfstat_collect_conntrack_expectations() { + // zero all metrics - we will sum the metrics of all CPUs later + int i; + for (i = 0; i < CTA_STATS_EXP_MAX+1; i++) + nfstat_root.metrics_exp[i] = 0; + + // prepare the request + nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK_EXP, IPCTNL_MSG_EXP_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq); + + // send the request + if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) { + error("NFSTAT: mnl_socket_sendto() failed"); + return 1; + } + + // get the reply + ssize_t ret; + while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) { + if(mnl_cb_run( + nfstat_root.buf + , (size_t)ret + , nfstat_root.nlh->nlmsg_seq + , nfstat_root.portid + , nfstat_callback_exp + , NULL + ) <= MNL_CB_STOP) + break; + } + + // verify we run without issues + if (ret == -1) { + error("NFSTAT: error communicating with kernel. 
This plugin can only work when netdata runs as root."); + return 1; + } + + return 0; +} + +static int nfstat_collect() { + nfstat_root.seq++; + + if(nfstat_collect_conntrack()) + return 1; + + if(nfstat_collect_conntrack_expectations()) + return 1; + + return 0; +} + +static void nfstat_send_metrics() { + + { + static RRDSET *st_new = NULL; + static RRDDIM *rd_new = NULL, *rd_ignore = NULL, *rd_invalid = NULL; + + if(!st_new) { + st_new = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_new" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker New Connections" + , "connections/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_NEW + , nfstat_root.update_every + , RRDSET_TYPE_LINE + ); + + rd_new = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_NEW], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ignore = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_IGNORE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_invalid = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_INVALID], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_new); + + rrddim_set_by_pointer(st_new, rd_new, (collected_number) nfstat_root.metrics[CTA_STATS_NEW]); + rrddim_set_by_pointer(st_new, rd_ignore, (collected_number) nfstat_root.metrics[CTA_STATS_IGNORE]); + rrddim_set_by_pointer(st_new, rd_invalid, (collected_number) nfstat_root.metrics[CTA_STATS_INVALID]); + + rrdset_done(st_new); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_changes = NULL; + static RRDDIM *rd_inserted = NULL, *rd_deleted = NULL, *rd_delete_list = NULL; + + if(!st_changes) { + st_changes = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_changes" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Changes" + , "changes/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_CHANGES + , nfstat_root.update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_changes, RRDSET_FLAG_DETAIL); + + rd_inserted = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_INSERT], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_deleted = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_DELETE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_delete_list = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_DELETE_LIST], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_changes); + + rrddim_set_by_pointer(st_changes, rd_inserted, (collected_number) nfstat_root.metrics[CTA_STATS_INSERT]); + rrddim_set_by_pointer(st_changes, rd_deleted, (collected_number) nfstat_root.metrics[CTA_STATS_DELETE]); + rrddim_set_by_pointer(st_changes, rd_delete_list, (collected_number) nfstat_root.metrics[CTA_STATS_DELETE_LIST]); + + rrdset_done(st_changes); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_search = NULL; + static RRDDIM *rd_searched = NULL, *rd_restarted = NULL, *rd_found = NULL; + + if(!st_search) { + st_search = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_search" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Searches" + , "searches/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_SEARCH + , nfstat_root.update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_search, RRDSET_FLAG_DETAIL); + + rd_searched = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_SEARCHED], NULL, 
1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_restarted = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_found = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_FOUND], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_search); + + rrddim_set_by_pointer(st_search, rd_searched, (collected_number) nfstat_root.metrics[CTA_STATS_SEARCHED]); + rrddim_set_by_pointer(st_search, rd_restarted, (collected_number) nfstat_root.metrics[CTA_STATS_SEARCH_RESTART]); + rrddim_set_by_pointer(st_search, rd_found, (collected_number) nfstat_root.metrics[CTA_STATS_FOUND]); + + rrdset_done(st_search); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_errors = NULL; + static RRDDIM *rd_error = NULL, *rd_insert_failed = NULL, *rd_drop = NULL, *rd_early_drop = NULL; + + if(!st_errors) { + st_errors = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_errors" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Errors" + , "events/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_ERRORS + , nfstat_root.update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_errors, RRDSET_FLAG_DETAIL); + + rd_error = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_ERROR], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_insert_failed = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_INSERT_FAILED], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_drop = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_DROP], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_early_drop = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_EARLY_DROP], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_errors); + + rrddim_set_by_pointer(st_errors, rd_error, (collected_number) nfstat_root.metrics[CTA_STATS_ERROR]); + rrddim_set_by_pointer(st_errors, rd_insert_failed, (collected_number) nfstat_root.metrics[CTA_STATS_INSERT_FAILED]); + rrddim_set_by_pointer(st_errors, rd_drop, (collected_number) nfstat_root.metrics[CTA_STATS_DROP]); + rrddim_set_by_pointer(st_errors, rd_early_drop, (collected_number) nfstat_root.metrics[CTA_STATS_EARLY_DROP]); + + rrdset_done(st_errors); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_expect = NULL; + static RRDDIM *rd_new = NULL, *rd_created = NULL, *rd_deleted = NULL; + + if(!st_expect) { + st_expect = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_expect" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Expectations" + , "expectations/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_EXPECT + , nfstat_root.update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_expect, RRDSET_FLAG_DETAIL); + + rd_created = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_CREATE], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_deleted = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_DELETE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_new = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_NEW], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_expect); + + rrddim_set_by_pointer(st_expect, rd_created, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_CREATE]); + rrddim_set_by_pointer(st_expect, rd_deleted, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_DELETE]); + 
rrddim_set_by_pointer(st_expect, rd_new, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_NEW]);
+
+        rrdset_done(st_expect);
+    }
+
+}
+
+#endif // HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
+
+
+// ----------------------------------------------------------------------------
+// DO_NFACCT - collect netfilter accounting statistics via netlink
+
+#ifdef HAVE_LIBNETFILTER_ACCT
+#define DO_NFACCT 1
+
+#include <libnetfilter_acct/libnetfilter_acct.h>
+
+struct nfacct_data {
+    char *name;
+    uint32_t hash;
+
+    uint64_t pkts;
+    uint64_t bytes;
+
+    RRDDIM *rd_bytes;
+    RRDDIM *rd_packets;
+
+    int updated;
+
+    struct nfacct_data *next;
+};
+
+static struct {
+    int update_every;
+    char *buf;
+    size_t buf_size;
+    struct mnl_socket *mnl;
+    struct nlmsghdr *nlh;
+    unsigned int seq;
+    uint32_t portid;
+    struct nfacct *nfacct_buffer;
+    struct nfacct_data *nfacct_metrics;
+} nfacct_root = {
+    .update_every = 1,
+    .buf = NULL,
+    .buf_size = 0,
+    .mnl = NULL,
+    .nlh = NULL,
+    .seq = 0,
+    .portid = 0,
+    .nfacct_buffer = NULL,
+    .nfacct_metrics = NULL
+};
+
+static inline struct nfacct_data *nfacct_data_get(const char *name, uint32_t hash) {
+    struct nfacct_data *d = NULL, *last = NULL;
+    for(d = nfacct_root.nfacct_metrics; d ; last = d, d = d->next) {
+        if(unlikely(d->hash == hash && !strcmp(d->name, name)))
+            return d;
+    }
+
+    d = callocz(1, sizeof(struct nfacct_data));
+    d->name = strdupz(name);
+    d->hash = hash;
+
+    if(!last) {
+        d->next = nfacct_root.nfacct_metrics;
+        nfacct_root.nfacct_metrics = d;
+    }
+    else {
+        d->next = last->next;
+        last->next = d;
+    }
+
+    return d;
+}
+
+static int nfacct_init(int update_every) {
+    nfacct_root.update_every = update_every;
+
+    nfacct_root.buf_size = mnl_buffer_size();
+    nfacct_root.buf = mallocz(nfacct_root.buf_size);
+
+    nfacct_root.nfacct_buffer = nfacct_alloc();
+    if(!nfacct_root.nfacct_buffer) {
+        error("nfacct.plugin: nfacct_alloc() failed.");
+        return 1;
+    }
+
+    nfacct_root.seq = (unsigned int)now_realtime_sec() - 1;
+
+    nfacct_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
+    if(!nfacct_root.mnl) {
+        error("nfacct.plugin: mnl_socket_open() failed");
+        return 1;
+    }
+
+    if(mnl_socket_bind(nfacct_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
+        error("nfacct.plugin: mnl_socket_bind() failed");
+        return 1;
+    }
+    nfacct_root.portid = mnl_socket_get_portid(nfacct_root.mnl);
+
+    return 0;
+}
+
+static void nfacct_cleanup() {
+    if(nfacct_root.mnl) {
+        mnl_socket_close(nfacct_root.mnl);
+        nfacct_root.mnl = NULL;
+    }
+
+    if(nfacct_root.nfacct_buffer) {
+        nfacct_free(nfacct_root.nfacct_buffer);
+        nfacct_root.nfacct_buffer = NULL;
+    }
+
+    freez(nfacct_root.buf);
+    nfacct_root.buf = NULL;
+    nfacct_root.buf_size = 0;
+
+    // TODO: cleanup the metrics linked list
+}
+
+static int nfacct_callback(const struct nlmsghdr *nlh, void *data) {
+    (void)data;
+
+    if(nfacct_nlmsg_parse_payload(nlh, nfacct_root.nfacct_buffer) < 0) {
+        error("NFACCT: nfacct_nlmsg_parse_payload() failed.");
+        return MNL_CB_OK;
+    }
+
+    const char *name = nfacct_attr_get_str(nfacct_root.nfacct_buffer, NFACCT_ATTR_NAME);
+    uint32_t hash = simple_hash(name);
+
+    struct nfacct_data *d = nfacct_data_get(name, hash);
+
+    d->pkts = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_PKTS);
+    d->bytes = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_BYTES);
+    d->updated = 1;
+
+    return MNL_CB_OK;
+}
+
+static int nfacct_collect() {
+    // mark all old metrics as not-updated
+    struct nfacct_data *d;
+    for(d = nfacct_root.nfacct_metrics; d ; d = d->next)
+        d->updated = 0;
+
+    // prepare the request
+    nfacct_root.seq++;
+    nfacct_root.nlh =
nfacct_nlmsg_build_hdr(nfacct_root.buf, NFNL_MSG_ACCT_GET, NLM_F_DUMP, (uint32_t)nfacct_root.seq); + if(!nfacct_root.nlh) { + error("NFACCT: nfacct_nlmsg_build_hdr() failed"); + return 1; + } + + // send the request + if(mnl_socket_sendto(nfacct_root.mnl, nfacct_root.nlh, nfacct_root.nlh->nlmsg_len) < 0) { + error("NFACCT: mnl_socket_sendto() failed"); + return 1; + } + + // get the reply + ssize_t ret; + while((ret = mnl_socket_recvfrom(nfacct_root.mnl, nfacct_root.buf, nfacct_root.buf_size)) > 0) { + if(mnl_cb_run( + nfacct_root.buf + , (size_t)ret + , nfacct_root.seq + , nfacct_root.portid + , nfacct_callback + , NULL + ) <= 0) + break; + } + + // verify we run without issues + if (ret == -1) { + error("NFACCT: error communicating with kernel. This plugin can only work when netdata runs as root."); + return 1; + } + + return 0; +} + +static void nfacct_send_metrics() { + static RRDSET *st_bytes = NULL, *st_packets = NULL; + + if(!nfacct_root.nfacct_metrics) return; + struct nfacct_data *d; + + if(!st_packets) { + st_packets = rrdset_create_localhost( + "netfilter" + , "nfacct_packets" + , NULL + , "nfacct" + , NULL + , "Netfilter Accounting Packets" + , "packets/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_PACKETS + , nfacct_root.update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st_packets); + + for(d = nfacct_root.nfacct_metrics; d ; d = d->next) { + if(likely(d->updated)) { + if(unlikely(!d->rd_packets)) + d->rd_packets = rrddim_add( + st_packets + , d->name + , NULL + , 1 + , nfacct_root.update_every + , RRD_ALGORITHM_INCREMENTAL + ); + + rrddim_set_by_pointer( + st_packets + , d->rd_packets + , (collected_number)d->pkts + ); + } + } + + rrdset_done(st_packets); + + // ---------------------------------------------------------------- + + st_bytes = rrdset_find_bytype_localhost("netfilter", "nfacct_bytes"); + if(!st_bytes) { + st_bytes = rrdset_create_localhost( + "netfilter" + , "nfacct_bytes" + , NULL + , "nfacct" + , NULL + , "Netfilter Accounting Bandwidth" + , "kilobytes/s" + , PLUGIN_NFACCT_NAME + , NULL + , NETDATA_CHART_PRIO_NETFILTER_BYTES + , nfacct_root.update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st_bytes); + + for(d = nfacct_root.nfacct_metrics; d ; d = d->next) { + if(likely(d->updated)) { + if(unlikely(!d->rd_bytes)) + d->rd_bytes = rrddim_add( + st_bytes + , d->name + , NULL + , 1 + , 1000 * nfacct_root.update_every + , RRD_ALGORITHM_INCREMENTAL + ); + + rrddim_set_by_pointer( + st_bytes + , d->rd_bytes + , (collected_number)d->bytes + ); + } + } + + rrdset_done(st_bytes); +} + +#endif // HAVE_LIBNETFILTER_ACCT +#endif // HAVE_LIBMNL + +// ---------------------------------------------------------------------------- + +static void nfacct_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + info("cleaning up..."); + +#ifdef DO_NFACCT + nfacct_cleanup(); +#endif + +#ifdef DO_NFSTAT + nfstat_cleanup(); +#endif + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *nfacct_main(void *ptr) { + netdata_thread_cleanup_push(nfacct_main_cleanup, ptr); + + int update_every = (int)config_get_number("plugin:netfilter", "update every", localhost->rrd_update_every); + if(update_every < localhost->rrd_update_every) + update_every = localhost->rrd_update_every; + +#ifdef DO_NFACCT + int nfacct = !nfacct_init(update_every); +#endif + +#ifdef DO_NFSTAT + int nfstat = !nfstat_init(update_every); +#endif + 
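+    // note: nfacct_init() and nfstat_init() return 0 on success, so the
+    // negated results above act as "collector is enabled" flags; a failed
+    // init (or, below, a failed collect) disables that collector for the
+    // rest of this thread's life.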
+ // ------------------------------------------------------------------------ + + usec_t step = update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + for(;;) { + heartbeat_next(&hb, step); + + if(unlikely(netdata_exit)) break; + +#ifdef DO_NFACCT + if(likely(nfacct)) { + nfacct = !nfacct_collect(); + + if(likely(nfacct)) + nfacct_send_metrics(); + } +#endif + +#ifdef DO_NFSTAT + if(likely(nfstat)) { + nfstat = !nfstat_collect(); + + if(likely(nfstat)) + nfstat_send_metrics(); + } +#endif + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + +#endif // INTERNAL_PLUGIN_NFACCT diff --git a/collectors/nfacct.plugin/plugin_nfacct.h b/collectors/nfacct.plugin/plugin_nfacct.h new file mode 100644 index 000000000..4311ccecf --- /dev/null +++ b/collectors/nfacct.plugin/plugin_nfacct.h @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_NFACCT_H +#define NETDATA_NFACCT_H 1 + +#include "../../daemon/common.h" + +#if defined(INTERNAL_PLUGIN_NFACCT) + +#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT \ + { \ + .name = "PLUGIN[nfacct]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "nfacct", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = nfacct_main \ + }, + +extern void *nfacct_main(void *ptr); + +#else // !defined(INTERNAL_PLUGIN_NFACCT) + +#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT + +#endif // defined(INTERNAL_PLUGIN_NFACCT) + +#endif /* NETDATA_NFACCT_H */ + diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am new file mode 100644 index 000000000..4de13cf76 --- /dev/null +++ b/collectors/node.d.plugin/Makefile.am @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + node.d.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + node.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + node.d.plugin \ + $(NULL) + +dist_noinst_DATA = \ + node.d.plugin.in \ + README.md \ + $(NULL) + +usernodeconfigdir=$(configdir)/node.d +dist_usernodeconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +nodeconfigdir=$(libconfigdir)/node.d +dist_nodeconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +dist_node_DATA = \ + $(NULL) + +include fronius/Makefile.inc +include named/Makefile.inc +include sma_webbox/Makefile.inc +include snmp/Makefile.inc +include stiebeleltron/Makefile.inc + +nodemodulesdir=$(nodedir)/node_modules +dist_nodemodules_DATA = \ + node_modules/netdata.js \ + node_modules/extend.js \ + node_modules/pixl-xml.js \ + node_modules/net-snmp.js \ + node_modules/asn1-ber.js \ + $(NULL) + +nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber +dist_nodemoduleslibber_DATA = \ + node_modules/lib/ber/index.js \ + node_modules/lib/ber/errors.js \ + node_modules/lib/ber/reader.js \ + node_modules/lib/ber/types.js \ + node_modules/lib/ber/writer.js \ + $(NULL) diff --git a/collectors/node.d.plugin/Makefile.in b/collectors/node.d.plugin/Makefile.in new file mode 100644 index 000000000..4aec01dea --- /dev/null +++ b/collectors/node.d.plugin/Makefile.in @@ -0,0 +1,805 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc \ + $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc \ + $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc \ + $(srcdir)/stiebeleltron/Makefile.inc $(srcdir)/Makefile.in \ + $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ + $(dist_libconfig_DATA) $(dist_node_DATA) \ + 
$(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \ + $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \ + $(dist_usernodeconfig_DATA) +subdir = collectors/node.d.plugin +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pluginsdir)" \ + "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" \ + "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" \ + "$(DESTDIR)$(nodemoduleslibberdir)" \ + "$(DESTDIR)$(usernodeconfigdir)" +SCRIPTS = $(dist_plugins_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_libconfig_DATA) $(dist_node_DATA) \ + $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \ + $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \ + $(dist_usernodeconfig_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = 
@UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + node.d.plugin \ + $(NULL) + +SUFFIXES = .in +dist_libconfig_DATA = \ + node.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + node.d.plugin \ + $(NULL) + +# dist_nodeconfig_DATA += fronius/fronius.conf + +# do not install these files, but include them in the distribution +# dist_nodeconfig_DATA += named/named.conf + +# do not install these files, but include them in the distribution +# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf + +# do not install these files, but include them in the distribution +# dist_nodeconfig_DATA += snmp/snmp.conf + +# do not install these files, but include them in the distribution +# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA = node.d.plugin.in README.md $(NULL) \ + fronius/README.md fronius/Makefile.inc named/README.md \ + named/Makefile.inc sma_webbox/README.md \ + sma_webbox/Makefile.inc snmp/README.md snmp/Makefile.inc \ + stiebeleltron/README.md stiebeleltron/Makefile.inc +usernodeconfigdir = $(configdir)/node.d +dist_usernodeconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +nodeconfigdir = $(libconfigdir)/node.d +dist_nodeconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files +dist_node_DATA = $(NULL) fronius/fronius.node.js named/named.node.js \ + sma_webbox/sma_webbox.node.js snmp/snmp.node.js \ + stiebeleltron/stiebeleltron.node.js +nodemodulesdir = $(nodedir)/node_modules +dist_nodemodules_DATA = \ + node_modules/netdata.js \ + node_modules/extend.js \ + 
node_modules/pixl-xml.js \ + node_modules/net-snmp.js \ + node_modules/asn1-ber.js \ + $(NULL) + +nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber +dist_nodemoduleslibber_DATA = \ + node_modules/lib/ber/index.js \ + node_modules/lib/ber/errors.js \ + node_modules/lib/ber/reader.js \ + node_modules/lib/ber/types.js \ + node_modules/lib/ber/writer.js \ + $(NULL) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; 
$(am__uninstall_files_from_dir) +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_nodeDATA: $(dist_node_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \ + done + +uninstall-dist_nodeDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir) +install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \ + done + +uninstall-dist_nodeconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_nodemodulesDATA: $(dist_nodemodules_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \ + done + +uninstall-dist_nodemodulesDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir) +install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_nodemoduleslibber_DATA)'; test -n 
"$(nodemoduleslibberdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \ + done + +uninstall-dist_nodemoduleslibberDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir) +install-dist_usernodeconfigDATA: $(dist_usernodeconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(usernodeconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(usernodeconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(usernodeconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(usernodeconfigdir)" || exit $$?; \ + done + +uninstall-dist_usernodeconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(usernodeconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)" "$(DESTDIR)$(usernodeconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_libconfigDATA install-dist_nodeDATA \ + install-dist_nodeconfigDATA install-dist_nodemodulesDATA \ + install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \ + install-dist_usernodeconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \ + uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \ + uninstall-dist_nodemoduleslibberDATA \ + uninstall-dist_pluginsSCRIPTS \ + uninstall-dist_usernodeconfigDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_libconfigDATA \ + install-dist_nodeDATA install-dist_nodeconfigDATA \ + install-dist_nodemodulesDATA \ + install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \ + install-dist_usernodeconfigDATA install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info 
install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \ + uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \ + uninstall-dist_nodemoduleslibberDATA \ + uninstall-dist_pluginsSCRIPTS \ + uninstall-dist_usernodeconfigDATA + +.in: + if sed \ + -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ + -e 's#[@]sbindir_POST@#$(sbindir)#g' \ + -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ + -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ + $< > $@.tmp; then \ + mv "$@.tmp" "$@"; \ + else \ + rm -f "$@.tmp"; \ + false; \ + fi + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md new file mode 100644 index 000000000..dd977017d --- /dev/null +++ b/collectors/node.d.plugin/README.md @@ -0,0 +1,218 @@ +# node.d.plugin + +`node.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`. + +1. It runs as an independent process (`ps fax` shows it) +2. It is started and stopped automatically by netdata +3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon) +4. Supports any number of data collection **modules** +5. Allows each **module** to have one or more data collection **jobs** +6. Each **job** collects one or more metrics from a single data source + +# Motivation + +Node.js is well suited to asynchronous operations. It is fast and widely available. +Since data collection is not a CPU-intensive task, node.js is an ideal solution for it. + +`node.d.plugin` is a netdata plugin that provides an abstraction layer to allow easy and quick development of data +collectors in node.js. It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single +instance of node, thus lowering the memory footprint of data collection. + +Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`). +These will have to be developed using the guidelines of **[External Plugins](../plugins.d/)**. + +To run `node.js` plugins you need to have `node` installed on your system. + +In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`. +In this case the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking +`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal runs node.js. +For more information check the **[[Installation]]** guide. + +## configuring `node.d.plugin` + +`node.d.plugin` can work even without any configuration. Its default configuration file is +[/etc/netdata/node.d.conf](node.d.conf) (to edit it on your system run `/etc/netdata/edit-config node.d.conf`). + +## configuring `node.d.plugin` modules + +`node.d.plugin` modules accept configuration in `JSON` format. + +Unfortunately, `JSON` files do not accept comments, so the best way to document them is to have markdown text files +with instructions.
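+For example, a minimal module configuration (a sketch for the hypothetical `mymodule` used as the example later in this document, stored at `/etc/netdata/node.d/mymodule.conf`; the exact keys accepted inside each server entry are module-specific) could be: + +```json +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ { "name": "my_server_1" } ] +} +```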
+ +`JSON` has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain +configuration file cannot be loaded, we suggest verifying it at [http://jsonlint.com/](http://jsonlint.com/). + +The files in this directory provide usable examples for configuring each `node.d.plugin` module. + + +## debugging modules written for node.d.plugin + +To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand, +like this: + +```sh +# become user netdata +sudo su -s /bin/sh netdata + +# run the plugin in debug mode +/usr/libexec/netdata/plugins.d/node.d.plugin debug 1 X Y Z +``` + +`node.d.plugin` will run in `debug` mode (lots of debug info), with an update frequency of `1` second, evaluating only +the collector scripts `X` (i.e. `/usr/libexec/netdata/node.d/X.node.js`), `Y` and `Z`. +You can define zero or more modules. If none is defined, `node.d.plugin` will evaluate all modules available. + +Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running `node.d.plugin`: + +```sh +export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata" +``` + +--- + +## developing `node.d.plugin` modules + +Your data collection module should be split into 3 parts: + + - a function to fetch the data from its source. `node.d.plugin` can already fetch data from web sources, + so you don't need to do anything about it for http. + + - a function to process/manipulate the fetched data. This function will make a number of calls + to create charts and dimensions and pass the collected values to netdata. + This is the only function you need to write for collecting http JSON data. + + - a `configure` and an `update` function, which take care of your module configuration and data refresh + respectively. You can use the supplied ones. + +Your module will automatically be able to process any number of servers, with different settings (even different +data collection frequencies). You write just the code needed for one server and `node.d.plugin` will do the rest. +For each server you are going to fetch data from, you will have to create a `service` (more later).
+ +### writing the data collection module + +To provide a module called `mymodule`, you have to create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure: + +```js + +// the processor is needed only +// if you need a custom processor +// other than http +netdata.processors.myprocessor = { + name: 'myprocessor', + + process: function(service, callback) { + + /* do data collection here */ + + callback(data); + } +}; + +// this is the mymodule definition +var mymodule = { + processResponse: function(service, data) { + + /* send information to the netdata server here */ + + }, + + configure: function(config) { + var eligible_services = 0; + + if(typeof(config.servers) === 'undefined' || config.servers.length === 0) { + + /* + * create a service using internal defaults; + * this is used for auto-detecting the settings + * if possible + */ + + netdata.service({ + name: 'a name for this service', + update_every: this.update_every, + module: this, + processor: netdata.processors.myprocessor, + // any other information your processor needs + }).execute(this.processResponse); + + eligible_services++; + } + else { + + /* + * create a service for each server in the + * configuration file + */ + + var len = config.servers.length; + while(len--) { + var server = config.servers[len]; + + netdata.service({ + name: server.name, + update_every: server.update_every, + module: this, + processor: netdata.processors.myprocessor, + // any other information your processor needs + }).execute(this.processResponse); + + eligible_services++; + } + } + + return eligible_services; + }, + + update: function(service, callback) { + + /* + * this function is called when each service + * created by the configure function needs to + * collect updated values. + * + * You normally will not need to change it. + */ + + service.execute(function(service, data) { + mymodule.processResponse(service, data); + callback(); + }); + }, +}; + +module.exports = mymodule; +``` + +#### configure(config) + +`configure(config)` is called just once, when `node.d.plugin` starts. +The `config` argument will contain the contents of `/etc/netdata/node.d/mymodule.conf`. +This file should have the following format: + +```js +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ { /* server 1 */ }, { /* server 2 */ } ] +} +``` + +If the config file `/etc/netdata/node.d/mymodule.conf` does not give an `enable_autodetect` or `update_every`, these +will be added by `node.d.plugin`. So your module will always have them. + +The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever else is needed for `mymodule`. + +#### processResponse(data) + +`data` may be `null` or whatever the processor specified in the `service` returned. + +The `service` object defines a set of functions to allow you to send information to the netdata core about: + +1. Charts and dimension definitions +2.
Updated values from the collected values + +--- + +*FIXME: document an operational node.d.plugin data collector - the best example is the +[snmp collector](snmp/snmp.node.js)* diff --git a/collectors/node.d.plugin/fronius/Makefile.inc b/collectors/node.d.plugin/fronius/Makefile.inc new file mode 100644 index 000000000..da0743a88 --- /dev/null +++ b/collectors/node.d.plugin/fronius/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_node_DATA += fronius/fronius.node.js +# dist_nodeconfig_DATA += fronius/fronius.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += fronius/README.md fronius/Makefile.inc + diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md new file mode 100644 index 000000000..dd2846990 --- /dev/null +++ b/collectors/node.d.plugin/fronius/README.md @@ -0,0 +1,120 @@ +# fronius + +This module collects metrics from a configured Fronius Symo solar power installation. + +**Requirements** + * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`) + * Fronius Symo with network access (http) + +It produces per server: + +1. **Power** + * Current power input from the grid (positive values), output to the grid (negative values), in W + * Current power input from the solar panels, in W + * Current power stored in the accumulator (if present), in W (in theory, untested) + +2. **Consumption** + * Local consumption in W + +3. **Autonomy** + * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption. + * Relative self consumption in %. The lower, the better. + +4. **Energy** + * The energy produced during the current day, in kWh + * The energy produced during the current year, in kWh + +5. **Inverter** + * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present. + + +### configuration + +Sample: + +```json +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ + { + "name": "Symo", + "hostname": "symo.ip.or.dns", + "update_every": 5, + "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" + } + ] +} +``` + +If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `5`. + +--- + +[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m) + +The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M: + +- Datalogger version: 240.162630 +- Software version: 3.7.4-6 +- Hardware version: 2.4D + +Other products and versions may work, but without any guarantees. + +Example netdata configuration for node.d/fronius.conf. Copy this section to fronius.conf and change the name/IP. +The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 should be okay too. You can modify this per server.
+```json +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ + { + "name": "solar", + "hostname": "symo.ip.or.dns", + "update_every": 5, + "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" + } + ] +} +``` + +The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this: +```json +{ + "Head" : { + "RequestArguments" : {}, + "Status" : { + "Code" : 0, + "Reason" : "", + "UserMessage" : "" + }, + "Timestamp" : "2017-07-05T12:35:12+02:00" + }, + "Body" : { + "Data" : { + "Site" : { + "Mode" : "meter", + "P_Grid" : -6834.549847, + "P_Load" : -1271.450153, + "P_Akku" : null, + "P_PV" : 8106, + "rel_SelfConsumption" : 15.685297, + "rel_Autonomy" : 100, + "E_Day" : 35020, + "E_Year" : 5826076, + "E_Total" : 14788870, + "Meter_Location" : "grid" + }, + "Inverters" : { + "1" : { + "DT" : 123, + "P" : 8106, + "E_Day" : 35020, + "E_Year" : 5826076, + "E_Total" : 14788870 + } + } + } + } +} +``` diff --git a/collectors/node.d.plugin/fronius/fronius.node.js b/collectors/node.d.plugin/fronius/fronius.node.js new file mode 100644 index 000000000..436f3a325 --- /dev/null +++ b/collectors/node.d.plugin/fronius/fronius.node.js @@ -0,0 +1,400 @@ +"use strict"; +// SPDX-License-Identifier: GPL-3.0-or-later + +// This program will connect to one or more Fronius Symo Inverters +// to get the Solar Power Generated (current, today). + +// example configuration in netdata/conf.d/node.d/fronius.conf.md + +require("url"); +require("http"); +var netdata = require("netdata"); + +netdata.debug("loaded " + __filename + " plugin"); + +var fronius = { + name: "Fronius", + enable_autodetect: false, + update_every: 5, + base_priority: 60000, + charts: {}, + + powerGridId: "p_grid", + powerPvId: "p_pv", + powerAccuId: "p_akku", // not my typo! Using the ID from the API + consumptionLoadId: "p_load", + autonomyId: "rel_autonomy", + consumptionSelfId: "rel_selfconsumption", + solarConsumptionId: "solar_consumption", + energyTodayId: "e_day", + energyYearId: "e_year", + + createBasicDimension: function (id, name, divisor) { + return { + id: id, // the unique id of the dimension + name: name, // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: divisor, // the divisor + hidden: false // is hidden (boolean) + }; + }, + + // Gets the site power chart. Will be created if not existing.
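+ // Chart definitions are cached in fronius.charts (keyed by chart id), so each chart is defined once and reused on subsequent collections.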
+ getSitePowerChart: function (service, suffix) { + var id = this.getChartId(service, suffix); + var chart = fronius.charts[id]; + if (fronius.isDefined(chart)) return chart; + + var dim = {}; + dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "grid", 1); + dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "photovoltaics", 1); + dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "accumulator", 1); + + chart = { + id: id, // the unique id of the chart + name: "", // the unique name of the chart + title: service.name + " Current Site Power", // the title of the chart + units: "W", // the units of the chart dimensions + family: "power", // the family of the chart + context: "fronius.power", // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: fronius.base_priority + 1, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dim + }; + chart = service.chart(id, chart); + fronius.charts[id] = chart; + + return chart; + }, + + // Gets the site consumption chart. Will be created if not existing. + getSiteConsumptionChart: function (service, suffix) { + var id = this.getChartId(service, suffix); + var chart = fronius.charts[id]; + if (fronius.isDefined(chart)) return chart; + var dim = {}; + dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "load", 1); + + chart = { + id: id, // the unique id of the chart + name: "", // the unique name of the chart + title: service.name + " Current Load", // the title of the chart + units: "W", // the units of the chart dimensions + family: "consumption", // the family of the chart + context: "fronius.consumption", // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: fronius.base_priority + 2, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dim + }; + chart = service.chart(id, chart); + fronius.charts[id] = chart; + + return chart; + }, + + // Gets the site autonomy chart. Will be created if not existing. + getSiteAutonomyChart: function (service, suffix) { + var id = this.getChartId(service, suffix); + var chart = fronius.charts[id]; + if (fronius.isDefined(chart)) return chart; + var dim = {}; + dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "autonomy", 1); + dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "self_consumption", 1); + dim[fronius.solarConsumptionId] = this.createBasicDimension(fronius.solarConsumptionId, "solar_consumption", 1); + + chart = { + id: id, // the unique id of the chart + name: "", // the unique name of the chart + title: service.name + " Current Autonomy", // the title of the chart + units: "%", // the units of the chart dimensions + family: "autonomy", // the family of the chart + context: "fronius.autonomy", // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: fronius.base_priority + 3, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dim + }; + chart = service.chart(id, chart); + fronius.charts[id] = chart; + + return chart; + }, + + // Gets the site energy chart for today. Will be created if not existing.
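+ // The API reports energy values in Wh; the divisor of 1000 on the energy dimensions below presents them as kWh.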
+ getSiteEnergyTodayChart: function (service, suffix) { + var chartId = this.getChartId(service, suffix); + var chart = fronius.charts[chartId]; + if (fronius.isDefined(chart)) return chart; + var dim = {}; + dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "today", 1000); + chart = { + id: chartId, // the unique id of the chart + name: "", // the unique name of the chart + title: service.name + " Energy production for today",// the title of the chart + units: "kWh", // the units of the chart dimensions + family: "energy", // the family of the chart + context: "fronius.energy.today", // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: fronius.base_priority + 4, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dim + }; + chart = service.chart(chartId, chart); + fronius.charts[chartId] = chart; + + return chart; + }, + + // Gets the site energy chart for this year. Will be created if not existing. + getSiteEnergyYearChart: function (service, suffix) { + var chartId = this.getChartId(service, suffix); + var chart = fronius.charts[chartId]; + if (fronius.isDefined(chart)) return chart; + var dim = {}; + dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "year", 1000); + chart = { + id: chartId, // the unique id of the chart + name: "", // the unique name of the chart + title: service.name + " Energy production for this year",// the title of the chart + units: "kWh", // the units of the chart dimensions + family: "energy", // the family of the chart + context: "fronius.energy.year", // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: fronius.base_priority + 5, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dim + }; + chart = service.chart(chartId, chart); + fronius.charts[chartId] = chart; + + return chart; + }, + + // Gets the inverter power chart. Will be created if not existing.
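+ // Inverter keys reported by the API are usually plain numbers ("1", "2", ...); numeric keys get an "inverter_" prefix to form readable dimension names.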
+ // Needs the array of inverters in order to create a chart with all inverters as dimensions + getInverterPowerChart: function (service, suffix, inverters) { + var chartId = this.getChartId(service, suffix); + var chart = fronius.charts[chartId]; + if (fronius.isDefined(chart)) return chart; + + var dim = {}; + for (var key in inverters) { + if (inverters.hasOwnProperty(key)) { + var name = key; + if (!isNaN(key)) name = "inverter_" + key; + dim[key] = this.createBasicDimension("inverter_" + key, name, 1); + } + } + + chart = { + id: chartId, // the unique id of the chart + name: "", // the unique name of the chart + title: service.name + " Current Inverter Output",// the title of the chart + units: "W", // the units of the chart dimensions + family: "inverters", // the family of the chart + context: "fronius.inverter.output", // the context of the chart + type: netdata.chartTypes.stacked, // the type of the chart + priority: fronius.base_priority + 6, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dim + }; + chart = service.chart(chartId, chart); + fronius.charts[chartId] = chart; + + return chart; + }, + + processResponse: function (service, content) { + var json = fronius.convertToJson(content); + if (json === null) return; + + // add the service + service.commit(); + + var chartDefinitions = fronius.parseCharts(service, json); + var chartCount = chartDefinitions.length; + while (chartCount--) { + var chartObj = chartDefinitions[chartCount]; + service.begin(chartObj.chart); + var dimCount = chartObj.dimensions.length; + while (dimCount--) { + var dim = chartObj.dimensions[dimCount]; + service.set(dim.name, dim.value); + } + service.end(); + } + }, + + parseCharts: function (service, json) { + var site = json.Body.Data.Site; + return [ + this.parsePowerChart(service, site), + this.parseConsumptionChart(service, site), + this.parseAutonomyChart(service, site), + this.parseEnergyTodayChart(service, site), + this.parseEnergyYearChart(service, site), + this.parseInverterChart(service, json.Body.Data.Inverters) + ]; + }, + + parsePowerChart: function (service, site) { + return this.getChart(this.getSitePowerChart(service, "power"), + [ + this.getDimension(this.powerGridId, Math.round(site.P_Grid)), + this.getDimension(this.powerPvId, Math.round(Math.max(site.P_PV, 0))), + this.getDimension(this.powerAccuId, Math.round(site.P_Akku)) + ] + ); + }, + + parseConsumptionChart: function (service, site) { + return this.getChart(this.getSiteConsumptionChart(service, "consumption"), + [this.getDimension(this.consumptionLoadId, Math.round(Math.abs(site.P_Load)))] + ); + }, + + parseAutonomyChart: function (service, site) { + var selfConsumption = site.rel_SelfConsumption; + var solarConsumption = 0; + var load = Math.abs(site.P_Load); + var power = Math.max(site.P_PV, 0); + if (power <= 0) solarConsumption = 0; + else if (load >= power) solarConsumption = 100; + else solarConsumption = 100 / power * load; + return this.getChart(this.getSiteAutonomyChart(service, "autonomy"), + [ + this.getDimension(this.autonomyId, Math.round(site.rel_Autonomy)), + this.getDimension(this.consumptionSelfId, Math.round(selfConsumption === null ? 
100 : selfConsumption)), + this.getDimension(this.solarConsumptionId, Math.round(solarConsumption)) + ] + ); + }, + + parseEnergyTodayChart: function (service, site) { + return this.getChart(this.getSiteEnergyTodayChart(service, "energy.today"), + [this.getDimension(this.energyTodayId, Math.round(Math.max(site.E_Day, 0)))] + ); + }, + + parseEnergyYearChart: function (service, site) { + return this.getChart(this.getSiteEnergyYearChart(service, "energy.year"), + [this.getDimension(this.energyYearId, Math.round(Math.max(site.E_Year, 0)))] + ); + }, + + parseInverterChart: function (service, inverters) { + var dimensions = []; + for (var key in inverters) { + if (inverters.hasOwnProperty(key)) { + dimensions.push(this.getDimension(key, Math.round(inverters[key].P))); + } + } + return this.getChart(this.getInverterPowerChart(service, "inverters.output", inverters), dimensions); + }, + + getDimension: function (name, value) { + return { + name: name, + value: value + }; + }, + + getChart: function (chart, dimensions) { + return { + chart: chart, + dimensions: dimensions + }; + }, + + getChartId: function (service, suffix) { + return "fronius_" + service.name + "." + suffix; + }, + + convertToJson: function (httpBody) { + if (httpBody === null) return null; + var json = httpBody; + // can't parse if it's already a json object, + // the check enables easier testing if the httpBody is already valid JSON. + if (typeof httpBody !== "object") { + try { + json = JSON.parse(httpBody); + } catch (error) { + netdata.error("fronius: Got a response, but it is not valid JSON. Ignoring. Error: " + error.message); + return null; + } + } + return this.isResponseValid(json) ? json : null; + }, + + // some basic validation + isResponseValid: function (json) { + if (this.isUndefined(json.Body)) return false; + if (this.isUndefined(json.Body.Data)) return false; + if (this.isUndefined(json.Body.Data.Site)) return false; + return this.isDefined(json.Body.Data.Inverters); + }, + + // module.serviceExecute() + // this function is called only from this module + // its purpose is to prepare the request and call + // netdata.serviceExecute() + serviceExecute: function (name, uri, update_every) { + netdata.debug(this.name + ": " + name + ": url: " + uri + ", update_every: " + update_every); + + var service = netdata.service({ + name: name, + request: netdata.requestFromURL("http://" + uri), + update_every: update_every, + module: this + }); + service.execute(this.processResponse); + }, + + + configure: function (config) { + if (fronius.isUndefined(config.servers)) return 0; + var added = 0; + var len = config.servers.length; + while (len--) { + var server = config.servers[len]; + if (fronius.isUndefined(server.update_every)) server.update_every = this.update_every; + if (fronius.areUndefined([server.name, server.hostname, server.api_path])) continue; + + var url = server.hostname + server.api_path; + this.serviceExecute(server.name, url, server.update_every); + added++; + } + return added; + }, + + // module.update() + // this is called repeatedly to collect data, by calling + // netdata.serviceExecute() + update: function (service, callback) { + service.execute(function (serv, data) { + service.module.processResponse(serv, data); + callback(); + }); + }, + + isUndefined: function (value) { + return typeof value === "undefined"; + }, + + areUndefined: function (valueArray) { + var i = 0; + for (i; i < valueArray.length; i++) { + if (this.isUndefined(valueArray[i])) return true; + } + return false; + }, + + isDefined: 
function (value) { + return typeof value !== "undefined"; + } +}; + +module.exports = fronius; diff --git a/collectors/node.d.plugin/named/Makefile.inc b/collectors/node.d.plugin/named/Makefile.inc new file mode 100644 index 000000000..95f423012 --- /dev/null +++ b/collectors/node.d.plugin/named/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_node_DATA += named/named.node.js +# dist_nodeconfig_DATA += named/named.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += named/README.md named/Makefile.inc + diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md new file mode 100644 index 000000000..977a5015f --- /dev/null +++ b/collectors/node.d.plugin/named/README.md @@ -0,0 +1,342 @@ +# ISC Bind Statistics + +Using this netdata collector, you can monitor one or more ISC Bind servers. + +## Example netdata charts + +Depending on the number of views your bind has, you may get a large number of charts. +Here is how it looks with just one view: + +![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png) +![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png) + +## How it works + +The plugin will execute (from within node.js) the equivalent of: + +```sh +curl "http://localhost:8888/json/v1/server" +``` + +Here is a sample of the output this command produces. + +```js +{ + "json-stats-version":"1.0", + "boot-time":"2016-01-31T08:20:48Z", + "config-time":"2016-01-31T09:28:03Z", + "current-time":"2016-02-02T22:22:20Z", + "opcodes":{ + "QUERY":247816, + "IQUERY":0, + "STATUS":0, + "RESERVED3":0, + "NOTIFY":0, + "UPDATE":3813, + "RESERVED6":0, + "RESERVED7":0, + "RESERVED8":0, + "RESERVED9":0, + "RESERVED10":0, + "RESERVED11":0, + "RESERVED12":0, + "RESERVED13":0, + "RESERVED14":0, + "RESERVED15":0 + }, + "qtypes":{ + "A":89519, + "NS":863, + "CNAME":1, + "SOA":1, + "PTR":116779, + "MX":276, + "TXT":198, + "AAAA":39324, + "SRV":850, + "ANY":5 + }, + "nsstats":{ + "Requestv4":251630, + "ReqEdns0":1255, + "ReqTSIG":3813, + "ReqTCP":57, + "AuthQryRej":1455, + "RecQryRej":122, + "Response":245918, + "TruncatedResp":44, + "RespEDNS0":1255, + "RespTSIG":3813, + "QrySuccess":205159, + "QryAuthAns":119495, + "QryNoauthAns":120770, + "QryNxrrset":32711, + "QrySERVFAIL":262, + "QryNXDOMAIN":2395, + "QryRecursion":40885, + "QryDuplicate":5712, + "QryFailure":1577, + "UpdateDone":2514, + "UpdateFail":1299, + "UpdateBadPrereq":1276, + "QryUDP":246194, + "QryTCP":45, + "OtherOpt":101 + }, + "views":{ + "local":{ + "resolver":{ + "stats":{ + "Queryv4":74577, + "Responsev4":67032, + "NXDOMAIN":601, + "SERVFAIL":5, + "FORMERR":7, + "EDNS0Fail":7, + "Truncated":3071, + "Lame":4, + "Retry":11826, + "QueryTimeout":1838, + "GlueFetchv4":6864, + "GlueFetchv4Fail":30, + "QryRTT10":112, + "QryRTT100":42900, + "QryRTT500":23275, + "QryRTT800":534, + "QryRTT1600":97, + "QryRTT1600+":20, + "BucketSize":31, + "REFUSED":13 + }, + "qtypes":{ + "A":64931, + "NS":870, + "CNAME":185, + "PTR":5, + "MX":49, + "TXT":149, + "AAAA":7972, + "SRV":416 + }, + "cache":{ + "A":40356, + "NS":8032, + "CNAME":14477, + "PTR":2, + "MX":21, + "TXT":32, + "AAAA":3301, + "SRV":94, + "DS":237, + "RRSIG":2301, + "NSEC":126, + "!A":52, + "!NS":4, + "!TXT":1, +
"!AAAA":3797, + "!SRV":9, + "NXDOMAIN":590 + }, + "cachestats":{ + "CacheHits":1085188, + "CacheMisses":109, + "QueryHits":464755, + "QueryMisses":55624, + "DeleteLRU":0, + "DeleteTTL":42615, + "CacheNodes":5188, + "CacheBuckets":2079, + "TreeMemTotal":2326026, + "TreeMemInUse":1508075, + "HeapMemMax":132096, + "HeapMemTotal":393216, + "HeapMemInUse":132096 + }, + "adb":{ + "nentries":1021, + "entriescnt":3157, + "nnames":1021, + "namescnt":3022 + } + } + }, + "public":{ + "resolver":{ + "stats":{ + "BucketSize":31 + }, + "qtypes":{ + }, + "cache":{ + }, + "cachestats":{ + "CacheHits":0, + "CacheMisses":0, + "QueryHits":0, + "QueryMisses":0, + "DeleteLRU":0, + "DeleteTTL":0, + "CacheNodes":0, + "CacheBuckets":64, + "TreeMemTotal":287392, + "TreeMemInUse":29608, + "HeapMemMax":1024, + "HeapMemTotal":262144, + "HeapMemInUse":1024 + }, + "adb":{ + "nentries":1021, + "nnames":1021 + } + } + }, + "_bind":{ + "resolver":{ + "stats":{ + "BucketSize":31 + }, + "qtypes":{ + }, + "cache":{ + }, + "cachestats":{ + "CacheHits":0, + "CacheMisses":0, + "QueryHits":0, + "QueryMisses":0, + "DeleteLRU":0, + "DeleteTTL":0, + "CacheNodes":0, + "CacheBuckets":64, + "TreeMemTotal":287392, + "TreeMemInUse":29608, + "HeapMemMax":1024, + "HeapMemTotal":262144, + "HeapMemInUse":1024 + }, + "adb":{ + "nentries":1021, + "nnames":1021 + } + } + } + } +} +``` + + +From this output it collects: + +- Global Received Requests by IP version (IPv4, IPv6) +- Global Successful Queries +- Current Recursive Clients +- Global Queries by IP Protocol (TCP, UDP) +- Global Queries Analysis +- Global Received Updates +- Global Query Failures +- Global Query Failures Analysis +- Other Global Server Statistics +- Global Incoming Requests by OpCode +- Global Incoming Requests by Query Type +- Global Socket Statistics (will only work if the url is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and probably will account for 0.5% CPU overhead alone, per bind server added) +- Per View Statistics (the following set will be added for each bind view): + - View, Resolver Active Queries + - View, Resolver Statistics + - View, Resolver Round Trip Timings + - View, Requests by Query Type + +## Configuration + +The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents: + +```js +{ + "enable_autodetect": true, + "update_every": 5, + "servers": [ + { + "name": "bind1", + "url": "http://127.0.0.1:8888/json/v1/server", + "update_every": 1 + }, + { + "name": "bind2", + "url": "http://10.1.2.3:8888/json/v1/server", + "update_every": 2 + } + ] +} +``` + +You can add any number of bind servers. + +If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server` which, if successful will be added too. + +### XML instead of JSON, from bind + +The collector can also accept bind URLs that return XML output. This might required if you cannot have bind 9.10+ with JSON but you have an version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for versions supported. + +In such cases, use a URL like this: + +```sh +curl "http://localhost:8888/xml/v3/server" +``` + +Only `xml` and `v3` has been tested. + +Keep in mind though, that XML parsing is done using javascript code, which requires a triple conversion: + +1. 
from XML to JSON using a javascript XML parser (**CPU intensive**), +2. which is then transformed to emulate bind's native JSON output (**CPU intensive** - and yes, the JSON converted from XML differs from the native JSON - even bind produces different names for various attributes), +3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON). + +In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON. + +**So, if you can use the JSON output of bind, prefer it over XML**. Also keep in mind that bind itself will use more CPU when generating XML instead of JSON. + +The XML interface of bind is not autodetected. +You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this: + +```js +{ + "enable_autodetect": false, + "update_every": 1, + "servers": [ + { + "name": "local", + "url": "http://localhost:8888/xml/v3/server", + "update_every": 1 + } + ] +} +``` + +Of course, you can monitor more than one bind server. Each one can be configured with either JSON or XML output. + +## Auto-detection + +Auto-detection is controlled by `enable_autodetect` in the config file. It is enabled by default, so if the collector can connect to `http://localhost:8888/json/v1/server` and receive bind statistics, monitoring will be enabled automatically. + +## Bind (named) configuration + +To use this plugin, you have to have bind v9.10+ properly compiled to provide statistics in `JSON` format. + +For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
+ +Normally, you will need something like this in your `named.conf`: + +``` +statistics-channels { + inet 127.0.0.1 port 8888 allow { 127.0.0.1; }; + inet ::1 port 8888 allow { ::1; }; +}; +``` + +(use the IPv4 or IPv6 line depending on what you are using; you can also use both) + +Verify it works by running the following command (the collector is written in node.js and will query your bind server directly, but if this command works, the collector should be able to work too): + +```sh +curl "http://localhost:8888/json/v1/server" +``` + diff --git a/collectors/node.d.plugin/named/named.node.js b/collectors/node.d.plugin/named/named.node.js new file mode 100644 index 000000000..d13c608cb --- /dev/null +++ b/collectors/node.d.plugin/named/named.node.js @@ -0,0 +1,610 @@ +'use strict'; +// SPDX-License-Identifier: GPL-3.0-or-later + +// collect statistics from bind (named) v9.10+ +// +// bind statistics documentation at: +// http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/ +// https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics + +// example configuration in /etc/netdata/node.d/named.conf +// the module supports auto-detection if bind is running at localhost + +/* +{ + "enable_autodetect": true, + "update_every": 5, + "servers": [ + { + "name": "bind1", + "url": "http://127.0.0.1:8888/json/v1/server", + "update_every": 1 + }, + { + "name": "bind2", + "url": "http://10.0.0.1:8888/xml/v3/server", + "update_every": 2 + } + ] +} +*/ + +// the following is the bind named.conf configuration required + +/* +statistics-channels { + inet 127.0.0.1 port 8888 allow { 127.0.0.1; }; +}; +*/ + +require('url'); +require('http'); +var XML = require('pixl-xml'); +var netdata = require('netdata'); + +if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, 'plugin'); + +var named = { + name: __filename, + enable_autodetect: true, + update_every: 1, + base_priority: 60000, + charts: {}, + + chartFromMembersCreate: function(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) { + var chart = { + id: id, // the unique id of the chart + name: '', // the unique name of the chart + title: service.name + ' ' + title_suffix, // the title of the chart + units: units, // the units of the chart dimensions + family: family, // the family of the chart + context: context, // the context of the chart + type: type, // the type of the chart + priority: priority, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: {} + }; + + var found = 0; + var dims = Object.keys(obj); + var len = dims.length; + for(var i = 0; i < len ;i++) { + var x = dims[i]; + + if(typeof(obj[x]) !== 'undefined' && obj[x] !== 0) { + found++; + chart.dimensions[x] = { + id: x, // the unique id of the dimension + name: x, // the name of the dimension + algorithm: algorithm, // the id of the netdata algorithm + multiplier: multiplier, // the multiplier + divisor: divisor, // the divisor + hidden: false // is hidden (boolean) + }; + } + } + + if(!found) + return null; + + chart = service.chart(id, chart); + this.charts[id] = chart; + return chart; + }, + + chartFromMembers: function(service, obj, id_suffix, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) { + var id = 'named_' + service.name + '.'
+ id_suffix; + var chart = this.charts[id]; + var dims, len, x, i; + + if(typeof chart === 'undefined') { + chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor); + if(chart === null) return false; + } + else { + // check if we need to re-generate the chart + dims = Object.keys(obj); + len = dims.length; + for(i = 0; i < len ;i++) { + x = dims[i]; + if(typeof(chart.dimensions[x]) === 'undefined') { + chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor); + if(chart === null) return false; + break; + } + } + } + + service.begin(chart); + + var found = 0; + dims = Object.keys(obj); + len = dims.length; + for(i = 0; i < len ;i++) { + x = dims[i]; + if(typeof(chart.dimensions[x]) !== 'undefined') { + found++; + service.set(x, obj[x]); + } + } + + service.end(); + + return (found > 0); + }, + + // an index to map values to different charts + lookups: { + nsstats: {}, + resolver_stats: {}, + numfetch: {} + }, + + // transform the XML response of bind + // to the JSON response of bind + xml2js: function(service, data_xml) { + var d = XML.parse(data_xml); + if(d === null) return null; + + var a, aa, alen, alen2; + + var data = {}; + var len = d.server.counters.length; + while(len--) { + a = d.server.counters[len]; + if(typeof a.counter === 'undefined') continue; + if(a.type === 'opcode') a.type = 'opcodes'; + else if(a.type === 'qtype') a.type = 'qtypes'; + else if(a.type === 'nsstat') a.type = 'nsstats'; + aa = data[a.type] = {}; + alen = 0; + alen2 = a.counter.length; + while(alen < alen2) { + aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10); + alen++; + } + } + + data.views = {}; + var vlen = d.views.view.length; + while(vlen--) { + var vname = d.views.view[vlen].name; + data.views[vname] = { resolver: {} }; + len = d.views.view[vlen].counters.length; + while(len--) { + a = d.views.view[vlen].counters[len]; + if(typeof a.counter === 'undefined') continue; + if(a.type === 'resstats') a.type = 'stats'; + else if(a.type === 'resqtype') a.type = 'qtypes'; + else if(a.type === 'adbstat') a.type = 'adb'; + aa = data.views[vname].resolver[a.type] = {}; + alen = 0; + alen2 = a.counter.length; + while(alen < alen2) { + aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10); + alen++; + } + } + } + + return data; + }, + + processResponse: function(service, data) { + if(data !== null) { + var r, x, look, id, chart, keys, len; + + // parse XML or JSON + // depending on the URL given + if(service.request.path.match(/^\/xml/) !== null) + r = named.xml2js(service, data); + else + r = JSON.parse(data); + + if(typeof r === 'undefined' || r === null) { + service.error("Cannot parse these data: " + data.toString()); + return; + } + + if(service.added !== true) + service.commit(); + + if(typeof r.nsstats !== 'undefined') { + // we split the nsstats object into several others + var global_requests = {}, global_requests_enable = false; + var global_failures = {}, global_failures_enable = false; + var global_failures_detail = {}, global_failures_detail_enable = false; + var global_updates = {}, global_updates_enable = false; + var protocol_queries = {}, protocol_queries_enable = false; + var global_queries = {}, global_queries_enable = false; + var global_queries_success = {}, global_queries_success_enable = false; + var default_enable = false; + var RecursClients = 0; + + // RecursClients is an absolute value + if(typeof
r.nsstats['RecursClients'] !== 'undefined') { + RecursClients = r.nsstats['RecursClients']; + delete r.nsstats['RecursClients']; + } + + keys = Object.keys(r.nsstats); + len = keys.length; + while(len--) { + x = keys[len]; + + // we maintain an index of the values found + // mapping them to the split objects + + look = named.lookups.nsstats[x]; + if(typeof look === 'undefined') { + // a new value, not found in the index + // index it: + if(x === 'Requestv4') { + named.lookups.nsstats[x] = { + name: 'IPv4', + type: 'global_requests' + }; + } + else if(x === 'Requestv6') { + named.lookups.nsstats[x] = { + name: 'IPv6', + type: 'global_requests' + }; + } + else if(x === 'QryFailure') { + named.lookups.nsstats[x] = { + name: 'failures', + type: 'global_failures' + }; + } + else if(x === 'QryUDP') { + named.lookups.nsstats[x] = { + name: 'UDP', + type: 'protocol_queries' + }; + } + else if(x === 'QryTCP') { + named.lookups.nsstats[x] = { + name: 'TCP', + type: 'protocol_queries' + }; + } + else if(x === 'QrySuccess') { + named.lookups.nsstats[x] = { + name: 'queries', + type: 'global_queries_success' + }; + } + else if(x.match(/QryRej$/) !== null) { + named.lookups.nsstats[x] = { + name: x, + type: 'global_failures_detail' + }; + } + else if(x.match(/^Qry/) !== null) { + named.lookups.nsstats[x] = { + name: x, + type: 'global_queries' + }; + } + else if(x.match(/^Update/) !== null) { + named.lookups.nsstats[x] = { + name: x, + type: 'global_updates' + }; + } + else { + // values not mapped, will remain + // in the default map + named.lookups.nsstats[x] = { + name: x, + type: 'default' + }; + } + + look = named.lookups.nsstats[x]; + // netdata.error('lookup nsstats value: ' + x + ' >>> ' + named.lookups.nsstats[x].type); + } + + switch(look.type) { + case 'global_requests': global_requests[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_requests_enable = true; break; + case 'global_queries': global_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_enable = true; break; + case 'global_queries_success': global_queries_success[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_success_enable = true; break; + case 'global_updates': global_updates[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_updates_enable = true; break; + case 'protocol_queries': protocol_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; protocol_queries_enable = true; break; + case 'global_failures': global_failures[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_enable = true; break; + case 'global_failures_detail': global_failures_detail[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_detail_enable = true; break; + default: default_enable = true; break; + } + } + + if(global_requests_enable === true) + service.module.chartFromMembers(service, global_requests, 'received_requests', 'Bind, Global Received Requests by IP version', 'requests/s', 'requests', 'named.requests', netdata.chartTypes.stacked, named.base_priority + 1, netdata.chartAlgorithms.incremental, 1, 1); + + if(global_queries_success_enable === true) + service.module.chartFromMembers(service, global_queries_success, 'global_queries_success', 'Bind, Global Successful Queries', 'queries/s', 'queries', 'named.queries_succcess', netdata.chartTypes.line, named.base_priority + 2, netdata.chartAlgorithms.incremental, 1, 1); + + if(protocol_queries_enable === true) + service.module.chartFromMembers(service, protocol_queries, 'protocols_queries', 'Bind, Global Queries by IP Protocol',
'queries/s', 'queries', 'named.protocol_queries', netdata.chartTypes.stacked, named.base_priority + 3, netdata.chartAlgorithms.incremental, 1, 1); + + if(global_queries_enable === true) + service.module.chartFromMembers(service, global_queries, 'global_queries', 'Bind, Global Queries Analysis', 'queries/s', 'queries', 'named.global_queries', netdata.chartTypes.stacked, named.base_priority + 4, netdata.chartAlgorithms.incremental, 1, 1); + + if(global_updates_enable === true) + service.module.chartFromMembers(service, global_updates, 'received_updates', 'Bind, Global Received Updates', 'updates/s', 'updates', 'named.global_updates', netdata.chartTypes.stacked, named.base_priority + 5, netdata.chartAlgorithms.incremental, 1, 1); + + if(global_failures_enable === true) + service.module.chartFromMembers(service, global_failures, 'query_failures', 'Bind, Global Query Failures', 'failures/s', 'failures', 'named.global_failures', netdata.chartTypes.line, named.base_priority + 6, netdata.chartAlgorithms.incremental, 1, 1); + + if(global_failures_detail_enable === true) + service.module.chartFromMembers(service, global_failures_detail, 'query_failures_detail', 'Bind, Global Query Failures Analysis', 'failures/s', 'failures', 'named.global_failures_detail', netdata.chartTypes.stacked, named.base_priority + 7, netdata.chartAlgorithms.incremental, 1, 1); + + if(default_enable === true) + service.module.chartFromMembers(service, r.nsstats, 'nsstats', 'Bind, Other Global Server Statistics', 'operations/s', 'other', 'named.nsstats', netdata.chartTypes.line, named.base_priority + 8, netdata.chartAlgorithms.incremental, 1, 1); + + // RecursClients chart + id = 'named_' + service.name + '.recursive_clients'; + chart = named.charts[id]; + + if(typeof chart === 'undefined') { + chart = { + id: id, // the unique id of the chart + name: '', // the unique name of the chart + title: service.name + ' Bind, Current Recursive Clients', // the title of the chart + units: 'clients', // the units of the chart dimensions + family: 'clients', // the family of the chart + context: 'named.recursive_clients', // the context of the chart + type: netdata.chartTypes.line, // the type of the chart + priority: named.base_priority + 1, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: { + 'clients': { + id: 'clients', // the unique id of the dimension + name: '', // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1, // the divisor + hidden: false // is hidden (boolean) + } + } + }; + + chart = service.chart(id, chart); + named.charts[id] = chart; + } + + service.begin(chart); + service.set('clients', RecursClients); + service.end(); + } + + if(typeof r.opcodes !== 'undefined') + service.module.chartFromMembers(service, r.opcodes, 'in_opcodes', 'Bind, Global Incoming Requests by OpCode', 'requests/s', 'requests', 'named.in_opcodes', netdata.chartTypes.stacked, named.base_priority + 9, netdata.chartAlgorithms.incremental, 1, 1); + + if(typeof r.qtypes !== 'undefined') + service.module.chartFromMembers(service, r.qtypes, 'in_qtypes', 'Bind, Global Incoming Requests by Query Type', 'requests/s', 'requests', 'named.in_qtypes', netdata.chartTypes.stacked, named.base_priority + 10, netdata.chartAlgorithms.incremental, 1, 1); + + if(typeof r.sockstats !== 'undefined') + service.module.chartFromMembers(service, r.sockstats, 
'in_sockstats', 'Bind, Global Socket Statistics', 'operations/s', 'sockets', 'named.in_sockstats', netdata.chartTypes.line, named.base_priority + 11, netdata.chartAlgorithms.incremental, 1, 1);
+
+        if(typeof r.views !== 'undefined') {
+            keys = Object.keys(r.views);
+            len = keys.length;
+            while(len--) {
+                x = keys[len];
+                var resolver = r.views[x].resolver;
+
+                if(typeof resolver !== 'undefined') {
+                    if(typeof resolver.stats !== 'undefined') {
+                        var NumFetch = 0;
+                        var key = service.name + '.' + x;
+                        var rtt = {}, rtt_enable = false;
+                        default_enable = false;
+
+                        // NumFetch is an absolute value
+                        if(typeof resolver.stats['NumFetch'] !== 'undefined') {
+                            named.lookups.numfetch[key] = true;
+                            NumFetch = resolver.stats['NumFetch'];
+                            delete resolver.stats['NumFetch'];
+                        }
+                        if(typeof resolver.stats['BucketSize'] !== 'undefined') {
+                            delete resolver.stats['BucketSize'];
+                        }
+
+                        // split the QryRTT* from the main chart
+                        var ykeys = Object.keys(resolver.stats);
+                        var ylen = ykeys.length;
+                        while(ylen--) {
+                            var y = ykeys[ylen];
+
+                            // we maintain an index of the values found
+                            // mapping them to split objects
+
+                            look = named.lookups.resolver_stats[y];
+                            if(typeof look === 'undefined') {
+                                if(y.match(/^QryRTT/) !== null) {
+                                    named.lookups.resolver_stats[y] = {
+                                        name: y,
+                                        type: 'rtt'
+                                    };
+                                }
+                                else {
+                                    named.lookups.resolver_stats[y] = {
+                                        name: y,
+                                        type: 'default'
+                                    };
+                                }
+
+                                look = named.lookups.resolver_stats[y];
+                                // netdata.error('lookup resolver stats value: ' + y + ' >>> ' + look.type);
+                            }
+
+                            switch(look.type) {
+                                case 'rtt': rtt[look.name] = resolver.stats[y]; delete resolver.stats[y]; rtt_enable = true; break;
+                                default: default_enable = true; break;
+                            }
+                        }
+
+                        if(rtt_enable)
+                            service.module.chartFromMembers(service, rtt, 'view_resolver_rtt_' + x, 'Bind, ' + x + ' View, Resolver Round Trip Timings', 'queries/s', 'view_' + x, 'named.resolver_rtt', netdata.chartTypes.stacked, named.base_priority + 12, netdata.chartAlgorithms.incremental, 1, 1);
+
+                        if(default_enable)
+                            service.module.chartFromMembers(service, resolver.stats, 'view_resolver_stats_' + x, 'Bind, ' + x + ' View, Resolver Statistics', 'operations/s', 'view_' + x, 'named.resolver_stats', netdata.chartTypes.line, named.base_priority + 13, netdata.chartAlgorithms.incremental, 1, 1);
+
+                        // NumFetch chart
+                        if(typeof named.lookups.numfetch[key] !== 'undefined') {
+                            id = 'named_' + service.name + '.view_resolver_numfetch_' + x;
+                            chart = named.charts[id];
+
+                            if(typeof chart === 'undefined') {
+                                chart = {
+                                    id: id,                                       // the unique id of the chart
+                                    name: '',                                     // the unique name of the chart
+                                    title: service.name + ' Bind, ' + x + ' View, Resolver Active Queries', // the title of the chart
+                                    units: 'queries',                             // the units of the chart dimensions
+                                    family: 'view_' + x,                          // the family of the chart
+                                    context: 'named.resolver_active_queries',     // the context of the chart
+                                    type: netdata.chartTypes.line,                // the type of the chart
+                                    priority: named.base_priority + 1001,         // the priority relative to others in the same family
+                                    update_every: service.update_every,           // the expected update frequency of the chart
+                                    dimensions: {
+                                        'queries': {
+                                            id: 'queries',                        // the unique id of the dimension
+                                            name: '',                             // the name of the dimension
+                                            algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+                                            multiplier: 1,                        // the multiplier
+                                            divisor: 1,                           // the divisor
+                                            hidden: false                         // is hidden (boolean)
+                                        }
+                                    }
+                                };
+
+                                chart = service.chart(id, chart);
+                                named.charts[id] = chart;
+                            }
+
+                            service.begin(chart);
+
service.set('queries', NumFetch); + service.end(); + } + } + } + + if(typeof resolver.qtypes !== 'undefined') + service.module.chartFromMembers(service, resolver.qtypes, 'view_resolver_qtypes_' + x, 'Bind, ' + x + ' View, Requests by Query Type', 'requests/s', 'view_' + x, 'named.resolver_qtypes', netdata.chartTypes.stacked, named.base_priority + 14, netdata.chartAlgorithms.incremental, 1, 1); + + //if(typeof resolver.cache !== 'undefined') + // service.module.chartFromMembers(service, resolver.cache, 'view_resolver_cache_' + x, 'Bind, ' + x + ' View, Cache Entries', 'entries', 'view_' + x, 'named.resolver_cache', netdata.chartTypes.stacked, named.base_priority + 15, netdata.chartAlgorithms.absolute, 1, 1); + + if(typeof resolver.cachestats['CacheHits'] !== 'undefined' && resolver.cachestats['CacheHits'] > 0) { + id = 'named_' + service.name + '.view_resolver_cachehits_' + x; + chart = named.charts[id]; + + if(typeof chart === 'undefined') { + chart = { + id: id, // the unique id of the chart + name: '', // the unique name of the chart + title: service.name + ' Bind, ' + x + ' View, Resolver Cache Hits', // the title of the chart + units: 'operations/s', // the units of the chart dimensions + family: 'view_' + x, // the family of the chart + context: 'named.resolver_cache_hits', // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: named.base_priority + 1100, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: { + 'CacheHits': { + id: 'CacheHits', // the unique id of the dimension + name: 'hits', // the name of the dimension + algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1, // the divisor + hidden: false // is hidden (boolean) + }, + 'CacheMisses': { + id: 'CacheMisses', // the unique id of the dimension + name: 'misses', // the name of the dimension + algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm + multiplier: -1, // the multiplier + divisor: 1, // the divisor + hidden: false // is hidden (boolean) + } + } + }; + + chart = service.chart(id, chart); + named.charts[id] = chart; + } + + service.begin(chart); + service.set('CacheHits', resolver.cachestats['CacheHits']); + service.set('CacheMisses', resolver.cachestats['CacheMisses']); + service.end(); + } + + // this is wrong, it contains many types of info: + // 1. CacheHits, CacheMisses - incremental (added above) + // 2. QueryHits, QueryMisses - incremental + // 3. DeleteLRU, DeleteTTL - incremental + // 4. CacheNodes, CacheBuckets - absolute + // 5. TreeMemTotal, TreeMemInUse - absolute + // 6. 
HeapMemMax, HeapMemTotal, HeapMemInUse - absolute
+                    //if(typeof resolver.cachestats !== 'undefined')
+                    //    service.module.chartFromMembers(service, resolver.cachestats, 'view_resolver_cachestats_' + x, 'Bind, ' + x + ' View, Cache Statistics', 'requests/s', 'view_' + x, 'named.resolver_cache_stats', netdata.chartTypes.line, named.base_priority + 1001, netdata.chartAlgorithms.incremental, 1, 1);
+
+                    //if(typeof resolver.adb !== 'undefined')
+                    //    service.module.chartFromMembers(service, resolver.adb, 'view_resolver_adb_' + x, 'Bind, ' + x + ' View, ADB Statistics', 'entries', 'view_' + x, 'named.resolver_adb', netdata.chartTypes.line, named.base_priority + 1002, netdata.chartAlgorithms.absolute, 1, 1);
+                }
+            }
+        }
+    },
+
+    // module.serviceExecute()
+    // this function is called only from this module
+    // its purpose is to prepare the request and
+    // run it through netdata.service().execute()
+    serviceExecute: function(name, a_url, update_every) {
+        if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': url: ' + a_url + ', update_every: ' + update_every);
+        var service = netdata.service({
+            name: name,
+            request: netdata.requestFromURL(a_url),
+            update_every: update_every,
+            module: this
+        });
+
+        service.execute(this.processResponse);
+    },
+
+    configure: function(config) {
+        var added = 0;
+
+        if(this.enable_autodetect === true) {
+            this.serviceExecute('local', 'http://localhost:8888/json/v1/server', this.update_every);
+            added++;
+        }
+
+        if(typeof(config.servers) !== 'undefined') {
+            var len = config.servers.length;
+            while(len--) {
+                if(typeof config.servers[len].update_every === 'undefined')
+                    config.servers[len].update_every = this.update_every;
+
+                this.serviceExecute(config.servers[len].name, config.servers[len].url, config.servers[len].update_every);
+                added++;
+            }
+        }
+
+        return added;
+    },
+
+    // module.update()
+    // this is called repeatedly to collect data,
+    // by calling service.execute()
+    update: function(service, callback) {
+        service.execute(function(serv, data) {
+            service.module.processResponse(serv, data);
+            callback();
+        });
+    }
+};
+
+module.exports = named;
diff --git a/collectors/node.d.plugin/node.d.conf b/collectors/node.d.plugin/node.d.conf
new file mode 100644
index 000000000..95aec99ce
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.conf
@@ -0,0 +1,39 @@
+{
+    "___help_1": "Default options for node.d.plugin - this is a JSON file.",
+    "___help_2": "Use http://jsonlint.com/ to verify it is valid JSON.",
+    "___help_3": "------------------------------------------------------------",
+
+    "___help_update_every": "Minimum data collection frequency for all node.d/*.node.js modules. Set it to 0 to inherit it from netdata.",
+    "update_every": 0,
+
+    "___help_modules_enable_autodetect": "Enable/disable auto-detection for node.d/*.node.js modules that support it.",
+    "modules_enable_autodetect": true,
+
+    "___help_modules_enable_all": "Enable all node.d/*.node.js modules by default.",
+    "modules_enable_all": true,
+
+    "___help_modules": "Enable/disable the following modules.
Give only XXX for node.d/XXX.node.js", + "modules": { + "named": { + "enabled": true + }, + "sma_webbox": { + "enabled": true + }, + "snmp": { + "enabled": true + } + }, + + "___help_paths": "Paths that control the operation of node.d.plugin", + "paths": { + "___help_plugins": "The full path to the modules javascript node.d/ directory", + "plugins": null, + + "___help_config": "The full path to the modules configs node.d/ directory", + "config": null, + + "___help_modules": "Array of paths to add to node.js when searching for node_modules", + "modules": [] + } +} diff --git a/collectors/node.d.plugin/node.d.plugin b/collectors/node.d.plugin/node.d.plugin new file mode 100644 index 000000000..2570220c2 --- /dev/null +++ b/collectors/node.d.plugin/node.d.plugin @@ -0,0 +1,303 @@ +#!/usr/bin/env bash +':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" + +// shebang hack from: +// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang + +// Initially this is run as a shell script. +// Then, the second line, finds nodejs or node or js in the system path +// and executes it with the shell parameters. + +// netdata +// real-time performance and health monitoring, done right! +// (C) 2017 Costa Tsaousis +// SPDX-License-Identifier: GPL-3.0-or-later + +// -------------------------------------------------------------------------------------------------------------------- + +'use strict'; + +// -------------------------------------------------------------------------------------------------------------------- +// get NETDATA environment variables + +var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname; +var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/usr/local/etc/netdata'; +var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/local/lib/netdata/conf.d'; +var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1; +var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d'; + +// make sure the modules are found +process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules'); +process.mainModule.paths.unshift(NODE_D_DIR); + + +// -------------------------------------------------------------------------------------------------------------------- +// load required modules + +var fs = require('fs'); +var url = require('url'); +var util = require('util'); +var http = require('http'); +var path = require('path'); +var extend = require('extend'); +var netdata = require('netdata'); + + +// -------------------------------------------------------------------------------------------------------------------- +// configuration + +function netdata_read_json_config_file(module_filename) { + var f = path.basename(module_filename); + + var ufilename, sfilename; + + var m = f.match('.plugin' + '$'); + if(m !== null) { + ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf'; + sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf'; + } + + m = f.match('.node.js' + '$'); + if(m !== null) { + ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf'; + sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf'; + } + + try { + netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename); + return JSON.parse(fs.readFileSync(ufilename, 'utf8')); + } + catch(e) { + netdata.error('Cannot read user-configuration file ' + 
ufilename + ': ' + e.message + '.'); + dumpError(e); + } + + try { + netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename); + return JSON.parse(fs.readFileSync(sfilename, 'utf8')); + } + catch(e) { + netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.'); + dumpError(e); + } + + return {}; +} + +// internal defaults +extend(true, netdata.options, { + filename: path.basename(__filename), + + update_every: NETDATA_UPDATE_EVERY, + + paths: { + plugins: NETDATA_PLUGINS_DIR, + config: NETDATA_USER_CONFIG_DIR, + stock_config: NETDATA_STOCK_CONFIG_DIR, + modules: [] + }, + + modules_enable_autodetect: true, + modules_enable_all: true, + modules: {} +}); + +// load configuration file +netdata.options_loaded = netdata_read_json_config_file(__filename); +extend(true, netdata.options, netdata.options_loaded); + +if(!netdata.options.paths.plugins) + netdata.options.paths.plugins = NETDATA_PLUGINS_DIR; + +if(!netdata.options.paths.config) + netdata.options.paths.config = NETDATA_USER_CONFIG_DIR; + +if(!netdata.options.paths.stock_config) + netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR; + +// console.error('merged netdata object:'); +// console.error(util.inspect(netdata, {depth: 10})); + + +// apply module paths to node.js process +function applyModulePaths() { + var len = netdata.options.paths.modules.length; + while(len--) + process.mainModule.paths.unshift(netdata.options.paths.modules[len]); +} +applyModulePaths(); + + +// -------------------------------------------------------------------------------------------------------------------- +// tracing + +function dumpError(err) { + if (typeof err === 'object') { + if (err.stack) { + netdata.debug(err.stack); + } + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// get command line arguments +{ + var found_myself = false; + var found_number = false; + var found_modules = false; + process.argv.forEach(function (val, index, array) { + netdata.debug('PARAM: ' + val); + + if(!found_myself) { + if(val === __filename) + found_myself = true; + } + else { + switch(val) { + case 'debug': + netdata.options.DEBUG = true; + netdata.debug('DEBUG enabled'); + break; + + default: + if(found_number === true) { + if(found_modules === false) { + for(var i in netdata.options.modules) + netdata.options.modules[i].enabled = false; + } + + if(typeof netdata.options.modules[val] === 'undefined') + netdata.options.modules[val] = {}; + + netdata.options.modules[val].enabled = true; + netdata.options.modules_enable_all = false; + netdata.debug('enabled module ' + val); + } + else { + try { + var x = parseInt(val); + if(x > 0) { + netdata.options.update_every = x; + if(netdata.options.update_every < NETDATA_UPDATE_EVERY) { + netdata.options.update_every = NETDATA_UPDATE_EVERY; + netdata.debug('Update frequency ' + x + 's is too low'); + } + + found_number = true; + netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds'); + } + else netdata.error('Ignoring parameter: ' + val); + } + catch(e) { + netdata.error('Cannot get value of parameter: ' + val); + dumpError(e); + } + } + break; + } + } + }); +} + +if(netdata.options.update_every < 1) { + netdata.debug('Adjusting update frequency to 1 second'); + netdata.options.update_every = 1; +} + +// -------------------------------------------------------------------------------------------------------------------- +// 
find modules + +function findModules() { + var found = 0; + + var files = fs.readdirSync(NODE_D_DIR); + var len = files.length; + while(len--) { + var m = files[len].match('.node.js' + '$'); + if(m !== null) { + var n = files[len].substring(0, m.index); + + if(typeof(netdata.options.modules[n]) === 'undefined') + netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all }; + + if(netdata.options.modules[n].enabled === true) { + netdata.options.modules[n].name = n; + netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len]; + netdata.options.modules[n].loaded = false; + + // load the module + try { + netdata.debug('loading module ' + netdata.options.modules[n].filename); + netdata.options.modules[n].module = require(netdata.options.modules[n].filename); + netdata.options.modules[n].module.name = n; + netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename); + } + catch(e) { + netdata.options.modules[n].enabled = false; + netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e); + dumpError(e); + continue; + } + + // load its configuration + var c = { + enable_autodetect: netdata.options.modules_enable_autodetect, + update_every: netdata.options.update_every + }; + + var c2 = netdata_read_json_config_file(files[len]); + extend(true, c, c2); + + // call module auto-detection / configuration + try { + netdata.modules_configuring++; + netdata.debug('Configuring module ' + netdata.options.modules[n].name); + var serv = netdata.configure(netdata.options.modules[n].module, c, function() { + netdata.debug('Configured module ' + netdata.options.modules[n].name); + netdata.modules_configuring--; + }); + + netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.'); + } + catch(e) { + netdata.modules_configuring--; + netdata.options.modules[n].enabled = false; + netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.'); + dumpError(e); + continue; + } + + netdata.options.modules[n].loaded = true; + found++; + } + } + } + + // netdata.debug(netdata.options.modules); + return found; +} + +if(findModules() === 0) { + netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR); + netdata.disableNodePlugin(); + process.exit(1); +} + + +// -------------------------------------------------------------------------------------------------------------------- +// start + +function start_when_configuring_ends() { + if(netdata.modules_configuring > 0) { + netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring); + setTimeout(start_when_configuring_ends, 500); + return; + } + + netdata.modules_configuring = 0; + netdata.start(); +} +start_when_configuring_ends(); + +//netdata.debug('netdata object:') +//netdata.debug(netdata); diff --git a/collectors/node.d.plugin/node.d.plugin.in b/collectors/node.d.plugin/node.d.plugin.in new file mode 100755 index 000000000..05c126e90 --- /dev/null +++ b/collectors/node.d.plugin/node.d.plugin.in @@ -0,0 +1,303 @@ +#!/usr/bin/env bash +':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" + +// shebang hack from: +// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang + +// Initially this is run as a shell script. 
+// Then, the second line, finds nodejs or node or js in the system path +// and executes it with the shell parameters. + +// netdata +// real-time performance and health monitoring, done right! +// (C) 2017 Costa Tsaousis +// SPDX-License-Identifier: GPL-3.0-or-later + +// -------------------------------------------------------------------------------------------------------------------- + +'use strict'; + +// -------------------------------------------------------------------------------------------------------------------- +// get NETDATA environment variables + +var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname; +var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '@configdir_POST@'; +var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '@libconfigdir_POST@'; +var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1; +var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d'; + +// make sure the modules are found +process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules'); +process.mainModule.paths.unshift(NODE_D_DIR); + + +// -------------------------------------------------------------------------------------------------------------------- +// load required modules + +var fs = require('fs'); +var url = require('url'); +var util = require('util'); +var http = require('http'); +var path = require('path'); +var extend = require('extend'); +var netdata = require('netdata'); + + +// -------------------------------------------------------------------------------------------------------------------- +// configuration + +function netdata_read_json_config_file(module_filename) { + var f = path.basename(module_filename); + + var ufilename, sfilename; + + var m = f.match('.plugin' + '$'); + if(m !== null) { + ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf'; + sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf'; + } + + m = f.match('.node.js' + '$'); + if(m !== null) { + ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf'; + sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf'; + } + + try { + netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename); + return JSON.parse(fs.readFileSync(ufilename, 'utf8')); + } + catch(e) { + netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.'); + dumpError(e); + } + + try { + netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename); + return JSON.parse(fs.readFileSync(sfilename, 'utf8')); + } + catch(e) { + netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.'); + dumpError(e); + } + + return {}; +} + +// internal defaults +extend(true, netdata.options, { + filename: path.basename(__filename), + + update_every: NETDATA_UPDATE_EVERY, + + paths: { + plugins: NETDATA_PLUGINS_DIR, + config: NETDATA_USER_CONFIG_DIR, + stock_config: NETDATA_STOCK_CONFIG_DIR, + modules: [] + }, + + modules_enable_autodetect: true, + modules_enable_all: true, + modules: {} +}); + +// load configuration file +netdata.options_loaded = netdata_read_json_config_file(__filename); +extend(true, netdata.options, netdata.options_loaded); + +if(!netdata.options.paths.plugins) + netdata.options.paths.plugins = NETDATA_PLUGINS_DIR; + +if(!netdata.options.paths.config) + netdata.options.paths.config = NETDATA_USER_CONFIG_DIR; + 
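+// (the stock node.d.conf ships its "paths" entries as null, and the deep
+// merge above carries null values through, so re-apply the environment /
+// build-time defaults when the merged configuration left a path unset)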
+if(!netdata.options.paths.stock_config) + netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR; + +// console.error('merged netdata object:'); +// console.error(util.inspect(netdata, {depth: 10})); + + +// apply module paths to node.js process +function applyModulePaths() { + var len = netdata.options.paths.modules.length; + while(len--) + process.mainModule.paths.unshift(netdata.options.paths.modules[len]); +} +applyModulePaths(); + + +// -------------------------------------------------------------------------------------------------------------------- +// tracing + +function dumpError(err) { + if (typeof err === 'object') { + if (err.stack) { + netdata.debug(err.stack); + } + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// get command line arguments +{ + var found_myself = false; + var found_number = false; + var found_modules = false; + process.argv.forEach(function (val, index, array) { + netdata.debug('PARAM: ' + val); + + if(!found_myself) { + if(val === __filename) + found_myself = true; + } + else { + switch(val) { + case 'debug': + netdata.options.DEBUG = true; + netdata.debug('DEBUG enabled'); + break; + + default: + if(found_number === true) { + if(found_modules === false) { + for(var i in netdata.options.modules) + netdata.options.modules[i].enabled = false; + } + + if(typeof netdata.options.modules[val] === 'undefined') + netdata.options.modules[val] = {}; + + netdata.options.modules[val].enabled = true; + netdata.options.modules_enable_all = false; + netdata.debug('enabled module ' + val); + } + else { + try { + var x = parseInt(val); + if(x > 0) { + netdata.options.update_every = x; + if(netdata.options.update_every < NETDATA_UPDATE_EVERY) { + netdata.options.update_every = NETDATA_UPDATE_EVERY; + netdata.debug('Update frequency ' + x + 's is too low'); + } + + found_number = true; + netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds'); + } + else netdata.error('Ignoring parameter: ' + val); + } + catch(e) { + netdata.error('Cannot get value of parameter: ' + val); + dumpError(e); + } + } + break; + } + } + }); +} + +if(netdata.options.update_every < 1) { + netdata.debug('Adjusting update frequency to 1 second'); + netdata.options.update_every = 1; +} + +// -------------------------------------------------------------------------------------------------------------------- +// find modules + +function findModules() { + var found = 0; + + var files = fs.readdirSync(NODE_D_DIR); + var len = files.length; + while(len--) { + var m = files[len].match('.node.js' + '$'); + if(m !== null) { + var n = files[len].substring(0, m.index); + + if(typeof(netdata.options.modules[n]) === 'undefined') + netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all }; + + if(netdata.options.modules[n].enabled === true) { + netdata.options.modules[n].name = n; + netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len]; + netdata.options.modules[n].loaded = false; + + // load the module + try { + netdata.debug('loading module ' + netdata.options.modules[n].filename); + netdata.options.modules[n].module = require(netdata.options.modules[n].filename); + netdata.options.modules[n].module.name = n; + netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename); + } + catch(e) { + netdata.options.modules[n].enabled = false; + netdata.error('Cannot load module: ' + 
netdata.options.modules[n].filename + ' exception: ' + e); + dumpError(e); + continue; + } + + // load its configuration + var c = { + enable_autodetect: netdata.options.modules_enable_autodetect, + update_every: netdata.options.update_every + }; + + var c2 = netdata_read_json_config_file(files[len]); + extend(true, c, c2); + + // call module auto-detection / configuration + try { + netdata.modules_configuring++; + netdata.debug('Configuring module ' + netdata.options.modules[n].name); + var serv = netdata.configure(netdata.options.modules[n].module, c, function() { + netdata.debug('Configured module ' + netdata.options.modules[n].name); + netdata.modules_configuring--; + }); + + netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.'); + } + catch(e) { + netdata.modules_configuring--; + netdata.options.modules[n].enabled = false; + netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.'); + dumpError(e); + continue; + } + + netdata.options.modules[n].loaded = true; + found++; + } + } + } + + // netdata.debug(netdata.options.modules); + return found; +} + +if(findModules() === 0) { + netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR); + netdata.disableNodePlugin(); + process.exit(1); +} + + +// -------------------------------------------------------------------------------------------------------------------- +// start + +function start_when_configuring_ends() { + if(netdata.modules_configuring > 0) { + netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring); + setTimeout(start_when_configuring_ends, 500); + return; + } + + netdata.modules_configuring = 0; + netdata.start(); +} +start_when_configuring_ends(); + +//netdata.debug('netdata object:') +//netdata.debug(netdata); diff --git a/collectors/node.d.plugin/node_modules/asn1-ber.js b/collectors/node.d.plugin/node_modules/asn1-ber.js new file mode 100644 index 000000000..55c8f688e --- /dev/null +++ b/collectors/node.d.plugin/node_modules/asn1-ber.js @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT + +var Ber = require('./lib/ber/index') + +exports.Ber = Ber +exports.BerReader = Ber.Reader +exports.BerWriter = Ber.Writer diff --git a/collectors/node.d.plugin/node_modules/extend.js b/collectors/node.d.plugin/node_modules/extend.js new file mode 100644 index 000000000..3cd2e9155 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/extend.js @@ -0,0 +1,88 @@ +// https://github.com/justmoon/node-extend +// SPDX-License-Identifier: MIT + +'use strict'; + +var hasOwn = Object.prototype.hasOwnProperty; +var toStr = Object.prototype.toString; + +var isArray = function isArray(arr) { + if (typeof Array.isArray === 'function') { + return Array.isArray(arr); + } + + return toStr.call(arr) === '[object Array]'; +}; + +var isPlainObject = function isPlainObject(obj) { + if (!obj || toStr.call(obj) !== '[object Object]') { + return false; + } + + var hasOwnConstructor = hasOwn.call(obj, 'constructor'); + var hasIsPrototypeOf = obj.constructor && obj.constructor.prototype && hasOwn.call(obj.constructor.prototype, 'isPrototypeOf'); + // Not own constructor property must be Object + if (obj.constructor && !hasOwnConstructor && !hasIsPrototypeOf) { + return false; + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. 
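+	// A minimal equivalent of the test below, assuming nothing about
+	// enumeration order, would be:
+	//     for (var k in obj) { if (!hasOwn.call(obj, k)) return false; }
+	// The shortcut instead inspects only the last enumerated key.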
+ var key; + for (key in obj) { /**/ } + + return typeof key === 'undefined' || hasOwn.call(obj, key); +}; + +module.exports = function extend() { + var options, name, src, copy, copyIsArray, clone; + var target = arguments[0]; + var i = 1; + var length = arguments.length; + var deep = false; + + // Handle a deep copy situation + if (typeof target === 'boolean') { + deep = target; + target = arguments[1] || {}; + // skip the boolean and the target + i = 2; + } else if ((typeof target !== 'object' && typeof target !== 'function') || target == null) { + target = {}; + } + + for (; i < length; ++i) { + options = arguments[i]; + // Only deal with non-null/undefined values + if (options != null) { + // Extend the base object + for (name in options) { + src = target[name]; + copy = options[name]; + + // Prevent never-ending loop + if (target !== copy) { + // Recurse if we're merging plain objects or arrays + if (deep && copy && (isPlainObject(copy) || (copyIsArray = isArray(copy)))) { + if (copyIsArray) { + copyIsArray = false; + clone = src && isArray(src) ? src : []; + } else { + clone = src && isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[name] = extend(deep, clone, copy); + + // Don't bring in undefined values + } else if (typeof copy !== 'undefined') { + target[name] = copy; + } + } + } + } + } + + // Return the modified object + return target; +}; diff --git a/collectors/node.d.plugin/node_modules/lib/ber/errors.js b/collectors/node.d.plugin/node_modules/lib/ber/errors.js new file mode 100644 index 000000000..1c0df7b13 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/lib/ber/errors.js @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT + +module.exports = { + InvalidAsn1Error: function(msg) { + var e = new Error() + e.name = 'InvalidAsn1Error' + e.message = msg || '' + return e + } +} diff --git a/collectors/node.d.plugin/node_modules/lib/ber/index.js b/collectors/node.d.plugin/node_modules/lib/ber/index.js new file mode 100644 index 000000000..eb69ec526 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/lib/ber/index.js @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT + +var errors = require('./errors') +var types = require('./types') + +var Reader = require('./reader') +var Writer = require('./writer') + +for (var t in types) + if (types.hasOwnProperty(t)) + exports[t] = types[t] + +for (var e in errors) + if (errors.hasOwnProperty(e)) + exports[e] = errors[e] + +exports.Reader = Reader +exports.Writer = Writer diff --git a/collectors/node.d.plugin/node_modules/lib/ber/reader.js b/collectors/node.d.plugin/node_modules/lib/ber/reader.js new file mode 100644 index 000000000..06decf4b9 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/lib/ber/reader.js @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: MIT + +var assert = require('assert'); + +var ASN1 = require('./types'); +var errors = require('./errors'); + + +///--- Globals + +var InvalidAsn1Error = errors.InvalidAsn1Error; + + + +///--- API + +function Reader(data) { + if (!data || !Buffer.isBuffer(data)) + throw new TypeError('data must be a node Buffer'); + + this._buf = data; + this._size = data.length; + + // These hold the "current" state + this._len = 0; + this._offset = 0; +} + +Object.defineProperty(Reader.prototype, 'length', { + enumerable: true, + get: function () { return (this._len); } +}); + +Object.defineProperty(Reader.prototype, 'offset', { + enumerable: true, + get: function () { return (this._offset); } +}); + +Object.defineProperty(Reader.prototype, 
'remain', {
+  get: function () { return (this._size - this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'buffer', {
+  get: function () { return (this._buf.slice(this._offset)); }
+});
+
+
+/**
+ * Reads a single byte and advances offset; you can pass in `true` to make this
+ * a "peek" operation (i.e., get the byte, but don't advance the offset).
+ *
+ * @param {Boolean} peek true means don't move offset.
+ * @return {Number} the next byte, null if not enough data.
+ */
+Reader.prototype.readByte = function(peek) {
+  if (this._size - this._offset < 1)
+    return null;
+
+  var b = this._buf[this._offset] & 0xff;
+
+  if (!peek)
+    this._offset += 1;
+
+  return b;
+};
+
+
+Reader.prototype.peek = function() {
+  return this.readByte(true);
+};
+
+
+/**
+ * Reads a (potentially) variable length off the BER buffer. This call is
+ * not really meant to be called directly, as callers have to manipulate
+ * the internal buffer afterwards.
+ *
+ * As a result of this call, you can call `Reader.length`, until the
+ * next thing called that does a readLength.
+ *
+ * @return {Number} the amount of offset to advance the buffer.
+ * @throws {InvalidAsn1Error} on bad ASN.1
+ */
+Reader.prototype.readLength = function(offset) {
+  if (offset === undefined)
+    offset = this._offset;
+
+  if (offset >= this._size)
+    return null;
+
+  var lenB = this._buf[offset++] & 0xff;
+  if (lenB === null)
+    return null;
+
+  if ((lenB & 0x80) == 0x80) {
+    lenB &= 0x7f;
+
+    if (lenB == 0)
+      throw InvalidAsn1Error('Indefinite length not supported');
+
+    if (lenB > 4)
+      throw InvalidAsn1Error('encoding too long');
+
+    if (this._size - offset < lenB)
+      return null;
+
+    this._len = 0;
+    for (var i = 0; i < lenB; i++)
+      this._len = (this._len << 8) + (this._buf[offset++] & 0xff);
+
+  } else {
+    // Wasn't a variable length
+    this._len = lenB;
+  }
+
+  return offset;
+};
+
+
+/**
+ * Parses the next sequence in this BER buffer.
+ *
+ * To get the length of the sequence, call `Reader.length`.
+ *
+ * @return {Number} the sequence's tag.
+ */
+Reader.prototype.readSequence = function(tag) {
+  var seq = this.peek();
+  if (seq === null)
+    return null;
+  if (tag !== undefined && tag !== seq)
+    throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+                           ': got 0x' + seq.toString(16));
+
+  var o = this.readLength(this._offset + 1); // stored in `length`
+  if (o === null)
+    return null;
+
+  this._offset = o;
+  return seq;
+};
+
+
+Reader.prototype.readInt = function(tag) {
+  if (typeof(tag) !== 'number')
+    tag = ASN1.Integer;
+
+  // honour a caller-supplied tag
+  return this._readTag(tag);
+};
+
+
+Reader.prototype.readBoolean = function(tag) {
+  if (typeof(tag) !== 'number')
+    tag = ASN1.Boolean;
+
+  return (this._readTag(tag) === 0 ? false : true);
+};
+
+
+Reader.prototype.readEnumeration = function(tag) {
+  if (typeof(tag) !== 'number')
+    tag = ASN1.Enumeration;
+
+  // honour a caller-supplied tag
+  return this._readTag(tag);
+};
+
+
+Reader.prototype.readString = function(tag, retbuf) {
+  if (!tag)
+    tag = ASN1.OctetString;
+
+  var b = this.peek();
+  if (b === null)
+    return null;
+
+  if (b !== tag)
+    throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+                           ': got 0x' + b.toString(16));
+
+  var o = this.readLength(this._offset + 1); // stored in `length`
+
+  if (o === null)
+    return null;
+
+  if (this.length > this._size - o)
+    return null;
+
+  this._offset = o;
+
+  if (this.length === 0)
+    return retbuf ? new Buffer(0) : '';
+
+  var str = this._buf.slice(this._offset, this._offset + this.length);
+  this._offset += this.length;
+
+  return retbuf ?
str : str.toString('utf8'); +}; + +Reader.prototype.readOID = function(tag) { + if (!tag) + tag = ASN1.OID; + + var b = this.readString(tag, true); + if (b === null) + return null; + + var values = []; + var value = 0; + + for (var i = 0; i < b.length; i++) { + var byte = b[i] & 0xff; + + value <<= 7; + value += byte & 0x7f; + if ((byte & 0x80) == 0) { + values.push(value >>> 0); + value = 0; + } + } + + value = values.shift(); + values.unshift(value % 40); + values.unshift((value / 40) >> 0); + + return values.join('.'); +}; + + +Reader.prototype._readTag = function(tag) { + assert.ok(tag !== undefined); + + var b = this.peek(); + + if (b === null) + return null; + + if (b !== tag) + throw InvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + b.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + if (o === null) + return null; + + if (this.length > 4) + throw InvalidAsn1Error('Integer too long: ' + this.length); + + if (this.length > this._size - o) + return null; + this._offset = o; + + var fb = this._buf[this._offset]; + var value = 0; + + for (var i = 0; i < this.length; i++) { + value <<= 8; + value |= (this._buf[this._offset++] & 0xff); + } + + if ((fb & 0x80) == 0x80 && i !== 4) + value -= (1 << (i * 8)); + + return value >> 0; +}; + + + +///--- Exported API + +module.exports = Reader; diff --git a/collectors/node.d.plugin/node_modules/lib/ber/types.js b/collectors/node.d.plugin/node_modules/lib/ber/types.js new file mode 100644 index 000000000..7519ddcf5 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/lib/ber/types.js @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: MIT + +module.exports = { + EOC: 0, + Boolean: 1, + Integer: 2, + BitString: 3, + OctetString: 4, + Null: 5, + OID: 6, + ObjectDescriptor: 7, + External: 8, + Real: 9, + Enumeration: 10, + PDV: 11, + Utf8String: 12, + RelativeOID: 13, + Sequence: 16, + Set: 17, + NumericString: 18, + PrintableString: 19, + T61String: 20, + VideotexString: 21, + IA5String: 22, + UTCTime: 23, + GeneralizedTime: 24, + GraphicString: 25, + VisibleString: 26, + GeneralString: 28, + UniversalString: 29, + CharacterString: 30, + BMPString: 31, + Constructor: 32, + Context: 128 +} diff --git a/collectors/node.d.plugin/node_modules/lib/ber/writer.js b/collectors/node.d.plugin/node_modules/lib/ber/writer.js new file mode 100644 index 000000000..d3a718f14 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/lib/ber/writer.js @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: MIT + +var assert = require('assert'); +var ASN1 = require('./types'); +var errors = require('./errors'); + + +///--- Globals + +var InvalidAsn1Error = errors.InvalidAsn1Error; + +var DEFAULT_OPTS = { + size: 1024, + growthFactor: 8 +}; + + +///--- Helpers + +function merge(from, to) { + assert.ok(from); + assert.equal(typeof(from), 'object'); + assert.ok(to); + assert.equal(typeof(to), 'object'); + + var keys = Object.getOwnPropertyNames(from); + keys.forEach(function(key) { + if (to[key]) + return; + + var value = Object.getOwnPropertyDescriptor(from, key); + Object.defineProperty(to, key, value); + }); + + return to; +} + + + +///--- API + +function Writer(options) { + options = merge(DEFAULT_OPTS, options || {}); + + this._buf = new Buffer(options.size || 1024); + this._size = this._buf.length; + this._offset = 0; + this._options = options; + + // A list of offsets in the buffer where we need to insert + // sequence tag/len pairs. 
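+  // (startSequence() pushes the current offset here and reserves three bytes
+  // for the header; endSequence() pops it and back-fills the length,
+  // shifting the sequence body when fewer length bytes are needed)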
+ this._seq = []; +} + +Object.defineProperty(Writer.prototype, 'buffer', { + get: function () { + if (this._seq.length) + throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)'); + + return (this._buf.slice(0, this._offset)); + } +}); + +Writer.prototype.writeByte = function(b) { + if (typeof(b) !== 'number') + throw new TypeError('argument must be a Number'); + + this._ensure(1); + this._buf[this._offset++] = b; +}; + + +Writer.prototype.writeInt = function(i, tag) { + if (typeof(i) !== 'number') + throw new TypeError('argument must be a Number'); + if (typeof(tag) !== 'number') + tag = ASN1.Integer; + + var sz = 4; + + while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) && + (sz > 1)) { + sz--; + i <<= 8; + } + + if (sz > 4) + throw new InvalidAsn1Error('BER ints cannot be > 0xffffffff'); + + this._ensure(2 + sz); + this._buf[this._offset++] = tag; + this._buf[this._offset++] = sz; + + while (sz-- > 0) { + this._buf[this._offset++] = ((i & 0xff000000) >>> 24); + i <<= 8; + } + +}; + + +Writer.prototype.writeNull = function() { + this.writeByte(ASN1.Null); + this.writeByte(0x00); +}; + + +Writer.prototype.writeEnumeration = function(i, tag) { + if (typeof(i) !== 'number') + throw new TypeError('argument must be a Number'); + if (typeof(tag) !== 'number') + tag = ASN1.Enumeration; + + return this.writeInt(i, tag); +}; + + +Writer.prototype.writeBoolean = function(b, tag) { + if (typeof(b) !== 'boolean') + throw new TypeError('argument must be a Boolean'); + if (typeof(tag) !== 'number') + tag = ASN1.Boolean; + + this._ensure(3); + this._buf[this._offset++] = tag; + this._buf[this._offset++] = 0x01; + this._buf[this._offset++] = b ? 0xff : 0x00; +}; + + +Writer.prototype.writeString = function(s, tag) { + if (typeof(s) !== 'string') + throw new TypeError('argument must be a string (was: ' + typeof(s) + ')'); + if (typeof(tag) !== 'number') + tag = ASN1.OctetString; + + var len = Buffer.byteLength(s); + this.writeByte(tag); + this.writeLength(len); + if (len) { + this._ensure(len); + this._buf.write(s, this._offset); + this._offset += len; + } +}; + + +Writer.prototype.writeBuffer = function(buf, tag) { + if (!Buffer.isBuffer(buf)) + throw new TypeError('argument must be a buffer'); + + // If no tag is specified we will assume `buf` already contains tag and length + if (typeof(tag) === 'number') { + this.writeByte(tag); + this.writeLength(buf.length); + } + + this._ensure(buf.length); + buf.copy(this._buf, this._offset, 0, buf.length); + this._offset += buf.length; +}; + + +Writer.prototype.writeStringArray = function(strings, tag) { + if (! 
(strings instanceof Array)) + throw new TypeError('argument must be an Array[String]'); + + var self = this; + strings.forEach(function(s) { + self.writeString(s, tag); + }); +}; + +// This is really to solve DER cases, but whatever for now +Writer.prototype.writeOID = function(s, tag) { + if (typeof(s) !== 'string') + throw new TypeError('argument must be a string'); + if (typeof(tag) !== 'number') + tag = ASN1.OID; + + if (!/^([0-9]+\.){3,}[0-9]+$/.test(s)) + throw new Error('argument is not a valid OID string'); + + function encodeOctet(bytes, octet) { + if (octet < 128) { + bytes.push(octet); + } else if (octet < 16384) { + bytes.push((octet >>> 7) | 0x80); + bytes.push(octet & 0x7F); + } else if (octet < 2097152) { + bytes.push((octet >>> 14) | 0x80); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } else if (octet < 268435456) { + bytes.push((octet >>> 21) | 0x80); + bytes.push(((octet >>> 14) | 0x80) & 0xFF); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } else { + bytes.push(((octet >>> 28) | 0x80) & 0xFF); + bytes.push(((octet >>> 21) | 0x80) & 0xFF); + bytes.push(((octet >>> 14) | 0x80) & 0xFF); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } + } + + var tmp = s.split('.'); + var bytes = []; + bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10)); + tmp.slice(2).forEach(function(b) { + encodeOctet(bytes, parseInt(b, 10)); + }); + + var self = this; + this._ensure(2 + bytes.length); + this.writeByte(tag); + this.writeLength(bytes.length); + bytes.forEach(function(b) { + self.writeByte(b); + }); +}; + + +Writer.prototype.writeLength = function(len) { + if (typeof(len) !== 'number') + throw new TypeError('argument must be a Number'); + + this._ensure(4); + + if (len <= 0x7f) { + this._buf[this._offset++] = len; + } else if (len <= 0xff) { + this._buf[this._offset++] = 0x81; + this._buf[this._offset++] = len; + } else if (len <= 0xffff) { + this._buf[this._offset++] = 0x82; + this._buf[this._offset++] = len >> 8; + this._buf[this._offset++] = len; + } else if (len <= 0xffffff) { + this._buf[this._offset++] = 0x83; + this._buf[this._offset++] = len >> 16; + this._buf[this._offset++] = len >> 8; + this._buf[this._offset++] = len; + } else { + throw new InvalidAsn1Error('Length too long (> 4 bytes)'); + } +}; + +Writer.prototype.startSequence = function(tag) { + if (typeof(tag) !== 'number') + tag = ASN1.Sequence | ASN1.Constructor; + + this.writeByte(tag); + this._seq.push(this._offset); + this._ensure(3); + this._offset += 3; +}; + + +Writer.prototype.endSequence = function() { + var seq = this._seq.pop(); + var start = seq + 3; + var len = this._offset - start; + + if (len <= 0x7f) { + this._shift(start, len, -2); + this._buf[seq] = len; + } else if (len <= 0xff) { + this._shift(start, len, -1); + this._buf[seq] = 0x81; + this._buf[seq + 1] = len; + } else if (len <= 0xffff) { + this._buf[seq] = 0x82; + this._buf[seq + 1] = len >> 8; + this._buf[seq + 2] = len; + } else if (len <= 0xffffff) { + this._shift(start, len, 1); + this._buf[seq] = 0x83; + this._buf[seq + 1] = len >> 16; + this._buf[seq + 2] = len >> 8; + this._buf[seq + 3] = len; + } else { + throw new InvalidAsn1Error('Sequence too long'); + } +}; + + +Writer.prototype._shift = function(start, len, shift) { + assert.ok(start !== undefined); + assert.ok(len !== undefined); + assert.ok(shift); + + this._buf.copy(this._buf, start + shift, start, start + len); + this._offset += shift; +}; + +Writer.prototype._ensure = function(len) { + 
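+  // grow the backing buffer geometrically (size * growthFactor), and by at
+  // least `len` more when the factor alone would still not fit the write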
assert.ok(len); + + if (this._size - this._offset < len) { + var sz = this._size * this._options.growthFactor; + if (sz - this._offset < len) + sz += len; + + var buf = new Buffer(sz); + + this._buf.copy(buf, 0, 0, this._offset); + this._buf = buf; + this._size = sz; + } +}; + + + +///--- Exported API + +module.exports = Writer; diff --git a/collectors/node.d.plugin/node_modules/net-snmp.js b/collectors/node.d.plugin/node_modules/net-snmp.js new file mode 100644 index 000000000..484597dcb --- /dev/null +++ b/collectors/node.d.plugin/node_modules/net-snmp.js @@ -0,0 +1,1465 @@ + +// Copyright 2013 Stephen Vickers +// SPDX-License-Identifier: MIT + +var ber = require ("asn1-ber").Ber; +var dgram = require ("dgram"); +var events = require ("events"); +var util = require ("util"); + +/***************************************************************************** + ** Constants + **/ + +function _expandConstantObject (object) { + var keys = []; + for (var key in object) + keys.push (key); + for (var i = 0; i < keys.length; i++) + object[object[keys[i]]] = parseInt (keys[i]); +} + +var ErrorStatus = { + 0: "NoError", + 1: "TooBig", + 2: "NoSuchName", + 3: "BadValue", + 4: "ReadOnly", + 5: "GeneralError", + 6: "NoAccess", + 7: "WrongType", + 8: "WrongLength", + 9: "WrongEncoding", + 10: "WrongValue", + 11: "NoCreation", + 12: "InconsistentValue", + 13: "ResourceUnavailable", + 14: "CommitFailed", + 15: "UndoFailed", + 16: "AuthorizationError", + 17: "NotWritable", + 18: "InconsistentName" +}; + +_expandConstantObject (ErrorStatus); + +var ObjectType = { + 1: "Boolean", + 2: "Integer", + 4: "OctetString", + 5: "Null", + 6: "OID", + 64: "IpAddress", + 65: "Counter", + 66: "Gauge", + 67: "TimeTicks", + 68: "Opaque", + 70: "Counter64", + 128: "NoSuchObject", + 129: "NoSuchInstance", + 130: "EndOfMibView" +}; + +_expandConstantObject (ObjectType); + +ObjectType.Integer32 = ObjectType.Integer; +ObjectType.Counter32 = ObjectType.Counter; +ObjectType.Gauge32 = ObjectType.Gauge; +ObjectType.Unsigned32 = ObjectType.Gauge32; + +var PduType = { + 160: "GetRequest", + 161: "GetNextRequest", + 162: "GetResponse", + 163: "SetRequest", + 164: "Trap", + 165: "GetBulkRequest", + 166: "InformRequest", + 167: "TrapV2", + 168: "Report" +}; + +_expandConstantObject (PduType); + +var TrapType = { + 0: "ColdStart", + 1: "WarmStart", + 2: "LinkDown", + 3: "LinkUp", + 4: "AuthenticationFailure", + 5: "EgpNeighborLoss", + 6: "EnterpriseSpecific" +}; + +_expandConstantObject (TrapType); + +var Version1 = 0; +var Version2c = 1; + +/***************************************************************************** + ** Exception class definitions + **/ + +function ResponseInvalidError (message) { + this.name = "ResponseInvalidError"; + this.message = message; + Error.captureStackTrace(this, ResponseInvalidError); +} +util.inherits (ResponseInvalidError, Error); + +function RequestInvalidError (message) { + this.name = "RequestInvalidError"; + this.message = message; + Error.captureStackTrace(this, RequestInvalidError); +} +util.inherits (RequestInvalidError, Error); + +function RequestFailedError (message, status) { + this.name = "RequestFailedError"; + this.message = message; + this.status = status; + Error.captureStackTrace(this, RequestFailedError); +} +util.inherits (RequestFailedError, Error); + +function RequestTimedOutError (message) { + this.name = "RequestTimedOutError"; + this.message = message; + Error.captureStackTrace(this, RequestTimedOutError); +} +util.inherits (RequestTimedOutError, Error); + 
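+// A minimal sketch of how these exceptions surface to callers of the Session
+// API defined later in this file (this assumes the module exports the error
+// classes alongside createSession(); the target, community and OID below are
+// illustrative placeholders):
+//
+//     var session = snmp.createSession ("127.0.0.1", "public");
+//     session.get (["1.3.6.1.2.1.1.5.0"], function (error, varbinds) {
+//         if (error instanceof snmp.RequestTimedOutError)
+//             console.error ("no response: " + error.message);
+//         else if (error)
+//             console.error (error.name + ": " + error.message);
+//     });
+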
+/***************************************************************************** + ** OID and varbind helper functions + **/ + +function isVarbindError (varbind) { + return !!(varbind.type == ObjectType.NoSuchObject + || varbind.type == ObjectType.NoSuchInstance + || varbind.type == ObjectType.EndOfMibView); +} + +function varbindError (varbind) { + return (ObjectType[varbind.type] || "NotAnError") + ": " + varbind.oid; +} + +function oidFollowsOid (oidString, nextString) { + var oid = {str: oidString, len: oidString.length, idx: 0}; + var next = {str: nextString, len: nextString.length, idx: 0}; + var dotCharCode = ".".charCodeAt (0); + + function getNumber (item) { + var n = 0; + if (item.idx >= item.len) + return null; + while (item.idx < item.len) { + var charCode = item.str.charCodeAt (item.idx++); + if (charCode == dotCharCode) + return n; + n = (n ? (n * 10) : n) + (charCode - 48); + } + return n; + } + + while (1) { + var oidNumber = getNumber (oid); + var nextNumber = getNumber (next); + + if (oidNumber !== null) { + if (nextNumber !== null) { + if (nextNumber > oidNumber) { + return true; + } else if (nextNumber < oidNumber) { + return false; + } + } else { + return true; + } + } else { + return true; + } + } +} + +function oidInSubtree (oidString, nextString) { + var oid = oidString.split ("."); + var next = nextString.split ("."); + + if (oid.length > next.length) + return false; + + for (var i = 0; i < oid.length; i++) { + if (next[i] != oid[i]) + return false; + } + + return true; +} + +/** + ** Some SNMP agents produce integers on the wire such as 00 ff ff ff ff. + ** The ASN.1 BER parser we use throws an error when parsing this, which we + ** believe is correct. So, we decided not to bother the "asn1" developer(s) + ** with this, instead opting to work around it here. + ** + ** If an integer is 5 bytes in length we check if the first byte is 0, and if so + ** simply drop it and parse it like it was a 4 byte integer, otherwise throw + ** an error since the integer is too large. 
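+ ** For example, the 5-byte value 00 ff ff ff ff is accepted and parsed as
+ ** the unsigned 32-bit value 4294967295, while 01 ff ff ff ff is rejected
+ ** with a RangeError.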
+ **/ + +function readInt (buffer) { + return readUint (buffer, true); +} + +function readUint (buffer, isSigned) { + buffer.readByte (); + var length = buffer.readByte (); + var value = 0; + var signedBitSet = false; + + if (length > 5) { + throw new RangeError ("Integer too long '" + length + "'"); + } else if (length == 5) { + if (buffer.readByte () !== 0) + throw new RangeError ("Integer too long '" + length + "'"); + length = 4; + } + + for (var i = 0; i < length; i++) { + value *= 256; + value += buffer.readByte (); + + if (isSigned && i <= 0) { + if ((value & 0x80) == 0x80) + signedBitSet = true; + } + } + + if (signedBitSet) + value -= (1 << (i * 8)); + + return value; +} + +function readUint64 (buffer) { + var value = buffer.readString (ObjectType.Counter64, true); + + return value; +} + +function readVarbinds (buffer, varbinds) { + buffer.readSequence (); + + while (1) { + buffer.readSequence (); + var oid = buffer.readOID (); + var type = buffer.peek (); + + if (type == null) + break; + + var value; + + if (type == ObjectType.Boolean) { + value = buffer.readBoolean (); + } else if (type == ObjectType.Integer) { + value = readInt (buffer); + } else if (type == ObjectType.OctetString) { + value = buffer.readString (null, true); + } else if (type == ObjectType.Null) { + buffer.readByte (); + buffer.readByte (); + value = null; + } else if (type == ObjectType.OID) { + value = buffer.readOID (); + } else if (type == ObjectType.IpAddress) { + var bytes = buffer.readString (ObjectType.IpAddress, true); + if (bytes.length != 4) + throw new ResponseInvalidError ("Length '" + bytes.length + + "' of IP address '" + bytes.toString ("hex") + + "' is not 4"); + value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3]; + } else if (type == ObjectType.Counter) { + value = readUint (buffer); + } else if (type == ObjectType.Gauge) { + value = readUint (buffer); + } else if (type == ObjectType.TimeTicks) { + value = readUint (buffer); + } else if (type == ObjectType.Opaque) { + value = buffer.readString (ObjectType.Opaque, true); + } else if (type == ObjectType.Counter64) { + value = readUint64 (buffer); + } else if (type == ObjectType.NoSuchObject) { + buffer.readByte (); + buffer.readByte (); + value = null; + } else if (type == ObjectType.NoSuchInstance) { + buffer.readByte (); + buffer.readByte (); + value = null; + } else if (type == ObjectType.EndOfMibView) { + buffer.readByte (); + buffer.readByte (); + value = null; + } else { + throw new ResponseInvalidError ("Unknown type '" + type + + "' in response"); + } + + varbinds.push ({ + oid: oid, + type: type, + value: value + }); + } +} + +function writeUint (buffer, type, value) { + var b = new Buffer (4); + b.writeUInt32BE (value, 0); + buffer.writeBuffer (b, type); +} + +function writeUint64 (buffer, value) { + buffer.writeBuffer (value, ObjectType.Counter64); +} + +function writeVarbinds (buffer, varbinds) { + buffer.startSequence (); + for (var i = 0; i < varbinds.length; i++) { + buffer.startSequence (); + buffer.writeOID (varbinds[i].oid); + + if (varbinds[i].type && varbinds[i].hasOwnProperty("value")) { + var type = varbinds[i].type; + var value = varbinds[i].value; + + if (type == ObjectType.Boolean) { + buffer.writeBoolean (value ? 
true : false); + } else if (type == ObjectType.Integer) { // also Integer32 + buffer.writeInt (value); + } else if (type == ObjectType.OctetString) { + if (typeof value == "string") + buffer.writeString (value); + else + buffer.writeBuffer (value, ObjectType.OctetString); + } else if (type == ObjectType.Null) { + buffer.writeNull (); + } else if (type == ObjectType.OID) { + buffer.writeOID (value); + } else if (type == ObjectType.IpAddress) { + var bytes = value.split ("."); + if (bytes.length != 4) + throw new RequestInvalidError ("Invalid IP address '" + + value + "'"); + buffer.writeBuffer (new Buffer (bytes), 64); + } else if (type == ObjectType.Counter) { // also Counter32 + writeUint (buffer, ObjectType.Counter, value); + } else if (type == ObjectType.Gauge) { // also Gauge32 & Unsigned32 + writeUint (buffer, ObjectType.Gauge, value); + } else if (type == ObjectType.TimeTicks) { + writeUint (buffer, ObjectType.TimeTicks, value); + } else if (type == ObjectType.Opaque) { + buffer.writeBuffer (value, ObjectType.Opaque); + } else if (type == ObjectType.Counter64) { + writeUint64 (buffer, value); + } else { + throw new RequestInvalidError ("Unknown type '" + type + + "' in request"); + } + } else { + buffer.writeNull (); + } + + buffer.endSequence (); + } + buffer.endSequence (); +} + +/***************************************************************************** + ** PDU class definitions + **/ + +var SimplePdu = function (id, varbinds, options) { + this.id = id; + this.varbinds = varbinds; + this.options = options || {}; +}; + +SimplePdu.prototype.toBuffer = function (buffer) { + buffer.startSequence (this.type); + + buffer.writeInt (this.id); + buffer.writeInt ((this.type == PduType.GetBulkRequest) + ? (this.options.nonRepeaters || 0) + : 0); + buffer.writeInt ((this.type == PduType.GetBulkRequest) + ? 
(this.options.maxRepetitions || 0) + : 0); + + writeVarbinds (buffer, this.varbinds); + + buffer.endSequence (); +}; + +var GetBulkRequestPdu = function () { + this.type = PduType.GetBulkRequest; + GetBulkRequestPdu.super_.apply (this, arguments); +}; + +util.inherits (GetBulkRequestPdu, SimplePdu); + +var GetNextRequestPdu = function () { + this.type = PduType.GetNextRequest; + GetNextRequestPdu.super_.apply (this, arguments); +}; + +util.inherits (GetNextRequestPdu, SimplePdu); + +var GetResponsePdu = function (buffer) { + this.type = PduType.GetResponse; + + buffer.readSequence (this.type); + + this.id = buffer.readInt (); + + this.errorStatus = buffer.readInt (); + this.errorIndex = buffer.readInt (); + + this.varbinds = []; + + readVarbinds (buffer, this.varbinds); +}; + +var GetRequestPdu = function () { + this.type = PduType.GetRequest; + GetRequestPdu.super_.apply (this, arguments); +}; + +util.inherits (GetRequestPdu, SimplePdu); + +var InformRequestPdu = function () { + this.type = PduType.InformRequest; + InformRequestPdu.super_.apply (this, arguments); +}; + +util.inherits (InformRequestPdu, SimplePdu); + +var SetRequestPdu = function () { + this.type = PduType.SetRequest; + SetRequestPdu.super_.apply (this, arguments); +}; + +util.inherits (SetRequestPdu, SimplePdu); + +var TrapPdu = function (typeOrOid, varbinds, options) { + this.type = PduType.Trap; + + this.agentAddr = options.agentAddr || "127.0.0.1"; + this.upTime = options.upTime; + + if (typeof typeOrOid == "string") { + this.generic = TrapType.EnterpriseSpecific; + this.specific = parseInt (typeOrOid.match (/\.(\d+)$/)[1]); + this.enterprise = typeOrOid.replace (/\.(\d+)$/, ""); + } else { + this.generic = typeOrOid; + this.specific = 0; + this.enterprise = "1.3.6.1.4.1"; + } + + this.varbinds = varbinds; +}; + +TrapPdu.prototype.toBuffer = function (buffer) { + buffer.startSequence (this.type); + + buffer.writeOID (this.enterprise); + buffer.writeBuffer (new Buffer (this.agentAddr.split (".")), + ObjectType.IpAddress); + buffer.writeInt (this.generic); + buffer.writeInt (this.specific); + writeUint (buffer, ObjectType.TimeTicks, + this.upTime || Math.floor (process.uptime () * 100)); + + writeVarbinds (buffer, this.varbinds); + + buffer.endSequence (); +}; + +var TrapV2Pdu = function () { + this.type = PduType.TrapV2; + TrapV2Pdu.super_.apply (this, arguments); +}; + +util.inherits (TrapV2Pdu, SimplePdu); + +/***************************************************************************** + ** Message class definitions + **/ + +var RequestMessage = function (version, community, pdu) { + this.version = version; + this.community = community; + this.pdu = pdu; +}; + +RequestMessage.prototype.toBuffer = function () { + if (this.buffer) + return this.buffer; + + var writer = new ber.Writer (); + + writer.startSequence (); + + writer.writeInt (this.version); + writer.writeString (this.community); + + this.pdu.toBuffer (writer); + + writer.endSequence (); + + this.buffer = writer.buffer; + + return this.buffer; +}; + +var ResponseMessage = function (buffer) { + var reader = new ber.Reader (buffer); + + reader.readSequence (); + + this.version = reader.readInt (); + this.community = reader.readString (); + + var type = reader.peek (); + + if (type == PduType.GetResponse) { + this.pdu = new GetResponsePdu (reader); + } else { + throw new ResponseInvalidError ("Unknown PDU type '" + type + + "' in response"); + } +}; + +/***************************************************************************** + ** Session class definition + 
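+ **
+ ** A minimal usage sketch (the require path, target address, community
+ ** string and OID below are illustrative):
+ **
+ **   var snmp = require ("net-snmp");
+ **   var session = snmp.createSession ("192.0.2.1", "public");
+ **   session.get (["1.3.6.1.2.1.1.5.0"], function (error, varbinds) {
+ **       if (error)
+ **           console.error (error.toString ());
+ **       else
+ **           console.log (varbinds[0].oid + " = " + varbinds[0].value);
+ **       session.close ();
+ **   });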
**/ + +var Session = function (target, community, options) { + this.target = target || "127.0.0.1"; + this.community = community || "public"; + + this.version = (options && options.version) + ? options.version + : Version1; + + this.transport = (options && options.transport) + ? options.transport + : "udp4"; + this.port = (options && options.port ) + ? options.port + : 161; + this.trapPort = (options && options.trapPort ) + ? options.trapPort + : 162; + + this.retries = (options && (options.retries || options.retries == 0)) + ? options.retries + : 1; + this.timeout = (options && options.timeout) + ? options.timeout + : 5000; + + this.sourceAddress = (options && options.sourceAddress ) + ? options.sourceAddress + : undefined; + this.sourcePort = (options && options.sourcePort ) + ? parseInt(options.sourcePort) + : undefined; + + this.idBitsSize = (options && options.idBitsSize) + ? parseInt(options.idBitsSize) + : 32; + + this.reqs = {}; + this.reqCount = 0; + + this.dgram = dgram.createSocket (this.transport); + this.dgram.unref(); + + var me = this; + this.dgram.on ("message", me.onMsg.bind (me)); + this.dgram.on ("close", me.onClose.bind (me)); + this.dgram.on ("error", me.onError.bind (me)); + + if (this.sourceAddress || this.sourcePort) + this.dgram.bind (this.sourcePort, this.sourceAddress); +}; + +util.inherits (Session, events.EventEmitter); + +Session.prototype.close = function () { + this.dgram.close (); + return this; +}; + +Session.prototype.cancelRequests = function (error) { + var id; + for (id in this.reqs) { + var req = this.reqs[id]; + this.unregisterRequest (req.id); + req.responseCb (error); + } +}; + +function _generateId (bitSize) { + if (bitSize === 16) { + return Math.floor(Math.random() * 10000) % 65535; + } + return Math.floor(Math.random() * 100000000) % 4294967295; +} + +Session.prototype.get = function (oids, responseCb) { + function feedCb (req, message) { + var pdu = message.pdu; + var varbinds = []; + + if (req.message.pdu.varbinds.length != pdu.varbinds.length) { + req.responseCb (new ResponseInvalidError ("Requested OIDs do not " + + "match response OIDs")); + } else { + for (var i = 0; i < req.message.pdu.varbinds.length; i++) { + if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) { + req.responseCb (new ResponseInvalidError ("OID '" + + req.message.pdu.varbinds[i].oid + + "' in request at positiion '" + i + "' does not " + + "match OID '" + pdu.varbinds[i].oid + "' in response " + + "at position '" + i + "'")); + return; + } else { + varbinds.push (pdu.varbinds[i]); + } + } + + req.responseCb (null, varbinds); + } + } + + var pduVarbinds = []; + + for (var i = 0; i < oids.length; i++) { + var varbind = { + oid: oids[i] + }; + pduVarbinds.push (varbind); + } + + this.simpleGet (GetRequestPdu, feedCb, pduVarbinds, responseCb); + + return this; +}; + +Session.prototype.getBulk = function () { + var oids, nonRepeaters, maxRepetitions, responseCb; + + if (arguments.length >= 4) { + oids = arguments[0]; + nonRepeaters = arguments[1]; + maxRepetitions = arguments[2]; + responseCb = arguments[3]; + } else if (arguments.length >= 3) { + oids = arguments[0]; + nonRepeaters = arguments[1]; + maxRepetitions = 10; + responseCb = arguments[2]; + } else { + oids = arguments[0]; + nonRepeaters = 0; + maxRepetitions = 10; + responseCb = arguments[1]; + } + + function feedCb (req, message) { + var pdu = message.pdu; + var varbinds = []; + var i = 0; + + // first walk through and grab non-repeaters + if (pdu.varbinds.length < nonRepeaters) { + req.responseCb (new 
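+                       // (sanity check: a GetBulk response must contain at
+                       // least the non-repeater varbinds that were requested)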
ResponseInvalidError ("Varbind count in " + + "response '" + pdu.varbinds.length + "' is less than " + + "non-repeaters '" + nonRepeaters + "' in request")); + } else { + for ( ; i < nonRepeaters; i++) { + if (isVarbindError (pdu.varbinds[i])) { + varbinds.push (pdu.varbinds[i]); + } else if (! oidFollowsOid (req.message.pdu.varbinds[i].oid, + pdu.varbinds[i].oid)) { + req.responseCb (new ResponseInvalidError ("OID '" + + req.message.pdu.varbinds[i].oid + "' in request at " + + "positiion '" + i + "' does not precede " + + "OID '" + pdu.varbinds[i].oid + "' in response " + + "at position '" + i + "'")); + return; + } else { + varbinds.push (pdu.varbinds[i]); + } + } + } + + var repeaters = req.message.pdu.varbinds.length - nonRepeaters; + + // secondly walk through and grab repeaters + if (pdu.varbinds.length % (repeaters)) { + req.responseCb (new ResponseInvalidError ("Varbind count in " + + "response '" + pdu.varbinds.length + "' is not a " + + "multiple of repeaters '" + repeaters + + "' plus non-repeaters '" + nonRepeaters + "' in request")); + } else { + while (i < pdu.varbinds.length) { + for (var j = 0; j < repeaters; j++, i++) { + var reqIndex = nonRepeaters + j; + var respIndex = i; + + if (isVarbindError (pdu.varbinds[respIndex])) { + if (! varbinds[reqIndex]) + varbinds[reqIndex] = []; + varbinds[reqIndex].push (pdu.varbinds[respIndex]); + } else if (! oidFollowsOid ( + req.message.pdu.varbinds[reqIndex].oid, + pdu.varbinds[respIndex].oid)) { + req.responseCb (new ResponseInvalidError ("OID '" + + req.message.pdu.varbinds[reqIndex].oid + + "' in request at positiion '" + (reqIndex) + + "' does not precede OID '" + + pdu.varbinds[respIndex].oid + + "' in response at position '" + (respIndex) + "'")); + return; + } else { + if (! varbinds[reqIndex]) + varbinds[reqIndex] = []; + varbinds[reqIndex].push (pdu.varbinds[respIndex]); + } + } + } + } + + req.responseCb (null, varbinds); + } + + var pduVarbinds = []; + + for (var i = 0; i < oids.length; i++) { + var varbind = { + oid: oids[i] + }; + pduVarbinds.push (varbind); + } + + var options = { + nonRepeaters: nonRepeaters, + maxRepetitions: maxRepetitions + }; + + this.simpleGet (GetBulkRequestPdu, feedCb, pduVarbinds, responseCb, + options); + + return this; +}; + +Session.prototype.getNext = function (oids, responseCb) { + function feedCb (req, message) { + var pdu = message.pdu; + var varbinds = []; + + if (req.message.pdu.varbinds.length != pdu.varbinds.length) { + req.responseCb (new ResponseInvalidError ("Requested OIDs do not " + + "match response OIDs")); + } else { + for (var i = 0; i < req.message.pdu.varbinds.length; i++) { + if (isVarbindError (pdu.varbinds[i])) { + varbinds.push (pdu.varbinds[i]); + } else if (! 
oidFollowsOid (req.message.pdu.varbinds[i].oid, + pdu.varbinds[i].oid)) { + req.responseCb (new ResponseInvalidError ("OID '" + + req.message.pdu.varbinds[i].oid + "' in request at " + + "positiion '" + i + "' does not precede " + + "OID '" + pdu.varbinds[i].oid + "' in response " + + "at position '" + i + "'")); + return; + } else { + varbinds.push (pdu.varbinds[i]); + } + } + + req.responseCb (null, varbinds); + } + } + + var pduVarbinds = []; + + for (var i = 0; i < oids.length; i++) { + var varbind = { + oid: oids[i] + }; + pduVarbinds.push (varbind); + } + + this.simpleGet (GetNextRequestPdu, feedCb, pduVarbinds, responseCb); + + return this; +}; + +Session.prototype.inform = function () { + var typeOrOid = arguments[0]; + var varbinds, options = {}, responseCb; + + /** + ** Support the following signatures: + ** + ** typeOrOid, varbinds, options, callback + ** typeOrOid, varbinds, callback + ** typeOrOid, options, callback + ** typeOrOid, callback + **/ + if (arguments.length >= 4) { + varbinds = arguments[1]; + options = arguments[2]; + responseCb = arguments[3]; + } else if (arguments.length >= 3) { + if (arguments[1].constructor != Array) { + varbinds = []; + options = arguments[1]; + responseCb = arguments[2]; + } else { + varbinds = arguments[1]; + responseCb = arguments[2]; + } + } else { + varbinds = []; + responseCb = arguments[1]; + } + + function feedCb (req, message) { + var pdu = message.pdu; + var varbinds = []; + + if (req.message.pdu.varbinds.length != pdu.varbinds.length) { + req.responseCb (new ResponseInvalidError ("Inform OIDs do not " + + "match response OIDs")); + } else { + for (var i = 0; i < req.message.pdu.varbinds.length; i++) { + if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) { + req.responseCb (new ResponseInvalidError ("OID '" + + req.message.pdu.varbinds[i].oid + + "' in inform at positiion '" + i + "' does not " + + "match OID '" + pdu.varbinds[i].oid + "' in response " + + "at position '" + i + "'")); + return; + } else { + varbinds.push (pdu.varbinds[i]); + } + } + + req.responseCb (null, varbinds); + } + } + + if (typeof typeOrOid != "string") + typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1); + + var pduVarbinds = [ + { + oid: "1.3.6.1.2.1.1.3.0", + type: ObjectType.TimeTicks, + value: options.upTime || Math.floor (process.uptime () * 100) + }, + { + oid: "1.3.6.1.6.3.1.1.4.1.0", + type: ObjectType.OID, + value: typeOrOid + } + ]; + + for (var i = 0; i < varbinds.length; i++) { + var varbind = { + oid: varbinds[i].oid, + type: varbinds[i].type, + value: varbinds[i].value + }; + pduVarbinds.push (varbind); + } + + options.port = this.trapPort; + + this.simpleGet (InformRequestPdu, feedCb, pduVarbinds, responseCb, options); + + return this; +}; + +Session.prototype.onClose = function () { + this.cancelRequests (new Error ("Socket forcibly closed")); + this.emit ("close"); +}; + +Session.prototype.onError = function (error) { + this.emit (error); +}; + +Session.prototype.onMsg = function (buffer, remote) { + try { + var message = new ResponseMessage (buffer); + + var req = this.unregisterRequest (message.pdu.id); + if (! 
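+       // (no matching request id means the request already timed out or was
+       // cancelled, so this response is silently dropped)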
req) + return; + + try { + if (message.version != req.message.version) { + req.responseCb (new ResponseInvalidError ("Version in request '" + + req.message.version + "' does not match version in " + + "response '" + message.version)); + } else if (message.community != req.message.community) { + req.responseCb (new ResponseInvalidError ("Community '" + + req.message.community + "' in request does not match " + + "community '" + message.community + "' in response")); + } else if (message.pdu.type == PduType.GetResponse) { + req.onResponse (req, message); + } else { + req.responseCb (new ResponseInvalidError ("Unknown PDU type '" + + message.pdu.type + "' in response")); + } + } catch (error) { + req.responseCb (error); + } + } catch (error) { + this.emit("error", error); + } +}; + +Session.prototype.onSimpleGetResponse = function (req, message) { + var pdu = message.pdu; + + if (pdu.errorStatus > 0) { + var statusString = ErrorStatus[pdu.errorStatus] + || ErrorStatus.GeneralError; + var statusCode = ErrorStatus[statusString] + || ErrorStatus[ErrorStatus.GeneralError]; + + if (pdu.errorIndex <= 0 || pdu.errorIndex > pdu.varbinds.length) { + req.responseCb (new RequestFailedError (statusString, statusCode)); + } else { + var oid = pdu.varbinds[pdu.errorIndex - 1].oid; + var error = new RequestFailedError (statusString + ": " + oid, + statusCode); + req.responseCb (error); + } + } else { + req.feedCb (req, message); + } +}; + +Session.prototype.registerRequest = function (req) { + if (! this.reqs[req.id]) { + this.reqs[req.id] = req; + if (this.reqCount <= 0) + this.dgram.ref(); + this.reqCount++; + } + var me = this; + req.timer = setTimeout (function () { + if (req.retries-- > 0) { + me.send (req); + } else { + me.unregisterRequest (req.id); + req.responseCb (new RequestTimedOutError ( + "Request timed out")); + } + }, req.timeout); +}; + +Session.prototype.send = function (req, noWait) { + try { + var me = this; + + var buffer = req.message.toBuffer (); + + this.dgram.send (buffer, 0, buffer.length, req.port, this.target, + function (error, bytes) { + if (error) { + req.responseCb (error); + } else { + if (noWait) { + req.responseCb (null); + } else { + me.registerRequest (req); + } + } + }); + } catch (error) { + req.responseCb (error); + } + + return this; +}; + +Session.prototype.set = function (varbinds, responseCb) { + function feedCb (req, message) { + var pdu = message.pdu; + var varbinds = []; + + if (req.message.pdu.varbinds.length != pdu.varbinds.length) { + req.responseCb (new ResponseInvalidError ("Requested OIDs do not " + + "match response OIDs")); + } else { + for (var i = 0; i < req.message.pdu.varbinds.length; i++) { + if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) { + req.responseCb (new ResponseInvalidError ("OID '" + + req.message.pdu.varbinds[i].oid + + "' in request at positiion '" + i + "' does not " + + "match OID '" + pdu.varbinds[i].oid + "' in response " + + "at position '" + i + "'")); + return; + } else { + varbinds.push (pdu.varbinds[i]); + } + } + + req.responseCb (null, varbinds); + } + } + + var pduVarbinds = []; + + for (var i = 0; i < varbinds.length; i++) { + var varbind = { + oid: varbinds[i].oid, + type: varbinds[i].type, + value: varbinds[i].value + }; + pduVarbinds.push (varbind); + } + + this.simpleGet (SetRequestPdu, feedCb, pduVarbinds, responseCb); + + return this; +}; + +Session.prototype.simpleGet = function (pduClass, feedCb, varbinds, + responseCb, options) { + var req = {}; + + try { + var id = _generateId (this.idBitsSize); + var 
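+               // (pduClass is one of the SimplePdu subclasses defined above,
+               // e.g. GetRequestPdu, GetNextRequestPdu or GetBulkRequestPdu)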
pdu = new pduClass (id, varbinds, options); + var message = new RequestMessage (this.version, this.community, pdu); + + req = { + id: id, + message: message, + responseCb: responseCb, + retries: this.retries, + timeout: this.timeout, + onResponse: this.onSimpleGetResponse, + feedCb: feedCb, + port: (options && options.port) ? options.port : this.port + }; + + this.send (req); + } catch (error) { + if (req.responseCb) + req.responseCb (error); + } +}; + +function subtreeCb (req, varbinds) { + var done = 0; + + for (var i = varbinds.length; i > 0; i--) { + if (! oidInSubtree (req.baseOid, varbinds[i - 1].oid)) { + done = 1; + varbinds.pop (); + } + } + + if (varbinds.length > 0) + req.feedCb (varbinds); + + if (done) + return true; +} + +Session.prototype.subtree = function () { + var me = this; + var oid = arguments[0]; + var maxRepetitions, feedCb, doneCb; + + if (arguments.length < 4) { + maxRepetitions = 20; + feedCb = arguments[1]; + doneCb = arguments[2]; + } else { + maxRepetitions = arguments[1]; + feedCb = arguments[2]; + doneCb = arguments[3]; + } + + var req = { + feedCb: feedCb, + doneCb: doneCb, + maxRepetitions: maxRepetitions, + baseOid: oid + }; + + this.walk (oid, maxRepetitions, subtreeCb.bind (me, req), doneCb); + + return this; +}; + +function tableColumnsResponseCb (req, error) { + if (error) { + req.responseCb (error); + } else if (req.error) { + req.responseCb (req.error); + } else { + if (req.columns.length > 0) { + var column = req.columns.pop (); + var me = this; + this.subtree (req.rowOid + column, req.maxRepetitions, + tableColumnsFeedCb.bind (me, req), + tableColumnsResponseCb.bind (me, req)); + } else { + req.responseCb (null, req.table); + } + } +} + +function tableColumnsFeedCb (req, varbinds) { + for (var i = 0; i < varbinds.length; i++) { + if (isVarbindError (varbinds[i])) { + req.error = new RequestFailedError (varbindError (varbind[i])); + return true; + } + + var oid = varbinds[i].oid.replace (req.rowOid, ""); + if (oid && oid != varbinds[i].oid) { + var match = oid.match (/^(\d+)\.(.+)$/); + if (match && match[1] > 0) { + if (! req.table[match[2]]) + req.table[match[2]] = {}; + req.table[match[2]][match[1]] = varbinds[i].value; + } + } + } +} + +Session.prototype.tableColumns = function () { + var me = this; + + var oid = arguments[0]; + var columns = arguments[1]; + var maxRepetitions, responseCb; + + if (arguments.length < 4) { + responseCb = arguments[2]; + maxRepetitions = 20; + } else { + maxRepetitions = arguments[2]; + responseCb = arguments[3]; + } + + var req = { + responseCb: responseCb, + maxRepetitions: maxRepetitions, + baseOid: oid, + rowOid: oid + ".1.", + columns: columns.slice(0), + table: {} + }; + + if (req.columns.length > 0) { + var column = req.columns.pop (); + this.subtree (req.rowOid + column, maxRepetitions, + tableColumnsFeedCb.bind (me, req), + tableColumnsResponseCb.bind (me, req)); + } + + return this; +}; + +function tableResponseCb (req, error) { + if (error) + req.responseCb (error); + else if (req.error) + req.responseCb (req.error); + else + req.responseCb (null, req.table); +} + +function tableFeedCb (req, varbinds) { + for (var i = 0; i < varbinds.length; i++) { + if (isVarbindError (varbinds[i])) { + req.error = new RequestFailedError (varbindError (varbind[i])); + return true; + } + + var oid = varbinds[i].oid.replace (req.rowOid, ""); + if (oid && oid != varbinds[i].oid) { + var match = oid.match (/^(\d+)\.(.+)$/); + if (match && match[1] > 0) { + if (! 
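+                               // (the table is keyed by row index first and
+                               // column number second, both taken from the
+                               // tail of the returned OID)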
req.table[match[2]]) + req.table[match[2]] = {}; + req.table[match[2]][match[1]] = varbinds[i].value; + } + } + } +} + +Session.prototype.table = function () { + var me = this; + + var oid = arguments[0]; + var maxRepetitions, responseCb; + + if (arguments.length < 3) { + responseCb = arguments[1]; + maxRepetitions = 20; + } else { + maxRepetitions = arguments[1]; + responseCb = arguments[2]; + } + + var req = { + responseCb: responseCb, + maxRepetitions: maxRepetitions, + baseOid: oid, + rowOid: oid + ".1.", + table: {} + }; + + this.subtree (oid, maxRepetitions, tableFeedCb.bind (me, req), + tableResponseCb.bind (me, req)); + + return this; +}; + +Session.prototype.trap = function () { + var req = {}; + + try { + var typeOrOid = arguments[0]; + var varbinds, options = {}, responseCb; + + /** + ** Support the following signatures: + ** + ** typeOrOid, varbinds, options, callback + ** typeOrOid, varbinds, agentAddr, callback + ** typeOrOid, varbinds, callback + ** typeOrOid, agentAddr, callback + ** typeOrOid, options, callback + ** typeOrOid, callback + **/ + if (arguments.length >= 4) { + varbinds = arguments[1]; + if (typeof arguments[2] == "string") { + options.agentAddr = arguments[2]; + } else if (arguments[2].constructor != Array) { + options = arguments[2]; + } + responseCb = arguments[3]; + } else if (arguments.length >= 3) { + if (typeof arguments[1] == "string") { + varbinds = []; + options.agentAddr = arguments[1]; + } else if (arguments[1].constructor != Array) { + varbinds = []; + options = arguments[1]; + } else { + varbinds = arguments[1]; + agentAddr = null; + } + responseCb = arguments[2]; + } else { + varbinds = []; + responseCb = arguments[1]; + } + + var pdu, pduVarbinds = []; + + for (var i = 0; i < varbinds.length; i++) { + var varbind = { + oid: varbinds[i].oid, + type: varbinds[i].type, + value: varbinds[i].value + }; + pduVarbinds.push (varbind); + } + + var id = _generateId (this.idBitsSize); + + if (this.version == Version2c) { + if (typeof typeOrOid != "string") + typeOrOid = "1.3.6.1.6.3.1.1.5." 
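+                               // (generic trap numbers map onto the standard
+                               // snmpTraps subtree, offset by one)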
+ (typeOrOid + 1); + + pduVarbinds.unshift ( + { + oid: "1.3.6.1.2.1.1.3.0", + type: ObjectType.TimeTicks, + value: options.upTime || Math.floor (process.uptime () * 100) + }, + { + oid: "1.3.6.1.6.3.1.1.4.1.0", + type: ObjectType.OID, + value: typeOrOid + } + ); + + pdu = new TrapV2Pdu (id, pduVarbinds, options); + } else { + pdu = new TrapPdu (typeOrOid, pduVarbinds, options); + } + + var message = new RequestMessage (this.version, this.community, pdu); + + req = { + id: id, + message: message, + responseCb: responseCb, + port: this.trapPort + }; + + this.send (req, true); + } catch (error) { + if (req.responseCb) + req.responseCb (error); + } + + return this; +}; + +Session.prototype.unregisterRequest = function (id) { + var req = this.reqs[id]; + if (req) { + delete this.reqs[id]; + clearTimeout (req.timer); + delete req.timer; + this.reqCount--; + if (this.reqCount <= 0) + this.dgram.unref(); + return req; + } else { + return null; + } +}; + +function walkCb (req, error, varbinds) { + var done = 0; + var oid; + + if (error) { + if (error instanceof RequestFailedError) { + if (error.status != ErrorStatus.NoSuchName) { + req.doneCb (error); + return; + } else { + // signal the version 1 walk code below that it should stop + done = 1; + } + } else { + req.doneCb (error); + return; + } + } + + if (this.version == Version2c) { + for (var i = varbinds[0].length; i > 0; i--) { + if (varbinds[0][i - 1].type == ObjectType.EndOfMibView) { + varbinds[0].pop (); + done = 1; + } + } + if (req.feedCb (varbinds[0])) + done = 1; + if (! done) + oid = varbinds[0][varbinds[0].length - 1].oid; + } else { + if (! done) { + if (req.feedCb (varbinds)) { + done = 1; + } else { + oid = varbinds[0].oid; + } + } + } + + if (done) + req.doneCb (null); + else + this.walk (oid, req.maxRepetitions, req.feedCb, req.doneCb, + req.baseOid); +} + +Session.prototype.walk = function () { + var me = this; + var oid = arguments[0]; + var maxRepetitions, feedCb, doneCb, baseOid; + + if (arguments.length < 4) { + maxRepetitions = 20; + feedCb = arguments[1]; + doneCb = arguments[2]; + } else { + maxRepetitions = arguments[1]; + feedCb = arguments[2]; + doneCb = arguments[3]; + } + + var req = { + maxRepetitions: maxRepetitions, + feedCb: feedCb, + doneCb: doneCb + }; + + if (this.version == Version2c) + this.getBulk ([oid], 0, maxRepetitions, + walkCb.bind (me, req)); + else + this.getNext ([oid], walkCb.bind (me, req)); + + return this; +}; + +/***************************************************************************** + ** Exports + **/ + +exports.Session = Session; + +exports.createSession = function (target, community, options) { + return new Session (target, community, options); +}; + +exports.isVarbindError = isVarbindError; +exports.varbindError = varbindError; + +exports.Version1 = Version1; +exports.Version2c = Version2c; + +exports.ErrorStatus = ErrorStatus; +exports.TrapType = TrapType; +exports.ObjectType = ObjectType; + +exports.ResponseInvalidError = ResponseInvalidError; +exports.RequestInvalidError = RequestInvalidError; +exports.RequestFailedError = RequestFailedError; +exports.RequestTimedOutError = RequestTimedOutError; + +/** + ** We've added this for testing. 
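+ ** (readInt and readUint are exposed so the BER integer decoding above can
+ ** be unit-tested without constructing complete SNMP messages; both expect
+ ** a ber.Reader positioned at the start of an integer TLV)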
+ **/ +exports.ObjectParser = { + readInt: readInt, + readUint: readUint +}; diff --git a/collectors/node.d.plugin/node_modules/netdata.js b/collectors/node.d.plugin/node_modules/netdata.js new file mode 100644 index 000000000..603922c6e --- /dev/null +++ b/collectors/node.d.plugin/node_modules/netdata.js @@ -0,0 +1,654 @@ +'use strict'; + +// netdata +// real-time performance and health monitoring, done right! +// (C) 2016 Costa Tsaousis +// SPDX-License-Identifier: GPL-3.0-or-later + +var url = require('url'); +var http = require('http'); +var util = require('util'); + +/* +var netdata = require('netdata'); + +var example_chart = { + id: 'id', // the unique id of the chart + name: 'name', // the name of the chart + title: 'title', // the title of the chart + units: 'units', // the units of the chart dimensions + family: 'family', // the family of the chart + context: 'context', // the context of the chart + type: netdata.chartTypes.line, // the type of the chart + priority: 0, // the priority relative to others in the same family + update_every: 1, // the expected update frequency of the chart + dimensions: { + 'dim1': { + id: 'dim1', // the unique id of the dimension + name: 'name', // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1, // the divisor + hidden: false, // is hidden (boolean) + }, + 'dim2': { + id: 'dim2', // the unique id of the dimension + name: 'name', // the name of the dimension + algorithm: 'absolute', // the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1, // the divisor + hidden: false, // is hidden (boolean) + } + // add as many dimensions as needed + } +}; +*/ + +var netdata = { + options: { + filename: __filename, + DEBUG: false, + update_every: 1 + }, + + chartAlgorithms: { + incremental: 'incremental', + absolute: 'absolute', + percentage_of_absolute_row: 'percentage-of-absolute-row', + percentage_of_incremental_row: 'percentage-of-incremental-row' + }, + + chartTypes: { + line: 'line', + area: 'area', + stacked: 'stacked' + }, + + services: new Array(), + modules_configuring: 0, + charts: {}, + + processors: { + http: { + name: 'http', + + process: function(service, callback) { + var __DEBUG = netdata.options.DEBUG; + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': making ' + this.name + ' request: ' + netdata.stringify(service.request)); + + var req = http.request(service.request, function(response) { + if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': got server response...'); + + var end = false; + var data = ''; + response.setEncoding('utf8'); + + if(response.statusCode !== 200) { + if(end === false) { + service.error('Got HTTP code ' + response.statusCode + ', failed to get data.'); + end = true; + return callback(null); + } + } + + response.on('data', function(chunk) { + if(end === false) data += chunk; + }); + + response.on('error', function() { + if(end === false) { + service.error(': Read error, failed to get data.'); + end = true; + return callback(null); + } + }); + + response.on('end', function() { + if(end === false) { + if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': read completed.'); + end = true; + return callback(data); + } + }); + }); + + req.on('error', function(e) { + if(__DEBUG === true) netdata.debug('Failed to make request: ' + netdata.stringify(service.request) + ', message: ' + e.message); + 
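+                // (the failure is reported only once, via the per-service
+                // error flood control, and the collection run is completed
+                // with a null response)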
service.error('Failed to make request, message: ' + e.message); + return callback(null); + }); + + // write data to request body + if(typeof service.postData !== 'undefined' && service.request.method === 'POST') { + if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': posting data: ' + service.postData); + req.write(service.postData); + } + + req.end(); + } + } + }, + + stringify: function(obj) { + return util.inspect(obj, {depth: 10}); + }, + + zeropad2: function(s) { + return ("00" + s).slice(-2); + }, + + logdate: function(d) { + if(typeof d === 'undefined') d = new Date(); + return d.getFullYear().toString() + '-' + this.zeropad2(d.getMonth() + 1) + '-' + this.zeropad2(d.getDate()) + + ' ' + this.zeropad2(d.getHours()) + ':' + this.zeropad2(d.getMinutes()) + ':' + this.zeropad2(d.getSeconds()); + }, + + // show debug info, if debug is enabled + debug: function(msg) { + if(this.options.DEBUG === true) { + console.error(this.logdate() + ': ' + netdata.options.filename + ': DEBUG: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString()); + } + }, + + // log an error + error: function(msg) { + console.error(this.logdate() + ': ' + netdata.options.filename + ': ERROR: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString()); + }, + + // send data to netdata + send: function(msg) { + console.log(msg.toString()); + }, + + service: function(service) { + if(typeof service === 'undefined') + service = {}; + + var now = Date.now(); + + service._current_chart = null; // the current chart we work on + service._queue = ''; // data to be sent to netdata + + service.error_reported = false; // error log flood control + + service.added = false; // added to netdata.services + service.enabled = true; + service.updates = 0; + service.running = false; + service.started = 0; + service.ended = 0; + + if(typeof service.module === 'undefined') { + service.module = { name: 'not-defined-module' }; + service.error('Attempted to create service without a module.'); + service.enabled = false; + } + + if(typeof service.name === 'undefined') { + service.name = 'unnamed@' + service.module.name + '/' + now; + } + + if(typeof service.processor === 'undefined') + service.processor = netdata.processors.http; + + if(typeof service.update_every === 'undefined') + service.update_every = service.module.update_every; + + if(typeof service.update_every === 'undefined') + service.update_every = netdata.options.update_every; + + if(service.update_every < netdata.options.update_every) + service.update_every = netdata.options.update_every; + + // align the runs + service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000); + + service.commit = function() { + if(this.added !== true) { + this.added = true; + + var now = Date.now(); + this.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000); + + netdata.services.push(this); + if(netdata.options.DEBUG === true) netdata.debug(this.module.name + ': ' + this.name + ': service committed.'); + } + }; + + service.execute = function(responseProcessor) { + var __DEBUG = netdata.options.DEBUG; + + if(service.enabled === false) + return responseProcessor(null); + + this.module.active++; + this.running = true; + this.started = Date.now(); + this.updates++; + + if(__DEBUG === true) + netdata.debug(this.module.name + ': ' + this.name + ': making ' + this.processor.name + ' request: ' + netdata.stringify(this)); + + this.processor.process(this, function(response) { + 
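+            // (per-run bookkeeping: record the duration, clear the
+            // error-reported flag on success, then hand the raw response to
+            // the module's responseProcessor)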
service.ended = Date.now(); + service.duration = service.ended - service.started; + + if(typeof response === 'undefined') + response = null; + + if(response !== null) + service.errorClear(); + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': processing ' + service.processor.name + ' response (received in ' + (service.ended - service.started).toString() + ' ms)'); + + try { + responseProcessor(service, response); + } + catch(e) { + netdata.error(e); + service.error("responseProcessor failed process response data."); + } + + service.running = false; + service.module.active--; + if(service.module.active < 0) { + service.module.active = 0; + if(__DEBUG === true) + netdata.debug(service.module.name + ': active module counter below zero.'); + } + + if(service.module.active === 0) { + // check if we run under configure + if(service.module.configure_callback !== null) { + if(__DEBUG === true) + netdata.debug(service.module.name + ': configuration finish callback called from processResponse().'); + + var configure_callback = service.module.configure_callback; + service.module.configure_callback = null; + configure_callback(); + } + } + }); + }; + + service.update = function() { + if(netdata.options.DEBUG === true) + netdata.debug(this.module.name + ': ' + this.name + ': starting data collection...'); + + this.module.update(this, function() { + if(netdata.options.DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': data collection ended in ' + service.duration.toString() + ' ms.'); + }); + }; + + service.error = function(message) { + if(this.error_reported === false) { + netdata.error(this.module.name + ': ' + this.name + ': ' + message); + this.error_reported = true; + } + else if(netdata.options.DEBUG === true) + netdata.debug(this.module.name + ': ' + this.name + ': ' + message); + }; + + service.errorClear = function() { + this.error_reported = false; + }; + + service.queue = function(txt) { + this._queue += txt + '\n'; + }; + + service._send_chart_to_netdata = function(chart) { + // internal function to send a chart to netdata + this.queue('CHART "' + chart.id + '" "' + chart.name + '" "' + chart.title + '" "' + chart.units + '" "' + chart.family + '" "' + chart.context + '" "' + chart.type + '" ' + chart.priority.toString() + ' ' + chart.update_every.toString()); + + if(typeof(chart.dimensions) !== 'undefined') { + var dims = Object.keys(chart.dimensions); + var len = dims.length; + while(len--) { + var d = chart.dimensions[dims[len]]; + + this.queue('DIMENSION "' + d.id + '" "' + d.name + '" "' + d.algorithm + '" ' + d.multiplier.toString() + ' ' + d.divisor.toString() + ' ' + ((d.hidden === true) ? 'hidden' : '').toString()); + d._created = true; + d._updated = false; + } + } + + chart._created = true; + chart._updated = false; + }; + + // begin data collection for a chart + service.begin = function(chart) { + if(this._current_chart !== null && this._current_chart !== chart) { + this.error('Called begin() for chart ' + chart.id + ' while chart ' + this._current_chart.id + ' is still open. Closing it.'); + this.end(); + } + + if(typeof(chart.id) === 'undefined' || netdata.charts[chart.id] !== chart) { + this.error('Called begin() for chart ' + chart.id + ' that is not mine. Where did you find it? 
Ignoring it.'); + return false; + } + + if(netdata.options.DEBUG === true) netdata.debug('setting current chart to ' + chart.id); + this._current_chart = chart; + this._current_chart._began = true; + + if(this._current_chart._dimensions_count !== 0) { + if(this._current_chart._created === false || this._current_chart._updated === true) + this._send_chart_to_netdata(this._current_chart); + + var now = this.ended; + this.queue('BEGIN ' + this._current_chart.id + ' ' + ((this._current_chart._last_updated > 0)?((now - this._current_chart._last_updated) * 1000):'').toString()); + } + // else this.error('Called begin() for chart ' + chart.id + ' which is empty.'); + + this._current_chart._last_updated = now; + this._current_chart._began = true; + this._current_chart._counter++; + + return true; + }; + + // set a collected value for a chart + // we do most things on the first value we attempt to set + service.set = function(dimension, value) { + if(this._current_chart === null) { + this.error('Called set(' + dimension + ', ' + value + ') without an open chart.'); + return false; + } + + if(typeof(this._current_chart.dimensions[dimension]) === 'undefined') { + this.error('Called set(' + dimension + ', ' + value + ') but dimension "' + dimension + '" does not exist in chart "' + this._current_chart.id + '".'); + return false; + } + + if(typeof value === 'undefined' || value === null) + return false; + + if(this._current_chart._dimensions_count !== 0) + this.queue('SET ' + dimension + ' = ' + value.toString()); + + return true; + }; + + // end data collection for the current chart - after calling begin() + service.end = function() { + if(this._current_chart !== null && this._current_chart._began === false) { + this.error('Called end() without an open chart.'); + return false; + } + + if(this._current_chart._dimensions_count !== 0) { + this.queue('END'); + netdata.send(this._queue); + } + + this._queue = ''; + this._current_chart._began = false; + if(netdata.options.DEBUG === true) netdata.debug('sent chart ' + this._current_chart.id); + this._current_chart = null; + return true; + }; + + // discard the collected values for the current chart - after calling begin() + service.flush = function() { + if(this._current_chart === null || this._current_chart._began === false) { + this.error('Called flush() without an open chart.'); + return false; + } + + this._queue = ''; + this._current_chart._began = false; + this._current_chart = null; + return true; + }; + + // create a netdata chart + service.chart = function(id, chart) { + var __DEBUG = netdata.options.DEBUG; + + if(typeof(netdata.charts[id]) === 'undefined') { + netdata.charts[id] = { + _created: false, + _updated: true, + _began: false, + _counter: 0, + _last_updated: 0, + _dimensions_count: 0, + id: id, + name: id, + title: 'untitled chart', + units: 'a unit', + family: '', + context: '', + type: netdata.chartTypes.line, + priority: 50000, + update_every: netdata.options.update_every, + dimensions: {} + }; + } + + var c = netdata.charts[id]; + + if(typeof(chart.name) !== 'undefined' && chart.name !== c.name) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its name'); + c.name = chart.name; + c._updated = true; + } + + if(typeof(chart.title) !== 'undefined' && chart.title !== c.title) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its title'); + c.title = chart.title; + c._updated = true; + } + + if(typeof(chart.units) !== 'undefined' && chart.units !== c.units) { + if(__DEBUG === true) netdata.debug('chart ' + id 
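+            // (any property change flips _updated, so the CHART definition is
+            // re-sent to netdata before the next BEGIN block)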
+ ' updated its units'); + c.units = chart.units; + c._updated = true; + } + + if(typeof(chart.family) !== 'undefined' && chart.family !== c.family) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its family'); + c.family = chart.family; + c._updated = true; + } + + if(typeof(chart.context) !== 'undefined' && chart.context !== c.context) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its context'); + c.context = chart.context; + c._updated = true; + } + + if(typeof(chart.type) !== 'undefined' && chart.type !== c.type) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its type'); + c.type = chart.type; + c._updated = true; + } + + if(typeof(chart.priority) !== 'undefined' && chart.priority !== c.priority) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its priority'); + c.priority = chart.priority; + c._updated = true; + } + + if(typeof(chart.update_every) !== 'undefined' && chart.update_every !== c.update_every) { + if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its update_every from ' + c.update_every + ' to ' + chart.update_every); + c.update_every = chart.update_every; + c._updated = true; + } + + if(typeof(chart.dimensions) !== 'undefined') { + var dims = Object.keys(chart.dimensions); + var len = dims.length; + while(len--) { + var x = dims[len]; + + if(typeof(c.dimensions[x]) === 'undefined') { + c._dimensions_count++; + + c.dimensions[x] = { + _created: false, + _updated: false, + id: x, // the unique id of the dimension + name: x, // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1, // the divisor + hidden: false // is hidden (boolean) + }; + + if(__DEBUG === true) netdata.debug('chart ' + id + ' created dimension ' + x); + c._updated = true; + } + + var dim = chart.dimensions[x]; + var d = c.dimensions[x]; + + if(typeof(dim.name) !== 'undefined' && d.name !== dim.name) { + if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its name'); + d.name = dim.name; + d._updated = true; + } + + if(typeof(dim.algorithm) !== 'undefined' && d.algorithm !== dim.algorithm) { + if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its algorithm from ' + d.algorithm + ' to ' + dim.algorithm); + d.algorithm = dim.algorithm; + d._updated = true; + } + + if(typeof(dim.multiplier) !== 'undefined' && d.multiplier !== dim.multiplier) { + if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its multiplier'); + d.multiplier = dim.multiplier; + d._updated = true; + } + + if(typeof(dim.divisor) !== 'undefined' && d.divisor !== dim.divisor) { + if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its divisor'); + d.divisor = dim.divisor; + d._updated = true; + } + + if(typeof(dim.hidden) !== 'undefined' && d.hidden !== dim.hidden) { + if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its hidden status'); + d.hidden = dim.hidden; + d._updated = true; + } + + if(d._updated) c._updated = true; + } + } + + //if(netdata.options.DEBUG === true) netdata.debug(netdata.charts); + return netdata.charts[id]; + }; + + return service; + }, + + runAllServices: function() { + if(netdata.options.DEBUG === true) netdata.debug('runAllServices()'); + + var now = Date.now(); + var len = netdata.services.length; + while(len--) { + var service = netdata.services[len]; + + if(service.enabled === false 
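+            // (skip services that are disabled or still running a collection)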
|| service.running === true) continue; + if(now <= service.next_run) continue; + + service.update(); + + now = Date.now(); + service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000); + } + + // 1/10th of update_every in pause + setTimeout(netdata.runAllServices, netdata.options.update_every * 100); + }, + + start: function() { + if(netdata.options.DEBUG === true) this.debug('started, services: ' + netdata.stringify(this.services)); + + if(this.services.length === 0) { + this.disableNodePlugin(); + + // eslint suggested way to exit + var exit = process.exit; + exit(1); + } + else this.runAllServices(); + }, + + // disable the whole node.js plugin + disableNodePlugin: function() { + this.send('DISABLE'); + + // eslint suggested way to exit + var exit = process.exit; + exit(1); + }, + + requestFromParams: function(protocol, hostname, port, path, method) { + return { + protocol: protocol, + hostname: hostname, + port: port, + path: path, + //family: 4, + method: method, + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Connection': 'keep-alive' + }, + agent: new http.Agent({ + keepAlive: true, + keepAliveMsecs: netdata.options.update_every * 1000, + maxSockets: 2, // it must be 2 to work + maxFreeSockets: 1 + }) + }; + }, + + requestFromURL: function(a_url) { + var u = url.parse(a_url); + return netdata.requestFromParams(u.protocol, u.hostname, u.port, u.path, 'GET'); + }, + + configure: function(module, config, callback) { + if(netdata.options.DEBUG === true) this.debug(module.name + ': configuring (update_every: ' + this.options.update_every + ')...'); + + module.active = 0; + module.update_every = this.options.update_every; + + if(typeof config.update_every !== 'undefined') + module.update_every = config.update_every; + + module.enable_autodetect = (config.enable_autodetect)?true:false; + + if(typeof(callback) === 'function') + module.configure_callback = callback; + else + module.configure_callback = null; + + var added = module.configure(config); + + if(netdata.options.DEBUG === true) this.debug(module.name + ': configured, reporting ' + added + ' eligible services.'); + + if(module.configure_callback !== null && added === 0) { + if(netdata.options.DEBUG === true) this.debug(module.name + ': configuration finish callback called from configure().'); + var configure_callback = module.configure_callback; + module.configure_callback = null; + configure_callback(); + } + + return added; + } +}; + +if(netdata.options.DEBUG === true) netdata.debug('loaded netdata from:', __filename); +module.exports = netdata; diff --git a/collectors/node.d.plugin/node_modules/pixl-xml.js b/collectors/node.d.plugin/node_modules/pixl-xml.js new file mode 100644 index 000000000..48de89e77 --- /dev/null +++ b/collectors/node.d.plugin/node_modules/pixl-xml.js @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: MIT +/* + JavaScript XML Library + Plus a bunch of object utility functions + + Usage: + var XML = require('pixl-xml'); + var myxmlstring = '' + + 'Hello' + + 'Content' + + ''; + + var tree = XML.parse( myxmlstring, { preserveAttributes: true }); + console.log( tree ); + + tree.Simple = "Hello2"; + tree.Node._Attribs.Key = "Value2"; + tree.Node._Data = "Content2"; + tree.New = "I added this"; + + console.log( XML.stringify( tree, 'Document' ) ); + + Copyright (c) 2004 - 2015 Joseph Huckaby + Released under the MIT License + This version is for Node.JS, converted in 2012. 
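+
+	A minimal parse sketch (illustrative input):
+
+	    var tree = XML.parse('<Document><Simple>Hello</Simple></Document>');
+	    // tree is now { Simple: 'Hello' }: the single top-level element is
+	    // unwrapped, and text-only elements collapse to plain strings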
+*/ + +var fs = require('fs'); + +var indent_string = "\t"; +var xml_header = ''; +var sort_args = null; +var re_valid_tag_name = /^\w[\w\-\:]*$/; + +var XML = exports.XML = function XML(args) { + // class constructor for XML parser class + // pass in args hash or text to parse + if (!args) args = ''; + if (isa_hash(args)) { + for (var key in args) this[key] = args[key]; + } + else this.text = args || ''; + + // stringify buffers + if (this.text instanceof Buffer) { + this.text = this.text.toString(); + } + + if (!this.text.match(/^\s*]+)>/g; +XML.prototype.patSpecialTag = /^\s*([\!\?])/; +XML.prototype.patPITag = /^\s*\?/; +XML.prototype.patCommentTag = /^\s*\!--/; +XML.prototype.patDTDTag = /^\s*\!DOCTYPE/; +XML.prototype.patCDATATag = /^\s*\!\s*\[\s*CDATA/; +XML.prototype.patStandardTag = /^\s*(\/?)([\w\-\:\.]+)\s*(.*)$/; +XML.prototype.patSelfClosing = /\/\s*$/; +XML.prototype.patAttrib = new RegExp("([\\w\\-\\:\\.]+)\\s*=\\s*([\\\"\\'])([^\\2]*?)\\2", "g"); +XML.prototype.patPINode = /^\s*\?\s*([\w\-\:]+)\s*(.*)$/; +XML.prototype.patEndComment = /--$/; +XML.prototype.patNextClose = /([^>]*?)>/g; +XML.prototype.patExternalDTDNode = new RegExp("^\\s*\\!DOCTYPE\\s+([\\w\\-\\:]+)\\s+(SYSTEM|PUBLIC)\\s+\\\"([^\\\"]+)\\\""); +XML.prototype.patInlineDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[/; +XML.prototype.patEndDTD = /\]$/; +XML.prototype.patDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[(.*)\]/; +XML.prototype.patEndCDATA = /\]\]$/; +XML.prototype.patCDATANode = /^\s*\!\s*\[\s*CDATA\s*\[([^]*)\]\]/; + +XML.prototype.attribsKey = '_Attribs'; +XML.prototype.dataKey = '_Data'; + +XML.prototype.parse = function(branch, name) { + // parse text into XML tree, recurse for nested nodes + if (!branch) branch = this.tree; + if (!name) name = null; + var foundClosing = false; + var matches = null; + + // match each tag, plus preceding text + while ( matches = this.patTag.exec(this.text) ) { + var before = matches[1]; + var tag = matches[2]; + + // text leading up to tag = content of parent node + if (before.match(/\S/)) { + if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = ''; + branch[this.dataKey] += trim(decode_entities(before)); + } + + // parse based on tag type + if (tag.match(this.patSpecialTag)) { + // special tag + if (tag.match(this.patPITag)) tag = this.parsePINode(tag); + else if (tag.match(this.patCommentTag)) tag = this.parseCommentNode(tag); + else if (tag.match(this.patDTDTag)) tag = this.parseDTDNode(tag); + else if (tag.match(this.patCDATATag)) { + tag = this.parseCDATANode(tag); + if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = ''; + branch[this.dataKey] += trim(decode_entities(tag)); + } // cdata + else { + this.throwParseError( "Malformed special tag", tag ); + break; + } // error + + if (tag == null) break; + continue; + } // special tag + else { + // Tag is standard, so parse name and attributes (if any) + var matches = tag.match(this.patStandardTag); + if (!matches) { + this.throwParseError( "Malformed tag", tag ); + break; + } + + var closing = matches[1]; + var nodeName = this.lowerCase ? 
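+			// (with the lowerCase option set, tag names are folded to
+			// lower case so that later lookups are case-insensitive)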
matches[2].toLowerCase() : matches[2]; + var attribsRaw = matches[3]; + + // If this is a closing tag, make sure it matches its opening tag + if (closing) { + if (nodeName == (name || '')) { + foundClosing = 1; + break; + } + else { + this.throwParseError( "Mismatched closing tag (expected )", tag ); + break; + } + } // closing tag + else { + // Not a closing tag, so parse attributes into hash. If tag + // is self-closing, no recursive parsing is needed. + var selfClosing = !!attribsRaw.match(this.patSelfClosing); + var leaf = {}; + var attribs = leaf; + + // preserve attributes means they go into a sub-hash named "_Attribs" + // the XML composer honors this for restoring the tree back into XML + if (this.preserveAttributes) { + leaf[this.attribsKey] = {}; + attribs = leaf[this.attribsKey]; + } + + // parse attributes + this.patAttrib.lastIndex = 0; + while ( matches = this.patAttrib.exec(attribsRaw) ) { + var key = this.lowerCase ? matches[1].toLowerCase() : matches[1]; + attribs[ key ] = decode_entities( matches[3] ); + } // foreach attrib + + // if no attribs found, but we created the _Attribs subhash, clean it up now + if (this.preserveAttributes && !num_keys(attribs)) { + delete leaf[this.attribsKey]; + } + + // Recurse for nested nodes + if (!selfClosing) { + this.parse( leaf, nodeName ); + if (this.error()) break; + } + + // Compress into simple node if text only + var num_leaf_keys = num_keys(leaf); + if ((typeof(leaf[this.dataKey]) != 'undefined') && (num_leaf_keys == 1)) { + leaf = leaf[this.dataKey]; + } + else if (!num_leaf_keys) { + leaf = ''; + } + + // Add leaf to parent branch + if (typeof(branch[nodeName]) != 'undefined') { + if (isa_array(branch[nodeName])) { + branch[nodeName].push( leaf ); + } + else { + var temp = branch[nodeName]; + branch[nodeName] = [ temp, leaf ]; + } + } + else { + branch[nodeName] = leaf; + } + + if (this.error() || (branch == this.tree)) break; + } // not closing + } // standard tag + } // main reg exp + + // Make sure we found the closing tag + if (name && !foundClosing) { + this.throwParseError( "Missing closing tag (expected )", name ); + } + + // If we are the master node, finish parsing and setup our doc node + if (branch == this.tree) { + if (typeof(this.tree[this.dataKey]) != 'undefined') delete this.tree[this.dataKey]; + + if (num_keys(this.tree) > 1) { + this.throwParseError( 'Only one top-level node is allowed in document', first_key(this.tree) ); + return; + } + + this.documentNodeName = first_key(this.tree); + if (this.documentNodeName) { + this.tree = this.tree[this.documentNodeName]; + } + } +}; + +XML.prototype.throwParseError = function(key, tag) { + // log error and locate current line number in source XML document + var parsedSource = this.text.substring(0, this.patTag.lastIndex); + var eolMatch = parsedSource.match(/\n/g); + var lineNum = (eolMatch ? eolMatch.length : 0) + 1; + lineNum -= tag.match(/\n/) ? 
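+	// (newlines inside the offending tag itself are subtracted, so the
+	// reported line number points at where the tag begins)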
tag.match(/\n/g).length : 0; + + this.errors.push({ + type: 'Parse', + key: key, + text: '<' + tag + '>', + line: lineNum + }); + + // Throw actual error (must wrap parse in try/catch) + throw new Error( this.getLastError() ); +}; + +XML.prototype.error = function() { + // return number of errors + return this.errors.length; +}; + +XML.prototype.getError = function(error) { + // get formatted error + var text = ''; + if (!error) return ''; + + text = (error.type || 'General') + ' Error'; + if (error.code) text += ' ' + error.code; + text += ': ' + error.key; + + if (error.line) text += ' on line ' + error.line; + if (error.text) text += ': ' + error.text; + + return text; +}; + +XML.prototype.getLastError = function() { + // Get most recently thrown error in plain text format + if (!this.error()) return ''; + return this.getError( this.errors[this.errors.length - 1] ); +}; + +XML.prototype.parsePINode = function(tag) { + // Parse Processor Instruction Node, e.g. + if (!tag.match(this.patPINode)) { + this.throwParseError( "Malformed processor instruction", tag ); + return null; + } + + this.piNodeList.push( tag ); + return tag; +}; + +XML.prototype.parseCommentNode = function(tag) { + // Parse Comment Node, e.g. + var matches = null; + this.patNextClose.lastIndex = this.patTag.lastIndex; + + while (!tag.match(this.patEndComment)) { + if (matches = this.patNextClose.exec(this.text)) { + tag += '>' + matches[1]; + } + else { + this.throwParseError( "Unclosed comment tag", tag ); + return null; + } + } + + this.patTag.lastIndex = this.patNextClose.lastIndex; + return tag; +}; + +XML.prototype.parseDTDNode = function(tag) { + // Parse Document Type Descriptor Node, e.g. + var matches = null; + + if (tag.match(this.patExternalDTDNode)) { + // tag is external, and thus self-closing + this.dtdNodeList.push( tag ); + } + else if (tag.match(this.patInlineDTDNode)) { + // Tag is inline, so check for nested nodes. + this.patNextClose.lastIndex = this.patTag.lastIndex; + + while (!tag.match(this.patEndDTD)) { + if (matches = this.patNextClose.exec(this.text)) { + tag += '>' + matches[1]; + } + else { + this.throwParseError( "Unclosed DTD tag", tag ); + return null; + } + } + + this.patTag.lastIndex = this.patNextClose.lastIndex; + + // Make sure complete tag is well-formed, and push onto DTD stack. + if (tag.match(this.patDTDNode)) { + this.dtdNodeList.push( tag ); + } + else { + this.throwParseError( "Malformed DTD tag", tag ); + return null; + } + } + else { + this.throwParseError( "Malformed DTD tag", tag ); + return null; + } + + return tag; +}; + +XML.prototype.parseCDATANode = function(tag) { + // Parse CDATA Node, e.g. 
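+	// (i.e. literal character data wrapped between "<![CDATA[" and "]]>"
+	// markers)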
+ var matches = null; + this.patNextClose.lastIndex = this.patTag.lastIndex; + + while (!tag.match(this.patEndCDATA)) { + if (matches = this.patNextClose.exec(this.text)) { + tag += '>' + matches[1]; + } + else { + this.throwParseError( "Unclosed CDATA tag", tag ); + return null; + } + } + + this.patTag.lastIndex = this.patNextClose.lastIndex; + + if (matches = tag.match(this.patCDATANode)) { + return matches[1]; + } + else { + this.throwParseError( "Malformed CDATA tag", tag ); + return null; + } +}; + +XML.prototype.getTree = function() { + // get reference to parsed XML tree + return this.tree; +}; + +XML.prototype.compose = function() { + // compose tree back into XML + var raw = compose_xml( this.tree, this.documentNodeName ); + var body = raw.substring( raw.indexOf("\n") + 1, raw.length ); + var xml = ''; + + if (this.piNodeList.length) { + for (var idx = 0, len = this.piNodeList.length; idx < len; idx++) { + xml += '<' + this.piNodeList[idx] + '>' + "\n"; + } + } + else { + xml += xml_header + "\n"; + } + + if (this.dtdNodeList.length) { + for (var idx = 0, len = this.dtdNodeList.length; idx < len; idx++) { + xml += '<' + this.dtdNodeList[idx] + '>' + "\n"; + } + } + + xml += body; + return xml; +}; + +// +// Static Utility Functions: +// + +var parse_xml = exports.parse = function parse_xml(text, opts) { + // turn text into XML tree quickly + if (!opts) opts = {}; + opts.text = text; + var parser = new XML(opts); + return parser.error() ? parser.getLastError() : parser.getTree(); +}; + +var trim = exports.trim = function trim(text) { + // strip whitespace from beginning and end of string + if (text == null) return ''; + + if (text && text.replace) { + text = text.replace(/^\s+/, ""); + text = text.replace(/\s+$/, ""); + } + + return text; +}; + +var encode_entities = exports.encodeEntities = function encode_entities(text) { + // Simple entitize exports.for = function for composing XML + if (text == null) return ''; + + if (text && text.replace) { + text = text.replace(/\&/g, "&"); // MUST BE FIRST + text = text.replace(//g, ">"); + } + + return text; +}; + +var encode_attrib_entities = exports.encodeAttribEntities = function encode_attrib_entities(text) { + // Simple entitize exports.for = function for composing XML attributes + if (text == null) return ''; + + if (text && text.replace) { + text = text.replace(/\&/g, "&"); // MUST BE FIRST + text = text.replace(//g, ">"); + text = text.replace(/\"/g, """); + text = text.replace(/\'/g, "'"); + } + + return text; +}; + +var decode_entities = exports.decodeEntities = function decode_entities(text) { + // Decode XML entities into raw ASCII + if (text == null) return ''; + + if (text && text.replace && text.match(/\&/)) { + text = text.replace(/\<\;/g, "<"); + text = text.replace(/\>\;/g, ">"); + text = text.replace(/\"\;/g, '"'); + text = text.replace(/\&apos\;/g, "'"); + text = text.replace(/\&\;/g, "&"); // MUST BE LAST + } + + return text; +}; + +var compose_xml = exports.stringify = function compose_xml(node, name, indent) { + // Compose node into XML including attributes + // Recurse for child nodes + var xml = ""; + + // If this is the root node, set the indent to 0 + // and setup the XML header (PI node) + if (!indent) { + indent = 0; + xml = xml_header + "\n"; + + if (!name) { + // no name provided, assume content is wrapped in it + name = first_key(node); + node = node[name]; + } + } + + // Setup the indent text + var indent_text = ""; + for (var k = 0; k < indent; k++) indent_text += indent_string; + + if ((typeof(node) == 
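+	// (object nodes recurse below, as hashes or arrays; any other value is
+	// emitted as a simple text element)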
'object') && (node != null)) {
+        // node is object -- now see if it is an array or hash
+        if (!node.length) { // what about zero-length array?
+            // node is hash
+            xml += indent_text + "<" + name;
+
+            var num_keys = 0;
+            var has_attribs = 0;
+            for (var key in node) num_keys++; // there must be a better way...
+
+            if (node["_Attribs"]) {
+                has_attribs = 1;
+                var sorted_keys = hash_keys_to_array(node["_Attribs"]).sort();
+                for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
+                    var key = sorted_keys[idx];
+                    xml += " " + key + "=\"" + encode_attrib_entities(node["_Attribs"][key]) + "\"";
+                }
+            } // has attribs
+
+            if (num_keys > has_attribs) {
+                // has child elements
+                xml += ">";
+
+                if (node["_Data"]) {
+                    // simple text child node
+                    xml += encode_entities(node["_Data"]) + "</" + name + ">\n";
+                } // just text
+                else {
+                    xml += "\n";
+
+                    var sorted_keys = hash_keys_to_array(node).sort();
+                    for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
+                        var key = sorted_keys[idx];
+                        if ((key != "_Attribs") && key.match(re_valid_tag_name)) {
+                            // recurse for node, with incremented indent value
+                            xml += compose_xml( node[key], key, indent + 1 );
+                        } // not _Attribs key
+                    } // foreach key
+
+                    xml += indent_text + "</" + name + ">\n";
+                } // real children
+            }
+            else {
+                // no child elements, so self-close
+                xml += "/>\n";
+            }
+        } // standard node
+        else {
+            // node is array
+            for (var idx = 0; idx < node.length; idx++) {
+                // recurse for node in array with same indent
+                xml += compose_xml( node[idx], name, indent );
+            }
+        } // array of nodes
+    } // complex node
+    else {
+        // node is simple string
+        xml += indent_text + "<" + name + ">" + encode_entities(node) + "</" + name + ">\n";
+    } // simple text node
+
+    return xml;
+};
+
+var always_array = exports.alwaysArray = function always_array(obj, key) {
+    // if object is not array, return array containing object
+    // if key is passed, work like XMLalwaysarray() instead
+    if (key) {
+        if ((typeof(obj[key]) != 'object') || (typeof(obj[key].length) == 'undefined')) {
+            var temp = obj[key];
+            delete obj[key];
+            obj[key] = new Array();
+            obj[key][0] = temp;
+        }
+        return null;
+    }
+    else {
+        if ((typeof(obj) != 'object') || (typeof(obj.length) == 'undefined')) { return [ obj ]; }
+        else return obj;
+    }
+};
+
+var hash_keys_to_array = exports.hashKeysToArray = function hash_keys_to_array(hash) {
+    // convert hash keys to array (discard values)
+    var array = [];
+    for (var key in hash) array.push(key);
+    return array;
+};
+
+var isa_hash = exports.isaHash = function isa_hash(arg) {
+    // determine if arg is a hash
+    return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) == 'undefined') );
+};
+
+var isa_array = exports.isaArray = function isa_array(arg) {
+    // determine if arg is an array or is array-like
+    if (typeof(arg) == 'array') return true;
+    return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) != 'undefined') );
+};
+
+var first_key = exports.firstKey = function first_key(hash) {
+    // return first key from hash (unordered)
+    for (var key in hash) return key;
+    return null; // no keys in hash
+};
+
+var num_keys = exports.numKeys = function num_keys(hash) {
+    // count the number of keys in a hash
+    var count = 0;
+    for (var a in hash) count++;
+    return count;
+};
diff --git a/collectors/node.d.plugin/sma_webbox/Makefile.inc b/collectors/node.d.plugin/sma_webbox/Makefile.inc
new file mode 100644
index 000000000..38f2fe97a
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS
IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += sma_webbox/sma_webbox.node.js
+# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sma_webbox/README.md sma_webbox/Makefile.inc
+
diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md
new file mode 100644
index 000000000..1512c7008
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/README.md
@@ -0,0 +1,25 @@
+
+[SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf)
+
+Example netdata configuration for node.d/sma_webbox.conf
+
+The module supports any number of Webbox servers, like this:
+
+```json
+{
+    "enable_autodetect": false,
+    "update_every": 5,
+    "servers": [
+        {
+            "name": "plant1",
+            "hostname": "10.0.1.1",
+            "update_every": 10
+        },
+        {
+            "name": "plant2",
+            "hostname": "10.0.2.1",
+            "update_every": 15
+        }
+    ]
+}
+```
diff --git a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
new file mode 100644
index 000000000..b9a168adc
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
@@ -0,0 +1,238 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one or more SMA Sunny Webboxes
+// to get the Solar Power Generated (current, today, total).
+
+// example configuration in /etc/netdata/node.d/sma_webbox.conf
+/*
+{
+    "enable_autodetect": false,
+    "update_every": 5,
+    "servers": [
+        {
+            "name": "plant1",
+            "hostname": "10.0.1.1",
+            "update_every": 10
+        },
+        {
+            "name": "plant2",
+            "hostname": "10.0.2.1",
+            "update_every": 15
+        }
+    ]
+}
+*/
+
+require('url');
+require('http');
+var netdata = require('netdata');
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded ' + __filename + ' plugin');
+
+var webbox = {
+    name: __filename,
+    enable_autodetect: true,
+    update_every: 1,
+    base_priority: 60000,
+    charts: {},
+
+    processResponse: function(service, data) {
+        if(data !== null) {
+            var r = JSON.parse(data);
+
+            var d = {
+                'GriPwr': {
+                    unit: null,
+                    value: null
+                },
+                'GriEgyTdy': {
+                    unit: null,
+                    value: null
+                },
+                'GriEgyTot': {
+                    unit: null,
+                    value: null
+                }
+            };
+
+            // parse the webbox response
+            // and put it in our d object
+            var found = 0;
+            var len = r.result.overview.length;
+            while(len--) {
+                var e = r.result.overview[len];
+                if(typeof(d[e.meta]) !== 'undefined') {
+                    found++;
+                    d[e.meta].value = e.value;
+                    d[e.meta].unit = e.unit;
+                }
+            }
+
+            // add the service
+            if(found > 0 && service.added !== true)
+                service.commit();
+
+            // Grid Current Power Chart
+            if(d['GriPwr'].value !== null) {
+                var id = 'smawebbox_' + service.name + '.current';
+                var chart = webbox.charts[id];
+
+                if(typeof chart === 'undefined') {
+                    chart = {
+                        id: id, // the unique id of the chart
+                        name: '', // the unique name of the chart
+                        title: service.name + ' Current Grid Power', // the title of the chart
+                        units: d['GriPwr'].unit, // the units of the chart dimensions
+                        family: 'now', // the family of the chart
+                        context: 'smawebbox.grid_power', // the context of the chart
+                        type: netdata.chartTypes.area, // the type of the chart
+                        priority: webbox.base_priority + 1, // the priority relative to others in the same family
+                        update_every: service.update_every, // the expected update frequency of the chart
+                        dimensions: {
+                            'GriPwr': {
id: 'GriPwr', // the unique id of the dimension + name: 'power', // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1, // the divisor + hidden: false // is hidden (boolean) + } + } + }; + + chart = service.chart(id, chart); + webbox.charts[id] = chart; + } + + service.begin(chart); + service.set('GriPwr', Math.round(d['GriPwr'].value)); + service.end(); + } + + if(d['GriEgyTdy'].value !== null) { + var id = 'smawebbox_' + service.name + '.today'; + var chart = webbox.charts[id]; + + if(typeof chart === 'undefined') { + chart = { + id: id, // the unique id of the chart + name: '', // the unique name of the chart + title: service.name + ' Today Grid Power', // the title of the chart + units: d['GriEgyTdy'].unit, // the units of the chart dimensions + family: 'today', // the family of the chart + context: 'smawebbox.grid_power_today', // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: webbox.base_priority + 2, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: { + 'GriEgyTdy': { + id: 'GriEgyTdy', // the unique id of the dimension + name: 'power', // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1000, // the divisor + hidden: false // is hidden (boolean) + } + } + }; + + chart = service.chart(id, chart); + webbox.charts[id] = chart; + } + + service.begin(chart); + service.set('GriEgyTdy', Math.round(d['GriEgyTdy'].value * 1000)); + service.end(); + } + + if(d['GriEgyTot'].value !== null) { + var id = 'smawebbox_' + service.name + '.total'; + var chart = webbox.charts[id]; + + if(typeof chart === 'undefined') { + chart = { + id: id, // the unique id of the chart + name: '', // the unique name of the chart + title: service.name + ' Total Grid Power', // the title of the chart + units: d['GriEgyTot'].unit, // the units of the chart dimensions + family: 'total', // the family of the chart + context: 'smawebbox.grid_power_total', // the context of the chart + type: netdata.chartTypes.area, // the type of the chart + priority: webbox.base_priority + 3, // the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: { + 'GriEgyTot': { + id: 'GriEgyTot', // the unique id of the dimension + name: 'power', // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm + multiplier: 1, // the multiplier + divisor: 1000, // the divisor + hidden: false // is hidden (boolean) + } + } + }; + + chart = service.chart(id, chart); + webbox.charts[id] = chart; + } + + service.begin(chart); + service.set('GriEgyTot', Math.round(d['GriEgyTot'].value * 1000)); + service.end(); + } + } + }, + + // module.serviceExecute() + // this function is called only from this module + // its purpose is to prepare the request and call + // netdata.serviceExecute() + serviceExecute: function(name, hostname, update_every) { + if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': hostname: ' + hostname + ', update_every: ' + update_every); + + var service = netdata.service({ + name: name, + request: netdata.requestFromURL('http://' + hostname + '/rpc'), + update_every: update_every, + module: this + }); + service.postData 
= 'RPC={"proc":"GetPlantOverview","format":"JSON","version":"1.0","id":"1"}';
+        service.request.method = 'POST';
+        service.request.headers['Content-Length'] = service.postData.length;
+
+        service.execute(this.processResponse);
+    },
+
+    configure: function(config) {
+        var added = 0;
+
+        if(typeof(config.servers) !== 'undefined') {
+            var len = config.servers.length;
+            while(len--) {
+                if(typeof config.servers[len].update_every === 'undefined')
+                    config.servers[len].update_every = this.update_every;
+
+                if(config.servers[len].update_every < 5)
+                    config.servers[len].update_every = 5;
+
+                this.serviceExecute(config.servers[len].name, config.servers[len].hostname, config.servers[len].update_every);
+                added++;
+            }
+        }
+
+        return added;
+    },
+
+    // module.update()
+    // this is called repeatedly to collect data, by calling
+    // service.execute()
+    update: function(service, callback) {
+        service.execute(function(serv, data) {
+            service.module.processResponse(serv, data);
+            callback();
+        });
+    },
+};
+
+module.exports = webbox;
diff --git a/collectors/node.d.plugin/snmp/Makefile.inc b/collectors/node.d.plugin/snmp/Makefile.inc
new file mode 100644
index 000000000..26448a1ce
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += snmp/snmp.node.js
+# dist_nodeconfig_DATA += snmp/snmp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += snmp/README.md snmp/Makefile.inc
+
diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md
new file mode 100644
index 000000000..a307a3642
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/README.md
@@ -0,0 +1,357 @@
+# SNMP Data Collector
+
+Using this collector, netdata can collect data from any SNMP device.
+
+This collector supports:
+
+- any number of SNMP devices
+- each SNMP device can be used to collect data for any number of charts
+- each chart may have any number of dimensions
+- each SNMP device may have a different update frequency
+- each SNMP device is queried in one or more batches (you can set `max_request_size` per SNMP server, to control the size of the batches).
+
+## Configuration
+
+You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following.
+
+In this example:
+
+ - the SNMP device is `10.11.12.8`.
+ - the SNMP community is `public`.
+ - we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`).
+ - we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`.
+
+```json
+{
+  "enable_autodetect": false,
+  "update_every": 5,
+  "max_request_size": 100,
+  "servers": [
+    {
+      "hostname": "10.11.12.8",
+      "community": "public",
+      "update_every": 10,
+      "max_request_size": 50,
+      "options": { "timeout": 10000 },
+      "charts": {
+        "snmp_switch.bandwidth_port1": {
+          "title": "Switch Bandwidth for port 1",
+          "units": "kilobits/s",
+          "type": "area",
+          "priority": 1,
+          "family": "ports",
+          "dimensions": {
+            "in": {
+              "oid": "1.3.6.1.2.1.2.2.1.10.1",
+              "algorithm": "incremental",
+              "multiplier": 8,
+              "divisor": 1024,
+              "offset": 0
+            },
+            "out": {
+              "oid": "1.3.6.1.2.1.2.2.1.16.1",
+              "algorithm": "incremental",
+              "multiplier": -8,
+              "divisor": 1024,
+              "offset": 0
+            }
+          }
+        },
+        "snmp_switch.bandwidth_port2": {
+          "title": "Switch Bandwidth for port 2",
+          "units": "kilobits/s",
+          "type": "area",
+          "priority": 1,
+          "family": "ports",
+          "dimensions": {
+            "in": {
+              "oid": "1.3.6.1.2.1.2.2.1.10.2",
+              "algorithm": "incremental",
+              "multiplier": 8,
+              "divisor": 1024,
+              "offset": 0
+            },
+            "out": {
+              "oid": "1.3.6.1.2.1.2.2.1.16.2",
+              "algorithm": "incremental",
+              "multiplier": -8,
+              "divisor": 1024,
+              "offset": 0
+            }
+          }
+        }
+      }
+    }
+  ]
+}
+```
+
+`update_every` is the update frequency for each server, in seconds.
+
+`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number if you get `TooBig` errors in netdata's error.log.
+
+`family` sets the name of the submenu of the dashboard each chart will appear under.
+
+If you need to define many charts using incremental OIDs, you can use the `multiply_range` option, as in the example below.
+
+This is like the previous example, but with `multiply_range` given: the chart is multiplied from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`.
+
+Each of the 24 new charts will have its id (1-24) appended at:
+
+1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
+2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
+3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
+4. its priority (which will be incremented for each chart, so that the charts will appear on the dashboard in this order)
+
+```json
+{
+  "enable_autodetect": false,
+  "update_every": 10,
+  "servers": [
+    {
+      "hostname": "10.11.12.8",
+      "community": "public",
+      "update_every": 10,
+      "options": { "timeout": 20000 },
+      "charts": {
+        "snmp_switch.bandwidth_port": {
+          "title": "Switch Bandwidth for port ",
+          "units": "kilobits/s",
+          "type": "area",
+          "priority": 1,
+          "family": "ports",
+          "multiply_range": [ 1, 24 ],
+          "dimensions": {
+            "in": {
+              "oid": "1.3.6.1.2.1.2.2.1.10.",
+              "algorithm": "incremental",
+              "multiplier": 8,
+              "divisor": 1024,
+              "offset": 0
+            },
+            "out": {
+              "oid": "1.3.6.1.2.1.2.2.1.16.",
+              "algorithm": "incremental",
+              "multiplier": -8,
+              "divisor": 1024,
+              "offset": 0
+            }
+          }
+        }
+      }
+    }
+  ]
+}
+```
+
+The `options` given for each server are:
+
+ - `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms.
+ - `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`).
+ - `transport`, the default is `udp4`.
+ - `port`, the port of the SNMP device to connect to. The default is `161`.
+ - `retries`, the number of attempts to make to fetch the data. The default is `1`.
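+
+For instance, a minimal sketch of a server entry that sets all of these options explicitly (the hostname and community are placeholders, `charts` is left empty for brevity, and `"version": 1` selects Version 2c per the list above):
+
+```json
+{
+  "servers": [
+    {
+      "hostname": "192.0.2.1",
+      "community": "public",
+      "update_every": 10,
+      "options": {
+        "timeout": 10000,
+        "version": 1,
+        "transport": "udp4",
+        "port": 161,
+        "retries": 2
+      },
+      "charts": { }
+    }
+  ]
+}
+```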
+
+## Retrieving names from SNMP
+
+You can append a value retrieved from SNMP to the title, by adding `titleoid` to the chart.
+
+You can set a dimension name to a value retrieved from SNMP, by adding `oidname` to the dimension.
+
+Both of the above will participate in `multiply_range`.
+
+
+## Testing the configuration
+
+To test it, you can run:
+
+```sh
+/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp
+```
+
+The above will run it on your console and you will be able to see what netdata sees, as well as any errors. You can get a very detailed output by appending `debug` to the command line.
+
+If it works, restart netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard after a few seconds).
+
+## Data collection speed
+
+Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second. If you run `node.d.plugin` in `debug` mode, it will report the time it took for the SNMP device to respond. My switch, for example, needs 7-8 seconds to respond for the traffic on 24 ports (48 OIDs, in/out).
+
+Also, if you use many SNMP clients on the same SNMP device at the same time, values may be skipped. This is a problem of the SNMP device, not this collector.
+
+## Finding OIDs
+
+Use `snmpwalk`, like this:
+
+```sh
+snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8
+```
+
+- `-t 20` is the timeout in seconds
+- `-v 1` is the SNMP version
+- `-O fn` will display full OIDs in numeric format (you may want to run it also without this option, to see human-readable output of OIDs)
+- `-c public` is the SNMP community
+- `10.11.12.8` is the SNMP device
+
+Keep in mind that `snmpwalk` outputs the OIDs with a dot in front of them. You should remove this dot when adding OIDs to the configuration file of this collector.
+
+## Example: Linksys SRW2024P
+
+This is what I use for my Linksys SRW2024P. It creates:
+
+1. A chart for power consumption (it is a PoE switch)
+2. Two charts for packets received (total packets received and packets received with errors)
+3. One chart for packets output
+4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles.
+
+This switch also reports various other metrics, like snmp, packets per port, etc. Unfortunately it does not report CPU utilization or backplane utilization.
+
+This switch has a very slow SNMP processor. To respond, it needs about 8 seconds, so I have set the refresh frequency (`update_every`) to 15 seconds.
+ +```json +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ + { + "hostname": "10.11.12.8", + "community": "public", + "update_every": 15, + "options": { "timeout": 20000, "version": 1 }, + "charts": { + "snmp_switch.power": { + "title": "Switch Power Supply", + "units": "watts", + "type": "line", + "priority": 10, + "family": "power", + "dimensions": { + "supply": { + "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1", + "algorithm": "absolute", + "multiplier": 1, + "divisor": 1, + "offset": 0 + }, + "used": { + "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1", + "algorithm": "absolute", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + } + } + , "snmp_switch.input": { + "title": "Switch Packets Input", + "units": "packets/s", + "type": "area", + "priority": 20, + "family": "IP", + "dimensions": { + "receives": { + "oid": ".1.3.6.1.2.1.4.3.0", + "algorithm": "incremental", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + , "discards": { + "oid": ".1.3.6.1.2.1.4.8.0", + "algorithm": "incremental", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + } + } + , "snmp_switch.input_errors": { + "title": "Switch Received Packets with Errors", + "units": "packets/s", + "type": "line", + "priority": 30, + "family": "IP", + "dimensions": { + "bad_header": { + "oid": ".1.3.6.1.2.1.4.4.0", + "algorithm": "incremental", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + , "bad_address": { + "oid": ".1.3.6.1.2.1.4.5.0", + "algorithm": "incremental", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + , "unknown_protocol": { + "oid": ".1.3.6.1.2.1.4.7.0", + "algorithm": "incremental", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + } + } + , "snmp_switch.output": { + "title": "Switch Output Packets", + "units": "packets/s", + "type": "line", + "priority": 40, + "family": "IP", + "dimensions": { + "requests": { + "oid": ".1.3.6.1.2.1.4.10.0", + "algorithm": "incremental", + "multiplier": 1, + "divisor": 1, + "offset": 0 + } + , "discards": { + "oid": ".1.3.6.1.2.1.4.11.0", + "algorithm": "incremental", + "multiplier": -1, + "divisor": 1, + "offset": 0 + } + , "no_route": { + "oid": ".1.3.6.1.2.1.4.12.0", + "algorithm": "incremental", + "multiplier": -1, + "divisor": 1, + "offset": 0 + } + } + } + , "snmp_switch.bandwidth_port": { + "title": "Switch Bandwidth for port ", + "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.", + "units": "kilobits/s", + "type": "area", + "priority": 100, + "family": "ports", + "multiply_range": [ 1, 24 ], + "dimensions": { + "in": { + "oid": ".1.3.6.1.2.1.2.2.1.10.", + "algorithm": "incremental", + "multiplier": 8, + "divisor": 1024, + "offset": 0 + } + , "out": { + "oid": ".1.3.6.1.2.1.2.2.1.16.", + "algorithm": "incremental", + "multiplier": -8, + "divisor": 1024, + "offset": 0 + } + } + } + } + } + ] +} +``` diff --git a/collectors/node.d.plugin/snmp/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js new file mode 100644 index 000000000..a051d3d3a --- /dev/null +++ b/collectors/node.d.plugin/snmp/snmp.node.js @@ -0,0 +1,516 @@ +'use strict'; +// SPDX-License-Identifier: GPL-3.0-or-later +// netdata snmp module +// This program will connect to one or more SNMP Agents +// + +// example configuration in /etc/netdata/node.d/snmp.conf +/* +{ + "enable_autodetect": false, + "update_every": 5, + "max_request_size": 50, + "servers": [ + { + "hostname": "10.11.12.8", + "community": "public", + "update_every": 10, + "max_request_size": 50, + "options": { "timeout": 10000 }, + "charts": { + "snmp_switch.bandwidth_port1": { + "title": "Switch Bandwidth for port 
1", + "units": "kilobits/s", + "type": "area", + "priority": 1, + "dimensions": { + "in": { + "oid": ".1.3.6.1.2.1.2.2.1.10.1", + "algorithm": "incremental", + "multiplier": 8, + "divisor": 1024, + "offset": 0 + }, + "out": { + "oid": ".1.3.6.1.2.1.2.2.1.16.1", + "algorithm": "incremental", + "multiplier": -8, + "divisor": 1024, + "offset": 0 + } + } + }, + "snmp_switch.bandwidth_port2": { + "title": "Switch Bandwidth for port 2", + "units": "kilobits/s", + "type": "area", + "priority": 1, + "dimensions": { + "in": { + "oid": ".1.3.6.1.2.1.2.2.1.10.2", + "algorithm": "incremental", + "multiplier": 8, + "divisor": 1024, + "offset": 0 + }, + "out": { + "oid": ".1.3.6.1.2.1.2.2.1.16.2", + "algorithm": "incremental", + "multiplier": -8, + "divisor": 1024, + "offset": 0 + } + } + } + } + } + ] +} +*/ + +// You can also give ranges of charts like the following. +// This will append 1-24 to id, title, oid (on each dimension) +// so that 24 charts will be created. +/* +{ + "enable_autodetect": false, + "update_every": 10, + "max_request_size": 50, + "servers": [ + { + "hostname": "10.11.12.8", + "community": "public", + "update_every": 10, + "max_request_size": 50, + "options": { "timeout": 20000 }, + "charts": { + "snmp_switch.bandwidth_port": { + "title": "Switch Bandwidth for port ", + "units": "kilobits/s", + "type": "area", + "priority": 1, + "multiply_range": [ 1, 24 ], + "dimensions": { + "in": { + "oid": ".1.3.6.1.2.1.2.2.1.10.", + "algorithm": "incremental", + "multiplier": 8, + "divisor": 1024, + "offset": 0 + }, + "out": { + "oid": ".1.3.6.1.2.1.2.2.1.16.", + "algorithm": "incremental", + "multiplier": -8, + "divisor": 1024, + "offset": 0 + } + } + } + } + } + ] +} +*/ + +var net_snmp = require('net-snmp'); +var extend = require('extend'); +var netdata = require('netdata'); + +if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, ' plugin'); + +netdata.processors.snmp = { + name: 'snmp', + + fixoid: function(oid) { + if(typeof oid !== 'string') + return oid; + + if(oid.charAt(0) === '.') + return oid.substring(1, oid.length); + + return oid; + }, + + prepare: function(service) { + var __DEBUG = netdata.options.DEBUG; + + if(typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) { + // this is the first time we see this service + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': preparing ' + this.name + ' OIDs'); + + // build an index of all OIDs + service.snmp_oids_index = {}; + var chart_keys = Object.keys(service.request.charts); + var chart_keys_len = chart_keys.length; + while(chart_keys_len--) { + var c = chart_keys[chart_keys_len]; + var chart = service.request.charts[c]; + + // for each chart + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c); + + if(typeof chart.titleoid !== 'undefined') { + service.snmp_oids_index[this.fixoid(chart.titleoid)] = { + type: 'title', + link: chart + }; + } + + var dim_keys = Object.keys(chart.dimensions); + var dim_keys_len = dim_keys.length; + while(dim_keys_len--) { + var d = dim_keys[dim_keys_len]; + var dim = chart.dimensions[d]; + + // for each dimension in the chart + + var oid = this.fixoid(dim.oid); + var oidname = this.fixoid(dim.oidname); + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c + ', dimension: ' + d + ', OID: ' + oid + ", OID name: " + oidname); + + // link it to the point 
we need to set the value to
+                    service.snmp_oids_index[oid] = {
+                        type: 'value',
+                        link: dim
+                    };
+
+                    if(typeof oidname !== 'undefined')
+                        service.snmp_oids_index[oidname] = {
+                            type: 'name',
+                            link: dim
+                        };
+
+                    // and set the value to null
+                    dim.value = null;
+                }
+            }
+
+            if(__DEBUG === true)
+                netdata.debug(service.module.name + ': ' + service.name + ': indexed ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids_index));
+
+            // now create the array of OIDs needed by net-snmp
+            service.snmp_oids = Object.keys(service.snmp_oids_index);
+
+            if(__DEBUG === true)
+                netdata.debug(service.module.name + ': ' + service.name + ': final list of ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids));
+
+            service.snmp_oids_cleaned = 0;
+        }
+        else if(service.snmp_oids_cleaned === 0) {
+            service.snmp_oids_cleaned = 1;
+
+            // the second time, keep only values
+
+            service.snmp_oids = new Array();
+            var oid_keys = Object.keys(service.snmp_oids_index);
+            var oid_keys_len = oid_keys.length;
+            while(oid_keys_len--) {
+                if (service.snmp_oids_index[oid_keys[oid_keys_len]].type === 'value')
+                    service.snmp_oids.push(oid_keys[oid_keys_len]);
+            }
+        }
+    },
+
+    getdata: function(service, index, ok, failed, callback) {
+        var __DEBUG = netdata.options.DEBUG;
+        var that = this;
+
+        if(index >= service.snmp_oids.length) {
+            callback((ok > 0)?{ ok: ok, failed: failed }:null);
+            return;
+        }
+
+        var slice;
+        if(service.snmp_oids.length <= service.request.max_request_size) {
+            slice = service.snmp_oids;
+            index = service.snmp_oids.length;
+        }
+        else if(service.snmp_oids.length - index <= service.request.max_request_size) {
+            slice = service.snmp_oids.slice(index, service.snmp_oids.length);
+            index = service.snmp_oids.length;
+        }
+        else {
+            slice = service.snmp_oids.slice(index, index + service.request.max_request_size);
+            index += service.request.max_request_size;
+        }
+
+        if(__DEBUG === true)
+            netdata.debug(service.module.name + ': ' + service.name + ': making ' + slice.length + ' entries request, max is: ' + service.request.max_request_size);
+
+        service.snmp_session.get(slice, function(error, varbinds) {
+            if(error) {
+                service.error('Received error = ' + netdata.stringify(error) + ' varbinds = ' + netdata.stringify(varbinds));
+
+                // make all values null
+                var len = slice.length;
+                while(len--)
+                    service.snmp_oids_index[slice[len]].value = null;
+            }
+            else {
+                if(__DEBUG === true)
+                    netdata.debug(service.module.name + ': ' + service.name + ': got valid ' + service.module.name + ' response: ' + netdata.stringify(varbinds));
+
+                var varbinds_len = varbinds.length;
+                for(var i = 0; i < varbinds_len ; i++) {
+                    var value = null;
+
+                    if(net_snmp.isVarbindError(varbinds[i])) {
+                        if(__DEBUG === true)
+                            netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid);
+
+                        service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i]));
+                        value = null;
+                        failed++;
+                    }
+                    else {
+                        // test for Counter64
+                        // varbinds[i].type = net_snmp.ObjectType.Counter64;
+                        // varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]);
+
+                        switch(varbinds[i].type) {
+                            case net_snmp.ObjectType.OctetString:
+                                if (service.snmp_oids_index[varbinds[i].oid].type !== 'title' && service.snmp_oids_index[varbinds[i].oid].type !== 'name') {
+                                    // parse floating point values, exposed as strings
+                                    value = parseFloat(varbinds[i].value) * 1000;
+                                    if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' +
service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)"); + } + else { + // just use the string + value = varbinds[i].value; + if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)"); + } + break; + + case net_snmp.ObjectType.Counter64: + // copy the buffer + value = '0x' + varbinds[i].value.toString('hex'); + if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)"); + break; + + case net_snmp.ObjectType.Integer: + case net_snmp.ObjectType.Counter: + case net_snmp.ObjectType.Gauge: + default: + value = varbinds[i].value; + if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)"); + break; + } + + ok++; + } + + if(value !== null) { + switch(service.snmp_oids_index[varbinds[i].oid].type) { + case 'title': service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value; break; + case 'name' : service.snmp_oids_index[varbinds[i].oid].link.name = value.toString().replace(/\W/g, '_'); break; + case 'value': service.snmp_oids_index[varbinds[i].oid].link.value = value; break; + } + } + } + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': finished ' + service.module.name + ' with ' + ok + ' successful and ' + failed + ' failed values'); + } + that.getdata(service, index, ok, failed, callback); + }); + }, + + process: function(service, callback) { + var __DEBUG = netdata.options.DEBUG; + + this.prepare(service); + + if(service.snmp_oids.length === 0) { + // no OIDs found for this service + + if(__DEBUG === true) + service.error('no OIDs to process.'); + + callback(null); + return; + } + + if(typeof service.snmp_session === 'undefined' || service.snmp_session === null) { + // no SNMP session has been created for this service + // the SNMP session is just the initialization of NET-SNMP + + if(__DEBUG === true) + netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + netdata.stringify(service.request.options)); + + // create the SNMP session + service.snmp_session = net_snmp.createSession (service.request.hostname, service.request.community, service.request.options); + + if(__DEBUG === true) + 
netdata.debug(service.module.name + ': ' + service.name + ': got ' + this.name + ' session: ' + netdata.stringify(service.snmp_session)); + + // if we later need traps, this is how to do it: + //service.snmp_session.trap(net_snmp.TrapType.LinkDown, function(error) { + // if(error) console.error('trap error: ' + netdata.stringify(error)); + //}); + } + + // do it, get the SNMP values for the sessions we need + this.getdata(service, 0, 0, 0, callback); + } +}; + +var snmp = { + name: __filename, + enable_autodetect: true, + update_every: 1, + base_priority: 50000, + + charts: {}, + + processResponse: function(service, data) { + if(data !== null) { + if(service.added !== true) + service.commit(); + + var chart_keys = Object.keys(service.request.charts); + var chart_keys_len = chart_keys.length; + for(var i = 0; i < chart_keys_len; i++) { + var c = chart_keys[i]; + + var chart = snmp.charts[c]; + if(typeof chart === 'undefined') { + chart = service.chart(c, service.request.charts[c]); + snmp.charts[c] = chart; + } + + service.begin(chart); + + var dimensions = service.request.charts[c].dimensions; + var dim_keys = Object.keys(dimensions); + var dim_keys_len = dim_keys.length; + for(var j = 0; j < dim_keys_len ; j++) { + var d = dim_keys[j]; + + if (dimensions[d].value !== null) { + if(typeof dimensions[d].offset === 'number') + service.set(d, dimensions[d].value + dimensions[d].offset); + else + service.set(d, dimensions[d].value); + } + } + + service.end(); + } + } + }, + + // module.serviceExecute() + // this function is called only from this module + // its purpose is to prepare the request and call + // netdata.serviceExecute() + serviceExecute: function(conf) { + var __DEBUG = netdata.options.DEBUG; + + if(__DEBUG === true) + netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', update_every: ' + conf.update_every); + + var service = netdata.service({ + name: conf.hostname, + request: conf, + update_every: conf.update_every, + module: this, + processor: netdata.processors.snmp + }); + + // multiply the charts, if required + var chart_keys = Object.keys(service.request.charts); + var chart_keys_len = chart_keys.length; + for( var i = 0; i < chart_keys_len ; i++ ) { + var c = chart_keys[i]; + var service_request_chart = service.request.charts[c]; + + if(__DEBUG === true) + netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', examining chart: ' + c); + + if(typeof service_request_chart.update_every === 'undefined') + service_request_chart.update_every = service.update_every; + + if(typeof service_request_chart.multiply_range !== 'undefined') { + var from = service_request_chart.multiply_range[0]; + var to = service_request_chart.multiply_range[1]; + var prio = service_request_chart.priority || 1; + + if(prio < snmp.base_priority) prio += snmp.base_priority; + + while(from <= to) { + var id = c + from.toString(); + var chart = extend(true, {}, service_request_chart); + chart.title += from.toString(); + + if(typeof chart.titleoid !== 'undefined') + chart.titleoid += from.toString(); + + chart.priority = prio++; + + var dim_keys = Object.keys(chart.dimensions); + var dim_keys_len = dim_keys.length; + for(var j = 0; j < dim_keys_len ; j++) { + var d = dim_keys[j]; + + chart.dimensions[d].oid += from.toString(); + + if(typeof chart.dimensions[d].oidname !== 'undefined') + chart.dimensions[d].oidname += from.toString(); + } + service.request.charts[id] = chart; + from++; + } + + delete service.request.charts[c]; + } + else { + if(service.request.charts[c].priority < 
snmp.base_priority)
+                    service.request.charts[c].priority += snmp.base_priority;
+            }
+        }
+
+        service.execute(this.processResponse);
+    },
+
+    configure: function(config) {
+        var added = 0;
+
+        if(typeof config.max_request_size === 'undefined')
+            config.max_request_size = 50;
+
+        if(typeof(config.servers) !== 'undefined') {
+            var len = config.servers.length;
+            while(len--) {
+                if(typeof config.servers[len].update_every === 'undefined')
+                    config.servers[len].update_every = this.update_every;
+
+                if(typeof config.servers[len].max_request_size === 'undefined')
+                    config.servers[len].max_request_size = config.max_request_size;
+
+                this.serviceExecute(config.servers[len]);
+                added++;
+            }
+        }
+
+        return added;
+    },
+
+    // module.update()
+    // this is called repeatedly to collect data, by calling
+    // service.execute()
+    update: function(service, callback) {
+        service.execute(function(serv, data) {
+            service.module.processResponse(serv, data);
+            callback();
+        });
+    }
+};
+
+module.exports = snmp;
diff --git a/collectors/node.d.plugin/stiebeleltron/Makefile.inc b/collectors/node.d.plugin/stiebeleltron/Makefile.inc
new file mode 100644
index 000000000..0c6e1e213
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += stiebeleltron/stiebeleltron.node.js
+# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += stiebeleltron/README.md stiebeleltron/Makefile.inc
+
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
new file mode 100644
index 000000000..002a31571
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -0,0 +1,505 @@
+# stiebel eltron
+
+This module collects metrics for the configured heat pump and hot water installation from the Stiebel Eltron ISG web interface.
+
+**Requirements**
+ * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
+ * Stiebel Eltron ISG web with network access (http), without password login
+
+The charts are configurable, however, the provided default configuration collects the following:
+
+1. **General**
+   * Outside temperature in C
+   * Condenser temperature in C
+   * Heating circuit pressure in bar
+   * Flow rate in l/min
+   * Output of water and heat pumps in %
+
+2. **Heating**
+   * Heat circuit 1 temperature in C (set/actual)
+   * Heat circuit 2 temperature in C (set/actual)
+   * Flow temperature in C (set/actual)
+   * Buffer temperature in C (set/actual)
+   * Pre-flow temperature in C
+
+3. **Hot Water**
+   * Hot water temperature in C (set/actual)
+
+4. **Room Temperature**
+   * Heat circuit 1 room temperature in C (set/actual)
+   * Heat circuit 2 room temperature in C (set/actual)
+
+5. **Electric Reheating**
+   * Dual Mode Reheating temperature in C (hot water/heating)
+
+6. **Process Data**
+   * Remaining compressor rest time in s
+
+7. **Runtime**
+   * Compressor runtime hours (hot water/heating)
+   * Reheating runtime hours (reheating 1/reheating 2)
+
+8. **Energy**
+   * Compressor today in kWh (hot water/heating)
+   * Compressor Total in kWh (hot water/heating)
+
+
+### Configuration
+
+If no configuration is given, the module will be disabled.
Each `update_every` is optional; the default is `10`.
+
+---
+
+[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html)
+
+Original author: BrainDoctor (github)
+
+The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
+
+### Testing
+This plugin has been tested within the following environment:
+ * ISG version: 8.5.6
+ * MFG version: 12
+ * Controller version: 9
+ * July (summer time, not much activity)
+ * Interface language: English
+ * login- and password-less ISG web access (without HTTPS it's useless anyway)
+ * Heatpump model: WPL 25 I-2
+ * Hot water boiler model: 820 WT 1
+
+So, if the language is set to English, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
+
+In my case, the ISG is relatively slow to respond (at least 1s, sometimes up to 4s), so collecting metrics every 10s is more than enough for me.
+
+### How to update the config
+
+* The dimensions support variable digits; the default is `1`. Most of the values printed by the ISG use 1 digit, some use 2.
+* The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. The default is `1`.
+* The test string for the regex is always the whole HTML output from the URL. For each parameter you need a regular expression that extracts the value from the HTML source in the first capture group.
+  Recommended: [regexr.com](https://regexr.com/) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
+
+The charts are being generated using the configuration below. So if your installation is in another language or reports other metrics, just adapt the structure or the regexes.
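+
+If you want to sanity-check a regex before adding it to the config, the extraction step is easy to reproduce in a few lines of node.js. This is only a sketch: the HTML row and the regex below are simplified, made-up samples (the real test string is the complete page source), but the logic mirrors what the module does, i.e. take the first capture group and convert the decimal comma to a dot:
+
+```js
+// regex-test.js -- illustrative only; the row and the regex are hypothetical samples
+var html = '<tr><td class="key">OUTSIDE TEMPERATURE</td><td class="value">-1,5 °C</td></tr>';
+var regex = new RegExp('OUTSIDE TEMPERATURE<\\/td>\\s*<td class="value">(-?[0-9]+,[0-9]+)');
+
+var match = regex.exec(html);
+if (match !== null) {
+    // first capture group, decimal comma converted to a dot
+    console.log(parseFloat(match[1].replace(',', '.'))); // prints -1.5
+}
+```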
+### Configuration template +```json +{ + "enable_autodetect": false, + "update_every": 10, + "pages": [ + { + "name": "System", + "id": "system", + "url": "http://machine.ip.or.dns/?s=1,0", + "update_every": 10, + "categories": [ + { + "id": "eletricreheating", + "name": "electric reheating", + "charts": [ + { + "title": "Dual Mode Reheating Temperature", + "id": "reheatingtemp", + "unit": "Celsius", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Heating", + "id": "dualmodeheatingtemp", + "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + }, + { + "name": "Hot Water", + "id" : "dualmodehotwatertemp", + "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + } + ] + }, + { + "id": "roomtemp", + "name": "room temperature", + "charts": [ + { + "title": "Heat Circuit 1", + "id": "hc1", + "unit": "Celsius", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Actual", + "id": "actual", + "regex": "\\s*ACTUAL TEMPERATURE HC 1<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + }, + { + "name": "Set", + "id" : "set", + "regex": "\\s*SET TEMPERATURE HC 1<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + } + ] + }, + { + "title": "Heat Circuit 2", + "id": "hc2", + "unit": "Celsius", + "type": "line", + "prio": 2, + "dimensions": [ + { + "name": "Actual", + "id": "actual", + "regex": "\\s*ACTUAL TEMPERATURE HC 2<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + }, + { + "name": "Set", + "id" : "set", + "regex": "\\s*SET TEMPERATURE HC 2<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + } + ] + } + ] + }, + { + "id": "heating", + "name": "heating", + "charts": [ + { + "title": "Heat Circuit 1", + "id": "hc1", + "unit": "Celsius", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Actual", + "id": "actual", + "regex": "\\s*ACTUAL TEMPERATURE HC 1<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + }, + { + "name": "Set", + "id" : "set", + "regex": "\\s*SET TEMPERATURE HC 1<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + } + ] + }, + { + "title": "Heat Circuit 2", + "id": "hc2", + "unit": "Celsius", + "type": "line", + "prio": 2, + "dimensions": [ + { + "name": "Actual", + "id": "actual", + "regex": "\\s*ACTUAL TEMPERATURE HC 2<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + }, + { + "name": "Set", + "id" : "set", + "regex": "\\s*SET TEMPERATURE HC 2<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + } + ] + }, + { + "title": "Flow Temperature", + "id": "flowtemp", + "unit": "Celsius", + "type": "line", + "prio": 3, + "dimensions": [ + { + "name": "Heating", + "id": "heating", + "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + }, + { + "name": "Reheating", + "id" : "reheating", + "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + }, + { + "title": "Buffer Temperature", + "id": "buffertemp", + "unit": "Celsius", + "type": "line", + "prio": 4, + "dimensions": [ + { + "name": "Actual", + "id": "actual", + "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + }, + { + "name": "Set", + "id" : "set", + "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + }, + { + "title": "Fixed Temperature", + "id": "fixedtemp", + "unit": "Celsius", + "type": "line", + "prio": 5, + "dimensions": [ + { + "name": "Set", + "id" : "setfixed", + "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + }, + { + "title": "Pre-flow Temperature", + "id": "preflowtemp", + 
"unit": "Celsius", + "type": "line", + "prio": 6, + "dimensions": [ + { + "name": "Actual", + "id": "actualreturn", + "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + } + ] + }, + { + "id": "hotwater", + "name": "hot water", + "charts": [ + { + "title": "Hot Water Temperature", + "id": "hotwatertemp", + "unit": "Celsius", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Actual", + "id": "actual", + "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + }, + { + "name": "Set", + "id" : "set", + "regex": "SET TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + } + ] + }, + { + "id": "general", + "name": "general", + "charts": [ + { + "title": "Outside Temperature", + "id": "outside", + "unit": "Celsius", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Outside temperature", + "id": "outsidetemp", + "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" + } + ] + }, + { + "title": "Condenser Temperature", + "id": "condenser", + "unit": "Celsius", + "type": "line", + "prio": 2, + "dimensions": [ + { + "name": "Condenser", + "id": "condenser", + "regex": "CONDENSER TEMP\\.<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + }, + { + "title": "Heating Circuit Pressure", + "id": "heatingcircuit", + "unit": "bar", + "type": "line", + "prio": 3, + "dimensions": [ + { + "name": "Heating Circuit", + "id": "heatingcircuit", + "digits": 2, + "regex": "PRESSURE HTG CIRC<\\\/td>\\s*(-?[0-9]+,[0-9]*).*<\\\/td>" + } + ] + }, + { + "title": "Flow Rate", + "id": "flowrate", + "unit": "liters/min", + "type": "line", + "prio": 4, + "dimensions": [ + { + "name": "Flow Rate", + "id": "flowrate", + "digits": 2, + "regex": "FLOW RATE<\\\/td>\\s*(-?[0-9]+,[0-9]+).*<\\\/td>" + } + ] + }, + { + "title": "Output", + "id": "output", + "unit": "%", + "type": "line", + "prio": 5, + "dimensions": [ + { + "name": "Heat Pump", + "id": "outputheatpump", + "regex": "OUTPUT HP<\\\/td>\\s*(-?[0-9]+,?[0-9]*).*<\\\/td>" + }, + { + "name": "Water Pump", + "id": "intpumprate", + "regex": "INT PUMP RATE<\\\/td>\\s*(-?[0-9]+,?[0-9]*).*<\\\/td>" + } + ] + } + ] + } + ] + }, + { + "name": "Heat Pump", + "id": "heatpump", + "url": "http://machine.ip.or.dns/?s=1,1", + "update_every": 10, + "categories": [ + { + "id": "runtime", + "name": "runtime", + "charts": [ + { + "title": "Compressor", + "id": "compressor", + "unit": "h", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Heating", + "id": "heating", + "regex": "RNT COMP 1 HEA<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + }, + { + "name": "Hot Water", + "id" : "hotwater", + "regex": "RNT COMP 1 DHW<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + } + ] + }, + { + "title": "Reheating", + "id": "reheating", + "unit": "h", + "type": "line", + "prio": 2, + "dimensions": [ + { + "name": "Reheating 1", + "id": "rh1", + "regex": "BH 1<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + }, + { + "name": "Reheating 2", + "id" : "rh2", + "regex": "BH 2<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + } + ] + } + ] + }, + { + "id": "processdata", + "name": "process data", + "charts": [ + { + "title": "Remaining Compressor Rest Time", + "id": "remaincomp", + "unit": "s", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": "Timer", + "id": "timer", + "regex": "COMP DLAY CNTR<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + } + ] + } + ] + }, + { + "id": "energy", + "name": "energy", + "charts": [ + { + "title": "Compressor Today", + "id": "compressorday", + "unit": "kWh", + "type": "line", + "prio": 1, + "dimensions": [ + { + "name": 
"Heating", + "id": "heating", + "digits": 3, + "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + }, + { + "name": "Hot Water", + "id": "hotwater", + "digits": 3, + "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + } + ] + }, + { + "title": "Compressor Total", + "id": "compressortotal", + "unit": "MWh", + "type": "line", + "prio": 2, + "dimensions": [ + { + "name": "Heating", + "id": "heating", + "digits": 3, + "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + }, + { + "name": "Hot Water", + "id": "hotwater", + "digits": 3, + "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*(-?[0-9]+,?[0-9]*)" + } + ] + } + ] + } + ] + } + ] +} +``` diff --git a/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js new file mode 100644 index 000000000..250c26540 --- /dev/null +++ b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js @@ -0,0 +1,197 @@ +'use strict'; +// SPDX-License-Identifier: GPL-3.0-or-later + +// This program will connect to one Stiebel Eltron ISG for heatpump heating +// to get the heat pump metrics. + +// example configuration in netdata/conf.d/node.d/stiebeleltron.conf.md + +require("url"); +require("http"); +var netdata = require("netdata"); + +netdata.debug("loaded " + __filename + " plugin"); + +var stiebeleltron = { + name: "Stiebel Eltron", + enable_autodetect: false, + update_every: 10, + base_priority: 60000, + charts: {}, + pages: {}, + + createBasicDimension: function (id, name, multiplier, divisor) { + return { + id: id, // the unique id of the dimension + name: name, // the name of the dimension + algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm + multiplier: multiplier, // the multiplier + divisor: divisor, // the divisor + hidden: false // is hidden (boolean) + }; + }, + + processResponse: function (service, html) { + if (html === null) return; + + // add the service + service.commit(); + + var page = stiebeleltron.pages[service.name]; + var categories = page.categories; + var categoriesCount = categories.length; + while (categoriesCount--) { + var context = { + html: html, + service: service, + category: categories[categoriesCount], + page: page, + chartDefinition: null, + dimension: null + }; + stiebeleltron.processCategory(context); + + } + }, + + processCategory: function (context) { + var charts = context.category.charts; + var chartCount = charts.length; + while (chartCount--) { + context.chartDefinition = charts[chartCount]; + stiebeleltron.processChart(context); + } + }, + + processChart: function (context) { + var dimensions = context.chartDefinition.dimensions; + var dimensionCount = dimensions.length; + context.service.begin(stiebeleltron.getChartFromContext(context)); + + while (dimensionCount--) { + context.dimension = dimensions[dimensionCount]; + stiebeleltron.processDimension(context); + } + context.service.end(); + }, + + processDimension: function (context) { + var dimension = context.dimension; + var match = new RegExp(dimension.regex).exec(context.html); + if (match === null) return; + var value = match[1].replace(",", "."); + // most values have a single digit by default, which requires the values to be multiplied. can be overridden. 
+ if (stiebeleltron.isDefined(dimension.digits)) { + value *= Math.pow(10, dimension.digits); + } else { + value *= 10; + } + context.service.set(stiebeleltron.getDimensionId(context), value); + }, + + getChartFromContext: function (context) { + var chartId = this.getChartId(context); + var chart = stiebeleltron.charts[chartId]; + if (stiebeleltron.isDefined(chart)) return chart; + + var chartDefinition = context.chartDefinition; + var service = context.service; + var dimensions = {}; + + var dimCount = chartDefinition.dimensions.length; + while (dimCount--) { + var dim = chartDefinition.dimensions[dimCount]; + var multiplier = 1; + var divisor = 10; + if (stiebeleltron.isDefined(dim.digits)) divisor = Math.pow(10, Math.max(0, dim.digits)); + if (stiebeleltron.isDefined(dim.multiplier)) multiplier = dim.multiplier; + if (stiebeleltron.isDefined(dim.divisor)) divisor = dim.divisor; + context.dimension = dim; + var dimId = this.getDimensionId(context); + dimensions[dimId] = this.createBasicDimension(dimId, dim.name, multiplier, divisor); + } + + chart = { + id: chartId, + name: '', + title: chartDefinition.title, + units: chartDefinition.unit, + family: context.category.name, + context: 'stiebeleltron.' + context.category.id + '.' + chartDefinition.id, + type: chartDefinition.type, + priority: stiebeleltron.base_priority + chartDefinition.prio,// the priority relative to others in the same family + update_every: service.update_every, // the expected update frequency of the chart + dimensions: dimensions + }; + chart = service.chart(chartId, chart); + stiebeleltron.charts[chartId] = chart; + + return chart; + }, + + // module.serviceExecute() + // this function is called only from this module + // its purpose is to prepare the request and call + // netdata.serviceExecute() + serviceExecute: function (name, uri, update_every) { + netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every); + + var service = netdata.service({ + name: name, + request: netdata.requestFromURL(uri), + update_every: update_every, + module: this + }); + service.execute(this.processResponse); + }, + + + configure: function (config) { + if (stiebeleltron.isUndefined(config.pages)) return 0; + var added = 0; + var pageCount = config.pages.length; + while (pageCount--) { + var page = config.pages[pageCount]; + // some validation + if (stiebeleltron.isUndefined(page.categories) || page.categories.length < 1) { + netdata.error("Your Stiebel Eltron config is invalid. Disabling plugin."); + return 0; + } + if (stiebeleltron.isUndefined(page.update_every)) page.update_every = this.update_every; + this.pages[page.name] = page; + this.serviceExecute(page.name, page.url, page.update_every); + added++; + } + return added; + }, + + // module.update() + // this is called repeatedly to collect data, by calling + // netdata.serviceExecute() + update: function (service, callback) { + service.execute(function (serv, data) { + service.module.processResponse(serv, data); + callback(); + }); + }, + + getChartId: function (context) { + return "stiebeleltron_" + context.page.id + + "." + context.category.id + + "." 
+ context.chartDefinition.id; + }, + + getDimensionId: function (context) { + return context.dimension.id; + }, + + isUndefined: function (value) { + return typeof value === 'undefined'; + }, + + isDefined: function (value) { + return typeof value !== 'undefined'; + } +}; + +module.exports = stiebeleltron; diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am new file mode 100644 index 000000000..59250a997 --- /dev/null +++ b/collectors/plugins.d/Makefile.am @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in new file mode 100644 index 000000000..b2c112811 --- /dev/null +++ b/collectors/plugins.d/Makefile.in @@ -0,0 +1,647 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/plugins.d +DIST_COMMON = $(srcdir)/Makefile.in 
$(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = 
@abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/plugins.d/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. 
+# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md new file mode 100644 index 000000000..d3aa5b5b0 --- /dev/null +++ b/collectors/plugins.d/README.md @@ -0,0 +1,472 @@ +# Netdata External Plugins + +`plugins.d` is the netdata internal plugin that collects metrics +from external processes, thus allowing netdata to use **external plugins**. + +## Provided External Plugins + +plugin|language|O/S|description +:---:|:---:|:---:|:--- +[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. +[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+. +[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points. +[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers. +[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`. +[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). + +Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom made modules to be included. Writing modules for these plugins is easier than accessing the native netdata API directly. 
You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
+Each of these modular plugins has its own methods for defining modules. Please check the examples and their documentation.
+
+## Motivation
+
+This plugin allows netdata to use **external plugins** for data collection:
+
+1. External data collection plugins may be written in any computer language.
+
+2. External data collection plugins may use O/S capabilities or `setuid` to
+   run with escalated privileges (compared to the netdata daemon).
+   The communication between the external plugin and netdata is unidirectional
+   (from the plugin to netdata), so that netdata cannot manipulate an external
+   plugin running with escalated privileges.
+
+## Operation
+
+Each of the external plugins is expected to run forever.
+Netdata will start it when netdata starts and stop it when netdata exits.
+
+If the external plugin exits or crashes, netdata will log an error.
+If the external plugin exits or crashes without pushing metrics to netdata, netdata will not start it again:
+- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
+- Plugins may also be disabled by netdata if they output things that netdata does not understand.
+
+The `stdout` of external plugins is connected to netdata to receive metrics,
+with the API defined below.
+
+The `stderr` of external plugins is connected to netdata's `error.log`.
+
+Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics, independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another 5 seconds and a third 10 seconds.
+
+## Configuration
+
+Netdata supplies the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration) and `NETDATA_STOCK_CONFIG_DIR` (for netdata-supplied configuration) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
+
+The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where netdata runs, with a boolean setting to enable or disable each of them.
+
+Example:
+
+```
+[plugins]
+	# enable running new plugins = yes
+	# check for new plugins every = 60
+
+	# charts.d = yes
+	# fping = yes
+	# node.d = yes
+	# python.d = yes
+```
+
+The setting `enable running new plugins` changes the default behavior for all external plugins.
+So, if set to `no`, only the plugins that are explicitly set to `yes` will be run.
+
+The setting `check for new plugins every` controls how often the directory `/usr/libexec/netdata/plugins.d`
+will be rescanned for new plugins. So, new plugins can be added at any time.
+
+For each of the external plugins enabled, another `netdata.conf` section
+is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
+This section allows controlling the update frequency of the plugin and providing
+additional command line arguments to it.
+
+For example, for `apps.plugin` the following section is available:
+
+```
+[plugin:apps]
+	# update every = 1
+	# command options =
+```
+
+- `update every` controls the granularity of the external plugin (see the example below).
+- `command options` allows giving additional command line options to the plugin.
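+
+For example (an illustrative value, not a recommendation), uncommenting `update every` and setting it to `5` makes netdata start `apps.plugin` with `5` as its single command line parameter, asking it to collect data every 5 seconds:
+
+```
+[plugin:apps]
+	update every = 5
+```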
+
+Netdata will provide to the external plugins the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
+
+Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1).
+
+Other than the above, the plugin configuration is up to the plugin.
+
+Keep in mind that the user may use the netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
+
+### Autoconfiguration
+
+Plugins should attempt to autoconfigure themselves when possible.
+
+For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that netdata will not attempt to start the plugin again.
+
+## External Plugins API
+
+Any program that can print a few values to its standard output can become a netdata external plugin.
+
+There are 7 kinds of lines netdata parses, those starting with:
+
+- `CHART` - create or update a chart
+- `DIMENSION` - add or update a dimension to the chart just created
+- `BEGIN` - initialize data collection for a chart
+- `SET` - set the value of a dimension for the initialized chart
+- `END` - complete data collection for the initialized chart
+- `FLUSH` - ignore the last collected values
+- `DISABLE` - disable this plugin
+
+A single program can produce any number of charts with any number of dimensions each.
+
+Charts can be added at any time (not just at the beginning).
+
+### command line parameters
+
+The plugin **MUST** accept just **one** parameter: **the number of seconds it is
+expected to update the values for its charts**. The value passed by netdata
+to the plugin is controlled via its configuration file (so there is no need
+for the plugin to handle this configuration option).
+
+The external plugin can overwrite the update frequency. For example, the server may
+request per-second updates, but the plugin may ignore it and update its charts
+every 5 seconds.
+
+### environment variables
+
+There are a few environment variables that are set by `netdata` and are
+available for the plugin to use.
+
+variable|description
+:------:|:----------
+`NETDATA_USER_CONFIG_DIR`|The directory where all netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).
+`NETDATA_STOCK_CONFIG_DIR`|The directory where all netdata-related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).
+`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
+`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
+`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
+`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
+`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
+`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex, starting with `0x`) that enables certain netdata debugging features. Check **[[Tracing Options]]** for more information.
+`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user-configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds.
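+
+For instance, a plugin written in BASH might use these variables to locate its configuration (a sketch; `example.conf` is a hypothetical file name):
+
+```sh
+# prefer the user-supplied configuration, fall back to the stock one
+if [ -f "${NETDATA_USER_CONFIG_DIR}/example.conf" ]; then
+    source "${NETDATA_USER_CONFIG_DIR}/example.conf"
+elif [ -f "${NETDATA_STOCK_CONFIG_DIR}/example.conf" ]; then
+    source "${NETDATA_STOCK_CONFIG_DIR}/example.conf"
+fi
+```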
+
+### The output of the plugin
+
+The plugin should print its instructions for netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
+
+#### DISABLE
+
+`DISABLE` will disable this plugin. This will prevent netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
+
+#### CHART
+
+`CHART` defines a new chart.
+
+the template is:
+
+> CHART type.id name title units [family [context [charttype [priority [update_every [options [plugin [module]]]]]]]]
+
+ where:
+  - `type.id`
+
+    uniquely identifies the chart,
+    this is what will be needed to add values to the chart
+
+    the `type` part controls the menu the charts will appear in
+
+  - `name`
+
+    is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it.
+
+  - `title`
+
+    the text above the chart
+
+  - `units`
+
+    the label of the vertical axis of the chart,
+    all dimensions added to a chart should have the same units
+    of measurement
+
+  - `family`
+
+    is used to group charts together
+    (for example all eth0 charts should say: eth0),
+    if empty or missing, the `id` part of `type.id` will be used
+
+    this controls the sub-menu on the dashboard
+
+  - `context`
+
+    the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
+
+    this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alarms to it
+
+  - `charttype`
+
+    one of `line`, `area` or `stacked`,
+    if empty or missing, `line` will be used
+
+  - `priority`
+
+    is the relative priority of the charts as rendered on the web page,
+    lower numbers make the charts appear before the ones with higher numbers,
+    if empty or missing, `1000` will be used
+
+  - `update_every`
+
+    overwrites the update frequency set by the server,
+    if empty or missing, the user-configured value will be used
+
+  - `options`
+
+    a space-separated list of options, enclosed in quotes. 4 options are currently supported:
+
+    * `obsolete` marks a chart as obsolete (netdata will hide it and delete it after some time)
+    * `detail` marks a chart as insignificant (this may be used by dashboards to make the chart smaller, or to otherwise visualize it as a less important chart)
+    * `store_first` makes netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts; if the first collected value of incremental dimensions is not zero-based, unrealistic spikes will appear with this option set)
+    * `hidden` performs all operations on the chart, but does not offer it on dashboards (the chart will still be sent to backends)
+
+    `CHART` options were added in netdata v1.7; the `hidden` option was added in v1.10.
+
+  - `plugin` and `module`
+
+    both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, netdata will automatically set it to the filename of the plugin that generated the chart. `module` has no default.
+
+
+#### DIMENSION
+
+`DIMENSION` defines a new dimension for the chart
+
+the template is:
+
+> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
+
+ where:
+
+  - `id`
+
+    the `id` of this dimension (it is a text value, not numeric),
+    this will be needed later to add values to the dimension
+
+    We suggest avoiding the use of `.` in dimension ids. Backends expect metrics to be `.` separated and people will get confused if a dimension id contains a dot.
+
+  - `name`
+
+    the name of the dimension as it will appear at the legend of the chart,
+    if empty or missing the `id` will be used
+
+  - `algorithm`
+
+    one of:
+
+    * `absolute`
+
+      the value is drawn as-is (interpolated to second boundary),
+      if `algorithm` is empty, invalid or missing, `absolute` is used
+
+    * `incremental`
+
+      the value increases over time,
+      the difference from the last value is presented in the chart,
+      the server interpolates the value and calculates a per-second figure
+
+    * `percentage-of-absolute-row`
+
+      the % of this value compared to the total of all dimensions
+
+    * `percentage-of-incremental-row`
+
+      the % of this value compared to the incremental total of
+      all dimensions
+
+  - `multiplier`
+
+    an integer value to multiply the collected value,
+    if empty or missing, `1` is used
+
+  - `divisor`
+
+    an integer value to divide the collected value,
+    if empty or missing, `1` is used
+
+  - `hidden`
+
+    giving the keyword `hidden` will make this dimension hidden,
+    it will take part in the calculations but will not be presented in the chart
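+
+As a hypothetical example (the chart and dimension names are illustrative), the following two lines define a `line` chart with a single `incremental` dimension whose collected values are divided by 1000 before being drawn:
+
+```
+CHART example.requests '' 'Requests Served' 'requests/s' example example.requests line 90000 1
+DIMENSION served '' incremental 1 1000
+```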
+
+#### VARIABLE
+
+> VARIABLE [SCOPE] name = value
+
+`VARIABLE` defines a variable that can be used in alarms. This is used for setting constants (like the max connections a server may accept).
+
+Variables support 2 scopes:
+
+- `GLOBAL` or `HOST` to define the variable at the host level.
+- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Chart-local variables are ideal for building alarm templates.
+
+The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
+
+These variables can be set and updated at any point.
+
+Variable names should use alphanumeric characters, the `.` and the `_`.
+
+The `value` is floating point (netdata uses `long double`).
+
+Variables are transferred to upstream netdata servers (streaming and database replication).
+
+## Data collection
+
+Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines:
+
+> BEGIN type.id [microseconds]
+
+  - `type.id`
+
+    is the unique identification of the chart (as given in `CHART`)
+
+  - `microseconds`
+
+    is the number of microseconds since the last update of the chart. It is optional.
+
+    Under heavy system load, the system may have some latency transferring
+    data from the plugins to netdata via the pipe. This number improves
+    accuracy significantly, since the plugin is able to calculate the
+    duration between its iterations better than netdata.
+
+    The first time the plugin is started, no microseconds should be given
+    to netdata.
+
+> SET id = value
+
+   - `id`
+
+     is the unique identification of the dimension (of the chart that was just begun)
+
+   - `value`
+
+     is the collected value; only integer values are collected. If you want to push fractional values, multiply this value by 100 or 1000 and set the `DIMENSION` divisor to the same number.
+
+> END
+
+  END does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
+
+More `SET` lines may appear to update all the dimensions of the chart,
+all of them in one `BEGIN` -> `END` block.
+
+All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
+same chart.
+
+If more charts need to be updated, each chart should have its own
+`BEGIN` -> `SET` -> `END` block.
+
+If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
+it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
+all the values collected since the last `BEGIN` command.
+
+If a plugin does not behave properly (outputs invalid lines, or does not
+follow these guidelines), it will be disabled by netdata.
+
+### collected values
+
+netdata will collect any **signed** value in the 64-bit range:
+`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
+
+If a value is not collected, leave it empty, like this:
+
+`SET id = `
+
+or do not output the line at all.
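+
+As a hypothetical transcript (continuing the illustrative chart defined earlier), a single data collection iteration would look like this on the plugin's `stdout`:
+
+```
+BEGIN example.requests
+SET served = 1234
+END
+```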
+
+## Modular Plugins
+
+1. **python**, use `python.d.plugin`, there are many examples in the [python.d directory](../python.d.plugin)
+
+   python is ideal for netdata plugins. It is a simple yet powerful way to collect data; it has a very small memory footprint, although it is not the most CPU-efficient way to do it.
+
+2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d directory](../node.d.plugin)
+
+   node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc., node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory efficient; it will probably need more RAM compared to python.
+
+3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d directory](../charts.d.plugin)
+
+   BASH is the simplest scripting language for collecting values. It is the least efficient, though, in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might consume a lot of system resources.
+
+4. **C**
+
+   Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
+
+---
+
+## Writing Plugins Properly
+
+There are a few rules for writing plugins properly:
+
+1. Respect system resources
+
+   Pay special attention to efficiency:
+
+   - Initialize everything once, at the beginning. Initialization is not an expensive operation, since your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
+   - Do the absolute minimum while iterating to collect values repeatedly.
+   - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled, and collect values using the same connection.
+   - Avoid any CPU- or memory-heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
+   - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
+
+2. The best way to iterate at a constant pace is this pseudo code:
+
+```js
+    var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
+
+    readConfiguration();
+
+    if(!verifyWeCanCollectValues()) {
+        print "DISABLE";
+        exit(1);
+    }
+
+    createCharts(); /* print CHART and DIMENSION statements */
+
+    var loops = 0;
+    var last_run = 0;
+    var next_run = 0;
+    var dt_since_last_run = 0;
+    var now = 0;
+
+    FOREVER {
+        /* find the current time in milliseconds */
+        now = currentTimeStampInMilliseconds();
+
+        /*
+         * find the time of the next loop
+         * this makes sure we are always aligned
+         * with the netdata daemon
+         */
+        next_run = now - (now % update_every) + update_every;
+
+        /*
+         * wait until it is time
+         * it is important to do it in a loop
+         * since many wait functions can be interrupted
+         */
+        while( now < next_run ) {
+            sleepMilliseconds(next_run - now);
+            now = currentTimeStampInMilliseconds();
+        }
+
+        /* calculate the time passed since the last run */
+        if ( loops > 0 )
+            dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
+
+        /* prepare for the next loop */
+        last_run = now;
+        loops++;
+
+        /* do your magic here to collect values */
+        collectValues();
+
+        /* send the collected data to netdata */
+        printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
+    }
+```
+
+   Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
+
+   Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
+
+3. If you are not sure your plugin is free of memory leaks, exit once every hour. Netdata will restart your process.
+
+4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
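+
+Putting it all together, here is a minimal, hypothetical external plugin in BASH (the chart, dimension and file names are illustrative). It follows the API described above, but uses a plain `sleep` instead of the constant-pace loop shown in the pseudo code:
+
+```sh
+#!/usr/bin/env bash
+# example.plugin - a sketch of a netdata external plugin
+
+# netdata passes the requested update frequency as the only parameter
+update_every="${1:-1}"
+
+# define one chart with one dimension, once, at startup
+echo "CHART example.random '' 'A random number' 'value' example example.random line 90000 ${update_every}"
+echo "DIMENSION random '' absolute 1 1"
+
+# collect and send values forever
+while true
+do
+    echo "BEGIN example.random"
+    echo "SET random = ${RANDOM}"   # only integer values are collected
+    echo "END"
+    sleep "${update_every}"
+done
+```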
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c new file mode 100644 index 000000000..465ecd796 --- /dev/null +++ b/collectors/plugins.d/plugins_d.c @@ -0,0 +1,696 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugins_d.h" + +char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { NULL }; +char *netdata_configured_plugins_dir_base; + +struct plugind *pluginsd_root = NULL; + +static inline int pluginsd_space(char c) { + switch(c) { + case ' ': + case '\t': + case '\r': + case '\n': + case '=': + return 1; + + default: + return 0; + } +} + +inline int config_isspace(char c) { + switch(c) { + case ' ': + case '\t': + case '\r': + case '\n': + case ',': + return 1; + + default: + return 0; + } +} + +// split a text into words, respecting quotes +inline int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char)) { + char *s = str, quote = 0; + int i = 0, j; + + // skip all white space + while(unlikely(custom_isspace(*s))) s++; + + // check for quote + if(unlikely(*s == '\'' || *s == '"')) { + quote = *s; // remember the quote + s++; // skip the quote + } + + // store the first word + words[i++] = s; + + // while we have something + while(likely(*s)) { + // if it is escape + if(unlikely(*s == '\\' && s[1])) { + s += 2; + continue; + } + + // if it is quote + else if(unlikely(*s == quote)) { + quote = 0; + *s = ' '; + continue; + } + + // if it is a space + else if(unlikely(quote == 0 && custom_isspace(*s))) { + + // terminate the word + *s++ = '\0'; + + // skip all white space + while(likely(custom_isspace(*s))) s++; + + // check for quote + if(unlikely(*s == '\'' || *s == '"')) { + quote = *s; // remember the quote + s++; // skip the quote + } + + // if we reached the end, stop + if(unlikely(!*s)) break; + + // store the next word + if(likely(i < max_words)) words[i++] = s; + else break; + } + + // anything else + else s++; + } + + // terminate the words + j = i; + while(likely(j < max_words)) words[j++] = NULL; + + return i; +} + +inline int pluginsd_split_words(char *str, char **words, int max_words) { + return quoted_strings_splitter(str, words, max_words, pluginsd_space); +} + +inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations) { + int enabled = cd->enabled; + + if(!fp || !enabled) { + cd->enabled = 0; + return 0; + } + + size_t count = 0; + + char line[PLUGINSD_LINE_MAX + 1]; + + char *words[PLUGINSD_MAX_WORDS] = { NULL }; + uint32_t BEGIN_HASH = simple_hash(PLUGINSD_KEYWORD_BEGIN); + uint32_t END_HASH = simple_hash(PLUGINSD_KEYWORD_END); + uint32_t FLUSH_HASH = simple_hash(PLUGINSD_KEYWORD_FLUSH); + uint32_t CHART_HASH = simple_hash(PLUGINSD_KEYWORD_CHART); + uint32_t DIMENSION_HASH = simple_hash(PLUGINSD_KEYWORD_DIMENSION); + uint32_t DISABLE_HASH = simple_hash(PLUGINSD_KEYWORD_DISABLE); + uint32_t VARIABLE_HASH = simple_hash(PLUGINSD_KEYWORD_VARIABLE); + + RRDSET *st = NULL; + uint32_t hash; + + errno = 0; + clearerr(fp); + + if(unlikely(fileno(fp) == -1)) { + error("file descriptor given is not a valid stream"); + goto cleanup; + } + + while(!ferror(fp)) { + if(unlikely(netdata_exit)) break; + + char *r = fgets(line, PLUGINSD_LINE_MAX, fp); + if(unlikely(!r)) { + error("read failed"); + break; + } + + if(unlikely(netdata_exit)) break; + + line[PLUGINSD_LINE_MAX] = '\0'; + + int w = pluginsd_split_words(line, words, PLUGINSD_MAX_WORDS); + char *s = words[0]; + if(unlikely(!s || !*s || !w)) { + continue; + } + + // debug(D_PLUGINSD, "PLUGINSD: words 0='%s' 
1='%s' 2='%s' 3='%s' 4='%s' 5='%s' 6='%s' 7='%s' 8='%s' 9='%s'", words[0], words[1], words[2], words[3], words[4], words[5], words[6], words[7], words[8], words[9]); + + if(likely(!simple_hash_strcmp(s, "SET", &hash))) { + char *dimension = words[1]; + char *value = words[2]; + + if(unlikely(!dimension || !*dimension)) { + error("requested a SET on chart '%s' of host '%s', without a dimension. Disabling it.", st->id, host->hostname); + enabled = 0; + break; + } + + if(unlikely(!value || !*value)) value = NULL; + + if(unlikely(!st)) { + error("requested a SET on dimension %s with value %s on host '%s', without a BEGIN. Disabling it.", dimension, value?value:"", host->hostname); + enabled = 0; + break; + } + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) + debug(D_PLUGINSD, "is setting dimension %s/%s to %s", st->id, dimension, value?value:""); + + if(value) { + RRDDIM *rd = rrddim_find(st, dimension); + if(unlikely(!rd)) { + error("requested a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.", dimension, st->name, st->id, st->rrdhost->hostname); + enabled = 0; + break; + } + else + rrddim_set_by_pointer(st, rd, strtoll(value, NULL, 0)); + } + } + else if(likely(hash == BEGIN_HASH && !strcmp(s, PLUGINSD_KEYWORD_BEGIN))) { + char *id = words[1]; + char *microseconds_txt = words[2]; + + if(unlikely(!id)) { + error("requested a BEGIN without a chart id for host '%s'. Disabling it.", host->hostname); + enabled = 0; + break; + } + + st = rrdset_find(host, id); + if(unlikely(!st)) { + error("requested a BEGIN on chart '%s', which does not exist on host '%s'. Disabling it.", id, host->hostname); + enabled = 0; + break; + } + + if(likely(st->counter_done)) { + usec_t microseconds = 0; + if(microseconds_txt && *microseconds_txt) microseconds = str2ull(microseconds_txt); + + if(likely(microseconds)) { + if(trust_durations) + rrdset_next_usec_unfiltered(st, microseconds); + else + rrdset_next_usec(st, microseconds); + } + else rrdset_next(st); + } + } + else if(likely(hash == END_HASH && !strcmp(s, PLUGINSD_KEYWORD_END))) { + if(unlikely(!st)) { + error("requested an END, without a BEGIN on host '%s'. Disabling it.", host->hostname); + enabled = 0; + break; + } + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) + debug(D_PLUGINSD, "requested an END on chart %s", st->id); + + rrdset_done(st); + st = NULL; + + count++; + } + else if(likely(hash == CHART_HASH && !strcmp(s, PLUGINSD_KEYWORD_CHART))) { + st = NULL; + + char *type = words[1]; + char *name = words[2]; + char *title = words[3]; + char *units = words[4]; + char *family = words[5]; + char *context = words[6]; + char *chart = words[7]; + char *priority_s = words[8]; + char *update_every_s = words[9]; + char *options = words[10]; + char *plugin = words[11]; + char *module = words[12]; + + // parse the id from type + char *id = NULL; + if(likely(type && (id = strchr(type, '.')))) { + *id = '\0'; + id++; + } + + // make sure we have the required variables + if(unlikely(!type || !*type || !id || !*id)) { + error("requested a CHART, without a type.id, on host '%s'. Disabling it.", host->hostname); + enabled = 0; + break; + } + + // parse the name, and make sure it does not include 'type.' + if(unlikely(name && *name)) { + // when data are coming from slaves + // name will be type.name + // so we have to remove 'type.' 
from name too + size_t len = strlen(type); + if(strncmp(type, name, len) == 0 && name[len] == '.') + name = &name[len + 1]; + + // if the name is the same with the id, + // or is just 'NULL', clear it. + if(unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0)) + name = NULL; + } + + int priority = 1000; + if(likely(priority_s && *priority_s)) priority = str2i(priority_s); + + int update_every = cd->update_every; + if(likely(update_every_s && *update_every_s)) update_every = str2i(update_every_s); + if(unlikely(!update_every)) update_every = cd->update_every; + + RRDSET_TYPE chart_type = RRDSET_TYPE_LINE; + if(unlikely(chart)) chart_type = rrdset_type_id(chart); + + if(unlikely(name && !*name)) name = NULL; + if(unlikely(family && !*family)) family = NULL; + if(unlikely(context && !*context)) context = NULL; + if(unlikely(!title)) title = ""; + if(unlikely(!units)) units = "unknown"; + + debug(D_PLUGINSD, "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d" + , type, id + , name?name:"" + , family?family:"" + , context?context:"" + , rrdset_type_name(chart_type) + , priority + , update_every + ); + + st = rrdset_create( + host + , type + , id + , name + , family + , context + , title + , units + , (plugin && *plugin)?plugin:cd->filename + , module + , priority + , update_every + , chart_type + ); + + if(options && *options) { + if(strstr(options, "obsolete")) + rrdset_is_obsolete(st); + else + rrdset_isnot_obsolete(st); + + if(strstr(options, "detail")) + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + else + rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); + + if(strstr(options, "hidden")) + rrdset_flag_set(st, RRDSET_FLAG_HIDDEN); + else + rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN); + + if(strstr(options, "store_first")) + rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST); + else + rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); + } + else { + rrdset_isnot_obsolete(st); + rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); + rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); + } + } + else if(likely(hash == DIMENSION_HASH && !strcmp(s, PLUGINSD_KEYWORD_DIMENSION))) { + char *id = words[1]; + char *name = words[2]; + char *algorithm = words[3]; + char *multiplier_s = words[4]; + char *divisor_s = words[5]; + char *options = words[6]; + + if(unlikely(!id || !*id)) { + error("requested a DIMENSION, without an id, host '%s' and chart '%s'. Disabling it.", host->hostname, st?st->id:"UNSET"); + enabled = 0; + break; + } + + if(unlikely(!st)) { + error("requested a DIMENSION, without a CHART, on host '%s'. 
Disabling it.", host->hostname); + enabled = 0; + break; + } + + long multiplier = 1; + if(multiplier_s && *multiplier_s) multiplier = strtol(multiplier_s, NULL, 0); + if(unlikely(!multiplier)) multiplier = 1; + + long divisor = 1; + if(likely(divisor_s && *divisor_s)) divisor = strtol(divisor_s, NULL, 0); + if(unlikely(!divisor)) divisor = 1; + + if(unlikely(!algorithm || !*algorithm)) algorithm = "absolute"; + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) + debug(D_PLUGINSD, "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'" + , st->id + , id + , name?name:"" + , rrd_algorithm_name(rrd_algorithm_id(algorithm)) + , multiplier + , divisor + , options?options:"" + ); + + RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm)); + rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN); + rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS); + if(options && *options) { + if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN); + if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS); + if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS); + } + } + else if(likely(hash == VARIABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_VARIABLE))) { + char *name = words[1]; + char *value = words[2]; + int global = (st)?0:1; + + if(name && *name) { + if((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) { + global = 1; + name = words[2]; + value = words[3]; + } + else if((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) { + global = 0; + name = words[2]; + value = words[3]; + } + } + + if(unlikely(!name || !*name)) { + error("requested a VARIABLE on host '%s', without a variable name. Disabling it.", host->hostname); + enabled = 0; + break; + } + + if(unlikely(!value || !*value)) + value = NULL; + + if(value) { + char *endptr = NULL; + calculated_number v = (calculated_number)str2ld(value, &endptr); + + if(unlikely(endptr && *endptr)) { + if(endptr == value) + error("the value '%s' of VARIABLE '%s' on host '%s' cannot be parsed as a number", value, name, host->hostname); + else + error("the value '%s' of VARIABLE '%s' on host '%s' has leftovers: '%s'", value, name, host->hostname, endptr); + } + + if(global) { + RRDVAR *rv = rrdvar_custom_host_variable_create(host, name); + if (rv) rrdvar_custom_host_variable_set(host, rv, v); + else error("cannot find/create HOST VARIABLE '%s' on host '%s'", name, host->hostname); + } + else if(st) { + RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, name); + if (rs) rrdsetvar_custom_chart_variable_set(rs, v); + else error("cannot find/create CHART VARIABLE '%s' on host '%s', chart '%s'", name, host->hostname, st->id); + } + else + error("cannot find/create CHART VARIABLE '%s' on host '%s' without a chart", name, host->hostname); + } + else + error("cannot set %s VARIABLE '%s' on host '%s' to an empty value", (global)?"HOST":"CHART", name, host->hostname); + } + else if(likely(hash == FLUSH_HASH && !strcmp(s, PLUGINSD_KEYWORD_FLUSH))) { + debug(D_PLUGINSD, "requested a FLUSH"); + st = NULL; + } + else if(unlikely(hash == DISABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_DISABLE))) { + info("called DISABLE. Disabling it."); + enabled = 0; + break; + } + else { + error("sent command '%s' which is not known by netdata, for host '%s'. 
Disabling it.", s, host->hostname); + enabled = 0; + break; + } + } + +cleanup: + cd->enabled = enabled; + + if(likely(count)) { + cd->successful_collections += count; + cd->serial_failures = 0; + } + else + cd->serial_failures++; + + return count; +} + +static void pluginsd_worker_thread_cleanup(void *arg) { + struct plugind *cd = (struct plugind *)arg; + + if(cd->enabled && !cd->obsolete) { + cd->obsolete = 1; + + info("data collection thread exiting"); + + if (cd->pid) { + siginfo_t info; + info("killing child process pid %d", cd->pid); + if (killpid(cd->pid, SIGTERM) != -1) { + info("waiting for child process pid %d to exit...", cd->pid); + waitid(P_PID, (id_t) cd->pid, &info, WEXITED); + } + cd->pid = 0; + } + } +} + +void *pluginsd_worker_thread(void *arg) { + netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg); + + struct plugind *cd = (struct plugind *)arg; + + cd->obsolete = 0; + size_t count = 0; + + while(!netdata_exit) { + FILE *fp = mypopen(cd->cmd, &cd->pid); + if(unlikely(!fp)) { + error("Cannot popen(\"%s\", \"r\").", cd->cmd); + break; + } + + info("connected to '%s' running on pid %d", cd->fullfilename, cd->pid); + count = pluginsd_process(localhost, cd, fp, 0); + error("'%s' (pid %d) disconnected after %zu successful data collections (ENDs).", cd->fullfilename, cd->pid, count); + killpid(cd->pid, SIGTERM); + + // get the return code + int code = mypclose(fp, cd->pid); + + if(code != 0) { + // the plugin reports failure + + if(likely(!cd->successful_collections)) { + // nothing collected - disable it + error("'%s' (pid %d) exited with error code %d. Disabling it.", cd->fullfilename, cd->pid, code); + cd->enabled = 0; + } + else { + // we have collected something + + if(likely(cd->serial_failures <= 10)) { + error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is disabled."); + sleep((unsigned int) (cd->update_every * 10)); + } + else { + error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). We tried %zu times to restart it, but it failed to generate data. Disabling it.", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->serial_failures); + cd->enabled = 0; + } + } + } + else { + // the plugin reports success + + if(unlikely(!cd->successful_collections)) { + // we have collected nothing so far + + if(likely(cd->serial_failures <= 10)) { + error("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", cd->fullfilename, cd->pid, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is now disabled."); + sleep((unsigned int) (cd->update_every * 10)); + } + else { + error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0), but we have tried %zu times to collect something. 
Disabling it.", cd->fullfilename, cd->pid, cd->serial_failures); + cd->enabled = 0; + } + } + else + sleep((unsigned int) cd->update_every); + } + cd->pid = 0; + + if(unlikely(!cd->enabled)) break; + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + +static void pluginsd_main_cleanup(void *data) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + info("cleaning up..."); + + struct plugind *cd; + for (cd = pluginsd_root; cd; cd = cd->next) { + if (cd->enabled && !cd->obsolete) { + info("stopping plugin thread: %s", cd->id); + netdata_thread_cancel(cd->thread); + } + } + + info("cleanup completed."); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *pluginsd_main(void *ptr) { + netdata_thread_cleanup_push(pluginsd_main_cleanup, ptr); + + int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1); + int scan_frequency = (int) config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60); + if(scan_frequency < 1) scan_frequency = 1; + + // store the errno for each plugins directory + // so that we don't log broken directories on each loop + int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 }; + + while(!netdata_exit) { + int idx; + const char *directory_name; + + for( idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]) ; idx++ ) { + if(unlikely(netdata_exit)) break; + + errno = 0; + DIR *dir = opendir(directory_name); + if(unlikely(!dir)) { + if(directory_errors[idx] != errno) { + directory_errors[idx] = errno; + error("cannot open plugins directory '%s'", directory_name); + } + continue; + } + + struct dirent *file = NULL; + while(likely((file = readdir(dir)))) { + if(unlikely(netdata_exit)) break; + + debug(D_PLUGINSD, "examining file '%s'", file->d_name); + + if(unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0)) continue; + + int len = (int) strlen(file->d_name); + if(unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN)) continue; + if(unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) { + debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX); + continue; + } + + char pluginname[CONFIG_MAX_NAME + 1]; + snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name); + int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run); + + if(unlikely(!enabled)) { + debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name); + continue; + } + + // check if it runs already + struct plugind *cd; + for(cd = pluginsd_root ; cd ; cd = cd->next) + if(unlikely(strcmp(cd->filename, file->d_name) == 0)) break; + + if(likely(cd && !cd->obsolete)) { + debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename); + continue; + } + + // it is not running + // allocate a new one, or use the obsolete one + if(unlikely(!cd)) { + cd = callocz(sizeof(struct plugind), 1); + + snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname); + + strncpyz(cd->filename, file->d_name, FILENAME_MAX); + snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename); + + cd->enabled = enabled; + cd->update_every = (int) config_get_number(cd->id, "update every", localhost->rrd_update_every); + cd->started_t = now_realtime_sec(); + + char *def = ""; + snprintfz(cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every, 
config_get(cd->id, "command options", def)); + + // link it + if(likely(pluginsd_root)) cd->next = pluginsd_root; + pluginsd_root = cd; + + // it is not currently running + cd->obsolete = 1; + + if(cd->enabled) { + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PLUGINSD[%s]", pluginname); + // spawn a new thread for it + netdata_thread_create(&cd->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, pluginsd_worker_thread, cd); + } + } + } + + closedir(dir); + } + + sleep((unsigned int) scan_frequency); + } + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h new file mode 100644 index 000000000..adccf3f0f --- /dev/null +++ b/collectors/plugins.d/plugins_d.h @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGINS_D_H +#define NETDATA_PLUGINS_D_H 1 + +#include "../../daemon/common.h" + +#define NETDATA_PLUGIN_HOOK_PLUGINSD \ + { \ + .name = "PLUGINSD", \ + .config_section = NULL, \ + .config_name = NULL, \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = pluginsd_main \ + }, + + +#define PLUGINSD_FILE_SUFFIX ".plugin" +#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX) +#define PLUGINSD_CMD_MAX (FILENAME_MAX*2) + +#define PLUGINSD_KEYWORD_CHART "CHART" +#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION" +#define PLUGINSD_KEYWORD_BEGIN "BEGIN" +#define PLUGINSD_KEYWORD_END "END" +#define PLUGINSD_KEYWORD_FLUSH "FLUSH" +#define PLUGINSD_KEYWORD_DISABLE "DISABLE" +#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE" + +#define PLUGINSD_LINE_MAX 1024 +#define PLUGINSD_MAX_WORDS 20 + +#define PLUGINSD_MAX_DIRECTORIES 20 +extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES]; + +struct plugind { + char id[CONFIG_MAX_NAME+1]; // config node id + + char filename[FILENAME_MAX+1]; // just the filename + char fullfilename[FILENAME_MAX+1]; // with path + char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes + + volatile pid_t pid; + netdata_thread_t thread; + + size_t successful_collections; // the number of times we have seen + // values collected from this plugin + + size_t serial_failures; // the number of times the plugin started + // without collecting values + + int update_every; // the plugin default data collection frequency + volatile sig_atomic_t obsolete; // do not touch this structure after setting this to 1 + volatile sig_atomic_t enabled; // if this is enabled or not + + time_t started_t; + + struct plugind *next; +}; + +extern struct plugind *pluginsd_root; + +extern void *pluginsd_main(void *ptr); + +extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations); +extern int pluginsd_split_words(char *str, char **words, int max_words); + +extern int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char)); +extern int config_isspace(char c); + +#endif /* NETDATA_PLUGINS_D_H */ diff --git a/collectors/proc.plugin/Makefile.am b/collectors/proc.plugin/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/collectors/proc.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/proc.plugin/Makefile.in b/collectors/proc.plugin/Makefile.in new file mode 100644 index 000000000..f6db90c87 --- /dev/null +++ b/collectors/proc.plugin/Makefile.in @@ 
-0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/proc.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN 
= $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = 
@cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md new file mode 100644 index 000000000..9d444f3d0 --- /dev/null +++ b/collectors/proc.plugin/README.md @@ -0,0 +1,200 @@ + +# proc.plugin + + - `/proc/net/dev` (all network interfaces for all their values) + - `/proc/diskstats` (all disks for all their values) + - `/proc/net/snmp` (total IPv4, TCP and UDP usage) + - `/proc/net/snmp6` (total IPv6 usage) + - `/proc/net/netstat` (more IPv4 usage) + - `/proc/net/stat/nf_conntrack` (connection tracking performance) + - `/proc/net/stat/synproxy` (synproxy performance) + - `/proc/net/ip_vs/stats` (IPVS connection statistics) + - `/proc/stat` (CPU utilization) + - `/proc/meminfo` (memory information) + - `/proc/vmstat` (system performance) + - `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers) + - `/sys/fs/cgroup` (Control Groups - Linux Containers) + - `/proc/self/mountinfo` (mount points) + - `/proc/interrupts` (total and per core hardware interrupts) + - `/proc/softirqs` (total and per core software interrupts) + - `/proc/loadavg` (system load and total processes running) + - `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography) + - `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`). + - `netdata` (internal netdata resources utilization) + + +--- + +# Monitoring Disks + +> Live demo of disk monitoring at: **[http://london.netdata.rocks](https://registry.my-netdata.io/#menu_disk)** + +Performance monitoring for Linux disks is quite complicated. The main reason is the plethora of disk technologies available. There are many different hardware disk technologies, but there are even more **virtual disk** technologies that can provide additional storage features. + +Fortunately, the Linux kernel provides many metrics that can give us deep insights into what our disks are doing. The kernel measures all these metrics on all layers of storage: **virtual disks**, **physical disks** and **partitions of disks**. + +Let's see the list of metrics provided by netdata for each of the above: + +### I/O bandwidth/s (kb/s) + +The amount of data transferred from and to the disk. + +### I/O operations/s + +The number of I/O operations completed. + +### Queued I/O operations + +The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue. + +### Backlog size (time in ms) + +The expected duration of the currently queued I/O operations. + +### Utilization (time percentage) + +The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, which execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no capacity to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached. + +Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless. + +### Average I/O operation time (ms) + +The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them. + +### Average I/O operation size (kb) + +The average amount of data of the completed I/O operations.
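To make these definitions concrete, here is a minimal standalone sketch (an illustration only, not netdata's implementation; the device name `sda` and the 1-second interval are arbitrary examples) that derives the average operation time, the average operation size and the utilization from two consecutive readings of `/proc/diskstats`:

```c
/* build: cc -o diskavg diskavg.c */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct ds { unsigned long long rops, wops, rsect, wsect, r_ms, w_ms, busy_ms; };

static int read_ds(const char *dev, struct ds *s) {
    FILE *fp = fopen("/proc/diskstats", "r");
    if(!fp) return -1;

    char line[512], name[64];
    unsigned int major, minor;
    unsigned long long rmerged, wmerged, inflight;
    int found = -1;

    while(fgets(line, sizeof(line), fp)) {
        // field layout is documented in Documentation/iostats.txt of the kernel
        if(sscanf(line, "%u %u %63s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                  &major, &minor, name,
                  &s->rops, &rmerged, &s->rsect, &s->r_ms,
                  &s->wops, &wmerged, &s->wsect, &s->w_ms,
                  &inflight, &s->busy_ms) == 13 && strcmp(name, dev) == 0) {
            found = 0;
            break;
        }
    }
    fclose(fp);
    return found;
}

int main(void) {
    struct ds a, b;
    if(read_ds("sda", &a)) return 1;
    sleep(1);
    if(read_ds("sda", &b)) return 1;

    unsigned long long ops     = (b.rops + b.wops) - (a.rops + a.wops);
    unsigned long long sectors = (b.rsect + b.wsect) - (a.rsect + a.wsect);

    if(ops) {
        // sectors in /proc/diskstats are always 512 bytes inside the kernel
        printf("avg operation size: %.1f kb\n", sectors * 512.0 / ops / 1024.0);
        printf("avg operation time: %.1f ms\n",
               (double)((b.r_ms + b.w_ms) - (a.r_ms + a.w_ms)) / ops);
    }
    // busy time is in ms; over a 1000ms interval, percent = delta / 10
    printf("utilization: %.1f%%\n", (double)(b.busy_ms - a.busy_ms) / 10.0);
    return 0;
}
```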
+ +### Average Service Time (ms) + +The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading. + +### Merged I/O operations/s + +The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before passing them to the disk. This metric measures the number of operations that have been merged by the Linux kernel. + +### Total I/O time + +The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel. + +### Space usage + +For mounted disks, netdata will provide a chart for their space, with 3 dimensions: + +1. free +2. used +3. reserved for root + +### inode usage + +For mounted disks, netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions: + +1. free +2. used +3. reserved for root + +--- + +## disk names + +netdata will automatically set the names of disks on the dashboard from the mount points they are mounted at (of course, only while they are mounted). Changes in mount points are not currently detected (you will have to restart netdata for the name of a disk to change). + +--- + +## performance metrics + +By default, netdata enables monitoring of metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after netdata is started will be detected and their charts will be automatically added to the dashboard (though a refresh of the dashboard is needed for them to appear). + +netdata categorizes all block devices into 3 categories: + +1. physical disks (i.e. block devices that do not have slaves and are not partitions) +2. virtual disks (i.e. block devices that have slaves - like RAID devices) +3. disk partitions (i.e. block devices that are part of a physical disk) + +Performance metrics are enabled by default for all disk devices, except partitions and not-mounted virtual disks. Of course, you can enable/disable monitoring of any block device by editing the netdata configuration file.
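As an illustration of this classification, the following minimal sketch (not netdata's code; the sysfs paths match the plugin's defaults shown in the configuration below, and `sda` with major:minor `8:0` is just an example) applies the same sysfs checks the plugin performs to decide whether a block device is a partition, a virtual disk or a physical disk:

```c
#include <stdio.h>
#include <unistd.h>

static const char *classify(const char *dev, unsigned maj, unsigned min) {
    char path[256];

    // a partition exposes /sys/dev/block/MAJOR:MINOR/partition
    snprintf(path, sizeof(path), "/sys/dev/block/%u:%u/partition", maj, min);
    if(access(path, R_OK) == 0) return "partition";

    // a virtual disk (RAID, device-mapper, loop, ...) lives under /sys/devices/virtual/block
    snprintf(path, sizeof(path), "/sys/devices/virtual/block/%s", dev);
    if(access(path, R_OK) == 0) return "virtual";

    // anything else that appears under /sys/block is a physical disk
    snprintf(path, sizeof(path), "/sys/block/%s", dev);
    if(access(path, R_OK) == 0) return "physical";

    return "unknown";
}

int main(void) {
    // example: sda is usually 8:0 on Linux systems
    printf("sda: %s\n", classify("sda", 8, 0));
    return 0;
}
```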
+### netdata configuration + +You can get the running netdata configuration using this: + +```sh +cd /etc/netdata +curl "http://localhost:19999/netdata.conf" >netdata.conf.new +mv netdata.conf.new netdata.conf +``` + +Then edit `netdata.conf` and find the following section. This is the basic plugin configuration. + +``` +[plugin:proc:/proc/diskstats] + # enable new disks detected at runtime = yes + # performance metrics for physical disks = auto + # performance metrics for virtual disks = no + # performance metrics for partitions = no + # performance metrics for mounted filesystems = no + # performance metrics for mounted virtual disks = auto + # space metrics for mounted filesystems = auto + # bandwidth for all disks = auto + # operations for all disks = auto + # merged operations for all disks = auto + # i/o time for all disks = auto + # queued operations for all disks = auto + # utilization percentage for all disks = auto + # backlog for all disks = auto + # space usage for all disks = auto + # inodes usage for all disks = auto + # filename to monitor = /proc/diskstats + # path to get block device infos = /sys/dev/block/%lu:%lu/%s + # path to get h/w sector size = /sys/block/%s/queue/hw_sector_size + # path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size + +``` + +For each virtual disk, physical disk and partition you will have a section like this: + +``` +[plugin:proc:/proc/diskstats:sda] + # enable = yes + # enable performance metrics = auto + # bandwidth = auto + # operations = auto + # merged operations = auto + # i/o time = auto + # queued operations = auto + # utilization percentage = auto + # backlog = auto +``` + +For all configuration options: +- `auto` = enable monitoring if the collected values are not zero +- `yes` = enable monitoring +- `no` = disable monitoring + +Of course, to set options, you will have to uncomment them. The comments show the internal defaults. + +After saving `/etc/netdata/netdata.conf`, restart netdata to apply them. + +#### Disabling performance metrics for an individual device or for multiple devices by device type +You can easily disable performance metrics for an individual device, for example: +``` +[plugin:proc:/proc/diskstats:sda] + enable performance metrics = no +``` +But sometimes you need to disable performance metrics for all devices of the same type; to do this, you need to figure out the device type from `/proc/diskstats`, for example: +``` + 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168 + 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880 + 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828 + 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104 +``` +All zram devices have major number `251` and all loop devices have major number `7`. So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section. +``` +[plugin:proc:/proc/diskstats] + performance metrics for disks with major 7 = no +``` + diff --git a/collectors/proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c new file mode 100644 index 000000000..6c6bee519 --- /dev/null +++ b/collectors/proc.plugin/ipc.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#include <sys/sem.h> +#include <sys/msg.h> +#include <sys/shm.h> + + +#ifndef SEMVMX +#define SEMVMX 32767 /* <= 32767 semaphore maximum value */ +#endif + +/* Some versions of libc only define IPC_INFO when __USE_GNU is defined.
*/ +#ifndef IPC_INFO +#define IPC_INFO 3 +#endif + +struct ipc_limits { + uint64_t shmmni; /* max number of segments */ + uint64_t shmmax; /* max segment size */ + uint64_t shmall; /* max total shared memory */ + uint64_t shmmin; /* min segment size */ + + int semmni; /* max number of arrays */ + int semmsl; /* max semaphores per array */ + int semmns; /* max semaphores system wide */ + int semopm; /* max ops per semop call */ + unsigned int semvmx; /* semaphore max value (constant) */ + + int msgmni; /* max queues system wide */ + size_t msgmax; /* max size of message */ + int msgmnb; /* default max size of queue */ +}; + +struct ipc_status { + int semusz; /* current number of arrays */ + int semaem; /* current semaphores system wide */ +}; + +/* + * The last arg of semctl is a union semun, but where is it defined? X/OPEN + * tells us to define it ourselves, but until recently Linux include files + * would also define it. + */ +#ifndef HAVE_UNION_SEMUN +/* according to X/OPEN we have to define it ourselves */ +union semun { + int val; + struct semid_ds *buf; + unsigned short int *array; + struct seminfo *__buf; +}; +#endif + +static inline int ipc_sem_get_limits(struct ipc_limits *lim) { + static procfile *ff = NULL; + static int error_shown = 0; + static char filename[FILENAME_MAX + 1] = ""; + + if(unlikely(!filename[0])) + snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/sem", netdata_configured_host_prefix); + + if(unlikely(!ff)) { + ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + if(unlikely(!error_shown)) { + error("IPC: Cannot open file '%s'.", filename); + error_shown = 1; + } + goto ipc; + } + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) { + if(unlikely(!error_shown)) { + error("IPC: Cannot read file '%s'.", filename); + error_shown = 1; + } + goto ipc; + } + + if(procfile_lines(ff) >= 1 && procfile_linewords(ff, 0) >= 4) { + lim->semvmx = SEMVMX; + lim->semmsl = str2i(procfile_lineword(ff, 0, 0)); + lim->semmns = str2i(procfile_lineword(ff, 0, 1)); + lim->semopm = str2i(procfile_lineword(ff, 0, 2)); + lim->semmni = str2i(procfile_lineword(ff, 0, 3)); + return 0; + } + else { + if(unlikely(!error_shown)) { + error("IPC: Invalid content in file '%s'.", filename); + error_shown = 1; + } + goto ipc; + } + +ipc: + // cannot do it from the file + // query IPC + { + struct seminfo seminfo = {.semmni = 0}; + union semun arg = {.array = (ushort *) &seminfo}; + + if(unlikely(semctl(0, 0, IPC_INFO, arg) < 0)) { + error("IPC: Failed to read '%s' and request IPC_INFO with semctl().", filename); + goto error; + } + + lim->semvmx = SEMVMX; + lim->semmni = seminfo.semmni; + lim->semmsl = seminfo.semmsl; + lim->semmns = seminfo.semmns; + lim->semopm = seminfo.semopm; + return 0; + } + +error: + lim->semvmx = 0; + lim->semmni = 0; + lim->semmsl = 0; + lim->semmns = 0; + lim->semopm = 0; + return -1; +} + +/* +printf ("------ Semaphore Limits --------\n"); +printf ("max number of arrays = %d\n", limits.semmni); +printf ("max semaphores per array = %d\n", limits.semmsl); +printf ("max semaphores system wide = %d\n", limits.semmns); +printf ("max ops per semop call = %d\n", limits.semopm); +printf ("semaphore max value = %u\n", limits.semvmx); + +printf ("------ Semaphore Status --------\n"); +printf ("used arrays = %d\n", status.semusz); +printf ("allocated semaphores = %d\n", status.semaem); +*/ + +static inline int ipc_sem_get_status(struct ipc_status *st) { + struct seminfo seminfo; + union semun arg; + + arg.array = (ushort *) (void *) &seminfo; + 
+ if(unlikely(semctl (0, 0, SEM_INFO, arg) < 0)) { + /* kernel not configured for semaphores */ + static int error_shown = 0; + if(unlikely(!error_shown)) { + error("IPC: kernel is not configured for semaphores"); + error_shown = 1; + } + st->semusz = 0; + st->semaem = 0; + return -1; + } + + st->semusz = seminfo.semusz; + st->semaem = seminfo.semaem; + return 0; +} + +int do_ipc(int update_every, usec_t dt) { + (void)dt; + + static int initialized = 0, read_limits_next = -1; + static struct ipc_limits limits; + static struct ipc_status status; + static RRDVAR *arrays_max = NULL, *semaphores_max = NULL; + static RRDSET *st_semaphores = NULL, *st_arrays = NULL; + static RRDDIM *rd_semaphores = NULL, *rd_arrays = NULL; + + if(unlikely(!initialized)) { + initialized = 1; + + // make sure it works + if(ipc_sem_get_limits(&limits) == -1) { + error("unable to fetch semaphore limits"); + return 1; + } + + // make sure it works + if(ipc_sem_get_status(&status) == -1) { + error("unable to fetch semaphore statistics"); + return 1; + } + + // create the charts + if(unlikely(!st_semaphores)) { + st_semaphores = rrdset_create_localhost( + "system" + , "ipc_semaphores" + , NULL + , "ipc semaphores" + , NULL + , "IPC Semaphores" + , "semaphores" + , PLUGIN_PROC_NAME + , "ipc" + , NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES + , localhost->rrd_update_every + , RRDSET_TYPE_AREA + ); + rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + if(unlikely(!st_arrays)) { + st_arrays = rrdset_create_localhost( + "system" + , "ipc_semaphore_arrays" + , NULL + , "ipc semaphores" + , NULL + , "IPC Semaphore Arrays" + , "arrays" + , PLUGIN_PROC_NAME + , "ipc" + , NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS + , localhost->rrd_update_every + , RRDSET_TYPE_AREA + ); + rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + // variables + semaphores_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_max"); + arrays_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_arrays_max"); + } + + if(unlikely(read_limits_next < 0)) { + if(unlikely(ipc_sem_get_limits(&limits) == -1)) { + error("Unable to fetch semaphore limits."); + } + else { + if(semaphores_max) rrdvar_custom_host_variable_set(localhost, semaphores_max, limits.semmns); + if(arrays_max) rrdvar_custom_host_variable_set(localhost, arrays_max, limits.semmni); + + st_arrays->red = limits.semmni; + st_semaphores->red = limits.semmns; + + read_limits_next = 60 / update_every; + } + } + else + read_limits_next--; + + if(unlikely(ipc_sem_get_status(&status) == -1)) { + error("Unable to get semaphore statistics"); + return 0; + } + + if(st_semaphores->counter_done) rrdset_next(st_semaphores); + rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem); + rrdset_done(st_semaphores); + + if(st_arrays->counter_done) rrdset_next(st_arrays); + rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz); + rrdset_done(st_arrays); + + return 0; +} diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c new file mode 100644 index 000000000..0c3244d61 --- /dev/null +++ b/collectors/proc.plugin/plugin_proc.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +static struct proc_module { + const char *name; + const char *dim; + + int enabled; + + int (*func)(int update_every, usec_t dt); + usec_t duration; + + RRDDIM *rd; + +} proc_modules[] = { + + // system metrics + { .name = "/proc/stat", .dim 
= "stat", .func = do_proc_stat }, + { .name = "/proc/uptime", .dim = "uptime", .func = do_proc_uptime }, + { .name = "/proc/loadavg", .dim = "loadavg", .func = do_proc_loadavg }, + { .name = "/proc/sys/kernel/random/entropy_avail", .dim = "entropy", .func = do_proc_sys_kernel_random_entropy_avail }, + + // CPU metrics + { .name = "/proc/interrupts", .dim = "interrupts", .func = do_proc_interrupts }, + { .name = "/proc/softirqs", .dim = "softirqs", .func = do_proc_softirqs }, + + // memory metrics + { .name = "/proc/vmstat", .dim = "vmstat", .func = do_proc_vmstat }, + { .name = "/proc/meminfo", .dim = "meminfo", .func = do_proc_meminfo }, + { .name = "/sys/kernel/mm/ksm", .dim = "ksm", .func = do_sys_kernel_mm_ksm }, + { .name = "/sys/devices/system/edac/mc", .dim = "ecc", .func = do_proc_sys_devices_system_edac_mc }, + { .name = "/sys/devices/system/node", .dim = "numa", .func = do_proc_sys_devices_system_node }, + + // network metrics + { .name = "/proc/net/dev", .dim = "netdev", .func = do_proc_net_dev }, + { .name = "/proc/net/sockstat", .dim = "sockstat", .func = do_proc_net_sockstat }, + { .name = "/proc/net/sockstat6", .dim = "sockstat6", .func = do_proc_net_sockstat6 }, + { .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat }, // this has to be before /proc/net/snmp, because there is a shared metric + { .name = "/proc/net/snmp", .dim = "snmp", .func = do_proc_net_snmp }, + { .name = "/proc/net/snmp6", .dim = "snmp6", .func = do_proc_net_snmp6 }, + { .name = "/proc/net/sctp/snmp", .dim = "sctp", .func = do_proc_net_sctp_snmp }, + { .name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat }, + { .name = "/proc/net/ip_vs/stats", .dim = "ipvs", .func = do_proc_net_ip_vs_stats }, + + // firewall metrics + { .name = "/proc/net/stat/conntrack", .dim = "conntrack", .func = do_proc_net_stat_conntrack }, + { .name = "/proc/net/stat/synproxy", .dim = "synproxy", .func = do_proc_net_stat_synproxy }, + + // disk metrics + { .name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats }, + + // NFS metrics + { .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd }, + { .name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs }, + + // ZFS metrics + { .name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats }, + + // BTRFS metrics + { .name = "/sys/fs/btrfs", .dim = "btrfs", .func = do_sys_fs_btrfs }, + + // IPC metrics + { .name = "ipc", .dim = "ipc", .func = do_ipc }, + + // the terminator of this array + { .name = NULL, .dim = NULL, .func = NULL } +}; + +static void proc_main_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *proc_main(void *ptr) { + netdata_thread_cleanup_push(proc_main_cleanup, ptr); + + int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1); + + // check the enabled status for each module + int i; + for(i = 0 ; proc_modules[i].name ;i++) { + struct proc_module *pm = &proc_modules[i]; + + pm->enabled = config_get_boolean("plugin:proc", pm->name, 1); + pm->duration = 0ULL; + pm->rd = NULL; + } + + usec_t step = localhost->rrd_update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + size_t iterations = 0; + + while(!netdata_exit) { + iterations++; + (void)iterations; + + usec_t hb_dt = 
heartbeat_next(&hb, step); + usec_t duration = 0ULL; + + if(unlikely(netdata_exit)) break; + + // BEGIN -- the job to be done + + for(i = 0 ; proc_modules[i].name ;i++) { + struct proc_module *pm = &proc_modules[i]; + if(unlikely(!pm->enabled)) continue; + + debug(D_PROCNETDEV_LOOP, "PROC calling %s.", pm->name); + +//#ifdef NETDATA_LOG_ALLOCATIONS +// if(pm->func == do_proc_interrupts) +// log_thread_memory_allocations = iterations; +//#endif + pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt); + pm->duration = heartbeat_monotonic_dt_to_now_usec(&hb) - duration; + duration += pm->duration; + +//#ifdef NETDATA_LOG_ALLOCATIONS +// if(pm->func == do_proc_interrupts) +// log_thread_memory_allocations = 0; +//#endif + + if(unlikely(netdata_exit)) break; + } + + // END -- the job is done + + // -------------------------------------------------------------------- + + if(vdo_cpu_netdata) { + static RRDSET *st = NULL; + + if(unlikely(!st)) { + st = rrdset_find_bytype_localhost("netdata", "plugin_proc_modules"); + + if(!st) { + st = rrdset_create_localhost( + "netdata" + , "plugin_proc_modules" + , NULL + , "proc" + , NULL + , "NetData Proc Plugin Modules Durations" + , "milliseconds/run" + , "netdata" + , "stats" + , 132001 + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); + + for(i = 0 ; proc_modules[i].name ;i++) { + struct proc_module *pm = &proc_modules[i]; + if(unlikely(!pm->enabled)) continue; + + pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + } + } + else rrdset_next(st); + + for(i = 0 ; proc_modules[i].name ;i++) { + struct proc_module *pm = &proc_modules[i]; + if(unlikely(!pm->enabled)) continue; + + rrddim_set_by_pointer(st, pm->rd, pm->duration); + } + rrdset_done(st); + + global_statistics_charts(); + registry_statistics(); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + +int get_numa_node_count(void) +{ + static int numa_node_count = -1; + + if (numa_node_count != -1) + return numa_node_count; + + numa_node_count = 0; + + char name[FILENAME_MAX + 1]; + snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); + char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); + + DIR *dir = opendir(dirname); + if(dir) { + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type != DT_DIR) + continue; + + if(strncmp(de->d_name, "node", 4) != 0) + continue; + + if(!isdigit(de->d_name[4])) + continue; + + numa_node_count++; + } + closedir(dir); + } + + return numa_node_count; +} diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h new file mode 100644 index 000000000..bfefe1ad4 --- /dev/null +++ b/collectors/proc.plugin/plugin_proc.h @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGIN_PROC_H +#define NETDATA_PLUGIN_PROC_H 1 + +#include "../../daemon/common.h" + +#if (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_PROC \ + { \ + .name = "PLUGIN[proc]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "proc", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = proc_main \ + }, + + +#define PLUGIN_PROC_CONFIG_NAME "proc" +#define PLUGIN_PROC_NAME PLUGIN_PROC_CONFIG_NAME ".plugin" + +extern void *proc_main(void *ptr); + +extern int do_proc_net_dev(int update_every, usec_t dt); +extern int do_proc_diskstats(int update_every, usec_t dt); +extern int do_proc_net_snmp(int update_every, usec_t dt); +extern int 
do_proc_net_snmp6(int update_every, usec_t dt); +extern int do_proc_net_netstat(int update_every, usec_t dt); +extern int do_proc_net_stat_conntrack(int update_every, usec_t dt); +extern int do_proc_net_ip_vs_stats(int update_every, usec_t dt); +extern int do_proc_stat(int update_every, usec_t dt); +extern int do_proc_meminfo(int update_every, usec_t dt); +extern int do_proc_vmstat(int update_every, usec_t dt); +extern int do_proc_net_rpc_nfs(int update_every, usec_t dt); +extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt); +extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt); +extern int do_proc_interrupts(int update_every, usec_t dt); +extern int do_proc_softirqs(int update_every, usec_t dt); +extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt); +extern int do_proc_loadavg(int update_every, usec_t dt); +extern int do_proc_net_stat_synproxy(int update_every, usec_t dt); +extern int do_proc_net_softnet_stat(int update_every, usec_t dt); +extern int do_proc_uptime(int update_every, usec_t dt); +extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt); +extern int do_proc_sys_devices_system_node(int update_every, usec_t dt); +extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt); +extern int do_sys_fs_btrfs(int update_every, usec_t dt); +extern int do_proc_net_sockstat(int update_every, usec_t dt); +extern int do_proc_net_sockstat6(int update_every, usec_t dt); +extern int do_proc_net_sctp_snmp(int update_every, usec_t dt); +extern int do_ipc(int update_every, usec_t dt); +extern int get_numa_node_count(void); + +// metrics that need to be shared among data collectors +extern unsigned long long tcpext_TCPSynRetrans; + +// netdev renames +extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name); +extern void netdev_rename_device_del(const char *host_device); + +#include "proc_self_mountinfo.h" +#include "zfs_common.h" + +#else // (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_PROC + +#endif // (TARGET_OS == OS_LINUX) + + +#endif /* NETDATA_PLUGIN_PROC_H */ diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c new file mode 100644 index 000000000..387b395a3 --- /dev/null +++ b/collectors/proc.plugin/proc_diskstats.c @@ -0,0 +1,1649 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define RRD_TYPE_DISK "disk" +#define PLUGIN_PROC_MODULE_DISKSTATS_NAME "/proc/diskstats" +#define CONFIG_SECTION_PLUGIN_PROC_DISKSTATS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_DISKSTATS_NAME + +#define DISK_TYPE_UNKNOWN 0 +#define DISK_TYPE_PHYSICAL 1 +#define DISK_TYPE_PARTITION 2 +#define DISK_TYPE_VIRTUAL 3 + +#define DEFAULT_EXCLUDED_DISKS "loop* ram*" + +static struct disk { + char *disk; // the name of the disk (sda, sdb, etc, after being looked up) + char *device; // the device of the disk (before being looked up) + unsigned long major; + unsigned long minor; + int sector_size; + int type; + + char *mount_point; + + // disk options caching + int do_io; + int do_ops; + int do_mops; + int do_iotime; + int do_qops; + int do_util; + int do_backlog; + int do_bcache; + + int updated; + + int device_is_bcache; + + char *bcache_filename_dirty_data; + char *bcache_filename_writeback_rate; + char *bcache_filename_cache_congested; + char *bcache_filename_cache_available_percent; + char *bcache_filename_stats_five_minute_cache_hit_ratio; + char 
*bcache_filename_stats_hour_cache_hit_ratio; + char *bcache_filename_stats_day_cache_hit_ratio; + char *bcache_filename_stats_total_cache_hit_ratio; + char *bcache_filename_stats_total_cache_hits; + char *bcache_filename_stats_total_cache_misses; + char *bcache_filename_stats_total_cache_miss_collisions; + char *bcache_filename_stats_total_cache_bypass_hits; + char *bcache_filename_stats_total_cache_bypass_misses; + char *bcache_filename_stats_total_cache_readaheads; + char *bcache_filename_cache_read_races; + char *bcache_filename_cache_io_errors; + char *bcache_filename_priority_stats; + + usec_t bcache_priority_stats_update_every_usec; + usec_t bcache_priority_stats_elapsed_usec; + + RRDSET *st_io; + RRDDIM *rd_io_reads; + RRDDIM *rd_io_writes; + + RRDSET *st_ops; + RRDDIM *rd_ops_reads; + RRDDIM *rd_ops_writes; + + RRDSET *st_qops; + RRDDIM *rd_qops_operations; + + RRDSET *st_backlog; + RRDDIM *rd_backlog_backlog; + + RRDSET *st_util; + RRDDIM *rd_util_utilization; + + RRDSET *st_mops; + RRDDIM *rd_mops_reads; + RRDDIM *rd_mops_writes; + + RRDSET *st_iotime; + RRDDIM *rd_iotime_reads; + RRDDIM *rd_iotime_writes; + + RRDSET *st_await; + RRDDIM *rd_await_reads; + RRDDIM *rd_await_writes; + + RRDSET *st_avgsz; + RRDDIM *rd_avgsz_reads; + RRDDIM *rd_avgsz_writes; + + RRDSET *st_svctm; + RRDDIM *rd_svctm_svctm; + + RRDSET *st_bcache_size; + RRDDIM *rd_bcache_dirty_size; + + RRDSET *st_bcache_usage; + RRDDIM *rd_bcache_available_percent; + + RRDSET *st_bcache_hit_ratio; + RRDDIM *rd_bcache_hit_ratio_5min; + RRDDIM *rd_bcache_hit_ratio_1hour; + RRDDIM *rd_bcache_hit_ratio_1day; + RRDDIM *rd_bcache_hit_ratio_total; + + RRDSET *st_bcache; + RRDDIM *rd_bcache_hits; + RRDDIM *rd_bcache_misses; + RRDDIM *rd_bcache_miss_collisions; + + RRDSET *st_bcache_bypass; + RRDDIM *rd_bcache_bypass_hits; + RRDDIM *rd_bcache_bypass_misses; + + RRDSET *st_bcache_rates; + RRDDIM *rd_bcache_rate_congested; + RRDDIM *rd_bcache_readaheads; + RRDDIM *rd_bcache_rate_writeback; + + RRDSET *st_bcache_cache_allocations; + RRDDIM *rd_bcache_cache_allocations_unused; + RRDDIM *rd_bcache_cache_allocations_clean; + RRDDIM *rd_bcache_cache_allocations_dirty; + RRDDIM *rd_bcache_cache_allocations_metadata; + RRDDIM *rd_bcache_cache_allocations_unknown; + + RRDSET *st_bcache_cache_read_races; + RRDDIM *rd_bcache_cache_read_races; + RRDDIM *rd_bcache_cache_io_errors; + + struct disk *next; +} *disk_root = NULL; + +#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st) + +// static char *path_to_get_hw_sector_size = NULL; +// static char *path_to_get_hw_sector_size_partitions = NULL; +static char *path_to_sys_dev_block_major_minor_string = NULL; +static char *path_to_sys_block_device = NULL; +static char *path_to_sys_block_device_bcache = NULL; +static char *path_to_sys_devices_virtual_block_device = NULL; +static char *path_to_device_mapper = NULL; +static char *path_to_device_label = NULL; +static char *path_to_device_id = NULL; +static char *path_to_veritas_volume_groups = NULL; +static int name_disks_by_id = CONFIG_BOOLEAN_NO; +static int global_bcache_priority_stats_update_every = 0; // disabled by default + +static int global_enable_new_disks_detected_at_runtime = CONFIG_BOOLEAN_YES, + global_enable_performance_for_physical_disks = CONFIG_BOOLEAN_AUTO, + global_enable_performance_for_virtual_disks = CONFIG_BOOLEAN_AUTO, + global_enable_performance_for_partitions = CONFIG_BOOLEAN_NO, + global_do_io = CONFIG_BOOLEAN_AUTO, + global_do_ops = CONFIG_BOOLEAN_AUTO, + 
global_do_mops = CONFIG_BOOLEAN_AUTO, + global_do_iotime = CONFIG_BOOLEAN_AUTO, + global_do_qops = CONFIG_BOOLEAN_AUTO, + global_do_util = CONFIG_BOOLEAN_AUTO, + global_do_backlog = CONFIG_BOOLEAN_AUTO, + global_do_bcache = CONFIG_BOOLEAN_AUTO, + globals_initialized = 0, + global_cleanup_removed_disks = 1; + +static SIMPLE_PATTERN *excluded_disks = NULL; + +static unsigned long long int bcache_read_number_with_units(const char *filename) { + char buffer[50 + 1]; + if(read_file(filename, buffer, 50) == 0) { + static int unknown_units_error = 10; + + char *end = NULL; + long double value = str2ld(buffer, &end); + if(end && *end) { + if(*end == 'k') + return (unsigned long long int)(value * 1024.0); + else if(*end == 'M') + return (unsigned long long int)(value * 1024.0 * 1024.0); + else if(*end == 'G') + return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0); + else if(unknown_units_error > 0) { + error("bcache file '%s' provides value '%s' with unknown units '%s'", filename, buffer, end); + unknown_units_error--; + } + } + + return (unsigned long long int)value; + } + + return 0; +} + +void bcache_read_priority_stats(struct disk *d, const char *family, int update_every, usec_t dt) { + static procfile *ff = NULL; + static char *separators = " \t:%[]"; + + static ARL_BASE *arl_base = NULL; + + static unsigned long long unused; + static unsigned long long clean; + static unsigned long long dirty; + static unsigned long long metadata; + static unsigned long long unknown; + + // check if it is time to update this metric + d->bcache_priority_stats_elapsed_usec += dt; + if(likely(d->bcache_priority_stats_elapsed_usec < d->bcache_priority_stats_update_every_usec)) return; + d->bcache_priority_stats_elapsed_usec = 0; + + // initialize ARL + if(unlikely(!arl_base)) { + arl_base = arl_create("bcache/priority_stats", NULL, 60); + arl_expect(arl_base, "Unused", &unused); + arl_expect(arl_base, "Clean", &clean); + arl_expect(arl_base, "Dirty", &dirty); + arl_expect(arl_base, "Metadata", &metadata); + } + + ff = procfile_reopen(ff, d->bcache_filename_priority_stats, separators, PROCFILE_FLAG_DEFAULT); + if(likely(ff)) ff = procfile_readall(ff); + if(unlikely(!ff)) { + separators = " \t:%[]"; + return; + } + + // do not reset the separators on every iteration + separators = NULL; + + arl_begin(arl_base); + unused = clean = dirty = metadata = unknown = 0; + + size_t lines = procfile_lines(ff), l; + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 2)) { + if(unlikely(words)) error("Cannot read '%s' line %zu. 
Expected 2 params, read %zu.", d->bcache_filename_priority_stats, l, words); + continue; + } + + if(unlikely(arl_check(arl_base, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + unknown = 100 - unused - clean - dirty - metadata; + + // create / update the cache allocations chart + { + if(unlikely(!d->st_bcache_cache_allocations)) { + d->st_bcache_cache_allocations = rrdset_create_localhost( + "disk_bcache_cache_alloc" + , d->device + , d->disk + , family + , "disk.bcache_cache_alloc" + , "BCache Cache Allocations" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC + , update_every + , RRDSET_TYPE_STACKED + ); + + d->rd_bcache_cache_allocations_unused = rrddim_add(d->st_bcache_cache_allocations, "unused", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_cache_allocations_dirty = rrddim_add(d->st_bcache_cache_allocations, "dirty", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_cache_allocations_clean = rrddim_add(d->st_bcache_cache_allocations, "clean", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_cache_allocations_metadata = rrddim_add(d->st_bcache_cache_allocations, "metadata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_cache_allocations_unknown = rrddim_add(d->st_bcache_cache_allocations, "undefined", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + d->bcache_priority_stats_update_every_usec = update_every * USEC_PER_SEC; + } + else rrdset_next(d->st_bcache_cache_allocations); + + rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unused, unused); + rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_dirty, dirty); + rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_clean, clean); + rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_metadata, metadata); + rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unknown, unknown); + rrdset_done(d->st_bcache_cache_allocations); + } +} + +static inline int is_major_enabled(int major) { + static int8_t *major_configs = NULL; + static size_t major_size = 0; + + if(major < 0) return 1; + + size_t wanted_size = (size_t)major + 1; + + if(major_size < wanted_size) { + major_configs = reallocz(major_configs, wanted_size * sizeof(int8_t)); + + size_t i; + for(i = major_size; i < wanted_size ; i++) + major_configs[i] = -1; + + major_size = wanted_size; + } + + if(major_configs[major] == -1) { + char buffer[CONFIG_MAX_NAME + 1]; + snprintfz(buffer, CONFIG_MAX_NAME, "performance metrics for disks with major %d", major); + major_configs[major] = (char)config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, buffer, 1); + } + + return (int)major_configs[major]; +} + +static inline int get_disk_name_from_path(const char *path, char *result, size_t result_size, unsigned long major, unsigned long minor, char *disk, char *prefix, int depth) { + //info("DEVICE-MAPPER ('%s', %lu:%lu): examining directory '%s' (allowed depth %d).", disk, major, minor, path, depth); + + int found = 0; + + DIR *dir = opendir(path); + if (!dir) { + error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path); + goto failed; + } + + struct dirent *de = NULL; + while ((de = readdir(dir))) { + if(de->d_type == DT_DIR) { + if((de->d_name[0] == '.' && de->d_name[1] == '\0') || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0')) + continue; + + if(depth <= 0) { + error("DEVICE-MAPPER ('%s', %lu:%lu): Depth limit reached for path '%s/%s'. Ignoring path.", disk, major, minor, path, de->d_name); + break; + } + else { + char *path_nested = NULL; + char *prefix_nested = NULL; + + { + char buffer[FILENAME_MAX + 1]; + snprintfz(buffer, FILENAME_MAX, "%s/%s", path, de->d_name); + path_nested = strdupz(buffer); + + snprintfz(buffer, FILENAME_MAX, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name); + prefix_nested = strdupz(buffer); + } + + found = get_disk_name_from_path(path_nested, result, result_size, major, minor, disk, prefix_nested, depth - 1); + freez(path_nested); + freez(prefix_nested); + + if(found) break; + } + } + else if(de->d_type == DT_LNK || de->d_type == DT_BLK) { + char filename[FILENAME_MAX + 1]; + + if(de->d_type == DT_LNK) { + snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name); + ssize_t len = readlink(filename, result, result_size - 1); + if(len <= 0) { + error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot read link '%s'.", disk, major, minor, filename); + continue; + } + + result[len] = '\0'; + if(result[0] != '/') + snprintfz(filename, FILENAME_MAX, "%s/%s", path, result); + else + strncpyz(filename, result, FILENAME_MAX); + } + else { + snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name); + } + + struct stat sb; + if(stat(filename, &sb) == -1) { + error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot stat() file '%s'.", disk, major, minor, filename); + continue; + } + + if((sb.st_mode & S_IFMT) != S_IFBLK) { + //info("DEVICE-MAPPER ('%s', %lu:%lu): file '%s' is not a block device.", disk, major, minor, filename); + continue; + } + + if(major(sb.st_rdev) != major || minor(sb.st_rdev) != minor) { + //info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' does not match %lu:%lu.", disk, major, minor, filename, (unsigned long)major(sb.st_rdev), (unsigned long)minor(sb.st_rdev)); + continue; + } + + //info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' matches.", disk, major, minor, filename); + + snprintfz(result, result_size - 1, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name); + found = 1; + break; + } + } + closedir(dir); + + +failed: + + if(!found) + result[0] = '\0'; + + return found; +} + +static inline char *get_disk_name(unsigned long major, unsigned long minor, char *disk) { + char result[FILENAME_MAX + 1] = ""; + + if(!path_to_device_mapper || !*path_to_device_mapper || !get_disk_name_from_path(path_to_device_mapper, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0)) + if(!path_to_device_label || !*path_to_device_label || !get_disk_name_from_path(path_to_device_label, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0)) + if(!path_to_veritas_volume_groups || !*path_to_veritas_volume_groups || !get_disk_name_from_path(path_to_veritas_volume_groups, result, FILENAME_MAX + 1, major, minor, disk, "vx", 2)) + if(name_disks_by_id != CONFIG_BOOLEAN_YES || !path_to_device_id || !*path_to_device_id || !get_disk_name_from_path(path_to_device_id, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0)) + strncpy(result, disk, FILENAME_MAX); + + if(!result[0]) + strncpy(result, disk, FILENAME_MAX); + + netdata_fix_chart_name(result); + return strdup(result); +} + +static void get_disk_config(struct disk *d) { + int def_enable = global_enable_new_disks_detected_at_runtime; + + if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk))) + def_enable = CONFIG_BOOLEAN_NO; + + 
char var_name[4096 + 1]; + snprintfz(var_name, 4096, CONFIG_SECTION_PLUGIN_PROC_DISKSTATS ":%s", d->disk); + + def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable); + if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) { + // the user does not want any metrics for this disk + d->do_io = CONFIG_BOOLEAN_NO; + d->do_ops = CONFIG_BOOLEAN_NO; + d->do_mops = CONFIG_BOOLEAN_NO; + d->do_iotime = CONFIG_BOOLEAN_NO; + d->do_qops = CONFIG_BOOLEAN_NO; + d->do_util = CONFIG_BOOLEAN_NO; + d->do_backlog = CONFIG_BOOLEAN_NO; + d->do_bcache = CONFIG_BOOLEAN_NO; + } + else { + // this disk is enabled + // check its direct settings + + int def_performance = CONFIG_BOOLEAN_AUTO; + + // since this is 'on demand' we can figure the performance settings + // based on the type of disk + + if(!d->device_is_bcache) { + switch(d->type) { + default: + case DISK_TYPE_UNKNOWN: + break; + + case DISK_TYPE_PHYSICAL: + def_performance = global_enable_performance_for_physical_disks; + break; + + case DISK_TYPE_PARTITION: + def_performance = global_enable_performance_for_partitions; + break; + + case DISK_TYPE_VIRTUAL: + def_performance = global_enable_performance_for_virtual_disks; + break; + } + } + + // check if we have to disable performance for this disk + if(def_performance) + def_performance = is_major_enabled((int)d->major); + + // ------------------------------------------------------------ + // now we have def_performance and def_space + // to work further + + // def_performance + // check the user configuration (this will also show our 'on demand' decision) + def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance); + + int ddo_io = CONFIG_BOOLEAN_NO, + ddo_ops = CONFIG_BOOLEAN_NO, + ddo_mops = CONFIG_BOOLEAN_NO, + ddo_iotime = CONFIG_BOOLEAN_NO, + ddo_qops = CONFIG_BOOLEAN_NO, + ddo_util = CONFIG_BOOLEAN_NO, + ddo_backlog = CONFIG_BOOLEAN_NO, + ddo_bcache = CONFIG_BOOLEAN_NO; + + // we enable individual performance charts only when def_performance is not disabled + if(unlikely(def_performance != CONFIG_BOOLEAN_NO)) { + ddo_io = global_do_io, + ddo_ops = global_do_ops, + ddo_mops = global_do_mops, + ddo_iotime = global_do_iotime, + ddo_qops = global_do_qops, + ddo_util = global_do_util, + ddo_backlog = global_do_backlog, + ddo_bcache = global_do_bcache; + } + + d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io); + d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops); + d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops); + d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime); + d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops); + d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util); + d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog); + + if(d->device_is_bcache) + d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache); + else + d->do_bcache = 0; + } +} + +static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) { + static struct mountinfo *disk_mountinfo_root = NULL; + + struct disk *d; + + // search for it in our RAM list. 
+ // this is sequential, but since we just walk through + // and the number of disks / partitions in a system + // should not be that many, it should be acceptable + for(d = disk_root; d ; d = d->next) + if(unlikely(d->major == major && d->minor == minor)) + return d; + + // not found + // create a new disk structure + d = (struct disk *)callocz(1, sizeof(struct disk)); + + d->disk = get_disk_name(major, minor, disk); + d->device = strdupz(disk); + d->major = major; + d->minor = minor; + d->type = DISK_TYPE_UNKNOWN; // Default type. Changed later if not correct. + d->sector_size = 512; // the default, will be changed below + d->next = NULL; + + // append it to the list + if(unlikely(!disk_root)) + disk_root = d; + else { + struct disk *last; + for(last = disk_root; last->next ;last = last->next); + last->next = d; + } + + char buffer[FILENAME_MAX + 1]; + + // find if it is a physical disk + // by checking if /sys/block/DISK is readable. + snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device, disk); + if(likely(access(buffer, R_OK) == 0)) { + // assign it here, but it will be overwritten if it is not a physical disk + d->type = DISK_TYPE_PHYSICAL; + } + + // find if it is a partition + // by checking if /sys/dev/block/MAJOR:MINOR/partition is readable. + snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "partition"); + if(likely(access(buffer, R_OK) == 0)) { + d->type = DISK_TYPE_PARTITION; + } + else { + // find if it is a virtual disk + // by checking if /sys/devices/virtual/block/DISK is readable. + snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_block_device, disk); + if(likely(access(buffer, R_OK) == 0)) { + d->type = DISK_TYPE_VIRTUAL; + } + else { + // find if it is a virtual device + // by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries + snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "slaves/"); + DIR *dirp = opendir(buffer); + if (likely(dirp != NULL)) { + struct dirent *dp; + while ((dp = readdir(dirp))) { + // . and .. are also files in empty folders. + if (unlikely(strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0)) { + continue; + } + + d->type = DISK_TYPE_VIRTUAL; + + // Stop the loop after we found one file. + break; + } + if (unlikely(closedir(dirp) == -1)) + error("Unable to close dir %s", buffer); + } + } + } + + // ------------------------------------------------------------------------ + // check if we can find its mount point + + // mountinfo_find() can be called with NULL disk_mountinfo_root + struct mountinfo *mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor); + if(unlikely(!mi)) { + // mountinfo_free_all can be called with NULL + mountinfo_free_all(disk_mountinfo_root); + disk_mountinfo_root = mountinfo_read(0); + mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor); + } + + if(unlikely(mi)) + d->mount_point = strdupz(mi->mount_point); + else + d->mount_point = NULL; + + // ------------------------------------------------------------------------ + // find the disk sector size + + /* + * sector size is always 512 bytes inside the kernel #3481 + * + { + char tf[FILENAME_MAX + 1], *t; + strncpyz(tf, d->device, FILENAME_MAX); + + // replace all / with ! 
+ for(t = tf; *t ;t++) + if(unlikely(*t == '/')) *t = '!'; + + if(likely(d->type == DISK_TYPE_PARTITION)) + snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size_partitions, d->major, d->minor, tf); + else + snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size, tf); + + FILE *fpss = fopen(buffer, "r"); + if(likely(fpss)) { + char buffer2[1024 + 1]; + char *tmp = fgets(buffer2, 1024, fpss); + + if(likely(tmp)) { + d->sector_size = str2i(tmp); + if(unlikely(d->sector_size <= 0)) { + error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->device, buffer); + d->sector_size = 512; + } + } + else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->device, buffer); + + fclose(fpss); + } + else error("Cannot read sector size for device %s from %s. Assuming 512.", d->device, buffer); + } + */ + + // ------------------------------------------------------------------------ + // check if the device is a bcache + + struct stat bcache; + snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device_bcache, disk); + if(unlikely(stat(buffer, &bcache) == 0 && (bcache.st_mode & S_IFMT) == S_IFDIR)) { + // we have the 'bcache' directory + d->device_is_bcache = 1; + + char buffer2[FILENAME_MAX + 1]; + + snprintfz(buffer2, FILENAME_MAX, "%s/cache/congested", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_cache_congested = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/readahead", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_readaheads = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/priority_stats", buffer); // only one cache is supported by bcache + if(access(buffer2, R_OK) == 0) + d->bcache_filename_priority_stats = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/cache/internal/cache_read_races", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_cache_read_races = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/io_errors", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_cache_io_errors = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/dirty_data", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_dirty_data = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/writeback_rate", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_writeback_rate = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache_available_percent", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_cache_available_percent = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hits", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_hits = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_five_minute/cache_hit_ratio", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_five_minute_cache_hit_ratio = strdupz(buffer2); + else + 
error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_hour/cache_hit_ratio", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_hour_cache_hit_ratio = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_day/cache_hit_ratio", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_day_cache_hit_ratio = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hit_ratio", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_hit_ratio = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_misses", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_misses = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_hits", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_bypass_hits = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_misses", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_bypass_misses = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + + snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_miss_collisions", buffer); + if(access(buffer2, R_OK) == 0) + d->bcache_filename_stats_total_cache_miss_collisions = strdupz(buffer2); + else + error("bcache file '%s' cannot be read.", buffer2); + } + + get_disk_config(d); + return d; +} + +int do_proc_diskstats(int update_every, usec_t dt) { + static procfile *ff = NULL; + + if(unlikely(!globals_initialized)) { + globals_initialized = 1; + + global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime); + global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks); + global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks); + global_enable_performance_for_partitions = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for partitions", global_enable_performance_for_partitions); + + global_do_io = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bandwidth for all disks", global_do_io); + global_do_ops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "operations for all disks", global_do_ops); + global_do_mops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "merged operations for all disks", global_do_mops); + global_do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "i/o time for all disks", global_do_iotime); + global_do_qops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "queued operations for all disks", global_do_qops); + global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "utilization percentage for all disks", global_do_util); 
+ global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog); + global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache); + global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every); + + global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks); + + char buffer[FILENAME_MAX + 1]; + + snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s"); + path_to_sys_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/bcache"); + path_to_sys_block_device_bcache = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device bcache", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/block/%s"); + path_to_sys_devices_virtual_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get virtual block device", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s"); + path_to_sys_dev_block_major_minor_string = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device infos", buffer); + + //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size"); + //path_to_get_hw_sector_size = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size", buffer); + + //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size"); + //path_to_get_hw_sector_size_partitions = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size for partitions", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix); + path_to_device_mapper = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to device mapper", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix); + path_to_device_label = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-label", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-id", netdata_configured_host_prefix); + path_to_device_id = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-id", buffer); + + snprintfz(buffer, FILENAME_MAX, "%s/dev/vx/dsk", netdata_configured_host_prefix); + path_to_veritas_volume_groups = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/vx/dsk", buffer); + + name_disks_by_id = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "name disks by id", name_disks_by_id); + + excluded_disks = simple_pattern_create( + config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "exclude disks", DEFAULT_EXCLUDED_DISKS) + , NULL + , SIMPLE_PATTERN_EXACT + ); + } + + // -------------------------------------------------------------------------- + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/diskstats"); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "filename to monitor", filename), " \t", 
PROCFILE_FLAG_DEFAULT); + } + if(unlikely(!ff)) return 0; + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + collected_number system_read_kb = 0, system_write_kb = 0; + + for(l = 0; l < lines ;l++) { + // -------------------------------------------------------------------------- + // Read parameters + + char *disk; + unsigned long major = 0, minor = 0; + + collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0, + writes = 0, mwrites = 0, writesectors = 0, writems = 0, + queued_ios = 0, busy_ms = 0, backlog_ms = 0; + + collected_number last_reads = 0, last_readsectors = 0, last_readms = 0, + last_writes = 0, last_writesectors = 0, last_writems = 0, + last_busy_ms = 0; + + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 14)) continue; + + major = str2ul(procfile_lineword(ff, l, 0)); + minor = str2ul(procfile_lineword(ff, l, 1)); + disk = procfile_lineword(ff, l, 2); + + // # of reads completed # of writes completed + // This is the total number of reads or writes completed successfully. + reads = str2ull(procfile_lineword(ff, l, 3)); // rd_ios + writes = str2ull(procfile_lineword(ff, l, 7)); // wr_ios + + // # of reads merged # of writes merged + // Reads and writes which are adjacent to each other may be merged for + // efficiency. Thus two 4K reads may become one 8K read before it is + // ultimately handed to the disk, and so it will be counted (and queued) + mreads = str2ull(procfile_lineword(ff, l, 4)); // rd_merges_or_rd_sec + mwrites = str2ull(procfile_lineword(ff, l, 8)); // wr_merges + + // # of sectors read # of sectors written + // This is the total number of sectors read or written successfully. + readsectors = str2ull(procfile_lineword(ff, l, 5)); // rd_sec_or_wr_ios + writesectors = str2ull(procfile_lineword(ff, l, 9)); // wr_sec + + // # of milliseconds spent reading # of milliseconds spent writing + // This is the total number of milliseconds spent by all reads or writes (as + // measured from __make_request() to end_that_request_last()). + readms = str2ull(procfile_lineword(ff, l, 6)); // rd_ticks_or_wr_sec + writems = str2ull(procfile_lineword(ff, l, 10)); // wr_ticks + + // # of I/Os currently in progress + // The only field that should go to zero. Incremented as requests are + // given to appropriate struct request_queue and decremented as they finish. + queued_ios = str2ull(procfile_lineword(ff, l, 11)); // ios_pgr + + // # of milliseconds spent doing I/Os + // This field increases so long as field queued_ios is nonzero. + busy_ms = str2ull(procfile_lineword(ff, l, 12)); // tot_ticks + + // weighted # of milliseconds spent doing I/Os + // This field is incremented at each I/O start, I/O completion, I/O + // merge, or read of these stats by the number of I/Os in progress + // (field queued_ios) times the number of milliseconds spent doing I/O since the + // last update of this field. This can provide an easy measure of both + // I/O completion time and the backlog that may be accumulating. 
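+ // Worked example for the differential charts computed further down + // (illustrative numbers): if reads grows by 100 and readms by 250 + // between two iterations, the await chart reports 250 / 100 = 2.5 ms + // per completed read; avgsz divides the sector delta by the same + // operations delta, and svctm divides the busy_ms delta by the delta of + // (reads + writes), essentially the derivations iostat uses.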
+ backlog_ms = str2ull(procfile_lineword(ff, l, 13)); // rq_ticks + + + // -------------------------------------------------------------------------- + // remove slashes from disk names + char *s; + for(s = disk; *s ;s++) + if(*s == '/') *s = '_'; + + // -------------------------------------------------------------------------- + // get a disk structure for the disk + + struct disk *d = get_disk(major, minor, disk); + d->updated = 1; + + // -------------------------------------------------------------------------- + // count the global system disk I/O of physical disks + + if(unlikely(d->type == DISK_TYPE_PHYSICAL)) { + system_read_kb += readsectors * d->sector_size / 1024; + system_write_kb += writesectors * d->sector_size / 1024; + } + + // -------------------------------------------------------------------------- + // Set its family based on mount point + + char *family = d->mount_point; + if(!family) family = d->disk; + + + // -------------------------------------------------------------------------- + // Do performance metrics + + if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) { + d->do_io = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_io)) { + d->st_io = rrdset_create_localhost( + RRD_TYPE_DISK + , d->device + , d->disk + , family + , "disk.io" + , "Disk I/O Bandwidth" + , "kilobytes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_IO + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_INCREMENTAL); + d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_io); + + last_readsectors = rrddim_set_by_pointer(d->st_io, d->rd_io_reads, readsectors); + last_writesectors = rrddim_set_by_pointer(d->st_io, d->rd_io_writes, writesectors); + rrdset_done(d->st_io); + } + + // -------------------------------------------------------------------- + + if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes))) { + d->do_ops = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_ops)) { + d->st_ops = rrdset_create_localhost( + "disk_ops" + , d->device + , d->disk + , family + , "disk.ops" + , "Disk Completed I/O Operations" + , "operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_OPS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL); + + d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_ops); + + last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads); + last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes); + rrdset_done(d->st_ops); + } + + // -------------------------------------------------------------------- + + if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && queued_ios)) { + d->do_qops = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_qops)) { + d->st_qops = rrdset_create_localhost( + "disk_qops" + , d->device + , d->disk + , family + , "disk.qops" + , "Disk Current I/O Operations" + , "operations" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_QOPS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL); + + 
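// queued_ios (parsed above) is a gauge of I/Os currently in progress, + // "the only field that should go to zero", not a monotonic counter; + // hence RRD_ALGORITHM_ABSOLUTE here instead of the + // RRD_ALGORITHM_INCREMENTAL used by the counter-based charts. + + 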
d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_qops); + + rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios); + rrdset_done(d->st_qops); + } + + // -------------------------------------------------------------------- + + if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && backlog_ms)) { + d->do_backlog = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_backlog)) { + d->st_backlog = rrdset_create_localhost( + "disk_backlog" + , d->device + , d->disk + , family + , "disk.backlog" + , "Disk Backlog" + , "backlog (ms)" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_BACKLOG + , update_every + , RRDSET_TYPE_AREA + ); + + rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL); + + d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_backlog); + + rrddim_set_by_pointer(d->st_backlog, d->rd_backlog_backlog, backlog_ms); + rrdset_done(d->st_backlog); + } + + // -------------------------------------------------------------------- + + if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) { + d->do_util = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_util)) { + d->st_util = rrdset_create_localhost( + "disk_util" + , d->device + , d->disk + , family + , "disk.util" + , "Disk Utilization Time" + , "% of time working" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_UTIL + , update_every + , RRDSET_TYPE_AREA + ); + + rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL); + + d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_util); + + last_busy_ms = rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, busy_ms); + rrdset_done(d->st_util); + } + + // -------------------------------------------------------------------- + + if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && (mreads || mwrites))) { + d->do_mops = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_mops)) { + d->st_mops = rrdset_create_localhost( + "disk_mops" + , d->device + , d->disk + , family + , "disk.mops" + , "Disk Merged Operations" + , "merged operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_MOPS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL); + + d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_mops); + + rrddim_set_by_pointer(d->st_mops, d->rd_mops_reads, mreads); + rrddim_set_by_pointer(d->st_mops, d->rd_mops_writes, mwrites); + rrdset_done(d->st_mops); + } + + // -------------------------------------------------------------------- + + if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) { + d->do_iotime = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_iotime)) { + d->st_iotime = rrdset_create_localhost( + "disk_iotime" + , d->device + , d->disk + , family + , "disk.iotime" + , "Disk Total I/O Time" + , "milliseconds/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_IOTIME + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL); + + 
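// As with last_reads/last_writes in the ops chart above, the values + // captured from rrddim_set_by_pointer() below are the samples stored on + // the previous iteration; the await chart inside the dt block further + // down divides the readms/writems deltas by those reads/writes deltas. + + 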
d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_iotime); + + last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms); + last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems); + rrdset_done(d->st_iotime); + } + + // -------------------------------------------------------------------- + // calculate differential charts + // only if this is not the first time we run + + if(likely(dt)) { + if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + + if(unlikely(!d->st_await)) { + d->st_await = rrdset_create_localhost( + "disk_await" + , d->device + , d->disk + , family + , "disk.await" + , "Average Completed I/O Operation Time" + , "ms per operation" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_AWAIT + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL); + + d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_await); + + rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (reads - last_reads) ? (readms - last_readms) / (reads - last_reads) : 0); + rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (writes - last_writes) ? (writems - last_writems) / (writes - last_writes) : 0); + rrdset_done(d->st_await); + } + + if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + + if(unlikely(!d->st_avgsz)) { + d->st_avgsz = rrdset_create_localhost( + "disk_avgsz" + , d->device + , d->disk + , family + , "disk.avgsz" + , "Average Completed I/O Operation Bandwidth" + , "kilobytes per operation" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_AVGSZ + , update_every + , RRDSET_TYPE_AREA + ); + + rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL); + + d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_ABSOLUTE); + d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_avgsz); + + rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0); + rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? 
(writesectors - last_writesectors) / (writes - last_writes) : 0); + rrdset_done(d->st_avgsz); + } + + if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) && + (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { + + if(unlikely(!d->st_svctm)) { + d->st_svctm = rrdset_create_localhost( + "disk_svctm" + , d->device + , d->disk + , family + , "disk.svctm" + , "Average Service Time" + , "ms per operation" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_DISK_SVCTM + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL); + + d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_svctm); + + rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, ((reads - last_reads) + (writes - last_writes)) ? (busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : 0); + rrdset_done(d->st_svctm); + } + } + + // -------------------------------------------------------------------------- + // read bcache metrics and generate the bcache charts + + if(d->device_is_bcache && d->do_bcache != CONFIG_BOOLEAN_NO) { + unsigned long long int + stats_total_cache_bypass_hits = 0, + stats_total_cache_bypass_misses = 0, + stats_total_cache_hits = 0, + stats_total_cache_miss_collisions = 0, + stats_total_cache_misses = 0, + stats_five_minute_cache_hit_ratio = 0, + stats_hour_cache_hit_ratio = 0, + stats_day_cache_hit_ratio = 0, + stats_total_cache_hit_ratio = 0, + cache_available_percent = 0, + cache_readaheads = 0, + cache_read_races = 0, + cache_io_errors = 0, + cache_congested = 0, + dirty_data = 0, + writeback_rate = 0; + + // read the bcache values + + if(d->bcache_filename_dirty_data) + dirty_data = bcache_read_number_with_units(d->bcache_filename_dirty_data); + + if(d->bcache_filename_writeback_rate) + writeback_rate = bcache_read_number_with_units(d->bcache_filename_writeback_rate); + + if(d->bcache_filename_cache_congested) + cache_congested = bcache_read_number_with_units(d->bcache_filename_cache_congested); + + if(d->bcache_filename_cache_available_percent) + read_single_number_file(d->bcache_filename_cache_available_percent, &cache_available_percent); + + if(d->bcache_filename_stats_five_minute_cache_hit_ratio) + read_single_number_file(d->bcache_filename_stats_five_minute_cache_hit_ratio, &stats_five_minute_cache_hit_ratio); + + if(d->bcache_filename_stats_hour_cache_hit_ratio) + read_single_number_file(d->bcache_filename_stats_hour_cache_hit_ratio, &stats_hour_cache_hit_ratio); + + if(d->bcache_filename_stats_day_cache_hit_ratio) + read_single_number_file(d->bcache_filename_stats_day_cache_hit_ratio, &stats_day_cache_hit_ratio); + + if(d->bcache_filename_stats_total_cache_hit_ratio) + read_single_number_file(d->bcache_filename_stats_total_cache_hit_ratio, &stats_total_cache_hit_ratio); + + if(d->bcache_filename_stats_total_cache_hits) + read_single_number_file(d->bcache_filename_stats_total_cache_hits, &stats_total_cache_hits); + + if(d->bcache_filename_stats_total_cache_misses) + read_single_number_file(d->bcache_filename_stats_total_cache_misses, &stats_total_cache_misses); + + if(d->bcache_filename_stats_total_cache_miss_collisions) + read_single_number_file(d->bcache_filename_stats_total_cache_miss_collisions, &stats_total_cache_miss_collisions); + + if(d->bcache_filename_stats_total_cache_bypass_hits) + read_single_number_file(d->bcache_filename_stats_total_cache_bypass_hits, 
&stats_total_cache_bypass_hits); + + if(d->bcache_filename_stats_total_cache_bypass_misses) + read_single_number_file(d->bcache_filename_stats_total_cache_bypass_misses, &stats_total_cache_bypass_misses); + + if(d->bcache_filename_stats_total_cache_readaheads) + cache_readaheads = bcache_read_number_with_units(d->bcache_filename_stats_total_cache_readaheads); + + if(d->bcache_filename_cache_read_races) + read_single_number_file(d->bcache_filename_cache_read_races, &cache_read_races); + + if(d->bcache_filename_cache_io_errors) + read_single_number_file(d->bcache_filename_cache_io_errors, &cache_io_errors); + + if(d->bcache_filename_priority_stats && global_bcache_priority_stats_update_every >= 1) + bcache_read_priority_stats(d, family, global_bcache_priority_stats_update_every, dt); + + // update the charts + + { + if(unlikely(!d->st_bcache_hit_ratio)) { + d->st_bcache_hit_ratio = rrdset_create_localhost( + "disk_bcache_hit_ratio" + , d->device + , d->disk + , family + , "disk.bcache_hit_ratio" + , "BCache Cache Hit Ratio" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_HIT_RATIO + , update_every + , RRDSET_TYPE_LINE + ); + + d->rd_bcache_hit_ratio_5min = rrddim_add(d->st_bcache_hit_ratio, "5min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_hit_ratio_1hour = rrddim_add(d->st_bcache_hit_ratio, "1hour", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_hit_ratio_1day = rrddim_add(d->st_bcache_hit_ratio, "1day", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_hit_ratio_total = rrddim_add(d->st_bcache_hit_ratio, "ever", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_bcache_hit_ratio); + + rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_5min, stats_five_minute_cache_hit_ratio); + rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1hour, stats_hour_cache_hit_ratio); + rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1day, stats_day_cache_hit_ratio); + rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_total, stats_total_cache_hit_ratio); + rrdset_done(d->st_bcache_hit_ratio); + } + + { + + if(unlikely(!d->st_bcache_rates)) { + d->st_bcache_rates = rrdset_create_localhost( + "disk_bcache_rates" + , d->device + , d->disk + , family + , "disk.bcache_rates" + , "BCache Rates" + , "KB/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_RATES + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_bcache_rate_congested = rrddim_add(d->st_bcache_rates, "congested", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + d->rd_bcache_rate_writeback = rrddim_add(d->st_bcache_rates, "writeback", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_bcache_rates); + + rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_writeback, writeback_rate); + rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_congested, cache_congested); + rrdset_done(d->st_bcache_rates); + } + + { + if(unlikely(!d->st_bcache_size)) { + d->st_bcache_size = rrdset_create_localhost( + "disk_bcache_size" + , d->device + , d->disk + , family + , "disk.bcache_size" + , "BCache Cache Sizes" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_SIZE + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_bcache_dirty_size = rrddim_add(d->st_bcache_size, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_bcache_size); + + 
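// dirty_data was parsed with bcache_read_number_with_units(), because + // the kernel exposes it human readable (e.g. "7.1M"); it is assumed to + // be normalized to bytes, which the 1024 * 1024 divisor on the "dirty" + // dimension above renders in the MB this chart declares. + + 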
rrddim_set_by_pointer(d->st_bcache_size, d->rd_bcache_dirty_size, dirty_data); + rrdset_done(d->st_bcache_size); + } + + { + if(unlikely(!d->st_bcache_usage)) { + d->st_bcache_usage = rrdset_create_localhost( + "disk_bcache_usage" + , d->device + , d->disk + , family + , "disk.bcache_usage" + , "BCache Cache Usage" + , "percent" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_USAGE + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_bcache_available_percent = rrddim_add(d->st_bcache_usage, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(d->st_bcache_usage); + + rrddim_set_by_pointer(d->st_bcache_usage, d->rd_bcache_available_percent, cache_available_percent); + rrdset_done(d->st_bcache_usage); + } + + { + + if(unlikely(!d->st_bcache_cache_read_races)) { + d->st_bcache_cache_read_races = rrdset_create_localhost( + "disk_bcache_cache_read_races" + , d->device + , d->disk + , family + , "disk.bcache_cache_read_races" + , "BCache Cache Read Races" + , "operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES + , update_every + , RRDSET_TYPE_LINE + ); + + d->rd_bcache_cache_read_races = rrddim_add(d->st_bcache_cache_read_races, "races", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_bcache_cache_io_errors = rrddim_add(d->st_bcache_cache_read_races, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_bcache_cache_read_races); + + rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_read_races, cache_read_races); + rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_io_errors, cache_io_errors); + rrdset_done(d->st_bcache_cache_read_races); + } + + if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_hits != 0 || stats_total_cache_misses != 0 || stats_total_cache_miss_collisions != 0))) { + + if(unlikely(!d->st_bcache)) { + d->st_bcache = rrdset_create_localhost( + "disk_bcache" + , d->device + , d->disk + , family + , "disk.bcache" + , "BCache Cache I/O Operations" + , "operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_OPS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL); + + d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_bcache_readaheads = rrddim_add(d->st_bcache, "readaheads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_bcache); + + rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_hits, stats_total_cache_hits); + rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_misses, stats_total_cache_misses); + rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_miss_collisions, stats_total_cache_miss_collisions); + rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_readaheads, cache_readaheads); + rrdset_done(d->st_bcache); + } + + if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_bypass_hits != 0 || stats_total_cache_bypass_misses != 0))) { + + if(unlikely(!d->st_bcache_bypass)) { + d->st_bcache_bypass = rrdset_create_localhost( + "disk_bcache_bypass" + , d->device + , d->disk + , family + , "disk.bcache_bypass" + , "BCache Cache Bypass I/O 
Operations" + , "operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_BCACHE_BYPASS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL); + + d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_bcache_bypass); + + rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_hits, stats_total_cache_bypass_hits); + rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_misses, stats_total_cache_bypass_misses); + rrdset_done(d->st_bcache_bypass); + } + } + } + + + // ------------------------------------------------------------------------ + // update the system total I/O + + if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && (system_read_kb || system_write_kb))) { + static RRDSET *st_io = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_io)) { + st_io = rrdset_create_localhost( + "system" + , "io" + , NULL + , "disk" + , NULL + , "Disk I/O" + , "kilobytes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_DISKSTATS_NAME + , NETDATA_CHART_PRIO_SYSTEM_IO + , update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_io); + + rrddim_set_by_pointer(st_io, rd_in, system_read_kb); + rrddim_set_by_pointer(st_io, rd_out, system_write_kb); + rrdset_done(st_io); + } + + + // ------------------------------------------------------------------------ + // cleanup removed disks + + struct disk *d = disk_root, *last = NULL; + while(d) { + if(unlikely(global_cleanup_removed_disks && !d->updated)) { + struct disk *t = d; + + rrdset_obsolete_and_pointer_null(d->st_avgsz); + rrdset_obsolete_and_pointer_null(d->st_await); + rrdset_obsolete_and_pointer_null(d->st_backlog); + rrdset_obsolete_and_pointer_null(d->st_io); + rrdset_obsolete_and_pointer_null(d->st_iotime); + rrdset_obsolete_and_pointer_null(d->st_mops); + rrdset_obsolete_and_pointer_null(d->st_ops); + rrdset_obsolete_and_pointer_null(d->st_qops); + rrdset_obsolete_and_pointer_null(d->st_svctm); + rrdset_obsolete_and_pointer_null(d->st_util); + rrdset_obsolete_and_pointer_null(d->st_bcache); + rrdset_obsolete_and_pointer_null(d->st_bcache_bypass); + rrdset_obsolete_and_pointer_null(d->st_bcache_rates); + rrdset_obsolete_and_pointer_null(d->st_bcache_size); + rrdset_obsolete_and_pointer_null(d->st_bcache_usage); + rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio); + + if(d == disk_root) { + disk_root = d = d->next; + last = NULL; + } + else if(last) { + last->next = d = d->next; + } + + freez(t->bcache_filename_dirty_data); + freez(t->bcache_filename_writeback_rate); + freez(t->bcache_filename_cache_congested); + freez(t->bcache_filename_cache_available_percent); + freez(t->bcache_filename_stats_five_minute_cache_hit_ratio); + freez(t->bcache_filename_stats_hour_cache_hit_ratio); + freez(t->bcache_filename_stats_day_cache_hit_ratio); + freez(t->bcache_filename_stats_total_cache_hit_ratio); + freez(t->bcache_filename_stats_total_cache_hits); + freez(t->bcache_filename_stats_total_cache_misses); + freez(t->bcache_filename_stats_total_cache_miss_collisions); + freez(t->bcache_filename_stats_total_cache_bypass_hits); + 
freez(t->bcache_filename_stats_total_cache_bypass_misses); + freez(t->bcache_filename_stats_total_cache_readaheads); + freez(t->bcache_filename_cache_read_races); + freez(t->bcache_filename_cache_io_errors); + freez(t->bcache_filename_priority_stats); + + freez(t->disk); + freez(t->device); + freez(t->mount_point); + freez(t); + } + else { + d->updated = 0; + last = d; + d = d->next; + } + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c new file mode 100644 index 000000000..73b117179 --- /dev/null +++ b/collectors/proc.plugin/proc_interrupts.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_INTERRUPTS_NAME "/proc/interrupts" +#define CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INTERRUPTS_NAME + +#define MAX_INTERRUPT_NAME 50 + +struct cpu_interrupt { + unsigned long long value; + RRDDIM *rd; +}; + +struct interrupt { + int used; + char *id; + char name[MAX_INTERRUPT_NAME + 1]; + RRDDIM *rd; + unsigned long long total; + struct cpu_interrupt cpu[]; +}; + +// since each interrupt is variable in size +// we use this to calculate its record size +#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt))) + +// given a base, get a pointer to each record +#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)]) + +static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) { + static struct interrupt *irrs = NULL; + static size_t allocated = 0; + + if(unlikely(lines != allocated)) { + size_t l; + int c; + + irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus)); + + // reset all interrupt RRDDIM pointers as any line could have shifted + for(l = 0; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + irr->rd = NULL; + irr->name[0] = '\0'; + for(c = 0; c < cpus ;c++) + irr->cpu[c].rd = NULL; + } + + allocated = lines; + } + + return irrs; +} + +int do_proc_interrupts(int update_every, usec_t dt) { + (void)dt; + static procfile *ff = NULL; + static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID; + struct interrupt *irrs = NULL; + + if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID)) + do_per_core = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "interrupts per core", CONFIG_BOOLEAN_AUTO); + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts"); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); + } + if(unlikely(!ff)) + return 1; + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + size_t words = procfile_linewords(ff, 0); + + if(unlikely(!lines)) { + error("Cannot read /proc/interrupts, zero lines reported."); + return 1; + } + + // find how many CPUs are there + if(unlikely(cpus == -1)) { + uint32_t w; + cpus = 0; + for(w = 0; w < words ; w++) { + if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0)) + cpus++; + } + } + + if(unlikely(!cpus)) { + error("PLUGIN: PROC_INTERRUPTS: Cannot find the number of CPUs in /proc/interrupts"); + return 1; + } + + // allocate the size we need; + irrs = get_interrupts_array(lines, cpus); + irrs[0].used = 0; + + // loop 
through all lines + for(l = 1; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + irr->used = 0; + irr->total = 0; + + words = procfile_linewords(ff, l); + if(unlikely(!words)) continue; + + irr->id = procfile_lineword(ff, l, 0); + if(unlikely(!irr->id || !irr->id[0])) continue; + + size_t idlen = strlen(irr->id); + if(irr->id[idlen - 1] == ':') + irr->id[--idlen] = '\0'; + + int c; + for(c = 0; c < cpus ;c++) { + if(likely((c + 1) < (int)words)) + irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1))); + else + irr->cpu[c].value = 0; + + irr->total += irr->cpu[c].value; + } + + if(unlikely(isdigit(irr->id[0]) && (uint32_t)(cpus + 2) < words)) { + strncpyz(irr->name, procfile_lineword(ff, l, words - 1), MAX_INTERRUPT_NAME); + size_t nlen = strlen(irr->name); + if(likely(nlen + 1 + idlen <= MAX_INTERRUPT_NAME)) { + irr->name[nlen] = '_'; + strncpyz(&irr->name[nlen + 1], irr->id, MAX_INTERRUPT_NAME - nlen - 1); + } + else { + irr->name[MAX_INTERRUPT_NAME - idlen - 1] = '_'; + strncpyz(&irr->name[MAX_INTERRUPT_NAME - idlen], irr->id, idlen); + } + } + else { + strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME); + } + + irr->used = 1; + } + + // -------------------------------------------------------------------- + + static RRDSET *st_system_interrupts = NULL; + if(unlikely(!st_system_interrupts)) + st_system_interrupts = rrdset_create_localhost( + "system" + , "interrupts" + , NULL + , "interrupts" + , NULL + , "System interrupts" + , "interrupts/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_INTERRUPTS_NAME + , NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS + , update_every + , RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_system_interrupts); + + for(l = 0; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + if(irr->used && irr->total) { + // some interrupt may have changed without changing the total number of lines + // if the same number of interrupts have been added and removed between two + // calls of this function. 
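+ // e.g. unloading one driver and loading another between two iterations + // can keep procfile_lines() unchanged while line l now belongs to a + // different IRQ; comparing the stored dimension name against the freshly + // built irr->name catches this and re-resolves the RRDDIM pointers.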
+ if(unlikely(!irr->rd || strncmp(irr->rd->name, irr->name, MAX_INTERRUPT_NAME) != 0)) { + irr->rd = rrddim_add(st_system_interrupts, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_set_name(st_system_interrupts, irr->rd, irr->name); + + // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop + if(likely(do_per_core != CONFIG_BOOLEAN_NO)) { + int c; + for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL; + } + } + + rrddim_set_by_pointer(st_system_interrupts, irr->rd, irr->total); + } + } + + rrdset_done(st_system_interrupts); + + // -------------------------------------------------------------------- + + if(likely(do_per_core != CONFIG_BOOLEAN_NO)) { + static RRDSET **core_st = NULL; + static int old_cpus = 0; + + if(old_cpus < cpus) { + core_st = reallocz(core_st, sizeof(RRDSET *) * cpus); + memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus)); + old_cpus = cpus; + } + + int c; + + for(c = 0; c < cpus ;c++) { + if(unlikely(!core_st[c])) { + char id[50+1]; + snprintfz(id, 50, "cpu%d_interrupts", c); + + char title[100+1]; + snprintfz(title, 100, "CPU%d Interrupts", c); + core_st[c] = rrdset_create_localhost( + "cpu" + , id + , NULL + , "interrupts" + , "cpu.interrupts" + , title + , "interrupts/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_INTERRUPTS_NAME + , NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE + c + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(core_st[c]); + + for(l = 0; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) { + if(unlikely(!irr->cpu[c].rd)) { + irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name); + } + + rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value); + } + } + + rrdset_done(core_st[c]); + } + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c new file mode 100644 index 000000000..db95b1689 --- /dev/null +++ b/collectors/proc.plugin/proc_loadavg.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_LOADAVG_NAME "/proc/loadavg" +#define CONFIG_SECTION_PLUGIN_PROC_LOADAVG "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_LOADAVG_NAME + +// linux calculates this once every 5 seconds +#define MIN_LOADAVG_UPDATE_EVERY 5 + +int do_proc_loadavg(int update_every, usec_t dt) { + static procfile *ff = NULL; + static int do_loadavg = -1, do_all_processes = -1; + static usec_t next_loadavg_dt = 0; + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/loadavg"); + + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "filename to monitor", filename), " \t,:|/", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + if(unlikely(do_loadavg == -1)) { + do_loadavg = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable load average", 1); + do_all_processes = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable total processes", 1); + } + + if(unlikely(procfile_lines(ff) < 1)) { + error("/proc/loadavg has no lines."); + return 1; + } + if(unlikely(procfile_linewords(ff, 0) < 6)) { + error("/proc/loadavg has less than 6 words 
in it."); + return 1; + } + + double load1 = strtod(procfile_lineword(ff, 0, 0), NULL); + double load5 = strtod(procfile_lineword(ff, 0, 1), NULL); + double load15 = strtod(procfile_lineword(ff, 0, 2), NULL); + + //unsigned long long running_processes = str2ull(procfile_lineword(ff, 0, 3)); + unsigned long long active_processes = str2ull(procfile_lineword(ff, 0, 4)); + //unsigned long long next_pid = str2ull(procfile_lineword(ff, 0, 5)); + + + // -------------------------------------------------------------------- + + if(next_loadavg_dt <= dt) { + if(likely(do_loadavg)) { + static RRDSET *load_chart = NULL; + static RRDDIM *rd_load1 = NULL, *rd_load5 = NULL, *rd_load15 = NULL; + + if(unlikely(!load_chart)) { + load_chart = rrdset_create_localhost( + "system" + , "load" + , NULL + , "load" + , NULL + , "System Load Average" + , "load" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_LOADAVG_NAME + , NETDATA_CHART_PRIO_SYSTEM_LOAD + , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every + , RRDSET_TYPE_LINE + ); + + rd_load1 = rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_load5 = rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_load15 = rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(load_chart); + + rrddim_set_by_pointer(load_chart, rd_load1, (collected_number) (load1 * 1000)); + rrddim_set_by_pointer(load_chart, rd_load5, (collected_number) (load5 * 1000)); + rrddim_set_by_pointer(load_chart, rd_load15, (collected_number) (load15 * 1000)); + rrdset_done(load_chart); + + next_loadavg_dt = load_chart->update_every * USEC_PER_SEC; + } + else next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC; + } + else next_loadavg_dt -= dt; + + // -------------------------------------------------------------------- + + if(likely(do_all_processes)) { + static RRDSET *processes_chart = NULL; + static RRDDIM *rd_active = NULL; + + if(unlikely(!processes_chart)) { + processes_chart = rrdset_create_localhost( + "system" + , "active_processes" + , NULL + , "processes" + , NULL + , "System Active Processes" + , "processes" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_LOADAVG_NAME + , NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES + , update_every + , RRDSET_TYPE_LINE + ); + + rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(processes_chart); + + rrddim_set_by_pointer(processes_chart, rd_active, active_processes); + rrdset_done(processes_chart); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c new file mode 100644 index 000000000..f77159ebd --- /dev/null +++ b/collectors/proc.plugin/proc_meminfo.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_MEMINFO_NAME "/proc/meminfo" +#define CONFIG_SECTION_PLUGIN_PROC_MEMINFO "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_MEMINFO_NAME + +int do_proc_meminfo(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + static int do_ram = -1, do_swap = -1, do_hwcorrupt = -1, do_committed = -1, do_writeback = -1, do_kernel = -1, do_slab = -1, do_hugepages = -1, do_transparent_hugepages = -1; + + static ARL_BASE *arl_base = NULL; + static ARL_ENTRY *arl_hwcorrupted = NULL, *arl_memavailable = NULL; + + static unsigned long long + MemTotal = 0, + MemFree = 0, + MemAvailable = 0, + Buffers = 0, + Cached = 0, + //SwapCached = 
0, + //Active = 0, + //Inactive = 0, + //ActiveAnon = 0, + //InactiveAnon = 0, + //ActiveFile = 0, + //InactiveFile = 0, + //Unevictable = 0, + //Mlocked = 0, + SwapTotal = 0, + SwapFree = 0, + Dirty = 0, + Writeback = 0, + //AnonPages = 0, + //Mapped = 0, + //Shmem = 0, + Slab = 0, + SReclaimable = 0, + SUnreclaim = 0, + KernelStack = 0, + PageTables = 0, + NFS_Unstable = 0, + Bounce = 0, + WritebackTmp = 0, + //CommitLimit = 0, + Committed_AS = 0, + //VmallocTotal = 0, + VmallocUsed = 0, + //VmallocChunk = 0, + AnonHugePages = 0, + ShmemHugePages = 0, + HugePages_Total = 0, + HugePages_Free = 0, + HugePages_Rsvd = 0, + HugePages_Surp = 0, + Hugepagesize = 0, + //DirectMap4k = 0, + //DirectMap2M = 0, + HardwareCorrupted = 0; + + if(unlikely(!arl_base)) { + do_ram = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system ram", 1); + do_swap = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system swap", CONFIG_BOOLEAN_AUTO); + do_hwcorrupt = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hardware corrupted ECC", CONFIG_BOOLEAN_AUTO); + do_committed = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "committed memory", 1); + do_writeback = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "writeback memory", 1); + do_kernel = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "kernel memory", 1); + do_slab = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "slab memory", 1); + do_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hugepages", CONFIG_BOOLEAN_AUTO); + do_transparent_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "transparent hugepages", CONFIG_BOOLEAN_AUTO); + + arl_base = arl_create("meminfo", NULL, 60); + arl_expect(arl_base, "MemTotal", &MemTotal); + arl_expect(arl_base, "MemFree", &MemFree); + arl_memavailable = arl_expect(arl_base, "MemAvailable", &MemAvailable); + arl_expect(arl_base, "Buffers", &Buffers); + arl_expect(arl_base, "Cached", &Cached); + //arl_expect(arl_base, "SwapCached", &SwapCached); + //arl_expect(arl_base, "Active", &Active); + //arl_expect(arl_base, "Inactive", &Inactive); + //arl_expect(arl_base, "ActiveAnon", &ActiveAnon); + //arl_expect(arl_base, "InactiveAnon", &InactiveAnon); + //arl_expect(arl_base, "ActiveFile", &ActiveFile); + //arl_expect(arl_base, "InactiveFile", &InactiveFile); + //arl_expect(arl_base, "Unevictable", &Unevictable); + //arl_expect(arl_base, "Mlocked", &Mlocked); + arl_expect(arl_base, "SwapTotal", &SwapTotal); + arl_expect(arl_base, "SwapFree", &SwapFree); + arl_expect(arl_base, "Dirty", &Dirty); + arl_expect(arl_base, "Writeback", &Writeback); + //arl_expect(arl_base, "AnonPages", &AnonPages); + //arl_expect(arl_base, "Mapped", &Mapped); + //arl_expect(arl_base, "Shmem", &Shmem); + arl_expect(arl_base, "Slab", &Slab); + arl_expect(arl_base, "SReclaimable", &SReclaimable); + arl_expect(arl_base, "SUnreclaim", &SUnreclaim); + arl_expect(arl_base, "KernelStack", &KernelStack); + arl_expect(arl_base, "PageTables", &PageTables); + arl_expect(arl_base, "NFS_Unstable", &NFS_Unstable); + arl_expect(arl_base, "Bounce", &Bounce); + arl_expect(arl_base, "WritebackTmp", &WritebackTmp); + //arl_expect(arl_base, "CommitLimit", &CommitLimit); + arl_expect(arl_base, "Committed_AS", &Committed_AS); + //arl_expect(arl_base, "VmallocTotal", &VmallocTotal); + arl_expect(arl_base, "VmallocUsed", &VmallocUsed); + //arl_expect(arl_base, "VmallocChunk", &VmallocChunk); + arl_hwcorrupted = arl_expect(arl_base, "HardwareCorrupted", 
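The ARL calls above implement keyword-driven parsing: each arl_expect() registers a /proc/meminfo field name together with a pointer that should receive its value, and the per-line arl_check() during collection routes values to those pointers. A simplified sketch of the idea with a plain linear table (assumption: the real ARL also adapts its internal order to the file's line order so each check typically succeeds immediately, which this sketch omits):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct expected { const char *key; unsigned long long *dst; };

// Route one "key value" pair into the destination registered for that key.
static int fill_expected(struct expected *tbl, size_t n, const char *key, const char *value) {
    size_t i;
    for(i = 0; i < n; i++) {
        if(strcmp(tbl[i].key, key) == 0) {
            *tbl[i].dst = strtoull(value, NULL, 10);
            return 1;   // matched an expected key
        }
    }
    return 0;           // uninteresting key, skipped
}

int main(void) {
    unsigned long long mem_total = 0, mem_free = 0;
    struct expected tbl[] = {
        { "MemTotal", &mem_total },
        { "MemFree",  &mem_free  },
    };

    fill_expected(tbl, 2, "MemTotal", "16332552");   // made-up sample values
    fill_expected(tbl, 2, "MemFree",  "2142960");
    printf("total=%llu kB free=%llu kB\n", mem_total, mem_free);
    return 0;
}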
&HardwareCorrupted); + arl_expect(arl_base, "AnonHugePages", &AnonHugePages); + arl_expect(arl_base, "ShmemHugePages", &ShmemHugePages); + arl_expect(arl_base, "HugePages_Total", &HugePages_Total); + arl_expect(arl_base, "HugePages_Free", &HugePages_Free); + arl_expect(arl_base, "HugePages_Rsvd", &HugePages_Rsvd); + arl_expect(arl_base, "HugePages_Surp", &HugePages_Surp); + arl_expect(arl_base, "Hugepagesize", &Hugepagesize); + //arl_expect(arl_base, "DirectMap4k", &DirectMap4k); + //arl_expect(arl_base, "DirectMap2M", &DirectMap2M); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo"); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + arl_begin(arl_base); + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 2)) continue; + + if(unlikely(arl_check(arl_base, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + // -------------------------------------------------------------------- + + // http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux + unsigned long long MemCached = Cached + Slab; + unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers; + + if(do_ram) { + { + static RRDSET *st_system_ram = NULL; + static RRDDIM *rd_free = NULL, *rd_used = NULL, *rd_cached = NULL, *rd_buffers = NULL; + + if(unlikely(!st_system_ram)) { + st_system_ram = rrdset_create_localhost( + "system" + , "ram" + , NULL + , "ram" + , NULL + , "System RAM" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_SYSTEM_RAM + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_system_ram); + + rrddim_set_by_pointer(st_system_ram, rd_free, MemFree); + rrddim_set_by_pointer(st_system_ram, rd_used, MemUsed); + rrddim_set_by_pointer(st_system_ram, rd_cached, MemCached); + rrddim_set_by_pointer(st_system_ram, rd_buffers, Buffers); + + rrdset_done(st_system_ram); + } + + if(arl_memavailable->flags & ARL_ENTRY_FLAG_FOUND) { + static RRDSET *st_mem_available = NULL; + static RRDDIM *rd_avail = NULL; + + if(unlikely(!st_mem_available)) { + st_mem_available = rrdset_create_localhost( + "mem" + , "available" + , NULL + , "system" + , NULL + , "Available RAM for applications" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE + , update_every + , RRDSET_TYPE_AREA + ); + + rd_avail = rrddim_add(st_mem_available, "MemAvailable", "avail", 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_available); + + rrddim_set_by_pointer(st_mem_available, rd_avail, MemAvailable); + + rrdset_done(st_mem_available); + } + } + + // -------------------------------------------------------------------- + + unsigned long long SwapUsed = SwapTotal - SwapFree; + + if(do_swap 
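The "used" figure charted above is derived, not read: cached memory is taken as Cached + Slab, and used memory is what remains of MemTotal after free memory, caches and buffers are subtracted (see the stackoverflow link in the source). A worked example with made-up /proc/meminfo values, all in KiB:

#include <stdio.h>

int main(void) {
    unsigned long long MemTotal = 16332552, MemFree = 2142960,
                       Cached   = 6234816,  Slab    = 812340,
                       Buffers  = 514104;

    unsigned long long MemCached = Cached + Slab;
    unsigned long long MemUsed   = MemTotal - MemFree - MemCached - Buffers;

    // prints: cached=7047156 kB used=6628332 kB
    printf("cached=%llu kB used=%llu kB\n", MemCached, MemUsed);
    return 0;
}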
== CONFIG_BOOLEAN_YES || SwapTotal || SwapUsed || SwapFree) { + do_swap = CONFIG_BOOLEAN_YES; + + static RRDSET *st_system_swap = NULL; + static RRDDIM *rd_free = NULL, *rd_used = NULL; + + if(unlikely(!st_system_swap)) { + st_system_swap = rrdset_create_localhost( + "system" + , "swap" + , NULL + , "swap" + , NULL + , "System Swap" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_SYSTEM_SWAP + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL); + + rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_system_swap); + + rrddim_set_by_pointer(st_system_swap, rd_used, SwapUsed); + rrddim_set_by_pointer(st_system_swap, rd_free, SwapFree); + + rrdset_done(st_system_swap); + } + + // -------------------------------------------------------------------- + + if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && HardwareCorrupted > 0))) { + do_hwcorrupt = CONFIG_BOOLEAN_YES; + + static RRDSET *st_mem_hwcorrupt = NULL; + static RRDDIM *rd_corrupted = NULL; + + if(unlikely(!st_mem_hwcorrupt)) { + st_mem_hwcorrupt = rrdset_create_localhost( + "mem" + , "hwcorrupt" + , NULL + , "ecc" + , NULL + , "Corrupted Memory, detected by ECC" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_HW + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL); + + rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_hwcorrupt); + + rrddim_set_by_pointer(st_mem_hwcorrupt, rd_corrupted, HardwareCorrupted); + + rrdset_done(st_mem_hwcorrupt); + } + + // -------------------------------------------------------------------- + + if(do_committed) { + static RRDSET *st_mem_committed = NULL; + static RRDDIM *rd_committed = NULL; + + if(unlikely(!st_mem_committed)) { + st_mem_committed = rrdset_create_localhost( + "mem" + , "committed" + , NULL + , "system" + , NULL + , "Committed (Allocated) Memory" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED + , update_every + , RRDSET_TYPE_AREA + ); + + rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL); + + rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_committed); + + rrddim_set_by_pointer(st_mem_committed, rd_committed, Committed_AS); + + rrdset_done(st_mem_committed); + } + + // -------------------------------------------------------------------- + + if(do_writeback) { + static RRDSET *st_mem_writeback = NULL; + static RRDDIM *rd_dirty = NULL, *rd_writeback = NULL, *rd_fusewriteback = NULL, *rd_nfs_writeback = NULL, *rd_bounce = NULL; + + if(unlikely(!st_mem_writeback)) { + st_mem_writeback = rrdset_create_localhost( + "mem" + , "writeback" + , NULL + , "kernel" + , NULL + , "Writeback Memory" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_KERNEL + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL); + + rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, 
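The swap and ECC blocks above show the CONFIG_BOOLEAN_AUTO convention used throughout these collectors: a chart configured as "auto" stays hidden until its metric is first observed non-zero, then latches to "yes" so it keeps updating even if the value later returns to zero (a zero line is more useful than a chart with gaps). The latch in isolation, with enum values assumed for the sketch:

enum { CONFIG_BOOLEAN_NO = 0, CONFIG_BOOLEAN_YES = 1, CONFIG_BOOLEAN_AUTO = 2 };

// Returns 1 when the chart should be rendered this iteration. Once a metric
// has been seen non-zero under "auto", the mode is promoted to "yes" so the
// chart never disappears again.
static int should_render(int *mode, unsigned long long value) {
    if(*mode == CONFIG_BOOLEAN_YES || (*mode == CONFIG_BOOLEAN_AUTO && value != 0)) {
        *mode = CONFIG_BOOLEAN_YES;
        return 1;
    }
    return 0;
}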
RRD_ALGORITHM_ABSOLUTE); + rd_fusewriteback = rrddim_add(st_mem_writeback, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_nfs_writeback = rrddim_add(st_mem_writeback, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_bounce = rrddim_add(st_mem_writeback, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_writeback); + + rrddim_set_by_pointer(st_mem_writeback, rd_dirty, Dirty); + rrddim_set_by_pointer(st_mem_writeback, rd_writeback, Writeback); + rrddim_set_by_pointer(st_mem_writeback, rd_fusewriteback, WritebackTmp); + rrddim_set_by_pointer(st_mem_writeback, rd_nfs_writeback, NFS_Unstable); + rrddim_set_by_pointer(st_mem_writeback, rd_bounce, Bounce); + + rrdset_done(st_mem_writeback); + } + + // -------------------------------------------------------------------- + + if(do_kernel) { + static RRDSET *st_mem_kernel = NULL; + static RRDDIM *rd_slab = NULL, *rd_kernelstack = NULL, *rd_pagetables = NULL, *rd_vmallocused = NULL; + + if(unlikely(!st_mem_kernel)) { + st_mem_kernel = rrdset_create_localhost( + "mem" + , "kernel" + , NULL + , "kernel" + , NULL + , "Memory Used by Kernel" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_KERNEL + 1 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL); + + rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_vmallocused = rrddim_add(st_mem_kernel, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_kernel); + + rrddim_set_by_pointer(st_mem_kernel, rd_slab, Slab); + rrddim_set_by_pointer(st_mem_kernel, rd_kernelstack, KernelStack); + rrddim_set_by_pointer(st_mem_kernel, rd_pagetables, PageTables); + rrddim_set_by_pointer(st_mem_kernel, rd_vmallocused, VmallocUsed); + + rrdset_done(st_mem_kernel); + } + + // -------------------------------------------------------------------- + + if(do_slab) { + static RRDSET *st_mem_slab = NULL; + static RRDDIM *rd_reclaimable = NULL, *rd_unreclaimable = NULL; + + if(unlikely(!st_mem_slab)) { + st_mem_slab = rrdset_create_localhost( + "mem" + , "slab" + , NULL + , "slab" + , NULL + , "Reclaimable Kernel Memory" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_SLAB + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL); + + rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_slab); + + rrddim_set_by_pointer(st_mem_slab, rd_reclaimable, SReclaimable); + rrddim_set_by_pointer(st_mem_slab, rd_unreclaimable, SUnreclaim); + + rrdset_done(st_mem_slab); + } + + // -------------------------------------------------------------------- + + if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && Hugepagesize != 0 && HugePages_Total != 0)) { + do_hugepages = CONFIG_BOOLEAN_YES; + + static RRDSET *st_mem_hugepages = NULL; + static RRDDIM *rd_used = NULL, *rd_free = NULL, *rd_rsvd = NULL, *rd_surp = NULL; + + if(unlikely(!st_mem_hugepages)) { + st_mem_hugepages = rrdset_create_localhost( + "mem" + , "hugepages" + , NULL + , "hugepages" + , NULL 
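The memory dimensions above are registered with multiplier 1 and divisor 1024 because /proc/meminfo reports KiB and these charts are labeled in MB: as the rrddim_add() calls in this file use it, a dimension is rendered as collected_value * multiplier / divisor. The hugepage dimensions just below use Hugepagesize as the multiplier instead, since those counters are page counts rather than KiB. A worked example with made-up numbers:

#include <stdio.h>

// displayed = collected * multiplier / divisor
int main(void) {
    long long Hugepagesize   = 2048;    // KiB per huge page (2 MiB pages)
    long long HugePages_Free = 512;     // collected value: a page count

    // rrddim_add(..., Hugepagesize, 1024, ...) therefore renders:
    long long displayed = HugePages_Free * Hugepagesize / 1024;
    printf("%lld MB free\n", displayed);   // 512 * 2048 / 1024 = 1024
    return 0;
}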
+ , "Dedicated HugePages Memory" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL); + + rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_rsvd = rrddim_add(st_mem_hugepages, "reserved", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_hugepages); + + rrddim_set_by_pointer(st_mem_hugepages, rd_used, HugePages_Total - HugePages_Free - HugePages_Rsvd); + rrddim_set_by_pointer(st_mem_hugepages, rd_free, HugePages_Free); + rrddim_set_by_pointer(st_mem_hugepages, rd_rsvd, HugePages_Rsvd); + rrddim_set_by_pointer(st_mem_hugepages, rd_surp, HugePages_Surp); + + rrdset_done(st_mem_hugepages); + } + + // -------------------------------------------------------------------- + + if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && (AnonHugePages != 0 || ShmemHugePages != 0))) { + do_transparent_hugepages = CONFIG_BOOLEAN_YES; + + static RRDSET *st_mem_transparent_hugepages = NULL; + static RRDDIM *rd_anonymous = NULL, *rd_shared = NULL; + + if(unlikely(!st_mem_transparent_hugepages)) { + st_mem_transparent_hugepages = rrdset_create_localhost( + "mem" + , "transparent_hugepages" + , NULL + , "hugepages" + , NULL + , "Transparent HugePages Memory" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MEMINFO_NAME + , NETDATA_CHART_PRIO_MEM_HUGEPAGES + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL); + + rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_mem_transparent_hugepages); + + rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_anonymous, AnonHugePages); + rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_shared, ShmemHugePages); + + rrdset_done(st_mem_transparent_hugepages); + } + + return 0; +} + diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c new file mode 100644 index 000000000..97cbc060a --- /dev/null +++ b/collectors/proc.plugin/proc_net_dev.c @@ -0,0 +1,912 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev" +#define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME + +// ---------------------------------------------------------------------------- +// netdev list + +static struct netdev { + char *name; + uint32_t hash; + size_t len; + + // flags + int virtual; + int configured; + int enabled; + int updated; + + int do_bandwidth; + int do_packets; + int do_errors; + int do_drops; + int do_fifo; + int do_compressed; + int do_events; + + const char *chart_type_net_bytes; + const char *chart_type_net_packets; + const char *chart_type_net_errors; + const char *chart_type_net_fifo; + const char *chart_type_net_events; + const char *chart_type_net_drops; + const char *chart_type_net_compressed; + + const char *chart_id_net_bytes; + const char *chart_id_net_packets; + const 
char *chart_id_net_errors; + const char *chart_id_net_fifo; + const char *chart_id_net_events; + const char *chart_id_net_drops; + const char *chart_id_net_compressed; + + const char *chart_family; + + int flipped; + unsigned long priority; + + // data collected + kernel_uint_t rbytes; + kernel_uint_t rpackets; + kernel_uint_t rerrors; + kernel_uint_t rdrops; + kernel_uint_t rfifo; + kernel_uint_t rframe; + kernel_uint_t rcompressed; + kernel_uint_t rmulticast; + + kernel_uint_t tbytes; + kernel_uint_t tpackets; + kernel_uint_t terrors; + kernel_uint_t tdrops; + kernel_uint_t tfifo; + kernel_uint_t tcollisions; + kernel_uint_t tcarrier; + kernel_uint_t tcompressed; + kernel_uint_t speed_max; + + // charts + RRDSET *st_bandwidth; + RRDSET *st_packets; + RRDSET *st_errors; + RRDSET *st_drops; + RRDSET *st_fifo; + RRDSET *st_compressed; + RRDSET *st_events; + + // dimensions + RRDDIM *rd_rbytes; + RRDDIM *rd_rpackets; + RRDDIM *rd_rerrors; + RRDDIM *rd_rdrops; + RRDDIM *rd_rfifo; + RRDDIM *rd_rframe; + RRDDIM *rd_rcompressed; + RRDDIM *rd_rmulticast; + + RRDDIM *rd_tbytes; + RRDDIM *rd_tpackets; + RRDDIM *rd_terrors; + RRDDIM *rd_tdrops; + RRDDIM *rd_tfifo; + RRDDIM *rd_tcollisions; + RRDDIM *rd_tcarrier; + RRDDIM *rd_tcompressed; + + struct netdev *next; +} *netdev_root = NULL, *netdev_last_used = NULL; + +static size_t netdev_added = 0, netdev_found = 0; + +// ---------------------------------------------------------------------------- + +static void netdev_charts_release(struct netdev *d) { + if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth); + if(d->st_packets) rrdset_is_obsolete(d->st_packets); + if(d->st_errors) rrdset_is_obsolete(d->st_errors); + if(d->st_drops) rrdset_is_obsolete(d->st_drops); + if(d->st_fifo) rrdset_is_obsolete(d->st_fifo); + if(d->st_compressed) rrdset_is_obsolete(d->st_compressed); + if(d->st_events) rrdset_is_obsolete(d->st_events); + + d->st_bandwidth = NULL; + d->st_compressed = NULL; + d->st_drops = NULL; + d->st_errors = NULL; + d->st_events = NULL; + d->st_fifo = NULL; + d->st_packets = NULL; + + d->rd_rbytes = NULL; + d->rd_rpackets = NULL; + d->rd_rerrors = NULL; + d->rd_rdrops = NULL; + d->rd_rfifo = NULL; + d->rd_rframe = NULL; + d->rd_rcompressed = NULL; + d->rd_rmulticast = NULL; + + d->rd_tbytes = NULL; + d->rd_tpackets = NULL; + d->rd_terrors = NULL; + d->rd_tdrops = NULL; + d->rd_tfifo = NULL; + d->rd_tcollisions = NULL; + d->rd_tcarrier = NULL; + d->rd_tcompressed = NULL; +} + +static void netdev_free_strings(struct netdev *d) { + freez((void *)d->chart_type_net_bytes); + freez((void *)d->chart_type_net_compressed); + freez((void *)d->chart_type_net_drops); + freez((void *)d->chart_type_net_errors); + freez((void *)d->chart_type_net_events); + freez((void *)d->chart_type_net_fifo); + freez((void *)d->chart_type_net_packets); + + freez((void *)d->chart_id_net_bytes); + freez((void *)d->chart_id_net_compressed); + freez((void *)d->chart_id_net_drops); + freez((void *)d->chart_id_net_errors); + freez((void *)d->chart_id_net_events); + freez((void *)d->chart_id_net_fifo); + freez((void *)d->chart_id_net_packets); + + freez((void *)d->chart_family); +} + +static void netdev_free(struct netdev *d) { + netdev_charts_release(d); + netdev_free_strings(d); + + freez((void *)d->name); + freez((void *)d); + netdev_added--; +} + + +// ---------------------------------------------------------------------------- +// netdev renames + +static struct netdev_rename { + const char *host_device; + uint32_t hash; + + const char *container_device; + const char 
*container_name; + + int processed; + + struct netdev_rename *next; +} *netdev_rename_root = NULL; + +static int netdev_pending_renames = 0; +static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER; + +static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) { + struct netdev_rename *r; + + for(r = netdev_rename_root; r ; r = r->next) + if(r->hash == hash && !strcmp(host_device, r->host_device)) + return r; + + return NULL; +} + +// other threads can call this function to register a rename to a netdev +void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name) { + netdata_mutex_lock(&netdev_rename_mutex); + + uint32_t hash = simple_hash(host_device); + struct netdev_rename *r = netdev_rename_find(host_device, hash); + if(!r) { + r = callocz(1, sizeof(struct netdev_rename)); + r->host_device = strdupz(host_device); + r->container_device = strdupz(container_device); + r->container_name = strdupz(container_name); + r->hash = hash; + r->next = netdev_rename_root; + r->processed = 0; + netdev_rename_root = r; + netdev_pending_renames++; + info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); + } + else { + if(strcmp(r->container_device, container_device) != 0 || strcmp(r->container_name, container_name) != 0) { + freez((void *) r->container_device); + freez((void *) r->container_name); + + r->container_device = strdupz(container_device); + r->container_name = strdupz(container_name); + r->processed = 0; + netdev_pending_renames++; + info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); + } + } + + netdata_mutex_unlock(&netdev_rename_mutex); +} + +// other threads can call this function to delete a rename to a netdev +void netdev_rename_device_del(const char *host_device) { + netdata_mutex_lock(&netdev_rename_mutex); + + struct netdev_rename *r, *last = NULL; + + uint32_t hash = simple_hash(host_device); + for(r = netdev_rename_root; r ; last = r, r = r->next) { + if (r->hash == hash && !strcmp(host_device, r->host_device)) { + if (netdev_rename_root == r) + netdev_rename_root = r->next; + else if (last) + last->next = r->next; + + if(!r->processed) + netdev_pending_renames--; + + info("CGROUP: unregistered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); + + freez((void *) r->host_device); + freez((void *) r->container_name); + freez((void *) r->container_device); + freez((void *) r); + break; + } + } + + netdata_mutex_unlock(&netdev_rename_mutex); +} + +static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *r) { + info("CGROUP: renaming network interface '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); + + netdev_charts_release(d); + netdev_free_strings(d); + + char buffer[RRD_ID_LENGTH_MAX + 1]; + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "cgroup_%s", r->container_name); + d->chart_type_net_bytes = strdupz(buffer); + d->chart_type_net_compressed = strdupz(buffer); + d->chart_type_net_drops = strdupz(buffer); + d->chart_type_net_errors = strdupz(buffer); + d->chart_type_net_events = strdupz(buffer); + d->chart_type_net_fifo = strdupz(buffer); + d->chart_type_net_packets = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_%s", r->container_device); + d->chart_id_net_bytes = strdupz(buffer); + + 
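A usage sketch for the registry above, as another collector would drive it (the interface and container names are made up): the call only records the request under the mutex, and the /proc/net/dev thread applies it on its next pass through netdev_rename_all_lock().

// In the cgroups collector, when a container's veth pair is discovered:
netdev_rename_device_add("veth1a2b3c", "eth0", "mycontainer");

// ...and when the container goes away, the pending or applied rename is dropped:
netdev_rename_device_del("veth1a2b3c");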
snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_compressed_%s", r->container_device); + d->chart_id_net_compressed = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_drops_%s", r->container_device); + d->chart_id_net_drops = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_errors_%s", r->container_device); + d->chart_id_net_errors = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_events_%s", r->container_device); + d->chart_id_net_events = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_fifo_%s", r->container_device); + d->chart_id_net_fifo = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_packets_%s", r->container_device); + d->chart_id_net_packets = strdupz(buffer); + + snprintfz(buffer, RRD_ID_LENGTH_MAX, "net %s", r->container_device); + d->chart_family = strdupz(buffer); + + d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE; + d->flipped = 1; +} + +static inline void netdev_rename(struct netdev *d) { + struct netdev_rename *r = netdev_rename_find(d->name, d->hash); + if(unlikely(r && !r->processed)) { + netdev_rename_cgroup(d, r); + r->processed = 1; + netdev_pending_renames--; + } +} + +static inline void netdev_rename_lock(struct netdev *d) { + netdata_mutex_lock(&netdev_rename_mutex); + netdev_rename(d); + netdata_mutex_unlock(&netdev_rename_mutex); +} + +static inline void netdev_rename_all_lock(void) { + netdata_mutex_lock(&netdev_rename_mutex); + + struct netdev *d; + for(d = netdev_root; d ; d = d->next) + netdev_rename(d); + + netdev_pending_renames = 0; + netdata_mutex_unlock(&netdev_rename_mutex); +} + +// ---------------------------------------------------------------------------- +// netdev data collection + +static void netdev_cleanup() { + if(likely(netdev_found == netdev_added)) return; + + netdev_added = 0; + struct netdev *d = netdev_root, *last = NULL; + while(d) { + if(unlikely(!d->updated)) { + // info("Removing network device '%s', linked after '%s'", d->name, last?last->name:"ROOT"); + + if(netdev_last_used == d) + netdev_last_used = last; + + struct netdev *t = d; + + if(d == netdev_root || !last) + netdev_root = d = d->next; + + else + last->next = d = d->next; + + t->next = NULL; + netdev_free(t); + } + else { + netdev_added++; + last = d; + d->updated = 0; + d = d->next; + } + } +} + +static struct netdev *get_netdev(const char *name) { + struct netdev *d; + + uint32_t hash = simple_hash(name); + + // search it, from the last position to the end + for(d = netdev_last_used ; d ; d = d->next) { + if(unlikely(hash == d->hash && !strcmp(name, d->name))) { + netdev_last_used = d->next; + return d; + } + } + + // search it from the beginning to the last position we used + for(d = netdev_root ; d != netdev_last_used ; d = d->next) { + if(unlikely(hash == d->hash && !strcmp(name, d->name))) { + netdev_last_used = d->next; + return d; + } + } + + // create a new one + d = callocz(1, sizeof(struct netdev)); + d->name = strdupz(name); + d->hash = simple_hash(d->name); + d->len = strlen(d->name); + + d->chart_type_net_bytes = strdupz("net"); + d->chart_type_net_compressed = strdupz("net_compressed"); + d->chart_type_net_drops = strdupz("net_drops"); + d->chart_type_net_errors = strdupz("net_errors"); + d->chart_type_net_events = strdupz("net_events"); + d->chart_type_net_fifo = strdupz("net_fifo"); + d->chart_type_net_packets = strdupz("net_packets"); + + d->chart_id_net_bytes = strdupz(d->name); + d->chart_id_net_compressed = strdupz(d->name); + d->chart_id_net_drops = strdupz(d->name); + 
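The lookup loops in get_netdev() above start from where the previous call stopped: /proc/net/dev lists interfaces in a stable order, so after the first pass each per-line lookup hits on its first comparison instead of walking the whole list. The two-phase scan in isolation, with simplified types:

#include <string.h>

struct node { const char *name; struct node *next; };

static struct node *root = NULL, *last_used = NULL;

// O(1) in the common case because consecutive lookups follow the file order.
struct node *find(const char *name) {
    struct node *p;

    // from the last position to the end...
    for(p = last_used; p; p = p->next)
        if(strcmp(p->name, name) == 0) { last_used = p->next; return p; }

    // ...then wrap around from the beginning to where we started
    for(p = root; p != last_used; p = p->next)
        if(strcmp(p->name, name) == 0) { last_used = p->next; return p; }

    return NULL;   // not found: the caller allocates and links a new node
}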
d->chart_id_net_errors = strdupz(d->name); + d->chart_id_net_events = strdupz(d->name); + d->chart_id_net_fifo = strdupz(d->name); + d->chart_id_net_packets = strdupz(d->name); + + d->chart_family = strdupz(d->name); + d->priority = NETDATA_CHART_PRIO_FIRST_NET_IFACE; + + netdev_rename_lock(d); + + netdev_added++; + + // link it to the end + if(netdev_root) { + struct netdev *e; + for(e = netdev_root; e->next ; e = e->next) ; + e->next = d; + } + else + netdev_root = d; + + return d; +} + +int do_proc_net_dev(int update_every, usec_t dt) { + (void)dt; + static SIMPLE_PATTERN *disabled_list = NULL; + static procfile *ff = NULL; + static int enable_new_interfaces = -1; + static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1, do_events = -1; + static char *path_to_sys_devices_virtual_net = NULL; + static char *path_to_sys_net_speed = NULL; + + if(unlikely(enable_new_interfaces == -1)) { + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/net/%s"); + path_to_sys_devices_virtual_net = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get virtual interfaces", filename); + + enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO); + + do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "bandwidth for all interfaces", CONFIG_BOOLEAN_AUTO); + do_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "packets for all interfaces", CONFIG_BOOLEAN_AUTO); + do_errors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "errors for all interfaces", CONFIG_BOOLEAN_AUTO); + do_drops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "drops for all interfaces", CONFIG_BOOLEAN_AUTO); + do_fifo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "fifo for all interfaces", CONFIG_BOOLEAN_AUTO); + do_compressed = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "compressed packets for all interfaces", CONFIG_BOOLEAN_AUTO); + do_events = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "frames, collisions, carrier counters for all interfaces", CONFIG_BOOLEAN_AUTO); + + disabled_list = simple_pattern_create(config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "disable by default interfaces matching", "lo fireqos* *-ifb"), NULL, SIMPLE_PATTERN_EXACT); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, (*netdata_configured_host_prefix)?"/proc/1/net/dev":"/proc/net/dev"); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + // rename all the devices, if we have pending renames + if(unlikely(netdev_pending_renames)) + netdev_rename_all_lock(); + + netdev_found = 0; + + kernel_uint_t system_rbytes = 0; + kernel_uint_t system_tbytes = 0; + + size_t lines = procfile_lines(ff), l; + for(l = 2; l < lines ;l++) { + // require 17 words on each line + if(unlikely(procfile_linewords(ff, l) < 17)) continue; + + struct netdev *d = get_netdev(procfile_lineword(ff, l, 0)); + d->updated = 1; + netdev_found++; + + if(unlikely(!d->configured)) { + // this is the first 
time we see this interface + + // remember we configured it + d->configured = 1; + + d->enabled = enable_new_interfaces; + + if(d->enabled) + d->enabled = !simple_pattern_matches(disabled_list, d->name); + + char buffer[FILENAME_MAX + 1]; + + snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name); + if(likely(access(buffer, R_OK) == 0)) { + d->virtual = 1; + } + else + d->virtual = 0; + + // set nic speed if present + if(likely(!d->virtual)) { + snprintfz(buffer, FILENAME_MAX, "%s/sys/class/net/%s/speed", netdata_configured_host_prefix, d->name); + path_to_sys_net_speed = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device speed", buffer); + int ret = read_single_number_file(path_to_sys_net_speed, (unsigned long long*)&d->speed_max); + if(ret) error("Cannot read '%s'.", path_to_sys_net_speed); + } + + snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name); + d->enabled = config_get_boolean_ondemand(buffer, "enabled", d->enabled); + d->virtual = config_get_boolean(buffer, "virtual", d->virtual); + + if(d->enabled == CONFIG_BOOLEAN_NO) + continue; + + d->do_bandwidth = config_get_boolean_ondemand(buffer, "bandwidth", do_bandwidth); + d->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets); + d->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors); + d->do_drops = config_get_boolean_ondemand(buffer, "drops", do_drops); + d->do_fifo = config_get_boolean_ondemand(buffer, "fifo", do_fifo); + d->do_compressed = config_get_boolean_ondemand(buffer, "compressed", do_compressed); + d->do_events = config_get_boolean_ondemand(buffer, "events", do_events); + } + + if(unlikely(!d->enabled)) + continue; + + if(likely(d->do_bandwidth != CONFIG_BOOLEAN_NO || !d->virtual)) { + d->rbytes = str2kernel_uint_t(procfile_lineword(ff, l, 1)); + d->tbytes = str2kernel_uint_t(procfile_lineword(ff, l, 9)); + + if(likely(!d->virtual)) { + system_rbytes += d->rbytes; + system_tbytes += d->tbytes; + } + } + + if(likely(d->do_packets != CONFIG_BOOLEAN_NO)) { + d->rpackets = str2kernel_uint_t(procfile_lineword(ff, l, 2)); + d->rmulticast = str2kernel_uint_t(procfile_lineword(ff, l, 8)); + d->tpackets = str2kernel_uint_t(procfile_lineword(ff, l, 10)); + } + + if(likely(d->do_errors != CONFIG_BOOLEAN_NO)) { + d->rerrors = str2kernel_uint_t(procfile_lineword(ff, l, 3)); + d->terrors = str2kernel_uint_t(procfile_lineword(ff, l, 11)); + } + + if(likely(d->do_drops != CONFIG_BOOLEAN_NO)) { + d->rdrops = str2kernel_uint_t(procfile_lineword(ff, l, 4)); + d->tdrops = str2kernel_uint_t(procfile_lineword(ff, l, 12)); + } + + if(likely(d->do_fifo != CONFIG_BOOLEAN_NO)) { + d->rfifo = str2kernel_uint_t(procfile_lineword(ff, l, 5)); + d->tfifo = str2kernel_uint_t(procfile_lineword(ff, l, 13)); + } + + if(likely(d->do_compressed != CONFIG_BOOLEAN_NO)) { + d->rcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 7)); + d->tcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 16)); + } + + if(likely(d->do_events != CONFIG_BOOLEAN_NO)) { + d->rframe = str2kernel_uint_t(procfile_lineword(ff, l, 6)); + d->tcollisions = str2kernel_uint_t(procfile_lineword(ff, l, 14)); + d->tcarrier = str2kernel_uint_t(procfile_lineword(ff, l, 15)); + } + + // -------------------------------------------------------------------- + + if(unlikely((d->do_bandwidth == CONFIG_BOOLEAN_AUTO && (d->rbytes || d->tbytes)))) + d->do_bandwidth = CONFIG_BOOLEAN_YES; + + if(d->do_bandwidth == CONFIG_BOOLEAN_YES) { + if(unlikely(!d->st_bandwidth)) { + + d->st_bandwidth = 
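The word indices parsed above follow the fixed /proc/net/dev column layout: after the interface name come eight receive counters (bytes, packets, errs, drop, fifo, frame, compressed, multicast) and eight transmit counters (bytes, packets, errs, drop, fifo, colls, carrier, compressed), which is why the code requires 17 words per line and skips the two header lines. A standalone sketch of the same layout, assuming plain stdio:

#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/net/dev", "r");
    if(!fp) return 1;

    char name[64], line[512];
    unsigned long long c[16];

    // skip the two header lines (the collector above starts at l = 2)
    if(!fgets(line, sizeof(line), fp) || !fgets(line, sizeof(line), fp)) { fclose(fp); return 1; }

    while(fgets(line, sizeof(line), fp)) {
        if(sscanf(line,
                  " %63[^:]: %llu %llu %llu %llu %llu %llu %llu %llu"
                  " %llu %llu %llu %llu %llu %llu %llu %llu",
                  name, &c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6], &c[7],
                  &c[8], &c[9], &c[10], &c[11], &c[12], &c[13], &c[14], &c[15]) == 17)
            printf("%s: rx %llu bytes, tx %llu bytes\n", name, c[0], c[8]);
    }

    fclose(fp);
    return 0;
}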
rrdset_create_localhost(
+                        d->chart_type_net_bytes
+                        , d->chart_id_net_bytes
+                        , NULL
+                        , d->chart_family
+                        , "net.net"
+                        , "Bandwidth"
+                        , "kilobits/s"
+                        , PLUGIN_PROC_NAME
+                        , PLUGIN_PROC_MODULE_NETDEV_NAME
+                        , d->priority
+                        , update_every
+                        , RRDSET_TYPE_AREA
+                );
+
+                RRDSETVAR *nic_speed_max = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "nic_speed_max");
+                if(nic_speed_max) rrdsetvar_custom_chart_variable_set(nic_speed_max, (calculated_number)d->speed_max);
+
+                d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+                if(d->flipped) {
+                    // flip receive/transmit
+
+                    RRDDIM *td = d->rd_rbytes;
+                    d->rd_rbytes = d->rd_tbytes;
+                    d->rd_tbytes = td;
+                }
+            }
+            else rrdset_next(d->st_bandwidth);
+
+            rrddim_set_by_pointer(d->st_bandwidth, d->rd_rbytes, (collected_number)d->rbytes);
+            rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes);
+            rrdset_done(d->st_bandwidth);
+        }
+
+        // --------------------------------------------------------------------
+
+        if(unlikely((d->do_packets == CONFIG_BOOLEAN_AUTO && (d->rpackets || d->tpackets || d->rmulticast))))
+            d->do_packets = CONFIG_BOOLEAN_YES;
+
+        if(d->do_packets == CONFIG_BOOLEAN_YES) {
+            if(unlikely(!d->st_packets)) {
+
+                d->st_packets = rrdset_create_localhost(
+                        d->chart_type_net_packets
+                        , d->chart_id_net_packets
+                        , NULL
+                        , d->chart_family
+                        , "net.packets"
+                        , "Packets"
+                        , "packets/s"
+                        , PLUGIN_PROC_NAME
+                        , PLUGIN_PROC_MODULE_NETDEV_NAME
+                        , d->priority + 1
+                        , update_every
+                        , RRDSET_TYPE_LINE
+                );
+
+                rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
+
+                d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                if(d->flipped) {
+                    // flip receive/transmit
+
+                    RRDDIM *td = d->rd_rpackets;
+                    d->rd_rpackets = d->rd_tpackets;
+                    d->rd_tpackets = td;
+                }
+            }
+            else rrdset_next(d->st_packets);
+
+            rrddim_set_by_pointer(d->st_packets, d->rd_rpackets, (collected_number)d->rpackets);
+            rrddim_set_by_pointer(d->st_packets, d->rd_tpackets, (collected_number)d->tpackets);
+            rrddim_set_by_pointer(d->st_packets, d->rd_rmulticast, (collected_number)d->rmulticast);
+            rrdset_done(d->st_packets);
+        }
+
+        // --------------------------------------------------------------------
+
+        if(unlikely((d->do_errors == CONFIG_BOOLEAN_AUTO && (d->rerrors || d->terrors))))
+            d->do_errors = CONFIG_BOOLEAN_YES;
+
+        if(d->do_errors == CONFIG_BOOLEAN_YES) {
+            if(unlikely(!d->st_errors)) {
+
+                d->st_errors = rrdset_create_localhost(
+                        d->chart_type_net_errors
+                        , d->chart_id_net_errors
+                        , NULL
+                        , d->chart_family
+                        , "net.errors"
+                        , "Interface Errors"
+                        , "errors/s"
+                        , PLUGIN_PROC_NAME
+                        , PLUGIN_PROC_MODULE_NETDEV_NAME
+                        , d->priority + 2
+                        , update_every
+                        , RRDSET_TYPE_LINE
+                );
+
+                rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
+
+                d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                if(d->flipped) {
+                    // flip receive/transmit
+
+                    RRDDIM *td = d->rd_rerrors;
+                    d->rd_rerrors = d->rd_terrors;
+                    d->rd_terrors = td;
+                }
+            }
+            else rrdset_next(d->st_errors);
+
+            rrddim_set_by_pointer(d->st_errors, d->rd_rerrors, (collected_number)d->rerrors);
+            rrddim_set_by_pointer(d->st_errors, d->rd_terrors, (collected_number)d->terrors);
+            rrdset_done(d->st_errors);
+        }
+
+        // --------------------------------------------------------------------
+
+        if(unlikely((d->do_drops == CONFIG_BOOLEAN_AUTO && (d->rdrops || d->tdrops))))
+            d->do_drops = CONFIG_BOOLEAN_YES;
+
+        if(d->do_drops == CONFIG_BOOLEAN_YES) {
+            if(unlikely(!d->st_drops)) {
+
+                d->st_drops = rrdset_create_localhost(
+                        d->chart_type_net_drops
+                        , d->chart_id_net_drops
+                        , NULL
+                        , d->chart_family
+                        , "net.drops"
+                        , "Interface Drops"
+                        , "drops/s"
+                        , PLUGIN_PROC_NAME
+                        , PLUGIN_PROC_MODULE_NETDEV_NAME
+                        , d->priority + 3
+                        , update_every
+                        , RRDSET_TYPE_LINE
+                );
+
+                rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
+
+                d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                if(d->flipped) {
+                    // flip receive/transmit
+
+                    RRDDIM *td = d->rd_rdrops;
+                    d->rd_rdrops = d->rd_tdrops;
+                    d->rd_tdrops = td;
+                }
+            }
+            else rrdset_next(d->st_drops);
+
+            rrddim_set_by_pointer(d->st_drops, d->rd_rdrops, (collected_number)d->rdrops);
+            rrddim_set_by_pointer(d->st_drops, d->rd_tdrops, (collected_number)d->tdrops);
+            rrdset_done(d->st_drops);
+        }
+
+        // --------------------------------------------------------------------
+
+        if(unlikely((d->do_fifo == CONFIG_BOOLEAN_AUTO && (d->rfifo || d->tfifo))))
+            d->do_fifo = CONFIG_BOOLEAN_YES;
+
+        if(d->do_fifo == CONFIG_BOOLEAN_YES) {
+            if(unlikely(!d->st_fifo)) {
+
+                d->st_fifo = rrdset_create_localhost(
+                        d->chart_type_net_fifo
+                        , d->chart_id_net_fifo
+                        , NULL
+                        , d->chart_family
+                        , "net.fifo"
+                        , "Interface FIFO Buffer Errors"
+                        , "errors"
+                        , PLUGIN_PROC_NAME
+                        , PLUGIN_PROC_MODULE_NETDEV_NAME
+                        , d->priority + 4
+                        , update_every
+                        , RRDSET_TYPE_LINE
+                );
+
+                rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
+
+                d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                if(d->flipped) {
+                    // flip receive/transmit
+
+                    RRDDIM *td = d->rd_rfifo;
+                    d->rd_rfifo = d->rd_tfifo;
+                    d->rd_tfifo = td;
+                }
+            }
+            else rrdset_next(d->st_fifo);
+
+            rrddim_set_by_pointer(d->st_fifo, d->rd_rfifo, (collected_number)d->rfifo);
+            rrddim_set_by_pointer(d->st_fifo, d->rd_tfifo, (collected_number)d->tfifo);
+            rrdset_done(d->st_fifo);
+        }
+
+        // --------------------------------------------------------------------
+
+        if(unlikely((d->do_compressed == CONFIG_BOOLEAN_AUTO && (d->rcompressed || d->tcompressed))))
+            d->do_compressed = CONFIG_BOOLEAN_YES;
+
+        if(d->do_compressed == CONFIG_BOOLEAN_YES) {
+            if(unlikely(!d->st_compressed)) {
+
+                d->st_compressed = rrdset_create_localhost(
+                        d->chart_type_net_compressed
+                        , d->chart_id_net_compressed
+                        , NULL
+                        , d->chart_family
+                        , "net.compressed"
+                        , "Compressed Packets"
+                        , "packets/s"
+                        , PLUGIN_PROC_NAME
+                        , PLUGIN_PROC_MODULE_NETDEV_NAME
+                        , d->priority + 5
+                        , update_every
+                        , RRDSET_TYPE_LINE
+                );
+
+                rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
+
+                d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+                d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                if(d->flipped) {
+                    // flip receive/transmit
+
+                    RRDDIM *td = d->rd_rcompressed;
+                    d->rd_rcompressed = d->rd_tcompressed;
+                    d->rd_tcompressed = td;
+                }
+            }
+            else
rrdset_next(d->st_compressed); + + rrddim_set_by_pointer(d->st_compressed, d->rd_rcompressed, (collected_number)d->rcompressed); + rrddim_set_by_pointer(d->st_compressed, d->rd_tcompressed, (collected_number)d->tcompressed); + rrdset_done(d->st_compressed); + } + + // -------------------------------------------------------------------- + + if(unlikely((d->do_events == CONFIG_BOOLEAN_AUTO && (d->rframe || d->tcollisions || d->tcarrier)))) + d->do_events = CONFIG_BOOLEAN_YES; + + if(d->do_events == CONFIG_BOOLEAN_YES) { + if(unlikely(!d->st_events)) { + + d->st_events = rrdset_create_localhost( + d->chart_type_net_events + , d->chart_id_net_events + , NULL + , d->chart_family + , "net.events" + , "Network Interface Events" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETDEV_NAME + , d->priority + 6 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL); + + d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(d->st_events); + + rrddim_set_by_pointer(d->st_events, d->rd_rframe, (collected_number)d->rframe); + rrddim_set_by_pointer(d->st_events, d->rd_tcollisions, (collected_number)d->tcollisions); + rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier); + rrdset_done(d->st_events); + } + } + + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (system_rbytes || system_tbytes))) { + do_bandwidth = CONFIG_BOOLEAN_YES; + static RRDSET *st_system_net = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_system_net)) { + st_system_net = rrdset_create_localhost( + "system" + , "net" + , NULL + , "network" + , NULL + , "Physical Network Interfaces Aggregated Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETDEV_NAME + , NETDATA_CHART_PRIO_SYSTEM_NET + , update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_system_net, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_system_net, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_system_net); + + rrddim_set_by_pointer(st_system_net, rd_in, (collected_number)system_rbytes); + rrddim_set_by_pointer(st_system_net, rd_out, (collected_number)system_tbytes); + + rrdset_done(st_system_net); + } + + netdev_cleanup(); + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_ip_vs_stats.c b/collectors/proc.plugin/proc_net_ip_vs_stats.c new file mode 100644 index 000000000..43dcf2a88 --- /dev/null +++ b/collectors/proc.plugin/proc_net_ip_vs_stats.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define RRD_TYPE_NET_IPVS "ipvs" +#define PLUGIN_PROC_MODULE_NET_IPVS_NAME "/proc/net/ip_vs_stats" +#define CONFIG_SECTION_PLUGIN_PROC_NET_IPVS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NET_IPVS_NAME + +int do_proc_net_ip_vs_stats(int update_every, usec_t dt) { + (void)dt; + static int do_bandwidth = -1, do_sockets = -1, do_packets = -1; + static procfile *ff = NULL; + + if(do_bandwidth == -1) do_bandwidth = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS bandwidth", 1); + if(do_sockets == -1) do_sockets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS 
connections", 1); + if(do_packets == -1) do_packets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS packets", 1); + + if(!ff) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/ip_vs_stats"); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT); + } + if(!ff) return 1; + + ff = procfile_readall(ff); + if(!ff) return 0; // we return 0, so that we will retry to open it next time + + // make sure we have 3 lines + if(procfile_lines(ff) < 3) return 1; + + // make sure we have 5 words on the 3rd line + if(procfile_linewords(ff, 2) < 5) return 1; + + unsigned long long entries, InPackets, OutPackets, InBytes, OutBytes; + + entries = strtoull(procfile_lineword(ff, 2, 0), NULL, 16); + InPackets = strtoull(procfile_lineword(ff, 2, 1), NULL, 16); + OutPackets = strtoull(procfile_lineword(ff, 2, 2), NULL, 16); + InBytes = strtoull(procfile_lineword(ff, 2, 3), NULL, 16); + OutBytes = strtoull(procfile_lineword(ff, 2, 4), NULL, 16); + + + // -------------------------------------------------------------------- + + if(do_sockets) { + static RRDSET *st = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_IPVS + , "sockets" + , NULL + , RRD_TYPE_NET_IPVS + , NULL + , "IPVS New Connections" + , "connections/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_IPVS_NAME + , NETDATA_CHART_PRIO_IPVS_SOCKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "connections", entries); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_packets) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_IPVS + , "packets" + , NULL + , RRD_TYPE_NET_IPVS + , NULL + , "IPVS Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_IPVS_NAME + , NETDATA_CHART_PRIO_IPVS_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "received", InPackets); + rrddim_set(st, "sent", OutPackets); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_bandwidth) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_IPVS + , "net" + , NULL + , RRD_TYPE_NET_IPVS + , NULL + , "IPVS Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_IPVS_NAME + , NETDATA_CHART_PRIO_IPVS_NET + , update_every + , RRDSET_TYPE_AREA + ); + + rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "received", InBytes); + rrddim_set(st, "sent", OutBytes); + rrdset_done(st); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c new file mode 100644 index 000000000..2dc3c59c0 --- /dev/null +++ b/collectors/proc.plugin/proc_net_netstat.c @@ -0,0 +1,818 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define RRD_TYPE_NET_NETSTAT "ip" +#define PLUGIN_PROC_MODULE_NETSTAT_NAME "/proc/net/netstat" +#define 
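One detail worth noting in the parse above: the totals line of /proc/net/ip_vs_stats is printed by the kernel in hexadecimal, hence strtoull(..., NULL, 16). A minimal stdio sketch of the same read (the sample layout in the comment is approximate):

#include <stdio.h>

// /proc/net/ip_vs_stats looks roughly like:
//    Total Incoming Outgoing         Incoming         Outgoing
//    Conns  Packets  Packets            Bytes            Bytes
//       C4     2E9A     1F03          31A852C          18F9B21
int main(void) {
    FILE *fp = fopen("/proc/net/ip_vs_stats", "r");
    if(!fp) return 1;

    char line[256];
    if(!fgets(line, sizeof(line), fp) || !fgets(line, sizeof(line), fp)) { fclose(fp); return 1; }

    unsigned long long conns, in_pkts, out_pkts, in_bytes, out_bytes;
    if(fscanf(fp, "%llx %llx %llx %llx %llx",
              &conns, &in_pkts, &out_pkts, &in_bytes, &out_bytes) == 5)
        printf("conns=%llu rx=%llu bytes tx=%llu bytes\n", conns, in_bytes, out_bytes);

    fclose(fp);
    return 0;
}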
CONFIG_SECTION_PLUGIN_PROC_NETSTAT "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETSTAT_NAME + +unsigned long long tcpext_TCPSynRetrans = 0; + +static void parse_line_pair(procfile *ff, ARL_BASE *base, size_t header_line, size_t values_line) { + size_t hwords = procfile_linewords(ff, header_line); + size_t vwords = procfile_linewords(ff, values_line); + size_t w; + + if(unlikely(vwords > hwords)) { + error("File /proc/net/netstat on header line %zu has %zu words, but on value line %zu has %zu words.", header_line, hwords, values_line, vwords); + vwords = hwords; + } + + for(w = 1; w < vwords ;w++) { + if(unlikely(arl_check(base, procfile_lineword(ff, header_line, w), procfile_lineword(ff, values_line, w)))) + break; + } +} + +int do_proc_net_netstat(int update_every, usec_t dt) { + (void)dt; + + static int do_bandwidth = -1, do_inerrors = -1, do_mcast = -1, do_bcast = -1, do_mcast_p = -1, do_bcast_p = -1, do_ecn = -1, \ + do_tcpext_reorder = -1, do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1, do_tcpext_memory = -1, + do_tcpext_syn_queue = -1, do_tcpext_accept_queue = -1; + + static uint32_t hash_ipext = 0, hash_tcpext = 0; + static procfile *ff = NULL; + + static ARL_BASE *arl_tcpext = NULL; + static ARL_BASE *arl_ipext = NULL; + + // -------------------------------------------------------------------- + // IP + + // IP bandwidth + static unsigned long long ipext_InOctets = 0; + static unsigned long long ipext_OutOctets = 0; + + // IP input errors + static unsigned long long ipext_InNoRoutes = 0; + static unsigned long long ipext_InTruncatedPkts = 0; + static unsigned long long ipext_InCsumErrors = 0; + + // IP multicast bandwidth + static unsigned long long ipext_InMcastOctets = 0; + static unsigned long long ipext_OutMcastOctets = 0; + + // IP multicast packets + static unsigned long long ipext_InMcastPkts = 0; + static unsigned long long ipext_OutMcastPkts = 0; + + // IP broadcast bandwidth + static unsigned long long ipext_InBcastOctets = 0; + static unsigned long long ipext_OutBcastOctets = 0; + + // IP broadcast packets + static unsigned long long ipext_InBcastPkts = 0; + static unsigned long long ipext_OutBcastPkts = 0; + + // IP ECN + static unsigned long long ipext_InNoECTPkts = 0; + static unsigned long long ipext_InECT1Pkts = 0; + static unsigned long long ipext_InECT0Pkts = 0; + static unsigned long long ipext_InCEPkts = 0; + + // -------------------------------------------------------------------- + // IP TCP + + // IP TCP Reordering + static unsigned long long tcpext_TCPRenoReorder = 0; + static unsigned long long tcpext_TCPFACKReorder = 0; + static unsigned long long tcpext_TCPSACKReorder = 0; + static unsigned long long tcpext_TCPTSReorder = 0; + + // IP TCP SYN Cookies + static unsigned long long tcpext_SyncookiesSent = 0; + static unsigned long long tcpext_SyncookiesRecv = 0; + static unsigned long long tcpext_SyncookiesFailed = 0; + + // IP TCP Out Of Order Queue + // http://www.spinics.net/lists/netdev/msg204696.html + static unsigned long long tcpext_TCPOFOQueue = 0; // Number of packets queued in OFO queue + static unsigned long long tcpext_TCPOFODrop = 0; // Number of packets meant to be queued in OFO but dropped because socket rcvbuf limit hit. + static unsigned long long tcpext_TCPOFOMerge = 0; // Number of packets in OFO that were merged with other packets. 
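parse_line_pair() below handles the two-line groups /proc/net/netstat is built from: a header line naming the fields and a values line with the numbers, both starting with the same prefix. A standalone sketch of the pairing for one such group; the real code routes each value into an ARL destination instead of printing it.

#include <stdio.h>
#include <string.h>

// /proc/net/netstat comes in header/values pairs sharing a prefix, e.g.:
//   IpExt: InNoRoutes InTruncatedPkts ... InOctets OutOctets ...
//   IpExt: 0 0 ... 21367818910 10977954801 ...
static void print_pairs(const char *header, const char *values) {
    char h[1024], v[1024];
    snprintf(h, sizeof(h), "%s", header);
    snprintf(v, sizeof(v), "%s", values);

    char *hs, *vs;
    strtok_r(h, " ", &hs);   // skip the "IpExt:" prefix (word 0)
    strtok_r(v, " ", &vs);

    char *hw, *vw;
    while((hw = strtok_r(NULL, " ", &hs)) && (vw = strtok_r(NULL, " ", &vs)))
        printf("%s = %s\n", hw, vw);   // header word N pairs with value word N
}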
+ static unsigned long long tcpext_OfoPruned = 0; // packets dropped from out-of-order queue because of socket buffer overrun + + // IP TCP connection resets + // https://github.com/ecki/net-tools/blob/bd8bceaed2311651710331a7f8990c3e31be9840/statistics.c + static unsigned long long tcpext_TCPAbortOnData = 0; // connections reset due to unexpected data + static unsigned long long tcpext_TCPAbortOnClose = 0; // connections reset due to early user close + static unsigned long long tcpext_TCPAbortOnMemory = 0; // connections aborted due to memory pressure + static unsigned long long tcpext_TCPAbortOnTimeout = 0; // connections aborted due to timeout + static unsigned long long tcpext_TCPAbortOnLinger = 0; // connections aborted after user close in linger timeout + static unsigned long long tcpext_TCPAbortFailed = 0; // times unable to send RST due to no memory + + // https://perfchron.com/2015/12/26/investigating-linux-network-issues-with-netstat-and-nstat/ + static unsigned long long tcpext_ListenOverflows = 0; // times the listen queue of a socket overflowed + static unsigned long long tcpext_ListenDrops = 0; // SYNs to LISTEN sockets ignored + + // IP TCP memory pressures + static unsigned long long tcpext_TCPMemoryPressures = 0; + + static unsigned long long tcpext_TCPReqQFullDrop = 0; + static unsigned long long tcpext_TCPReqQFullDoCookies = 0; + + // shared: tcpext_TCPSynRetrans + + + if(unlikely(!arl_ipext)) { + hash_ipext = simple_hash("IpExt"); + hash_tcpext = simple_hash("TcpExt"); + + do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "bandwidth", CONFIG_BOOLEAN_AUTO); + do_inerrors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "input errors", CONFIG_BOOLEAN_AUTO); + do_mcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast bandwidth", CONFIG_BOOLEAN_AUTO); + do_bcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast bandwidth", CONFIG_BOOLEAN_AUTO); + do_mcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast packets", CONFIG_BOOLEAN_AUTO); + do_bcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast packets", CONFIG_BOOLEAN_AUTO); + do_ecn = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "ECN packets", CONFIG_BOOLEAN_AUTO); + + do_tcpext_reorder = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP reorders", CONFIG_BOOLEAN_AUTO); + do_tcpext_syscookies = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN cookies", CONFIG_BOOLEAN_AUTO); + do_tcpext_ofo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO); + do_tcpext_connaborts = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP connection aborts", CONFIG_BOOLEAN_AUTO); + do_tcpext_memory = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP memory pressures", CONFIG_BOOLEAN_AUTO); + + do_tcpext_syn_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN queue", CONFIG_BOOLEAN_AUTO); + do_tcpext_accept_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP accept queue", CONFIG_BOOLEAN_AUTO); + + arl_ipext = arl_create("netstat/ipext", NULL, 60); + arl_tcpext = arl_create("netstat/tcpext", NULL, 60); + + // -------------------------------------------------------------------- + // IP + + if(do_bandwidth != CONFIG_BOOLEAN_NO) { + arl_expect(arl_ipext, "InOctets", 
&ipext_InOctets);
+            arl_expect(arl_ipext, "OutOctets", &ipext_OutOctets);
+        }
+
+        if(do_inerrors != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InNoRoutes", &ipext_InNoRoutes);
+            arl_expect(arl_ipext, "InTruncatedPkts", &ipext_InTruncatedPkts);
+            arl_expect(arl_ipext, "InCsumErrors", &ipext_InCsumErrors);
+        }
+
+        if(do_mcast != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InMcastOctets", &ipext_InMcastOctets);
+            arl_expect(arl_ipext, "OutMcastOctets", &ipext_OutMcastOctets);
+        }
+
+        if(do_mcast_p != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InMcastPkts", &ipext_InMcastPkts);
+            arl_expect(arl_ipext, "OutMcastPkts", &ipext_OutMcastPkts);
+        }
+
+        // do_bcast controls the broadcast bandwidth chart (octets) and
+        // do_bcast_p the broadcast packets chart (pkts)
+        if(do_bcast != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InBcastOctets", &ipext_InBcastOctets);
+            arl_expect(arl_ipext, "OutBcastOctets", &ipext_OutBcastOctets);
+        }
+
+        if(do_bcast_p != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InBcastPkts", &ipext_InBcastPkts);
+            arl_expect(arl_ipext, "OutBcastPkts", &ipext_OutBcastPkts);
+        }
+
+        if(do_ecn != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InNoECTPkts", &ipext_InNoECTPkts);
+            arl_expect(arl_ipext, "InECT1Pkts", &ipext_InECT1Pkts);
+            arl_expect(arl_ipext, "InECT0Pkts", &ipext_InECT0Pkts);
+            arl_expect(arl_ipext, "InCEPkts", &ipext_InCEPkts);
+        }
+
+        // --------------------------------------------------------------------
+        // IP TCP
+
+        if(do_tcpext_reorder != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "TCPFACKReorder", &tcpext_TCPFACKReorder);
+            arl_expect(arl_tcpext, "TCPSACKReorder", &tcpext_TCPSACKReorder);
+            arl_expect(arl_tcpext, "TCPRenoReorder", &tcpext_TCPRenoReorder);
+            arl_expect(arl_tcpext, "TCPTSReorder", &tcpext_TCPTSReorder);
+        }
+
+        if(do_tcpext_syscookies != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "SyncookiesSent", &tcpext_SyncookiesSent);
+            arl_expect(arl_tcpext, "SyncookiesRecv", &tcpext_SyncookiesRecv);
+            arl_expect(arl_tcpext, "SyncookiesFailed", &tcpext_SyncookiesFailed);
+        }
+
+        if(do_tcpext_ofo != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "TCPOFOQueue", &tcpext_TCPOFOQueue);
+            arl_expect(arl_tcpext, "TCPOFODrop", &tcpext_TCPOFODrop);
+            arl_expect(arl_tcpext, "TCPOFOMerge", &tcpext_TCPOFOMerge);
+            arl_expect(arl_tcpext, "OfoPruned", &tcpext_OfoPruned);
+        }
+
+        if(do_tcpext_connaborts != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "TCPAbortOnData", &tcpext_TCPAbortOnData);
+            arl_expect(arl_tcpext, "TCPAbortOnClose", &tcpext_TCPAbortOnClose);
+            arl_expect(arl_tcpext, "TCPAbortOnMemory", &tcpext_TCPAbortOnMemory);
+            arl_expect(arl_tcpext, "TCPAbortOnTimeout", &tcpext_TCPAbortOnTimeout);
+            arl_expect(arl_tcpext, "TCPAbortOnLinger", &tcpext_TCPAbortOnLinger);
+            arl_expect(arl_tcpext, "TCPAbortFailed", &tcpext_TCPAbortFailed);
+        }
+
+        if(do_tcpext_memory != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "TCPMemoryPressures", &tcpext_TCPMemoryPressures);
+        }
+
+        if(do_tcpext_accept_queue != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "ListenOverflows", &tcpext_ListenOverflows);
+            arl_expect(arl_tcpext, "ListenDrops", &tcpext_ListenDrops);
+        }
+
+        if(do_tcpext_syn_queue != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_tcpext, "TCPReqQFullDrop", &tcpext_TCPReqQFullDrop);
+            arl_expect(arl_tcpext, "TCPReqQFullDoCookies", &tcpext_TCPReqQFullDoCookies);
+        }
+
+        // shared metrics
+        arl_expect(arl_tcpext, "TCPSynRetrans", &tcpext_TCPSynRetrans);
+    }
+
+    if(unlikely(!ff)) {
+        char filename[FILENAME_MAX + 1];
+        snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/netstat");
+        ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NETSTAT,
"filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + size_t words; + + arl_begin(arl_ipext); + arl_begin(arl_tcpext); + + for(l = 0; l < lines ;l++) { + char *key = procfile_lineword(ff, l, 0); + uint32_t hash = simple_hash(key); + + if(unlikely(hash == hash_ipext && strcmp(key, "IpExt") == 0)) { + size_t h = l++; + + words = procfile_linewords(ff, l); + if(unlikely(words < 2)) { + error("Cannot read /proc/net/netstat IpExt line. Expected 2+ params, read %zu.", words); + continue; + } + + parse_line_pair(ff, arl_ipext, h, l); + + // -------------------------------------------------------------------- + + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (ipext_InOctets || ipext_OutOctets))) { + do_bandwidth = CONFIG_BOOLEAN_YES; + static RRDSET *st_system_ip = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_system_ip)) { + st_system_ip = rrdset_create_localhost( + "system" + , RRD_TYPE_NET_NETSTAT + , NULL + , "network" + , NULL + , "IP Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_IP + , update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_system_ip, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_system_ip, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_system_ip); + + rrddim_set_by_pointer(st_system_ip, rd_in, ipext_InOctets); + rrddim_set_by_pointer(st_system_ip, rd_out, ipext_OutOctets); + + rrdset_done(st_system_ip); + } + + // -------------------------------------------------------------------- + + if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && (ipext_InNoRoutes || ipext_InTruncatedPkts))) { + do_inerrors = CONFIG_BOOLEAN_YES; + static RRDSET *st_ip_inerrors = NULL; + static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL; + + if(unlikely(!st_ip_inerrors)) { + st_ip_inerrors = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "inerrors" + , NULL + , "errors" + , NULL + , "IP Input Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_ip_inerrors, RRDSET_FLAG_DETAIL); + + rd_noroutes = rrddim_add(st_ip_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_truncated = rrddim_add(st_ip_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_checksum = rrddim_add(st_ip_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_ip_inerrors); + + rrddim_set_by_pointer(st_ip_inerrors, rd_noroutes, ipext_InNoRoutes); + rrddim_set_by_pointer(st_ip_inerrors, rd_truncated, ipext_InTruncatedPkts); + rrddim_set_by_pointer(st_ip_inerrors, rd_checksum, ipext_InCsumErrors); + + rrdset_done(st_ip_inerrors); + } + + // -------------------------------------------------------------------- + + if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets))) { + do_mcast = CONFIG_BOOLEAN_YES; + static RRDSET *st_ip_mcast = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_ip_mcast)) { + st_ip_mcast = 
rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "mcast" + , NULL + , "multicast" + , NULL + , "IP Multicast Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_MCAST + , update_every + , RRDSET_TYPE_AREA + ); + + rrdset_flag_set(st_ip_mcast, RRDSET_FLAG_DETAIL); + + rd_in = rrddim_add(st_ip_mcast, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_ip_mcast, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_ip_mcast); + + rrddim_set_by_pointer(st_ip_mcast, rd_in, ipext_InMcastOctets); + rrddim_set_by_pointer(st_ip_mcast, rd_out, ipext_OutMcastOctets); + + rrdset_done(st_ip_mcast); + } + + // -------------------------------------------------------------------- + + if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (ipext_InBcastOctets || ipext_OutBcastOctets))) { + do_bcast = CONFIG_BOOLEAN_YES; + + static RRDSET *st_ip_bcast = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_ip_bcast)) { + st_ip_bcast = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "bcast" + , NULL + , "broadcast" + , NULL + , "IP Broadcast Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_BCAST + , update_every + , RRDSET_TYPE_AREA + ); + + rrdset_flag_set(st_ip_bcast, RRDSET_FLAG_DETAIL); + + rd_in = rrddim_add(st_ip_bcast, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_ip_bcast, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_ip_bcast); + + rrddim_set_by_pointer(st_ip_bcast, rd_in, ipext_InBcastOctets); + rrddim_set_by_pointer(st_ip_bcast, rd_out, ipext_OutBcastOctets); + + rrdset_done(st_ip_bcast); + } + + // -------------------------------------------------------------------- + + if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InMcastPkts || ipext_OutMcastPkts))) { + do_mcast_p = CONFIG_BOOLEAN_YES; + + static RRDSET *st_ip_mcastpkts = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_ip_mcastpkts)) { + st_ip_mcastpkts = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "mcastpkts" + , NULL + , "multicast" + , NULL + , "IP Multicast Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_MCAST_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_ip_mcastpkts, RRDSET_FLAG_DETAIL); + + rd_in = rrddim_add(st_ip_mcastpkts, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_ip_mcastpkts, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_ip_mcastpkts); + + rrddim_set_by_pointer(st_ip_mcastpkts, rd_in, ipext_InMcastPkts); + rrddim_set_by_pointer(st_ip_mcastpkts, rd_out, ipext_OutMcastPkts); + + rrdset_done(st_ip_mcastpkts); + } + + // -------------------------------------------------------------------- + + if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InBcastPkts || ipext_OutBcastPkts))) { + do_bcast_p = CONFIG_BOOLEAN_YES; + + static RRDSET *st_ip_bcastpkts = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_ip_bcastpkts)) { + st_ip_bcastpkts = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "bcastpkts" + , NULL + , "broadcast" + , NULL + , "IP Broadcast Packets" + , 
"packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_BCAST_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_ip_bcastpkts, RRDSET_FLAG_DETAIL); + + rd_in = rrddim_add(st_ip_bcastpkts, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_ip_bcastpkts, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_ip_bcastpkts); + + rrddim_set_by_pointer(st_ip_bcastpkts, rd_in, ipext_InBcastPkts); + rrddim_set_by_pointer(st_ip_bcastpkts, rd_out, ipext_OutBcastPkts); + + rrdset_done(st_ip_bcastpkts); + } + + // -------------------------------------------------------------------- + + if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (ipext_InCEPkts || ipext_InECT0Pkts || ipext_InECT1Pkts || ipext_InNoECTPkts))) { + do_ecn = CONFIG_BOOLEAN_YES; + + static RRDSET *st_ecnpkts = NULL; + static RRDDIM *rd_cep = NULL, *rd_noectp = NULL, *rd_ectp0 = NULL, *rd_ectp1 = NULL; + + if(unlikely(!st_ecnpkts)) { + st_ecnpkts = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "ecnpkts" + , NULL + , "ecn" + , NULL + , "IP ECN Statistics" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_ECN + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL); + + rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ectp1 = rrddim_add(st_ecnpkts, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_ecnpkts); + + rrddim_set_by_pointer(st_ecnpkts, rd_cep, ipext_InCEPkts); + rrddim_set_by_pointer(st_ecnpkts, rd_noectp, ipext_InNoECTPkts); + rrddim_set_by_pointer(st_ecnpkts, rd_ectp0, ipext_InECT0Pkts); + rrddim_set_by_pointer(st_ecnpkts, rd_ectp1, ipext_InECT1Pkts); + + rrdset_done(st_ecnpkts); + } + } + else if(unlikely(hash == hash_tcpext && strcmp(key, "TcpExt") == 0)) { + size_t h = l++; + + words = procfile_linewords(ff, l); + if(unlikely(words < 2)) { + error("Cannot read /proc/net/netstat TcpExt line. 
Expected 2+ params, read %zu.", words); + continue; + } + + parse_line_pair(ff, arl_tcpext, h, l); + + // -------------------------------------------------------------------- + + if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && (tcpext_TCPMemoryPressures))) { + do_tcpext_memory = CONFIG_BOOLEAN_YES; + + static RRDSET *st_tcpmemorypressures = NULL; + static RRDDIM *rd_pressures = NULL; + + if(unlikely(!st_tcpmemorypressures)) { + st_tcpmemorypressures = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcpmemorypressures" + , NULL + , "tcp" + , NULL + , "TCP Memory Pressures" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_MEM + , update_every + , RRDSET_TYPE_LINE + ); + + rd_pressures = rrddim_add(st_tcpmemorypressures, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_tcpmemorypressures); + + rrddim_set_by_pointer(st_tcpmemorypressures, rd_pressures, tcpext_TCPMemoryPressures); + + rrdset_done(st_tcpmemorypressures); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpext_TCPAbortOnData || tcpext_TCPAbortOnClose || tcpext_TCPAbortOnMemory || tcpext_TCPAbortOnTimeout || tcpext_TCPAbortOnLinger || tcpext_TCPAbortFailed))) { + do_tcpext_connaborts = CONFIG_BOOLEAN_YES; + + static RRDSET *st_tcpconnaborts = NULL; + static RRDDIM *rd_baddata = NULL, *rd_userclosed = NULL, *rd_nomemory = NULL, *rd_timeout = NULL, *rd_linger = NULL, *rd_failed = NULL; + + if(unlikely(!st_tcpconnaborts)) { + st_tcpconnaborts = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcpconnaborts" + , NULL + , "tcp" + , NULL + , "TCP Connection Aborts" + , "connections/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_CONNABORTS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_baddata = rrddim_add(st_tcpconnaborts, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_userclosed = rrddim_add(st_tcpconnaborts, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_nomemory = rrddim_add(st_tcpconnaborts, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_timeout = rrddim_add(st_tcpconnaborts, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_linger = rrddim_add(st_tcpconnaborts, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st_tcpconnaborts, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_tcpconnaborts); + + rrddim_set_by_pointer(st_tcpconnaborts, rd_baddata, tcpext_TCPAbortOnData); + rrddim_set_by_pointer(st_tcpconnaborts, rd_userclosed, tcpext_TCPAbortOnClose); + rrddim_set_by_pointer(st_tcpconnaborts, rd_nomemory, tcpext_TCPAbortOnMemory); + rrddim_set_by_pointer(st_tcpconnaborts, rd_timeout, tcpext_TCPAbortOnTimeout); + rrddim_set_by_pointer(st_tcpconnaborts, rd_linger, tcpext_TCPAbortOnLinger); + rrddim_set_by_pointer(st_tcpconnaborts, rd_failed, tcpext_TCPAbortFailed); + + rrdset_done(st_tcpconnaborts); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && (tcpext_TCPRenoReorder || tcpext_TCPFACKReorder || tcpext_TCPSACKReorder || tcpext_TCPTSReorder))) { + do_tcpext_reorder = CONFIG_BOOLEAN_YES; + + static RRDSET 
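+ /* reordered packets broken down by the mechanism that detected them: TCP timestamps, SACK, FACK, or the Reno duplicate-ACK heuristic */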
*st_tcpreorders = NULL; + static RRDDIM *rd_timestamp = NULL, *rd_sack = NULL, *rd_fack = NULL, *rd_reno = NULL; + + if(unlikely(!st_tcpreorders)) { + st_tcpreorders = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcpreorders" + , NULL + , "tcp" + , NULL + , "TCP Reordered Packets by Detection Method" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_REORDERS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_timestamp = rrddim_add(st_tcpreorders, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sack = rrddim_add(st_tcpreorders, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_fack = rrddim_add(st_tcpreorders, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_reno = rrddim_add(st_tcpreorders, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_tcpreorders); + + rrddim_set_by_pointer(st_tcpreorders, rd_timestamp, tcpext_TCPTSReorder); + rrddim_set_by_pointer(st_tcpreorders, rd_sack, tcpext_TCPSACKReorder); + rrddim_set_by_pointer(st_tcpreorders, rd_fack, tcpext_TCPFACKReorder); + rrddim_set_by_pointer(st_tcpreorders, rd_reno, tcpext_TCPRenoReorder); + + rrdset_done(st_tcpreorders); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && (tcpext_TCPOFOQueue || tcpext_TCPOFODrop || tcpext_TCPOFOMerge))) { + do_tcpext_ofo = CONFIG_BOOLEAN_YES; + + static RRDSET *st_ip_tcpofo = NULL; + static RRDDIM *rd_inqueue = NULL, *rd_dropped = NULL, *rd_merged = NULL, *rd_pruned = NULL; + + if(unlikely(!st_ip_tcpofo)) { + + st_ip_tcpofo = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcpofo" + , NULL + , "tcp" + , NULL + , "TCP Out-Of-Order Queue" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_OFO + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inqueue = rrddim_add(st_ip_tcpofo, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_dropped = rrddim_add(st_ip_tcpofo, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_merged = rrddim_add(st_ip_tcpofo, "TCPOFOMerge", "merged", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_pruned = rrddim_add(st_ip_tcpofo, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_ip_tcpofo); + + rrddim_set_by_pointer(st_ip_tcpofo, rd_inqueue, tcpext_TCPOFOQueue); + rrddim_set_by_pointer(st_ip_tcpofo, rd_dropped, tcpext_TCPOFODrop); + rrddim_set_by_pointer(st_ip_tcpofo, rd_merged, tcpext_TCPOFOMerge); + rrddim_set_by_pointer(st_ip_tcpofo, rd_pruned, tcpext_OfoPruned); + + rrdset_done(st_ip_tcpofo); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpext_SyncookiesSent || tcpext_SyncookiesRecv || tcpext_SyncookiesFailed))) { + do_tcpext_syscookies = CONFIG_BOOLEAN_YES; + + static RRDSET *st_syncookies = NULL; + static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_failed = NULL; + + if(unlikely(!st_syncookies)) { + + st_syncookies = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcpsyncookies" + , NULL + , "tcp" + , NULL + , "TCP SYN Cookies" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES + , update_every + , RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st_syncookies, "SyncookiesRecv", "received", 1, 1, 
RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st_syncookies, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st_syncookies, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_syncookies); + + rrddim_set_by_pointer(st_syncookies, rd_received, tcpext_SyncookiesRecv); + rrddim_set_by_pointer(st_syncookies, rd_sent, tcpext_SyncookiesSent); + rrddim_set_by_pointer(st_syncookies, rd_failed, tcpext_SyncookiesFailed); + + rrdset_done(st_syncookies); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO && (tcpext_TCPReqQFullDrop || tcpext_TCPReqQFullDoCookies))) { + do_tcpext_syn_queue = CONFIG_BOOLEAN_YES; + + static RRDSET *st_syn_queue = NULL; + static RRDDIM + *rd_TCPReqQFullDrop = NULL, + *rd_TCPReqQFullDoCookies = NULL; + + if(unlikely(!st_syn_queue)) { + + st_syn_queue = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcp_syn_queue" + , NULL + , "tcp" + , NULL + , "TCP SYN Queue Issues" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE + , update_every + , RRDSET_TYPE_LINE + ); + + rd_TCPReqQFullDrop = rrddim_add(st_syn_queue, "TCPReqQFullDrop", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_TCPReqQFullDoCookies = rrddim_add(st_syn_queue, "TCPReqQFullDoCookies", "cookies", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_syn_queue); + + rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDrop, tcpext_TCPReqQFullDrop); + rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDoCookies, tcpext_TCPReqQFullDoCookies); + + rrdset_done(st_syn_queue); + } + + // -------------------------------------------------------------------- + + if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO && (tcpext_ListenOverflows || tcpext_ListenDrops))) { + do_tcpext_accept_queue = CONFIG_BOOLEAN_YES; + + static RRDSET *st_accept_queue = NULL; + static RRDDIM *rd_overflows = NULL, + *rd_drops = NULL; + + if(unlikely(!st_accept_queue)) { + + st_accept_queue = rrdset_create_localhost( + RRD_TYPE_NET_NETSTAT + , "tcp_accept_queue" + , NULL + , "tcp" + , NULL + , "TCP Accept Queue Issues" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NETSTAT_NAME + , NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE + , update_every + , RRDSET_TYPE_LINE + ); + + rd_overflows = rrddim_add(st_accept_queue, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_drops = rrddim_add(st_accept_queue, "ListenDrops", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_accept_queue); + + rrddim_set_by_pointer(st_accept_queue, rd_overflows, tcpext_ListenOverflows); + rrddim_set_by_pointer(st_accept_queue, rd_drops, tcpext_ListenDrops); + + rrdset_done(st_accept_queue); + } + + } + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_rpc_nfs.c b/collectors/proc.plugin/proc_net_rpc_nfs.c new file mode 100644 index 000000000..f5702859c --- /dev/null +++ b/collectors/proc.plugin/proc_net_rpc_nfs.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_NFS_NAME "/proc/net/rpc/nfs" +#define CONFIG_SECTION_PLUGIN_PROC_NFS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NFS_NAME + +struct nfs_procs { + char name[30]; + unsigned long long value; + int present; + RRDDIM *rd; +}; + +struct nfs_procs 
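+ /* per-procedure counters, in the exact order the kernel prints them on the proc2 line; the empty name marks the end of the table */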
nfs_proc2_values[] = { + { "null" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"root" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"readlink", 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"wrcache" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"symlink" , 0ULL, 0, NULL} + , {"mkdir" , 0ULL, 0, NULL} + , {"rmdir" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"fsstat" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + +struct nfs_procs nfs_proc3_values[] = { + { "null" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"access" , 0ULL, 0, NULL} + , {"readlink" , 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"mkdir" , 0ULL, 0, NULL} + , {"symlink" , 0ULL, 0, NULL} + , {"mknod" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rmdir" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"readdirplus", 0ULL, 0, NULL} + , {"fsstat" , 0ULL, 0, NULL} + , {"fsinfo" , 0ULL, 0, NULL} + , {"pathconf" , 0ULL, 0, NULL} + , {"commit" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + +struct nfs_procs nfs_proc4_values[] = { + { "null" , 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"commit" , 0ULL, 0, NULL} + , {"open" , 0ULL, 0, NULL} + , {"open_conf" , 0ULL, 0, NULL} + , {"open_noat" , 0ULL, 0, NULL} + , {"open_dgrd" , 0ULL, 0, NULL} + , {"close" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"fsinfo" , 0ULL, 0, NULL} + , {"renew" , 0ULL, 0, NULL} + , {"setclntid" , 0ULL, 0, NULL} + , {"confirm" , 0ULL, 0, NULL} + , {"lock" , 0ULL, 0, NULL} + , {"lockt" , 0ULL, 0, NULL} + , {"locku" , 0ULL, 0, NULL} + , {"access" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"lookup_root" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"symlink" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"pathconf" , 0ULL, 0, NULL} + , {"statfs" , 0ULL, 0, NULL} + , {"readlink" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"server_caps" , 0ULL, 0, NULL} + , {"delegreturn" , 0ULL, 0, NULL} + , {"getacl" , 0ULL, 0, NULL} + , {"setacl" , 0ULL, 0, NULL} + , {"fs_locations" , 0ULL, 0, NULL} + , {"rel_lkowner" , 0ULL, 0, NULL} + , {"secinfo" , 0ULL, 0, NULL} + , {"fsid_present" , 0ULL, 0, NULL} + , + + /* nfsv4.1 client ops */ + { "exchange_id" , 0ULL, 0, NULL} + , {"create_session" , 0ULL, 0, NULL} + , {"destroy_session" , 0ULL, 0, NULL} + , {"sequence" , 0ULL, 0, NULL} + , {"get_lease_time" , 0ULL, 0, NULL} + , {"reclaim_comp" , 0ULL, 0, NULL} + , {"layoutget" , 0ULL, 0, NULL} + , {"getdevinfo" , 0ULL, 0, NULL} + , {"layoutcommit" , 0ULL, 0, NULL} + , {"layoutreturn" , 0ULL, 0, NULL} + , {"secinfo_no" , 0ULL, 0, NULL} + , {"test_stateid" , 0ULL, 0, NULL} + , {"free_stateid" , 0ULL, 0, NULL} + , {"getdevicelist" , 0ULL, 0, NULL} + , {"bind_conn_to_ses", 0ULL, 0, NULL} + , {"destroy_clientid", 0ULL, 0, NULL} + , + + /* nfsv4.2 client ops */ + { "seek" , 0ULL, 0, NULL} + , {"allocate" , 0ULL, 0, NULL} + , {"deallocate" , 0ULL, 0, NULL} + , {"layoutstats" , 0ULL, 0, NULL} + , {"clone" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + +int 
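+ /* NFS client statistics from /proc/net/rpc/nfs; dt is unused because every dimension here is incremental */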
do_proc_net_rpc_nfs(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + static int do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1; + static int proc2_warning = 0, proc3_warning = 0, proc4_warning = 0; + + if(!ff) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfs"); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NFS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); + } + if(!ff) return 1; + + ff = procfile_readall(ff); + if(!ff) return 0; // we return 0, so that we will retry to open it next time + + if(do_net == -1) do_net = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "network", 1); + if(do_rpc == -1) do_rpc = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "rpc", 1); + if(do_proc2 == -1) do_proc2 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v2 procedures", 1); + if(do_proc3 == -1) do_proc3 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v3 procedures", 1); + if(do_proc4 == -1) do_proc4 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v4 procedures", 1); + + // if they are enabled, reset them to 1 + // later we do them =2 to avoid doing strcmp() for all lines + if(do_net) do_net = 1; + if(do_rpc) do_rpc = 1; + if(do_proc2) do_proc2 = 1; + if(do_proc3) do_proc3 = 1; + if(do_proc4) do_proc4 = 1; + + size_t lines = procfile_lines(ff), l; + + char *type; + unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0; + unsigned long long rpc_calls = 0, rpc_retransmits = 0, rpc_auth_refresh = 0; + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(!words) continue; + + type = procfile_lineword(ff, l, 0); + + if(do_net == 1 && strcmp(type, "net") == 0) { + if(words < 5) { + error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 5); + continue; + } + + net_count = str2ull(procfile_lineword(ff, l, 1)); + net_udp_count = str2ull(procfile_lineword(ff, l, 2)); + net_tcp_count = str2ull(procfile_lineword(ff, l, 3)); + net_tcp_connections = str2ull(procfile_lineword(ff, l, 4)); + + unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections; + if(sum == 0ULL) do_net = -1; + else do_net = 2; + } + else if(do_rpc == 1 && strcmp(type, "rpc") == 0) { + if(words < 4) { + error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 4); + continue; + } + + rpc_calls = str2ull(procfile_lineword(ff, l, 1)); + rpc_retransmits = str2ull(procfile_lineword(ff, l, 2)); + rpc_auth_refresh = str2ull(procfile_lineword(ff, l, 3)); + + unsigned long long sum = rpc_calls + rpc_retransmits + rpc_auth_refresh; + if(sum == 0ULL) do_rpc = -1; + else do_rpc = 2; + } + else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfs_proc2_values[i].name[0] ; i++, j++) { + nfs_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfs_proc2_values[i].present = 1; + sum += nfs_proc2_values[i].value; + } + + if(sum == 0ULL) { + if(!proc2_warning) { + info("Disabling /proc/net/rpc/nfs v2 procedure calls chart. It seems unused on this machine. 
It will be enabled automatically when found with data in it."); + proc2_warning = 1; + } + do_proc2 = 0; + } + else do_proc2 = 2; + } + else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfs_proc3_values[i].name[0] ; i++, j++) { + nfs_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfs_proc3_values[i].present = 1; + sum += nfs_proc3_values[i].value; + } + + if(sum == 0ULL) { + if(!proc3_warning) { + info("Disabling /proc/net/rpc/nfs v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); + proc3_warning = 1; + } + do_proc3 = 0; + } + else do_proc3 = 2; + } + else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfs_proc4_values[i].name[0] ; i++, j++) { + nfs_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfs_proc4_values[i].present = 1; + sum += nfs_proc4_values[i].value; + } + + if(sum == 0ULL) { + if(!proc4_warning) { + info("Disabling /proc/net/rpc/nfs v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); + proc4_warning = 1; + } + do_proc4 = 0; + } + else do_proc4 = 2; + } + } + + // -------------------------------------------------------------------- + + if(do_net == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_udp = NULL, + *rd_tcp = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfs" + , "net" + , NULL + , "network" + , NULL + , "NFS Client Network" + , "operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFS_NAME + , NETDATA_CHART_PRIO_NFS_NET + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + // ignore net_count, net_tcp_connections + (void)net_count; + (void)net_tcp_connections; + + rrddim_set_by_pointer(st, rd_udp, net_udp_count); + rrddim_set_by_pointer(st, rd_tcp, net_tcp_count); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_rpc == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_calls = NULL, + *rd_retransmits = NULL, + *rd_auth_refresh = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfs" + , "rpc" + , NULL + , "rpc" + , NULL + , "NFS Client Remote Procedure Calls Statistics" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFS_NAME + , NETDATA_CHART_PRIO_NFS_RPC + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_auth_refresh = rrddim_add(st, "auth_refresh", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_calls, rpc_calls); + rrddim_set_by_pointer(st, rd_retransmits, rpc_retransmits); + rrddim_set_by_pointer(st, rd_auth_refresh, rpc_auth_refresh); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc2 == 2) { + static 
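+ /* dimensions are created lazily: only procedures marked present while parsing get a dimension on the chart */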
RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfs" + , "proc2" + , NULL + , "nfsv2rpc" + , NULL + , "NFS v2 Client Remote Procedure Calls" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFS_NAME + , NETDATA_CHART_PRIO_NFS_PROC2 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfs_proc2_values[i].present ; i++) { + if(unlikely(!nfs_proc2_values[i].rd)) + nfs_proc2_values[i].rd = rrddim_add(st, nfs_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfs_proc2_values[i].rd, nfs_proc2_values[i].value); + } + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc3 == 2) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfs" + , "proc3" + , NULL + , "nfsv3rpc" + , NULL + , "NFS v3 Client Remote Procedure Calls" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFS_NAME + , NETDATA_CHART_PRIO_NFS_PROC3 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfs_proc3_values[i].present ; i++) { + if(unlikely(!nfs_proc3_values[i].rd)) + nfs_proc3_values[i].rd = rrddim_add(st, nfs_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfs_proc3_values[i].rd, nfs_proc3_values[i].value); + } + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc4 == 2) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfs" + , "proc4" + , NULL + , "nfsv4rpc" + , NULL + , "NFS v4 Client Remote Procedure Calls" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFS_NAME + , NETDATA_CHART_PRIO_NFS_PROC4 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfs_proc4_values[i].present ; i++) { + if(unlikely(!nfs_proc4_values[i].rd)) + nfs_proc4_values[i].rd = rrddim_add(st, nfs_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfs_proc4_values[i].rd, nfs_proc4_values[i].value); + } + + rrdset_done(st); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c new file mode 100644 index 000000000..20b87e9dd --- /dev/null +++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c @@ -0,0 +1,1006 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_NFSD_NAME "/proc/net/rpc/nfsd" + +struct nfsd_procs { + char name[30]; + unsigned long long value; + int present; + RRDDIM *rd; +}; + +struct nfsd_procs nfsd_proc2_values[] = { + { "null" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"root" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"readlink", 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"wrcache" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"symlink" , 0ULL, 0, NULL} + , {"mkdir" , 0ULL, 0, NULL} + , {"rmdir" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"fsstat" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + +struct nfsd_procs nfsd_proc3_values[] = { + { "null" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"access" , 0ULL, 0, NULL} + , 
{"readlink" , 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"mkdir" , 0ULL, 0, NULL} + , {"symlink" , 0ULL, 0, NULL} + , {"mknod" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rmdir" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"readdirplus", 0ULL, 0, NULL} + , {"fsstat" , 0ULL, 0, NULL} + , {"fsinfo" , 0ULL, 0, NULL} + , {"pathconf" , 0ULL, 0, NULL} + , {"commit" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + +struct nfsd_procs nfsd_proc4_values[] = { + { "null" , 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"commit" , 0ULL, 0, NULL} + , {"open" , 0ULL, 0, NULL} + , {"open_conf" , 0ULL, 0, NULL} + , {"open_noat" , 0ULL, 0, NULL} + , {"open_dgrd" , 0ULL, 0, NULL} + , {"close" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"fsinfo" , 0ULL, 0, NULL} + , {"renew" , 0ULL, 0, NULL} + , {"setclntid" , 0ULL, 0, NULL} + , {"confirm" , 0ULL, 0, NULL} + , {"lock" , 0ULL, 0, NULL} + , {"lockt" , 0ULL, 0, NULL} + , {"locku" , 0ULL, 0, NULL} + , {"access" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"lookup_root" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"symlink" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"pathconf" , 0ULL, 0, NULL} + , {"statfs" , 0ULL, 0, NULL} + , {"readlink" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"server_caps" , 0ULL, 0, NULL} + , {"delegreturn" , 0ULL, 0, NULL} + , {"getacl" , 0ULL, 0, NULL} + , {"setacl" , 0ULL, 0, NULL} + , {"fs_locations" , 0ULL, 0, NULL} + , {"rel_lkowner" , 0ULL, 0, NULL} + , {"secinfo" , 0ULL, 0, NULL} + , {"fsid_present" , 0ULL, 0, NULL} + , + + /* nfsv4.1 client ops */ + { "exchange_id" , 0ULL, 0, NULL} + , {"create_session" , 0ULL, 0, NULL} + , {"destroy_session" , 0ULL, 0, NULL} + , {"sequence" , 0ULL, 0, NULL} + , {"get_lease_time" , 0ULL, 0, NULL} + , {"reclaim_comp" , 0ULL, 0, NULL} + , {"layoutget" , 0ULL, 0, NULL} + , {"getdevinfo" , 0ULL, 0, NULL} + , {"layoutcommit" , 0ULL, 0, NULL} + , {"layoutreturn" , 0ULL, 0, NULL} + , {"secinfo_no" , 0ULL, 0, NULL} + , {"test_stateid" , 0ULL, 0, NULL} + , {"free_stateid" , 0ULL, 0, NULL} + , {"getdevicelist" , 0ULL, 0, NULL} + , {"bind_conn_to_ses", 0ULL, 0, NULL} + , {"destroy_clientid", 0ULL, 0, NULL} + , + + /* nfsv4.2 client ops */ + { "seek" , 0ULL, 0, NULL} + , {"allocate" , 0ULL, 0, NULL} + , {"deallocate" , 0ULL, 0, NULL} + , {"layoutstats" , 0ULL, 0, NULL} + , {"clone" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + +struct nfsd_procs nfsd4_ops_values[] = { + { "unused_op0" , 0ULL, 0, NULL} + , {"unused_op1" , 0ULL, 0, NULL} + , {"future_op2" , 0ULL, 0, NULL} + , {"access" , 0ULL, 0, NULL} + , {"close" , 0ULL, 0, NULL} + , {"commit" , 0ULL, 0, NULL} + , {"create" , 0ULL, 0, NULL} + , {"delegpurge" , 0ULL, 0, NULL} + , {"delegreturn" , 0ULL, 0, NULL} + , {"getattr" , 0ULL, 0, NULL} + , {"getfh" , 0ULL, 0, NULL} + , {"link" , 0ULL, 0, NULL} + , {"lock" , 0ULL, 0, NULL} + , {"lockt" , 0ULL, 0, NULL} + , {"locku" , 0ULL, 0, NULL} + , {"lookup" , 0ULL, 0, NULL} + , {"lookup_root" , 0ULL, 0, NULL} + , {"nverify" , 0ULL, 0, NULL} + , {"open" , 0ULL, 0, NULL} + , {"openattr" , 0ULL, 0, NULL} + , {"open_confirm" , 0ULL, 0, NULL} + , {"open_downgrade" , 0ULL, 0, NULL} + , {"putfh" , 0ULL, 0, NULL} + , {"putpubfh" , 0ULL, 0, NULL} + , {"putrootfh" 
, 0ULL, 0, NULL} + , {"read" , 0ULL, 0, NULL} + , {"readdir" , 0ULL, 0, NULL} + , {"readlink" , 0ULL, 0, NULL} + , {"remove" , 0ULL, 0, NULL} + , {"rename" , 0ULL, 0, NULL} + , {"renew" , 0ULL, 0, NULL} + , {"restorefh" , 0ULL, 0, NULL} + , {"savefh" , 0ULL, 0, NULL} + , {"secinfo" , 0ULL, 0, NULL} + , {"setattr" , 0ULL, 0, NULL} + , {"setclientid" , 0ULL, 0, NULL} + , {"setclientid_confirm" , 0ULL, 0, NULL} + , {"verify" , 0ULL, 0, NULL} + , {"write" , 0ULL, 0, NULL} + , {"release_lockowner" , 0ULL, 0, NULL} + , + + /* nfs41 */ + { "backchannel_ctl" , 0ULL, 0, NULL} + , {"bind_conn_to_session", 0ULL, 0, NULL} + , {"exchange_id" , 0ULL, 0, NULL} + , {"create_session" , 0ULL, 0, NULL} + , {"destroy_session" , 0ULL, 0, NULL} + , {"free_stateid" , 0ULL, 0, NULL} + , {"get_dir_delegation" , 0ULL, 0, NULL} + , {"getdeviceinfo" , 0ULL, 0, NULL} + , {"getdevicelist" , 0ULL, 0, NULL} + , {"layoutcommit" , 0ULL, 0, NULL} + , {"layoutget" , 0ULL, 0, NULL} + , {"layoutreturn" , 0ULL, 0, NULL} + , {"secinfo_no_name" , 0ULL, 0, NULL} + , {"sequence" , 0ULL, 0, NULL} + , {"set_ssv" , 0ULL, 0, NULL} + , {"test_stateid" , 0ULL, 0, NULL} + , {"want_delegation" , 0ULL, 0, NULL} + , {"destroy_clientid" , 0ULL, 0, NULL} + , {"reclaim_complete" , 0ULL, 0, NULL} + , + + /* nfs42 */ + { "allocate" , 0ULL, 0, NULL} + , {"copy" , 0ULL, 0, NULL} + , {"copy_notify" , 0ULL, 0, NULL} + , {"deallocate" , 0ULL, 0, NULL} + , {"ioadvise" , 0ULL, 0, NULL} + , {"layouterror" , 0ULL, 0, NULL} + , {"layoutstats" , 0ULL, 0, NULL} + , {"offload_cancel" , 0ULL, 0, NULL} + , {"offload_status" , 0ULL, 0, NULL} + , {"read_plus" , 0ULL, 0, NULL} + , {"seek" , 0ULL, 0, NULL} + , {"write_same" , 0ULL, 0, NULL} + , + + /* termination */ + { "" , 0ULL, 0, NULL} +}; + + +int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { + (void)dt; + static procfile *ff = NULL; + static int do_rc = -1, do_fh = -1, do_io = -1, do_th = -1, do_ra = -1, do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1, do_proc4ops = -1; + static int ra_warning = 0, th_warning = 0, proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0; + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfsd"); + ff = procfile_open(config_get("plugin:proc:/proc/net/rpc/nfsd", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + if(unlikely(do_rc == -1)) { + do_rc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read cache", 1); + do_fh = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "file handles", 1); + do_io = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "I/O", 1); + do_th = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "threads", 1); + do_ra = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read ahead", 1); + do_net = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "network", 1); + do_rpc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "rpc", 1); + do_proc2 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v2 procedures", 1); + do_proc3 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v3 procedures", 1); + do_proc4 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 procedures", 1); + do_proc4ops = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 operations", 1); + } + + // if they are 
enabled, reset them to 1 + // later we do them = 2 to avoid doing strcmp() for all lines + if(do_rc) do_rc = 1; + if(do_fh) do_fh = 1; + if(do_io) do_io = 1; + if(do_th) do_th = 1; + if(do_ra) do_ra = 1; + if(do_net) do_net = 1; + if(do_rpc) do_rpc = 1; + if(do_proc2) do_proc2 = 1; + if(do_proc3) do_proc3 = 1; + if(do_proc4) do_proc4 = 1; + if(do_proc4ops) do_proc4ops = 1; + + size_t lines = procfile_lines(ff), l; + + char *type; + unsigned long long rc_hits = 0, rc_misses = 0, rc_nocache = 0; + unsigned long long fh_stale = 0, fh_total_lookups = 0, fh_anonymous_lookups = 0, fh_dir_not_in_dcache = 0, fh_non_dir_not_in_dcache = 0; + unsigned long long io_read = 0, io_write = 0; + unsigned long long th_threads = 0, th_fullcnt = 0, th_hist10 = 0, th_hist20 = 0, th_hist30 = 0, th_hist40 = 0, th_hist50 = 0, th_hist60 = 0, th_hist70 = 0, th_hist80 = 0, th_hist90 = 0, th_hist100 = 0; + unsigned long long ra_size = 0, ra_hist10 = 0, ra_hist20 = 0, ra_hist30 = 0, ra_hist40 = 0, ra_hist50 = 0, ra_hist60 = 0, ra_hist70 = 0, ra_hist80 = 0, ra_hist90 = 0, ra_hist100 = 0, ra_none = 0; + unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0; + unsigned long long rpc_calls = 0, rpc_bad_format = 0, rpc_bad_auth = 0, rpc_bad_client = 0; + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(!words)) continue; + + type = procfile_lineword(ff, l, 0); + + if(do_rc == 1 && strcmp(type, "rc") == 0) { + if(unlikely(words < 4)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 4); + continue; + } + + rc_hits = str2ull(procfile_lineword(ff, l, 1)); + rc_misses = str2ull(procfile_lineword(ff, l, 2)); + rc_nocache = str2ull(procfile_lineword(ff, l, 3)); + + unsigned long long sum = rc_hits + rc_misses + rc_nocache; + if(sum == 0ULL) do_rc = -1; + else do_rc = 2; + } + else if(do_fh == 1 && strcmp(type, "fh") == 0) { + if(unlikely(words < 6)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6); + continue; + } + + fh_stale = str2ull(procfile_lineword(ff, l, 1)); + fh_total_lookups = str2ull(procfile_lineword(ff, l, 2)); + fh_anonymous_lookups = str2ull(procfile_lineword(ff, l, 3)); + fh_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 4)); + fh_non_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 5)); + + unsigned long long sum = fh_stale + fh_total_lookups + fh_anonymous_lookups + fh_dir_not_in_dcache + fh_non_dir_not_in_dcache; + if(sum == 0ULL) do_fh = -1; + else do_fh = 2; + } + else if(do_io == 1 && strcmp(type, "io") == 0) { + if(unlikely(words < 3)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 3); + continue; + } + + io_read = str2ull(procfile_lineword(ff, l, 1)); + io_write = str2ull(procfile_lineword(ff, l, 2)); + + unsigned long long sum = io_read + io_write; + if(sum == 0ULL) do_io = -1; + else do_io = 2; + } + else if(do_th == 1 && strcmp(type, "th") == 0) { + if(unlikely(words < 13)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13); + continue; + } + + th_threads = str2ull(procfile_lineword(ff, l, 1)); + th_fullcnt = str2ull(procfile_lineword(ff, l, 2)); + th_hist10 = (unsigned long long)(atof(procfile_lineword(ff, l, 3)) * 1000.0); + th_hist20 = (unsigned long long)(atof(procfile_lineword(ff, l, 4)) * 1000.0); + th_hist30 = (unsigned long long)(atof(procfile_lineword(ff, l, 5)) * 1000.0); + th_hist40 = (unsigned long long)(atof(procfile_lineword(ff, l, 6)) * 1000.0); + th_hist50 
= (unsigned long long)(atof(procfile_lineword(ff, l, 7)) * 1000.0); + th_hist60 = (unsigned long long)(atof(procfile_lineword(ff, l, 8)) * 1000.0); + th_hist70 = (unsigned long long)(atof(procfile_lineword(ff, l, 9)) * 1000.0); + th_hist80 = (unsigned long long)(atof(procfile_lineword(ff, l, 10)) * 1000.0); + th_hist90 = (unsigned long long)(atof(procfile_lineword(ff, l, 11)) * 1000.0); + th_hist100 = (unsigned long long)(atof(procfile_lineword(ff, l, 12)) * 1000.0); + + // threads histogram has been disabled on recent kernels + // http://permalink.gmane.org/gmane.linux.nfs/24528 + unsigned long long sum = th_hist10 + th_hist20 + th_hist30 + th_hist40 + th_hist50 + th_hist60 + th_hist70 + th_hist80 + th_hist90 + th_hist100; + if(sum == 0ULL) { + if(!th_warning) { + info("Disabling /proc/net/rpc/nfsd threads histogram. It seems unused on this machine. It will be enabled automatically when found with data in it."); + th_warning = 1; + } + do_th = -1; + } + else do_th = 2; + } + else if(do_ra == 1 && strcmp(type, "ra") == 0) { + if(unlikely(words < 13)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13); + continue; + } + + ra_size = str2ull(procfile_lineword(ff, l, 1)); + ra_hist10 = str2ull(procfile_lineword(ff, l, 2)); + ra_hist20 = str2ull(procfile_lineword(ff, l, 3)); + ra_hist30 = str2ull(procfile_lineword(ff, l, 4)); + ra_hist40 = str2ull(procfile_lineword(ff, l, 5)); + ra_hist50 = str2ull(procfile_lineword(ff, l, 6)); + ra_hist60 = str2ull(procfile_lineword(ff, l, 7)); + ra_hist70 = str2ull(procfile_lineword(ff, l, 8)); + ra_hist80 = str2ull(procfile_lineword(ff, l, 9)); + ra_hist90 = str2ull(procfile_lineword(ff, l, 10)); + ra_hist100 = str2ull(procfile_lineword(ff, l, 11)); + ra_none = str2ull(procfile_lineword(ff, l, 12)); + + unsigned long long sum = ra_hist10 + ra_hist20 + ra_hist30 + ra_hist40 + ra_hist50 + ra_hist60 + ra_hist70 + ra_hist80 + ra_hist90 + ra_hist100 + ra_none; + if(sum == 0ULL) { + if(!ra_warning) { + info("Disabling /proc/net/rpc/nfsd read ahead histogram. It seems unused on this machine. 
It will be enabled automatically when found with data in it."); + ra_warning = 1; + } + do_ra = -1; + } + else do_ra = 2; + } + else if(do_net == 1 && strcmp(type, "net") == 0) { + if(unlikely(words < 5)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 5); + continue; + } + + net_count = str2ull(procfile_lineword(ff, l, 1)); + net_udp_count = str2ull(procfile_lineword(ff, l, 2)); + net_tcp_count = str2ull(procfile_lineword(ff, l, 3)); + net_tcp_connections = str2ull(procfile_lineword(ff, l, 4)); + + unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections; + if(sum == 0ULL) do_net = -1; + else do_net = 2; + } + else if(do_rpc == 1 && strcmp(type, "rpc") == 0) { + if(unlikely(words < 6)) { + error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6); + continue; + } + + rpc_calls = str2ull(procfile_lineword(ff, l, 1)); + rpc_bad_format = str2ull(procfile_lineword(ff, l, 2)); + rpc_bad_auth = str2ull(procfile_lineword(ff, l, 3)); + rpc_bad_client = str2ull(procfile_lineword(ff, l, 4)); + + unsigned long long sum = rpc_calls + rpc_bad_format + rpc_bad_auth + rpc_bad_client; + if(sum == 0ULL) do_rpc = -1; + else do_rpc = 2; + } + else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfsd_proc2_values[i].name[0] ; i++, j++) { + nfsd_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfsd_proc2_values[i].present = 1; + sum += nfsd_proc2_values[i].value; + } + + if(sum == 0ULL) { + if(!proc2_warning) { + info("Disabling /proc/net/rpc/nfsd v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); + proc2_warning = 1; + } + do_proc2 = 0; + } + else do_proc2 = 2; + } + else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfsd_proc3_values[i].name[0] ; i++, j++) { + nfsd_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfsd_proc3_values[i].present = 1; + sum += nfsd_proc3_values[i].value; + } + + if(sum == 0ULL) { + if(!proc3_warning) { + info("Disabling /proc/net/rpc/nfsd v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); + proc3_warning = 1; + } + do_proc3 = 0; + } + else do_proc3 = 2; + } + else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfsd_proc4_values[i].name[0] ; i++, j++) { + nfsd_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfsd_proc4_values[i].present = 1; + sum += nfsd_proc4_values[i].value; + } + + if(sum == 0ULL) { + if(!proc4_warning) { + info("Disabling /proc/net/rpc/nfsd v4 procedure calls chart. It seems unused on this machine. 
It will be enabled automatically when found with data in it."); + proc4_warning = 1; + } + do_proc4 = 0; + } + else do_proc4 = 2; + } + else if(do_proc4ops == 1 && strcmp(type, "proc4ops") == 0) { + // the first number is the count of numbers present + // so we start from word 2 + + unsigned long long sum = 0; + unsigned int i, j; + for(i = 0, j = 2; j < words && nfsd4_ops_values[i].name[0] ; i++, j++) { + nfsd4_ops_values[i].value = str2ull(procfile_lineword(ff, l, j)); + nfsd4_ops_values[i].present = 1; + sum += nfsd4_ops_values[i].value; + } + + if(sum == 0ULL) { + if(!proc4ops_warning) { + info("Disabling /proc/net/rpc/nfsd v4 operations chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); + proc4ops_warning = 1; + } + do_proc4ops = 0; + } + else do_proc4ops = 2; + } + } + + // -------------------------------------------------------------------- + + if(do_rc == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_hits = NULL, + *rd_misses = NULL, + *rd_nocache = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "readcache" + , NULL + , "cache" + , NULL + , "NFS Server Read Cache" + , "reads/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_READCACHE + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_hits = rrddim_add(st, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_misses = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_nocache = rrddim_add(st, "nocache", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_hits, rc_hits); + rrddim_set_by_pointer(st, rd_misses, rc_misses); + rrddim_set_by_pointer(st, rd_nocache, rc_nocache); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_fh == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_stale = NULL, + *rd_total_lookups = NULL, + *rd_anonymous_lookups = NULL, + *rd_dir_not_in_dcache = NULL, + *rd_non_dir_not_in_dcache = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "filehandles" + , NULL + , "filehandles" + , NULL + , "NFS Server File Handles" + , "handles/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_FILEHANDLES + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_total_lookups = rrddim_add(st, "total_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_anonymous_lookups = rrddim_add(st, "anonymous_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_dir_not_in_dcache = rrddim_add(st, "dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_non_dir_not_in_dcache = rrddim_add(st, "non_dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_stale, fh_stale); + rrddim_set_by_pointer(st, rd_total_lookups, fh_total_lookups); + rrddim_set_by_pointer(st, rd_anonymous_lookups, fh_anonymous_lookups); + rrddim_set_by_pointer(st, rd_dir_not_in_dcache, fh_dir_not_in_dcache); + rrddim_set_by_pointer(st, rd_non_dir_not_in_dcache, fh_non_dir_not_in_dcache); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_io == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_read = NULL, + *rd_write = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "io" + , NULL + , "io" + , 
NULL + , "NFS Server I/O" + , "kilobytes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_IO + , update_every + , RRDSET_TYPE_AREA + ); + + rd_read = rrddim_add(st, "read", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rd_write = rrddim_add(st, "write", NULL, -1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_read, io_read); + rrddim_set_by_pointer(st, rd_write, io_write); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_th == 2) { + { + static RRDSET *st = NULL; + static RRDDIM *rd_threads = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "threads" + , NULL + , "threads" + , NULL + , "NFS Server Threads" + , "threads" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_THREADS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_threads, th_threads); + rrdset_done(st); + } + + { + static RRDSET *st = NULL; + static RRDDIM *rd_full_count = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "threads_fullcnt" + , NULL + , "threads" + , NULL + , "NFS Server Threads Full Count" + , "ops/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT + , update_every + , RRDSET_TYPE_LINE + ); + + rd_full_count = rrddim_add(st, "full_count", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_full_count, th_fullcnt); + rrdset_done(st); + } + + { + static RRDSET *st = NULL; + static RRDDIM *rd_th_hist10 = NULL, + *rd_th_hist20 = NULL, + *rd_th_hist30 = NULL, + *rd_th_hist40 = NULL, + *rd_th_hist50 = NULL, + *rd_th_hist60 = NULL, + *rd_th_hist70 = NULL, + *rd_th_hist80 = NULL, + *rd_th_hist90 = NULL, + *rd_th_hist100 = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "threads_histogram" + , NULL + , "threads" + , NULL + , "NFS Server Threads Usage Histogram" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM + , update_every + , RRDSET_TYPE_LINE + ); + + rd_th_hist10 = rrddim_add(st, "0%-10%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist20 = rrddim_add(st, "10%-20%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist30 = rrddim_add(st, "20%-30%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist40 = rrddim_add(st, "30%-40%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist50 = rrddim_add(st, "40%-50%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist60 = rrddim_add(st, "50%-60%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist70 = rrddim_add(st, "60%-70%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist80 = rrddim_add(st, "70%-80%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist90 = rrddim_add(st, "80%-90%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_th_hist100 = rrddim_add(st, "90%-100%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_th_hist10, th_hist10); + rrddim_set_by_pointer(st, rd_th_hist20, th_hist20); + rrddim_set_by_pointer(st, rd_th_hist30, th_hist30); + rrddim_set_by_pointer(st, rd_th_hist40, th_hist40); + rrddim_set_by_pointer(st, rd_th_hist50, th_hist50); + rrddim_set_by_pointer(st, rd_th_hist60, th_hist60); + rrddim_set_by_pointer(st, rd_th_hist70, th_hist70); + 
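+ /* the parser scaled each busy-time fraction by 1000; the 1000 divisor on every dimension turns the stored integers back into percentages */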
rrddim_set_by_pointer(st, rd_th_hist80, th_hist80); + rrddim_set_by_pointer(st, rd_th_hist90, th_hist90); + rrddim_set_by_pointer(st, rd_th_hist100, th_hist100); + rrdset_done(st); + } + } + + // -------------------------------------------------------------------- + + if(do_ra == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_ra_hist10 = NULL, + *rd_ra_hist20 = NULL, + *rd_ra_hist30 = NULL, + *rd_ra_hist40 = NULL, + *rd_ra_hist50 = NULL, + *rd_ra_hist60 = NULL, + *rd_ra_hist70 = NULL, + *rd_ra_hist80 = NULL, + *rd_ra_hist90 = NULL, + *rd_ra_hist100 = NULL, + *rd_ra_none = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "readahead" + , NULL + , "readahead" + , NULL + , "NFS Server Read Ahead Depth" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_READAHEAD + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_ra_hist10 = rrddim_add(st, "10%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist20 = rrddim_add(st, "20%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist30 = rrddim_add(st, "30%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist40 = rrddim_add(st, "40%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist50 = rrddim_add(st, "50%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist60 = rrddim_add(st, "60%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist70 = rrddim_add(st, "70%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist80 = rrddim_add(st, "80%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist90 = rrddim_add(st, "90%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_hist100 = rrddim_add(st, "100%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_ra_none = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else rrdset_next(st); + + // ignore ra_size + (void)ra_size; + + rrddim_set_by_pointer(st, rd_ra_hist10, ra_hist10); + rrddim_set_by_pointer(st, rd_ra_hist20, ra_hist20); + rrddim_set_by_pointer(st, rd_ra_hist30, ra_hist30); + rrddim_set_by_pointer(st, rd_ra_hist40, ra_hist40); + rrddim_set_by_pointer(st, rd_ra_hist50, ra_hist50); + rrddim_set_by_pointer(st, rd_ra_hist60, ra_hist60); + rrddim_set_by_pointer(st, rd_ra_hist70, ra_hist70); + rrddim_set_by_pointer(st, rd_ra_hist80, ra_hist80); + rrddim_set_by_pointer(st, rd_ra_hist90, ra_hist90); + rrddim_set_by_pointer(st, rd_ra_hist100,ra_hist100); + rrddim_set_by_pointer(st, rd_ra_none, ra_none); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_net == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_udp = NULL, + *rd_tcp = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "net" + , NULL + , "network" + , NULL + , "NFS Server Network Statistics" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_NET + , update_every + , RRDSET_TYPE_STACKED + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + // ignore net_count, net_tcp_connections + (void)net_count; + (void)net_tcp_connections; + + rrddim_set_by_pointer(st, rd_udp, net_udp_count); + rrddim_set_by_pointer(st, rd_tcp, net_tcp_count); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + 
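Three dimension algorithms appear in the charts above, and they decide what the stored integers mean when rendered: RRD_ALGORITHM_ABSOLUTE plots the collected value as-is, RRD_ALGORITHM_INCREMENTAL treats the value as a monotonic counter and plots the per-second rate between collections, and RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL (the readahead chart) plots each dimension's delta as a percentage of the sum of all dimensions' deltas. A negative multiplier, as on the write and *_not_in_dcache dimensions, only mirrors a dimension below the zero line. The standalone sketch below computes all three interpretations from two samples taken one second apart; the counter values are made up purely for illustration:

    #include <stdio.h>

    int main(void) {
        /* Two collections of three counters, taken 1 second apart. */
        const unsigned long long prev[3] = { 1000, 500, 100 };
        const unsigned long long curr[3] = { 1300, 600, 100 };
        const double dt = 1.0;   /* seconds between collections */

        unsigned long long total_delta = 0;
        for(int i = 0; i < 3; i++) total_delta += curr[i] - prev[i];

        for(int i = 0; i < 3; i++) {
            unsigned long long delta = curr[i] - prev[i];
            printf("dim %d: absolute=%llu incremental=%.0f/s pcent-over-diff-total=%.1f%%\n",
                   i, curr[i], delta / dt,
                   total_delta ? 100.0 * (double)delta / (double)total_delta : 0.0);
        }
        return 0;
    }

This is why the charts show rates and percentages even though the kernel only exposes lifetime counters.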
if(do_rpc == 2) { + static RRDSET *st = NULL; + static RRDDIM *rd_calls = NULL, + *rd_bad_format = NULL, + *rd_bad_auth = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "rpc" + , NULL + , "rpc" + , NULL + , "NFS Server Remote Procedure Calls Statistics" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_RPC + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_bad_auth = rrddim_add(st, "bad_auth", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + // ignore rpc_bad_client + (void)rpc_bad_client; + + rrddim_set_by_pointer(st, rd_calls, rpc_calls); + rrddim_set_by_pointer(st, rd_bad_format, rpc_bad_format); + rrddim_set_by_pointer(st, rd_bad_auth, rpc_bad_auth); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc2 == 2) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "proc2" + , NULL + , "nfsv2rpc" + , NULL + , "NFS v2 Server Remote Procedure Calls" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_PROC2 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfsd_proc2_values[i].present ; i++) { + if(unlikely(!nfsd_proc2_values[i].rd)) + nfsd_proc2_values[i].rd = rrddim_add(st, nfsd_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfsd_proc2_values[i].rd, nfsd_proc2_values[i].value); + } + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc3 == 2) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "proc3" + , NULL + , "nfsv3rpc" + , NULL + , "NFS v3 Server Remote Procedure Calls" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_PROC3 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfsd_proc3_values[i].present ; i++) { + if(unlikely(!nfsd_proc3_values[i].rd)) + nfsd_proc3_values[i].rd = rrddim_add(st, nfsd_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfsd_proc3_values[i].rd, nfsd_proc3_values[i].value); + } + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc4 == 2) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "proc4" + , NULL + , "nfsv4rpc" + , NULL + , "NFS v4 Server Remote Procedure Calls" + , "calls/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_PROC4 + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfsd_proc4_values[i].present ; i++) { + if(unlikely(!nfsd_proc4_values[i].rd)) + nfsd_proc4_values[i].rd = rrddim_add(st, nfsd_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfsd_proc4_values[i].rd, nfsd_proc4_values[i].value); + } + + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_proc4ops == 2) { + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "proc4ops" + 
, NULL + , "nfsv2ops" + , NULL + , "NFS v4 Server Operations" + , "operations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_PROC4OPS + , update_every + , RRDSET_TYPE_STACKED + ); + } + else rrdset_next(st); + + size_t i; + for(i = 0; nfsd4_ops_values[i].present ; i++) { + if(unlikely(!nfsd4_ops_values[i].rd)) + nfsd4_ops_values[i].rd = rrddim_add(st, nfsd4_ops_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st, nfsd4_ops_values[i].rd, nfsd4_ops_values[i].value); + } + + rrdset_done(st); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c new file mode 100644 index 000000000..bd1062e98 --- /dev/null +++ b/collectors/proc.plugin/proc_net_sctp_snmp.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" +#define PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME "/proc/net/sctp/snmp" + +int do_proc_net_sctp_snmp(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + + static int + do_associations = -1, + do_transitions = -1, + do_packet_errors = -1, + do_packets = -1, + do_fragmentation = -1, + do_chunk_types = -1; + + static ARL_BASE *arl_base = NULL; + + static unsigned long long SctpCurrEstab = 0ULL; + static unsigned long long SctpActiveEstabs = 0ULL; + static unsigned long long SctpPassiveEstabs = 0ULL; + static unsigned long long SctpAborteds = 0ULL; + static unsigned long long SctpShutdowns = 0ULL; + static unsigned long long SctpOutOfBlues = 0ULL; + static unsigned long long SctpChecksumErrors = 0ULL; + static unsigned long long SctpOutCtrlChunks = 0ULL; + static unsigned long long SctpOutOrderChunks = 0ULL; + static unsigned long long SctpOutUnorderChunks = 0ULL; + static unsigned long long SctpInCtrlChunks = 0ULL; + static unsigned long long SctpInOrderChunks = 0ULL; + static unsigned long long SctpInUnorderChunks = 0ULL; + static unsigned long long SctpFragUsrMsgs = 0ULL; + static unsigned long long SctpReasmUsrMsgs = 0ULL; + static unsigned long long SctpOutSCTPPacks = 0ULL; + static unsigned long long SctpInSCTPPacks = 0ULL; + static unsigned long long SctpT1InitExpireds = 0ULL; + static unsigned long long SctpT1CookieExpireds = 0ULL; + static unsigned long long SctpT2ShutdownExpireds = 0ULL; + static unsigned long long SctpT3RtxExpireds = 0ULL; + static unsigned long long SctpT4RtoExpireds = 0ULL; + static unsigned long long SctpT5ShutdownGuardExpireds = 0ULL; + static unsigned long long SctpDelaySackExpireds = 0ULL; + static unsigned long long SctpAutocloseExpireds = 0ULL; + static unsigned long long SctpT3Retransmits = 0ULL; + static unsigned long long SctpPmtudRetransmits = 0ULL; + static unsigned long long SctpFastRetransmits = 0ULL; + static unsigned long long SctpInPktSoftirq = 0ULL; + static unsigned long long SctpInPktBacklog = 0ULL; + static unsigned long long SctpInPktDiscards = 0ULL; + static unsigned long long SctpInDataChunkDiscards = 0ULL; + + if(unlikely(!arl_base)) { + do_associations = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "established associations", CONFIG_BOOLEAN_AUTO); + do_transitions = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "association transitions", CONFIG_BOOLEAN_AUTO); + do_fragmentation = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "fragmentation", CONFIG_BOOLEAN_AUTO); + do_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packets", CONFIG_BOOLEAN_AUTO); + do_packet_errors 
= config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packet errors", CONFIG_BOOLEAN_AUTO); + do_chunk_types = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "chunk types", CONFIG_BOOLEAN_AUTO); + + arl_base = arl_create("sctp", NULL, 60); + arl_expect(arl_base, "SctpCurrEstab", &SctpCurrEstab); + arl_expect(arl_base, "SctpActiveEstabs", &SctpActiveEstabs); + arl_expect(arl_base, "SctpPassiveEstabs", &SctpPassiveEstabs); + arl_expect(arl_base, "SctpAborteds", &SctpAborteds); + arl_expect(arl_base, "SctpShutdowns", &SctpShutdowns); + arl_expect(arl_base, "SctpOutOfBlues", &SctpOutOfBlues); + arl_expect(arl_base, "SctpChecksumErrors", &SctpChecksumErrors); + arl_expect(arl_base, "SctpOutCtrlChunks", &SctpOutCtrlChunks); + arl_expect(arl_base, "SctpOutOrderChunks", &SctpOutOrderChunks); + arl_expect(arl_base, "SctpOutUnorderChunks", &SctpOutUnorderChunks); + arl_expect(arl_base, "SctpInCtrlChunks", &SctpInCtrlChunks); + arl_expect(arl_base, "SctpInOrderChunks", &SctpInOrderChunks); + arl_expect(arl_base, "SctpInUnorderChunks", &SctpInUnorderChunks); + arl_expect(arl_base, "SctpFragUsrMsgs", &SctpFragUsrMsgs); + arl_expect(arl_base, "SctpReasmUsrMsgs", &SctpReasmUsrMsgs); + arl_expect(arl_base, "SctpOutSCTPPacks", &SctpOutSCTPPacks); + arl_expect(arl_base, "SctpInSCTPPacks", &SctpInSCTPPacks); + arl_expect(arl_base, "SctpT1InitExpireds", &SctpT1InitExpireds); + arl_expect(arl_base, "SctpT1CookieExpireds", &SctpT1CookieExpireds); + arl_expect(arl_base, "SctpT2ShutdownExpireds", &SctpT2ShutdownExpireds); + arl_expect(arl_base, "SctpT3RtxExpireds", &SctpT3RtxExpireds); + arl_expect(arl_base, "SctpT4RtoExpireds", &SctpT4RtoExpireds); + arl_expect(arl_base, "SctpT5ShutdownGuardExpireds", &SctpT5ShutdownGuardExpireds); + arl_expect(arl_base, "SctpDelaySackExpireds", &SctpDelaySackExpireds); + arl_expect(arl_base, "SctpAutocloseExpireds", &SctpAutocloseExpireds); + arl_expect(arl_base, "SctpT3Retransmits", &SctpT3Retransmits); + arl_expect(arl_base, "SctpPmtudRetransmits", &SctpPmtudRetransmits); + arl_expect(arl_base, "SctpFastRetransmits", &SctpFastRetransmits); + arl_expect(arl_base, "SctpInPktSoftirq", &SctpInPktSoftirq); + arl_expect(arl_base, "SctpInPktBacklog", &SctpInPktBacklog); + arl_expect(arl_base, "SctpInPktDiscards", &SctpInPktDiscards); + arl_expect(arl_base, "SctpInDataChunkDiscards", &SctpInDataChunkDiscards); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sctp/snmp"); + ff = procfile_open(config_get("plugin:proc:/proc/net/sctp/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + arl_begin(arl_base); + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 2)) { + if(unlikely(words)) error("Cannot read /proc/net/sctp/snmp line %zu. 
Expected 2 params, read %zu.", l, words); + continue; + } + + if(unlikely(arl_check(arl_base, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + // -------------------------------------------------------------------- + + if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO && SctpCurrEstab)) { + do_associations = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_established = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "sctp" + , "established" + , NULL + , "associations" + , NULL + , "SCTP current total number of established associations" + , "associations" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME + , NETDATA_CHART_PRIO_SCTP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_established = rrddim_add(st, "SctpCurrEstab", "established", 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_established, SctpCurrEstab); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO && (SctpActiveEstabs || SctpPassiveEstabs || SctpAborteds || SctpShutdowns))) { + do_transitions = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_active = NULL, + *rd_passive = NULL, + *rd_aborted = NULL, + *rd_shutdown = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "sctp" + , "transitions" + , NULL + , "transitions" + , NULL + , "SCTP Association Transitions" + , "transitions/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME + , NETDATA_CHART_PRIO_SCTP + 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_active = rrddim_add(st, "SctpActiveEstabs", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_passive = rrddim_add(st, "SctpPassiveEstabs", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_aborted = rrddim_add(st, "SctpAborteds", "aborted", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_shutdown = rrddim_add(st, "SctpShutdowns", "shutdown", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_active, SctpActiveEstabs); + rrddim_set_by_pointer(st, rd_passive, SctpPassiveEstabs); + rrddim_set_by_pointer(st, rd_aborted, SctpAborteds); + rrddim_set_by_pointer(st, rd_shutdown, SctpShutdowns); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO && (SctpInSCTPPacks || SctpOutSCTPPacks))) { + do_packets = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, + *rd_sent = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "sctp" + , "packets" + , NULL + , "packets" + , NULL + , "SCTP Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME + , NETDATA_CHART_PRIO_SCTP + 20 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_received = rrddim_add(st, "SctpInSCTPPacks", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "SctpOutSCTPPacks", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, SctpInSCTPPacks); + rrddim_set_by_pointer(st, rd_sent, SctpOutSCTPPacks); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == 
CONFIG_BOOLEAN_AUTO && (SctpOutOfBlues || SctpChecksumErrors))) { + do_packet_errors = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_invalid = NULL, + *rd_csum = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "sctp" + , "packet_errors" + , NULL + , "packets" + , NULL + , "SCTP Packet Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME + , NETDATA_CHART_PRIO_SCTP + 30 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_invalid = rrddim_add(st, "SctpOutOfBlues", "invalid", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_csum = rrddim_add(st, "SctpChecksumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_invalid, SctpOutOfBlues); + rrddim_set_by_pointer(st, rd_csum, SctpChecksumErrors); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO && (SctpFragUsrMsgs || SctpReasmUsrMsgs))) { + do_fragmentation = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_fragmented = NULL, + *rd_reassembled = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "sctp" + , "fragmentation" + , NULL + , "fragmentation" + , NULL + , "SCTP Fragmentation" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME + , NETDATA_CHART_PRIO_SCTP + 40 + , update_every + , RRDSET_TYPE_LINE); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_reassembled = rrddim_add(st, "SctpReasmUsrMsgs", "reassembled", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_fragmented = rrddim_add(st, "SctpFragUsrMsgs", "fragmented", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_reassembled, SctpReasmUsrMsgs); + rrddim_set_by_pointer(st, rd_fragmented, SctpFragUsrMsgs); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO + && (SctpInCtrlChunks || SctpInOrderChunks || SctpInUnorderChunks || SctpOutCtrlChunks || SctpOutOrderChunks || SctpOutUnorderChunks))) { + do_chunk_types = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM + *rd_InCtrl = NULL, + *rd_InOrder = NULL, + *rd_InUnorder = NULL, + *rd_OutCtrl = NULL, + *rd_OutOrder = NULL, + *rd_OutUnorder = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "sctp" + , "chunks" + , NULL + , "chunks" + , NULL + , "SCTP Chunk Types" + , "chunks/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME + , NETDATA_CHART_PRIO_SCTP + 50 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_InCtrl = rrddim_add(st, "SctpInCtrlChunks", "InCtrl", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InOrder = rrddim_add(st, "SctpInOrderChunks", "InOrder", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InUnorder = rrddim_add(st, "SctpInUnorderChunks", "InUnorder", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutCtrl = rrddim_add(st, "SctpOutCtrlChunks", "OutCtrl", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutOrder = rrddim_add(st, "SctpOutOrderChunks", "OutOrder", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutUnorder = rrddim_add(st, "SctpOutUnorderChunks", "OutUnorder", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InCtrl, SctpInCtrlChunks); + rrddim_set_by_pointer(st, rd_InOrder, SctpInOrderChunks); 
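Every chart in this collector is gated by the same tri-state pattern visible in the conditions above: config_get_boolean_ondemand() yields YES, NO, or AUTO; in AUTO mode the chart stays silent until one of its counters turns non-zero, at which point the flag is latched to CONFIG_BOOLEAN_YES so the chart keeps updating even if the counters later read zero again. A minimal model of that latch, with illustrative names rather than the real config API:

    #include <stdio.h>

    enum { BOOL_NO = 0, BOOL_YES = 1, BOOL_AUTO = 2 };

    /* Returns 1 when the chart should be sent, latching AUTO to YES
     * the first time data is seen. A sketch of the pattern only. */
    static int chart_enabled(int *flag, unsigned long long data_seen) {
        if(*flag == BOOL_YES || (*flag == BOOL_AUTO && data_seen)) {
            *flag = BOOL_YES;   /* latch: once on, stays on */
            return 1;
        }
        return 0;
    }

    int main(void) {
        int do_chunks = BOOL_AUTO;
        printf("%d\n", chart_enabled(&do_chunks, 0));  /* 0: no data yet */
        printf("%d\n", chart_enabled(&do_chunks, 7));  /* 1: latches to YES */
        printf("%d\n", chart_enabled(&do_chunks, 0));  /* 1: stays on */
        return 0;
    }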
+ rrddim_set_by_pointer(st, rd_InUnorder, SctpInUnorderChunks); + rrddim_set_by_pointer(st, rd_OutCtrl, SctpOutCtrlChunks); + rrddim_set_by_pointer(st, rd_OutOrder, SctpOutOrderChunks); + rrddim_set_by_pointer(st, rd_OutUnorder, SctpOutUnorderChunks); + rrdset_done(st); + } + + return 0; +} + diff --git a/collectors/proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c new file mode 100644 index 000000000..ffd368f6e --- /dev/null +++ b/collectors/proc.plugin/proc_net_snmp.c @@ -0,0 +1,1085 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" +#define PLUGIN_PROC_MODULE_NET_SNMP_NAME "/proc/net/snmp" + +#define RRD_TYPE_NET_SNMP "ipv4" + +static struct proc_net_snmp { + // kernel_uint_t ip_Forwarding; + kernel_uint_t ip_DefaultTTL; + kernel_uint_t ip_InReceives; + kernel_uint_t ip_InHdrErrors; + kernel_uint_t ip_InAddrErrors; + kernel_uint_t ip_ForwDatagrams; + kernel_uint_t ip_InUnknownProtos; + kernel_uint_t ip_InDiscards; + kernel_uint_t ip_InDelivers; + kernel_uint_t ip_OutRequests; + kernel_uint_t ip_OutDiscards; + kernel_uint_t ip_OutNoRoutes; + kernel_uint_t ip_ReasmTimeout; + kernel_uint_t ip_ReasmReqds; + kernel_uint_t ip_ReasmOKs; + kernel_uint_t ip_ReasmFails; + kernel_uint_t ip_FragOKs; + kernel_uint_t ip_FragFails; + kernel_uint_t ip_FragCreates; + + kernel_uint_t icmp_InMsgs; + kernel_uint_t icmp_OutMsgs; + kernel_uint_t icmp_InErrors; + kernel_uint_t icmp_OutErrors; + kernel_uint_t icmp_InCsumErrors; + + kernel_uint_t icmpmsg_InEchoReps; + kernel_uint_t icmpmsg_OutEchoReps; + kernel_uint_t icmpmsg_InDestUnreachs; + kernel_uint_t icmpmsg_OutDestUnreachs; + kernel_uint_t icmpmsg_InRedirects; + kernel_uint_t icmpmsg_OutRedirects; + kernel_uint_t icmpmsg_InEchos; + kernel_uint_t icmpmsg_OutEchos; + kernel_uint_t icmpmsg_InRouterAdvert; + kernel_uint_t icmpmsg_OutRouterAdvert; + kernel_uint_t icmpmsg_InRouterSelect; + kernel_uint_t icmpmsg_OutRouterSelect; + kernel_uint_t icmpmsg_InTimeExcds; + kernel_uint_t icmpmsg_OutTimeExcds; + kernel_uint_t icmpmsg_InParmProbs; + kernel_uint_t icmpmsg_OutParmProbs; + kernel_uint_t icmpmsg_InTimestamps; + kernel_uint_t icmpmsg_OutTimestamps; + kernel_uint_t icmpmsg_InTimestampReps; + kernel_uint_t icmpmsg_OutTimestampReps; + + //kernel_uint_t tcp_RtoAlgorithm; + //kernel_uint_t tcp_RtoMin; + //kernel_uint_t tcp_RtoMax; + ssize_t tcp_MaxConn; + kernel_uint_t tcp_ActiveOpens; + kernel_uint_t tcp_PassiveOpens; + kernel_uint_t tcp_AttemptFails; + kernel_uint_t tcp_EstabResets; + kernel_uint_t tcp_CurrEstab; + kernel_uint_t tcp_InSegs; + kernel_uint_t tcp_OutSegs; + kernel_uint_t tcp_RetransSegs; + kernel_uint_t tcp_InErrs; + kernel_uint_t tcp_OutRsts; + kernel_uint_t tcp_InCsumErrors; + + kernel_uint_t udp_InDatagrams; + kernel_uint_t udp_NoPorts; + kernel_uint_t udp_InErrors; + kernel_uint_t udp_OutDatagrams; + kernel_uint_t udp_RcvbufErrors; + kernel_uint_t udp_SndbufErrors; + kernel_uint_t udp_InCsumErrors; + kernel_uint_t udp_IgnoredMulti; + + kernel_uint_t udplite_InDatagrams; + kernel_uint_t udplite_NoPorts; + kernel_uint_t udplite_InErrors; + kernel_uint_t udplite_OutDatagrams; + kernel_uint_t udplite_RcvbufErrors; + kernel_uint_t udplite_SndbufErrors; + kernel_uint_t udplite_InCsumErrors; + kernel_uint_t udplite_IgnoredMulti; +} snmp_root = { 0 }; + +int do_proc_net_snmp(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1, + do_tcp_sockets = -1, do_tcp_packets = -1, do_tcp_errors = -1, 
do_tcp_handshake = -1, do_tcp_opens = -1, + do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1, do_udplite_packets = -1; + static uint32_t hash_ip = 0, hash_icmp = 0, hash_tcp = 0, hash_udp = 0, hash_icmpmsg = 0, hash_udplite = 0; + + static ARL_BASE *arl_ip = NULL, + *arl_icmp = NULL, + *arl_icmpmsg = NULL, + *arl_tcp = NULL, + *arl_udp = NULL, + *arl_udplite = NULL; + + static RRDVAR *tcp_max_connections_var = NULL; + + if(unlikely(!arl_ip)) { + do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 packets", CONFIG_BOOLEAN_AUTO); + do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments sent", CONFIG_BOOLEAN_AUTO); + do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments assembly", CONFIG_BOOLEAN_AUTO); + do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 errors", CONFIG_BOOLEAN_AUTO); + do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP connections", CONFIG_BOOLEAN_AUTO); + do_tcp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP packets", CONFIG_BOOLEAN_AUTO); + do_tcp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP errors", CONFIG_BOOLEAN_AUTO); + do_tcp_opens = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP opens", CONFIG_BOOLEAN_AUTO); + do_tcp_handshake = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP handshake issues", CONFIG_BOOLEAN_AUTO); + do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP packets", CONFIG_BOOLEAN_AUTO); + do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP errors", CONFIG_BOOLEAN_AUTO); + do_icmp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP packets", CONFIG_BOOLEAN_AUTO); + do_icmpmsg = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP messages", CONFIG_BOOLEAN_AUTO); + do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDPLite packets", CONFIG_BOOLEAN_AUTO); + + hash_ip = simple_hash("Ip"); + hash_tcp = simple_hash("Tcp"); + hash_udp = simple_hash("Udp"); + hash_icmp = simple_hash("Icmp"); + hash_icmpmsg = simple_hash("IcmpMsg"); + hash_udplite = simple_hash("UdpLite"); + + arl_ip = arl_create("snmp/Ip", arl_callback_str2kernel_uint_t, 60); + // arl_expect(arl_ip, "Forwarding", &snmp_root.ip_Forwarding); + arl_expect(arl_ip, "DefaultTTL", &snmp_root.ip_DefaultTTL); + arl_expect(arl_ip, "InReceives", &snmp_root.ip_InReceives); + arl_expect(arl_ip, "InHdrErrors", &snmp_root.ip_InHdrErrors); + arl_expect(arl_ip, "InAddrErrors", &snmp_root.ip_InAddrErrors); + arl_expect(arl_ip, "ForwDatagrams", &snmp_root.ip_ForwDatagrams); + arl_expect(arl_ip, "InUnknownProtos", &snmp_root.ip_InUnknownProtos); + arl_expect(arl_ip, "InDiscards", &snmp_root.ip_InDiscards); + arl_expect(arl_ip, "InDelivers", &snmp_root.ip_InDelivers); + arl_expect(arl_ip, "OutRequests", &snmp_root.ip_OutRequests); + arl_expect(arl_ip, "OutDiscards", &snmp_root.ip_OutDiscards); + arl_expect(arl_ip, "OutNoRoutes", &snmp_root.ip_OutNoRoutes); + arl_expect(arl_ip, "ReasmTimeout", &snmp_root.ip_ReasmTimeout); + arl_expect(arl_ip, "ReasmReqds", &snmp_root.ip_ReasmReqds); + arl_expect(arl_ip, "ReasmOKs", &snmp_root.ip_ReasmOKs); + arl_expect(arl_ip, "ReasmFails", &snmp_root.ip_ReasmFails); + arl_expect(arl_ip, "FragOKs", &snmp_root.ip_FragOKs); + arl_expect(arl_ip, 
"FragFails", &snmp_root.ip_FragFails); + arl_expect(arl_ip, "FragCreates", &snmp_root.ip_FragCreates); + + arl_icmp = arl_create("snmp/Icmp", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_icmp, "InMsgs", &snmp_root.icmp_InMsgs); + arl_expect(arl_icmp, "OutMsgs", &snmp_root.icmp_OutMsgs); + arl_expect(arl_icmp, "InErrors", &snmp_root.icmp_InErrors); + arl_expect(arl_icmp, "OutErrors", &snmp_root.icmp_OutErrors); + arl_expect(arl_icmp, "InCsumErrors", &snmp_root.icmp_InCsumErrors); + + arl_icmpmsg = arl_create("snmp/Icmpmsg", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_icmpmsg, "InType0", &snmp_root.icmpmsg_InEchoReps); + arl_expect(arl_icmpmsg, "OutType0", &snmp_root.icmpmsg_OutEchoReps); + arl_expect(arl_icmpmsg, "InType3", &snmp_root.icmpmsg_InDestUnreachs); + arl_expect(arl_icmpmsg, "OutType3", &snmp_root.icmpmsg_OutDestUnreachs); + arl_expect(arl_icmpmsg, "InType5", &snmp_root.icmpmsg_InRedirects); + arl_expect(arl_icmpmsg, "OutType5", &snmp_root.icmpmsg_OutRedirects); + arl_expect(arl_icmpmsg, "InType8", &snmp_root.icmpmsg_InEchos); + arl_expect(arl_icmpmsg, "OutType8", &snmp_root.icmpmsg_OutEchos); + arl_expect(arl_icmpmsg, "InType9", &snmp_root.icmpmsg_InRouterAdvert); + arl_expect(arl_icmpmsg, "OutType9", &snmp_root.icmpmsg_OutRouterAdvert); + arl_expect(arl_icmpmsg, "InType10", &snmp_root.icmpmsg_InRouterSelect); + arl_expect(arl_icmpmsg, "OutType10", &snmp_root.icmpmsg_OutRouterSelect); + arl_expect(arl_icmpmsg, "InType11", &snmp_root.icmpmsg_InTimeExcds); + arl_expect(arl_icmpmsg, "OutType11", &snmp_root.icmpmsg_OutTimeExcds); + arl_expect(arl_icmpmsg, "InType12", &snmp_root.icmpmsg_InParmProbs); + arl_expect(arl_icmpmsg, "OutType12", &snmp_root.icmpmsg_OutParmProbs); + arl_expect(arl_icmpmsg, "InType13", &snmp_root.icmpmsg_InTimestamps); + arl_expect(arl_icmpmsg, "OutType13", &snmp_root.icmpmsg_OutTimestamps); + arl_expect(arl_icmpmsg, "InType14", &snmp_root.icmpmsg_InTimestampReps); + arl_expect(arl_icmpmsg, "OutType14", &snmp_root.icmpmsg_OutTimestampReps); + + arl_tcp = arl_create("snmp/Tcp", arl_callback_str2kernel_uint_t, 60); + // arl_expect(arl_tcp, "RtoAlgorithm", &snmp_root.tcp_RtoAlgorithm); + // arl_expect(arl_tcp, "RtoMin", &snmp_root.tcp_RtoMin); + // arl_expect(arl_tcp, "RtoMax", &snmp_root.tcp_RtoMax); + arl_expect_custom(arl_tcp, "MaxConn", arl_callback_ssize_t, &snmp_root.tcp_MaxConn); + arl_expect(arl_tcp, "ActiveOpens", &snmp_root.tcp_ActiveOpens); + arl_expect(arl_tcp, "PassiveOpens", &snmp_root.tcp_PassiveOpens); + arl_expect(arl_tcp, "AttemptFails", &snmp_root.tcp_AttemptFails); + arl_expect(arl_tcp, "EstabResets", &snmp_root.tcp_EstabResets); + arl_expect(arl_tcp, "CurrEstab", &snmp_root.tcp_CurrEstab); + arl_expect(arl_tcp, "InSegs", &snmp_root.tcp_InSegs); + arl_expect(arl_tcp, "OutSegs", &snmp_root.tcp_OutSegs); + arl_expect(arl_tcp, "RetransSegs", &snmp_root.tcp_RetransSegs); + arl_expect(arl_tcp, "InErrs", &snmp_root.tcp_InErrs); + arl_expect(arl_tcp, "OutRsts", &snmp_root.tcp_OutRsts); + arl_expect(arl_tcp, "InCsumErrors", &snmp_root.tcp_InCsumErrors); + + arl_udp = arl_create("snmp/Udp", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_udp, "InDatagrams", &snmp_root.udp_InDatagrams); + arl_expect(arl_udp, "NoPorts", &snmp_root.udp_NoPorts); + arl_expect(arl_udp, "InErrors", &snmp_root.udp_InErrors); + arl_expect(arl_udp, "OutDatagrams", &snmp_root.udp_OutDatagrams); + arl_expect(arl_udp, "RcvbufErrors", &snmp_root.udp_RcvbufErrors); + arl_expect(arl_udp, "SndbufErrors", &snmp_root.udp_SndbufErrors); + arl_expect(arl_udp, 
"InCsumErrors", &snmp_root.udp_InCsumErrors); + arl_expect(arl_udp, "IgnoredMulti", &snmp_root.udp_IgnoredMulti); + + arl_udplite = arl_create("snmp/Udplite", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_udplite, "InDatagrams", &snmp_root.udplite_InDatagrams); + arl_expect(arl_udplite, "NoPorts", &snmp_root.udplite_NoPorts); + arl_expect(arl_udplite, "InErrors", &snmp_root.udplite_InErrors); + arl_expect(arl_udplite, "OutDatagrams", &snmp_root.udplite_OutDatagrams); + arl_expect(arl_udplite, "RcvbufErrors", &snmp_root.udplite_RcvbufErrors); + arl_expect(arl_udplite, "SndbufErrors", &snmp_root.udplite_SndbufErrors); + arl_expect(arl_udplite, "InCsumErrors", &snmp_root.udplite_InCsumErrors); + arl_expect(arl_udplite, "IgnoredMulti", &snmp_root.udplite_IgnoredMulti); + + tcp_max_connections_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_connections"); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp"); + ff = procfile_open(config_get("plugin:proc:/proc/net/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + size_t words, w; + + for(l = 0; l < lines ;l++) { + char *key = procfile_lineword(ff, l, 0); + uint32_t hash = simple_hash(key); + + if(unlikely(hash == hash_ip && strcmp(key, "Ip") == 0)) { + size_t h = l++; + + if(strcmp(procfile_lineword(ff, l, 0), "Ip") != 0) { + error("Cannot read Ip line from /proc/net/snmp."); + break; + } + + words = procfile_linewords(ff, l); + if(words < 3) { + error("Cannot read /proc/net/snmp Ip line. 
Expected 3+ params, read %zu.", words); + continue; + } + + arl_begin(arl_ip); + for(w = 1; w < words ; w++) { + if (unlikely(arl_check(arl_ip, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) + break; + } + + // -------------------------------------------------------------------- + + if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_OutRequests || snmp_root.ip_InReceives || snmp_root.ip_ForwDatagrams || snmp_root.ip_InDelivers))) { + do_ip_packets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_InReceives = NULL, + *rd_OutRequests = NULL, + *rd_ForwDatagrams = NULL, + *rd_InDelivers = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "packets" + , NULL + , "packets" + , NULL + , "IPv4 Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InReceives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutRequests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ForwDatagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InDelivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_OutRequests, (collected_number)snmp_root.ip_OutRequests); + rrddim_set_by_pointer(st, rd_InReceives, (collected_number)snmp_root.ip_InReceives); + rrddim_set_by_pointer(st, rd_ForwDatagrams, (collected_number)snmp_root.ip_ForwDatagrams); + rrddim_set_by_pointer(st, rd_InDelivers, (collected_number)snmp_root.ip_InDelivers); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_FragOKs || snmp_root.ip_FragFails || snmp_root.ip_FragCreates))) { + do_ip_fragsout = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_FragOKs = NULL, + *rd_FragFails = NULL, + *rd_FragCreates = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "fragsout" + , NULL + , "fragments" + , NULL + , "IPv4 Fragments Sent" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_FragCreates = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_FragOKs, (collected_number)snmp_root.ip_FragOKs); + rrddim_set_by_pointer(st, rd_FragFails, (collected_number)snmp_root.ip_FragFails); + rrddim_set_by_pointer(st, rd_FragCreates, (collected_number)snmp_root.ip_FragCreates); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_ReasmOKs || snmp_root.ip_ReasmFails || snmp_root.ip_ReasmReqds))) { + do_ip_fragsin = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ReasmOKs = NULL, + *rd_ReasmFails = NULL, + *rd_ReasmReqds = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + 
RRD_TYPE_NET_SNMP + , "fragsin" + , NULL + , "fragments" + , NULL + , "IPv4 Fragments Reassembly" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + 1 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ReasmReqds = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ReasmOKs, (collected_number)snmp_root.ip_ReasmOKs); + rrddim_set_by_pointer(st, rd_ReasmFails, (collected_number)snmp_root.ip_ReasmFails); + rrddim_set_by_pointer(st, rd_ReasmReqds, (collected_number)snmp_root.ip_ReasmReqds); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_InDiscards || snmp_root.ip_OutDiscards || snmp_root.ip_InHdrErrors || snmp_root.ip_InAddrErrors || snmp_root.ip_InUnknownProtos || snmp_root.ip_OutNoRoutes))) { + do_ip_errors = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_InDiscards = NULL, + *rd_OutDiscards = NULL, + *rd_InHdrErrors = NULL, + *rd_OutNoRoutes = NULL, + *rd_InAddrErrors = NULL, + *rd_InUnknownProtos = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "errors" + , NULL + , "errors" + , NULL + , "IPv4 Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards); + rrddim_set_by_pointer(st, rd_OutDiscards, (collected_number)snmp_root.ip_OutDiscards); + rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors); + rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors); + rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos); + rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes); + rrdset_done(st); + } + } + else if(unlikely(hash == hash_icmp && strcmp(key, "Icmp") == 0)) { + size_t h = l++; + + if(strcmp(procfile_lineword(ff, l, 0), "Icmp") != 0) { + error("Cannot read Icmp line from /proc/net/snmp."); + break; + } + + words = procfile_linewords(ff, l); + if(words < 3) { + error("Cannot read /proc/net/snmp Icmp line. 
Expected 3+ params, read %zu.", words); + continue; + } + + arl_begin(arl_icmp); + for(w = 1; w < words ; w++) { + if (unlikely(arl_check(arl_icmp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) + break; + } + + // -------------------------------------------------------------------- + + if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.icmp_InMsgs || snmp_root.icmp_OutMsgs || snmp_root.icmp_InErrors || snmp_root.icmp_OutErrors || snmp_root.icmp_InCsumErrors))) { + do_icmp_packets = CONFIG_BOOLEAN_YES; + + { + static RRDSET *st_packets = NULL; + static RRDDIM *rd_InMsgs = NULL, + *rd_OutMsgs = NULL; + + if(unlikely(!st_packets)) { + st_packets = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "icmp" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_ICMP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InMsgs = rrddim_add(st_packets, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutMsgs = rrddim_add(st_packets, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_packets); + + rrddim_set_by_pointer(st_packets, rd_InMsgs, (collected_number)snmp_root.icmp_InMsgs); + rrddim_set_by_pointer(st_packets, rd_OutMsgs, (collected_number)snmp_root.icmp_OutMsgs); + + rrdset_done(st_packets); + } + + { + static RRDSET *st_errors = NULL; + static RRDDIM *rd_InErrors = NULL, + *rd_OutErrors = NULL, + *rd_InCsumErrors = NULL; + + if(unlikely(!st_errors)) { + st_errors = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "icmp_errors" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_ICMP + 1 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InErrors = rrddim_add(st_errors, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutErrors = rrddim_add(st_errors, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st_errors, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_errors); + + rrddim_set_by_pointer(st_errors, rd_InErrors, (collected_number)snmp_root.icmp_InErrors); + rrddim_set_by_pointer(st_errors, rd_OutErrors, (collected_number)snmp_root.icmp_OutErrors); + rrddim_set_by_pointer(st_errors, rd_InCsumErrors, (collected_number)snmp_root.icmp_InCsumErrors); + + rrdset_done(st_errors); + } + } + } + else if(unlikely(hash == hash_icmpmsg && strcmp(key, "IcmpMsg") == 0)) { + size_t h = l++; + + if(strcmp(procfile_lineword(ff, l, 0), "IcmpMsg") != 0) { + error("Cannot read IcmpMsg line from /proc/net/snmp."); + break; + } + + words = procfile_linewords(ff, l); + if(words < 2) { + error("Cannot read /proc/net/snmp IcmpMsg line. 
Expected 2+ params, read %zu.", words); + continue; + } + + arl_begin(arl_icmpmsg); + for(w = 1; w < words ; w++) { + if (unlikely(arl_check(arl_icmpmsg, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) + break; + } + + // -------------------------------------------------------------------- + + if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO && ( + snmp_root.icmpmsg_InEchoReps + || snmp_root.icmpmsg_OutEchoReps + || snmp_root.icmpmsg_InDestUnreachs + || snmp_root.icmpmsg_OutDestUnreachs + || snmp_root.icmpmsg_InRedirects + || snmp_root.icmpmsg_OutRedirects + || snmp_root.icmpmsg_InEchos + || snmp_root.icmpmsg_OutEchos + || snmp_root.icmpmsg_InRouterAdvert + || snmp_root.icmpmsg_OutRouterAdvert + || snmp_root.icmpmsg_InRouterSelect + || snmp_root.icmpmsg_OutRouterSelect + || snmp_root.icmpmsg_InTimeExcds + || snmp_root.icmpmsg_OutTimeExcds + || snmp_root.icmpmsg_InParmProbs + || snmp_root.icmpmsg_OutParmProbs + || snmp_root.icmpmsg_InTimestamps + || snmp_root.icmpmsg_OutTimestamps + || snmp_root.icmpmsg_InTimestampReps + || snmp_root.icmpmsg_OutTimestampReps + ))) { + do_icmpmsg = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_InEchoReps = NULL, + *rd_OutEchoReps = NULL, + *rd_InDestUnreachs = NULL, + *rd_OutDestUnreachs = NULL, + *rd_InRedirects = NULL, + *rd_OutRedirects = NULL, + *rd_InEchos = NULL, + *rd_OutEchos = NULL, + *rd_InRouterAdvert = NULL, + *rd_OutRouterAdvert = NULL, + *rd_InRouterSelect = NULL, + *rd_OutRouterSelect = NULL, + *rd_InTimeExcds = NULL, + *rd_OutTimeExcds = NULL, + *rd_InParmProbs = NULL, + *rd_OutParmProbs = NULL, + *rd_InTimestamps = NULL, + *rd_OutTimestamps = NULL, + *rd_InTimestampReps = NULL, + *rd_OutTimestampReps = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "icmpmsg" + , NULL + , "icmp" + , NULL + , "IPv4 ICMP Messages" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_ICMP + 2 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InEchoReps = rrddim_add(st, "InType0", "InEchoReps", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutEchoReps = rrddim_add(st, "OutType0", "OutEchoReps", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InDestUnreachs = rrddim_add(st, "InType3", "InDestUnreachs", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutDestUnreachs = rrddim_add(st, "OutType3", "OutDestUnreachs", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InRedirects = rrddim_add(st, "InType5", "InRedirects", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutRedirects = rrddim_add(st, "OutType5", "OutRedirects", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InEchos = rrddim_add(st, "InType8", "InEchos", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutEchos = rrddim_add(st, "OutType8", "OutEchos", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InRouterAdvert = rrddim_add(st, "InType9", "InRouterAdvert", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutRouterAdvert = rrddim_add(st, "OutType9", "OutRouterAdvert", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InRouterSelect = rrddim_add(st, "InType10", "InRouterSelect", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutRouterSelect = rrddim_add(st, "OutType10", "OutRouterSelect", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTimeExcds = rrddim_add(st, "InType11", "InTimeExcds", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutTimeExcds = rrddim_add(st, "OutType11", "OutTimeExcds", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InParmProbs = rrddim_add(st, "InType12", "InParmProbs", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutParmProbs = rrddim_add(st, 
"OutType12", "OutParmProbs", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTimestamps = rrddim_add(st, "InType13", "InTimestamps", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutTimestamps = rrddim_add(st, "OutType13", "OutTimestamps", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTimestampReps = rrddim_add(st, "InType14", "InTimestampReps", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutTimestampReps = rrddim_add(st, "OutType14", "OutTimestampReps", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InEchoReps, (collected_number)snmp_root.icmpmsg_InEchoReps); + rrddim_set_by_pointer(st, rd_OutEchoReps, (collected_number)snmp_root.icmpmsg_OutEchoReps); + rrddim_set_by_pointer(st, rd_InDestUnreachs, (collected_number)snmp_root.icmpmsg_InDestUnreachs); + rrddim_set_by_pointer(st, rd_OutDestUnreachs, (collected_number)snmp_root.icmpmsg_OutDestUnreachs); + rrddim_set_by_pointer(st, rd_InRedirects, (collected_number)snmp_root.icmpmsg_InRedirects); + rrddim_set_by_pointer(st, rd_OutRedirects, (collected_number)snmp_root.icmpmsg_OutRedirects); + rrddim_set_by_pointer(st, rd_InEchos, (collected_number)snmp_root.icmpmsg_InEchos); + rrddim_set_by_pointer(st, rd_OutEchos, (collected_number)snmp_root.icmpmsg_OutEchos); + rrddim_set_by_pointer(st, rd_InRouterAdvert, (collected_number)snmp_root.icmpmsg_InRouterAdvert); + rrddim_set_by_pointer(st, rd_OutRouterAdvert, (collected_number)snmp_root.icmpmsg_OutRouterAdvert); + rrddim_set_by_pointer(st, rd_InRouterSelect, (collected_number)snmp_root.icmpmsg_InRouterSelect); + rrddim_set_by_pointer(st, rd_OutRouterSelect, (collected_number)snmp_root.icmpmsg_OutRouterSelect); + rrddim_set_by_pointer(st, rd_InTimeExcds, (collected_number)snmp_root.icmpmsg_InTimeExcds); + rrddim_set_by_pointer(st, rd_OutTimeExcds, (collected_number)snmp_root.icmpmsg_OutTimeExcds); + rrddim_set_by_pointer(st, rd_InParmProbs, (collected_number)snmp_root.icmpmsg_InParmProbs); + rrddim_set_by_pointer(st, rd_OutParmProbs, (collected_number)snmp_root.icmpmsg_OutParmProbs); + rrddim_set_by_pointer(st, rd_InTimestamps, (collected_number)snmp_root.icmpmsg_InTimestamps); + rrddim_set_by_pointer(st, rd_OutTimestamps, (collected_number)snmp_root.icmpmsg_OutTimestamps); + rrddim_set_by_pointer(st, rd_InTimestampReps, (collected_number)snmp_root.icmpmsg_InTimestampReps); + rrddim_set_by_pointer(st, rd_OutTimestampReps, (collected_number)snmp_root.icmpmsg_OutTimestampReps); + + rrdset_done(st); + } + } + else if(unlikely(hash == hash_tcp && strcmp(key, "Tcp") == 0)) { + size_t h = l++; + + if(strcmp(procfile_lineword(ff, l, 0), "Tcp") != 0) { + error("Cannot read Tcp line from /proc/net/snmp."); + break; + } + + words = procfile_linewords(ff, l); + if(words < 3) { + error("Cannot read /proc/net/snmp Tcp line. 
Expected 3+ params, read %zu.", words); + continue; + } + + arl_begin(arl_tcp); + for(w = 1; w < words ; w++) { + if (unlikely(arl_check(arl_tcp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) + break; + } + + // -------------------------------------------------------------------- + + // this is smart enough to update it, only when it is changed + rrdvar_custom_host_variable_set(localhost, tcp_max_connections_var, snmp_root.tcp_MaxConn); + + // -------------------------------------------------------------------- + + // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && snmp_root.tcp_CurrEstab)) { + do_tcp_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_CurrEstab = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "tcpsock" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Connections" + , "active connections" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_TCP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_CurrEstab = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_CurrEstab, (collected_number)snmp_root.tcp_CurrEstab); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InSegs || snmp_root.tcp_OutSegs))) { + do_tcp_packets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_InSegs = NULL, + *rd_OutSegs = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "tcppackets" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_TCP + 4 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InSegs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutSegs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InSegs, (collected_number)snmp_root.tcp_InSegs); + rrddim_set_by_pointer(st, rd_OutSegs, (collected_number)snmp_root.tcp_OutSegs); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InErrs || snmp_root.tcp_InCsumErrors || snmp_root.tcp_RetransSegs))) { + do_tcp_errors = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_InErrs = NULL, + *rd_InCsumErrors = NULL, + *rd_RetransSegs = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "tcperrors" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_TCP + 20 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_RetransSegs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InErrs, (collected_number)snmp_root.tcp_InErrs); + rrddim_set_by_pointer(st, rd_InCsumErrors, 
(collected_number)snmp_root.tcp_InCsumErrors); + rrddim_set_by_pointer(st, rd_RetransSegs, (collected_number)snmp_root.tcp_RetransSegs); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_ActiveOpens || snmp_root.tcp_PassiveOpens))) { + do_tcp_opens = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ActiveOpens = NULL, + *rd_PassiveOpens = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "tcpopens" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Opens" + , "connections/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_TCP + 5 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ActiveOpens, (collected_number)snmp_root.tcp_ActiveOpens); + rrddim_set_by_pointer(st, rd_PassiveOpens, (collected_number)snmp_root.tcp_PassiveOpens); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_EstabResets || snmp_root.tcp_OutRsts || snmp_root.tcp_AttemptFails))) { + do_tcp_handshake = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_EstabResets = NULL, + *rd_OutRsts = NULL, + *rd_AttemptFails = NULL, + *rd_TCPSynRetrans = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "tcphandshake" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Handshake Issues" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_TCP + 30 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_AttemptFails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_TCPSynRetrans = rrddim_add(st, "TCPSynRetrans", "SynRetrans", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_EstabResets, (collected_number)snmp_root.tcp_EstabResets); + rrddim_set_by_pointer(st, rd_OutRsts, (collected_number)snmp_root.tcp_OutRsts); + rrddim_set_by_pointer(st, rd_AttemptFails, (collected_number)snmp_root.tcp_AttemptFails); + rrddim_set_by_pointer(st, rd_TCPSynRetrans, tcpext_TCPSynRetrans); + rrdset_done(st); + } + } + else if(unlikely(hash == hash_udp && strcmp(key, "Udp") == 0)) { + size_t h = l++; + + if(strcmp(procfile_lineword(ff, l, 0), "Udp") != 0) { + error("Cannot read Udp line from /proc/net/snmp."); + break; + } + + words = procfile_linewords(ff, l); + if(words < 3) { + error("Cannot read /proc/net/snmp Udp line. 
Expected 3+ params, read %zu.", words); + continue; + } + + arl_begin(arl_udp); + for(w = 1; w < words ; w++) { + if (unlikely(arl_check(arl_udp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) + break; + } + + // -------------------------------------------------------------------- + + // see http://net-snmp.sourceforge.net/docs/mibs/udp.html + if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.udp_InDatagrams || snmp_root.udp_OutDatagrams))) { + do_udp_packets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_InDatagrams = NULL, + *rd_OutDatagrams = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "udppackets" + , NULL + , "udp" + , NULL + , "IPv4 UDP Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_UDP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udp_InDatagrams); + rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udp_OutDatagrams); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && ( + snmp_root.udp_InErrors + || snmp_root.udp_NoPorts + || snmp_root.udp_RcvbufErrors + || snmp_root.udp_SndbufErrors + || snmp_root.udp_InCsumErrors + || snmp_root.udp_IgnoredMulti + ))) { + do_udp_errors = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_RcvbufErrors = NULL, + *rd_SndbufErrors = NULL, + *rd_InErrors = NULL, + *rd_NoPorts = NULL, + *rd_InCsumErrors = NULL, + *rd_IgnoredMulti = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "udperrors" + , NULL + , "udp" + , NULL + , "IPv4 UDP Errors" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_UDP + 10 + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udp_InErrors); + rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udp_NoPorts); + rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udp_RcvbufErrors); + rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udp_SndbufErrors); + rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udp_InCsumErrors); + rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udp_IgnoredMulti); + rrdset_done(st); + } + } + else if(unlikely(hash == hash_udplite && strcmp(key, "UdpLite") == 0)) { + size_t h = l++; + + if(strcmp(procfile_lineword(ff, l, 0), 
"UdpLite") != 0) { + error("Cannot read UdpLite line from /proc/net/snmp."); + break; + } + + words = procfile_linewords(ff, l); + if(words < 3) { + error("Cannot read /proc/net/snmp UdpLite line. Expected 3+ params, read %zu.", words); + continue; + } + + arl_begin(arl_udplite); + for(w = 1; w < words ; w++) { + if (unlikely(arl_check(arl_udplite, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) + break; + } + + // -------------------------------------------------------------------- + + if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && ( + snmp_root.udplite_InDatagrams + || snmp_root.udplite_OutDatagrams + || snmp_root.udplite_NoPorts + || snmp_root.udplite_InErrors + || snmp_root.udplite_InCsumErrors + || snmp_root.udplite_RcvbufErrors + || snmp_root.udplite_SndbufErrors + || snmp_root.udplite_IgnoredMulti + ))) { + do_udplite_packets = CONFIG_BOOLEAN_YES; + + { + static RRDSET *st = NULL; + static RRDDIM *rd_InDatagrams = NULL, + *rd_OutDatagrams = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "udplite" + , NULL + , "udplite" + , NULL + , "IPv4 UDPLite Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_UDPLITE + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udplite_InDatagrams); + rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udplite_OutDatagrams); + rrdset_done(st); + } + + { + static RRDSET *st = NULL; + static RRDDIM *rd_RcvbufErrors = NULL, + *rd_SndbufErrors = NULL, + *rd_InErrors = NULL, + *rd_NoPorts = NULL, + *rd_InCsumErrors = NULL, + *rd_IgnoredMulti = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP + , "udplite_errors" + , NULL + , "udplite" + , NULL + , "IPv4 UDPLite Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP_NAME + , NETDATA_CHART_PRIO_IPV4_UDPLITE + 10 + , update_every + , RRDSET_TYPE_LINE); + + rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udplite_NoPorts); + rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udplite_InErrors); + rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udplite_InCsumErrors); + rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udplite_RcvbufErrors); + rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udplite_SndbufErrors); + rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udplite_IgnoredMulti); + rrdset_done(st); + } + } + } + } + + return 0; +} + diff --git a/collectors/proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c new file 
mode 100644 index 000000000..f0084aa26 --- /dev/null +++ b/collectors/proc.plugin/proc_net_snmp6.c @@ -0,0 +1,1268 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define RRD_TYPE_NET_SNMP6 "ipv6" +#define PLUGIN_PROC_MODULE_NET_SNMP6_NAME "/proc/net/snmp6" + +int do_proc_net_snmp6(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + + static int do_ip_packets = -1, + do_ip_fragsout = -1, + do_ip_fragsin = -1, + do_ip_errors = -1, + do_udplite_packets = -1, + do_udplite_errors = -1, + do_udp_packets = -1, + do_udp_errors = -1, + do_bandwidth = -1, + do_mcast = -1, + do_bcast = -1, + do_mcast_p = -1, + do_icmp = -1, + do_icmp_redir = -1, + do_icmp_errors = -1, + do_icmp_echos = -1, + do_icmp_groupmemb = -1, + do_icmp_router = -1, + do_icmp_neighbor = -1, + do_icmp_mldv2 = -1, + do_icmp_types = -1, + do_ect = -1; + + static ARL_BASE *arl_base = NULL; + + static unsigned long long Ip6InReceives = 0ULL; + static unsigned long long Ip6InHdrErrors = 0ULL; + static unsigned long long Ip6InTooBigErrors = 0ULL; + static unsigned long long Ip6InNoRoutes = 0ULL; + static unsigned long long Ip6InAddrErrors = 0ULL; + static unsigned long long Ip6InUnknownProtos = 0ULL; + static unsigned long long Ip6InTruncatedPkts = 0ULL; + static unsigned long long Ip6InDiscards = 0ULL; + static unsigned long long Ip6InDelivers = 0ULL; + static unsigned long long Ip6OutForwDatagrams = 0ULL; + static unsigned long long Ip6OutRequests = 0ULL; + static unsigned long long Ip6OutDiscards = 0ULL; + static unsigned long long Ip6OutNoRoutes = 0ULL; + static unsigned long long Ip6ReasmTimeout = 0ULL; + static unsigned long long Ip6ReasmReqds = 0ULL; + static unsigned long long Ip6ReasmOKs = 0ULL; + static unsigned long long Ip6ReasmFails = 0ULL; + static unsigned long long Ip6FragOKs = 0ULL; + static unsigned long long Ip6FragFails = 0ULL; + static unsigned long long Ip6FragCreates = 0ULL; + static unsigned long long Ip6InMcastPkts = 0ULL; + static unsigned long long Ip6OutMcastPkts = 0ULL; + static unsigned long long Ip6InOctets = 0ULL; + static unsigned long long Ip6OutOctets = 0ULL; + static unsigned long long Ip6InMcastOctets = 0ULL; + static unsigned long long Ip6OutMcastOctets = 0ULL; + static unsigned long long Ip6InBcastOctets = 0ULL; + static unsigned long long Ip6OutBcastOctets = 0ULL; + static unsigned long long Ip6InNoECTPkts = 0ULL; + static unsigned long long Ip6InECT1Pkts = 0ULL; + static unsigned long long Ip6InECT0Pkts = 0ULL; + static unsigned long long Ip6InCEPkts = 0ULL; + static unsigned long long Icmp6InMsgs = 0ULL; + static unsigned long long Icmp6InErrors = 0ULL; + static unsigned long long Icmp6OutMsgs = 0ULL; + static unsigned long long Icmp6OutErrors = 0ULL; + static unsigned long long Icmp6InCsumErrors = 0ULL; + static unsigned long long Icmp6InDestUnreachs = 0ULL; + static unsigned long long Icmp6InPktTooBigs = 0ULL; + static unsigned long long Icmp6InTimeExcds = 0ULL; + static unsigned long long Icmp6InParmProblems = 0ULL; + static unsigned long long Icmp6InEchos = 0ULL; + static unsigned long long Icmp6InEchoReplies = 0ULL; + static unsigned long long Icmp6InGroupMembQueries = 0ULL; + static unsigned long long Icmp6InGroupMembResponses = 0ULL; + static unsigned long long Icmp6InGroupMembReductions = 0ULL; + static unsigned long long Icmp6InRouterSolicits = 0ULL; + static unsigned long long Icmp6InRouterAdvertisements = 0ULL; + static unsigned long long Icmp6InNeighborSolicits = 0ULL; + static unsigned long long Icmp6InNeighborAdvertisements 
= 0ULL; + static unsigned long long Icmp6InRedirects = 0ULL; + static unsigned long long Icmp6InMLDv2Reports = 0ULL; + static unsigned long long Icmp6OutDestUnreachs = 0ULL; + static unsigned long long Icmp6OutPktTooBigs = 0ULL; + static unsigned long long Icmp6OutTimeExcds = 0ULL; + static unsigned long long Icmp6OutParmProblems = 0ULL; + static unsigned long long Icmp6OutEchos = 0ULL; + static unsigned long long Icmp6OutEchoReplies = 0ULL; + static unsigned long long Icmp6OutGroupMembQueries = 0ULL; + static unsigned long long Icmp6OutGroupMembResponses = 0ULL; + static unsigned long long Icmp6OutGroupMembReductions = 0ULL; + static unsigned long long Icmp6OutRouterSolicits = 0ULL; + static unsigned long long Icmp6OutRouterAdvertisements = 0ULL; + static unsigned long long Icmp6OutNeighborSolicits = 0ULL; + static unsigned long long Icmp6OutNeighborAdvertisements = 0ULL; + static unsigned long long Icmp6OutRedirects = 0ULL; + static unsigned long long Icmp6OutMLDv2Reports = 0ULL; + static unsigned long long Icmp6InType1 = 0ULL; + static unsigned long long Icmp6InType128 = 0ULL; + static unsigned long long Icmp6InType129 = 0ULL; + static unsigned long long Icmp6InType136 = 0ULL; + static unsigned long long Icmp6OutType1 = 0ULL; + static unsigned long long Icmp6OutType128 = 0ULL; + static unsigned long long Icmp6OutType129 = 0ULL; + static unsigned long long Icmp6OutType133 = 0ULL; + static unsigned long long Icmp6OutType135 = 0ULL; + static unsigned long long Icmp6OutType143 = 0ULL; + static unsigned long long Udp6InDatagrams = 0ULL; + static unsigned long long Udp6NoPorts = 0ULL; + static unsigned long long Udp6InErrors = 0ULL; + static unsigned long long Udp6OutDatagrams = 0ULL; + static unsigned long long Udp6RcvbufErrors = 0ULL; + static unsigned long long Udp6SndbufErrors = 0ULL; + static unsigned long long Udp6InCsumErrors = 0ULL; + static unsigned long long Udp6IgnoredMulti = 0ULL; + static unsigned long long UdpLite6InDatagrams = 0ULL; + static unsigned long long UdpLite6NoPorts = 0ULL; + static unsigned long long UdpLite6InErrors = 0ULL; + static unsigned long long UdpLite6OutDatagrams = 0ULL; + static unsigned long long UdpLite6RcvbufErrors = 0ULL; + static unsigned long long UdpLite6SndbufErrors = 0ULL; + static unsigned long long UdpLite6InCsumErrors = 0ULL; + + if(unlikely(!arl_base)) { + do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 packets", CONFIG_BOOLEAN_AUTO); + do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO); + do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO); + do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 errors", CONFIG_BOOLEAN_AUTO); + do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP packets", CONFIG_BOOLEAN_AUTO); + do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP errors", CONFIG_BOOLEAN_AUTO); + do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite packets", CONFIG_BOOLEAN_AUTO); + do_udplite_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite errors", CONFIG_BOOLEAN_AUTO); + do_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "bandwidth", CONFIG_BOOLEAN_AUTO); + do_mcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast bandwidth", CONFIG_BOOLEAN_AUTO); + do_bcast = 
config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "broadcast bandwidth", CONFIG_BOOLEAN_AUTO); + do_mcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast packets", CONFIG_BOOLEAN_AUTO); + do_icmp = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp", CONFIG_BOOLEAN_AUTO); + do_icmp_redir = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp redirects", CONFIG_BOOLEAN_AUTO); + do_icmp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp errors", CONFIG_BOOLEAN_AUTO); + do_icmp_echos = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp echos", CONFIG_BOOLEAN_AUTO); + do_icmp_groupmemb = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp group membership", CONFIG_BOOLEAN_AUTO); + do_icmp_router = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp router", CONFIG_BOOLEAN_AUTO); + do_icmp_neighbor = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp neighbor", CONFIG_BOOLEAN_AUTO); + do_icmp_mldv2 = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp mldv2", CONFIG_BOOLEAN_AUTO); + do_icmp_types = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp types", CONFIG_BOOLEAN_AUTO); + do_ect = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ect", CONFIG_BOOLEAN_AUTO); + + arl_base = arl_create("snmp6", NULL, 60); + arl_expect(arl_base, "Ip6InReceives", &Ip6InReceives); + arl_expect(arl_base, "Ip6InHdrErrors", &Ip6InHdrErrors); + arl_expect(arl_base, "Ip6InTooBigErrors", &Ip6InTooBigErrors); + arl_expect(arl_base, "Ip6InNoRoutes", &Ip6InNoRoutes); + arl_expect(arl_base, "Ip6InAddrErrors", &Ip6InAddrErrors); + arl_expect(arl_base, "Ip6InUnknownProtos", &Ip6InUnknownProtos); + arl_expect(arl_base, "Ip6InTruncatedPkts", &Ip6InTruncatedPkts); + arl_expect(arl_base, "Ip6InDiscards", &Ip6InDiscards); + arl_expect(arl_base, "Ip6InDelivers", &Ip6InDelivers); + arl_expect(arl_base, "Ip6OutForwDatagrams", &Ip6OutForwDatagrams); + arl_expect(arl_base, "Ip6OutRequests", &Ip6OutRequests); + arl_expect(arl_base, "Ip6OutDiscards", &Ip6OutDiscards); + arl_expect(arl_base, "Ip6OutNoRoutes", &Ip6OutNoRoutes); + arl_expect(arl_base, "Ip6ReasmTimeout", &Ip6ReasmTimeout); + arl_expect(arl_base, "Ip6ReasmReqds", &Ip6ReasmReqds); + arl_expect(arl_base, "Ip6ReasmOKs", &Ip6ReasmOKs); + arl_expect(arl_base, "Ip6ReasmFails", &Ip6ReasmFails); + arl_expect(arl_base, "Ip6FragOKs", &Ip6FragOKs); + arl_expect(arl_base, "Ip6FragFails", &Ip6FragFails); + arl_expect(arl_base, "Ip6FragCreates", &Ip6FragCreates); + arl_expect(arl_base, "Ip6InMcastPkts", &Ip6InMcastPkts); + arl_expect(arl_base, "Ip6OutMcastPkts", &Ip6OutMcastPkts); + arl_expect(arl_base, "Ip6InOctets", &Ip6InOctets); + arl_expect(arl_base, "Ip6OutOctets", &Ip6OutOctets); + arl_expect(arl_base, "Ip6InMcastOctets", &Ip6InMcastOctets); + arl_expect(arl_base, "Ip6OutMcastOctets", &Ip6OutMcastOctets); + arl_expect(arl_base, "Ip6InBcastOctets", &Ip6InBcastOctets); + arl_expect(arl_base, "Ip6OutBcastOctets", &Ip6OutBcastOctets); + arl_expect(arl_base, "Ip6InNoECTPkts", &Ip6InNoECTPkts); + arl_expect(arl_base, "Ip6InECT1Pkts", &Ip6InECT1Pkts); + arl_expect(arl_base, "Ip6InECT0Pkts", &Ip6InECT0Pkts); + arl_expect(arl_base, "Ip6InCEPkts", &Ip6InCEPkts); + arl_expect(arl_base, "Icmp6InMsgs", &Icmp6InMsgs); + arl_expect(arl_base, "Icmp6InErrors", &Icmp6InErrors); + arl_expect(arl_base, "Icmp6OutMsgs", &Icmp6OutMsgs); + arl_expect(arl_base, "Icmp6OutErrors", &Icmp6OutErrors); + arl_expect(arl_base, 
"Icmp6InCsumErrors", &Icmp6InCsumErrors); + arl_expect(arl_base, "Icmp6InDestUnreachs", &Icmp6InDestUnreachs); + arl_expect(arl_base, "Icmp6InPktTooBigs", &Icmp6InPktTooBigs); + arl_expect(arl_base, "Icmp6InTimeExcds", &Icmp6InTimeExcds); + arl_expect(arl_base, "Icmp6InParmProblems", &Icmp6InParmProblems); + arl_expect(arl_base, "Icmp6InEchos", &Icmp6InEchos); + arl_expect(arl_base, "Icmp6InEchoReplies", &Icmp6InEchoReplies); + arl_expect(arl_base, "Icmp6InGroupMembQueries", &Icmp6InGroupMembQueries); + arl_expect(arl_base, "Icmp6InGroupMembResponses", &Icmp6InGroupMembResponses); + arl_expect(arl_base, "Icmp6InGroupMembReductions", &Icmp6InGroupMembReductions); + arl_expect(arl_base, "Icmp6InRouterSolicits", &Icmp6InRouterSolicits); + arl_expect(arl_base, "Icmp6InRouterAdvertisements", &Icmp6InRouterAdvertisements); + arl_expect(arl_base, "Icmp6InNeighborSolicits", &Icmp6InNeighborSolicits); + arl_expect(arl_base, "Icmp6InNeighborAdvertisements", &Icmp6InNeighborAdvertisements); + arl_expect(arl_base, "Icmp6InRedirects", &Icmp6InRedirects); + arl_expect(arl_base, "Icmp6InMLDv2Reports", &Icmp6InMLDv2Reports); + arl_expect(arl_base, "Icmp6OutDestUnreachs", &Icmp6OutDestUnreachs); + arl_expect(arl_base, "Icmp6OutPktTooBigs", &Icmp6OutPktTooBigs); + arl_expect(arl_base, "Icmp6OutTimeExcds", &Icmp6OutTimeExcds); + arl_expect(arl_base, "Icmp6OutParmProblems", &Icmp6OutParmProblems); + arl_expect(arl_base, "Icmp6OutEchos", &Icmp6OutEchos); + arl_expect(arl_base, "Icmp6OutEchoReplies", &Icmp6OutEchoReplies); + arl_expect(arl_base, "Icmp6OutGroupMembQueries", &Icmp6OutGroupMembQueries); + arl_expect(arl_base, "Icmp6OutGroupMembResponses", &Icmp6OutGroupMembResponses); + arl_expect(arl_base, "Icmp6OutGroupMembReductions", &Icmp6OutGroupMembReductions); + arl_expect(arl_base, "Icmp6OutRouterSolicits", &Icmp6OutRouterSolicits); + arl_expect(arl_base, "Icmp6OutRouterAdvertisements", &Icmp6OutRouterAdvertisements); + arl_expect(arl_base, "Icmp6OutNeighborSolicits", &Icmp6OutNeighborSolicits); + arl_expect(arl_base, "Icmp6OutNeighborAdvertisements", &Icmp6OutNeighborAdvertisements); + arl_expect(arl_base, "Icmp6OutRedirects", &Icmp6OutRedirects); + arl_expect(arl_base, "Icmp6OutMLDv2Reports", &Icmp6OutMLDv2Reports); + arl_expect(arl_base, "Icmp6InType1", &Icmp6InType1); + arl_expect(arl_base, "Icmp6InType128", &Icmp6InType128); + arl_expect(arl_base, "Icmp6InType129", &Icmp6InType129); + arl_expect(arl_base, "Icmp6InType136", &Icmp6InType136); + arl_expect(arl_base, "Icmp6OutType1", &Icmp6OutType1); + arl_expect(arl_base, "Icmp6OutType128", &Icmp6OutType128); + arl_expect(arl_base, "Icmp6OutType129", &Icmp6OutType129); + arl_expect(arl_base, "Icmp6OutType133", &Icmp6OutType133); + arl_expect(arl_base, "Icmp6OutType135", &Icmp6OutType135); + arl_expect(arl_base, "Icmp6OutType143", &Icmp6OutType143); + arl_expect(arl_base, "Udp6InDatagrams", &Udp6InDatagrams); + arl_expect(arl_base, "Udp6NoPorts", &Udp6NoPorts); + arl_expect(arl_base, "Udp6InErrors", &Udp6InErrors); + arl_expect(arl_base, "Udp6OutDatagrams", &Udp6OutDatagrams); + arl_expect(arl_base, "Udp6RcvbufErrors", &Udp6RcvbufErrors); + arl_expect(arl_base, "Udp6SndbufErrors", &Udp6SndbufErrors); + arl_expect(arl_base, "Udp6InCsumErrors", &Udp6InCsumErrors); + arl_expect(arl_base, "Udp6IgnoredMulti", &Udp6IgnoredMulti); + arl_expect(arl_base, "UdpLite6InDatagrams", &UdpLite6InDatagrams); + arl_expect(arl_base, "UdpLite6NoPorts", &UdpLite6NoPorts); + arl_expect(arl_base, "UdpLite6InErrors", &UdpLite6InErrors); + arl_expect(arl_base, 
"UdpLite6OutDatagrams", &UdpLite6OutDatagrams); + arl_expect(arl_base, "UdpLite6RcvbufErrors", &UdpLite6RcvbufErrors); + arl_expect(arl_base, "UdpLite6SndbufErrors", &UdpLite6SndbufErrors); + arl_expect(arl_base, "UdpLite6InCsumErrors", &UdpLite6InCsumErrors); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp6"); + ff = procfile_open(config_get("plugin:proc:/proc/net/snmp6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + arl_begin(arl_base); + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 2)) { + if(unlikely(words)) error("Cannot read /proc/net/snmp6 line %zu. Expected 2 params, read %zu.", l, words); + continue; + } + + if(unlikely(arl_check(arl_base, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + // -------------------------------------------------------------------- + + if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (Ip6InOctets || Ip6OutOctets))) { + do_bandwidth = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, + *rd_sent = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "ipv6" + , NULL + , "network" + , NULL + , "IPv6 Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_SYSTEM_IPV6 + , update_every + , RRDSET_TYPE_AREA + ); + + rd_received = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, Ip6InOctets); + rrddim_set_by_pointer(st, rd_sent, Ip6OutOctets); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (Ip6InReceives || Ip6OutRequests || Ip6InDelivers || Ip6OutForwDatagrams))) { + do_ip_packets = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, + *rd_sent = NULL, + *rd_forwarded = NULL, + *rd_delivers = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "packets" + , NULL + , "packets" + , NULL + , "IPv6 Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_forwarded = rrddim_add(st, "OutForwDatagrams", "forwarded", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_delivers = rrddim_add(st, "InDelivers", "delivers", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, Ip6InReceives); + rrddim_set_by_pointer(st, rd_sent, Ip6OutRequests); + rrddim_set_by_pointer(st, rd_forwarded, Ip6OutForwDatagrams); + rrddim_set_by_pointer(st, rd_delivers, Ip6InDelivers); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_fragsout == 
CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (Ip6FragOKs || Ip6FragFails || Ip6FragCreates))) { + do_ip_fragsout = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_ok = NULL, + *rd_failed = NULL, + *rd_all = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "fragsout" + , NULL + , "fragments6" + , NULL + , "IPv6 Fragments Sent" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_FRAGSOUT + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_all = rrddim_add(st, "FragCreates", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ok, Ip6FragOKs); + rrddim_set_by_pointer(st, rd_failed, Ip6FragFails); + rrddim_set_by_pointer(st, rd_all, Ip6FragCreates); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO + && ( + Ip6ReasmOKs + || Ip6ReasmFails + || Ip6ReasmTimeout + || Ip6ReasmReqds + ))) { + do_ip_fragsin = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_ok = NULL, + *rd_failed = NULL, + *rd_timeout = NULL, + *rd_all = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "fragsin" + , NULL + , "fragments6" + , NULL + , "IPv6 Fragments Reassembly" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_FRAGSIN + , update_every + , RRDSET_TYPE_LINE); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_timeout = rrddim_add(st, "ReasmTimeout", "timeout", -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_ok, Ip6ReasmOKs); + rrddim_set_by_pointer(st, rd_failed, Ip6ReasmFails); + rrddim_set_by_pointer(st, rd_timeout, Ip6ReasmTimeout); + rrddim_set_by_pointer(st, rd_all, Ip6ReasmReqds); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO + && ( + Ip6InDiscards + || Ip6OutDiscards + || Ip6InHdrErrors + || Ip6InAddrErrors + || Ip6InUnknownProtos + || Ip6InTooBigErrors + || Ip6InTruncatedPkts + || Ip6InNoRoutes + ))) { + do_ip_errors = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InDiscards = NULL, + *rd_OutDiscards = NULL, + *rd_InHdrErrors = NULL, + *rd_InAddrErrors = NULL, + *rd_InUnknownProtos = NULL, + *rd_InTooBigErrors = NULL, + *rd_InTruncatedPkts = NULL, + *rd_InNoRoutes = NULL, + *rd_OutNoRoutes = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "errors" + , NULL + , "errors" + , NULL + , "IPv6 Errors" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutDiscards = rrddim_add(st, 
"OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTooBigErrors = rrddim_add(st, "InTooBigErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InDiscards, Ip6InDiscards); + rrddim_set_by_pointer(st, rd_OutDiscards, Ip6OutDiscards); + rrddim_set_by_pointer(st, rd_InHdrErrors, Ip6InHdrErrors); + rrddim_set_by_pointer(st, rd_InAddrErrors, Ip6InAddrErrors); + rrddim_set_by_pointer(st, rd_InUnknownProtos, Ip6InUnknownProtos); + rrddim_set_by_pointer(st, rd_InTooBigErrors, Ip6InTooBigErrors); + rrddim_set_by_pointer(st, rd_InTruncatedPkts, Ip6InTruncatedPkts); + rrddim_set_by_pointer(st, rd_InNoRoutes, Ip6InNoRoutes); + rrddim_set_by_pointer(st, rd_OutNoRoutes, Ip6OutNoRoutes); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (Udp6InDatagrams || Udp6OutDatagrams))) { + do_udp_packets = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, + *rd_sent = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "udppackets" + , NULL + , "udp6" + , NULL + , "IPv6 UDP Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_UDP_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, Udp6InDatagrams); + rrddim_set_by_pointer(st, rd_sent, Udp6OutDatagrams); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO + && ( + Udp6InErrors + || Udp6NoPorts + || Udp6RcvbufErrors + || Udp6SndbufErrors + || Udp6InCsumErrors + || Udp6IgnoredMulti + ))) { + do_udp_errors = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_RcvbufErrors = NULL, + *rd_SndbufErrors = NULL, + *rd_InErrors = NULL, + *rd_NoPorts = NULL, + *rd_InCsumErrors = NULL, + *rd_IgnoredMulti = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "udperrors" + , NULL + , "udp6" + , NULL + , "IPv6 UDP Errors" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_UDP_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = 
rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_RcvbufErrors, Udp6RcvbufErrors); + rrddim_set_by_pointer(st, rd_SndbufErrors, Udp6SndbufErrors); + rrddim_set_by_pointer(st, rd_InErrors, Udp6InErrors); + rrddim_set_by_pointer(st, rd_NoPorts, Udp6NoPorts); + rrddim_set_by_pointer(st, rd_InCsumErrors, Udp6InCsumErrors); + rrddim_set_by_pointer(st, rd_IgnoredMulti, Udp6IgnoredMulti); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && (UdpLite6InDatagrams || UdpLite6OutDatagrams))) { + do_udplite_packets = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, + *rd_sent = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "udplitepackets" + , NULL + , "udplite6" + , NULL + , "IPv6 UDPlite Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_received, UdpLite6InDatagrams); + rrddim_set_by_pointer(st, rd_sent, UdpLite6OutDatagrams); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO + && ( + UdpLite6InErrors + || UdpLite6NoPorts + || UdpLite6RcvbufErrors + || UdpLite6SndbufErrors + || Udp6InCsumErrors + || UdpLite6InCsumErrors + ))) { + do_udplite_errors = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_RcvbufErrors = NULL, + *rd_SndbufErrors = NULL, + *rd_InErrors = NULL, + *rd_NoPorts = NULL, + *rd_InCsumErrors = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "udpliteerrors" + , NULL + , "udplite6" + , NULL + , "IPv6 UDP Lite Errors" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InErrors, UdpLite6InErrors); + rrddim_set_by_pointer(st, rd_NoPorts, UdpLite6NoPorts); + rrddim_set_by_pointer(st, rd_RcvbufErrors, UdpLite6RcvbufErrors); + rrddim_set_by_pointer(st, rd_SndbufErrors, UdpLite6SndbufErrors); + rrddim_set_by_pointer(st, rd_InCsumErrors, UdpLite6InCsumErrors); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastOctets || Ip6InMcastOctets))) { + do_mcast = CONFIG_BOOLEAN_YES; + static 
RRDSET *st = NULL; + static RRDDIM *rd_Ip6InMcastOctets = NULL, + *rd_Ip6OutMcastOctets = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "mcast" + , NULL + , "multicast6" + , NULL + , "IPv6 Multicast Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_MCAST + , update_every + , RRDSET_TYPE_AREA + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_Ip6InMcastOctets = rrddim_add(st, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_Ip6OutMcastOctets = rrddim_add(st, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_Ip6InMcastOctets, Ip6InMcastOctets); + rrddim_set_by_pointer(st, rd_Ip6OutMcastOctets, Ip6OutMcastOctets); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (Ip6OutBcastOctets || Ip6InBcastOctets))) { + do_bcast = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_Ip6InBcastOctets = NULL, + *rd_Ip6OutBcastOctets = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "bcast" + , NULL + , "broadcast6" + , NULL + , "IPv6 Broadcast Bandwidth" + , "kilobits/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_BCAST + , update_every + , RRDSET_TYPE_AREA + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_Ip6InBcastOctets = rrddim_add(st, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_Ip6OutBcastOctets = rrddim_add(st, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_Ip6InBcastOctets, Ip6InBcastOctets); + rrddim_set_by_pointer(st, rd_Ip6OutBcastOctets, Ip6OutBcastOctets); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastPkts || Ip6InMcastPkts))) { + do_mcast_p = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_Ip6InMcastPkts = NULL, + *rd_Ip6OutMcastPkts = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "mcastpkts" + , NULL + , "multicast6" + , NULL + , "IPv6 Multicast Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_Ip6InMcastPkts = rrddim_add(st, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_Ip6OutMcastPkts = rrddim_add(st, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_Ip6InMcastPkts, Ip6InMcastPkts); + rrddim_set_by_pointer(st, rd_Ip6OutMcastPkts, Ip6OutMcastPkts); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && (Icmp6InMsgs || Icmp6OutMsgs))) { + do_icmp = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_Icmp6InMsgs = NULL, + *rd_Icmp6OutMsgs = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmp" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP Messages" + , "messages/s" + , PLUGIN_PROC_NAME 
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_Icmp6InMsgs = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_Icmp6OutMsgs = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_Icmp6InMsgs, Icmp6InMsgs); + rrddim_set_by_pointer(st, rd_Icmp6OutMsgs, Icmp6OutMsgs); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && (Icmp6InRedirects || Icmp6OutRedirects))) { + do_icmp_redir = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_Icmp6InRedirects = NULL, + *rd_Icmp6OutRedirects = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmpredir" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP Redirects" + , "redirects/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_REDIR + , update_every + , RRDSET_TYPE_LINE + ); + + rd_Icmp6InRedirects = rrddim_add(st, "InRedirects", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_Icmp6OutRedirects = rrddim_add(st, "OutRedirects", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_Icmp6InRedirects, Icmp6InRedirects); + rrddim_set_by_pointer(st, rd_Icmp6OutRedirects, Icmp6OutRedirects); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO + && ( + Icmp6InErrors + || Icmp6OutErrors + || Icmp6InCsumErrors + || Icmp6InDestUnreachs + || Icmp6InPktTooBigs + || Icmp6InTimeExcds + || Icmp6InParmProblems + || Icmp6OutDestUnreachs + || Icmp6OutPktTooBigs + || Icmp6OutTimeExcds + || Icmp6OutParmProblems + ))) { + do_icmp_errors = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InErrors = NULL, + *rd_OutErrors = NULL, + *rd_InCsumErrors = NULL, + *rd_InDestUnreachs = NULL, + *rd_InPktTooBigs = NULL, + *rd_InTimeExcds = NULL, + *rd_InParmProblems = NULL, + *rd_OutDestUnreachs = NULL, + *rd_OutPktTooBigs = NULL, + *rd_OutTimeExcds = NULL, + *rd_OutParmProblems = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmperrors" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP Errors" + , "errors/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutErrors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InDestUnreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InPktTooBigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InTimeExcds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InParmProblems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutDestUnreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutPktTooBigs = rrddim_add(st, "OutPktTooBigs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutTimeExcds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + 
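+                // the In*/Out* pairs in this chart follow the sign convention used
+                // throughout this patch: In* dimensions use multiplier 1 and Out*
+                // dimensions use -1, so received events plot above the zero axis and
+                // sent events below it; with RRD_ALGORITHM_INCREMENTAL the charted
+                // value is (roughly) (counter_now - counter_prev) / seconds_elapsed,
+                // scaled by multiplier/divisor.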
rd_OutParmProblems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InErrors, Icmp6InErrors); + rrddim_set_by_pointer(st, rd_OutErrors, Icmp6OutErrors); + rrddim_set_by_pointer(st, rd_InCsumErrors, Icmp6InCsumErrors); + rrddim_set_by_pointer(st, rd_InDestUnreachs, Icmp6InDestUnreachs); + rrddim_set_by_pointer(st, rd_InPktTooBigs, Icmp6InPktTooBigs); + rrddim_set_by_pointer(st, rd_InTimeExcds, Icmp6InTimeExcds); + rrddim_set_by_pointer(st, rd_InParmProblems, Icmp6InParmProblems); + rrddim_set_by_pointer(st, rd_OutDestUnreachs, Icmp6OutDestUnreachs); + rrddim_set_by_pointer(st, rd_OutPktTooBigs, Icmp6OutPktTooBigs); + rrddim_set_by_pointer(st, rd_OutTimeExcds, Icmp6OutTimeExcds); + rrddim_set_by_pointer(st, rd_OutParmProblems, Icmp6OutParmProblems); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO + && ( + Icmp6InEchos + || Icmp6OutEchos + || Icmp6InEchoReplies + || Icmp6OutEchoReplies + ))) { + do_icmp_echos = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InEchos = NULL, + *rd_OutEchos = NULL, + *rd_InEchoReplies = NULL, + *rd_OutEchoReplies = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmpechos" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP Echo" + , "messages/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InEchos = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutEchos = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InEchoReplies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutEchoReplies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InEchos, Icmp6InEchos); + rrddim_set_by_pointer(st, rd_OutEchos, Icmp6OutEchos); + rrddim_set_by_pointer(st, rd_InEchoReplies, Icmp6InEchoReplies); + rrddim_set_by_pointer(st, rd_OutEchoReplies, Icmp6OutEchoReplies); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO + && ( + Icmp6InGroupMembQueries + || Icmp6OutGroupMembQueries + || Icmp6InGroupMembResponses + || Icmp6OutGroupMembResponses + || Icmp6InGroupMembReductions + || Icmp6OutGroupMembReductions + ))) { + do_icmp_groupmemb = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InQueries = NULL, + *rd_OutQueries = NULL, + *rd_InResponses = NULL, + *rd_OutResponses = NULL, + *rd_InReductions = NULL, + *rd_OutReductions = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "groupmemb" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP Group Membership" + , "messages/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB + , update_every + , RRDSET_TYPE_LINE); + + rd_InQueries = rrddim_add(st, "InQueries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutQueries = rrddim_add(st, "OutQueries", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InResponses = rrddim_add(st, "InResponses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutResponses = rrddim_add(st, "OutResponses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + 
rd_InReductions = rrddim_add(st, "InReductions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutReductions = rrddim_add(st, "OutReductions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InQueries, Icmp6InGroupMembQueries); + rrddim_set_by_pointer(st, rd_OutQueries, Icmp6OutGroupMembQueries); + rrddim_set_by_pointer(st, rd_InResponses, Icmp6InGroupMembResponses); + rrddim_set_by_pointer(st, rd_OutResponses, Icmp6OutGroupMembResponses); + rrddim_set_by_pointer(st, rd_InReductions, Icmp6InGroupMembReductions); + rrddim_set_by_pointer(st, rd_OutReductions, Icmp6OutGroupMembReductions); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO + && ( + Icmp6InRouterSolicits + || Icmp6OutRouterSolicits + || Icmp6InRouterAdvertisements + || Icmp6OutRouterAdvertisements + ))) { + do_icmp_router = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InSolicits = NULL, + *rd_OutSolicits = NULL, + *rd_InAdvertisements = NULL, + *rd_OutAdvertisements = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmprouter" + , NULL + , "icmp6" + , NULL + , "IPv6 Router Messages" + , "messages/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InRouterSolicits); + rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutRouterSolicits); + rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InRouterAdvertisements); + rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutRouterAdvertisements); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO + && ( + Icmp6InNeighborSolicits + || Icmp6OutNeighborSolicits + || Icmp6InNeighborAdvertisements + || Icmp6OutNeighborAdvertisements + ))) { + do_icmp_neighbor = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InSolicits = NULL, + *rd_OutSolicits = NULL, + *rd_InAdvertisements = NULL, + *rd_OutAdvertisements = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmpneighbor" + , NULL + , "icmp6" + , NULL + , "IPv6 Neighbor Messages" + , "messages/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InNeighborSolicits); + rrddim_set_by_pointer(st, rd_OutSolicits, 
Icmp6OutNeighborSolicits); + rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InNeighborAdvertisements); + rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutNeighborAdvertisements); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && (Icmp6InMLDv2Reports || Icmp6OutMLDv2Reports))) { + do_icmp_mldv2 = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InMLDv2Reports = NULL, + *rd_OutMLDv2Reports = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmpmldv2" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP MLDv2 Reports" + , "reports/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InMLDv2Reports = rrddim_add(st, "InMLDv2Reports", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutMLDv2Reports = rrddim_add(st, "OutMLDv2Reports", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InMLDv2Reports, Icmp6InMLDv2Reports); + rrddim_set_by_pointer(st, rd_OutMLDv2Reports, Icmp6OutMLDv2Reports); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO + && ( + Icmp6InType1 + || Icmp6InType128 + || Icmp6InType129 + || Icmp6InType136 + || Icmp6OutType1 + || Icmp6OutType128 + || Icmp6OutType129 + || Icmp6OutType133 + || Icmp6OutType135 + || Icmp6OutType143 + ))) { + do_icmp_types = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InType1 = NULL, + *rd_InType128 = NULL, + *rd_InType129 = NULL, + *rd_InType136 = NULL, + *rd_OutType1 = NULL, + *rd_OutType128 = NULL, + *rd_OutType129 = NULL, + *rd_OutType133 = NULL, + *rd_OutType135 = NULL, + *rd_OutType143 = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "icmptypes" + , NULL + , "icmp6" + , NULL + , "IPv6 ICMP Types" + , "messages/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ICMP_TYPES + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InType1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InType128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InType129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InType136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutType1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutType128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutType129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutType133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutType135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_OutType143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InType1, Icmp6InType1); + rrddim_set_by_pointer(st, rd_InType128, Icmp6InType128); + rrddim_set_by_pointer(st, rd_InType129, Icmp6InType129); + rrddim_set_by_pointer(st, rd_InType136, Icmp6InType136); + rrddim_set_by_pointer(st, rd_OutType1, Icmp6OutType1); + rrddim_set_by_pointer(st, rd_OutType128, Icmp6OutType128); + rrddim_set_by_pointer(st, rd_OutType129, 
Icmp6OutType129); + rrddim_set_by_pointer(st, rd_OutType133, Icmp6OutType133); + rrddim_set_by_pointer(st, rd_OutType135, Icmp6OutType135); + rrddim_set_by_pointer(st, rd_OutType143, Icmp6OutType143); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO + && ( + Ip6InNoECTPkts + || Ip6InECT1Pkts + || Ip6InECT0Pkts + || Ip6InCEPkts + ))) { + do_ect = CONFIG_BOOLEAN_YES; + static RRDSET *st = NULL; + static RRDDIM *rd_InNoECTPkts = NULL, + *rd_InECT1Pkts = NULL, + *rd_InECT0Pkts = NULL, + *rd_InCEPkts = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_SNMP6 + , "ect" + , NULL + , "packets" + , NULL + , "IPv6 ECT Packets" + , "packets/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SNMP6_NAME + , NETDATA_CHART_PRIO_IPV6_ECT + , update_every + , RRDSET_TYPE_LINE + ); + + rd_InNoECTPkts = rrddim_add(st, "InNoECTPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InECT1Pkts = rrddim_add(st, "InECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InECT0Pkts = rrddim_add(st, "InECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_InCEPkts = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_InNoECTPkts, Ip6InNoECTPkts); + rrddim_set_by_pointer(st, rd_InECT1Pkts, Ip6InECT1Pkts); + rrddim_set_by_pointer(st, rd_InECT0Pkts, Ip6InECT0Pkts); + rrddim_set_by_pointer(st, rd_InCEPkts, Ip6InCEPkts); + rrdset_done(st); + } + + return 0; +} + diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c new file mode 100644 index 000000000..0c3b6e196 --- /dev/null +++ b/collectors/proc.plugin/proc_net_sockstat.c @@ -0,0 +1,518 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME "/proc/net/sockstat" + +static struct proc_net_sockstat { + kernel_uint_t sockets_used; + + kernel_uint_t tcp_inuse; + kernel_uint_t tcp_orphan; + kernel_uint_t tcp_tw; + kernel_uint_t tcp_alloc; + kernel_uint_t tcp_mem; + + kernel_uint_t udp_inuse; + kernel_uint_t udp_mem; + + kernel_uint_t udplite_inuse; + + kernel_uint_t raw_inuse; + + kernel_uint_t frag_inuse; + kernel_uint_t frag_memory; +} sockstat_root = { 0 }; + + +static int read_tcp_mem(void) { + static char *filename = NULL; + static RRDVAR *tcp_mem_low_threshold = NULL, + *tcp_mem_pressure_threshold = NULL, + *tcp_mem_high_threshold = NULL; + + if(unlikely(!tcp_mem_low_threshold)) { + tcp_mem_low_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_low"); + tcp_mem_pressure_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_pressure"); + tcp_mem_high_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_high"); + } + + if(unlikely(!filename)) { + char buffer[FILENAME_MAX + 1]; + snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_mem", netdata_configured_host_prefix); + filename = strdupz(buffer); + } + + char buffer[200 + 1], *start, *end; + if(read_file(filename, buffer, 200) != 0) return 1; + buffer[200] = '\0'; + + unsigned long long low = 0, pressure = 0, high = 0; + + start = buffer; + low = strtoull(start, &end, 10); + + start = end; + pressure = strtoull(start, &end, 10); + + start = end; + high = strtoull(start, &end, 10); + + // fprintf(stderr, "TCP MEM low = %llu, pressure = %llu, high = %llu\n", low, pressure, high); + + rrdvar_custom_host_variable_set(localhost, 
tcp_mem_low_threshold, low * sysconf(_SC_PAGESIZE) / 1024.0); + rrdvar_custom_host_variable_set(localhost, tcp_mem_pressure_threshold, pressure * sysconf(_SC_PAGESIZE) / 1024.0); + rrdvar_custom_host_variable_set(localhost, tcp_mem_high_threshold, high * sysconf(_SC_PAGESIZE) / 1024.0); + + return 0; +} + +static kernel_uint_t read_tcp_max_orphans(void) { + static char *filename = NULL; + static RRDVAR *tcp_max_orphans_var = NULL; + + if(unlikely(!filename)) { + char buffer[FILENAME_MAX + 1]; + snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_max_orphans", netdata_configured_host_prefix); + filename = strdupz(buffer); + } + + unsigned long long tcp_max_orphans = 0; + if(read_single_number_file(filename, &tcp_max_orphans) == 0) { + + if(unlikely(!tcp_max_orphans_var)) + tcp_max_orphans_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_orphans"); + + rrdvar_custom_host_variable_set(localhost, tcp_max_orphans_var, tcp_max_orphans); + return tcp_max_orphans; + } + + return 0; +} + +int do_proc_net_sockstat(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + + static uint32_t hash_sockets = 0, + hash_raw = 0, + hash_frag = 0, + hash_tcp = 0, + hash_udp = 0, + hash_udplite = 0; + + static long long update_constants_every = 60, update_constants_count = 0; + + static ARL_BASE *arl_sockets = NULL; + static ARL_BASE *arl_tcp = NULL; + static ARL_BASE *arl_udp = NULL; + static ARL_BASE *arl_udplite = NULL; + static ARL_BASE *arl_raw = NULL; + static ARL_BASE *arl_frag = NULL; + + static int do_sockets = -1, do_tcp_sockets = -1, do_tcp_mem = -1, do_udp_sockets = -1, do_udp_mem = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1, do_frag_mem = -1; + + static char *keys[7] = { NULL }; + static uint32_t hashes[7] = { 0 }; + static ARL_BASE *bases[7] = { NULL }; + + if(unlikely(!arl_sockets)) { + do_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 sockets", CONFIG_BOOLEAN_AUTO); + do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP sockets", CONFIG_BOOLEAN_AUTO); + do_tcp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP memory", CONFIG_BOOLEAN_AUTO); + do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP sockets", CONFIG_BOOLEAN_AUTO); + do_udp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP memory", CONFIG_BOOLEAN_AUTO); + do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDPLITE sockets", CONFIG_BOOLEAN_AUTO); + do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 RAW sockets", CONFIG_BOOLEAN_AUTO); + do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO); + do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO); + + update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every); + update_constants_count = update_constants_every; + + arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_sockets, "used", &sockstat_root.sockets_used); + + arl_tcp = arl_create("sockstat/TCP", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_tcp, "inuse", &sockstat_root.tcp_inuse); + arl_expect(arl_tcp, "orphan", &sockstat_root.tcp_orphan); + arl_expect(arl_tcp, "tw", &sockstat_root.tcp_tw); + 
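+        // these keys mirror the kernel's /proc/net/sockstat TCP line, e.g.
+        // "TCP: inuse 12 orphan 0 tw 4 alloc 19 mem 3" (values illustrative);
+        // "mem" is counted in pages, which the tcp_mem chart below converts to KB
+        // using sysconf(_SC_PAGESIZE) as the multiplier and 1024 as the divisor.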
arl_expect(arl_tcp, "alloc", &sockstat_root.tcp_alloc); + arl_expect(arl_tcp, "mem", &sockstat_root.tcp_mem); + + arl_udp = arl_create("sockstat/UDP", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_udp, "inuse", &sockstat_root.udp_inuse); + arl_expect(arl_udp, "mem", &sockstat_root.udp_mem); + + arl_udplite = arl_create("sockstat/UDPLITE", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_udplite, "inuse", &sockstat_root.udplite_inuse); + + arl_raw = arl_create("sockstat/RAW", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_raw, "inuse", &sockstat_root.raw_inuse); + + arl_frag = arl_create("sockstat/FRAG", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_frag, "inuse", &sockstat_root.frag_inuse); + arl_expect(arl_frag, "memory", &sockstat_root.frag_memory); + + hash_sockets = simple_hash("sockets"); + hash_tcp = simple_hash("TCP"); + hash_udp = simple_hash("UDP"); + hash_udplite = simple_hash("UDPLITE"); + hash_raw = simple_hash("RAW"); + hash_frag = simple_hash("FRAG"); + + keys[0] = "sockets"; hashes[0] = hash_sockets; bases[0] = arl_sockets; + keys[1] = "TCP"; hashes[1] = hash_tcp; bases[1] = arl_tcp; + keys[2] = "UDP"; hashes[2] = hash_udp; bases[2] = arl_udp; + keys[3] = "UDPLITE"; hashes[3] = hash_udplite; bases[3] = arl_udplite; + keys[4] = "RAW"; hashes[4] = hash_raw; bases[4] = arl_raw; + keys[5] = "FRAG"; hashes[5] = hash_frag; bases[5] = arl_frag; + keys[6] = NULL; // terminator + } + + update_constants_count += update_every; + if(unlikely(update_constants_count > update_constants_every)) { + read_tcp_max_orphans(); + read_tcp_mem(); + update_constants_count = 0; + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat"); + ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + char *key = procfile_lineword(ff, l, 0); + uint32_t hash = simple_hash(key); + + int k; + for(k = 0; keys[k] ; k++) { + if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) { + // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words); + ARL_BASE *arl = bases[k]; + arl_begin(arl); + size_t w = 1; + + while(w + 1 < words) { + char *name = procfile_lineword(ff, l, w); w++; + char *value = procfile_lineword(ff, l, w); w++; + // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words); + if(unlikely(arl_check(arl, name, value) != 0)) + break; + } + + break; + } + } + } + + // ------------------------------------------------------------------------ + + if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.sockets_used)) { + do_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_used = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_sockets" + , NULL + , "sockets" + , NULL + , "IPv4 Sockets Used" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_SOCKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_used = rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, 
rd_used, (collected_number)sockstat_root.sockets_used); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat_root.tcp_inuse || sockstat_root.tcp_orphan || sockstat_root.tcp_tw || sockstat_root.tcp_alloc))) { + do_tcp_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL, + *rd_orphan = NULL, + *rd_timewait = NULL, + *rd_alloc = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_tcp_sockets" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_alloc = rrddim_add(st, "alloc", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_orphan = rrddim_add(st, "orphan", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_timewait = rrddim_add(st, "timewait", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.tcp_inuse); + rrddim_set_by_pointer(st, rd_orphan, (collected_number)sockstat_root.tcp_orphan); + rrddim_set_by_pointer(st, rd_timewait, (collected_number)sockstat_root.tcp_tw); + rrddim_set_by_pointer(st, rd_alloc, (collected_number)sockstat_root.tcp_alloc); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.tcp_mem)) { + do_tcp_mem = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_mem = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_tcp_mem" + , NULL + , "tcp" + , NULL + , "IPv4 TCP Sockets Memory" + , "KB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_TCP_MEM + , update_every + , RRDSET_TYPE_AREA + ); + + rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.tcp_mem); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_inuse)) { + do_udp_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_udp_sockets" + , NULL + , "udp" + , NULL + , "IPv4 UDP Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_UDP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udp_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_mem)) { + do_udp_mem = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_mem = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_udp_mem" + , NULL + , "udp" + , NULL + , "IPv4 UDP Sockets 
Memory" + , "KB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_UDP_MEM + , update_every + , RRDSET_TYPE_AREA + ); + + rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.udp_mem); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udplite_inuse)) { + do_udplite_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_udplite_sockets" + , NULL + , "udplite" + , NULL + , "IPv4 UDPLITE Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_UDPLITE + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udplite_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.raw_inuse)) { + do_raw_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_raw_sockets" + , NULL + , "raw" + , NULL + , "IPv4 RAW Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_RAW + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.raw_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_inuse)) { + do_frag_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_frag_sockets" + , NULL + , "fragments" + , NULL + , "IPv4 FRAG Sockets" + , "fragments" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.frag_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_memory)) { + do_frag_mem = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_mem = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv4" + , "sockstat_frag_mem" + , NULL + , "fragments" + , NULL + , "IPv4 FRAG Sockets Memory" + , "KB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME + , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM + , update_every + , RRDSET_TYPE_AREA + ); + + rd_mem = rrddim_add(st, "mem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else 
rrdset_next(st); + + rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.frag_memory); + rrdset_done(st); + } + + return 0; +} + diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c new file mode 100644 index 000000000..687b9bdeb --- /dev/null +++ b/collectors/proc.plugin/proc_net_sockstat6.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME "/proc/net/sockstat6" + +static struct proc_net_sockstat6 { + kernel_uint_t tcp6_inuse; + kernel_uint_t udp6_inuse; + kernel_uint_t udplite6_inuse; + kernel_uint_t raw6_inuse; + kernel_uint_t frag6_inuse; +} sockstat6_root = { 0 }; + +int do_proc_net_sockstat6(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + + static uint32_t hash_raw = 0, + hash_frag = 0, + hash_tcp = 0, + hash_udp = 0, + hash_udplite = 0; + + static ARL_BASE *arl_tcp = NULL; + static ARL_BASE *arl_udp = NULL; + static ARL_BASE *arl_udplite = NULL; + static ARL_BASE *arl_raw = NULL; + static ARL_BASE *arl_frag = NULL; + + static int do_tcp_sockets = -1, do_udp_sockets = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1; + + static char *keys[6] = { NULL }; + static uint32_t hashes[6] = { 0 }; + static ARL_BASE *bases[6] = { NULL }; + + if(unlikely(!arl_tcp)) { + do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 TCP sockets", CONFIG_BOOLEAN_AUTO); + do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDP sockets", CONFIG_BOOLEAN_AUTO); + do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDPLITE sockets", CONFIG_BOOLEAN_AUTO); + do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 RAW sockets", CONFIG_BOOLEAN_AUTO); + do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 FRAG sockets", CONFIG_BOOLEAN_AUTO); + + arl_tcp = arl_create("sockstat6/TCP6", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_tcp, "inuse", &sockstat6_root.tcp6_inuse); + + arl_udp = arl_create("sockstat6/UDP6", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_udp, "inuse", &sockstat6_root.udp6_inuse); + + arl_udplite = arl_create("sockstat6/UDPLITE6", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_udplite, "inuse", &sockstat6_root.udplite6_inuse); + + arl_raw = arl_create("sockstat6/RAW6", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_raw, "inuse", &sockstat6_root.raw6_inuse); + + arl_frag = arl_create("sockstat6/FRAG6", arl_callback_str2kernel_uint_t, 60); + arl_expect(arl_frag, "inuse", &sockstat6_root.frag6_inuse); + + hash_tcp = simple_hash("TCP6"); + hash_udp = simple_hash("UDP6"); + hash_udplite = simple_hash("UDPLITE6"); + hash_raw = simple_hash("RAW6"); + hash_frag = simple_hash("FRAG6"); + + keys[0] = "TCP6"; hashes[0] = hash_tcp; bases[0] = arl_tcp; + keys[1] = "UDP6"; hashes[1] = hash_udp; bases[1] = arl_udp; + keys[2] = "UDPLITE6"; hashes[2] = hash_udplite; bases[2] = arl_udplite; + keys[3] = "RAW6"; hashes[3] = hash_raw; bases[3] = arl_raw; + keys[4] = "FRAG6"; hashes[4] = hash_frag; bases[4] = arl_frag; + keys[5] = NULL; // terminator + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat6"); + ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat6", "filename to monitor", filename), " \t:", 
PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + char *key = procfile_lineword(ff, l, 0); + uint32_t hash = simple_hash(key); + + int k; + for(k = 0; keys[k] ; k++) { + if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) { + // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words); + ARL_BASE *arl = bases[k]; + arl_begin(arl); + size_t w = 1; + + while(w + 1 < words) { + char *name = procfile_lineword(ff, l, w); w++; + char *value = procfile_lineword(ff, l, w); w++; + // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words); + if(unlikely(arl_check(arl, name, value) != 0)) + break; + } + + break; + } + } + } + + // ------------------------------------------------------------------------ + + if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat6_root.tcp6_inuse))) { + do_tcp_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "sockstat6_tcp_sockets" + , NULL + , "tcp6" + , NULL + , "IPv6 TCP Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME + , NETDATA_CHART_PRIO_IPV6_TCP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.tcp6_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udp6_inuse)) { + do_udp_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "sockstat6_udp_sockets" + , NULL + , "udp6" + , NULL + , "IPv6 UDP Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME + , NETDATA_CHART_PRIO_IPV6_UDP + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udp6_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udplite6_inuse)) { + do_udplite_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "sockstat6_udplite_sockets" + , NULL + , "udplite6" + , NULL + , "IPv6 UDPLITE Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME + , NETDATA_CHART_PRIO_IPV6_UDPLITE + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udplite6_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == 
CONFIG_BOOLEAN_AUTO && sockstat6_root.raw6_inuse)) { + do_raw_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "sockstat6_raw_sockets" + , NULL + , "raw6" + , NULL + , "IPv6 RAW Sockets" + , "sockets" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME + , NETDATA_CHART_PRIO_IPV6_RAW + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.raw6_inuse); + rrdset_done(st); + } + + // ------------------------------------------------------------------------ + + if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.frag6_inuse)) { + do_frag_sockets = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + static RRDDIM *rd_inuse = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "ipv6" + , "sockstat6_frag_sockets" + , NULL + , "fragments6" + , NULL + , "IPv6 FRAG Sockets" + , "fragments" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME + , NETDATA_CHART_PRIO_IPV6_FRAGMENTS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.frag6_inuse); + rrdset_done(st); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c new file mode 100644 index 000000000..7ec783e77 --- /dev/null +++ b/collectors/proc.plugin/proc_net_softnet_stat.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_NET_SOFTNET_NAME "/proc/net/softnet_stat" + +static inline char *softnet_column_name(size_t column) { + switch(column) { + // https://github.com/torvalds/linux/blob/a7fd20d1c476af4563e66865213474a2f9f473a4/net/core/net-procfs.c#L161-L166 + case 0: return "processed"; + case 1: return "dropped"; + case 2: return "squeezed"; + case 9: return "received_rps"; + case 10: return "flow_limit_count"; + default: return NULL; + } +} + +int do_proc_net_softnet_stat(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + static int do_per_core = -1; + static size_t allocated_lines = 0, allocated_columns = 0; + static uint32_t *data = NULL; + + if(unlikely(do_per_core == -1)) do_per_core = config_get_boolean("plugin:proc:/proc/net/softnet_stat", "softnet_stat per core", 1); + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/softnet_stat"); + ff = procfile_open(config_get("plugin:proc:/proc/net/softnet_stat", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + size_t words = procfile_linewords(ff, 0), w; + + if(unlikely(!lines || !words)) { + error("Cannot read /proc/net/softnet_stat, %zu lines and %zu columns reported.", lines, words); + return 1; + } + + if(unlikely(lines > 200)) lines = 200; + if(unlikely(words > 50)) words = 50; + + if(unlikely(!data || lines > allocated_lines || words > allocated_columns)) { + freez(data); + allocated_lines = lines; + 
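/*
 * Layout of the flat buffer whose dimensions are recorded here: one
 * uint32_t matrix of (lines + 1) rows by allocated_columns columns.
 * Row 0 accumulates the per-column sums over all CPUs and rows
 * 1..lines keep the raw values of each softnet_stat line, so the
 * parse loop below writes:
 *
 *     data[w]                                 += t;  // system-wide total
 *     data[((l + 1) * allocated_columns) + w]  = t;  // per-CPU value
 *
 * For example, with 11 columns (a typical softnet_stat line) the
 * "dropped" counter (column 1) of CPU 2 lands at
 * data[(2 + 1) * 11 + 1] == data[34], while data[1] holds "dropped"
 * summed across every CPU.
 */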
allocated_columns = words; + data = mallocz((allocated_lines + 1) * allocated_columns * sizeof(uint32_t)); + } + + // initialize to zero + memset(data, 0, (allocated_lines + 1) * allocated_columns * sizeof(uint32_t)); + + // parse the values + for(l = 0; l < lines ;l++) { + words = procfile_linewords(ff, l); + if(unlikely(!words)) continue; + + if(unlikely(words > allocated_columns)) + words = allocated_columns; + + for(w = 0; w < words ; w++) { + if(unlikely(softnet_column_name(w))) { + uint32_t t = (uint32_t)strtoul(procfile_lineword(ff, l, w), NULL, 16); + data[w] += t; + data[((l + 1) * allocated_columns) + w] = t; + } + } + } + + if(unlikely(data[(lines * allocated_columns)] == 0)) + lines--; + + RRDSET *st; + + // -------------------------------------------------------------------- + + st = rrdset_find_bytype_localhost("system", "softnet_stat"); + if(unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "softnet_stat" + , NULL + , "softnet_stat" + , "system.softnet_stat" + , "System softnet_stat" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME + , NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT + , update_every + , RRDSET_TYPE_LINE + ); + for(w = 0; w < allocated_columns ;w++) + if(unlikely(softnet_column_name(w))) + rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + for(w = 0; w < allocated_columns ;w++) + if(unlikely(softnet_column_name(w))) + rrddim_set(st, softnet_column_name(w), data[w]); + + rrdset_done(st); + + if(do_per_core) { + for(l = 0; l < lines ;l++) { + char id[50+1]; + snprintfz(id, 50, "cpu%zu_softnet_stat", l); + + st = rrdset_find_bytype_localhost("cpu", id); + if(unlikely(!st)) { + char title[100+1]; + snprintfz(title, 100, "CPU%zu softnet_stat", l); + + st = rrdset_create_localhost( + "cpu" + , id + , NULL + , "softnet_stat" + , "cpu.softnet_stat" + , title + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME + , NETDATA_CHART_PRIO_SOFTNET_PER_CORE + l + , update_every + , RRDSET_TYPE_LINE + ); + for(w = 0; w < allocated_columns ;w++) + if(unlikely(softnet_column_name(w))) + rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + for(w = 0; w < allocated_columns ;w++) + if(unlikely(softnet_column_name(w))) + rrddim_set(st, softnet_column_name(w), data[((l + 1) * allocated_columns) + w]); + + rrdset_done(st); + } + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_stat_conntrack.c b/collectors/proc.plugin/proc_net_stat_conntrack.c new file mode 100644 index 000000000..f5257c0a0 --- /dev/null +++ b/collectors/proc.plugin/proc_net_stat_conntrack.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define RRD_TYPE_NET_STAT_NETFILTER "netfilter" +#define RRD_TYPE_NET_STAT_CONNTRACK "conntrack" +#define PLUGIN_PROC_MODULE_CONNTRACK_NAME "/proc/net/stat/nf_conntrack" + +int do_proc_net_stat_conntrack(int update_every, usec_t dt) { + static procfile *ff = NULL; + static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1; + static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0; + static int read_full = 1; + static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename; + static RRDVAR *rrdvar_max = NULL; + + unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0, + ainsert = 0, 
ainsert_failed = 0, adrop = 0, aearly_drop = 0, aicmp_error = 0, aexpect_new = 0, aexpect_create = 0, aexpect_delete = 0, asearch_restart = 0; + + if(unlikely(do_sockets == -1)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/nf_conntrack"); + nf_conntrack_filename = config_get("plugin:proc:/proc/net/stat/nf_conntrack", "filename to monitor", filename); + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_max"); + nf_conntrack_max_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "filename to monitor", filename); + usec_since_last_max = get_max_every = config_get_number("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "read every seconds", 10) * USEC_PER_SEC; + + read_full = 1; + ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT); + if(!ff) read_full = 0; + + do_new = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter new connections", read_full); + do_changes = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection changes", read_full); + do_expect = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection expectations", read_full); + do_search = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection searches", read_full); + do_errors = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter errors", read_full); + + do_sockets = 1; + if(!read_full) { + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_count"); + nf_conntrack_count_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count", "filename to monitor", filename); + + if(read_single_number_file(nf_conntrack_count_filename, &aentries)) + do_sockets = 0; + } + + do_sockets = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connections", do_sockets); + + if(!do_sockets && !read_full) + return 1; + + rrdvar_max = rrdvar_custom_host_variable_create(localhost, "netfilter.conntrack.max"); + } + + if(likely(read_full)) { + if(unlikely(!ff)) { + ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + for(l = 1; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 17)) { + if(unlikely(words)) error("Cannot read /proc/net/stat/nf_conntrack line. 
Expected 17 params, read %zu.", words); + continue; + } + + unsigned long long tentries = 0, tsearched = 0, tfound = 0, tnew = 0, tinvalid = 0, tignore = 0, tdelete = 0, tdelete_list = 0, tinsert = 0, tinsert_failed = 0, tdrop = 0, tearly_drop = 0, ticmp_error = 0, texpect_new = 0, texpect_create = 0, texpect_delete = 0, tsearch_restart = 0; + + tentries = strtoull(procfile_lineword(ff, l, 0), NULL, 16); + tsearched = strtoull(procfile_lineword(ff, l, 1), NULL, 16); + tfound = strtoull(procfile_lineword(ff, l, 2), NULL, 16); + tnew = strtoull(procfile_lineword(ff, l, 3), NULL, 16); + tinvalid = strtoull(procfile_lineword(ff, l, 4), NULL, 16); + tignore = strtoull(procfile_lineword(ff, l, 5), NULL, 16); + tdelete = strtoull(procfile_lineword(ff, l, 6), NULL, 16); + tdelete_list = strtoull(procfile_lineword(ff, l, 7), NULL, 16); + tinsert = strtoull(procfile_lineword(ff, l, 8), NULL, 16); + tinsert_failed = strtoull(procfile_lineword(ff, l, 9), NULL, 16); + tdrop = strtoull(procfile_lineword(ff, l, 10), NULL, 16); + tearly_drop = strtoull(procfile_lineword(ff, l, 11), NULL, 16); + ticmp_error = strtoull(procfile_lineword(ff, l, 12), NULL, 16); + texpect_new = strtoull(procfile_lineword(ff, l, 13), NULL, 16); + texpect_create = strtoull(procfile_lineword(ff, l, 14), NULL, 16); + texpect_delete = strtoull(procfile_lineword(ff, l, 15), NULL, 16); + tsearch_restart = strtoull(procfile_lineword(ff, l, 16), NULL, 16); + + if(unlikely(!aentries)) aentries = tentries; + + // sum all the cpus together + asearched += tsearched; // conntrack.search + afound += tfound; // conntrack.search + anew += tnew; // conntrack.new + ainvalid += tinvalid; // conntrack.new + aignore += tignore; // conntrack.new + adelete += tdelete; // conntrack.changes + adelete_list += tdelete_list; // conntrack.changes + ainsert += tinsert; // conntrack.changes + ainsert_failed += tinsert_failed; // conntrack.errors + adrop += tdrop; // conntrack.errors + aearly_drop += tearly_drop; // conntrack.errors + aicmp_error += ticmp_error; // conntrack.errors + aexpect_new += texpect_new; // conntrack.expect + aexpect_create += texpect_create; // conntrack.expect + aexpect_delete += texpect_delete; // conntrack.expect + asearch_restart += tsearch_restart; // conntrack.search + } + } + else { + if(unlikely(read_single_number_file(nf_conntrack_count_filename, &aentries))) + return 0; // we return 0, so that we will retry to open it next time + } + + usec_since_last_max += dt; + if(unlikely(rrdvar_max && usec_since_last_max >= get_max_every)) { + usec_since_last_max = 0; + + unsigned long long max; + if(likely(!read_single_number_file(nf_conntrack_max_filename, &max))) + rrdvar_custom_host_variable_set(localhost, rrdvar_max, max); + } + + // -------------------------------------------------------------------- + + if(do_sockets) { + static RRDSET *st = NULL; + static RRDDIM *rd_connections = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_sockets" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Connections" + , "active connections" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_CONNTRACK_NAME + , NETDATA_CHART_PRIO_NETFILTER_SOCKETS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_connections = rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_connections, aentries); + rrdset_done(st); + } + + // -------------------------------------------------------------------- 
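/*
 * Unlike the connections chart above (an ABSOLUTE gauge of the live
 * entry count), the remaining charts plot rates: each a* accumulator
 * is a hex column summed over all per-CPU rows of
 * /proc/net/stat/nf_conntrack, and each dimension is added with
 * RRD_ALGORITHM_INCREMENTAL, so what is shown is roughly the rate of
 * change scaled by the multiplier/divisor passed to rrddim_add():
 *
 *     shown = (value_now - value_last) * multiplier / divisor / seconds
 *
 * A multiplier of -1, as used for "ignore" and "invalid" below, simply
 * mirrors that rate under the x-axis, so opposing flows (e.g. new vs
 * ignored connections) read against each other on one chart.
 */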
+ + if(do_new) { + static RRDSET *st = NULL; + static RRDDIM + *rd_new = NULL, + *rd_ignore = NULL, + *rd_invalid = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_new" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker New Connections" + , "connections/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_CONNTRACK_NAME + , NETDATA_CHART_PRIO_NETFILTER_NEW + , update_every + , RRDSET_TYPE_LINE + ); + + rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_ignore = rrddim_add(st, "ignore", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_invalid = rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_new, anew); + rrddim_set_by_pointer(st, rd_ignore, aignore); + rrddim_set_by_pointer(st, rd_invalid, ainvalid); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_changes) { + static RRDSET *st = NULL; + static RRDDIM + *rd_inserted = NULL, + *rd_deleted = NULL, + *rd_delete_list = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_changes" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Changes" + , "changes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_CONNTRACK_NAME + , NETDATA_CHART_PRIO_NETFILTER_CHANGES + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_delete_list = rrddim_add(st, "delete_list", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_inserted, ainsert); + rrddim_set_by_pointer(st, rd_deleted, adelete); + rrddim_set_by_pointer(st, rd_delete_list, adelete_list); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_expect) { + static RRDSET *st = NULL; + static RRDDIM *rd_created = NULL, + *rd_deleted = NULL, + *rd_new = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_expect" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Expectations" + , "expectations/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_CONNTRACK_NAME + , NETDATA_CHART_PRIO_NETFILTER_EXPECT + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_created, aexpect_create); + rrddim_set_by_pointer(st, rd_deleted, aexpect_delete); + rrddim_set_by_pointer(st, rd_new, aexpect_new); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_search) { + static RRDSET *st = NULL; + static RRDDIM *rd_searched = NULL, + *rd_restarted = NULL, + *rd_found = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_search" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Searches" + , "searches/s" + , 
PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_CONNTRACK_NAME + , NETDATA_CHART_PRIO_NETFILTER_SEARCH + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_searched = rrddim_add(st, "searched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_found = rrddim_add(st, "found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_searched, asearched); + rrddim_set_by_pointer(st, rd_restarted, asearch_restart); + rrddim_set_by_pointer(st, rd_found, afound); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if(do_errors) { + static RRDSET *st = NULL; + static RRDDIM *rd_icmp_error = NULL, + *rd_insert_failed = NULL, + *rd_drop = NULL, + *rd_early_drop = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_CONNTRACK "_errors" + , NULL + , RRD_TYPE_NET_STAT_CONNTRACK + , NULL + , "Connection Tracker Errors" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_CONNTRACK_NAME + , NETDATA_CHART_PRIO_NETFILTER_ERRORS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st, RRDSET_FLAG_DETAIL); + + rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_drop = rrddim_add(st, "drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_early_drop = rrddim_add(st, "early_drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_icmp_error, aicmp_error); + rrddim_set_by_pointer(st, rd_insert_failed, ainsert_failed); + rrddim_set_by_pointer(st, rd_drop, adrop); + rrddim_set_by_pointer(st, rd_early_drop, aearly_drop); + rrdset_done(st); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c new file mode 100644 index 000000000..f0c1f47c1 --- /dev/null +++ b/collectors/proc.plugin/proc_net_stat_synproxy.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_SYNPROXY_NAME "/proc/net/stat/synproxy" + +#define RRD_TYPE_NET_STAT_NETFILTER "netfilter" +#define RRD_TYPE_NET_STAT_SYNPROXY "synproxy" + +int do_proc_net_stat_synproxy(int update_every, usec_t dt) { + (void)dt; + + static int do_entries = -1, do_cookies = -1, do_syns = -1, do_reopened = -1; + static procfile *ff = NULL; + + if(unlikely(do_entries == -1)) { + do_entries = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY entries", CONFIG_BOOLEAN_AUTO); + do_cookies = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY cookies", CONFIG_BOOLEAN_AUTO); + do_syns = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY SYN received", CONFIG_BOOLEAN_AUTO); + do_reopened = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY connections reopened", CONFIG_BOOLEAN_AUTO); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/synproxy"); + ff = procfile_open(config_get("plugin:proc:/proc/net/stat/synproxy", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; 
// we return 0, so that we will retry to open it next time + + // make sure we have 3 lines + size_t lines = procfile_lines(ff), l; + if(unlikely(lines < 2)) { + error("/proc/net/stat/synproxy has %zu lines, expected no less than 2. Disabling it.", lines); + return 1; + } + + unsigned long long entries = 0, syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0; + + // synproxy gives its values per CPU + for(l = 1; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 6)) + continue; + + entries += strtoull(procfile_lineword(ff, l, 0), NULL, 16); + syn_received += strtoull(procfile_lineword(ff, l, 1), NULL, 16); + cookie_invalid += strtoull(procfile_lineword(ff, l, 2), NULL, 16); + cookie_valid += strtoull(procfile_lineword(ff, l, 3), NULL, 16); + cookie_retrans += strtoull(procfile_lineword(ff, l, 4), NULL, 16); + conn_reopened += strtoull(procfile_lineword(ff, l, 5), NULL, 16); + } + + unsigned long long events = entries + syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened; + + // -------------------------------------------------------------------- + + if((do_entries == CONFIG_BOOLEAN_AUTO && events) || do_entries == CONFIG_BOOLEAN_YES) { + do_entries = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_SYNPROXY "_entries" + , NULL + , RRD_TYPE_NET_STAT_SYNPROXY + , NULL + , "SYNPROXY Entries Used" + , "entries" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_SYNPROXY_NAME + , NETDATA_CHART_PRIO_SYNPROXY_ENTRIES + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "entries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set(st, "entries", entries); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if((do_syns == CONFIG_BOOLEAN_AUTO && events) || do_syns == CONFIG_BOOLEAN_YES) { + do_syns = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_SYNPROXY "_syn_received" + , NULL + , RRD_TYPE_NET_STAT_SYNPROXY + , NULL + , "SYNPROXY SYN Packets received" + , "SYN/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_SYNPROXY_NAME + , NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "received", syn_received); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if((do_reopened == CONFIG_BOOLEAN_AUTO && events) || do_reopened == CONFIG_BOOLEAN_YES) { + do_reopened = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_SYNPROXY "_conn_reopened" + , NULL + , RRD_TYPE_NET_STAT_SYNPROXY + , NULL + , "SYNPROXY Connections Reopened" + , "connections/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_SYNPROXY_NAME + , NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "reopened", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "reopened", conn_reopened); + rrdset_done(st); + } + + // -------------------------------------------------------------------- + + if((do_cookies == CONFIG_BOOLEAN_AUTO && events) || do_cookies == CONFIG_BOOLEAN_YES) { 
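/*
 * Same auto-detection latch as the three synproxy charts above: while
 * do_cookies is CONFIG_BOOLEAN_AUTO the chart stays suppressed until
 * the summed counters ("events") first become non-zero; the assignment
 * below then pins it to CONFIG_BOOLEAN_YES, so the chart keeps
 * updating even if every counter later reads zero again.
 */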
+ do_cookies = CONFIG_BOOLEAN_YES; + + static RRDSET *st = NULL; + if(unlikely(!st)) { + st = rrdset_create_localhost( + RRD_TYPE_NET_STAT_NETFILTER + , RRD_TYPE_NET_STAT_SYNPROXY "_cookies" + , NULL + , RRD_TYPE_NET_STAT_SYNPROXY + , NULL + , "SYNPROXY TCP Cookies" + , "cookies/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_SYNPROXY_NAME + , NETDATA_CHART_PRIO_SYNPROXY_COOKIES + , update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(st, "valid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(st, "retransmits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st); + + rrddim_set(st, "valid", cookie_valid); + rrddim_set(st, "invalid", cookie_invalid); + rrddim_set(st, "retransmits", cookie_retrans); + rrdset_done(st); + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c new file mode 100644 index 000000000..3f17ccce2 --- /dev/null +++ b/collectors/proc.plugin/proc_self_mountinfo.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +// ---------------------------------------------------------------------------- +// taken from gnulib/mountlist.c + +#ifndef ME_REMOTE +/* A file system is "remote" if its Fs_name contains a ':' + or if (it is of type (smbfs or cifs) and its Fs_name starts with '//') + or Fs_name is equal to "-hosts" (used by autofs to mount remote fs). */ +# define ME_REMOTE(Fs_name, Fs_type) \ + (strchr (Fs_name, ':') != NULL \ + || ((Fs_name)[0] == '/' \ + && (Fs_name)[1] == '/' \ + && (strcmp (Fs_type, "smbfs") == 0 \ + || strcmp (Fs_type, "cifs") == 0)) \ + || (strcmp("-hosts", Fs_name) == 0)) +#endif + +#define ME_DUMMY_0(Fs_name, Fs_type) \ + (strcmp (Fs_type, "autofs") == 0 \ + || strcmp (Fs_type, "proc") == 0 \ + || strcmp (Fs_type, "subfs") == 0 \ + /* for Linux 2.6/3.x */ \ + || strcmp (Fs_type, "debugfs") == 0 \ + || strcmp (Fs_type, "devpts") == 0 \ + || strcmp (Fs_type, "fusectl") == 0 \ + || strcmp (Fs_type, "mqueue") == 0 \ + || strcmp (Fs_type, "rpc_pipefs") == 0 \ + || strcmp (Fs_type, "sysfs") == 0 \ + /* FreeBSD, Linux 2.4 */ \ + || strcmp (Fs_type, "devfs") == 0 \ + /* for NetBSD 3.0 */ \ + || strcmp (Fs_type, "kernfs") == 0 \ + /* for Irix 6.5 */ \ + || strcmp (Fs_type, "ignore") == 0) + +/* Historically, we have marked as "dummy" any file system of type "none", + but now that programs like du need to know about bind-mounted directories, + we grant an exception to any with "bind" in its list of mount options. + I.e., those are *not* dummy entries. 
*/ +# define ME_DUMMY(Fs_name, Fs_type) \ + (ME_DUMMY_0 (Fs_name, Fs_type) || strcmp (Fs_type, "none") == 0) + +// ---------------------------------------------------------------------------- + +// find the mount info with the given major:minor +// in the supplied linked list of mountinfo structures +struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor) { + struct mountinfo *mi; + + for(mi = root; mi ; mi = mi->next) + if(unlikely(mi->major == major && mi->minor == minor)) + return mi; + + return NULL; +} + +// find the mount info with the given filesystem and mount_source +// in the supplied linked list of mountinfo structures +struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source) { + struct mountinfo *mi; + uint32_t filesystem_hash = simple_hash(filesystem), mount_source_hash = simple_hash(mount_source); + + for(mi = root; mi ; mi = mi->next) + if(unlikely(mi->filesystem + && mi->mount_source + && mi->filesystem_hash == filesystem_hash + && mi->mount_source_hash == mount_source_hash + && !strcmp(mi->filesystem, filesystem) + && !strcmp(mi->mount_source, mount_source))) + return mi; + + return NULL; +} + +struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options) { + struct mountinfo *mi; + uint32_t filesystem_hash = simple_hash(filesystem); + + size_t solen = strlen(super_options); + + for(mi = root; mi ; mi = mi->next) + if(unlikely(mi->filesystem + && mi->super_options + && mi->filesystem_hash == filesystem_hash + && !strcmp(mi->filesystem, filesystem))) { + + // super_options is a comma separated list + char *s = mi->super_options, *e; + while(*s) { + e = s + 1; + while(*e && *e != ',') e++; + + size_t len = e - s; + if(unlikely(len == solen && !strncmp(s, super_options, len))) + return mi; + + if(*e == ',') s = ++e; + else s = e; + } + } + + return NULL; +} + +static void mountinfo_free(struct mountinfo *mi) { + freez(mi->root); + freez(mi->mount_point); + freez(mi->mount_options); + freez(mi->persistent_id); +/* + if(mi->optional_fields_count) { + int i; + for(i = 0; i < mi->optional_fields_count ; i++) + free(*mi->optional_fields[i]); + } + free(mi->optional_fields); +*/ + freez(mi->filesystem); + freez(mi->mount_source); + freez(mi->super_options); + freez(mi); +} + +// free a linked list of mountinfo structures +void mountinfo_free_all(struct mountinfo *mi) { + while(mi) { + struct mountinfo *t = mi; + mi = mi->next; + + mountinfo_free(t); + } +} + +static char *strdupz_decoding_octal(const char *string) { + char *buffer = strdupz(string); + + char *d = buffer; + const char *s = string; + + while(*s) { + if(unlikely(*s == '\\')) { + s++; + if(likely(isdigit(*s) && isdigit(s[1]) && isdigit(s[2]))) { + char c = *s++ - '0'; + c <<= 3; + c |= *s++ - '0'; + c <<= 3; + c |= *s++ - '0'; + *d++ = c; + } + else *d++ = '_'; + } + else *d++ = *s++; + } + *d = '\0'; + + return buffer; +} + +static inline int is_read_only(const char *s) { + if(!s) return 0; + + size_t len = strlen(s); + if(len < 2) return 0; + if(len == 2) { + if(!strcmp(s, "ro")) return 1; + return 0; + } + if(!strncmp(s, "ro,", 3)) return 1; + if(!strncmp(&s[len - 3], ",ro", 3)) return 1; + if(strstr(s, ",ro,")) return 1; + return 0; +} + +// read the whole mountinfo into a linked list +struct mountinfo *mountinfo_read(int do_statvfs) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, 
"%s/proc/self/mountinfo", netdata_configured_host_prefix); + procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix); + ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return NULL; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return NULL; + + struct mountinfo *root = NULL, *last = NULL, *mi = NULL; + + unsigned long l, lines = procfile_lines(ff); + for(l = 0; l < lines ;l++) { + if(unlikely(procfile_linewords(ff, l) < 5)) + continue; + + mi = mallocz(sizeof(struct mountinfo)); + + unsigned long w = 0; + mi->id = str2ul(procfile_lineword(ff, l, w)); w++; + mi->parentid = str2ul(procfile_lineword(ff, l, w)); w++; + + char *major = procfile_lineword(ff, l, w), *minor; w++; + for(minor = major; *minor && *minor != ':' ;minor++) ; + + if(unlikely(!*minor)) { + error("Cannot parse major:minor on '%s' at line %lu of '%s'", major, l + 1, filename); + freez(mi); + continue; + } + + *minor = '\0'; + minor++; + + mi->flags = 0; + + mi->major = str2ul(major); + mi->minor = str2ul(minor); + + mi->root = strdupz(procfile_lineword(ff, l, w)); w++; + mi->root_hash = simple_hash(mi->root); + + mi->mount_point = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++; + mi->mount_point_hash = simple_hash(mi->mount_point); + + mi->persistent_id = strdupz(mi->mount_point); + netdata_fix_chart_id(mi->persistent_id); + mi->persistent_id_hash = simple_hash(mi->persistent_id); + + mi->mount_options = strdupz(procfile_lineword(ff, l, w)); w++; + + if(unlikely(is_read_only(mi->mount_options))) + mi->flags |= MOUNTINFO_READONLY; + + // count the optional fields +/* + unsigned long wo = w; +*/ + mi->optional_fields_count = 0; + char *s = procfile_lineword(ff, l, w); + while(*s && *s != '-') { + w++; + s = procfile_lineword(ff, l, w); + mi->optional_fields_count++; + } + +/* + if(unlikely(mi->optional_fields_count)) { + // we have some optional fields + // read them into a new array of pointers; + + mi->optional_fields = mallocz(mi->optional_fields_count * sizeof(char *)); + + int i; + for(i = 0; i < mi->optional_fields_count ; i++) { + *mi->optional_fields[wo] = strdupz(procfile_lineword(ff, l, w)); + wo++; + } + } + else + mi->optional_fields = NULL; +*/ + + if(likely(*s == '-')) { + w++; + + mi->filesystem = strdupz(procfile_lineword(ff, l, w)); w++; + mi->filesystem_hash = simple_hash(mi->filesystem); + + mi->mount_source = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++; + mi->mount_source_hash = simple_hash(mi->mount_source); + + mi->super_options = strdupz(procfile_lineword(ff, l, w)); w++; + + if(unlikely(is_read_only(mi->super_options))) + mi->flags |= MOUNTINFO_READONLY; + + if(unlikely(ME_DUMMY(mi->mount_source, mi->filesystem))) + mi->flags |= MOUNTINFO_IS_DUMMY; + + if(unlikely(ME_REMOTE(mi->mount_source, mi->filesystem))) + mi->flags |= MOUNTINFO_IS_REMOTE; + + // mark as BIND the duplicates (i.e. 
same filesystem + same source) + if(do_statvfs) { + struct stat buf; + if(unlikely(stat(mi->mount_point, &buf) == -1)) { + mi->st_dev = 0; + mi->flags |= MOUNTINFO_NO_STAT; + } + else { + mi->st_dev = buf.st_dev; + + struct mountinfo *mt; + for(mt = root; mt; mt = mt->next) { + if(unlikely(mt->st_dev == mi->st_dev && !(mt->flags & MOUNTINFO_IS_SAME_DEV))) { + if(strlen(mi->mount_point) < strlen(mt->mount_point)) + mt->flags |= MOUNTINFO_IS_SAME_DEV; + else + mi->flags |= MOUNTINFO_IS_SAME_DEV; + } + } + } + } + else { + mi->st_dev = 0; + } + } + else { + mi->filesystem = NULL; + mi->filesystem_hash = 0; + + mi->mount_source = NULL; + mi->mount_source_hash = 0; + + mi->super_options = NULL; + + mi->st_dev = 0; + } + + // check if it has size + if(do_statvfs && !(mi->flags & MOUNTINFO_IS_DUMMY)) { + struct statvfs buff_statvfs; + if(unlikely(statvfs(mi->mount_point, &buff_statvfs) < 0)) { + mi->flags |= MOUNTINFO_NO_STAT; + } + else if(unlikely(!buff_statvfs.f_blocks /* || !buff_statvfs.f_files */)) { + mi->flags |= MOUNTINFO_NO_SIZE; + } + } + + // link it + if(unlikely(!root)) + root = mi; + else + last->next = mi; + + last = mi; + mi->next = NULL; + +/* +#ifdef NETDATA_INTERNAL_CHECKS + fprintf(stderr, "MOUNTINFO: %ld %ld %lu:%lu root '%s', persistent id '%s', mount point '%s', mount options '%s', filesystem '%s', mount source '%s', super options '%s'%s%s%s%s%s%s\n", + mi->id, + mi->parentid, + mi->major, + mi->minor, + mi->root, + mi->persistent_id, + (mi->mount_point)?mi->mount_point:"", + (mi->mount_options)?mi->mount_options:"", + (mi->filesystem)?mi->filesystem:"", + (mi->mount_source)?mi->mount_source:"", + (mi->super_options)?mi->super_options:"", + (mi->flags & MOUNTINFO_IS_DUMMY)?" DUMMY":"", + (mi->flags & MOUNTINFO_IS_BIND)?" BIND":"", + (mi->flags & MOUNTINFO_IS_REMOTE)?" REMOTE":"", + (mi->flags & MOUNTINFO_NO_STAT)?" NOSTAT":"", + (mi->flags & MOUNTINFO_NO_SIZE)?" NOSIZE":"", + (mi->flags & MOUNTINFO_IS_SAME_DEV)?" SAMEDEV":"" + ); +#endif +*/ + } + +/* find if the mount options have "bind" in them + { + FILE *fp = setmntent(MOUNTED, "r"); + if (fp != NULL) { + struct mntent mntbuf; + struct mntent *mnt; + char buf[4096 + 1]; + + while ((mnt = getmntent_r(fp, &mntbuf, buf, 4096))) { + char *bind = hasmntopt(mnt, "bind"); + if(unlikely(bind)) { + struct mountinfo *mi; + for(mi = root; mi ; mi = mi->next) { + if(unlikely(strcmp(mnt->mnt_dir, mi->mount_point) == 0)) { + fprintf(stderr, "Mount point '%s' is BIND\n", mi->mount_point); + mi->flags |= MOUNTINFO_IS_BIND; + break; + } + } + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(!mi)) { + error("Mount point '%s' not found in /proc/self/mountinfo", mnt->mnt_dir); + } +#endif + } + } + endmntent(fp); + } + } +*/ + + procfile_close(ff); + return root; +} diff --git a/collectors/proc.plugin/proc_self_mountinfo.h b/collectors/proc.plugin/proc_self_mountinfo.h new file mode 100644 index 000000000..15d63c786 --- /dev/null +++ b/collectors/proc.plugin/proc_self_mountinfo.h @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PROC_SELF_MOUNTINFO_H +#define NETDATA_PROC_SELF_MOUNTINFO_H 1 + +#define MOUNTINFO_IS_DUMMY 0x00000001 +#define MOUNTINFO_IS_REMOTE 0x00000002 +#define MOUNTINFO_IS_BIND 0x00000004 +#define MOUNTINFO_IS_SAME_DEV 0x00000008 +#define MOUNTINFO_NO_STAT 0x00000010 +#define MOUNTINFO_NO_SIZE 0x00000020 +#define MOUNTINFO_READONLY 0x00000040 + +struct mountinfo { + long id; // mount ID: unique identifier of the mount (may be reused after umount(2)). 
+ long parentid; // parent ID: ID of parent mount (or of self for the top of the mount tree). + unsigned long major; // major:minor: value of st_dev for files on filesystem (see stat(2)). + unsigned long minor; + + char *persistent_id; // a calculated persistent id for the mount point + uint32_t persistent_id_hash; + + char *root; // root: root of the mount within the filesystem. + uint32_t root_hash; + + char *mount_point; // mount point: mount point relative to the process's root. + uint32_t mount_point_hash; + + char *mount_options; // mount options: per-mount options. + + int optional_fields_count; +/* + char ***optional_fields; // optional fields: zero or more fields of the form "tag[:value]". +*/ + char *filesystem; // filesystem type: name of filesystem in the form "type[.subtype]". + uint32_t filesystem_hash; + + char *mount_source; // mount source: filesystem-specific information or "none". + uint32_t mount_source_hash; + + char *super_options; // super options: per-superblock options. + + uint32_t flags; + + dev_t st_dev; // id of device as given by stat() + + struct mountinfo *next; +}; + +extern struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor); +extern struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source); +extern struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options); + +extern void mountinfo_free_all(struct mountinfo *mi); +extern struct mountinfo *mountinfo_read(int do_statvfs); + +#endif /* NETDATA_PROC_SELF_MOUNTINFO_H */ \ No newline at end of file diff --git a/collectors/proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c new file mode 100644 index 000000000..d68c69bb7 --- /dev/null +++ b/collectors/proc.plugin/proc_softirqs.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_SOFTIRQS_NAME "/proc/softirqs" + +#define MAX_INTERRUPT_NAME 50 + +struct cpu_interrupt { + unsigned long long value; + RRDDIM *rd; +}; + +struct interrupt { + int used; + char *id; + char name[MAX_INTERRUPT_NAME + 1]; + RRDDIM *rd; + unsigned long long total; + struct cpu_interrupt cpu[]; +}; + +// since each interrupt is variable in size +// we use this to calculate its record size +#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt))) + +// given a base, get a pointer to each record +#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)]) + +static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) { + static struct interrupt *irrs = NULL; + static size_t allocated = 0; + + if(unlikely(lines != allocated)) { + uint32_t l; + int c; + + irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus)); + + // reset all interrupt RRDDIM pointers as any line could have shifted + for(l = 0; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + irr->rd = NULL; + irr->name[0] = '\0'; + for(c = 0; c < cpus ;c++) + irr->cpu[c].rd = NULL; + } + + allocated = lines; + } + + return irrs; +} + +int do_proc_softirqs(int update_every, usec_t dt) { + (void)dt; + static procfile *ff = NULL; + static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID; + struct interrupt *irrs = NULL; + + if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID)) + do_per_core = 
config_get_boolean_ondemand("plugin:proc:/proc/softirqs", "interrupts per core", CONFIG_BOOLEAN_AUTO); + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/softirqs"); + ff = procfile_open(config_get("plugin:proc:/proc/softirqs", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + size_t words = procfile_linewords(ff, 0); + + if(unlikely(!lines)) { + error("Cannot read /proc/softirqs, zero lines reported."); + return 1; + } + + // find how many CPUs are there + if(unlikely(cpus == -1)) { + uint32_t w; + cpus = 0; + for(w = 0; w < words ; w++) { + if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0)) + cpus++; + } + } + + if(unlikely(!cpus)) { + error("PLUGIN: PROC_SOFTIRQS: Cannot find the number of CPUs in /proc/softirqs"); + return 1; + } + + // allocate the size we need; + irrs = get_interrupts_array(lines, cpus); + irrs[0].used = 0; + + // loop through all lines + for(l = 1; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + irr->used = 0; + irr->total = 0; + + words = procfile_linewords(ff, l); + if(unlikely(!words)) continue; + + irr->id = procfile_lineword(ff, l, 0); + if(unlikely(!irr->id || !irr->id[0])) continue; + + int c; + for(c = 0; c < cpus ;c++) { + if(likely((c + 1) < (int)words)) + irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1))); + else + irr->cpu[c].value = 0; + + irr->total += irr->cpu[c].value; + } + + strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME); + + irr->used = 1; + } + + // -------------------------------------------------------------------- + + static RRDSET *st_system_softirqs = NULL; + if(unlikely(!st_system_softirqs)) + st_system_softirqs = rrdset_create_localhost( + "system" + , "softirqs" + , NULL + , "softirqs" + , NULL + , "System softirqs" + , "softirqs/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_SOFTIRQS_NAME + , NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS + , update_every + , RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_system_softirqs); + + for(l = 0; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + + if(irr->used && irr->total) { + // some interrupt may have changed without changing the total number of lines + // if the same number of interrupts have been added and removed between two + // calls of this function. 
+ if(unlikely(!irr->rd || strncmp(irr->name, irr->rd->name, MAX_INTERRUPT_NAME) != 0)) { + irr->rd = rrddim_add(st_system_softirqs, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_set_name(st_system_softirqs, irr->rd, irr->name); + + // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop + if(likely(do_per_core != CONFIG_BOOLEAN_NO)) { + int c; + for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL; + } + } + + rrddim_set_by_pointer(st_system_softirqs, irr->rd, irr->total); + } + } + + rrdset_done(st_system_softirqs); + + // -------------------------------------------------------------------- + + if(do_per_core != CONFIG_BOOLEAN_NO) { + static RRDSET **core_st = NULL; + static int old_cpus = 0; + + if(old_cpus < cpus) { + core_st = reallocz(core_st, sizeof(RRDSET *) * cpus); + memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus)); + old_cpus = cpus; + } + + int c; + + for(c = 0; c < cpus ; c++) { + if(unlikely(!core_st[c])) { + // find if everything is just zero + unsigned long long core_sum = 0; + + for (l = 0; l < lines; l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + if (unlikely(!irr->used)) continue; + core_sum += irr->cpu[c].value; + } + + if (unlikely(core_sum == 0)) continue; // try next core + + char id[50 + 1]; + snprintfz(id, 50, "cpu%d_softirqs", c); + + char title[100 + 1]; + snprintfz(title, 100, "CPU%d softirqs", c); + + core_st[c] = rrdset_create_localhost( + "cpu" + , id + , NULL + , "softirqs" + , "cpu.softirqs" + , title + , "softirqs/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_SOFTIRQS_NAME + , NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE + c + , update_every + , RRDSET_TYPE_STACKED + ); + } + else + rrdset_next(core_st[c]); + + for(l = 0; l < lines ;l++) { + struct interrupt *irr = irrindex(irrs, l, cpus); + + if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) { + if(unlikely(!irr->cpu[c].rd)) { + irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name); + } + + rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value); + } + } + + rrdset_done(core_st[c]); + } + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c new file mode 100644 index 000000000..a96b236cb --- /dev/null +++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" +#include "zfs_common.h" + +#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats" + +extern struct arcstats arcstats; + +int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + static ARL_BASE *arl_base = NULL; + + arcstats.l2exist = -1; + + if(unlikely(!arl_base)) { + arl_base = arl_create("arcstats", NULL, 60); + + arl_expect(arl_base, "hits", &arcstats.hits); + arl_expect(arl_base, "misses", &arcstats.misses); + arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits); + arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses); + arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits); + arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses); + arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits); + arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses); + arl_expect(arl_base, "prefetch_metadata_hits", 
&arcstats.prefetch_metadata_hits); + arl_expect(arl_base, "prefetch_metadata_misses", &arcstats.prefetch_metadata_misses); + arl_expect(arl_base, "mru_hits", &arcstats.mru_hits); + arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits); + arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits); + arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits); + arl_expect(arl_base, "deleted", &arcstats.deleted); + arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss); + arl_expect(arl_base, "evict_skip", &arcstats.evict_skip); + arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough); + arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached); + arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible); + arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible); + arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip); + arl_expect(arl_base, "hash_elements", &arcstats.hash_elements); + arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max); + arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions); + arl_expect(arl_base, "hash_chains", &arcstats.hash_chains); + arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max); + arl_expect(arl_base, "p", &arcstats.p); + arl_expect(arl_base, "c", &arcstats.c); + arl_expect(arl_base, "c_min", &arcstats.c_min); + arl_expect(arl_base, "c_max", &arcstats.c_max); + arl_expect(arl_base, "size", &arcstats.size); + arl_expect(arl_base, "hdr_size", &arcstats.hdr_size); + arl_expect(arl_base, "data_size", &arcstats.data_size); + arl_expect(arl_base, "metadata_size", &arcstats.metadata_size); + arl_expect(arl_base, "other_size", &arcstats.other_size); + arl_expect(arl_base, "anon_size", &arcstats.anon_size); + arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data); + arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata); + arl_expect(arl_base, "mru_size", &arcstats.mru_size); + arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data); + arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata); + arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size); + arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data); + arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata); + arl_expect(arl_base, "mfu_size", &arcstats.mfu_size); + arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data); + arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata); + arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size); + arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data); + arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata); + arl_expect(arl_base, "l2_hits", &arcstats.l2_hits); + arl_expect(arl_base, "l2_misses", &arcstats.l2_misses); + arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds); + arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash); + arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes); + arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes); + arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent); + arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done); + arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error); + arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry); + arl_expect(arl_base, 
"l2_evict_lock_retry", &arcstats.l2_evict_lock_retry); + arl_expect(arl_base, "l2_evict_reading", &arcstats.l2_evict_reading); + arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached); + arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write); + arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write); + arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem); + arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad); + arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error); + arl_expect(arl_base, "l2_size", &arcstats.l2_size); + arl_expect(arl_base, "l2_asize", &arcstats.l2_asize); + arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size); + arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes); + arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros); + arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures); + arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count); + arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers); + arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size); + arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads); + arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count); + arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count); + arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow); + arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve); + arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes); + arl_expect(arl_base, "arc_prune", &arcstats.arc_prune); + arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used); + arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit); + arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max); + arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min); + arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free); + arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS); + ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) + return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + arl_begin(arl_base); + + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 3)) { + if(unlikely(words)) error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. 
Expected 3 params, read %zu.", l, words); + continue; + } + + const char *key = procfile_lineword(ff, l, 0); + const char *value = procfile_lineword(ff, l, 2); + + if(unlikely(arcstats.l2exist == -1)) { + if(key[0] == 'l' && key[1] == '2' && key[2] == '_') + arcstats.l2exist = 1; + } + + if(unlikely(arl_check(arl_base, key, value))) break; + } + + if(unlikely(arcstats.l2exist == -1)) + arcstats.l2exist = 0; + + generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every); + generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every); + + return 0; +} diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c new file mode 100644 index 000000000..fb77df647 --- /dev/null +++ b/collectors/proc.plugin/proc_stat.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_STAT_NAME "/proc/stat" + +struct per_core_single_number_file { + unsigned char found:1; + const char *filename; + int fd; + collected_number value; + RRDDIM *rd; +}; + +#define CORE_THROTTLE_COUNT_INDEX 0 +#define PACKAGE_THROTTLE_COUNT_INDEX 1 +#define SCALING_CUR_FREQ_INDEX 2 +#define PER_CORE_FILES 3 + +struct cpu_chart { + const char *id; + + RRDSET *st; + RRDDIM *rd_user; + RRDDIM *rd_nice; + RRDDIM *rd_system; + RRDDIM *rd_idle; + RRDDIM *rd_iowait; + RRDDIM *rd_irq; + RRDDIM *rd_softirq; + RRDDIM *rd_steal; + RRDDIM *rd_guest; + RRDDIM *rd_guest_nice; + + struct per_core_single_number_file files[PER_CORE_FILES]; +}; + +static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES; + +static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) { + char buf[50 + 1]; + size_t x, files_read = 0, files_nonzero = 0; + + for(x = 0; x < len ; x++) { + struct per_core_single_number_file *f = &all_cpu_charts[x].files[index]; + + f->found = 0; + + if(unlikely(!f->filename)) + continue; + + if(unlikely(f->fd == -1)) { + f->fd = open(f->filename, O_RDONLY); + if (unlikely(f->fd == -1)) { + error("Cannot open file '%s'", f->filename); + continue; + } + } + + ssize_t ret = read(f->fd, buf, 50); + if(unlikely(ret < 0)) { + // cannot read that file + + error("Cannot read file '%s'", f->filename); + close(f->fd); + f->fd = -1; + continue; + } + else { + // successful read + + // terminate the buffer + buf[ret] = '\0'; + + if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) { + close(f->fd); + f->fd = -1; + } + else if(lseek(f->fd, 0, SEEK_SET) == -1) { + error("Cannot seek in file '%s'", f->filename); + close(f->fd); + f->fd = -1; + } + } + + files_read++; + f->found = 1; + + f->value = str2ll(buf, NULL); + // info("read '%s', parsed as " COLLECTED_NUMBER_FORMAT, buf, f->value); + if(likely(f->value != 0)) + files_nonzero++; + } + + if(files_read == 0) + return -1; + + if(files_nonzero == 0) + return 0; + + return (int)files_nonzero; +} + +static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index, RRDSET *st, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) { + size_t x; + for(x = 0; x < len ; x++) { + struct per_core_single_number_file *f = &all_cpu_charts[x].files[index]; + + if(unlikely(!f->found)) + continue; + + if(unlikely(!f->rd)) + f->rd = rrddim_add(st, all_cpu_charts[x].id, NULL, multiplier, divisor, algorithm); + + rrddim_set_by_pointer(st, f->rd, f->value); + } +} + +int do_proc_stat(int update_every, usec_t dt) { + (void)dt; + + static struct cpu_chart *all_cpu_charts = NULL; + static size_t 
all_cpu_charts_size = 0; + static procfile *ff = NULL; + static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_scaling_cur_freq = -1; + static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked; + static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL; + static RRDVAR *cpus_var = NULL; + size_t cores_found = (size_t)processors; + + if(unlikely(do_cpu == -1)) { + do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", CONFIG_BOOLEAN_YES); + do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", CONFIG_BOOLEAN_YES); + do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", CONFIG_BOOLEAN_YES); + do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", CONFIG_BOOLEAN_YES); + do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", CONFIG_BOOLEAN_YES); + do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", CONFIG_BOOLEAN_YES); + + // give sane defaults based on the number of processors + if(processors > 50) { + // the system has too many processors + keep_per_core_fds_open = CONFIG_BOOLEAN_NO; + do_core_throttle_count = CONFIG_BOOLEAN_NO; + do_package_throttle_count = CONFIG_BOOLEAN_NO; + do_scaling_cur_freq = CONFIG_BOOLEAN_NO; + } + else { + // the system has a reasonable number of processors + keep_per_core_fds_open = CONFIG_BOOLEAN_YES; + do_core_throttle_count = CONFIG_BOOLEAN_AUTO; + do_package_throttle_count = CONFIG_BOOLEAN_NO; + do_scaling_cur_freq = CONFIG_BOOLEAN_NO; + } + + keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open); + do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count); + do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count); + do_scaling_cur_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "scaling_cur_freq", do_scaling_cur_freq); + + hash_intr = simple_hash("intr"); + hash_ctxt = simple_hash("ctxt"); + hash_processes = simple_hash("processes"); + hash_procs_running = simple_hash("procs_running"); + hash_procs_blocked = simple_hash("procs_blocked"); + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count"); + core_throttle_count_filename = config_get("plugin:proc:/proc/stat", "core_throttle_count filename to monitor", filename); + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count"); + package_throttle_count_filename = config_get("plugin:proc:/proc/stat", "package_throttle_count filename to monitor", filename); + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq"); + scaling_cur_freq_filename = config_get("plugin:proc:/proc/stat", "scaling_cur_freq filename to monitor", filename); + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/stat"); + ff = 
procfile_open(config_get("plugin:proc:/proc/stat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + size_t words; + + unsigned long long processes = 0, running = 0 , blocked = 0; + + for(l = 0; l < lines ;l++) { + char *row_key = procfile_lineword(ff, l, 0); + uint32_t hash = simple_hash(row_key); + + // faster strncmp(row_key, "cpu", 3) == 0 + if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) { + words = procfile_linewords(ff, l); + if(unlikely(words < 9)) { + error("Cannot read /proc/stat cpu line. Expected 9 params, read %zu.", words); + continue; + } + + size_t core = (row_key[3] == '\0') ? 0 : str2ul(&row_key[3]) + 1; + if(core > 0) cores_found = core; + + if(likely((core == 0 && do_cpu) || (core > 0 && do_cpu_cores))) { + char *id; + unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0; + + id = row_key; + user = str2ull(procfile_lineword(ff, l, 1)); + nice = str2ull(procfile_lineword(ff, l, 2)); + system = str2ull(procfile_lineword(ff, l, 3)); + idle = str2ull(procfile_lineword(ff, l, 4)); + iowait = str2ull(procfile_lineword(ff, l, 5)); + irq = str2ull(procfile_lineword(ff, l, 6)); + softirq = str2ull(procfile_lineword(ff, l, 7)); + steal = str2ull(procfile_lineword(ff, l, 8)); + + guest = str2ull(procfile_lineword(ff, l, 9)); + user -= guest; + + guest_nice = str2ull(procfile_lineword(ff, l, 10)); + nice -= guest_nice; + + char *title, *type, *context, *family; + long priority; + + if(core >= all_cpu_charts_size) { + size_t old_cpu_charts_size = all_cpu_charts_size; + all_cpu_charts_size = core + 1; + all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * all_cpu_charts_size); + memset(&all_cpu_charts[old_cpu_charts_size], 0, sizeof(struct cpu_chart) * (all_cpu_charts_size - old_cpu_charts_size)); + } + struct cpu_chart *cpu_chart = &all_cpu_charts[core]; + + if(unlikely(!cpu_chart->st)) { + cpu_chart->id = strdupz(id); + + if(core == 0) { + title = "Total CPU utilization"; + type = "system"; + context = "system.cpu"; + family = id; + priority = NETDATA_CHART_PRIO_SYSTEM_CPU; + } + else { + title = "Core utilization"; + type = "cpu"; + context = "cpu.cpu"; + family = "utilization"; + priority = NETDATA_CHART_PRIO_CPU_PER_CORE; + + // TODO: check for /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq + // TODO: check for /sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state + + char filename[FILENAME_MAX + 1]; + struct stat stbuf; + + if(do_core_throttle_count != CONFIG_BOOLEAN_NO) { + snprintfz(filename, FILENAME_MAX, core_throttle_count_filename, id); + if (stat(filename, &stbuf) == 0) { + cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].filename = strdupz(filename); + cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].fd = -1; + do_core_throttle_count = CONFIG_BOOLEAN_YES; + } + } + + if(do_package_throttle_count != CONFIG_BOOLEAN_NO) { + snprintfz(filename, FILENAME_MAX, package_throttle_count_filename, id); + if (stat(filename, &stbuf) == 0) { + cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].filename = strdupz(filename); + cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].fd = -1; + do_package_throttle_count = CONFIG_BOOLEAN_YES; + } + } + + if(do_scaling_cur_freq != CONFIG_BOOLEAN_NO) { + snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, id); + if 
(stat(filename, &stbuf) == 0) { + cpu_chart->files[SCALING_CUR_FREQ_INDEX].filename = strdupz(filename); + cpu_chart->files[SCALING_CUR_FREQ_INDEX].fd = -1; + do_scaling_cur_freq = CONFIG_BOOLEAN_YES; + } + } + } + + cpu_chart->st = rrdset_create_localhost( + type + , id + , NULL + , family + , context + , title + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , priority + core + , update_every + , RRDSET_TYPE_STACKED + ); + + long multiplier = 1; + long divisor = 1; // sysconf(_SC_CLK_TCK); + + cpu_chart->rd_guest_nice = rrddim_add(cpu_chart->st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_guest = rrddim_add(cpu_chart->st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_steal = rrddim_add(cpu_chart->st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_softirq = rrddim_add(cpu_chart->st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_irq = rrddim_add(cpu_chart->st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_user = rrddim_add(cpu_chart->st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_system = rrddim_add(cpu_chart->st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_nice = rrddim_add(cpu_chart->st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_iowait = rrddim_add(cpu_chart->st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + cpu_chart->rd_idle = rrddim_add(cpu_chart->st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_hide(cpu_chart->st, "idle"); + + if(unlikely(core == 0 && cpus_var == NULL)) + cpus_var = rrdvar_custom_host_variable_create(localhost, "active_processors"); + } + else rrdset_next(cpu_chart->st); + + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_user, user); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_nice, nice); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_system, system); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_idle, idle); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_iowait, iowait); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_irq, irq); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_softirq, softirq); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_steal, steal); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest, guest); + rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest_nice, guest_nice); + rrdset_done(cpu_chart->st); + } + } + else if(unlikely(hash == hash_intr && strcmp(row_key, "intr") == 0)) { + if(likely(do_interrupts)) { + static RRDSET *st_intr = NULL; + static RRDDIM *rd_interrupts = NULL; + unsigned long long value = str2ull(procfile_lineword(ff, l, 1)); + + if(unlikely(!st_intr)) { + st_intr = rrdset_create_localhost( + "system" + , "intr" + , NULL + , "interrupts" + , NULL + , "CPU Interrupts" + , "interrupts/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_INTR + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL); + + rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_intr); + + rrddim_set_by_pointer(st_intr, rd_interrupts, value); + rrdset_done(st_intr); + } + } + else if(unlikely(hash == hash_ctxt && 
strcmp(row_key, "ctxt") == 0)) { + if(likely(do_context)) { + static RRDSET *st_ctxt = NULL; + static RRDDIM *rd_switches = NULL; + unsigned long long value = str2ull(procfile_lineword(ff, l, 1)); + + if(unlikely(!st_ctxt)) { + st_ctxt = rrdset_create_localhost( + "system" + , "ctxt" + , NULL + , "processes" + , NULL + , "CPU Context Switches" + , "context switches/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_CTXT + , update_every + , RRDSET_TYPE_LINE + ); + + rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_ctxt); + + rrddim_set_by_pointer(st_ctxt, rd_switches, value); + rrdset_done(st_ctxt); + } + } + else if(unlikely(hash == hash_processes && !processes && strcmp(row_key, "processes") == 0)) { + processes = str2ull(procfile_lineword(ff, l, 1)); + } + else if(unlikely(hash == hash_procs_running && !running && strcmp(row_key, "procs_running") == 0)) { + running = str2ull(procfile_lineword(ff, l, 1)); + } + else if(unlikely(hash == hash_procs_blocked && !blocked && strcmp(row_key, "procs_blocked") == 0)) { + blocked = str2ull(procfile_lineword(ff, l, 1)); + } + } + + // -------------------------------------------------------------------- + + if(likely(do_forks)) { + static RRDSET *st_forks = NULL; + static RRDDIM *rd_started = NULL; + + if(unlikely(!st_forks)) { + st_forks = rrdset_create_localhost( + "system" + , "forks" + , NULL + , "processes" + , NULL + , "Started Processes" + , "processes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_FORKS + , update_every + , RRDSET_TYPE_LINE + ); + rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL); + + rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_forks); + + rrddim_set_by_pointer(st_forks, rd_started, processes); + rrdset_done(st_forks); + } + + // -------------------------------------------------------------------- + + if(likely(do_processes)) { + static RRDSET *st_processes = NULL; + static RRDDIM *rd_running = NULL; + static RRDDIM *rd_blocked = NULL; + + if(unlikely(!st_processes)) { + st_processes = rrdset_create_localhost( + "system" + , "processes" + , NULL + , "processes" + , NULL + , "System Processes" + , "processes" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_PROCESSES + , update_every + , RRDSET_TYPE_LINE + ); + + rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st_processes); + + rrddim_set_by_pointer(st_processes, rd_running, running); + rrddim_set_by_pointer(st_processes, rd_blocked, blocked); + rrdset_done(st_processes); + } + + if(likely(all_cpu_charts_size > 1)) { + if(likely(do_core_throttle_count != CONFIG_BOOLEAN_NO)) { + int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX); + if(likely(r != -1 && (do_core_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) { + do_core_throttle_count = CONFIG_BOOLEAN_YES; + + static RRDSET *st_core_throttle_count = NULL; + + if (unlikely(!st_core_throttle_count)) + st_core_throttle_count = rrdset_create_localhost( + "cpu" + , "core_throttling" + , NULL + , "throttling" + , "cpu.core_throttling" + , "Core Thermal Throttling Events" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_CORE_THROTTLING + , update_every + , 
RRDSET_TYPE_LINE + ); + else + rrdset_next(st_core_throttle_count); + + chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX, st_core_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdset_done(st_core_throttle_count); + } + } + + if(likely(do_package_throttle_count != CONFIG_BOOLEAN_NO)) { + int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX); + if(likely(r != -1 && (do_package_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) { + do_package_throttle_count = CONFIG_BOOLEAN_YES; + + static RRDSET *st_package_throttle_count = NULL; + + if(unlikely(!st_package_throttle_count)) + st_package_throttle_count = rrdset_create_localhost( + "cpu" + , "package_throttling" + , NULL + , "throttling" + , "cpu.package_throttling" + , "Package Thermal Throttling Events" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_PACKAGE_THROTTLING + , update_every + , RRDSET_TYPE_LINE + ); + else + rrdset_next(st_package_throttle_count); + + chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX, st_package_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdset_done(st_package_throttle_count); + } + } + + if(likely(do_scaling_cur_freq != CONFIG_BOOLEAN_NO)) { + int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX); + if(likely(r != -1 && (do_scaling_cur_freq == CONFIG_BOOLEAN_YES || r > 0))) { + do_scaling_cur_freq = CONFIG_BOOLEAN_YES; + + static RRDSET *st_scaling_cur_freq = NULL; + + if(unlikely(!st_scaling_cur_freq)) + st_scaling_cur_freq = rrdset_create_localhost( + "cpu" + , "scaling_cur_freq" + , NULL + , "cpufreq" + , "cpu.scaling_cur_freq" + , "Per CPU Core, Current CPU Scaling Frequency" + , "MHz" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , 5003 + , update_every + , RRDSET_TYPE_LINE + ); + else + rrdset_next(st_scaling_cur_freq); + + chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX, st_scaling_cur_freq, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rrdset_done(st_scaling_cur_freq); + } + } + } + + if(cpus_var) + rrdvar_custom_host_variable_set(localhost, cpus_var, cores_found); + + return 0; +} diff --git a/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c new file mode 100644 index 000000000..20d2116ce --- /dev/null +++ b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/kernel/random/entropy_avail"); + ff = procfile_open(config_get("plugin:proc:/proc/sys/kernel/random/entropy_avail", "filename to monitor", filename), "", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + unsigned long long entropy = str2ull(procfile_lineword(ff, 0, 0)); + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if(unlikely(!st)) { + st = rrdset_create_localhost( + "system" + , "entropy" + , NULL + , "entropy" + , NULL + , "Available Entropy" + , "entropy" + , PLUGIN_PROC_NAME + , 
"/proc/sys/kernel/random/entropy_avail" + , NETDATA_CHART_PRIO_SYSTEM_ENTROPY + , update_every + , RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd, entropy); + rrdset_done(st); + + return 0; +} diff --git a/collectors/proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c new file mode 100644 index 000000000..142ae2d0c --- /dev/null +++ b/collectors/proc.plugin/proc_uptime.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +static inline collected_number uptime_from_boottime(void) { +#ifdef CLOCK_BOOTTIME_IS_AVAILABLE + return now_boottime_usec() / 1000; +#else + error("uptime cannot be read from CLOCK_BOOTTIME on this system."); + return 0; +#endif +} + +static procfile *read_proc_uptime_ff = NULL; +static inline collected_number read_proc_uptime(void) { + if(unlikely(!read_proc_uptime_ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime"); + + read_proc_uptime_ff = procfile_open(config_get("plugin:proc:/proc/uptime", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!read_proc_uptime_ff)) return 0; + } + + read_proc_uptime_ff = procfile_readall(read_proc_uptime_ff); + if(unlikely(!read_proc_uptime_ff)) return 0; + + if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) { + error("/proc/uptime has no lines."); + return 0; + } + if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) { + error("/proc/uptime has less than 1 word in it."); + return 0; + } + + return (collected_number)(strtold(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0); +} + +int do_proc_uptime(int update_every, usec_t dt) { + (void)dt; + + static int use_boottime = -1; + + if(unlikely(use_boottime == -1)) { + collected_number uptime_boottime = uptime_from_boottime(); + collected_number uptime_proc = read_proc_uptime(); + + long long delta = (long long)uptime_boottime - (long long)uptime_proc; + if(delta < 0) delta = -delta; + + if(delta <= 1000 && uptime_boottime != 0) { + procfile_close(read_proc_uptime_ff); + info("Using now_boottime_usec() for uptime (dt is %lld ms)", delta); + use_boottime = 1; + } + else if(uptime_proc != 0) { + info("Using /proc/uptime for uptime (dt is %lld ms)", delta); + use_boottime = 0; + } + else { + error("Cannot find any way to read uptime on this system."); + return 1; + } + } + + collected_number uptime; + if(use_boottime) + uptime = uptime_from_boottime(); + else + uptime = read_proc_uptime(); + + + // -------------------------------------------------------------------- + + static RRDSET *st = NULL; + static RRDDIM *rd = NULL; + + if(unlikely(!st)) { + + st = rrdset_create_localhost( + "system" + , "uptime" + , NULL + , "uptime" + , NULL + , "System Uptime" + , "seconds" + , PLUGIN_PROC_NAME + , "/proc/uptime" + , NETDATA_CHART_PRIO_SYSTEM_UPTIME + , update_every + , RRDSET_TYPE_LINE + ); + + rd = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd, uptime); + + rrdset_done(st); + + return 0; +} diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c new file mode 100644 index 000000000..f7c93c20a --- /dev/null +++ b/collectors/proc.plugin/proc_vmstat.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_VMSTAT_NAME 
"/proc/vmstat" + +int do_proc_vmstat(int update_every, usec_t dt) { + (void)dt; + + static procfile *ff = NULL; + static int do_swapio = -1, do_io = -1, do_pgfaults = -1, do_numa = -1; + static int has_numa = -1; + + static ARL_BASE *arl_base = NULL; + static unsigned long long numa_foreign = 0ULL; + static unsigned long long numa_hint_faults = 0ULL; + static unsigned long long numa_hint_faults_local = 0ULL; + static unsigned long long numa_huge_pte_updates = 0ULL; + static unsigned long long numa_interleave = 0ULL; + static unsigned long long numa_local = 0ULL; + static unsigned long long numa_other = 0ULL; + static unsigned long long numa_pages_migrated = 0ULL; + static unsigned long long numa_pte_updates = 0ULL; + static unsigned long long pgfault = 0ULL; + static unsigned long long pgmajfault = 0ULL; + static unsigned long long pgpgin = 0ULL; + static unsigned long long pgpgout = 0ULL; + static unsigned long long pswpin = 0ULL; + static unsigned long long pswpout = 0ULL; + + if(unlikely(!arl_base)) { + do_swapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "swap i/o", CONFIG_BOOLEAN_AUTO); + do_io = config_get_boolean("plugin:proc:/proc/vmstat", "disk i/o", 1); + do_pgfaults = config_get_boolean("plugin:proc:/proc/vmstat", "memory page faults", 1); + do_numa = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "system-wide numa metric summary", CONFIG_BOOLEAN_AUTO); + + + arl_base = arl_create("vmstat", NULL, 60); + arl_expect(arl_base, "pgfault", &pgfault); + arl_expect(arl_base, "pgmajfault", &pgmajfault); + arl_expect(arl_base, "pgpgin", &pgpgin); + arl_expect(arl_base, "pgpgout", &pgpgout); + arl_expect(arl_base, "pswpin", &pswpin); + arl_expect(arl_base, "pswpout", &pswpout); + + if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && get_numa_node_count() >= 2)) { + arl_expect(arl_base, "numa_foreign", &numa_foreign); + arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local); + arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults); + arl_expect(arl_base, "numa_huge_pte_updates", &numa_huge_pte_updates); + arl_expect(arl_base, "numa_interleave", &numa_interleave); + arl_expect(arl_base, "numa_local", &numa_local); + arl_expect(arl_base, "numa_other", &numa_other); + arl_expect(arl_base, "numa_pages_migrated", &numa_pages_migrated); + arl_expect(arl_base, "numa_pte_updates", &numa_pte_updates); + } + else { + // Do not expect numa metrics when they are not needed. + // By not adding them, the ARL will stop processing the file + // when all the expected metrics are collected. + // Also ARL will not parse their values. + has_numa = 0; + do_numa = CONFIG_BOOLEAN_NO; + } + } + + if(unlikely(!ff)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/vmstat"); + ff = procfile_open(config_get("plugin:proc:/proc/vmstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time + + size_t lines = procfile_lines(ff), l; + + arl_begin(arl_base); + for(l = 0; l < lines ;l++) { + size_t words = procfile_linewords(ff, l); + if(unlikely(words < 2)) { + if(unlikely(words)) error("Cannot read /proc/vmstat line %zu. 
Expected 2 params, read %zu.", l, words); + continue; + } + + if(unlikely(arl_check(arl_base, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + // -------------------------------------------------------------------- + + if(pswpin || pswpout || do_swapio == CONFIG_BOOLEAN_YES) { + do_swapio = CONFIG_BOOLEAN_YES; + + static RRDSET *st_swapio = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_swapio)) { + st_swapio = rrdset_create_localhost( + "system" + , "swapio" + , NULL + , "swap" + , NULL + , "Swap I/O" + , "kilobytes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_VMSTAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_SWAPIO + , update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_swapio); + + rrddim_set_by_pointer(st_swapio, rd_in, pswpin); + rrddim_set_by_pointer(st_swapio, rd_out, pswpout); + rrdset_done(st_swapio); + } + + // -------------------------------------------------------------------- + + if(do_io) { + static RRDSET *st_io = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_io)) { + st_io = rrdset_create_localhost( + "system" + , "pgpgio" + , NULL + , "disk" + , NULL + , "Memory Paged from/to disk" + , "kilobytes/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_VMSTAT_NAME + , NETDATA_CHART_PRIO_SYSTEM_PGPGIO + , update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_io); + + rrddim_set_by_pointer(st_io, rd_in, pgpgin); + rrddim_set_by_pointer(st_io, rd_out, pgpgout); + rrdset_done(st_io); + } + + // -------------------------------------------------------------------- + + if(do_pgfaults) { + static RRDSET *st_pgfaults = NULL; + static RRDDIM *rd_minor = NULL, *rd_major = NULL; + + if(unlikely(!st_pgfaults)) { + st_pgfaults = rrdset_create_localhost( + "mem" + , "pgfaults" + , NULL + , "system" + , NULL + , "Memory Page Faults" + , "page faults/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_VMSTAT_NAME + , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL); + + rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_pgfaults); + + rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault); + rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault); + rrdset_done(st_pgfaults); + } + + // -------------------------------------------------------------------- + + // Ondemand criteria for NUMA. Since this won't change at run time, we + // check it only once. We check whether the node count is >= 2 because + // single-node systems have uninteresting statistics (since all accesses + // are local). + if(unlikely(has_numa == -1)) + + has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates || + numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 
1 : 0; + + if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && has_numa)) { + do_numa = CONFIG_BOOLEAN_YES; + + static RRDSET *st_numa = NULL; + static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL, *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL, *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL; + + if(unlikely(!st_numa)) { + st_numa = rrdset_create_localhost( + "mem" + , "numa" + , NULL + , "numa" + , NULL + , "NUMA events" + , "events/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_VMSTAT_NAME + , NETDATA_CHART_PRIO_MEM_NUMA + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL); + + // These depend on CONFIG_NUMA in the kernel. + rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + // The following stats depend on CONFIG_NUMA_BALANCING in the + // kernel. + rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(st_numa); + + rrddim_set_by_pointer(st_numa, rd_local, numa_local); + rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign); + rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave); + rrddim_set_by_pointer(st_numa, rd_other, numa_other); + + rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates); + rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates); + rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults); + rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local); + rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated); + + rrdset_done(st_numa); + } + + return 0; +} + diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c new file mode 100644 index 000000000..03cbfff83 --- /dev/null +++ b/collectors/proc.plugin/sys_devices_system_edac_mc.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +struct mc { + char *name; + char ce_updated; + char ue_updated; + + char *ce_count_filename; + char *ue_count_filename; + + procfile *ce_ff; + procfile *ue_ff; + + collected_number ce_count; + collected_number ue_count; + + RRDDIM *ce_rd; + RRDDIM *ue_rd; + + struct mc *next; +}; +static struct mc *mc_root = NULL; + +static void find_all_mc() { + char name[FILENAME_MAX + 1]; + snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/edac/mc"); + char *dirname = config_get("plugin:proc:/sys/devices/system/edac/mc", "directory to monitor", name); + + DIR *dir = opendir(dirname); + if(unlikely(!dir)) { + error("Cannot read ECC memory errors directory '%s'", dirname); + return; + } + + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR && de->d_name[0] == 'm' && 
de->d_name[1] == 'c' && isdigit(de->d_name[2])) { + struct mc *m = callocz(1, sizeof(struct mc)); + m->name = strdupz(de->d_name); + + struct stat st; + + snprintfz(name, FILENAME_MAX, "%s/%s/ce_count", dirname, de->d_name); + if(stat(name, &st) != -1) + m->ce_count_filename = strdupz(name); + + snprintfz(name, FILENAME_MAX, "%s/%s/ue_count", dirname, de->d_name); + if(stat(name, &st) != -1) + m->ue_count_filename = strdupz(name); + + if(!m->ce_count_filename && !m->ue_count_filename) { + freez(m->name); + freez(m); + } + else { + m->next = mc_root; + mc_root = m; + } + } + } + + closedir(dir); +} + +int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) { + (void)dt; + + if(unlikely(mc_root == NULL)) { + find_all_mc(); + if(unlikely(mc_root == NULL)) + return 1; + } + + static int do_ce = -1, do_ue = -1; + calculated_number ce_sum = 0, ue_sum = 0; + struct mc *m; + + if(unlikely(do_ce == -1)) { + do_ce = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory correctable errors", CONFIG_BOOLEAN_AUTO); + do_ue = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory uncorrectable errors", CONFIG_BOOLEAN_AUTO); + } + + if(do_ce != CONFIG_BOOLEAN_NO) { + for(m = mc_root; m; m = m->next) { + if(m->ce_count_filename) { + m->ce_updated = 0; + + if(unlikely(!m->ce_ff)) { + m->ce_ff = procfile_open(m->ce_count_filename, " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!m->ce_ff)) + continue; + } + + m->ce_ff = procfile_readall(m->ce_ff); + if(unlikely(!m->ce_ff || procfile_lines(m->ce_ff) < 1 || procfile_linewords(m->ce_ff, 0) < 1)) + continue; + + m->ce_count = str2ull(procfile_lineword(m->ce_ff, 0, 0)); + ce_sum += m->ce_count; + m->ce_updated = 1; + } + } + } + + if(do_ue != CONFIG_BOOLEAN_NO) { + for(m = mc_root; m; m = m->next) { + if(m->ue_count_filename) { + m->ue_updated = 0; + + if(unlikely(!m->ue_ff)) { + m->ue_ff = procfile_open(m->ue_count_filename, " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!m->ue_ff)) + continue; + } + + m->ue_ff = procfile_readall(m->ue_ff); + if(unlikely(!m->ue_ff || procfile_lines(m->ue_ff) < 1 || procfile_linewords(m->ue_ff, 0) < 1)) + continue; + + m->ue_count = str2ull(procfile_lineword(m->ue_ff, 0, 0)); + ue_sum += m->ue_count; + m->ue_updated = 1; + } + } + } + + // -------------------------------------------------------------------- + + if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && ce_sum > 0)) { + do_ce = CONFIG_BOOLEAN_YES; + + static RRDSET *ce_st = NULL; + + if(unlikely(!ce_st)) { + ce_st = rrdset_create_localhost( + "mem" + , "ecc_ce" + , NULL + , "ecc" + , NULL + , "ECC Memory Correctable Errors" + , "errors" + , PLUGIN_PROC_NAME + , "/sys/devices/system/edac/mc" + , NETDATA_CHART_PRIO_MEM_HW_ECC_CE + , update_every + , RRDSET_TYPE_LINE + ); + } + else + rrdset_next(ce_st); + + for(m = mc_root; m; m = m->next) { + if (m->ce_count_filename && m->ce_updated) { + if(unlikely(!m->ce_rd)) + m->ce_rd = rrddim_add(ce_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(ce_st, m->ce_rd, m->ce_count); + } + } + + rrdset_done(ce_st); + } + + // -------------------------------------------------------------------- + + if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && ue_sum > 0)) { + do_ue = CONFIG_BOOLEAN_YES; + + static RRDSET *ue_st = NULL; + + if(unlikely(!ue_st)) { + ue_st = rrdset_create_localhost( + "mem" + , "ecc_ue" + , NULL + , "ecc" + , NULL + , "ECC Memory Uncorrectable Errors" + , "errors" + , 
PLUGIN_PROC_NAME + , "/sys/devices/system/edac/mc" + , NETDATA_CHART_PRIO_MEM_HW_ECC_UE + , update_every + , RRDSET_TYPE_LINE + ); + } + else + rrdset_next(ue_st); + + for(m = mc_root; m; m = m->next) { + if (m->ue_count_filename && m->ue_updated) { + if(unlikely(!m->ue_rd)) + m->ue_rd = rrddim_add(ue_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(ue_st, m->ue_rd, m->ue_count); + } + } + + rrdset_done(ue_st); + } + + return 0; +} diff --git a/collectors/proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c new file mode 100644 index 000000000..6e6d0acca --- /dev/null +++ b/collectors/proc.plugin/sys_devices_system_node.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +struct node { + char *name; + char *numastat_filename; + procfile *numastat_ff; + RRDSET *numastat_st; + struct node *next; +}; +static struct node *numa_root = NULL; + +static int find_all_nodes() { + int numa_node_count = 0; + char name[FILENAME_MAX + 1]; + snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); + char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); + + DIR *dir = opendir(dirname); + if(!dir) { + error("Cannot read NUMA node directory '%s'", dirname); + return 0; + } + + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type != DT_DIR) + continue; + + if(strncmp(de->d_name, "node", 4) != 0) + continue; + + if(!isdigit(de->d_name[4])) + continue; + + numa_node_count++; + + struct node *m = callocz(1, sizeof(struct node)); + m->name = strdupz(de->d_name); + + struct stat st; + + snprintfz(name, FILENAME_MAX, "%s/%s/numastat", dirname, de->d_name); + if(stat(name, &st) == -1) { + freez(m->name); + freez(m); + continue; + } + + m->numastat_filename = strdupz(name); + + m->next = numa_root; + numa_root = m; + } + + closedir(dir); + + return numa_node_count; +} + +int do_proc_sys_devices_system_node(int update_every, usec_t dt) { + (void)dt; + + static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0; + static int do_numastat = -1, numa_node_count = 0; + struct node *m; + + if(unlikely(numa_root == NULL)) { + numa_node_count = find_all_nodes(); + if(unlikely(numa_root == NULL)) + return 1; + } + + if(unlikely(do_numastat == -1)) { + do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO); + + hash_local_node = simple_hash("local_node"); + hash_numa_foreign = simple_hash("numa_foreign"); + hash_interleave_hit = simple_hash("interleave_hit"); + hash_other_node = simple_hash("other_node"); + hash_numa_hit = simple_hash("numa_hit"); + hash_numa_miss = simple_hash("numa_miss"); + } + + if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) { + for(m = numa_root; m; m = m->next) { + if(m->numastat_filename) { + + if(unlikely(!m->numastat_ff)) { + m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT); + + if(unlikely(!m->numastat_ff)) + continue; + } + + m->numastat_ff = procfile_readall(m->numastat_ff); + if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1)) + continue; + + if(unlikely(!m->numastat_st)) { + m->numastat_st = rrdset_create_localhost( + "mem" + , m->name + , NULL + , "numa" + , NULL + , "NUMA 
events" + , "events/s" + , PLUGIN_PROC_NAME + , "/sys/devices/system/node" + , NETDATA_CHART_PRIO_MEM_NUMA_NODES + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL); + + rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL); + + } + else rrdset_next(m->numastat_st); + + size_t lines = procfile_lines(m->numastat_ff), l; + for(l = 0; l < lines; l++) { + size_t words = procfile_linewords(m->numastat_ff, l); + + if(unlikely(words < 2)) { + if(unlikely(words)) + error("Cannot read %s numastat line %zu. Expected 2 params, read %zu.", m->name, l, words); + continue; + } + + char *name = procfile_lineword(m->numastat_ff, l, 0); + char *value = procfile_lineword(m->numastat_ff, l, 1); + + if (unlikely(!name || !*name || !value || !*value)) + continue; + + uint32_t hash = simple_hash(name); + if(likely( + (hash == hash_numa_hit && !strcmp(name, "numa_hit")) + || (hash == hash_numa_miss && !strcmp(name, "numa_miss")) + || (hash == hash_local_node && !strcmp(name, "local_node")) + || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign")) + || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit")) + || (hash == hash_other_node && !strcmp(name, "other_node")) + )) + rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value)); + } + + rrdset_done(m->numastat_st); + } + } + } + + return 0; +} diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c new file mode 100644 index 000000000..ed980cea5 --- /dev/null +++ b/collectors/proc.plugin/sys_fs_btrfs.c @@ -0,0 +1,722 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_BTRFS_NAME "/sys/fs/btrfs" + +typedef struct btrfs_disk { + char *name; + uint32_t hash; + int exists; + + char *size_filename; + char *hw_sector_size_filename; + unsigned long long size; + unsigned long long hw_sector_size; + + struct btrfs_disk *next; +} BTRFS_DISK; + +typedef struct btrfs_node { + int exists; + int logged_error; + + char *id; + uint32_t hash; + + char *label; + + // unsigned long long int sectorsize; + // unsigned long long int nodesize; + // unsigned long long int quota_override; + + #define declare_btrfs_allocation_section_field(SECTION, FIELD) \ + char *allocation_ ## SECTION ## _ ## FIELD ## _filename; \ + unsigned long long int allocation_ ## SECTION ## _ ## FIELD; + + #define declare_btrfs_allocation_field(FIELD) \ + char *allocation_ ## FIELD ## _filename; \ + unsigned long long int allocation_ ## FIELD; + + RRDSET *st_allocation_disks; + RRDDIM *rd_allocation_disks_unallocated; + RRDDIM *rd_allocation_disks_data_used; + RRDDIM *rd_allocation_disks_data_free; + RRDDIM *rd_allocation_disks_metadata_used; + RRDDIM *rd_allocation_disks_metadata_free; + RRDDIM *rd_allocation_disks_system_used; + RRDDIM *rd_allocation_disks_system_free; + unsigned long long all_disks_total; + + RRDSET *st_allocation_data; + RRDDIM *rd_allocation_data_free; + RRDDIM *rd_allocation_data_used; + declare_btrfs_allocation_section_field(data, total_bytes) + 
declare_btrfs_allocation_section_field(data, bytes_used) + declare_btrfs_allocation_section_field(data, disk_total) + declare_btrfs_allocation_section_field(data, disk_used) + + RRDSET *st_allocation_metadata; + RRDDIM *rd_allocation_metadata_free; + RRDDIM *rd_allocation_metadata_used; + RRDDIM *rd_allocation_metadata_reserved; + declare_btrfs_allocation_section_field(metadata, total_bytes) + declare_btrfs_allocation_section_field(metadata, bytes_used) + declare_btrfs_allocation_section_field(metadata, disk_total) + declare_btrfs_allocation_section_field(metadata, disk_used) + //declare_btrfs_allocation_field(global_rsv_reserved) + declare_btrfs_allocation_field(global_rsv_size) + + RRDSET *st_allocation_system; + RRDDIM *rd_allocation_system_free; + RRDDIM *rd_allocation_system_used; + declare_btrfs_allocation_section_field(system, total_bytes) + declare_btrfs_allocation_section_field(system, bytes_used) + declare_btrfs_allocation_section_field(system, disk_total) + declare_btrfs_allocation_section_field(system, disk_used) + + BTRFS_DISK *disks; + + struct btrfs_node *next; +} BTRFS_NODE; + +static BTRFS_NODE *nodes = NULL; + +static inline void btrfs_free_disk(BTRFS_DISK *d) { + freez(d->name); + freez(d->size_filename); + freez(d->hw_sector_size_filename); + freez(d); +} + +static inline void btrfs_free_node(BTRFS_NODE *node) { + // info("BTRFS: destroying '%s'", node->id); + + if(node->st_allocation_disks) + rrdset_is_obsolete(node->st_allocation_disks); + + if(node->st_allocation_data) + rrdset_is_obsolete(node->st_allocation_data); + + if(node->st_allocation_metadata) + rrdset_is_obsolete(node->st_allocation_metadata); + + if(node->st_allocation_system) + rrdset_is_obsolete(node->st_allocation_system); + + freez(node->allocation_data_bytes_used_filename); + freez(node->allocation_data_total_bytes_filename); + + freez(node->allocation_metadata_bytes_used_filename); + freez(node->allocation_metadata_total_bytes_filename); + + freez(node->allocation_system_bytes_used_filename); + freez(node->allocation_system_total_bytes_filename); + + while(node->disks) { + BTRFS_DISK *d = node->disks; + node->disks = node->disks->next; + btrfs_free_disk(d); + } + + freez(node->label); + freez(node->id); + freez(node); +} + +static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) { + char filename[FILENAME_MAX + 1]; + + node->all_disks_total = 0; + + BTRFS_DISK *d; + for(d = node->disks ; d ; d = d->next) + d->exists = 0; + + DIR *dir = opendir(path); + if (!dir) { + if(!node->logged_error) { + error("BTRFS: Cannot open directory '%s'.", path); + node->logged_error = 1; + } + return 1; + } + node->logged_error = 0; + + struct dirent *de = NULL; + while ((de = readdir(dir))) { + if (de->d_type != DT_LNK + || !strcmp(de->d_name, ".") + || !strcmp(de->d_name, "..") + ) { + // info("BTRFS: ignoring '%s'", de->d_name); + continue; + } + + uint32_t hash = simple_hash(de->d_name); + + // -------------------------------------------------------------------- + // search for it + + for(d = node->disks ; d ; d = d->next) { + if(hash == d->hash && !strcmp(de->d_name, d->name)) + break; + } + + // -------------------------------------------------------------------- + // did we find it? 
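+        // if the search above fell off the end of the list, d is NULL and this
+        // is a device we have not seen before: allocate a record, resolve the
+        // sysfs paths for its size and hw_sector_size (trying the bcache,
+        // whole-disk and partition layouts in that order) and link it into the
+        // node's disk list; known devices skip straight to being marked present.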
+ + if(!d) { + d = callocz(sizeof(BTRFS_DISK), 1); + + d->name = strdupz(de->d_name); + d->hash = simple_hash(d->name); + + snprintfz(filename, FILENAME_MAX, "%s/%s/size", path, de->d_name); + d->size_filename = strdupz(filename); + + // for bcache + snprintfz(filename, FILENAME_MAX, "%s/%s/bcache/../queue/hw_sector_size", path, de->d_name); + struct stat sb; + if(stat(filename, &sb) == -1) { + // for disks + snprintfz(filename, FILENAME_MAX, "%s/%s/queue/hw_sector_size", path, de->d_name); + if(stat(filename, &sb) == -1) + // for partitions + snprintfz(filename, FILENAME_MAX, "%s/%s/../queue/hw_sector_size", path, de->d_name); + } + + d->hw_sector_size_filename = strdupz(filename); + + // link it + d->next = node->disks; + node->disks = d; + } + + d->exists = 1; + + + // -------------------------------------------------------------------- + // update the values + + if(read_single_number_file(d->size_filename, &d->size) != 0) { + error("BTRFS: failed to read '%s'", d->size_filename); + d->exists = 0; + continue; + } + + if(read_single_number_file(d->hw_sector_size_filename, &d->hw_sector_size) != 0) { + error("BTRFS: failed to read '%s'", d->hw_sector_size_filename); + d->exists = 0; + continue; + } + + node->all_disks_total += d->size * d->hw_sector_size; + } + closedir(dir); + + // ------------------------------------------------------------------------ + // cleanup + + BTRFS_DISK *last = NULL; + d = node->disks; + + while(d) { + if(unlikely(!d->exists)) { + if(unlikely(node->disks == d)) { + node->disks = d->next; + btrfs_free_disk(d); + d = node->disks; + last = NULL; + } + else { + last->next = d->next; + btrfs_free_disk(d); + d = last->next; + } + + continue; + } + + last = d; + d = d->next; + } + + return 0; +} + + +static inline int find_all_btrfs_pools(const char *path) { + static int logged_error = 0; + char filename[FILENAME_MAX + 1]; + + BTRFS_NODE *node; + for(node = nodes ; node ; node = node->next) + node->exists = 0; + + DIR *dir = opendir(path); + if (!dir) { + if(!logged_error) { + error("BTRFS: Cannot open directory '%s'.", path); + logged_error = 1; + } + return 1; + } + logged_error = 0; + + struct dirent *de = NULL; + while ((de = readdir(dir))) { + if(de->d_type != DT_DIR + || !strcmp(de->d_name, ".") + || !strcmp(de->d_name, "..") + || !strcmp(de->d_name, "features") + ) { + // info("BTRFS: ignoring '%s'", de->d_name); + continue; + } + + uint32_t hash = simple_hash(de->d_name); + + // search for it + for(node = nodes ; node ; node = node->next) { + if(hash == node->hash && !strcmp(de->d_name, node->id)) + break; + } + + // did we find it? 
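+        // (node is non-NULL when this pool id is already tracked; in that
+        // case only its member disks are re-scanned below, everything else
+        // stays cached)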
+ if(node) { + // info("BTRFS: already exists '%s'", de->d_name); + node->exists = 1; + + // update the disk sizes + snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name); + find_btrfs_disks(node, filename); + + continue; + } + + // info("BTRFS: adding '%s'", de->d_name); + + // not found, create it + node = callocz(sizeof(BTRFS_NODE), 1); + + node->id = strdupz(de->d_name); + node->hash = simple_hash(node->id); + node->exists = 1; + + { + char label[FILENAME_MAX + 1] = ""; + + snprintfz(filename, FILENAME_MAX, "%s/%s/label", path, de->d_name); + read_file(filename, label, FILENAME_MAX); + + char *s = label; + if (s[0]) + s = trim(label); + + if(s && s[0]) + node->label = strdupz(s); + else + node->label = strdupz(node->id); + } + + //snprintfz(filename, FILENAME_MAX, "%s/%s/sectorsize", path, de->d_name); + //if(read_single_number_file(filename, &node->sectorsize) != 0) { + // error("BTRFS: failed to read '%s'", filename); + // btrfs_free_node(node); + // continue; + //} + + //snprintfz(filename, FILENAME_MAX, "%s/%s/nodesize", path, de->d_name); + //if(read_single_number_file(filename, &node->nodesize) != 0) { + // error("BTRFS: failed to read '%s'", filename); + // btrfs_free_node(node); + // continue; + //} + + //snprintfz(filename, FILENAME_MAX, "%s/%s/quota_override", path, de->d_name); + //if(read_single_number_file(filename, &node->quota_override) != 0) { + // error("BTRFS: failed to read '%s'", filename); + // btrfs_free_node(node); + // continue; + //} + + // -------------------------------------------------------------------- + // macros to simplify our life + + #define init_btrfs_allocation_field(FIELD) {\ + snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #FIELD, path, de->d_name); \ + if(read_single_number_file(filename, &node->allocation_ ## FIELD) != 0) {\ + error("BTRFS: failed to read '%s'", filename);\ + btrfs_free_node(node);\ + continue;\ + }\ + if(!node->allocation_ ## FIELD ## _filename)\ + node->allocation_ ## FIELD ## _filename = strdupz(filename);\ + } + + #define init_btrfs_allocation_section_field(SECTION, FIELD) {\ + snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #SECTION "/" #FIELD, path, de->d_name); \ + if(read_single_number_file(filename, &node->allocation_ ## SECTION ## _ ## FIELD) != 0) {\ + error("BTRFS: failed to read '%s'", filename);\ + btrfs_free_node(node);\ + continue;\ + }\ + if(!node->allocation_ ## SECTION ## _ ## FIELD ## _filename)\ + node->allocation_ ## SECTION ## _ ## FIELD ## _filename = strdupz(filename);\ + } + + // -------------------------------------------------------------------- + // allocation/data + + init_btrfs_allocation_section_field(data, total_bytes); + init_btrfs_allocation_section_field(data, bytes_used); + init_btrfs_allocation_section_field(data, disk_total); + init_btrfs_allocation_section_field(data, disk_used); + + + // -------------------------------------------------------------------- + // allocation/metadata + + init_btrfs_allocation_section_field(metadata, total_bytes); + init_btrfs_allocation_section_field(metadata, bytes_used); + init_btrfs_allocation_section_field(metadata, disk_total); + init_btrfs_allocation_section_field(metadata, disk_used); + + init_btrfs_allocation_field(global_rsv_size); + // init_btrfs_allocation_field(global_rsv_reserved); + + + // -------------------------------------------------------------------- + // allocation/system + + init_btrfs_allocation_section_field(system, total_bytes); + init_btrfs_allocation_section_field(system, bytes_used); + 
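+        // (each init_* invocation above and below both verifies that the
+        // sysfs attribute is readable and caches its path for later
+        // collection; the continue inside the macro abandons the whole
+        // pool on the first unreadable attribute)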
init_btrfs_allocation_section_field(system, disk_total); + init_btrfs_allocation_section_field(system, disk_used); + + + // -------------------------------------------------------------------- + // find all disks related to this node + // and collect their sizes + + snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name); + find_btrfs_disks(node, filename); + + + // -------------------------------------------------------------------- + // link it + + // info("BTRFS: linking '%s'", node->id); + node->next = nodes; + nodes = node; + } + closedir(dir); + + + // ------------------------------------------------------------------------ + // cleanup + + BTRFS_NODE *last = NULL; + node = nodes; + + while(node) { + if(unlikely(!node->exists)) { + if(unlikely(nodes == node)) { + nodes = node->next; + btrfs_free_node(node); + node = nodes; + last = NULL; + } + else { + last->next = node->next; + btrfs_free_node(node); + node = last->next; + } + + continue; + } + + last = node; + node = node->next; + } + + return 0; +} + +int do_sys_fs_btrfs(int update_every, usec_t dt) { + static int initialized = 0 + , do_allocation_disks = CONFIG_BOOLEAN_AUTO + , do_allocation_system = CONFIG_BOOLEAN_AUTO + , do_allocation_data = CONFIG_BOOLEAN_AUTO + , do_allocation_metadata = CONFIG_BOOLEAN_AUTO; + + static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC; + static char *btrfs_path = NULL; + + (void)dt; + + if(unlikely(!initialized)) { + initialized = 1; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs"); + btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename); + + refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC; + refresh_delta = refresh_every; + + do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks); + do_allocation_data = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "data allocation", do_allocation_data); + do_allocation_metadata = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "metadata allocation", do_allocation_metadata); + do_allocation_system = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "system allocation", do_allocation_system); + } + + refresh_delta += dt; + if(refresh_delta >= refresh_every) { + refresh_delta = 0; + find_all_btrfs_pools(btrfs_path); + } + + BTRFS_NODE *node; + for(node = nodes; node ; node = node->next) { + // -------------------------------------------------------------------- + // allocation/system + + #define collect_btrfs_allocation_field(FIELD) \ + read_single_number_file(node->allocation_ ## FIELD ## _filename, &node->allocation_ ## FIELD) + + #define collect_btrfs_allocation_section_field(SECTION, FIELD) \ + read_single_number_file(node->allocation_ ## SECTION ## _ ## FIELD ## _filename, &node->allocation_ ## SECTION ## _ ## FIELD) + + if(do_allocation_disks != CONFIG_BOOLEAN_NO) { + if( collect_btrfs_allocation_section_field(data, disk_total) != 0 + || collect_btrfs_allocation_section_field(data, disk_used) != 0 + || collect_btrfs_allocation_section_field(metadata, disk_total) != 0 + || collect_btrfs_allocation_section_field(metadata, disk_used) != 0 + || collect_btrfs_allocation_section_field(system, disk_total) != 0 + || collect_btrfs_allocation_section_field(system, disk_used) != 0) { + error("BTRFS: failed to collect physical disks allocation 
for '%s'", node->id); + // make it refresh btrfs at the next iteration + refresh_delta = refresh_every; + continue; + } + } + + if(do_allocation_data != CONFIG_BOOLEAN_NO) { + if (collect_btrfs_allocation_section_field(data, total_bytes) != 0 + || collect_btrfs_allocation_section_field(data, bytes_used) != 0) { + error("BTRFS: failed to collect allocation/data for '%s'", node->id); + // make it refresh btrfs at the next iteration + refresh_delta = refresh_every; + continue; + } + } + + if(do_allocation_metadata != CONFIG_BOOLEAN_NO) { + if (collect_btrfs_allocation_section_field(metadata, total_bytes) != 0 + || collect_btrfs_allocation_section_field(metadata, bytes_used) != 0 + || collect_btrfs_allocation_field(global_rsv_size) != 0 + ) { + error("BTRFS: failed to collect allocation/metadata for '%s'", node->id); + // make it refresh btrfs at the next iteration + refresh_delta = refresh_every; + continue; + } + } + + if(do_allocation_system != CONFIG_BOOLEAN_NO) { + if (collect_btrfs_allocation_section_field(system, total_bytes) != 0 + || collect_btrfs_allocation_section_field(system, bytes_used) != 0) { + error("BTRFS: failed to collect allocation/system for '%s'", node->id); + // make it refresh btrfs at the next iteration + refresh_delta = refresh_every; + continue; + } + } + + // -------------------------------------------------------------------- + // allocation/disks + + if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && node->all_disks_total && node->allocation_data_disk_total)) { + do_allocation_disks = CONFIG_BOOLEAN_YES; + + if(unlikely(!node->st_allocation_disks)) { + char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; + + snprintf(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id); + snprintf(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label); + snprintf(title, 200, "BTRFS Physical Disk Allocation for %s", node->label); + + netdata_fix_chart_id(id); + netdata_fix_chart_name(name); + + node->st_allocation_disks = rrdset_create_localhost( + "btrfs" + , id + , name + , node->label + , "btrfs.disk" + , title + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_BTRFS_NAME + , NETDATA_CHART_PRIO_BTRFS_DISK + , update_every + , RRDSET_TYPE_STACKED + ); + + node->rd_allocation_disks_unallocated = rrddim_add(node->st_allocation_disks, "unallocated", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_disks_data_free = rrddim_add(node->st_allocation_disks, "data_free", "data free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_disks_data_used = rrddim_add(node->st_allocation_disks, "data_used", "data used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_disks_metadata_free = rrddim_add(node->st_allocation_disks, "meta_free", "meta free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_disks_metadata_used = rrddim_add(node->st_allocation_disks, "meta_used", "meta used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_disks_system_free = rrddim_add(node->st_allocation_disks, "sys_free", "sys free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_disks_system_used = rrddim_add(node->st_allocation_disks, "sys_used", "sys used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(node->st_allocation_disks); + + // unsigned long long disk_used = node->allocation_data_disk_used + node->allocation_metadata_disk_used + node->allocation_system_disk_used; + unsigned long long disk_total = node->allocation_data_disk_total + 
node->allocation_metadata_disk_total + node->allocation_system_disk_total; + unsigned long long disk_unallocated = node->all_disks_total - disk_total; + + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_unallocated, disk_unallocated); + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_used, node->allocation_data_disk_used); + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_free, node->allocation_data_disk_total - node->allocation_data_disk_used); + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_used, node->allocation_metadata_disk_used); + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_free, node->allocation_metadata_disk_total - node->allocation_metadata_disk_used); + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_used, node->allocation_system_disk_used); + rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_free, node->allocation_system_disk_total - node->allocation_system_disk_used); + rrdset_done(node->st_allocation_disks); + } + + + // -------------------------------------------------------------------- + // allocation/data + + if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && node->allocation_data_total_bytes)) { + do_allocation_data = CONFIG_BOOLEAN_YES; + + if(unlikely(!node->st_allocation_data)) { + char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; + + snprintf(id, RRD_ID_LENGTH_MAX, "data_%s", node->id); + snprintf(name, RRD_ID_LENGTH_MAX, "data_%s", node->label); + snprintf(title, 200, "BTRFS Data Allocation for %s", node->label); + + netdata_fix_chart_id(id); + netdata_fix_chart_name(name); + + node->st_allocation_data = rrdset_create_localhost( + "btrfs" + , id + , name + , node->label + , "btrfs.data" + , title + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_BTRFS_NAME + , NETDATA_CHART_PRIO_BTRFS_DATA + , update_every + , RRDSET_TYPE_STACKED + ); + + node->rd_allocation_data_free = rrddim_add(node->st_allocation_data, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_data_used = rrddim_add(node->st_allocation_data, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(node->st_allocation_data); + + rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_free, node->allocation_data_total_bytes - node->allocation_data_bytes_used); + rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_used, node->allocation_data_bytes_used); + rrdset_done(node->st_allocation_data); + } + + // -------------------------------------------------------------------- + // allocation/metadata + + if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && node->allocation_metadata_total_bytes)) { + do_allocation_metadata = CONFIG_BOOLEAN_YES; + + if(unlikely(!node->st_allocation_metadata)) { + char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; + + snprintf(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id); + snprintf(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label); + snprintf(title, 200, "BTRFS Metadata Allocation for %s", node->label); + + netdata_fix_chart_id(id); + netdata_fix_chart_name(name); + + node->st_allocation_metadata = rrdset_create_localhost( + "btrfs" + , id + , name + , node->label + , "btrfs.metadata" + , title + , "MB" + , 
PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_BTRFS_NAME + , NETDATA_CHART_PRIO_BTRFS_METADATA + , update_every + , RRDSET_TYPE_STACKED + ); + + node->rd_allocation_metadata_free = rrddim_add(node->st_allocation_metadata, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_metadata_used = rrddim_add(node->st_allocation_metadata, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_metadata_reserved = rrddim_add(node->st_allocation_metadata, "reserved", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(node->st_allocation_metadata); + + rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_free, node->allocation_metadata_total_bytes - node->allocation_metadata_bytes_used - node->allocation_global_rsv_size); + rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_used, node->allocation_metadata_bytes_used); + rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_reserved, node->allocation_global_rsv_size); + rrdset_done(node->st_allocation_metadata); + } + + // -------------------------------------------------------------------- + // allocation/system + + if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && node->allocation_system_total_bytes)) { + do_allocation_system = CONFIG_BOOLEAN_YES; + + if(unlikely(!node->st_allocation_system)) { + char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; + + snprintf(id, RRD_ID_LENGTH_MAX, "system_%s", node->id); + snprintf(name, RRD_ID_LENGTH_MAX, "system_%s", node->label); + snprintf(title, 200, "BTRFS System Allocation for %s", node->label); + + netdata_fix_chart_id(id); + netdata_fix_chart_name(name); + + node->st_allocation_system = rrdset_create_localhost( + "btrfs" + , id + , name + , node->label + , "btrfs.system" + , title + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_BTRFS_NAME + , NETDATA_CHART_PRIO_BTRFS_SYSTEM + , update_every + , RRDSET_TYPE_STACKED + ); + + node->rd_allocation_system_free = rrddim_add(node->st_allocation_system, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + node->rd_allocation_system_used = rrddim_add(node->st_allocation_system, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(node->st_allocation_system); + + rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_free, node->allocation_system_total_bytes - node->allocation_system_bytes_used); + rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_used, node->allocation_system_bytes_used); + rrdset_done(node->st_allocation_system); + } + } + + return 0; +} + diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c new file mode 100644 index 000000000..0f5c79c49 --- /dev/null +++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_KSM_NAME "/sys/kernel/mm/ksm" + +typedef struct ksm_name_value { + char filename[FILENAME_MAX + 1]; + unsigned long long value; +} KSM_NAME_VALUE; + +#define PAGES_SHARED 0 +#define PAGES_SHARING 1 +#define PAGES_UNSHARED 2 +#define PAGES_VOLATILE 3 +#define PAGES_TO_SCAN 4 + +KSM_NAME_VALUE values[] = { + [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL }, + [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL }, + [PAGES_UNSHARED] = { "/sys/kernel/mm/ksm/pages_unshared", 
0ULL }, + [PAGES_VOLATILE] = { "/sys/kernel/mm/ksm/pages_volatile", 0ULL }, + // [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL }, +}; + +int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { + (void)dt; + static procfile *ff_pages_shared = NULL, *ff_pages_sharing = NULL, *ff_pages_unshared = NULL, *ff_pages_volatile = NULL/*, *ff_pages_to_scan = NULL*/; + static unsigned long page_size = 0; + + if(unlikely(page_size == 0)) + page_size = (unsigned long)sysconf(_SC_PAGESIZE); + + if(unlikely(!ff_pages_shared)) { + snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_shared"); + snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_shared", values[PAGES_SHARED].filename)); + ff_pages_shared = procfile_open(values[PAGES_SHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT); + } + + if(unlikely(!ff_pages_sharing)) { + snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_sharing"); + snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_sharing", values[PAGES_SHARING].filename)); + ff_pages_sharing = procfile_open(values[PAGES_SHARING].filename, " \t:", PROCFILE_FLAG_DEFAULT); + } + + if(unlikely(!ff_pages_unshared)) { + snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_unshared"); + snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_unshared", values[PAGES_UNSHARED].filename)); + ff_pages_unshared = procfile_open(values[PAGES_UNSHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT); + } + + if(unlikely(!ff_pages_volatile)) { + snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_volatile"); + snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_volatile", values[PAGES_VOLATILE].filename)); + ff_pages_volatile = procfile_open(values[PAGES_VOLATILE].filename, " \t:", PROCFILE_FLAG_DEFAULT); + } + + //if(unlikely(!ff_pages_to_scan)) { + // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_to_scan"); + // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_to_scan", values[PAGES_TO_SCAN].filename)); + // ff_pages_to_scan = procfile_open(values[PAGES_TO_SCAN].filename, " \t:", PROCFILE_FLAG_DEFAULT); + //} + + if(unlikely(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile /*|| !ff_pages_to_scan */)) + return 1; + + unsigned long long pages_shared = 0, pages_sharing = 0, pages_unshared = 0, pages_volatile = 0, /*pages_to_scan = 0,*/ offered = 0, saved = 0; + + ff_pages_shared = procfile_readall(ff_pages_shared); + if(unlikely(!ff_pages_shared)) return 0; // we return 0, so that we will retry to open it next time + pages_shared = str2ull(procfile_lineword(ff_pages_shared, 0, 0)); + + ff_pages_sharing = procfile_readall(ff_pages_sharing); + if(unlikely(!ff_pages_sharing)) return 0; // we return 0, so that we will retry to open it next time + pages_sharing = str2ull(procfile_lineword(ff_pages_sharing, 0, 0)); + + 
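+    // (per the kernel's Documentation/vm/ksm.txt: pages_sharing counts the
+    // extra references de-duplicated into pages_shared, so it approximates
+    // the memory KSM has saved; "offered" below sums everything KSM has
+    // scanned)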
ff_pages_unshared = procfile_readall(ff_pages_unshared); + if(unlikely(!ff_pages_unshared)) return 0; // we return 0, so that we will retry to open it next time + pages_unshared = str2ull(procfile_lineword(ff_pages_unshared, 0, 0)); + + ff_pages_volatile = procfile_readall(ff_pages_volatile); + if(unlikely(!ff_pages_volatile)) return 0; // we return 0, so that we will retry to open it next time + pages_volatile = str2ull(procfile_lineword(ff_pages_volatile, 0, 0)); + + //ff_pages_to_scan = procfile_readall(ff_pages_to_scan); + //if(unlikely(!ff_pages_to_scan)) return 0; // we return 0, so that we will retry to open it next time + //pages_to_scan = str2ull(procfile_lineword(ff_pages_to_scan, 0, 0)); + + offered = pages_sharing + pages_shared + pages_unshared + pages_volatile; + saved = pages_sharing; + + if(unlikely(!offered /*|| !pages_to_scan*/)) return 0; + + // -------------------------------------------------------------------- + + { + static RRDSET *st_mem_ksm = NULL; + static RRDDIM *rd_shared = NULL, *rd_unshared = NULL, *rd_sharing = NULL, *rd_volatile = NULL/*, *rd_to_scan = NULL*/; + + if (unlikely(!st_mem_ksm)) { + st_mem_ksm = rrdset_create_localhost( + "mem" + , "ksm" + , NULL + , "ksm" + , NULL + , "Kernel Same Page Merging" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_KSM_NAME + , NETDATA_CHART_PRIO_MEM_KSM + , update_every + , RRDSET_TYPE_AREA + ); + + rd_shared = rrddim_add(st_mem_ksm, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_unshared = rrddim_add(st_mem_ksm, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_sharing = rrddim_add(st_mem_ksm, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_volatile = rrddim_add(st_mem_ksm, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + //rd_to_scan = rrddim_add(st_mem_ksm, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_mem_ksm); + + rrddim_set_by_pointer(st_mem_ksm, rd_shared, pages_shared * page_size); + rrddim_set_by_pointer(st_mem_ksm, rd_unshared, pages_unshared * page_size); + rrddim_set_by_pointer(st_mem_ksm, rd_sharing, pages_sharing * page_size); + rrddim_set_by_pointer(st_mem_ksm, rd_volatile, pages_volatile * page_size); + //rrddim_set_by_pointer(st_mem_ksm, rd_to_scan, pages_to_scan * page_size); + + rrdset_done(st_mem_ksm); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_mem_ksm_savings = NULL; + static RRDDIM *rd_savings = NULL, *rd_offered = NULL; + + if (unlikely(!st_mem_ksm_savings)) { + st_mem_ksm_savings = rrdset_create_localhost( + "mem" + , "ksm_savings" + , NULL + , "ksm" + , NULL + , "Kernel Same Page Merging Savings" + , "MB" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_KSM_NAME + , NETDATA_CHART_PRIO_MEM_KSM_SAVINGS + , update_every + , RRDSET_TYPE_AREA + ); + + rd_savings = rrddim_add(st_mem_ksm_savings, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_offered = rrddim_add(st_mem_ksm_savings, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_mem_ksm_savings); + + rrddim_set_by_pointer(st_mem_ksm_savings, rd_savings, saved * page_size); + rrddim_set_by_pointer(st_mem_ksm_savings, rd_offered, offered * page_size); + + rrdset_done(st_mem_ksm_savings); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_mem_ksm_ratios = NULL; + static RRDDIM *rd_savings = NULL; + + if (unlikely(!st_mem_ksm_ratios)) { + st_mem_ksm_ratios = 
rrdset_create_localhost( + "mem" + , "ksm_ratios" + , NULL + , "ksm" + , NULL + , "Kernel Same Page Merging Effectiveness" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_KSM_NAME + , NETDATA_CHART_PRIO_MEM_KSM_RATIOS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_savings = rrddim_add(st_mem_ksm_ratios, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_mem_ksm_ratios); + + rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, (saved * 1000000) / offered); + + rrdset_done(st_mem_ksm_ratios); + } + + return 0; +} diff --git a/collectors/proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c new file mode 100644 index 000000000..1aaceb908 --- /dev/null +++ b/collectors/proc.plugin/zfs_common.c @@ -0,0 +1,714 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "zfs_common.h" + +struct arcstats arcstats = { 0 }; + +void generate_charts_arcstats(const char *plugin, const char *module, int update_every) { + + // ARC reads + unsigned long long aread = arcstats.hits + arcstats.misses; + + // Demand reads + unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits; + unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses; + unsigned long long dread = dhit + dmiss; + + // Prefetch reads + unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits; + unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses; + unsigned long long pread = phit + pmiss; + + // Metadata reads + unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits; + unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses; + unsigned long long mread = mhit + mmiss; + + // l2 reads + unsigned long long l2hit = arcstats.l2_hits; + unsigned long long l2miss = arcstats.l2_misses; + unsigned long long l2read = l2hit + l2miss; + + // -------------------------------------------------------------------- + + { + static RRDSET *st_arc_size = NULL; + static RRDDIM *rd_arc_size = NULL; + static RRDDIM *rd_arc_target_size = NULL; + static RRDDIM *rd_arc_target_min_size = NULL; + static RRDDIM *rd_arc_target_max_size = NULL; + + if (unlikely(!st_arc_size)) { + st_arc_size = rrdset_create_localhost( + "zfs" + , "arc_size" + , NULL + , ZFS_FAMILY_SIZE + , NULL + , "ZFS ARC Size" + , "MB" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_ARC_SIZE + , update_every + , RRDSET_TYPE_AREA + ); + + rd_arc_size = rrddim_add(st_arc_size, "size", "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_arc_target_size = rrddim_add(st_arc_size, "target", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_arc_target_min_size = rrddim_add(st_arc_size, "min", "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_arc_target_max_size = rrddim_add(st_arc_size, "max", "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_arc_size); + + rrddim_set_by_pointer(st_arc_size, rd_arc_size, arcstats.size); + rrddim_set_by_pointer(st_arc_size, rd_arc_target_size, arcstats.c); + rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min); + rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max); + rrdset_done(st_arc_size); + } + + // -------------------------------------------------------------------- + + if(likely(arcstats.l2exist)) { + static RRDSET *st_l2_size = NULL; + static RRDDIM *rd_l2_size = NULL; + static RRDDIM *rd_l2_asize = NULL; + + 
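+        // (l2_size is the logical size of the L2ARC contents; l2_asize is
+        // the space actually allocated on the cache device, which can be
+        // smaller when L2ARC compression is active)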
if (unlikely(!st_l2_size)) { + st_l2_size = rrdset_create_localhost( + "zfs" + , "l2_size" + , NULL + , ZFS_FAMILY_SIZE + , NULL + , "ZFS L2 ARC Size" + , "MB" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_L2_SIZE + , update_every + , RRDSET_TYPE_AREA + ); + + rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_l2_size = rrddim_add(st_l2_size, "size", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_l2_size); + + rrddim_set_by_pointer(st_l2_size, rd_l2_size, arcstats.l2_size); + rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize); + rrdset_done(st_l2_size); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_reads = NULL; + static RRDDIM *rd_aread = NULL; + static RRDDIM *rd_dread = NULL; + static RRDDIM *rd_pread = NULL; + static RRDDIM *rd_mread = NULL; + static RRDDIM *rd_l2read = NULL; + + if (unlikely(!st_reads)) { + st_reads = rrdset_create_localhost( + "zfs" + , "reads" + , NULL + , ZFS_FAMILY_ACCESSES + , NULL + , "ZFS Reads" + , "reads/s" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_READS + , update_every + , RRDSET_TYPE_AREA + ); + + rd_aread = rrddim_add(st_reads, "areads", "arc", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_dread = rrddim_add(st_reads, "dreads", "demand", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_pread = rrddim_add(st_reads, "preads", "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_mread = rrddim_add(st_reads, "mreads", "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL); + + if(arcstats.l2exist) + rd_l2read = rrddim_add(st_reads, "l2reads", "l2", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_reads); + + rrddim_set_by_pointer(st_reads, rd_aread, aread); + rrddim_set_by_pointer(st_reads, rd_dread, dread); + rrddim_set_by_pointer(st_reads, rd_pread, pread); + rrddim_set_by_pointer(st_reads, rd_mread, mread); + + if(arcstats.l2exist) + rrddim_set_by_pointer(st_reads, rd_l2read, l2read); + + rrdset_done(st_reads); + } + + // -------------------------------------------------------------------- + + if(likely(arcstats.l2exist)) { + static RRDSET *st_l2bytes = NULL; + static RRDDIM *rd_l2_read_bytes = NULL; + static RRDDIM *rd_l2_write_bytes = NULL; + + if (unlikely(!st_l2bytes)) { + st_l2bytes = rrdset_create_localhost( + "zfs" + , "bytes" + , NULL + , ZFS_FAMILY_ACCESSES + , NULL + , "ZFS ARC L2 Read/Write Rate" + , "kilobytes/s" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_IO + , update_every + , RRDSET_TYPE_AREA + ); + + rd_l2_read_bytes = rrddim_add(st_l2bytes, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_l2bytes); + + rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes); + rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes); + rrdset_done(st_l2bytes); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_ahits = NULL; + static RRDDIM *rd_ahits = NULL; + static RRDDIM *rd_amisses = NULL; + + if (unlikely(!st_ahits)) { + st_ahits = rrdset_create_localhost( + "zfs" + , "hits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS ARC Hits" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_HITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_ahits = rrddim_add(st_ahits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_amisses = rrddim_add(st_ahits, 
"misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_ahits); + + rrddim_set_by_pointer(st_ahits, rd_ahits, arcstats.hits); + rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses); + rrdset_done(st_ahits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_dhits = NULL; + static RRDDIM *rd_dhits = NULL; + static RRDDIM *rd_dmisses = NULL; + + if (unlikely(!st_dhits)) { + st_dhits = rrdset_create_localhost( + "zfs" + , "dhits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS Demand Hits" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_DHITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_dhits = rrddim_add(st_dhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_dhits); + + rrddim_set_by_pointer(st_dhits, rd_dhits, dhit); + rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss); + rrdset_done(st_dhits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_phits = NULL; + static RRDDIM *rd_phits = NULL; + static RRDDIM *rd_pmisses = NULL; + + if (unlikely(!st_phits)) { + st_phits = rrdset_create_localhost( + "zfs" + , "phits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS Prefetch Hits" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_PHITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_phits = rrddim_add(st_phits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_phits); + + rrddim_set_by_pointer(st_phits, rd_phits, phit); + rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss); + rrdset_done(st_phits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_mhits = NULL; + static RRDDIM *rd_mhits = NULL; + static RRDDIM *rd_mmisses = NULL; + + if (unlikely(!st_mhits)) { + st_mhits = rrdset_create_localhost( + "zfs" + , "mhits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS Metadata Hits" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_MHITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_mhits = rrddim_add(st_mhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_mhits); + + rrddim_set_by_pointer(st_mhits, rd_mhits, mhit); + rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss); + rrdset_done(st_mhits); + } + + // -------------------------------------------------------------------- + + if(likely(arcstats.l2exist)) { + static RRDSET *st_l2hits = NULL; + static RRDDIM *rd_l2hits = NULL; + static RRDDIM *rd_l2misses = NULL; + + if (unlikely(!st_l2hits)) { + st_l2hits = rrdset_create_localhost( + "zfs" + , "l2hits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS L2 Hits" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_L2HITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_l2hits = rrddim_add(st_l2hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_l2hits); + + rrddim_set_by_pointer(st_l2hits, rd_l2hits, l2hit); + rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss); + 
rrdset_done(st_l2hits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_list_hits = NULL; + static RRDDIM *rd_mfu = NULL; + static RRDDIM *rd_mru = NULL; + static RRDDIM *rd_mfug = NULL; + static RRDDIM *rd_mrug = NULL; + + if (unlikely(!st_list_hits)) { + st_list_hits = rrdset_create_localhost( + "zfs" + , "list_hits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS List Hits" + , "hits/s" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_LIST_HITS + , update_every + , RRDSET_TYPE_AREA + ); + + rd_mfu = rrddim_add(st_list_hits, "mfu", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_mru = rrddim_add(st_list_hits, "mru", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_list_hits); + + rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits); + rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits); + rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits); + rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits); + rrdset_done(st_list_hits); + } +} + +void generate_charts_arc_summary(const char *plugin, const char *module, int update_every) { + unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses; + unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits; + unsigned long long real_misses = arc_accesses_total - real_hits; + + //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits); + + unsigned long long arc_size = arcstats.size; + unsigned long long mru_size = arcstats.p; + //unsigned long long target_min_size = arcstats.c_min; + //unsigned long long target_max_size = arcstats.c_max; + unsigned long long target_size = arcstats.c; + //unsigned long long target_size_ratio = (target_max_size / target_min_size); + + unsigned long long mfu_size; + if(arc_size > target_size) + mfu_size = arc_size - mru_size; + else + mfu_size = target_size - mru_size; + + // -------------------------------------------------------------------- + + { + static RRDSET *st_arc_size_breakdown = NULL; + static RRDDIM *rd_most_recent = NULL; + static RRDDIM *rd_most_frequent = NULL; + + if (unlikely(!st_arc_size_breakdown)) { + st_arc_size_breakdown = rrdset_create_localhost( + "zfs" + , "arc_size_breakdown" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS ARC Size Breakdown" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_most_recent = rrddim_add(st_arc_size_breakdown, "recent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL); + rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL); + } + else + rrdset_next(st_arc_size_breakdown); + + rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent, mru_size); + rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size); + rrdset_done(st_arc_size_breakdown); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_memory = NULL; +#ifndef __FreeBSD__ + static RRDDIM *rd_direct = NULL; +#endif + static RRDDIM *rd_throttled = NULL; +#ifndef __FreeBSD__ + static RRDDIM *rd_indirect = NULL; +#endif + + if (unlikely(!st_memory)) { + st_memory = 
rrdset_create_localhost( + "zfs" + , "memory_ops" + , NULL + , ZFS_FAMILY_OPERATIONS + , NULL + , "ZFS Memory Operations" + , "operations/s" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_MEMORY_OPS + , update_every + , RRDSET_TYPE_LINE + ); + +#ifndef __FreeBSD__ + rd_direct = rrddim_add(st_memory, "direct", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); +#endif + rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); +#ifndef __FreeBSD__ + rd_indirect = rrddim_add(st_memory, "indirect", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); +#endif + } + else + rrdset_next(st_memory); + +#ifndef __FreeBSD__ + rrddim_set_by_pointer(st_memory, rd_direct, arcstats.memory_direct_count); +#endif + rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count); +#ifndef __FreeBSD__ + rrddim_set_by_pointer(st_memory, rd_indirect, arcstats.memory_indirect_count); +#endif + rrdset_done(st_memory); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_important_ops = NULL; + static RRDDIM *rd_deleted = NULL; + static RRDDIM *rd_mutex_misses = NULL; + static RRDDIM *rd_evict_skips = NULL; + static RRDDIM *rd_hash_collisions = NULL; + + if (unlikely(!st_important_ops)) { + st_important_ops = rrdset_create_localhost( + "zfs" + , "important_ops" + , NULL + , ZFS_FAMILY_OPERATIONS + , NULL + , "ZFS Important Operations" + , "operations/s" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_evict_skips = rrddim_add(st_important_ops, "eskip", "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_deleted = rrddim_add(st_important_ops, "deleted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_mutex_misses = rrddim_add(st_important_ops, "mtxmis", "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_important_ops); + + rrddim_set_by_pointer(st_important_ops, rd_deleted, arcstats.deleted); + rrddim_set_by_pointer(st_important_ops, rd_evict_skips, arcstats.evict_skip); + rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss); + rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions); + rrdset_done(st_important_ops); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_actual_hits = NULL; + static RRDDIM *rd_actual_hits = NULL; + static RRDDIM *rd_actual_misses = NULL; + + if (unlikely(!st_actual_hits)) { + st_actual_hits = rrdset_create_localhost( + "zfs" + , "actual_hits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS Actual Cache Hits" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_actual_hits = rrddim_add(st_actual_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_actual_hits); + + rrddim_set_by_pointer(st_actual_hits, rd_actual_hits, real_hits); + rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses); + rrdset_done(st_actual_hits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_demand_data_hits = NULL; + static RRDDIM *rd_demand_data_hits = NULL; + static RRDDIM *rd_demand_data_misses = NULL; + + if 
(unlikely(!st_demand_data_hits)) { + st_demand_data_hits = rrdset_create_localhost( + "zfs" + , "demand_data_hits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS Data Demand Efficiency" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_demand_data_hits = rrddim_add(st_demand_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_demand_data_hits); + + rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits, arcstats.demand_data_hits); + rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses); + rrdset_done(st_demand_data_hits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_prefetch_data_hits = NULL; + static RRDDIM *rd_prefetch_data_hits = NULL; + static RRDDIM *rd_prefetch_data_misses = NULL; + + if (unlikely(!st_prefetch_data_hits)) { + st_prefetch_data_hits = rrdset_create_localhost( + "zfs" + , "prefetch_data_hits" + , NULL + , ZFS_FAMILY_EFFICIENCY + , NULL + , "ZFS Data Prefetch Efficiency" + , "percentage" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_prefetch_data_hits = rrddim_add(st_prefetch_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + else + rrdset_next(st_prefetch_data_hits); + + rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits, arcstats.prefetch_data_hits); + rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses); + rrdset_done(st_prefetch_data_hits); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_hash_elements = NULL; + static RRDDIM *rd_hash_elements_current = NULL; + static RRDDIM *rd_hash_elements_max = NULL; + + if (unlikely(!st_hash_elements)) { + st_hash_elements = rrdset_create_localhost( + "zfs" + , "hash_elements" + , NULL + , ZFS_FAMILY_HASH + , NULL + , "ZFS ARC Hash Elements" + , "elements" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_hash_elements_max = rrddim_add(st_hash_elements, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_hash_elements); + + rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements); + rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max); + rrdset_done(st_hash_elements); + } + + // -------------------------------------------------------------------- + + { + static RRDSET *st_hash_chains = NULL; + static RRDDIM *rd_hash_chains_current = NULL; + static RRDDIM *rd_hash_chains_max = NULL; + + if (unlikely(!st_hash_chains)) { + st_hash_chains = rrdset_create_localhost( + "zfs" + , "hash_chains" + , NULL + , ZFS_FAMILY_HASH + , NULL + , "ZFS ARC Hash Chains" + , "chains" + , plugin + , module + , NETDATA_CHART_PRIO_ZFS_HASH_CHAINS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + 
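+        // (both dimensions are absolute gauges: hash_chains is the current
+        // number of collision chains, hash_chain_max the longest chain
+        // observed so far)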
rd_hash_chains_max = rrddim_add(st_hash_chains, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_hash_chains); + + rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains); + rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max); + rrdset_done(st_hash_chains); + } + + // -------------------------------------------------------------------- + +} \ No newline at end of file diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h new file mode 100644 index 000000000..fab54f59a --- /dev/null +++ b/collectors/proc.plugin/zfs_common.h @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ZFS_COMMON_H +#define NETDATA_ZFS_COMMON_H 1 + +#include "../../daemon/common.h" + +#define ZFS_FAMILY_SIZE "size" +#define ZFS_FAMILY_EFFICIENCY "efficiency" +#define ZFS_FAMILY_ACCESSES "accesses" +#define ZFS_FAMILY_OPERATIONS "operations" +#define ZFS_FAMILY_HASH "hashes" + +struct arcstats { + // values + unsigned long long hits; + unsigned long long misses; + unsigned long long demand_data_hits; + unsigned long long demand_data_misses; + unsigned long long demand_metadata_hits; + unsigned long long demand_metadata_misses; + unsigned long long prefetch_data_hits; + unsigned long long prefetch_data_misses; + unsigned long long prefetch_metadata_hits; + unsigned long long prefetch_metadata_misses; + unsigned long long mru_hits; + unsigned long long mru_ghost_hits; + unsigned long long mfu_hits; + unsigned long long mfu_ghost_hits; + unsigned long long deleted; + unsigned long long mutex_miss; + unsigned long long evict_skip; + unsigned long long evict_not_enough; + unsigned long long evict_l2_cached; + unsigned long long evict_l2_eligible; + unsigned long long evict_l2_ineligible; + unsigned long long evict_l2_skip; + unsigned long long hash_elements; + unsigned long long hash_elements_max; + unsigned long long hash_collisions; + unsigned long long hash_chains; + unsigned long long hash_chain_max; + unsigned long long p; + unsigned long long c; + unsigned long long c_min; + unsigned long long c_max; + unsigned long long size; + unsigned long long hdr_size; + unsigned long long data_size; + unsigned long long metadata_size; + unsigned long long other_size; + unsigned long long anon_size; + unsigned long long anon_evictable_data; + unsigned long long anon_evictable_metadata; + unsigned long long mru_size; + unsigned long long mru_evictable_data; + unsigned long long mru_evictable_metadata; + unsigned long long mru_ghost_size; + unsigned long long mru_ghost_evictable_data; + unsigned long long mru_ghost_evictable_metadata; + unsigned long long mfu_size; + unsigned long long mfu_evictable_data; + unsigned long long mfu_evictable_metadata; + unsigned long long mfu_ghost_size; + unsigned long long mfu_ghost_evictable_data; + unsigned long long mfu_ghost_evictable_metadata; + unsigned long long l2_hits; + unsigned long long l2_misses; + unsigned long long l2_feeds; + unsigned long long l2_rw_clash; + unsigned long long l2_read_bytes; + unsigned long long l2_write_bytes; + unsigned long long l2_writes_sent; + unsigned long long l2_writes_done; + unsigned long long l2_writes_error; + unsigned long long l2_writes_lock_retry; + unsigned long long l2_evict_lock_retry; + unsigned long long l2_evict_reading; + unsigned long long l2_evict_l1cached; + unsigned long long l2_free_on_write; + unsigned long long l2_cdata_free_on_write; + unsigned long long l2_abort_lowmem; + unsigned long long 
l2_cksum_bad; + unsigned long long l2_io_error; + unsigned long long l2_size; + unsigned long long l2_asize; + unsigned long long l2_hdr_size; + unsigned long long l2_compress_successes; + unsigned long long l2_compress_zeros; + unsigned long long l2_compress_failures; + unsigned long long memory_throttle_count; + unsigned long long duplicate_buffers; + unsigned long long duplicate_buffers_size; + unsigned long long duplicate_reads; + unsigned long long memory_direct_count; + unsigned long long memory_indirect_count; + unsigned long long arc_no_grow; + unsigned long long arc_tempreserve; + unsigned long long arc_loaned_bytes; + unsigned long long arc_prune; + unsigned long long arc_meta_used; + unsigned long long arc_meta_limit; + unsigned long long arc_meta_max; + unsigned long long arc_meta_min; + unsigned long long arc_need_free; + unsigned long long arc_sys_free; + + // flags + int l2exist; +}; + +void generate_charts_arcstats(const char *plugin, const char *module, int update_every); +void generate_charts_arc_summary(const char *plugin, const char *module, int update_every); + +#endif //NETDATA_ZFS_COMMON_H diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am new file mode 100644 index 000000000..5f214e436 --- /dev/null +++ b/collectors/python.d.plugin/Makefile.am @@ -0,0 +1,244 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + python.d.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + python.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + python.d.plugin \ + $(NULL) + +dist_noinst_DATA = \ + python.d.plugin.in \ + README.md \ + $(NULL) + +dist_python_SCRIPTS = \ + $(NULL) + +dist_python_DATA = \ + $(NULL) + +userpythonconfigdir=$(configdir)/python.d +dist_userpythonconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +pythonconfigdir=$(libconfigdir)/python.d +dist_pythonconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +include adaptec_raid/Makefile.inc +include apache/Makefile.inc +include beanstalk/Makefile.inc +include bind_rndc/Makefile.inc +include boinc/Makefile.inc +include ceph/Makefile.inc +include chrony/Makefile.inc +include couchdb/Makefile.inc +include cpufreq/Makefile.inc +include cpuidle/Makefile.inc +include dnsdist/Makefile.inc +include dns_query_time/Makefile.inc +include dockerd/Makefile.inc +include dovecot/Makefile.inc +include elasticsearch/Makefile.inc +include example/Makefile.inc +include exim/Makefile.inc +include fail2ban/Makefile.inc +include freeradius/Makefile.inc +include go_expvar/Makefile.inc +include haproxy/Makefile.inc +include hddtemp/Makefile.inc +include httpcheck/Makefile.inc +include icecast/Makefile.inc +include ipfs/Makefile.inc +include isc_dhcpd/Makefile.inc +include linux_power_supply/Makefile.inc +include litespeed/Makefile.inc +include logind/Makefile.inc +include mdstat/Makefile.inc +include megacli/Makefile.inc +include memcached/Makefile.inc +include mongodb/Makefile.inc +include monit/Makefile.inc +include mysql/Makefile.inc +include nginx/Makefile.inc +include nginx_plus/Makefile.inc +include nsd/Makefile.inc +include ntpd/Makefile.inc +include ovpn_status_log/Makefile.inc +include phpfpm/Makefile.inc +include portcheck/Makefile.inc +include postfix/Makefile.inc +include postgres/Makefile.inc +include powerdns/Makefile.inc +include proxysql/Makefile.inc +include puppet/Makefile.inc +include rabbitmq/Makefile.inc +include redis/Makefile.inc +include 
rethinkdbs/Makefile.inc +include retroshare/Makefile.inc +include samba/Makefile.inc +include sensors/Makefile.inc +include smartd_log/Makefile.inc +include spigotmc/Makefile.inc +include springboot/Makefile.inc +include squid/Makefile.inc +include tomcat/Makefile.inc +include traefik/Makefile.inc +include unbound/Makefile.inc +include uwsgi/Makefile.inc +include varnish/Makefile.inc +include w1sensor/Makefile.inc +include web_log/Makefile.inc + +pythonmodulesdir=$(pythondir)/python_modules +dist_pythonmodules_DATA = \ + python_modules/__init__.py \ + $(NULL) + +basesdir=$(pythonmodulesdir)/bases +dist_bases_DATA = \ + python_modules/bases/__init__.py \ + python_modules/bases/charts.py \ + python_modules/bases/collection.py \ + python_modules/bases/loaders.py \ + python_modules/bases/loggers.py \ + $(NULL) + +bases_framework_servicesdir=$(basesdir)/FrameworkServices +dist_bases_framework_services_DATA = \ + python_modules/bases/FrameworkServices/__init__.py \ + python_modules/bases/FrameworkServices/ExecutableService.py \ + python_modules/bases/FrameworkServices/LogService.py \ + python_modules/bases/FrameworkServices/MySQLService.py \ + python_modules/bases/FrameworkServices/SimpleService.py \ + python_modules/bases/FrameworkServices/SocketService.py \ + python_modules/bases/FrameworkServices/UrlService.py \ + $(NULL) + +third_partydir=$(pythonmodulesdir)/third_party +dist_third_party_DATA = \ + python_modules/third_party/__init__.py \ + python_modules/third_party/ordereddict.py \ + python_modules/third_party/lm_sensors.py \ + python_modules/third_party/mcrcon.py \ + python_modules/third_party/boinc_client.py \ + python_modules/third_party/monotonic.py \ + $(NULL) + +pythonyaml2dir=$(pythonmodulesdir)/pyyaml2 +dist_pythonyaml2_DATA = \ + python_modules/pyyaml2/__init__.py \ + python_modules/pyyaml2/composer.py \ + python_modules/pyyaml2/constructor.py \ + python_modules/pyyaml2/cyaml.py \ + python_modules/pyyaml2/dumper.py \ + python_modules/pyyaml2/emitter.py \ + python_modules/pyyaml2/error.py \ + python_modules/pyyaml2/events.py \ + python_modules/pyyaml2/loader.py \ + python_modules/pyyaml2/nodes.py \ + python_modules/pyyaml2/parser.py \ + python_modules/pyyaml2/reader.py \ + python_modules/pyyaml2/representer.py \ + python_modules/pyyaml2/resolver.py \ + python_modules/pyyaml2/scanner.py \ + python_modules/pyyaml2/serializer.py \ + python_modules/pyyaml2/tokens.py \ + $(NULL) + +pythonyaml3dir=$(pythonmodulesdir)/pyyaml3 +dist_pythonyaml3_DATA = \ + python_modules/pyyaml3/__init__.py \ + python_modules/pyyaml3/composer.py \ + python_modules/pyyaml3/constructor.py \ + python_modules/pyyaml3/cyaml.py \ + python_modules/pyyaml3/dumper.py \ + python_modules/pyyaml3/emitter.py \ + python_modules/pyyaml3/error.py \ + python_modules/pyyaml3/events.py \ + python_modules/pyyaml3/loader.py \ + python_modules/pyyaml3/nodes.py \ + python_modules/pyyaml3/parser.py \ + python_modules/pyyaml3/reader.py \ + python_modules/pyyaml3/representer.py \ + python_modules/pyyaml3/resolver.py \ + python_modules/pyyaml3/scanner.py \ + python_modules/pyyaml3/serializer.py \ + python_modules/pyyaml3/tokens.py \ + $(NULL) + +python_urllib3dir=$(pythonmodulesdir)/urllib3 +dist_python_urllib3_DATA = \ + python_modules/urllib3/__init__.py \ + python_modules/urllib3/_collections.py \ + python_modules/urllib3/connection.py \ + python_modules/urllib3/connectionpool.py \ + python_modules/urllib3/exceptions.py \ + python_modules/urllib3/fields.py \ + python_modules/urllib3/filepost.py \ + 
python_modules/urllib3/response.py \ + python_modules/urllib3/poolmanager.py \ + python_modules/urllib3/request.py \ + $(NULL) + +python_urllib3_utildir=$(python_urllib3dir)/util +dist_python_urllib3_util_DATA = \ + python_modules/urllib3/util/__init__.py \ + python_modules/urllib3/util/connection.py \ + python_modules/urllib3/util/request.py \ + python_modules/urllib3/util/response.py \ + python_modules/urllib3/util/retry.py \ + python_modules/urllib3/util/selectors.py \ + python_modules/urllib3/util/ssl_.py \ + python_modules/urllib3/util/timeout.py \ + python_modules/urllib3/util/url.py \ + python_modules/urllib3/util/wait.py \ + $(NULL) + +python_urllib3_packagesdir=$(python_urllib3dir)/packages +dist_python_urllib3_packages_DATA = \ + python_modules/urllib3/packages/__init__.py \ + python_modules/urllib3/packages/ordered_dict.py \ + python_modules/urllib3/packages/six.py \ + $(NULL) + +python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports +dist_python_urllib3_backports_DATA = \ + python_modules/urllib3/packages/backports/__init__.py \ + python_modules/urllib3/packages/backports/makefile.py \ + $(NULL) + +python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname +dist_python_urllib3_ssl_match_hostname_DATA = \ + python_modules/urllib3/packages/ssl_match_hostname/__init__.py \ + python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \ + $(NULL) + +python_urllib3_contribdir=$(python_urllib3dir)/contrib +dist_python_urllib3_contrib_DATA = \ + python_modules/urllib3/contrib/__init__.py \ + python_modules/urllib3/contrib/appengine.py \ + python_modules/urllib3/contrib/ntlmpool.py \ + python_modules/urllib3/contrib/pyopenssl.py \ + python_modules/urllib3/contrib/securetransport.py \ + python_modules/urllib3/contrib/socks.py \ + $(NULL) + +python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport +dist_python_urllib3_securetransport_DATA = \ + python_modules/urllib3/contrib/_securetransport/__init__.py \ + python_modules/urllib3/contrib/_securetransport/bindings.py \ + python_modules/urllib3/contrib/_securetransport/low_level.py \ + $(NULL) diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in new file mode 100644 index 000000000..ca2743d58 --- /dev/null +++ b/collectors/python.d.plugin/Makefile.in @@ -0,0 +1,1987 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
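The Makefile.am above assembles one automake input out of roughly sixty per-module fragments: each `include xxx/Makefile.inc` line appends that module's chart script, stock configuration, and README to the shared `dist_*` lists, while `include $(top_srcdir)/build/subst.inc` together with `SUFFIXES = .in` supplies the substitution rule that builds the installable `python.d.plugin` script from `python.d.plugin.in` (which is why the built script is listed in CLEANFILES rather than shipped directly). Automake inlines every fragment, header comments included, when it generates the Makefile.in that follows. A representative fragment, reconstructed here from the aggregated lists using the `example` module (the exact upstream file may differ), looks like:

    # example/Makefile.inc
    # THIS IS NOT A COMPLETE Makefile: it is textually included by the parent
    # Makefile.am, so all paths are relative to collectors/python.d.plugin/

    # install these files
    dist_python_DATA       += example/example.chart.py
    dist_pythonconfig_DATA += example/example.conf

    # do not install these files, but include them in the distribution
    dist_noinst_DATA       += example/README.md example/Makefile.inc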
+ +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?)
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc \ + $(srcdir)/adaptec_raid/Makefile.inc \ + $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \ + $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \ + $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \ + $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \ + $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc \ + $(srcdir)/dns_query_time/Makefile.inc \ + $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc \ + $(srcdir)/elasticsearch/Makefile.inc \ + $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \ + $(srcdir)/fail2ban/Makefile.inc \ + $(srcdir)/freeradius/Makefile.inc \ + $(srcdir)/go_expvar/Makefile.inc \ + $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \ + $(srcdir)/httpcheck/Makefile.inc \ + $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \ + $(srcdir)/isc_dhcpd/Makefile.inc \ + $(srcdir)/linux_power_supply/Makefile.inc \ + $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \ + $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc \ + $(srcdir)/memcached/Makefile.inc \ + $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \ + $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \ + $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc \ + $(srcdir)/ntpd/Makefile.inc \ + $(srcdir)/ovpn_status_log/Makefile.inc \ + $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc \ + $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \ + $(srcdir)/powerdns/Makefile.inc \ + $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \ + $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \ + $(srcdir)/rethinkdbs/Makefile.inc \ + 
$(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc \ + $(srcdir)/sensors/Makefile.inc \ + $(srcdir)/smartd_log/Makefile.inc \ + $(srcdir)/spigotmc/Makefile.inc \ + $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \ + $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc \ + $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc \ + $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc \ + $(srcdir)/web_log/Makefile.inc $(srcdir)/Makefile.in \ + $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ + $(dist_python_SCRIPTS) $(dist_bases_DATA) \ + $(dist_bases_framework_services_DATA) $(dist_libconfig_DATA) \ + $(dist_noinst_DATA) $(dist_python_DATA) \ + $(dist_python_urllib3_DATA) \ + $(dist_python_urllib3_backports_DATA) \ + $(dist_python_urllib3_contrib_DATA) \ + $(dist_python_urllib3_packages_DATA) \ + $(dist_python_urllib3_securetransport_DATA) \ + $(dist_python_urllib3_ssl_match_hostname_DATA) \ + $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \ + $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \ + $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \ + $(dist_userpythonconfig_DATA) +subdir = collectors/python.d.plugin +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \ + "$(DESTDIR)$(basesdir)" \ + "$(DESTDIR)$(bases_framework_servicesdir)" \ + "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \ + "$(DESTDIR)$(python_urllib3dir)" \ + "$(DESTDIR)$(python_urllib3_backportsdir)" \ + "$(DESTDIR)$(python_urllib3_contribdir)" \ + "$(DESTDIR)$(python_urllib3_packagesdir)" \ + "$(DESTDIR)$(python_urllib3_securetransportdir)" \ + "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \ + "$(DESTDIR)$(python_urllib3_utildir)" \ + "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \ + "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \ + "$(DESTDIR)$(third_partydir)" \ + "$(DESTDIR)$(userpythonconfigdir)" +SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \ + $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \ + $(dist_python_urllib3_DATA) \ + $(dist_python_urllib3_backports_DATA) \ + $(dist_python_urllib3_contrib_DATA) \ + $(dist_python_urllib3_packages_DATA) \ + $(dist_python_urllib3_securetransport_DATA) \ + $(dist_python_urllib3_ssl_match_hostname_DATA) \ + $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \ + $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \ + $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \ + $(dist_userpythonconfig_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ 
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + python.d.plugin \ + $(NULL) + +SUFFIXES = .in +dist_libconfig_DATA = \ + python.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + python.d.plugin \ + $(NULL) + + +# do not install these files, but include them in the distribution +dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \ + adaptec_raid/README.md adaptec_raid/Makefile.inc \ + apache/README.md apache/Makefile.inc beanstalk/README.md \ + beanstalk/Makefile.inc bind_rndc/README.md \ + bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \ + ceph/README.md ceph/Makefile.inc chrony/README.md \ + chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \ + cpufreq/README.md cpufreq/Makefile.inc cpuidle/README.md \ + cpuidle/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \ + dns_query_time/README.md dns_query_time/Makefile.inc \ + dockerd/README.md dockerd/Makefile.inc dovecot/README.md \ + dovecot/Makefile.inc elasticsearch/README.md \ + elasticsearch/Makefile.inc example/README.md \ + example/Makefile.inc exim/README.md exim/Makefile.inc \ + fail2ban/README.md fail2ban/Makefile.inc freeradius/README.md \ + freeradius/Makefile.inc go_expvar/README.md \ + go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \ + hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \ + httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \ + ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \ + isc_dhcpd/Makefile.inc linux_power_supply/README.md \ + linux_power_supply/Makefile.inc litespeed/README.md \ + litespeed/Makefile.inc logind/README.md logind/Makefile.inc \ + mdstat/README.md mdstat/Makefile.inc megacli/README.md \ + megacli/Makefile.inc memcached/README.md \ + memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \ + monit/README.md monit/Makefile.inc mysql/README.md \ + mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \ + nginx_plus/README.md nginx_plus/Makefile.inc nsd/README.md \ + nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \ + ovpn_status_log/README.md ovpn_status_log/Makefile.inc \ + phpfpm/README.md phpfpm/Makefile.inc portcheck/README.md \ + portcheck/Makefile.inc postfix/README.md postfix/Makefile.inc \ + postgres/README.md postgres/Makefile.inc powerdns/README.md \ + powerdns/Makefile.inc proxysql/README.md proxysql/Makefile.inc \ + puppet/README.md puppet/Makefile.inc rabbitmq/README.md \ + rabbitmq/Makefile.inc redis/README.md redis/Makefile.inc \ + rethinkdbs/README.md rethinkdbs/Makefile.inc \ + retroshare/README.md retroshare/Makefile.inc samba/README.md \ + samba/Makefile.inc sensors/README.md sensors/Makefile.inc \ + smartd_log/README.md smartd_log/Makefile.inc \ + spigotmc/README.md spigotmc/Makefile.inc springboot/README.md \ + springboot/Makefile.inc squid/README.md squid/Makefile.inc \ + tomcat/README.md tomcat/Makefile.inc traefik/README.md \ + traefik/Makefile.inc unbound/README.md unbound/Makefile.inc \ + uwsgi/README.md uwsgi/Makefile.inc varnish/README.md \ + varnish/Makefile.inc w1sensor/README.md w1sensor/Makefile.inc \ + web_log/README.md web_log/Makefile.inc +dist_python_SCRIPTS = \ + $(NULL) + + +# install these files +dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \ + apache/apache.chart.py beanstalk/beanstalk.chart.py \ + bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \ + ceph/ceph.chart.py chrony/chrony.chart.py \ + couchdb/couchdb.chart.py cpufreq/cpufreq.chart.py \ + cpuidle/cpuidle.chart.py dnsdist/dnsdist.chart.py \ + dns_query_time/dns_query_time.chart.py \ + dockerd/dockerd.chart.py dovecot/dovecot.chart.py \ + elasticsearch/elasticsearch.chart.py example/example.chart.py \ + exim/exim.chart.py fail2ban/fail2ban.chart.py \ + freeradius/freeradius.chart.py go_expvar/go_expvar.chart.py \ + haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \ + httpcheck/httpcheck.chart.py icecast/icecast.chart.py \ + ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \ + linux_power_supply/linux_power_supply.chart.py \ + litespeed/litespeed.chart.py logind/logind.chart.py \ + mdstat/mdstat.chart.py megacli/megacli.chart.py \ + memcached/memcached.chart.py mongodb/mongodb.chart.py \ + monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \ + nginx_plus/nginx_plus.chart.py nsd/nsd.chart.py \ + ntpd/ntpd.chart.py ovpn_status_log/ovpn_status_log.chart.py \ + phpfpm/phpfpm.chart.py portcheck/portcheck.chart.py \ + postfix/postfix.chart.py postgres/postgres.chart.py \ + powerdns/powerdns.chart.py proxysql/proxysql.chart.py \ + puppet/puppet.chart.py rabbitmq/rabbitmq.chart.py \ + redis/redis.chart.py rethinkdbs/rethinkdbs.chart.py \ + retroshare/retroshare.chart.py samba/samba.chart.py \ + sensors/sensors.chart.py smartd_log/smartd_log.chart.py \ + spigotmc/spigotmc.chart.py springboot/springboot.chart.py \ + squid/squid.chart.py tomcat/tomcat.chart.py \ + traefik/traefik.chart.py unbound/unbound.chart.py \ + uwsgi/uwsgi.chart.py varnish/varnish.chart.py \ + w1sensor/w1sensor.chart.py web_log/web_log.chart.py +userpythonconfigdir = $(configdir)/python.d +dist_userpythonconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +pythonconfigdir = $(libconfigdir)/python.d +dist_pythonconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \ + adaptec_raid/adaptec_raid.conf apache/apache.conf \ + beanstalk/beanstalk.conf bind_rndc/bind_rndc.conf \ + boinc/boinc.conf ceph/ceph.conf chrony/chrony.conf \ + couchdb/couchdb.conf cpufreq/cpufreq.conf
cpuidle/cpuidle.conf \ + dnsdist/dnsdist.conf dns_query_time/dns_query_time.conf \ + dockerd/dockerd.conf dovecot/dovecot.conf \ + elasticsearch/elasticsearch.conf example/example.conf \ + exim/exim.conf fail2ban/fail2ban.conf \ + freeradius/freeradius.conf go_expvar/go_expvar.conf \ + haproxy/haproxy.conf hddtemp/hddtemp.conf \ + httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \ + isc_dhcpd/isc_dhcpd.conf \ + linux_power_supply/linux_power_supply.conf \ + litespeed/litespeed.conf logind/logind.conf mdstat/mdstat.conf \ + megacli/megacli.conf memcached/memcached.conf \ + mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \ + nginx/nginx.conf nginx_plus/nginx_plus.conf nsd/nsd.conf \ + ntpd/ntpd.conf ovpn_status_log/ovpn_status_log.conf \ + phpfpm/phpfpm.conf portcheck/portcheck.conf \ + postfix/postfix.conf postgres/postgres.conf \ + powerdns/powerdns.conf proxysql/proxysql.conf \ + puppet/puppet.conf rabbitmq/rabbitmq.conf redis/redis.conf \ + rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \ + samba/samba.conf sensors/sensors.conf \ + smartd_log/smartd_log.conf spigotmc/spigotmc.conf \ + springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \ + traefik/traefik.conf unbound/unbound.conf uwsgi/uwsgi.conf \ + varnish/varnish.conf w1sensor/w1sensor.conf \ + web_log/web_log.conf +pythonmodulesdir = $(pythondir)/python_modules +dist_pythonmodules_DATA = \ + python_modules/__init__.py \ + $(NULL) + +basesdir = $(pythonmodulesdir)/bases +dist_bases_DATA = \ + python_modules/bases/__init__.py \ + python_modules/bases/charts.py \ + python_modules/bases/collection.py \ + python_modules/bases/loaders.py \ + python_modules/bases/loggers.py \ + $(NULL) + +bases_framework_servicesdir = $(basesdir)/FrameworkServices +dist_bases_framework_services_DATA = \ + python_modules/bases/FrameworkServices/__init__.py \ + python_modules/bases/FrameworkServices/ExecutableService.py \ + python_modules/bases/FrameworkServices/LogService.py \ + python_modules/bases/FrameworkServices/MySQLService.py \ + python_modules/bases/FrameworkServices/SimpleService.py \ + python_modules/bases/FrameworkServices/SocketService.py \ + python_modules/bases/FrameworkServices/UrlService.py \ + $(NULL) + +third_partydir = $(pythonmodulesdir)/third_party +dist_third_party_DATA = \ + python_modules/third_party/__init__.py \ + python_modules/third_party/ordereddict.py \ + python_modules/third_party/lm_sensors.py \ + python_modules/third_party/mcrcon.py \ + python_modules/third_party/boinc_client.py \ + python_modules/third_party/monotonic.py \ + $(NULL) + +pythonyaml2dir = $(pythonmodulesdir)/pyyaml2 +dist_pythonyaml2_DATA = \ + python_modules/pyyaml2/__init__.py \ + python_modules/pyyaml2/composer.py \ + python_modules/pyyaml2/constructor.py \ + python_modules/pyyaml2/cyaml.py \ + python_modules/pyyaml2/dumper.py \ + python_modules/pyyaml2/emitter.py \ + python_modules/pyyaml2/error.py \ + python_modules/pyyaml2/events.py \ + python_modules/pyyaml2/loader.py \ + python_modules/pyyaml2/nodes.py \ + python_modules/pyyaml2/parser.py \ + python_modules/pyyaml2/reader.py \ + python_modules/pyyaml2/representer.py \ + python_modules/pyyaml2/resolver.py \ + python_modules/pyyaml2/scanner.py \ + python_modules/pyyaml2/serializer.py \ + python_modules/pyyaml2/tokens.py \ + $(NULL) + +pythonyaml3dir = $(pythonmodulesdir)/pyyaml3 +dist_pythonyaml3_DATA = \ + python_modules/pyyaml3/__init__.py \ + python_modules/pyyaml3/composer.py \ + python_modules/pyyaml3/constructor.py \ + python_modules/pyyaml3/cyaml.py \ + 
python_modules/pyyaml3/dumper.py \ + python_modules/pyyaml3/emitter.py \ + python_modules/pyyaml3/error.py \ + python_modules/pyyaml3/events.py \ + python_modules/pyyaml3/loader.py \ + python_modules/pyyaml3/nodes.py \ + python_modules/pyyaml3/parser.py \ + python_modules/pyyaml3/reader.py \ + python_modules/pyyaml3/representer.py \ + python_modules/pyyaml3/resolver.py \ + python_modules/pyyaml3/scanner.py \ + python_modules/pyyaml3/serializer.py \ + python_modules/pyyaml3/tokens.py \ + $(NULL) + +python_urllib3dir = $(pythonmodulesdir)/urllib3 +dist_python_urllib3_DATA = \ + python_modules/urllib3/__init__.py \ + python_modules/urllib3/_collections.py \ + python_modules/urllib3/connection.py \ + python_modules/urllib3/connectionpool.py \ + python_modules/urllib3/exceptions.py \ + python_modules/urllib3/fields.py \ + python_modules/urllib3/filepost.py \ + python_modules/urllib3/response.py \ + python_modules/urllib3/poolmanager.py \ + python_modules/urllib3/request.py \ + $(NULL) + +python_urllib3_utildir = $(python_urllib3dir)/util +dist_python_urllib3_util_DATA = \ + python_modules/urllib3/util/__init__.py \ + python_modules/urllib3/util/connection.py \ + python_modules/urllib3/util/request.py \ + python_modules/urllib3/util/response.py \ + python_modules/urllib3/util/retry.py \ + python_modules/urllib3/util/selectors.py \ + python_modules/urllib3/util/ssl_.py \ + python_modules/urllib3/util/timeout.py \ + python_modules/urllib3/util/url.py \ + python_modules/urllib3/util/wait.py \ + $(NULL) + +python_urllib3_packagesdir = $(python_urllib3dir)/packages +dist_python_urllib3_packages_DATA = \ + python_modules/urllib3/packages/__init__.py \ + python_modules/urllib3/packages/ordered_dict.py \ + python_modules/urllib3/packages/six.py \ + $(NULL) + +python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports +dist_python_urllib3_backports_DATA = \ + python_modules/urllib3/packages/backports/__init__.py \ + python_modules/urllib3/packages/backports/makefile.py \ + $(NULL) + +python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname +dist_python_urllib3_ssl_match_hostname_DATA = \ + python_modules/urllib3/packages/ssl_match_hostname/__init__.py \ + python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \ + $(NULL) + +python_urllib3_contribdir = $(python_urllib3dir)/contrib +dist_python_urllib3_contrib_DATA = \ + python_modules/urllib3/contrib/__init__.py \ + python_modules/urllib3/contrib/appengine.py \ + python_modules/urllib3/contrib/ntlmpool.py \ + python_modules/urllib3/contrib/pyopenssl.py \ + python_modules/urllib3/contrib/securetransport.py \ + python_modules/urllib3/contrib/socks.py \ + $(NULL) + +python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport +dist_python_urllib3_securetransport_DATA = \ + python_modules/urllib3/contrib/_securetransport/__init__.py \ + python_modules/urllib3/contrib/_securetransport/bindings.py \ + python_modules/urllib3/contrib/_securetransport/low_level.py \ + $(NULL) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc 
$(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; 
n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) +install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pythonSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir) +install-dist_basesDATA: $(dist_bases_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \ + done + +uninstall-dist_basesDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir) +install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \ + 
$(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \ + done + +uninstall-dist_bases_framework_servicesDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir) +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_pythonDATA: $(dist_python_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \ + done + +uninstall-dist_pythonDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir) +install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3DATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir) +install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files 
'$(DESTDIR)$(python_urllib3_backportsdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3_backportsDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir) +install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3_contribDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir) +install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3_packagesDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir) +install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3_securetransportDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; 
$(am__uninstall_files_from_dir) +install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3_ssl_match_hostnameDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir) +install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \ + done + +uninstall-dist_python_urllib3_utilDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir) +install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \ + done + +uninstall-dist_pythonconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \ + 
$(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \ + done + +uninstall-dist_pythonmodulesDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir) +install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \ + done + +uninstall-dist_pythonyaml2DATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir) +install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \ + done + +uninstall-dist_pythonyaml3DATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir) +install-dist_third_partyDATA: $(dist_third_party_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \ + done + +uninstall-dist_third_partyDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir) +install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \ + $(INSTALL_DATA) $$files 
"$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \ + done + +uninstall-dist_userpythonconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_basesDATA \ + install-dist_bases_framework_servicesDATA \ + install-dist_libconfigDATA install-dist_pluginsSCRIPTS \ + install-dist_pythonDATA install-dist_pythonSCRIPTS \ + install-dist_python_urllib3DATA \ + install-dist_python_urllib3_backportsDATA \ + install-dist_python_urllib3_contribDATA \ + install-dist_python_urllib3_packagesDATA \ + install-dist_python_urllib3_securetransportDATA \ + install-dist_python_urllib3_ssl_match_hostnameDATA \ + install-dist_python_urllib3_utilDATA \ + install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \ + install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \ + install-dist_third_partyDATA install-dist_userpythonconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_basesDATA \ + uninstall-dist_bases_framework_servicesDATA \ + uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \ + uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \ + uninstall-dist_python_urllib3DATA \ + uninstall-dist_python_urllib3_backportsDATA \ + uninstall-dist_python_urllib3_contribDATA \ + uninstall-dist_python_urllib3_packagesDATA \ + uninstall-dist_python_urllib3_securetransportDATA \ + uninstall-dist_python_urllib3_ssl_match_hostnameDATA \ + uninstall-dist_python_urllib3_utilDATA \ + uninstall-dist_pythonconfigDATA \ + uninstall-dist_pythonmodulesDATA \ + uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \ + uninstall-dist_third_partyDATA \ + uninstall-dist_userpythonconfigDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_basesDATA \ + install-dist_bases_framework_servicesDATA \ + install-dist_libconfigDATA install-dist_pluginsSCRIPTS \ + install-dist_pythonDATA install-dist_pythonSCRIPTS \ + install-dist_python_urllib3DATA \ + install-dist_python_urllib3_backportsDATA \ + install-dist_python_urllib3_contribDATA \ + install-dist_python_urllib3_packagesDATA \ + install-dist_python_urllib3_securetransportDATA \ + install-dist_python_urllib3_ssl_match_hostnameDATA \ + install-dist_python_urllib3_utilDATA \ + install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \ + install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \ + install-dist_third_partyDATA install-dist_userpythonconfigDATA \ + install-dvi install-dvi-am install-exec install-exec-am \ + install-html install-html-am install-info install-info-am \ + install-man install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean 
maintainer-clean-generic \
+	mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+	uninstall uninstall-am uninstall-dist_basesDATA \
+	uninstall-dist_bases_framework_servicesDATA \
+	uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+	uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+	uninstall-dist_python_urllib3DATA \
+	uninstall-dist_python_urllib3_backportsDATA \
+	uninstall-dist_python_urllib3_contribDATA \
+	uninstall-dist_python_urllib3_packagesDATA \
+	uninstall-dist_python_urllib3_securetransportDATA \
+	uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+	uninstall-dist_python_urllib3_utilDATA \
+	uninstall-dist_pythonconfigDATA \
+	uninstall-dist_pythonmodulesDATA \
+	uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+	uninstall-dist_third_partyDATA \
+	uninstall-dist_userpythonconfigDATA
+
+.in:
+	if sed \
+		-e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+		-e 's#[@]sbindir_POST@#$(sbindir)#g' \
+		-e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+		-e 's#[@]pythondir_POST@#$(pythondir)#g' \
+		-e 's#[@]configdir_POST@#$(configdir)#g' \
+		-e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+		-e 's#[@]cachedir_POST@#$(cachedir)#g' \
+	$< > $@.tmp; then \
+		mv "$@.tmp" "$@"; \
+	else \
+		rm -f "$@.tmp"; \
+		false; \
+	fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 000000000..df24cd18f
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,198 @@
+# python.d.plugin
+
+`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. It supports any number of data collection **modules**
+5. It allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+
+## Disclaimer
+
+Every module should be compatible with python2 and python3.
+All third party libraries should be installed system-wide or in the `python_modules` directory.
+Module configurations are written in YAML and **pyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+retries      : 1 # how many failures in update() are tolerated
+priority     : 20000 # where it is shown on the dashboard
+
+other_var1   : bla # variables passed to the module
+other_var2   : alb
+```
+
+- Configuration for many jobs (e.g. mysql):
+
+```yaml
+# module defaults:
+update_every : 2
+retries      : 1
+priority     : 20000
+
+local: # job name
+  update_every : 5 # job update frequency
+  other_var1   : some_val # module specific variable
+
+other_job:
+  priority   : 5 # job position on dashboard
+  retries    : 20 # job retries
+  other_var2 : val # module specific variable
+```
+
+`update_every`, `retries`, and `priority` are always optional.
+
+---
+
+## How to write a new module
+
+Writing a new python module is simple.
+You just need to remember to include 5 major things:
+- **ORDER** global list
+- **CHARTS** global dictionary
+- **Service** class
+- **_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](https://github.com/netdata/netdata/wiki/New-Module-PR-Checklist) beforehand, so that you have updated all the files you need to.
+
+### Global variables `ORDER` and `CHARTS`
+
+The `ORDER` list should contain the chart ids, in the order the charts are shown on the dashboard. Example:
+```py
+ORDER = ['first_chart', 'second_chart', 'third_chart']
+```
+
+The `CHARTS` dictionary is a little bit trickier. It should contain the chart definitions in the following format:
+```py
+CHARTS = {
+    id: {
+        'options': [name, title, units, family, context, charttype],
+        'lines': [
+            [unique_dimension_name, name, algorithm, multiplier, divisor]
+        ]}
+}
+```
+
+All these names are explained in the [External Plugins](../) section.
+Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
+
+### `Service` class
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, and to assign the global variables to instance variables.
+
+Simple example:
+```py
+from bases.FrameworkServices.UrlService import UrlService
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+```
+
+### `_get_data` collector/parser
+
+This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary whose keys are unique dimension names, or `None` if no data is collected.
+
+Example:
+```py
+def _get_data(self):
+    try:
+        raw = self._get_raw_data().split(" ")
+        return {'active': int(raw[2])}
+    except (ValueError, AttributeError):
+        return None
+```
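+
+Putting these pieces together, a complete module can be as small as the following sketch. It is a hypothetical example (the module name, chart id, and the randomly generated metric are illustrative only), modeled on the framework conventions described above:
+
+```py
+# -*- coding: utf-8 -*-
+# Description: hypothetical minimal python.d module sketch
+from random import randint
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = ['random']
+
+CHARTS = {
+    'random': {
+        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],
+        'lines': [
+            ['random1']  # one dimension, default algorithm/multiplier/divisor
+        ]}
+}
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        # return a dict keyed by dimension names, or None when collection fails
+        return {'random1': randint(0, 100)}
+```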
+
+## More about framework classes
+
+Every framework class has some user-configurable variables which are specific to that particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs additional user-configurable variables, they can be read from the `self.configuration` dictionary and assigned in the constructor or in a custom `check` method. Example:
+```py
+def __init__(self, configuration=None, name=None):
+    UrlService.__init__(self, configuration=configuration, name=name)
+    try:
+        self.baseurl = str(self.configuration['baseurl'])
+    except (KeyError, TypeError):
+        self.baseurl = "http://localhost:5001"
+```
+
+Classes implement `_get_raw_data`, which should be used to grab the raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+_This is the last-resort class: if a new module cannot be written using any other framework class, this one can be used._
+
+_Examples: `mysql`, `sensors`_
+
+It is the lowest-level class, and it implements most of the module logic, like:
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
+
+### `LogService`
+
+_Examples: `apache_cache`, `nginx_log`_
+
+_Variable from config file_: `log_path`.
+
+An object created from this class reads new lines from the file specified by the `log_path` variable, after checking that the file exists and is readable.
+`_get_raw_data` returns a list of strings, where each string is one new line from the file specified in `log_path`.
+
+### `ExecutableService`
+
+_Examples: `exim`, `postfix`_
+
+_Variable from config file_: `command`.
+
+This class executes a shell command in a secure way. It checks the `command` variable for invalid characters and will not proceed if it contains any of:
+- '&'
+- '|'
+- ';'
+- '>'
+- '<'
+
+For additional security it uses Python's `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name; when a relative name is used, the command is looked up in the `PATH` environment variable as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns a list of decoded lines returned by `command`.
+
+### `UrlService`
+
+_Examples: `apache`, `nginx`, `tomcat`_
+
+_Variables from config file_: `url`, `user`, `pass`.
+
+This class can be used when data is grabbed by accessing a service via the HTTP protocol. It can handle HTTP Basic Auth when `user` and `pass` credentials are specified.
+
+`_get_raw_data` returns a list of UTF-8 decoded strings (lines).
+
+### `SocketService`
+
+_Examples: `dovecot`, `redis`_
+
+_Variables from config file_: `unix_socket`, `host`, `port`, `request`.
+
+The object will try to execute `request` using either `unix_socket` or a TCP/IP socket built from `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and IPv4 and IPv6 TCP/IP sockets with SOCK_STREAM.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
+
+The socket is closed after every execution of `_get_raw_data`; to prevent this, a module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise `False`. It should also do this in a fast and efficient way, as in the sketch below.
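+
+As a minimal sketch (the service address, request string, and reply-termination check are illustrative assumptions, and the `bases.FrameworkServices` import path is assumed to follow the same layout as the other framework classes), a keep-alive `SocketService` module could look like:
+
+```py
+from bases.FrameworkServices.SocketService import SocketService
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.host = self.configuration.get('host', '127.0.0.1')
+        self.port = self.configuration.get('port', 1234)  # hypothetical port
+        self.request = 'stats'  # hypothetical request
+        self._keep_alive = True  # reuse the socket between runs
+
+    def _check_raw_data(self, data):
+        # hypothetical protocol: a reply is complete when it ends with a blank line
+        return data.endswith('\n\n')
+```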
\ No newline at end of file
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
new file mode 100644
index 000000000..716cdb235
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
+dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
new file mode 100644
index 000000000..499dc9190
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -0,0 +1,46 @@
+# adaptec raid
+
+This module collects health metrics for logical and physical devices.
+
+**Requirements:**
+* `arcconf` program
+* `sudo` program
+* `netdata` user needs to be able to sudo the `arcconf` program without a password
+
+To grab stats it executes:
+ * `sudo -n arcconf GETCONFIG 1 LD`
+ * `sudo -n arcconf GETCONFIG 1 PD`
+
+
+It produces:
+
+1. **Logical Device Status**
+
+2. **Physical Device State**
+
+3. **Physical Device S.M.A.R.T warnings**
+
+4. **Physical Device Temperature**
+
+### prerequisite
+This module uses `arcconf`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured so that the `netdata` user can
+execute `arcconf` as root without a password.
+
+Add to `sudoers`:
+
+    netdata ALL=(root) NOPASSWD: /path/to/arcconf
+
+### configuration
+
+**adaptec_raid** is disabled by default. It must be explicitly enabled in `python.d.conf`:
+
+```yaml
+adaptec_raid: yes
+```
+
+#### Screenshot:
+
+![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
+
+---
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
new file mode 100644
index 000000000..1fb1e4336
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: adaptec_raid netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+    'ld_status',
+    'pd_state',
+    'pd_smart_warnings',
+    'pd_temperature',
+]
+
+CHARTS = {
+    'ld_status': {
+        'options': [None, 'Status Is Not OK', 'bool', 'logical devices', 'adapter_raid.ld_status', 'line'],
+        'lines': []
+    },
+    'pd_state': {
+        'options': [None, 'State Is Not OK', 'bool', 'physical devices', 'adapter_raid.pd_state', 'line'],
+        'lines': []
+    },
+    'pd_smart_warnings': {
+        'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
+                    'adapter_raid.smart_warnings', 'line'],
+        'lines': []
+    },
+    'pd_temperature': {
+        'options': [None, 'Temperature', 'celsius', 'physical devices', 'adapter_raid.temperature', 'line'],
+        'lines': []
+    },
+}
+
+SUDO = 'sudo'
+ARCCONF = 'arcconf'
+
+BAD_LD_STATUS = (
+    'Degraded',
+    'Failed',
+)
+
+GOOD_PD_STATUS = (
+    'Online',
+)
+
+RE_LD = re.compile(
+    r'Logical device number\s+([0-9]+).*?'
+    r'Status of logical device\s+: ([a-zA-Z]+)'
+)
+
+
+def find_lds(d):
+    d = ' '.join(v.strip() for v in d)
+    return [LD(*v) for v in RE_LD.findall(d)]
+
+
+def find_pds(d):
+    pds = list()
+    pd = PD()
+
+    for row in d:
+        row = row.strip()
+        if row.startswith('Device #'):
+            pd = PD()
+            pd.id = row.split('#')[-1]
+        elif not pd.id:
+            continue
+
+        if row.startswith('State'):
+            v = row.split()[-1]
+            pd.state = v
+        elif row.startswith('S.M.A.R.T. 
warnings'): + v = row.split()[-1] + pd.smart_warnings = v + elif row.startswith('Temperature'): + v = row.split(':')[-1].split()[0] + pd.temperature = v + elif row.startswith('NCQ status'): + if pd.id and pd.state and pd.smart_warnings: + pds.append(pd) + pd = PD() + + return pds + + +class LD: + def __init__(self, ld_id, status): + self.id = ld_id + self.status = status + + def data(self): + return { + 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS) + } + + +class PD: + def __init__(self): + self.id = None + self.state = None + self.smart_warnings = None + self.temperature = None + + def data(self): + data = { + 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS), + 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings, + } + if self.temperature and self.temperature.isdigit(): + data['pd_{0}_temperature'.format(self.id)] = self.temperature + + return data + + +class Arcconf: + def __init__(self, arcconf): + self.arcconf = arcconf + + def ld_info(self): + return [self.arcconf, 'GETCONFIG', '1', 'LD'] + + def pd_info(self): + return [self.arcconf, 'GETCONFIG', '1', 'PD'] + + +# TODO: hardcoded sudo... +class SudoArcconf: + def __init__(self, arcconf, sudo): + self.arcconf = Arcconf(arcconf) + self.sudo = sudo + + def ld_info(self): + return [self.sudo, '-n'] + self.arcconf.ld_info() + + def pd_info(self): + return [self.sudo, '-n'] + self.arcconf.pd_info() + + +class Service(ExecutableService): + def __init__(self, configuration=None, name=None): + ExecutableService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = deepcopy(CHARTS) + self.use_sudo = self.configuration.get('use_sudo', True) + self.arcconf = None + + def execute(self, command, stderr=False): + return self._get_raw_data(command=command, stderr=stderr) + + def check(self): + arcconf = find_binary(ARCCONF) + if not arcconf: + self.error('can\'t locate "{0}" binary'.format(ARCCONF)) + return False + + sudo = find_binary(SUDO) + if self.use_sudo: + if not sudo: + self.error('can\'t locate "{0}" binary'.format(SUDO)) + return False + err = self.execute([sudo, '-n', '-v'], True) + if err: + self.error(' '.join(err)) + return False + + if self.use_sudo: + self.arcconf = SudoArcconf(arcconf, sudo) + else: + self.arcconf = Arcconf(arcconf) + + lds = self.get_lds() + if not lds: + return False + + self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds])) + + pds = self.get_pds() + if not pds: + return False + + self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds])) + + self.update_charts(lds, pds) + return True + + def get_data(self): + data = dict() + + for ld in self.get_lds(): + data.update(ld.data()) + + for pd in self.get_pds(): + data.update(pd.data()) + + return data + + def get_lds(self): + raw_lds = self.execute(self.arcconf.ld_info()) + if not raw_lds: + return None + + lds = find_lds(raw_lds) + if not lds: + self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info()))) + self.debug('output: {0}'.format(raw_lds)) + return None + return lds + + def get_pds(self): + raw_pds = self.execute(self.arcconf.pd_info()) + if not raw_pds: + return None + + pds = find_pds(raw_pds) + if not pds: + self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info()))) + self.debug('output: {0}'.format(raw_pds)) + return None + return pds + + def update_charts(self, lds, pds): + charts = self.definitions + for ld in lds: + dim = ['ld_{0}_status'.format(ld.id), 'ld 
{0}'.format(ld.id)] + charts['ld_status']['lines'].append(dim) + + for pd in pds: + dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)] + charts['pd_state']['lines'].append(dim) + + dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)] + charts['pd_smart_warnings']['lines'].append(dim) + + dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)] + charts['pd_temperature']['lines'].append(dim) diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf new file mode 100644 index 000000000..253cbf5a9 --- /dev/null +++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf @@ -0,0 +1,55 @@ +# netdata python.d.plugin configuration for adaptec raid +# +# This file is in YaML format. Generally the format is: +# +# name: value +# + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/apache/Makefile.inc
new file mode 100644
index 000000000..70a421550
--- /dev/null
+++ b/collectors/python.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += apache/apache.chart.py
+dist_pythonconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
new file mode 100644
index 000000000..c6d1d126a
--- /dev/null
+++ b/collectors/python.d.plugin/apache/README.md
@@ -0,0 +1,59 @@
+# apache
+
+This module will monitor one or more Apache servers, depending on configuration.
+
+**Requirements:**
+ * apache with `mod_status` enabled
+
+It produces the following charts:
+
+1. **Requests** in requests/s
+ * requests
+
+2. **Connections**
+ * connections
+
+3. **Async Connections**
+ * keepalive
+ * closing
+ * writing
+
+4. **Bandwidth** in kilobits/s
+ * sent
+
+5. **Workers**
+ * idle
+ * busy
+
+6. **Lifetime Avg. Requests/s** in requests/s
+ * requests_sec
+
+7. **Lifetime Avg. Bandwidth/s** in kilobits/s
+ * size_sec
+
+8. **Lifetime Avg. Response Size** in bytes/request
+ * size_req
+
+### configuration
+
+It needs only the `url` to the server's `server-status?auto` page.
+
+Here is an example for 2 servers:
+
+```yaml
+update_every : 10
+priority     : 90100
+
+local:
+  url     : 'http://localhost/server-status?auto'
+  retries : 20
+
+remote:
+  url          : 'http://www.apache.org/server-status?auto'
+  update_every : 5
+  retries      : 4
+```
+
+Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
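+
+The `?auto` endpoint replies with plain `key: value` lines, which is what the module parses. A truncated, illustrative response (the numbers are made up) looks like this:
+
+```
+Total Accesses: 104465
+Total kBytes: 1979660
+ReqPerSec: 2.27
+BytesPerSec: 44770.4
+BytesPerReq: 19404.4
+BusyWorkers: 7
+IdleWorkers: 43
+ConnsTotal: 68
+ConnsAsyncWriting: 5
+ConnsAsyncKeepAlive: 31
+ConnsAsyncClosing: 3
+```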
+
+---
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
new file mode 100644
index 000000000..d136274d0
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Description: apache netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+#     'update_every': update_every,
+#     'retries': retries,
+#     'priority': priority,
+#     'url': 'http://www.apache.org/server-status?auto'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
+
+CHARTS = {
+    'bytesperreq': {
+        'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+                    'statistics', 'apache.bytesperreq', 'area'],
+        'lines': [
+            ['size_req']
+        ]},
+    'workers': {
+        'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+        'lines': [
+            ['idle'],
+            ['busy'],
+        ]},
+    'reqpersec': {
+        'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+                    'apache.reqpersec', 'area'],
+        'lines': [
+            ['requests_sec']
+        ]},
+    'bytespersec': {
+        'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+                    'apache.bytespersec', 'area'],
+        'lines': [
+            ['size_sec', None, 'absolute', 8, 1000]
+        ]},
+    'requests': {
+        'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+        'lines': [
+            ['requests', None, 'incremental']
+        ]},
+    'net': {
+        'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+        'lines': [
+            ['sent', None, 'incremental', 8, 1]
+        ]},
+    'connections': {
+        'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
+        'lines': [
+            ['connections']
+        ]},
+    'conns_async': {
+        'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+        'lines': [
+            ['keepalive'],
+            ['closing'],
+            ['writing']
+        ]}
+}
+
+ASSIGNMENT = {
+    'BytesPerReq': 'size_req',
+    'IdleWorkers': 'idle',
+    'IdleServers': 'idle_servers',
+    'BusyWorkers': 'busy',
+    'BusyServers': 'busy_servers',
+    'ReqPerSec': 'requests_sec',
+    'BytesPerSec': 'size_sec',
+    'Total Accesses': 'requests',
+    'Total kBytes': 'sent',
+    'ConnsTotal': 'connections',
+    'ConnsAsyncKeepAlive': 'keepalive',
+    'ConnsAsyncClosing': 'closing',
+    'ConnsAsyncWriting': 'writing'
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
+
+    def check(self):
+        self._manager = self._build_manager()
+        data = self._get_data()
+        if not data:
+            return None
+
+        if 'idle_servers' in data:
+            self.module_name = 'lighttpd'
+            for chart in self.definitions:
+                if chart == 'workers':
+                    lines = self.definitions[chart]['lines']
+                    lines[0] = ['idle_servers', 'idle']
+                    lines[1] = ['busy_servers', 'busy']
+                opts = self.definitions[chart]['options']
+                opts[1] = opts[1].replace('apache', 'lighttpd')
+                opts[4] = opts[4].replace('apache', 'lighttpd')
+        return True
+
+    def _get_data(self):
+        """
+        Format data received from http request
+        :return: dict
+        """
+        raw_data = self._get_raw_data()
+        if not raw_data:
+            return None
+        data = dict()
+
+        for row in raw_data.split('\n'):
+            tmp = row.split(':')
+            if tmp[0] in ASSIGNMENT:
+                try:
+                    data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
+                except (IndexError, ValueError):
+                    continue
+        return data or None
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
new file mode 100644
index 000000000..8b606f7e0
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for apache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+ +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, apache also supports the following: +# +# url: 'URL' # the URL to fetch apache's mod_status stats +# +# if the URL is password protected, the following are supported: +# +# user: 'username' +# pass: 'password' + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +localhost: + name : 'local' + url : 'http://localhost/server-status?auto' + +localipv4: + name : 'local' + url : 'http://127.0.0.1/server-status?auto' + +localipv6: + name : 'local' + url : 'http://[::1]/server-status?auto' diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc new file mode 100644 index 000000000..4bbb7087d --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += beanstalk/beanstalk.chart.py +dist_pythonconfig_DATA += beanstalk/beanstalk.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc + diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md new file mode 100644 index 000000000..c2d7d5787 --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/README.md @@ -0,0 +1,103 @@ +# beanstalk + +Module 
provides server and tube-level statistics: + +**Requirements:** + * `python-beanstalkc` + +**Server statistics:** + +1. **Cpu usage** in cpu time + * user + * system + +2. **Jobs rate** in jobs/s + * total + * timeouts + +3. **Connections rate** in connections/s + * connections + +4. **Commands rate** in commands/s + * put + * peek + * peek-ready + * peek-delayed + * peek-buried + * reserve + * use + * watch + * ignore + * delete + * release + * bury + * kick + * stats + * stats-job + * stats-tube + * list-tubes + * list-tube-used + * list-tubes-watched + * pause-tube + +5. **Current tubes** in tubes + * tubes + +6. **Current jobs** in jobs + * urgent + * ready + * reserved + * delayed + * buried + +7. **Current connections** in connections + * written + * producers + * workers + * waiting + +8. **Binlog** in records/s + * written + * migrated + +9. **Uptime** in seconds + * uptime + +**Per tube statistics:** + +1. **Jobs rate** in jobs/s + * jobs + +2. **Jobs** in jobs + * using + * ready + * reserved + * delayed + * buried + +3. **Connections** in connections + * using + * waiting + * watching + +4. **Commands** in commands/s + * deletes + * pauses + +5. **Pause** in seconds + * since + * left + + +### configuration + +Sample: + +```yaml +host : '127.0.0.1' +port : 11300 +``` + +If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address + +--- diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py new file mode 100644 index 000000000..1472b4e1a --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +# Description: beanstalk netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +try: + import beanstalkc + BEANSTALKC = True +except ImportError: + BEANSTALKC = False + +from bases.FrameworkServices.SimpleService import SimpleService +from bases.loaders import safe_load + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs', + 'current_connections', 'binlog', 'uptime'] + +CHARTS = { + 'cpu_usage': { + 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'], + 'lines': [ + ['rusage-utime', 'user', 'incremental'], + ['rusage-stime', 'system', 'incremental'] + ] + }, + 'jobs_rate': { + 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'], + 'lines': [ + ['total-jobs', 'total', 'incremental'], + ['job-timeouts', 'timeouts', 'incremental'] + ] + }, + 'connections_rate': { + 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate', + 'area'], + 'lines': [ + ['total-connections', 'connections', 'incremental'] + ] + }, + 'commands_rate': { + 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'], + 'lines': [ + ['cmd-put', 'put', 'incremental'], + ['cmd-peek', 'peek', 'incremental'], + ['cmd-peek-ready', 'peek-ready', 'incremental'], + ['cmd-peek-delayed', 'peek-delayed', 'incremental'], + ['cmd-peek-buried', 'peek-buried', 'incremental'], + ['cmd-reserve', 'reserve', 'incremental'], + ['cmd-use', 'use', 'incremental'], + ['cmd-watch', 'watch', 'incremental'], + ['cmd-ignore', 'ignore', 'incremental'], + ['cmd-delete', 'delete', 
'incremental'], + ['cmd-release', 'release', 'incremental'], + ['cmd-bury', 'bury', 'incremental'], + ['cmd-kick', 'kick', 'incremental'], + ['cmd-stats', 'stats', 'incremental'], + ['cmd-stats-job', 'stats-job', 'incremental'], + ['cmd-stats-tube', 'stats-tube', 'incremental'], + ['cmd-list-tubes', 'list-tubes', 'incremental'], + ['cmd-list-tube-used', 'list-tube-used', 'incremental'], + ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'], + ['cmd-pause-tube', 'pause-tube', 'incremental'] + ] + }, + 'current_tubes': { + 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'], + 'lines': [ + ['current-tubes', 'tubes'] + ] + }, + 'current_jobs': { + 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'], + 'lines': [ + ['current-jobs-urgent', 'urgent'], + ['current-jobs-ready', 'ready'], + ['current-jobs-reserved', 'reserved'], + ['current-jobs-delayed', 'delayed'], + ['current-jobs-buried', 'buried'] + ] + }, + 'current_connections': { + 'options': [None, 'Current Connections', 'connections', 'server statistics', + 'beanstalk.current_connections', 'line'], + 'lines': [ + ['current-connections', 'written'], + ['current-producers', 'producers'], + ['current-workers', 'workers'], + ['current-waiting', 'waiting'] + ] + }, + 'binlog': { + 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'], + 'lines': [ + ['binlog-records-written', 'written', 'incremental'], + ['binlog-records-migrated', 'migrated', 'incremental'] + ] + }, + 'uptime': { + 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'], + 'lines': [ + ['uptime'], + ] + } +} + + +def tube_chart_template(name): + order = [ + '{0}_jobs_rate'.format(name), + '{0}_jobs'.format(name), + '{0}_connections'.format(name), + '{0}_commands'.format(name), + '{0}_pause'.format(name) + ] + family = 'tube {0}'.format(name) + + charts = { + order[0]: { + 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'], + 'lines': [ + ['_'.join([name, 'total-jobs']), 'jobs', 'incremental'] + ] + }, + order[1]: { + 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'], + 'lines': [ + ['_'.join([name, 'current-jobs-urgent']), 'urgent'], + ['_'.join([name, 'current-jobs-ready']), 'ready'], + ['_'.join([name, 'current-jobs-reserved']), 'reserved'], + ['_'.join([name, 'current-jobs-delayed']), 'delayed'], + ['_'.join([name, 'current-jobs-buried']), 'buried'] + ] + }, + order[2]: { + 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'], + 'lines': [ + ['_'.join([name, 'current-using']), 'using'], + ['_'.join([name, 'current-waiting']), 'waiting'], + ['_'.join([name, 'current-watching']), 'watching'] + ] + }, + order[3]: { + 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'], + 'lines': [ + ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'], + ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental'] + ] + }, + order[4]: { + 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'], + 'lines': [ + ['_'.join([name, 'pause']), 'since'], + ['_'.join([name, 'pause-time-left']), 'left'] + ] + } + } + + return order, charts + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.configuration = configuration + self.order = list(ORDER) + self.definitions = 
dict(CHARTS) + self.conn = None + self.alive = True + + def check(self): + if not BEANSTALKC: + self.error("'beanstalkc' module is needed to use beanstalk.chart.py") + return False + + self.conn = self.connect() + + return True if self.conn else False + + def get_data(self): + """ + :return: dict + """ + if not self.is_alive(): + return None + + active_charts = self.charts.active_charts() + data = dict() + + try: + data.update(self.conn.stats()) + + for tube in self.conn.tubes(): + stats = self.conn.stats_tube(tube) + + if tube + '_jobs_rate' not in active_charts: + self.create_new_tube_charts(tube) + + for stat in stats: + data['_'.join([tube, stat])] = stats[stat] + + except beanstalkc.SocketError: + self.alive = False + return None + + return data or None + + def create_new_tube_charts(self, tube): + order, charts = tube_chart_template(tube) + + for chart_name in order: + params = [chart_name] + charts[chart_name]['options'] + dimensions = charts[chart_name]['lines'] + + new_chart = self.charts.add_chart(params) + for dimension in dimensions: + new_chart.add_dimension(dimension) + + def connect(self): + host = self.configuration.get('host', '127.0.0.1') + port = self.configuration.get('port', 11300) + timeout = self.configuration.get('timeout', 1) + try: + return beanstalkc.Connection(host=host, + port=port, + connect_timeout=timeout, + parse_yaml=safe_load) + except beanstalkc.SocketError as error: + self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error)) + return None + + def reconnect(self): + try: + self.conn.reconnect() + self.alive = True + return True + except beanstalkc.SocketError: + return False + + def is_alive(self): + if not self.alive: + return self.reconnect() + return True diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf new file mode 100644 index 000000000..940801877 --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf @@ -0,0 +1,80 @@ +# netdata python.d.plugin configuration for beanstalk +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# chart_cleanup sets the default chart cleanup interval in iterations. 
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# When a plugin sends the obsolete flag, the charts are not deleted
+# from netdata immediately.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# chart_cleanup: 10
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#     chart_cleanup: 10       # the JOB's chart cleanup interval in iterations
+#
+# Additionally to the above, beanstalk also supports the following:
+#
+# host: 'host'                # Server ip address or hostname. Default: 127.0.0.1
+# port: port                  # Beanstalkd port. Default: 11300
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
new file mode 100644
index 000000000..72f391492
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += bind_rndc/bind_rndc.chart.py
+dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 100644
index 000000000..688297ab3
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1,60 @@
+# bind_rndc
+
+This module parses the `named.stats` dump file to collect real-time BIND performance metrics.
+
+**Requirements:**
+ * Version of bind must be 9.6 or later
+ * Netdata must have permissions to run `rndc stats`
+
+It produces:
+
+1. **Name server statistics**
+ * requests
+ * responses
+ * success
+ * auth_answer
+ * nonauth_answer
+ * nxrrset
+ * failure
+ * nxdomain
+ * recursion
+ * duplicate
+ * rejections
+
+2. **Incoming queries**
+ * RESERVED0
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * MX
+ * TXT
+ * X25
+ * AAAA
+ * SRV
+ * NAPTR
+ * A6
+ * DS
+ * RRSIG
+ * DNSKEY
+ * SPF
+ * ANY
+ * DLV
+
+3.
**Outgoing queries** + * Same as Incoming queries + + +### configuration + +Sample: + +```yaml +local: + named_stats_path : '/var/log/bind/named.stats' +``` + +If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats` + +--- diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py new file mode 100644 index 000000000..423232f65 --- /dev/null +++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- +# Description: bind rndc netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +import os + +from collections import defaultdict +from subprocess import Popen + +from bases.collection import find_binary +from bases.FrameworkServices.SimpleService import SimpleService + +priority = 60000 +retries = 60 +update_every = 30 + +ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size'] + +CHARTS = { + 'name_server_statistics': { + 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics', + 'bind_rndc.name_server_statistics', 'line'], + 'lines': [ + ['nms_requests', 'requests', 'incremental'], + ['nms_rejected_queries', 'rejected_queries', 'incremental'], + ['nms_success', 'success', 'incremental'], + ['nms_failure', 'failure', 'incremental'], + ['nms_responses', 'responses', 'incremental'], + ['nms_duplicate', 'duplicate', 'incremental'], + ['nms_recursion', 'recursion', 'incremental'], + ['nms_nxrrset', 'nxrrset', 'incremental'], + ['nms_nxdomain', 'nxdomain', 'incremental'], + ['nms_non_auth_answer', 'non_auth_answer', 'incremental'], + ['nms_auth_answer', 'auth_answer', 'incremental'], + ['nms_dropped_queries', 'dropped_queries', 'incremental'], + ]}, + 'incoming_queries': { + 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'], + 'lines': [ + ]}, + 'outgoing_queries': { + 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'], + 'lines': [ + ]}, + 'named_stats_size': { + 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'], + 'lines': [ + ['stats_size', None, 'absolute', 1, 1 << 20] + ] + } +} + +NMS = { + 'nms_requests': [ + 'IPv4 requests received', + 'IPv6 requests received', + 'TCP requests received', + 'requests with EDNS(0) receive' + ], + 'nms_responses': [ + 'responses sent', + 'truncated responses sent', + 'responses with EDNS(0) sent', + 'requests with unsupported EDNS version received' + ], + 'nms_failure': [ + 'other query failures', + 'queries resulted in SERVFAIL' + ], + 'nms_auth_answer': ['queries resulted in authoritative answer'], + 'nms_non_auth_answer': ['queries resulted in non authoritative answer'], + 'nms_nxrrset': ['queries resulted in nxrrset'], + 'nms_success': ['queries resulted in successful answer'], + 'nms_nxdomain': ['queries resulted in NXDOMAIN'], + 'nms_recursion': ['queries caused recursion'], + 'nms_duplicate': ['duplicate queries received'], + 'nms_rejected_queries': [ + 'auth queries rejected', + 'recursive queries rejected' + ], + 'nms_dropped_queries': ['queries dropped'] +} + +STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries'] + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + 
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
+        self.rndc = find_binary('rndc')
+        self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth_answer=0,
+                         nms_non_auth_answer=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
+                         nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
+                         nms_dropped_queries=0)
+
+    def check(self):
+        if not self.rndc:
+            self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
+            return False
+
+        if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
+            self.error('Cannot access file %s' % self.named_stats_path)
+            return False
+
+        run_rndc = Popen([self.rndc, 'stats'], shell=False)
+        run_rndc.wait()
+
+        if not run_rndc.returncode:
+            return True
+        self.error('Not enough permissions to run "%s stats"' % self.rndc)
+        return False
+
+    def _get_raw_data(self):
+        """
+        Run 'rndc stats' and read last dump from named.stats
+        :return: dict
+        """
+        result = dict()
+        try:
+            current_size = os.path.getsize(self.named_stats_path)
+            run_rndc = Popen([self.rndc, 'stats'], shell=False)
+            run_rndc.wait()
+
+            if run_rndc.returncode:
+                return None
+            with open(self.named_stats_path) as named_stats:
+                named_stats.seek(current_size)
+                result['stats'] = named_stats.readlines()
+            result['size'] = current_size
+            return result
+        except (OSError, IOError):
+            return None
+
+    def _get_data(self):
+        """
+        Parse data from _get_raw_data()
+        :return: dict
+        """
+
+        raw_data = self._get_raw_data()
+
+        if raw_data is None:
+            return None
+        parsed = dict()
+        for stat in STATS:
+            parsed[stat] = parse_stats(field=stat,
+                                       named_stats=raw_data['stats'])
+
+        self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+        for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+            parsed_key, chart_name = elem[0], elem[1]
+            for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+                                                      add=chart_name[:9]).items():
+
+                if dimension_id not in self.data:
+                    dimension = dimension_id.replace(chart_name[:9], '')
+                    if dimension_id not in self.charts[chart_name]:
+                        self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
+
+                self.data[dimension_id] = value
+
+        self.data['stats_size'] = raw_data['size']
+        return self.data
+
+
+def parse_stats(field, named_stats):
+    """
+    :param field: str:
+    :param named_stats: list:
+    :return: dict
+
+    Example:
+    field: 'Incoming Queries'
+    named_stats (list of lines):
+    ++ Incoming Requests ++
+              1405660 QUERY
+                    3 NOTIFY
+    ++ Incoming Queries ++
+              1214961 A
+                   75 NS
+                    2 CNAME
+                 2897 SOA
+                35544 PTR
+                   14 MX
+                 5822 TXT
+               145974 AAAA
+                  371 SRV
+    ++ Outgoing Queries ++
+    ...
+
+    result:
+    {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+    """
+    data = dict()
+    ns = iter(named_stats)
+    for line in ns:
+        if field not in line:
+            continue
+        while True:
+            try:
+                line = next(ns)
+            except StopIteration:
+                break
+            if '++' not in line:
+                if '[' in line:
+                    continue
+                v, k = line.strip().split(' ', 1)
+                if k not in data:
+                    data[k] = 0
+                data[k] += int(v)
+                continue
+            break
+        break
+    return data
+
+
+def nms_mapper(data):
+    """
+    :param data: dict
+    :return: dict(defaultdict)
+    """
+    result = defaultdict(int)
+    for k, v in NMS.items():
+        for elem in v:
+            result[k] += data.get(elem, 0)
+    return result
+
+
+def queries_mapper(data, add):
+    """
+    :param data: dict
+    :param add: str
+    :return: dict
+    """
+    return dict([(add + k, v) for k, v in data.items()])
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
new file mode 100644
index 000000000..71958ff98
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -0,0 +1,112 @@
+# netdata python.d.plugin configuration for bind_rndc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, bind_rndc also supports the following:
+#
+# named_stats_path: 'path to named.stats'  # Default: '/var/log/bind/named.stats'
+#------------------------------------------------------------------------------------------------------------------
+# IMPORTANT Information
+#
+# BIND APPENDS to the stats file at EVERY RUN. It's NOT RECOMMENDED to set update_every below 30 sec.
+# It is STRONGLY RECOMMENDED to create a bind-rndc conf file for logrotate.
+#
+# To set up your BIND to dump stats do the following:
+#
+# 1. add to 'named.conf.options' options {}:
+#    statistics-file "/var/log/bind/named.stats";
+#
+# 2. Create bind/ directory in /var/log
+#    cd /var/log/ && mkdir bind
+#
+# 3. Change owner of directory to 'bind' user
+#    chown bind bind/
+#
+# 4. RELOAD (NOT restart) BIND
+#    systemctl reload bind9.service
+#
+# 5. Run 'rndc stats' as root to dump stats (BIND will create named.stats in the new directory)
+#
+#
+# To ALLOW NETDATA TO RUN 'rndc stats' change the '/etc/bind/rndc.key' group to netdata
+#    chown :netdata rndc.key
+#
+# Last BUT NOT least, create bind-rndc.conf in logrotate.d/.
+# A working example:
+# /var/log/bind/named.stats {
+#
+#     daily
+#     rotate 4
+#     compress
+#     delaycompress
+#     create 0644 bind bind
+#     missingok
+#     postrotate
+#         rndc reload > /dev/null
+#     endscript
+# }
+#
+# To test your logrotate conf file run as root:
+#
+# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
new file mode 100644
index 000000000..319e19cfe
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += boinc/boinc.chart.py
+dist_pythonconfig_DATA += boinc/boinc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 100644
index 000000000..595bcd3c0
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1,28 @@
+# boinc
+
+This module monitors task counts for the Berkeley Open Infrastructure
+for Network Computing (BOINC) distributed computing client using the
+same RPC interface that the BOINC monitoring GUI does.
+
+It provides charts tracking the total number of tasks and active tasks,
+as well as ones tracking each of the possible states for tasks.
+
+### configuration
+
+BOINC requires use of a password to access its RPC interface. You can
+find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
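+
+For example, a minimal job definition that passes this password
+explicitly might look like the following sketch (the password value is
+a placeholder for the contents of your `gui_rpc_auth.cfg`):
+
+```yaml
+local:
+  hostname: localhost
+  password: 'password-from-gui_rpc_auth.cfg'
+```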
+ +By default, the module will try to auto-detect the password by looking +in `/var/lib/boinc` for this file (this is the location most Linux +distributions use for a system-wide BOINC installation), so things may +just work without needing configuration for the local system. + +You can monitor remote systems as well: + +```yaml +remote: + hostname: some-host + password: some-password +``` + +--- diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py new file mode 100644 index 000000000..d14754c4b --- /dev/null +++ b/collectors/python.d.plugin/boinc/boinc.chart.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# Description: BOINC netdata python.d module +# Author: Austin S. Hemmelgarn (Ferroin) +# SPDX-License-Identifier: GPL-3.0-or-later + +import socket + +from bases.FrameworkServices.SimpleService import SimpleService + +from third_party import boinc_client + + +ORDER = ['tasks', 'states', 'sched_states', 'process_states'] + +CHARTS = { + 'tasks': { + 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'], + 'lines': [ + ['total', 'Total', 'absolute', 1, 1], + ['active', 'Active', 'absolute', 1, 1] + ] + }, + 'states': { + 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'], + 'lines': [ + ['new', 'New', 'absolute', 1, 1], + ['downloading', 'Downloading', 'absolute', 1, 1], + ['downloaded', 'Ready to Run', 'absolute', 1, 1], + ['comperror', 'Compute Errors', 'absolute', 1, 1], + ['uploading', 'Uploading', 'absolute', 1, 1], + ['uploaded', 'Uploaded', 'absolute', 1, 1], + ['aborted', 'Aborted', 'absolute', 1, 1], + ['upload_failed', 'Failed Uploads', 'absolute', 1, 1] + ] + }, + 'sched_states': { + 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'], + 'lines': [ + ['uninit_sched', 'Uninitialized', 'absolute', 1, 1], + ['preempted', 'Preempted', 'absolute', 1, 1], + ['scheduled', 'Scheduled', 'absolute', 1, 1] + ] + }, + 'process_states': { + 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'], + 'lines': [ + ['uninit_proc', 'Uninitialized', 'absolute', 1, 1], + ['executing', 'Executing', 'absolute', 1, 1], + ['suspended', 'Suspended', 'absolute', 1, 1], + ['aborting', 'Aborted', 'absolute', 1, 1], + ['quit', 'Quit', 'absolute', 1, 1], + ['copy_pending', 'Copy Pending', 'absolute', 1, 1] + ] + } +} + +# A simple template used for pre-loading the return dictionary to make +# the _get_data() method simpler. 
+_DATA_TEMPLATE = {
+    'total': 0,
+    'active': 0,
+    'new': 0,
+    'downloading': 0,
+    'downloaded': 0,
+    'comperror': 0,
+    'uploading': 0,
+    'uploaded': 0,
+    'aborted': 0,
+    'upload_failed': 0,
+    'uninit_sched': 0,
+    'preempted': 0,
+    'scheduled': 0,
+    'uninit_proc': 0,
+    'executing': 0,
+    'suspended': 0,
+    'aborting': 0,
+    'quit': 0,
+    'copy_pending': 0
+}
+
+# Map task states to dimensions
+_TASK_MAP = {
+    boinc_client.ResultState.NEW: 'new',
+    boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
+    boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
+    boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
+    boinc_client.ResultState.FILES_UPLOADING: 'uploading',
+    boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
+    boinc_client.ResultState.ABORTED: 'aborted',
+    boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
+}
+
+# Map scheduler states to dimensions
+_SCHED_MAP = {
+    boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
+    boinc_client.CpuSched.PREEMPTED: 'preempted',
+    boinc_client.CpuSched.SCHEDULED: 'scheduled',
+}
+
+# Maps process states to dimensions
+_PROC_MAP = {
+    boinc_client.Process.UNINITIALIZED: 'uninit_proc',
+    boinc_client.Process.EXECUTING: 'executing',
+    boinc_client.Process.SUSPENDED: 'suspended',
+    boinc_client.Process.ABORT_PENDING: 'aborting',
+    boinc_client.Process.QUIT_PENDING: 'quit',
+    boinc_client.Process.COPY_PENDING: 'copy_pending'
+}
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.host = self.configuration.get('host', 'localhost')
+        self.port = self.configuration.get('port', 0)
+        self.password = self.configuration.get('password', '')
+        self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
+        self.alive = False
+
+    def check(self):
+        return self.connect()
+
+    def connect(self):
+        self.client.connect()
+        self.alive = self.client.connected and self.client.authorized
+        return self.alive
+
+    def reconnect(self):
+        # The client class itself actually disconnects existing
+        # connections when it is told to connect, so we don't need to
+        # explicitly disconnect when we're just trying to reconnect.
+        return self.connect()
+
+    def is_alive(self):
+        if not self.alive:
+            return self.reconnect()
+        return True
+
+    def _get_data(self):
+        if not self.is_alive():
+            return None
+        data = dict(_DATA_TEMPLATE)
+        results = []
+        try:
+            results = self.client.get_tasks()
+        except socket.error:
+            self.error('Connection is dead')
+            self.alive = False
+            return None
+        for task in results:
+            data['total'] += 1
+            data[_TASK_MAP[task.state]] += 1
+            try:
+                if task.active_task:
+                    data['active'] += 1
+                    data[_SCHED_MAP[task.scheduler_state]] += 1
+                    data[_PROC_MAP[task.active_task_state]] += 1
+            except AttributeError:
+                pass
+        return data
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
new file mode 100644
index 000000000..e59d2509d
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for boinc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
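+
+# For instance (an illustrative sketch, not an active default), a job
+# for a remote client would have its parameters indented like this:
+#
+# remote:
+#   hostname: 'some-host'
+#   password: 'some-password'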
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, boinc also supports the following:
+#
+# hostname: localhost  # The host running the BOINC client
+# port: 31416          # The remote GUI RPC port for BOINC
+# password: ''         # The remote GUI RPC password
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
new file mode 100644
index 000000000..15b039ef6
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ceph/ceph.chart.py
+dist_pythonconfig_DATA += ceph/ceph.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 100644
index 000000000..29dfe5d1d
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1,32 @@
+# ceph
+
+This module monitors the usage and consumption data of a Ceph cluster.
+ +It produces: + +* Cluster statistics (usage, available, latency, objects, read/write rate) +* OSD usage +* OSD latency +* Pool usage +* Pool read/write operations +* Pool read/write rate +* number of objects per pool + +**Requirements:** + +- `rados` python module +- Granting read permissions to ceph group from keyring file +```shell +# chmod 640 /etc/ceph/ceph.client.admin.keyring +``` + +### Configuration + +Sample: +```yaml +local: + config_file: '/etc/ceph/ceph.conf' + keyring_file: '/etc/ceph/ceph.client.admin.keyring' +``` + +--- diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py new file mode 100644 index 000000000..31c764d0f --- /dev/null +++ b/collectors/python.d.plugin/ceph/ceph.chart.py @@ -0,0 +1,345 @@ +# -*- coding: utf-8 -*- +# Description: ceph netdata python.d module +# Author: Luis Eduardo (lets00) +# SPDX-License-Identifier: GPL-3.0-or-later + +try: + import rados + CEPH = True +except ImportError: + CEPH = False + +import os +import json +from bases.FrameworkServices.SimpleService import SimpleService + +# default module values (can be overridden per job in `config`) +update_every = 10 +priority = 60000 +retries = 60 + +ORDER = [ + 'general_usage', + 'general_objects', + 'general_bytes', + 'general_operations', + 'general_latency', + 'pool_usage', + 'pool_objects', + 'pool_read_bytes', + 'pool_write_bytes', + 'pool_read_operations', + 'pool_write_operations', + 'osd_usage', + 'osd_apply_latency', + 'osd_commit_latency' +] + +CHARTS = { + 'general_usage': { + 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'], + 'lines': [ + ['general_available', 'avail', 'absolute'], + ['general_usage', 'used', 'absolute'] + ] + }, + 'general_objects': { + 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'], + 'lines': [ + ['general_objects', 'cluster', 'absolute'] + ] + }, + 'general_bytes': { + 'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes', + 'area'], + 'lines': [ + ['general_read_bytes', 'read', 'absolute', 1, 1024], + ['general_write_bytes', 'write', 'absolute', -1, 1024] + ] + }, + 'general_operations': { + 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations', + 'area'], + 'lines': [ + ['general_read_operations', 'read', 'absolute', 1], + ['general_write_operations', 'write', 'absolute', -1] + ] + }, + 'general_latency': { + 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency', + 'area'], + 'lines': [ + ['general_apply_latency', 'apply', 'absolute'], + ['general_commit_latency', 'commit', 'absolute'] + ] + }, + 'pool_usage': { + 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'], + 'lines': [] + }, + 'pool_objects': { + 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'], + 'lines': [] + }, + 'pool_read_bytes': { + 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'], + 'lines': [] + }, + 'pool_write_bytes': { + 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'], + 'lines': [] + }, + 'pool_read_operations': { + 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'], + 'lines': [] + }, + 'pool_write_operations': { + 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 
'area'], + 'lines': [] + }, + 'osd_usage': { + 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'], + 'lines': [] + }, + 'osd_apply_latency': { + 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'], + 'lines': [] + }, + 'osd_commit_latency': { + 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'], + 'lines': [] + } + +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.config_file = self.configuration.get('config_file') + self.keyring_file = self.configuration.get('keyring_file') + + def check(self): + """ + Checks module + :return: + """ + if not CEPH: + self.error('rados module is needed to use ceph.chart.py') + return False + if not (self.config_file and self.keyring_file): + self.error('config_file and/or keyring_file is not defined') + return False + + # Verify files and permissions + if not (os.access(self.config_file, os.F_OK)): + self.error('{0} does not exist'.format(self.config_file)) + return False + if not (os.access(self.keyring_file, os.F_OK)): + self.error('{0} does not exist'.format(self.keyring_file)) + return False + if not (os.access(self.config_file, os.R_OK)): + self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file)) + return False + if not (os.access(self.keyring_file, os.R_OK)): + self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file)) + return False + try: + self.cluster = rados.Rados(conffile=self.config_file, + conf=dict(keyring=self.keyring_file)) + self.cluster.connect() + except rados.Error as error: + self.error(error) + return False + self.create_definitions() + return True + + def create_definitions(self): + """ + Create dynamically charts options + :return: None + """ + # Pool lines + for pool in sorted(self._get_df()['pools']): + self.definitions['pool_usage']['lines'].append([pool['name'], + pool['name'], + 'absolute']) + self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']), + pool['name'], + 'absolute']) + self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']), + pool['name'], + 'absolute', 1, 1024]) + self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']), + pool['name'], + 'absolute', 1, 1024]) + self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']), + pool['name'], + 'absolute']) + self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']), + pool['name'], + 'absolute']) + + # OSD lines + for osd in sorted(self._get_osd_df()['nodes']): + self.definitions['osd_usage']['lines'].append([osd['name'], + osd['name'], + 'absolute']) + self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']), + osd['name'], + 'absolute']) + self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']), + osd['name'], + 'absolute']) + + def get_data(self): + """ + Catch all ceph data + :return: dict + """ + try: + data = {} + df = self._get_df() + osd_df = self._get_osd_df() + osd_perf = self._get_osd_perf() + pool_stats = self._get_osd_pool_stats() + data.update(self._get_general(osd_perf, pool_stats)) + for pool in df['pools']: + data.update(self._get_pool_usage(pool)) + 
data.update(self._get_pool_objects(pool)) + for pool_io in pool_stats: + data.update(self._get_pool_rw(pool_io)) + for osd in osd_df['nodes']: + data.update(self._get_osd_usage(osd)) + for osd_apply_commit in osd_perf['osd_perf_infos']: + data.update(self._get_osd_latency(osd_apply_commit)) + return data + except (ValueError, AttributeError) as error: + self.error(error) + return None + + def _get_general(self, osd_perf, pool_stats): + """ + Get ceph's general usage + :return: dict + """ + status = self.cluster.get_cluster_stats() + read_bytes_sec = 0 + write_bytes_sec = 0 + read_op_per_sec = 0 + write_op_per_sec = 0 + apply_latency = 0 + commit_latency = 0 + + for pool_rw_io_b in pool_stats: + read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0) + write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0) + read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0) + write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0) + for perf in osd_perf['osd_perf_infos']: + apply_latency += perf['perf_stats']['apply_latency_ms'] + commit_latency += perf['perf_stats']['commit_latency_ms'] + + return { + 'general_usage': int(status['kb_used']), + 'general_available': int(status['kb_avail']), + 'general_objects': int(status['num_objects']), + 'general_read_bytes': read_bytes_sec, + 'general_write_bytes': write_bytes_sec, + 'general_read_operations': read_op_per_sec, + 'general_write_operations': write_op_per_sec, + 'general_apply_latency': apply_latency, + 'general_commit_latency': commit_latency + } + + @staticmethod + def _get_pool_usage(pool): + """ + Process raw data into pool usage dict information + :return: A pool dict with pool name's key and usage bytes' value + """ + return {pool['name']: pool['stats']['kb_used']} + + @staticmethod + def _get_pool_objects(pool): + """ + Process raw data into pool usage dict information + :return: A pool dict with pool name's key and object numbers + """ + return {'obj_{0}'.format(pool['name']): pool['stats']['objects']} + + @staticmethod + def _get_pool_rw(pool): + """ + Get read/write kb and operations in a pool + :return: A pool dict with both read/write bytes and operations. 
+ """ + return { + 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)), + 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)), + 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)), + 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0)) + } + + @staticmethod + def _get_osd_usage(osd): + """ + Process raw data into osd dict information to get osd usage + :return: A osd dict with osd name's key and usage bytes' value + """ + return {osd['name']: float(osd['kb_used'])} + + @staticmethod + def _get_osd_latency(osd): + """ + Get ceph osd apply and commit latency + :return: A osd dict with osd name's key with both apply and commit latency values + """ + return { + 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'], + 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms'] + } + + def _get_df(self): + """ + Get ceph df output + :return: ceph df --format json + """ + return json.loads(self.cluster.mon_command(json.dumps({ + 'prefix': 'df', + 'format': 'json' + }), '')[1]) + + def _get_osd_df(self): + """ + Get ceph osd df output + :return: ceph osd df --format json + """ + return json.loads(self.cluster.mon_command(json.dumps({ + 'prefix': 'osd df', + 'format': 'json' + }), '')[1]) + + def _get_osd_perf(self): + """ + Get ceph osd performance + :return: ceph osd perf --format json + """ + return json.loads(self.cluster.mon_command(json.dumps({ + 'prefix': 'osd perf', + 'format': 'json' + }), '')[1]) + + def _get_osd_pool_stats(self): + """ + Get ceph osd pool status. + This command is used to get information about both + read/write operation and bytes per second on each pool + :return: ceph osd pool stats --format json + """ + return json.loads(self.cluster.mon_command(json.dumps({ + 'prefix': 'osd pool stats', + 'format': 'json' + }), '')[1]) diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf new file mode 100644 index 000000000..78ac1e251 --- /dev/null +++ b/collectors/python.d.plugin/ceph/ceph.conf @@ -0,0 +1,75 @@ +# netdata python.d.plugin configuration for ceph stats +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 10 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. 
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 10        # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, the ceph plugin also supports the following:
+#
+# config_file: 'config_file'    # Ceph config file.
+# keyring_file: 'keyring_file'  # Ceph keyring file. The netdata user must be added into the ceph group
+#                               # and the keyring file must be readable by that group.
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+config_file: '/etc/ceph/ceph.conf'
+keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+
diff --git a/collectors/python.d.plugin/chrony/Makefile.inc b/collectors/python.d.plugin/chrony/Makefile.inc
new file mode 100644
index 000000000..18a805b12
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += chrony/chrony.chart.py
+dist_pythonconfig_DATA += chrony/chrony.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += chrony/README.md chrony/Makefile.inc
+
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
new file mode 100644
index 000000000..30636fe77
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -0,0 +1,31 @@
+# chrony
+
+This module monitors the precision and statistics of a local chronyd server.
+
+It produces:
+
+* frequency
+* last offset
+* RMS offset
+* residual freq
+* root delay
+* root dispersion
+* skew
+* system time
+
+**Requirements:**
+Verify that the netdata user can execute `chronyc tracking`. If
+necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
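+
+A sketch of one common way to do this (verify the exact directive
+against the chrony documentation for your version) is to allow command
+access from the local host in `/etc/chrony.conf`:
+
+```
+cmdallow 127.0.0.1
+```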
+ +### Configuration + +Sample: +```yaml +# data collection frequency: +update_every: 1 + +# chrony query command: +local: + command: 'chronyc -n tracking' +``` + +--- diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py new file mode 100644 index 000000000..fd01d4e85 --- /dev/null +++ b/collectors/python.d.plugin/chrony/chrony.chart.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# Description: chrony netdata python.d module +# Author: Dominik Schloesser (domschl) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.FrameworkServices.ExecutableService import ExecutableService + +# default module values (can be overridden per job in `config`) +update_every = 5 +priority = 60000 +retries = 10 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew'] + +CHARTS = { + 'system': { + 'options': [None, 'Chrony System Time Deltas', 'microseconds', 'system', 'chrony.system', 'area'], + 'lines': [ + ['timediff', 'system time', 'absolute', 1, 1000] + ] + }, + 'offsets': { + 'options': [None, 'Chrony System Time Offsets', 'microseconds', 'system', 'chrony.offsets', 'area'], + 'lines': [ + ['lastoffset', 'last offset', 'absolute', 1, 1000], + ['rmsoffset', 'RMS offset', 'absolute', 1, 1000] + ] + }, + 'stratum': { + 'options': [None, 'Chrony Stratum', 'stratum', 'root', 'chrony.stratum', 'line'], + 'lines': [ + ['stratum', None, 'absolute', 1, 1] + ] + }, + 'root': { + 'options': [None, 'Chrony Root Delays', 'milliseconds', 'root', 'chrony.root', 'line'], + 'lines': [ + ['rootdelay', 'delay', 'absolute', 1, 1000000], + ['rootdispersion', 'dispersion', 'absolute', 1, 1000000] + ] + }, + 'frequency': { + 'options': [None, 'Chrony Frequency', 'ppm', 'frequencies', 'chrony.frequency', 'area'], + 'lines': [ + ['frequency', None, 'absolute', 1, 1000] + ] + }, + 'residualfreq': { + 'options': [None, 'Chrony Residual frequency', 'ppm', 'frequencies', 'chrony.residualfreq', 'area'], + 'lines': [ + ['residualfreq', 'residual frequency', 'absolute', 1, 1000] + ] + }, + 'skew': { + 'options': [None, 'Chrony Skew, error bound on frequency', 'ppm', 'frequencies', 'chrony.skew', 'area'], + 'lines': [ + ['skew', None, 'absolute', 1, 1000] + ] + } +} + +CHRONY = [ + ('Frequency', 'frequency', 1e3), + ('Last offset', 'lastoffset', 1e9), + ('RMS offset', 'rmsoffset', 1e9), + ('Residual freq', 'residualfreq', 1e3), + ('Root delay', 'rootdelay', 1e9), + ('Root dispersion', 'rootdispersion', 1e9), + ('Skew', 'skew', 1e3), + ('Stratum', 'stratum', 1), + ('System time', 'timediff', 1e9) +] + + +class Service(ExecutableService): + def __init__(self, configuration=None, name=None): + ExecutableService.__init__( + self, configuration=configuration, name=name) + self.command = 'chronyc -n tracking' + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + """ + Format data received from shell command + :return: dict + """ + raw_data = self._get_raw_data() + if not raw_data: + return None + + raw_data = (line.split(':', 1) for line in raw_data) + parsed, data = dict(), dict() + + for line in raw_data: + try: + key, value = (l.strip() for l in line) + except ValueError: + continue + if value: + parsed[key] = value.split()[0] + + for key, dim_id, multiplier in CHRONY: + try: + data[dim_id] = int(float(parsed[key]) * multiplier) + except (KeyError, ValueError): + continue + + return data or None diff --git 
a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
new file mode 100644
index 000000000..9ac906b5f
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for chrony
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, chrony also supports the following:
+#
+# command: 'chronyc tracking'  # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED chrony CONFIGURATION
+#
+# netdata will query chrony as user netdata.
+# verify that user netdata is allowed to call 'chronyc tracking' +# Check cmdallow in chrony.conf +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS + +local: + command: 'chronyc -n tracking' diff --git a/collectors/python.d.plugin/couchdb/Makefile.inc b/collectors/python.d.plugin/couchdb/Makefile.inc new file mode 100644 index 000000000..89dfb51c7 --- /dev/null +++ b/collectors/python.d.plugin/couchdb/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += couchdb/couchdb.chart.py +dist_pythonconfig_DATA += couchdb/couchdb.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += couchdb/README.md couchdb/Makefile.inc + diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md new file mode 100644 index 000000000..eff8c0810 --- /dev/null +++ b/collectors/python.d.plugin/couchdb/README.md @@ -0,0 +1,35 @@ +# couchdb + +This module monitors vital statistics of a local Apache CouchDB 2.x server, including: + +* Overall server reads/writes +* HTTP traffic breakdown + * Request methods (`GET`, `PUT`, `POST`, etc.) + * Response status codes (`200`, `201`, `4xx`, etc.) +* Active server tasks +* Replication status (CouchDB 2.1 and up only) +* Erlang VM stats +* Optional per-database statistics: sizes, # of docs, # of deleted docs + +### Configuration + +Sample for a local server running on port 5984: +```yaml +local: + user: 'admin' + pass: 'password' + node: 'couchdb@127.0.0.1' +``` + +Be sure to specify a correct admin-level username and password. + +You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server. + +If you want per-database statistics, these need to be added to the configuration, separated by spaces: +```yaml +local: + ... + databases: 'db1 db2 db3 ...' 
+``` + +--- diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py new file mode 100644 index 000000000..5d6b9916f --- /dev/null +++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- +# Description: couchdb netdata python.d module +# Author: wohali +# Thanks to l2isbad for good examples :) +# SPDX-License-Identifier: GPL-3.0-or-later + +from collections import namedtuple, defaultdict +from json import loads +from threading import Thread +from socket import gethostbyname, gaierror +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from bases.FrameworkServices.UrlService import UrlService + +# default module values (can be overridden per job in `config`) +update_every = 1 +priority = 60000 +retries = 60 + +METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) + +OVERVIEW_STATS = [ + 'couchdb.database_reads.value', + 'couchdb.database_writes.value', + 'couchdb.httpd.view_reads.value', + 'couchdb.httpd_request_methods.COPY.value', + 'couchdb.httpd_request_methods.DELETE.value', + 'couchdb.httpd_request_methods.GET.value', + 'couchdb.httpd_request_methods.HEAD.value', + 'couchdb.httpd_request_methods.OPTIONS.value', + 'couchdb.httpd_request_methods.POST.value', + 'couchdb.httpd_request_methods.PUT.value', + 'couchdb.httpd_status_codes.200.value', + 'couchdb.httpd_status_codes.201.value', + 'couchdb.httpd_status_codes.202.value', + 'couchdb.httpd_status_codes.204.value', + 'couchdb.httpd_status_codes.206.value', + 'couchdb.httpd_status_codes.301.value', + 'couchdb.httpd_status_codes.302.value', + 'couchdb.httpd_status_codes.304.value', + 'couchdb.httpd_status_codes.400.value', + 'couchdb.httpd_status_codes.401.value', + 'couchdb.httpd_status_codes.403.value', + 'couchdb.httpd_status_codes.404.value', + 'couchdb.httpd_status_codes.405.value', + 'couchdb.httpd_status_codes.406.value', + 'couchdb.httpd_status_codes.409.value', + 'couchdb.httpd_status_codes.412.value', + 'couchdb.httpd_status_codes.413.value', + 'couchdb.httpd_status_codes.414.value', + 'couchdb.httpd_status_codes.415.value', + 'couchdb.httpd_status_codes.416.value', + 'couchdb.httpd_status_codes.417.value', + 'couchdb.httpd_status_codes.500.value', + 'couchdb.httpd_status_codes.501.value', + 'couchdb.open_os_files.value', + 'couch_replicator.jobs.running.value', + 'couch_replicator.jobs.pending.value', + 'couch_replicator.jobs.crashed.value', +] + +SYSTEM_STATS = [ + 'context_switches', + 'run_queue', + 'ets_table_count', + 'reductions', + 'memory.atom', + 'memory.atom_used', + 'memory.binary', + 'memory.code', + 'memory.ets', + 'memory.other', + 'memory.processes', + 'io_input', + 'io_output', + 'os_proc_count', + 'process_count', + 'internal_replication_jobs' +] + +DB_STATS = [ + 'doc_count', + 'doc_del_count', + 'sizes.file', + 'sizes.external', + 'sizes.active' +] + +ORDER = [ + 'activity', + 'request_methods', + 'response_codes', + 'active_tasks', + 'replicator_jobs', + 'open_files', + 'db_sizes_file', + 'db_sizes_external', + 'db_sizes_active', + 'db_doc_counts', + 'db_doc_del_counts', + 'erlang_memory', + 'erlang_proc_counts', + 'erlang_peak_msg_queue', + 'erlang_reductions' +] + +CHARTS = { + 'activity': { + 'options': [None, 'Overall Activity', 'req/s', + 'dbactivity', 'couchdb.activity', 'stacked'], + 'lines': [ + ['couchdb_database_reads', 'DB reads', 'incremental'], + ['couchdb_database_writes', 'DB writes', 'incremental'], + ['couchdb_httpd_view_reads', 'View reads', 
'incremental'] + ] + }, + 'request_methods': { + 'options': [None, 'HTTP request methods', 'req/s', + 'httptraffic', 'couchdb.request_methods', + 'stacked'], + 'lines': [ + ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'], + ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'], + ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'], + ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'], + ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS', + 'incremental'], + ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'], + ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental'] + ] + }, + 'response_codes': { + 'options': [None, 'HTTP response status codes', 'resp/s', + 'httptraffic', 'couchdb.response_codes', + 'stacked'], + 'lines': [ + ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'], + ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'], + ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'], + ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success', + 'incremental'], + ['couchdb_httpd_status_codes_3xx', '3xx Redirection', + 'incremental'], + ['couchdb_httpd_status_codes_4xx', '4xx Client error', + 'incremental'], + ['couchdb_httpd_status_codes_5xx', '5xx Server error', + 'incremental'] + ] + }, + 'open_files': { + 'options': [None, 'Open files', 'files', + 'ops', 'couchdb.open_files', 'line'], + 'lines': [ + ['couchdb_open_os_files', '# files', 'absolute'] + ] + }, + 'active_tasks': { + 'options': [None, 'Active task breakdown', 'tasks', + 'ops', 'couchdb.active_tasks', 'stacked'], + 'lines': [ + ['activetasks_indexer', 'Indexer', 'absolute'], + ['activetasks_database_compaction', 'DB Compaction', 'absolute'], + ['activetasks_replication', 'Replication', 'absolute'], + ['activetasks_view_compaction', 'View Compaction', 'absolute'] + ] + }, + 'replicator_jobs': { + 'options': [None, 'Replicator job breakdown', 'jobs', + 'ops', 'couchdb.replicator_jobs', 'stacked'], + 'lines': [ + ['couch_replicator_jobs_running', 'Running', 'absolute'], + ['couch_replicator_jobs_pending', 'Pending', 'absolute'], + ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'], + ['internal_replication_jobs', 'Internal replication jobs', + 'absolute'] + ] + }, + 'erlang_memory': { + 'options': [None, 'Erlang VM memory usage', 'bytes', + 'erlang', 'couchdb.erlang_vm_memory', 'stacked'], + 'lines': [ + ['memory_atom', 'atom', 'absolute'], + ['memory_binary', 'binaries', 'absolute'], + ['memory_code', 'code', 'absolute'], + ['memory_ets', 'ets', 'absolute'], + ['memory_processes', 'procs', 'absolute'], + ['memory_other', 'other', 'absolute'] + ] + }, + 'erlang_reductions': { + 'options': [None, 'Erlang reductions', 'count', + 'erlang', 'couchdb.reductions', 'line'], + 'lines': [ + ['reductions', 'reductions', 'incremental'] + ] + }, + 'erlang_proc_counts': { + 'options': [None, 'Process counts', 'count', + 'erlang', 'couchdb.proccounts', 'line'], + 'lines': [ + ['os_proc_count', 'OS procs', 'absolute'], + ['process_count', 'erl procs', 'absolute'] + ] + }, + 'erlang_peak_msg_queue': { + 'options': [None, 'Peak message queue size', 'count', + 'erlang', 'couchdb.peakmsgqueue', + 'line'], + 'lines': [ + ['peak_msg_queue', 'peak size', 'absolute'] + ] + }, + # Lines for the following are added as part of check() + 'db_sizes_file': { + 'options': [None, 'Database sizes (file)', 'KB', + 'perdbstats', 'couchdb.db_sizes_file', 'line'], + 'lines': [] + }, + 'db_sizes_external': { + 'options': [None, 'Database sizes (external)', 'KB', + 
'perdbstats', 'couchdb.db_sizes_external', 'line'], + 'lines': [] + }, + 'db_sizes_active': { + 'options': [None, 'Database sizes (active)', 'KB', + 'perdbstats', 'couchdb.db_sizes_active', 'line'], + 'lines': [] + }, + 'db_doc_counts': { + 'options': [None, 'Database # of docs', 'docs', + 'perdbstats', 'couchdb_db_doc_count', 'line'], + 'lines': [] + }, + 'db_doc_del_counts': { + 'options': [None, 'Database # of deleted docs', 'docs', + 'perdbstats', 'couchdb_db_doc_del_count', 'line'], + 'lines': [] + } +} + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.host = self.configuration.get('host', '127.0.0.1') + self.port = self.configuration.get('port', 5984) + self.node = self.configuration.get('node', 'couchdb@127.0.0.1') + self.scheme = self.configuration.get('scheme', 'http') + self.user = self.configuration.get('user') + self.password = self.configuration.get('pass') + try: + self.dbs = self.configuration.get('databases').split(' ') + except (KeyError, AttributeError): + self.dbs = [] + + def check(self): + if not (self.host and self.port): + self.error('Host is not defined in the module configuration file') + return False + try: + self.host = gethostbyname(self.host) + except gaierror as error: + self.error(str(error)) + return False + self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme, + host=self.host, + port=self.port) + stats = self.url + '/_node/{node}/_stats'.format(node=self.node) + active_tasks = self.url + '/_active_tasks' + system = self.url + '/_node/{node}/_system'.format(node=self.node) + self.methods = [METHODS(get_data=self._get_overview_stats, + url=stats, + stats=OVERVIEW_STATS), + METHODS(get_data=self._get_active_tasks_stats, + url=active_tasks, + stats=None), + METHODS(get_data=self._get_overview_stats, + url=system, + stats=SYSTEM_STATS), + METHODS(get_data=self._get_dbs_stats, + url=self.url, + stats=DB_STATS)] + # must initialise manager before using _get_raw_data + self._manager = self._build_manager() + self.dbs = [db for db in self.dbs + if self._get_raw_data(self.url + '/' + db)] + for db in self.dbs: + self.definitions['db_sizes_file']['lines'].append( + ['db_'+db+'_sizes_file', db, 'absolute', 1, 1000] + ) + self.definitions['db_sizes_external']['lines'].append( + ['db_'+db+'_sizes_external', db, 'absolute', 1, 1000] + ) + self.definitions['db_sizes_active']['lines'].append( + ['db_'+db+'_sizes_active', db, 'absolute', 1, 1000] + ) + self.definitions['db_doc_counts']['lines'].append( + ['db_'+db+'_doc_count', db, 'absolute'] + ) + self.definitions['db_doc_del_counts']['lines'].append( + ['db_'+db+'_doc_del_count', db, 'absolute'] + ) + return UrlService.check(self) + + def _get_data(self): + threads = list() + queue = Queue() + result = dict() + + for method in self.methods: + th = Thread(target=method.get_data, + args=(queue, method.url, method.stats)) + th.start() + threads.append(th) + + for thread in threads: + thread.join() + result.update(queue.get()) + + # self.info('couchdb result = ' + str(result)) + return result or None + + def _get_overview_stats(self, queue, url, stats): + raw_data = self._get_raw_data(url) + if not raw_data: + return queue.put(dict()) + data = loads(raw_data) + to_netdata = self._fetch_data(raw_data=data, metrics=stats) + if 'message_queues' in data: + to_netdata['peak_msg_queue'] = get_peak_msg_queue(data) + return queue.put(to_netdata) + + def 
_get_active_tasks_stats(self, queue, url, _): + taskdict = defaultdict(int) + taskdict["activetasks_indexer"] = 0 + taskdict["activetasks_database_compaction"] = 0 + taskdict["activetasks_replication"] = 0 + taskdict["activetasks_view_compaction"] = 0 + raw_data = self._get_raw_data(url) + if not raw_data: + return queue.put(dict()) + data = loads(raw_data) + for task in data: + taskdict["activetasks_" + task["type"]] += 1 + return queue.put(dict(taskdict)) + + def _get_dbs_stats(self, queue, url, stats): + to_netdata = {} + for db in self.dbs: + raw_data = self._get_raw_data(url + '/' + db) + if not raw_data: + continue + data = loads(raw_data) + for metric in stats: + value = data + metrics_list = metric.split('.') + try: + for m in metrics_list: + value = value[m] + except KeyError as e: + self.debug('cannot process ' + metric + ' for ' + db + + ": " + str(e)) + continue + metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list)) + to_netdata[metric_name] = value + return queue.put(to_netdata) + + def _fetch_data(self, raw_data, metrics): + data = dict() + for metric in metrics: + value = raw_data + metrics_list = metric.split('.') + try: + for m in metrics_list: + value = value[m] + except KeyError as e: + self.debug('cannot process ' + metric + ': ' + str(e)) + continue + # strip off .value from end of stat + if metrics_list[-1] == 'value': + metrics_list = metrics_list[:-1] + # sum up 3xx/4xx/5xx + if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \ + int(metrics_list[2]) > 202: + metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100) + if '_'.join(metrics_list) in data: + data['_'.join(metrics_list)] += value + else: + data['_'.join(metrics_list)] = value + else: + data['_'.join(metrics_list)] = value + return data + + +def get_peak_msg_queue(data): + maxsize = 0 + queues = data['message_queues'] + for queue in iter(queues.values()): + if isinstance(queue, dict) and 'count' in queue: + value = queue['count'] + elif isinstance(queue, int): + value = queue + else: + continue + maxsize = max(maxsize, value) + return maxsize diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf new file mode 100644 index 000000000..5f6e75cff --- /dev/null +++ b/collectors/python.d.plugin/couchdb/couchdb.conf @@ -0,0 +1,91 @@ +# netdata python.d.plugin configuration for couchdb +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# By default, CouchDB only updates its stats every 10 seconds. +update_every: 10 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. 
+# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, the couchdb plugin also supports the following: +# +# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1 +# port: 'port' # CouchDB port. Default: 5984 +# scheme: 'scheme' # http or https. Default: http +# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as -name vm.args argument. +# +# if the URL is password protected, the following are supported: +# +# user: 'username' +# pass: 'password' +# +# if db-specific stats are desired, place their names in databases: +# databases: 'npm-registry animaldb' +# +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) +# +localhost: + name: 'local' + host: '127.0.0.1' + port: '5984' + node: 'couchdb@127.0.0.1' + scheme: 'http' +# user: 'admin' +# pass: 'password' diff --git a/collectors/python.d.plugin/cpufreq/Makefile.inc b/collectors/python.d.plugin/cpufreq/Makefile.inc new file mode 100644 index 000000000..d6138801d --- /dev/null +++ b/collectors/python.d.plugin/cpufreq/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += cpufreq/cpufreq.chart.py +dist_pythonconfig_DATA += cpufreq/cpufreq.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc + diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md new file mode 100644 index 000000000..33891d59d --- /dev/null +++ b/collectors/python.d.plugin/cpufreq/README.md @@ -0,0 +1,30 @@ +# cpufreq + +This module shows the current CPU frequency as set by the cpufreq kernel +module. + +**Requirement:** +You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT` +enabled in your kernel. + +This module tries to read from one of two possible locations. On +initialization, it tries to read the `time_in_state` files provided by +cpufreq\_stats.
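+A minimal standalone sketch of that aggregation (not the module itself; the
+`cpu0` path and the one-second sample window here are only illustrative):
+
+```python
+import time
+
+# each line of time_in_state is "<frequency_khz> <ticks>"
+PATH = '/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state'
+
+def read_ticks(path):
+    with open(path) as f:
+        return {int(freq): int(ticks) for freq, ticks in (line.split() for line in f)}
+
+before = read_ticks(PATH)
+time.sleep(1)
+after = read_ticks(PATH)
+
+# tick-weighted average of the frequencies seen between the two samples
+deltas = {freq: after[freq] - before.get(freq, 0) for freq in after}
+total = sum(deltas.values())
+if total:
+    avg_khz = sum(freq * ticks for freq, ticks in deltas.items()) / total
+    print('average frequency: %.0f MHz' % (avg_khz / 1000))
+```
+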
If the `time_in_state` file does not exist, or doesn't contain valid data, the +module falls back to using the more inaccurate `scaling_cur_freq` file (which only +represents the **current** CPU frequency, and doesn't account for any state +changes which happen between updates). + +It produces one chart with multiple lines (one line per core). + +### configuration + +Sample: + +```yaml +sys_dir: "/sys/devices" +``` + +If no configuration is given, the module searches for cpufreq files in the `/sys/devices` directory. +The directory is also prefixed with `NETDATA_HOST_PREFIX`, if specified. + +--- diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py new file mode 100644 index 000000000..cbbab6d7f --- /dev/null +++ b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +# Description: cpufreq netdata python.d module +# Author: Pawel Krupa (paulfantom) +# Author: Steven Noonan (tycho) +# SPDX-License-Identifier: GPL-3.0-or-later + +import glob +import os + +from bases.FrameworkServices.SimpleService import SimpleService + +# default module values (can be overridden per job in `config`) +# update_every = 2 + +ORDER = ['cpufreq'] + +CHARTS = { + 'cpufreq': { + 'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'], + 'lines': [ + # lines are created dynamically in `check()` method + ] + } +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + prefix = os.getenv('NETDATA_HOST_PREFIX', "") + if prefix.endswith('/'): + prefix = prefix[:-1] + self.sys_dir = prefix + "/sys/devices" + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.fake_name = 'cpu' + self.assignment = {} + self.accurate_exists = True + self.accurate_last = {} + + def _get_data(self): + data = {} + + if self.accurate_exists: + accurate_ok = True + + for name, paths in self.assignment.items(): + last = self.accurate_last[name] + + current = {} + deltas = {} + ticks_since_last = 0 + + for line in open(paths['accurate'], 'r'): + line = list(map(int, line.split())) + current[line[0]] = line[1] + ticks = line[1] - last.get(line[0], 0) + ticks_since_last += ticks + deltas[line[0]] = line[1] - last.get(line[0], 0) + + avg_freq = 0 + if ticks_since_last != 0: + for frequency, ticks in deltas.items(): + avg_freq += frequency * ticks + avg_freq /= ticks_since_last + + data[name] = avg_freq + self.accurate_last[name] = current + if avg_freq == 0 or ticks_since_last == 0: + # Delta is either too large or nonexistent, fall back to + # less accurate reading. This can happen if we switch + # to/from the 'schedutil' governor, which doesn't report + # stats. + accurate_ok = False + + if accurate_ok: + return data + + for name, paths in self.assignment.items(): + data[name] = open(paths['inaccurate'], 'r').read() + + return data + + def check(self): + try: + self.sys_dir = str(self.configuration['sys_dir']) + except (KeyError, TypeError): + self.error("No path specified. 
Using: '" + self.sys_dir + "'") + + for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/stats/time_in_state'): + path_elem = path.split('/') + cpu = path_elem[-4] + if cpu not in self.assignment: + self.assignment[cpu] = {} + self.assignment[cpu]['accurate'] = path + self.accurate_last[cpu] = {} + + if not self.assignment: + self.accurate_exists = False + + for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'): + path_elem = path.split('/') + cpu = path_elem[-3] + if cpu not in self.assignment: + self.assignment[cpu] = {} + self.assignment[cpu]['inaccurate'] = path + + if not self.assignment: + self.error("couldn't find a method to read cpufreq statistics") + return False + + for name in sorted(self.assignment, key=lambda v: int(v[3:])): + self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000]) + + return True diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf new file mode 100644 index 000000000..0890245d9 --- /dev/null +++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf @@ -0,0 +1,43 @@ +# netdata python.d.plugin configuration for cpufreq +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. 
+# autodetection_retry: 0 + +# The directory to search for the file scaling_cur_freq +sys_dir: "/sys/devices" diff --git a/collectors/python.d.plugin/cpuidle/Makefile.inc b/collectors/python.d.plugin/cpuidle/Makefile.inc new file mode 100644 index 000000000..66c47d3cf --- /dev/null +++ b/collectors/python.d.plugin/cpuidle/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += cpuidle/cpuidle.chart.py +dist_pythonconfig_DATA += cpuidle/cpuidle.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += cpuidle/README.md cpuidle/Makefile.inc + diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md new file mode 100644 index 000000000..495169638 --- /dev/null +++ b/collectors/python.d.plugin/cpuidle/README.md @@ -0,0 +1,11 @@ +# cpuidle + +This module monitors the usage of CPU idle states. + +**Requirement:** +Your kernel needs to have `CONFIG_CPU_IDLE` enabled. + +It produces one stacked chart per CPU, showing the percentage of time spent in +each state. + +--- diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py new file mode 100644 index 000000000..feac025bf --- /dev/null +++ b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# Description: cpuidle netdata python.d module +# Author: Steven Noonan (tycho) +# SPDX-License-Identifier: GPL-3.0-or-later + +import ctypes +import glob +import os +import platform + +from bases.FrameworkServices.SimpleService import SimpleService + +syscall = ctypes.CDLL('libc.so.6').syscall + +# default module values (can be overridden per job in `config`) +# update_every = 2 + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + prefix = os.getenv('NETDATA_HOST_PREFIX', "") + if prefix.endswith('/'): + prefix = prefix[:-1] + self.sys_dir = prefix + "/sys/devices/system/cpu" + self.schedstat_path = prefix + "/proc/schedstat" + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = [] + self.definitions = {} + self.fake_name = 'cpu' + self.assignment = {} + self.last_schedstat = None + + @staticmethod + def __gettid(): + # This is horrendous. We need the *thread id* (not the *process id*), + # but there's no Python standard library way of doing that. If you need + # to enable this module on a non-x86 machine type, you'll have to find + # the Linux syscall number for gettid() and add it to the dictionary + # below. + syscalls = { + 'i386': 224, + 'x86_64': 186, + } + if platform.machine() not in syscalls: + return None + tid = syscall(syscalls[platform.machine()]) + return tid + + def __wake_cpus(self, cpus): + # Requires Python 3.3+. This will "tickle" each CPU to force it to + # update its idle counters. 
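+        # (Without the affinity bounce below, an idle CPU's residency counters
+        # can go stale until it next wakes on its own; _get_data() re-reads
+        # schedstat after calling this to pick up the refreshed values.)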
+ if hasattr(os, 'sched_setaffinity'): + pid = self.__gettid() + save_affinity = os.sched_getaffinity(pid) + for idx in cpus: + os.sched_setaffinity(pid, [idx]) + os.sched_getaffinity(pid) + os.sched_setaffinity(pid, save_affinity) + + def __read_schedstat(self): + cpus = {} + for line in open(self.schedstat_path, 'r'): + if not line.startswith('cpu'): + continue + line = line.rstrip().split() + cpu = line[0] + active_time = line[7] + cpus[cpu] = int(active_time) // 1000 + return cpus + + def _get_data(self): + results = {} + + # Use the kernel scheduler stats to determine how much time was spent + # in C0 (active). + schedstat = self.__read_schedstat() + + # Determine if any of the CPUs are idle. If they are, then we need to + # tickle them in order to update their C-state residency statistics. + if self.last_schedstat is None: + needs_tickle = list(self.assignment.keys()) + else: + needs_tickle = [] + for cpu, active_time in self.last_schedstat.items(): + delta = schedstat[cpu] - active_time + if delta < 1: + needs_tickle.append(cpu) + + if needs_tickle: + # This line is critical for the stats to update. If we don't "tickle" + # idle CPUs, then the counters for those CPUs stop counting. + self.__wake_cpus([int(cpu[3:]) for cpu in needs_tickle]) + + # Re-read schedstat now that we've tickled any idlers. + schedstat = self.__read_schedstat() + + self.last_schedstat = schedstat + + for cpu, metrics in self.assignment.items(): + update_time = schedstat[cpu] + results[cpu + '_active_time'] = update_time + + for metric, path in metrics.items(): + residency = int(open(path, 'r').read()) + results[metric] = residency + + return results + + def check(self): + if self.__gettid() is None: + self.error('Cannot get thread ID. Stats would be completely broken.') + return False + + for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')): + # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name'] + path_elem = path.split('/') + cpu = path_elem[-4] + state = path_elem[-2] + statename = open(path, 'rt').read().rstrip() + + orderid = '%s_cpuidle' % (cpu,) + if orderid not in self.definitions: + self.order.append(orderid) + active_name = '%s_active_time' % (cpu,) + self.definitions[orderid] = { + 'options': [None, 'C-state residency', 'time%', 'cpuidle', 'cpuidle.cpuidle', 'stacked'], + 'lines': [ + [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1], + ], + } + self.assignment[cpu] = {} + + defid = '%s_%s_time' % (orderid, state) + + self.definitions[orderid]['lines'].append( + [defid, statename, 'percentage-of-incremental-row', 1, 1] + ) + + self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time']) + + # Sort order by kernel-specified CPU index + self.order.sort(key=lambda x: int(x.split('_')[0][3:])) + + if not self.definitions: + self.error("couldn't find cstate stats") + return False + + return True diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf new file mode 100644 index 000000000..bc276fcd2 --- /dev/null +++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf @@ -0,0 +1,40 @@ +# netdata python.d.plugin configuration for cpuidle +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). 
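+# Note: as of this version, cpuidle defines no module-specific options;
+# it locates everything it needs under /sys and /proc on its own
+# (honouring NETDATA_HOST_PREFIX), so only the globals below apply.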
+ +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 diff --git a/collectors/python.d.plugin/dns_query_time/Makefile.inc b/collectors/python.d.plugin/dns_query_time/Makefile.inc new file mode 100644 index 000000000..7eca3e0b6 --- /dev/null +++ b/collectors/python.d.plugin/dns_query_time/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += dns_query_time/dns_query_time.chart.py +dist_pythonconfig_DATA += dns_query_time/dns_query_time.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += dns_query_time/README.md dns_query_time/Makefile.inc + diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md new file mode 100644 index 000000000..3703e8aaf --- /dev/null +++ b/collectors/python.d.plugin/dns_query_time/README.md @@ -0,0 +1,10 @@ +# dns_query_time + +This module provides DNS query time statistics. + +**Requirement:** +* `python-dnspython` package + +It produces one aggregate chart or one chart per DNS server, showing the query time. 
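+
+For reference, a minimal sketch of the measurement itself (any reachable
+resolver works; `8.8.8.8` and `example.com` here are only placeholders):
+
+```python
+import dns.message
+import dns.query
+import dns.rdatatype
+
+try:
+    from time import monotonic as time  # Python 3
+except ImportError:
+    from time import time
+
+request = dns.message.make_query('example.com', dns.rdatatype.A)
+start = time()
+dns.query.udp(request, '8.8.8.8', timeout=4)
+print('query time: %d ms' % round((time() - start) * 1000))
+```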
 + +--- diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py new file mode 100644 index 000000000..d3c3db788 --- /dev/null +++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Description: dns_query_time netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +from random import choice +from socket import getaddrinfo, gaierror +from threading import Thread + +try: + from time import monotonic as time +except ImportError: + from time import time + +try: + import dns.exception + import dns.message + import dns.name + import dns.query + import dns.rdatatype + DNS_PYTHON = True +except ImportError: + DNS_PYTHON = False + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from bases.FrameworkServices.SimpleService import SimpleService + + +# default module values (can be overridden per job in `config`) +update_every = 5 +priority = 60000 +retries = 60 + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = list() + self.definitions = dict() + self.timeout = self.configuration.get('response_timeout', 4) + self.aggregate = self.configuration.get('aggregate', True) + self.domains = self.configuration.get('domains') + self.server_list = self.configuration.get('dns_servers') + + def check(self): + if not DNS_PYTHON: + self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py') + return False + + self.timeout = self.timeout if isinstance(self.timeout, int) else 4 + + if not all([self.domains, self.server_list, + isinstance(self.server_list, str), isinstance(self.domains, str)]): + self.error('\'dns_servers\' and \'domains\' can\'t be empty') + return False + else: + self.domains, self.server_list = self.domains.split(), self.server_list.split() + + # collect valid name servers into a new list instead of removing + # entries from the list we are iterating over + valid_nameservers = list() + for ns in self.server_list: + if check_ns(ns): + valid_nameservers.append(ns) + else: + self.info('Bad NS: %s' % ns) + self.server_list = valid_nameservers + if not self.server_list: + return False + + data = self._get_data(timeout=1) + + down_servers = [s for s in data if data[s] == -100] + for down in down_servers: + down = down[3:].replace('_', '.') + self.info('Removed due to no response: %s' % down) + self.server_list.remove(down) + if not self.server_list: + return False + + self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list) + return True + + def _get_data(self, timeout=None): + return dns_request(self.server_list, timeout or self.timeout, self.domains) + + +def dns_request(server_list, timeout, domains): + threads = list() + que = Queue() + result = dict() + + def dns_req(ns, t, q): + domain = dns.name.from_text(choice(domains)) + request = dns.message.make_query(domain, dns.rdatatype.A) + + try: + dns_start = time() + dns.query.udp(request, ns, timeout=t) + dns_end = time() + query_time = round((dns_end - dns_start) * 1000) + q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time}) + except dns.exception.Timeout: + q.put({'_'.join(['ns', ns.replace('.', '_')]): -100}) + + for server in server_list: + th = Thread(target=dns_req, args=(server, timeout, que)) + th.start() + threads.append(th) + + for th in threads: + th.join() + result.update(que.get()) + + return result + + +def check_ns(ns): + try: + return getaddrinfo(ns, 'domain')[0][4][0] + except gaierror: + return False + + +def create_charts(aggregate, server_list): + if 
aggregate: + order = ['dns_group'] + definitions = { + 'dns_group': { + 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'], + 'lines': [] + } + } + for ns in server_list: + definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']) + + return order, definitions + else: + order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list] + definitions = dict() + for ns in server_list: + definitions[''.join(['dns_', ns.replace('.', '_')])] = { + 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'], + 'lines': [ + ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'] + ] + } + return order, definitions diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf new file mode 100644 index 000000000..d32c6db83 --- /dev/null +++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf @@ -0,0 +1,71 @@ +# netdata python.d.plugin configuration for dns_query_time +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, dns_query_time also supports the following: +# +# dns_servers: 'dns servers' # List of dns servers to query +# domains: 'domains' # List of domains +# aggregate: yes/no # Aggregate all servers in one chart or not +# response_timeout: 4 # Dns query response timeout (query = -100 if response time > response_timeout) +# +# ---------------------------------------------------------------------- \ No newline at end of file diff --git a/collectors/python.d.plugin/dnsdist/Makefile.inc b/collectors/python.d.plugin/dnsdist/Makefile.inc new file mode 100644 index 000000000..a53f518f0 --- /dev/null +++ b/collectors/python.d.plugin/dnsdist/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += dnsdist/dnsdist.chart.py +dist_pythonconfig_DATA += dnsdist/dnsdist.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += dnsdist/README.md dnsdist/Makefile.inc + diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md new file mode 100644 index 000000000..b646ae27c --- /dev/null +++ b/collectors/python.d.plugin/dnsdist/README.md @@ -0,0 +1,54 @@ +# dnsdist + +This module monitors dnsdist performance and health metrics. + +The following charts are drawn: + +1. **Response latency** + * latency-slow + * latency100-1000 + * latency50-100 + * latency10-50 + * latency1-10 + * latency0-1 + +2. **Cache performance** + * cache-hits + * cache-misses + +3. **ACL events** + * acl-drops + * rule-drop + * rule-nxdomain + * rule-refused + +4. **Noncompliant data** + * empty-queries + * no-policy + * noncompliant-queries + * noncompliant-responses + +5. **Queries** + * queries + * rdqueries + * empty-queries + +6. 
**Health** + * downstream-send-errors + * downstream-timeouts + * servfail-responses + * trunc-failures + +### configuration + +```yaml +localhost: + name : 'local' + url : 'http://127.0.0.1:5053/jsonstat?command=stats' + user : 'username' + pass : 'password' + header: + X-API-Key: 'dnsdist-api-key' +``` + +--- diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py new file mode 100644 index 000000000..1aff3f803 --- /dev/null +++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-3.0-or-later + +from json import loads + +from bases.FrameworkServices.UrlService import UrlService + + +ORDER = [ + 'queries', + 'queries_dropped', + 'packets_dropped', + 'answers', + 'backend_responses', + 'backend_commerrors', + 'backend_errors', + 'cache', + 'servercpu', + 'servermem', + 'query_latency', + 'query_latency_avg' +] + + +CHARTS = { + 'queries': { + 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'], + 'lines': [ + ['queries', 'all', 'incremental'], + ['rdqueries', 'recursive', 'incremental'], + ['empty-queries', 'empty', 'incremental'] + ] + }, + 'queries_dropped': { + 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'], + 'lines': [ + ['rule-drop', 'rule drop', 'incremental'], + ['dyn-blocked', 'dynamic block', 'incremental'], + ['no-policy', 'no policy', 'incremental'], + ['noncompliant-queries', 'non compliant', 'incremental'] + ] + }, + 'packets_dropped': { + 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'], + 'lines': [ + ['acl-drops', 'acl', 'incremental'] + ] + }, + 'answers': { + 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'], + 'lines': [ + ['self-answered', 'self answered', 'incremental'], + ['rule-nxdomain', 'nxdomain', 'incremental', -1], + ['rule-refused', 'refused', 'incremental', -1], + ['trunc-failures', 'trunc failures', 'incremental', -1] + ] + }, + 'backend_responses': { + 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'], + 'lines': [ + ['responses', 'responses', 'incremental'] + ] + }, + 'backend_commerrors': { + 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'], + 'lines': [ + ['downstream-send-errors', 'send errors', 'incremental'] + ] + }, + 'backend_errors': { + 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'], + 'lines': [ + ['downstream-timeouts', 'timeout', 'incremental'], + ['servfail-responses', 'servfail', 'incremental'], + ['noncompliant-responses', 'non compliant', 'incremental'] + ] + }, + 'cache': { + 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'], + 'lines': [ + ['cache-hits', 'hits', 'incremental'], + ['cache-misses', 'misses', 'incremental', -1] + ] + }, + 'servercpu': { + 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'], + 'lines': [ + ['cpu-sys-msec', 'system state', 'incremental'], + ['cpu-user-msec', 'user state', 'incremental'] + ] + }, + 'servermem': { + 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'], + 'lines': [ + ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576] + ] + }, + 'query_latency': { + 'options': 
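+        # dnsdist's latency0-1 .. latency-slow metrics are cumulative counters;
+        # the 'incremental' algorithm below renders them as per-second rates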
[None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'], + 'lines': [ + ['latency0-1', '1ms', 'incremental'], + ['latency1-10', '10ms', 'incremental'], + ['latency10-50', '50ms', 'incremental'], + ['latency50-100', '100ms', 'incremental'], + ['latency100-1000', '1sec', 'incremental'], + ['latency-slow', 'slow', 'incremental'] + ] + }, + 'query_latency_avg': { + 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency', + 'dnsdist.query_latency_avg', 'line'], + 'lines': [ + ['latency-avg100', '100', 'absolute'], + ['latency-avg1000', '1k', 'absolute'], + ['latency-avg10000', '10k', 'absolute'], + ['latency-avg1000000', '1000k', 'absolute'] + ] + } +} + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + data = self._get_raw_data() + if not data: + return None + + return loads(data) diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf new file mode 100644 index 000000000..aec58b8e1 --- /dev/null +++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf @@ -0,0 +1,85 @@ +# netdata python.d.plugin configuration for dnsdist +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +#update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +#retries: 600000 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +#autodetection_retry: 1 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# +# Additionally to the above, dnsdist also supports the following: +# +# url: 'URL' # the URL to fetch dnsdist performance statistics +# user: 'username' # username for basic auth +# pass: 'password' # password for basic auth +# header: +# X-API-Key: 'Key' # API key +# +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +# localhost: +# name : 'local' +# url : 'http://127.0.0.1:5053/jsonstat?command=stats' +# user : 'username' +# pass : 'password' +# header: +# X-API-Key: 'dnsdist-api-key' + + diff --git a/collectors/python.d.plugin/dockerd/Makefile.inc b/collectors/python.d.plugin/dockerd/Makefile.inc new file mode 100644 index 000000000..b100bc6a1 --- /dev/null +++ b/collectors/python.d.plugin/dockerd/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += dockerd/dockerd.chart.py +dist_pythonconfig_DATA += dockerd/dockerd.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += dockerd/README.md dockerd/Makefile.inc + diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md new file mode 100644 index 000000000..d3f603808 --- /dev/null +++ b/collectors/python.d.plugin/dockerd/README.md @@ -0,0 +1,26 @@ +# dockerd + +This module monitors Docker health metrics. + +**Requirement:** +* `docker` package + +The following charts are drawn: + +1. **running containers** + * count + +2. **healthy containers** + * count + +3. 
**unhealthy containers** + * count + +### configuration + +```yaml + update_every : 1 + priority : 60000 + ``` + +--- diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py new file mode 100644 index 000000000..a0d3d7e65 --- /dev/null +++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Description: docker netdata python.d module +# Author: Kévin Darcel (@tuxity) + +try: + import docker + HAS_DOCKER = True +except ImportError: + HAS_DOCKER = False + +from bases.FrameworkServices.SimpleService import SimpleService + +# default module values (can be overridden per job in `config`) +# update_every = 1 +priority = 60000 +retries = 60 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = [ + 'running_containers', + 'healthy_containers', + 'unhealthy_containers' +] + +CHARTS = { + 'running_containers': { + 'options': [None, 'Number of running containers', 'running containers', 'running containers', + 'docker.running_containers', 'line'], + 'lines': [ + ['running_containers', 'running'] + ] + }, + 'healthy_containers': { + 'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers', + 'docker.healthy_containers', 'line'], + 'lines': [ + ['healthy_containers', 'healthy'] + ] + }, + 'unhealthy_containers': { + 'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers', + 'docker.unhealthy_containers', 'line'], + 'lines': [ + ['unhealthy_containers', 'unhealthy'] + ] + } +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + + def check(self): + if not HAS_DOCKER: + self.error('\'docker\' package is needed to use docker.chart.py') + return False + + self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock')) + + try: + self.client.ping() + except docker.errors.APIError as error: + self.error(error) + return False + + return True + + def get_data(self): + data = dict() + data['running_containers'] = len(self.client.containers.list(sparse=True)) + data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True)) + data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True)) + + return data or None diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf new file mode 100644 index 000000000..5ef17a1f5 --- /dev/null +++ b/collectors/python.d.plugin/dockerd/dockerd.conf @@ -0,0 +1,79 @@ +# netdata python.d.plugin configuration for dockerd health data API +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. 
+# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 10 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, dockerd plugin also supports the following: +# +# url: '<scheme>://<host>:<port>/' +# # http://localhost:8080/health +# +# if the URL is password protected, the following are supported: +# +# user: 'username' +# pass: 'password' +# +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) +# +local: + url: 'unix://var/run/docker.sock' diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc new file mode 100644 index 000000000..fd7d13bbb --- /dev/null +++ b/collectors/python.d.plugin/dovecot/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += dovecot/dovecot.chart.py +dist_pythonconfig_DATA += dovecot/dovecot.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc + diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md new file mode 100644 index 000000000..50950ecc1 --- /dev/null +++ b/collectors/python.d.plugin/dovecot/README.md @@ -0,0 +1,73 @@ +# dovecot + +This module provides statistics from the Dovecot server. +Statistics are taken from the Dovecot socket by executing the `EXPORT global` command. +More information about Dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics). + +**Requirement:** +Dovecot UNIX socket with R/W permissions for the netdata user, or Dovecot with a configured TCP/IP socket. + +The module provides the following charts: + +1. **sessions** + * active sessions + +2. **logins** + * logins + +3. 
**commands** - number of IMAP commands + * commands + +4. **Faults** + * minor + * major + +5. **Context Switches** + * voluntary + * involuntary + +6. **disk** in bytes/s + * read + * write + +7. **bytes** in bytes/s + * read + * write + +8. **number of syscalls** in syscalls/s + * read + * write + +9. **lookups** - number of lookups per second + * path + * attr + +10. **hits** - number of cache hits + * hits + +11. **attempts** - authorization attempts + * success + * failure + +12. **cache** - cached authorization hits + * hit + * miss + +### configuration + +Sample: + +```yaml +localtcpip: + name : 'local' + host : '127.0.0.1' + port : 24242 + +localsocket: + name : 'local' + socket : '/var/run/dovecot/stats' +``` + +If no configuration is given, the module will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`. + +--- diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py new file mode 100644 index 000000000..7fee3bfac --- /dev/null +++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +# Description: dovecot netdata python.d module +# Author: Pawel Krupa (paulfantom) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.FrameworkServices.SocketService import SocketService + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = [ + 'sessions', + 'logins', + 'commands', + 'faults', + 'context_switches', + 'io', + 'net', + 'syscalls', + 'lookup', + 'cache', + 'auth', + 'auth_cache' +] + +CHARTS = { + 'sessions': { + 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'], + 'lines': [ + ['num_connected_sessions', 'active sessions', 'absolute'] + ] + }, + 'logins': { + 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'], + 'lines': [ + ['num_logins', 'logins', 'absolute'] + ] + }, + 'commands': { + 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'], + 'lines': [ + ['num_cmds', 'commands', 'absolute'] + ] + }, + 'faults': { + 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'], + 'lines': [ + ['min_faults', 'minor', 'absolute'], + ['maj_faults', 'major', 'absolute'] + ] + }, + 'context_switches': { + 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'], + 'lines': [ + ['vol_cs', 'voluntary', 'absolute'], + ['invol_cs', 'involuntary', 'absolute'] + ] + }, + 'io': { + 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'], + 'lines': [ + ['disk_input', 'read', 'incremental', 1, 1024], + ['disk_output', 'write', 'incremental', -1, 1024] + ] + }, + 'net': { + 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'], + 'lines': [ + ['read_bytes', 'read', 'incremental', 8, 1024], + ['write_bytes', 'write', 'incremental', -8, 1024] + ] + }, + 'syscalls': { + 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'], + 'lines': [ + ['read_count', 'read', 'incremental'], + ['write_count', 'write', 'incremental'] + ] + }, + 'lookup': { + 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'], + 'lines': [ + ['mail_lookup_path', 'path', 'incremental'], 
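+            # dimension ids are field names from the tab-separated
+            # EXPORT global reply (parsed in _get_data below)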
['mail_lookup_attr', 'attr', 'incremental'] + ] + }, + 'cache': { + 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'], + 'lines': [ + ['mail_cache_hits', 'hits', 'incremental'] + ] + }, + 'auth': { + 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'], + 'lines': [ + ['auth_successes', 'ok', 'absolute'], + ['auth_failures', 'failed', 'absolute'] + ] + }, + 'auth_cache': { + 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'], + 'lines': [ + ['auth_cache_hits', 'hit', 'absolute'], + ['auth_cache_misses', 'miss', 'absolute'] + ] + } +} + + +class Service(SocketService): + def __init__(self, configuration=None, name=None): + SocketService.__init__(self, configuration=configuration, name=name) + self.request = 'EXPORT\tglobal\r\n' + self.host = None # localhost + self.port = None # 24242 + # self._keep_alive = True + self.unix_socket = '/var/run/dovecot/stats' + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + """ + Format data received from socket + :return: dict + """ + try: + raw = self._get_raw_data() + except (ValueError, AttributeError): + return None + + if raw is None: + self.debug('dovecot returned no data') + return None + + data = raw.split('\n')[:2] + desc = data[0].split('\t') + vals = data[1].split('\t') + ret = dict() + for i, _ in enumerate(desc): + try: + ret[str(desc[i])] = int(vals[i]) + except ValueError: + continue + return ret or None diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf new file mode 100644 index 000000000..56c394991 --- /dev/null +++ b/collectors/python.d.plugin/dovecot/dovecot.conf @@ -0,0 +1,96 @@ +# netdata python.d.plugin configuration for dovecot +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. 
This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, dovecot also supports the following: +# +# socket: 'path/to/dovecot/stats' +# +# or +# host: 'IP or HOSTNAME' # the host to connect to +# port: PORT # the port to connect to +# +# + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +localhost: + name : 'local' + host : 'localhost' + port : 24242 + +localipv4: + name : 'local' + host : '127.0.0.1' + port : 24242 + +localipv6: + name : 'local' + host : '::1' + port : 24242 + +localsocket: + name : 'local' + socket : '/var/run/dovecot/stats' + diff --git a/collectors/python.d.plugin/elasticsearch/Makefile.inc b/collectors/python.d.plugin/elasticsearch/Makefile.inc new file mode 100644 index 000000000..15c63c2fa --- /dev/null +++ b/collectors/python.d.plugin/elasticsearch/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += elasticsearch/elasticsearch.chart.py +dist_pythonconfig_DATA += elasticsearch/elasticsearch.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += elasticsearch/README.md elasticsearch/Makefile.inc + diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md new file mode 100644 index 000000000..75e17015b --- /dev/null +++ b/collectors/python.d.plugin/elasticsearch/README.md @@ -0,0 +1,60 @@ +# elasticsearch + +This module monitors Elasticsearch performance and health metrics. + +It produces: + +1. **Search performance** charts: + * Number of queries, fetches + * Time spent on queries, fetches + * Query and fetch latency + +2. **Indexing performance** charts: + * Number of documents indexed, index refreshes, flushes + * Time spent on indexing, refreshing, flushing + * Indexing and flushing latency + +3. **Memory usage and garbage collection** charts: + * JVM heap currently in use, committed + * Count of garbage collections + * Time spent on garbage collections + +4. **Host metrics** charts: + * Available file descriptors in percent + * Opened HTTP connections + * Cluster communication transport metrics + +5. **Queues and rejections** charts: + * Number of queued/rejected threads in thread pool + +6. **Fielddata cache** charts: + * Fielddata cache size + * Fielddata evictions and circuit breaker tripped count + +7. **Cluster health API** charts: + * Cluster status + * Nodes and tasks statistics + * Shards statistics + +8. 
**Cluster stats API** charts: + * Nodes statistics + * Query cache statistics + * Docs statistics + * Store statistics + * Indices and shards statistics + +### configuration + +Sample: + +```yaml +local: + host : 'ipaddress' # Server ip address or hostname + port : 'port' # Port on which elasticsearch listens + cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default. + cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default. +``` + +If no configuration is given, the module will fail to run. + +--- diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py new file mode 100644 index 000000000..3f431f6e0 --- /dev/null +++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py @@ -0,0 +1,644 @@ +# -*- coding: utf-8 -*- +# Description: elastic search node stats netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +import json +import threading + +from collections import namedtuple +from socket import gethostbyname, gaierror + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from bases.FrameworkServices.UrlService import UrlService + +# default module values (can be overridden per job in `config`) +update_every = 5 + +METHODS = namedtuple('METHODS', ['get_data', 'url', 'run']) + +NODE_STATS = [ + 'indices.search.fetch_current', + 'indices.search.fetch_total', + 'indices.search.query_current', + 'indices.search.query_total', + 'indices.search.query_time_in_millis', + 'indices.search.fetch_time_in_millis', + 'indices.indexing.index_total', + 'indices.indexing.index_current', + 'indices.indexing.index_time_in_millis', + 'indices.refresh.total', + 'indices.refresh.total_time_in_millis', + 'indices.flush.total', + 'indices.flush.total_time_in_millis', + 'indices.translog.operations', + 'indices.translog.size_in_bytes', + 'indices.translog.uncommitted_operations', + 'indices.translog.uncommitted_size_in_bytes', + 'indices.segments.count', + 'indices.segments.terms_memory_in_bytes', + 'indices.segments.stored_fields_memory_in_bytes', + 'indices.segments.term_vectors_memory_in_bytes', + 'indices.segments.norms_memory_in_bytes', + 'indices.segments.points_memory_in_bytes', + 'indices.segments.doc_values_memory_in_bytes', + 'indices.segments.index_writer_memory_in_bytes', + 'indices.segments.version_map_memory_in_bytes', + 'indices.segments.fixed_bit_set_memory_in_bytes', + 'jvm.gc.collectors.young.collection_count', + 'jvm.gc.collectors.old.collection_count', + 'jvm.gc.collectors.young.collection_time_in_millis', + 'jvm.gc.collectors.old.collection_time_in_millis', + 'jvm.mem.heap_used_percent', + 'jvm.mem.heap_used_in_bytes', + 'jvm.mem.heap_committed_in_bytes', + 'jvm.buffer_pools.direct.count', + 'jvm.buffer_pools.direct.used_in_bytes', + 'jvm.buffer_pools.direct.total_capacity_in_bytes', + 'jvm.buffer_pools.mapped.count', + 'jvm.buffer_pools.mapped.used_in_bytes', + 'jvm.buffer_pools.mapped.total_capacity_in_bytes', + 'thread_pool.bulk.queue', + 'thread_pool.bulk.rejected', + 'thread_pool.write.queue', + 'thread_pool.write.rejected', + 'thread_pool.index.queue', + 'thread_pool.index.rejected', + 'thread_pool.search.queue', + 'thread_pool.search.rejected', + 'thread_pool.merge.queue', + 'thread_pool.merge.rejected', + 'indices.fielddata.memory_size_in_bytes', + 'indices.fielddata.evictions', + 'breakers.fielddata.tripped', + 'http.current_open', 
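+    # node transport traffic and process file-descriptor metrics follow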
'transport.rx_size_in_bytes', + 'transport.tx_size_in_bytes', + 'process.max_file_descriptors', + 'process.open_file_descriptors' +] + +CLUSTER_STATS = [ + 'nodes.count.data_only', + 'nodes.count.master_data', + 'nodes.count.total', + 'nodes.count.master_only', + 'nodes.count.client', + 'indices.docs.count', + 'indices.query_cache.hit_count', + 'indices.query_cache.miss_count', + 'indices.store.size_in_bytes', + 'indices.count', + 'indices.shards.total' +] + +HEALTH_STATS = [ + 'number_of_nodes', + 'number_of_data_nodes', + 'number_of_pending_tasks', + 'number_of_in_flight_fetch', + 'active_shards', + 'relocating_shards', + 'unassigned_shards', + 'delayed_unassigned_shards', + 'initializing_shards', + 'active_shards_percent_as_number' +] + +LATENCY = { + 'query_latency': { + 'total': 'indices_search_query_total', + 'spent_time': 'indices_search_query_time_in_millis' + }, + 'fetch_latency': { + 'total': 'indices_search_fetch_total', + 'spent_time': 'indices_search_fetch_time_in_millis' + }, + 'indexing_latency': { + 'total': 'indices_indexing_index_total', + 'spent_time': 'indices_indexing_index_time_in_millis' + }, + 'flushing_latency': { + 'total': 'indices_flush_total', + 'spent_time': 'indices_flush_total_time_in_millis' + } +} + +# charts order (can be overridden if you want less charts, or different order) +ORDER = [ + 'search_performance_total', + 'search_performance_current', + 'search_performance_time', + 'search_latency', + 'index_performance_total', + 'index_performance_current', + 'index_performance_time', + 'index_latency', + 'index_translog_operations', + 'index_translog_size', + 'index_segments_count', + 'index_segments_memory_writer', + 'index_segments_memory', + 'jvm_mem_heap', + 'jvm_mem_heap_bytes', + 'jvm_buffer_pool_count', + 'jvm_direct_buffers_memory', + 'jvm_mapped_buffers_memory', + 'jvm_gc_count', + 'jvm_gc_time', + 'host_metrics_file_descriptors', + 'host_metrics_http', + 'host_metrics_transport', + 'thread_pool_queued', + 'thread_pool_rejected', + 'fielddata_cache', + 'fielddata_evictions_tripped', + 'cluster_health_status', + 'cluster_health_nodes', + 'cluster_health_shards', + 'cluster_stats_nodes', + 'cluster_stats_query_cache', + 'cluster_stats_docs', + 'cluster_stats_store', + 'cluster_stats_indices_shards', +] + +CHARTS = { + 'search_performance_total': { + 'options': [None, 'Queries And Fetches', 'number of', 'search performance', + 'elastic.search_performance_total', 'stacked'], + 'lines': [ + ['indices_search_query_total', 'queries', 'incremental'], + ['indices_search_fetch_total', 'fetches', 'incremental'] + ] + }, + 'search_performance_current': { + 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance', + 'elastic.search_performance_current', 'stacked'], + 'lines': [ + ['indices_search_query_current', 'queries', 'absolute'], + ['indices_search_fetch_current', 'fetches', 'absolute'] + ] + }, + 'search_performance_time': { + 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance', + 'elastic.search_performance_time', 'stacked'], + 'lines': [ + ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000], + ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000] + ] + }, + 'search_latency': { + 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'], + 'lines': [ + ['query_latency', 'query', 'absolute', 1, 1000], + ['fetch_latency', 'fetch', 'absolute', 1, 1000] + ] + }, + 'index_performance_total': { + 
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
+                    'indexing performance', 'elastic.index_performance_total', 'stacked'],
+        'lines': [
+            ['indices_indexing_index_total', 'indexed', 'incremental'],
+            ['indices_refresh_total', 'refreshes', 'incremental'],
+            ['indices_flush_total', 'flushes', 'incremental']
+        ]
+    },
+    'index_performance_current': {
+        'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
+                    'indexing performance', 'elastic.index_performance_current', 'stacked'],
+        'lines': [
+            ['indices_indexing_index_current', 'documents', 'absolute']
+        ]
+    },
+    'index_performance_time': {
+        'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
+                    'elastic.index_performance_time', 'stacked'],
+        'lines': [
+            ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
+            ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
+            ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
+        ]
+    },
+    'index_latency': {
+        'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
+                    'elastic.index_latency', 'stacked'],
+        'lines': [
+            ['indexing_latency', 'indexing', 'absolute', 1, 1000],
+            ['flushing_latency', 'flushing', 'absolute', 1, 1000]
+        ]
+    },
+    'index_translog_operations': {
+        'options': [None, 'Translog Operations', 'count', 'translog',
+                    'elastic.index_translog_operations', 'area'],
+        'lines': [
+            ['indices_translog_operations', 'total', 'absolute'],
+            ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
+        ]
+    },
+    'index_translog_size': {
+        'options': [None, 'Translog Size', 'MB', 'translog',
+                    'elastic.index_translog_size', 'area'],
+        'lines': [
+            ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
+            ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
+        ]
+    },
+    'index_segments_count': {
+        'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
+                    'elastic.index_segments_count', 'line'],
+        'lines': [
+            ['indices_segments_count', 'segments', 'absolute']
+        ]
+    },
+    'index_segments_memory_writer': {
+        'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
+                    'elastic.index_segments_memory_writer', 'area'],
+        'lines': [
+            ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
+        ]
+    },
+    'index_segments_memory': {
+        'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
+                    'elastic.index_segments_memory', 'stacked'],
+        'lines': [
+            ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
+            ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048576],
+            ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048576],
+            ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048576],
+            ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048576],
+            ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
+            ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
+            ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
+        ]
+    },
+    'jvm_mem_heap': {
+        'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
+                    'elastic.jvm_heap', 'area'],
+        'lines': [
+            ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
+        ]
+    },
+    'jvm_mem_heap_bytes': {
+        'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
+                    'elastic.jvm_heap_bytes', 'area'],
+        'lines': [
+            ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
+            ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
+        ]
+    },
+    'jvm_buffer_pool_count': {
+        'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
+                    'elastic.jvm_buffer_pool_count', 'line'],
+        'lines': [
+            ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
+            ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
+        ]
+    },
+    'jvm_direct_buffers_memory': {
+        'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
+                    'elastic.jvm_direct_buffers_memory', 'area'],
+        'lines': [
+            ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
+            ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+        ]
+    },
+    'jvm_mapped_buffers_memory': {
+        'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
+                    'elastic.jvm_mapped_buffers_memory', 'area'],
+        'lines': [
+            ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
+            ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+        ]
+    },
+    'jvm_gc_count': {
+        'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
+        'lines': [
+            ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
+            ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
+        ]
+    },
+    'jvm_gc_time': {
+        'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
+                    'elastic.gc_time', 'stacked'],
+        'lines': [
+            ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
+            ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
+        ]
+    },
+    'thread_pool_queued': {
+        'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
+                    'elastic.thread_pool_queued', 'stacked'],
+        'lines': [
+            ['thread_pool_bulk_queue', 'bulk', 'absolute'],
+            ['thread_pool_write_queue', 'write', 'absolute'],
+            ['thread_pool_index_queue', 'index', 'absolute'],
+            ['thread_pool_search_queue', 'search', 'absolute'],
+            ['thread_pool_merge_queue', 'merge', 'absolute']
+        ]
+    },
+    'thread_pool_rejected': {
+        'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
+                    'elastic.thread_pool_rejected', 'stacked'],
+        'lines': [
+            ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+            ['thread_pool_write_rejected', 'write', 'absolute'],
+            ['thread_pool_index_rejected', 'index', 'absolute'],
+            ['thread_pool_search_rejected', 'search', 'absolute'],
+            ['thread_pool_merge_rejected', 'merge', 'absolute']
+        ]
+    },
+    'fielddata_cache': {
+        'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
+        'lines': [
+            ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
+        ]
+    },
+    'fielddata_evictions_tripped': {
+        'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
+                    'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
+        'lines': [
+            ['indices_fielddata_evictions', 'evictions', 'incremental'],
+            ['indices_fielddata_tripped', 'tripped', 'incremental']
+        ]
+    },
+    'cluster_health_nodes': {
+        'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
+                    'elastic.cluster_health_nodes', 'stacked'],
+        'lines': [
+            ['number_of_nodes', 'nodes', 'absolute'],
+            ['number_of_data_nodes', 'data_nodes', 'absolute'],
+            ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+            ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
+        ]
+    },
+    'cluster_health_status': {
+        'options': [None, 'Cluster Status', 'status', 'cluster health API',
+                    'elastic.cluster_health_status', 'area'],
+        'lines': [
+            ['status_green', 'green', 'absolute'],
+            ['status_red', 'red', 'absolute'],
+            ['status_foo1', None, 'absolute'],
+            ['status_foo2', None, 'absolute'],
+            ['status_foo3', None, 'absolute'],
+            ['status_yellow', 'yellow', 'absolute']
+        ]
+    },
+    'cluster_health_shards': {
+        'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
+                    'elastic.cluster_health_shards', 'stacked'],
+        'lines': [
+            ['active_shards', 'active_shards', 'absolute'],
+            ['relocating_shards', 'relocating_shards', 'absolute'],
+            ['unassigned_shards', 'unassigned', 'absolute'],
+            ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
+            ['initializing_shards', 'initializing', 'absolute'],
+            ['active_shards_percent_as_number', 'active_percent', 'absolute']
+        ]
+    },
+    'cluster_stats_nodes': {
+        'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
+                    'elastic.cluster_nodes', 'stacked'],
+        'lines': [
+            ['nodes_count_data_only', 'data_only', 'absolute'],
+            ['nodes_count_master_data', 'master_data', 'absolute'],
+            ['nodes_count_total', 'total', 'absolute'],
+            ['nodes_count_master_only', 'master_only', 'absolute'],
+            ['nodes_count_client', 'client', 'absolute']
+        ]
+    },
+    'cluster_stats_query_cache': {
+        'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
+                    'elastic.cluster_query_cache', 'stacked'],
+        'lines': [
+            ['indices_query_cache_hit_count', 'hit', 'incremental'],
+            ['indices_query_cache_miss_count', 'miss', 'incremental']
+        ]
+    },
+    'cluster_stats_docs': {
+        'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
+                    'elastic.cluster_docs', 'line'],
+        'lines': [
+            ['indices_docs_count', 'docs', 'absolute']
+        ]
+    },
+    'cluster_stats_store': {
+        'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
+                    'elastic.cluster_store', 'line'],
+        'lines': [
+            ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
+        ]
+    },
+    'cluster_stats_indices_shards': {
+        'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
+                    'elastic.cluster_indices_shards', 'stacked'],
+        'lines': [
+            ['indices_count', 'indices', 'absolute'],
+            ['indices_shards_total', 'shards', 'absolute']
+        ]
+    },
+    'host_metrics_transport': {
+        'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
+                    'elastic.host_transport', 'area'],
+        'lines': [
+            ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
+            ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
+        ]
+    },
+    'host_metrics_file_descriptors': {
+        'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
+                    'elastic.host_descriptors', 'area'],
+        'lines': [
+            ['file_descriptors_used', 'used', 'absolute', 1, 10]
+        ]
+    },
+    'host_metrics_http': {
+        'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
+                    'elastic.host_http_connections', 'line'],
+        'lines': [
+            ['http_current_open', 'opened', 'absolute', 1, 1]
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        
self.host = self.configuration.get('host') + self.port = self.configuration.get('port', 9200) + self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'), + host=self.host, + port=self.port) + self.latency = dict() + self.methods = list() + + def check(self): + if not all([self.host, + self.port, + isinstance(self.host, str), + isinstance(self.port, (str, int))]): + self.error('Host is not defined in the module configuration file') + return False + + # Hostname -> ip address + try: + self.host = gethostbyname(self.host) + except gaierror as error: + self.error(str(error)) + return False + + # Create URL for every Elasticsearch API + self.methods = [METHODS(get_data=self._get_node_stats, + url=self.url + '/_nodes/_local/stats', + run=self.configuration.get('node_stats', True)), + METHODS(get_data=self._get_cluster_health, + url=self.url + '/_cluster/health', + run=self.configuration.get('cluster_health', True)), + METHODS(get_data=self._get_cluster_stats, + url=self.url + '/_cluster/stats', + run=self.configuration.get('cluster_stats', True))] + + # Remove disabled API calls from 'avail methods' + return UrlService.check(self) + + def _get_data(self): + threads = list() + queue = Queue() + result = dict() + + for method in self.methods: + if not method.run: + continue + th = threading.Thread(target=method.get_data, + args=(queue, method.url)) + th.start() + threads.append(th) + + for thread in threads: + thread.join() + result.update(queue.get()) + + return result or None + + def _get_cluster_health(self, queue, url): + """ + Format data received from http request + :return: dict + """ + + raw_data = self._get_raw_data(url) + + if not raw_data: + return queue.put(dict()) + + data = self.json_reply(raw_data) + + if not data: + return queue.put(dict()) + + to_netdata = fetch_data_(raw_data=data, + metrics=HEALTH_STATS) + + to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0, + 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0}) + current_status = 'status_' + data['status'] + to_netdata[current_status] = 1 + + return queue.put(to_netdata) + + def _get_cluster_stats(self, queue, url): + """ + Format data received from http request + :return: dict + """ + + raw_data = self._get_raw_data(url) + + if not raw_data: + return queue.put(dict()) + + data = self.json_reply(raw_data) + + if not data: + return queue.put(dict()) + + to_netdata = fetch_data_(raw_data=data, + metrics=CLUSTER_STATS) + + return queue.put(to_netdata) + + def _get_node_stats(self, queue, url): + """ + Format data received from http request + :return: dict + """ + + raw_data = self._get_raw_data(url) + + if not raw_data: + return queue.put(dict()) + + data = self.json_reply(raw_data) + + if not data: + return queue.put(dict()) + + node = list(data['nodes'].keys())[0] + to_netdata = fetch_data_(raw_data=data['nodes'][node], + metrics=NODE_STATS) + + # Search, index, flush, fetch performance latency + for key in LATENCY: + try: + to_netdata[key] = self.find_avg(total=to_netdata[LATENCY[key]['total']], + spent_time=to_netdata[LATENCY[key]['spent_time']], + key=key) + except KeyError: + continue + if 'process_open_file_descriptors' in to_netdata and 'process_max_file_descriptors' in to_netdata: + to_netdata['file_descriptors_used'] = round(float(to_netdata['process_open_file_descriptors']) + / to_netdata['process_max_file_descriptors'] * 1000) + + return queue.put(to_netdata) + + def json_reply(self, reply): + try: + return json.loads(reply) + except ValueError as err: + 
self.error(err) + return None + + def find_avg(self, total, spent_time, key): + if key not in self.latency: + self.latency[key] = dict(total=total, + spent_time=spent_time) + return 0 + if self.latency[key]['total'] != total: + latency = float(spent_time - self.latency[key]['spent_time'])\ + / float(total - self.latency[key]['total']) * 1000 + self.latency[key]['total'] = total + self.latency[key]['spent_time'] = spent_time + return latency + self.latency[key]['spent_time'] = spent_time + return 0 + + +def fetch_data_(raw_data, metrics): + data = dict() + for metric in metrics: + value = raw_data + metrics_list = metric.split('.') + try: + for m in metrics_list: + value = value[m] + except KeyError: + continue + data['_'.join(metrics_list)] = value + return data diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf new file mode 100644 index 000000000..213843bf9 --- /dev/null +++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf @@ -0,0 +1,83 @@ +# netdata python.d.plugin configuration for elasticsearch stats +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, elasticsearch plugin also supports the following: +# +# host: 'ipaddress' # Server ip address or hostname. 
+# port: 'port' # Port on which elasticsearch listen. +# cluster_health: False/True # Calls to cluster health elasticsearch API. Enabled by default. +# cluster_stats: False/True # Calls to cluster stats elasticsearch API. Enabled by default. +# +# +# if the URL is password protected, the following are supported: +# +# user: 'username' +# pass: 'password' +# +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) +# +local: + host: '127.0.0.1' + port: '9200' diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc new file mode 100644 index 000000000..1b027d5a7 --- /dev/null +++ b/collectors/python.d.plugin/example/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += example/example.chart.py +dist_pythonconfig_DATA += example/example.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += example/README.md example/Makefile.inc + diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md new file mode 100644 index 000000000..f9f314ac4 --- /dev/null +++ b/collectors/python.d.plugin/example/README.md @@ -0,0 +1 @@ +An example python data collection module. \ No newline at end of file diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py new file mode 100644 index 000000000..85defa4d1 --- /dev/null +++ b/collectors/python.d.plugin/example/example.chart.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Description: example netdata python.d module +# Author: Put your name here (your github login) +# SPDX-License-Identifier: GPL-3.0-or-later + +from random import SystemRandom + +from bases.FrameworkServices.SimpleService import SimpleService + +# default module values +# update_every = 4 +priority = 90000 +retries = 60 + +ORDER = ['random'] +CHARTS = { + 'random': { + 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'], + 'lines': [ + ['random1'] + ] + } +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.random = SystemRandom() + + @staticmethod + def check(): + return True + + def get_data(self): + data = dict() + + for i in range(1, 4): + dimension_id = ''.join(['random', str(i)]) + + if dimension_id not in self.charts['random']: + self.charts['random'].add_dimension([dimension_id]) + + data[dimension_id] = self.random.randint(0, 100) + + return data diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf new file mode 100644 index 000000000..e7fed9b50 --- /dev/null +++ b/collectors/python.d.plugin/example/example.conf @@ -0,0 +1,70 @@ +# netdata python.d.plugin configuration for example +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). 
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, example also supports the following:
+#
+#  - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
new file mode 100644
index 000000000..36ffa56d2
--- /dev/null
+++ b/collectors/python.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += exim/exim.chart.py
+dist_pythonconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 100644
index 000000000..b9a62cad9
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1,13 @@
+# exim
+
+A simple module that executes `exim -bpc` to grab the exim queue length.
+This command can take a lot of time to finish its execution, thus it is not recommended to run it every second.
+
+It produces only one chart:
+
+1. **Exim Queue Emails**
+ * emails
+
+Configuration is not needed.
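+
+For reference, the whole collection boils down to running that command and reading
+back a single integer. A rough standalone sketch of the same idea (this is not the
+module itself, which follows below):
+
+```python
+import subprocess
+
+# `exim -bpc` prints the number of messages currently in the queue.
+out = subprocess.check_output(['exim', '-bpc'])
+print({'emails': int(out.split()[0])})
+```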
+ +--- diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py new file mode 100644 index 000000000..5431dd46b --- /dev/null +++ b/collectors/python.d.plugin/exim/exim.chart.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Description: exim netdata python.d module +# Author: Pawel Krupa (paulfantom) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.FrameworkServices.ExecutableService import ExecutableService + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = ['qemails'] + +CHARTS = { + 'qemails': { + 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'], + 'lines': [ + ['emails', None, 'absolute'] + ] + } +} + + +class Service(ExecutableService): + def __init__(self, configuration=None, name=None): + ExecutableService.__init__(self, configuration=configuration, name=name) + self.command = 'exim -bpc' + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + """ + Format data received from shell command + :return: dict + """ + try: + return {'emails': int(self._get_raw_data()[0])} + except (ValueError, AttributeError): + return None diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf new file mode 100644 index 000000000..2add7b2cb --- /dev/null +++ b/collectors/python.d.plugin/exim/exim.conf @@ -0,0 +1,93 @@ +# netdata python.d.plugin configuration for exim +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# exim is slow, so once every 10 seconds +update_every: 10 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, exim also supports the following:
+#
+#     command: 'exim -bpc'    # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED exim CONFIGURATION
+#
+# netdata will query exim as user netdata.
+# By default exim will refuse to respond.
+#
+# To allow querying exim as non-admin user, please add the following
+# to your exim configuration:
+#
+# queue_list_requires_admin = false
+#
+# Your exim configuration should be in
+#
+# /etc/exim/exim4.conf
+# or
+# /etc/exim4/conf.d/main/000_local_options
+#
+# Please consult your distribution information to find the exact file.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+  command: 'exim -bpc'
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
new file mode 100644
index 000000000..31e117e53
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += fail2ban/fail2ban.chart.py
+dist_pythonconfig_DATA += fail2ban/fail2ban.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += fail2ban/README.md fail2ban/Makefile.inc
+
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 100644
index 000000000..2ab021965
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1,23 @@
+# fail2ban
+
+This module monitors the fail2ban log file to show all bans for all active jails.
+
+**Requirements:**
+ * fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban conf in logrotate.d)
+
+It produces one chart with multiple lines (one line per jail).
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
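+
+Under the hood the module tails the log file and classifies each line with a
+single regular expression. A quick sketch of that matching, using the pattern
+and a sample log line taken from the module source below:
+
+```python
+import re
+
+# the same named-group pattern the module uses to extract (jail, action, ip)
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)')
+
+line = '2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27'
+print(RE_DATA.search(line).groupdict())
+# {'jail': 'ssh', 'action': 'Ban', 'ip': '217.59.246.27'}
+```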
+
+---
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
new file mode 100644
index 000000000..954689008
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import os
+
+from collections import defaultdict
+from glob import glob
+
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER = [
+    'jails_bans',
+    'jails_in_jail',
+]
+
+
+def charts(jails):
+    """
+    Create chart definitions for all detected jails.
+    """
+
+    ch = {
+        ORDER[0]: {
+            'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+            'lines': []
+        },
+        ORDER[1]: {
+            'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+                        'jail.in_jail', 'line'],
+            'lines': []
+        },
+    }
+    for jail in jails:
+        ch[ORDER[0]]['lines'].append([jail, jail, 'incremental'])
+        ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute'])
+
+    return ch
+
+
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+
+# Example:
+# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)')
+
+DEFAULT_JAILS = [
+    'ssh',
+]
+
+
+class Service(LogService):
+    def __init__(self, configuration=None, name=None):
+        LogService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = dict()
+
+        self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+        self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+        self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+        self.exclude = self.configuration.get('exclude', str())
+
+        self.monitoring_jails = list()
+        self.banned_ips = defaultdict(set)
+        self.data = dict()
+
+    def check(self):
+        """
+        :return: bool
+        """
+        if not self.conf_path.endswith(('.conf', '.local')):
+            self.error('{0} is a wrong conf path name, must be *.conf or *.local'.format(self.conf_path))
+            return False
+
+        if not os.access(self.log_path, os.R_OK):
+            self.error('{0} is not readable'.format(self.log_path))
+            return False
+
+        if os.path.getsize(self.log_path) == 0:
+            self.error('{0} is empty'.format(self.log_path))
+            return False
+
+        self.monitoring_jails = self.jails_auto_detection()
+        for jail in self.monitoring_jails:
+            self.data[jail] = 0
+            self.data['{0}_in_jail'.format(jail)] = 0
+
+        self.definitions = charts(self.monitoring_jails)
+        self.info('monitoring jails: {0}'.format(self.monitoring_jails))
+
+        return True
+
+    def get_data(self):
+        """
+        :return: dict
+        """
+        raw = self._get_raw_data()
+
+        if not raw:
+            return None if raw is None else self.data
+
+        for row in raw:
+            match = RE_DATA.search(row)
+
+            if not match:
+                continue
+
+            match = match.groupdict()
+
+            if match['jail'] not in self.monitoring_jails:
+                continue
+
+            jail, action, ip = match['jail'], match['action'], match['ip']
+
+            if action == 'Ban':
+                self.data[jail] += 1
+                if ip not in self.banned_ips[jail]:
+                    self.banned_ips[jail].add(ip)
+                    self.data['{0}_in_jail'.format(jail)] += 1
+            else:
+                if ip in self.banned_ips[jail]:
+                    self.banned_ips[jail].remove(ip)
+                    self.data['{0}_in_jail'.format(jail)] -= 1
+
+        return self.data
+
+    def get_files_from_dir(self, dir_path, suffix):
+        """
+        :return: list
+        """
+        if not os.path.isdir(dir_path):
+            self.error('{0} is not a directory'.format(dir_path))
+            return list()
+
+        return glob('{0}/*.{1}'.format(dir_path, suffix))
+
+    def get_jails_from_file(self, file_path):
+        """
+        :return: list
+        """
+        if not os.access(file_path, os.R_OK):
+            self.error('{0} is not readable or does not exist'.format(file_path))
+            return list()
+
+        with open(file_path, 'rt') as f:
+            lines = f.readlines()
+            raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
+
+        match = RE_JAILS.findall(raw)
+        # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
+
+        if not match:
+            self.debug('{0} parse failed'.format(file_path))
+            return list()
+
+        return match
+
+    def jails_auto_detection(self):
+        """
+        :return: list
+
+        Parses jail configuration files. Returns list of enabled jails.
+        According to man jail.conf, the parse order must be
+        * jail.conf
+        * jail.d/*.conf (in alphabetical order)
+        * jail.local
+        * jail.d/*.local (in alphabetical order)
+        """
+        jails_files, all_jails, active_jails = list(), list(), list()
+
+        jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.')[0]))
+        jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
+        jails_files.append('{0}.local'.format(self.conf_path.rsplit('.')[0]))
+        jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
+
+        self.debug('config files to parse: {0}'.format(jails_files))
+
+        for f in jails_files:
+            all_jails.extend(self.get_jails_from_file(f))
+
+        exclude = self.exclude.split()
+
+        for name, status in all_jails:
+            if name in exclude:
+                continue
+
+            if status == 'true' and name not in active_jails:
+                active_jails.append(name)
+            elif status == 'false' and name in active_jails:
+                active_jails.remove(name)
+
+        return active_jails or DEFAULT_JAILS
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
new file mode 100644
index 000000000..60ca87231
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -0,0 +1,70 @@
+# netdata python.d.plugin configuration for fail2ban
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, fail2ban also supports the following: +# +# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log' +# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local' +# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/' +# exclude: 'jails you want to exclude from autodetection' # Default: none +#------------------------------------------------------------------------------------------------------------------ diff --git a/collectors/python.d.plugin/freeradius/Makefile.inc b/collectors/python.d.plugin/freeradius/Makefile.inc new file mode 100644 index 000000000..54aa6492f --- /dev/null +++ b/collectors/python.d.plugin/freeradius/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += freeradius/freeradius.chart.py +dist_pythonconfig_DATA += freeradius/freeradius.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += freeradius/README.md freeradius/Makefile.inc + diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md new file mode 100644 index 000000000..e5fe88ec3 --- /dev/null +++ b/collectors/python.d.plugin/freeradius/README.md @@ -0,0 +1,70 @@ +# freeradius + +Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second. + +It produces: + +1. **Authentication counters:** + * access-accepts + * access-rejects + * auth-dropped-requests + * auth-duplicate-requests + * auth-invalid-requests + * auth-malformed-requests + * auth-unknown-types + +2. **Accounting counters:** [optional] + * accounting-requests + * accounting-responses + * acct-dropped-requests + * acct-duplicate-requests + * acct-invalid-requests + * acct-malformed-requests + * acct-unknown-types + +3. **Proxy authentication counters:** [optional] + * proxy-access-accepts + * proxy-access-rejects + * proxy-auth-dropped-requests + * proxy-auth-duplicate-requests + * proxy-auth-invalid-requests + * proxy-auth-malformed-requests + * proxy-auth-unknown-types + +4. 
**Proxy accounting counters:** [optional]
+ * proxy-accounting-requests
+ * proxy-accounting-responses
+ * proxy-acct-dropped-requests
+ * proxy-acct-duplicate-requests
+ * proxy-acct-invalid-requests
+ * proxy-acct-malformed-requests
+ * proxy-acct-unknown-types
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+  host       : 'localhost'
+  port       : '18121'
+  secret     : 'adminsecret'
+  acct       : False # Freeradius accounting statistics.
+  proxy_auth : False # Freeradius proxy authentication statistics.
+  proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**Freeradius server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+ * cd sites-enabled
+ * ln -s ../sites-available/status status
+
+and restart/reload your FreeRADIUS server.
+
+---
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
new file mode 100644
index 000000000..3126831b7
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Description: freeradius netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from re import findall
+from subprocess import Popen, PIPE
+
+from bases.collection import find_binary
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 60
+update_every = 15
+
+RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
+
+CHARTS = {
+    'authentication': {
+        'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
+        'lines': [
+            ['access-accepts', None, 'incremental'],
+            ['access-rejects', None, 'incremental'],
+            ['auth-dropped-requests', 'dropped-requests', 'incremental'],
+            ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+            ['auth-invalid-requests', 'invalid-requests', 'incremental'],
+            ['auth-malformed-requests', 'malformed-requests', 'incremental'],
+            ['auth-unknown-types', 'unknown-types', 'incremental']
+        ]
+    },
+    'accounting': {
+        'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
+        'lines': [
+            ['accounting-requests', 'requests', 'incremental'],
+            ['accounting-responses', 'responses', 'incremental'],
+            ['acct-dropped-requests', 'dropped-requests', 'incremental'],
+            ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+            ['acct-invalid-requests', 'invalid-requests', 'incremental'],
+            ['acct-malformed-requests', 'malformed-requests', 'incremental'],
+            ['acct-unknown-types', 'unknown-types', 'incremental']
+        ]
+    },
+    'proxy-auth': {
+        'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
+        'lines': [
+            ['proxy-access-accepts', 'access-accepts', 'incremental'],
+            ['proxy-access-rejects', 'access-rejects', 'incremental'],
+            ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
+            ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+            ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
+            ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
+            ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
+        ]
+    },
+    'proxy-acct': {
+        'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
+        'lines': [
+            ['proxy-accounting-requests', 'requests', 'incremental'],
+            ['proxy-accounting-responses', 'responses', 'incremental'],
+            ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
+            ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+            ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
+            ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
+            ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
+        ]
+    }
+}
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.definitions = CHARTS
+        self.host = self.configuration.get('host', 'localhost')
+        self.port = self.configuration.get('port', '18121')
+        self.secret = self.configuration.get('secret')
+        self.acct = self.configuration.get('acct', False)
+        self.proxy_auth = self.configuration.get('proxy_auth', False)
+        self.proxy_acct = self.configuration.get('proxy_acct', False)
+        chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
+        self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
+        self.echo = find_binary('echo')
+        self.radclient = find_binary('radclient')
+        self.sub_echo = [self.echo, RADIUS_MSG]
+        self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
+                              ':'.join([self.host, self.port]), 'status', self.secret]
+
+    def check(self):
+        if not all([self.echo, self.radclient]):
+            self.error('Can\'t locate "echo" or "radclient" binary or binary is not executable by netdata')
+            return False
+        if not self.secret:
+            self.error('"secret" not set')
+            return False
+
+        if self._get_raw_data():
+            return True
+        self.error('Request returned no data. Is server alive?')
+        return False
+
+    def _get_data(self):
+        """
+        Format data received from shell command
+        :return: dict
+        """
+        result = self._get_raw_data()
+        if not result:
+            return None
+        return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
+
+    def _get_raw_data(self):
+        """
+        The following code is equivalent to
+        'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
+        | radclient -t 1 -r 1 host:port status secret'
+        :return: str
+        """
+        try:
+            process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
+            process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
+            process_echo.stdout.close()
+            raw_result = process_rad.communicate()[0]
+        except OSError:
+            return None
+        if process_rad.returncode == 0:
+            return raw_result.decode()
+        return None
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
new file mode 100644
index 000000000..3336d4c49
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for freeradius
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, freeradius also supports the following:
+#
+#     host: 'host'          # Default: 'localhost'. Server ip address or hostname.
+#     port: 'port'          # Default: '18121'. Port on which freeradius server listens (type = status).
+#     secret: 'secret'      # Default: 'adminsecret'.
+#     acct: yes/no          # Default: no. Freeradius accounting statistics.
+#     proxy_auth: yes/no    # Default: no. Freeradius proxy authentication statistics.
+#     proxy_acct: yes/no    # Default: no. Freeradius proxy accounting statistics.
+#
+# ------------------------------------------------------------------------------------------------------------------
+# Freeradius server configuration:
+# The configuration for the status server is automatically created in the sites-available directory.
+# By default, the server is enabled and can be queried from every client.
+# FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+#     cd sites-enabled
+#     ln -s ../sites-available/status status
+# and restart/reload your FreeRADIUS server.
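+#
+# A commented example job for reference (a sketch only; the values mirror the
+# README sample and the defaults above, and 'adminsecret' is a placeholder that
+# must match your FreeRADIUS status server secret):
+#
+# local:
+#   host       : 'localhost'
+#   port       : '18121'
+#   secret     : 'adminsecret'
+#   acct       : no
+#   proxy_auth : no
+#   proxy_acct : no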
+# ------------------------------------------------------------------------------------------------------------------ diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc new file mode 100644 index 000000000..74f50d765 --- /dev/null +++ b/collectors/python.d.plugin/go_expvar/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += go_expvar/go_expvar.chart.py +dist_pythonconfig_DATA += go_expvar/go_expvar.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc + diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md new file mode 100644 index 000000000..6309c195f --- /dev/null +++ b/collectors/python.d.plugin/go_expvar/README.md @@ -0,0 +1,276 @@ +# go_expvar + +The `go_expvar` module can monitor any Go application that exposes its metrics with the use of +`expvar` package from the Go standard library. + +`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. + +For the memory statistics, it produces the following charts: + +1. **Heap allocations** in kB + * alloc: size of objects allocated on the heap + * inuse: size of allocated heap spans + +2. **Stack allocations** in kB + * inuse: size of allocated stack spans + +3. **MSpan allocations** in kB + * inuse: size of allocated mspan structures + +4. **MCache allocations** in kB + * inuse: size of allocated mcache structures + +5. **Virtual memory** in kB + * sys: size of reserved virtual address space + +6. **Live objects** + * live: number of live objects in memory + +7. **GC pauses average** in ns + * avg: average duration of all GC stop-the-world pauses + + +## Monitoring Go Applications + +Netdata can be used to monitor running Go applications that expose their metrics with +the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library. + +The `expvar` package exposes these metrics over HTTP and is very easy to use. +Consider this minimal sample below: + +```go +package main + +import ( + _ "expvar" + "net/http" +) + +func main() { + http.ListenAndServe("127.0.0.1:8080", nil) +} +``` + +When imported this way, the `expvar` package registers a HTTP handler at `/debug/vars` that +exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening +the URL in your browser (or by using `wget` or `curl`). + +Sample output: + +```json +{ +"cmdline": ["./expvar-demo-binary"], +"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, } +} +``` + +You can of course expose and monitor your own variables as well. 
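+
+Since the endpoint is plain JSON over HTTP, it is easy to sanity-check it
+programmatically before pointing netdata at it. A quick sketch (the URL and
+the `memstats` key come from the sample above):
+
+```python
+import json
+
+try:
+    from urllib.request import urlopen  # Python 3
+except ImportError:
+    from urllib2 import urlopen  # Python 2
+
+# fetch and decode the expvar JSON document
+reply = urlopen('http://127.0.0.1:8080/debug/vars').read()
+stats = json.loads(reply.decode('utf-8'))
+print(stats['memstats']['Alloc'])
+```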
+Here is a sample Go application that exposes a few custom variables:
+
+```go
+package main
+
+import (
+    "expvar"
+    "net/http"
+    "runtime"
+    "time"
+)
+
+func main() {
+
+    tick := time.NewTicker(1 * time.Second)
+    num_go := expvar.NewInt("runtime.goroutines")
+    counters := expvar.NewMap("counters")
+    counters.Set("cnt1", new(expvar.Int))
+    counters.Set("cnt2", new(expvar.Float))
+
+    go http.ListenAndServe(":8080", nil)
+
+    for {
+        select {
+        case <-tick.C:
+            num_go.Set(int64(runtime.NumGoroutine()))
+            counters.Add("cnt1", 1)
+            counters.AddFloat("cnt2", 1.452)
+        }
+    }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the
+number of currently running goroutines, and updates these stats every second.
+
+In the next section, we will cover how to monitor and chart these exposed stats with
+the use of netdata's `go_expvar` module.
+
+### Using netdata go_expvar module
+
+The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
+(to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
+variable to `yes`:
+
+```
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+...
+go_expvar: yes
+...
+```
+
+Next, we need to edit the module configuration file (found at [`/etc/netdata/python.d/go_expvar.conf`](go_expvar.conf) by default)
+(to edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`).
+The module configuration consists of jobs, where each job can be used to monitor a separate Go application.
+Let's see a sample job configuration:
+
+```
+# /etc/netdata/python.d/go_expvar.conf
+
+app1:
+  name : 'app1'
+  url  : 'http://127.0.0.1:8080/debug/vars'
+  collect_memstats: true
+  extra_charts: {}
+```
+
+Let's go over each of the defined options:
+
+    name: 'app1'
+
+This is the job name that will appear at the netdata dashboard.
+If not defined, the job_name (top level key) will be used.
+
+    url: 'http://127.0.0.1:8080/debug/vars'
+
+This is the URL of the expvar endpoint. As the expvar handler can be installed
+in a custom path, the whole URL has to be specified. This value is mandatory.
+
+    collect_memstats: true
+
+Whether to enable collecting stats about the Go runtime's memory. You can find more
+information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
+
+    extra_charts: {}
+
+Enables the user to specify custom expvars to monitor and chart.
+It will be explained in more detail below.
+
+**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will
+disable itself, as there will be no data to collect!**
+
+Apart from these options, each job supports options inherited from netdata's `python.d.plugin`
+and its base `UrlService` class.
These are:
+
+    update_every: 1          # the job's data collection frequency
+    priority: 60000          # the job's order on the dashboard
+    retries: 60              # the job's number of restoration attempts
+    user: admin              # use when the expvar endpoint is protected by HTTP Basic Auth
+    password: sekret         # use when the expvar endpoint is protected by HTTP Basic Auth
+
+### Monitoring custom vars with go_expvar
+
+Now, memory stats might be useful, but what if you want netdata to monitor some custom values
+that your Go application exposes? The `go_expvar` module can do that as well with the use of
+the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YaML list of netdata chart definitions.
+Each chart definition has the following keys:
+
+    id: netdata chart ID
+    options: a key-value mapping of chart options
+    lines: a list of line definitions
+
+**Note: please do not use dots in the chart or line ID field.
+See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for an explanation.**
+
+Please see these two links to the official netdata documentation for more information about the values:
+
+- [External plugins - charts](../../plugins.d/#chart)
+- [Chart variables](https://github.com/netdata/netdata/wiki/How-to-write-new-module#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions).
+A line definition is a key-value mapping of line options.
+Each line can have the following options:
+
+    # mandatory
+    expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+    expvar_type: value type; supported are "float" or "int"
+    id: the id of this line/dimension in netdata
+
+    # optional - netdata defaults are used if these options are not defined
+    name: ''
+    algorithm: absolute
+    multiplier: 1
+    divisor: 100 if expvar_type == float, 1 if expvar_type == int
+    hidden: False
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](../../plugins.d/#dimension)
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.
+All dicts in the resulting JSON document are then flattened to one level.
+Expvar names are joined together with '.' when flattening.
+
+Example:
+```
+{
+    "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+    "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under the `runtime.goroutines`,
+`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+the first defined key wins and all subsequent keys with the same name are ignored.
+
+**Configuration example**
+
+The configuration below matches the second Go application described above.
+Netdata will monitor and chart memory stats for the application, as well as a custom chart of
+running goroutines and two dummy counters.
+
+```
+app1:
+  name : 'app1'
+  url  : 'http://127.0.0.1:8080/debug/vars'
+  collect_memstats: true
+  extra_charts:
+    - id: "runtime_goroutines"
+      options:
+        name: num_goroutines
+        title: "runtime: number of goroutines"
+        units: goroutines
+        family: runtime
+        context: expvar.runtime.goroutines
+        chart_type: line
+      lines:
+        - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+    - id: "foo_counters"
+      options:
+        name: counters
+        title: "some random counters"
+        units: awesomeness
+        family: counters
+        context: expvar.foo.counters
+        chart_type: line
+      lines:
+        - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+        - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+```
+
+**Netdata charts example**
+
+The images below show how the final charts look in netdata.
+
+![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
+
+![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
new file mode 100644
index 000000000..76e8b72ec
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import division
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+
+MEMSTATS_CHARTS = {
+    'memstats_heap': {
+        'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
+                    'expvar.memstats.heap', 'line'],
+        'lines': [
+            ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+            ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+        ]
+    },
+    'memstats_stack': {
+        'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
+                    'expvar.memstats.stack', 'line'],
+        'lines': [
+            ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+        ]
+    },
+    'memstats_mspan': {
+        'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
+                    'expvar.memstats.mspan', 'line'],
+        'lines': [
+            ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+        ]
+    },
+    'memstats_mcache': {
+        'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
+                    'expvar.memstats.mcache', 'line'],
+        'lines': [
+            ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+        ]
+    },
+    'memstats_live_objects': {
+        'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
+                    'expvar.memstats.live_objects', 'line'],
+        'lines': [
+            ['memstats_live_objects', 'live']
+        ]
+    },
+    'memstats_sys': {
+        'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
+                    'expvar.memstats.sys', 'line'],
+        'lines': [
+            ['memstats_sys', 'sys', 'absolute', 1, 1024]
+        ]
+    },
+    'memstats_gc_pauses': {
+        'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
+                    'expvar.memstats.gc_pauses', 'line'],
+        'lines': [
+            ['memstats_gc_pauses', 'avg']
+        ]
+    }
+}
+
+MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
+                  'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
+
+
+def flatten(d, top='', sep='.'):
+    items = []
+    for key,
val in d.items(): + nkey = top + sep + key if top else key + if isinstance(val, dict): + items.extend(flatten(val, nkey, sep=sep).items()) + else: + items.append((nkey, val)) + return dict(items) + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + + # if memstats collection is enabled, add the charts and their order + if self.configuration.get('collect_memstats'): + self.definitions = dict(MEMSTATS_CHARTS) + self.order = list(MEMSTATS_ORDER) + else: + self.definitions = dict() + self.order = list() + + # if extra charts are defined, parse their config + extra_charts = self.configuration.get('extra_charts') + if extra_charts: + self._parse_extra_charts_config(extra_charts) + + def check(self): + """ + Check if the module can collect data: + 1) At least one JOB configuration has to be specified + 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one + extra_chart must be defined. + + The configuration and URL check is provided by the UrlService class. + """ + + if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')): + self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.') + return False + + return UrlService.check(self) + + def _parse_extra_charts_config(self, extra_charts_config): + + # a place to store the expvar keys and their types + self.expvars = dict() + + for chart in extra_charts_config: + + chart_dict = dict() + chart_id = chart.get('id') + chart_lines = chart.get('lines') + chart_opts = chart.get('options', dict()) + + if not all([chart_id, chart_lines]): + self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart)) + continue + + chart_dict['options'] = [ + chart_opts.get('name', ''), + chart_opts.get('title', ''), + chart_opts.get('units', ''), + chart_opts.get('family', ''), + chart_opts.get('context', ''), + chart_opts.get('chart_type', 'line') + ] + chart_dict['lines'] = list() + + # add the lines to the chart + for line in chart_lines: + + ev_key = line.get('expvar_key') + ev_type = line.get('expvar_type') + line_id = line.get('id') + + if not all([ev_key, ev_type, line_id]): + self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line)) + continue + + if ev_type not in ['int', 'float']: + self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type)) + continue + + if ev_key in self.expvars: + self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key)) + continue + + self.expvars[ev_key] = (ev_type, line_id) + + chart_dict['lines'].append( + [ + line.get('id', ''), + line.get('name', ''), + line.get('algorithm', ''), + line.get('multiplier', 1), + line.get('divisor', 100 if ev_type == 'float' else 1), + line.get('hidden', False) + ] + ) + + self.order.append(chart_id) + self.definitions[chart_id] = chart_dict + + def _get_data(self): + """ + Format data received from http request + :return: dict + """ + + raw_data = self._get_raw_data() + if not raw_data: + return None + + data = json.loads(raw_data) + + expvars = dict() + if self.configuration.get('collect_memstats'): + expvars.update(self._parse_memstats(data)) + + if self.configuration.get('extra_charts'): + # the memstats part of the data has been already parsed, so we remove it before flattening and checking + # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict. 
+            del data['memstats']
+            flattened = flatten(data)
+            for k, v in flattened.items():
+                ev = self.expvars.get(k)
+                if not ev:
+                    # expvar is not defined in config, skip it
+                    continue
+                try:
+                    key_type, line_id = ev
+                    if key_type == 'int':
+                        expvars[line_id] = int(v)
+                    elif key_type == 'float':
+                        # if the value type is float, multiply it by 100 and set the line divisor to 100
+                        expvars[line_id] = float(v) * 100
+                except ValueError:
+                    self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
+                    del self.expvars[k]
+
+        return expvars
+
+    @staticmethod
+    def _parse_memstats(data):
+
+        memstats = data['memstats']
+
+        # calculate the number of live objects in memory
+        live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+        # calculate GC pause times average
+        # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+        # so we need to filter out the 0 values before the buffer is filled
+        gc_pauses = memstats['PauseNs']
+        try:
+            gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+        # no GC cycles have occurred yet
+        except ZeroDivisionError:
+            gc_pause_avg = 0
+
+        return {
+            'memstats_heap_alloc': memstats['HeapAlloc'],
+            'memstats_heap_inuse': memstats['HeapInuse'],
+            'memstats_stack_inuse': memstats['StackInuse'],
+            'memstats_mspan_inuse': memstats['MSpanInuse'],
+            'memstats_mcache_inuse': memstats['MCacheInuse'],
+            'memstats_sys': memstats['Sys'],
+            'memstats_live_objects': live_objs,
+            'memstats_gc_pauses': gc_pause_avg,
+        }
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
new file mode 100644
index 000000000..ba8922d2e
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -0,0 +1,110 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters.
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+#     url: 'http://127.0.0.1/debug/vars'       # the URL of the expvar endpoint
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+#     user: 'username'
+#     pass: 'password'
+#
+#     collect_memstats: true  # enables charts for Go runtime's memory statistics
+#     extra_charts: {}        # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+#  name : 'app1'
+#  url  : 'http://127.0.0.1:8080/debug/vars'
+#  collect_memstats: true
+#  extra_charts:
+#    - id: "runtime_goroutines"
+#      options:
+#        name: num_goroutines
+#        title: "runtime: number of goroutines"
+#        units: goroutines
+#        family: runtime
+#        context: expvar.runtime.goroutines
+#        chart_type: line
+#      lines:
+#        - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+#    - id: "foo_counters"
+#      options:
+#        name: counters
+#        title: "some random counters"
+#        units: awesomeness
+#        family: counters
+#        context: expvar.foo.counters
+#        chart_type: line
+#      lines:
+#        - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+#        - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
new file mode 100644
index 000000000..ad24deaa0
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += haproxy/haproxy.chart.py
+dist_pythonconfig_DATA += haproxy/haproxy.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
+
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 000000000..4bff25670
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,49 @@
+# haproxy
+
+This module monitors frontend and backend metrics such as bytes in, bytes out, current sessions
+and sessions currently queued, as well as health metrics such as backend server status
+(server check should be enabled).
+
+The plugin can obtain data from a URL **OR** a unix socket.
+
+**Requirement:**
+The socket MUST be readable AND writable by the netdata user.
+
+It produces:
+
+1.
**Frontend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+2. **Backend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+3. **Health** chart
+ * number of failed servers for every backend (in DOWN state)
+
+
+### configuration
+
+Sample:
+
+```yaml
+via_url:
+  user  : 'username' # ONLY IF stats auth is used
+  pass  : 'password' # ONLY IF stats auth is used
+  url   : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+  socket : 'path/to/haproxy/sock'
+```
+
+If no configuration is given, the module will fail to run.
+
+---
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
new file mode 100644
index 000000000..a46689f50
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+# Description: haproxy netdata python.d module
+# Author: l2isbad, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+    from urlparse import urlparse
+except ImportError:
+    from urllib.parse import urlparse
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.UrlService import UrlService
+
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+    'fbin',
+    'fbout',
+    'fscur',
+    'fqcur',
+    'fhrsp_1xx',
+    'fhrsp_2xx',
+    'fhrsp_3xx',
+    'fhrsp_4xx',
+    'fhrsp_5xx',
+    'fhrsp_other',
+    'fhrsp_total',
+    'bbin',
+    'bbout',
+    'bscur',
+    'bqcur',
+    'bhrsp_1xx',
+    'bhrsp_2xx',
+    'bhrsp_3xx',
+    'bhrsp_4xx',
+    'bhrsp_5xx',
+    'bhrsp_other',
+    'bhrsp_total',
+    'bqtime',
+    'bttime',
+    'brtime',
+    'bctime',
+    'health_sup',
+    'health_sdown',
+    'health_bdown',
+    'health_idle'
+]
+
+CHARTS = {
+    'fbin': {
+        'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
+        'lines': []
+    },
+    'fbout': {
+        'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
+        'lines': []
+    },
+    'fscur': {
+        'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+        'lines': []
+    },
+    'fqcur': {
+        'options': [None, 'Sessions In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+        'lines': []
+    },
+    'fhrsp_1xx': {
+        'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+        'lines': []
+    },
+    'fhrsp_2xx': {
+        'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+        'lines': []
+    },
+    'fhrsp_3xx': {
+        'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+        'lines': []
+    },
+    'fhrsp_4xx': {
+        'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+        'lines': []
+    },
+    'fhrsp_5xx': {
+        'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+        'lines': []
+    },
+    'fhrsp_other': {
+        'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+                    'haproxy_f.hrsp_other', 'line'],
+        'lines': []
+    },
+    'fhrsp_total': {
+        'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+        'lines': []
+    },
+    'bbin': {
+
'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'], + 'lines': [] + }, + 'bbout': { + 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'], + 'lines': [] + }, + 'bscur': { + 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'], + 'lines': [] + }, + 'bqcur': { + 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'], + 'lines': [] + }, + 'bhrsp_1xx': { + 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'], + 'lines': [] + }, + 'bhrsp_2xx': { + 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'], + 'lines': [] + }, + 'bhrsp_3xx': { + 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'], + 'lines': [] + }, + 'bhrsp_4xx': { + 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'], + 'lines': [] + }, + 'bhrsp_5xx': { + 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'], + 'lines': [] + }, + 'bhrsp_other': { + 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend', + 'haproxy_b.hrsp_other', 'line'], + 'lines': [] + }, + 'bhrsp_total': { + 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'], + 'lines': [] + }, + 'bqtime': { + 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend', + 'haproxy_b.qtime', 'line'], + 'lines': [] + }, + 'bctime': { + 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend', + 'haproxy_b.ctime', 'line'], + 'lines': [] + }, + 'brtime': { + 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend', + 'haproxy_b.rtime', 'line'], + 'lines': [] + }, + 'bttime': { + 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend', + 'haproxy_b.ttime', 'line'], + 'lines': [] + }, + 'health_sdown': { + 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', + 'haproxy_hs.down', 'line'], + 'lines': [] + }, + 'health_sup': { + 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', + 'haproxy_hs.up', 'line'], + 'lines': [] + }, + 'health_bdown': { + 'options': [None, 'Is Backend Alive? 
1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
+        'lines': []
+    },
+    'health_idle': {
+        'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
+        'lines': [
+            ['idle', None, 'absolute']
+        ]
+    }
+}
+
+
+METRICS = {
+    'bin': {'algorithm': 'incremental', 'divisor': 1024},
+    'bout': {'algorithm': 'incremental', 'divisor': 1024},
+    'scur': {'algorithm': 'absolute', 'divisor': 1},
+    'qcur': {'algorithm': 'absolute', 'divisor': 1},
+    'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+    'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+    'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+    'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+    'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+    'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
+
+
+BACKEND_METRICS = {
+    'qtime': {'algorithm': 'absolute', 'divisor': 1},
+    'ctime': {'algorithm': 'absolute', 'divisor': 1},
+    'rtime': {'algorithm': 'absolute', 'divisor': 1},
+    'ttime': {'algorithm': 'absolute', 'divisor': 1}
+}
+
+
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
+             socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
+
+class Service(UrlService, SocketService):
+    def __init__(self, configuration=None, name=None):
+        if 'socket' in configuration:
+            SocketService.__init__(self, configuration=configuration, name=name)
+            self.poll = SocketService
+            self.options_ = dict(regex=REGEX['socket'],
+                                 stat='show stat\n'.encode(),
+                                 info='show info\n'.encode())
+        else:
+            UrlService.__init__(self, configuration=configuration, name=name)
+            self.poll = UrlService
+            self.options_ = dict(regex=REGEX['url'],
+                                 stat=self.url,
+                                 info=url_remove_params(self.url))
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def check(self):
+        if self.poll.check(self):
+            self.create_charts()
+            self.info('We are using %s.'
% self.poll.__name__) + return True + return False + + def _get_data(self): + to_netdata = dict() + self.request, self.url = self.options_['stat'], self.options_['stat'] + stat_data = self._get_stat_data() + self.request, self.url = self.options_['info'], self.options_['info'] + info_data = self._get_info_data(regex=self.options_['regex']) + + to_netdata.update(stat_data) + to_netdata.update(info_data) + return to_netdata or None + + def _get_stat_data(self): + """ + :return: dict + """ + raw_data = self.poll._get_raw_data(self) + + if not raw_data: + return dict() + + raw_data = raw_data.splitlines() + self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(','))) + for _ in range(1, len(raw_data))]) + if not self.data: + return dict() + + stat_data = dict() + + for frontend in self.data['frontend']: + for metric in METRICS: + idx = frontend['# pxname'].replace('.', '_') + stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0 + + for backend in self.data['backend']: + name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_') + stat_data['hsup_' + idx] = len([server for server in self.data['servers'] + if server_status(server, name, 'UP')]) + stat_data['hsdown_' + idx] = len([server for server in self.data['servers'] + if server_status(server, name, 'DOWN')]) + stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0 + for metric in BACKEND_METRICS: + stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0 + hrsp_total = 0 + for metric in METRICS: + stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0 + if metric.startswith('hrsp_'): + hrsp_total += int(backend.get(metric) or 0) + stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total + return stat_data + + def _get_info_data(self, regex): + """ + :return: dict + """ + raw_data = self.poll._get_raw_data(self) + if not raw_data: + return dict() + + match = regex.search(raw_data) + return match.groupdict() if match else dict() + + @staticmethod + def _check_raw_data(data): + """ + Check if all data has been gathered from socket + :param data: str + :return: boolean + """ + return not bool(data) + + def create_charts(self): + for front in self.data['frontend']: + name, idx = front['# pxname'], front['# pxname'].replace('.', '_') + for metric in METRICS: + self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]), + name, METRICS[metric]['algorithm'], 1, + METRICS[metric]['divisor']]) + self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]), + name, 'incremental', 1, 1]) + for back in self.data['backend']: + name, idx = back['# pxname'], back['# pxname'].replace('.', '_') + for metric in METRICS: + self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]), + name, METRICS[metric]['algorithm'], 1, + METRICS[metric]['divisor']]) + self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]), + name, 'incremental', 1, 1]) + for metric in BACKEND_METRICS: + self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]), + name, BACKEND_METRICS[metric]['algorithm'], 1, + BACKEND_METRICS[metric]['divisor']]) + self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute']) + self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute']) + self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute']) + + +def parse_data_(data): + def 
is_backend(backend): + return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats' + + def is_frontend(frontend): + return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats' + + def is_server(server): + return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND')) + + if not data: + return None + + result = defaultdict(list) + for elem in data: + if is_backend(elem): + result['backend'].append(elem) + continue + elif is_frontend(elem): + result['frontend'].append(elem) + continue + elif is_server(elem): + result['servers'].append(elem) + + return result or None + + +def server_status(server, backend_name, status='DOWN'): + return server.get('# pxname') == backend_name and server.get('status') == status + + +def url_remove_params(url): + parsed = urlparse(url or str()) + return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path) diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf new file mode 100644 index 000000000..a40dd76a5 --- /dev/null +++ b/collectors/python.d.plugin/haproxy/haproxy.conf @@ -0,0 +1,85 @@ +# netdata python.d.plugin configuration for haproxy +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, haproxy also supports the following:
+#
+# IMPORTANT: the socket MUST BE readable AND writable by the netdata user
+#
+#     socket: 'path/to/haproxy/sock'
+#
+# OR
+#     url: 'http://ip.address:port/url;csv;norefresh'
+#     [user: USERNAME] only if stats auth is used
+#     [pass: PASSWORD] only if stats auth is used
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#via_url:
+#  user  : 'admin'
+#  pass  : 'password'
+#  url   : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+#via_socket:
+#  socket: '/var/run/haproxy/admin.sock'
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 000000000..22852b646
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hddtemp/hddtemp.chart.py
+dist_pythonconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 100644
index 000000000..1236186a5
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1,22 @@
+# hddtemp
+
+This module monitors disk temperatures from one or more hddtemp daemons.
+ +**Requirement:** +Running `hddtemp` in daemonized mode with access on tcp port + +It produces one chart **Temperature** with dynamic number of dimensions (one per disk) + +### configuration + +Sample: + +```yaml +update_every: 3 +host: "127.0.0.1" +port: 7634 +``` + +If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address + +--- diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py new file mode 100644 index 000000000..dea701171 --- /dev/null +++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Description: hddtemp netdata python.d module +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + + +import re + +from copy import deepcopy + +from bases.FrameworkServices.SocketService import SocketService + + +ORDER = ['temperatures'] + +CHARTS = { + 'temperatures': { + 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'], + 'lines': [ + # lines are created dynamically in `check()` method + ]}} + +RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|') + + +class Disk: + def __init__(self, id_, name, temp): + self.id = id_.split('/')[-1] + self.name = name.replace(' ', '_') + self.temp = temp if temp.isdigit() else 0 + + def __repr__(self): + return self.id + + +class Service(SocketService): + def __init__(self, configuration=None, name=None): + SocketService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = deepcopy(CHARTS) + self._keep_alive = False + self.request = "" + self.host = "127.0.0.1" + self.port = 7634 + self.do_only = self.configuration.get('devices') + + def get_disks(self): + r = self._get_raw_data() + + if not r: + return None + + m = RE.findall(r) + + if not m: + self.error("received data doesn't have needed records") + return None + + rv = [Disk(*d) for d in m] + self.debug('available disks: {0}'.format(rv)) + + if self.do_only: + return [v for v in rv if v.id in self.do_only] + return rv + + def get_data(self): + """ + Get data from TCP/IP socket + :return: dict + """ + + disks = self.get_disks() + + if not disks: + return None + + return dict((d.id, d.temp) for d in disks) + + def check(self): + """ + Parse configuration, check if hddtemp is available, and dynamically create chart lines data + :return: boolean + """ + self._parse_config() + disks = self.get_disks() + + if not disks: + return False + + for d in disks: + n = d.id if d.id.startswith('sd') else d.name + dim = [d.id, n] + self.definitions['temperatures']['lines'].append(dim) + + return True + + @staticmethod + def _check_raw_data(data): + return not bool(data) diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf new file mode 100644 index 000000000..9165798a2 --- /dev/null +++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf @@ -0,0 +1,97 @@ +# netdata python.d.plugin configuration for hddtemp +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). 
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, hddtemp also supports the following:
+#
+#     host: 'IP or HOSTNAME' # the host to connect to
+#     port: PORT             # the port to connect to
+#
+
+# By default this module will try to autodetect disks
+# (autodetection works only for disks whose names start with "sd").
+# However, this can be overridden by setting the variable `devices` to
+# an array of desired disks. Example for two disks:
+#
+# devices:
+#  - sda
+#  - sdb
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name: 'local'
+  host: 'localhost'
+  port: 7634
+
+localipv4:
+  name: 'local'
+  host: '127.0.0.1'
+  port: 7634
+
+localipv6:
+  name: 'local'
+  host: '::1'
+  port: 7634
diff --git a/collectors/python.d.plugin/httpcheck/Makefile.inc b/collectors/python.d.plugin/httpcheck/Makefile.inc
new file mode 100644
index 000000000..4a5bd856d
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += httpcheck/httpcheck.chart.py
+dist_pythonconfig_DATA += httpcheck/httpcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += httpcheck/README.md httpcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
new file mode 100644
index 000000000..759107663
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -0,0 +1,41 @@
+# httpcheck
+
+This module monitors a remote http server for availability and response time.
+
+The following charts are drawn per job:
+
+1. **Response time** ms
+ * Time in 0.1 ms resolution in which the server responds.
+   If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Unexpected content: No Regex match found in the response
+ * Unexpected status code: Do we get 500 errors?
+ * Connection failed: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+### configuration
+
+Sample configuration and its default values:
+
+```yaml
+server:
+  url: 'http://host:port/path'   # required
+  status_accepted:               # optional
+    - 200
+  timeout: 1                     # optional, supports decimals (e.g. 0.2)
+  update_every: 3                # optional
+  regex: 'REGULAR_EXPRESSION'    # optional, see https://docs.python.org/3/howto/regex.html
+  redirect: yes                  # optional
+```
+
+### notes
+
+ * The status chart is primarily intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+   similar is detected.
+ * This plugin is meant for simple use cases. Currently, the accuracy of the
+   response time is low, so it should be used as a reference only.
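+
+For instance, a job that expects an HTTP 200 and a known string in the body from a
+local health endpoint could look like the following sketch (the host, path and regex
+here are placeholders, not defaults of this module):
+
+```yaml
+my_service:
+  url: 'http://127.0.0.1:8080/health'   # endpoint to probe
+  status_accepted:
+    - 200                               # anything else counts as 'bad status'
+  regex: 'OK'                           # body must match, or 'bad content' is set
+  timeout: 2
+  update_every: 5
+```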
+ +--- diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py new file mode 100644 index 000000000..f046f33c0 --- /dev/null +++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# Description: http check netdata python.d module +# Original Author: ccremer (github.com/ccremer) +# SPDX-License-Identifier: GPL-3.0-or-later + +import urllib3 +import re + +try: + from time import monotonic as time +except ImportError: + from time import time + +from bases.FrameworkServices.UrlService import UrlService + +# default module values (can be overridden per job in `config`) +update_every = 3 +priority = 60000 +retries = 60 + +# Response +HTTP_RESPONSE_TIME = 'time' +HTTP_RESPONSE_LENGTH = 'length' + +# Status dimensions +HTTP_SUCCESS = 'success' +HTTP_BAD_CONTENT = 'bad_content' +HTTP_BAD_STATUS = 'bad_status' +HTTP_TIMEOUT = 'timeout' +HTTP_NO_CONNECTION = 'no_connection' + +ORDER = ['response_time', 'response_length', 'status'] + +CHARTS = { + 'response_time': { + 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'], + 'lines': [ + [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000] + ] + }, + 'response_length': { + 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'], + 'lines': [ + [HTTP_RESPONSE_LENGTH, 'length', 'absolute'] + ] + }, + 'status': { + 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'], + 'lines': [ + [HTTP_SUCCESS, 'success', 'absolute'], + [HTTP_BAD_CONTENT, 'bad content', 'absolute'], + [HTTP_BAD_STATUS, 'bad status', 'absolute'], + [HTTP_TIMEOUT, 'timeout', 'absolute'], + [HTTP_NO_CONNECTION, 'no connection', 'absolute'] + ] + } +} + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + pattern = self.configuration.get('regex') + self.regex = re.compile(pattern) if pattern else None + self.status_codes_accepted = self.configuration.get('status_accepted', [200]) + self.follow_redirect = self.configuration.get('redirect', True) + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + """ + Format data received from http request + :return: dict + """ + data = dict() + data[HTTP_SUCCESS] = 0 + data[HTTP_BAD_CONTENT] = 0 + data[HTTP_BAD_STATUS] = 0 + data[HTTP_TIMEOUT] = 0 + data[HTTP_NO_CONNECTION] = 0 + url = self.url + try: + start = time() + status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False, + redirect=self.follow_redirect) + diff = time() - start + data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0) + self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format( + url=url, code=status, diff=diff + )) + self.process_response(content, data, status) + + except urllib3.exceptions.NewConnectionError as error: + self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error)) + data[HTTP_NO_CONNECTION] = 1 + + except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error: + self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error)) + data[HTTP_TIMEOUT] = 1 + + except urllib3.exceptions.HTTPError as error: + self.debug('Connection failed: {url}. 
Error: {error}'.format(url=url, error=error)) + data[HTTP_NO_CONNECTION] = 1 + + except (TypeError, AttributeError) as error: + self.error('Url: {url}. Error: {error}'.format(url=url, error=error)) + return None + + return data + + def process_response(self, content, data, status): + data[HTTP_RESPONSE_LENGTH] = len(content) + self.debug('Content: \n\n{content}\n'.format(content=content)) + if status in self.status_codes_accepted: + if self.regex and self.regex.search(content) is None: + self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern)) + data[HTTP_BAD_CONTENT] = 1 + else: + data[HTTP_SUCCESS] = 1 + else: + data[HTTP_BAD_STATUS] = 1 diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf new file mode 100644 index 000000000..bd21b5af8 --- /dev/null +++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf @@ -0,0 +1,100 @@ +# netdata python.d.plugin configuration for httpcheck +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the httpcheck default is used, which is at 3 seconds. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# chart_cleanup sets the default chart cleanup interval in iterations. +# A chart is marked as obsolete if it has not been updated +# 'chart_cleanup' iterations in a row. +# They will be hidden immediately (not offered to dashboard viewer, +# streamed upstream and archived to backends) and deleted one hour +# later (configurable from netdata.conf). +# -- For this plugin, cleanup MUST be disabled, otherwise we lose response +# time charts +chart_cleanup: 0 + +# Autodetection and retries do not work for this plugin + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# ------------------------------- +# ATTENTION: Any valid configuration will be accepted, even if initial connection fails! +# ------------------------------- +# +# There is intentionally no default config, e.g. for 'localhost' + +# job_name: +# name: myname # [optional] the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 3 # [optional] the JOB's data collection frequency +# priority: 60000 # [optional] the JOB's order on the dashboard +# retries: 60 # [optional] the JOB's number of restoration attempts +# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 
0.5s)
+#     url: 'http[s]://host-ip-or-dns[:port][path]'
+#                              # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
+#                              # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+#     method: GET              # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
+#     redirect: yes            # [optional] If the remote host returns 3xx status codes, the redirection url will be
+#                              # followed (default).
+#     status_accepted:         # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
+#                              # status chart, however: The response time will still be > 0, since the
+#                              # host responded with something.
+#                              # If redirect is enabled, the accepted status will be checked against the redirected page.
+#       - 200                  # Multiple status codes are possible. If you specify 'status_accepted', you would still
+#                              # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status'
+#                              # if code is 200. Do specify numerical entries such as 200, not 'OK'.
+#     regex: None              # [optional] If the status code is accepted, the content of the response will be searched for this
+#                              # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled,
+#                              # the regex will be matched to the redirected page, not the initial 3xx response.
+
+# Simple example:
+#
+# jira:
+#   url: 'https://jira.localdomain/'
+
+
+# Complex example:
+#
+# cool_website:
+#   url: 'http://cool.website:8080/home'
+#   status_accepted:
+#     - 200
+#     - 204
+#   regex: <title>My cool website!<\/title>
+#   timeout: 2
+
+# This plugin is intended for simple cases. Currently, the accuracy of the response time is low, so it should be used as a reference only.
+
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
new file mode 100644
index 000000000..cb7c6fa0e
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += icecast/icecast.chart.py
+dist_pythonconfig_DATA += icecast/icecast.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
+
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 100644
index 000000000..a28a6c398
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1,26 @@
+# icecast
+
+This module will monitor the number of listeners for active sources.
+
+**Requirements:**
+ * icecast version >= 2.4.0
+
+It produces the following charts:
+
+1.
**Listeners** in listeners + * source number + +### configuration + +Needs only `url` to server's `/status-json.xsl` + +Here is an example for remote server: + +```yaml +remote: + url : 'http://1.2.3.4:8443/status-json.xsl' +``` + +Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl` + +--- diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py new file mode 100644 index 000000000..d8813f9ba --- /dev/null +++ b/collectors/python.d.plugin/icecast/icecast.chart.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Description: icecast netdata python.d module +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import json + +from bases.FrameworkServices.UrlService import UrlService + + +priority = 60000 +retries = 60 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = ['listeners'] + +CHARTS = { + 'listeners': { + 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'], + 'lines': [ + ] + } +} + + +class Source: + def __init__(self, idx, data): + self.name = 'source_{0}'.format(idx) + self.is_active = data.get('stream_start') and data.get('server_name') + self.listeners = data['listeners'] + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.url = self.configuration.get('url') + self._manager = self._build_manager() + + def check(self): + """ + Add active sources to the "listeners" chart + :return: bool + """ + sources = self.get_sources() + if not sources: + return None + + active_sources = 0 + for idx, raw_source in enumerate(sources): + if Source(idx, raw_source).is_active: + active_sources += 1 + dim_id = 'source_{0}'.format(idx) + dim = 'source {0}'.format(idx) + self.definitions['listeners']['lines'].append([dim_id, dim]) + + return bool(active_sources) + + def _get_data(self): + """ + Get number of listeners for every source + :return: dict + """ + sources = self.get_sources() + if not sources: + return None + + data = dict() + + for idx, raw_source in enumerate(sources): + source = Source(idx, raw_source) + data[source.name] = source.listeners + + return data + + def get_sources(self): + """ + Format data received from http request and return list of sources + :return: list + """ + + raw_data = self._get_raw_data() + if not raw_data: + return None + + try: + data = json.loads(raw_data) + except ValueError as error: + self.error('JSON decode error:', error) + return None + + sources = data['icestats'].get('source') + if not sources: + return None + + return sources if isinstance(sources, list) else [sources] diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf new file mode 100644 index 000000000..a900d06d3 --- /dev/null +++ b/collectors/python.d.plugin/icecast/icecast.conf @@ -0,0 +1,83 @@ +# netdata python.d.plugin configuration for icecast +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). 
+ +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, icecast also supports the following: +# +# url: 'URL' # the URL to fetch icecast's stats +# +# if the URL is password protected, the following are supported: +# +# user: 'username' +# pass: 'password' + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +localhost: + name : 'local' + url : 'http://localhost:8443/status-json.xsl' + +localipv4: + name : 'local' + url : 'http://127.0.0.1:8443/status-json.xsl' \ No newline at end of file diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc new file mode 100644 index 000000000..68458cb38 --- /dev/null +++ b/collectors/python.d.plugin/ipfs/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += ipfs/ipfs.chart.py +dist_pythonconfig_DATA += ipfs/ipfs.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc + diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md new file mode 100644 index 000000000..a30649a5f --- /dev/null +++ b/collectors/python.d.plugin/ipfs/README.md @@ -0,0 +1,25 @@ +# ipfs + +Module monitors [IPFS](https://ipfs.io) basic information. + +1. **Bandwidth** in kbits/s + * in + * out + +2. 
**Peers**
+ * peers
+
+### configuration
+
+Only the URL to the IPFS server is needed.
+
+Sample:
+
+```yaml
+localhost:
+  name : 'local'
+  url  : 'http://localhost:5001'
+```
+
+---
+
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
new file mode 100644
index 000000000..3f6794e48
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+#     'update_every': update_every,
+#     'retries': retries,
+#     'priority': priority,
+#     'url': 'http://localhost:5001'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
+
+CHARTS = {
+    'bandwidth': {
+        'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+        'lines': [
+            ['in', None, 'absolute', 8, 1000],
+            ['out', None, 'absolute', -8, 1000]
+        ]
+    },
+    'peers': {
+        'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+        'lines': [
+            ['peers', None, 'absolute']
+        ]
+    },
+    'repo_size': {
+        'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
+        'lines': [
+            ['avail', None, 'absolute', 1, 1e9],
+            ['size', None, 'absolute', 1, 1e9],
+        ]
+    },
+    'repo_objects': {
+        'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+        'lines': [
+            ['objects', None, 'absolute', 1, 1],
+            ['pinned', None, 'absolute', 1, 1],
+            ['recursive_pins', None, 'absolute', 1, 1]
+        ]
+    }
+}
+
+SI_zeroes = {
+    'k': 3,
+    'm': 6,
+    'g': 9,
+    't': 12,
+    'p': 15,
+    'e': 18,
+    'z': 21,
+    'y': 24
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.__storage_max = None
+        self.do_pinapi = self.configuration.get('pinapi')
+
+    def _get_json(self, sub_url):
+        """
+        :return: json decoding of the specified url
+        """
+        self.url = self.baseurl + sub_url
+        try:
+            return json.loads(self._get_raw_data())
+        except (TypeError, ValueError):
+            return dict()
+
+    @staticmethod
+    def _recursive_pins(keys):
+        # json.loads() always returns text, so compare against a str
+        # (comparing against b'recursive' never matches on python3)
+        return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
+
+    @staticmethod
+    def _dehumanize(store_max):
+        # convert from '10Gb' to 10000000000
+        if not isinstance(store_max, int):
+            store_max = store_max.lower()
+            if store_max.endswith('b'):
+                val, units = store_max[:-2], store_max[-2]
+                if units in SI_zeroes:
+                    val += '0' * SI_zeroes[units]
+                store_max = val
+            try:
+                store_max = int(store_max)
+            except (TypeError, ValueError):
+                store_max = None
+        return store_max
+
+    def _storagemax(self, store_cfg):
+        if self.__storage_max is None:
+            self.__storage_max = self._dehumanize(store_cfg)
+        return self.__storage_max
+
+    def _get_data(self):
+        """
+        Get data from API
+        :return: dict
+        """
+        # suburl : List of (result-key, original-key, transform-func)
+        cfg = {
+            '/api/v0/stats/bw':
+                [('in', 'RateIn', int), ('out', 'RateOut', int)],
+            '/api/v0/swarm/peers':
+                [('peers', 'Peers', len)],
'/api/v0/stats/repo': + [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)], + } + if self.do_pinapi: + cfg.update({ + '/api/v0/pin/ls': + [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)] + }) + r = dict() + for suburl in cfg: + in_json = self._get_json(suburl) + for new_key, orig_key, xmute in cfg[suburl]: + try: + r[new_key] = xmute(in_json[orig_key]) + except Exception: + continue + return r or None diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf new file mode 100644 index 000000000..e3df0f6bb --- /dev/null +++ b/collectors/python.d.plugin/ipfs/ipfs.conf @@ -0,0 +1,79 @@ +# netdata python.d.plugin configuration for ipfs +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ipfs also supports the following:
+#
+#     url: 'URL'              # URL to the IPFS API
+#     pinapi: no              # Set status of IPFS pinned object polling
+#                             # Currently defaults to disabled due to IPFS Bug
+#                             # https://github.com/ipfs/go-ipfs/issues/3874
+#                             # resulting in very high CPU Usage
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  url  : 'http://localhost:5001'
+  pinapi : no
diff --git a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
new file mode 100644
index 000000000..44343fc9d
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += isc_dhcpd/isc_dhcpd.chart.py
+dist_pythonconfig_DATA += isc_dhcpd/isc_dhcpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += isc_dhcpd/README.md isc_dhcpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
new file mode 100644
index 000000000..334d86e33
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -0,0 +1,34 @@
+# isc_dhcpd
+
+This module monitors the leases database to show all active leases for given pools.
+
+**Requirements:**
+ * dhcpd leases file MUST BE readable by netdata
+ * pools MUST BE in CIDR format
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+ * utilization in percent
+
+2. **Total leases**
+ * leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+ * leases (number of active leases in pool)
+
+
+### configuration
+
+Pools are given as a `name: CIDR` mapping, matching what the module's code
+expects. Sample:
+
+```yaml
+local:
+  leases_path : '/var/lib/dhcp/dhcpd.leases'
+  pools       :
+    office : '192.168.3.0/24'
+    wifi   : '192.168.4.0/24'
+    lab    : '192.168.5.0/24'
+```
+
+With Python 2 you need to install `py2-ipaddress` for the plugin to work.
+The module will not work if no configuration is given.
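+
+For reference, utilization is computed per pool as active leases over usable
+host addresses. A minimal standalone sketch of that calculation (using the
+stdlib `ipaddress` module; the pool and addresses below are illustrative):
+
+```python
+import ipaddress
+
+pool = ipaddress.ip_network(u'192.168.3.0/24')
+active = [ipaddress.ip_address(u'192.168.3.10'),
+          ipaddress.ip_address(u'192.168.3.11')]
+
+leased = sum(1 for ip in active if ip in pool)
+usable = pool.num_addresses - 2  # exclude network and broadcast addresses
+print('utilization: {0:.2f}%'.format(100.0 * leased / usable))
+```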
+ +--- diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py new file mode 100644 index 000000000..a9f274949 --- /dev/null +++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# Description: isc dhcpd lease netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +import os +import re +import time + + +try: + import ipaddress + HAVE_IP_ADDRESS = True +except ImportError: + HAVE_IP_ADDRESS = False + +from collections import defaultdict +from copy import deepcopy + +from bases.FrameworkServices.SimpleService import SimpleService + +priority = 60000 +retries = 60 + +ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total'] + +CHARTS = { + 'pools_utilization': { + 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'], + 'lines': [] + }, + 'pools_active_leases': { + 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'], + 'lines': [] + }, + 'leases_total': { + 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'], + 'lines': [ + ['leases_total', 'leases', 'absolute'] + ], + 'variables': [ + ['leases_size'] + ] + } +} + + +class DhcpdLeasesFile: + def __init__(self, path): + self.path = path + self.mod_time = 0 + self.size = 0 + + def is_valid(self): + return os.path.isfile(self.path) and os.access(self.path, os.R_OK) + + def is_changed(self): + mod_time = os.path.getmtime(self.path) + if mod_time != self.mod_time: + self.mod_time = mod_time + self.size = int(os.path.getsize(self.path) / 1024) + return True + return False + + def get_data(self): + try: + with open(self.path) as leases: + result = defaultdict(dict) + for row in leases: + row = row.strip() + if row.startswith('lease'): + address = row[6:-2] + elif row.startswith('iaaddr'): + address = row[7:-2] + elif row.startswith('ends'): + result[address]['ends'] = row[5:-1] + elif row.startswith('binding state'): + result[address]['state'] = row[14:-1] + return dict((k, v) for k, v in result.items() if len(v) == 2) + except (OSError, IOError): + return None + + +class Pool: + def __init__(self, name, network): + self.id = re.sub(r'[:/.-]+', '_', name) + self.name = name + self.network = ipaddress.ip_network(address=u'%s' % network) + + def num_hosts(self): + return self.network.num_addresses - 2 + + def __contains__(self, item): + return item.address in self.network + + +class Lease: + def __init__(self, address, ends, state): + self.address = ipaddress.ip_address(address=u'%s' % address) + self.ends = ends + self.state = state + + def is_active(self, current_time): + # lease_end_time might be epoch + if self.ends.startswith('epoch'): + epoch = int(self.ends.split()[1].replace(';', '')) + return epoch - current_time > 0 + # max. int for lease-time causes lease to expire in year 2038. 
+        # dhcpd puts 'never' in the ends section of active lease
+        elif self.ends == 'never':
+            return True
+        return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
+
+    def is_valid(self):
+        return self.state == 'active'
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = deepcopy(CHARTS)
+
+        lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
+        self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
+        self.pools = list()
+        self.data = dict()
+
+        # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
+        # TODO: update algorithm to parse correctly 'local' db-time-format
+
+    def check(self):
+        if not HAVE_IP_ADDRESS:
+            self.error("'python-ipaddress' module is needed")
+            return False
+
+        if not self.dhcpd_leases.is_valid():
+            self.error("Make sure '{path}' exists and is readable by netdata".format(path=self.dhcpd_leases.path))
+            return False
+
+        pools = self.configuration.get('pools')
+        if not pools:
+            self.error('Pools are not defined')
+            return False
+
+        for pool in pools:
+            try:
+                new_pool = Pool(name=pool, network=pools[pool])
+            except ValueError as error:
+                self.error("'{pool}' was skipped, error: {error}".format(pool=pools[pool], error=error))
+            else:
+                self.pools.append(new_pool)
+
+        self.create_charts()
+        return bool(self.pools)
+
+    def get_data(self):
+        """
+        :return: dict
+        """
+        if not self.dhcpd_leases.is_changed():
+            return self.data
+
+        raw_leases = self.dhcpd_leases.get_data()
+        if not raw_leases:
+            self.data = dict()
+            return None
+
+        active_leases = list()
+        current_time = time.mktime(time.gmtime())
+
+        for address in raw_leases:
+            try:
+                new_lease = Lease(address, **raw_leases[address])
+            except ValueError:
+                continue
+            else:
+                if new_lease.is_active(current_time) and new_lease.is_valid():
+                    active_leases.append(new_lease)
+
+        for pool in self.pools:
+            count = len([ip for ip in active_leases if ip in pool])
+            self.data[pool.id + '_active_leases'] = count
+            self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
+
+        self.data['leases_size'] = self.dhcpd_leases.size
+        self.data['leases_total'] = len(active_leases)
+
+        return self.data
+
+    def create_charts(self):
+        for pool in self.pools:
+            self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
+                                                                   'absolute', 1, 100])
+            self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
new file mode 100644
index 000000000..4a4c4a5e3
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for isc dhcpd leases
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, isc_dhcpd supports the following: +# +# leases_path: 'PATH' # the path to dhcpd.leases file +# pools: +# office: '192.168.2.0/24' # name(dimension): pool in CIDR format +# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format +# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format +# +#----------------------------------------------------------------------- +# IMPORTANT notes +# +# 1. Make sure leases file is readable by netdata. +# 2. Current implementation works only with 'default' db-time-format +# (weekday year/month/day hour:minute:second). +# This is the default, so it will work in most cases. +# 3. Pools MUST BE in CIDR format. 
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/linux_power_supply/Makefile.inc b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
new file mode 100644
index 000000000..1864ba524
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += linux_power_supply/linux_power_supply.chart.py
+dist_pythonconfig_DATA += linux_power_supply/linux_power_supply.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += linux_power_supply/README.md linux_power_supply/Makefile.inc
+
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
new file mode 100644
index 000000000..5cfbe41ce
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -0,0 +1,67 @@
+# linux\_power\_supply
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: The charge for the power supply, expressed as microamphours.
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: The energy for the power supply, expressed as microwatthours.
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: The voltage for the power supply, expressed as microvolts.
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+### configuration
+
+Sample:
+
+```yaml
+battery:
+  supply: 'BAT0'
+  charts: 'capacity charge energy voltage'
+```
+
+The `supply` key specifies the name of the power supply device to monitor.
+You can use `ls /sys/class/power_supply` to get a list of such devices
+on your system.
+
+The `charts` key is a space separated list of which charts to try
+to display. It defaults to trying to display everything.
+
+### notes
+
+* Most drivers provide at least the first chart. Battery powered ACPI
+compliant systems (like most laptops) provide all but the third, but do
+not provide all of the metrics for each chart.
+
+* Current, energy, and voltages are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect to see changes in these
+charts jump instead of scaling smoothly.
+
+* If a `max` or `full` attribute is defined by the driver, but not a
+corresponding `min` or `empty` attribute, then netdata will still provide
+the corresponding `min` or `empty` dimension, which will then always read
+as zero. This way, alerts which match on these will still work.
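+
+For a feel of the raw data involved, the attributes are plain integer
+files under sysfs. A minimal sketch of reading them (the supply name
+`BAT0` and the attributes shown are examples; which ones exist depends
+on your driver):
+
+```python
+import os
+
+def read_attr(supply, attr):
+    path = os.path.join('/sys/class/power_supply', supply, attr)
+    try:
+        with open(path) as f:
+            return int(f.read())
+    except (OSError, IOError, ValueError):
+        return None  # attribute not provided by this driver
+
+print(read_attr('BAT0', 'capacity'))    # percent
+print(read_attr('BAT0', 'energy_now'))  # microwatt-hours
+```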
+ +--- diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py new file mode 100644 index 000000000..71d834e5d --- /dev/null +++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# Description: Linux power_supply netdata python.d module +# Author: Austin S. Hemmelgarn (Ferroin) + +import os +import platform + +from bases.FrameworkServices.SimpleService import SimpleService + +# Everything except percentages is reported as µ units. +PRECISION = 10 ** 6 + +# A priority of 90000 places us next to the other PSU related stuff. +PRIORITY = 90000 + +# We add our charts dynamically when we probe for the device attributes, +# so these are empty by default. +ORDER = [] + +CHARTS = {} + + +def get_capacity_chart(syspath): + # Capacity is measured in percent. We track one value. + options = [None, 'Capacity', '%', 'power_supply', 'power_supply.capacity', 'line'] + lines = list() + attr_now = 'capacity' + if get_sysfs_value(os.path.join(syspath, attr_now)) is not None: + lines.append([attr_now, attr_now, 'absolute', 1, 1]) + return {'capacity': {'options': options, 'lines': lines}}, [attr_now] + else: + return None, None + + +def get_generic_chart(syspath, name, unit, maxname, minname): + # Used to generate charts for energy, charge, and voltage. + options = [None, name.title(), unit, 'power_supply', 'power_supply.{0}'.format(name), 'line'] + lines = list() + attrlist = list() + attr_max_design = '{0}_{1}_design'.format(name, maxname) + attr_max = '{0}_{1}'.format(name, maxname) + attr_now = '{0}_now'.format(name) + attr_min = '{0}_{1}'.format(name, minname) + attr_min_design = '{0}_{1}_design'.format(name, minname) + if get_sysfs_value(os.path.join(syspath, attr_now)) is not None: + lines.append([attr_now, attr_now, 'absolute', 1, PRECISION]) + attrlist.append(attr_now) + else: + return None, None + if get_sysfs_value(os.path.join(syspath, attr_max)) is not None: + lines.insert(0, [attr_max, attr_max, 'absolute', 1, PRECISION]) + lines.append([attr_min, attr_min, 'absolute', 1, PRECISION]) + attrlist.append(attr_max) + attrlist.append(attr_min) + elif get_sysfs_value(os.path.join(syspath, attr_min)) is not None: + lines.append([attr_min, attr_min, 'absolute', 1, PRECISION]) + attrlist.append(attr_min) + if get_sysfs_value(os.path.join(syspath, attr_max_design)) is not None: + lines.insert(0, [attr_max_design, attr_max_design, 'absolute', 1, PRECISION]) + lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION]) + attrlist.append(attr_max_design) + attrlist.append(attr_min_design) + elif get_sysfs_value(os.path.join(syspath, attr_min_design)) is not None: + lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION]) + attrlist.append(attr_min_design) + return {name: {'options': options, 'lines': lines}}, attrlist + + +def get_charge_chart(syspath): + # Charge is measured in microamphours. We track up to five + # attributes. + return get_generic_chart(syspath, 'charge', 'µAh', 'full', 'empty') + + +def get_energy_chart(syspath): + # Energy is measured in microwatthours. We track up to five + # attributes. + return get_generic_chart(syspath, 'energy', 'µWh', 'full', 'empty') + + +def get_voltage_chart(syspath): + # Voltage is measured in microvolts. We track up to five attributes. 
+ return get_generic_chart(syspath, 'voltage', 'µV', 'min', 'max') + + +# This is a list of functions for generating charts. Used below to save +# a bit of code (and to make it a bit easier to add new charts). +GET_CHART = { + 'capacity': get_capacity_chart, + 'charge': get_charge_chart, + 'energy': get_energy_chart, + 'voltage': get_voltage_chart +} + + +# This opens the specified file and returns the value in it or None if +# the file doesn't exist. +def get_sysfs_value(filepath): + try: + with open(filepath, 'r') as datasource: + return int(datasource.read()) + except (OSError, IOError): + return None + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.definitions = dict() + self.order = list() + self.attrlist = list() + self.supply = self.configuration.get('supply', None) + if self.supply is not None: + self.syspath = '/sys/class/power_supply/{0}'.format(self.supply) + self.types = self.configuration.get('charts', 'capacity').split() + + def check(self): + if platform.system() != 'Linux': + self.error('Only supported on Linux.') + return False + if self.supply is None: + self.error('No power supply specified for monitoring.') + return False + if not self.types: + self.error('No attributes requested for monitoring.') + return False + if not os.access(self.syspath, os.R_OK): + self.error('Unable to access {0}'.format(self.syspath)) + return False + return self.create_charts() + + def create_charts(self): + chartset = set(GET_CHART).intersection(set(self.types)) + if not chartset: + self.error('No valid attributes requested for monitoring.') + return False + charts = dict() + attrlist = list() + for item in chartset: + chart, attrs = GET_CHART[item](self.syspath) + if chart is not None: + charts.update(chart) + attrlist.extend(attrs) + if len(charts) == 0: + self.error('No charts can be created.') + return False + self.definitions.update(charts) + self.order.extend(sorted(charts)) + self.attrlist.extend(attrlist) + return True + + def _get_data(self): + data = dict() + for attr in self.attrlist: + attrpath = os.path.join(self.syspath, attr) + if attr.endswith(('_min', '_min_design', '_empty', '_empty_design')): + data[attr] = get_sysfs_value(attrpath) or 0 + else: + data[attr] = get_sysfs_value(attrpath) + return data diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf new file mode 100644 index 000000000..3cb610f7f --- /dev/null +++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf @@ -0,0 +1,81 @@ +# netdata python.d.plugin configuration for linux_power_supply +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. 
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above parameters, linux_power_supply also supports
+# the following extra parameters.
+#
+#     supply: ''              # the name of the power supply to monitor
+#     charts: 'capacity'      # a space separated list of the charts to try
+#                             # to generate. Valid charts are 'capacity',
+#                             # 'charge', 'energy', and 'voltage'
+#
+# Note that linux_power_supply will not automatically detect power
+# supplies in the system, you have to manually specify which ones you
+# want it to monitor.
+#
+# The following config will work to monitor the first battery in most
+# ACPI compliant battery powered systems (such as most laptops).
+#
+# battery:
+#   name: battery
+#   supply: BAT0
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
new file mode 100644
index 000000000..5dd645020
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += litespeed/litespeed.chart.py
+dist_pythonconfig_DATA += litespeed/litespeed.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += litespeed/README.md litespeed/Makefile.inc
+
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 100644
index 000000000..d1482f33c
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1,47 @@
+# litespeed
+
+This module monitors litespeed web server performance metrics.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+ * in
+ * out
+
+2. **Network Throughput HTTPS** in kilobits/s
+ * in
+ * out
+
+3. **Connections HTTP** in connections
+ * free
+ * used
+
+4. **Connections HTTPS** in connections
+ * free
+ * used
+
+5. **Requests** in requests/s
+ * requests
+
+6. **Requests In Processing** in requests
+ * processing
+
+7. **Public Cache Hits** in hits/s
+ * hits
+
+8. **Private Cache Hits** in hits/s
+ * hits
+
+9. **Static Hits** in hits/s
+ * hits
+
+
+### configuration
+```yaml
+local:
+  path : 'PATH'
+```
+
+If no configuration is given, the module will use "/tmp/lshttpd/".
+
+---
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
new file mode 100644
index 000000000..efdc6869c
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+# Description: litespeed netdata python.d module
+# Author: Ilya Maschenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import re
+import os
+
+from collections import namedtuple
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+    'net_throughput_http', 'net_throughput_https',  # net throughput
+    'connections_http', 'connections_https',        # connections
+    'requests', 'requests_processing',              # requests
+    'pub_cache_hits', 'private_cache_hits',         # cache
+    'static_hits'                                   # static
+]
+
+CHARTS = {
+    'net_throughput_http': {
+        'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
+                    'litespeed.net_throughput', 'area'],
+        'lines': [
+            ['bps_in', 'in', 'absolute'],
+            ['bps_out', 'out', 'absolute', -1]
+        ]
+    },
+    'net_throughput_https': {
+        'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
+                    'litespeed.net_throughput', 'area'],
+        'lines': [
+            ['ssl_bps_in', 'in', 'absolute'],
+            ['ssl_bps_out', 'out', 'absolute', -1]
+        ]
+    },
+    'connections_http': {
+        'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+        'lines': [
+            ['conn_free', 'free', 'absolute'],
+            ['conn_used', 'used', 'absolute']
+        ]
+    },
+    'connections_https': {
+        'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+        'lines': [
+            ['ssl_conn_free', 'free', 'absolute'],
+            ['ssl_conn_used', 'used', 'absolute']
+        ]
+    },
+    'requests': {
+        'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
+        'lines': [
+            ['requests', None, 'absolute', 1, 100]
+        ]
+    },
+    'requests_processing': {
+        'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
+        'lines': [
+            ['requests_processing', 'processing', 'absolute']
+        ]
+    },
+    'pub_cache_hits': {
+        'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+        'lines': [
+            ['pub_cache_hits', 'hits', 'absolute', 1, 100]
+        ]
+    },
+    'private_cache_hits': {
+        'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+        'lines': [
+            ['private_cache_hits', 'hits', 'absolute', 1, 100]
+        ]
+    },
+    'static_hits': {
+        'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
+        'lines': [
+            ['static_hits', 'hits', 'absolute', 1, 100]
+        ]
+    }
+}
+
+t = namedtuple('T', ['key', 'id', 'mul'])
+
+T = [
+    t('BPS_IN', 'bps_in', 8),
+    t('BPS_OUT', 'bps_out', 8),
+    t('SSL_BPS_IN', 'ssl_bps_in', 8),
+    t('SSL_BPS_OUT', 'ssl_bps_out', 8),
+    t('REQ_PER_SEC', 'requests', 100),
+    t('REQ_PROCESSING', 'requests_processing', 1),
+    t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
+    t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits',
100), + t('STATIC_HITS_PER_SEC', 'static_hits', 100), + t('PLAINCONN', 'conn_used', 1), + t('AVAILCONN', 'conn_free', 1), + t('SSLCONN', 'ssl_conn_used', 1), + t('AVAILSSL', 'ssl_conn_free', 1), +] + +RE = re.compile(r'([A-Z_]+): ([0-9.]+)') + +ZERO_DATA = { + 'bps_in': 0, + 'bps_out': 0, + 'ssl_bps_in': 0, + 'ssl_bps_out': 0, + 'requests': 0, + 'requests_processing': 0, + 'pub_cache_hits': 0, + 'private_cache_hits': 0, + 'static_hits': 0, + 'conn_used': 0, + 'conn_free': 0, + 'ssl_conn_used': 0, + 'ssl_conn_free': 0, +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.path = self.configuration.get('path', '/tmp/lshttpd/') + self.files = list() + + def check(self): + if not self.path: + self.error('"path" not specified') + return False + + fs = glob.glob(os.path.join(self.path, '.rtreport*')) + + if not fs: + self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path)) + return None + + self.debug('stats files:', fs) + + for f in fs: + if not is_readable_file(f): + self.error('{0} is not readable'.format(f)) + continue + self.files.append(f) + + return bool(self.files) + + def get_data(self): + """ + Format data received from http request + :return: dict + """ + data = dict(ZERO_DATA) + + for f in self.files: + try: + with open(f) as b: + lines = b.readlines() + except (OSError, IOError) as err: + self.error(err) + return None + else: + parse_file(data, lines) + + return data + + +def parse_file(data, lines): + for line in lines: + if not line.startswith(('BPS_IN:', 'MAXCONN:', 'REQ_RATE []:')): + continue + m = dict(RE.findall(line)) + for v in T: + if v.key in m: + data[v.id] += float(m[v.key]) * v.mul + + +def is_readable_file(v): + return os.path.isfile(v) and os.access(v, os.R_OK) diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf new file mode 100644 index 000000000..17d0f690e --- /dev/null +++ b/collectors/python.d.plugin/litespeed/litespeed.conf @@ -0,0 +1,74 @@ +# netdata python.d.plugin configuration for litespeed +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. 
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, litespeed also supports the following:
+#
+#     path: 'PATH'            # path to the litespeed stats files directory
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/logind/Makefile.inc b/collectors/python.d.plugin/logind/Makefile.inc
new file mode 100644
index 000000000..adadab120
--- /dev/null
+++ b/collectors/python.d.plugin/logind/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += logind/logind.chart.py
+dist_pythonconfig_DATA += logind/logind.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += logind/README.md logind/Makefile.inc
+
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
new file mode 100644
index 000000000..8f8670d4a
--- /dev/null
+++ b/collectors/python.d.plugin/logind/README.md
@@ -0,0 +1,54 @@
+# logind
+
+This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+
+It provides the following charts:
+
+1. **Sessions** Tracks the total number of sessions.
+ * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
+ * Console: Local console sessions.
+ * Remote: Remote sessions.
+
+2. **Users** Tracks the total number of unique user logins of each type.
+ * Graphical
+ * Console
+ * Remote
+
+3. **Seats** Total number of seats in use.
+ * Seats
+
+### configuration
+
+This module needs no configuration. Just make sure the netdata user
+can run the `loginctl` command and get a session list without having to
+specify a path.
+
+This will work with any command that can output data in the _exact_
+same format as `loginctl list-sessions --no-legend`. If you have some
+other command you want to use that outputs data in this format, you can
+specify it using the `command` key like so:
+
+```yaml
+command: '/path/to/other/command'
+```
+
+### notes
+
+* This module's ability to track logins is dependent on what PAM services
+are configured to register sessions with logind. In particular, for
+most systems, it will only track TTY logins, local desktop logins,
+and logins through remote shell connections.
+
+* The users chart counts _usernames_, not UIDs.
This is potentially +important in configurations where multiple users have the same UID. + +* The users chart counts any given user name up to once for _each_ type +of login. So if the same user has a graphical and a console login on a +system, they will show up once in the graphical count, and once in the +console count. + +* Because the data collection process is rather expensive, this plugin +is currently disabled by default, and needs to be explicitly enabled in +`/etc/netdata/python.d.conf` before it will run. + +--- diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py new file mode 100644 index 000000000..bfc486c7f --- /dev/null +++ b/collectors/python.d.plugin/logind/logind.chart.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Description: logind netdata python.d module +# Author: Austin S. Hemmelgarn (Ferroin) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.FrameworkServices.ExecutableService import ExecutableService + +priority = 59999 +disabled_by_default = True + +ORDER = ['sessions', 'users', 'seats'] + +CHARTS = { + 'sessions': { + 'options': [None, 'Logind Sessions', 'sessions', 'sessions', 'logind.sessions', 'stacked'], + 'lines': [ + ['sessions_graphical', 'Graphical', 'absolute', 1, 1], + ['sessions_console', 'Console', 'absolute', 1, 1], + ['sessions_remote', 'Remote', 'absolute', 1, 1] + ] + }, + 'users': { + 'options': [None, 'Logind Users', 'users', 'users', 'logind.users', 'stacked'], + 'lines': [ + ['users_graphical', 'Graphical', 'absolute', 1, 1], + ['users_console', 'Console', 'absolute', 1, 1], + ['users_remote', 'Remote', 'absolute', 1, 1] + ] + }, + 'seats': { + 'options': [None, 'Logind Seats', 'seats', 'seats', 'logind.seats', 'line'], + 'lines': [ + ['seats', 'Active Seats', 'absolute', 1, 1] + ] + } +} + + +class Service(ExecutableService): + def __init__(self, configuration=None, name=None): + ExecutableService.__init__(self, configuration=configuration, name=name) + self.command = 'loginctl list-sessions --no-legend' + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + ret = { + 'sessions_graphical': 0, + 'sessions_console': 0, + 'sessions_remote': 0, + } + users = { + 'graphical': list(), + 'console': list(), + 'remote': list() + } + seats = list() + data = self._get_raw_data() + + for item in data: + fields = item.split() + if len(fields) == 3: + users['remote'].append(fields[2]) + ret['sessions_remote'] += 1 + elif len(fields) == 4: + users['graphical'].append(fields[2]) + ret['sessions_graphical'] += 1 + seats.append(fields[3]) + elif len(fields) == 5: + users['console'].append(fields[2]) + ret['sessions_console'] += 1 + seats.append(fields[3]) + + ret['users_graphical'] = len(set(users['graphical'])) + ret['users_console'] = len(set(users['console'])) + ret['users_remote'] = len(set(users['remote'])) + ret['seats'] = len(set(seats)) + + return ret diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf new file mode 100644 index 000000000..0623493de --- /dev/null +++ b/collectors/python.d.plugin/logind/logind.conf @@ -0,0 +1,62 @@ +# netdata python.d.plugin configuration for logind +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. 
+#
+# JOB parameters have to be indented (using spaces only, example below).

+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.

+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1

+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000

+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60

+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0

+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/mdstat/Makefile.inc b/collectors/python.d.plugin/mdstat/Makefile.inc
new file mode 100644
index 000000000..5125a271b
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += mdstat/mdstat.chart.py
+dist_pythonconfig_DATA += mdstat/mdstat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += mdstat/README.md mdstat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
new file mode 100644
index 000000000..1ff8f7dab
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -0,0 +1,26 @@
+# mdstat
+
+This module monitors /proc/mdstat.
+
+It produces:
+
+1. **Health** Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array ideally would have)
+ * inuse (number of devices currently in use)
+
+3. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+### configuration
+No configuration is needed.
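+
+As a rough illustration of what the module extracts, the `[total/inuse]`
+device counts can be pulled from the (joined) per-array text with a regex
+much like the module's own (the sample line below is made up):
+
+```python
+import re
+
+line = 'md0 : active raid1 sdb1[1] sda1[0] [2/1] [U_]'
+m = re.search(r'(?P<array>\w+) : active .+\[(?P<total>[0-9]+)/(?P<inuse>[0-9]+)\]', line)
+if m:
+    total, inuse = int(m.group('total')), int(m.group('inuse'))
+    print('{0}: {1} failed device(s)'.format(m.group('array'), total - inuse))
+```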
+ +--- diff --git a/collectors/python.d.plugin/mdstat/mdstat.chart.py b/collectors/python.d.plugin/mdstat/mdstat.chart.py new file mode 100644 index 000000000..b7306b6a7 --- /dev/null +++ b/collectors/python.d.plugin/mdstat/mdstat.chart.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +# Description: mdstat netdata python.d module +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import re + +from collections import defaultdict + +from bases.FrameworkServices.SimpleService import SimpleService + +MDSTAT = '/proc/mdstat' +MISMATCH_CNT = '/sys/block/{0}/md/mismatch_cnt' + +ORDER = ['mdstat_health'] + +CHARTS = { + 'mdstat_health': { + 'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'], + 'lines': [] + } +} + +RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\[' + r'(?P<total_disks>[0-9]+)/' + r'(?P<inuse_disks>[0-9]+)\]') + +RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ ' + r'(?P<operation>[a-z]+) =[ ]{1,2}' + r'(?P<operation_status>[0-9.]+).+finish=' + r'(?P<finish_in>([0-9.]+))min speed=' + r'(?P<speed>[0-9]+)') + + +def md_charts(name): + order = [ + '{0}_disks'.format(name), + '{0}_operation'.format(name), + '{0}_mismatch_cnt'.format(name), + '{0}_finish'.format(name), + '{0}_speed'.format(name) + ] + + charts = dict() + charts[order[0]] = { + 'options': [None, 'Disks Stats', 'disks', name, 'md.disks', 'stacked'], + 'lines': [ + ['{0}_total_disks'.format(name), 'total', 'absolute'], + ['{0}_inuse_disks'.format(name), 'inuse', 'absolute'] + ] + } + + charts[order[1]] = { + 'options': [None, 'Current Status', 'percent', name, 'md.status', 'line'], + 'lines': [ + ['{0}_resync'.format(name), 'resync', 'absolute', 1, 100], + ['{0}_recovery'.format(name), 'recovery', 'absolute', 1, 100], + ['{0}_reshape'.format(name), 'reshape', 'absolute', 1, 100], + ['{0}_check'.format(name), 'check', 'absolute', 1, 100], + ] + } + + charts[order[2]] = { + 'options': [None, 'Mismatch Count', 'unsynchronized blocks', name, 'md.mismatch_cnt', 'line'], + 'lines': [ + ['{0}_mismatch_cnt'.format(name), 'count', 'absolute'] + ] + } + + charts[order[3]] = { + 'options': [None, 'Approximate Time Until Finish', 'seconds', name, 'md.rate', 'line'], + 'lines': [ + ['{0}_finish_in'.format(name), 'finish in', 'absolute', 1, 1000] + ] + } + + charts[order[4]] = { + 'options': [None, 'Operation Speed', 'KB/s', name, 'md.rate', 'line'], + 'lines': [ + ['{0}_speed'.format(name), 'speed', 'absolute', 1, 1000] + ] + } + + return order, charts + + +class MD: + def __init__(self, raw_data): + self.name = raw_data['array'] + self.d = raw_data + + def data(self): + rv = { + 'total_disks': self.d['total_disks'], + 'inuse_disks': self.d['inuse_disks'], + 'health': int(self.d['total_disks']) - int(self.d['inuse_disks']), + 'resync': 0, + 'recovery': 0, + 'reshape': 0, + 'check': 0, + 'finish_in': 0, + 'speed': 0, + } + + v = read_lines(MISMATCH_CNT.format(self.name)) + if v: + rv['mismatch_cnt'] = v + + if self.d.get('operation'): + rv[self.d['operation']] = float(self.d['operation_status']) * 100 + rv['finish_in'] = float(self.d['finish_in']) * 1000 * 60 + rv['speed'] = float(self.d['speed']) * 1000 + + return dict(('{0}_{1}'.format(self.name, k), v) for k, v in rv.items()) + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.mds = list() + + @staticmethod + def get_mds(): + 
raw = read_lines(MDSTAT)
+
+        if not raw:
+            return None
+
+        return find_mds(raw)
+
+    def get_data(self):
+        """
+        Parse data from /proc/mdstat
+        :return: dict
+        """
+        mds = self.get_mds()
+
+        if not mds:
+            return None
+
+        data = dict()
+        for md in mds:
+            if md.name not in self.mds:
+                self.mds.append(md.name)
+                self.add_new_md_charts(md.name)
+            data.update(md.data())
+        return data
+
+    def check(self):
+        if not self.get_mds():
+            self.error('Failed to read data from {0} or there are no active arrays'.format(MDSTAT))
+            return False
+        return True
+
+    def add_new_md_charts(self, name):
+        order, charts = md_charts(name)
+
+        self.charts['mdstat_health'].add_dimension(['{0}_health'.format(name), name])
+
+        for chart_name in order:
+            params = [chart_name] + charts[chart_name]['options']
+            dims = charts[chart_name]['lines']
+
+            chart = self.charts.add_chart(params)
+            for dim in dims:
+                chart.add_dimension(dim)
+
+
+def find_mds(raw_data):
+    data = defaultdict(str)
+    counter = 1
+
+    for row in (elem.strip() for elem in raw_data):
+        if not row:
+            counter += 1
+            continue
+        data[counter] = ' '.join([data[counter], row])
+
+    mds = list()
+
+    for v in data.values():
+        m = RE_DISKS.search(v)
+
+        if not m:
+            continue
+
+        d = m.groupdict()
+
+        m = RE_STATUS.search(v)
+        if m:
+            d.update(m.groupdict())
+
+        mds.append(MD(d))
+
+    return sorted(mds, key=lambda md: md.name)
+
+
+def read_lines(path):
+    try:
+        with open(path) as f:
+            return f.readlines()
+    except (IOError, OSError):
+        return None
diff --git a/collectors/python.d.plugin/mdstat/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
new file mode 100644
index 000000000..66a2f153c
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
@@ -0,0 +1,32 @@
+# netdata python.d.plugin configuration for mdstat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
new file mode 100644
index 000000000..83680d723
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += megacli/megacli.chart.py
+dist_pythonconfig_DATA += megacli/megacli.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
+
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 100644
index 000000000..d288a6353
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1,48 @@
+# megacli
+
+Module collects adapter, physical drive and battery stats.
+
+**Requirements:**
+ * `megacli` program
+ * `sudo` program
+ * `netdata` user needs to be able to sudo the `megacli` program without a password
+
+To grab stats it executes:
+ * `sudo -n megacli -LDPDInfo -aAll`
+ * `sudo -n megacli -AdpBbuCmd -a0`
+
+
+It produces:
+
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
+
+3. **Physical Drives Predictive Failures**
+
+4. **Battery Relative State of Charge**
+
+5. **Battery Cycle Count**
+
+### prerequisite
+This module uses `megacli`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `megacli` as root without a password.
+
+Add to `sudoers`:
+
+    netdata ALL=(root) NOPASSWD: /path/to/megacli
+
+### configuration
+
+**megacli** is disabled by default. It must be explicitly enabled in `python.d.conf`:
+```yaml
+megacli: yes
+```
+
+Battery stats are disabled by default. To enable them, modify `megacli.conf`:
+```yaml +do_battery: yes +``` + +--- diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py new file mode 100644 index 000000000..41a1079f6 --- /dev/null +++ b/collectors/python.d.plugin/megacli/megacli.chart.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- +# Description: megacli netdata python.d module +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + + +import re + +from bases.FrameworkServices.ExecutableService import ExecutableService +from bases.collection import find_binary + + +disabled_by_default = True + +update_every = 5 + + +def adapter_charts(ads): + order = [ + 'adapter_degraded', + ] + + def dims(ad): + return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad] + + charts = { + 'adapter_degraded': { + 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'], + 'lines': dims(ads) + }, + } + + return order, charts + + +def pd_charts(pds): + order = [ + 'pd_media_error', + 'pd_predictive_failure', + ] + + def dims(k, pd): + return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd] + + charts = { + 'pd_media_error': { + 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'], + 'lines': dims('media_error', pds) + }, + 'pd_predictive_failure': { + 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd', + 'megacli.pd_predictive_failure', 'line'], + 'lines': dims('predictive_failure', pds) + } + } + + return order, charts + + +def battery_charts(bats): + order = list() + charts = dict() + + for b in bats: + order.append('bbu_{0}_relative_charge'.format(b.id)) + charts.update( + { + 'bbu_{0}_relative_charge'.format(b.id): { + 'options': [None, 'Relative State of Charge', '%', 'battery', + 'megacli.bbu_relative_charge', 'line'], + 'lines': [ + ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)], + ] + } + } + ) + + for b in bats: + order.append('bbu_{0}_cycle_count'.format(b.id)) + charts.update( + { + 'bbu_{0}_cycle_count'.format(b.id): { + 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'], + 'lines': [ + ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)], + ] + } + } + ) + + return order, charts + + +RE_ADAPTER = re.compile( + r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)' +) + +RE_VD = re.compile( + r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)' +) + +RE_BATTERY = re.compile( + r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)' +) + + +def find_adapters(d): + keys = ('Adapter #', 'State') + d = ' '.join(v.strip() for v in d if v.startswith(keys)) + return [Adapter(*v) for v in RE_ADAPTER.findall(d)] + + +def find_pds(d): + keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count') + d = ' '.join(v.strip() for v in d if v.startswith(keys)) + return [PD(*v) for v in RE_VD.findall(d)] + + +def find_batteries(d): + keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count') + d = ' '.join(v.strip() for v in d if v.strip().startswith(keys)) + return [Battery(*v) for v in RE_BATTERY.findall(d)] + + +class Adapter: + def __init__(self, n, state): + self.id = n + self.state = int(state == 'Degraded') + + def data(self): + return { + 'adapter_{0}_degraded'.format(self.id): self.state, + } + + +class PD: + def __init__(self, n, 
media_err, predict_fail): + self.id = n + self.media_err = media_err + self.predict_fail = predict_fail + + def data(self): + return { + 'slot_{0}_media_error'.format(self.id): self.media_err, + 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail, + } + + +class Battery: + def __init__(self, adapt_id, rel_charge, cycle_count): + self.id = adapt_id + self.rel_charge = rel_charge + self.cycle_count = cycle_count + + def data(self): + return { + 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge, + 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count, + } + + +# TODO: hardcoded sudo... +class Megacli: + def __init__(self): + self.s = find_binary('sudo') + self.m = find_binary('megacli') + self.sudo_check = [self.s, '-n', '-v'] + self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog'] + self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog'] + + def __bool__(self): + return bool(self.s and self.m) + + def __nonzero__(self): + return self.__bool__() + + +class Service(ExecutableService): + def __init__(self, configuration=None, name=None): + ExecutableService.__init__(self, configuration=configuration, name=name) + self.order = list() + self.definitions = dict() + self.megacli = Megacli() + self.do_battery = self.configuration.get('do_battery') + + def check_sudo(self): + err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True) + if err: + self.error(''.join(err)) + return False + return True + + def check_disk_info(self): + d = self._get_raw_data(command=self.megacli.disk_info) + if not d: + return False + + ads = find_adapters(d) + pds = find_pds(d) + + if not (ads and pds): + self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info))) + return False + + o, c = adapter_charts(ads) + self.order.extend(o) + self.definitions.update(c) + + o, c = pd_charts(pds) + self.order.extend(o) + self.definitions.update(c) + + return True + + def check_battery(self): + d = self._get_raw_data(command=self.megacli.battery_info) + if not d: + return False + + bats = find_batteries(d) + + if not bats: + self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info))) + return False + + o, c = battery_charts(bats) + self.order.extend(o) + self.definitions.update(c) + return True + + def check(self): + if not self.megacli: + self.error('can\'t locate "sudo" or "megacli" binary') + return None + + if not (self.check_sudo() and self.check_disk_info()): + return False + + if self.do_battery: + self.do_battery = self.check_battery() + + return True + + def get_data(self): + data = dict() + + data.update(self.get_adapter_pd_data()) + + if self.do_battery: + data.update(self.get_battery_data()) + + return data or None + + def get_adapter_pd_data(self): + raw = self._get_raw_data(command=self.megacli.disk_info) + data = dict() + + if not raw: + return data + + for a in find_adapters(raw): + data.update(a.data()) + + for p in find_pds(raw): + data.update(p.data()) + + return data + + def get_battery_data(self): + raw = self._get_raw_data(command=self.megacli.battery_info) + data = dict() + + if not raw: + return data + + for b in find_batteries(raw): + data.update(b.data()) + + return data diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf new file mode 100644 index 000000000..73afb2f7f --- /dev/null +++ b/collectors/python.d.plugin/megacli/megacli.conf @@ -0,0 +1,62 @@ +# netdata python.d.plugin configuration for megacli +# +# This file is in 
YaML format. Generally the format is: +# +# name: value +# + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, megacli also supports the following: +# +# do_battery: yes/no # default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`). +# +# ---------------------------------------------------------------------- +# uncomment the line below to collect battery statistics +# do_battery: yes diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc new file mode 100644 index 000000000..e60357161 --- /dev/null +++ b/collectors/python.d.plugin/memcached/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += memcached/memcached.chart.py +dist_pythonconfig_DATA += memcached/memcached.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += memcached/README.md memcached/Makefile.inc + diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md new file mode 100644 index 000000000..3521c109d --- /dev/null +++ b/collectors/python.d.plugin/memcached/README.md @@ -0,0 +1,69 @@ +# memcached + +Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats). + +1. **Network** in kilobytes/s + * read + * written + +2. 
**Connections** per second + * current + * rejected + * total + +3. **Items** in cluster + * current + * total + +4. **Evicted and Reclaimed** items + * evicted + * reclaimed + +5. **GET** requests/s + * hits + * misses + +6. **GET rate** rate in requests/s + * rate + +7. **SET rate** rate in requests/s + * rate + +8. **DELETE** requests/s + * hits + * misses + +9. **CAS** requests/s + * hits + * misses + * bad value + +10. **Increment** requests/s + * hits + * misses + +11. **Decrement** requests/s + * hits + * misses + +12. **Touch** requests/s + * hits + * misses + +13. **Touch rate** rate in requests/s + * rate + +### configuration + +Sample: + +```yaml +localtcpip: + name : 'local' + host : '127.0.0.1' + port : 24242 +``` + +If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address. + +--- diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py new file mode 100644 index 000000000..3c310ec69 --- /dev/null +++ b/collectors/python.d.plugin/memcached/memcached.chart.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Description: memcached netdata python.d module +# Author: Pawel Krupa (paulfantom) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.FrameworkServices.SocketService import SocketService + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +# default job configuration (overridden by python.d.plugin) +# config = {'local': { +# 'update_every': update_every, +# 'retries': retries, +# 'priority': priority, +# 'host': 'localhost', +# 'port': 11211, +# 'unix_socket': None +# }} + +ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed', + 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate'] + +CHARTS = { + 'cache': { + 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'], + 'lines': [ + ['avail', 'available', 'absolute', 1, 1048576], + ['used', 'used', 'absolute', 1, 1048576] + ] + }, + 'net': { + 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'], + 'lines': [ + ['bytes_read', 'in', 'incremental', 8, 1024], + ['bytes_written', 'out', 'incremental', -8, 1024] + ] + }, + 'connections': { + 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'], + 'lines': [ + ['curr_connections', 'current', 'incremental'], + ['rejected_connections', 'rejected', 'incremental'], + ['total_connections', 'total', 'incremental'] + ] + }, + 'items': { + 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'], + 'lines': [ + ['curr_items', 'current', 'absolute'], + ['total_items', 'total', 'absolute'] + ] + }, + 'evicted_reclaimed': { + 'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'], + 'lines': [ + ['reclaimed', 'reclaimed', 'absolute'], + ['evictions', 'evicted', 'absolute'] + ] + }, + 'get': { + 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'], + 'lines': [ + ['get_hits', 'hits', 'percent-of-absolute-row'], + ['get_misses', 'misses', 'percent-of-absolute-row'] + ] + }, + 'get_rate': { + 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'], + 'lines': [ + ['cmd_get', 'rate', 'incremental'] + ] + }, + 'set_rate': { + 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'], + 'lines': [ + ['cmd_set', 'rate', 
'incremental'] + ] + }, + 'delete': { + 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'], + 'lines': [ + ['delete_hits', 'hits', 'percent-of-absolute-row'], + ['delete_misses', 'misses', 'percent-of-absolute-row'], + ] + }, + 'cas': { + 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'], + 'lines': [ + ['cas_hits', 'hits', 'percent-of-absolute-row'], + ['cas_misses', 'misses', 'percent-of-absolute-row'], + ['cas_badval', 'bad value', 'percent-of-absolute-row'] + ] + }, + 'increment': { + 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'], + 'lines': [ + ['incr_hits', 'hits', 'percent-of-absolute-row'], + ['incr_misses', 'misses', 'percent-of-absolute-row'] + ] + }, + 'decrement': { + 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'], + 'lines': [ + ['decr_hits', 'hits', 'percent-of-absolute-row'], + ['decr_misses', 'misses', 'percent-of-absolute-row'] + ] + }, + 'touch': { + 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'], + 'lines': [ + ['touch_hits', 'hits', 'percent-of-absolute-row'], + ['touch_misses', 'misses', 'percent-of-absolute-row'] + ] + }, + 'touch_rate': { + 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'], + 'lines': [ + ['cmd_touch', 'rate', 'incremental'] + ] + } +} + + +class Service(SocketService): + def __init__(self, configuration=None, name=None): + SocketService.__init__(self, configuration=configuration, name=name) + self.request = 'stats\r\n' + self.host = 'localhost' + self.port = 11211 + self._keep_alive = True + self.unix_socket = None + self.order = ORDER + self.definitions = CHARTS + + def _get_data(self): + """ + Get data from socket + :return: dict + """ + response = self._get_raw_data() + if response is None: + # error has already been logged + return None + + if response.startswith('ERROR'): + self.error('received ERROR') + return None + + try: + parsed = response.split('\n') + except AttributeError: + self.error('response is invalid/empty') + return None + + # split the response + data = {} + for line in parsed: + if line.startswith('STAT'): + try: + t = line[5:].split(' ') + data[t[0]] = t[1] + except (IndexError, ValueError): + self.debug('invalid line received: ' + str(line)) + + if not data: + self.error("received data doesn't have any records") + return None + + # custom calculations + try: + data['avail'] = int(data['limit_maxbytes']) - int(data['bytes']) + data['used'] = int(data['bytes']) + except (KeyError, ValueError, TypeError): + pass + + return data + + def _check_raw_data(self, data): + if data.endswith('END\r\n'): + self.debug('received full response from memcached') + return True + + self.debug('waiting more data from memcached') + return False + + def check(self): + """ + Parse configuration, check if memcached is available + :return: boolean + """ + self._parse_config() + data = self._get_data() + if data is None: + return False + return True diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf new file mode 100644 index 000000000..85c3daf65 --- /dev/null +++ b/collectors/python.d.plugin/memcached/memcached.conf @@ -0,0 +1,92 @@ +# netdata python.d.plugin configuration for memcached +# +# This file is in YaML format. 
Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, memcached also supports the following:
+#
+# socket: 'path/to/memcached.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT             # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  host : 'localhost'
+  port : 11211
+
+localipv4:
+  name : 'local'
+  host : '127.0.0.1'
+  port : 11211
+
+localipv6:
+  name : 'local'
+  host : '::1'
+  port : 11211
+
diff --git a/collectors/python.d.plugin/mongodb/Makefile.inc b/collectors/python.d.plugin/mongodb/Makefile.inc
new file mode 100644
index 000000000..784945aa6
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mongodb/mongodb.chart.py
+dist_pythonconfig_DATA += mongodb/mongodb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mongodb/README.md mongodb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
new file mode 100644
index 000000000..8e5f652c5
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -0,0 +1,141 @@
+# mongodb
+
+Module monitors MongoDB performance and health metrics.
+
+**Requirements:**
+ * `python-pymongo` package v2.4+.
+
+You need to install it manually.
+
+
+The number of charts depends on the MongoDB version, storage engine and other features (replication):
+
+1. **Read requests**:
+ * query
+ * getmore (operation the cursor executes to get additional data from a query)
+
+2. **Write requests**:
+ * insert
+ * delete
+ * update
+
+3. **Active clients**:
+ * readers (number of clients with read operations in progress or queued)
+ * writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+ * commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+ * volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+ * average ms (average time taken by flushes to execute)
+ * last ms (time taken by the last flush)
+
+7. **Read tickets** (WiredTiger):
+ * in use (number of read tickets in use)
+ * available (number of available read tickets remaining)
+
+8. **Write tickets** (WiredTiger):
+ * in use (number of write tickets in use)
+ * available (number of available write tickets remaining)
+
+9. **Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+ * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+10. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+11.
**Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+12. **Page faults**:
+ * page faults (number of times MongoDB had to fetch data from disk)
+
+13. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+ * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+14. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+15. **Queued requests**:
+ * readers (number of read requests currently queued)
+ * writers (number of write requests currently queued)
+
+16. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+17. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+18. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+19. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+20. **Commands total/failed rate**
+ * count
+ * createIndexes
+ * delete
+ * eval
+ * findAndModify
+ * insert
+
+21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+ * Global lock
+ * Database lock
+ * Collection lock
+ * Metadata lock
+ * oplog lock
+
+22. **Replica set members state**
+ * state
+
+23. **Oplog window**
+ * window (interval of time between the oldest and the latest entries in the oplog)
+
+24. **Replication lag**
+ * member (time when last entry from the oplog was applied for every member)
+
+25.
**Replication set member heartbeat latency** + * member (time when last heartbeat was received from replica set member) + + +### configuration + +Sample: + +```yaml +local: + name : 'local' + host : '127.0.0.1' + port : 27017 + user : 'netdata' + pass : 'netdata' + +``` + +If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address + +--- diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py new file mode 100644 index 000000000..10344342d --- /dev/null +++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py @@ -0,0 +1,731 @@ +# -*- coding: utf-8 -*- +# Description: mongodb netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +from copy import deepcopy +from datetime import datetime +from sys import exc_info + +try: + from pymongo import MongoClient, ASCENDING, DESCENDING + from pymongo.errors import PyMongoError + PYMONGO = True +except ImportError: + PYMONGO = False + +from bases.FrameworkServices.SimpleService import SimpleService + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +REPL_SET_STATES = [ + ('1', 'primary'), + ('8', 'down'), + ('2', 'secondary'), + ('3', 'recovering'), + ('5', 'startup2'), + ('4', 'fatal'), + ('7', 'arbiter'), + ('6', 'unknown'), + ('9', 'rollback'), + ('10', 'removed'), + ('0', 'startup') +] + + +def multiply_by_100(value): + return value * 100 + + +DEFAULT_METRICS = [ + ('opcounters.delete', None, None), + ('opcounters.update', None, None), + ('opcounters.insert', None, None), + ('opcounters.query', None, None), + ('opcounters.getmore', None, None), + ('globalLock.activeClients.readers', 'activeClients_readers', None), + ('globalLock.activeClients.writers', 'activeClients_writers', None), + ('connections.available', 'connections_available', None), + ('connections.current', 'connections_current', None), + ('mem.mapped', None, None), + ('mem.resident', None, None), + ('mem.virtual', None, None), + ('globalLock.currentQueue.readers', 'currentQueue_readers', None), + ('globalLock.currentQueue.writers', 'currentQueue_writers', None), + ('asserts.msg', None, None), + ('asserts.regular', None, None), + ('asserts.user', None, None), + ('asserts.warning', None, None), + ('extra_info.page_faults', None, None), + ('metrics.record.moves', None, None), + ('backgroundFlushing.average_ms', None, multiply_by_100), + ('backgroundFlushing.last_ms', None, multiply_by_100), + ('backgroundFlushing.flushes', None, multiply_by_100), + ('metrics.cursor.timedOut', None, None), + ('metrics.cursor.open.total', 'cursor_total', None), + ('metrics.cursor.open.noTimeout', None, None), + ('cursors.timedOut', None, None), + ('cursors.totalOpen', 'cursor_total', None) +] + +DUR = [ + ('dur.commits', None, None), + ('dur.journaledMB', None, multiply_by_100) +] + +WIREDTIGER = [ + ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None), + ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None), + ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None), + ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None), + ('wiredTiger.cache.bytes currently in the cache', None, None), + ('wiredTiger.cache.tracked dirty bytes in the cache', None, None), + ('wiredTiger.cache.maximum bytes configured', None, None), + ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None), + 
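+    # each tuple is (dotted path in serverStatus, optional new name, optional transform);
+    # the cache byte counters above feed the wiredTiger_percent_clean and
+    # wiredTiger_percent_dirty calculations in _get_data()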
('wiredTiger.cache.modified pages evicted', 'modified', None) +] + +TCMALLOC = [ + ('tcmalloc.generic.current_allocated_bytes', None, None), + ('tcmalloc.generic.heap_size', None, None), + ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None), + ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None), + ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None), + ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None), + ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None), + ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None) +] + +COMMANDS = [ + ('metrics.commands.count.total', 'count_total', None), + ('metrics.commands.createIndexes.total', 'createIndexes_total', None), + ('metrics.commands.delete.total', 'delete_total', None), + ('metrics.commands.eval.total', 'eval_total', None), + ('metrics.commands.findAndModify.total', 'findAndModify_total', None), + ('metrics.commands.insert.total', 'insert_total', None), + ('metrics.commands.delete.total', 'delete_total', None), + ('metrics.commands.count.failed', 'count_failed', None), + ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None), + ('metrics.commands.delete.failed', 'delete_failed', None), + ('metrics.commands.eval.failed', 'eval_failed', None), + ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None), + ('metrics.commands.insert.failed', 'insert_failed', None), + ('metrics.commands.delete.failed', 'delete_failed', None) +] + +LOCKS = [ + ('locks.Collection.acquireCount.R', 'Collection_R', None), + ('locks.Collection.acquireCount.r', 'Collection_r', None), + ('locks.Collection.acquireCount.W', 'Collection_W', None), + ('locks.Collection.acquireCount.w', 'Collection_w', None), + ('locks.Database.acquireCount.R', 'Database_R', None), + ('locks.Database.acquireCount.r', 'Database_r', None), + ('locks.Database.acquireCount.W', 'Database_W', None), + ('locks.Database.acquireCount.w', 'Database_w', None), + ('locks.Global.acquireCount.R', 'Global_R', None), + ('locks.Global.acquireCount.r', 'Global_r', None), + ('locks.Global.acquireCount.W', 'Global_W', None), + ('locks.Global.acquireCount.w', 'Global_w', None), + ('locks.Metadata.acquireCount.R', 'Metadata_R', None), + ('locks.Metadata.acquireCount.w', 'Metadata_w', None), + ('locks.oplog.acquireCount.r', 'oplog_r', None), + ('locks.oplog.acquireCount.w', 'oplog_w', None) +] + +DBSTATS = [ + 'dataSize', + 'indexSize', + 'storageSize', + 'objects' +] + +# charts order (can be overridden if you want less charts, or different order) +ORDER = [ + 'read_operations', + 'write_operations', + 'active_clients', + 'journaling_transactions', + 'journaling_volume', + 'background_flush_average', + 'background_flush_last', + 'background_flush_rate', + 'wiredtiger_read', + 'wiredtiger_write', + 'cursors', + 'connections', + 'memory', + 'page_faults', + 'queued_requests', + 'record_moves', + 'wiredtiger_cache', + 'wiredtiger_pages_evicted', + 'asserts', + 'locks_collection', + 'locks_database', + 'locks_global', + 'locks_metadata', + 'locks_oplog', + 'dbstats_objects', + 'tcmalloc_generic', + 'tcmalloc_metrics', + 'command_total_rate', + 'command_failed_rate' +] + +CHARTS = { + 'read_operations': { + 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics', + 'mongodb.read_operations', 'line'], + 'lines': [ + ['query', None, 'incremental'], + ['getmore', None, 'incremental'] + ] + }, + 'write_operations': { + 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics', + 'mongodb.write_operations', 
'line'], + 'lines': [ + ['insert', None, 'incremental'], + ['update', None, 'incremental'], + ['delete', None, 'incremental'] + ] + }, + 'active_clients': { + 'options': [None, 'Clients with read or write operations in progress or queued', 'clients', + 'throughput metrics', 'mongodb.active_clients', 'line'], + 'lines': [ + ['activeClients_readers', 'readers', 'absolute'], + ['activeClients_writers', 'writers', 'absolute'] + ] + }, + 'journaling_transactions': { + 'options': [None, 'Transactions that have been written to the journal', 'commits', + 'database performance', 'mongodb.journaling_transactions', 'line'], + 'lines': [ + ['commits', None, 'absolute'] + ] + }, + 'journaling_volume': { + 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance', + 'mongodb.journaling_volume', 'line'], + 'lines': [ + ['journaledMB', 'volume', 'absolute', 1, 100] + ] + }, + 'background_flush_average': { + 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance', + 'mongodb.background_flush_average', 'line'], + 'lines': [ + ['average_ms', 'time', 'absolute', 1, 100] + ] + }, + 'background_flush_last': { + 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance', + 'mongodb.background_flush_last', 'line'], + 'lines': [ + ['last_ms', 'time', 'absolute', 1, 100] + ] + }, + 'background_flush_rate': { + 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'], + 'lines': [ + ['flushes', 'flushes', 'incremental', 1, 1] + ] + }, + 'wiredtiger_read': { + 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance', + 'mongodb.wiredtiger_read', 'stacked'], + 'lines': [ + ['wiredTigerRead_available', 'available', 'absolute', 1, 1], + ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1] + ] + }, + 'wiredtiger_write': { + 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance', + 'mongodb.wiredtiger_write', 'stacked'], + 'lines': [ + ['wiredTigerWrite_available', 'available', 'absolute', 1, 1], + ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1] + ] + }, + 'cursors': { + 'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors', + 'cursors', 'database performance', 'mongodb.cursors', 'stacked'], + 'lines': [ + ['cursor_total', 'openned', 'absolute', 1, 1], + ['noTimeout', None, 'absolute', 1, 1], + ['timedOut', None, 'incremental', 1, 1] + ] + }, + 'connections': { + 'options': [None, 'Currently connected clients and unused connections', 'connections', + 'resource utilization', 'mongodb.connections', 'stacked'], + 'lines': [ + ['connections_available', 'unused', 'absolute', 1, 1], + ['connections_current', 'connected', 'absolute', 1, 1] + ] + }, + 'memory': { + 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'], + 'lines': [ + ['virtual', None, 'absolute', 1, 1], + ['resident', None, 'absolute', 1, 1], + ['nonmapped', None, 'absolute', 1, 1], + ['mapped', None, 'absolute', 1, 1] + ] + }, + 'page_faults': { + 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s', + 'resource utilization', 'mongodb.page_faults', 'line'], + 'lines': [ + ['page_faults', None, 'incremental', 1, 1] + ] + }, + 'queued_requests': { + 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation', + 'mongodb.queued_requests', 'line'], + 'lines': [ + ['currentQueue_readers', 
'readers', 'absolute', 1, 1], + ['currentQueue_writers', 'writers', 'absolute', 1, 1] + ] + }, + 'record_moves': { + 'options': [None, 'Number of times documents had to be moved on-disk', 'number', + 'resource saturation', 'mongodb.record_moves', 'line'], + 'lines': [ + ['moves', None, 'incremental', 1, 1] + ] + }, + 'asserts': { + 'options': [ + None, + 'Number of message, warning, regular, corresponding to errors generated by users assertions raised', + 'number', 'errors (asserts)', 'mongodb.asserts', 'line'], + 'lines': [ + ['msg', None, 'incremental', 1, 1], + ['warning', None, 'incremental', 1, 1], + ['regular', None, 'incremental', 1, 1], + ['user', None, 'incremental', 1, 1] + ] + }, + 'wiredtiger_cache': { + 'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes', + 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'], + 'lines': [ + ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000], + ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000] + ] + }, + 'wiredtiger_pages_evicted': { + 'options': [None, 'Pages evicted from the cache', + 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'], + 'lines': [ + ['unmodified', None, 'absolute', 1, 1], + ['modified', None, 'absolute', 1, 1] + ] + }, + 'dbstats_objects': { + 'options': [None, 'Number of documents in the database among all the collections', 'documents', + 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'], + 'lines': [] + }, + 'tcmalloc_generic': { + 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'], + 'lines': [ + ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576], + ['heap_size', 'heap_size', 'absolute', 1, 1048576] + ] + }, + 'tcmalloc_metrics': { + 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'], + 'lines': [ + ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024], + ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024], + ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024], + ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024], + ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024], + ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024] + ] + }, + 'command_total_rate': { + 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'], + 'lines': [ + ['count_total', 'count', 'incremental', 1, 1], + ['createIndexes_total', 'createIndexes', 'incremental', 1, 1], + ['delete_total', 'delete', 'incremental', 1, 1], + ['eval_total', 'eval', 'incremental', 1, 1], + ['findAndModify_total', 'findAndModify', 'incremental', 1, 1], + ['insert_total', 'insert', 'incremental', 1, 1], + ['update_total', 'update', 'incremental', 1, 1] + ] + }, + 'command_failed_rate': { + 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'], + 'lines': [ + ['count_failed', 'count', 'incremental', 1, 1], + ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1], + ['delete_failed', 'delete', 'incremental', 1, 1], + ['eval_failed', 'eval', 'incremental', 1, 1], + ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1], + ['insert_failed', 'insert', 'incremental', 1, 1], + ['update_failed', 'update', 'incremental', 1, 1] + ] + }, + 'locks_collection': { + 'options': [None, 'Collection lock. 
Number of times the lock was acquired in the specified mode', + 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'], + 'lines': [ + ['Collection_R', 'shared', 'incremental'], + ['Collection_W', 'exclusive', 'incremental'], + ['Collection_r', 'intent_shared', 'incremental'], + ['Collection_w', 'intent_exclusive', 'incremental'] + ] + }, + 'locks_database': { + 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode', + 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'], + 'lines': [ + ['Database_R', 'shared', 'incremental'], + ['Database_W', 'exclusive', 'incremental'], + ['Database_r', 'intent_shared', 'incremental'], + ['Database_w', 'intent_exclusive', 'incremental'] + ] + }, + 'locks_global': { + 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode', + 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'], + 'lines': [ + ['Global_R', 'shared', 'incremental'], + ['Global_W', 'exclusive', 'incremental'], + ['Global_r', 'intent_shared', 'incremental'], + ['Global_w', 'intent_exclusive', 'incremental'] + ] + }, + 'locks_metadata': { + 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode', + 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'], + 'lines': [ + ['Metadata_R', 'shared', 'incremental'], + ['Metadata_w', 'intent_exclusive', 'incremental'] + ] + }, + 'locks_oplog': { + 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode', + 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'], + 'lines': [ + ['oplog_r', 'intent_shared', 'incremental'], + ['oplog_w', 'intent_exclusive', 'incremental'] + ] + } +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER[:] + self.definitions = deepcopy(CHARTS) + self.user = self.configuration.get('user') + self.password = self.configuration.get('pass') + self.host = self.configuration.get('host', '127.0.0.1') + self.port = self.configuration.get('port', 27017) + self.timeout = self.configuration.get('timeout', 100) + self.metrics_to_collect = deepcopy(DEFAULT_METRICS) + self.connection = None + self.do_replica = None + self.databases = list() + + def check(self): + if not PYMONGO: + self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py') + return False + self.connection, server_status, error = self._create_connection() + if error: + self.error(error) + return False + + self.build_metrics_to_collect_(server_status) + + try: + data = self._get_data() + except (LookupError, SyntaxError, AttributeError): + self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1]))) + return False + if isinstance(data, dict) and data: + self._data_from_check = data + self.create_charts_(server_status) + return True + self.error('_get_data() returned no data or type is not <dict>') + return False + + def build_metrics_to_collect_(self, server_status): + + self.do_replica = 'repl' in server_status + if 'dur' in server_status: + self.metrics_to_collect.extend(DUR) + if 'tcmalloc' in server_status: + self.metrics_to_collect.extend(TCMALLOC) + if 'commands' in server_status['metrics']: + self.metrics_to_collect.extend(COMMANDS) + if 'wiredTiger' in server_status: + self.metrics_to_collect.extend(WIREDTIGER) + if 'Collection' in server_status['locks']: + self.metrics_to_collect.extend(LOCKS) + + def 
create_charts_(self, server_status): + + if 'dur' not in server_status: + self.order.remove('journaling_transactions') + self.order.remove('journaling_volume') + + if 'backgroundFlushing' not in server_status: + self.order.remove('background_flush_average') + self.order.remove('background_flush_last') + self.order.remove('background_flush_rate') + + if 'wiredTiger' not in server_status: + self.order.remove('wiredtiger_write') + self.order.remove('wiredtiger_read') + self.order.remove('wiredtiger_cache') + + if 'tcmalloc' not in server_status: + self.order.remove('tcmalloc_generic') + self.order.remove('tcmalloc_metrics') + + if 'commands' not in server_status['metrics']: + self.order.remove('command_total_rate') + self.order.remove('command_failed_rate') + + if 'Collection' not in server_status['locks']: + self.order.remove('locks_collection') + self.order.remove('locks_database') + self.order.remove('locks_global') + self.order.remove('locks_metadata') + + if 'oplog' not in server_status['locks']: + self.order.remove('locks_oplog') + + for dbase in self.databases: + self.order.append('_'.join([dbase, 'dbstats'])) + self.definitions['_'.join([dbase, 'dbstats'])] = { + 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB', + 'storage size metrics', 'mongodb.dbstats', 'line'], + 'lines': [ + ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024], + ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024], + ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024] + ]} + self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute']) + + if self.do_replica: + def create_lines(hosts, string): + lines = list() + for host in hosts: + dim_id = '_'.join([host, string]) + lines.append([dim_id, host, 'absolute', 1, 1000]) + return lines + + def create_state_lines(states): + lines = list() + for state, description in states: + dim_id = '_'.join([host, 'state', state]) + lines.append([dim_id, description, 'absolute', 1, 1]) + return lines + + all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list()) + this_host = server_status['repl']['me'] + other_hosts = [host for host in all_hosts if host != this_host] + + if 'local' in self.databases: + self.order.append('oplog_window') + self.definitions['oplog_window'] = { + 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog', + 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'], + 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]} + # Create "heartbeat delay" chart + self.order.append('heartbeat_delay') + self.definitions['heartbeat_delay'] = { + 'options': [ + None, + 'Time when last heartbeat was received from the replica set member (lastHeartbeatRecv)', + 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'], + 'lines': create_lines(other_hosts, 'heartbeat_lag')} + # Create "optimedate delay" chart + self.order.append('optimedate_delay') + self.definitions['optimedate_delay'] = { + 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)', + 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'], + 'lines': create_lines(all_hosts, 'optimedate')} + # Create "replica set members state" chart + for host in all_hosts: + chart_name = '_'.join([host, 'state']) + self.order.append(chart_name) + self.definitions[chart_name] = { + 'options': [None, 'Replica set member (%s) current state' % 
host, 'state', + 'replication and oplog', 'mongodb.replication_state', 'line'], + 'lines': create_state_lines(REPL_SET_STATES)} + + def _get_raw_data(self): + raw_data = dict() + + raw_data.update(self.get_server_status() or dict()) + raw_data.update(self.get_db_stats() or dict()) + raw_data.update(self.get_repl_set_get_status() or dict()) + raw_data.update(self.get_get_replication_info() or dict()) + + return raw_data or None + + def get_server_status(self): + raw_data = dict() + try: + raw_data['serverStatus'] = self.connection.admin.command('serverStatus') + except PyMongoError: + return None + else: + return raw_data + + def get_db_stats(self): + if not self.databases: + return None + + raw_data = dict() + raw_data['dbStats'] = dict() + try: + for dbase in self.databases: + raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats') + return raw_data + except PyMongoError: + return None + + def get_repl_set_get_status(self): + if not self.do_replica: + return None + + raw_data = dict() + try: + raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus') + return raw_data + except PyMongoError: + return None + + def get_get_replication_info(self): + if not (self.do_replica and 'local' in self.databases): + return None + + raw_data = dict() + raw_data['getReplicationInfo'] = dict() + try: + raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort( + '$natural', ASCENDING).limit(1)[0] + raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort( + '$natural', DESCENDING).limit(1)[0] + return raw_data + except PyMongoError: + return None + + def _get_data(self): + """ + :return: dict + """ + raw_data = self._get_raw_data() + + if not raw_data: + return None + + to_netdata = dict() + serverStatus = raw_data['serverStatus'] + dbStats = raw_data.get('dbStats') + replSetGetStatus = raw_data.get('replSetGetStatus') + getReplicationInfo = raw_data.get('getReplicationInfo') + utc_now = datetime.utcnow() + + # serverStatus + for metric, new_name, func in self.metrics_to_collect: + value = serverStatus + for key in metric.split('.'): + try: + value = value[key] + except KeyError: + break + + if not isinstance(value, dict) and key: + to_netdata[new_name or key] = value if not func else func(value) + + to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal', + to_netdata['mapped']) + if to_netdata.get('maximum bytes configured'): + maximum = to_netdata['maximum bytes configured'] + to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache'] + * 100 / maximum * 1000) + to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache'] + * 100 / maximum * 1000) + + # dbStats + if dbStats: + for dbase in dbStats: + for metric in DBSTATS: + key = '_'.join([dbase, metric]) + to_netdata[key] = dbStats[dbase][metric] + + # replSetGetStatus + if replSetGetStatus: + other_hosts = list() + members = replSetGetStatus['members'] + unix_epoch = datetime(1970, 1, 1, 0, 0) + + for member in members: + if not member.get('self'): + other_hosts.append(member) + # Replica set time diff between current time and time when last entry from the oplog was applied + if member.get('optimeDate', unix_epoch) != unix_epoch: + member_optimedate = member['name'] + '_optimedate' + to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'], + multiplier=1000))}) + # Replica set members state + member_state = 
member['name'] + '_state' + for elem in REPL_SET_STATES: + state = elem[0] + to_netdata.update({'_'.join([member_state, state]): 0}) + to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']}) + # Heartbeat lag calculation + for other in other_hosts: + if other['lastHeartbeatRecv'] != unix_epoch: + node = other['name'] + '_heartbeat_lag' + to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'], + multiplier=1000)) + + if getReplicationInfo: + first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime() + last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime() + to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000)) + + return to_netdata + + def _create_connection(self): + conn_vars = {'host': self.host, 'port': self.port} + if hasattr(MongoClient, 'server_selection_timeout'): + conn_vars.update({'serverselectiontimeoutms': self.timeout}) + try: + connection = MongoClient(**conn_vars) + if self.user and self.password: + connection.admin.authenticate(name=self.user, password=self.password) + # elif self.user: + # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509') + server_status = connection.admin.command('serverStatus') + except PyMongoError as error: + return None, None, str(error) + else: + try: + self.databases = connection.database_names() + except PyMongoError as error: + self.info('Can\'t collect databases: %s' % str(error)) + return connection, server_status, None + + +def delta_calculation(delta, multiplier=1): + if hasattr(delta, 'total_seconds'): + return delta.total_seconds() * multiplier + return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf new file mode 100644 index 000000000..62faef68d --- /dev/null +++ b/collectors/python.d.plugin/mongodb/mongodb.conf @@ -0,0 +1,84 @@ +# netdata python.d.plugin configuration for mongodb +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. 
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, mongodb also supports the following:
+#
+# host: 'IP or HOSTNAME'      # type <str> the host to connect to
+# port: PORT                  # type <int> the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username'            # the mongodb username to use
+# pass: 'password'            # the mongodb password to use
+#
+
+# ----------------------------------------------------------------------
+# to connect to the mongodb on localhost, without a password:
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+  name : 'local'
+  host : '127.0.0.1'
+  port : 27017
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
new file mode 100644
index 000000000..4a3673fd5
--- /dev/null
+++ b/collectors/python.d.plugin/monit/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += monit/monit.chart.py
+dist_pythonconfig_DATA += monit/monit.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += monit/README.md monit/Makefile.inc
+
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 100644
index 000000000..6d10240c9
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1,33 @@
+# monit
+
+Monit monitoring module. Data is grabbed from the stats XML interface (it has existed for a long time, but is not mentioned in the official documentation). The plugin mostly shows the statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
+
+1. **Filesystems**
+ * Filesystems
+ * Directories
+ * Files
+ * Pipes
+
+2. **Applications**
+ * Processes (+threads/children)
+ * Programs
+
+3. **Network**
+ * Hosts (+latency)
+ * Network interfaces
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+  name : 'local'
+  url  : 'http://localhost:2812'
+  user : admin
+  pass : monit
+```
+
+If no configuration is given, the module will attempt to connect to monit at `http://localhost:2812`.
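+
+For a quick sanity check outside of netdata, the same XML endpoint the module
+polls can be queried directly. Below is a minimal sketch, not part of the
+module, assuming a local monit instance without HTTP authentication; the URL
+path, query string and element names mirror what `monit.chart.py` uses:
+
+```python
+import xml.etree.ElementTree as ET
+from urllib.request import urlopen
+
+# same path and query string that monit.chart.py requests
+URL = 'http://localhost:2812/_status?format=xml&level=full'
+
+root = ET.fromstring(urlopen(URL).read())
+for service in root.findall("./service[@type='3']"):  # type 3 = Process
+    name = service.find('name').text
+    status = service.find('status').text
+    monitor = service.find('monitor').text
+    # status == '0' with monitor == '1' is what the module charts as "up"
+    print(name, 'up' if status == '0' and monitor == '1' else 'down')
+```
+
+The module applies the same status/monitor test to every service type it charts.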
+
+---
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
new file mode 100644
index 000000000..51943c0e1
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# Description: monit netdata python.d module
+# Author: Evgeniy K. (n0guest)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# see enum Service_Type in monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
+MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net']
+DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+    'filesystem',
+    'directory',
+    'file',
+    'process',
+    'process_uptime',
+    'process_threads',
+    'process_children',
+    'host',
+    'host_latency',
+    'system',
+    'fifo',
+    'program',
+    'net'
+]
+CHARTS = {
+    'filesystem': {
+        'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
+        'lines': []
+    },
+    'directory': {
+        'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
+        'lines': []
+    },
+    'file': {
+        'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
+        'lines': []
+    },
+    'fifo': {
+        'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
+        'lines': []
+    },
+    'program': {
+        'options': ['programs', 'Program statuses', 'programs', 'applications', 'monit.programs', 'line'],
+        'lines': []
+    },
+    'process': {
+        'options': ['processes', 'Process statuses', 'processes', 'applications', 'monit.services', 'line'],
+        'lines': []
+    },
+    'process_uptime': {
+        'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
+                    'monit.process_uptime', 'line', 'hidden'],
+        'lines': []
+    },
+    'process_threads': {
+        'options': ['processes threads', 'Processes threads', 'threads', 'applications',
+                    'monit.process_threads', 'line'],
+        'lines': []
+    },
+    'process_children': {
+        'options': ['processes children', 'Child processes', 'children', 'applications',
+                    'monit.process_childrens', 'line'],
+        'lines': []
+    },
+    'host': {
+        'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
+        'lines': []
+    },
+    'host_latency': {
+        'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
+        'lines': []
+    },
+    'net': {
+        'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
+                    'monit.networks', 'line'],
+        'lines': []
+    },
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        base_url = self.configuration.get('url', 'http://localhost:2812')
+        self.url = '{0}/_status?format=xml&level=full'.format(base_url)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def parse(self, data):
+        try:
+            xml = ET.fromstring(data)
+        except ET.ParseError:
+            self.error("URL {0} didn't return a valid XML page.
Please check your settings.".format(self.url))
+            return None
+        return xml
+
+    def check(self):
+        self._manager = self._build_manager()
+        raw_data = self._get_raw_data()
+        if not raw_data:
+            return None
+        return bool(self.parse(raw_data))
+
+    def _get_data(self):
+        raw_data = self._get_raw_data()
+        if not raw_data:
+            return None
+        xml = self.parse(raw_data)
+        if not xml:
+            return None
+
+        data = {}
+        for service_id in DEFAULT_SERVICES_IDS:
+            service_category = MONIT_SERVICE_NAMES[service_id].lower()
+            if service_category == 'system':
+                self.debug("Skipping service from 'System' category, because it's useless in graphs")
+                continue
+
+            xpath_query = "./service[@type='{0}']".format(service_id)
+            self.debug('Searching for {0} as {1}'.format(service_category, xpath_query))
+            for service_node in xml.findall(xpath_query):
+
+                service_name = service_node.find('name').text
+                service_status = service_node.find('status').text
+                service_monitoring = service_node.find('monitor').text
+                self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(service_name,
+                           service_id, service_status, service_monitoring))
+
+                dimension_key = service_category + '_' + service_name
+                if dimension_key not in self.charts[service_category]:
+                    self.charts[service_category].add_dimension([dimension_key, service_name, 'absolute'])
+                data[dimension_key] = 1 if service_status == '0' and service_monitoring == '1' else 0
+
+                if service_category == 'process':
+                    for subnode in ('uptime', 'threads', 'children'):
+                        subnode_value = service_node.find(subnode)
+                        if subnode_value is None:
+                            continue
+                        if subnode == 'uptime' and int(subnode_value.text) < 0:
+                            self.debug('Skipping buggy metric with negative uptime (monit before v5.16)')
+                            continue
+                        dimension_key = 'process_{0}_{1}'.format(subnode, service_name)
+                        if dimension_key not in self.charts['process_' + subnode]:
+                            self.charts['process_' + subnode].add_dimension([dimension_key, service_name, 'absolute'])
+                        data[dimension_key] = int(subnode_value.text)
+
+                if service_category == 'host':
+                    subnode_value = service_node.find('./icmp/responsetime')
+                    if subnode_value is None:
+                        continue
+                    dimension_key = 'host_latency_{0}'.format(service_name)
+                    if dimension_key not in self.charts['host_latency']:
+                        # responsetime is reported in seconds; the stored value is microseconds,
+                        # and the dimension multiplier/divisor (1000/1000000) renders milliseconds
+                        self.charts['host_latency'].add_dimension([dimension_key, service_name,
+                                                                   'absolute', 1000, 1000000])
+                    data[dimension_key] = float(subnode_value.text) * 1000000
+
+        return data or None
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
new file mode 100644
index 000000000..f9c26dbc3
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -0,0 +1,88 @@
+# netdata python.d.plugin configuration for monit
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+#     url: 'URL'          # the URL to fetch monit's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+#     user: 'username'
+#     pass: 'password'
+#
+# Example
+#
+# local:
+#   name : 'Local Monit'
+#   url  : 'http://localhost:2812'
+#
+# "local" will show up in Netdata logs. "Local Monit" will show up in the menu
+# in the monit section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  url  : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/mysql/Makefile.inc b/collectors/python.d.plugin/mysql/Makefile.inc
new file mode 100644
index 000000000..03e8b65eb
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += mysql/mysql.chart.py
+dist_pythonconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
new file mode 100644
index 000000000..e38098e7e
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -0,0 +1,90 @@
+# mysql
+
+This module monitors one or more MySQL servers.
+
+**Requirements:**
+ * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
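+
+The module only needs one of the two drivers to be importable. A fallback import along these lines is the usual pattern (a minimal sketch, not the module's literal code; `mysql_driver` is a placeholder name):
+
+```python
+# prefer the faster C-based MySQLdb, fall back to pure-python PyMySQL
+try:
+    import MySQLdb as mysql_driver
+except ImportError:
+    import pymysql as mysql_driver
+
+# both drivers accept a my.cnf file, a socket, or host/port credentials
+conn = mysql_driver.connect(read_default_file='/etc/mysql/my.cnf')
+```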
+
+It will produce the following charts (if data is available):
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can provide, per server, the following:
+
+1. a username which has access to the database (defaults to 'root')
+2. a password (defaults to none)
+3. a mysql my.cnf configuration file
+4. a mysql socket (optional)
+5. a mysql host (ip or hostname)
+6. a mysql port (defaults to 3306)
+
+Here is an example for 3 servers:
+
+```yaml
+update_every : 10
+priority     : 90100
+retries      : 5
+
+local:
+  'my.cnf'     : '/etc/mysql/my.cnf'
+  priority     : 90000
+
+local_2:
+  user         : 'root'
+  pass         : 'blablablabla'
+  socket       : '/var/run/mysqld/mysqld.sock'
+  update_every : 1
+
+remote:
+  user         : 'admin'
+  pass         : 'bla'
+  host         : 'example.org'
+  port         : 9000
+  retries      : 20
+```
+
+If no configuration is given, the module will attempt to connect to the MySQL server via a unix socket at `/var/run/mysqld/mysqld.sock`, without a password and with the username `root`.
+
+---
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
new file mode 100644
index 000000000..c4d1e8b3a
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -0,0 +1,602 @@
+# -*- coding: utf-8 -*-
+# Description: MySQL netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 3
+priority = 60000
+retries = 60
+
+# query executed on MySQL server
+QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
+QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
+
+GLOBAL_STATS = [
+    'Bytes_received',
+    'Bytes_sent',
+    'Queries',
+    'Questions',
+    'Slow_queries',
+    'Handler_commit',
+    'Handler_delete',
+    'Handler_prepare',
+    'Handler_read_first',
+    'Handler_read_key',
+    'Handler_read_next',
+    'Handler_read_prev',
+    'Handler_read_rnd',
+    'Handler_read_rnd_next',
+    'Handler_rollback',
+    'Handler_savepoint',
+    'Handler_savepoint_rollback',
+    'Handler_update',
+    'Handler_write',
+    'Table_locks_immediate',
+    'Table_locks_waited',
+    'Select_full_join',
+    'Select_full_range_join',
+    'Select_range',
+    'Select_range_check',
+    'Select_scan',
+    'Sort_merge_passes',
+    'Sort_range',
+    'Sort_scan',
+    'Created_tmp_disk_tables',
+    'Created_tmp_files',
+    'Created_tmp_tables',
+    'Connections',
+    'Aborted_connects',
+    'Max_used_connections',
+    'Binlog_cache_disk_use',
+    'Binlog_cache_use',
+    'Threads_connected',
+    'Threads_created',
+    'Threads_cached',
+    'Threads_running',
+    'Thread_cache_misses',
+    'Innodb_data_read',
+    'Innodb_data_written',
+    'Innodb_data_reads',
+    'Innodb_data_writes',
+    'Innodb_data_fsyncs',
+    'Innodb_data_pending_reads',
+    'Innodb_data_pending_writes',
+    'Innodb_data_pending_fsyncs',
+    'Innodb_log_waits',
+    'Innodb_log_write_requests',
+    'Innodb_log_writes',
+    'Innodb_os_log_fsyncs',
+    'Innodb_os_log_pending_fsyncs',
+    'Innodb_os_log_pending_writes',
+    'Innodb_os_log_written',
+    'Innodb_row_lock_current_waits',
+    'Innodb_rows_inserted',
+    'Innodb_rows_read',
+    
'Innodb_rows_updated', + 'Innodb_rows_deleted', + 'Innodb_buffer_pool_pages_data', + 'Innodb_buffer_pool_pages_dirty', + 'Innodb_buffer_pool_pages_free', + 'Innodb_buffer_pool_pages_flushed', + 'Innodb_buffer_pool_pages_misc', + 'Innodb_buffer_pool_pages_total', + 'Innodb_buffer_pool_bytes_data', + 'Innodb_buffer_pool_bytes_dirty', + 'Innodb_buffer_pool_read_ahead', + 'Innodb_buffer_pool_read_ahead_evicted', + 'Innodb_buffer_pool_read_ahead_rnd', + 'Innodb_buffer_pool_read_requests', + 'Innodb_buffer_pool_write_requests', + 'Innodb_buffer_pool_reads', + 'Innodb_buffer_pool_wait_free', + 'Qcache_hits', + 'Qcache_lowmem_prunes', + 'Qcache_inserts', + 'Qcache_not_cached', + 'Qcache_queries_in_cache', + 'Qcache_free_memory', + 'Qcache_free_blocks', + 'Qcache_total_blocks', + 'Key_blocks_unused', + 'Key_blocks_used', + 'Key_blocks_not_flushed', + 'Key_read_requests', + 'Key_write_requests', + 'Key_reads', + 'Key_writes', + 'Open_files', + 'Opened_files', + 'Binlog_stmt_cache_disk_use', + 'Binlog_stmt_cache_use', + 'Connection_errors_accept', + 'Connection_errors_internal', + 'Connection_errors_max_connections', + 'Connection_errors_peer_address', + 'Connection_errors_select', + 'Connection_errors_tcpwrap', + 'wsrep_local_recv_queue', + 'wsrep_local_send_queue', + 'wsrep_received', + 'wsrep_replicated', + 'wsrep_received_bytes', + 'wsrep_replicated_bytes', + 'wsrep_local_bf_aborts', + 'wsrep_local_cert_failures', + 'wsrep_flow_control_paused_ns', + 'Com_delete', + 'Com_insert', + 'Com_select', + 'Com_update', + 'Com_replace' +] + + +def slave_seconds(value): + try: + return int(value) + except (TypeError, ValueError): + return -1 + + +def slave_running(value): + return 1 if value == 'Yes' else -1 + + +SLAVE_STATS = [ + ('Seconds_Behind_Master', slave_seconds), + ('Slave_SQL_Running', slave_running), + ('Slave_IO_Running', slave_running) +] + +VARIABLES = [ + 'max_connections' +] + +ORDER = [ + 'net', + 'queries', + 'queries_type', + 'handlers', + 'table_locks', + 'join_issues', + 'sort_issues', + 'tmp', + 'connections', + 'connections_active', + 'connection_errors', + 'binlog_cache', + 'binlog_stmt_cache', + 'threads', + 'thread_cache_misses', + 'innodb_io', + 'innodb_io_ops', + 'innodb_io_pending_ops', + 'innodb_log', + 'innodb_os_log', + 'innodb_os_log_io', + 'innodb_cur_row_lock', + 'innodb_rows', + 'innodb_buffer_pool_pages', + 'innodb_buffer_pool_bytes', + 'innodb_buffer_pool_read_ahead', + 'innodb_buffer_pool_reqs', + 'innodb_buffer_pool_ops', + 'qcache_ops', + 'qcache', + 'qcache_freemem', + 'qcache_memblocks', + 'key_blocks', + 'key_requests', + 'key_disk_ops', + 'files', + 'files_rate', + 'slave_behind', + 'slave_status', + 'galera_writesets', + 'galera_bytes', + 'galera_queue', + 'galera_conflicts', + 'galera_flow_control' +] + +CHARTS = { + 'net': { + 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'], + 'lines': [ + ['Bytes_received', 'in', 'incremental', 8, 1024], + ['Bytes_sent', 'out', 'incremental', -8, 1024] + ] + }, + 'queries': { + 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'], + 'lines': [ + ['Queries', 'queries', 'incremental'], + ['Questions', 'questions', 'incremental'], + ['Slow_queries', 'slow_queries', 'incremental'] + ] + }, + 'queries_type': { + 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'], + 'lines': [ + ['Com_select', 'select', 'incremental'], + ['Com_delete', 'delete', 'incremental'], + ['Com_update', 'update', 'incremental'], + 
['Com_insert', 'insert', 'incremental'], + ['Qcache_hits', 'cache_hits', 'incremental'], + ['Com_replace', 'replace', 'incremental'] + ] + }, + 'handlers': { + 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'], + 'lines': [ + ['Handler_commit', 'commit', 'incremental'], + ['Handler_delete', 'delete', 'incremental'], + ['Handler_prepare', 'prepare', 'incremental'], + ['Handler_read_first', 'read_first', 'incremental'], + ['Handler_read_key', 'read_key', 'incremental'], + ['Handler_read_next', 'read_next', 'incremental'], + ['Handler_read_prev', 'read_prev', 'incremental'], + ['Handler_read_rnd', 'read_rnd', 'incremental'], + ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'], + ['Handler_rollback', 'rollback', 'incremental'], + ['Handler_savepoint', 'savepoint', 'incremental'], + ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'], + ['Handler_update', 'update', 'incremental'], + ['Handler_write', 'write', 'incremental'] + ] + }, + 'table_locks': { + 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'], + 'lines': [ + ['Table_locks_immediate', 'immediate', 'incremental'], + ['Table_locks_waited', 'waited', 'incremental', -1, 1] + ] + }, + 'join_issues': { + 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'], + 'lines': [ + ['Select_full_join', 'full_join', 'incremental'], + ['Select_full_range_join', 'full_range_join', 'incremental'], + ['Select_range', 'range', 'incremental'], + ['Select_range_check', 'range_check', 'incremental'], + ['Select_scan', 'scan', 'incremental'] + ] + }, + 'sort_issues': { + 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'], + 'lines': [ + ['Sort_merge_passes', 'merge_passes', 'incremental'], + ['Sort_range', 'range', 'incremental'], + ['Sort_scan', 'scan', 'incremental'] + ] + }, + 'tmp': { + 'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'], + 'lines': [ + ['Created_tmp_disk_tables', 'disk_tables', 'incremental'], + ['Created_tmp_files', 'files', 'incremental'], + ['Created_tmp_tables', 'tables', 'incremental'] + ] + }, + 'connections': { + 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'], + 'lines': [ + ['Connections', 'all', 'incremental'], + ['Aborted_connects', 'aborted', 'incremental'] + ] + }, + 'connections_active': { + 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'], + 'lines': [ + ['Threads_connected', 'active', 'absolute'], + ['max_connections', 'limit', 'absolute'], + ['Max_used_connections', 'max_active', 'absolute'] + ] + }, + 'binlog_cache': { + 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'], + 'lines': [ + ['Binlog_cache_disk_use', 'disk', 'incremental'], + ['Binlog_cache_use', 'all', 'incremental'] + ] + }, + 'threads': { + 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'], + 'lines': [ + ['Threads_connected', 'connected', 'absolute'], + ['Threads_created', 'created', 'incremental'], + ['Threads_cached', 'cached', 'absolute', -1, 1], + ['Threads_running', 'running', 'absolute'], + ] + }, + 'thread_cache_misses': { + 'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'], + 'lines': [ + ['Thread_cache_misses', 'misses', 'absolute', 1, 100] + ] + }, + 'innodb_io': { + 'options': [None, 
'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'], + 'lines': [ + ['Innodb_data_read', 'read', 'incremental', 1, 1024], + ['Innodb_data_written', 'write', 'incremental', -1, 1024] + ] + }, + 'innodb_io_ops': { + 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'], + 'lines': [ + ['Innodb_data_reads', 'reads', 'incremental'], + ['Innodb_data_writes', 'writes', 'incremental', -1, 1], + ['Innodb_data_fsyncs', 'fsyncs', 'incremental'] + ] + }, + 'innodb_io_pending_ops': { + 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb', + 'mysql.innodb_io_pending_ops', 'line'], + 'lines': [ + ['Innodb_data_pending_reads', 'reads', 'absolute'], + ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1], + ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute'] + ] + }, + 'innodb_log': { + 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'], + 'lines': [ + ['Innodb_log_waits', 'waits', 'incremental'], + ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1], + ['Innodb_log_writes', 'writes', 'incremental', -1, 1], + ] + }, + 'innodb_os_log': { + 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'], + 'lines': [ + ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'], + ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'], + ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1], + ] + }, + 'innodb_os_log_io': { + 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'], + 'lines': [ + ['Innodb_os_log_written', 'write', 'incremental', -1, 1024], + ] + }, + 'innodb_cur_row_lock': { + 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb', + 'mysql.innodb_cur_row_lock', 'area'], + 'lines': [ + ['Innodb_row_lock_current_waits', 'current_waits', 'absolute'] + ] + }, + 'innodb_rows': { + 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'], + 'lines': [ + ['Innodb_rows_inserted', 'inserted', 'incremental'], + ['Innodb_rows_read', 'read', 'incremental', 1, 1], + ['Innodb_rows_updated', 'updated', 'incremental', 1, 1], + ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1], + ] + }, + 'innodb_buffer_pool_pages': { + 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', + 'mysql.innodb_buffer_pool_pages', 'line'], + 'lines': [ + ['Innodb_buffer_pool_pages_data', 'data', 'absolute'], + ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1], + ['Innodb_buffer_pool_pages_free', 'free', 'absolute'], + ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1], + ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1], + ['Innodb_buffer_pool_pages_total', 'total', 'absolute'] + ] + }, + 'innodb_buffer_pool_bytes': { + 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'], + 'lines': [ + ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024], + ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024] + ] + }, + 'innodb_buffer_pool_read_ahead': { + 'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb', + 'mysql.innodb_buffer_pool_read_ahead', 'area'], + 'lines': [ + ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'], + ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', 
-1, 1], + ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental'] + ] + }, + 'innodb_buffer_pool_reqs': { + 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb', + 'mysql.innodb_buffer_pool_reqs', 'area'], + 'lines': [ + ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'], + ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1] + ] + }, + 'innodb_buffer_pool_ops': { + 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb', + 'mysql.innodb_buffer_pool_ops', 'area'], + 'lines': [ + ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'], + ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1] + ] + }, + 'qcache_ops': { + 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'], + 'lines': [ + ['Qcache_hits', 'hits', 'incremental'], + ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1], + ['Qcache_inserts', 'inserts', 'incremental'], + ['Qcache_not_cached', 'not cached', 'incremental', -1, 1] + ] + }, + 'qcache': { + 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'], + 'lines': [ + ['Qcache_queries_in_cache', 'queries', 'absolute'] + ] + }, + 'qcache_freemem': { + 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'], + 'lines': [ + ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024] + ] + }, + 'qcache_memblocks': { + 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'], + 'lines': [ + ['Qcache_free_blocks', 'free', 'absolute'], + ['Qcache_total_blocks', 'total', 'absolute'] + ] + }, + 'key_blocks': { + 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'], + 'lines': [ + ['Key_blocks_unused', 'unused', 'absolute'], + ['Key_blocks_used', 'used', 'absolute', -1, 1], + ['Key_blocks_not_flushed', 'not flushed', 'absolute'] + ] + }, + 'key_requests': { + 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'], + 'lines': [ + ['Key_read_requests', 'reads', 'incremental'], + ['Key_write_requests', 'writes', 'incremental', -1, 1] + ] + }, + 'key_disk_ops': { + 'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s', + 'myisam', 'mysql.key_disk_ops', 'area'], + 'lines': [ + ['Key_reads', 'reads', 'incremental'], + ['Key_writes', 'writes', 'incremental', -1, 1] + ] + }, + 'files': { + 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'], + 'lines': [ + ['Open_files', 'files', 'absolute'] + ] + }, + 'files_rate': { + 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'], + 'lines': [ + ['Opened_files', 'files', 'incremental'] + ] + }, + 'binlog_stmt_cache': { + 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog', + 'mysql.binlog_stmt_cache', 'line'], + 'lines': [ + ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'], + ['Binlog_stmt_cache_use', 'all', 'incremental'] + ] + }, + 'connection_errors': { + 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections', + 'mysql.connection_errors', 'line'], + 'lines': [ + ['Connection_errors_accept', 'accept', 'incremental'], + ['Connection_errors_internal', 'internal', 'incremental'], + ['Connection_errors_max_connections', 'max', 'incremental'], + ['Connection_errors_peer_address', 'peer_addr', 'incremental'], + ['Connection_errors_select', 'select', 
'incremental'], + ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental'] + ] + }, + 'slave_behind': { + 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'], + 'lines': [ + ['Seconds_Behind_Master', 'seconds', 'absolute'] + ] + }, + 'slave_status': { + 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'], + 'lines': [ + ['Slave_SQL_Running', 'sql_running', 'absolute'], + ['Slave_IO_Running', 'io_running', 'absolute'] + ] + }, + 'galera_writesets': { + 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'], + 'lines': [ + ['wsrep_received', 'rx', 'incremental'], + ['wsrep_replicated', 'tx', 'incremental', -1, 1], + ] + }, + 'galera_bytes': { + 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'], + 'lines': [ + ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024], + ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024], + ] + }, + 'galera_queue': { + 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'], + 'lines': [ + ['wsrep_local_recv_queue', 'rx', 'absolute'], + ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1], + ] + }, + 'galera_conflicts': { + 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'], + 'lines': [ + ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'], + ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1], + ] + }, + 'galera_flow_control': { + 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'], + 'lines': [ + ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000], + ] + } +} + + +class Service(MySQLService): + def __init__(self, configuration=None, name=None): + MySQLService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES) + + def _get_data(self): + + raw_data = self._get_raw_data(description=True) + + if not raw_data: + return None + + to_netdata = dict() + + if 'global_status' in raw_data: + global_status = dict(raw_data['global_status'][0]) + for key in GLOBAL_STATS: + if key in global_status: + to_netdata[key] = global_status[key] + if 'Threads_created' in to_netdata and 'Connections' in to_netdata: + to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created']) + / float(to_netdata['Connections']) * 10000) + + if 'slave_status' in raw_data: + if raw_data['slave_status'][0]: + slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0])) + for key, func in SLAVE_STATS: + if key in slave_raw_data: + to_netdata[key] = func(slave_raw_data[key]) + else: + self.queries.pop('slave_status') + + if 'variables' in raw_data: + variables = dict(raw_data['variables'][0]) + for key in VARIABLES: + if key in variables: + to_netdata[key] = variables[key] + + return to_netdata or None diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf new file mode 100644 index 000000000..b5956a2c6 --- /dev/null +++ b/collectors/python.d.plugin/mysql/mysql.conf @@ -0,0 +1,286 @@ +# netdata python.d.plugin configuration for mysql +# +# This file is in YaML format. 
Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, mysql also supports the following: +# +# socket: 'path/to/mysql.sock' +# +# or +# host: 'IP or HOSTNAME' # the host to connect to +# port: PORT # the port to connect to +# +# in all cases, the following can also be set: +# +# user: 'username' # the mysql username to use +# pass: 'password' # the mysql password to use +# + +# ---------------------------------------------------------------------- +# mySQL CONFIGURATION +# +# netdata does not need any privilege - only the ability to connect +# to the mysql server (netdata will not be able to see any data). +# +# Execute these commands to give the local user 'netdata' the ability +# to connect to the mysql server on localhost, without a password: +# +# > create user 'netdata'@'localhost'; +# > grant usage on *.* to 'netdata'@'localhost'; +# > flush privileges; +# +# with the above statements, netdata will be able to gather mysql +# statistics, without the ability to see or alter any data or affect +# mysql operation in any way. No change is required below. 
+# +# If you need to monitor mysql replication too, use this instead: +# +# > create user 'netdata'@'localhost'; +# > grant replication client on *.* to 'netdata'@'localhost'; +# > flush privileges; +# + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +mycnf1: + name : 'local' + 'my.cnf' : '/etc/my.cnf' + +mycnf2: + name : 'local' + 'my.cnf' : '/etc/mysql/my.cnf' + +debiancnf: + name : 'local' + 'my.cnf' : '/etc/mysql/debian.cnf' + +socket1: + name : 'local' + # user : '' + # pass : '' + socket : '/var/run/mysqld/mysqld.sock' + +socket2: + name : 'local' + # user : '' + # pass : '' + socket : '/var/run/mysqld/mysql.sock' + +socket3: + name : 'local' + # user : '' + # pass : '' + socket : '/var/lib/mysql/mysql.sock' + +socket4: + name : 'local' + # user : '' + # pass : '' + socket : '/tmp/mysql.sock' + +tcp: + name : 'local' + # user : '' + # pass : '' + host : 'localhost' + port : '3306' + # keep in mind port might be ignored by mysql, if host = 'localhost' + # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 + +tcpipv4: + name : 'local' + # user : '' + # pass : '' + host : '127.0.0.1' + port : '3306' + +tcpipv6: + name : 'local' + # user : '' + # pass : '' + host : '::1' + port : '3306' + + +# Now we try the same as above with user: root +# A few systems configure mysql to accept passwordless +# root access. + +mycnf1_root: + name : 'local' + user : 'root' + 'my.cnf' : '/etc/my.cnf' + +mycnf2_root: + name : 'local' + user : 'root' + 'my.cnf' : '/etc/mysql/my.cnf' + +socket1_root: + name : 'local' + user : 'root' + # pass : '' + socket : '/var/run/mysqld/mysqld.sock' + +socket2_root: + name : 'local' + user : 'root' + # pass : '' + socket : '/var/run/mysqld/mysql.sock' + +socket3_root: + name : 'local' + user : 'root' + # pass : '' + socket : '/var/lib/mysql/mysql.sock' + +socket4_root: + name : 'local' + user : 'root' + # pass : '' + socket : '/tmp/mysql.sock' + +tcp_root: + name : 'local' + user : 'root' + # pass : '' + host : 'localhost' + port : '3306' + # keep in mind port might be ignored by mysql, if host = 'localhost' + # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 + +tcpipv4_root: + name : 'local' + user : 'root' + # pass : '' + host : '127.0.0.1' + port : '3306' + +tcpipv6_root: + name : 'local' + user : 'root' + # pass : '' + host : '::1' + port : '3306' + + +# Now we try the same as above with user: netdata + +mycnf1_netdata: + name : 'local' + user : 'netdata' + 'my.cnf' : '/etc/my.cnf' + +mycnf2_netdata: + name : 'local' + user : 'netdata' + 'my.cnf' : '/etc/mysql/my.cnf' + +socket1_netdata: + name : 'local' + user : 'netdata' + # pass : '' + socket : '/var/run/mysqld/mysqld.sock' + +socket2_netdata: + name : 'local' + user : 'netdata' + # pass : '' + socket : '/var/run/mysqld/mysql.sock' + +socket3_netdata: + name : 'local' + user : 'netdata' + # pass : '' + socket : '/var/lib/mysql/mysql.sock' + +socket4_netdata: + name : 'local' + user : 'netdata' + # pass : '' + socket : '/tmp/mysql.sock' + +tcp_netdata: + name : 'local' + user : 'netdata' + # pass : '' + host : 'localhost' + port : '3306' + # keep in mind port might be ignored by mysql, if host = 'localhost' + # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 + +tcpipv4_netdata: + name : 'local' + user : 'netdata' 
+    # pass   : ''
+    host     : '127.0.0.1'
+    port     : '3306'
+
+tcpipv6_netdata:
+    name     : 'local'
+    user     : 'netdata'
+    # pass   : ''
+    host     : '::1'
+    port     : '3306'
+
diff --git a/collectors/python.d.plugin/nginx/Makefile.inc b/collectors/python.d.plugin/nginx/Makefile.inc
new file mode 100644
index 000000000..4636aa830
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += nginx/nginx.chart.py
+dist_pythonconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
new file mode 100644
index 000000000..007f45c7c
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -0,0 +1,45 @@
+# nginx
+
+This module will monitor one or more nginx servers, depending on configuration. Servers can be either local or remote.
+
+**Requirements:**
+ * nginx with configured 'ngx_http_stub_status_module'
+ * 'location /stub_status'
+
+An example nginx configuration can be found in 'python.d/nginx.conf'.
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Active Connections by Status**
+ * reading
+ * writing
+ * waiting
+
+4. **Connections Rate** in connections/s
+ * accepts
+ * handled
+
+### configuration
+
+Needs only the `url` of the server's `stub_status` page.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 10
+priority     : 90100
+
+local:
+  url     : 'http://localhost/stub_status'
+  retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost/stub_status`.
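+
+For reference, `stub_status` returns a small plain-text page, and the module picks values out of it by position after splitting on spaces. A minimal sketch of that parsing (the numbers below are illustrative):
+
+```python
+# example stub_status payload and position-based parsing
+raw = ('Active connections: 2 \n'
+       'server accepts handled requests\n'
+       ' 29 29 31 \n'
+       'Reading: 0 Writing: 1 Waiting: 1 \n')
+
+fields = raw.split(' ')
+print({'active': int(fields[2]), 'accepts': int(fields[7]),
+       'handled': int(fields[8]), 'requests': int(fields[9]),
+       'reading': int(fields[11]), 'writing': int(fields[13]),
+       'waiting': int(fields[15])})
+```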
+
+---
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
new file mode 100644
index 000000000..09c6bbd37
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Description: nginx netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+#     'update_every': update_every,
+#     'retries': retries,
+#     'priority': priority,
+#     'url': 'http://localhost/stub_status'
+# }}
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
+
+CHARTS = {
+    'connections': {
+        'options': [None, 'nginx Active Connections', 'connections', 'active connections',
+                    'nginx.connections', 'line'],
+        'lines': [
+            ['active']
+        ]
+    },
+    'requests': {
+        'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
+        'lines': [
+            ['requests', None, 'incremental']
+        ]
+    },
+    'connection_status': {
+        'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
+                    'nginx.connection_status', 'line'],
+        'lines': [
+            ['reading'],
+            ['writing'],
+            ['waiting', 'idle']
+        ]
+    },
+    'connect_rate': {
+        'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
+                    'nginx.connect_rate', 'line'],
+        'lines': [
+            ['accepts', 'accepted', 'incremental'],
+            ['handled', None, 'incremental']
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.url = self.configuration.get('url', 'http://localhost/stub_status')
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Format data received from http request
+        :return: dict
+        """
+        try:
+            raw = self._get_raw_data().split(" ")
+            return {'active': int(raw[2]),
+                    'requests': int(raw[9]),
+                    'reading': int(raw[11]),
+                    'writing': int(raw[13]),
+                    'waiting': int(raw[15]),
+                    'accepts': int(raw[7]),
+                    'handled': int(raw[8])}
+        except (ValueError, AttributeError):
+            return None
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
new file mode 100644
index 000000000..71c521066
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.conf
@@ -0,0 +1,109 @@
+# netdata python.d.plugin configuration for nginx
+#
+# You must have ngx_http_stub_status_module configured on your nginx server for this
+# plugin to work. The following is an example config.
+# It must be located inside a server { } block.
+#
+# location /stub_status {
+#     stub_status;
+#     # Security: Only allow access from the IP below.
+#     allow 192.168.1.200;
+#     # Deny anyone else
+#     deny all;
+# }
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters.
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+#     url: 'URL'          # the URL to fetch nginx's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+#     user: 'username'
+#     pass: 'password'
+#
+# Example
+#
+# RemoteNginx:
+#   name : 'Reverse_Proxy'
+#   url  : 'http://yourdomain.com/stub_status'
+#
+# "RemoteNginx" will show up in Netdata logs. "Reverse_Proxy" will show up in the menu
+# in the nginx section.

+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  url  : 'http://localhost/stub_status'
+
+localipv4:
+  name : 'local'
+  url  : 'http://127.0.0.1/stub_status'
+
+localipv6:
+  name : 'local'
+  url  : 'http://[::1]/stub_status'
+
diff --git a/collectors/python.d.plugin/nginx_plus/Makefile.inc b/collectors/python.d.plugin/nginx_plus/Makefile.inc
new file mode 100644
index 000000000..d3fdeaf2b
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += nginx_plus/nginx_plus.chart.py
+dist_pythonconfig_DATA += nginx_plus/nginx_plus.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += nginx_plus/README.md nginx_plus/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 000000000..43ec867a3
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,125 @@
+# nginx_plus
+
+This module will monitor one or more nginx_plus servers, depending on configuration.
+Servers can be either local or remote.
+
+An example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'.
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+ * total
+
+2. **Requests current** in requests
+ * current
+
+3. **Connection Statistics** in connections/s
+ * accepted
+ * dropped
+
+4. **Workers Statistics** in workers
+ * idle
+ * active
+
+5. **SSL Handshakes** in handshakes/s
+ * successful
+ * failed
+
+6. **SSL Session Reuses** in sessions/s
+ * reused
+
+7. **SSL Memory Usage** in percent
+ * usage
+
+8. **Processes** in processes
+ * respawned
+
+For every server zone:
+
+1. **Processing** in requests
+ * processing
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Responses** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Traffic** in kilobits/s
+ * received
+ * sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+ * peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+3. **Peer Responses** in requests/s (for every peer)
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Peers Connections** in active
+ * peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+ * peer name (dimension per peer)
+
+6. **All Peers Traffic** in KB
+ * received
+ * sent
+
+7. **Peer Traffic** in KB/s (for every peer)
+ * received
+ * sent
+
+8. **Peer Timings** in ms (for every peer)
+ * header
+ * response
+
+9. **Memory Usage** in percent
+ * usage
+
+10. **Peers Status** in state
+ * peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+ * peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KB
+ * served
+ * written
+ * bypass
+
+2. **Memory Usage** in percent
+ * usage
+
+### configuration
+
+Needs only the `url` of the server's `status` page.
+
+Here is an example for a local server:
+
+```yaml
+local:
+  url : 'http://localhost/status'
+```
+
+Without configuration, the module fails to start.
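+
+A quick way to verify the endpoint before enabling the job (a minimal sketch; the URL is an assumption, adjust it to your setup):
+
+```python
+# fetch the NGINX Plus status JSON and list the sections it exposes
+import json
+try:
+    from urllib.request import urlopen  # Python 3
+except ImportError:
+    from urllib2 import urlopen  # Python 2
+
+status = json.loads(urlopen('http://localhost/status').read().decode('utf-8'))
+print(sorted(status))  # expect keys such as 'server_zones', 'upstreams', 'caches'
+```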
+
+---
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
new file mode 100644
index 000000000..1392f5a56
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -0,0 +1,492 @@
+# -*- coding: utf-8 -*-
+# Description: nginx_plus netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+from copy import deepcopy
+from json import loads
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    from third_party.ordereddict import OrderedDict
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+    'requests_total',
+    'requests_current',
+    'connections_statistics',
+    'connections_workers',
+    'ssl_handshakes',
+    'ssl_session_reuses',
+    'ssl_memory_usage',
+    'processes'
+]
+
+CHARTS = {
+    'requests_total': {
+        'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
+        'lines': [
+            ['requests_total', 'total', 'incremental']
+        ]
+    },
+    'requests_current': {
+        'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
+        'lines': [
+            ['requests_current', 'current']
+        ]
+    },
+    'connections_statistics': {
+        'options': [None, 'Connections Statistics', 'connections/s',
+                    'connections', 'nginx_plus.connections_statistics', 'stacked'],
+        'lines': [
+            ['connections_accepted', 'accepted', 'incremental'],
+            ['connections_dropped', 'dropped', 'incremental']
+        ]
+    },
+    'connections_workers': {
+        'options': [None, 'Workers Statistics', 'workers',
+                    'connections', 'nginx_plus.connections_workers', 'stacked'],
+        'lines': [
+            ['connections_idle', 'idle'],
+            ['connections_active', 'active']
+        ]
+    },
+    'ssl_handshakes': {
+        'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
+        'lines': [
+            ['ssl_handshakes', 'successful', 'incremental'],
+            ['ssl_handshakes_failed', 'failed', 'incremental']
+        ]
+    },
+    'ssl_session_reuses': {
+        'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
+        'lines': [
+            ['ssl_session_reuses', 'reused', 'incremental']
+        ]
+    },
+    'ssl_memory_usage': {
+        'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+        'lines': [
+            ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
+        ]
+    },
+    'processes': {
+        'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
'lines': [ + ['processes_respawned', 'respawned'] + ] + } +} + + +def cache_charts(cache): + family = 'cache {0}'.format(cache.real_name) + charts = OrderedDict() + + charts['{0}_traffic'.format(cache.name)] = { + 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'], + 'lines': [ + ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024], + ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024], + ['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024] + ] + } + charts['{0}_memory_usage'.format(cache.name)] = { + 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'], + 'lines': [ + ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100], + ] + } + return charts + + +def web_zone_charts(wz): + charts = OrderedDict() + family = 'web zone {name}'.format(name=wz.real_name) + + # Processing + charts['zone_{name}_processing'.format(name=wz.name)] = { + 'options': [None, 'Zone "{name}" Processing'.format(name=wz.name), 'requests', family, + 'nginx_plus.web_zone_processing', 'line'], + 'lines': [ + ['_'.join([wz.name, 'processing']), 'processing'] + ] + } + # Requests + charts['zone_{name}_requests'.format(name=wz.name)] = { + 'options': [None, 'Zone "{name}" Requests'.format(name=wz.name), 'requests/s', family, + 'nginx_plus.web_zone_requests', 'line'], + 'lines': [ + ['_'.join([wz.name, 'requests']), 'requests', 'incremental'] + ] + } + # Response Codes + charts['zone_{name}_responses'.format(name=wz.name)] = { + 'options': [None, 'Zone "{name}" Responses'.format(name=wz.name), 'requests/s', family, + 'nginx_plus.web_zone_responses', 'stacked'], + 'lines': [ + ['_'.join([wz.name, 'responses_2xx']), '2xx', 'incremental'], + ['_'.join([wz.name, 'responses_5xx']), '5xx', 'incremental'], + ['_'.join([wz.name, 'responses_3xx']), '3xx', 'incremental'], + ['_'.join([wz.name, 'responses_4xx']), '4xx', 'incremental'], + ['_'.join([wz.name, 'responses_1xx']), '1xx', 'incremental'] + ] + } + # Traffic + charts['zone_{name}_net'.format(name=wz.name)] = { + 'options': [None, 'Zone "{name}" Traffic'.format(name=wz.name), 'kilobits/s', family, + 'nginx_plus.zone_net', 'area'], + 'lines': [ + ['_'.join([wz.name, 'received']), 'received', 'incremental', 1, 1000], + ['_'.join([wz.name, 'sent']), 'sent', 'incremental', -1, 1000] + ] + } + return charts + + +def web_upstream_charts(wu): + def dimensions(value, a='absolute', m=1, d=1): + dims = list() + for p in wu: + dims.append(['_'.join([wu.name, p.server, value]), p.real_server, a, m, d]) + return dims + + charts = OrderedDict() + family = 'web upstream {name}'.format(name=wu.real_name) + + # Requests + charts['web_upstream_{name}_requests'.format(name=wu.name)] = { + 'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'], + 'lines': dimensions('requests', 'incremental') + } + # Responses Codes + charts['web_upstream_{name}_all_responses'.format(name=wu.name)] = { + 'options': [None, 'All Peers Responses', 'responses/s', family, + 'nginx_plus.web_upstream_all_responses', 'stacked'], + 'lines': [ + ['_'.join([wu.name, 'responses_2xx']), '2xx', 'incremental'], + ['_'.join([wu.name, 'responses_5xx']), '5xx', 'incremental'], + ['_'.join([wu.name, 'responses_3xx']), '3xx', 'incremental'], + ['_'.join([wu.name, 'responses_4xx']), '4xx', 'incremental'], + ['_'.join([wu.name, 'responses_1xx']), '1xx', 'incremental'], + ] + } + for peer in wu: + charts['web_upstream_{0}_{1}_responses'.format(wu.name, 
peer.server)] = { + 'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family, + 'nginx_plus.web_upstream_peer_responses', 'stacked'], + 'lines': [ + ['_'.join([wu.name, peer.server, 'responses_2xx']), '2xx', 'incremental'], + ['_'.join([wu.name, peer.server, 'responses_5xx']), '5xx', 'incremental'], + ['_'.join([wu.name, peer.server, 'responses_3xx']), '3xx', 'incremental'], + ['_'.join([wu.name, peer.server, 'responses_4xx']), '4xx', 'incremental'], + ['_'.join([wu.name, peer.server, 'responses_1xx']), '1xx', 'incremental'] + ] + } + # Connections + charts['web_upstream_{name}_connections'.format(name=wu.name)] = { + 'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'], + 'lines': dimensions('active') + } + charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = { + 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'], + 'lines': dimensions('connections_usage', d=100) + } + # Traffic + charts['web_upstream_{0}_all_net'.format(wu.name)] = { + 'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'], + 'lines': [ + ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000], + ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000] + ] + } + for peer in wu: + charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = { + 'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family, + 'nginx_plus.web_upstream_peer_traffic', 'area'], + 'lines': [ + ['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000], + ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000] + ] + } + # Response Time + for peer in wu: + charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = { + 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family, + 'nginx_plus.web_upstream_peer_timings', 'line'], + 'lines': [ + ['_'.join([wu.name, peer.server, 'header_time']), 'header'], + ['_'.join([wu.name, peer.server, 'response_time']), 'response'] + ] + } + # Memory Usage + charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = { + 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'], + 'lines': [ + ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100] + ] + } + # State + charts['web_upstream_{name}_status'.format(name=wu.name)] = { + 'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'], + 'lines': dimensions('state') + } + # Downtime + charts['web_upstream_{name}_downtime'.format(name=wu.name)] = { + 'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'], + 'lines': dimensions('downtime', d=1000) + } + + return charts + + +METRICS = { + 'SERVER': [ + 'processes.respawned', + 'connections.accepted', + 'connections.dropped', + 'connections.active', + 'connections.idle', + 'ssl.handshakes', + 'ssl.handshakes_failed', + 'ssl.session_reuses', + 'requests.total', + 'requests.current', + 'slabs.SSL.pages.free', + 'slabs.SSL.pages.used' + ], + 'WEB_ZONE': [ + 'processing', + 'requests', + 'responses.1xx', + 'responses.2xx', + 'responses.3xx', + 'responses.4xx', + 'responses.5xx', + 'discarded', + 'received', + 'sent' + ], + 'WEB_UPSTREAM_PEER': [ + 'id', + 'server', + 'name', + 'state', + 'active', + 'max_conns', + 'requests', + 'header_time', # alive 
only + 'response_time', # alive only + 'responses.1xx', + 'responses.2xx', + 'responses.3xx', + 'responses.4xx', + 'responses.5xx', + 'sent', + 'received', + 'downtime' + ], + 'WEB_UPSTREAM_SUMMARY': [ + 'responses.1xx', + 'responses.2xx', + 'responses.3xx', + 'responses.4xx', + 'responses.5xx', + 'sent', + 'received' + ], + 'CACHE': [ + 'hit.bytes', # served + 'miss.bytes_written', # written + 'miss.bytes' # bypass + + ] +} + +BAD_SYMBOLS = re.compile(r'[:/.-]+') + + +class Cache: + key = 'caches' + charts = cache_charts + + def __init__(self, **kw): + self.real_name = kw['name'] + self.name = BAD_SYMBOLS.sub('_', self.real_name) + + def memory_usage(self, data): + used = data['slabs'][self.real_name]['pages']['used'] + free = data['slabs'][self.real_name]['pages']['free'] + return used / float(free + used) * 1e4 + + def get_data(self, raw_data): + zone_data = raw_data['caches'][self.real_name] + data = parse_json(zone_data, METRICS['CACHE']) + data['memory_usage'] = self.memory_usage(raw_data) + return dict(('_'.join([self.name, k]), v) for k, v in data.items()) + + +class WebZone: + key = 'server_zones' + charts = web_zone_charts + + def __init__(self, **kw): + self.real_name = kw['name'] + self.name = BAD_SYMBOLS.sub('_', self.real_name) + + def get_data(self, raw_data): + zone_data = raw_data['server_zones'][self.real_name] + data = parse_json(zone_data, METRICS['WEB_ZONE']) + return dict(('_'.join([self.name, k]), v) for k, v in data.items()) + + +class WebUpstream: + key = 'upstreams' + charts = web_upstream_charts + + def __init__(self, **kw): + self.real_name = kw['name'] + self.name = BAD_SYMBOLS.sub('_', self.real_name) + self.peers = OrderedDict() + + peers = kw['response']['upstreams'][self.real_name]['peers'] + for peer in peers: + self.add_peer(peer['id'], peer['server']) + + def __iter__(self): + return iter(self.peers.values()) + + def add_peer(self, idx, server): + peer = WebUpstreamPeer(idx, server) + self.peers[peer.real_server] = peer + return peer + + def peers_stats(self, peers): + peers = {int(peer['id']): peer for peer in peers} + data = dict() + for peer in self.peers.values(): + if not peer.active: + continue + try: + data.update(peer.get_data(peers[peer.id])) + except KeyError: + peer.active = False + return data + + def memory_usage(self, data): + used = data['slabs'][self.real_name]['pages']['used'] + free = data['slabs'][self.real_name]['pages']['free'] + return used / float(free + used) * 1e4 + + def summary_stats(self, data): + rv = defaultdict(int) + for metric in METRICS['WEB_UPSTREAM_SUMMARY']: + for peer in self.peers.values(): + if peer.active: + metric = '_'.join(metric.split('.')) + rv[metric] += data['_'.join([peer.server, metric])] + return rv + + def get_data(self, raw_data): + data = dict() + peers = raw_data['upstreams'][self.real_name]['peers'] + data.update(self.peers_stats(peers)) + data.update(self.summary_stats(data)) + data['memory_usage'] = self.memory_usage(raw_data) + return dict(('_'.join([self.name, k]), v) for k, v in data.items()) + + +class WebUpstreamPeer: + def __init__(self, idx, server): + self.id = idx + self.real_server = server + self.server = BAD_SYMBOLS.sub('_', self.real_server) + self.active = True + + def get_data(self, raw): + data = dict(header_time=0, response_time=0, max_conns=0) + data.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER'])) + data['connections_usage'] = 0 if not data['max_conns'] else data['active'] / float(data['max_conns']) * 1e4 + data['state'] = int(data['state'] == 'up') + return 
dict(('_'.join([self.server, k]), v) for k, v in data.items()) + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = list(ORDER) + self.definitions = deepcopy(CHARTS) + self.objects = dict() + + def check(self): + if not self.url: + self.error('URL is not defined') + return None + + self._manager = self._build_manager() + if not self._manager: + return None + + raw_data = self._get_raw_data() + if not raw_data: + return None + + try: + response = loads(raw_data) + except ValueError: + return None + + for obj_cls in [WebZone, WebUpstream, Cache]: + for obj_name in response.get(obj_cls.key, list()): + obj = obj_cls(name=obj_name, response=response) + self.objects[obj.real_name] = obj + charts = obj_cls.charts(obj) + for chart in charts: + self.order.append(chart) + self.definitions[chart] = charts[chart] + + return bool(self.objects) + + def _get_data(self): + """ + Format data received from http request + :return: dict + """ + raw_data = self._get_raw_data() + if not raw_data: + return None + response = loads(raw_data) + + data = parse_json(response, METRICS['SERVER']) + data['ssl_memory_usage'] = data['slabs_SSL_pages_used'] / float(data['slabs_SSL_pages_free']) * 1e4 + + for obj in self.objects.values(): + if obj.real_name in response[obj.key]: + data.update(obj.get_data(response)) + + return data + + +def parse_json(raw_data, metrics): + data = dict() + for metric in metrics: + value = raw_data + metrics_list = metric.split('.') + try: + for m in metrics_list: + value = value[m] + except KeyError: + continue + data['_'.join(metrics_list)] = value + return data diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf new file mode 100644 index 000000000..7b5c8f43f --- /dev/null +++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf @@ -0,0 +1,87 @@ +# netdata python.d.plugin configuration for nginx_plus +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. 
+# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, nginx_plus also supports the following: +# +# url: 'URL' # the URL to fetch nginx_plus's stats +# +# if the URL is password protected, the following are supported: +# +# user: 'username' +# pass: 'password' + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +localhost: + name : 'local' + url : 'http://localhost/status' + +localipv4: + name : 'local' + url : 'http://127.0.0.1/status' + +localipv6: + name : 'local' + url : 'http://[::1]/status' diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc new file mode 100644 index 000000000..58e9fd67d --- /dev/null +++ b/collectors/python.d.plugin/nsd/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += nsd/nsd.chart.py +dist_pythonconfig_DATA += nsd/nsd.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += nsd/README.md nsd/Makefile.inc + diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md new file mode 100644 index 000000000..02c302f41 --- /dev/null +++ b/collectors/python.d.plugin/nsd/README.md @@ -0,0 +1,54 @@ +# nsd + +Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics. + +**Requirements:** + * Version of `nsd` must be 4.0+ + * Netdata must have permissions to run `nsd-control stats_noreset` + +It produces: + +1. **Queries** + * queries + +2. **Zones** + * master + * slave + +3. **Protocol** + * udp + * udp6 + * tcp + * tcp6 + +4. **Query Type** + * A + * NS + * CNAME + * SOA + * PTR + * HINFO + * MX + * NAPTR + * TXT + * AAAA + * SRV + * ANY + +5. **Transfer** + * NOTIFY + * AXFR + +6. **Return Code** + * NOERROR + * FORMERR + * SERVFAIL + * NXDOMAIN + * NOTIMP + * REFUSED + * YXDOMAIN + + +Configuration is not needed. 
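+
+For reference, here is a minimal standalone sketch of the parsing idea (not the module itself; the sample counters below are illustrative only):
+
+```python
+import re
+
+# illustrative excerpt of `nsd-control stats_noreset` output
+SAMPLE = """\
+num.queries=184
+num.type.A=120
+num.type.AAAA=40
+num.udp=150
+num.tcp=34
+zone.master=2
+zone.slave=1
+num.rcode.NOERROR=180
+"""
+
+# integer counters only, dots kept in the key (the same pattern the module uses)
+PAIR = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+# dots become underscores, so `num.type.A` maps to the `num_type_A` dimension
+stats = dict((k.replace('.', '_'), int(v)) for k, v in PAIR.findall(SAMPLE))
+
+print(stats['num_queries'], stats['num_type_A'])  # 184 120
+```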
+ +--- diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py new file mode 100644 index 000000000..d713f46bd --- /dev/null +++ b/collectors/python.d.plugin/nsd/nsd.chart.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Description: NSD `nsd-control stats_noreset` netdata python.d module +# Author: <383c57 at gmail.com> +# SPDX-License-Identifier: GPL-3.0-or-later + +import re + +from bases.FrameworkServices.ExecutableService import ExecutableService + +# default module values (can be overridden per job in `config`) +priority = 60000 +retries = 5 +update_every = 30 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode'] + +CHARTS = { + 'queries': { + 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'], + 'lines': [ + ['num_queries', 'queries', 'incremental'] + ] + }, + 'zones': { + 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'], + 'lines': [ + ['zone_master', 'master', 'absolute'], + ['zone_slave', 'slave', 'absolute'] + ] + }, + 'protocol': { + 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'], + 'lines': [ + ['num_udp', 'udp', 'incremental'], + ['num_udp6', 'udp6', 'incremental'], + ['num_tcp', 'tcp', 'incremental'], + ['num_tcp6', 'tcp6', 'incremental'] + ] + }, + 'type': { + 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'], + 'lines': [ + ['num_type_A', 'A', 'incremental'], + ['num_type_NS', 'NS', 'incremental'], + ['num_type_CNAME', 'CNAME', 'incremental'], + ['num_type_SOA', 'SOA', 'incremental'], + ['num_type_PTR', 'PTR', 'incremental'], + ['num_type_HINFO', 'HINFO', 'incremental'], + ['num_type_MX', 'MX', 'incremental'], + ['num_type_NAPTR', 'NAPTR', 'incremental'], + ['num_type_TXT', 'TXT', 'incremental'], + ['num_type_AAAA', 'AAAA', 'incremental'], + ['num_type_SRV', 'SRV', 'incremental'], + ['num_type_TYPE255', 'ANY', 'incremental'] + ] + }, + 'transfer': { + 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'], + 'lines': [ + ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'], + ['num_type_TYPE252', 'AXFR', 'incremental'] + ] + }, + 'rcode': { + 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'], + 'lines': [ + ['num_rcode_NOERROR', 'NOERROR', 'incremental'], + ['num_rcode_FORMERR', 'FORMERR', 'incremental'], + ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'], + ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'], + ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'], + ['num_rcode_REFUSED', 'REFUSED', 'incremental'], + ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'] + ] + } +} + + +class Service(ExecutableService): + def __init__(self, configuration=None, name=None): + ExecutableService.__init__( + self, configuration=configuration, name=name) + self.command = 'nsd-control stats_noreset' + self.order = ORDER + self.definitions = CHARTS + self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)') + + def _get_data(self): + lines = self._get_raw_data() + if not lines: + return None + + r = self.regex + stats = dict((k.replace('.', '_'), int(v)) + for k, v in r.findall(''.join(lines))) + stats.setdefault('num_opcode_NOTIFY', 0) + stats.setdefault('num_type_TYPE252', 0) + stats.setdefault('num_type_TYPE255', 0) + return stats diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf new file mode 100644 index 000000000..078e97216 --- /dev/null +++ 
b/collectors/python.d.plugin/nsd/nsd.conf @@ -0,0 +1,93 @@ +# netdata python.d.plugin configuration for nsd +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# nsd-control is slow, so once every 30 seconds +# update_every: 30 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, nsd also supports the following: +# +# command: 'nsd-control stats_noreset' # the command to run +# + +# ---------------------------------------------------------------------- +# IMPORTANT Information +# +# Netdata must have permissions to run `nsd-control stats_noreset` command +# +# - Example-1 (use "sudo") +# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata) +# Defaults:netdata !requiretty +# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset +# 2. 
/etc/netdata/python.d/nsd.conf
+#     local:
+#       update_every: 30
+#       command: 'sudo /usr/sbin/nsd-control stats_noreset'
+#
+# - Example-2 (add "netdata" user to "nsd" group)
+#     usermod -aG nsd netdata
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+  update_every: 30
+  command: 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/ntpd/Makefile.inc b/collectors/python.d.plugin/ntpd/Makefile.inc
new file mode 100644
index 000000000..81210ebab
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ntpd/ntpd.chart.py
+dist_pythonconfig_DATA += ntpd/ntpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ntpd/README.md ntpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
new file mode 100644
index 000000000..b0fa17fde
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -0,0 +1,71 @@
+# ntpd
+
+Module monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol over a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+**Requirements:**
+ * Version: `NTPv4`
+ * Local interrogation allowed in `/etc/ntp.conf` (default):
+
+```
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+```
+
+It produces:
+
+1. system
+ * offset
+ * jitter
+ * frequency
+ * delay
+ * dispersion
+ * stratum
+ * tc
+ * precision
+
+2. peers
+ * offset
+ * delay
+ * dispersion
+ * jitter
+ * rootdelay
+ * rootdispersion
+ * stratum
+ * hmode
+ * pmode
+ * hpoll
+ * ppoll
+ * precision
+
+**configuration**
+
+Sample:
+
+```yaml
+update_every: 10
+
+host: 'localhost'
+port: '123'
+show_peers: yes
+# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
+peer_filter: '(127\..*)|(192\.168\..*)'
+# check for new/changed peers every 60 updates
+peer_rescan: 60
+```
+
+Sample (multiple jobs):
+
+Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
+
+```yaml
+local:
+  host: 'localhost'
+
+otherhost:
+  host: 'otherhost'
+```
+
+If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
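+
+For reference, here is a minimal standalone sketch of the kind of request the module sends: the same 12-byte mode-6 (control) header with the `readvar` opcode. It assumes a local `ntpd` that allows interrogation, and unlike the module it ignores multi-packet responses:
+
+```python
+import socket
+import struct
+
+# 12-byte control header: NTP version 2 shifted into the LI/VN/Mode byte,
+# mode 6 (control message), opcode 2 (readvar), sequence 1, all else zero
+header = struct.pack('!BBHHHHH', (2 << 3) | 6, 2, 1, 0, 0, 0, 0)
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.settimeout(2)
+sock.sendto(header, ('127.0.0.1', 123))
+response, _ = sock.recvfrom(1024)
+sock.close()
+
+# the payload after the header is ASCII 'name=value' pairs (the system variables)
+print(response[12:].decode(errors='replace'))
+```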
+ +--- diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py new file mode 100644 index 000000000..79d557c80 --- /dev/null +++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py @@ -0,0 +1,390 @@ +# -*- coding: utf-8 -*- +# Description: ntpd netdata python.d module +# Author: Sven Mäder (rda0) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import struct +import re + +from bases.FrameworkServices.SocketService import SocketService + +# default module values +update_every = 1 +priority = 60000 +retries = 60 + +# NTP Control Message Protocol constants +MODE = 6 +HEADER_FORMAT = '!BBHHHHH' +HEADER_LEN = 12 +OPCODES = { + 'readstat': 1, + 'readvar': 2 +} + +# Maximal dimension precision +PRECISION = 1000000 + +# Static charts +ORDER = [ + 'sys_offset', + 'sys_jitter', + 'sys_frequency', + 'sys_wander', + 'sys_rootdelay', + 'sys_rootdisp', + 'sys_stratum', + 'sys_tc', + 'sys_precision', + 'peer_offset', + 'peer_delay', + 'peer_dispersion', + 'peer_jitter', + 'peer_xleave', + 'peer_rootdelay', + 'peer_rootdisp', + 'peer_stratum', + 'peer_hmode', + 'peer_pmode', + 'peer_hpoll', + 'peer_ppoll', + 'peer_precision' +] + +CHARTS = { + 'sys_offset': { + 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'], + 'lines': [ + ['offset', 'offset', 'absolute', 1, PRECISION] + ] + }, + 'sys_jitter': { + 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'], + 'lines': [ + ['sys_jitter', 'system', 'absolute', 1, PRECISION], + ['clk_jitter', 'clock', 'absolute', 1, PRECISION] + ] + }, + 'sys_frequency': { + 'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'], + 'lines': [ + ['frequency', 'frequency', 'absolute', 1, PRECISION] + ] + }, + 'sys_wander': { + 'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'], + 'lines': [ + ['clk_wander', 'clock', 'absolute', 1, PRECISION] + ] + }, + 'sys_rootdelay': { + 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system', + 'ntpd.sys_rootdelay', 'area'], + 'lines': [ + ['rootdelay', 'delay', 'absolute', 1, PRECISION] + ] + }, + 'sys_rootdisp': { + 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system', + 'ntpd.sys_rootdisp', 'area'], + 'lines': [ + ['rootdisp', 'dispersion', 'absolute', 1, PRECISION] + ] + }, + 'sys_stratum': { + 'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'], + 'lines': [ + ['stratum', 'stratum', 'absolute', 1, PRECISION] + ] + }, + 'sys_tc': { + 'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'], + 'lines': [ + ['tc', 'current', 'absolute', 1, PRECISION], + ['mintc', 'minimum', 'absolute', 1, PRECISION] + ] + }, + 'sys_precision': { + 'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'], + 'lines': [ + ['precision', 'precision', 'absolute', 1, PRECISION] + ] + } +} + +PEER_CHARTS = { + 'peer_offset': { + 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'], + 'lines': [] + }, + 'peer_delay': { + 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'], + 'lines': [] + }, + 'peer_dispersion': { + 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'], + 'lines': [] + }, + 'peer_jitter': { + 'options': [None, 'Filter 
jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'], + 'lines': [] + }, + 'peer_xleave': { + 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'], + 'lines': [] + }, + 'peer_rootdelay': { + 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers', + 'ntpd.peer_rootdelay', 'line'], + 'lines': [] + }, + 'peer_rootdisp': { + 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'peers', + 'ntpd.peer_rootdisp', 'line'], + 'lines': [] + }, + 'peer_stratum': { + 'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'], + 'lines': [] + }, + 'peer_hmode': { + 'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'], + 'lines': [] + }, + 'peer_pmode': { + 'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'], + 'lines': [] + }, + 'peer_hpoll': { + 'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'], + 'lines': [] + }, + 'peer_ppoll': { + 'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'], + 'lines': [] + }, + 'peer_precision': { + 'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'], + 'lines': [] + } +} + + +class Base: + regex = re.compile(r'([a-z_]+)=((?:-)?[0-9]+(?:\.[0-9]+)?)') + + @staticmethod + def get_header(associd=0, operation='readvar'): + """ + Constructs the NTP Control Message header: + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |LI | VN |Mode |R|E|M| OpCode | Sequence Number | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Status | Association ID | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Offset | Count | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + """ + version = 2 + sequence = 1 + status = 0 + offset = 0 + count = 0 + header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), OPCODES[operation], + sequence, status, associd, offset, count) + return header + + +class System(Base): + def __init__(self): + self.request = self.get_header() + + def get_data(self, raw): + """ + Extracts key=value pairs with float/integer from ntp response packet data. + """ + data = dict() + for key, value in self.regex.findall(raw): + data[key] = float(value) * PRECISION + return data + + +class Peer(Base): + def __init__(self, idx, name): + self.id = idx + self.real_name = name + self.name = name.replace('.', '_') + self.request = self.get_header(self.id) + + def get_data(self, raw): + """ + Extracts key=value pairs with float/integer from ntp response packet data. + """ + data = dict() + for key, value in self.regex.findall(raw): + dimension = '_'.join([self.name, key]) + data[dimension] = float(value) * PRECISION + return data + + +class Service(SocketService): + def __init__(self, configuration=None, name=None): + SocketService.__init__(self, configuration=configuration, name=name) + self.order = list(ORDER) + self.definitions = dict(CHARTS) + + self.port = 'ntp' + self.dgram_socket = True + self.system = System() + self.peers = dict() + self.request = str() + self.retries = 0 + self.show_peers = self.configuration.get('show_peers', False) + self.peer_rescan = self.configuration.get('peer_rescan', 60) + + if self.show_peers: + self.definitions.update(PEER_CHARTS) + + def check(self): + """ + Checks if we can get valid systemvars. 
+ If not, returns None to disable module. + """ + self._parse_config() + + peer_filter = self.configuration.get('peer_filter', r'127\..*') + try: + self.peer_filter = re.compile(r'^((0\.0\.0\.0)|({0}))$'.format(peer_filter)) + except re.error as error: + self.error('Compile pattern error (peer_filter) : {0}'.format(error)) + return None + + self.request = self.system.request + raw_systemvars = self._get_raw_data() + + if not self.system.get_data(raw_systemvars): + return None + + return True + + def get_data(self): + """ + Gets systemvars data on each update. + Gets peervars data for all peers on each update. + """ + data = dict() + + self.request = self.system.request + raw = self._get_raw_data() + if not raw: + return None + + data.update(self.system.get_data(raw)) + + if not self.show_peers: + return data + + if not self.peers or self.runs_counter % self.peer_rescan == 0 or self.retries > 8: + self.find_new_peers() + + for peer in self.peers.values(): + self.request = peer.request + peer_data = peer.get_data(self._get_raw_data()) + if peer_data: + data.update(peer_data) + else: + self.retries += 1 + + return data + + def find_new_peers(self): + new_peers = dict((p.real_name, p) for p in self.get_peers()) + if new_peers: + + peers_to_remove = set(self.peers) - set(new_peers) + peers_to_add = set(new_peers) - set(self.peers) + + for peer_name in peers_to_remove: + self.hide_old_peer_from_charts(self.peers[peer_name]) + del self.peers[peer_name] + + for peer_name in peers_to_add: + self.add_new_peer_to_charts(new_peers[peer_name]) + + self.peers.update(new_peers) + self.retries = 0 + + def add_new_peer_to_charts(self, peer): + for chart_id in set(self.charts.charts) & set(PEER_CHARTS): + dim_id = peer.name + chart_id[4:] + if dim_id not in self.charts[chart_id]: + self.charts[chart_id].add_dimension([dim_id, peer.real_name, 'absolute', 1, PRECISION]) + else: + self.charts[chart_id].hide_dimension(dim_id, reverse=True) + + def hide_old_peer_from_charts(self, peer): + for chart_id in set(self.charts.charts) & set(PEER_CHARTS): + dim_id = peer.name + chart_id[4:] + self.charts[chart_id].hide_dimension(dim_id) + + def get_peers(self): + self.request = Base.get_header(operation='readstat') + + raw_data = self._get_raw_data(raw=True) + if not raw_data: + return list() + + peer_ids = self.get_peer_ids(raw_data) + if not peer_ids: + return list() + + new_peers = list() + for peer_id in peer_ids: + self.request = Base.get_header(peer_id) + raw_peer_data = self._get_raw_data() + if not raw_peer_data: + continue + srcadr = re.search(r'(srcadr)=([^,]+)', raw_peer_data) + if not srcadr: + continue + srcadr = srcadr.group(2) + if self.peer_filter.search(srcadr): + continue + stratum = re.search(r'(stratum)=([^,]+)', raw_peer_data) + if not stratum: + continue + if int(stratum.group(2)) > 15: + continue + + new_peer = Peer(idx=peer_id, name=srcadr) + new_peers.append(new_peer) + return new_peers + + def get_peer_ids(self, res): + """ + Unpack the NTP Control Message header + Get data length from header + Get list of association ids returned in the readstat response + """ + + try: + count = struct.unpack(HEADER_FORMAT, res[:HEADER_LEN])[6] + except struct.error as error: + self.error('error unpacking header: {0}'.format(error)) + return None + if not count: + self.error('empty data field in NTP control packet') + return None + + data_end = HEADER_LEN + count + data = res[HEADER_LEN:data_end] + data_format = ''.join(['!', 'H' * int(count / 2)]) + try: + peer_ids = list(struct.unpack(data_format, 
data))[::2] + except struct.error as error: + self.error('error unpacking data: {0}'.format(error)) + return None + return peer_ids diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf new file mode 100644 index 000000000..7adc4074b --- /dev/null +++ b/collectors/python.d.plugin/ntpd/ntpd.conf @@ -0,0 +1,91 @@ +# netdata python.d.plugin configuration for ntpd +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# +# Additionally to the above, ntp also supports the following: +# +# host: 'localhost' # the host to query +# port: '123' # the UDP port where `ntpd` listens +# show_peers: no # use `yes` to show peer charts. enabling this +# # option is recommended only for debugging, as +# # it could possibly imply memory leaks if the +# # peers change frequently. +# peer_filter: '127\..*' # regex to exclude peers +# # by default local peers are hidden +# # use `''` to show all peers. 
+# peer_rescan: 60        # interval (>0) to check for new/changed peers
+#                        # use `1` to check on every update
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name: 'local'
+  host: 'localhost'
+  port: '123'
+  show_peers: no
+
+localhost_ipv4:
+  name: 'local'
+  host: '127.0.0.1'
+  port: '123'
+  show_peers: no
+
+localhost_ipv6:
+  name: 'local'
+  host: '::1'
+  port: '123'
+  show_peers: no
diff --git a/collectors/python.d.plugin/ovpn_status_log/Makefile.inc b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
new file mode 100644
index 000000000..1fbc506d6
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ovpn_status_log/ovpn_status_log.chart.py
+dist_pythonconfig_DATA += ovpn_status_log/ovpn_status_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ovpn_status_log/README.md ovpn_status_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
new file mode 100644
index 000000000..be1ea279e
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -0,0 +1,32 @@
+# ovpn_status_log
+
+Module monitors the openvpn-status log file.
+
+**Requirements:**
+
+ * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT THE DIRECTIVES which create output files
+ so that multiple instances do not overwrite each other's output files.
+
+ * Make sure the NETDATA USER CAN READ openvpn-status.log
+
+ * The `update_every` interval MUST MATCH the interval at which OpenVPN writes the operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+ * users
+
+2. **Traffic** OpenVPN overall bandwidth usage in KB/s
+ * in
+ * out
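+
+For reference, a minimal standalone sketch of how the `CLIENT LIST` section of a TLS-mode status file maps to these charts (the sample log content is illustrative, and the address pattern is IPv4-only for brevity):
+
+```python
+import re
+
+# illustrative excerpt of a TLS-mode openvpn-status log
+SAMPLE = """\
+OpenVPN CLIENT LIST
+Updated,Thu Jun 18 08:12:15 2015
+Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since
+client1,10.0.0.2:49502,334948,1973012,Thu Jun 18 08:12:09 2015
+client2,10.0.0.3:49503,42000,99000,Thu Jun 18 08:13:01 2015
+ROUTING TABLE
+"""
+
+# same idea as the module: a client row is an address followed by two byte counters
+CLIENT = re.compile(r'(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?) (\d+) (\d+)')
+
+users, bytes_in, bytes_out = 0, 0, 0
+for row in SAMPLE.splitlines():
+    match = CLIENT.search(' '.join(row.split(',')))
+    if match:
+        users += 1
+        bytes_in += int(match.group(1))
+        bytes_out += int(match.group(2))
+
+print(users, bytes_in, bytes_out)  # 2 376948 2072012
+```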
+
+### configuration
+
+Sample:
+
+```yaml
+default:
+  log_path : '/var/log/openvpn-status.log'
+```
+
+---
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
new file mode 100644
index 000000000..64d7062d9
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Description: openvpn status log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from re import compile as r_compile
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+priority = 60000
+retries = 60
+update_every = 10
+
+ORDER = ['users', 'traffic']
+CHARTS = {
+    'users': {
+        'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
+        'lines': [
+            ['users', None, 'absolute'],
+        ]
+    },
+    'traffic': {
+        'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
+        'lines': [
+            ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
+        ]
+    }
+}
+
+TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
+STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.log_path = self.configuration.get('log_path')
+        self.regex = {
+            'tls': TLS_REGEX,
+            'static_key': STATIC_KEY_REGEX
+        }
+
+    def check(self):
+        if not (self.log_path and isinstance(self.log_path, str)):
+            self.error("'log_path' is not defined")
+            return False
+
+        data = self._get_raw_data()
+        if not data:
+            self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+            return None
+
+        found = None
+        for row in data:
+            if 'ROUTING' in row:
+                self.get_data = self.get_data_tls
+                found = True
+                break
+            elif 'STATISTICS' in row:
+                self.get_data = self.get_data_static_key
+                found = True
+                break
+        if found:
+            return True
+        self.error('Failed to parse openvpn log file')
+        return False
+
+    def _get_raw_data(self):
+        """
+        Open log file
+        :return: str
+        """
+
+        try:
+            with open(self.log_path) as log:
+                raw_data = log.readlines() or None
+        except OSError:
+            return None
+        else:
+            return raw_data
+
+    def get_data_static_key(self):
+        """
+        Parse openvpn-status log file.
+        """
+
+        raw_data = self._get_raw_data()
+        if not raw_data:
+            return None
+
+        data = dict(bytes_in=0, bytes_out=0)
+
+        for row in raw_data:
+            match = self.regex['static_key'].search(row)
+            if match:
+                match = match.groupdict()
+                if match['direction'] == 'read':
+                    data['bytes_in'] += int(match['bytes'])
+                else:
+                    data['bytes_out'] += int(match['bytes'])
+
+        return data or None
+
+    def get_data_tls(self):
+        """
+        Parse openvpn-status log file.
+ """ + + raw_data = self._get_raw_data() + if not raw_data: + return None + + data = dict(users=0, bytes_in=0, bytes_out=0) + for row in raw_data: + columns = row.split(',') if ',' in row else row.split() + if 'UNDEF' in columns: + # see https://openvpn.net/archive/openvpn-users/2004-08/msg00116.html + continue + + match = self.regex['tls'].search(' '.join(columns)) + if match: + match = match.groupdict() + data['users'] += 1 + data['bytes_in'] += int(match['bytes_in']) + data['bytes_out'] += int(match['bytes_out']) + + return data or None diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf new file mode 100644 index 000000000..6fb35a530 --- /dev/null +++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf @@ -0,0 +1,99 @@ +# netdata python.d.plugin configuration for openvpn status log +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, openvpn status log also supports the following: +# +# log_path: 'PATH' # the path to openvpn status log file +# +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) +# +# IMPORTANT information +# +# 1. 
If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT THE DIRECTIVES which create output files
+#    so that multiple instances do not overwrite each other's output files.
+# 2. Make sure the NETDATA USER CAN READ openvpn-status.log
+#
+#    * cd into the directory with openvpn-status.log and run the following commands as root
+#    * #chown :netdata openvpn-status.log && chmod 640 openvpn-status.log
+#    * To check permission and group membership run
+#    * #ls -l openvpn-status.log
+#      -rw-r----- 1 root netdata 359 dec 21 21:22 openvpn-status.log
+#
+# 3. The `update_every` interval MUST MATCH the interval at which OpenVPN writes the operational status to the log file.
+#    If it's not, the traffic chart WILL DISPLAY WRONG values
+#
+#    The default OpenVPN update interval is 10 seconds on Debian 8
+#    # ps -C openvpn -o command=
+#    /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf
+#
+# 4. Confirm status is configured in your OpenVPN configuration.
+#    * Open the OpenVPN config in an editor (e.g. sudo nano /etc/openvpn/default.conf)
+#    * Confirm status is enabled with a line like the one below:
+#      status /var/log/openvpn-status.log
+#
+#default:
+#  log_path: '/var/log/openvpn-status.log'
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/phpfpm/Makefile.inc b/collectors/python.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 000000000..ff312fe18
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += phpfpm/phpfpm.chart.py
+dist_pythonconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
new file mode 100644
index 000000000..66930463f
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -0,0 +1,40 @@
+# phpfpm
+
+This module will monitor one or more php-fpm instances depending on configuration.
+
+**Requirements:**
+ * php-fpm with enabled `status` page
+ * access to `status` page via web server
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+ * maxActive
+ * idle
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Performance**
+ * reached
+ * slow
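+
+For reference, a minimal sketch of how the per-process numbers in the `?full&json` status output are reduced to the min/max/avg dimensions (field names follow php-fpm's JSON status page; the values are illustrative):
+
+```python
+status = {
+    'accepted conn': 12345,
+    'processes': [
+        {'pid': 101, 'state': 'Idle', 'request duration': 1500},
+        {'pid': 102, 'state': 'Idle', 'request duration': 900},
+        {'pid': 103, 'state': 'Running', 'request duration': 50},
+    ],
+}
+
+# only Idle workers carry final numbers for their last request,
+# so processes still handling a request are skipped
+durations = [p['request duration'] for p in status['processes'] if p['state'] == 'Idle']
+
+print(min(durations), max(durations), sum(durations) / len(durations))  # 900 1500 1200.0
+```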
+
+### configuration
+
+Needs only the `url` of the server's `status` page.
+
+Here is an example for a local instance:
+
+```yaml
+update_every : 3
+priority     : 90100
+
+local:
+  url     : 'http://localhost/status'
+  retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost/status`
+
+---
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
new file mode 100644
index 000000000..a3f0963fc
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: PHP-FPM netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import re
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+#     'update_every': update_every,
+#     'retries': retries,
+#     'priority': priority,
+#     'url': 'http://localhost/status?full&json'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+
+POOL_INFO = [
+    ('active processes', 'active'),
+    ('max active processes', 'maxActive'),
+    ('idle processes', 'idle'),
+    ('accepted conn', 'requests'),
+    ('max children reached', 'reached'),
+    ('slow requests', 'slow')
+]
+
+PER_PROCESS_INFO = [
+    ('request duration', 'ReqDur'),
+    ('last request cpu', 'ReqCpu'),
+    ('last request memory', 'ReqMem')
+]
+
+
+def average(collection):
+    return sum(collection, 0.0) / max(len(collection), 1)
+
+
+CALC = [
+    ('min', min),
+    ('max', max),
+    ('avg', average)
+]
+
+ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
+
+CHARTS = {
+    'connections': {
+        'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
+                    'line'],
+        'lines': [
+            ['active'],
+            ['maxActive', 'max active'],
+            ['idle']
+        ]
+    },
+    'requests': {
+        'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
+        'lines': [
+            ['requests', None, 'incremental']
+        ]
+    },
+    'performance': {
+        'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
+        'lines': [
+            ['reached', 'max children reached'],
+            ['slow', 'slow requests']
+        ]
+    },
+    'request_duration': {
+        'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
+                    'line'],
+        'lines': [
+            ['minReqDur', 'min', 'absolute', 1, 1000],
+            ['maxReqDur', 'max', 'absolute', 1, 1000],
+            ['avgReqDur', 'avg', 'absolute', 1, 1000]
+        ]
+    },
+    'request_cpu': {
+        'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
+        'lines': [
+            ['minReqCpu', 'min'],
+            ['maxReqCpu', 'max'],
+            ['avgReqCpu', 'avg']
+        ]
+    },
+    'request_mem': {
+        'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
+        'lines': [
+            ['minReqMem', 'min', 'absolute', 1, 1024],
+            ['maxReqMem', 'max', 'absolute', 1, 1024],
+            ['avgReqMem', 'avg', 'absolute', 1, 1024]
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.order = ORDER + self.definitions = CHARTS + self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)') + self.json = '&json' in self.url or '?json' in self.url + self.json_full = self.url.endswith(('?full&json', '?json&full')) + self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC + for metric, p_name in PER_PROCESS_INFO]) + + def _get_data(self): + """ + Format data received from http request + :return: dict + """ + raw = self._get_raw_data() + if not raw: + return None + + raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw) + + # Per Pool info: active connections, requests and performance charts + to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO) + + # Per Process Info: duration, cpu and memory charts (min, max, avg) + if self.json_full: + p_info = dict() + to_netdata.update(self.if_all_processes_running) # If all processes are in running state + # Metrics are always 0 if the process is not in Idle state because calculation is done + # when the request processing has terminated + for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']: + p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid']))) + + if p_info: + for new_name in PER_PROCESS_INFO: + for name, func in CALC: + to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k]) + + return to_netdata or None + + +def fetch_data_(raw_data, metrics_list, pid=''): + """ + :param raw_data: dict + :param metrics_list: list + :param pid: str + :return: dict + """ + result = dict() + for metric, new_name in metrics_list: + if metric in raw_data: + result[new_name + pid] = float(raw_data[metric]) + return result + + +def parse_raw_data_(is_json, regex, raw_data): + """ + :param is_json: bool + :param regex: compiled regular expr + :param raw_data: dict + :return: dict + """ + if is_json: + try: + return json.loads(raw_data) + except ValueError: + return dict() + else: + raw_data = ' '.join(raw_data.split()) + return dict(regex.findall(raw_data)) diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf new file mode 100644 index 000000000..571eb9156 --- /dev/null +++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf @@ -0,0 +1,90 @@ +# netdata python.d.plugin configuration for PHP-FPM +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. 
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, PHP-FPM also supports the following:
+#
+#     url: 'URL'              # the URL to fetch PHP-FPM's status stats
+#                             # Be sure to include ?full&json at the end of the url
+#
+# if the URL is password protected, the following are supported:
+#
+#     user: 'username'
+#     pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  url  : "http://localhost/status?full&json"
+
+localipv4:
+  name : 'local'
+  url  : "http://127.0.0.1/status?full&json"
+
+localipv6:
+  name : 'local'
+  url  : "http://[::1]/status?full&json"
+
diff --git a/collectors/python.d.plugin/portcheck/Makefile.inc b/collectors/python.d.plugin/portcheck/Makefile.inc
new file mode 100644
index 000000000..76763f02f
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += portcheck/portcheck.chart.py
+dist_pythonconfig_DATA += portcheck/portcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += portcheck/README.md portcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
new file mode 100644
index 000000000..f1338d576
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -0,0 +1,35 @@
+# portcheck
+
+Module monitors a remote TCP service.
+
+The following charts are drawn per host:
+
+1. **Latency** ms
+ * Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection fails, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Could not create socket: possible DNS problems
+ * Connection refused: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+
+### configuration
+
+```yaml
+server:
+  host: 'dns or ip'   # required
+  port: 22            # required
+  timeout: 1          # optional
+  update_every: 1     # optional
+```
+
+### notes
+
+ * The error chart is intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * Currently, the accuracy of the latency is low and should be used as reference only.
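+
+For reference, a minimal standalone sketch of the latency measurement idea (the host and port below are placeholders):
+
+```python
+import socket
+from time import monotonic  # Python 3.3+; the module falls back to time() on older interpreters
+
+host, port = 'example.com', 80  # placeholder target
+
+start = monotonic()
+try:
+    sock = socket.create_connection((host, port), timeout=1)
+except socket.timeout:
+    print('timeout')
+except OSError as error:
+    print('no connection: {0}'.format(error))
+else:
+    print('connected in {0:.1f} ms'.format((monotonic() - start) * 1000))
+    sock.close()
+```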
+
+---
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
new file mode 100644
index 000000000..e86f82544
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Description: simple port check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+try:
+    from time import monotonic as time
+except ImportError:
+    from time import time
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 60
+
+PORT_LATENCY = 'connect'
+
+PORT_SUCCESS = 'success'
+PORT_TIMEOUT = 'timeout'
+PORT_FAILED = 'no_connection'
+
+ORDER = ['latency', 'status']
+
+CHARTS = {
+    'latency': {
+        'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'],
+        'lines': [
+            [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
+        ]
+    },
+    'status': {
+        'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'],
+        'lines': [
+            [PORT_SUCCESS, 'success', 'absolute'],
+            [PORT_TIMEOUT, 'timeout', 'absolute'],
+            [PORT_FAILED, 'no connection', 'absolute']
+        ]
+    }
+}
+
+
+# Not deriving from SocketService, too much is different
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.host = self.configuration.get('host')
+        self.port = self.configuration.get('port')
+        self.timeout = self.configuration.get('timeout', 1)
+
+    def check(self):
+        """
+        Parse configuration, check if configuration is available, and dynamically create chart lines data
+        :return: boolean
+        """
+        if self.host is None or self.port is None:
+            self.error('Host or port missing')
+            return False
+        if not isinstance(self.port, int):
+            self.error('"port" is not an integer. 
Specify a numerical value, not service name.') + return False + + self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format( + host=self.host, port=self.port, update=self.update_every, timeout=self.timeout + )) + # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from + # the beginning) + return True + + def _get_data(self): + """ + Get data from socket + :return: dict + """ + data = dict() + data[PORT_SUCCESS] = 0 + data[PORT_TIMEOUT] = 0 + data[PORT_FAILED] = 0 + + success = False + try: + for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM): + # use first working socket + sock = self._create_socket(socket_config) + if sock is not None: + self._connect2socket(data, socket_config, sock) + self._disconnect(sock) + success = True + break + except socket.gaierror as error: + self.debug('Failed to connect to "{host}:{port}", error: {error}'.format( + host=self.host, port=self.port, error=error + )) + + # We could not connect + if not success: + data[PORT_FAILED] = 1 + + return data + + def _create_socket(self, socket_config): + af, sock_type, proto, _, sa = socket_config + try: + self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1])) + sock = socket.socket(af, sock_type, proto) + sock.settimeout(self.timeout) + return sock + except socket.error as error: + self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format( + address=sa[0], port=sa[1], error=error + )) + return None + + def _connect2socket(self, data, socket_config, sock): + """ + Connect to a socket, passing the result of getaddrinfo() + :return: dict + """ + + af, _, proto, _, sa = socket_config + port = str(sa[1]) + try: + self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port)) + start = time() + sock.connect(sa) + diff = time() - start + self.debug('Connected to "{address}", port {port}, latency {latency}'.format( + address=sa[0], port=port, latency=diff + )) + # we will set it at least 0.1 ms. 0.0 would mean failed connection (handy for 3rd-party-APIs) + data[PORT_LATENCY] = max(round(diff * 10000), 0) + data[PORT_SUCCESS] = 1 + + except socket.timeout as error: + self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format( + address=sa[0], port=port, error=error + )) + data[PORT_TIMEOUT] = 1 + + except socket.error as error: + self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format( + address=sa[0], port=port, error=error + )) + data[PORT_FAILED] = 1 + + def _disconnect(self, sock): + """ + Close socket connection + :return: + """ + if sock is not None: + try: + self.debug('Closing socket') + sock.shutdown(2) # 0 - read, 1 - write, 2 - all + sock.close() + except socket.error: + pass diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf new file mode 100644 index 000000000..b3dd8bd3f --- /dev/null +++ b/collectors/python.d.plugin/portcheck/portcheck.conf @@ -0,0 +1,70 @@ +# netdata python.d.plugin configuration for portcheck +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). 
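+#
+# For instance, a hypothetical job checking SSH on the local machine could
+# look like this (the job name and values are illustrative, not shipped defaults):
+#
+# local_ssh:
+#   name: 'ssh'
+#   host: '127.0.0.1'
+#   port: 22
+#   timeout: 1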
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose the latency chart
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config for 'localhost'
+
+# job_name:
+#     name: myname             # [optional] the JOB's name as it will appear at the
+#                              # dashboard (by default is the job_name)
+#                              # JOBs sharing a name are mutually exclusive
+#     update_every: 1          # [optional] the JOB's data collection frequency
+#     priority: 60000          # [optional] the JOB's order on the dashboard
+#     retries: 60              # [optional] the JOB's number of restoration attempts
+#     timeout: 1               # [optional] the socket timeout when connecting
+#     host: 'dns or ip'        # [required] the remote host address as an IPv4/IPv6 address or a DNS name
+#     port: 22                 # [required] the port number to check; specify an integer, not a service name
+
+# You have just been warned about possible portscan blocking. The portcheck plugin is meant for simple use cases.
+# The latency measurement is currently not very accurate; treat it as a rough reference only.
+
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
new file mode 100644
index 000000000..f4091b217
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postfix/postfix.chart.py
+dist_pythonconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 100644
index 000000000..77c95ff44
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1,15 @@
+# postfix
+
+Simple module executing `postqueue -p` to grab the postfix queue.
+
+It produces only two charts:
+
+1. **Postfix Queue Emails**
+   * emails
+
+2. **Postfix Queue Emails Size** in KB
+   * size
+
+Configuration is not needed.
+
+---
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
new file mode 100644
index 000000000..bdbd0feea
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Description: postfix netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['qemails', 'qsize']
+
+CHARTS = {
+    'qemails': {
+        'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
+        'lines': [
+            ['emails', None, 'absolute']
+        ]
+    },
+    'qsize': {
+        'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
+        'lines': [
+            ['size', None, 'absolute']
+        ]
+    }
+}
+
+
+class Service(ExecutableService):
+    def __init__(self, configuration=None, name=None):
+        ExecutableService.__init__(self, configuration=configuration, name=name)
+        self.command = 'postqueue -p'
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Format data received from shell command
+        :return: dict
+        """
+        try:
+            raw = self._get_raw_data()[-1].split(' ')
+            if raw[0] == 'Mail' and raw[1] == 'queue':
+                return {'emails': 0,
+                        'size': 0}
+
+            return {'emails': raw[4],
+                    'size': raw[1]}
+        except (TypeError, ValueError, AttributeError):
+            # TypeError also covers _get_raw_data() returning None (e.g. the
+            # command failed), which would otherwise escape this handler
+            return None
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
new file mode 100644
index 000000000..e0d5a5f83
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, postfix also supports the following: +# +# command: 'postqueue -p' # the command to run +# + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS + +local: + command: 'postqueue -p' diff --git a/collectors/python.d.plugin/postgres/Makefile.inc b/collectors/python.d.plugin/postgres/Makefile.inc new file mode 100644 index 000000000..91a185cb9 --- /dev/null +++ b/collectors/python.d.plugin/postgres/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += postgres/postgres.chart.py +dist_pythonconfig_DATA += postgres/postgres.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += postgres/README.md postgres/Makefile.inc + diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md new file mode 100644 index 000000000..e7b108d36 --- /dev/null +++ b/collectors/python.d.plugin/postgres/README.md @@ -0,0 +1,68 @@ +# postgres + +Module monitors one or more postgres servers. + +**Requirements:** + + * `python-psycopg2` package. You have to install it manually. + +Following charts are drawn: + +1. **Database size** MB + * size + +2. **Current Backend Processes** processes + * active + +3. **Write-Ahead Logging Statistics** files/s + * total + * ready + * done + +4. **Checkpoints** writes/s + * scheduled + * requested + +5. **Current connections to db** count + * connections + +6. **Tuples returned from db** tuples/s + * sequential + * bitmap + +7. **Tuple reads from db** reads/s + * disk + * cache + +8. **Transactions on db** transactions/s + * committed + * rolled back + +9. **Tuples written to db** writes/s + * inserted + * updated + * deleted + * conflicts + +10. **Locks on db** count per type + * locks + +### configuration + +```yaml +socket: + name : 'socket' + user : 'postgres' + database : 'postgres' + +tcp: + name : 'tcp' + user : 'postgres' + database : 'postgres' + host : 'localhost' + port : 5432 +``` + +When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`. 
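+
+To verify the credentials netdata will use before enabling a job, a quick
+standalone check with `psycopg2` (the driver this module imports) may help.
+This is a minimal sketch, assuming a local server; the connection parameters
+are examples and should match one of your jobs:
+
+```python
+import psycopg2  # provided by the python-psycopg2 package
+
+# Example parameters -- adjust them to match your job definition.
+conn = psycopg2.connect(database='postgres', user='postgres',
+                        host='localhost', port=5432)
+conn.set_session(readonly=True)  # the module also opens a read-only session
+cur = conn.cursor()
+cur.execute('SHOW server_version_num;')  # the query the module uses to detect the server version
+print(cur.fetchone()[0])
+cur.close()
+conn.close()
+```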
+ +--- diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py new file mode 100644 index 000000000..7f43877c3 --- /dev/null +++ b/collectors/python.d.plugin/postgres/postgres.chart.py @@ -0,0 +1,823 @@ +# -*- coding: utf-8 -*- +# Description: example netdata python.d module +# Authors: facetoe, dangtranhoang +# SPDX-License-Identifier: GPL-3.0-or-later + +from copy import deepcopy + +try: + import psycopg2 + from psycopg2 import extensions + from psycopg2.extras import DictCursor + from psycopg2 import OperationalError + PSYCOPG2 = True +except ImportError: + PSYCOPG2 = False + +from bases.FrameworkServices.SimpleService import SimpleService + +# default module values +update_every = 1 +priority = 60000 +retries = 60 + +METRICS = { + 'DATABASE': [ + 'connections', + 'xact_commit', + 'xact_rollback', + 'blks_read', + 'blks_hit', + 'tup_returned', + 'tup_fetched', + 'tup_inserted', + 'tup_updated', + 'tup_deleted', + 'conflicts', + 'temp_files', + 'temp_bytes', + 'size' + ], + 'BACKENDS': [ + 'backends_active', + 'backends_idle' + ], + 'INDEX_STATS': [ + 'index_count', + 'index_size' + ], + 'TABLE_STATS': [ + 'table_size', + 'table_count' + ], + 'WAL': [ + 'written_wal', + 'recycled_wal', + 'total_wal' + ], + 'WAL_WRITES': [ + 'wal_writes' + ], + 'ARCHIVE': [ + 'ready_count', + 'done_count', + 'file_count' + ], + 'BGWRITER': [ + 'checkpoint_scheduled', + 'checkpoint_requested', + 'buffers_checkpoint', + 'buffers_clean', + 'maxwritten_clean', + 'buffers_backend', + 'buffers_alloc', + 'buffers_backend_fsync' + ], + 'LOCKS': [ + 'ExclusiveLock', + 'RowShareLock', + 'SIReadLock', + 'ShareUpdateExclusiveLock', + 'AccessExclusiveLock', + 'AccessShareLock', + 'ShareRowExclusiveLock', + 'ShareLock', + 'RowExclusiveLock' + ], + 'AUTOVACUUM': [ + 'analyze', + 'vacuum_analyze', + 'vacuum', + 'vacuum_freeze', + 'brin_summarize' + ], + 'STANDBY_DELTA': [ + 'sent_delta', + 'write_delta', + 'flush_delta', + 'replay_delta' + ], + 'REPSLOT_FILES': [ + 'replslot_wal_keep', + 'replslot_files' + ] +} + +QUERIES = { + 'WAL': """ +SELECT + count(*) as total_wal, + count(*) FILTER (WHERE type = 'recycled') AS recycled_wal, + count(*) FILTER (WHERE type = 'written') AS written_wal +FROM + (SELECT + wal.name, + pg_{0}file_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_{0}_{1}() + END ), + CASE + WHEN wal.name > pg_{0}file_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_{0}_{1}() + END ) THEN 'recycled' + ELSE 'written' + END AS type + FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name) + WHERE name ~ '^[0-9A-F]{{24}}$' + ORDER BY + (pg_stat_file('pg_{0}/'||name)).modification, + wal.name DESC) sub; +""", + 'ARCHIVE': """ +SELECT + CAST(COUNT(*) AS INT) AS file_count, + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count, + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count +FROM + pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file); +""", + 'BACKENDS': """ +SELECT + count(*) - (SELECT count(*) + FROM pg_stat_activity + WHERE state = 'idle') + AS backends_active, + (SELECT count(*) + FROM pg_stat_activity + WHERE state = 'idle') + AS backends_idle +FROM pg_stat_activity; +""", + 'TABLE_STATS': """ +SELECT + ((sum(relpages) * 8) * 1024) AS table_size, + count(1) AS table_count +FROM pg_class +WHERE relkind IN ('r', 't'); +""", + 'INDEX_STATS': """ +SELECT + ((sum(relpages) * 8) * 1024) AS index_size, + 
count(1) AS index_count +FROM pg_class +WHERE relkind = 'i'; +""", + 'DATABASE': """ +SELECT + datname AS database_name, + numbackends AS connections, + xact_commit AS xact_commit, + xact_rollback AS xact_rollback, + blks_read AS blks_read, + blks_hit AS blks_hit, + tup_returned AS tup_returned, + tup_fetched AS tup_fetched, + tup_inserted AS tup_inserted, + tup_updated AS tup_updated, + tup_deleted AS tup_deleted, + conflicts AS conflicts, + pg_database_size(datname) AS size, + temp_files AS temp_files, + temp_bytes AS temp_bytes +FROM pg_stat_database +WHERE datname IN %(databases)s ; +""", + 'BGWRITER': """ +SELECT + checkpoints_timed AS checkpoint_scheduled, + checkpoints_req AS checkpoint_requested, + buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint, + buffers_clean * current_setting('block_size')::numeric buffers_clean, + maxwritten_clean, + buffers_backend * current_setting('block_size')::numeric buffers_backend, + buffers_alloc * current_setting('block_size')::numeric buffers_alloc, + buffers_backend_fsync +FROM pg_stat_bgwriter; +""", + 'LOCKS': """ +SELECT + pg_database.datname as database_name, + mode, + count(mode) AS locks_count +FROM pg_locks +INNER JOIN pg_database + ON pg_database.oid = pg_locks.database +GROUP BY datname, mode +ORDER BY datname, mode; +""", + 'FIND_DATABASES': """ +SELECT + datname +FROM pg_stat_database +WHERE + has_database_privilege( + (SELECT current_user), datname, 'connect') + AND NOT datname ~* '^template\d '; +""", + 'FIND_STANDBY': """ +SELECT + application_name +FROM pg_stat_replication +WHERE application_name IS NOT NULL +GROUP BY application_name; +""", + 'FIND_REPLICATION_SLOT': """ +SELECT slot_name +FROM pg_replication_slots; +""", + 'STANDBY_DELTA': """ +SELECT + application_name, + pg_{0}_{1}_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_{0}_receive_{1}() + ELSE pg_current_{0}_{1}() + END, + sent_{1}) AS sent_delta, + pg_{0}_{1}_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_{0}_receive_{1}() + ELSE pg_current_{0}_{1}() + END, + write_{1}) AS write_delta, + pg_{0}_{1}_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_{0}_receive_{1}() + ELSE pg_current_{0}_{1}() + END, + flush_{1}) AS flush_delta, + pg_{0}_{1}_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_{0}_receive_{1}() + ELSE pg_current_{0}_{1}() + END, + replay_{1}) AS replay_delta +FROM pg_stat_replication +WHERE application_name IS NOT NULL; +""", + 'REPSLOT_FILES': """ +WITH wal_size AS ( + SELECT + current_setting('wal_block_size')::INT * setting::INT AS val + FROM pg_settings + WHERE name = 'wal_segment_size' + ) +SELECT + slot_name, + slot_type, + replslot_wal_keep, + count(slot_file) AS replslot_files +FROM + (SELECT + slot.slot_name, + CASE + WHEN slot_file <> 'state' THEN 1 + END AS slot_file , + slot_type, + COALESCE ( + floor( + (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn) + - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val) + ),0) AS replslot_wal_keep + FROM pg_replication_slots slot + LEFT JOIN ( + SELECT + slot2.slot_name, + pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file + FROM pg_replication_slots slot2 + ) files (slot_name, slot_file) + ON slot.slot_name = files.slot_name + CROSS JOIN wal_size s + ) AS d +GROUP BY + slot_name, + slot_type, + replslot_wal_keep; +""", + 'IF_SUPERUSER': """ +SELECT current_setting('is_superuser') = 'on' AS is_superuser; +""", + 'DETECT_SERVER_VERSION': """ +SHOW server_version_num; +""", + 'AUTOVACUUM': """ +SELECT + count(*) 
FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze, + count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze, + count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%' + AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%' + AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum, + count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze, + count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize +FROM pg_stat_activity +WHERE query NOT LIKE '%%pg_stat_activity%%'; +""", + 'DIFF_LSN': """ +SELECT + pg_{0}_{1}_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_{0}_receive_{1}() + ELSE pg_current_{0}_{1}() + END, + '0/0') as wal_writes ; +""" +} + + +QUERY_STATS = { + QUERIES['DATABASE']: METRICS['DATABASE'], + QUERIES['BACKENDS']: METRICS['BACKENDS'], + QUERIES['LOCKS']: METRICS['LOCKS'] +} + +ORDER = [ + 'db_stat_temp_files', + 'db_stat_temp_bytes', + 'db_stat_blks', + 'db_stat_tuple_returned', + 'db_stat_tuple_write', + 'db_stat_transactions', + 'db_stat_connections', + 'database_size', + 'backend_process', + 'index_count', + 'index_size', + 'table_count', + 'table_size', + 'wal', + 'wal_writes', + 'archive_wal', + 'checkpointer', + 'stat_bgwriter_alloc', + 'stat_bgwriter_checkpoint', + 'stat_bgwriter_backend', + 'stat_bgwriter_backend_fsync', + 'stat_bgwriter_bgwriter', + 'stat_bgwriter_maxwritten', + 'replication_slot', + 'standby_delta', + 'autovacuum' +] + +CHARTS = { + 'db_stat_transactions': { + 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions', + 'line'], + 'lines': [ + ['xact_commit', 'committed', 'incremental'], + ['xact_rollback', 'rolled back', 'incremental'] + ] + }, + 'db_stat_connections': { + 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections', + 'line'], + 'lines': [ + ['connections', 'connections', 'absolute'] + ] + }, + 'db_stat_blks': { + 'options': [None, 'Disk blocks reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'], + 'lines': [ + ['blks_read', 'disk', 'incremental'], + ['blks_hit', 'cache', 'incremental'] + ] + }, + 'db_stat_tuple_returned': { + 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned', + 'line'], + 'lines': [ + ['tup_returned', 'sequential', 'incremental'], + ['tup_fetched', 'bitmap', 'incremental'] + ] + }, + 'db_stat_tuple_write': { + 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'], + 'lines': [ + ['tup_inserted', 'inserted', 'incremental'], + ['tup_updated', 'updated', 'incremental'], + ['tup_deleted', 'deleted', 'incremental'], + ['conflicts', 'conflicts', 'incremental'] + ] + }, + 'db_stat_temp_bytes': { + 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', + 'line'], + 'lines': [ + ['temp_bytes', 'size', 'incremental', 1, 1024] + ] + }, + 'db_stat_temp_files': { + 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files', + 'line'], + 'lines': [ + ['temp_files', 'files', 'incremental'] + ] + }, + 'database_size': { + 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'], + 'lines': [ + ] + }, + 'backend_process': { + 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process', + 'line'], + 'lines': [ + ['backends_active', 
'active', 'absolute'], + ['backends_idle', 'idle', 'absolute'] + ] + }, + 'index_count': { + 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'], + 'lines': [ + ['index_count', 'total', 'absolute'] + ] + }, + 'index_size': { + 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'], + 'lines': [ + ['index_size', 'size', 'absolute', 1, 1024 * 1024] + ] + }, + 'table_count': { + 'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'], + 'lines': [ + ['table_count', 'total', 'absolute'] + ] + }, + 'table_size': { + 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'], + 'lines': [ + ['table_size', 'size', 'absolute', 1, 1024 * 1024] + ] + }, + 'wal': { + 'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'], + 'lines': [ + ['written_wal', 'written', 'absolute'], + ['recycled_wal', 'recycled', 'absolute'], + ['total_wal', 'total', 'absolute'] + ] + }, + 'wal_writes': { + 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'], + 'lines': [ + ['wal_writes', 'writes', 'incremental', 1, 1024] + ] + }, + 'archive_wal': { + 'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'], + 'lines': [ + ['file_count', 'total', 'incremental'], + ['ready_count', 'ready', 'incremental'], + ['done_count', 'done', 'incremental'] + ] + }, + 'checkpointer': { + 'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'], + 'lines': [ + ['checkpoint_scheduled', 'scheduled', 'incremental'], + ['checkpoint_requested', 'requested', 'incremental'] + ] + }, + 'stat_bgwriter_alloc': { + 'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'], + 'lines': [ + ['buffers_alloc', 'alloc', 'incremental', 1, 1024] + ] + }, + 'stat_bgwriter_checkpoint': { + 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', + 'postgres.stat_bgwriter_checkpoint', 'line'], + 'lines': [ + ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024] + ] + }, + 'stat_bgwriter_backend': { + 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', + 'postgres.stat_bgwriter_backend', 'line'], + 'lines': [ + ['buffers_backend', 'backend', 'incremental', 1, 1024] + ] + }, + 'stat_bgwriter_backend_fsync': { + 'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'], + 'lines': [ + ['buffers_backend_fsync', 'backend fsync', 'incremental'] + ] + }, + 'stat_bgwriter_bgwriter': { + 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', + 'postgres.bgwriter_bgwriter', 'line'], + 'lines': [ + ['buffers_clean', 'clean', 'incremental', 1, 1024] + ] + }, + 'stat_bgwriter_maxwritten': { + 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten', + 'line'], + 'lines': [ + ['maxwritten_clean', 'maxwritten', 'incremental'] + ] + }, + 'autovacuum': { + 'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'], + 'lines': [ + ['analyze', 'analyze', 'absolute'], + ['vacuum', 'vacuum', 'absolute'], + ['vacuum_analyze', 'vacuum analyze', 'absolute'], + ['vacuum_freeze', 'vacuum freeze', 'absolute'], + ['brin_summarize', 'brin summarize', 'absolute'] + ] + }, + 'standby_delta': { + 'options': [None, 'Standby delta', 'kilobytes', 'replication 
delta', 'postgres.standby_delta', 'line'], + 'lines': [ + ['sent_delta', 'sent delta', 'absolute', 1, 1024], + ['write_delta', 'write delta', 'absolute', 1, 1024], + ['flush_delta', 'flush delta', 'absolute', 1, 1024], + ['replay_delta', 'replay delta', 'absolute', 1, 1024] + ] + }, + 'replication_slot': { + 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'], + 'lines': [ + ['replslot_wal_keep', 'wal keeped', 'absolute'], + ['replslot_files', 'pg_replslot files', 'absolute'] + ] + } +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER[:] + self.definitions = deepcopy(CHARTS) + self.table_stats = configuration.pop('table_stats', False) + self.index_stats = configuration.pop('index_stats', False) + self.database_poll = configuration.pop('database_poll', None) + self.configuration = configuration + self.connection = False + self.server_version = None + self.data = dict() + self.locks_zeroed = dict() + self.databases = list() + self.secondaries = list() + self.replication_slots = list() + self.queries = QUERY_STATS.copy() + + def _connect(self): + params = dict(user='postgres', + database=None, + password=None, + host=None, + port=5432) + params.update(self.configuration) + + if not self.connection: + try: + self.connection = psycopg2.connect(**params) + self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT) + self.connection.set_session(readonly=True) + except OperationalError as error: + return False, str(error) + return True, True + + def check(self): + if not PSYCOPG2: + self.error('\'python-psycopg2\' module is needed to use postgres.chart.py') + return False + result, error = self._connect() + if not result: + conf = dict((k, (lambda k, v: v if k != 'password' else '*****')(k, v)) + for k, v in self.configuration.items()) + self.error('Failed to connect to %s. 
Error: %s' % (str(conf), error)) + return False + try: + cursor = self.connection.cursor() + self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES']) + is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER']) + self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY']) + self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION']) + if self.server_version >= 94000: + self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT']) + cursor.close() + + if self.database_poll and isinstance(self.database_poll, str): + self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \ + or self.databases + + self.locks_zeroed = populate_lock_types(self.databases) + self.add_additional_queries_(is_superuser) + self.create_dynamic_charts_() + return True + except Exception as error: + self.error(str(error)) + return False + + def add_additional_queries_(self, is_superuser): + + if self.server_version >= 100000: + wal = 'wal' + lsn = 'lsn' + else: + wal = 'xlog' + lsn = 'location' + self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER'] + self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES'] + self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA'] + + if self.index_stats: + self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS'] + if self.table_stats: + self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS'] + if is_superuser: + self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE'] + if self.server_version >= 90400: + self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL'] + if self.server_version >= 100000: + self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES'] + if self.server_version >= 90400: + self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM'] + + def create_dynamic_charts_(self): + + for database_name in self.databases[::-1]: + self.definitions['database_size']['lines'].append( + [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024]) + for chart_name in [name for name in self.order if name.startswith('db_stat')]: + add_database_stat_chart_(order=self.order, definitions=self.definitions, + name=chart_name, database_name=database_name) + + add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name) + + for application_name in self.secondaries[::-1]: + add_replication_delta_chart_( + order=self.order, + definitions=self.definitions, + name='standby_delta', + application_name=application_name) + + for slot_name in self.replication_slots[::-1]: + add_replication_slot_chart_( + order=self.order, + definitions=self.definitions, + name='replication_slot', + slot_name=slot_name) + + def _get_data(self): + result, _ = self._connect() + if result: + cursor = self.connection.cursor(cursor_factory=DictCursor) + try: + self.data.update(self.locks_zeroed) + for query, metrics in self.queries.items(): + self.query_stats_(cursor, query, metrics) + + except OperationalError: + self.connection = False + cursor.close() + return None + else: + cursor.close() + return self.data + else: + return None + + def query_stats_(self, cursor, query, metrics): + cursor.execute(query, dict(databases=tuple(self.databases))) + for row in cursor: + for metric in metrics: + if 'database_name' in row: + dimension_id = '_'.join([row['database_name'], metric]) + elif 'application_name' in row: + dimension_id = '_'.join([row['application_name'], 
metric]) + elif 'slot_name' in row: + dimension_id = '_'.join([row['slot_name'], metric]) + else: + dimension_id = metric + if metric in row: + if row[metric] is not None: + self.data[dimension_id] = int(row[metric]) + elif 'locks_count' in row: + self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0 + + +def discover_databases_(cursor, query): + cursor.execute(query) + result = list() + for db in [database[0] for database in cursor]: + if db not in result: + result.append(db) + return result + + +def discover_secondaries_(cursor, query): + cursor.execute(query) + result = list() + for sc in [standby[0] for standby in cursor]: + if sc not in result: + result.append(sc) + return result + + +def discover_replication_slots_(cursor, query): + cursor.execute(query) + result = list() + for slot in [replication_slot[0] for replication_slot in cursor]: + if slot not in result: + result.append(slot) + return result + + +def check_if_superuser_(cursor, query): + cursor.execute(query) + return cursor.fetchone()[0] + + +def detect_server_version(cursor, query): + cursor.execute(query) + return int(cursor.fetchone()[0]) + + +def populate_lock_types(databases): + result = dict() + for database in databases: + for lock_type in METRICS['LOCKS']: + key = '_'.join([database, lock_type]) + result[key] = 0 + + return result + + +def add_database_lock_chart_(order, definitions, database_name): + def create_lines(database): + result = list() + for lock_type in METRICS['LOCKS']: + dimension_id = '_'.join([database, lock_type]) + result.append([dimension_id, lock_type, 'absolute']) + return result + + chart_name = database_name + '_locks' + order.insert(-1, chart_name) + definitions[chart_name] = { + 'options': + [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'], + 'lines': create_lines(database_name) + } + + +def add_database_stat_chart_(order, definitions, name, database_name): + def create_lines(database, lines): + result = list() + for line in lines: + new_line = ['_'.join([database, line[0]])] + line[1:] + result.append(new_line) + return result + + chart_template = CHARTS[name] + chart_name = '_'.join([database_name, name]) + order.insert(0, chart_name) + name, title, units, _, context, chart_type = chart_template['options'] + definitions[chart_name] = { + 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type], + 'lines': create_lines(database_name, chart_template['lines'])} + + +def add_replication_delta_chart_(order, definitions, name, application_name): + def create_lines(standby, lines): + result = list() + for line in lines: + new_line = ['_'.join([standby, line[0]])] + line[1:] + result.append(new_line) + return result + + chart_template = CHARTS[name] + chart_name = '_'.join([application_name, name]) + position = order.index('database_size') + order.insert(position, chart_name) + name, title, units, family, context, chart_type = chart_template['options'] + definitions[chart_name] = { + 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type], + 'lines': create_lines(application_name, chart_template['lines'])} + + +def add_replication_slot_chart_(order, definitions, name, slot_name): + def create_lines(slot, lines): + result = list() + for line in lines: + new_line = ['_'.join([slot, line[0]])] + line[1:] + result.append(new_line) + return result + + chart_template = CHARTS[name] + chart_name = '_'.join([slot_name, name]) + position = 
order.index('database_size') + order.insert(position, chart_name) + name, title, units, family, context, chart_type = chart_template['options'] + definitions[chart_name] = { + 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type], + 'lines': create_lines(slot_name, chart_template['lines'])} diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf new file mode 100644 index 000000000..b69ca3717 --- /dev/null +++ b/collectors/python.d.plugin/postgres/postgres.conf @@ -0,0 +1,124 @@ +# netdata python.d.plugin configuration for postgresql +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# A single connection is required in order to pull statistics. +# +# Connections can be configured with the following options: +# +# database : 'example_db_name' +# user : 'example_user' +# password : 'example_pass' +# host : 'localhost' +# port : 5432 +# +# Additionally, the following options allow selective disabling of charts +# +# table_stats : false +# index_stats : false +# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all other will be excluded from charts) +# +# Postgres permissions are configured at its pg_hba.conf file. 
You can
+# "trust" local clients to allow netdata to connect, or you can create
+# a postgres user for netdata and add its password below to allow
+# netdata to connect.
+#
+# Postgres supported versions are:
+#   - 9.3 (without autovacuum)
+#   - 9.4
+#   - 9.5
+#   - 9.6
+#   - 10
+#
+# Superuser access is needed for these charts:
+#   Write-Ahead Logs
+#   Archive Write-Ahead Logs
+#
+# Autovacuum charts are available since Postgres 9.4
+# ----------------------------------------------------------------------
+
+socket:
+  name     : 'local'
+  user     : 'postgres'
+  database : 'postgres'
+
+tcp:
+  name     : 'local'
+  database : 'postgres'
+  user     : 'postgres'
+  host     : 'localhost'
+  port     : 5432
+
+tcpipv4:
+  name     : 'local'
+  database : 'postgres'
+  user     : 'postgres'
+  host     : '127.0.0.1'
+  port     : 5432
+
+tcpipv6:
+  name     : 'local'
+  database : 'postgres'
+  user     : 'postgres'
+  host     : '::1'
+  port     : 5432
+
diff --git a/collectors/python.d.plugin/powerdns/Makefile.inc b/collectors/python.d.plugin/powerdns/Makefile.inc
new file mode 100644
index 000000000..256d32a40
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += powerdns/powerdns.chart.py
+dist_pythonconfig_DATA += powerdns/powerdns.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += powerdns/README.md powerdns/Makefile.inc
+
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
new file mode 100644
index 000000000..3c4b145e0
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -0,0 +1,77 @@
+# powerdns
+
+Module monitors powerdns performance and health metrics.
+
+Powerdns charts:
+
+1. **Queries and Answers**
+   * udp-queries
+   * udp-answers
+   * tcp-queries
+   * tcp-answers
+
+2. **Cache Usage**
+   * query-cache-hit
+   * query-cache-miss
+   * packetcache-hit
+   * packetcache-miss
+
+3. **Cache Size**
+   * query-cache-size
+   * packetcache-size
+   * key-cache-size
+   * meta-cache-size
+
+4. **Latency**
+   * latency
+
+Powerdns Recursor charts:
+
+1. **Questions In**
+   * questions
+   * ipv6-questions
+   * tcp-questions
+
+2. **Questions Out**
+   * all-outqueries
+   * ipv6-outqueries
+   * tcp-outqueries
+   * throttled-outqueries
+
+3. **Answer Times**
+   * answers-slow
+   * answers0-1
+   * answers1-10
+   * answers10-100
+   * answers100-1000
+
+4. **Timeouts**
+   * outgoing-timeouts
+   * outgoing4-timeouts
+   * outgoing6-timeouts
+
+5. **Drops**
+   * over-capacity-drops
+
+6. **Cache Usage**
+   * cache-hits
+   * cache-misses
+   * packetcache-hits
+   * packetcache-misses
+
+7. 
**Cache Size** + * cache-entries + * packetcache-entries + * negcache-entries + +### configuration + +```yaml +local: + name : 'local' + url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics' + header : + X-API-Key: 'change_me' +``` + +--- diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py new file mode 100644 index 000000000..4264621b2 --- /dev/null +++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +# Description: powerdns netdata python.d module +# Author: Ilya Mashchenko (l2isbad) +# Author: Luke Whitworth +# SPDX-License-Identifier: GPL-3.0-or-later + +from json import loads + +from bases.FrameworkServices.UrlService import UrlService + +priority = 60000 +retries = 60 +# update_every = 3 + +ORDER = ['questions', 'cache_usage', 'cache_size', 'latency'] +CHARTS = { + 'questions': { + 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'], + 'lines': [ + ['udp-queries', None, 'incremental'], + ['udp-answers', None, 'incremental'], + ['tcp-queries', None, 'incremental'], + ['tcp-answers', None, 'incremental'] + ] + }, + 'cache_usage': { + 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'], + 'lines': [ + ['query-cache-hit', None, 'incremental'], + ['query-cache-miss', None, 'incremental'], + ['packetcache-hit', 'packet-cache-hit', 'incremental'], + ['packetcache-miss', 'packet-cache-miss', 'incremental'] + ] + }, + 'cache_size': { + 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'], + 'lines': [ + ['query-cache-size', None, 'absolute'], + ['packetcache-size', 'packet-cache-size', 'absolute'], + ['key-cache-size', None, 'absolute'], + ['meta-cache-size', None, 'absolute'] + ] + }, + 'latency': { + 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'], + 'lines': [ + ['latency', None, 'absolute'] + ] + } +} + +RECURSOR_ORDER = ['questions-in', 'questions-out', 'answer-times', 'timeouts', 'drops', 'cache_usage', 'cache_size'] + +RECURSOR_CHARTS = { + 'questions-in': { + 'options': [None, 'PowerDNS Recursor Questions In', 'count', 'questions', 'powerdns_recursor.questions-in', + 'line'], + 'lines': [ + ['questions', None, 'incremental'], + ['ipv6-questions', None, 'incremental'], + ['tcp-questions', None, 'incremental'] + ] + }, + 'questions-out': { + 'options': [None, 'PowerDNS Recursor Questions Out', 'count', 'questions', 'powerdns_recursor.questions-out', + 'line'], + 'lines': [ + ['all-outqueries', None, 'incremental'], + ['ipv6-outqueries', None, 'incremental'], + ['tcp-outqueries', None, 'incremental'], + ['throttled-outqueries', None, 'incremental'] + ] + }, + 'answer-times': { + 'options': [None, 'PowerDNS Recursor Answer Times', 'count', 'performance', 'powerdns_recursor.answer-times', + 'line'], + 'lines': [ + ['answers-slow', None, 'incremental'], + ['answers0-1', None, 'incremental'], + ['answers1-10', None, 'incremental'], + ['answers10-100', None, 'incremental'], + ['answers100-1000', None, 'incremental'] + ] + }, + 'timeouts': { + 'options': [None, 'PowerDNS Recursor Questions Time', 'count', 'performance', 'powerdns_recursor.timeouts', + 'line'], + 'lines': [ + ['outgoing-timeouts', None, 'incremental'], + ['outgoing4-timeouts', None, 'incremental'], + ['outgoing6-timeouts', None, 'incremental'] + ] + }, + 'drops': { + 'options': [None, 'PowerDNS Recursor Drops', 'count', 
'performance', 'powerdns_recursor.drops', 'line'], + 'lines': [ + ['over-capacity-drops', None, 'incremental'] + ] + }, + 'cache_usage': { + 'options': [None, 'PowerDNS Recursor Cache Usage', 'count', 'cache', 'powerdns_recursor.cache_usage', 'line'], + 'lines': [ + ['cache-hits', None, 'incremental'], + ['cache-misses', None, 'incremental'], + ['packetcache-hits', 'packet-cache-hit', 'incremental'], + ['packetcache-misses', 'packet-cache-miss', 'incremental'] + ] + }, + 'cache_size': { + 'options': [None, 'PowerDNS Recursor Cache Size', 'count', 'cache', 'powerdns_recursor.cache_size', 'line'], + 'lines': [ + ['cache-entries', None, 'absolute'], + ['packetcache-entries', None, 'absolute'], + ['negcache-entries', None, 'absolute'] + ] + } +} + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + + def check(self): + self._manager = self._build_manager() + if not self._manager: + return None + + d = self._get_data() + if not d: + return False + + if is_recursor(d): + self.order = RECURSOR_ORDER + self.definitions = RECURSOR_CHARTS + self.module_name = 'powerdns_recursor' + + return True + + def _get_data(self): + data = self._get_raw_data() + if not data: + return None + return dict((d['name'], d['value']) for d in loads(data)) + + +def is_recursor(d): + return 'over-capacity-drops' in d and 'tcp-questions' in d diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf new file mode 100644 index 000000000..ca6200df1 --- /dev/null +++ b/collectors/python.d.plugin/powerdns/powerdns.conf @@ -0,0 +1,78 @@ +# netdata python.d.plugin configuration for powerdns +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. 
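+#
+# For example, two hypothetical jobs sharing the name 'local' (the URLs are
+# illustrative), of which at most one will be running at any time:
+#
+# local_ipv4:
+#   name : 'local'
+#   url  : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+#
+# local_ipv6:
+#   name : 'local'
+#   url  : 'http://[::1]:8081/api/v1/servers/localhost/statistics'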
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, powerdns also supports the following:
+#
+#     url: 'URL'              # the URL to fetch powerdns performance statistics
+#     header:
+#         X-API-Key: 'Key'    # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+#   name : 'local'
+#   url  : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+#   header:
+#     X-API-Key: 'change_me'
diff --git a/collectors/python.d.plugin/proxysql/Makefile.inc b/collectors/python.d.plugin/proxysql/Makefile.inc
new file mode 100644
index 000000000..66be372ce
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += proxysql/proxysql.chart.py
+dist_pythonconfig_DATA += proxysql/proxysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += proxysql/README.md proxysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
new file mode 100644
index 000000000..02388276e
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -0,0 +1,62 @@
+# proxysql
+
+This module monitors proxysql backend and frontend performance metrics.
+
+It produces:
+
+1. **Connections (frontend)**
+   * connected: number of frontend connections currently connected
+   * aborted: number of frontend connections aborted due to invalid credentials or max_connections reached
+   * non_idle: number of frontend connections that are not currently idle
+   * created: number of frontend connections created
+2. **Questions (frontend)**
+   * questions: total number of queries sent from frontends
+   * slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
+3. **Overall Bandwidth (backends)**
+   * in
+   * out
+4. **Status (backends)**
+   * Backends
+     * `1=ONLINE`: backend server is fully operational
+     * `2=SHUNNED`: backend server is temporarily taken out of use, because of either too many connection errors in too short a time, or because replication lag exceeded the allowed threshold
+     * `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be detached gracefully
+     * `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
+     * `-1`: Unknown status
+5. **Bandwidth (backends)**
+   * Backends
+     * in
+     * out
+6. **Queries (backends)**
+   * Backends
+     * queries
+7. **Latency (backends)**
+   * Backends
+     * ping time
+8. **Pool connections (backends)**
+   * Backends
+     * Used: The number of connections currently used by ProxySQL for sending queries to the backend server.
+     * Free: The number of connections currently free.
+     * Established/OK: The number of connections that were established successfully.
+     * Error: The number of connections that could not be established.
+9. **Commands**
+   * Commands
+     * Count
+     * Duration (Total duration for each command)
+10. **Commands Histogram**
+    * Commands
+      * 100us, 500us, ..., 10s, inf: the total number of commands of the given type whose execution time fell between the previous bound and the given one.
+
+### configuration
+
+```yaml
+tcpipv4:
+  name : 'local'
+  user : 'stats'
+  pass : 'stats'
+  host : '127.0.0.1'
+  port : '6032'
+```
+
+If no configuration is given, module will fail to run.
+
+---
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
new file mode 100644
index 000000000..f7e3d49f9
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+# Description: Proxysql netdata python.d module
+# Author: Ali Borhani (alibo)
+# SPDX-License-Identifier: GPL-3.0+
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 3
+priority = 60000
+retries = 60
+
+
+def query(table, *params):
+    return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
+
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
+QUERY_GLOBAL = query(
+    "stats_mysql_global",
+    "Variable_Name",
+    "Variable_Value"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
+QUERY_CONNECTION_POOL = query(
+    "stats_mysql_connection_pool",
+    "hostgroup",
+    "srv_host",
+    "srv_port",
+    "status",
+    "ConnUsed",
+    "ConnFree",
+    "ConnOK",
+    "ConnERR",
+    "Queries",
+    "Bytes_data_sent",
+    "Bytes_data_recv",
+    "Latency_us"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
+QUERY_COMMANDS = query(
+    "stats_mysql_commands_counters",
+    "Command",
+    "Total_Time_us",
+    "Total_cnt",
+    "cnt_100us",
+    "cnt_500us",
+    "cnt_1ms",
+    "cnt_5ms",
+    "cnt_10ms",
+    "cnt_50ms",
+    "cnt_100ms",
+    "cnt_500ms",
+    "cnt_1s",
+    "cnt_5s",
+    "cnt_10s",
+    "cnt_INFs"
+)
+
+GLOBAL_STATS = [
+    'client_connections_aborted',
+    'client_connections_connected',
+    'client_connections_created',
+    'client_connections_non_idle',
+    'proxysql_uptime',
+    'questions',
+    'slow_queries'
+]
+
+CONNECTION_POOL_STATS = [
+    'status',
+    'connused',
+    'connfree',
+    'connok',
+    'connerr',
+    'queries',
+    'bytes_data_sent',
+    'bytes_data_recv',
+    'latency_us'
+]
+
+ORDER = [
+    'connections',
+    'active_transactions',
+    'questions',
+    'pool_overall_net',
+    'commands_count',
+    'commands_duration',
+    'pool_status',
+    'pool_net',
+    'pool_queries',
+    'pool_latency',
+    'pool_connection_used',
+    'pool_connection_free',
+    'pool_connection_ok',
+    'pool_connection_error'
+]
+
+HISTOGRAM_ORDER = [
+    '100us',
+    '500us',
+    '1ms',
+    '5ms',
+    '10ms',
+    '50ms',
+    '100ms',
+    '500ms',
'1s', + '5s', + '10s', + 'inf' +] + +STATUS = { + "ONLINE": 1, + "SHUNNED": 2, + "OFFLINE_SOFT": 3, + "OFFLINE_HARD": 4 +} + +CHARTS = { + 'pool_status': { + 'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'], + 'lines': [] + }, + 'pool_net': { + 'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'], + 'lines': [] + }, + 'pool_overall_net': { + 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth', + 'proxysql.pool_overall_net', 'area'], + 'lines': [ + ['bytes_data_recv', 'in', 'incremental', 8, 1024], + ['bytes_data_sent', 'out', 'incremental', -8, 1024] + ] + }, + 'questions': { + 'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'], + 'lines': [ + ['questions', 'questions', 'incremental'], + ['slow_queries', 'slow_queries', 'incremental'] + ] + }, + 'pool_queries': { + 'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'], + 'lines': [] + }, + 'active_transactions': { + 'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions/s', 'active_transactions', + 'proxysql.active_transactions', 'line'], + 'lines': [ + ['active_transactions', 'active_transactions', 'absolute'] + ] + }, + 'pool_latency': { + 'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'], + 'lines': [] + }, + 'connections': { + 'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections', + 'line'], + 'lines': [ + ['client_connections_connected', 'connected', 'absolute'], + ['client_connections_created', 'created', 'incremental'], + ['client_connections_aborted', 'aborted', 'incremental'], + ['client_connections_non_idle', 'non_idle', 'absolute'] + ] + }, + 'pool_connection_used': { + 'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections', + 'proxysql.pool_used_connections', 'line'], + 'lines': [] + }, + 'pool_connection_free': { + 'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections', + 'proxysql.pool_free_connections', 'line'], + 'lines': [] + }, + 'pool_connection_ok': { + 'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections', + 'proxysql.pool_ok_connections', 'line'], + 'lines': [] + }, + 'pool_connection_error': { + 'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections', + 'proxysql.pool_error_connections', 'line'], + 'lines': [] + }, + 'commands_count': { + 'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'], + 'lines': [] + }, + 'commands_duration': { + 'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'], + 'lines': [] + } +} + + +class Service(MySQLService): + def __init__(self, configuration=None, name=None): + MySQLService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.queries = dict( + global_status=QUERY_GLOBAL, + connection_pool_status=QUERY_CONNECTION_POOL, + commands_status=QUERY_COMMANDS + ) + + def _get_data(self): + raw_data = self._get_raw_data(description=True) + + if not raw_data: + return None + + to_netdata = dict() + + if 'global_status' in raw_data: + global_status = dict(raw_data['global_status'][0]) + for key in global_status: + if key.lower() in GLOBAL_STATS: + to_netdata[key.lower()] = 
global_status[key]
+
+        if 'connection_pool_status' in raw_data:
+
+            to_netdata['bytes_data_recv'] = 0
+            to_netdata['bytes_data_sent'] = 0
+
+            for record in raw_data['connection_pool_status'][0]:
+                backend = self.generate_backend(record)
+                name = self.generate_backend_name(backend)
+
+                for key in backend:
+                    if key in CONNECTION_POOL_STATS:
+                        if key == 'status':
+                            backend[key] = self.convert_status(backend[key])
+
+                        if len(self.charts) > 0:
+                            if (name + '_status') not in self.charts['pool_status']:
+                                self.add_backend_dimensions(name)
+
+                        to_netdata["{0}_{1}".format(name, key)] = backend[key]
+
+                        if key == 'bytes_data_recv':
+                            to_netdata['bytes_data_recv'] += int(backend[key])
+
+                        if key == 'bytes_data_sent':
+                            to_netdata['bytes_data_sent'] += int(backend[key])
+
+        if 'commands_status' in raw_data:
+            for record in raw_data['commands_status'][0]:
+                cmd = self.generate_command_stats(record)
+                name = cmd['name']
+
+                if len(self.charts) > 0:
+                    if (name + '_count') not in self.charts['commands_count']:
+                        self.add_command_dimensions(name)
+                        self.add_histogram_chart(cmd)
+
+                to_netdata[name + '_count'] = cmd['count']
+                to_netdata[name + '_duration'] = cmd['duration']
+                for histogram in cmd['histogram']:
+                    dim_id = 'commands_histogram_{0}_{1}'.format(name, histogram)
+                    to_netdata[dim_id] = cmd['histogram'][histogram]
+
+        return to_netdata or None
+
+    def add_backend_dimensions(self, name):
+        self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
+        self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
+        self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
+        self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
+        self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
+        self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
+        self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
+        self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
+        self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])
+
+    def add_command_dimensions(self, cmd):
+        self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
+        self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
+
+    def add_histogram_chart(self, cmd):
+        chart = self.charts.add_chart(self.histogram_chart(cmd))
+
+        for histogram in HISTOGRAM_ORDER:
+            dim_id = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
+            chart.add_dimension([dim_id, histogram, 'incremental'])
+
+    @staticmethod
+    def histogram_chart(cmd):
+        return [
+            'commands_histogram_' + cmd['name'],
+            None,
+            'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
+            'commands',
+            'commands_histogram',
+            'proxysql.commands_histogram_' + cmd['name'],
+            'stacked'
+        ]
+
+    @staticmethod
+    def generate_backend(data):
+        return {
+            'hostgroup': data[0],
+            'srv_host': data[1],
+            'srv_port': data[2],
+            'status': data[3],
+            'connused': data[4],
+            'connfree': data[5],
+            'connok': data[6],
+            'connerr': data[7],
+            'queries': data[8],
+            'bytes_data_sent': data[9],
+            'bytes_data_recv': data[10],
+            'latency_us': data[11]
+        }
+
+    @staticmethod
+    def generate_command_stats(data):
+        return {
+            'name': data[0].lower(),
+            'duration': data[1],
+            'count': data[2],
+            'histogram': {
+                '100us': data[3],
+                '500us': data[4],
+                '1ms': data[5],
+                '5ms': data[6],
+                '10ms': data[7],
+                '50ms': data[8],
+                '100ms': data[9],
+                '500ms': data[10],
+                '1s': data[11],
+                '5s': data[12],
+                '10s': data[13],
+                'inf': data[14]
+            }
+        }
+
+    @staticmethod
+    def generate_backend_name(backend):
+        hostgroup = backend['hostgroup'].replace(' ', '_').lower()
+        host = backend['srv_host'].replace('.', '_')
+
+        return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
+
+    @staticmethod
+    def convert_status(status):
+        if status in STATUS:
+            return STATUS[status]
+        return -1
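+
+# For reference, the query() helper above simply joins column names into a
+# SELECT statement; e.g. QUERY_GLOBAL expands to (illustrative comment only):
+#
+#     >>> query("stats_mysql_global", "Variable_Name", "Variable_Value")
+#     'SELECT Variable_Name, Variable_Value FROM stats_mysql_global'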
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
new file mode 100644
index 000000000..d29c2e5be
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -0,0 +1,118 @@
+# netdata python.d.plugin configuration for ProxySQL
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).

+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.

+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1

+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000

+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60

+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0

+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, proxysql also supports the following:
+#
+#     host: 'IP or HOSTNAME'  # the host to connect to
+#     port: PORT              # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+#     user: 'username'        # the proxysql username to use
+#     pass: 'password'        # the proxysql password to use
+#

+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)

+tcp:
+  name  : 'local'
+  user  : 'stats'
+  pass  : 'stats'
+  host  : 'localhost'
+  port  : '6032'

+tcpipv4:
+  name  : 'local'
+  user  : 'stats'
+  pass  : 'stats'
+  host  : '127.0.0.1'
+  port  : '6032'

+tcpipv6:
+  name  : 'local'
+  user  : 'stats'
+  pass  : 'stats'
+  host  : '::1'
+  port  : '6032'

+tcp_admin:
+  name  : 'local'
+  user  : 'admin'
+  pass  : 'admin'
+  host  : 'localhost'
+  port  : '6032'

+tcpipv4_admin:
+  name  : 'local'
+  user  : 'admin'
+  pass  : 'admin'
+  host  : '127.0.0.1'
+  port  : '6032'

+tcpipv6_admin:
+  name  : 'local'
+  user  : 'admin'
+  pass  : 'admin'
+  host  : '::1'
+  port  : '6032'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
new file mode 100644
index 000000000..fe94b9254
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += puppet/puppet.chart.py
+dist_pythonconfig_DATA += puppet/puppet.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += puppet/README.md puppet/Makefile.inc
+
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 100644
index 000000000..8304c831e
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1,48 @@
+# puppet
+
+Monitor the status of Puppet Server and PuppetDB.
+
+The following charts are drawn:
+
+1. **JVM Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+2. **JVM Non-Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+3. **CPU Usage**
+ * execution
+ * GC (taken by garbage collection)
+4. **File Descriptors**
+ * max
+ * used
+
+
+### configuration
+
+```yaml
+puppetdb:
+  url: 'https://fqdn.example.com:8081'
+  tls_cert_file: /path/to/client.crt
+  tls_key_file: /path/to/client.key
+  autodetection_retry: 1
+  retries: 3600
+
+puppetserver:
+  url: 'https://fqdn.example.com:8140'
+  autodetection_retry: 1
+  retries: 3600
+```
+
+When no configuration is given, `https://fqdn.example.com:8140` is
+tried without any retries.
+
+### notes
+
+* The node's exact fully qualified domain name should be used.
+* Puppet Server/DB startup is usually VERY slow, so the retry count
+  should be set to a reasonably high value.
+* A secure PuppetDB configuration may require a client certificate.
+  This does not apply to the default PuppetDB configuration though.
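+
+The endpoint the module polls can also be checked by hand. A minimal sketch (assumptions: Python 3, the example FQDN from above, and a server certificate your system trusts):
+
+```python
+import json
+from urllib.request import urlopen
+
+# the same status endpoint the module queries
+raw = urlopen('https://fqdn.example.com:8140/status/v1/services?level=debug').read()
+services = json.loads(raw)
+print(sorted(services))  # one entry per service, e.g. 'status-service'
+```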
+
+---
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
new file mode 100644
index 000000000..5c8e48bd9
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Description: puppet netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module should work with both the open source and PE versions
+# of PuppetServer and PuppetDB.
+#
+# NOTE: PuppetDB may be configured to require a proper TLS
+# client certificate for security reasons. Use the tls_key_file
+# and tls_cert_file options in that case.
+#

+from bases.FrameworkServices.UrlService import UrlService
+from json import loads
+import socket
+
+update_every = 5
+priority = 60000
+# very long clojure-based service startup time
+retries = 180
+
+MB = 1048576
+CPU_SCALE = 1000
+ORDER = [
+    'jvm_heap',
+    'jvm_nonheap',
+    'cpu',
+    'fd_open',
+]
+CHARTS = {
+    'jvm_heap': {
+        'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+        'lines': [
+            ['jvm_heap_committed', 'committed', 'absolute', 1, MB],
+            ['jvm_heap_used', 'used', 'absolute', 1, MB],
+        ],
+        'variables': [
+            ['jvm_heap_max'],
+            ['jvm_heap_init'],
+        ],
+    },
+    'jvm_nonheap': {
+        'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+        'lines': [
+            ['jvm_nonheap_committed', 'committed', 'absolute', 1, MB],
+            ['jvm_nonheap_used', 'used', 'absolute', 1, MB],
+        ],
+        'variables': [
+            ['jvm_nonheap_max'],
+            ['jvm_nonheap_init'],
+        ],
+    },
+    'cpu': {
+        'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
+        'lines': [
+            ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
+            ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
+        ]
+    },
+    'fd_open': {
+        'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
+        'lines': [
+            ['fd_used', 'used', 'absolute'],
+        ],
+        'variables': [
+            ['fd_max'],
+        ],
+    },
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.url = 'https://{0}:8140'.format(socket.getfqdn())
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        # NOTE: there are several ways to retrieve data
+        # 1. Only PE versions:
+        #    https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
+        # 2. Individual Metrics API (JMX):
+        #    https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
+        # 3. Extended status at debug level:
+        #    https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
+        #
+        # For the sake of simplicity and efficiency, the status endpoint (3) is used.
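+        # An abridged sketch of the JSON this method consumes, inferred from
+        # the key lookups below (values are illustrative only):
+        #
+        #   {"status-service": {"status": {"experimental": {"jvm-metrics": {
+        #       "heap-memory": {"committed": 0, "init": 0, "max": 0, "used": 0},
+        #       "non-heap-memory": {...}, "file-descriptors": {"max": 0, "used": 0},
+        #       "cpu-usage": 0.0, "gc-cpu-usage": 0.0}}}}}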
+ + raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug') + + if raw_data is None: + return None + + raw_data = loads(raw_data) + data = {} + + try: + try: + jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics'] + except KeyError: + jvm_metrics = raw_data['status-service']['status']['jvm-metrics'] + + heap_mem = jvm_metrics['heap-memory'] + non_heap_mem = jvm_metrics['non-heap-memory'] + + for k in ['max', 'committed', 'used', 'init']: + data['jvm_heap_'+k] = heap_mem[k] + data['jvm_nonheap_'+k] = non_heap_mem[k] + + fd_open = jvm_metrics['file-descriptors'] + data['fd_max'] = fd_open['max'] + data['fd_used'] = fd_open['used'] + + data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE) + data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE) + except KeyError: + pass + + return data or None diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf new file mode 100644 index 000000000..991bfabed --- /dev/null +++ b/collectors/python.d.plugin/puppet/puppet.conf @@ -0,0 +1,98 @@ +# netdata python.d.plugin configuration for Puppet Server and Puppet DB +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# This configuration also comes from the UrlService base:
+#     url:                    # HTTP or HTTPS URL
+#     tls_verify: False       # control HTTPS server certificate verification
+#     tls_ca_file:            # optional CA (bundle) file to use
+#     tls_cert_file:          # optional client certificate file
+#     tls_key_file:           # optional client key file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+# puppet:
+#     url: 'https://<FQDN>:8140'
+#

+#
+# A production configuration should look like the example below.
+#
+# NOTE: Puppet Server/DB startup is usually VERY slow, so the retry
+#       count should be set to a reasonably high value.
+#
+# NOTE: a secure PuppetDB configuration may require a client certificate.
+#       This does not apply to the default PuppetDB configuration though.
+#
+# puppetdb:
+#     url: 'https://fqdn.example.com:8081'
+#     tls_cert_file: /path/to/client.crt
+#     tls_key_file: /path/to/client.key
+#     autodetection_retry: 1
+#     retries: 3600
+#
+# puppetserver:
+#     url: 'https://fqdn.example.com:8140'
+#     autodetection_retry: 1
+#     retries: 3600
+#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
new file mode 100644
index 000000000..97f4cb8d5
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.conf
@@ -0,0 +1,97 @@
+# netdata python.d.plugin configuration
+#
+# This file is in YaML format.
+# Generally the format is:
+#
+# name: value
+#

+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes

+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.

+# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
+gc_run: yes

+# Garbage collection interval in seconds. Default is 300.
+gc_interval: 300 + +# apache: yes + +# apache_cache has been replaced by web_log +apache_cache: no +# beanstalk: yes +# bind_rndc: yes +# boinc: yes +# ceph: yes +chrony: no +# couchdb: yes +# cpufreq: yes +# cpuidle: yes +# dns_query_time: yes +# dnsdist: yes +# dovecot: yes +# elasticsearch: yes + +# this is just an example +example: no + +# exim: yes +# fail2ban: yes +# freeradius: yes +go_expvar: no + +# gunicorn_log has been replaced by web_log +gunicorn_log: no +# haproxy: yes +# hddtemp: yes +# icecast: yes +# ipfs: yes +# isc_dhcpd: yes +# linux_power_supply: yes +# litespeed: yes +logind: no +# mdstat: yes +# memcached: yes +# mongodb: yes +# monit: yes +# mysql: yes +# nginx: yes +# nginx_plus: yes + +# nginx_log has been replaced by web_log +nginx_log: no +# nsd: yes +# ntpd: yes +# ovpn_status_log: yes +# phpfpm: yes +# postfix: yes +# postgres: yes +# powerdns: yes +# proxysql: yes +# puppet: yes +# rabbitmq: yes +# redis: yes +# rethinkdbs: yes +# retroshare: yes +# samba: yes +# sensors: yes +# smartd_log: yes +# spigotmc: yes +# springboot: yes +# squid: yes +# tomcat: yes +unbound: no +# uwsgi: yes +# varnish: yes +# w1sensor: yes +# web_log: yes diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin new file mode 100644 index 000000000..264c3383d --- /dev/null +++ b/collectors/python.d.plugin/python.d.plugin @@ -0,0 +1,427 @@ +#!/usr/bin/env bash +'''':; exec "$(command -v python || command -v python3 || command -v python2 || +echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # ''' + +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import gc +import os +import sys +import threading + +from re import sub +from sys import version_info, argv +from time import sleep + +GC_RUN = True +GC_COLLECT_EVERY = 300 + +PY_VERSION = version_info[:2] + +USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata') +STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d') + +PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d') +PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d') + + +PLUGINS_DIR = os.path.abspath(os.getenv( + 'NETDATA_PLUGINS_DIR', + os.path.dirname(__file__)) + '/../python.d') + + +PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules') + +sys.path.append(PYTHON_MODULES_DIR) + +from bases.loaders import ModuleAndConfigLoader # noqa: E402 +from bases.loggers import PythonDLogger # noqa: E402 +from bases.collection import setdefault_values, run_and_exit # noqa: E402 + +try: + from collections import OrderedDict +except ImportError: + from third_party.ordereddict import OrderedDict + +BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1), + 'retries': 60, + 'priority': 60000, + 'autodetection_retry': 0, + 'chart_cleanup': 10, + 'name': str()} + + +MODULE_EXTENSION = '.chart.py' +OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log'] + + +def module_ok(m): + return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES + + +ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)] + + +def parse_cmd(): + debug = 'debug' in argv[1:] + trace = 'trace' in argv[1:] + override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False) + modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, 
MODULE_EXTENSION]) in ALL_MODULES] + return debug, trace, override_update_every, modules or ALL_MODULES + + +def multi_job_check(config): + return next((True for key in config if isinstance(config[key], dict)), False) + + +class RawModule: + def __init__(self, name, path, explicitly_enabled=True): + self.name = name + self.path = path + self.explicitly_enabled = explicitly_enabled + + +class Job(object): + def __init__(self, initialized_job, job_id): + """ + :param initialized_job: instance of <Class Service> + :param job_id: <str> + """ + self.job = initialized_job + self.id = job_id # key in Modules.jobs() + self.module_name = self.job.__module__ # used in Plugin.delete_job() + self.recheck_every = self.job.configuration.pop('autodetection_retry') + self.checked = False # used in Plugin.check_job() + self.created = False # used in Plugin.create_job_charts() + if self.job.update_every < int(OVERRIDE_UPDATE_EVERY): + self.job.update_every = int(OVERRIDE_UPDATE_EVERY) + + def __getattr__(self, item): + return getattr(self.job, item) + + def __repr__(self): + return self.job.__repr__() + + def is_dead(self): + return bool(self.ident) and not self.is_alive() + + def not_launched(self): + return not bool(self.ident) + + def is_autodetect(self): + return self.recheck_every + + +class Module(object): + def __init__(self, service, config): + """ + :param service: <Module> + :param config: <dict> + """ + self.service = service + self.name = service.__name__ + self.config = self.jobs_configurations_builder(config) + self.jobs = OrderedDict() + self.counter = 1 + + self.initialize_jobs() + + def __repr__(self): + return "<Class Module '{name}'>".format(name=self.name) + + def __iter__(self): + return iter(OrderedDict(self.jobs).values()) + + def __getitem__(self, item): + return self.jobs[item] + + def __delitem__(self, key): + del self.jobs[key] + + def __len__(self): + return len(self.jobs) + + def __bool__(self): + return bool(self.jobs) + + def __nonzero__(self): + return self.__bool__() + + def jobs_configurations_builder(self, config): + """ + :param config: <dict> + :return: + """ + counter = 0 + job_base_config = dict() + + for attr in BASE_CONFIG: + job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr])) + + if not config: + config = {str(): dict()} + elif not multi_job_check(config): + config = {str(): config} + + for job_name in config: + if not isinstance(config[job_name], dict): + continue + + job_config = setdefault_values(config[job_name], base_dict=job_base_config) + job_name = sub(r'\s+', '_', job_name) + config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name']) + counter += 1 + job_id = 'job' + str(counter).zfill(3) + + yield job_id, job_name, job_config + + def initialize_jobs(self): + """ + :return: + """ + for job_id, job_name, job_config in self.config: + job_config['job_name'] = job_name + job_config['override_name'] = job_config.pop('name') + + try: + initialized_job = self.service.Service(configuration=job_config) + except Exception as error: + Logger.error("job initialization: '{module_name} {job_name}' " + "=> ['FAILED'] ({error})".format(module_name=self.name, + job_name=job_name, + error=error)) + continue + else: + Logger.debug("job initialization: '{module_name} {job_name}' " + "=> ['OK']".format(module_name=self.name, + job_name=job_name or self.name)) + self.jobs[job_id] = Job(initialized_job=initialized_job, + job_id=job_id) + del self.config + del self.service + + +class Plugin(object): + def __init__(self): + 
self.loader = ModuleAndConfigLoader() + self.modules = OrderedDict() + self.sleep_time = 1 + self.runs_counter = 0 + + user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf') + stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf') + + Logger.debug("loading '{0}'".format(user_config)) + self.config, error = self.loader.load_config_from_file(user_config) + + if error: + Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error)) + Logger.debug("loading '{0}'".format(stock_config)) + self.config, error = self.loader.load_config_from_file(stock_config) + if error: + Logger.error("cannot load '{0}': {1}".format(stock_config, error)) + + self.do_gc = self.config.get("gc_run", GC_RUN) + self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY) + + if not self.config.get('enabled', True): + run_and_exit(Logger.info)('DISABLED in configuration file.') + + self.load_and_initialize_modules() + if not self.modules: + run_and_exit(Logger.info)('No modules to run. Exit...') + + def __iter__(self): + return iter(OrderedDict(self.modules).values()) + + @property + def jobs(self): + return (job for mod in self for job in mod) + + @property + def dead_jobs(self): + return (job for job in self.jobs if job.is_dead()) + + @property + def autodetect_jobs(self): + return [job for job in self.jobs if job.not_launched()] + + def enabled_modules(self): + for mod in MODULES_TO_RUN: + mod_name = mod[:-len(MODULE_EXTENSION)] + mod_path = os.path.join(PLUGINS_DIR, mod) + if any( + [ + self.config.get('default_run', True) and self.config.get(mod_name, True), + (not self.config.get('default_run')) and self.config.get(mod_name), + ] + ): + yield RawModule( + name=mod_name, + path=mod_path, + explicitly_enabled=self.config.get(mod_name), + ) + + def load_and_initialize_modules(self): + for mod in self.enabled_modules(): + + # Load module from file ------------------------------------------------------------ + loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path) + log = Logger.error if error else Logger.debug + log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK', + module_name=mod.name)) + if error: + Logger.error("load source error : {0}".format(error)) + continue + + # Load module config from file ------------------------------------------------------ + user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf') + stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf') + + Logger.debug("loading '{0}'".format(user_config)) + loaded_config, error = self.loader.load_config_from_file(user_config) + if error: + Logger.error("cannot load '{0}' : {1}. 
Will try stock version.".format(user_config, error)) + Logger.debug("loading '{0}'".format(stock_config)) + loaded_config, error = self.loader.load_config_from_file(stock_config) + + if error: + Logger.error("cannot load '{0}': {1}".format(stock_config, error)) + + # Skip disabled modules + if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled: + Logger.info("module '{0}' disabled by default".format(loaded_module.__name__)) + continue + + # Module initialization --------------------------------------------------- + + initialized_module = Module(service=loaded_module, config=loaded_config) + Logger.debug("module status: '{module_name}' => [{status}] " + "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED', + module_name=initialized_module.name, + jobs_number=len(initialized_module))) + if initialized_module: + self.modules[initialized_module.name] = initialized_module + + @staticmethod + def check_job(job): + """ + :param job: <Job> + :return: + """ + try: + check_ok = bool(job.check()) + except Exception as error: + job.error('check() unhandled exception: {error}'.format(error=error)) + return None + else: + return check_ok + + @staticmethod + def create_job_charts(job): + """ + :param job: <Job> + :return: + """ + try: + create_ok = job.create() + except Exception as error: + job.error('create() unhandled exception: {error}'.format(error=error)) + return False + else: + return create_ok + + def delete_job(self, job): + """ + :param job: <Job> + :return: + """ + del self.modules[job.module_name][job.id] + + def run_check(self): + checked = list() + for job in self.jobs: + if job.name in checked: + job.info('check() => [DROPPED] (already served by another job)') + self.delete_job(job) + continue + ok = self.check_job(job) + if ok: + job.info('check() => [OK]') + checked.append(job.name) + job.checked = True + continue + if not job.is_autodetect() or ok is None: + job.info('check() => [FAILED]') + self.delete_job(job) + else: + job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every)) + + def run_create(self): + for job in self.jobs: + if not job.checked: + # skip autodetection_retry jobs + continue + ok = self.create_job_charts(job) + if ok: + job.debug('create() => [OK] (charts: {0})'.format(len(job.charts))) + job.created = True + continue + job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts))) + self.delete_job(job) + + def start(self): + self.run_check() + self.run_create() + for job in self.jobs: + if job.created: + job.start() + + while True: + if threading.active_count() <= 1 and not self.autodetect_jobs: + run_and_exit(Logger.info)('FINISHED') + + sleep(self.sleep_time) + self.cleanup() + self.autodetect_retry() + + # FIXME: https://github.com/netdata/netdata/issues/3817 + if self.do_gc and self.runs_counter % self.gc_interval == 0: + v = gc.collect() + Logger.debug("GC full collection run result: {0}".format(v)) + + def cleanup(self): + for job in self.dead_jobs: + self.delete_job(job) + for mod in self: + if not mod: + del self.modules[mod.name] + + def autodetect_retry(self): + self.runs_counter += self.sleep_time + for job in self.autodetect_jobs: + if self.runs_counter % job.recheck_every == 0: + checked = self.check_job(job) + if checked: + created = self.create_job_charts(job) + if not created: + self.delete_job(job) + continue + job.start() + + +if __name__ == '__main__': + DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd() + Logger = PythonDLogger() + if 
DEBUG: + Logger.logger.severity = 'DEBUG' + if TRACE: + Logger.log_traceback = True + Logger.info('Using python {version}'.format(version=PY_VERSION[0])) + + plugin = Plugin() + plugin.start() diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in new file mode 100755 index 000000000..7ac03fd99 --- /dev/null +++ b/collectors/python.d.plugin/python.d.plugin.in @@ -0,0 +1,427 @@ +#!/usr/bin/env bash +'''':; exec "$(command -v python || command -v python3 || command -v python2 || +echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # ''' + +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import gc +import os +import sys +import threading + +from re import sub +from sys import version_info, argv +from time import sleep + +GC_RUN = True +GC_COLLECT_EVERY = 300 + +PY_VERSION = version_info[:2] + +USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '@configdir_POST@') +STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '@libconfigdir_POST@') + +PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d') +PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d') + + +PLUGINS_DIR = os.path.abspath(os.getenv( + 'NETDATA_PLUGINS_DIR', + os.path.dirname(__file__)) + '/../python.d') + + +PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules') + +sys.path.append(PYTHON_MODULES_DIR) + +from bases.loaders import ModuleAndConfigLoader # noqa: E402 +from bases.loggers import PythonDLogger # noqa: E402 +from bases.collection import setdefault_values, run_and_exit # noqa: E402 + +try: + from collections import OrderedDict +except ImportError: + from third_party.ordereddict import OrderedDict + +BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1), + 'retries': 60, + 'priority': 60000, + 'autodetection_retry': 0, + 'chart_cleanup': 10, + 'name': str()} + + +MODULE_EXTENSION = '.chart.py' +OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log'] + + +def module_ok(m): + return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES + + +ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)] + + +def parse_cmd(): + debug = 'debug' in argv[1:] + trace = 'trace' in argv[1:] + override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False) + modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES] + return debug, trace, override_update_every, modules or ALL_MODULES + + +def multi_job_check(config): + return next((True for key in config if isinstance(config[key], dict)), False) + + +class RawModule: + def __init__(self, name, path, explicitly_enabled=True): + self.name = name + self.path = path + self.explicitly_enabled = explicitly_enabled + + +class Job(object): + def __init__(self, initialized_job, job_id): + """ + :param initialized_job: instance of <Class Service> + :param job_id: <str> + """ + self.job = initialized_job + self.id = job_id # key in Modules.jobs() + self.module_name = self.job.__module__ # used in Plugin.delete_job() + self.recheck_every = self.job.configuration.pop('autodetection_retry') + self.checked = False # used in Plugin.check_job() + self.created = False # used in Plugin.create_job_charts() + if self.job.update_every < int(OVERRIDE_UPDATE_EVERY): + self.job.update_every = int(OVERRIDE_UPDATE_EVERY) + + def 
__getattr__(self, item): + return getattr(self.job, item) + + def __repr__(self): + return self.job.__repr__() + + def is_dead(self): + return bool(self.ident) and not self.is_alive() + + def not_launched(self): + return not bool(self.ident) + + def is_autodetect(self): + return self.recheck_every + + +class Module(object): + def __init__(self, service, config): + """ + :param service: <Module> + :param config: <dict> + """ + self.service = service + self.name = service.__name__ + self.config = self.jobs_configurations_builder(config) + self.jobs = OrderedDict() + self.counter = 1 + + self.initialize_jobs() + + def __repr__(self): + return "<Class Module '{name}'>".format(name=self.name) + + def __iter__(self): + return iter(OrderedDict(self.jobs).values()) + + def __getitem__(self, item): + return self.jobs[item] + + def __delitem__(self, key): + del self.jobs[key] + + def __len__(self): + return len(self.jobs) + + def __bool__(self): + return bool(self.jobs) + + def __nonzero__(self): + return self.__bool__() + + def jobs_configurations_builder(self, config): + """ + :param config: <dict> + :return: + """ + counter = 0 + job_base_config = dict() + + for attr in BASE_CONFIG: + job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr])) + + if not config: + config = {str(): dict()} + elif not multi_job_check(config): + config = {str(): config} + + for job_name in config: + if not isinstance(config[job_name], dict): + continue + + job_config = setdefault_values(config[job_name], base_dict=job_base_config) + job_name = sub(r'\s+', '_', job_name) + config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name']) + counter += 1 + job_id = 'job' + str(counter).zfill(3) + + yield job_id, job_name, job_config + + def initialize_jobs(self): + """ + :return: + """ + for job_id, job_name, job_config in self.config: + job_config['job_name'] = job_name + job_config['override_name'] = job_config.pop('name') + + try: + initialized_job = self.service.Service(configuration=job_config) + except Exception as error: + Logger.error("job initialization: '{module_name} {job_name}' " + "=> ['FAILED'] ({error})".format(module_name=self.name, + job_name=job_name, + error=error)) + continue + else: + Logger.debug("job initialization: '{module_name} {job_name}' " + "=> ['OK']".format(module_name=self.name, + job_name=job_name or self.name)) + self.jobs[job_id] = Job(initialized_job=initialized_job, + job_id=job_id) + del self.config + del self.service + + +class Plugin(object): + def __init__(self): + self.loader = ModuleAndConfigLoader() + self.modules = OrderedDict() + self.sleep_time = 1 + self.runs_counter = 0 + + user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf') + stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf') + + Logger.debug("loading '{0}'".format(user_config)) + self.config, error = self.loader.load_config_from_file(user_config) + + if error: + Logger.error("cannot load '{0}': {1}. 
Will try stock version.".format(user_config, error)) + Logger.debug("loading '{0}'".format(stock_config)) + self.config, error = self.loader.load_config_from_file(stock_config) + if error: + Logger.error("cannot load '{0}': {1}".format(stock_config, error)) + + self.do_gc = self.config.get("gc_run", GC_RUN) + self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY) + + if not self.config.get('enabled', True): + run_and_exit(Logger.info)('DISABLED in configuration file.') + + self.load_and_initialize_modules() + if not self.modules: + run_and_exit(Logger.info)('No modules to run. Exit...') + + def __iter__(self): + return iter(OrderedDict(self.modules).values()) + + @property + def jobs(self): + return (job for mod in self for job in mod) + + @property + def dead_jobs(self): + return (job for job in self.jobs if job.is_dead()) + + @property + def autodetect_jobs(self): + return [job for job in self.jobs if job.not_launched()] + + def enabled_modules(self): + for mod in MODULES_TO_RUN: + mod_name = mod[:-len(MODULE_EXTENSION)] + mod_path = os.path.join(PLUGINS_DIR, mod) + if any( + [ + self.config.get('default_run', True) and self.config.get(mod_name, True), + (not self.config.get('default_run')) and self.config.get(mod_name), + ] + ): + yield RawModule( + name=mod_name, + path=mod_path, + explicitly_enabled=self.config.get(mod_name), + ) + + def load_and_initialize_modules(self): + for mod in self.enabled_modules(): + + # Load module from file ------------------------------------------------------------ + loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path) + log = Logger.error if error else Logger.debug + log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK', + module_name=mod.name)) + if error: + Logger.error("load source error : {0}".format(error)) + continue + + # Load module config from file ------------------------------------------------------ + user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf') + stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf') + + Logger.debug("loading '{0}'".format(user_config)) + loaded_config, error = self.loader.load_config_from_file(user_config) + if error: + Logger.error("cannot load '{0}' : {1}. 
Will try stock version.".format(user_config, error)) + Logger.debug("loading '{0}'".format(stock_config)) + loaded_config, error = self.loader.load_config_from_file(stock_config) + + if error: + Logger.error("cannot load '{0}': {1}".format(stock_config, error)) + + # Skip disabled modules + if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled: + Logger.info("module '{0}' disabled by default".format(loaded_module.__name__)) + continue + + # Module initialization --------------------------------------------------- + + initialized_module = Module(service=loaded_module, config=loaded_config) + Logger.debug("module status: '{module_name}' => [{status}] " + "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED', + module_name=initialized_module.name, + jobs_number=len(initialized_module))) + if initialized_module: + self.modules[initialized_module.name] = initialized_module + + @staticmethod + def check_job(job): + """ + :param job: <Job> + :return: + """ + try: + check_ok = bool(job.check()) + except Exception as error: + job.error('check() unhandled exception: {error}'.format(error=error)) + return None + else: + return check_ok + + @staticmethod + def create_job_charts(job): + """ + :param job: <Job> + :return: + """ + try: + create_ok = job.create() + except Exception as error: + job.error('create() unhandled exception: {error}'.format(error=error)) + return False + else: + return create_ok + + def delete_job(self, job): + """ + :param job: <Job> + :return: + """ + del self.modules[job.module_name][job.id] + + def run_check(self): + checked = list() + for job in self.jobs: + if job.name in checked: + job.info('check() => [DROPPED] (already served by another job)') + self.delete_job(job) + continue + ok = self.check_job(job) + if ok: + job.info('check() => [OK]') + checked.append(job.name) + job.checked = True + continue + if not job.is_autodetect() or ok is None: + job.info('check() => [FAILED]') + self.delete_job(job) + else: + job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every)) + + def run_create(self): + for job in self.jobs: + if not job.checked: + # skip autodetection_retry jobs + continue + ok = self.create_job_charts(job) + if ok: + job.debug('create() => [OK] (charts: {0})'.format(len(job.charts))) + job.created = True + continue + job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts))) + self.delete_job(job) + + def start(self): + self.run_check() + self.run_create() + for job in self.jobs: + if job.created: + job.start() + + while True: + if threading.active_count() <= 1 and not self.autodetect_jobs: + run_and_exit(Logger.info)('FINISHED') + + sleep(self.sleep_time) + self.cleanup() + self.autodetect_retry() + + # FIXME: https://github.com/netdata/netdata/issues/3817 + if self.do_gc and self.runs_counter % self.gc_interval == 0: + v = gc.collect() + Logger.debug("GC full collection run result: {0}".format(v)) + + def cleanup(self): + for job in self.dead_jobs: + self.delete_job(job) + for mod in self: + if not mod: + del self.modules[mod.name] + + def autodetect_retry(self): + self.runs_counter += self.sleep_time + for job in self.autodetect_jobs: + if self.runs_counter % job.recheck_every == 0: + checked = self.check_job(job) + if checked: + created = self.create_job_charts(job) + if not created: + self.delete_job(job) + continue + job.start() + + +if __name__ == '__main__': + DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd() + Logger = PythonDLogger() + if 
DEBUG: + Logger.logger.severity = 'DEBUG' + if TRACE: + Logger.log_traceback = True + Logger.info('Using python {version}'.format(version=PY_VERSION[0])) + + plugin = Plugin() + plugin.start() diff --git a/collectors/python.d.plugin/python_modules/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py new file mode 100644 index 000000000..72f9ff714 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import os + +from subprocess import Popen, PIPE + +from bases.FrameworkServices.SimpleService import SimpleService +from bases.collection import find_binary + + +class ExecutableService(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.command = None + + def _get_raw_data(self, stderr=False, command=None): + """ + Get raw data from executed command + :return: <list> + """ + try: + p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE) + except Exception as error: + self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command, + error=error)) + return None + data = list() + std = p.stderr if stderr else p.stdout + for line in std: + try: + data.append(line.decode('utf-8')) + except TypeError: + continue + + return data + + def check(self): + """ + Parse basic configuration, check if command is whitelisted and is returning values + :return: <boolean> + """ + # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified) + if 'command' in self.configuration: + self.command = self.configuration['command'] + + # "command" must be: 1.not None 2. type <str> + if not (self.command and isinstance(self.command, str)): + self.error('Command is not defined or command type is not <str>') + return False + + # Split "command" into: 1. command <str> 2. options <list> + command, opts = self.command.split()[0], self.command.split()[1:] + + # Check for "bad" symbols in options. No pipes, redirects etc. + opts_list = ['&', '|', ';', '>', '<'] + bad_opts = set(''.join(opts)) & set(opts_list) + if bad_opts: + self.error("Bad command argument(s): {opts}".format(opts=bad_opts)) + return False + + # Find absolute path ('echo' => '/bin/echo') + if '/' not in command: + command = find_binary(command) + if not command: + self.error('Can\'t locate "{command}" binary'.format(command=self.command)) + return False + # Check if binary exist and executable + else: + if not os.access(command, os.X_OK): + self.error('"{binary}" is not executable'.format(binary=command)) + return False + + self.command = [command] + opts if opts else [command] + + try: + data = self._get_data() + except Exception as error: + self.error('_get_data() failed. Command: {command}. 
Error: {error}'.format(command=self.command, + error=error)) + return False + + if isinstance(data, dict) and data: + return True + self.error('Command "{command}" returned no data'.format(command=self.command)) + return False diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py new file mode 100644 index 000000000..5acfd73f8 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +from glob import glob +import os + +from bases.FrameworkServices.SimpleService import SimpleService + + +class LogService(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.log_path = self.configuration.get('path') + self.__glob_path = self.log_path + self._last_position = 0 + self.__re_find = dict(current=0, run=0, maximum=60) + + def _get_raw_data(self): + """ + Get log lines since last poll + :return: list + """ + lines = list() + try: + if self.__re_find['current'] == self.__re_find['run']: + self._find_recent_log_file() + size = os.path.getsize(self.log_path) + if size == self._last_position: + self.__re_find['current'] += 1 + return list() # return empty list if nothing has changed + elif size < self._last_position: + self._last_position = 0 # read from beginning if file has shrunk + + with open(self.log_path) as fp: + fp.seek(self._last_position) + for line in fp: + lines.append(line) + self._last_position = fp.tell() + self.__re_find['current'] = 0 + except (OSError, IOError) as error: + self.__re_find['current'] += 1 + self.error(str(error)) + + return lines or None + + def _find_recent_log_file(self): + """ + :return: + """ + self.__re_find['run'] = self.__re_find['maximum'] + self.__re_find['current'] = 0 + self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files + path_list = glob(self.__glob_path) + if path_list: + self.log_path = max(path_list) + return True + return False + + def check(self): + """ + Parse basic configuration and check if log file exists + :return: boolean + """ + if not self.log_path: + self.error('No path to log specified') + return None + + if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path): + return True + self.error('Cannot access {0}'.format(self.log_path)) + return False + + def create(self): + # set cursor at last byte of log file + self._last_position = os.path.getsize(self.log_path) + status = SimpleService.create(self) + return status diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py new file mode 100644 index 000000000..53807e2c4 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +from sys import exc_info + +try: + import MySQLdb + + PY_MYSQL = True +except ImportError: + try: + import pymysql as MySQLdb + + PY_MYSQL = True + except ImportError: + PY_MYSQL = False + +from bases.FrameworkServices.SimpleService import SimpleService + 
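+# An illustrative job configuration a subclass might receive (names and
+# values below are examples only); check() maps these keys onto MySQLdb
+# connection properties, and 'queries' must contain SELECT/SHOW statements:
+#
+#   myjob:
+#     user    : 'netdata'
+#     pass    : 'secret'
+#     host    : '127.0.0.1'   # alternatively: socket, or my.cnf
+#     port    : 3306
+#     queries :
+#       global_status : 'SHOW GLOBAL STATUS'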
+ +class MySQLService(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.__connection = None + self.__conn_properties = dict() + self.extra_conn_properties = dict() + self.__queries = self.configuration.get('queries', dict()) + self.queries = dict() + + def __connect(self): + try: + connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties) + except (MySQLdb.MySQLError, TypeError, AttributeError) as error: + return None, str(error) + else: + return connection, None + + def check(self): + def get_connection_properties(conf, extra_conf): + properties = dict() + if conf.get('user'): + properties['user'] = conf['user'] + if conf.get('pass'): + properties['passwd'] = conf['pass'] + if conf.get('socket'): + properties['unix_socket'] = conf['socket'] + elif conf.get('host'): + properties['host'] = conf['host'] + properties['port'] = int(conf.get('port', 3306)) + elif conf.get('my.cnf'): + if MySQLdb.__name__ == 'pymysql': + self.error('"my.cnf" parsing is not working for pymysql') + else: + properties['read_default_file'] = conf['my.cnf'] + if isinstance(extra_conf, dict) and extra_conf: + properties.update(extra_conf) + + return properties or None + + def is_valid_queries_dict(raw_queries, log_error): + """ + :param raw_queries: dict: + :param log_error: function: + :return: dict or None + + raw_queries is valid when: type <dict> and not empty after is_valid_query(for all queries) + """ + + def is_valid_query(query): + return all([isinstance(query, str), + query.startswith(('SELECT', 'select', 'SHOW', 'show'))]) + + if hasattr(raw_queries, 'keys') and raw_queries: + valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)]) + bad_queries = set(raw_queries) - set(valid_queries) + + if bad_queries: + log_error('Removed query(s): {queries}'.format(queries=bad_queries)) + return valid_queries + else: + log_error('Unsupported "queries" format. Must be not empty <dict>') + return None + + if not PY_MYSQL: + self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin') + return False + + # Preference: 1. "queries" from the configuration file 2. "queries" from the module + self.queries = self.__queries or self.queries + # Check if "self.queries" exist, not empty and all queries are in valid format + self.queries = is_valid_queries_dict(self.queries, self.error) + if not self.queries: + return None + + # Get connection properties + self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties) + if not self.__conn_properties: + self.error('Connection properties are missing') + return False + + # Create connection to the database + self.__connection, error = self.__connect() + if error: + self.error('Can\'t establish connection to MySQL: {error}'.format(error=error)) + return False + + try: + data = self._get_data() + except Exception as error: + self.error('_get_data() failed. 
Error: {error}'.format(error=error)) + return False + + if isinstance(data, dict) and data: + return True + self.error("_get_data() returned no data or type is not <dict>") + return False + + def _get_raw_data(self, description=None): + """ + Get raw data from MySQL server + :return: dict: fetchall() or (fetchall(), description) + """ + + if not self.__connection: + self.__connection, error = self.__connect() + if error: + return None + + raw_data = dict() + queries = dict(self.queries) + try: + with self.__connection as cursor: + for name, query in queries.items(): + try: + cursor.execute(query) + except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error: + if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)): + raise RuntimeError + self.error('Removed query: {name}[{query}]. Error: error'.format(name=name, + query=query, + error=error)) + self.queries.pop(name) + continue + else: + raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall() + self.__connection.commit() + except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError): + self.__connection.close() + self.__connection = None + return None + else: + return raw_data or None + + @staticmethod + def __is_error_critical(err_class, err_text): + return err_class == MySQLdb.OperationalError and all(['denied' not in err_text, + 'Unknown column' not in err_text]) diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py new file mode 100644 index 000000000..dd53fbc14 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +from threading import Thread +from time import sleep + +from third_party.monotonic import monotonic + +from bases.charts import Charts, ChartError, create_runtime_chart +from bases.collection import OldVersionCompatibility, safe_print +from bases.loggers import PythonDLimitedLogger + +RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \ + 'SET run_time = {elapsed}\n' \ + 'END\n' + + +class RuntimeCounters: + def __init__(self, configuration): + """ + :param configuration: <dict> + """ + self.FREQ = int(configuration.pop('update_every')) + self.START_RUN = 0 + self.NEXT_RUN = 0 + self.PREV_UPDATE = 0 + self.SINCE_UPDATE = 0 + self.ELAPSED = 0 + self.RETRIES = 0 + self.RETRIES_MAX = configuration.pop('retries') + self.PENALTY = 0 + self.RUNS = 1 + + def is_sleep_time(self): + return self.START_RUN < self.NEXT_RUN + + +class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object): + """ + Prototype of Service class. 
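MySQLService above accepts named SELECT/SHOW queries, validates them in check(), and has _get_raw_data() return a dict mapping each query name to its fetchall() rows, dropping queries that fail with a non-critical error. A hedged sketch of a hypothetical collector on top of it (query, chart, and dimension names are illustrative):

```python
# Hypothetical example (not part of this patch): a collector built on
# MySQLService charting the server's Questions counter.

from bases.FrameworkServices.MySQLService import MySQLService

QUERY_STATUS = 'SHOW GLOBAL STATUS;'


class Service(MySQLService):
    def __init__(self, configuration=None, name=None):
        MySQLService.__init__(self, configuration=configuration, name=name)
        self.order = ['questions']
        self.definitions = {
            'questions': {
                'options': [None, 'Questions', 'questions/s', 'queries', 'example.questions', 'line'],
                'lines': [['Questions', 'questions', 'incremental']],
            }
        }
        # check() keeps these unless the job configuration supplies its own 'queries'
        self.queries = dict(global_status=QUERY_STATUS)

    def _get_data(self):
        raw = self._get_raw_data()
        if not raw or 'global_status' not in raw:
            return None
        # fetchall() rows of SHOW GLOBAL STATUS are (Variable_name, Value) pairs
        status = dict(raw['global_status'])
        if 'Questions' not in status:
            return None
        return {'Questions': int(status['Questions'])}
```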
+ Implemented basic functionality to run jobs by `python.d.plugin` + """ + def __init__(self, configuration, name=''): + """ + :param configuration: <dict> + :param name: <str> + """ + Thread.__init__(self) + self.daemon = True + PythonDLimitedLogger.__init__(self) + OldVersionCompatibility.__init__(self) + self.configuration = configuration + self.order = list() + self.definitions = dict() + + self.module_name = self.__module__ + self.job_name = configuration.pop('job_name') + self.override_name = configuration.pop('override_name') + self.fake_name = None + + self._runtime_counters = RuntimeCounters(configuration=configuration) + self.charts = Charts(job_name=self.actual_name, + priority=configuration.pop('priority'), + cleanup=configuration.pop('chart_cleanup'), + get_update_every=self.get_update_every, + module_name=self.module_name) + + def __repr__(self): + return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__), + name=self.name) + + @property + def name(self): + if self.job_name: + return '_'.join([self.module_name, self.override_name or self.job_name]) + return self.module_name + + def actual_name(self): + return self.fake_name or self.name + + @property + def runs_counter(self): + return self._runtime_counters.RUNS + + @property + def update_every(self): + return self._runtime_counters.FREQ + + @update_every.setter + def update_every(self, value): + """ + :param value: <int> + :return: + """ + self._runtime_counters.FREQ = value + + def get_update_every(self): + return self.update_every + + def check(self): + """ + check() prototype + :return: boolean + """ + self.debug("job doesn't implement check() method. Using default which simply invokes get_data().") + data = self.get_data() + if data and isinstance(data, dict): + return True + self.debug('returned value is wrong: {0}'.format(data)) + return False + + @create_runtime_chart + def create(self): + for chart_name in self.order: + chart_config = self.definitions.get(chart_name) + + if not chart_config: + self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. " + "Skipping it.".format(chart_name=chart_name)) + continue + + # create chart + chart_params = [chart_name] + chart_config['options'] + try: + self.charts.add_chart(params=chart_params) + except ChartError as error: + self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name, + error=error)) + continue + + # add dimensions to chart + for dimension in chart_config['lines']: + try: + self.charts[chart_name].add_dimension(dimension) + except ChartError as error: + self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension, + error=error)) + continue + + # add variables to chart + if 'variables' in chart_config: + for variable in chart_config['variables']: + try: + self.charts[chart_name].add_variable(variable) + except ChartError as error: + self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable, + error=error)) + continue + + del self.order + del self.definitions + + # True if job has at least 1 chart else False + return bool(self.charts) + + def run(self): + """ + Runs job in thread. Handles retries. + Exits when job failed or timed out. 
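The run() loop below aligns each wake-up to the next wall-clock multiple of the job's update frequency and defers it by the accumulated retry penalty. A toy restatement of that arithmetic (simplified, not part of the patch):

```python
# Toy illustration of the wake-up alignment used by run() below:
# NEXT_RUN = START_RUN - (START_RUN % FREQ) + FREQ + PENALTY

def next_run(start_run, freq, penalty=0):
    # snap down to the current multiple of freq, then step one tick forward
    return start_run - (start_run % freq) + freq + penalty

assert next_run(103, 5) == 105              # 5s jobs tick on multiples of 5
assert next_run(100, 5) == 105              # an aligned start still waits for the *next* tick
assert next_run(103, 5, penalty=10) == 115  # failures push the next run back
```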
+ :return: None + """ + job = self._runtime_counters + self.debug('started, update frequency: {freq}, ' + 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES)) + + while True: + job.START_RUN = monotonic() + + job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY + + self.sleep_until_next_run() + + if job.PREV_UPDATE: + job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6) + + try: + updated = self.update(interval=job.SINCE_UPDATE) + except Exception as error: + self.error('update() unhandled exception: {error}'.format(error=error)) + updated = False + + job.RUNS += 1 + + if not updated: + if not self.manage_retries(): + return + else: + job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3) + job.PREV_UPDATE = job.START_RUN + job.RETRIES, job.PENALTY = 0, 0 + safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name, + since_last=job.SINCE_UPDATE, + elapsed=job.ELAPSED)) + self.debug('update => [{status}] (elapsed time: {elapsed}, ' + 'retries left: {retries})'.format(status='OK' if updated else 'FAILED', + elapsed=job.ELAPSED if updated else '-', + retries=job.RETRIES_MAX - job.RETRIES)) + + def update(self, interval): + """ + :return: + """ + data = self.get_data() + if not data: + self.debug('get_data() returned no data') + return False + elif not isinstance(data, dict): + self.debug('get_data() returned incorrect type data') + return False + + updated = False + + for chart in self.charts: + if chart.flags.obsoleted: + if chart.can_be_updated(data): + chart.refresh() + else: + continue + elif self.charts.cleanup and chart.penalty >= self.charts.cleanup: + chart.obsolete() + self.error("chart '{0}' was suppressed due to non updating".format(chart.name)) + continue + + ok = chart.update(data, interval) + if ok: + updated = True + + if not updated: + self.debug('none of the charts has been updated') + + return updated + + def manage_retries(self): + rc = self._runtime_counters + rc.RETRIES += 1 + if rc.RETRIES % 5 == 0: + rc.PENALTY = int(rc.RETRIES * self.update_every / 2) + if rc.RETRIES >= rc.RETRIES_MAX: + self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX)) + return False + return True + + def sleep_until_next_run(self): + job = self._runtime_counters + + # sleep() is interruptable + while job.is_sleep_time(): + sleep_time = job.NEXT_RUN - job.START_RUN + self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time, + freq=job.FREQ + job.PENALTY)) + sleep(sleep_time) + job.START_RUN = monotonic() + + def get_data(self): + return self._get_data() + + def _get_data(self): + raise NotImplementedError diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py new file mode 100644 index 000000000..e85455307 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py @@ -0,0 +1,309 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import socket + +try: + import ssl +except ImportError: + _TLS_SUPPORT = False +else: + _TLS_SUPPORT = True + +from bases.FrameworkServices.SimpleService import SimpleService + + +class SocketService(SimpleService): + def __init__(self, configuration=None, name=None): + self._sock = None + self._keep_alive = False + self.host = 'localhost' + 
self.port = None + self.unix_socket = None + self.dgram_socket = False + self.request = '' + self.tls = False + self.cert = None + self.key = None + self.__socket_config = None + self.__empty_request = "".encode() + SimpleService.__init__(self, configuration=configuration, name=name) + + def _socket_error(self, message=None): + if self.unix_socket is not None: + self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket, + message=message)) + else: + if self.__socket_config is not None: + _, _, _, _, sa = self.__socket_config + self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0], + port=sa[1], + message=message)) + else: + self.error('unknown socket: {0}'.format(message)) + + def _connect2socket(self, res=None): + """ + Connect to a socket, passing the result of getaddrinfo() + :return: boolean + """ + if res is None: + res = self.__socket_config + if res is None: + self.error("Cannot create socket to 'None':") + return False + + af, sock_type, proto, _, sa = res + try: + self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1])) + self._sock = socket.socket(af, sock_type, proto) + except socket.error as error: + self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0], + port=sa[1], + error=error)) + self._sock = None + self.__socket_config = None + return False + + if self.tls: + try: + self.debug('Encapsulating socket with TLS') + self._sock = ssl.wrap_socket(self._sock, + keyfile=self.key, + certfile=self.cert, + server_side=False, + cert_reqs=ssl.CERT_NONE) + except (socket.error, ssl.SSLError) as error: + self.error('Failed to wrap socket.') + self._disconnect() + self.__socket_config = None + return False + + try: + self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1])) + self._sock.connect(sa) + except (socket.error, ssl.SSLError) as error: + self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0], + port=sa[1], + error=error)) + self._disconnect() + self.__socket_config = None + return False + + self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1])) + self.__socket_config = res + return True + + def _connect2unixsocket(self): + """ + Connect to a unix socket, given its filename + :return: boolean + """ + if self.unix_socket is None: + self.error("cannot connect to unix socket 'None'") + return False + + try: + self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket)) + self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + self._sock.connect(self.unix_socket) + self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket)) + return True + except socket.error as error: + self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket, + error=error)) + + try: + self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket)) + self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self._sock.connect(self.unix_socket) + self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket)) + return True + except socket.error as error: + self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket, + error=error)) + self._sock = None + return False + + def _connect(self): + """ + Recreate socket and connect to it since sockets cannot be reused after closing + Available configurations are IPv6, IPv4 or UNIX socket + :return: + 
""" + try: + if self.unix_socket is not None: + self._connect2unixsocket() + + else: + if self.__socket_config is not None: + self._connect2socket() + else: + if self.dgram_socket: + sock_type = socket.SOCK_DGRAM + else: + sock_type = socket.SOCK_STREAM + for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type): + if self._connect2socket(res): + break + + except Exception: + self._sock = None + self.__socket_config = None + + if self._sock is not None: + self._sock.setblocking(0) + self._sock.settimeout(5) + self.debug('set socket timeout to: {0}'.format(self._sock.gettimeout())) + + def _disconnect(self): + """ + Close socket connection + :return: + """ + if self._sock is not None: + try: + self.debug('closing socket') + self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all + self._sock.close() + except Exception: + pass + self._sock = None + + def _send(self, request=None): + """ + Send request. + :return: boolean + """ + # Send request if it is needed + if self.request != self.__empty_request: + try: + self.debug('sending request: {0}'.format(request or self.request)) + self._sock.send(request or self.request) + except Exception as error: + self._socket_error('error sending request: {0}'.format(error)) + self._disconnect() + return False + return True + + def _receive(self, raw=False): + """ + Receive data from socket + :param raw: set `True` to return bytes + :type raw: bool + :return: decoded str or raw bytes + :rtype: str/bytes + """ + data = "" if not raw else b"" + while True: + self.debug('receiving response') + try: + buf = self._sock.recv(4096) + except Exception as error: + self._socket_error('failed to receive response: {0}'.format(error)) + self._disconnect() + break + + if buf is None or len(buf) == 0: # handle server disconnect + if data == "" or data == b"": + self._socket_error('unexpectedly disconnected') + else: + self.debug('server closed the connection') + self._disconnect() + break + + self.debug('received data') + data += buf.decode('utf-8', 'ignore') if not raw else buf + if self._check_raw_data(data): + break + + self.debug('final response: {0}'.format(data)) + return data + + def _get_raw_data(self, raw=False, request=None): + """ + Get raw data with low-level "socket" module. + :param raw: set `True` to return bytes + :type raw: bool + :return: decoded data (str) or raw data (bytes) + :rtype: str/bytes + """ + if self._sock is None: + self._connect() + if self._sock is None: + return None + + # Send request if it is needed + if not self._send(request): + return None + + data = self._receive(raw) + + if not self._keep_alive: + self._disconnect() + + return data + + @staticmethod + def _check_raw_data(data): + """ + Check if all data has been gathered from socket + :param data: str + :return: boolean + """ + return bool(data) + + def _parse_config(self): + """ + Parse configuration data + :return: boolean + """ + try: + self.unix_socket = str(self.configuration['socket']) + except (KeyError, TypeError): + self.debug('No unix socket specified. Trying TCP/IP socket.') + self.unix_socket = None + try: + self.host = str(self.configuration['host']) + except (KeyError, TypeError): + self.debug('No host specified. Using: "{0}"'.format(self.host)) + try: + self.port = int(self.configuration['port']) + except (KeyError, TypeError): + self.debug('No port specified. 
Using: "{0}"'.format(self.port)) + + self.tls = bool(self.configuration.get('tls', self.tls)) + if self.tls and not _TLS_SUPPORT: + self.warning('TLS requested but no TLS module found, disabling TLS support.') + self.tls = False + if _TLS_SUPPORT and not self.tls: + self.debug('No TLS preference specified, not using TLS.') + + if self.tls and _TLS_SUPPORT: + self.key = self.configuration.get('tls_key_file') + self.cert = self.configuration.get('tls_cert_file') + if not self.cert: + # If there's not a valid certificate, clear the key too. + self.debug('No valid TLS client certificate configuration found.') + self.key = None + self.cert = None + elif not self.key: + # If a key isn't listed, the config may still be + # valid, because there may be a key attached to the + # certificate. + self.info('No TLS client key specified, assuming it\'s attached to the certificate.') + self.key = None + + try: + self.request = str(self.configuration['request']) + except (KeyError, TypeError): + self.debug('No request specified. Using: "{0}"'.format(self.request)) + + self.request = self.request.encode() + + def check(self): + self._parse_config() + return SimpleService.check(self) diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py new file mode 100644 index 000000000..856f38851 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import urllib3 + +from bases.FrameworkServices.SimpleService import SimpleService + +try: + urllib3.disable_warnings() +except AttributeError: + pass + + +class UrlService(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.url = self.configuration.get('url') + self.user = self.configuration.get('user') + self.password = self.configuration.get('pass') + self.proxy_user = self.configuration.get('proxy_user') + self.proxy_password = self.configuration.get('proxy_pass') + self.proxy_url = self.configuration.get('proxy_url') + self.method = self.configuration.get('method', 'GET') + self.header = self.configuration.get('header') + self.request_timeout = self.configuration.get('timeout', 1) + self.tls_verify = self.configuration.get('tls_verify') + self.tls_ca_file = self.configuration.get('tls_ca_file') + self.tls_key_file = self.configuration.get('tls_key_file') + self.tls_cert_file = self.configuration.get('tls_cert_file') + self._manager = None + + def __make_headers(self, **header_kw): + user = header_kw.get('user') or self.user + password = header_kw.get('pass') or self.password + proxy_user = header_kw.get('proxy_user') or self.proxy_user + proxy_password = header_kw.get('proxy_pass') or self.proxy_password + custom_header = header_kw.get('header') or self.header + header_params = dict(keep_alive=True) + proxy_header_params = dict() + if user and password: + header_params['basic_auth'] = '{user}:{password}'.format(user=user, + password=password) + if proxy_user and proxy_password: + proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user, + password=proxy_password) + try: + header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params) + except TypeError as error: + 
self.error('build_header() error: {error}'.format(error=error)) + return None, None + else: + header.update(custom_header or dict()) + return header, proxy_header + + def _build_manager(self, **header_kw): + header, proxy_header = self.__make_headers(**header_kw) + if header is None or proxy_header is None: + return None + proxy_url = header_kw.get('proxy_url') or self.proxy_url + if proxy_url: + manager = urllib3.ProxyManager + params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header) + else: + manager = urllib3.PoolManager + params = dict(headers=header) + tls_cert_file = self.tls_cert_file + if tls_cert_file: + params['cert_file'] = tls_cert_file + # NOTE: key_file is useless without cert_file, but + # cert_file may include the key as well. + tls_key_file = self.tls_key_file + if tls_key_file: + params['key_file'] = tls_key_file + tls_ca_file = self.tls_ca_file + if tls_ca_file: + params['ca_certs'] = tls_ca_file + try: + url = header_kw.get('url') or self.url + if url.startswith('https') and not self.tls_verify and not tls_ca_file: + params['ca_certs'] = None + return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params) + return manager(**params) + except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error: + self.error('build_manager() error:', str(error)) + return None + + def _get_raw_data(self, url=None, manager=None): + """ + Get raw data from http request + :return: str + """ + try: + status, data = self._get_raw_data_with_status(url, manager) + except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error: + self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error)) + return None + + if status == 200: + return data + else: + self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status)) + return None + + def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True): + """ + Get status and response body content from http request. Does not catch exceptions + :return: int, str + """ + url = url or self.url + manager = manager or self._manager + response = manager.request(method=self.method, + url=url, + timeout=self.request_timeout, + retries=retries, + headers=manager.headers, + redirect=redirect) + if isinstance(response.data, str): + return response.status, response.data + return response.status, response.data.decode() + + def check(self): + """ + Format configuration data and try to connect to server + :return: boolean + """ + if not (self.url and isinstance(self.url, str)): + self.error('URL is not defined or type is not <str>') + return False + + self._manager = self._build_manager() + if not self._manager: + return False + + try: + data = self._get_data() + except Exception as error: + self.error('_get_data() failed. Url: {url}. 
Error: {error}'.format(url=self.url, error=error)) + return False + + if isinstance(data, dict) and data: + return True + self.error('_get_data() returned no data or type is not <dict>') + return False diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/python.d.plugin/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py new file mode 100644 index 000000000..2963739ec --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/charts.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.collection import safe_print + +CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden'] +DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden'] +VARIABLE_PARAMS = ['id', 'value'] + +CHART_TYPES = ['line', 'area', 'stacked'] +DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row'] + +CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n' +CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \ + "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n" +CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \ + "{chart_type} {priority} {update_every} '{hidden} obsolete'\n" + + +DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n" +DIMENSION_SET = "SET '{id}' = {value}\n" + +CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n" + +RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \ + "netdata.pythond_runtime line 145000 {update_every}\n" \ + "DIMENSION run_time 'run time' absolute 1 1\n" + + +def create_runtime_chart(func): + """ + Calls a wrapped function, then prints runtime chart to stdout. + + Used as a decorator for SimpleService.create() method. + The whole point of making 'create runtime chart' functionality as a decorator was + to help users who re-implements create() in theirs classes. + + :param func: class method + :return: + """ + def wrapper(*args, **kwargs): + self = args[0] + ok = func(*args, **kwargs) + if ok: + safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name, + update_every=self._runtime_counters.FREQ)) + return ok + return wrapper + + +class ChartError(Exception): + """Base-class for all exceptions raised by this module""" + + +class DuplicateItemError(ChartError): + """Occurs when user re-adds a chart or a dimension that has already been added""" + + +class ItemTypeError(ChartError): + """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class""" + + +class ItemValueError(ChartError): + """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class""" + + +class Charts: + """Represent a collection of charts + + All charts stored in a dict. + Chart is a instance of Chart class. 
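The format strings above are the netdata external-plugin text protocol that every job writes to stdout: a CHART line plus DIMENSION lines declare a chart once, then each collection cycle emits BEGIN/SET/END with the microseconds elapsed since the previous update. An illustration with invented values (chart id, priority, and update_every would come from the job configuration):

```
CHART example_local.random '' 'A random number' 'value' 'random' 'example_local.random' line 60000 1 '' 'python.d.plugin' 'example'
DIMENSION 'random1' 'random1' absolute 1 1 ''
BEGIN example_local.random 1000000
SET 'random1' = 42
END
```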
+ Charts adding must be done using Charts.add_chart() method only""" + def __init__(self, job_name, priority, cleanup, get_update_every, module_name): + """ + :param job_name: <bound method> + :param priority: <int> + :param get_update_every: <bound method> + """ + self.job_name = job_name + self.priority = priority + self.cleanup = cleanup + self.get_update_every = get_update_every + self.module_name = module_name + self.charts = dict() + + def __len__(self): + return len(self.charts) + + def __iter__(self): + return iter(self.charts.values()) + + def __repr__(self): + return 'Charts({0})'.format(self) + + def __str__(self): + return str([chart for chart in self.charts]) + + def __contains__(self, item): + return item in self.charts + + def __getitem__(self, item): + return self.charts[item] + + def __delitem__(self, key): + del self.charts[key] + + def __bool__(self): + return bool(self.charts) + + def __nonzero__(self): + return self.__bool__() + + def add_chart(self, params): + """ + Create Chart instance and add it to the dict + + Manually adds job name, priority and update_every to params. + :param params: <list> + :return: + """ + params = [self.job_name()] + params + new_chart = Chart(params) + + new_chart.params['update_every'] = self.get_update_every() + new_chart.params['priority'] = self.priority + new_chart.params['module_name'] = self.module_name + + self.priority += 1 + self.charts[new_chart.id] = new_chart + + return new_chart + + def active_charts(self): + return [chart.id for chart in self if not chart.flags.obsoleted] + + +class Chart: + """Represent a chart""" + def __init__(self, params): + """ + :param params: <list> + """ + if not isinstance(params, list): + raise ItemTypeError("'chart' must be a list type") + if not len(params) >= 8: + raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS)) + + self.params = dict(zip(CHART_PARAMS, (p or str() for p in params))) + self.name = '{type}.{id}'.format(type=self.params['type'], + id=self.params['id']) + if self.params.get('chart_type') not in CHART_TYPES: + self.params['chart_type'] = 'absolute' + hidden = str(self.params.get('hidden', '')) + self.params['hidden'] = 'hidden' if hidden == 'hidden' else '' + + self.dimensions = list() + self.variables = set() + self.flags = ChartFlags() + self.penalty = 0 + + def __getattr__(self, item): + try: + return self.params[item] + except KeyError: + raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self), + attr=item)) + + def __repr__(self): + return 'Chart({0})'.format(self.id) + + def __str__(self): + return self.id + + def __iter__(self): + return iter(self.dimensions) + + def __contains__(self, item): + return item in [dimension.id for dimension in self.dimensions] + + def add_variable(self, variable): + """ + :param variable: <list> + :return: + """ + self.variables.add(ChartVariable(variable)) + + def add_dimension(self, dimension): + """ + :param dimension: <list> + :return: + """ + dim = Dimension(dimension) + + if dim.id in self: + raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id, + chart=self.name)) + self.refresh() + self.dimensions.append(dim) + return dim + + def hide_dimension(self, dimension_id, reverse=False): + if dimension_id in self: + idx = self.dimensions.index(dimension_id) + dimension = self.dimensions[idx] + dimension.params['hidden'] = 'hidden' if not reverse else str() + self.refresh() + + def create(self): + """ + :return: + """ + chart = 
CHART_CREATE.format(**self.params) + dimensions = ''.join([dimension.create() for dimension in self.dimensions]) + variables = ''.join([var.set(var.value) for var in self.variables if var]) + + self.flags.push = False + self.flags.created = True + + safe_print(chart + dimensions + variables) + + def can_be_updated(self, data): + for dim in self.dimensions: + if dim.get_value(data) is not None: + return True + return False + + def update(self, data, interval): + updated_dimensions, updated_variables = str(), str() + + for dim in self.dimensions: + value = dim.get_value(data) + if value is not None: + updated_dimensions += dim.set(value) + + for var in self.variables: + value = var.get_value(data) + if value is not None: + updated_variables += var.set(value) + + if updated_dimensions: + since_last = interval if self.flags.updated else 0 + + if self.flags.push: + self.create() + + chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last) + safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n') + + self.flags.updated = True + self.penalty = 0 + else: + self.penalty += 1 + self.flags.updated = False + + return bool(updated_dimensions) + + def obsolete(self): + self.flags.obsoleted = True + if self.flags.created: + safe_print(CHART_OBSOLETE.format(**self.params)) + + def refresh(self): + self.penalty = 0 + self.flags.push = True + self.flags.obsoleted = False + + +class Dimension: + """Represent a dimension""" + def __init__(self, params): + """ + :param params: <list> + """ + if not isinstance(params, list): + raise ItemTypeError("'dimension' must be a list type") + if not params: + raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS)) + + self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params))) + self.params['name'] = self.params.get('name') or self.params['id'] + + if self.params.get('algorithm') not in DIMENSION_ALGORITHMS: + self.params['algorithm'] = 'absolute' + if not isinstance(self.params.get('multiplier'), int): + self.params['multiplier'] = 1 + if not isinstance(self.params.get('divisor'), int): + self.params['divisor'] = 1 + self.params.setdefault('hidden', '') + + def __getattr__(self, item): + try: + return self.params[item] + except KeyError: + raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self), + attr=item)) + + def __repr__(self): + return 'Dimension({0})'.format(self.id) + + def __str__(self): + return self.id + + def __eq__(self, other): + if not isinstance(other, Dimension): + return self.id == other + return self.id == other.id + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(repr(self)) + + def create(self): + return DIMENSION_CREATE.format(**self.params) + + def set(self, value): + """ + :param value: <str>: must be a digit + :return: + """ + return DIMENSION_SET.format(id=self.id, + value=value) + + def get_value(self, data): + try: + return int(data[self.id]) + except (KeyError, TypeError): + return None + + +class ChartVariable: + """Represent a chart variable""" + def __init__(self, params): + """ + :param params: <list> + """ + if not isinstance(params, list): + raise ItemTypeError("'variable' must be a list type") + if not params: + raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS)) + + self.params = dict(zip(VARIABLE_PARAMS, params)) + self.params.setdefault('value', None) + + def __getattr__(self, item): + try: + return self.params[item] + except 
KeyError: + raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self), + attr=item)) + + def __bool__(self): + return self.value is not None + + def __nonzero__(self): + return self.__bool__() + + def __repr__(self): + return 'ChartVariable({0})'.format(self.id) + + def __str__(self): + return self.id + + def __eq__(self, other): + if isinstance(other, ChartVariable): + return self.id == other.id + return False + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(repr(self)) + + def set(self, value): + return CHART_VARIABLE_SET.format(id=self.id, + value=value) + + def get_value(self, data): + try: + return int(data[self.id]) + except (KeyError, TypeError): + return None + + +class ChartFlags: + def __init__(self): + self.push = True + self.created = False + self.updated = False + self.obsoleted = False diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py new file mode 100644 index 000000000..479a3b610 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/collection.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import os + +PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':') + +CHART_BEGIN = 'BEGIN {0} {1}\n' +CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n" +DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n" +DIMENSION_SET = "SET '{0}' = {1}\n" + + +def setdefault_values(config, base_dict): + for key, value in base_dict.items(): + config.setdefault(key, value) + return config + + +def run_and_exit(func): + def wrapper(*args, **kwargs): + func(*args, **kwargs) + exit(1) + return wrapper + + +def on_try_except_finally(on_except=(None, ), on_finally=(None, )): + except_func = on_except[0] + finally_func = on_finally[0] + + def decorator(func): + def wrapper(*args, **kwargs): + try: + func(*args, **kwargs) + except Exception: + if except_func: + except_func(*on_except[1:]) + finally: + if finally_func: + finally_func(*on_finally[1:]) + return wrapper + return decorator + + +def static_vars(**kwargs): + def decorate(func): + for k in kwargs: + setattr(func, k, kwargs[k]) + return func + return decorate + + +@on_try_except_finally(on_except=(exit, 1)) +def safe_print(*msg): + """ + :param msg: + :return: + """ + print(''.join(msg)) + + +def find_binary(binary): + """ + :param binary: <str> + :return: + """ + for directory in PATH: + binary_name = '/'.join([directory, binary]) + if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK): + return binary_name + return None + + +def read_last_line(f): + with open(f, 'rb') as opened: + opened.seek(-2, 2) + while opened.read(1) != b'\n': + opened.seek(-2, 1) + if opened.tell() == 0: + break + result = opened.readline() + return result.decode() + + +class OldVersionCompatibility: + + def __init__(self): + self._data_stream = str() + + def begin(self, type_id, microseconds=0): + """ + :param type_id: <str> + :param microseconds: <str> or <int>: must be a digit + :return: + """ + self._data_stream += CHART_BEGIN.format(type_id, microseconds) + + def set(self, dim_id, value): + """ + :param dim_id: <str> + :param value: <int> or <str>: must be a digit + :return: + """ + self._data_stream += DIMENSION_SET.format(dim_id, value) + + def end(self): + self._data_stream += 'END\n' + + def chart(self, 
type_id, name='', title='', units='', family='', category='', chart_type='line', + priority='', update_every=''): + """ + :param type_id: <str> + :param name: <str> + :param title: <str> + :param units: <str> + :param family: <str> + :param category: <str> + :param chart_type: <str> + :param priority: <str> or <int> + :param update_every: <str> or <int> + :return: + """ + self._data_stream += CHART_CREATE.format(type_id, name, title, units, + family, category, chart_type, + priority, update_every) + + def dimension(self, dim_id, name=None, algorithm="absolute", multiplier=1, divisor=1, hidden=False): + """ + :param dim_id: <str> + :param name: <str> or None + :param algorithm: <str> + :param multiplier: <str> or <int>: must be a digit + :param divisor: <str> or <int>: must be a digit + :param hidden: <str>: literally "hidden" or "" + :return: + """ + self._data_stream += DIMENSION_CREATE.format(dim_id, name or dim_id, algorithm, + multiplier, divisor, hidden or str()) + + @on_try_except_finally(on_except=(exit, 1)) + def commit(self): + print(self._data_stream) + self._data_stream = str() diff --git a/collectors/python.d.plugin/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py new file mode 100644 index 000000000..9eb268ce7 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/loaders.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Description: +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import types + +from sys import version_info + +PY_VERSION = version_info[:2] + +try: + if PY_VERSION > (3, 1): + from pyyaml3 import SafeLoader as YamlSafeLoader + else: + from pyyaml2 import SafeLoader as YamlSafeLoader +except ImportError: + from yaml import SafeLoader as YamlSafeLoader + + +if PY_VERSION > (3, 1): + from importlib.machinery import SourceFileLoader + DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' +else: + from imp import load_source as SourceFileLoader + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + +try: + from collections import OrderedDict +except ImportError: + from third_party.ordereddict import OrderedDict + + +def dict_constructor(loader, node): + return OrderedDict(loader.construct_pairs(node)) + + +def safe_load(stream): + loader = YamlSafeLoader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + + +YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor) + + +class YamlOrderedLoader: + @staticmethod + def load_config_from_file(file_name): + opened, loaded = False, False + try: + stream = open(file_name, 'r') + opened = True + loader = YamlSafeLoader(stream) + loaded = True + parsed = loader.get_single_data() or dict() + except Exception as error: + return dict(), error + else: + return parsed, None + finally: + if opened: + stream.close() + if loaded: + loader.dispose() + + +class SourceLoader: + @staticmethod + def load_module_from_file(name, path): + try: + loaded = SourceFileLoader(name, path) + if isinstance(loaded, types.ModuleType): + return loaded, None + return loaded.load_module(), None + except Exception as error: + return None, error + + +class ModuleAndConfigLoader(YamlOrderedLoader, SourceLoader): + pass diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py new file mode 100644 index 000000000..39be77a79 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/bases/loggers.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +# Description: +# 
Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import logging +import traceback + +from sys import exc_info + +try: + from time import monotonic as time +except ImportError: + from time import time + +from bases.collection import on_try_except_finally + + +LOGGING_LEVELS = {'CRITICAL': 50, + 'ERROR': 40, + 'WARNING': 30, + 'INFO': 20, + 'DEBUG': 10, + 'NOTSET': 0} + +DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s' +DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' + +PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s: %(job_name)s: %(message)s' +PYTHON_D_LOG_NAME = 'python.d' + + +def limiter(log_max_count=30, allowed_in_seconds=60): + def on_decorator(func): + + def on_call(*args): + current_time = args[0]._runtime_counters.START_RUN + lc = args[0]._logger_counters + + if lc.logged and lc.logged % log_max_count == 0: + if current_time - lc.time_to_compare <= allowed_in_seconds: + lc.dropped += 1 + return + lc.time_to_compare = current_time + + lc.logged += 1 + func(*args) + + return on_call + return on_decorator + + +def add_traceback(func): + def on_call(*args): + self = args[0] + + if not self.log_traceback: + func(*args) + else: + if exc_info()[0]: + func(*args) + func(self, traceback.format_exc()) + else: + func(*args) + + return on_call + + +class LoggerCounters: + def __init__(self): + self.logged = 0 + self.dropped = 0 + self.time_to_compare = time() + + def __repr__(self): + return 'LoggerCounter(logged: {logged}, dropped: {dropped})'.format(logged=self.logged, + dropped=self.dropped) + + +class BaseLogger(object): + def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT, + handler=logging.StreamHandler): + """ + :param logger_name: <str> + :param log_fmt: <str> + :param date_fmt: <str> + :param handler: <logging handler> + """ + self.logger = logging.getLogger(logger_name) + if not self.has_handlers(): + self.severity = 'INFO' + self.logger.addHandler(handler()) + self.set_formatter(fmt=log_fmt, date_fmt=date_fmt) + + def __repr__(self): + return '<Logger: {name})>'.format(name=self.logger.name) + + def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT): + """ + :param fmt: <str> + :param date_fmt: <str> + :return: + """ + if self.has_handlers(): + self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt)) + + def has_handlers(self): + return self.logger.handlers + + @property + def severity(self): + return self.logger.getEffectiveLevel() + + @severity.setter + def severity(self, level): + """ + :param level: <str> or <int> + :return: + """ + if level in LOGGING_LEVELS: + self.logger.setLevel(LOGGING_LEVELS[level]) + + def debug(self, *msg, **kwargs): + self.logger.debug(' '.join(map(str, msg)), **kwargs) + + def info(self, *msg, **kwargs): + self.logger.info(' '.join(map(str, msg)), **kwargs) + + def warning(self, *msg, **kwargs): + self.logger.warning(' '.join(map(str, msg)), **kwargs) + + def error(self, *msg, **kwargs): + self.logger.error(' '.join(map(str, msg)), **kwargs) + + def alert(self, *msg, **kwargs): + self.logger.critical(' '.join(map(str, msg)), **kwargs) + + @on_try_except_finally(on_finally=(exit, 1)) + def fatal(self, *msg, **kwargs): + self.logger.critical(' '.join(map(str, msg)), **kwargs) + + +class PythonDLogger(object): + def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT): + """ + :param logger_name: <str> + :param log_fmt: <str> + """ + self.logger = 
BaseLogger(logger_name, log_fmt=log_fmt) + self.module_name = 'plugin' + self.job_name = 'main' + self._logger_counters = LoggerCounters() + + _LOG_TRACEBACK = False + + @property + def log_traceback(self): + return PythonDLogger._LOG_TRACEBACK + + @log_traceback.setter + def log_traceback(self, value): + PythonDLogger._LOG_TRACEBACK = value + + def debug(self, *msg): + self.logger.debug(*msg, extra={'module_name': self.module_name, + 'job_name': self.job_name or self.module_name}) + + def info(self, *msg): + self.logger.info(*msg, extra={'module_name': self.module_name, + 'job_name': self.job_name or self.module_name}) + + def warning(self, *msg): + self.logger.warning(*msg, extra={'module_name': self.module_name, + 'job_name': self.job_name or self.module_name}) + + @add_traceback + def error(self, *msg): + self.logger.error(*msg, extra={'module_name': self.module_name, + 'job_name': self.job_name or self.module_name}) + + @add_traceback + def alert(self, *msg): + self.logger.alert(*msg, extra={'module_name': self.module_name, + 'job_name': self.job_name or self.module_name}) + + def fatal(self, *msg): + self.logger.fatal(*msg, extra={'module_name': self.module_name, + 'job_name': self.job_name or self.module_name}) + + +class PythonDLimitedLogger(PythonDLogger): + @limiter() + def info(self, *msg): + PythonDLogger.info(self, *msg) + + @limiter() + def warning(self, *msg): + PythonDLogger.warning(self, *msg) + + @limiter() + def error(self, *msg): + PythonDLogger.error(self, *msg) + + @limiter() + def alert(self, *msg): + PythonDLogger.alert(self, *msg) diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py new file mode 100644 index 000000000..4d560e438 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py @@ -0,0 +1,316 @@ +# SPDX-License-Identifier: MIT + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.11' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
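Back in bases/loggers.py, the limiter() decorator throttles chatty jobs: after every log_max_count-th message it drops further messages until allowed_in_seconds have passed since the last checkpoint. A simplified standalone model of that windowing (it uses call time where the decorator uses the job's START_RUN; not part of the patch):

```python
# Simplified model of the limiter() decorator in bases/loggers.py: after every
# max_count-th message, drop messages until `window` seconds have elapsed.

from time import monotonic


class RateLimitedLog:
    def __init__(self, max_count=30, window=60):
        self.max_count = max_count
        self.window = window
        self.logged = 0
        self.dropped = 0
        self.checkpoint = monotonic()

    def emit(self, msg):
        now = monotonic()
        if self.logged and self.logged % self.max_count == 0:
            if now - self.checkpoint <= self.window:
                self.dropped += 1   # still inside the window: swallow it
                return
            self.checkpoint = now   # window expired: open a new one
        self.logged += 1
        print(msg)
```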
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. 
+ """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. + """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py new file mode 100644 index 000000000..6b41b8067 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py new file mode 100644 index 000000000..8ad1b90a7 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py @@ -0,0 +1,676 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are 
more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise 
ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = 
-inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P<year>[0-9][0-9][0-9][0-9]) + -(?P<month>[0-9][0-9]?) + -(?P<day>[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P<hour>[0-9][0-9]?) + :(?P<minute>[0-9][0-9]) + :(?P<second>[0-9][0-9]) + (?:\.(?P<fraction>[0-9]*))? + (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?) + (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
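+        # Editorial aside, not from the upstream module: both !!omap and
+        # !!pairs are deliberately constructed as a list of (key, value)
+        # tuples rather than a dict, so document order is preserved.
+        # A minimal sketch, assuming the vendored package is importable
+        # as `yaml`:
+        #
+        #     >>> import yaml
+        #     >>> yaml.safe_load(u"!!omap\n- a: 1\n- b: 2")
+        #     [('a', 1), ('b', 2)]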
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return 
tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... 
] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. + if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py new file mode 100644 index 000000000..2858ab479 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + 
SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py new file mode 100644 index 000000000..3685cbeeb --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, 
stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py new file mode 100644 index 000000000..9a460a0fd --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py @@ -0,0 +1,1141 @@ +# SPDX-License-Identifier: MIT + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overridden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. + self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. 
+ # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for the next few events before emitting. + + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
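+    # Editorial aside, not from the upstream module: the handlers below
+    # decide when the explicit "---" and "..." document markers are
+    # written; the parsing side splits a stream on the same markers.
+    # A minimal sketch, assuming the vendored package is importable as
+    # `yaml`:
+    #
+    #     >>> import yaml
+    #     >>> list(yaml.safe_load_all(u"a: 1\n---\nb: 2\n"))
+    #     [{'a': 1}, {'b': 2}]
+    #
+    # Passing explicit_start=True to dump() forces the leading "---" that
+    # expect_document_start() otherwise omits for a single document.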
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. + + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
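+    # Editorial aside, not from the upstream module: expect_node() above
+    # routes into these flow handlers when the event requests flow style,
+    # when the emitter is already inside a flow collection, or when the
+    # collection is empty. With this vintage of PyYAML, the default
+    # default_flow_style=None lets the representer pick flow style for
+    # collections of plain scalars. A minimal sketch, assuming the
+    # vendored package is importable as `yaml`:
+    #
+    #     >>> import yaml
+    #     >>> yaml.dump([1, 2, 3])
+    #     '[1, 2, 3]\n'
+    #     >>> yaml.dump([1, 2, 3], default_flow_style=False)
+    #     '- 1\n- 2\n- 3\n'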
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
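+    # Editorial aside, not from the upstream module: note the "indentless"
+    # case just below -- a block sequence nested inside a block mapping is
+    # not indented relative to its key, which is why this emitter renders,
+    # e.g. (assuming the vendored package is importable as `yaml`):
+    #
+    #     >>> import yaml
+    #     >>> print(yaml.dump({'seq': [1, 2]}, default_flow_style=False))
+    #     seq:
+    #     - 1
+    #     - 2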
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
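+    # Editorial aside, not from the upstream module: process_anchor() below
+    # writes the "&"/"*" indicators that let one node be referenced several
+    # times; the representer assigns anchors such as id001 to shared objects
+    # automatically. A minimal sketch, assuming the vendored package is
+    # importable as `yaml`:
+    #
+    #     >>> import yaml
+    #     >>> shared = [1, 2]
+    #     >>> print(yaml.dump({'a': shared, 'b': shared}, default_flow_style=False))
+    #     a: &id001
+    #     - 1
+    #     - 2
+    #     b: *id001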
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special characters, are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
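+        # Editorial aside, not from the upstream module: the BOM is only
+        # emitted for UTF-16 output; UTF-8 streams are left BOM-free.
+        # A minimal sketch, assuming the vendored package is importable
+        # as `yaml` -- dumping with an explicit encoding returns bytes
+        # that start with the BOM:
+        #
+        #     >>> import yaml
+        #     >>> yaml.dump({'a': 1}, encoding='utf-16-be')[:2]
+        #     '\xfe\xff'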
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. + + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or 
(self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + 
self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py new file mode 100644 index 000000000..5466be721 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py new file mode 100644 index 000000000..283452add --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: MIT + +# Abstract classes. + +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
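+# A hedged illustration of the event interface above (an editorial doctest
+# sketch, not from the upstream sources; it assumes only the classes defined
+# in this module):
+#
+#   >>> ScalarEvent(anchor=None, tag=u'!', implicit=(False, False),
+#   ...             value=u'hi')
+#   ScalarEvent(anchor=None, tag=u'!', implicit=(False, False), value=u'hi')
+#
+# Event.__repr__ reports only the attributes the concrete event actually
+# carries, which is why it probes 'anchor', 'tag', 'implicit' and 'value'
+# with hasattr() instead of assuming them.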
+ +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass + +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py new file mode 100644 index 000000000..1c195531f --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py new file mode 100644 index 000000000..ed2a1b43e --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: MIT + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '<empty>' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... 
') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py new file mode 100644 index 000000000..97ba08337 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py @@ -0,0 +1,590 @@ +# SPDX-License-Identifier: MIT + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
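+#
+# As a worked reading of the grammar above (an editorial sketch, not part of
+# the upstream comment): the stream `- {a: 1}` is a single implicit_document;
+# its block_node is a block_sequence whose one BLOCK-ENTRY holds a
+# flow_mapping, which contains one flow_mapping_entry (KEY scalar `a`,
+# VALUE scalar `1`).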
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+    pass
+
+class Parser(object):
+    # Since writing a recursive-descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {
+        u'!':   u'!',
+        u'!!':  u'tag:yaml.org,2002:',
+    }
+
+    def __init__(self):
+        self.current_event = None
+        self.yaml_version = None
+        self.tag_handles = {}
+        self.states = []
+        self.marks = []
+        self.state = self.parse_stream_start
+
+    def dispose(self):
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def check_event(self, *choices):
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # Get the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
+
+    # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+
+        # Parse the stream start.
+        token = self.get_token()
+        event = StreamStartEvent(token.start_mark, token.end_mark,
+                encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+
+        # Parse an implicit document.
+        if not self.check_token(DirectiveToken, DocumentStartToken,
+                StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=False)
+
+            # Prepare the next state.
+ self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '<document start>', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. + self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + 
elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. + event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? 
+    #                     (VALUE block_node_or_indentless_sequence?)?)*
+    #                     BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        if self.check_token(KeyToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block mapping", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence ::= FLOW-SEQUENCE-START
+    #                   (flow_sequence_entry FLOW-ENTRY)*
+    #                   flow_sequence_entry?
+    #                   FLOW-SEQUENCE-END
+    # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while the production rules for flow_sequence_entry and
+    # flow_mapping_entry are the same, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
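+    # For example (an editorial sketch, not upstream text): the flow sequence
+    # `[a: 1, b]` wraps the `a: 1` pair in MappingStartEvent/MappingEndEvent,
+    # so it loads as the Python list [{'a': 1}, 'b'].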
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
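+    # For instance (an editorial sketch, not upstream text): in the flow
+    # mapping `{a, b: 1}` the lone `a` is a flow_mapping_entry with an empty
+    # value, so the document loads as {'a': None, 'b': 1}.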
+
+    def parse_flow_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        if not self.check_token(FlowMappingEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow mapping", self.marks[-1],
+                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
+            if self.check_token(KeyToken):
+                token = self.get_token()
+                if not self.check_token(ValueToken,
+                        FlowEntryToken, FlowMappingEndToken):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif not self.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark):
+        return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
new file mode 100644
index 000000000..8d422954e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(index=0) - return the character `index` positions past the current one.
+#   reader.prefix(length=1) - return the next `length` characters.
+#   reader.forward(length=1) - move the current position `length` characters ahead.
+#   reader.index - the number of the current character.
+#   reader.line, reader.column - the line and the column of the current character.
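+#
+# A minimal usage sketch (an editorial, hedged doctest; it relies only on
+# the API described above):
+#
+#   >>> reader = Reader(u'key: value\n')
+#   >>> reader.peek()                 # u'k' -- look ahead, do not consume
+#   >>> reader.prefix(3)              # u'key'
+#   >>> reader.forward(3)             # consume three characters
+#   >>> reader.index, reader.line, reader.column
+#   (3, 0, 3)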
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. + + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "<unicode string>" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "<string>" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "<file>") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special 
characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py new file mode 100644 index 000000000..0a1404eca --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py @@ -0,0 +1,485 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 
'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data in [None, ()]: + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + 
value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def 
represent_unicode(self, data):
+        tag = None
+        try:
+            data.encode('ascii')
+            tag = u'tag:yaml.org,2002:python/unicode'
+        except UnicodeEncodeError:
+            tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data)
+
+    def represent_long(self, data):
+        tag = u'tag:yaml.org,2002:int'
+        if int(data) is not data:
+            tag = u'tag:yaml.org,2002:python/long'
+        return self.represent_scalar(tag, unicode(data))
+
+    def represent_complex(self, data):
+        if data.imag == 0.0:
+            data = u'%r' % data.real
+        elif data.real == 0.0:
+            data = u'%rj' % data.imag
+        elif data.imag > 0:
+            data = u'%r+%rj' % (data.real, data.imag)
+        else:
+            data = u'%r%rj' % (data.real, data.imag)
+        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+    def represent_tuple(self, data):
+        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+    def represent_name(self, data):
+        name = u'%s.%s' % (data.__module__, data.__name__)
+        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+    def represent_module(self, data):
+        return self.represent_scalar(
+                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+    def represent_instance(self, data):
+        # For instances of classic classes, we use __getinitargs__ and
+        # __getstate__ to serialize the data.
+
+        # If data.__getinitargs__ exists, the object must be reconstructed by
+        # calling cls(*args), where args is a tuple returned by
+        # __getinitargs__. Otherwise, the cls.__init__ method should never be
+        # called and the class instance is created by instantiating a trivial
+        # class and assigning to the instance's __class__ variable.
+
+        # If data.__getstate__ exists, it returns the state of the object.
+        # Otherwise, the state of the object is data.__dict__.
+
+        # We produce either a !!python/object or !!python/object/new node.
+        # If data.__getinitargs__ does not exist and state is a dictionary, we
+        # produce a !!python/object node. Otherwise we produce a
+        # !!python/object/new node.
+
+        cls = data.__class__
+        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+        args = None
+        state = None
+        if hasattr(data, '__getinitargs__'):
+            args = list(data.__getinitargs__())
+        if hasattr(data, '__getstate__'):
+            state = data.__getstate__()
+        else:
+            state = data.__dict__
+        if args is None and isinstance(state, dict):
+            return self.represent_mapping(
+                    u'tag:yaml.org,2002:python/object:'+class_name, state)
+        if isinstance(state, dict) and not state:
+            return self.represent_sequence(
+                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+        value = {}
+        if args:
+            value['args'] = args
+        value['state'] = state
+        return self.represent_mapping(
+                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+    def represent_object(self, data):
+        # We use __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+        # A special case is when function.__name__ == '__newobj__'. In this
+        # case we create the object with args[0].__new__(*args).
+
+        # Another special case is when __reduce__ returns a string - we don't
+        # support it.
+
+        # We produce a !!python/object, !!python/object/new or
+        # !!python/object/apply node.
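+        # A hedged example of the reduce protocol consumed below (an
+        # editorial comment; the exact tuple varies across Python 2
+        # versions): for a plain new-style class Point with attributes x
+        # and y, data.__reduce_ex__(2) returns roughly
+        #   (copy_reg.__newobj__, (Point,), {'x': 1, 'y': 2})
+        # and the '__newobj__' branch below renders it as
+        #   !!python/object:__main__.Point {x: 1, y: 2}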
+ + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py new file mode 100644 index 000000000..49922debf --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py @@ -0,0 +1,225 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def 
add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. + if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or 
index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py new file mode 100644 index 000000000..971da6127 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py @@ -0,0 +1,1458 @@ +# SPDX-License-Identifier: MIT + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. + self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. 
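To see the token stream this scanner produces (again assuming a standard PyYAML installation imported as `yaml`), note how the KEY token precedes the key's scalar for a simple key, exactly as described above:

    import yaml

    for token in yaml.scan('a: 1'):
        print(token.__class__.__name__)
    # StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
    # ValueToken, ScalarToken, BlockEndToken, StreamEndToken

The `possible_simple_keys` dictionary initialized next is what lets the scanner insert that KEY token retroactively once the ':' indicator is found.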
+ self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. 
Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not found expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # A simple key is required only if it is the first token in the current + # line. Therefore it is always allowed. + assert self.allow_simple_key or not required + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not found expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. 
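The 1024-character limit enforced above is observable from the outside; a small sketch (standard PyYAML as `yaml`) showing that an over-long candidate key is silently dropped, so the ':' that finally arrives is rejected:

    import yaml

    try:
        yaml.safe_load('k' * 2000 + ': value')
    except yaml.scanner.ScannerError as exc:
        print(exc)  # "mapping values are not allowed here": the over-long
                    # simple-key candidate was discarded before ':' was seen

Returning to the scanner, the `while self.indent > column` loop that follows performs the unwinding announced by the comment above, popping saved indents and emitting a BLOCK-END token for each level.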
+ while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. + self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. + self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
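The '?' indicator handled by `fetch_key` above is how explicit (complex) keys enter the token stream; in block context it re-enables simple keys, as the code notes. A quick check with standard PyYAML as `yaml`:

    import yaml

    print(yaml.safe_load('? an explicit key\n: its value'))
    # {'an explicit key': 'its value'}

The lines that follow mark and emit the VALUE token announced by the comment above.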
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. 
+ # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if <TAB>: + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. 
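`scan_directive` and its helpers above and below handle '%YAML' and '%TAG'. With standard PyYAML as `yaml`, a well-formed directive is accepted, while a malformed version number is rejected by `scan_yaml_directive_number`:

    import yaml

    print(yaml.safe_load('%YAML 1.1\n--- hello'))    # 'hello'

    try:
        yaml.safe_load('%YAML one.one\n--- hi')
    except yaml.scanner.ScannerError as exc:
        print(exc)   # while scanning a directive ... expected a digit

The body of `scan_yaml_directive_value` continues below, skipping spaces and reading the major and minor numbers on either side of the '.'.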
+ while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
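Because of the restriction described above, anchor and alias names are plain alphanumeric identifiers. A small sketch (standard PyYAML as `yaml`) of an alias reusing an anchored node, including the object identity that results:

    import yaml

    data = yaml.safe_load('defaults: &d {retries: 3}\njob: *d')
    print(data)                              # {'defaults': {'retries': 3}, 'job': {'retries': 3}}
    print(data['defaults'] is data['job'])   # True: the alias shares the constructed object

`scan_anchor` itself, continuing below, reads the '*' or '&' indicator and then the run of permitted characters.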
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' + self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. 
We only need to check + # that document separators are not included in scalars. + if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. 
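The ESCAPE_REPLACEMENTS and ESCAPE_CODES tables above give double-quoted scalars their escape syntax; single-quoted scalars, by contrast, treat the backslash literally. With standard PyYAML as `yaml`:

    import yaml

    print(yaml.safe_load('"\\u263A"'))        # ☺ -- 4-digit code via ESCAPE_CODES
    print(repr(yaml.safe_load('"a\\tb"')))    # 'a\tb' -- the escape yields a real tab
    print(repr(yaml.safe_load("'a\\tb'")))    # 'a\\tb' -- backslash kept literally

The document-separator check announced by the comment above follows immediately in `scan_flow_scalar_breaks`.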
+ prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! + chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. 
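As `scan_plain` and `scan_plain_spaces` above imply, a plain scalar may span lines, with each single line break folded into a space (standard PyYAML as `yaml`):

    import yaml

    print(yaml.safe_load('first\n  second\n  third'))   # 'first second third'

Returning to the code, the body of `scan_tag_handle` below enforces the leading '!' and the alphanumeric-plus-'-_' handle characters just described.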
+ ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py new file mode 100644 index 000000000..15fdbb0c0 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + 
elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py new file mode 100644 index 000000000..c5c4fb116 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: MIT + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '<byte order mark>' + +class DirectiveToken(Token): + id = '<directive>' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark 
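Rounding off the serializer above: it is the inverse of the composer, turning a node tree back into events, and is reachable through the module helpers (standard PyYAML as `yaml`):

    import yaml

    node = yaml.compose('a: [1, 2]')   # representation tree of nodes
    print(yaml.serialize(node))        # 'a: [1, 2]\n' -- flow styles are preserved

The remaining token classes used by the scanner continue below.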
+ +class DocumentStartToken(Token): + id = '<document start>' + +class DocumentEndToken(Token): + id = '<document end>' + +class StreamStartToken(Token): + id = '<stream start>' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '<stream end>' + +class BlockSequenceStartToken(Token): + id = '<block sequence start>' + +class BlockMappingStartToken(Token): + id = '<block mapping start>' + +class BlockEndToken(Token): + id = '<block end>' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '<alias>' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '<anchor>' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '<tag>' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '<scalar>' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py new file mode 100644 index 000000000..a884b33cf --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py @@ -0,0 +1,313 @@ +# SPDX-License-Identifier: MIT + +from .error import * + +from .tokens import * +from .events import * +from .nodes import * + +from .loader import * +from .dumper import * + +__version__ = '3.11' +try: + from .cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +import io + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. 
+ """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + stream = io.StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = io.StringIO() + else: + stream = io.BytesIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + stream = io.StringIO() + else: + stream = io.BytesIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. 
+ """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(metaclass=YAMLObjectMetaclass): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + @classmethod + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + + @classmethod + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. + """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py new file mode 100644 index 000000000..c418bba91 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['Composer', 'ComposerError'] + +from .error import MarkedYAMLError +from .events import * +from .nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer: + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor, event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurrence" + % anchor, self.anchors[anchor].start_mark, + "second occurrence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == '!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py new file mode 100644 index 000000000..ee09a7a7e --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py @@ -0,0 +1,687 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from .error import * +from .nodes import * + +import collections, datetime, base64, binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor: + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available?
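Because compose_node returns the previously anchored node for an alias, and the constructor caches one object per node, aliased values come back as the same Python object; a small check:

    import yaml

    doc = 'defaults: &base {timeout: 10, retries: 3}\nweb: *base\n'
    data = yaml.safe_load(doc)
    print(data['web'] is data['defaults'])   # True: *base reuses the anchored node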
+ return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + if not isinstance(key, collections.abc.Hashable): + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unhashable key", key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a
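construct_object first looks for an exact tag match in yaml_constructors, so registering a constructor is all a custom tag needs; a sketch with a made-up !point tag (complex stands in for a real Point type):

    import yaml

    def construct_point(loader, node):
        x, y = loader.construct_sequence(node)
        return complex(x, y)

    yaml.add_constructor('!point', construct_point)
    print(yaml.load('!point [1, 2]', Loader=yaml.Loader))   # (1+2j)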
mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == 'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return super().construct_scalar(node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == 'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return super().construct_mapping(node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + 'yes': True, + 'no': False, + 'true': True, + 'false': False, + 'on': True, + 'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = self.construct_scalar(node) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
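construct_yaml_int above implements the YAML 1.1 integer forms; for reference:

    import yaml

    for text in ('0b1010', '0x1f', '017', '1_000', '1:30:00'):
        print(text, '->', yaml.safe_load(text))
    # 0b1010  -> 10     binary
    # 0x1f    -> 31     hexadecimal
    # 017     -> 15     leading zero means octal in YAML 1.1
    # 1_000   -> 1000   underscores are stripped first
    # 1:30:00 -> 5400   sexagesimal (base 60)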
+ + def construct_yaml_float(self, node): + value = self.construct_scalar(node) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError(None, None, + "failed to convert base64 data into ascii: %s" % exc, + node.start_mark) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + r'''^(?P<year>[0-9][0-9][0-9][0-9]) + -(?P<month>[0-9][0-9]?) + -(?P<day>[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P<hour>[0-9][0-9]?) + :(?P<minute>[0-9][0-9]) + :(?P<second>[0-9][0-9]) + (?:\.(?P<fraction>[0-9]*))? + (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?) + (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
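The float and timestamp constructors accept the same YAML 1.1 spellings handled above:

    import yaml

    print(yaml.safe_load('-.inf'))    # -inf
    print(yaml.safe_load('1:30.5'))   # 90.5 (sexagesimal)
    print(repr(yaml.safe_load('2018-11-07')))           # datetime.date(2018, 11, 7)
    print(repr(yaml.safe_load('2018-11-07 13:22:44')))  # datetime.datetime(2018, 11, 7, 13, 22, 44)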
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + return self.construct_scalar(node) + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag, + node.start_mark) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node) + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_bytes(self, node): + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError(None, None, + "failed to convert base64 data into ascii: %s" % exc, + node.start_mark) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return 
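The catch-all None entry registered above routes every unknown tag to construct_undefined, which is exactly why safe_load is safe; the full Constructor below resolves python/* tags instead:

    import yaml

    try:
        yaml.safe_load('!!python/name:os.system')
    except yaml.constructor.ConstructorError as exc:
        print(exc.problem)   # could not determine a constructor for the tag ...

    # The full loader resolves the same tag, so reserve it for trusted input:
    print(yaml.load('!!python/name:math.pi', Loader=yaml.Loader))  # 3.141592653589793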
base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + def construct_python_long(self, node): + return self.construct_yaml_int(node) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError as exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name, exc), mark) + return sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if '.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = 'builtins' + object_name = name + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name, exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" + % (object_name, module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value, node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value, node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ...
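set_python_instance_state prefers __setstate__ over a plain __dict__ update; a sketch (run as a script, hence the __main__ module path in the tag):

    import yaml

    class Counter:
        def __setstate__(self, state):
            self.count = state.get('count', 0) * 2   # custom state handling

    obj = yaml.load('!!python/object:__main__.Counter {count: 21}',
                    Loader=yaml.Loader)
    print(obj.count)   # 42: the state mapping went through __setstate__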
} + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. + if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/bytes', + Constructor.construct_python_bytes) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py new file mode 100644 index 000000000..e6c16d894 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from .constructor import * + +from .serializer import * +from .representer import * + +from .resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + 
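Both forms of !!python/object/apply described in the comment above, using stdlib callables:

    import yaml

    # Sequence form: positional arguments only.
    print(yaml.load('!!python/object/apply:complex [1, 2]', Loader=yaml.Loader))
    # (1+2j)

    # Mapping form: args (plus optional kwds/state/listitems/dictitems).
    doc = '!!python/object/apply:collections.OrderedDict\nargs: [[[a, 1], [b, 2]]]\n'
    print(yaml.load(doc, Loader=yaml.Loader))
    # OrderedDict([('a', 1), ('b', 2)])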
BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py new file mode 100644 index 000000000..ba590c6e6 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from .emitter import * +from .serializer import * +from .representer import * +from .resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, 
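These C-backed classes exist only when the _yaml extension (the LibYAML bindings) is importable, so callers traditionally fall back at import time:

    import yaml

    try:
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:                 # no _yaml extension available
        from yaml import Loader, Dumper

    print(yaml.dump(yaml.load('a: 1', Loader=Loader), Dumper=Dumper), end='')
    # {a: 1}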
default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py new file mode 100644 index 000000000..d4be65a8e --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py @@ -0,0 +1,1138 @@ +# SPDX-License-Identifier: MIT + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from .error import YAMLError +from .events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis: + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter: + + DEFAULT_TAG_PREFIXES = { + '!' : '!', + 'tag:yaml.org,2002:' : '!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. + self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. 
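The keyword arguments accepted by these Dumper constructors are the same ones the dump() helpers forward; for instance (the sample data is made up):

    import yaml

    data = {'jobs': [{'name': 'local', 'url': 'http://127.0.0.1:19999'}]}
    print(yaml.dump(data, default_flow_style=False, indent=4,
                    explicit_start=True), end='')
    # ---
    # jobs:
    # -   name: local
    #     url: http://127.0.0.1:19999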
+ self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = '\n' + if line_break in ['\r', '\n', '\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. + + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not hasattr(self.stream, 'encoding'): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
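The emitter can also be driven directly with an event stream obeying the grammar in the header comment; a sketch assuming the package's usual top-level emit() helper and event classes (defined elsewhere in this module set, not in this hunk):

    import yaml

    events = [
        yaml.StreamStartEvent(),
        yaml.DocumentStartEvent(),
        yaml.SequenceStartEvent(anchor=None, tag=None, implicit=True,
                                flow_style=True),
        yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='a'),
        yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='b'),
        yaml.SequenceEndEvent(),
        yaml.DocumentEndEvent(),
        yaml.StreamEndEvent(),
    ]
    print(yaml.emit(events), end='')   # [a, b]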
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator('...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = sorted(self.event.tags.keys()) + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator('---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator('...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator('...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. + + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor('&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor('*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
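The explicit_start handling above is what separates documents in a multi-document stream:

    import yaml

    print(yaml.dump_all([{'a': 1}, {'b': 2}], explicit_start=True,
                        default_flow_style=False), end='')
    # ---
    # a: 1
    # ---
    # b: 2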
+ + def expect_flow_sequence(self): + self.write_indicator('[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator(']', False) + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self): + self.write_indicator('{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator('}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator('}', False) + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
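canonical=True exercises most of these flow handlers at once: explicit document start, flow collections, '?'/':'-separated keys, and a resolved tag on every node:

    import yaml

    print(yaml.dump({'a': 1}, canonical=True), end='')
    # ---
    # !!map {
    #   ? !!str "a"
    #   : !!int "1",
    # }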
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator('-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == '') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
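Whether these block handlers or the flow handlers above run is decided per node in expect_node; default_flow_style picks the style for collections (sample data made up):

    import yaml

    data = {'alarm': {'severity': 'warning', 'silenced': False}}
    print(yaml.dump(data, default_flow_style=True), end='')
    # {alarm: {severity: warning, silenced: false}}
    print(yaml.dump(data, default_flow_style=False), end='')
    # alarm:
    #   severity: warning
    #   silenced: false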
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = '!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return '%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != '!' 
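choose_scalar_style below honors a requested style when the analysis allows it; note how a plain scalar at the document root leaves the stream open-ended, which is why the '...' terminator appears:

    import yaml

    print(yaml.dump('two words'), end='')
    # two words
    # ...
    print(yaml.dump('two words', default_style="'"), end='')   # 'two words'
    print(yaml.dump('two words', default_style='"'), end='')   # "two words"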
or handle[-1] != '!': + raise EmitterError("tag handle must start and end with '!': %r" % handle) + for ch in handle[1:-1]: + if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch, handle)) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == '!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append('%%%02X' % ch) + if start < end: + chunks.append(prefix[start:end]) + return ''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == '!': + return tag + handle = None + suffix = tag + prefixes = sorted(self.tag_prefixes.keys()) + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == '!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?:@&=+$,_.~*\'()[]' \ + or (ch == '!' and handle != '!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append('%%%02X' % ch) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = ''.join(chunks) + if handle: + return '%s%s' % (handle, suffix_text) + else: + return '!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch, anchor)) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith('---') or scalar.startswith('...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in '\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters.
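prepare_anchor validates the serializer-generated names (id001, id002, ...) that appear when the same object is emitted twice:

    import yaml

    shared = [1, 2]
    print(yaml.dump({'first': shared, 'second': shared},
                    default_flow_style=False), end='')
    # first: &id001
    # - 1
    # - 2
    # second: *id001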
+ if ch in '#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in '?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in ',?[]{}': + flow_indicators = True + if ch == ':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in '\n\x85\u2028\u2029': + line_breaks = True + if not (ch == '\n' or '\x20' <= ch <= '\x7E'): + if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' + or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == ' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in '\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write('\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = ' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = ' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = '%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = '%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. + + def write_single_quoted(self, text, split=True): + self.write_indicator('\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != ' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == '\'': + data = '\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == ' ') + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + self.write_indicator('\'', False) + + ESCAPE_REPLACEMENTS = { + '\0': '0', + '\x07': 'a', + '\x08': 'b', + '\x09': 't', + '\x0A': 'n', + '\x0B': 'v', + '\x0C': 'f', + '\x0D': 'r', + '\x1B': 'e', + '\"': '\"', + '\\': '\\', + '\x85': 'N', + '\xA0': '_', + '\u2028': 'L', + '\u2029': 'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator('"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ + or not ('\x20' <= ch <= '\x7E' + or (self.allow_unicode + and ('\xA0' <= ch <= '\uD7FF' + 
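allow_unicode feeds the special_characters decision above, which in turn decides between escaping and emitting non-ASCII text verbatim:

    import yaml

    print(yaml.dump({'name': 'caf\u00e9'}), end='')                      # {name: "caf\xE9"}
    print(yaml.dump({'name': 'caf\u00e9'}, allow_unicode=True), end='')  # {name: café}
    print(yaml.dump({'s': 'bell\a'}), end='')                            # {s: "bell\a"}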
or '\uE000' <= ch <= '\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = '\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= '\xFF': + data = '\\x%02X' % ord(ch) + elif ch <= '\uFFFF': + data = '\\u%04X' % ord(ch) + else: + data = '\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == ' ': + data = '\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator('"', False) + + def determine_block_hints(self, text): + hints = '' + if text: + if text[0] in ' \n\x85\u2028\u2029': + hints += str(self.best_indent) + if text[-1] not in '\n\x85\u2028\u2029': + hints += '-' + elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': + hints += '+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator('>'+hints, True) + if hints[-1:] == '+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != ' ' \ + and text[start] == '\n': + self.write_line_break() + leading_space = (ch == ' ') + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != ' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in '\n\x85\u2028\u2029') + spaces = (ch == ' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator('|'+hints, True) + if hints[-1:] == '+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in '\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + 
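The `determine_block_hints` logic above decides which indicators follow the `|` or `>` header: an explicit indentation digit when the scalar starts with whitespace, `-` to strip a missing final newline, `+` to keep extra trailing newlines. A minimal sketch of how those hints surface in output, using the public API of a stock PyYAML install rather than this vendored copy:

```python
# Each sample maps to the block-scalar header that determine_block_hints()
# produces for it; a stock PyYAML install is assumed (pip install pyyaml).
import yaml

samples = {
    "plain\nending\n":  "|",    # exactly one trailing newline: no indicator
    "no trailing LF":   "|-",   # missing final newline: strip chomping '-'
    "keep\n\n":         "|+",   # extra trailing newlines: keep chomping '+'
    " leading space\n": "|2",   # leading space forces an indentation hint
}
for text, expected in samples.items():
    header = yaml.dump(text, default_style='|').splitlines()[0]
    assert header == expected, (header, expected)
```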
+ def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = ' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != ' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == ' ') + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py new file mode 100644 index 000000000..5fec7d449 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark: + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end] + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py new file mode 100644 index 000000000..283452add --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: MIT + +# Abstract classes. + +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
+ +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass + +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py new file mode 100644 index 000000000..7ef6cf815 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from .reader import * +from .scanner import * +from .parser import * +from .composer import * +from .constructor import * +from .resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py new file mode 100644 index 000000000..ed2a1b43e --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: MIT + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '<empty>' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... 
') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py new file mode 100644 index 000000000..bcec7f994 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py @@ -0,0 +1,590 @@ +# SPDX-License-Identifier: MIT + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from .error import MarkedYAMLError +from .tokens import * +from .events import * +from .scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser: + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + '!': '!', + '!!': 'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. + self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. 
+ self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '<document start>', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. + self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == 'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == 'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle, + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle, + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == '!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == '!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == '!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not 
None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. + event = ScalarEvent(anchor, tag, (implicit, False), '', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? 
+ # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
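The note above is worth a concrete illustration: inside a flow sequence, the `KEY flow_node? (VALUE flow_node?)?` production generates a single-pair inline mapping rather than extending the sequence. A quick check against a stock PyYAML install:

```python
# 'a: 1' and 'c: 3' inside the flow sequence each become their own
# one-entry mapping; plain 'b' stays an ordinary scalar entry.
import yaml

assert yaml.safe_load('[a: 1, b, c: 3]') == [{'a': 1}, 'b', {'c': 3}]
```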
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
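Since both the entry and its VALUE part are optional in the productions above, a flow-mapping key may appear with no value at all; `parse_flow_mapping_empty_value` below fills in an empty scalar, which resolves to null. A short demonstration against a stock PyYAML install:

```python
# A bare key in a flow mapping maps to None (YAML null).
import yaml

assert yaml.safe_load('{a: 1, b}') == {'a': 1, 'b': None}
# An explicit '?' key entry without a value behaves the same way.
assert yaml.safe_load('{? a}') == {'a': None}
```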
+ + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), '', mark, mark) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py new file mode 100644 index 000000000..0a515fd64 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py @@ -0,0 +1,193 @@ +# SPDX-License-Identifier: MIT +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
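The header comment above describes the whole `Reader` surface that the scanner relies on. A minimal sketch of those methods, driven through the identical class shipped in a stock PyYAML install (`yaml.reader`) rather than this vendored module:

```python
from yaml.reader import Reader

r = Reader('ab\ncd')            # str input is used as-is, terminated by '\0'
assert r.peek() == 'a'          # look ahead without consuming
assert r.prefix(2) == 'ab'      # peek the next two characters
r.forward(3)                    # consume 'a', 'b' and the line break
assert (r.index, r.line, r.column) == (3, 1, 0)
assert r.peek() == 'c'
```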
+ +__all__ = ['Reader', 'ReaderError'] + +from .error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, bytes): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to a unicode string, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `bytes` object, + # - a `str` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. + + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = '' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, str): + self.name = "<unicode string>" + self.check_printable(stream) + self.buffer = stream+'\0' + elif isinstance(stream, bytes): + self.name = "<byte string>" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "<file>") + self.eof = False + self.raw_buffer = None + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in '\n\x85\u2028\u2029' \ + or (ch == '\r' and self.buffer[self.pointer] != '\n'): + self.line += 1 + self.column = 0 + elif ch != '\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): + self.update_raw() + if isinstance(self.raw_buffer, bytes): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, 
ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError as exc: + character = self.raw_buffer[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=4096): + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py new file mode 100644 index 000000000..756a18dcc --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py @@ -0,0 +1,375 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from .error import * +from .nodes import * + +import datetime, sys, copyreg, types, base64 + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter: + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, str(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + @classmethod + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + + 
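`add_representer` above is the registration hook that the top-level `yaml.add_representer` helper of a stock PyYAML install forwards to. A hedged sketch of typical usage; `Point` and `represent_point` are made-up names for illustration:

```python
import yaml

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def represent_point(dumper, data):
    # Emit the object as a '!point' tagged mapping.
    return dumper.represent_mapping('!point', {'x': data.x, 'y': data.y})

yaml.add_representer(Point, represent_point)
print(yaml.dump(Point(1, 2)))  # e.g. "!point {x: 1, y: 2}"; style varies by version
```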
@classmethod + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = list(mapping.items()) + try: + mapping = sorted(mapping) + except TypeError: + pass + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data in [None, ()]: + return True + if isinstance(data, (str, bytes, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar('tag:yaml.org,2002:null', 'null') + + def represent_str(self, data): + return self.represent_scalar('tag:yaml.org,2002:str', data) + + def represent_binary(self, data): + if hasattr(base64, 'encodebytes'): + data = base64.encodebytes(data).decode('ascii') + else: + data = base64.encodestring(data).decode('ascii') + return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') + + def represent_bool(self, data): + if data: + value = 'true' + else: + value = 'false' + return self.represent_scalar('tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar('tag:yaml.org,2002:int', str(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = '.nan' + elif data == self.inf_value: + value = '.inf' + elif data == -self.inf_value: + value = '-.inf' + else: + value = repr(data).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. 
We fix this by adding + # '.0' before the 'e' symbol. + if '.' not in value and 'e' in value: + value = value.replace('e', '.0e', 1) + return self.represent_scalar('tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence('tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping('tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping('tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = data.isoformat() + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = data.isoformat(' ') + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(bytes, + SafeRepresenter.represent_binary) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_complex(self, data): + if data.imag == 0.0: + data = '%r' % data.real + elif data.real == 0.0: + data = '%rj' % data.imag + elif data.imag > 0: + data = '%r+%rj' % (data.real, data.imag) + else: + data = '%r%rj' % (data.real, data.imag) + return self.represent_scalar('tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = '%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') + + def represent_module(self, data): + return self.represent_scalar( + 'tag:yaml.org,2002:python/module:'+data.__name__, '') + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copyreg.dispatch_table: + reduce = copyreg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = 'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = 'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = '%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + 'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py new file mode 100644 index 000000000..50945e04d --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py @@ -0,0 +1,225 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['BaseResolver', 'Resolver'] + +from .error import * +from .nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver: + + DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() + if first is None: + first = [None] + for ch in 
first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. + if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, str) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (str, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, str): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if 
(index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, str): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == '': + resolvers = self.yaml_implicit_resolvers.get('', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:bool', + re.compile(r'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list('yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:float', + re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:int', + re.compile(r'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list('-+0123456789')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:merge', + re.compile(r'^(?:<<)$'), + ['<']) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:null', + re.compile(r'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + ['~', 'n', 'N', '']) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:timestamp', + re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list('0123456789')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:value', + re.compile(r'^(?:=)$'), + ['=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:yaml', + re.compile(r'^(?:!|&|\*)$'), + list('!&*')) + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py new file mode 100644 index 000000000..b55854e8b --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py @@ -0,0 +1,1449 @@ +# SPDX-License-Identifier: MIT + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from .error import MarkedYAMLError +from .tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey: + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner: + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. + self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. 
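+        # For illustration (a sketch of the mechanism, not spec text): in
+        #   plain key: value
+        # 'plain key' is recorded in this dictionary as a possible simple key
+        # when its first token is fetched; the KEY token is then inserted
+        # retroactively once the ':' is found on the same line (see
+        # fetch_value() below).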
+ self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == '\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? + if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == '\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. 
Let's produce a nice error message.
+        raise ScannerError("while scanning for the next token", None,
+                "found character %r that cannot start any token" % ch,
+                self.get_mark())
+
+    # Simple keys treatment.
+
+    def next_possible_simple_key(self):
+        # Return the number of the nearest possible simple key. Actually we
+        # don't need to loop through the whole dictionary. We may replace it
+        # with the following code:
+        #   if not self.possible_simple_keys:
+        #       return None
+        #   return self.possible_simple_keys[
+        #           min(self.possible_simple_keys.keys())].token_number
+        min_token_number = None
+        for level in self.possible_simple_keys:
+            key = self.possible_simple_keys[level]
+            if min_token_number is None or key.token_number < min_token_number:
+                min_token_number = key.token_number
+        return min_token_number
+
+    def stale_possible_simple_keys(self):
+        # Remove entries that are no longer possible simple keys. According to
+        # the YAML specification, simple keys
+        # - should be limited to a single line,
+        # - should be no longer than 1024 characters.
+        # Disabling this procedure will allow simple keys of any length and
+        # height (may cause problems if indentation is broken though).
+        for level in list(self.possible_simple_keys):
+            key = self.possible_simple_keys[level]
+            if key.line != self.line \
+                    or self.index-key.index > 1024:
+                if key.required:
+                    raise ScannerError("while scanning a simple key", key.mark,
+                            "could not find expected ':'", self.get_mark())
+                del self.possible_simple_keys[level]
+
+    def save_possible_simple_key(self):
+        # The next token may start a simple key. We check if it's possible
+        # and save its position. This function is called for
+        # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+        # Check if a simple key is required at the current position.
+        required = not self.flow_level and self.indent == self.column
+
+        # A simple key is required only if it is the first token in the
+        # current line. Therefore it is always allowed.
+        assert self.allow_simple_key or not required
+
+        # The next token might be a simple key. Let's save its number and
+        # position.
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken+len(self.tokens)
+            key = SimpleKey(token_number, required,
+                    self.index, self.line, self.column, self.get_mark())
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+
+            if key.required:
+                raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+
+        ## In flow context, tokens should respect indentation.
+        ## Actually the condition should be `self.indent >= column` according
+        ## to the spec. But this condition will prohibit intuitively correct
+        ## constructions such as
+        ##   key : {
+        ##   }
+        #if self.flow_level and self.indent > column:
+        #    raise ScannerError(None, None,
+        #            "invalid indentation or unclosed '[' or '{'",
+        #            self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner
+        # less restrictive than the specification requires.
+        if self.flow_level:
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
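+        # For illustration: in
+        #   a:
+        #     b: 1
+        #   c: 2
+        # reaching 'c' at column 0 unwinds the inner indent (2), so the loop
+        # below emits one BLOCK-END closing the nested mapping.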
+        while self.indent > column:
+            mark = self.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark,
+            encoding=self.encoding))
+
+    def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+    def fetch_document_start(self):
+        self.fetch_document_indicator(DocumentStartToken)
+
+    def fetch_document_end(self):
+        self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there cannot be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.get_mark()
+        self.forward(3)
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+    def fetch_flow_mapping_start(self):
+        self.fetch_flow_collection_start(FlowMappingStartToken)
+
+    def fetch_flow_collection_start(self, TokenClass):
+
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+
+        # Increase the flow level.
+        self.flow_level += 1
+
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Decrease the flow level.
+        self.flow_level -= 1
+
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add FLOW-ENTRY.
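+        # For illustration: each ',' in '[a, b]' or '{a: 1, b: 2}' becomes
+        # one FLOW-ENTRY token.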
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+    def fetch_block_entry(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a new entry?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "sequence entries are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-SEQUENCE-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+        else:
+            pass
+
+        # Simple keys are allowed after '-'.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add BLOCK-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "mapping keys are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-MAPPING-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after '?' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add KEY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(KeyToken(start_mark, end_mark))
+
+    def fetch_value(self):
+
+        # Do we have a simple key pending on the current level?
+        if self.flow_level in self.possible_simple_keys:
+
+            # Add KEY.
+            key = self.possible_simple_keys[self.flow_level]
+            del self.possible_simple_keys[self.flow_level]
+            self.tokens.insert(key.token_number-self.tokens_taken,
+                    KeyToken(key.mark, key.mark))
+
+            # If this key starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.
+            if not self.flow_level:
+                if self.add_indent(key.column):
+                    self.tokens.insert(key.token_number-self.tokens_taken,
+                            BlockMappingStartToken(key.mark, key.mark))
+
+            # There cannot be two simple keys one after another.
+            self.allow_simple_key = False
+
+        # It must be a part of a complex key.
+        else:
+
+            # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+            # anyway.)
+            if not self.flow_level:
+
+                # We are allowed to start a complex value if and only if
+                # we can start a simple key.
+                if not self.allow_simple_key:
+                    raise ScannerError(None, None,
+                            "mapping values are not allowed here",
+                            self.get_mark())
+
+            # If this value starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START. It will be detected as an error later by
+            # the parser.
+            if not self.flow_level:
+                if self.add_indent(self.column):
+                    mark = self.get_mark()
+                    self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after ':' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add VALUE.
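+        # For illustration: in 'key: value', the KEY token was inserted
+        # retroactively above; the ':' scanned here becomes the VALUE token.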
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == '---' \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == '...' \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. 
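+        # For illustration: '-foo' starts a plain scalar, while '- foo' is a
+        # block entry; ':bar' may start a plain scalar in block context, but
+        # not in flow context, where ':' stays space-independent.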
+ ch = self.peek() + return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029' + and (ch == '-' or (not self.flow_level and ch in '?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if <TAB>: + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == '\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == 'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == 'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" % self.peek(), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" % self.peek(), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. 
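+        # For illustration: for the directive '%YAML 1.1' this method is
+        # called twice by scan_yaml_directive_value() around the '.',
+        # producing the (major, minor) pair (1, 1).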
+ ch = self.peek() + if not ('0' <= ch <= '9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch, self.get_mark()) + length = 0 + while '0' <= self.peek(length) <= '9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == ' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != ' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in '\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch, self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. + start_mark = self.get_mark() + indicator = self.peek() + if indicator == '*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == '<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != '>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek(), + self.get_mark()) + self.forward() + elif ch in '\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = '!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = '!' 
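+            # For illustration: '!!str' and '!foo!bar' take the use_handle
+            # branch below; a plain '!suffix' keeps the primary handle '!'
+            # set above.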
+ if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = '' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != '\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in ' \t' + length = 0 + while self.peek(length) not in '\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if folded and line_break == '\n' \ + and leading_non_space and self.peek() not in ' \t': + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == '\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. 
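+        # For illustration: after the '|' or '>' indicator, a header of '2-'
+        # yields (chomping=False, increment=2), i.e. strip trailing line
+        # breaks and use an explicit indentation indicator of 2; a bare
+        # header yields (None, None).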
+ chomping = None + increment = None + ch = self.peek() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch, self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in '\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" % ch, + self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in ' \r\n\x85\u2028\u2029': + if self.peek() != ' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == ' ': + self.forward() + while self.peek() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == ' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
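+        # For illustration: a double-quoted scalar may continue on a less
+        # indented line, which block rules would forbid, but a '---' or '...'
+        # document separator inside the quotes still raises a ScannerError
+        # (see scan_flow_scalar_breaks() below).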
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + '0': '\0', + 'a': '\x07', + 'b': '\x08', + 't': '\x09', + '\t': '\x09', + 'n': '\x0A', + 'v': '\x0B', + 'f': '\x0C', + 'r': '\x0D', + 'e': '\x1B', + ' ': '\x20', + '\"': '\"', + '\\': '\\', + 'N': '\x85', + '_': '\xA0', + 'L': '\u2028', + 'P': '\u2029', + } + + ESCAPE_CODES = { + 'x': 2, + 'u': 4, + 'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == '\'' and self.peek(1) == '\'': + chunks.append('\'') + self.forward(2) + elif (double and ch == '\'') or (not double and ch in '\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == '\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in '0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k)), self.get_mark()) + code = int(self.prefix(length), 16) + chunks.append(chr(code)) + self.forward(length) + elif ch in '\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch, self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in ' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == '\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. 
+ prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in ' \t': + self.forward() + if self.peek() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == '#': + break + while True: + ch = self.peek(length) + if ch in '\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == ':' and + self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in ',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == ':' + and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == '#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! + chunks = [] + length = 0 + while self.peek(length) in ' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in ' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. 
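+        # For illustration: this returns '!' for the primary handle, '!!'
+        # for the secondary handle, and '!foo!' for a named handle.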
+ ch = self.peek() + if ch != '!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch, self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != ' ': + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if ch != '!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch, self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?:@&=+$,_.!~*\'()[]%': + if ch == '%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch, self.get_mark()) + return ''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + codes = [] + mark = self.get_mark() + while self.peek() == '%': + self.forward() + for k in range(2): + if self.peek(k) not in '0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" + % self.peek(k), self.get_mark()) + codes.append(int(self.prefix(2), 16)) + self.forward(2) + try: + value = bytes(codes).decode('utf-8') + except UnicodeDecodeError as exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in '\r\n\x85': + if self.prefix(2) == '\r\n': + self.forward(2) + else: + self.forward() + return '\n' + elif ch in '\u2028\u2029': + self.forward() + return ch + return '' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py new file mode 100644 index 000000000..1ba2f7f9d --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: MIT + +__all__ = ['Serializer', 'SerializerError'] + +from .error import YAMLError +from .events import * +from .nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer: + + ANCHOR_TEMPLATE = 'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is 
already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py new file mode 100644 index 000000000..c5c4fb116 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: MIT + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '<byte order mark>' + +class DirectiveToken(Token): + id = '<directive>' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '<document start>' + +class DocumentEndToken(Token): + id = '<document 
end>' + +class StreamStartToken(Token): + id = '<stream start>' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '<stream end>' + +class BlockSequenceStartToken(Token): + id = '<block sequence start>' + +class BlockMappingStartToken(Token): + id = '<block mapping start>' + +class BlockEndToken(Token): + id = '<block end>' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '<alias>' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '<anchor>' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '<tag>' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '<scalar>' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py new file mode 100644 index 000000000..ec21779a0 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# client.py - Somewhat higher-level GUI_RPC API for BOINC core client +# +# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com> +# Copyright (C) 2017 Austin S. Hemmelgarn +# +# SPDX-License-Identifier: GPL-3.0 + +# Based on client/boinc_cmd.cpp + +import hashlib +import socket +import sys +import time +from functools import total_ordering +from xml.etree import ElementTree + +GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg" + +GUI_RPC_HOSTNAME = None # localhost +GUI_RPC_PORT = 31416 +GUI_RPC_TIMEOUT = 1 + +class Rpc(object): + ''' Class to perform GUI RPC calls to a BOINC core client. + Usage in a context manager ('with' block) is recommended to ensure + disconnect() is called. Using the same instance for all calls is also + recommended so it reuses the same socket connection + ''' + def __init__(self, hostname="", port=0, timeout=0, text_output=False): + self.hostname = hostname + self.port = port + self.timeout = timeout + self.sock = None + self.text_output = text_output + + @property + def sockargs(self): + return (self.hostname, self.port, self.timeout) + + def __enter__(self): self.connect(*self.sockargs); return self + def __exit__(self, *args): self.disconnect() + + def connect(self, hostname="", port=0, timeout=0): + ''' Connect to (hostname, port) with timeout in seconds. 
+ Hostname defaults to None (localhost), and port to 31416 + Calling multiple times will disconnect previous connection (if any), + and (re-)connect to host. + ''' + if self.sock: + self.disconnect() + + self.hostname = hostname or GUI_RPC_HOSTNAME + self.port = port or GUI_RPC_PORT + self.timeout = timeout or GUI_RPC_TIMEOUT + + self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2]) + + def disconnect(self): + ''' Disconnect from host. Calling multiple times is OK (idempotent) + ''' + if self.sock: + self.sock.close() + self.sock = None + + def call(self, request, text_output=None): + ''' Do an RPC call. Pack and send the XML request and return the + unpacked reply. request can be either plain XML text or a + xml.etree.ElementTree.Element object. Return ElementTree.Element + or XML text according to text_output flag. + Will auto-connect if not connected. + ''' + if text_output is None: + text_output = self.text_output + + if not self.sock: + self.connect(*self.sockargs) + + if not isinstance(request, ElementTree.Element): + request = ElementTree.fromstring(request) + + # pack request + end = '\003' + if sys.version_info[0] < 3: + req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end) + else: + req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode() + + try: + self.sock.sendall(req) + except (socket.error, socket.herror, socket.gaierror, socket.timeout): + raise + + req = "" + while True: + try: + buf = self.sock.recv(8192) + if not buf: + raise socket.error("No data from socket") + if sys.version_info[0] >= 3: + buf = buf.decode() + except socket.error: + raise + n = buf.find(end) + if not n == -1: break + req += buf + req += buf[:n] + + # unpack reply (remove root tag, ie: first and last lines) + req = '\n'.join(req.strip().rsplit('\n')[1:-1]) + + if text_output: + return req + else: + return ElementTree.fromstring(req) + +def setattrs_from_xml(obj, xml, attrfuncdict={}): + ''' Helper to set values for attributes of a class instance by mapping + matching tags from a XML file. + attrfuncdict is a dict of functions to customize value data type of + each attribute. It falls back to simple int/float/bool/str detection + based on values defined in __init__(). This would not be needed if + Boinc used standard RPC protocol, which includes data type in XML. + ''' + if not isinstance(xml, ElementTree.Element): + xml = ElementTree.fromstring(xml) + for e in list(xml): + if hasattr(obj, e.tag): + attr = getattr(obj, e.tag) + attrfunc = attrfuncdict.get(e.tag, None) + if attrfunc is None: + if isinstance(attr, bool): attrfunc = parse_bool + elif isinstance(attr, int): attrfunc = parse_int + elif isinstance(attr, float): attrfunc = parse_float + elif isinstance(attr, str): attrfunc = parse_str + elif isinstance(attr, list): attrfunc = parse_list + else: attrfunc = lambda x: x + setattr(obj, e.tag, attrfunc(e)) + else: + pass + #print "class missing attribute '%s': %r" % (e.tag, obj) + return obj + + +def parse_bool(e): + ''' Helper to convert ElementTree.Element.text to boolean. + Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True + Treat '0' and 'false' as False + ''' + if e.text is None: + return True + else: + return bool(e.text) and not e.text.strip().lower() in ('0', 'false') + + +def parse_int(e): + ''' Helper to convert ElementTree.Element.text to integer. 
+ Treat '<foo/>' (and '<foo></foo>') as 0 + ''' + # int(float()) allows casting to int a value expressed as float in XML + return 0 if e.text is None else int(float(e.text.strip())) + + +def parse_float(e): + ''' Helper to convert ElementTree.Element.text to float. ''' + return 0.0 if e.text is None else float(e.text.strip()) + + +def parse_str(e): + ''' Helper to convert ElementTree.Element.text to string. ''' + return "" if e.text is None else e.text.strip() + + +def parse_list(e): + ''' Helper to convert ElementTree.Element to list. For now, simply return + the list of root element's children + ''' + return list(e) + + +class Enum(object): + UNKNOWN = -1 # Not in original API + + @classmethod + def name(cls, value): + ''' Quick-and-dirty fallback for getting the "name" of an enum item ''' + + # value as string, if it matches an enum attribute. + # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE) + if hasattr(cls, str(value)): + return cls.name(getattr(cls, value, None)) + + # value not handled in subclass name() + for k, v in cls.__dict__.items(): + if v == value: + return k.lower().replace('_', ' ') + + # value not found + return cls.name(Enum.UNKNOWN) + + +class CpuSched(Enum): + ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state + "SCHEDULED" is synonymous with "executing" except when CPU throttling + is in use. + ''' + UNINITIALIZED = 0 + PREEMPTED = 1 + SCHEDULED = 2 + + +class ResultState(Enum): + ''' Values of RESULT::state in client. + THESE MUST BE IN NUMERICAL ORDER + (because of the > comparison in RESULT::computing_done()) + see html/inc/common_defs.inc + ''' + NEW = 0 + #// New result + FILES_DOWNLOADING = 1 + #// Input files for result (WU, app version) are being downloaded + FILES_DOWNLOADED = 2 + #// Files are downloaded, result can be (or is being) computed + COMPUTE_ERROR = 3 + #// computation failed; no file upload + FILES_UPLOADING = 4 + #// Output files for result are being uploaded + FILES_UPLOADED = 5 + #// Files are uploaded, notify scheduling server at some point + ABORTED = 6 + #// result was aborted + UPLOAD_FAILED = 7 + #// some output file permanent failure + + +class Process(Enum): + ''' values of ACTIVE_TASK::task_state ''' + UNINITIALIZED = 0 + #// process doesn't exist yet + EXECUTING = 1 + #// process is running, as far as we know + SUSPENDED = 9 + #// we've sent it a "suspend" message + ABORT_PENDING = 5 + #// process exceeded limits; send "abort" message, waiting to exit + QUIT_PENDING = 8 + #// we've sent it a "quit" message, waiting to exit + COPY_PENDING = 10 + #// waiting for async file copies to finish + + +class _Struct(object): + ''' base helper class with common methods for all classes derived from + BOINC's C++ structs + ''' + @classmethod + def parse(cls, xml): + return setattrs_from_xml(cls(), xml) + + def __str__(self, indent=0): + buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__) + for attr in self.__dict__: + value = getattr(self, attr) + if isinstance(value, list): + buf += '{0}\t{1} [\n'.format('\t' * indent, attr) + for v in value: buf += '\t\t{0}\t\t,\n'.format(v) + buf += '\t]\n' + else: + buf += '{0}\t{1}\t{2}\n'.format('\t' * indent, + attr, + value.__str__(indent+2) + if isinstance(value, _Struct) + else repr(value)) + return buf + + +@total_ordering +class VersionInfo(_Struct): + def __init__(self, major=0, minor=0, release=0): + self.major = major + self.minor = minor + self.release = release + + @property + def _tuple(self): + return (self.major, self.minor, 
self.release) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self._tuple == other._tuple + + def __ne__(self, other): + return not self.__eq__(other) + + def __gt__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self._tuple > other._tuple + + def __str__(self): + return "{0}.{1}.{2}".format(self.major, self.minor, self.release) + + def __repr__(self): + return "{0}{1}".format(self.__class__.__name__, self._tuple) + + +class Result(_Struct): + ''' Also called "task" in some contexts ''' + def __init__(self): + # Names and values follow lib/gui_rpc_client.h @ RESULT + # Order too, except when grouping contradicts client/result.cpp + # RESULT::write_gui(), then XML order is used. + + self.name = "" + self.wu_name = "" + self.version_num = 0 + #// identifies the app used + self.plan_class = "" + self.project_url = "" # from PROJECT.master_url + self.report_deadline = 0.0 # seconds since epoch + self.received_time = 0.0 # seconds since epoch + #// when we got this from server + self.ready_to_report = False + #// we're ready to report this result to the server; + #// either computation is done and all the files have been uploaded + #// or there was an error + self.got_server_ack = False + #// we've received the ack for this result from the server + self.final_cpu_time = 0.0 + self.final_elapsed_time = 0.0 + self.state = ResultState.NEW + self.estimated_cpu_time_remaining = 0.0 + #// actually, estimated elapsed time remaining + self.exit_status = 0 + #// return value from the application + self.suspended_via_gui = False + self.project_suspended_via_gui = False + self.edf_scheduled = False + #// temporary used to tell GUI that this result is deadline-scheduled + self.coproc_missing = False + #// a coproc needed by this job is missing + #// (e.g. because user removed their GPU board). + self.scheduler_wait = False + self.scheduler_wait_reason = "" + self.network_wait = False + self.resources = "" + #// textual description of resources used + + #// the following defined if active + # XML is generated in client/app.cpp ACTIVE_TASK::write_gui() + self.active_task = False + self.active_task_state = Process.UNINITIALIZED + self.app_version_num = 0 + self.slot = -1 + self.pid = 0 + self.scheduler_state = CpuSched.UNINITIALIZED + self.checkpoint_cpu_time = 0.0 + self.current_cpu_time = 0.0 + self.fraction_done = 0.0 + self.elapsed_time = 0.0 + self.swap_size = 0 + self.working_set_size_smoothed = 0.0 + self.too_large = False + self.needs_shmem = False + self.graphics_exec_path = "" + self.web_graphics_url = "" + self.remote_desktop_addr = "" + self.slot_path = "" + #// only present if graphics_exec_path is + + # The following are not in original API, but are present in RPC XML reply + self.completed_time = 0.0 + #// time when ready_to_report was set + self.report_immediately = False + self.working_set_size = 0 + self.page_fault_rate = 0.0 + #// derived by higher-level code + + # The following are in API, but are NEVER in RPC XML reply. 
Go figure
+        self.signal = 0
+
+        self.app = None      # APP*
+        self.wup = None      # WORKUNIT*
+        self.project = None  # PROJECT*
+        self.avp = None      # APP_VERSION*
+
+    @classmethod
+    def parse(cls, xml):
+        if not isinstance(xml, ElementTree.Element):
+            xml = ElementTree.fromstring(xml)
+
+        # parse main XML
+        result = super(Result, cls).parse(xml)
+
+        # parse '<active_task>' children
+        active_task = xml.find('active_task')
+        if active_task is None:
+            result.active_task = False  # already the default after __init__()
+        else:
+            result.active_task = True   # already the default after the main parse
+            result = setattrs_from_xml(result, active_task)
+
+        #// if CPU time is nonzero but elapsed time is zero,
+        #// we must be talking to an old client.
+        #// Set elapsed = CPU
+        #// (easier to deal with this here than in the manager)
+        if result.current_cpu_time != 0 and result.elapsed_time == 0:
+            result.elapsed_time = result.current_cpu_time
+
+        if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
+            result.final_elapsed_time = result.final_cpu_time
+
+        return result
+
+    def __str__(self):
+        buf = '{0}:\n'.format(self.__class__.__name__)
+        for attr in self.__dict__:
+            value = getattr(self, attr)
+            if attr in ['received_time', 'report_deadline']:
+                value = time.ctime(value)
+            buf += '\t{0}\t{1}\n'.format(attr, value)
+        return buf
+
+
+class BoincClient(object):
+
+    def __init__(self, host="", port=0, passwd=None):
+        self.hostname = host
+        self.port = port
+        self.passwd = passwd
+        self.rpc = Rpc(text_output=False)
+        self.version = None
+        self.authorized = False
+
+        # Informative, not authoritative. Records the status of the *last* RPC
+        # call, but implies nothing about the success of the *next* one.
+        # Thus, it should be read *after* an RPC call, not prior to one.
+        self.connected = False
+
+    def __enter__(self): self.connect(); return self
+    def __exit__(self, *args): self.disconnect()
+
+    def connect(self):
+        try:
+            self.rpc.connect(self.hostname, self.port)
+            self.connected = True
+        except socket.error:
+            self.connected = False
+            return
+        self.authorized = self.authorize(self.passwd)
+        self.version = self.exchange_versions()
+
+    def disconnect(self):
+        self.rpc.disconnect()
+
+    def authorize(self, password):
+        ''' Request authorization. If password is None and we are connecting
+            to localhost, try to read the password from the local config file
+            GUI_RPC_PASSWD_FILE. If the file can't be read (not found or no
+            permission to read), try to authorize with a blank password.
+            If authorization is requested and fails, all subsequent calls
+            will be refused with socket.error 'Connection reset by peer' (104).
+            Since most local calls do not require authorization, do not attempt
+            it if you're not sure about the password.
+        '''
+        if password is None and not self.hostname:
+            password = read_gui_rpc_password() or ""
+        nonce = self.rpc.call('<auth1/>').text
+        authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
+        reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
+
+        if reply.tag == 'authorized':
+            return True
+        else:
+            return False
+
+    def exchange_versions(self):
+        ''' Return a VersionInfo instance with core client version info '''
+        return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
+
+    def get_tasks(self):
+        ''' Same as get_results(active_only=False) '''
+        return self.get_results(False)
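
A minimal end-to-end sketch (illustrative, not part of the patch): the context-manager methods above connect, authorize, and exchange versions, so listing tasks reduces to a few lines. It assumes a BOINC core client listening on localhost:31416 and a readable gui_rpc_auth.cfg:

    with BoincClient() as client:
        if client.connected and client.authorized:
            for task in client.get_tasks():
                print(task.name, task.fraction_done)
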
+
+    def get_results(self, active_only=False):
+        ''' Get a list of results.
+            Those that are in progress will have information such as CPU time
+            and fraction done. Each result includes a name; use
+            CC_STATE::lookup_result() to find this result in the current
+            static state; if it's not there, call get_state() again.
+        '''
+        reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
+        if reply.tag != 'results':
+            return []
+
+        results = []
+        for item in list(reply):
+            results.append(Result.parse(item))
+
+        return results
+
+
+def read_gui_rpc_password():
+    ''' Read the password string from GUI_RPC_PASSWD_FILE, trim the
+        trailing newline (if any), and return it.
+    '''
+    try:
+        with open(GUI_RPC_PASSWD_FILE, 'r') as f:
+            buf = f.read()
+            if buf.endswith('\n'): return buf[:-1]  # trim the trailing newline
+            else: return buf
+    except IOError:
+        # Permission denied or file not found.
+        pass
diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
new file mode 100644
index 000000000..f10cd6209
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -0,0 +1,258 @@
+# SPDX-License-Identifier: LGPL-2.1
+"""
+@package sensors.py
+Python Bindings for libsensors3
+
+use the documentation of libsensors for the low level API.
+see example.py for high level API usage.
+
+@author: Pavel Rojtberg (http://www.rojtberg.net)
+@see: https://github.com/paroj/sensors.py
+@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
+"""
+
+from ctypes import *
+import ctypes.util
+
+_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
+# see https://github.com/paroj/sensors.py/issues/1
+_libc.free.argtypes = [c_void_p]
+_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
+
+version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
+
+
+class bus_id(Structure):
+    _fields_ = [("type", c_short),
+                ("nr", c_short)]
+
+
+class chip_name(Structure):
+    _fields_ = [("prefix", c_char_p),
+                ("bus", bus_id),
+                ("addr", c_int),
+                ("path", c_char_p)]
+
+
+class feature(Structure):
+    _fields_ = [("name", c_char_p),
+                ("number", c_int),
+                ("type", c_int)]
+
+    # sensors_feature_type
+    IN = 0x00
+    FAN = 0x01
+    TEMP = 0x02
+    POWER = 0x03
+    ENERGY = 0x04
+    CURR = 0x05
+    HUMIDITY = 0x06
+    MAX_MAIN = 0x7
+    VID = 0x10
+    INTRUSION = 0x11
+    MAX_OTHER = 0x12
+    BEEP_ENABLE = 0x18
+
+
+class subfeature(Structure):
+    _fields_ = [("name", c_char_p),
+                ("number", c_int),
+                ("type", c_int),
+                ("mapping", c_int),
+                ("flags", c_uint)]
+
+
+_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
+_hdl.sensors_get_features.restype = POINTER(feature)
+_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
+_hdl.sensors_get_label.restype = c_void_p  # return a pointer instead of str so we can free it
+_hdl.sensors_get_adapter_name.restype = c_char_p  # docs do not say whether to free this or not
+_hdl.sensors_strerror.restype = c_char_p
+
+### RAW API ###
+MODE_R = 1
+MODE_W = 2
+COMPUTE_MAPPING = 4
+
+
+def init(cfg_file=None):
+    file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None
+
+    if _hdl.sensors_init(file) != 0:
+        raise Exception("sensors_init failed")
+
+    if file is not None:
+        _libc.fclose(file)
+
+
+def cleanup():
+    _hdl.sensors_cleanup()
+
+
+def parse_chip_name(orig_name):
+    ret = chip_name()
+    err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
+
+    if err < 0:
+        raise Exception(strerror(err))
+
+    return ret
+
+
+def strerror(errnum):
+    return _hdl.sensors_strerror(errnum).decode("utf-8")
+
+
+def free_chip_name(chip):
+    _hdl.sensors_free_chip_name(byref(chip))
+
+
+def get_detected_chips(match, nr):
+    """
+    @return: (chip, next nr to query)
+    """
+    _nr = c_int(nr)
+
+    if match is not None:
+        match = byref(match)
+
+    chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
+    chip = chip.contents if bool(chip) else None
+    return chip, _nr.value
+
+
+def chip_snprintf_name(chip, buffer_size=200):
+    """
+    @param buffer_size defaults to the size used in the sensors utility
+    """
+    ret = create_string_buffer(buffer_size)
+    err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
+
+    if err < 0:
+        raise Exception(strerror(err))
+
+    return ret.value.decode("utf-8")
+
+
+def do_chip_sets(chip):
+    """
+    @attention this function was not tested
+    """
+    err = _hdl.sensors_do_chip_sets(byref(chip))
+    if err < 0:
+        raise Exception(strerror(err))
+
+
+def get_adapter_name(bus):
+    return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
+
+
+def get_features(chip, nr):
+    """
+    @return: (feature, next nr to query)
+    """
+    _nr = c_int(nr)
+    feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
+    feature = feature.contents if bool(feature) else None
+    return feature, _nr.value
+
+
+def get_label(chip, feature):
+    ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
+    val = cast(ptr, c_char_p).value.decode("utf-8")
+    _libc.free(ptr)
+    return val
+
+
+def get_all_subfeatures(chip, feature, nr):
+    """
+    @return: (subfeature, next nr to query)
+    """
+    _nr = c_int(nr)
+    subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
+    subfeature = subfeature.contents if bool(subfeature) else None
+    return subfeature, _nr.value
+
+
+def get_value(chip, subfeature_nr):
+    val = c_double()
+    err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
+    if err < 0:
+        raise Exception(strerror(err))
+    return val.value
+
+
+def set_value(chip, subfeature_nr, value):
+    """
+    @attention this function was not tested
+    """
+    val = c_double(value)
+    err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
+    if err < 0:
+        raise Exception(strerror(err))
+
+
+### Convenience API ###
+class ChipIterator:
+    def __init__(self, match=None):
+        self.match = parse_chip_name(match) if match is not None else None
+        self.nr = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        chip, self.nr = get_detected_chips(self.match, self.nr)
+
+        if chip is None:
+            raise StopIteration
+
+        return chip
+
+    def __del__(self):
+        if self.match is not None:
+            free_chip_name(self.match)
+
+    def next(self):  # python2 compatibility
+        return self.__next__()
+
+
+class FeatureIterator:
+    def __init__(self, chip):
+        self.chip = chip
+        self.nr = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        feature, self.nr = get_features(self.chip, self.nr)
+
+        if feature is None:
+            raise StopIteration
+
+        return feature
+
+    def next(self):  # python2 compatibility
+        return self.__next__()
+
+
+class SubFeatureIterator:
+    def __init__(self, chip, feature):
+        self.chip = chip
+        self.feature = feature
+        self.nr = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
+
+        if subfeature is None:
+            raise StopIteration
+
+        return subfeature
+
+    def next(self):  # python2 compatibility
+        return self.__next__()
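
Taken together, the iterators above support the usual chip -> feature -> subfeature walk; a brief sketch (illustrative, not part of the patch; assumes libsensors is installed and this module is importable as lm_sensors):

    import lm_sensors as sensors

    sensors.init()  # optionally pass a config file path, e.g. '/etc/sensors3.conf'
    try:
        for chip in sensors.ChipIterator():
            print(sensors.chip_snprintf_name(chip))
            for feat in sensors.FeatureIterator(chip):
                label = sensors.get_label(chip, feat)
                for sub in sensors.SubFeatureIterator(chip, feat):
                    print('  %s: %s' % (label, sensors.get_value(chip, sub.number)))
    finally:
        sensors.cleanup()
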
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
new file mode 100644
index 000000000..a65a304b6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
@@ -0,0 +1,74 @@
+# Minecraft Remote Console module.
+#
+# Copyright (C) 2015 Barnaby Gale
+#
+# SPDX-License-Identifier: MIT
+
+import socket
+import select
+import struct
+import time
+
+
+class MCRconException(Exception):
+    pass
+
+
+class MCRcon(object):
+    socket = None
+
+    def connect(self, host, port, password):
+        if self.socket is not None:
+            raise MCRconException("Already connected")
+        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.socket.settimeout(0.9)
+        self.socket.connect((host, port))
+        self.send(3, password)
+
+    def disconnect(self):
+        if self.socket is None:
+            raise MCRconException("Already disconnected")
+        self.socket.close()
+        self.socket = None
+
+    def read(self, length):
+        data = b""
+        while len(data) < length:
+            data += self.socket.recv(length - len(data))
+        return data
+
+    def send(self, out_type, out_data):
+        if self.socket is None:
+            raise MCRconException("Must connect before sending data")
+
+        # Send a request packet
+        out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
+        out_length = struct.pack('<i', len(out_payload))
+        self.socket.send(out_length + out_payload)
+
+        # Read response packets
+        in_data = ""
+        while True:
+            # Read a packet
+            in_length, = struct.unpack('<i', self.read(4))
+            in_payload = self.read(in_length)
+            in_id, in_type = struct.unpack('<ii', in_payload[:8])
+            in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
+
+            # Sanity checks
+            if in_padding != b'\x00\x00':
+                raise MCRconException("Incorrect padding")
+            if in_id == -1:
+                raise MCRconException("Login failed")
+
+            # Record the response
+            in_data += in_data_partial.decode('utf8')
+
+            # If there's nothing more to receive, return the response
+            if len(select.select([self.socket], [], [], 0)[0]) == 0:
+                return in_data
+
+    def command(self, command):
+        result = self.send(2, command)
+        time.sleep(0.003)  # MC-72390 workaround
+        return result
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
new file mode 100644
index 000000000..da04bb857
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: Apache-2.0
+"""
+    monotonic
+    ~~~~~~~~~
+
+    This module provides a ``monotonic()`` function which returns the
+    value (in fractional seconds) of a clock which never goes backwards.
+
+    On Python 3.3 or newer, ``monotonic`` will be an alias of
+    ``time.monotonic`` from the standard library. On older versions,
+    it will fall back to an equivalent implementation:
+
+    +-------------+----------------------------------------+
+    | Linux, BSD  | ``clock_gettime(3)``                   |
+    +-------------+----------------------------------------+
+    | Windows     | ``GetTickCount`` or ``GetTickCount64`` |
+    +-------------+----------------------------------------+
+    | OS X        | ``mach_absolute_time``                 |
+    +-------------+----------------------------------------+
+
+    If no suitable implementation exists for the current platform,
+    attempting to import this module (or to import from it) will
+    cause a ``RuntimeError`` exception to be raised.
+
+
+    Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +""" +import time + + +__all__ = ('monotonic',) + + +try: + monotonic = time.monotonic +except AttributeError: + import ctypes + import ctypes.util + import os + import sys + import threading + try: + if sys.platform == 'darwin': # OS X, iOS + # See Technical Q&A QA1398 of the Mac Developer Library: + # <https://developer.apple.com/library/mac/qa/qa1398/> + libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True) + + class mach_timebase_info_data_t(ctypes.Structure): + """System timebase info. Defined in <mach/mach_time.h>.""" + _fields_ = (('numer', ctypes.c_uint32), + ('denom', ctypes.c_uint32)) + + mach_absolute_time = libc.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + + timebase = mach_timebase_info_data_t() + libc.mach_timebase_info(ctypes.byref(timebase)) + ticks_per_second = timebase.numer / timebase.denom * 1.0e9 + + def monotonic(): + """Monotonic clock, cannot go backward.""" + return mach_absolute_time() / ticks_per_second + + elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'): + if sys.platform.startswith('cygwin'): + # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since + # version 1.7.6. Using raw WinAPI for maximum version compatibility. + + # Ugly hack using the wrong calling convention (in 32-bit mode) + # because ctypes has no windll under cygwin (and it also seems that + # the code letting you select stdcall in _ctypes doesn't exist under + # the preprocessor definitions relevant to cygwin). + # This is 'safe' because: + # 1. The ABI of GetTickCount and GetTickCount64 is identical for + # both calling conventions because they both have no parameters. + # 2. libffi masks the problem because after making the call it doesn't + # touch anything through esp and epilogue code restores a correct + # esp from ebp afterwards. + try: + kernel32 = ctypes.cdll.kernel32 + except OSError: # 'No such file or directory' + kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll') + else: + kernel32 = ctypes.windll.kernel32 + + GetTickCount64 = getattr(kernel32, 'GetTickCount64', None) + if GetTickCount64: + # Windows Vista / Windows Server 2008 or newer. + GetTickCount64.restype = ctypes.c_ulonglong + + def monotonic(): + """Monotonic clock, cannot go backward.""" + return GetTickCount64() / 1000.0 + + else: + # Before Windows Vista. 
+ GetTickCount = kernel32.GetTickCount + GetTickCount.restype = ctypes.c_uint32 + + get_tick_count_lock = threading.Lock() + get_tick_count_last_sample = 0 + get_tick_count_wraparounds = 0 + + def monotonic(): + """Monotonic clock, cannot go backward.""" + global get_tick_count_last_sample + global get_tick_count_wraparounds + + with get_tick_count_lock: + current_sample = GetTickCount() + if current_sample < get_tick_count_last_sample: + get_tick_count_wraparounds += 1 + get_tick_count_last_sample = current_sample + + final_milliseconds = get_tick_count_wraparounds << 32 + final_milliseconds += get_tick_count_last_sample + return final_milliseconds / 1000.0 + + else: + try: + clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'), + use_errno=True).clock_gettime + except Exception: + clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'), + use_errno=True).clock_gettime + + class timespec(ctypes.Structure): + """Time specification, as described in clock_gettime(3).""" + _fields_ = (('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long)) + + if sys.platform.startswith('linux'): + CLOCK_MONOTONIC = 1 + elif sys.platform.startswith('freebsd'): + CLOCK_MONOTONIC = 4 + elif sys.platform.startswith('sunos5'): + CLOCK_MONOTONIC = 4 + elif 'bsd' in sys.platform: + CLOCK_MONOTONIC = 3 + elif sys.platform.startswith('aix'): + CLOCK_MONOTONIC = ctypes.c_longlong(10) + + def monotonic(): + """Monotonic clock, cannot go backward.""" + ts = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)): + errno = ctypes.get_errno() + raise OSError(errno, os.strerror(errno)) + return ts.tv_sec + ts.tv_nsec / 1.0e9 + + # Perform a sanity-check. + if monotonic() - monotonic() > 0: + raise ValueError('monotonic() is not monotonic!') + + except Exception as e: + raise RuntimeError('no suitable implementation for this system: ' + repr(e)) diff --git a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py new file mode 100644 index 000000000..589401b8f --- /dev/null +++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py @@ -0,0 +1,110 @@ +# Copyright (c) 2009 Raymond Hettinger +# +# SPDX-License-Identifier: MIT + +from UserDict import DictMixin + + +class OrderedDict(dict, DictMixin): + + def __init__(self, *args, **kwds): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__end + except AttributeError: + self.clear() + self.update(*args, **kwds) + + def clear(self): + self.__end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.__map = {} # key --> [key, prev, next] + dict.clear(self) + + def __setitem__(self, key, value): + if key not in self: + end = self.__end + curr = end[1] + curr[2] = end[1] = self.__map[key] = [key, curr, end] + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + dict.__delitem__(self, key) + key, prev, next = self.__map.pop(key) + prev[2] = next + next[1] = prev + + def __iter__(self): + end = self.__end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + if last: + key = reversed(self).next() + else: + key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + 
tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return self.__class__, (items,), inst_dict + return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + iterkeys = DictMixin.iterkeys + itervalues = DictMixin.itervalues + iteritems = DictMixin.iteritems + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + if len(self) != len(other): + return False + for p, q in zip(self.items(), other.items()): + if p != q: + return False + return True + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other diff --git a/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py new file mode 100644 index 000000000..3add84816 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: MIT +""" +urllib3 - Thread-safe connection pooling and re-using. +""" + +from __future__ import absolute_import +import warnings + +from .connectionpool import ( + HTTPConnectionPool, + HTTPSConnectionPool, + connection_from_url +) + +from . import exceptions +from .filepost import encode_multipart_formdata +from .poolmanager import PoolManager, ProxyManager, proxy_from_url +from .response import HTTPResponse +from .util.request import make_headers +from .util.url import get_host +from .util.timeout import Timeout +from .util.retry import Retry + + +# Set default logging handler to avoid "No handler found" warnings. +import logging +try: # Python 2.7+ + from logging import NullHandler +except ImportError: + class NullHandler(logging.Handler): + def emit(self, record): + pass + +__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' +__license__ = 'MIT' +__version__ = '1.21.1' + +__all__ = ( + 'HTTPConnectionPool', + 'HTTPSConnectionPool', + 'PoolManager', + 'ProxyManager', + 'HTTPResponse', + 'Retry', + 'Timeout', + 'add_stderr_logger', + 'connection_from_url', + 'disable_warnings', + 'encode_multipart_formdata', + 'get_host', + 'make_headers', + 'proxy_from_url', +) + +logging.getLogger(__name__).addHandler(NullHandler()) + + +def add_stderr_logger(level=logging.DEBUG): + """ + Helper for quickly adding a StreamHandler to the logger. Useful for + debugging. + + Returns the handler after adding it. + """ + # This method needs to be in this __init__.py to get the __name__ correct + # even if urllib3 is vendored within another package. + logger = logging.getLogger(__name__) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) + logger.addHandler(handler) + logger.setLevel(level) + logger.debug('Added a stderr logging handler to logger: %s', __name__) + return handler + + +# ... Clean up. +del NullHandler + + +# All warning filters *must* be appended unless you're really certain that they +# shouldn't be: otherwise, it's very hard for users to use most Python +# mechanisms to silence them. 
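
Before the warning filters below are installed, it may help to see how this vendored package is typically driven (an illustrative sketch, not part of the patch; assumes the copy is importable as plain urllib3):

    import urllib3

    urllib3.add_stderr_logger()  # attach a DEBUG StreamHandler to the package logger
    http = urllib3.PoolManager()
    r = http.request('GET', 'http://example.com/')
    print(r.status, len(r.data))
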
+# SecurityWarning's always go off by default. +warnings.simplefilter('always', exceptions.SecurityWarning, append=True) +# SubjectAltNameWarning's should go off once per host +warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) +# InsecurePlatformWarning's don't vary between requests, so we keep it default. +warnings.simplefilter('default', exceptions.InsecurePlatformWarning, + append=True) +# SNIMissingWarnings should go off only once. +warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) + + +def disable_warnings(category=exceptions.HTTPWarning): + """ + Helper for quickly disabling all urllib3 warnings. + """ + warnings.simplefilter('ignore', category) diff --git a/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py new file mode 100644 index 000000000..c1d2fad36 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py @@ -0,0 +1,315 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +from collections import Mapping, MutableMapping +try: + from threading import RLock +except ImportError: # Platform-specific: No threads available + class RLock: + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, traceback): + pass + + +try: # Python 2.7+ + from collections import OrderedDict +except ImportError: + from .packages.ordered_dict import OrderedDict +from .packages.six import iterkeys, itervalues, PY3 + + +__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] + + +_Null = object() + + +class RecentlyUsedContainer(MutableMapping): + """ + Provides a thread-safe dict-like container which maintains up to + ``maxsize`` keys while throwing away the least-recently-used keys beyond + ``maxsize``. + + :param maxsize: + Maximum number of recent elements to retain. + + :param dispose_func: + Every time an item is evicted from the container, + ``dispose_func(value)`` is called. Callback which will get called + """ + + ContainerCls = OrderedDict + + def __init__(self, maxsize=10, dispose_func=None): + self._maxsize = maxsize + self.dispose_func = dispose_func + + self._container = self.ContainerCls() + self.lock = RLock() + + def __getitem__(self, key): + # Re-insert the item, moving it to the end of the eviction line. + with self.lock: + item = self._container.pop(key) + self._container[key] = item + return item + + def __setitem__(self, key, value): + evicted_value = _Null + with self.lock: + # Possibly evict the existing value of 'key' + evicted_value = self._container.get(key, _Null) + self._container[key] = value + + # If we didn't evict an existing value, we might have to evict the + # least recently used item from the beginning of the container. 
+ if len(self._container) > self._maxsize: + _key, evicted_value = self._container.popitem(last=False) + + if self.dispose_func and evicted_value is not _Null: + self.dispose_func(evicted_value) + + def __delitem__(self, key): + with self.lock: + value = self._container.pop(key) + + if self.dispose_func: + self.dispose_func(value) + + def __len__(self): + with self.lock: + return len(self._container) + + def __iter__(self): + raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') + + def clear(self): + with self.lock: + # Copy pointers to all values, then wipe the mapping + values = list(itervalues(self._container)) + self._container.clear() + + if self.dispose_func: + for value in values: + self.dispose_func(value) + + def keys(self): + with self.lock: + return list(iterkeys(self._container)) + + +class HTTPHeaderDict(MutableMapping): + """ + :param headers: + An iterable of field-value pairs. Must not contain multiple field names + when compared case-insensitively. + + :param kwargs: + Additional field-value pairs to pass in to ``dict.update``. + + A ``dict`` like container for storing HTTP Headers. + + Field names are stored and compared case-insensitively in compliance with + RFC 7230. Iteration provides the first case-sensitive key seen for each + case-insensitive pair. + + Using ``__setitem__`` syntax overwrites fields that compare equal + case-insensitively in order to maintain ``dict``'s api. For fields that + compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` + in a loop. + + If multiple fields that are equal case-insensitively are passed to the + constructor or ``.update``, the behavior is undefined and some will be + lost. + + >>> headers = HTTPHeaderDict() + >>> headers.add('Set-Cookie', 'foo=bar') + >>> headers.add('set-cookie', 'baz=quxx') + >>> headers['content-length'] = '7' + >>> headers['SET-cookie'] + 'foo=bar, baz=quxx' + >>> headers['Content-Length'] + '7' + """ + + def __init__(self, headers=None, **kwargs): + super(HTTPHeaderDict, self).__init__() + self._container = OrderedDict() + if headers is not None: + if isinstance(headers, HTTPHeaderDict): + self._copy_from(headers) + else: + self.extend(headers) + if kwargs: + self.extend(kwargs) + + def __setitem__(self, key, val): + self._container[key.lower()] = [key, val] + return self._container[key.lower()] + + def __getitem__(self, key): + val = self._container[key.lower()] + return ', '.join(val[1:]) + + def __delitem__(self, key): + del self._container[key.lower()] + + def __contains__(self, key): + return key.lower() in self._container + + def __eq__(self, other): + if not isinstance(other, Mapping) and not hasattr(other, 'keys'): + return False + if not isinstance(other, type(self)): + other = type(self)(other) + return (dict((k.lower(), v) for k, v in self.itermerged()) == + dict((k.lower(), v) for k, v in other.itermerged())) + + def __ne__(self, other): + return not self.__eq__(other) + + if not PY3: # Python 2 + iterkeys = MutableMapping.iterkeys + itervalues = MutableMapping.itervalues + + __marker = object() + + def __len__(self): + return len(self._container) + + def __iter__(self): + # Only provide the originally cased names + for vals in self._container.values(): + yield vals[0] + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. 
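
A short illustration of the pop semantics documented above, combined with the case-insensitive multi-value storage (a sketch, not from the original source):

    d = HTTPHeaderDict()
    d.add('Set-Cookie', 'a=1')
    d.add('set-cookie', 'b=2')
    assert d.pop('SET-COOKIE') == 'a=1, b=2'     # values merge on read, then the field is removed
    assert d.pop('set-cookie', 'gone') == 'gone'
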
+ ''' + # Using the MutableMapping function directly fails due to the private marker. + # Using ordinary dict.pop would expose the internal structures. + # So let's reinvent the wheel. + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def discard(self, key): + try: + del self[key] + except KeyError: + pass + + def add(self, key, val): + """Adds a (name, value) pair, doesn't overwrite the value if it already + exists. + + >>> headers = HTTPHeaderDict(foo='bar') + >>> headers.add('Foo', 'baz') + >>> headers['foo'] + 'bar, baz' + """ + key_lower = key.lower() + new_vals = [key, val] + # Keep the common case aka no item present as fast as possible + vals = self._container.setdefault(key_lower, new_vals) + if new_vals is not vals: + vals.append(val) + + def extend(self, *args, **kwargs): + """Generic import function for any type of header-like object. + Adapted version of MutableMapping.update in order to insert items + with self.add instead of self.__setitem__ + """ + if len(args) > 1: + raise TypeError("extend() takes at most 1 positional " + "arguments ({0} given)".format(len(args))) + other = args[0] if len(args) >= 1 else () + + if isinstance(other, HTTPHeaderDict): + for key, val in other.iteritems(): + self.add(key, val) + elif isinstance(other, Mapping): + for key in other: + self.add(key, other[key]) + elif hasattr(other, "keys"): + for key in other.keys(): + self.add(key, other[key]) + else: + for key, value in other: + self.add(key, value) + + for key, value in kwargs.items(): + self.add(key, value) + + def getlist(self, key): + """Returns a list of all the values for the named field. Returns an + empty list if the key doesn't exist.""" + try: + vals = self._container[key.lower()] + except KeyError: + return [] + else: + return vals[1:] + + # Backwards compatibility for httplib + getheaders = getlist + getallmatchingheaders = getlist + iget = getlist + + def __repr__(self): + return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) + + def _copy_from(self, other): + for key in other: + val = other.getlist(key) + if isinstance(val, list): + # Don't need to convert tuples + val = list(val) + self._container[key.lower()] = [key] + val + + def copy(self): + clone = type(self)() + clone._copy_from(self) + return clone + + def iteritems(self): + """Iterate over all header lines, including duplicate ones.""" + for key in self: + vals = self._container[key.lower()] + for val in vals[1:]: + yield vals[0], val + + def itermerged(self): + """Iterate over all headers, merging duplicate ones together.""" + for key in self: + val = self._container[key.lower()] + yield val[0], ', '.join(val[1:]) + + def items(self): + return list(self.iteritems()) + + @classmethod + def from_httplib(cls, message): # Python 2 + """Read headers from a Python 2 httplib message object.""" + # python2.7 does not expose a proper API for exporting multiheaders + # efficiently. This function re-reads raw lines from the message + # object and extracts the multiheaders properly. 
+ headers = [] + + for line in message.headers: + if line.startswith((' ', '\t')): + key, value = headers[-1] + headers[-1] = (key, value + '\r\n' + line.rstrip()) + continue + + key, value = line.split(':', 1) + headers.append((key, value.strip())) + + return cls(headers) diff --git a/collectors/python.d.plugin/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py new file mode 100644 index 000000000..f757493c7 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py @@ -0,0 +1,374 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import datetime +import logging +import os +import sys +import socket +from socket import error as SocketError, timeout as SocketTimeout +import warnings +from .packages import six +from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection +from .packages.six.moves.http_client import HTTPException # noqa: F401 + +try: # Compiled with SSL? + import ssl + BaseSSLError = ssl.SSLError +except (ImportError, AttributeError): # Platform-specific: No SSL. + ssl = None + + class BaseSSLError(BaseException): + pass + + +try: # Python 3: + # Not a no-op, we're adding this to the namespace so it can be imported. + ConnectionError = ConnectionError +except NameError: # Python 2: + class ConnectionError(Exception): + pass + + +from .exceptions import ( + NewConnectionError, + ConnectTimeoutError, + SubjectAltNameWarning, + SystemTimeWarning, +) +from .packages.ssl_match_hostname import match_hostname, CertificateError + +from .util.ssl_ import ( + resolve_cert_reqs, + resolve_ssl_version, + assert_fingerprint, + create_urllib3_context, + ssl_wrap_socket +) + + +from .util import connection + +from ._collections import HTTPHeaderDict + +log = logging.getLogger(__name__) + +port_by_scheme = { + 'http': 80, + 'https': 443, +} + +# When updating RECENT_DATE, move it to +# within two years of the current date, and no +# earlier than 6 months ago. +RECENT_DATE = datetime.date(2016, 1, 1) + + +class DummyConnection(object): + """Used to detect a failed ConnectionCls import.""" + pass + + +class HTTPConnection(_HTTPConnection, object): + """ + Based on httplib.HTTPConnection but provides an extra constructor + backwards-compatibility layer between older and newer Pythons. + + Additional keyword parameters are used to configure attributes of the connection. + Accepted parameters include: + + - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` + - ``source_address``: Set the source address for the current connection. + + .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x + + - ``socket_options``: Set specific options on the underlying socket. If not specified, then + defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling + Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. + + For example, if you wish to enable TCP Keep Alive in addition to the defaults, + you might pass:: + + HTTPConnection.default_socket_options + [ + (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), + ] + + Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). + """ + + default_port = port_by_scheme['http'] + + #: Disable Nagle's algorithm by default. + #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` + default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] + + #: Whether this connection verifies the host's certificate. 
+ is_verified = False + + def __init__(self, *args, **kw): + if six.PY3: # Python 3 + kw.pop('strict', None) + + # Pre-set source_address in case we have an older Python like 2.6. + self.source_address = kw.get('source_address') + + if sys.version_info < (2, 7): # Python 2.6 + # _HTTPConnection on Python 2.6 will balk at this keyword arg, but + # not newer versions. We can still use it when creating a + # connection though, so we pop it *after* we have saved it as + # self.source_address. + kw.pop('source_address', None) + + #: The socket options provided by the user. If no options are + #: provided, we use the default options. + self.socket_options = kw.pop('socket_options', self.default_socket_options) + + # Superclass also sets self.source_address in Python 2.7+. + _HTTPConnection.__init__(self, *args, **kw) + + def _new_conn(self): + """ Establish a socket connection and set nodelay settings on it. + + :return: New socket connection. + """ + extra_kw = {} + if self.source_address: + extra_kw['source_address'] = self.source_address + + if self.socket_options: + extra_kw['socket_options'] = self.socket_options + + try: + conn = connection.create_connection( + (self.host, self.port), self.timeout, **extra_kw) + + except SocketTimeout as e: + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout)) + + except SocketError as e: + raise NewConnectionError( + self, "Failed to establish a new connection: %s" % e) + + return conn + + def _prepare_conn(self, conn): + self.sock = conn + # the _tunnel_host attribute was added in python 2.6.3 (via + # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do + # not have them. + if getattr(self, '_tunnel_host', None): + # TODO: Fix tunnel so it doesn't depend on self.sock state. 
+ self._tunnel() + # Mark this connection as not reusable + self.auto_open = 0 + + def connect(self): + conn = self._new_conn() + self._prepare_conn(conn) + + def request_chunked(self, method, url, body=None, headers=None): + """ + Alternative to the common request method, which sends the + body with chunked encoding and not as one block + """ + headers = HTTPHeaderDict(headers if headers is not None else {}) + skip_accept_encoding = 'accept-encoding' in headers + skip_host = 'host' in headers + self.putrequest( + method, + url, + skip_accept_encoding=skip_accept_encoding, + skip_host=skip_host + ) + for header, value in headers.items(): + self.putheader(header, value) + if 'transfer-encoding' not in headers: + self.putheader('Transfer-Encoding', 'chunked') + self.endheaders() + + if body is not None: + stringish_types = six.string_types + (six.binary_type,) + if isinstance(body, stringish_types): + body = (body,) + for chunk in body: + if not chunk: + continue + if not isinstance(chunk, six.binary_type): + chunk = chunk.encode('utf8') + len_str = hex(len(chunk))[2:] + self.send(len_str.encode('utf-8')) + self.send(b'\r\n') + self.send(chunk) + self.send(b'\r\n') + + # After the if clause, to always have a closed body + self.send(b'0\r\n\r\n') + + +class HTTPSConnection(HTTPConnection): + default_port = port_by_scheme['https'] + + ssl_version = None + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + ssl_context=None, **kw): + + HTTPConnection.__init__(self, host, port, strict=strict, + timeout=timeout, **kw) + + self.key_file = key_file + self.cert_file = cert_file + self.ssl_context = ssl_context + + # Required property for Google AppEngine 1.9.0 which otherwise causes + # HTTPS requests to go out as HTTP. (See Issue #356) + self._protocol = 'https' + + def connect(self): + conn = self._new_conn() + self._prepare_conn(conn) + + if self.ssl_context is None: + self.ssl_context = create_urllib3_context( + ssl_version=resolve_ssl_version(None), + cert_reqs=resolve_cert_reqs(None), + ) + + self.sock = ssl_wrap_socket( + sock=conn, + keyfile=self.key_file, + certfile=self.cert_file, + ssl_context=self.ssl_context, + ) + + +class VerifiedHTTPSConnection(HTTPSConnection): + """ + Based on httplib.HTTPSConnection but wraps the socket with + SSL certification. + """ + cert_reqs = None + ca_certs = None + ca_cert_dir = None + ssl_version = None + assert_fingerprint = None + + def set_cert(self, key_file=None, cert_file=None, + cert_reqs=None, ca_certs=None, + assert_hostname=None, assert_fingerprint=None, + ca_cert_dir=None): + """ + This method should only be called once, before the connection is used. + """ + # If cert_reqs is not provided, we can try to guess. If the user gave + # us a cert database, we assume they want to use it: otherwise, if + # they gave us an SSL Context object we should use whatever is set for + # it. 
+ if cert_reqs is None: + if ca_certs or ca_cert_dir: + cert_reqs = 'CERT_REQUIRED' + elif self.ssl_context is not None: + cert_reqs = self.ssl_context.verify_mode + + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + self.ca_certs = ca_certs and os.path.expanduser(ca_certs) + self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) + + def connect(self): + # Add certificate verification + conn = self._new_conn() + + hostname = self.host + if getattr(self, '_tunnel_host', None): + # _tunnel_host was added in Python 2.6.3 + # (See: http://hg.python.org/cpython/rev/0f57b30a152f) + + self.sock = conn + # Calls self._set_hostport(), so self.host is + # self._tunnel_host below. + self._tunnel() + # Mark this connection as not reusable + self.auto_open = 0 + + # Override the host with the one we're requesting data from. + hostname = self._tunnel_host + + is_time_off = datetime.date.today() < RECENT_DATE + if is_time_off: + warnings.warn(( + 'System time is way off (before {0}). This will probably ' + 'lead to SSL verification errors').format(RECENT_DATE), + SystemTimeWarning + ) + + # Wrap socket using verification with the root certs in + # trusted_root_certs + if self.ssl_context is None: + self.ssl_context = create_urllib3_context( + ssl_version=resolve_ssl_version(self.ssl_version), + cert_reqs=resolve_cert_reqs(self.cert_reqs), + ) + + context = self.ssl_context + context.verify_mode = resolve_cert_reqs(self.cert_reqs) + self.sock = ssl_wrap_socket( + sock=conn, + keyfile=self.key_file, + certfile=self.cert_file, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + server_hostname=hostname, + ssl_context=context) + + if self.assert_fingerprint: + assert_fingerprint(self.sock.getpeercert(binary_form=True), + self.assert_fingerprint) + elif context.verify_mode != ssl.CERT_NONE \ + and not getattr(context, 'check_hostname', False) \ + and self.assert_hostname is not False: + # While urllib3 attempts to always turn off hostname matching from + # the TLS library, this cannot always be done. So we check whether + # the TLS Library still thinks it's matching hostnames. + cert = self.sock.getpeercert() + if not cert.get('subjectAltName', ()): + warnings.warn(( + 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' + '`commonName` for now. This feature is being removed by major browsers and ' + 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' + 'for details.)'.format(hostname)), + SubjectAltNameWarning + ) + _match_hostname(cert, self.assert_hostname or hostname) + + self.is_verified = ( + context.verify_mode == ssl.CERT_REQUIRED or + self.assert_fingerprint is not None + ) + + +def _match_hostname(cert, asserted_hostname): + try: + match_hostname(cert, asserted_hostname) + except CertificateError as e: + log.error( + 'Certificate did not match expected hostname: %s. ' + 'Certificate: %s', asserted_hostname, cert + ) + # Add cert to exception and reraise so client code can inspect + # the cert when catching the exception, if they want to + e._peer_cert = cert + raise + + +if ssl: + # Make a copy for testing. 
+ UnverifiedHTTPSConnection = HTTPSConnection + HTTPSConnection = VerifiedHTTPSConnection +else: + HTTPSConnection = DummyConnection diff --git a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py new file mode 100644 index 000000000..90e4c86a5 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py @@ -0,0 +1,900 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import errno +import logging +import sys +import warnings + +from socket import error as SocketError, timeout as SocketTimeout +import socket + + +from .exceptions import ( + ClosedPoolError, + ProtocolError, + EmptyPoolError, + HeaderParsingError, + HostChangedError, + LocationValueError, + MaxRetryError, + ProxyError, + ReadTimeoutError, + SSLError, + TimeoutError, + InsecureRequestWarning, + NewConnectionError, +) +from .packages.ssl_match_hostname import CertificateError +from .packages import six +from .packages.six.moves import queue +from .connection import ( + port_by_scheme, + DummyConnection, + HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, + HTTPException, BaseSSLError, +) +from .request import RequestMethods +from .response import HTTPResponse + +from .util.connection import is_connection_dropped +from .util.request import set_file_position +from .util.response import assert_header_parsing +from .util.retry import Retry +from .util.timeout import Timeout +from .util.url import get_host, Url + + +if six.PY2: + # Queue is imported for side effects on MS Windows + import Queue as _unused_module_Queue # noqa: F401 + +xrange = six.moves.xrange + +log = logging.getLogger(__name__) + +_Default = object() + + +# Pool objects +class ConnectionPool(object): + """ + Base class for all connection pools, such as + :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. + """ + + scheme = None + QueueCls = queue.LifoQueue + + def __init__(self, host, port=None): + if not host: + raise LocationValueError("No host specified.") + + self.host = _ipv6_host(host).lower() + self.port = port + + def __str__(self): + return '%s(host=%r, port=%r)' % (type(self).__name__, + self.host, self.port) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + # Return False to re-raise any potential exceptions + return False + + def close(self): + """ + Close all pooled connections and disable the pool. + """ + pass + + +# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 +_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) + + +class HTTPConnectionPool(ConnectionPool, RequestMethods): + """ + Thread-safe connection pool for one host. + + :param host: + Host used for this HTTP Connection (e.g. "localhost"), passed into + :class:`httplib.HTTPConnection`. + + :param port: + Port used for this HTTP Connection (None is equivalent to 80), passed + into :class:`httplib.HTTPConnection`. + + :param strict: + Causes BadStatusLine to be raised if the status line can't be parsed + as a valid HTTP/1.0 or 1.1 status line, passed into + :class:`httplib.HTTPConnection`. + + .. note:: + Only works in Python 2. This parameter is ignored in Python 3. + + :param timeout: + Socket timeout in seconds for each individual connection. 
This can + be a float or integer, which sets the timeout for the HTTP request, + or an instance of :class:`urllib3.util.Timeout` which gives you more + fine-grained control over request timeouts. After the constructor has + been parsed, this is always a `urllib3.util.Timeout` object. + + :param maxsize: + Number of connections to save that can be reused. More than 1 is useful + in multithreaded situations. If ``block`` is set to False, more + connections will be created but they will not be saved once they've + been used. + + :param block: + If set to True, no more than ``maxsize`` connections will be used at + a time. When no free connections are available, the call will block + until a connection has been released. This is a useful side effect for + particular multithreaded situations where one does not want to use more + than maxsize connections per host to prevent flooding. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param retries: + Retry configuration to use by default with requests in this pool. + + :param _proxy: + Parsed proxy URL, should not be used directly, instead, see + :class:`urllib3.connectionpool.ProxyManager`" + + :param _proxy_headers: + A dictionary with proxy headers, should not be used directly, + instead, see :class:`urllib3.connectionpool.ProxyManager`" + + :param \\**conn_kw: + Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, + :class:`urllib3.connection.HTTPSConnection` instances. + """ + + scheme = 'http' + ConnectionCls = HTTPConnection + ResponseCls = HTTPResponse + + def __init__(self, host, port=None, strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, + headers=None, retries=None, + _proxy=None, _proxy_headers=None, + **conn_kw): + ConnectionPool.__init__(self, host, port) + RequestMethods.__init__(self, headers) + + self.strict = strict + + if not isinstance(timeout, Timeout): + timeout = Timeout.from_float(timeout) + + if retries is None: + retries = Retry.DEFAULT + + self.timeout = timeout + self.retries = retries + + self.pool = self.QueueCls(maxsize) + self.block = block + + self.proxy = _proxy + self.proxy_headers = _proxy_headers or {} + + # Fill the queue up so that doing get() on it will block properly + for _ in xrange(maxsize): + self.pool.put(None) + + # These are mostly for testing and debugging purposes. + self.num_connections = 0 + self.num_requests = 0 + self.conn_kw = conn_kw + + if self.proxy: + # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. + # We cannot know if the user has added default socket options, so we cannot replace the + # list. + self.conn_kw.setdefault('socket_options', []) + + def _new_conn(self): + """ + Return a fresh :class:`HTTPConnection`. + """ + self.num_connections += 1 + log.debug("Starting new HTTP connection (%d): %s", + self.num_connections, self.host) + + conn = self.ConnectionCls(host=self.host, port=self.port, + timeout=self.timeout.connect_timeout, + strict=self.strict, **self.conn_kw) + return conn + + def _get_conn(self, timeout=None): + """ + Get a connection. Will return a pooled connection if one is available. + + If no connections are available and :prop:`.block` is ``False``, then a + fresh connection is returned. + + :param timeout: + Seconds to wait before giving up and raising + :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and + :prop:`.block` is ``True``. 
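
To make the checkout/checkin mechanics concrete, a small sketch (illustrative, not part of the patch; these are the private helpers documented here, normally driven via urlopen()):

    pool = HTTPConnectionPool('example.com', maxsize=2, block=True)
    conn = pool._get_conn(timeout=5.0)  # raises EmptyPoolError if saturated for 5s
    try:
        pass  # urlopen() would issue the request on conn at this point
    finally:
        pool._put_conn(conn)
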
+ """ + conn = None + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError: # self.pool is None + raise ClosedPoolError(self, "Pool is closed.") + + except queue.Empty: + if self.block: + raise EmptyPoolError(self, + "Pool reached maximum size and no more " + "connections are allowed.") + pass # Oh well, we'll create a new connection then + + # If this is a persistent connection, check if it got disconnected + if conn and is_connection_dropped(conn): + log.debug("Resetting dropped connection: %s", self.host) + conn.close() + if getattr(conn, 'auto_open', 1) == 0: + # This is a proxied connection that has been mutated by + # httplib._tunnel() and cannot be reused (since it would + # attempt to bypass the proxy) + conn = None + + return conn or self._new_conn() + + def _put_conn(self, conn): + """ + Put a connection back into the pool. + + :param conn: + Connection object for the current host and port as returned by + :meth:`._new_conn` or :meth:`._get_conn`. + + If the pool is already full, the connection is closed and discarded + because we exceeded maxsize. If connections are discarded frequently, + then maxsize should be increased. + + If the pool is closed, then the connection will be closed and discarded. + """ + try: + self.pool.put(conn, block=False) + return # Everything is dandy, done. + except AttributeError: + # self.pool is None. + pass + except queue.Full: + # This should never happen if self.block == True + log.warning( + "Connection pool is full, discarding connection: %s", + self.host) + + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + def _validate_conn(self, conn): + """ + Called right before a request is made, after the socket is created. + """ + pass + + def _prepare_proxy(self, conn): + # Nothing to do for HTTP connections. + pass + + def _get_timeout(self, timeout): + """ Helper that always returns a :class:`urllib3.util.Timeout` """ + if timeout is _Default: + return self.timeout.clone() + + if isinstance(timeout, Timeout): + return timeout.clone() + else: + # User passed us an int/float. This is for backwards compatibility, + # can be removed later + return Timeout.from_float(timeout) + + def _raise_timeout(self, err, url, timeout_value): + """Is the error actually a timeout? Will raise a ReadTimeout or pass""" + + if isinstance(err, SocketTimeout): + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + # See the above comment about EAGAIN in Python 3. In Python 2 we have + # to specifically catch it and throw the timeout error + if hasattr(err, 'errno') and err.errno in _blocking_errnos: + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + # Catch possible read timeouts thrown as SSL errors. If not the + # case, rethrow the original. We need to do this because of: + # http://bugs.python.org/issue10272 + if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + def _make_request(self, conn, method, url, timeout=_Default, chunked=False, + **httplib_request_kw): + """ + Perform a request on a given urllib connection object taken from our + pool. + + :param conn: + a connection from one of our connection pools + + :param timeout: + Socket timeout in seconds for the request. 
This can be a + float or integer, which will set the same timeout value for + the socket connect and the socket read, or an instance of + :class:`urllib3.util.Timeout`, which gives you more fine-grained + control over your timeouts. + """ + self.num_requests += 1 + + timeout_obj = self._get_timeout(timeout) + timeout_obj.start_connect() + conn.timeout = timeout_obj.connect_timeout + + # Trigger any extra validation we need to do. + try: + self._validate_conn(conn) + except (SocketTimeout, BaseSSLError) as e: + # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. + self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) + raise + + # conn.request() calls httplib.*.request, not the method in + # urllib3.request. It also calls makefile (recv) on the socket. + if chunked: + conn.request_chunked(method, url, **httplib_request_kw) + else: + conn.request(method, url, **httplib_request_kw) + + # Reset the timeout for the recv() on the socket + read_timeout = timeout_obj.read_timeout + + # App Engine doesn't have a sock attr + if getattr(conn, 'sock', None): + # In Python 3 socket.py will catch EAGAIN and return None when you + # try and read into the file pointer created by http.client, which + # instead raises a BadStatusLine exception. Instead of catching + # the exception and assuming all BadStatusLine exceptions are read + # timeouts, check for a zero timeout before making the request. + if read_timeout == 0: + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % read_timeout) + if read_timeout is Timeout.DEFAULT_TIMEOUT: + conn.sock.settimeout(socket.getdefaulttimeout()) + else: # None or a value + conn.sock.settimeout(read_timeout) + + # Receive the response from the server + try: + try: # Python 2.7, use buffering of HTTP responses + httplib_response = conn.getresponse(buffering=True) + except TypeError: # Python 2.6 and older, Python 3 + try: + httplib_response = conn.getresponse() + except Exception as e: + # Remove the TypeError from the exception chain in Python 3; + # otherwise it looks like a programming error was the cause. + six.raise_from(e, None) + except (SocketTimeout, BaseSSLError, SocketError) as e: + self._raise_timeout(err=e, url=url, timeout_value=read_timeout) + raise + + # AppEngine doesn't have a version attr. + http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') + log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, + method, url, http_version, httplib_response.status, + httplib_response.length) + + try: + assert_header_parsing(httplib_response.msg) + except HeaderParsingError as hpe: # Platform-specific: Python 3 + log.warning( + 'Failed to parse headers (url=%s): %s', + self._absolute_url(url), hpe, exc_info=True) + + return httplib_response + + def _absolute_url(self, path): + return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url + + def close(self): + """ + Close all pooled connections and disable the pool. + """ + # Disable access to the pool + old_pool, self.pool = self.pool, None + + try: + while True: + conn = old_pool.get(block=False) + if conn: + conn.close() + + except queue.Empty: + pass # Done. + + def is_same_host(self, url): + """ + Check if the given ``url`` is a member of the same host as this + connection pool. + """ + if url.startswith('/'): + return True + + # TODO: Add optional support for socket.gethostbyname checking. 
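+        # For example, a pool built as HTTPConnectionPool('example.com')
+        # treats '/index.html' and 'http://example.com:80/index.html' as the
+        # same host, but not 'http://example.com:8080/'.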
+        scheme, host, port = get_host(url)
+
+        host = _ipv6_host(host).lower()
+
+        # Use explicit default port for comparison when none is given
+        if self.port and not port:
+            port = port_by_scheme.get(scheme)
+        elif not self.port and port == port_by_scheme.get(scheme):
+            port = None
+
+        return (scheme, host, port) == (self.scheme, self.host, self.port)
+
+    def urlopen(self, method, url, body=None, headers=None, retries=None,
+                redirect=True, assert_same_host=True, timeout=_Default,
+                pool_timeout=None, release_conn=None, chunked=False,
+                body_pos=None, **response_kw):
+        """
+        Get a connection from the pool and perform an HTTP request. This is
+        the lowest level call for making a request, so you'll need to
+        specify all the raw details.
+
+        .. note::
+
+            More commonly, it's appropriate to use a convenience method
+            provided by :class:`.RequestMethods`, such as :meth:`request`.
+
+        .. note::
+
+            `release_conn` will only behave as expected if
+            `preload_content=False` because we want to make
+            `preload_content=False` the default behaviour someday soon
+            without breaking backwards compatibility.
+
+        :param method:
+            HTTP request method (such as GET, POST, PUT, etc.)
+
+        :param body:
+            Data to send in the request body, e.g. for POST requests. For a
+            convenience wrapper, see :meth:`.RequestMethods.request`.
+
+        :param headers:
+            Dictionary of custom headers to send, such as User-Agent,
+            If-None-Match, etc. If None, pool headers are used. If provided,
+            these headers completely replace any pool-specific headers.
+
+        :param retries:
+            Configure the number of retries to allow before raising a
+            :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+            Pass ``None`` to retry until you receive a response. Pass a
+            :class:`~urllib3.util.retry.Retry` object for fine-grained
+            control over different types of retries.
+            Pass an integer number to retry connection errors that many
+            times, but no other types of errors. Pass zero to never retry.
+
+            If ``False``, then retries are disabled and any exception is
+            raised immediately. Also, instead of raising a MaxRetryError on
+            redirects, the redirect response will be returned.
+
+        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+        :param redirect:
+            If True, automatically handle redirects (status codes 301, 302,
+            303, 307, 308). Each redirect counts as a retry. Disabling
+            retries will disable redirects, too.
+
+        :param assert_same_host:
+            If ``True``, raise :class:`~urllib3.exceptions.HostChangedError`
+            when the requested host does not match the pool's host. When
+            ``False``, you can use the pool on an HTTP proxy and request
+            foreign hosts.
+
+        :param timeout:
+            If specified, overrides the default timeout for this one
+            request. It may be a float (in seconds) or an instance of
+            :class:`urllib3.util.Timeout`.
+
+        :param pool_timeout:
+            If set and the pool is set to ``block=True``, then this method
+            will block for ``pool_timeout`` seconds and raise EmptyPoolError
+            if no connection is available within the time period.
+
+        :param release_conn:
+            If False, then the urlopen call will not release the connection
+            back into the pool once a response is received (but will release
+            if you read the entire contents of the response such as when
+            `preload_content=True`). This is useful if you're not preloading
+            the response's content immediately. You will need to call
+            ``r.release_conn()`` on the response ``r`` to return the
+            connection back into the pool. If None, it takes the value of
+            ``response_kw.get('preload_content', True)``.
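+
+            A minimal sketch of this pattern (assumes an existing ``pool``
+            and ``preload_content=False``)::
+
+                >>> r = pool.urlopen('GET', '/', preload_content=False)
+                >>> data = r.read()
+                >>> r.release_conn()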
+ + :param chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param int body_pos: + Position to seek to in file-like body in the event of a retry or + redirect. Typically this won't need to be set because urllib3 will + auto-populate the value when needed. + + :param \\**response_kw: + Additional parameters are passed to + :meth:`urllib3.response.HTTPResponse.from_httplib` + """ + if headers is None: + headers = self.headers + + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) + + if release_conn is None: + release_conn = response_kw.get('preload_content', True) + + # Check host + if assert_same_host and not self.is_same_host(url): + raise HostChangedError(self, url, retries) + + conn = None + + # Track whether `conn` needs to be released before + # returning/raising/recursing. Update this variable if necessary, and + # leave `release_conn` constant throughout the function. That way, if + # the function recurses, the original value of `release_conn` will be + # passed down into the recursive call, and its value will be respected. + # + # See issue #651 [1] for details. + # + # [1] <https://github.com/shazow/urllib3/issues/651> + release_this_conn = release_conn + + # Merge the proxy headers. Only do this in HTTP. We have to copy the + # headers dict so we can safely change it without those changes being + # reflected in anyone else's copy. + if self.scheme == 'http': + headers = headers.copy() + headers.update(self.proxy_headers) + + # Must keep the exception bound to a separate variable or else Python 3 + # complains about UnboundLocalError. + err = None + + # Keep track of whether we cleanly exited the except block. This + # ensures we do proper cleanup in finally. + clean_exit = False + + # Rewind body position, if needed. Record current position + # for future rewinds in the event of a redirect/retry. + body_pos = set_file_position(body, body_pos) + + try: + # Request a connection from the queue. + timeout_obj = self._get_timeout(timeout) + conn = self._get_conn(timeout=pool_timeout) + + conn.timeout = timeout_obj.connect_timeout + + is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) + if is_new_proxy_conn: + self._prepare_proxy(conn) + + # Make the request on the httplib connection object. + httplib_response = self._make_request(conn, method, url, + timeout=timeout_obj, + body=body, headers=headers, + chunked=chunked) + + # If we're going to release the connection in ``finally:``, then + # the response doesn't need to know about the connection. Otherwise + # it will also try to release it and we'll have a double-release + # mess. + response_conn = conn if not release_conn else None + + # Pass method to Response for length checking + response_kw['request_method'] = method + + # Import httplib's response into our own wrapper object + response = self.ResponseCls.from_httplib(httplib_response, + pool=self, + connection=response_conn, + retries=retries, + **response_kw) + + # Everything went great! + clean_exit = True + + except queue.Empty: + # Timed out by queue. + raise EmptyPoolError(self, "No pool connections are available.") + + except (BaseSSLError, CertificateError) as e: + # Close the connection. If a connection is reused on which there + # was a Certificate error, the next request will certainly raise + # another Certificate error. 
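+            # Wrapping the error in urllib3's SSLError means callers only
+            # have to handle urllib3's exception hierarchy, not ssl's.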
+            clean_exit = False
+            raise SSLError(e)
+
+        except SSLError:
+            # Treat SSLError separately from BaseSSLError to preserve
+            # traceback.
+            clean_exit = False
+            raise
+
+        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+            # Discard the connection for these exceptions. It will be
+            # replaced during the next _get_conn() call.
+            clean_exit = False
+
+            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+                e = ProxyError('Cannot connect to proxy.', e)
+            elif isinstance(e, (SocketError, HTTPException)):
+                e = ProtocolError('Connection aborted.', e)
+
+            retries = retries.increment(method, url, error=e, _pool=self,
+                                        _stacktrace=sys.exc_info()[2])
+            retries.sleep()
+
+            # Keep track of the error for the retry warning.
+            err = e
+
+        finally:
+            if not clean_exit:
+                # We hit some kind of exception, handled or otherwise. We
+                # need to throw the connection away unless explicitly told
+                # not to. Close the connection, set the variable to None,
+                # and make sure we put the None back in the pool to avoid
+                # leaking it.
+                conn = conn and conn.close()
+                release_this_conn = True
+
+            if release_this_conn:
+                # Put the connection back to be reused. If the connection is
+                # expired then it will be None, which will get replaced with
+                # a fresh connection during _get_conn.
+                self._put_conn(conn)
+
+        if not conn:
+            # Try again
+            log.warning("Retrying (%r) after connection "
+                        "broken by '%r': %s", retries, err, url)
+            return self.urlopen(method, url, body, headers, retries,
+                                redirect, assert_same_host,
+                                timeout=timeout, pool_timeout=pool_timeout,
+                                release_conn=release_conn, body_pos=body_pos,
+                                **response_kw)
+
+        # Handle redirect?
+        redirect_location = redirect and response.get_redirect_location()
+        if redirect_location:
+            if response.status == 303:
+                method = 'GET'
+
+            try:
+                retries = retries.increment(method, url, response=response,
+                                            _pool=self)
+            except MaxRetryError:
+                if retries.raise_on_redirect:
+                    # Release the connection for this response, since we're
+                    # not returning it to be released manually.
+                    response.release_conn()
+                    raise
+                return response
+
+            retries.sleep_for_retry(response)
+            log.debug("Redirecting %s -> %s", url, redirect_location)
+            return self.urlopen(
+                method, redirect_location, body, headers,
+                retries=retries, redirect=redirect,
+                assert_same_host=assert_same_host,
+                timeout=timeout, pool_timeout=pool_timeout,
+                release_conn=release_conn, body_pos=body_pos,
+                **response_kw)
+
+        # Check if we should retry the HTTP response.
+        has_retry_after = bool(response.getheader('Retry-After'))
+        if retries.is_retry(method, response.status, has_retry_after):
+            try:
+                retries = retries.increment(method, url, response=response,
+                                            _pool=self)
+            except MaxRetryError:
+                if retries.raise_on_status:
+                    # Release the connection for this response, since we're
+                    # not returning it to be released manually.
+                    response.release_conn()
+                    raise
+                return response
+            retries.sleep(response)
+            log.debug("Retry: %s", url)
+            return self.urlopen(
+                method, url, body, headers,
+                retries=retries, redirect=redirect,
+                assert_same_host=assert_same_host,
+                timeout=timeout, pool_timeout=pool_timeout,
+                release_conn=release_conn,
+                body_pos=body_pos, **response_kw)
+
+        return response
+
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+    """
+    Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+    When Python is compiled with the :mod:`ssl` module, then
+    :class:`.VerifiedHTTPSConnection` is used, which *can* verify
+    certificates, instead of :class:`.HTTPSConnection`.
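+
+    A minimal construction sketch (the host and CA bundle path are
+    placeholders)::
+
+        >>> pool = HTTPSConnectionPool('example.com', port=443,
+        ...                            cert_reqs='CERT_REQUIRED',
+        ...                            ca_certs='/path/to/ca_bundle.pem')
+        >>> r = pool.request('GET', '/')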
+ + :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, + ``assert_hostname`` and ``host`` in this order to verify connections. + If ``assert_hostname`` is False, no verification is done. + + The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, + ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is + available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade + the connection socket into an SSL socket. + """ + + scheme = 'https' + ConnectionCls = HTTPSConnection + + def __init__(self, host, port=None, + strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, + block=False, headers=None, retries=None, + _proxy=None, _proxy_headers=None, + key_file=None, cert_file=None, cert_reqs=None, + ca_certs=None, ssl_version=None, + assert_hostname=None, assert_fingerprint=None, + ca_cert_dir=None, **conn_kw): + + HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, + block, headers, retries, _proxy, _proxy_headers, + **conn_kw) + + if ca_certs and cert_reqs is None: + cert_reqs = 'CERT_REQUIRED' + + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.ca_certs = ca_certs + self.ca_cert_dir = ca_cert_dir + self.ssl_version = ssl_version + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + + def _prepare_conn(self, conn): + """ + Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` + and establish the tunnel if proxy is used. + """ + + if isinstance(conn, VerifiedHTTPSConnection): + conn.set_cert(key_file=self.key_file, + cert_file=self.cert_file, + cert_reqs=self.cert_reqs, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint) + conn.ssl_version = self.ssl_version + return conn + + def _prepare_proxy(self, conn): + """ + Establish tunnel connection early, because otherwise httplib + would improperly set Host: header to proxy's IP:port. + """ + # Python 2.7+ + try: + set_tunnel = conn.set_tunnel + except AttributeError: # Platform-specific: Python 2.6 + set_tunnel = conn._set_tunnel + + if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older + set_tunnel(self.host, self.port) + else: + set_tunnel(self.host, self.port, self.proxy_headers) + + conn.connect() + + def _new_conn(self): + """ + Return a fresh :class:`httplib.HTTPSConnection`. + """ + self.num_connections += 1 + log.debug("Starting new HTTPS connection (%d): %s", + self.num_connections, self.host) + + if not self.ConnectionCls or self.ConnectionCls is DummyConnection: + raise SSLError("Can't connect to HTTPS URL because the SSL " + "module is not available.") + + actual_host = self.host + actual_port = self.port + if self.proxy is not None: + actual_host = self.proxy.host + actual_port = self.proxy.port + + conn = self.ConnectionCls(host=actual_host, port=actual_port, + timeout=self.timeout.connect_timeout, + strict=self.strict, **self.conn_kw) + + return self._prepare_conn(conn) + + def _validate_conn(self, conn): + """ + Called right before a request is made, after the socket is created. + """ + super(HTTPSConnectionPool, self)._validate_conn(conn) + + # Force connect early to allow us to validate the connection. + if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` + conn.connect() + + if not conn.is_verified: + warnings.warn(( + 'Unverified HTTPS request is being made. ' + 'Adding certificate verification is strongly advised. 
See: '
+                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+                '#ssl-warnings'),
+                InsecureRequestWarning)
+
+
+def connection_from_url(url, **kw):
+    """
+    Given a URL, return a :class:`.ConnectionPool` instance of its host.
+
+    This is a shortcut for not having to parse out the scheme, host, and
+    port of the URL before creating a :class:`.ConnectionPool` instance.
+
+    :param url:
+        Absolute URL string that must include the scheme. Port is optional.
+
+    :param \\**kw:
+        Passes additional parameters to the constructor of the appropriate
+        :class:`.ConnectionPool`. Useful for specifying things like
+        timeout, maxsize, headers, etc.
+
+    Example::
+
+        >>> conn = connection_from_url('http://google.com/')
+        >>> r = conn.request('GET', '/')
+    """
+    scheme, host, port = get_host(url)
+    port = port or port_by_scheme.get(scheme, 80)
+    if scheme == 'https':
+        return HTTPSConnectionPool(host, port=port, **kw)
+    else:
+        return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host):
+    """
+    Process IPv6 address literals.
+    """
+
+    # httplib doesn't like it when we include brackets in IPv6 addresses.
+    # Specifically, if we include brackets but also pass the port, then
+    # httplib crazily doubles up the square brackets on the Host header.
+    # Instead, we need to make sure we never pass ``None`` as the port.
+    # However, for backward compatibility reasons we can't actually
+    # *assert* that. See http://bugs.python.org/issue28539
+    #
+    # Also, if an IPv6 address literal has a zone identifier, the percent
+    # sign might be URI-encoded; convert it back into ASCII.
+    if host.startswith('[') and host.endswith(']'):
+        host = host.replace('%25', '%').strip('[]')
+    return host
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 000000000..bb826673f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,591 @@
+# SPDX-License-Identifier: MIT
+"""
+This module uses ctypes to bind a whole bunch of functions and constants
+from SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants,
+and they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us.
For +that reason, this code should be considered to be covered both by urllib3's +license and by oscrypto's: + + Copyright (c) 2015-2016 Will Bond <will@wbond.net> + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +""" +from __future__ import absolute_import + +import platform +from ctypes.util import find_library +from ctypes import ( + c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, + c_bool +) +from ctypes import CDLL, POINTER, CFUNCTYPE + + +security_path = find_library('Security') +if not security_path: + raise ImportError('The library Security could not be found') + + +core_foundation_path = find_library('CoreFoundation') +if not core_foundation_path: + raise ImportError('The library CoreFoundation could not be found') + + +version = platform.mac_ver()[0] +version_info = tuple(map(int, version.split('.'))) +if version_info < (10, 8): + raise OSError( + 'Only OS X 10.8 and newer are supported, not %s.%s' % ( + version_info[0], version_info[1] + ) + ) + +Security = CDLL(security_path, use_errno=True) +CoreFoundation = CDLL(core_foundation_path, use_errno=True) + +Boolean = c_bool +CFIndex = c_long +CFStringEncoding = c_uint32 +CFData = c_void_p +CFString = c_void_p +CFArray = c_void_p +CFMutableArray = c_void_p +CFDictionary = c_void_p +CFError = c_void_p +CFType = c_void_p +CFTypeID = c_ulong + +CFTypeRef = POINTER(CFType) +CFAllocatorRef = c_void_p + +OSStatus = c_int32 + +CFDataRef = POINTER(CFData) +CFStringRef = POINTER(CFString) +CFArrayRef = POINTER(CFArray) +CFMutableArrayRef = POINTER(CFMutableArray) +CFDictionaryRef = POINTER(CFDictionary) +CFArrayCallBacks = c_void_p +CFDictionaryKeyCallBacks = c_void_p +CFDictionaryValueCallBacks = c_void_p + +SecCertificateRef = POINTER(c_void_p) +SecExternalFormat = c_uint32 +SecExternalItemType = c_uint32 +SecIdentityRef = POINTER(c_void_p) +SecItemImportExportFlags = c_uint32 +SecItemImportExportKeyParameters = c_void_p +SecKeychainRef = POINTER(c_void_p) +SSLProtocol = c_uint32 +SSLCipherSuite = c_uint32 +SSLContextRef = POINTER(c_void_p) +SecTrustRef = POINTER(c_void_p) +SSLConnectionRef = c_uint32 +SecTrustResultType = c_uint32 +SecTrustOptionFlags = c_uint32 +SSLProtocolSide = c_uint32 +SSLConnectionType = c_uint32 +SSLSessionOption = c_uint32 + + +try: + Security.SecItemImport.argtypes = [ + CFDataRef, + CFStringRef, + POINTER(SecExternalFormat), + POINTER(SecExternalItemType), + SecItemImportExportFlags, + POINTER(SecItemImportExportKeyParameters), + SecKeychainRef, + POINTER(CFArrayRef), + ] + 
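+    # Declaring argtypes/restype lets ctypes convert arguments at each call
+    # site and wrap return values correctly; without a restype annotation,
+    # ctypes assumes c_int, which truncates pointer-sized return values on
+    # 64-bit builds.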
+    Security.SecItemImport.restype = OSStatus
+
+    Security.SecCertificateGetTypeID.argtypes = []
+    Security.SecCertificateGetTypeID.restype = CFTypeID
+
+    Security.SecIdentityGetTypeID.argtypes = []
+    Security.SecIdentityGetTypeID.restype = CFTypeID
+
+    Security.SecKeyGetTypeID.argtypes = []
+    Security.SecKeyGetTypeID.restype = CFTypeID
+
+    Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
+    Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+    Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
+    Security.SecCertificateCopyData.restype = CFDataRef
+
+    Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+    Security.SecCopyErrorMessageString.restype = CFStringRef
+
+    Security.SecIdentityCreateWithCertificate.argtypes = [
+        CFTypeRef, SecCertificateRef, POINTER(SecIdentityRef)
+    ]
+    Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+    Security.SecKeychainCreate.argtypes = [
+        c_char_p, c_uint32, c_void_p, Boolean, c_void_p,
+        POINTER(SecKeychainRef)
+    ]
+    Security.SecKeychainCreate.restype = OSStatus
+
+    Security.SecKeychainDelete.argtypes = [SecKeychainRef]
+    Security.SecKeychainDelete.restype = OSStatus
+
+    Security.SecPKCS12Import.argtypes = [
+        CFDataRef, CFDictionaryRef, POINTER(CFArrayRef)
+    ]
+    Security.SecPKCS12Import.restype = OSStatus
+
+    SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef,
+                            c_void_p, POINTER(c_size_t))
+    SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef,
+                             POINTER(c_byte), POINTER(c_size_t))
+
+    Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
+    Security.SSLSetIOFuncs.restype = OSStatus
+
+    Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
+    Security.SSLSetPeerID.restype = OSStatus
+
+    Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
+    Security.SSLSetCertificate.restype = OSStatus
+
+    Security.SSLSetCertificateAuthorities.argtypes = [
+        SSLContextRef, CFTypeRef, Boolean
+    ]
+    Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+    Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
+    Security.SSLSetConnection.restype = OSStatus
+
+    Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
+    Security.SSLSetPeerDomainName.restype = OSStatus
+
+    Security.SSLHandshake.argtypes = [SSLContextRef]
+    Security.SSLHandshake.restype = OSStatus
+
+    Security.SSLRead.argtypes = [SSLContextRef, c_char_p,
+                                 c_size_t, POINTER(c_size_t)]
+    Security.SSLRead.restype = OSStatus
+
+    Security.SSLWrite.argtypes = [SSLContextRef, c_char_p,
+                                  c_size_t, POINTER(c_size_t)]
+    Security.SSLWrite.restype = OSStatus
+
+    Security.SSLClose.argtypes = [SSLContextRef]
+    Security.SSLClose.restype = OSStatus
+
+    Security.SSLGetNumberSupportedCiphers.argtypes = [
+        SSLContextRef, POINTER(c_size_t)
+    ]
+    Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+    Security.SSLGetSupportedCiphers.argtypes = [
+        SSLContextRef, POINTER(SSLCipherSuite), POINTER(c_size_t)
+    ]
+    Security.SSLGetSupportedCiphers.restype = OSStatus
+
+    Security.SSLSetEnabledCiphers.argtypes = [
+        SSLContextRef, POINTER(SSLCipherSuite), c_size_t
+    ]
+    Security.SSLSetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNumberEnabledCiphers.argtypes = [
+        SSLContextRef, POINTER(c_size_t)
+    ]
+    Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetEnabledCiphers.argtypes = [
+        SSLContextRef, POINTER(SSLCipherSuite), POINTER(c_size_t)
+    ]
+    Security.SSLGetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNegotiatedCipher.argtypes = [
+        SSLContextRef, POINTER(SSLCipherSuite)
+    ]
+    Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+    Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+        SSLContextRef, POINTER(SSLProtocol)
+    ]
+    Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+    Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
+    Security.SSLCopyPeerTrust.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
+    Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
+    Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+    Security.SecTrustEvaluate.argtypes = [SecTrustRef,
+                                          POINTER(SecTrustResultType)]
+    Security.SecTrustEvaluate.restype = OSStatus
+
+    Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
+    Security.SecTrustGetCertificateCount.restype = CFIndex
+
+    Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
+    Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+    Security.SSLCreateContext.argtypes = [
+        CFAllocatorRef, SSLProtocolSide, SSLConnectionType
+    ]
+    Security.SSLCreateContext.restype = SSLContextRef
+
+    Security.SSLSetSessionOption.argtypes = [
+        SSLContextRef, SSLSessionOption, Boolean
+    ]
+    Security.SSLSetSessionOption.restype = OSStatus
+
+    Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
+    Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+    Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
+    Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+    Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+    Security.SecCopyErrorMessageString.restype = CFStringRef
+
+    Security.SSLReadFunc = SSLReadFunc
+    Security.SSLWriteFunc = SSLWriteFunc
+    Security.SSLContextRef = SSLContextRef
+    Security.SSLProtocol = SSLProtocol
+    Security.SSLCipherSuite = SSLCipherSuite
+    Security.SecIdentityRef = SecIdentityRef
+    Security.SecKeychainRef = SecKeychainRef
+    Security.SecTrustRef = SecTrustRef
+    Security.SecTrustResultType = SecTrustResultType
+    Security.SecExternalFormat = SecExternalFormat
+    Security.OSStatus = OSStatus
+
+    Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+        Security, 'kSecImportExportPassphrase'
+    )
+    Security.kSecImportItemIdentity = CFStringRef.in_dll(
+        Security, 'kSecImportItemIdentity'
+    )
+
+    # CoreFoundation time!
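+    # Per the Core Foundation ownership rules, anything obtained from a
+    # Create/Copy function must eventually be passed to CFRelease. A sketch
+    # (values are placeholders, not executed here):
+    #
+    #     data = CoreFoundation.CFDataCreate(
+    #         CoreFoundation.kCFAllocatorDefault, b"abc", 3)
+    #     ...  # use data
+    #     CoreFoundation.CFRelease(data)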
+ CoreFoundation.CFRetain.argtypes = [ + CFTypeRef + ] + CoreFoundation.CFRetain.restype = CFTypeRef + + CoreFoundation.CFRelease.argtypes = [ + CFTypeRef + ] + CoreFoundation.CFRelease.restype = None + + CoreFoundation.CFGetTypeID.argtypes = [ + CFTypeRef + ] + CoreFoundation.CFGetTypeID.restype = CFTypeID + + CoreFoundation.CFStringCreateWithCString.argtypes = [ + CFAllocatorRef, + c_char_p, + CFStringEncoding + ] + CoreFoundation.CFStringCreateWithCString.restype = CFStringRef + + CoreFoundation.CFStringGetCStringPtr.argtypes = [ + CFStringRef, + CFStringEncoding + ] + CoreFoundation.CFStringGetCStringPtr.restype = c_char_p + + CoreFoundation.CFStringGetCString.argtypes = [ + CFStringRef, + c_char_p, + CFIndex, + CFStringEncoding + ] + CoreFoundation.CFStringGetCString.restype = c_bool + + CoreFoundation.CFDataCreate.argtypes = [ + CFAllocatorRef, + c_char_p, + CFIndex + ] + CoreFoundation.CFDataCreate.restype = CFDataRef + + CoreFoundation.CFDataGetLength.argtypes = [ + CFDataRef + ] + CoreFoundation.CFDataGetLength.restype = CFIndex + + CoreFoundation.CFDataGetBytePtr.argtypes = [ + CFDataRef + ] + CoreFoundation.CFDataGetBytePtr.restype = c_void_p + + CoreFoundation.CFDictionaryCreate.argtypes = [ + CFAllocatorRef, + POINTER(CFTypeRef), + POINTER(CFTypeRef), + CFIndex, + CFDictionaryKeyCallBacks, + CFDictionaryValueCallBacks + ] + CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef + + CoreFoundation.CFDictionaryGetValue.argtypes = [ + CFDictionaryRef, + CFTypeRef + ] + CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef + + CoreFoundation.CFArrayCreate.argtypes = [ + CFAllocatorRef, + POINTER(CFTypeRef), + CFIndex, + CFArrayCallBacks, + ] + CoreFoundation.CFArrayCreate.restype = CFArrayRef + + CoreFoundation.CFArrayCreateMutable.argtypes = [ + CFAllocatorRef, + CFIndex, + CFArrayCallBacks + ] + CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef + + CoreFoundation.CFArrayAppendValue.argtypes = [ + CFMutableArrayRef, + c_void_p + ] + CoreFoundation.CFArrayAppendValue.restype = None + + CoreFoundation.CFArrayGetCount.argtypes = [ + CFArrayRef + ] + CoreFoundation.CFArrayGetCount.restype = CFIndex + + CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ + CFArrayRef, + CFIndex + ] + CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p + + CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( + CoreFoundation, 'kCFAllocatorDefault' + ) + CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') + CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( + CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' + ) + CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( + CoreFoundation, 'kCFTypeDictionaryValueCallBacks' + ) + + CoreFoundation.CFTypeRef = CFTypeRef + CoreFoundation.CFArrayRef = CFArrayRef + CoreFoundation.CFStringRef = CFStringRef + CoreFoundation.CFDictionaryRef = CFDictionaryRef + +except (AttributeError): + raise ImportError('Error initializing ctypes') + + +class CFConst(object): + """ + A class object that acts as essentially a namespace for CoreFoundation + constants. + """ + kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) + + +class SecurityConst(object): + """ + A class object that acts as essentially a namespace for Security constants. 
+ """ + kSSLSessionOptionBreakOnServerAuth = 0 + + kSSLProtocol2 = 1 + kSSLProtocol3 = 2 + kTLSProtocol1 = 4 + kTLSProtocol11 = 7 + kTLSProtocol12 = 8 + + kSSLClientSide = 1 + kSSLStreamType = 0 + + kSecFormatPEMSequence = 10 + + kSecTrustResultInvalid = 0 + kSecTrustResultProceed = 1 + # This gap is present on purpose: this was kSecTrustResultConfirm, which + # is deprecated. + kSecTrustResultDeny = 3 + kSecTrustResultUnspecified = 4 + kSecTrustResultRecoverableTrustFailure = 5 + kSecTrustResultFatalTrustFailure = 6 + kSecTrustResultOtherError = 7 + + errSSLProtocol = -9800 + errSSLWouldBlock = -9803 + errSSLClosedGraceful = -9805 + errSSLClosedNoNotify = -9816 + errSSLClosedAbort = -9806 + + errSSLXCertChainInvalid = -9807 + errSSLCrypto = -9809 + errSSLInternal = -9810 + errSSLCertExpired = -9814 + errSSLCertNotYetValid = -9815 + errSSLUnknownRootCert = -9812 + errSSLNoRootCert = -9813 + errSSLHostNameMismatch = -9843 + errSSLPeerHandshakeFail = -9824 + errSSLPeerUserCancelled = -9839 + errSSLWeakPeerEphemeralDHKey = -9850 + errSSLServerAuthCompleted = -9841 + errSSLRecordOverflow = -9847 + + errSecVerifyFailed = -67808 + errSecNoTrustSettings = -25263 + errSecItemNotFound = -25300 + errSecInvalidTrustSettings = -25262 + + # Cipher suites. We only pick the ones our default cipher string allows. + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F + TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F + TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 + TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A + TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 + TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 + TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 + TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 + TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 + TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D + TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C + TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D + TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py new file mode 100644 index 000000000..0f79a1372 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py @@ -0,0 +1,344 @@ +# SPDX-License-Identifier: MIT +""" +Low-level helpers for the SecureTransport bindings. + +These are Python functions that are not directly related to the high-level APIs +but are necessary to get them to work. They include a whole bunch of low-level +CoreFoundation messing about and memory management. The concerns in this module +are almost entirely about trying to avoid memory leaks and providing +appropriate and useful assistance to the higher-level code. 
+""" +import base64 +import ctypes +import itertools +import re +import os +import ssl +import tempfile + +from .bindings import Security, CoreFoundation, CFConst + + +# This regular expression is used to grab PEM data out of a PEM bundle. +_PEM_CERTS_RE = re.compile( + b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL +) + + +def _cf_data_from_bytes(bytestring): + """ + Given a bytestring, create a CFData object from it. This CFData object must + be CFReleased by the caller. + """ + return CoreFoundation.CFDataCreate( + CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) + ) + + +def _cf_dictionary_from_tuples(tuples): + """ + Given a list of Python tuples, create an associated CFDictionary. + """ + dictionary_size = len(tuples) + + # We need to get the dictionary keys and values out in the same order. + keys = (t[0] for t in tuples) + values = (t[1] for t in tuples) + cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) + cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) + + return CoreFoundation.CFDictionaryCreate( + CoreFoundation.kCFAllocatorDefault, + cf_keys, + cf_values, + dictionary_size, + CoreFoundation.kCFTypeDictionaryKeyCallBacks, + CoreFoundation.kCFTypeDictionaryValueCallBacks, + ) + + +def _cf_string_to_unicode(value): + """ + Creates a Unicode string from a CFString object. Used entirely for error + reporting. + + Yes, it annoys me quite a lot that this function is this complex. + """ + value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) + + string = CoreFoundation.CFStringGetCStringPtr( + value_as_void_p, + CFConst.kCFStringEncodingUTF8 + ) + if string is None: + buffer = ctypes.create_string_buffer(1024) + result = CoreFoundation.CFStringGetCString( + value_as_void_p, + buffer, + 1024, + CFConst.kCFStringEncodingUTF8 + ) + if not result: + raise OSError('Error copying C string from CFStringRef') + string = buffer.value + if string is not None: + string = string.decode('utf-8') + return string + + +def _assert_no_error(error, exception_class=None): + """ + Checks the return code and throws an exception if there is an error to + report + """ + if error == 0: + return + + cf_error_string = Security.SecCopyErrorMessageString(error, None) + output = _cf_string_to_unicode(cf_error_string) + CoreFoundation.CFRelease(cf_error_string) + + if output is None or output == u'': + output = u'OSStatus %s' % error + + if exception_class is None: + exception_class = ssl.SSLError + + raise exception_class(output) + + +def _cert_array_from_pem(pem_bundle): + """ + Given a bundle of certs in PEM format, turns them into a CFArray of certs + that can be used to validate a cert chain. 
+ """ + der_certs = [ + base64.b64decode(match.group(1)) + for match in _PEM_CERTS_RE.finditer(pem_bundle) + ] + if not der_certs: + raise ssl.SSLError("No root certificates specified") + + cert_array = CoreFoundation.CFArrayCreateMutable( + CoreFoundation.kCFAllocatorDefault, + 0, + ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) + ) + if not cert_array: + raise ssl.SSLError("Unable to allocate memory!") + + try: + for der_bytes in der_certs: + certdata = _cf_data_from_bytes(der_bytes) + if not certdata: + raise ssl.SSLError("Unable to allocate memory!") + cert = Security.SecCertificateCreateWithData( + CoreFoundation.kCFAllocatorDefault, certdata + ) + CoreFoundation.CFRelease(certdata) + if not cert: + raise ssl.SSLError("Unable to build cert object!") + + CoreFoundation.CFArrayAppendValue(cert_array, cert) + CoreFoundation.CFRelease(cert) + except Exception: + # We need to free the array before the exception bubbles further. + # We only want to do that if an error occurs: otherwise, the caller + # should free. + CoreFoundation.CFRelease(cert_array) + + return cert_array + + +def _is_cert(item): + """ + Returns True if a given CFTypeRef is a certificate. + """ + expected = Security.SecCertificateGetTypeID() + return CoreFoundation.CFGetTypeID(item) == expected + + +def _is_identity(item): + """ + Returns True if a given CFTypeRef is an identity. + """ + expected = Security.SecIdentityGetTypeID() + return CoreFoundation.CFGetTypeID(item) == expected + + +def _temporary_keychain(): + """ + This function creates a temporary Mac keychain that we can use to work with + credentials. This keychain uses a one-time password and a temporary file to + store the data. We expect to have one keychain per socket. The returned + SecKeychainRef must be freed by the caller, including calling + SecKeychainDelete. + + Returns a tuple of the SecKeychainRef and the path to the temporary + directory that contains it. + """ + # Unfortunately, SecKeychainCreate requires a path to a keychain. This + # means we cannot use mkstemp to use a generic temporary file. Instead, + # we're going to create a temporary directory and a filename to use there. + # This filename will be 8 random bytes expanded into base64. We also need + # some random bytes to password-protect the keychain we're creating, so we + # ask for 40 random bytes. + random_bytes = os.urandom(40) + filename = base64.b64encode(random_bytes[:8]).decode('utf-8') + password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8 + tempdirectory = tempfile.mkdtemp() + + keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') + + # We now want to create the keychain itself. + keychain = Security.SecKeychainRef() + status = Security.SecKeychainCreate( + keychain_path, + len(password), + password, + False, + None, + ctypes.byref(keychain) + ) + _assert_no_error(status) + + # Having created the keychain, we want to pass it off to the caller. + return keychain, tempdirectory + + +def _load_items_from_file(keychain, path): + """ + Given a single file, loads all the trust objects from it into arrays and + the keychain. + Returns a tuple of lists: the first list is a list of identities, the + second a list of certs. 
+ """ + certificates = [] + identities = [] + result_array = None + + with open(path, 'rb') as f: + raw_filedata = f.read() + + try: + filedata = CoreFoundation.CFDataCreate( + CoreFoundation.kCFAllocatorDefault, + raw_filedata, + len(raw_filedata) + ) + result_array = CoreFoundation.CFArrayRef() + result = Security.SecItemImport( + filedata, # cert data + None, # Filename, leaving it out for now + None, # What the type of the file is, we don't care + None, # what's in the file, we don't care + 0, # import flags + None, # key params, can include passphrase in the future + keychain, # The keychain to insert into + ctypes.byref(result_array) # Results + ) + _assert_no_error(result) + + # A CFArray is not very useful to us as an intermediary + # representation, so we are going to extract the objects we want + # and then free the array. We don't need to keep hold of keys: the + # keychain already has them! + result_count = CoreFoundation.CFArrayGetCount(result_array) + for index in range(result_count): + item = CoreFoundation.CFArrayGetValueAtIndex( + result_array, index + ) + item = ctypes.cast(item, CoreFoundation.CFTypeRef) + + if _is_cert(item): + CoreFoundation.CFRetain(item) + certificates.append(item) + elif _is_identity(item): + CoreFoundation.CFRetain(item) + identities.append(item) + finally: + if result_array: + CoreFoundation.CFRelease(result_array) + + CoreFoundation.CFRelease(filedata) + + return (identities, certificates) + + +def _load_client_cert_chain(keychain, *paths): + """ + Load certificates and maybe keys from a number of files. Has the end goal + of returning a CFArray containing one SecIdentityRef, and then zero or more + SecCertificateRef objects, suitable for use as a client certificate trust + chain. + """ + # Ok, the strategy. + # + # This relies on knowing that macOS will not give you a SecIdentityRef + # unless you have imported a key into a keychain. This is a somewhat + # artificial limitation of macOS (for example, it doesn't necessarily + # affect iOS), but there is nothing inside Security.framework that lets you + # get a SecIdentityRef without having a key in a keychain. + # + # So the policy here is we take all the files and iterate them in order. + # Each one will use SecItemImport to have one or more objects loaded from + # it. We will also point at a keychain that macOS can use to work with the + # private key. + # + # Once we have all the objects, we'll check what we actually have. If we + # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, + # we'll take the first certificate (which we assume to be our leaf) and + # ask the keychain to give us a SecIdentityRef with that cert's associated + # key. + # + # We'll then return a CFArray containing the trust chain: one + # SecIdentityRef and then zero-or-more SecCertificateRef objects. The + # responsibility for freeing this CFArray will be with the caller. This + # CFArray must remain alive for the entire connection, so in practice it + # will be stored with a single SSLSocket, along with the reference to the + # keychain. + certificates = [] + identities = [] + + # Filter out bad paths. + paths = (path for path in paths if path) + + try: + for file_path in paths: + new_identities, new_certs = _load_items_from_file( + keychain, file_path + ) + identities.extend(new_identities) + certificates.extend(new_certs) + + # Ok, we have everything. The question is: do we have an identity? If + # not, we want to grab one from the first cert we have. 
+ if not identities: + new_identity = Security.SecIdentityRef() + status = Security.SecIdentityCreateWithCertificate( + keychain, + certificates[0], + ctypes.byref(new_identity) + ) + _assert_no_error(status) + identities.append(new_identity) + + # We now want to release the original certificate, as we no longer + # need it. + CoreFoundation.CFRelease(certificates.pop(0)) + + # We now need to build a new CFArray that holds the trust chain. + trust_chain = CoreFoundation.CFArrayCreateMutable( + CoreFoundation.kCFAllocatorDefault, + 0, + ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), + ) + for item in itertools.chain(identities, certificates): + # ArrayAppendValue does a CFRetain on the item. That's fine, + # because the finally block will release our other refs to them. + CoreFoundation.CFArrayAppendValue(trust_chain, item) + + return trust_chain + finally: + for obj in itertools.chain(identities, certificates): + CoreFoundation.CFRelease(obj) diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py new file mode 100644 index 000000000..e74589fa8 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py @@ -0,0 +1,297 @@ +# SPDX-License-Identifier: MIT +""" +This module provides a pool manager that uses Google App Engine's +`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. + +Example usage:: + + from urllib3 import PoolManager + from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox + + if is_appengine_sandbox(): + # AppEngineManager uses AppEngine's URLFetch API behind the scenes + http = AppEngineManager() + else: + # PoolManager uses a socket-level API behind the scenes + http = PoolManager() + + r = http.request('GET', 'https://google.com/') + +There are `limitations <https://cloud.google.com/appengine/docs/python/\ +urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be +the best choice for your application. There are three options for using +urllib3 on Google App Engine: + +1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is + cost-effective in many circumstances as long as your usage is within the + limitations. +2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. + Sockets also have `limitations and restrictions + <https://cloud.google.com/appengine/docs/python/sockets/\ + #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. + To use sockets, be sure to specify the following in your ``app.yaml``:: + + env_variables: + GAE_USE_SOCKETS_HTTPLIB : 'true' + +3. If you are using `App Engine Flexible +<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard +:class:`PoolManager` without any configuration or special environment variables. 
+""" + +from __future__ import absolute_import +import logging +import os +import warnings +from ..packages.six.moves.urllib.parse import urljoin + +from ..exceptions import ( + HTTPError, + HTTPWarning, + MaxRetryError, + ProtocolError, + TimeoutError, + SSLError +) + +from ..packages.six import BytesIO +from ..request import RequestMethods +from ..response import HTTPResponse +from ..util.timeout import Timeout +from ..util.retry import Retry + +try: + from google.appengine.api import urlfetch +except ImportError: + urlfetch = None + + +log = logging.getLogger(__name__) + + +class AppEnginePlatformWarning(HTTPWarning): + pass + + +class AppEnginePlatformError(HTTPError): + pass + + +class AppEngineManager(RequestMethods): + """ + Connection manager for Google App Engine sandbox applications. + + This manager uses the URLFetch service directly instead of using the + emulated httplib, and is subject to URLFetch limitations as described in + the App Engine documentation `here + <https://cloud.google.com/appengine/docs/python/urlfetch>`_. + + Notably it will raise an :class:`AppEnginePlatformError` if: + * URLFetch is not available. + * If you attempt to use this on App Engine Flexible, as full socket + support is available. + * If a request size is more than 10 megabytes. + * If a response size is more than 32 megabtyes. + * If you use an unsupported request method such as OPTIONS. + + Beyond those cases, it will raise normal urllib3 errors. + """ + + def __init__(self, headers=None, retries=None, validate_certificate=True, + urlfetch_retries=True): + if not urlfetch: + raise AppEnginePlatformError( + "URLFetch is not available in this environment.") + + if is_prod_appengine_mvms(): + raise AppEnginePlatformError( + "Use normal urllib3.PoolManager instead of AppEngineManager" + "on Managed VMs, as using URLFetch is not necessary in " + "this environment.") + + warnings.warn( + "urllib3 is using URLFetch on Google App Engine sandbox instead " + "of sockets. 
To use sockets directly instead of URLFetch see " + "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", + AppEnginePlatformWarning) + + RequestMethods.__init__(self, headers) + self.validate_certificate = validate_certificate + self.urlfetch_retries = urlfetch_retries + + self.retries = retries or Retry.DEFAULT + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + # Return False to re-raise any potential exceptions + return False + + def urlopen(self, method, url, body=None, headers=None, + retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, + **response_kw): + + retries = self._get_retries(retries, redirect) + + try: + follow_redirects = ( + redirect and + retries.redirect != 0 and + retries.total) + response = urlfetch.fetch( + url, + payload=body, + method=method, + headers=headers or {}, + allow_truncated=False, + follow_redirects=self.urlfetch_retries and follow_redirects, + deadline=self._get_absolute_timeout(timeout), + validate_certificate=self.validate_certificate, + ) + except urlfetch.DeadlineExceededError as e: + raise TimeoutError(self, e) + + except urlfetch.InvalidURLError as e: + if 'too large' in str(e): + raise AppEnginePlatformError( + "URLFetch request too large, URLFetch only " + "supports requests up to 10mb in size.", e) + raise ProtocolError(e) + + except urlfetch.DownloadError as e: + if 'Too many redirects' in str(e): + raise MaxRetryError(self, url, reason=e) + raise ProtocolError(e) + + except urlfetch.ResponseTooLargeError as e: + raise AppEnginePlatformError( + "URLFetch response too large, URLFetch only supports" + "responses up to 32mb in size.", e) + + except urlfetch.SSLCertificateError as e: + raise SSLError(e) + + except urlfetch.InvalidMethodError as e: + raise AppEnginePlatformError( + "URLFetch does not support method: %s" % method, e) + + http_response = self._urlfetch_response_to_http_response( + response, retries=retries, **response_kw) + + # Handle redirect? + redirect_location = redirect and http_response.get_redirect_location() + if redirect_location: + # Check for redirect response + if (self.urlfetch_retries and retries.raise_on_redirect): + raise MaxRetryError(self, url, "too many redirects") + else: + if http_response.status == 303: + method = 'GET' + + try: + retries = retries.increment(method, url, response=http_response, _pool=self) + except MaxRetryError: + if retries.raise_on_redirect: + raise MaxRetryError(self, url, "too many redirects") + return http_response + + retries.sleep_for_retry(http_response) + log.debug("Redirecting %s -> %s", url, redirect_location) + redirect_url = urljoin(url, redirect_location) + return self.urlopen( + method, redirect_url, body, headers, + retries=retries, redirect=redirect, + timeout=timeout, **response_kw) + + # Check if we should retry the HTTP response. + has_retry_after = bool(http_response.getheader('Retry-After')) + if retries.is_retry(method, http_response.status, has_retry_after): + retries = retries.increment( + method, url, response=http_response, _pool=self) + log.debug("Retry: %s", url) + retries.sleep(http_response) + return self.urlopen( + method, url, + body=body, headers=headers, + retries=retries, redirect=redirect, + timeout=timeout, **response_kw) + + return http_response + + def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): + + if is_prod_appengine(): + # Production GAE handles deflate encoding automatically, but does + # not remove the encoding header. 
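+            # If the header were left in place, HTTPResponse would try to
+            # decompress a body that URLFetch has already decompressed.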
+ content_encoding = urlfetch_resp.headers.get('content-encoding') + + if content_encoding == 'deflate': + del urlfetch_resp.headers['content-encoding'] + + transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') + # We have a full response's content, + # so let's make sure we don't report ourselves as chunked data. + if transfer_encoding == 'chunked': + encodings = transfer_encoding.split(",") + encodings.remove('chunked') + urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) + + return HTTPResponse( + # In order for decoding to work, we must present the content as + # a file-like object. + body=BytesIO(urlfetch_resp.content), + headers=urlfetch_resp.headers, + status=urlfetch_resp.status_code, + **response_kw + ) + + def _get_absolute_timeout(self, timeout): + if timeout is Timeout.DEFAULT_TIMEOUT: + return None # Defer to URLFetch's default. + if isinstance(timeout, Timeout): + if timeout._read is not None or timeout._connect is not None: + warnings.warn( + "URLFetch does not support granular timeout settings, " + "reverting to total or default URLFetch timeout.", + AppEnginePlatformWarning) + return timeout.total + return timeout + + def _get_retries(self, retries, redirect): + if not isinstance(retries, Retry): + retries = Retry.from_int( + retries, redirect=redirect, default=self.retries) + + if retries.connect or retries.read or retries.redirect: + warnings.warn( + "URLFetch only supports total retries and does not " + "recognize connect, read, or redirect retry parameters.", + AppEnginePlatformWarning) + + return retries + + +def is_appengine(): + return (is_local_appengine() or + is_prod_appengine() or + is_prod_appengine_mvms()) + + +def is_appengine_sandbox(): + return is_appengine() and not is_prod_appengine_mvms() + + +def is_local_appengine(): + return ('APPENGINE_RUNTIME' in os.environ and + 'Development/' in os.environ['SERVER_SOFTWARE']) + + +def is_prod_appengine(): + return ('APPENGINE_RUNTIME' in os.environ and + 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and + not is_prod_appengine_mvms()) + + +def is_prod_appengine_mvms(): + return os.environ.get('GAE_VM', False) == 'true' diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py new file mode 100644 index 000000000..3f8c9ebf5 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: MIT +""" +NTLM authenticating pool, contributed by erikcederstran + +Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 +""" +from __future__ import absolute_import + +from logging import getLogger +from ntlm import ntlm + +from .. import HTTPSConnectionPool +from ..packages.six.moves.http_client import HTTPSConnection + + +log = getLogger(__name__) + + +class NTLMConnectionPool(HTTPSConnectionPool): + """ + Implements an NTLM authentication version of an urllib3 connection pool + """ + + scheme = 'https' + + def __init__(self, user, pw, authurl, *args, **kwargs): + """ + authurl is a random URL on the server that is protected by NTLM. + user is the Windows user, probably in the DOMAIN\\username format. + pw is the password for the user. 
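+
+        A construction sketch (all values are placeholders)::
+
+            pool = NTLMConnectionPool('DOMAIN\\user', 'password',
+                                      '/protected/page', host='server')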
+ """ + super(NTLMConnectionPool, self).__init__(*args, **kwargs) + self.authurl = authurl + self.rawuser = user + user_parts = user.split('\\', 1) + self.domain = user_parts[0].upper() + self.user = user_parts[1] + self.pw = pw + + def _new_conn(self): + # Performs the NTLM handshake that secures the connection. The socket + # must be kept open while requests are performed. + self.num_connections += 1 + log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', + self.num_connections, self.host, self.authurl) + + headers = {} + headers['Connection'] = 'Keep-Alive' + req_header = 'Authorization' + resp_header = 'www-authenticate' + + conn = HTTPSConnection(host=self.host, port=self.port) + + # Send negotiation message + headers[req_header] = ( + 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) + log.debug('Request headers: %s', headers) + conn.request('GET', self.authurl, None, headers) + res = conn.getresponse() + reshdr = dict(res.getheaders()) + log.debug('Response status: %s %s', res.status, res.reason) + log.debug('Response headers: %s', reshdr) + log.debug('Response data: %s [...]', res.read(100)) + + # Remove the reference to the socket, so that it can not be closed by + # the response object (we want to keep the socket open) + res.fp = None + + # Server should respond with a challenge message + auth_header_values = reshdr[resp_header].split(', ') + auth_header_value = None + for s in auth_header_values: + if s[:5] == 'NTLM ': + auth_header_value = s[5:] + if auth_header_value is None: + raise Exception('Unexpected %s response header: %s' % + (resp_header, reshdr[resp_header])) + + # Send authentication message + ServerChallenge, NegotiateFlags = \ + ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) + auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, + self.user, + self.domain, + self.pw, + NegotiateFlags) + headers[req_header] = 'NTLM %s' % auth_msg + log.debug('Request headers: %s', headers) + conn.request('GET', self.authurl, None, headers) + res = conn.getresponse() + log.debug('Response status: %s %s', res.status, res.reason) + log.debug('Response headers: %s', dict(res.getheaders())) + log.debug('Response data: %s [...]', res.read()[:100]) + if res.status != 200: + if res.status == 401: + raise Exception('Server rejected request: wrong ' + 'username or password') + raise Exception('Wrong server response: %s %s' % + (res.status, res.reason)) + + res.fp = None + log.debug('Connection established') + return conn + + def urlopen(self, method, url, body=None, headers=None, retries=3, + redirect=True, assert_same_host=True): + if headers is None: + headers = {} + headers['Connection'] = 'Keep-Alive' + return super(NTLMConnectionPool, self).urlopen(method, url, body, + headers, retries, + redirect, + assert_same_host) diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py new file mode 100644 index 000000000..8d373507d --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py @@ -0,0 +1,458 @@ +# SPDX-License-Identifier: MIT +""" +SSL with SNI_-support for Python 2. Follow these instructions if you would +like to verify SSL certificates in Python 2. Note, the default libraries do +*not* do certificate checking; you need to do additional work to validate +certificates yourself. 
+ +This needs the following packages installed: + +* pyOpenSSL (tested with 16.0.0) +* cryptography (minimum 1.3.4, from pyopenssl) +* idna (minimum 2.0, from cryptography) + +However, pyopenssl depends on cryptography, which depends on idna, so while we +use all three directly here we end up having relatively few packages required. + +You can install them with the following command: + + pip install pyopenssl cryptography idna + +To activate certificate checking, call +:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code +before you begin making HTTP requests. This can be done in a ``sitecustomize`` +module, or at any other time before your application begins using ``urllib3``, +like this:: + + try: + import urllib3.contrib.pyopenssl + urllib3.contrib.pyopenssl.inject_into_urllib3() + except ImportError: + pass + +Now you can use :mod:`urllib3` as you normally would, and it will support SNI +when the required modules are installed. + +Activating this module also has the positive side effect of disabling SSL/TLS +compression in Python 2 (see `CRIME attack`_). + +If you want to configure the default list of supported cipher suites, you can +set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. + +.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication +.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) +""" +from __future__ import absolute_import + +import OpenSSL.SSL +from cryptography import x509 +from cryptography.hazmat.backends.openssl import backend as openssl_backend +from cryptography.hazmat.backends.openssl.x509 import _Certificate + +from socket import timeout, error as SocketError +from io import BytesIO + +try: # Platform-specific: Python 2 + from socket import _fileobject +except ImportError: # Platform-specific: Python 3 + _fileobject = None + from ..packages.backports.makefile import backport_makefile + +import logging +import ssl + +try: + import six +except ImportError: + from ..packages import six + +import sys + +from .. import util + +__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] + +# SNI always works. +HAS_SNI = True + +# Map from urllib3 to PyOpenSSL compatible parameter-values. +_openssl_versions = { + ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, + ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, +} + +if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): + _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD + +if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): + _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD + +try: + _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) +except AttributeError: + pass + +_stdlib_to_openssl_verify = { + ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, + ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, + ssl.CERT_REQUIRED: + OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, +} +_openssl_to_stdlib_verify = dict( + (v, k) for k, v in _stdlib_to_openssl_verify.items() +) + +# OpenSSL will only write 16K at a time +SSL_WRITE_BLOCKSIZE = 16384 + +orig_util_HAS_SNI = util.HAS_SNI +orig_util_SSLContext = util.ssl_.SSLContext + + +log = logging.getLogger(__name__) + + +def inject_into_urllib3(): + 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' 
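+    # After this call, SSLContext objects created inside urllib3 are
+    # PyOpenSSLContext instances, so the TLS layer is provided by pyOpenSSL
+    # rather than the standard library's ssl module.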
+
+    _validate_dependencies_met()
+
+    util.ssl_.SSLContext = PyOpenSSLContext
+    util.HAS_SNI = HAS_SNI
+    util.ssl_.HAS_SNI = HAS_SNI
+    util.IS_PYOPENSSL = True
+    util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+    'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+    util.ssl_.SSLContext = orig_util_SSLContext
+    util.HAS_SNI = orig_util_HAS_SNI
+    util.ssl_.HAS_SNI = orig_util_HAS_SNI
+    util.IS_PYOPENSSL = False
+    util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+    """
+    Verifies that PyOpenSSL's package-level dependencies have been met.
+    Throws `ImportError` if they are not met.
+    """
+    # Method added in `cryptography==1.1`; not available in older versions
+    from cryptography.x509.extensions import Extensions
+    if getattr(Extensions, "get_extension_for_class", None) is None:
+        raise ImportError("'cryptography' module missing required functionality. "
+                          "Try upgrading to v1.3.4 or newer.")
+
+    # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+    # attribute is only present on those versions.
+    from OpenSSL.crypto import X509
+    x509 = X509()
+    if getattr(x509, "_x509", None) is None:
+        raise ImportError("'pyOpenSSL' module missing required functionality. "
+                          "Try upgrading to v0.14 or newer.")
+
+
+def _dnsname_to_stdlib(name):
+    """
+    Converts a dNSName SubjectAlternativeName field to the form used by the
+    standard library on the given Python version.
+
+    Cryptography produces a dNSName as a unicode string that was idna-decoded
+    from ASCII bytes. We need to idna-encode that string to get it back, and
+    then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+    uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+    """
+    def idna_encode(name):
+        """
+        Borrowed wholesale from the Python Cryptography Project. It turns out
+        that we can't just safely call `idna.encode`: it can explode for
+        wildcard names. This avoids that problem.
+        """
+        import idna
+
+        for prefix in [u'*.', u'.']:
+            if name.startswith(prefix):
+                name = name[len(prefix):]
+                return prefix.encode('ascii') + idna.encode(name)
+        return idna.encode(name)
+
+    name = idna_encode(name)
+    if sys.version_info >= (3, 0):
+        name = name.decode('utf-8')
+    return name
+
+
+def get_subj_alt_name(peer_cert):
+    """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
+    """
+    # Pass the cert to cryptography, which has much better APIs for this.
+    # This is technically using private APIs, but should work across all
+    # relevant versions until PyOpenSSL gets something proper for this.
+    cert = _Certificate(openssl_backend, peer_cert._x509)
+
+    # We want to find the SAN extension. Ask Cryptography to locate it (it's
+    # faster than looping in Python)
+    try:
+        ext = cert.extensions.get_extension_for_class(
+            x509.SubjectAlternativeName
+        ).value
+    except x509.ExtensionNotFound:
+        # No such extension, return the empty list.
+        return []
+    except (x509.DuplicateExtension, x509.UnsupportedExtension,
+            x509.UnsupportedGeneralNameType, UnicodeError) as e:
+        # A problem has been found with the quality of the certificate. Assume
+        # no SAN field is present.
+        log.warning(
+            "A problem was encountered with the certificate that prevented "
+            "urllib3 from finding the SubjectAlternativeName field. This can "
+            "affect certificate validation. The error was %s",
+            e,
+        )
+        return []
+
+    # We want to return dNSName and iPAddress fields. We need to cast the IPs
+    # back to strings because the match_hostname function wants them as
+    # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 + # decoded. This is pretty frustrating, but that's what the standard library + # does with certificates, and so we need to attempt to do the same. + names = [ + ('DNS', _dnsname_to_stdlib(name)) + for name in ext.get_values_for_type(x509.DNSName) + ] + names.extend( + ('IP Address', str(name)) + for name in ext.get_values_for_type(x509.IPAddress) + ) + + return names + + +class WrappedSocket(object): + '''API-compatibility wrapper for Python OpenSSL's Connection-class. + + Note: _makefile_refs, _drop() and _reuse() are needed for the garbage + collector of pypy. + ''' + + def __init__(self, connection, socket, suppress_ragged_eofs=True): + self.connection = connection + self.socket = socket + self.suppress_ragged_eofs = suppress_ragged_eofs + self._makefile_refs = 0 + self._closed = False + + def fileno(self): + return self.socket.fileno() + + # Copy-pasted from Python 3.5 source code + def _decref_socketios(self): + if self._makefile_refs > 0: + self._makefile_refs -= 1 + if self._closed: + self.close() + + def recv(self, *args, **kwargs): + try: + data = self.connection.recv(*args, **kwargs) + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + return b'' + else: + raise SocketError(str(e)) + except OpenSSL.SSL.ZeroReturnError as e: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return b'' + else: + raise + except OpenSSL.SSL.WantReadError: + rd = util.wait_for_read(self.socket, self.socket.gettimeout()) + if not rd: + raise timeout('The read operation timed out') + else: + return self.recv(*args, **kwargs) + else: + return data + + def recv_into(self, *args, **kwargs): + try: + return self.connection.recv_into(*args, **kwargs) + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + return 0 + else: + raise SocketError(str(e)) + except OpenSSL.SSL.ZeroReturnError as e: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return 0 + else: + raise + except OpenSSL.SSL.WantReadError: + rd = util.wait_for_read(self.socket, self.socket.gettimeout()) + if not rd: + raise timeout('The read operation timed out') + else: + return self.recv_into(*args, **kwargs) + + def settimeout(self, timeout): + return self.socket.settimeout(timeout) + + def _send_until_done(self, data): + while True: + try: + return self.connection.send(data) + except OpenSSL.SSL.WantWriteError: + wr = util.wait_for_write(self.socket, self.socket.gettimeout()) + if not wr: + raise timeout() + continue + except OpenSSL.SSL.SysCallError as e: + raise SocketError(str(e)) + + def sendall(self, data): + total_sent = 0 + while total_sent < len(data): + sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + total_sent += sent + + def shutdown(self): + # FIXME rethrow compatible exceptions should we ever use this + self.connection.shutdown() + + def close(self): + if self._makefile_refs < 1: + try: + self._closed = True + return self.connection.close() + except OpenSSL.SSL.Error: + return + else: + self._makefile_refs -= 1 + + def getpeercert(self, binary_form=False): + x509 = self.connection.get_peer_certificate() + + if not x509: + return x509 + + if binary_form: + return OpenSSL.crypto.dump_certificate( + OpenSSL.crypto.FILETYPE_ASN1, + x509) + + return { + 'subject': ( + (('commonName', x509.get_subject().CN),), + ), + 'subjectAltName': get_subj_alt_name(x509) + } + + def 
_reuse(self):
+        self._makefile_refs += 1
+
+    def _drop(self):
+        if self._makefile_refs < 1:
+            self.close()
+        else:
+            self._makefile_refs -= 1
+
+
+if _fileobject:  # Platform-specific: Python 2
+    def makefile(self, mode, bufsize=-1):
+        self._makefile_refs += 1
+        return _fileobject(self, mode, bufsize, close=True)
+else:  # Platform-specific: Python 3
+    makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+    """
+    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+    for translating the interface of the standard library ``SSLContext`` object
+    to calls into PyOpenSSL.
+    """
+    def __init__(self, protocol):
+        self.protocol = _openssl_versions[protocol]
+        self._ctx = OpenSSL.SSL.Context(self.protocol)
+        self._options = 0
+        self.check_hostname = False
+
+    @property
+    def options(self):
+        return self._options
+
+    @options.setter
+    def options(self, value):
+        self._options = value
+        self._ctx.set_options(value)
+
+    @property
+    def verify_mode(self):
+        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+    @verify_mode.setter
+    def verify_mode(self, value):
+        self._ctx.set_verify(
+            _stdlib_to_openssl_verify[value],
+            _verify_callback
+        )
+
+    def set_default_verify_paths(self):
+        self._ctx.set_default_verify_paths()
+
+    def set_ciphers(self, ciphers):
+        if isinstance(ciphers, six.text_type):
+            ciphers = ciphers.encode('utf-8')
+        self._ctx.set_cipher_list(ciphers)
+
+    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+        if cafile is not None:
+            cafile = cafile.encode('utf-8')
+        if capath is not None:
+            capath = capath.encode('utf-8')
+        self._ctx.load_verify_locations(cafile, capath)
+        if cadata is not None:
+            self._ctx.load_verify_locations(BytesIO(cadata))
+
+    def load_cert_chain(self, certfile, keyfile=None, password=None):
+        self._ctx.use_certificate_file(certfile)
+        if password is not None:
+            self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+        self._ctx.use_privatekey_file(keyfile or certfile)
+
+    def wrap_socket(self, sock, server_side=False,
+                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
+                    server_hostname=None):
+        cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
+            server_hostname = server_hostname.encode('utf-8')
+
+        if server_hostname is not None:
+            cnx.set_tlsext_host_name(server_hostname)
+
+        cnx.set_connect_state()
+
+        while True:
+            try:
+                cnx.do_handshake()
+            except OpenSSL.SSL.WantReadError:
+                rd = util.wait_for_read(sock, sock.gettimeout())
+                if not rd:
+                    raise timeout('select timed out')
+                continue
+            except OpenSSL.SSL.Error as e:
+                raise ssl.SSLError('bad handshake: %r' % e)
+            break
+
+        return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+    return err_no == 0
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
new file mode 100644
index 000000000..fcc30118c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -0,0 +1,808 @@
+# SPDX-License-Identifier: MIT
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+    import urllib3.contrib.securetransport
+    urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+    Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+    _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+    _load_client_cert_chain
+)
+
+try:  # Platform-specific: Python 2
+    from socket import _fileobject
+except ImportError:  # Platform-specific: Python 3
+    _fileobject = None
+    from ..packages.backports.makefile import backport_makefile
+
+try:
+    memoryview(b'')
+except NameError:
+    raise ImportError("SecureTransport only works on Pythons with memoryview")
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+#    1. It is not possible to call into the callbacks before the dictionary is
+#       populated, so once in the callback the id must be in the dictionary.
+#    2. The callbacks don't mutate the dictionary, they only read from it, and
+#       so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [ + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, +] + +# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of +# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. +_protocol_to_min_max = { + ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), +} + +if hasattr(ssl, "PROTOCOL_SSLv2"): + _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( + SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 + ) +if hasattr(ssl, "PROTOCOL_SSLv3"): + _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( + SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 + ) +if hasattr(ssl, "PROTOCOL_TLSv1"): + _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( + SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 + ) +if hasattr(ssl, "PROTOCOL_TLSv1_1"): + _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( + SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 + ) +if hasattr(ssl, "PROTOCOL_TLSv1_2"): + _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( + SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 + ) +if hasattr(ssl, "PROTOCOL_TLS"): + _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] + + +def inject_into_urllib3(): + """ + Monkey-patch urllib3 with SecureTransport-backed SSL-support. + """ + util.ssl_.SSLContext = SecureTransportContext + util.HAS_SNI = HAS_SNI + util.ssl_.HAS_SNI = HAS_SNI + util.IS_SECURETRANSPORT = True + util.ssl_.IS_SECURETRANSPORT = True + + +def extract_from_urllib3(): + """ + Undo monkey-patching by :func:`inject_into_urllib3`. + """ + util.ssl_.SSLContext = orig_util_SSLContext + util.HAS_SNI = orig_util_HAS_SNI + util.ssl_.HAS_SNI = orig_util_HAS_SNI + util.IS_SECURETRANSPORT = False + util.ssl_.IS_SECURETRANSPORT = False + + +def _read_callback(connection_id, data_buffer, data_length_pointer): + """ + SecureTransport read callback. This is called by ST to request that data + be returned from the socket. 
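+
+    Returns 0 on success, or a SecurityConst.errSSL* status code such as
+    errSSLWouldBlock for a short read or errSSLClosedGraceful at EOF.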
+    """
+    wrapped_socket = None
+    try:
+        wrapped_socket = _connection_refs.get(connection_id)
+        if wrapped_socket is None:
+            return SecurityConst.errSSLInternal
+        base_socket = wrapped_socket.socket
+
+        requested_length = data_length_pointer[0]
+
+        timeout = wrapped_socket.gettimeout()
+        error = None
+        read_count = 0
+        buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
+        buffer_view = memoryview(buffer)
+
+        try:
+            while read_count < requested_length:
+                if timeout is None or timeout >= 0:
+                    readables = util.wait_for_read([base_socket], timeout)
+                    if not readables:
+                        raise socket.error(errno.EAGAIN, 'timed out')
+
+                # We need to tell ctypes that we have a buffer that can be
+                # written to. Upsettingly, we do that like this:
+                chunk_size = base_socket.recv_into(
+                    buffer_view[read_count:requested_length]
+                )
+                read_count += chunk_size
+                if not chunk_size:
+                    if not read_count:
+                        return SecurityConst.errSSLClosedGraceful
+                    break
+        except (socket.error) as e:
+            error = e.errno
+
+            if error is not None and error != errno.EAGAIN:
+                if error == errno.ECONNRESET:
+                    return SecurityConst.errSSLClosedAbort
+                raise
+
+        data_length_pointer[0] = read_count
+
+        if read_count != requested_length:
+            return SecurityConst.errSSLWouldBlock
+
+        return 0
+    except Exception as e:
+        if wrapped_socket is not None:
+            wrapped_socket._exception = e
+        return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+    """
+    SecureTransport write callback. This is called by ST to request that data
+    actually be sent on the network.
+    """
+    wrapped_socket = None
+    try:
+        wrapped_socket = _connection_refs.get(connection_id)
+        if wrapped_socket is None:
+            return SecurityConst.errSSLInternal
+        base_socket = wrapped_socket.socket
+
+        bytes_to_write = data_length_pointer[0]
+        data = ctypes.string_at(data_buffer, bytes_to_write)
+
+        timeout = wrapped_socket.gettimeout()
+        error = None
+        sent = 0
+
+        try:
+            while sent < bytes_to_write:
+                if timeout is None or timeout >= 0:
+                    writables = util.wait_for_write([base_socket], timeout)
+                    if not writables:
+                        raise socket.error(errno.EAGAIN, 'timed out')
+                chunk_sent = base_socket.send(data)
+                sent += chunk_sent
+
+                # This has some needless copying here, but I'm not sure there's
+                # much value in optimising this data path.
+                data = data[chunk_sent:]
+        except (socket.error) as e:
+            error = e.errno
+
+            if error is not None and error != errno.EAGAIN:
+                if error == errno.ECONNRESET:
+                    return SecurityConst.errSSLClosedAbort
+                raise
+
+        data_length_pointer[0] = sent
+        if sent != bytes_to_write:
+            return SecurityConst.errSSLWouldBlock
+
+        return 0
+    except Exception as e:
+        if wrapped_socket is not None:
+            wrapped_socket._exception = e
+        return SecurityConst.errSSLInternal
+
+
+# We need to keep these two object references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+    """
+    API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+    Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+    collector of PyPy.
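+
+    The underlying socket is kept in non-blocking mode; timeouts are instead
+    emulated in the read and write callbacks, using the value stored by
+    settimeout().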
+ """ + def __init__(self, socket): + self.socket = socket + self.context = None + self._makefile_refs = 0 + self._closed = False + self._exception = None + self._keychain = None + self._keychain_dir = None + self._client_cert_chain = None + + # We save off the previously-configured timeout and then set it to + # zero. This is done because we use select and friends to handle the + # timeouts, but if we leave the timeout set on the lower socket then + # Python will "kindly" call select on that socket again for us. Avoid + # that by forcing the timeout to zero. + self._timeout = self.socket.gettimeout() + self.socket.settimeout(0) + + @contextlib.contextmanager + def _raise_on_error(self): + """ + A context manager that can be used to wrap calls that do I/O from + SecureTransport. If any of the I/O callbacks hit an exception, this + context manager will correctly propagate the exception after the fact. + This avoids silently swallowing those exceptions. + + It also correctly forces the socket closed. + """ + self._exception = None + + # We explicitly don't catch around this yield because in the unlikely + # event that an exception was hit in the block we don't want to swallow + # it. + yield + if self._exception is not None: + exception, self._exception = self._exception, None + self.close() + raise exception + + def _set_ciphers(self): + """ + Sets up the allowed ciphers. By default this matches the set in + util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done + custom and doesn't allow changing at this time, mostly because parsing + OpenSSL cipher strings is going to be a freaking nightmare. + """ + ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) + result = Security.SSLSetEnabledCiphers( + self.context, ciphers, len(CIPHER_SUITES) + ) + _assert_no_error(result) + + def _custom_validate(self, verify, trust_bundle): + """ + Called when we have set custom validation. We do this in two cases: + first, when cert validation is entirely disabled; and second, when + using a custom trust DB. + """ + # If we disabled cert validation, just say: cool. + if not verify: + return + + # We want data in memory, so load it up. + if os.path.isfile(trust_bundle): + with open(trust_bundle, 'rb') as f: + trust_bundle = f.read() + + cert_array = None + trust = Security.SecTrustRef() + + try: + # Get a CFArray that contains the certs we want. + cert_array = _cert_array_from_pem(trust_bundle) + + # Ok, now the hard part. We want to get the SecTrustRef that ST has + # created for this connection, shove our CAs into it, tell ST to + # ignore everything else it knows, and then ask if it can build a + # chain. This is a buuuunch of code. + result = Security.SSLCopyPeerTrust( + self.context, ctypes.byref(trust) + ) + _assert_no_error(result) + if not trust: + raise ssl.SSLError("Failed to copy trust reference") + + result = Security.SecTrustSetAnchorCertificates(trust, cert_array) + _assert_no_error(result) + + result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) + _assert_no_error(result) + + trust_result = Security.SecTrustResultType() + result = Security.SecTrustEvaluate( + trust, ctypes.byref(trust_result) + ) + _assert_no_error(result) + finally: + if trust: + CoreFoundation.CFRelease(trust) + + if cert_array is None: + CoreFoundation.CFRelease(cert_array) + + # Ok, now we can look at what the result was. 
+ successes = ( + SecurityConst.kSecTrustResultUnspecified, + SecurityConst.kSecTrustResultProceed + ) + if trust_result.value not in successes: + raise ssl.SSLError( + "certificate verify failed, error code: %d" % + trust_result.value + ) + + def handshake(self, + server_hostname, + verify, + trust_bundle, + min_version, + max_version, + client_cert, + client_key, + client_key_passphrase): + """ + Actually performs the TLS handshake. This is run automatically by + wrapped socket, and shouldn't be needed in user code. + """ + # First, we do the initial bits of connection setup. We need to create + # a context, set its I/O funcs, and set the connection reference. + self.context = Security.SSLCreateContext( + None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType + ) + result = Security.SSLSetIOFuncs( + self.context, _read_callback_pointer, _write_callback_pointer + ) + _assert_no_error(result) + + # Here we need to compute the handle to use. We do this by taking the + # id of self modulo 2**31 - 1. If this is already in the dictionary, we + # just keep incrementing by one until we find a free space. + with _connection_ref_lock: + handle = id(self) % 2147483647 + while handle in _connection_refs: + handle = (handle + 1) % 2147483647 + _connection_refs[handle] = self + + result = Security.SSLSetConnection(self.context, handle) + _assert_no_error(result) + + # If we have a server hostname, we should set that too. + if server_hostname: + if not isinstance(server_hostname, bytes): + server_hostname = server_hostname.encode('utf-8') + + result = Security.SSLSetPeerDomainName( + self.context, server_hostname, len(server_hostname) + ) + _assert_no_error(result) + + # Setup the ciphers. + self._set_ciphers() + + # Set the minimum and maximum TLS versions. + result = Security.SSLSetProtocolVersionMin(self.context, min_version) + _assert_no_error(result) + result = Security.SSLSetProtocolVersionMax(self.context, max_version) + _assert_no_error(result) + + # If there's a trust DB, we need to use it. We do that by telling + # SecureTransport to break on server auth. We also do that if we don't + # want to validate the certs at all: we just won't actually do any + # authing in that case. + if not verify or trust_bundle is not None: + result = Security.SSLSetSessionOption( + self.context, + SecurityConst.kSSLSessionOptionBreakOnServerAuth, + True + ) + _assert_no_error(result) + + # If there's a client cert, we need to use it. + if client_cert: + self._keychain, self._keychain_dir = _temporary_keychain() + self._client_cert_chain = _load_client_cert_chain( + self._keychain, client_cert, client_key + ) + result = Security.SSLSetCertificate( + self.context, self._client_cert_chain + ) + _assert_no_error(result) + + while True: + with self._raise_on_error(): + result = Security.SSLHandshake(self.context) + + if result == SecurityConst.errSSLWouldBlock: + raise socket.timeout("handshake timed out") + elif result == SecurityConst.errSSLServerAuthCompleted: + self._custom_validate(verify, trust_bundle) + continue + else: + _assert_no_error(result) + break + + def fileno(self): + return self.socket.fileno() + + # Copy-pasted from Python 3.5 source code + def _decref_socketios(self): + if self._makefile_refs > 0: + self._makefile_refs -= 1 + if self._closed: + self.close() + + def recv(self, bufsiz): + buffer = ctypes.create_string_buffer(bufsiz) + bytes_read = self.recv_into(buffer, bufsiz) + data = buffer[:bytes_read] + return data + + def recv_into(self, buffer, nbytes=None): + # Read short on EOF. 
+ if self._closed: + return 0 + + if nbytes is None: + nbytes = len(buffer) + + buffer = (ctypes.c_char * nbytes).from_buffer(buffer) + processed_bytes = ctypes.c_size_t(0) + + with self._raise_on_error(): + result = Security.SSLRead( + self.context, buffer, nbytes, ctypes.byref(processed_bytes) + ) + + # There are some result codes that we want to treat as "not always + # errors". Specifically, those are errSSLWouldBlock, + # errSSLClosedGraceful, and errSSLClosedNoNotify. + if (result == SecurityConst.errSSLWouldBlock): + # If we didn't process any bytes, then this was just a time out. + # However, we can get errSSLWouldBlock in situations when we *did* + # read some data, and in those cases we should just read "short" + # and return. + if processed_bytes.value == 0: + # Timed out, no data read. + raise socket.timeout("recv timed out") + elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): + # The remote peer has closed this connection. We should do so as + # well. Note that we don't actually return here because in + # principle this could actually be fired along with return data. + # It's unlikely though. + self.close() + else: + _assert_no_error(result) + + # Ok, we read and probably succeeded. We should return whatever data + # was actually read. + return processed_bytes.value + + def settimeout(self, timeout): + self._timeout = timeout + + def gettimeout(self): + return self._timeout + + def send(self, data): + processed_bytes = ctypes.c_size_t(0) + + with self._raise_on_error(): + result = Security.SSLWrite( + self.context, data, len(data), ctypes.byref(processed_bytes) + ) + + if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: + # Timed out + raise socket.timeout("send timed out") + else: + _assert_no_error(result) + + # We sent, and probably succeeded. Tell them how much we sent. + return processed_bytes.value + + def sendall(self, data): + total_sent = 0 + while total_sent < len(data): + sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + total_sent += sent + + def shutdown(self): + with self._raise_on_error(): + Security.SSLClose(self.context) + + def close(self): + # TODO: should I do clean shutdown here? Do I have to? + if self._makefile_refs < 1: + self._closed = True + if self.context: + CoreFoundation.CFRelease(self.context) + self.context = None + if self._client_cert_chain: + CoreFoundation.CFRelease(self._client_cert_chain) + self._client_cert_chain = None + if self._keychain: + Security.SecKeychainDelete(self._keychain) + CoreFoundation.CFRelease(self._keychain) + shutil.rmtree(self._keychain_dir) + self._keychain = self._keychain_dir = None + return self.socket.close() + else: + self._makefile_refs -= 1 + + def getpeercert(self, binary_form=False): + # Urgh, annoying. + # + # Here's how we do this: + # + # 1. Call SSLCopyPeerTrust to get hold of the trust object for this + # connection. + # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. + # 3. To get the CN, call SecCertificateCopyCommonName and process that + # string so that it's of the appropriate type. + # 4. To get the SAN, we need to do something a bit more complex: + # a. Call SecCertificateCopyValues to get the data, requesting + # kSecOIDSubjectAltName. + # b. Mess about with this dictionary to try to get the SANs out. + # + # This is gross. Really gross. It's going to be a few hundred LoC extra + # just to repeat something that SecureTransport can *already do*. 
So my + # operating assumption at this time is that what we want to do is + # instead to just flag to urllib3 that it shouldn't do its own hostname + # validation when using SecureTransport. + if not binary_form: + raise ValueError( + "SecureTransport only supports dumping binary certs" + ) + trust = Security.SecTrustRef() + certdata = None + der_bytes = None + + try: + # Grab the trust store. + result = Security.SSLCopyPeerTrust( + self.context, ctypes.byref(trust) + ) + _assert_no_error(result) + if not trust: + # Probably we haven't done the handshake yet. No biggie. + return None + + cert_count = Security.SecTrustGetCertificateCount(trust) + if not cert_count: + # Also a case that might happen if we haven't handshaked. + # Handshook? Handshaken? + return None + + leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) + assert leaf + + # Ok, now we want the DER bytes. + certdata = Security.SecCertificateCopyData(leaf) + assert certdata + + data_length = CoreFoundation.CFDataGetLength(certdata) + data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) + der_bytes = ctypes.string_at(data_buffer, data_length) + finally: + if certdata: + CoreFoundation.CFRelease(certdata) + if trust: + CoreFoundation.CFRelease(trust) + + return der_bytes + + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + + +if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): + self._makefile_refs += 1 + return _fileobject(self, mode, bufsize, close=True) +else: # Platform-specific: Python 3 + def makefile(self, mode="r", buffering=None, *args, **kwargs): + # We disable buffering with SecureTransport because it conflicts with + # the buffering that ST does internally (see issue #1153 for more). + buffering = 0 + return backport_makefile(self, mode, buffering, *args, **kwargs) + +WrappedSocket.makefile = makefile + + +class SecureTransportContext(object): + """ + I am a wrapper class for the SecureTransport library, to translate the + interface of the standard library ``SSLContext`` object to calls into + SecureTransport. + """ + def __init__(self, protocol): + self._min_version, self._max_version = _protocol_to_min_max[protocol] + self._options = 0 + self._verify = False + self._trust_bundle = None + self._client_cert = None + self._client_key = None + self._client_key_passphrase = None + + @property + def check_hostname(self): + """ + SecureTransport cannot have its hostname checking disabled. For more, + see the comment on getpeercert() in this file. + """ + return True + + @check_hostname.setter + def check_hostname(self, value): + """ + SecureTransport cannot have its hostname checking disabled. For more, + see the comment on getpeercert() in this file. + """ + pass + + @property + def options(self): + # TODO: Well, crap. + # + # So this is the bit of the code that is the most likely to cause us + # trouble. Essentially we need to enumerate all of the SSL options that + # users might want to use and try to see if we can sensibly translate + # them, or whether we should just ignore them. + return self._options + + @options.setter + def options(self, value): + # TODO: Update in line with above. 
+        self._options = value
+
+    @property
+    def verify_mode(self):
+        return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+    @verify_mode.setter
+    def verify_mode(self, value):
+        self._verify = True if value == ssl.CERT_REQUIRED else False
+
+    def set_default_verify_paths(self):
+        # So, this has to do something a bit weird. Specifically, what it does
+        # is nothing.
+        #
+        # This means that, if we had previously had load_verify_locations
+        # called, this does not undo that. We need to do that because it turns
+        # out that the rest of the urllib3 code will attempt to load the
+        # default verify paths if it hasn't been told about any paths, even if
+        # the context itself was configured sometime earlier. We resolve that
+        # by just ignoring it.
+        pass
+
+    def load_default_certs(self):
+        return self.set_default_verify_paths()
+
+    def set_ciphers(self, ciphers):
+        # For now, we just require the default cipher string.
+        if ciphers != util.ssl_.DEFAULT_CIPHERS:
+            raise ValueError(
+                "SecureTransport doesn't support custom cipher strings"
+            )
+
+    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+        # OK, we only really support cadata and cafile.
+        if capath is not None:
+            raise ValueError(
+                "SecureTransport does not support cert directories"
+            )
+
+        self._trust_bundle = cafile or cadata
+
+    def load_cert_chain(self, certfile, keyfile=None, password=None):
+        self._client_cert = certfile
+        self._client_key = keyfile
+        self._client_key_passphrase = password
+
+    def wrap_socket(self, sock, server_side=False,
+                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
+                    server_hostname=None):
+        # So, what do we do here? Firstly, we assert some properties. This is a
+        # stripped down shim, so there is some functionality we don't support.
+        # See PEP 543 for the real deal.
+        assert not server_side
+        assert do_handshake_on_connect
+        assert suppress_ragged_eofs
+
+        # Ok, we're good to go. Now we want to create the wrapped socket object
+        # and store it in the appropriate place.
+        wrapped_socket = WrappedSocket(sock)
+
+        # Now we can handshake
+        wrapped_socket.handshake(
+            server_hostname, self._verify, self._trust_bundle,
+            self._min_version, self._max_version, self._client_cert,
+            self._client_key, self._client_key_passphrase
+        )
+        return wrapped_socket
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
new file mode 100644
index 000000000..1cb79285b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+  IPv6 addresses. Any such connection attempt will fail. You must use a domain
+  name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+  such connection attempt will fail.
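+
+A minimal usage sketch (the proxy URL below is illustrative; the 'socks5h'
+scheme resolves hostnames on the proxy)::
+
+    proxy = SOCKSProxyManager('socks5h://localhost:1080/')
+    resp = proxy.request('GET', 'http://example.com/')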
+""" +from __future__ import absolute_import + +try: + import socks +except ImportError: + import warnings + from ..exceptions import DependencyWarning + + warnings.warn(( + 'SOCKS support in urllib3 requires the installation of optional ' + 'dependencies: specifically, PySocks. For more information, see ' + 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' + ), + DependencyWarning + ) + raise + +from socket import error as SocketError, timeout as SocketTimeout + +from ..connection import ( + HTTPConnection, HTTPSConnection +) +from ..connectionpool import ( + HTTPConnectionPool, HTTPSConnectionPool +) +from ..exceptions import ConnectTimeoutError, NewConnectionError +from ..poolmanager import PoolManager +from ..util.url import parse_url + +try: + import ssl +except ImportError: + ssl = None + + +class SOCKSConnection(HTTPConnection): + """ + A plain-text HTTP connection that connects via a SOCKS proxy. + """ + def __init__(self, *args, **kwargs): + self._socks_options = kwargs.pop('_socks_options') + super(SOCKSConnection, self).__init__(*args, **kwargs) + + def _new_conn(self): + """ + Establish a new connection via the SOCKS proxy. + """ + extra_kw = {} + if self.source_address: + extra_kw['source_address'] = self.source_address + + if self.socket_options: + extra_kw['socket_options'] = self.socket_options + + try: + conn = socks.create_connection( + (self.host, self.port), + proxy_type=self._socks_options['socks_version'], + proxy_addr=self._socks_options['proxy_host'], + proxy_port=self._socks_options['proxy_port'], + proxy_username=self._socks_options['username'], + proxy_password=self._socks_options['password'], + proxy_rdns=self._socks_options['rdns'], + timeout=self.timeout, + **extra_kw + ) + + except SocketTimeout as e: + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout)) + + except socks.ProxyError as e: + # This is fragile as hell, but it seems to be the only way to raise + # useful errors here. + if e.socket_err: + error = e.socket_err + if isinstance(error, SocketTimeout): + raise ConnectTimeoutError( + self, + "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout) + ) + else: + raise NewConnectionError( + self, + "Failed to establish a new connection: %s" % error + ) + else: + raise NewConnectionError( + self, + "Failed to establish a new connection: %s" % e + ) + + except SocketError as e: # Defensive: PySocks should catch all these. + raise NewConnectionError( + self, "Failed to establish a new connection: %s" % e) + + return conn + + +# We don't need to duplicate the Verified/Unverified distinction from +# urllib3/connection.py here because the HTTPSConnection will already have been +# correctly set to either the Verified or Unverified form by that module. This +# means the SOCKSHTTPSConnection will automatically be the correct type. +class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): + pass + + +class SOCKSHTTPConnectionPool(HTTPConnectionPool): + ConnectionCls = SOCKSConnection + + +class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): + ConnectionCls = SOCKSHTTPSConnection + + +class SOCKSProxyManager(PoolManager): + """ + A version of the urllib3 ProxyManager that routes connections via the + defined SOCKS proxy. 
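+
+    The proxy URL scheme selects the SOCKS version and where hostnames are
+    resolved: 'socks5h' and 'socks4a' resolve them on the proxy (rdns), while
+    'socks5' and 'socks4' resolve them locally.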
+ """ + pool_classes_by_scheme = { + 'http': SOCKSHTTPConnectionPool, + 'https': SOCKSHTTPSConnectionPool, + } + + def __init__(self, proxy_url, username=None, password=None, + num_pools=10, headers=None, **connection_pool_kw): + parsed = parse_url(proxy_url) + + if parsed.scheme == 'socks5': + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = False + elif parsed.scheme == 'socks5h': + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = True + elif parsed.scheme == 'socks4': + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = False + elif parsed.scheme == 'socks4a': + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = True + else: + raise ValueError( + "Unable to determine SOCKS version from %s" % proxy_url + ) + + self.proxy_url = proxy_url + + socks_options = { + 'socks_version': socks_version, + 'proxy_host': parsed.host, + 'proxy_port': parsed.port, + 'username': username, + 'password': password, + 'rdns': rdns + } + connection_pool_kw['_socks_options'] = socks_options + + super(SOCKSProxyManager, self).__init__( + num_pools, headers, **connection_pool_kw + ) + + self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme diff --git a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py new file mode 100644 index 000000000..a71cabe06 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py @@ -0,0 +1,247 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +from .packages.six.moves.http_client import ( + IncompleteRead as httplib_IncompleteRead +) +# Base Exceptions + + +class HTTPError(Exception): + "Base exception used by this module." + pass + + +class HTTPWarning(Warning): + "Base warning used by this module." + pass + + +class PoolError(HTTPError): + "Base exception for errors caused within a pool." + def __init__(self, pool, message): + self.pool = pool + HTTPError.__init__(self, "%s: %s" % (pool, message)) + + def __reduce__(self): + # For pickling purposes. + return self.__class__, (None, None) + + +class RequestError(PoolError): + "Base exception for PoolErrors that have associated URLs." + def __init__(self, pool, url, message): + self.url = url + PoolError.__init__(self, pool, message) + + def __reduce__(self): + # For pickling purposes. + return self.__class__, (None, self.url, None) + + +class SSLError(HTTPError): + "Raised when SSL certificate fails in an HTTPS connection." + pass + + +class ProxyError(HTTPError): + "Raised when the connection to a proxy fails." + pass + + +class DecodeError(HTTPError): + "Raised when automatic decoding based on Content-Type fails." + pass + + +class ProtocolError(HTTPError): + "Raised when something unexpected happens mid-request/response." + pass + + +#: Renamed to ProtocolError but aliased for backwards compatibility. +ConnectionError = ProtocolError + + +# Leaf Exceptions + +class MaxRetryError(RequestError): + """Raised when the maximum number of retries is exceeded. + + :param pool: The connection pool + :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` + :param string url: The requested Url + :param exceptions.Exception reason: The underlying error + + """ + + def __init__(self, pool, url, reason=None): + self.reason = reason + + message = "Max retries exceeded with url: %s (Caused by %r)" % ( + url, reason) + + RequestError.__init__(self, pool, url, message) + + +class HostChangedError(RequestError): + "Raised when an existing pool gets a request for a foreign host." 
+
+    def __init__(self, pool, url, retries=3):
+        message = "Tried to open a foreign host with url: %s" % url
+        RequestError.__init__(self, pool, url, message)
+        self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+    """ Raised when passing an invalid state to a timeout """
+    pass
+
+
+class TimeoutError(HTTPError):
+    """ Raised when a socket timeout error occurs.
+
+    Catching this error will catch both :exc:`ReadTimeoutErrors
+    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+    """
+    pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+    "Raised when a socket timeout occurs while receiving data from a server"
+    pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+    "Raised when a socket timeout occurs while connecting to a server"
+    pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+    pass
+
+
+class EmptyPoolError(PoolError):
+    "Raised when a pool runs out of connections and no more are allowed."
+    pass
+
+
+class ClosedPoolError(PoolError):
+    "Raised when a request enters a pool after the pool has been closed."
+    pass
+
+
+class LocationValueError(ValueError, HTTPError):
+    "Raised when there is something wrong with a given URL input."
+    pass
+
+
+class LocationParseError(LocationValueError):
+    "Raised when get_host or similar fails to parse the URL input."
+
+    def __init__(self, location):
+        message = "Failed to parse: %s" % location
+        HTTPError.__init__(self, message)
+
+        self.location = location
+
+
+class ResponseError(HTTPError):
+    "Used as a container for an error reason supplied in a MaxRetryError."
+    GENERIC_ERROR = 'too many error responses'
+    SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+    "Warned when performing security-reducing actions"
+    pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+    "Warned when connecting to a host with a certificate missing a SAN."
+    pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+    "Warned when making an unverified HTTPS request."
+    pass
+
+
+class SystemTimeWarning(SecurityWarning):
+    "Warned when system time is suspected to be wrong"
+    pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+    "Warned when certain SSL configuration is not available on a platform."
+    pass
+
+
+class SNIMissingWarning(HTTPWarning):
+    "Warned when making an HTTPS request without SNI available."
+    pass
+
+
+class DependencyWarning(HTTPWarning):
+    """
+    Warned when an attempt is made to import a module with missing optional
+    dependencies.
+    """
+    pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+    "Response needs to be chunked in order to read it as chunks."
+    pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+    """
+    Body should be httplib.HTTPResponse like (have an fp attribute which
+    returns raw chunks) for read_chunked().
+    """
+    pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+    """
+    Response length doesn't match expected Content-Length
+
+    Subclass of http_client.IncompleteRead to allow int value
+    for `partial` to avoid creating large objects on streamed
+    reads.
+ """ + def __init__(self, partial, expected): + super(IncompleteRead, self).__init__(partial, expected) + + def __repr__(self): + return ('IncompleteRead(%i bytes read, ' + '%i more expected)' % (self.partial, self.expected)) + + +class InvalidHeader(HTTPError): + "The header provided was somehow invalid." + pass + + +class ProxySchemeUnknown(AssertionError, ValueError): + "ProxyManager does not support the supplied scheme" + # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. + + def __init__(self, scheme): + message = "Not supported proxy scheme %s" % scheme + super(ProxySchemeUnknown, self).__init__(message) + + +class HeaderParsingError(HTTPError): + "Raised by assert_header_parsing, but we convert it to a log.warning statement." + def __init__(self, defects, unparsed_data): + message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) + super(HeaderParsingError, self).__init__(message) + + +class UnrewindableBodyError(HTTPError): + "urllib3 encountered an error when trying to rewind a body" + pass diff --git a/collectors/python.d.plugin/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py new file mode 100644 index 000000000..de7577b74 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py @@ -0,0 +1,179 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import email.utils +import mimetypes + +from .packages import six + + +def guess_content_type(filename, default='application/octet-stream'): + """ + Guess the "Content-Type" of a file. + + :param filename: + The filename to guess the "Content-Type" of using :mod:`mimetypes`. + :param default: + If no "Content-Type" can be guessed, default to `default`. + """ + if filename: + return mimetypes.guess_type(filename)[0] or default + return default + + +def format_header_param(name, value): + """ + Helper function to format and quote a single header parameter. + + Particularly useful for header parameters which might contain + non-ASCII values, like file names. This follows RFC 2231, as + suggested by RFC 2388 Section 4.4. + + :param name: + The name of the parameter, a string expected to be ASCII only. + :param value: + The value of the parameter, provided as a unicode string. + """ + if not any(ch in value for ch in '"\\\r\n'): + result = '%s="%s"' % (name, value) + try: + result.encode('ascii') + except (UnicodeEncodeError, UnicodeDecodeError): + pass + else: + return result + if not six.PY3 and isinstance(value, six.text_type): # Python 2: + value = value.encode('utf-8') + value = email.utils.encode_rfc2231(value, 'utf-8') + value = '%s*=%s' % (name, value) + return value + + +class RequestField(object): + """ + A data container for request body parameters. + + :param name: + The name of this request field. + :param data: + The data/value body. + :param filename: + An optional filename of the request field. + :param headers: + An optional dict-like object of headers to initially use for the field. + """ + def __init__(self, name, data, filename=None, headers=None): + self._name = name + self._filename = filename + self.data = data + self.headers = {} + if headers: + self.headers = dict(headers) + + @classmethod + def from_tuples(cls, fieldname, value): + """ + A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. + + Supports constructing :class:`~urllib3.fields.RequestField` from + parameter of key/value strings AND key/filetuple. 
+        (filename, data, MIME type) tuple where the MIME type is optional.
+        For example::
+
+            'foo': 'bar',
+            'fakefile': ('foofile.txt', 'contents of foofile'),
+            'realfile': ('barfile.txt', open('realfile').read()),
+            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+            'nonamefile': 'contents of nonamefile field',
+
+        Field names and filenames must be unicode.
+        """
+        if isinstance(value, tuple):
+            if len(value) == 3:
+                filename, data, content_type = value
+            else:
+                filename, data = value
+                content_type = guess_content_type(filename)
+        else:
+            filename = None
+            content_type = None
+            data = value
+
+        request_param = cls(fieldname, data, filename=filename)
+        request_param.make_multipart(content_type=content_type)
+
+        return request_param
+
+    def _render_part(self, name, value):
+        """
+        Overridable helper function to format a single header parameter.
+
+        :param name:
+            The name of the parameter, a string expected to be ASCII only.
+        :param value:
+            The value of the parameter, provided as a unicode string.
+        """
+        return format_header_param(name, value)
+
+    def _render_parts(self, header_parts):
+        """
+        Helper function to format and quote a single header.
+
+        Useful for single headers that are composed of multiple items. E.g.,
+        'Content-Disposition' fields.
+
+        :param header_parts:
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+            as `k1="v1"; k2="v2"; ...`.
+        """
+        parts = []
+        iterable = header_parts
+        if isinstance(header_parts, dict):
+            iterable = header_parts.items()
+
+        for name, value in iterable:
+            if value is not None:
+                parts.append(self._render_part(name, value))
+
+        return '; '.join(parts)
+
+    def render_headers(self):
+        """
+        Renders the headers for this request field.
+        """
+        lines = []
+
+        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+        for sort_key in sort_keys:
+            if self.headers.get(sort_key, False):
+                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+        for header_name, header_value in self.headers.items():
+            if header_name not in sort_keys:
+                if header_value:
+                    lines.append('%s: %s' % (header_name, header_value))
+
+        lines.append('\r\n')
+        return '\r\n'.join(lines)
+
+    def make_multipart(self, content_disposition=None, content_type=None,
+                       content_location=None):
+        """
+        Makes this request field into a multipart request field.
+
+        This method sets the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers of the request field.
+
+        :param content_type:
+            The 'Content-Type' of the request body.
+        :param content_location:
+            The 'Content-Location' of the request body.
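+
+        An illustrative sketch (editor's addition; the field name and data
+        are arbitrary)::
+
+            >>> field = RequestField('somename', 'data')
+            >>> field.make_multipart(content_type='text/plain')
+            >>> field.headers['Content-Disposition']
+            'form-data; name="somename"'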
+ + """ + self.headers['Content-Disposition'] = content_disposition or 'form-data' + self.headers['Content-Disposition'] += '; '.join([ + '', self._render_parts( + (('name', self._name), ('filename', self._filename)) + ) + ]) + self.headers['Content-Type'] = content_type + self.headers['Content-Location'] = content_location diff --git a/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py new file mode 100644 index 000000000..3febc9cfe --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import codecs + +from uuid import uuid4 +from io import BytesIO + +from .packages import six +from .packages.six import b +from .fields import RequestField + +writer = codecs.lookup('utf-8')[3] + + +def choose_boundary(): + """ + Our embarrassingly-simple replacement for mimetools.choose_boundary. + """ + return uuid4().hex + + +def iter_field_objects(fields): + """ + Iterate over fields. + + Supports list of (k, v) tuples and dicts, and lists of + :class:`~urllib3.fields.RequestField`. + + """ + if isinstance(fields, dict): + i = six.iteritems(fields) + else: + i = iter(fields) + + for field in i: + if isinstance(field, RequestField): + yield field + else: + yield RequestField.from_tuples(*field) + + +def iter_fields(fields): + """ + .. deprecated:: 1.6 + + Iterate over fields. + + The addition of :class:`~urllib3.fields.RequestField` makes this function + obsolete. Instead, use :func:`iter_field_objects`, which returns + :class:`~urllib3.fields.RequestField` objects. + + Supports list of (k, v) tuples and dicts. + """ + if isinstance(fields, dict): + return ((k, v) for k, v in six.iteritems(fields)) + + return ((k, v) for k, v in fields) + + +def encode_multipart_formdata(fields, boundary=None): + """ + Encode a dictionary of ``fields`` using the multipart/form-data MIME format. + + :param fields: + Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). + + :param boundary: + If not specified, then a random boundary will be generated using + :func:`mimetools.choose_boundary`. + """ + body = BytesIO() + if boundary is None: + boundary = choose_boundary() + + for field in iter_field_objects(fields): + body.write(b('--%s\r\n' % (boundary))) + + writer(body).write(field.render_headers()) + data = field.data + + if isinstance(data, int): + data = str(data) # Backwards compatibility + + if isinstance(data, six.text_type): + writer(body).write(data) + else: + body.write(data) + + body.write(b'\r\n') + + body.write(b('--%s--\r\n' % (boundary))) + + content_type = str('multipart/form-data; boundary=%s' % boundary) + + return body.getvalue(), content_type diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py new file mode 100644 index 000000000..170e974c1 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from . 
import ssl_match_hostname + +__all__ = ('ssl_match_hostname', ) diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py new file mode 100644 index 000000000..8ab122f8b --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: MIT +""" +backports.makefile +~~~~~~~~~~~~~~~~~~ + +Backports the Python 3 ``socket.makefile`` method for use with anything that +wants to create a "fake" socket object. +""" +import io + +from socket import SocketIO + + +def backport_makefile(self, mode="r", buffering=None, encoding=None, + errors=None, newline=None): + """ + Backport of ``socket.makefile`` from Python 3.5. + """ + if not set(mode) <= set(["r", "w", "b"]): + raise ValueError( + "invalid mode %r (only r, w, b allowed)" % (mode,) + ) + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._makefile_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py new file mode 100644 index 000000000..9f7c0e6b8 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py @@ -0,0 +1,260 @@ +# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. +# Passes Python2.7's test suite and incorporates all the latest updates. +# Copyright 2009 Raymond Hettinger, released under the MIT License. +# http://code.activestate.com/recipes/576693/ +# SPDX-License-Identifier: MIT +try: + from thread import get_ident as _get_ident +except ImportError: + from dummy_thread import get_ident as _get_ident + +try: + from _abcoll import KeysView, ValuesView, ItemsView +except ImportError: + pass + + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. 
Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
+ + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running={}): + 'od.__repr__() <==> repr(od)' + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. 
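+
+        A quick sketch (editor's illustration)::
+
+            >>> od = OrderedDict([('a', 1), ('b', 2)])
+            >>> od == OrderedDict([('b', 2), ('a', 1)])
+            False
+            >>> od == {'b': 2, 'a': 1}
+            True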
+ + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py new file mode 100644 index 000000000..31df5012b --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py @@ -0,0 +1,852 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. 
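+            # (Editor's note: the setattr() above stored the resolved object
+            # on the instance, so deleting the class-level descriptor means
+            # later lookups hit the cached attribute directly and this
+            # __get__ never runs again for that name.)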
+ delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + 
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + 
MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", 
"urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = 
operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + 
raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. 
+# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py new file mode 100644 index 000000000..2aeeeff91 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: MIT +import sys + +try: + # Our match_hostname function is the same as 3.5's, so we only want to + # import the match_hostname function if it's at least that good. + if sys.version_info < (3, 5): + raise ImportError("Fallback to vendored code") + + from ssl import CertificateError, match_hostname +except ImportError: + try: + # Backport of the function from a pypi module + from backports.ssl_match_hostname import CertificateError, match_hostname + except ImportError: + # Our vendored copy + from ._implementation import CertificateError, match_hostname + +# Not needed, but documenting what we provide. +__all__ = ('CertificateError', 'match_hostname') diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py new file mode 100644 index 000000000..647e081da --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py @@ -0,0 +1,156 @@ +"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" + +# SPDX-License-Identifier: Python-2.0 + +import re +import sys + +# ipaddress has been backported to 2.6+ in pypi. If it is installed on the +# system, use it to handle IPAddress ServerAltnames (this was added in +# python-3.5) otherwise only do DNS matching. This allows +# backports.ssl_match_hostname to continue to be used all the way back to +# python-2.4. 
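+#
+# A usage sketch (editor's illustration; the host name and the TLS setup are
+# placeholders, not part of this module):
+#
+#     import socket, ssl
+#     ctx = ssl.create_default_context()
+#     s = ctx.wrap_socket(socket.create_connection(('example.org', 443)),
+#                         server_hostname='example.org')
+#     match_hostname(s.getpeercert(), 'example.org')  # CertificateError on mismatch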
+try: + import ipaddress +except ImportError: + ipaddress = None + +__version__ = '3.5.0.1' + + +class CertificateError(ValueError): + pass + + +def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + # Ported from python3-syntax: + # leftmost, *remainder = dn.split(r'.') + parts = dn.split(r'.') + leftmost = parts[0] + remainder = parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + +def _to_unicode(obj): + if isinstance(obj, str) and sys.version_info < (3,): + obj = unicode(obj, encoding='ascii', errors='strict') + return obj + +def _ipaddress_match(ipname, host_ip): + """Exact matching of IP addresses. + + RFC 6125 explicitly doesn't define an algorithm for this + (section 1.7.2 - "Out of Scope"). + """ + # OpenSSL may add a trailing newline to a subjectAltName's IP address + # Divergence from upstream: ipaddress can't handle byte str + ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) + return ip == host_ip + + +def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + try: + # Divergence from upstream: ipaddress can't handle byte str + host_ip = ipaddress.ip_address(_to_unicode(hostname)) + except ValueError: + # Not an IP address (common case) + host_ip = None + except UnicodeError: + # Divergence from upstream: Have to deal with ipaddress not taking + # byte strings. 
addresses should be all ascii, so we consider it not + # an ipaddress in this case + host_ip = None + except AttributeError: + # Divergence from upstream: Make ipaddress library optional + if ipaddress is None: + host_ip = None + else: + raise + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if host_ip is None and _dnsname_match(value, hostname): + return + dnsnames.append(value) + elif key == 'IP Address': + if host_ip is not None and _ipaddress_match(value, host_ip): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") diff --git a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py new file mode 100644 index 000000000..adea9bc01 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py @@ -0,0 +1,441 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import collections +import functools +import logging + +from ._collections import RecentlyUsedContainer +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool +from .connectionpool import port_by_scheme +from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown +from .packages.six.moves.urllib.parse import urljoin +from .request import RequestMethods +from .util.url import parse_url +from .util.retry import Retry + + +__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] + + +log = logging.getLogger(__name__) + +SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', + 'ssl_version', 'ca_cert_dir', 'ssl_context') + +# All known keyword arguments that could be provided to the pool manager, its +# pools, or the underlying connections. This is used to construct a pool key. +_key_fields = ( + 'key_scheme', # str + 'key_host', # str + 'key_port', # int + 'key_timeout', # int or float or Timeout + 'key_retries', # int or Retry + 'key_strict', # bool + 'key_block', # bool + 'key_source_address', # str + 'key_key_file', # str + 'key_cert_file', # str + 'key_cert_reqs', # str + 'key_ca_certs', # str + 'key_ssl_version', # str + 'key_ca_cert_dir', # str + 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext + 'key_maxsize', # int + 'key_headers', # dict + 'key__proxy', # parsed proxy url + 'key__proxy_headers', # dict + 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples + 'key__socks_options', # dict + 'key_assert_hostname', # bool or string + 'key_assert_fingerprint', # str +) + +#: The namedtuple class used to construct keys for the connection pool. +#: All custom key schemes should include the fields in this key at a minimum. 
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+    """
+    Create a pool key out of a request context dictionary.
+
+    According to RFC 3986, both the scheme and host are case-insensitive.
+    Therefore, this function normalizes both before constructing the pool
+    key for an HTTPS request. If you wish to change this behaviour, provide
+    alternate callables to ``key_fn_by_scheme``.
+
+    :param key_class:
+        The class to use when constructing the key. This should be a namedtuple
+        with the ``scheme`` and ``host`` keys at a minimum.
+    :type key_class: namedtuple
+    :param request_context:
+        A dictionary-like object that contains the context for a request.
+    :type request_context: dict
+
+    :return: A namedtuple that can be used as a connection pool key.
+    :rtype: PoolKey
+    """
+    # Since we mutate the dictionary, make a copy first
+    context = request_context.copy()
+    context['scheme'] = context['scheme'].lower()
+    context['host'] = context['host'].lower()
+
+    # These are both dictionaries and need to be transformed into frozensets
+    for key in ('headers', '_proxy_headers', '_socks_options'):
+        if key in context and context[key] is not None:
+            context[key] = frozenset(context[key].items())
+
+    # The socket_options key may be a list and needs to be transformed into a
+    # tuple.
+    socket_opts = context.get('socket_options')
+    if socket_opts is not None:
+        context['socket_options'] = tuple(socket_opts)
+
+    # Map the kwargs to the names in the namedtuple - this is necessary since
+    # namedtuples can't have fields starting with '_'.
+    for key in list(context.keys()):
+        context['key_' + key] = context.pop(key)
+
+    # Default to ``None`` for keys missing from the context
+    for field in key_class._fields:
+        if field not in context:
+            context[field] = None
+
+    return key_class(**context)
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+    'http': functools.partial(_default_key_normalizer, PoolKey),
+    'https': functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {
+    'http': HTTPConnectionPool,
+    'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+    """
+    Allows for arbitrary requests while transparently keeping track of
+    necessary connection pools for you.
+
+    :param num_pools:
+        Number of connection pools to cache before discarding the least
+        recently used pool.
+
+    :param headers:
+        Headers to include with all requests, unless other headers are given
+        explicitly.
+
+    :param \\**connection_pool_kw:
+        Additional parameters are used to create fresh
+        :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+    Example::
+
+        >>> manager = PoolManager(num_pools=2)
+        >>> r = manager.request('GET', 'http://google.com/')
+        >>> r = manager.request('GET', 'http://google.com/mail')
+        >>> r = manager.request('GET', 'http://yahoo.com/')
+        >>> len(manager.pools)
+        2
+
+    """
+
+    proxy = None
+
+    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+        RequestMethods.__init__(self, headers)
+        self.connection_pool_kw = connection_pool_kw
+        self.pools = RecentlyUsedContainer(num_pools,
+                                           dispose_func=lambda p: p.close())
+
+        # Locally set the pool classes and keys so other PoolManagers can
+        # override them.
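+        # (Editor's sketch: a subclass could, for example, do
+        #     self.pool_classes_by_scheme = {'http': MyHTTPPool,
+        #                                    'https': MyHTTPSPool}
+        # before any pool is created; MyHTTPPool and MyHTTPSPool are
+        # hypothetical pool subclasses, not part of this module.)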
+ self.pool_classes_by_scheme = pool_classes_by_scheme + self.key_fn_by_scheme = key_fn_by_scheme.copy() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.clear() + # Return False to re-raise any potential exceptions + return False + + def _new_pool(self, scheme, host, port, request_context=None): + """ + Create a new :class:`ConnectionPool` based on host, port, scheme, and + any additional pool keyword arguments. + + If ``request_context`` is provided, it is provided as keyword arguments + to the pool class used. This method is used to actually create the + connection pools handed out by :meth:`connection_from_url` and + companion methods. It is intended to be overridden for customization. + """ + pool_cls = self.pool_classes_by_scheme[scheme] + if request_context is None: + request_context = self.connection_pool_kw.copy() + + # Although the context has everything necessary to create the pool, + # this function has historically only used the scheme, host, and port + # in the positional args. When an API change is acceptable these can + # be removed. + for key in ('scheme', 'host', 'port'): + request_context.pop(key, None) + + if scheme == 'http': + for kw in SSL_KEYWORDS: + request_context.pop(kw, None) + + return pool_cls(host, port, **request_context) + + def clear(self): + """ + Empty our store of pools and direct them all to close. + + This will not affect in-flight connections, but they will not be + re-used after completion. + """ + self.pools.clear() + + def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + """ + Get a :class:`ConnectionPool` based on the host, port, and scheme. + + If ``port`` isn't given, it will be derived from the ``scheme`` using + ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is + provided, it is merged with the instance's ``connection_pool_kw`` + variable and used to create the new connection pool, if one is + needed. + """ + + if not host: + raise LocationValueError("No host specified.") + + request_context = self._merge_pool_kwargs(pool_kwargs) + request_context['scheme'] = scheme or 'http' + if not port: + port = port_by_scheme.get(request_context['scheme'].lower(), 80) + request_context['port'] = port + request_context['host'] = host + + return self.connection_from_context(request_context) + + def connection_from_context(self, request_context): + """ + Get a :class:`ConnectionPool` based on the request context. + + ``request_context`` must at least contain the ``scheme`` key and its + value must be a key in ``key_fn_by_scheme`` instance variable. + """ + scheme = request_context['scheme'].lower() + pool_key_constructor = self.key_fn_by_scheme[scheme] + pool_key = pool_key_constructor(request_context) + + return self.connection_from_pool_key(pool_key, request_context=request_context) + + def connection_from_pool_key(self, pool_key, request_context=None): + """ + Get a :class:`ConnectionPool` based on the provided pool key. + + ``pool_key`` should be a namedtuple that only contains immutable + objects. At a minimum it must have the ``scheme``, ``host``, and + ``port`` fields. + """ + with self.pools.lock: + # If the scheme, host, or port doesn't match existing open + # connections, open a new ConnectionPool. 
+            pool = self.pools.get(pool_key)
+            if pool:
+                return pool
+
+            # Make a fresh ConnectionPool of the desired type
+            scheme = request_context['scheme']
+            host = request_context['host']
+            port = request_context['port']
+            pool = self._new_pool(scheme, host, port, request_context=request_context)
+            self.pools[pool_key] = pool
+
+        return pool
+
+    def connection_from_url(self, url, pool_kwargs=None):
+        """
+        Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+        If ``pool_kwargs`` is not provided and a new pool needs to be
+        constructed, ``self.connection_pool_kw`` is used to initialize
+        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+        is provided, it is used instead. Note that if a new pool does not
+        need to be created for the request, the provided ``pool_kwargs`` are
+        not used.
+        """
+        u = parse_url(url)
+        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+                                         pool_kwargs=pool_kwargs)
+
+    def _merge_pool_kwargs(self, override):
+        """
+        Merge a dictionary of override values for self.connection_pool_kw.
+
+        This does not modify self.connection_pool_kw and returns a new dict.
+        Any keys in the override dictionary with a value of ``None`` are
+        removed from the merged dictionary.
+        """
+        base_pool_kwargs = self.connection_pool_kw.copy()
+        if override:
+            for key, value in override.items():
+                if value is None:
+                    try:
+                        del base_pool_kwargs[key]
+                    except KeyError:
+                        pass
+                else:
+                    base_pool_kwargs[key] = value
+        return base_pool_kwargs
+
+    def urlopen(self, method, url, redirect=True, **kw):
+        """
+        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`,
+        but with custom cross-host redirect logic, sending only the
+        request-uri portion of the ``url``.
+
+        The given ``url`` parameter must be absolute, such that an appropriate
+        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+        """
+        u = parse_url(url)
+        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+        kw['assert_same_host'] = False
+        kw['redirect'] = False
+        if 'headers' not in kw:
+            kw['headers'] = self.headers
+
+        if self.proxy is not None and u.scheme == "http":
+            response = conn.urlopen(method, url, **kw)
+        else:
+            response = conn.urlopen(method, u.request_uri, **kw)
+
+        redirect_location = redirect and response.get_redirect_location()
+        if not redirect_location:
+            return response
+
+        # Support relative URLs for redirecting.
+        redirect_location = urljoin(url, redirect_location)
+
+        # RFC 7231, Section 6.4.4
+        if response.status == 303:
+            method = 'GET'
+
+        retries = kw.get('retries')
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(retries, redirect=redirect)
+
+        try:
+            retries = retries.increment(method, url, response=response, _pool=conn)
+        except MaxRetryError:
+            if retries.raise_on_redirect:
+                raise
+            return response
+
+        kw['retries'] = retries
+        kw['redirect'] = redirect
+
+        log.info("Redirecting %s -> %s", url, redirect_location)
+        return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+    """
+    Behaves just like :class:`PoolManager`, but sends all requests through
+    the defined proxy, using the CONNECT method for HTTPS URLs.
+
+    :param proxy_url:
+        The URL of the proxy to be used.
+
+    :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. In
+        the HTTP case they are sent with each request, while in the
+        HTTPS/CONNECT case they are sent only once. Could be used for proxy
+        authentication.
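
The redirect handling in `PoolManager.urlopen` above can be exercised like this; a minimal sketch assuming the stock urllib3 import path, with a placeholder URL.

    # Sketch only: stock urllib3 import path, placeholder URL.
    from urllib3 import PoolManager
    from urllib3.util.retry import Retry

    manager = PoolManager()

    # Follow at most two redirects, then hand back the 3xx response
    # instead of raising MaxRetryError; a 303 would be re-issued as GET
    # per RFC 7231, Section 6.4.4.
    response = manager.request(
        'GET', 'http://example.com/redirecting-endpoint',
        retries=Retry(redirect=2, raise_on_redirect=False))
    print(response.status)
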
+
+    Example:
+        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+        >>> r1 = proxy.request('GET', 'http://google.com/')
+        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+        >>> len(proxy.pools)
+        1
+        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+        >>> r4 = proxy.request('GET', 'https://twitter.com/')
+        >>> len(proxy.pools)
+        3
+
+    """
+
+    def __init__(self, proxy_url, num_pools=10, headers=None,
+                 proxy_headers=None, **connection_pool_kw):
+
+        if isinstance(proxy_url, HTTPConnectionPool):
+            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+                                        proxy_url.port)
+        proxy = parse_url(proxy_url)
+        if not proxy.port:
+            port = port_by_scheme.get(proxy.scheme, 80)
+            proxy = proxy._replace(port=port)
+
+        if proxy.scheme not in ("http", "https"):
+            raise ProxySchemeUnknown(proxy.scheme)
+
+        self.proxy = proxy
+        self.proxy_headers = proxy_headers or {}
+
+        connection_pool_kw['_proxy'] = self.proxy
+        connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+        super(ProxyManager, self).__init__(
+            num_pools, headers, **connection_pool_kw)
+
+    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+        if scheme == "https":
+            return super(ProxyManager, self).connection_from_host(
+                host, port, scheme, pool_kwargs=pool_kwargs)
+
+        return super(ProxyManager, self).connection_from_host(
+            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+    def _set_proxy_headers(self, url, headers=None):
+        """
+        Sets headers needed by proxies: specifically, the Accept and Host
+        headers. Only sets headers not provided by the user.
+        """
+        headers_ = {'Accept': '*/*'}
+
+        netloc = parse_url(url).netloc
+        if netloc:
+            headers_['Host'] = netloc
+
+        if headers:
+            headers_.update(headers)
+        return headers_
+
+    def urlopen(self, method, url, redirect=True, **kw):
+        "Same as HTTP(S)ConnectionPool.urlopen; ``url`` must be absolute."
+        u = parse_url(url)
+
+        if u.scheme == "http":
+            # For proxied HTTPS requests, httplib sets the necessary headers
+            # on the CONNECT to the proxy. For HTTP, we'll definitely
+            # need to set 'Host' at the very least.
+            headers = kw.get('headers', self.headers)
+            kw['headers'] = self._set_proxy_headers(url, headers)
+
+        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+    return ProxyManager(proxy_url=url, **kw)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
new file mode 100644
index 000000000..f78331975
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+    """
+    Convenience mixin for classes that implement a :meth:`urlopen` method, such
+    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+    :class:`~urllib3.poolmanager.PoolManager`.
+
+    Provides behavior for making common types of HTTP requests and
+    decides which type of request field encoding to use.
+
+    Specifically,
+
+    :meth:`.request_encode_url` is for sending requests whose fields are
+    encoded in the URL (such as GET, HEAD, DELETE).
+
+    :meth:`.request_encode_body` is for sending requests whose fields are
+    encoded in the *body* of the request using multipart or www-form-urlencoded
+    (such as for POST, PUT, PATCH).
+
+    :meth:`.request` is for making any kind of request; it will look up the
+    appropriate encoding format and use one of the above two methods to make
+    the request.
+
+    Initializer parameters:
+
+    :param headers:
+        Headers to include with all requests, unless other headers are given
+        explicitly.
+    """
+
+    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
+
+    def __init__(self, headers=None):
+        self.headers = headers or {}
+
+    def urlopen(self, method, url, body=None, headers=None,
+                encode_multipart=True, multipart_boundary=None,
+                **kw):  # Abstract
+        raise NotImplementedError("Classes extending RequestMethods must implement "
+                                  "their own ``urlopen`` method.")
+
+    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+        """
+        Make a request using :meth:`urlopen` with the appropriate encoding of
+        ``fields`` based on the ``method`` used.
+
+        This is a convenience method that requires the least amount of manual
+        effort. It can be used in most situations, while still having the
+        option to drop down to more specific methods when necessary, such as
+        :meth:`request_encode_url`, :meth:`request_encode_body`,
+        or even the lowest level :meth:`urlopen`.
+        """
+        method = method.upper()
+
+        if method in self._encode_url_methods:
+            return self.request_encode_url(method, url, fields=fields,
+                                           headers=headers,
+                                           **urlopen_kw)
+        else:
+            return self.request_encode_body(method, url, fields=fields,
+                                            headers=headers,
+                                            **urlopen_kw)
+
+    def request_encode_url(self, method, url, fields=None, headers=None,
+                           **urlopen_kw):
+        """
+        Make a request using :meth:`urlopen` with the ``fields`` encoded in
+        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+        """
+        if headers is None:
+            headers = self.headers
+
+        extra_kw = {'headers': headers}
+        extra_kw.update(urlopen_kw)
+
+        if fields:
+            url += '?' + urlencode(fields)
+
+        return self.urlopen(method, url, **extra_kw)
+
+    def request_encode_body(self, method, url, fields=None, headers=None,
+                            encode_multipart=True, multipart_boundary=None,
+                            **urlopen_kw):
+        """
+        Make a request using :meth:`urlopen` with the ``fields`` encoded in
+        the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+        When ``encode_multipart=True`` (default), then
+        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+        the payload with the appropriate content type. Otherwise
+        :meth:`urllib.urlencode` is used with the
+        'application/x-www-form-urlencoded' content type.
+
+        Multipart encoding must be used when posting files, and it's reasonably
+        safe to use it at other times too. However, it may break request
+        signing, such as with OAuth.
+
+        Supports an optional ``fields`` parameter of key/value strings AND
+        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+        the MIME type is optional. For example::
+
+            fields = {
+                'foo': 'bar',
+                'fakefile': ('foofile.txt', 'contents of foofile'),
+                'realfile': ('barfile.txt', open('realfile').read()),
+                'typedfile': ('bazfile.bin', open('bazfile').read(),
+                              'image/jpeg'),
+                'nonamefile': 'contents of nonamefile field',
+            }
+
+        When uploading a file, providing a filename (the first parameter of the
+        tuple) is optional but recommended to best mimic the behavior of browsers.
+ + Note that if ``headers`` are supplied, the 'Content-Type' header will + be overwritten because it depends on the dynamic random boundary string + which is used to compose the body of the request. The random boundary + string can be explicitly set with the ``multipart_boundary`` parameter. + """ + if headers is None: + headers = self.headers + + extra_kw = {'headers': {}} + + if fields: + if 'body' in urlopen_kw: + raise TypeError( + "request got values for both 'fields' and 'body', can only specify one.") + + if encode_multipart: + body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + else: + body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + + extra_kw['body'] = body + extra_kw['headers'] = {'Content-Type': content_type} + + extra_kw['headers'].update(headers) + extra_kw.update(urlopen_kw) + + return self.urlopen(method, url, **extra_kw) diff --git a/collectors/python.d.plugin/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py new file mode 100644 index 000000000..cf14a3076 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/response.py @@ -0,0 +1,623 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +from contextlib import contextmanager +import zlib +import io +import logging +from socket import timeout as SocketTimeout +from socket import error as SocketError + +from ._collections import HTTPHeaderDict +from .exceptions import ( + BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, + ResponseNotChunked, IncompleteRead, InvalidHeader +) +from .packages.six import string_types as basestring, binary_type, PY3 +from .packages.six.moves import http_client as httplib +from .connection import HTTPException, BaseSSLError +from .util.response import is_fp_closed, is_response_to_head + +log = logging.getLogger(__name__) + + +class DeflateDecoder(object): + + def __init__(self): + self._first_try = True + self._data = binary_type() + self._obj = zlib.decompressobj() + + def __getattr__(self, name): + return getattr(self._obj, name) + + def decompress(self, data): + if not data: + return data + + if not self._first_try: + return self._obj.decompress(data) + + self._data += data + try: + decompressed = self._obj.decompress(data) + if decompressed: + self._first_try = False + self._data = None + return decompressed + except zlib.error: + self._first_try = False + self._obj = zlib.decompressobj(-zlib.MAX_WBITS) + try: + return self.decompress(self._data) + finally: + self._data = None + + +class GzipDecoder(object): + + def __init__(self): + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + + def __getattr__(self, name): + return getattr(self._obj, name) + + def decompress(self, data): + if not data: + return data + return self._obj.decompress(data) + + +def _get_decoder(mode): + if mode == 'gzip': + return GzipDecoder() + + return DeflateDecoder() + + +class HTTPResponse(io.IOBase): + """ + HTTP Response container. + + Backwards-compatible to httplib's HTTPResponse but the response ``body`` is + loaded and decoded on-demand when the ``data`` property is accessed. This + class is also compatible with the Python standard library's :mod:`io` + module, and can hence be treated as a readable object in the context of that + framework. + + Extra parameters for behaviour not present in httplib.HTTPResponse: + + :param preload_content: + If True, the response's body will be preloaded during construction. 
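
A short usage sketch of the body-encoding choice described in `request_encode_body` above, assuming the stock urllib3 import path; the URLs and file contents are placeholders.

    # Sketch only: stock urllib3 import path, placeholder URLs and data.
    from urllib3 import PoolManager

    manager = PoolManager()

    # Default: multipart/form-data with a generated boundary, suitable
    # for file tuples as described above.
    manager.request('POST', 'http://example.com/upload', fields={
        'token': 'abc123',
        'attachment': ('notes.txt', 'contents of notes.txt', 'text/plain'),
    })

    # encode_multipart=False sends application/x-www-form-urlencoded
    # instead (no file tuples in this mode).
    manager.request_encode_body('POST', 'http://example.com/form',
                                fields={'token': 'abc123'},
                                encode_multipart=False)
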
+
+    :param decode_content:
+        If True, the body will be decoded on read based on the
+        'content-encoding' header (e.g. 'gzip' and 'deflate'). If False,
+        decoding is skipped and the raw data is returned instead.
+
+    :param original_response:
+        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+        object, it's convenient to include the original for debug purposes. It's
+        otherwise unused.
+
+    :param retries:
+        The retries contains the last :class:`~urllib3.util.retry.Retry` that
+        was used during the request.
+
+    :param enforce_content_length:
+        Enforce content length checking. Body returned by server must match
+        value of Content-Length header, if present. Otherwise, raise error.
+    """
+
+    CONTENT_DECODERS = ['gzip', 'deflate']
+    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+                 strict=0, preload_content=True, decode_content=True,
+                 original_response=None, pool=None, connection=None,
+                 retries=None, enforce_content_length=False, request_method=None):
+
+        if isinstance(headers, HTTPHeaderDict):
+            self.headers = headers
+        else:
+            self.headers = HTTPHeaderDict(headers)
+        self.status = status
+        self.version = version
+        self.reason = reason
+        self.strict = strict
+        self.decode_content = decode_content
+        self.retries = retries
+        self.enforce_content_length = enforce_content_length
+
+        self._decoder = None
+        self._body = None
+        self._fp = None
+        self._original_response = original_response
+        self._fp_bytes_read = 0
+
+        if body and isinstance(body, (basestring, binary_type)):
+            self._body = body
+
+        self._pool = pool
+        self._connection = connection
+
+        if hasattr(body, 'read'):
+            self._fp = body
+
+        # Are we using the chunked-style of transfer encoding?
+        self.chunked = False
+        self.chunk_left = None
+        tr_enc = self.headers.get('transfer-encoding', '').lower()
+        # Don't incur the penalty of creating a list and then discarding it
+        encodings = (enc.strip() for enc in tr_enc.split(","))
+        if "chunked" in encodings:
+            self.chunked = True
+
+        # Determine length of response
+        self.length_remaining = self._init_length(request_method)
+
+        # If requested, preload the body.
+        if preload_content and not self._body:
+            self._body = self.read(decode_content=decode_content)
+
+    def get_redirect_location(self):
+        """
+        Should we redirect and where to?
+
+        :returns: Truthy redirect location string if we got a redirect status
+            code and valid location. ``None`` if redirect status and no
+            location. ``False`` if not a redirect status code.
+        """
+        if self.status in self.REDIRECT_STATUSES:
+            return self.headers.get('location')
+
+        return False
+
+    def release_conn(self):
+        if not self._pool or not self._connection:
+            return
+
+        self._pool._put_conn(self._connection)
+        self._connection = None
+
+    @property
+    def data(self):
+        # For backwards-compat with urllib3 0.4 and earlier.
+        if self._body:
+            return self._body
+
+        if self._fp:
+            return self.read(cache_content=True)
+
+    @property
+    def connection(self):
+        return self._connection
+
+    def tell(self):
+        """
+        Obtain the number of bytes pulled over the wire so far. May differ from
+        the amount of content returned by :meth:`HTTPResponse.read` if bytes
+        are encoded on the wire (e.g., compressed).
+        """
+        return self._fp_bytes_read
+
+    def _init_length(self, request_method):
+        """
+        Set initial length value for Response content if available.
+ """ + length = self.headers.get('content-length') + + if length is not None and self.chunked: + # This Response will fail with an IncompleteRead if it can't be + # received as chunked. This method falls back to attempt reading + # the response before raising an exception. + log.warning("Received response with both Content-Length and " + "Transfer-Encoding set. This is expressly forbidden " + "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " + "attempting to process response as Transfer-Encoding: " + "chunked.") + return None + + elif length is not None: + try: + # RFC 7230 section 3.3.2 specifies multiple content lengths can + # be sent in a single Content-Length header + # (e.g. Content-Length: 42, 42). This line ensures the values + # are all valid ints and that as long as the `set` length is 1, + # all values are the same. Otherwise, the header is invalid. + lengths = set([int(val) for val in length.split(',')]) + if len(lengths) > 1: + raise InvalidHeader("Content-Length contained multiple " + "unmatching values (%s)" % length) + length = lengths.pop() + except ValueError: + length = None + else: + if length < 0: + length = None + + # Convert status to int for comparison + # In some cases, httplib returns a status of "_UNKNOWN" + try: + status = int(self.status) + except ValueError: + status = 0 + + # Check for responses that shouldn't include a body + if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': + length = 0 + + return length + + def _init_decoder(self): + """ + Set-up the _decoder attribute if necessary. + """ + # Note: content-encoding value should be case-insensitive, per RFC 7230 + # Section 3.2 + content_encoding = self.headers.get('content-encoding', '').lower() + if self._decoder is None and content_encoding in self.CONTENT_DECODERS: + self._decoder = _get_decoder(content_encoding) + + def _decode(self, data, decode_content, flush_decoder): + """ + Decode the data passed in and potentially flush the decoder. + """ + try: + if decode_content and self._decoder: + data = self._decoder.decompress(data) + except (IOError, zlib.error) as e: + content_encoding = self.headers.get('content-encoding', '').lower() + raise DecodeError( + "Received response with content-encoding: %s, but " + "failed to decode it." % content_encoding, e) + + if flush_decoder and decode_content: + data += self._flush_decoder() + + return data + + def _flush_decoder(self): + """ + Flushes the decoder. Should only be called if the decoder is actually + being used. + """ + if self._decoder: + buf = self._decoder.decompress(b'') + return buf + self._decoder.flush() + + return b'' + + @contextmanager + def _error_catcher(self): + """ + Catch low-level python exceptions, instead re-raising urllib3 + variants, so that low-level exceptions are not leaked in the + high-level api. + + On exit, release the connection back to the pool. + """ + clean_exit = False + + try: + try: + yield + + except SocketTimeout: + # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but + # there is yet no clean way to get at it from this context. + raise ReadTimeoutError(self._pool, None, 'Read timed out.') + + except BaseSSLError as e: + # FIXME: Is there a better way to differentiate between SSLErrors? + if 'read operation timed out' not in str(e): # Defensive: + # This shouldn't happen but just in case we're missing an edge + # case, let's avoid swallowing SSL errors. 
+ raise + + raise ReadTimeoutError(self._pool, None, 'Read timed out.') + + except (HTTPException, SocketError) as e: + # This includes IncompleteRead. + raise ProtocolError('Connection broken: %r' % e, e) + + # If no exception is thrown, we should avoid cleaning up + # unnecessarily. + clean_exit = True + finally: + # If we didn't terminate cleanly, we need to throw away our + # connection. + if not clean_exit: + # The response may not be closed but we're not going to use it + # anymore so close it now to ensure that the connection is + # released back to the pool. + if self._original_response: + self._original_response.close() + + # Closing the response may not actually be sufficient to close + # everything, so if we have a hold of the connection close that + # too. + if self._connection: + self._connection.close() + + # If we hold the original response but it's closed now, we should + # return the connection back to the pool. + if self._original_response and self._original_response.isclosed(): + self.release_conn() + + def read(self, amt=None, decode_content=None, cache_content=False): + """ + Similar to :meth:`httplib.HTTPResponse.read`, but with two additional + parameters: ``decode_content`` and ``cache_content``. + + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param cache_content: + If True, will save the returned data such that the same result is + returned despite of the state of the underlying file object. This + is useful if you want the ``.data`` property to continue working + after having ``.read()`` the file object. (Overridden if ``amt`` is + set.) + """ + self._init_decoder() + if decode_content is None: + decode_content = self.decode_content + + if self._fp is None: + return + + flush_decoder = False + data = None + + with self._error_catcher(): + if amt is None: + # cStringIO doesn't like amt=None + data = self._fp.read() + flush_decoder = True + else: + cache_content = False + data = self._fp.read(amt) + if amt != 0 and not data: # Platform-specific: Buggy versions of Python. + # Close the connection when no data is returned + # + # This is redundant to what httplib/http.client _should_ + # already do. However, versions of python released before + # December 15, 2012 (http://bugs.python.org/issue16298) do + # not properly close the connection in all cases. There is + # no harm in redundantly calling close. + self._fp.close() + flush_decoder = True + if self.enforce_content_length and self.length_remaining not in (0, None): + # This is an edge case that httplib failed to cover due + # to concerns of backward compatibility. We're + # addressing it here to make sure IncompleteRead is + # raised during streaming, so all calls with incorrect + # Content-Length are caught. + raise IncompleteRead(self._fp_bytes_read, self.length_remaining) + + if data: + self._fp_bytes_read += len(data) + if self.length_remaining is not None: + self.length_remaining -= len(data) + + data = self._decode(data, decode_content, flush_decoder) + + if cache_content: + self._body = data + + return data + + def stream(self, amt=2**16, decode_content=None): + """ + A generator wrapper for the read() method. A call will block until + ``amt`` bytes have been read from the connection or until the + connection is closed. + + :param amt: + How much of the content to read. 
The generator will return up to
+            that much data per iteration, but may return less. This is
+            particularly likely when using compressed data. However, the
+            empty string will never be returned.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        if self.chunked and self.supports_chunked_reads():
+            for line in self.read_chunked(amt, decode_content=decode_content):
+                yield line
+        else:
+            while not is_fp_closed(self._fp):
+                data = self.read(amt=amt, decode_content=decode_content)
+
+                if data:
+                    yield data
+
+    @classmethod
+    def from_httplib(ResponseCls, r, **response_kw):
+        """
+        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+        corresponding :class:`urllib3.response.HTTPResponse` object.
+
+        Remaining parameters are passed to the HTTPResponse constructor, along
+        with ``original_response=r``.
+        """
+        headers = r.msg
+
+        if not isinstance(headers, HTTPHeaderDict):
+            if PY3:  # Python 3
+                headers = HTTPHeaderDict(headers.items())
+            else:  # Python 2
+                headers = HTTPHeaderDict.from_httplib(headers)
+
+        # HTTPResponse objects in Python 3 don't have a .strict attribute
+        strict = getattr(r, 'strict', 0)
+        resp = ResponseCls(body=r,
+                           headers=headers,
+                           status=r.status,
+                           version=r.version,
+                           reason=r.reason,
+                           strict=strict,
+                           original_response=r,
+                           **response_kw)
+        return resp
+
+    # Backwards-compatibility methods for httplib.HTTPResponse
+    def getheaders(self):
+        return self.headers
+
+    def getheader(self, name, default=None):
+        return self.headers.get(name, default)
+
+    # Overrides from io.IOBase
+    def close(self):
+        if not self.closed:
+            self._fp.close()
+
+        if self._connection:
+            self._connection.close()
+
+    @property
+    def closed(self):
+        if self._fp is None:
+            return True
+        elif hasattr(self._fp, 'isclosed'):
+            return self._fp.isclosed()
+        elif hasattr(self._fp, 'closed'):
+            return self._fp.closed
+        else:
+            return True
+
+    def fileno(self):
+        if self._fp is None:
+            raise IOError("HTTPResponse has no file to get a fileno from")
+        elif hasattr(self._fp, "fileno"):
+            return self._fp.fileno()
+        else:
+            raise IOError("The file-like object this HTTPResponse is wrapped "
+                          "around has no file descriptor")
+
+    def flush(self):
+        if self._fp is not None and hasattr(self._fp, 'flush'):
+            return self._fp.flush()
+
+    def readable(self):
+        # This method is required for `io` module compatibility.
+        return True
+
+    def readinto(self, b):
+        # This method is required for `io` module compatibility.
+        temp = self.read(len(b))
+        if len(temp) == 0:
+            return 0
+        else:
+            b[:len(temp)] = temp
+            return len(temp)
+
+    def supports_chunked_reads(self):
+        """
+        Checks if the underlying file-like object looks like a
+        httplib.HTTPResponse object. We do this by testing for the fp
+        attribute. If it is present, we assume it returns raw chunks as
+        processed by read_chunked().
+        """
+        return hasattr(self._fp, 'fp')
+
+    def _update_chunk_length(self):
+        # First, we'll figure out length of a chunk and then
+        # we'll try to read it from socket.
+        if self.chunk_left is not None:
+            return
+        line = self._fp.fp.readline()
+        line = line.split(b';', 1)[0]
+        try:
+            self.chunk_left = int(line, 16)
+        except ValueError:
+            # Invalid chunked protocol response, abort.
+            self.close()
+            raise httplib.IncompleteRead(line)
+
+    def _handle_chunk(self, amt):
+        returned_chunk = None
+        if amt is None:
+            chunk = self._fp._safe_read(self.chunk_left)
+            returned_chunk = chunk
+            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
+            self.chunk_left = None
+        elif amt < self.chunk_left:
+            value = self._fp._safe_read(amt)
+            self.chunk_left = self.chunk_left - amt
+            returned_chunk = value
+        elif amt == self.chunk_left:
+            value = self._fp._safe_read(amt)
+            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
+            self.chunk_left = None
+            returned_chunk = value
+        else:  # amt > self.chunk_left
+            returned_chunk = self._fp._safe_read(self.chunk_left)
+            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
+            self.chunk_left = None
+        return returned_chunk
+
+    def read_chunked(self, amt=None, decode_content=None):
+        """
+        Similar to :meth:`HTTPResponse.read`, but with an additional
+        parameter: ``decode_content``.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        self._init_decoder()
+        # FIXME: Rewrite this method and make it a class with a better structured logic.
+        if not self.chunked:
+            raise ResponseNotChunked(
+                "Response is not chunked. "
+                "Header 'transfer-encoding: chunked' is missing.")
+        if not self.supports_chunked_reads():
+            raise BodyNotHttplibCompatible(
+                "Body should be httplib.HTTPResponse like. "
+                "It should have an fp attribute which returns raw chunks.")
+
+        # Don't bother reading the body of a HEAD request.
+        if self._original_response and is_response_to_head(self._original_response):
+            self._original_response.close()
+            return
+
+        with self._error_catcher():
+            while True:
+                self._update_chunk_length()
+                if self.chunk_left == 0:
+                    break
+                chunk = self._handle_chunk(amt)
+                decoded = self._decode(chunk, decode_content=decode_content,
+                                       flush_decoder=False)
+                if decoded:
+                    yield decoded
+
+            if decode_content:
+                # On CPython and PyPy, we should never need to flush the
+                # decoder. However, on Jython we *might* need to, so
+                # let's defensively do it anyway.
+                decoded = self._flush_decoder()
+                if decoded:  # Platform-specific: Jython.
+                    yield decoded
+
+            # Chunk content ends with \r\n: discard it.
+            while True:
+                line = self._fp.fp.readline()
+                if not line:
+                    # Some sites may not end with '\r\n'.
+                    break
+                if line == b'\r\n':
+                    break
+
+            # We read everything; close the "file".
+            if self._original_response:
+                self._original_response.close()
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
new file mode 100644
index 000000000..bba628d98
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
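
To tie `stream` and `read_chunked` together, here is a minimal consumption sketch, assuming the stock urllib3 import path; the URL and output file name are placeholders.

    # Sketch only: stock urllib3 import path, placeholder URL/file name.
    from urllib3 import PoolManager

    manager = PoolManager()
    response = manager.request('GET', 'http://example.com/large-file',
                               preload_content=False)

    # stream() yields decoded chunks; for 'Transfer-Encoding: chunked'
    # bodies it delegates to read_chunked() when the underlying file
    # object supports it.
    with open('large-file.out', 'wb') as fh:
        for chunk in response.stream(2 ** 16):
            fh.write(chunk)

    response.release_conn()
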
+from .connection import is_connection_dropped +from .request import make_headers +from .response import is_fp_closed +from .ssl_ import ( + SSLContext, + HAS_SNI, + IS_PYOPENSSL, + IS_SECURETRANSPORT, + assert_fingerprint, + resolve_cert_reqs, + resolve_ssl_version, + ssl_wrap_socket, +) +from .timeout import ( + current_time, + Timeout, +) + +from .retry import Retry +from .url import ( + get_host, + parse_url, + split_first, + Url, +) +from .wait import ( + wait_for_read, + wait_for_write +) + +__all__ = ( + 'HAS_SNI', + 'IS_PYOPENSSL', + 'IS_SECURETRANSPORT', + 'SSLContext', + 'Retry', + 'Timeout', + 'Url', + 'assert_fingerprint', + 'current_time', + 'is_connection_dropped', + 'is_fp_closed', + 'get_host', + 'parse_url', + 'make_headers', + 'resolve_cert_reqs', + 'resolve_ssl_version', + 'split_first', + 'ssl_wrap_socket', + 'wait_for_read', + 'wait_for_write' +) diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py new file mode 100644 index 000000000..3bd69e8fa --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py @@ -0,0 +1,131 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import socket +from .wait import wait_for_read +from .selectors import HAS_SELECT, SelectorError + + +def is_connection_dropped(conn): # Platform-specific + """ + Returns True if the connection is dropped and should be closed. + + :param conn: + :class:`httplib.HTTPConnection` object. + + Note: For platforms like AppEngine, this will always return ``False`` to + let the platform handle connection recycling transparently for us. + """ + sock = getattr(conn, 'sock', False) + if sock is False: # Platform-specific: AppEngine + return False + if sock is None: # Connection already closed (such as by httplib). + return True + + if not HAS_SELECT: + return False + + try: + return bool(wait_for_read(sock, timeout=0.0)) + except SelectorError: + return True + + +# This function is copied from socket.py in the Python 2.7 standard +# library test suite. Added to its signature is only `socket_options`. +# One additional modification is that we avoid binding to IPv6 servers +# discovered in DNS if the system doesn't have IPv6 functionality. +def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, socket_options=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. + """ + + host, port = address + if host.startswith('['): + host = host.strip('[]') + err = None + + # Using the value from allowed_gai_family() in the context of getaddrinfo lets + # us select whether to work with IPv4 DNS records, IPv6 records, or both. + # The original create_connection function always returns all records. 
+ family = allowed_gai_family() + + for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + + # If provided, set socket level options before connecting. + _set_socket_options(sock, socket_options) + + if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except socket.error as e: + err = e + if sock is not None: + sock.close() + sock = None + + if err is not None: + raise err + + raise socket.error("getaddrinfo returns an empty list") + + +def _set_socket_options(sock, options): + if options is None: + return + + for opt in options: + sock.setsockopt(*opt) + + +def allowed_gai_family(): + """This function is designed to work in the context of + getaddrinfo, where family=socket.AF_UNSPEC is the default and + will perform a DNS search for both IPv6 and IPv4 records.""" + + family = socket.AF_INET + if HAS_IPV6: + family = socket.AF_UNSPEC + return family + + +def _has_ipv6(host): + """ Returns True if the system can bind an IPv6 address. """ + sock = None + has_ipv6 = False + + if socket.has_ipv6: + # has_ipv6 returns true if cPython was compiled with IPv6 support. + # It does not tell us if the system has IPv6 support enabled. To + # determine that we must bind to an IPv6 address. + # https://github.com/shazow/urllib3/pull/611 + # https://bugs.python.org/issue658327 + try: + sock = socket.socket(socket.AF_INET6) + sock.bind((host, 0)) + has_ipv6 = True + except Exception: + pass + + if sock: + sock.close() + return has_ipv6 + + +HAS_IPV6 = _has_ipv6('::1') diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py new file mode 100644 index 000000000..18f27b032 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py @@ -0,0 +1,119 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +from base64 import b64encode + +from ..packages.six import b, integer_types +from ..exceptions import UnrewindableBodyError + +ACCEPT_ENCODING = 'gzip,deflate' +_FAILEDTELL = object() + + +def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, + basic_auth=None, proxy_basic_auth=None, disable_cache=None): + """ + Shortcuts for generating request headers. + + :param keep_alive: + If ``True``, adds 'connection: keep-alive' header. + + :param accept_encoding: + Can be a boolean, list, or string. + ``True`` translates to 'gzip,deflate'. + List will get joined by comma. + String will be used as provided. + + :param user_agent: + String representing the user-agent you want, such as + "python-urllib3/0.6" + + :param basic_auth: + Colon-separated username:password string for 'authorization: basic ...' + auth header. + + :param proxy_basic_auth: + Colon-separated username:password string for 'proxy-authorization: basic ...' + auth header. + + :param disable_cache: + If ``True``, adds 'cache-control: no-cache' header. 
+ + Example:: + + >>> make_headers(keep_alive=True, user_agent="Batman/1.0") + {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} + >>> make_headers(accept_encoding=True) + {'accept-encoding': 'gzip,deflate'} + """ + headers = {} + if accept_encoding: + if isinstance(accept_encoding, str): + pass + elif isinstance(accept_encoding, list): + accept_encoding = ','.join(accept_encoding) + else: + accept_encoding = ACCEPT_ENCODING + headers['accept-encoding'] = accept_encoding + + if user_agent: + headers['user-agent'] = user_agent + + if keep_alive: + headers['connection'] = 'keep-alive' + + if basic_auth: + headers['authorization'] = 'Basic ' + \ + b64encode(b(basic_auth)).decode('utf-8') + + if proxy_basic_auth: + headers['proxy-authorization'] = 'Basic ' + \ + b64encode(b(proxy_basic_auth)).decode('utf-8') + + if disable_cache: + headers['cache-control'] = 'no-cache' + + return headers + + +def set_file_position(body, pos): + """ + If a position is provided, move file to that point. + Otherwise, we'll attempt to record a position for future use. + """ + if pos is not None: + rewind_body(body, pos) + elif getattr(body, 'tell', None) is not None: + try: + pos = body.tell() + except (IOError, OSError): + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body. + pos = _FAILEDTELL + + return pos + + +def rewind_body(body, body_pos): + """ + Attempt to rewind body to a certain position. + Primarily used for request redirects and retries. + + :param body: + File-like object that supports seek. + + :param int pos: + Position to seek to in file. + """ + body_seek = getattr(body, 'seek', None) + if body_seek is not None and isinstance(body_pos, integer_types): + try: + body_seek(body_pos) + except (IOError, OSError): + raise UnrewindableBodyError("An error occurred when rewinding request " + "body for redirect/retry.") + elif body_pos is _FAILEDTELL: + raise UnrewindableBodyError("Unable to record file position for rewinding " + "request body during a redirect/retry.") + else: + raise ValueError("body_pos must be of type integer, " + "instead it was %s." % type(body_pos)) diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py new file mode 100644 index 000000000..e4cda93d4 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +from ..packages.six.moves import http_client as httplib + +from ..exceptions import HeaderParsingError + + +def is_fp_closed(obj): + """ + Checks whether a given file-like object is closed. + + :param obj: + The file-like object to check. + """ + + try: + # Check `isclosed()` first, in case Python3 doesn't set `closed`. + # GH Issue #928 + return obj.isclosed() + except AttributeError: + pass + + try: + # Check via the official file-like-object way. + return obj.closed + except AttributeError: + pass + + try: + # Check if the object is a container for another file-like object that + # gets released on exhaustion (e.g. HTTPResponse). + return obj.fp is None + except AttributeError: + pass + + raise ValueError("Unable to determine whether fp is closed.") + + +def assert_header_parsing(headers): + """ + Asserts whether all headers have been successfully parsed. + Extracts encountered errors from the result of parsing headers. + + Only works on Python 3. + + :param headers: Headers to verify. 
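
The `set_file_position`/`rewind_body` pair above can be exercised in isolation; a minimal sketch, assuming the stock urllib3 import path.

    # Sketch only: stock urllib3 import path.
    import io

    from urllib3.util.request import rewind_body, set_file_position

    body = io.BytesIO(b'payload to upload')

    # Before the first send: record where the body starts.
    pos = set_file_position(body, None)

    body.read()  # the first attempt consumes the body, then fails

    # Before the retry: seek back so the body can be re-sent.
    rewind_body(body, pos)
    assert body.read() == b'payload to upload'
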
+ :type headers: `httplib.HTTPMessage`. + + :raises urllib3.exceptions.HeaderParsingError: + If parsing errors are found. + """ + + # This will fail silently if we pass in the wrong kind of parameter. + # To make debugging easier add an explicit check. + if not isinstance(headers, httplib.HTTPMessage): + raise TypeError('expected httplib.Message, got {0}.'.format( + type(headers))) + + defects = getattr(headers, 'defects', None) + get_payload = getattr(headers, 'get_payload', None) + + unparsed_data = None + if get_payload: # Platform-specific: Python 3. + unparsed_data = get_payload() + + if defects or unparsed_data: + raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) + + +def is_response_to_head(response): + """ + Checks whether the request of a response has been a HEAD-request. + Handles the quirks of AppEngine. + + :param conn: + :type conn: :class:`httplib.HTTPResponse` + """ + # FIXME: Can we do this somehow without accessing private httplib _method? + method = response._method + if isinstance(method, int): # Platform-specific: Appengine + return method == 3 + return method.upper() == 'HEAD' diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py new file mode 100644 index 000000000..61e63afec --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py @@ -0,0 +1,402 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import time +import logging +from collections import namedtuple +from itertools import takewhile +import email +import re + +from ..exceptions import ( + ConnectTimeoutError, + MaxRetryError, + ProtocolError, + ReadTimeoutError, + ResponseError, + InvalidHeader, +) +from ..packages import six + + +log = logging.getLogger(__name__) + +# Data structure for representing the metadata of requests that result in a retry. +RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", + "status", "redirect_location"]) + + +class Retry(object): + """ Retry configuration. + + Each retry attempt will create a new Retry object with updated values, so + they can be safely reused. + + Retries can be defined as a default for a pool:: + + retries = Retry(connect=5, read=2, redirect=5) + http = PoolManager(retries=retries) + response = http.request('GET', 'http://example.com/') + + Or per-request (which overrides the default for the pool):: + + response = http.request('GET', 'http://example.com/', retries=Retry(10)) + + Retries can be disabled by passing ``False``:: + + response = http.request('GET', 'http://example.com/', retries=False) + + Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless + retries are disabled, in which case the causing exception will be raised. + + :param int total: + Total number of retries to allow. Takes precedence over other counts. + + Set to ``None`` to remove this constraint and fall back on other + counts. It's a good idea to set this to some sensibly-high value to + account for unexpected edge cases and avoid infinite retry loops. + + Set to ``0`` to fail on the first retry. + + Set to ``False`` to disable and imply ``raise_on_redirect=False``. + + :param int connect: + How many connection-related errors to retry on. + + These are errors raised before the request is sent to the remote server, + which we assume has not triggered the server to process the request. + + Set to ``0`` to fail on the first retry of this type. 
+ + :param int read: + How many times to retry on read errors. + + These errors are raised after the request was sent to the server, so the + request may have side-effects. + + Set to ``0`` to fail on the first retry of this type. + + :param int redirect: + How many redirects to perform. Limit this to avoid infinite redirect + loops. + + A redirect is a HTTP response with a status code 301, 302, 303, 307 or + 308. + + Set to ``0`` to fail on the first retry of this type. + + Set to ``False`` to disable and imply ``raise_on_redirect=False``. + + :param int status: + How many times to retry on bad status codes. + + These are retries made on responses, where status code matches + ``status_forcelist``. + + Set to ``0`` to fail on the first retry of this type. + + :param iterable method_whitelist: + Set of uppercased HTTP method verbs that we should retry on. + + By default, we only retry on methods which are considered to be + idempotent (multiple requests with the same parameters end with the + same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. + + Set to a ``False`` value to retry on any verb. + + :param iterable status_forcelist: + A set of integer HTTP status codes that we should force a retry on. + A retry is initiated if the request method is in ``method_whitelist`` + and the response status code is in ``status_forcelist``. + + By default, this is disabled with ``None``. + + :param float backoff_factor: + A backoff factor to apply between attempts after the second try + (most errors are resolved immediately by a second try without a + delay). urllib3 will sleep for:: + + {backoff factor} * (2 ^ ({number of total retries} - 1)) + + seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep + for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer + than :attr:`Retry.BACKOFF_MAX`. + + By default, backoff is disabled (set to 0). + + :param bool raise_on_redirect: Whether, if the number of redirects is + exhausted, to raise a MaxRetryError, or to return a response with a + response code in the 3xx range. + + :param bool raise_on_status: Similar meaning to ``raise_on_redirect``: + whether we should raise an exception, or return a response, + if status falls in ``status_forcelist`` range and retries have + been exhausted. + + :param tuple history: The history of the request encountered during + each call to :meth:`~Retry.increment`. The list is in the order + the requests occurred. Each list item is of class :class:`RequestHistory`. + + :param bool respect_retry_after_header: + Whether to respect Retry-After header on status codes defined as + :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. + + """ + + DEFAULT_METHOD_WHITELIST = frozenset([ + 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) + + RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) + + #: Maximum backoff time. 
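
A worked example of the backoff formula quoted above; the helper below merely mirrors `get_backoff_time` under an assumed `backoff_factor` of 0.5.

    # Sketch only: stock urllib3 import path, assumed backoff_factor=0.5.
    from urllib3.util.retry import Retry

    def backoff(consecutive_errors, factor=0.5):
        # Mirrors Retry.get_backoff_time(): no delay until the second
        # consecutive error, then factor * 2**(n - 1), capped at BACKOFF_MAX.
        if consecutive_errors <= 1:
            return 0
        return min(Retry.BACKOFF_MAX, factor * (2 ** (consecutive_errors - 1)))

    print([backoff(n) for n in range(1, 6)])  # [0, 1.0, 2.0, 4.0, 8.0]
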
+ BACKOFF_MAX = 120 + + def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, + method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, + backoff_factor=0, raise_on_redirect=True, raise_on_status=True, + history=None, respect_retry_after_header=True): + + self.total = total + self.connect = connect + self.read = read + self.status = status + + if redirect is False or total is False: + redirect = 0 + raise_on_redirect = False + + self.redirect = redirect + self.status_forcelist = status_forcelist or set() + self.method_whitelist = method_whitelist + self.backoff_factor = backoff_factor + self.raise_on_redirect = raise_on_redirect + self.raise_on_status = raise_on_status + self.history = history or tuple() + self.respect_retry_after_header = respect_retry_after_header + + def new(self, **kw): + params = dict( + total=self.total, + connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, + method_whitelist=self.method_whitelist, + status_forcelist=self.status_forcelist, + backoff_factor=self.backoff_factor, + raise_on_redirect=self.raise_on_redirect, + raise_on_status=self.raise_on_status, + history=self.history, + ) + params.update(kw) + return type(self)(**params) + + @classmethod + def from_int(cls, retries, redirect=True, default=None): + """ Backwards-compatibility for the old retries format.""" + if retries is None: + retries = default if default is not None else cls.DEFAULT + + if isinstance(retries, Retry): + return retries + + redirect = bool(redirect) and None + new_retries = cls(retries, redirect=redirect) + log.debug("Converted retries value: %r -> %r", retries, new_retries) + return new_retries + + def get_backoff_time(self): + """ Formula for computing the current backoff + + :rtype: float + """ + # We want to consider only the last consecutive errors sequence (Ignore redirects). + consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, + reversed(self.history)))) + if consecutive_errors_len <= 1: + return 0 + + backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) + return min(self.BACKOFF_MAX, backoff_value) + + def parse_retry_after(self, retry_after): + # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 + if re.match(r"^\s*[0-9]+\s*$", retry_after): + seconds = int(retry_after) + else: + retry_date_tuple = email.utils.parsedate(retry_after) + if retry_date_tuple is None: + raise InvalidHeader("Invalid Retry-After header: %s" % retry_after) + retry_date = time.mktime(retry_date_tuple) + seconds = retry_date - time.time() + + if seconds < 0: + seconds = 0 + + return seconds + + def get_retry_after(self, response): + """ Get the value of Retry-After in seconds. """ + + retry_after = response.getheader("Retry-After") + + if retry_after is None: + return None + + return self.parse_retry_after(retry_after) + + def sleep_for_retry(self, response=None): + retry_after = self.get_retry_after(response) + if retry_after: + time.sleep(retry_after) + return True + + return False + + def _sleep_backoff(self): + backoff = self.get_backoff_time() + if backoff <= 0: + return + time.sleep(backoff) + + def sleep(self, response=None): + """ Sleep between retry attempts. + + This method will respect a server's ``Retry-After`` response header + and sleep the duration of the time requested. If that is not present, it + will use an exponential backoff. By default, the backoff factor is 0 and + this method will return immediately. 
+ """ + + if response: + slept = self.sleep_for_retry(response) + if slept: + return + + self._sleep_backoff() + + def _is_connection_error(self, err): + """ Errors when we're fairly sure that the server did not receive the + request, so it should be safe to retry. + """ + return isinstance(err, ConnectTimeoutError) + + def _is_read_error(self, err): + """ Errors that occur after the request has been started, so we should + assume that the server began processing it. + """ + return isinstance(err, (ReadTimeoutError, ProtocolError)) + + def _is_method_retryable(self, method): + """ Checks if a given HTTP method should be retried upon, depending if + it is included on the method whitelist. + """ + if self.method_whitelist and method.upper() not in self.method_whitelist: + return False + + return True + + def is_retry(self, method, status_code, has_retry_after=False): + """ Is this method/status code retryable? (Based on whitelists and control + variables such as the number of total retries to allow, whether to + respect the Retry-After header, whether this header is present, and + whether the returned status code is on the list of status codes to + be retried upon on the presence of the aforementioned header) + """ + if not self._is_method_retryable(method): + return False + + if self.status_forcelist and status_code in self.status_forcelist: + return True + + return (self.total and self.respect_retry_after_header and + has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) + + def is_exhausted(self): + """ Are we out of retries? """ + retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) + retry_counts = list(filter(None, retry_counts)) + if not retry_counts: + return False + + return min(retry_counts) < 0 + + def increment(self, method=None, url=None, response=None, error=None, + _pool=None, _stacktrace=None): + """ Return a new Retry object with incremented retry counters. + + :param response: A response object, or None, if the server did not + return a response. + :type response: :class:`~urllib3.response.HTTPResponse` + :param Exception error: An error encountered during the request, or + None if the response was received successfully. + + :return: A new ``Retry`` object. + """ + if self.total is False and error: + # Disabled, indicate to re-raise the error. + raise six.reraise(type(error), error, _stacktrace) + + total = self.total + if total is not None: + total -= 1 + + connect = self.connect + read = self.read + redirect = self.redirect + status_count = self.status + cause = 'unknown' + status = None + redirect_location = None + + if error and self._is_connection_error(error): + # Connect retry? + if connect is False: + raise six.reraise(type(error), error, _stacktrace) + elif connect is not None: + connect -= 1 + + elif error and self._is_read_error(error): + # Read retry? + if read is False or not self._is_method_retryable(method): + raise six.reraise(type(error), error, _stacktrace) + elif read is not None: + read -= 1 + + elif response and response.get_redirect_location(): + # Redirect retry? 
+            if redirect is not None:
+                redirect -= 1
+            cause = 'too many redirects'
+            redirect_location = response.get_redirect_location()
+            status = response.status
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            cause = ResponseError.GENERIC_ERROR
+            if response and response.status:
+                if status_count is not None:
+                    status_count -= 1
+                cause = ResponseError.SPECIFIC_ERROR.format(
+                    status_code=response.status)
+                status = response.status
+
+        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+        new_retry = self.new(
+            total=total,
+            connect=connect, read=read, redirect=redirect, status=status_count,
+            history=history)
+
+        if new_retry.is_exhausted():
+            raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+        return new_retry
+
+    def __repr__(self):
+        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+                    cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
new file mode 100644
index 000000000..c0997b1a2
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -0,0 +1,582 @@
+# SPDX-License-Identifier: MIT
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple, Mapping
+
+try:
+    monotonic = time.monotonic
+except (AttributeError, ImportError):  # Python < 3.3
+    monotonic = time.time
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True  # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
+_DEFAULT_SELECTOR = None
+
+
+class SelectorError(Exception):
+    def __init__(self, errcode):
+        super(SelectorError, self).__init__()
+        self.errno = errcode
+
+    def __repr__(self):
+        return "<SelectorError errno={0}>".format(self.errno)
+
+    def __str__(self):
+        return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+    """ Return a file descriptor from a file object. If
+    given an integer will simply return that integer back. """
+    if isinstance(fileobj, int):
+        fd = fileobj
+    else:
+        try:
+            fd = int(fileobj.fileno())
+        except (AttributeError, TypeError, ValueError):
+            raise ValueError("Invalid file object: {0!r}".format(fileobj))
+    if fd < 0:
+        raise ValueError("Invalid file descriptor: {0}".format(fd))
+    return fd
+
+
+# Determine which function to use to wrap system calls because Python 3.5+
+# already handles the case when system calls are interrupted.
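
Before the implementation, a usage sketch of the backported selector API: it mirrors the stdlib `selectors` module, and assumes the `DefaultSelector()` helper that the full module defines below this excerpt (stock urllib3 import path, POSIX `socketpair`).

    # Sketch only: stock urllib3 import path; DefaultSelector() is defined
    # further down in the full module.
    import socket

    from urllib3.util.selectors import EVENT_READ, DefaultSelector

    a, b = socket.socketpair()
    selector = DefaultSelector()
    selector.register(a, EVENT_READ, data='readable end')

    b.send(b'ping')
    for key, events in selector.select(timeout=1.0):
        print(key.fd, key.data, bool(events & EVENT_READ))

    selector.close()
    a.close()
    b.close()
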
+if sys.version_info >= (3, 5):
+    def _syscall_wrapper(func, _, *args, **kwargs):
+        """ This is the short-circuit version of the below logic
+        because in Python 3.5+ all system calls automatically restart
+        and recalculate their timeouts. """
+        try:
+            return func(*args, **kwargs)
+        except (OSError, IOError, select.error) as e:
+            errcode = None
+            if hasattr(e, "errno"):
+                errcode = e.errno
+            raise SelectorError(errcode)
+else:
+    def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+        """ Wrapper function for syscalls that could fail due to EINTR.
+        All functions should be retried if there is time left in the timeout
+        in accordance with PEP 475. """
+        timeout = kwargs.get("timeout", None)
+        if timeout is None:
+            expires = None
+            recalc_timeout = False
+        else:
+            timeout = float(timeout)
+            if timeout < 0.0:  # Timeout less than 0 treated as no timeout.
+                expires = None
+            else:
+                expires = monotonic() + timeout
+
+        args = list(args)
+        if recalc_timeout and "timeout" not in kwargs:
+            raise ValueError(
+                "Timeout must be in args or kwargs to be recalculated")
+
+        result = _SYSCALL_SENTINEL
+        while result is _SYSCALL_SENTINEL:
+            try:
+                result = func(*args, **kwargs)
+            # OSError is thrown by select.select
+            # IOError is thrown by select.epoll.poll
+            # select.error is thrown by select.poll.poll
+            # Aren't we thankful for Python 3.x rework for exceptions?
+            except (OSError, IOError, select.error) as e:
+                # select.error wasn't a subclass of OSError in the past.
+                errcode = None
+                if hasattr(e, "errno"):
+                    errcode = e.errno
+                elif hasattr(e, "args"):
+                    errcode = e.args[0]
+
+                # Also test for the Windows equivalent of EINTR.
+                is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+                                                           errcode == errno.WSAEINTR))
+
+                if is_interrupt:
+                    if expires is not None:
+                        current_time = monotonic()
+                        if current_time > expires:
+                            raise OSError(errno.ETIMEDOUT, 'Connection timed out')
+                        if recalc_timeout:
+                            if "timeout" in kwargs:
+                                kwargs["timeout"] = expires - current_time
+                    continue
+                if errcode:
+                    raise SelectorError(errcode)
+                else:
+                    raise
+        return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+    """ Mapping of file objects to selector keys """
+
+    def __init__(self, selector):
+        self._selector = selector
+
+    def __len__(self):
+        return len(self._selector._fd_to_key)
+
+    def __getitem__(self, fileobj):
+        try:
+            fd = self._selector._fileobj_lookup(fileobj)
+            return self._selector._fd_to_key[fd]
+        except KeyError:
+            raise KeyError("{0!r} is not registered.".format(fileobj))
+
+    def __iter__(self):
+        return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+    """ Abstract Selector class
+
+    A selector supports registering file objects to be monitored
+    for specific I/O events.
+
+    A file object is a file descriptor or any object with a
+    `fileno()` method. An arbitrary object can be attached to the
+    file object which can be used for example to store context info,
+    a callback, etc.
+
+    A selector can use various implementations (select(), poll(), epoll(),
+    and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+    the most efficient implementation for the current platform.
+    """
+    def __init__(self):
+        # Maps file descriptors to keys.
+        self._fd_to_key = {}
+
+        # Read-only mapping returned by get_map()
+        self._map = _SelectorMapping(self)
+
+    def _fileobj_lookup(self, fileobj):
+        """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive + search in case the object is invalid but we still + have it in our map. Used by unregister() so we can + unregister an object that was previously registered + even if it is closed. It is also used by _SelectorMapping + """ + try: + return _fileobj_to_fd(fileobj) + except ValueError: + + # Search through all our mapped keys. + for key in self._fd_to_key.values(): + if key.fileobj is fileobj: + return key.fd + + # Raise ValueError after all. + raise + + def register(self, fileobj, events, data=None): + """ Register a file object for a set of events to monitor. """ + if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): + raise ValueError("Invalid events: {0!r}".format(events)) + + key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) + + if key.fd in self._fd_to_key: + raise KeyError("{0!r} (FD {1}) is already registered" + .format(fileobj, key.fd)) + + self._fd_to_key[key.fd] = key + return key + + def unregister(self, fileobj): + """ Unregister a file object from being monitored. """ + try: + key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + # Getting the fileno of a closed socket on Windows errors with EBADF. + except socket.error as e: # Platform-specific: Windows. + if e.errno != errno.EBADF: + raise + else: + for key in self._fd_to_key.values(): + if key.fileobj is fileobj: + self._fd_to_key.pop(key.fd) + break + else: + raise KeyError("{0!r} is not registered".format(fileobj)) + return key + + def modify(self, fileobj, events, data=None): + """ Change a registered file object monitored events and data. """ + # NOTE: Some subclasses optimize this operation even further. + try: + key = self._fd_to_key[self._fileobj_lookup(fileobj)] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + if events != key.events: + self.unregister(fileobj) + key = self.register(fileobj, events, data) + + elif data != key.data: + # Use a shortcut to update the data. + key = key._replace(data=data) + self._fd_to_key[key.fd] = key + + return key + + def select(self, timeout=None): + """ Perform the actual selection until some monitored file objects + are ready or the timeout expires. """ + raise NotImplementedError() + + def close(self): + """ Close the selector. This must be called to ensure that all + underlying resources are freed. """ + self._fd_to_key.clear() + self._map = None + + def get_key(self, fileobj): + """ Return the key associated with a registered file object. """ + mapping = self.get_map() + if mapping is None: + raise RuntimeError("Selector is closed") + try: + return mapping[fileobj] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + def get_map(self): + """ Return a mapping of file objects to selector keys """ + return self._map + + def _key_from_fd(self, fd): + """ Return the key associated to a given file descriptor + Return None if it is not found. """ + try: + return self._fd_to_key[fd] + except KeyError: + return None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + +# Almost all platforms have select.select() +if hasattr(select, "select"): + class SelectSelector(BaseSelector): + """ Select-based selector. 
""" + def __init__(self): + super(SelectSelector, self).__init__() + self._readers = set() + self._writers = set() + + def register(self, fileobj, events, data=None): + key = super(SelectSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + self._readers.add(key.fd) + if events & EVENT_WRITE: + self._writers.add(key.fd) + return key + + def unregister(self, fileobj): + key = super(SelectSelector, self).unregister(fileobj) + self._readers.discard(key.fd) + self._writers.discard(key.fd) + return key + + def _select(self, r, w, timeout=None): + """ Wrapper for select.select because timeout is a positional arg """ + return select.select(r, w, [], timeout) + + def select(self, timeout=None): + # Selecting on empty lists on Windows errors out. + if not len(self._readers) and not len(self._writers): + return [] + + timeout = None if timeout is None else max(timeout, 0.0) + ready = [] + r, w, _ = _syscall_wrapper(self._select, True, self._readers, + self._writers, timeout) + r = set(r) + w = set(w) + for fd in r | w: + events = 0 + if fd in r: + events |= EVENT_READ + if fd in w: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + +if hasattr(select, "poll"): + class PollSelector(BaseSelector): + """ Poll-based selector """ + def __init__(self): + super(PollSelector, self).__init__() + self._poll = select.poll() + + def register(self, fileobj, events, data=None): + key = super(PollSelector, self).register(fileobj, events, data) + event_mask = 0 + if events & EVENT_READ: + event_mask |= select.POLLIN + if events & EVENT_WRITE: + event_mask |= select.POLLOUT + self._poll.register(key.fd, event_mask) + return key + + def unregister(self, fileobj): + key = super(PollSelector, self).unregister(fileobj) + self._poll.unregister(key.fd) + return key + + def _wrap_poll(self, timeout=None): + """ Wrapper function for select.poll.poll() so that + _syscall_wrapper can work with only seconds. """ + if timeout is not None: + if timeout <= 0: + timeout = 0 + else: + # select.poll.poll() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. + timeout = math.ceil(timeout * 1e3) + + result = self._poll.poll(timeout) + return result + + def select(self, timeout=None): + ready = [] + fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) + for fd, event_mask in fd_events: + events = 0 + if event_mask & ~select.POLLIN: + events |= EVENT_WRITE + if event_mask & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + + return ready + + +if hasattr(select, "epoll"): + class EpollSelector(BaseSelector): + """ Epoll-based selector """ + def __init__(self): + super(EpollSelector, self).__init__() + self._epoll = select.epoll() + + def fileno(self): + return self._epoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(EpollSelector, self).register(fileobj, events, data) + events_mask = 0 + if events & EVENT_READ: + events_mask |= select.EPOLLIN + if events & EVENT_WRITE: + events_mask |= select.EPOLLOUT + _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) + return key + + def unregister(self, fileobj): + key = super(EpollSelector, self).unregister(fileobj) + try: + _syscall_wrapper(self._epoll.unregister, False, key.fd) + except SelectorError: + # This can occur when the fd was closed since registry. 
+ pass + return key + + def select(self, timeout=None): + if timeout is not None: + if timeout <= 0: + timeout = 0.0 + else: + # select.epoll.poll() has a resolution of 1 millisecond + # but luckily takes seconds so we don't need a wrapper + # like PollSelector. Just for better rounding. + timeout = math.ceil(timeout * 1e3) * 1e-3 + timeout = float(timeout) + else: + timeout = -1.0 # epoll.poll() must have a float. + + # We always want at least 1 to ensure that select can be called + # with no file descriptors registered. Otherwise will fail. + max_events = max(len(self._fd_to_key), 1) + + ready = [] + fd_events = _syscall_wrapper(self._epoll.poll, True, + timeout=timeout, + maxevents=max_events) + for fd, event_mask in fd_events: + events = 0 + if event_mask & ~select.EPOLLIN: + events |= EVENT_WRITE + if event_mask & ~select.EPOLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._epoll.close() + super(EpollSelector, self).close() + + +if hasattr(select, "kqueue"): + class KqueueSelector(BaseSelector): + """ Kqueue / Kevent-based selector """ + def __init__(self): + super(KqueueSelector, self).__init__() + self._kqueue = select.kqueue() + + def fileno(self): + return self._kqueue.fileno() + + def register(self, fileobj, events, data=None): + key = super(KqueueSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + kevent = select.kevent(key.fd, + select.KQ_FILTER_READ, + select.KQ_EV_ADD) + + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + + if events & EVENT_WRITE: + kevent = select.kevent(key.fd, + select.KQ_FILTER_WRITE, + select.KQ_EV_ADD) + + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + + return key + + def unregister(self, fileobj): + key = super(KqueueSelector, self).unregister(fileobj) + if key.events & EVENT_READ: + kevent = select.kevent(key.fd, + select.KQ_FILTER_READ, + select.KQ_EV_DELETE) + try: + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + except SelectorError: + pass + if key.events & EVENT_WRITE: + kevent = select.kevent(key.fd, + select.KQ_FILTER_WRITE, + select.KQ_EV_DELETE) + try: + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + except SelectorError: + pass + + return key + + def select(self, timeout=None): + if timeout is not None: + timeout = max(timeout, 0) + + max_events = len(self._fd_to_key) * 2 + ready_fds = {} + + kevent_list = _syscall_wrapper(self._kqueue.control, True, + None, max_events, timeout) + + for kevent in kevent_list: + fd = kevent.ident + event_mask = kevent.filter + events = 0 + if event_mask == select.KQ_FILTER_READ: + events |= EVENT_READ + if event_mask == select.KQ_FILTER_WRITE: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + if key.fd not in ready_fds: + ready_fds[key.fd] = (key, events & key.events) + else: + old_events = ready_fds[key.fd][1] + ready_fds[key.fd] = (key, (events | old_events) & key.events) + + return list(ready_fds.values()) + + def close(self): + self._kqueue.close() + super(KqueueSelector, self).close() + + +if not hasattr(select, 'select'): # Platform-specific: AppEngine + HAS_SELECT = False + + +def _can_allocate(struct): + """ Checks that select structs can be allocated by the underlying + operating system, not just advertised by the select module. We don't + check select() because we'll be hopeful that most platforms that + don't have it available will not advertise it. 
(ie: GAE) """ + try: + # select.poll() objects won't fail until used. + if struct == 'poll': + p = select.poll() + p.poll(0) + + # All others will fail on allocation. + else: + getattr(select, struct)().close() + return True + except (OSError, AttributeError) as e: + return False + + +# Choose the best implementation, roughly: +# kqueue == epoll > poll > select. Devpoll not supported. (See above) +# select() also can't accept a FD > FD_SETSIZE (usually around 1024) +def DefaultSelector(): + """ This function serves as a first call for DefaultSelector to + detect if the select module is being monkey-patched incorrectly + by eventlet, greenlet, and preserve proper behavior. """ + global _DEFAULT_SELECTOR + if _DEFAULT_SELECTOR is None: + if _can_allocate('kqueue'): + _DEFAULT_SELECTOR = KqueueSelector + elif _can_allocate('epoll'): + _DEFAULT_SELECTOR = EpollSelector + elif _can_allocate('poll'): + _DEFAULT_SELECTOR = PollSelector + elif hasattr(select, 'select'): + _DEFAULT_SELECTOR = SelectSelector + else: # Platform-specific: AppEngine + raise ValueError('Platform does not have a selector') + return _DEFAULT_SELECTOR() diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py new file mode 100644 index 000000000..ece3ec39e --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py @@ -0,0 +1,338 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +import errno +import warnings +import hmac + +from binascii import hexlify, unhexlify +from hashlib import md5, sha1, sha256 + +from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning + + +SSLContext = None +HAS_SNI = False +IS_PYOPENSSL = False +IS_SECURETRANSPORT = False + +# Maps the length of a digest to a possible hash function producing this digest +HASHFUNC_MAP = { + 32: md5, + 40: sha1, + 64: sha256, +} + + +def _const_compare_digest_backport(a, b): + """ + Compare two digests of equal length in constant time. + + The digests must be of type str/bytes. + Returns True if the digests match, and False otherwise. + """ + result = abs(len(a) - len(b)) + for l, r in zip(bytearray(a), bytearray(b)): + result |= l ^ r + return result == 0 + + +_const_compare_digest = getattr(hmac, 'compare_digest', + _const_compare_digest_backport) + + +try: # Test for SSL features + import ssl + from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 + from ssl import HAS_SNI # Has SNI? +except ImportError: + pass + + +try: + from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION +except ImportError: + OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 + OP_NO_COMPRESSION = 0x20000 + +# A secure default. +# Sources for more information on TLS ciphers: +# +# - https://wiki.mozilla.org/Security/Server_Side_TLS +# - https://www.ssllabs.com/projects/best-practices/index.html +# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ +# +# The general intent is: +# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), +# - prefer ECDHE over DHE for better performance, +# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and +# security, +# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, +# - disable NULL authentication, MD5 MACs and DSS for security reasons. 
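+#
+# As an illustration (this call is not made here; DEFAULT_CIPHERS below is
+# applied by create_urllib3_context()): an OpenSSL cipher string in this
+# format is consumed by SSLContext.set_ciphers(), e.g.
+#
+#     ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+#     ctx.set_ciphers('ECDH+AESGCM:DH+AESGCM:!aNULL:!eNULL:!MD5')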
+DEFAULT_CIPHERS = ':'.join([
+    'ECDH+AESGCM',
+    'ECDH+CHACHA20',
+    'DH+AESGCM',
+    'DH+CHACHA20',
+    'ECDH+AES256',
+    'DH+AES256',
+    'ECDH+AES128',
+    'DH+AES',
+    'RSA+AESGCM',
+    'RSA+AES',
+    '!aNULL',
+    '!eNULL',
+    '!MD5',
+])
+
+try:
+    from ssl import SSLContext  # Modern SSL?
+except ImportError:
+    import sys
+
+    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
+        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+                                (3, 2) <= sys.version_info)
+
+        def __init__(self, protocol_version):
+            self.protocol = protocol_version
+            # Use default values from a real SSLContext
+            self.check_hostname = False
+            self.verify_mode = ssl.CERT_NONE
+            self.ca_certs = None
+            self.options = 0
+            self.certfile = None
+            self.keyfile = None
+            self.ciphers = None
+
+        def load_cert_chain(self, certfile, keyfile):
+            self.certfile = certfile
+            self.keyfile = keyfile
+
+        def load_verify_locations(self, cafile=None, capath=None):
+            self.ca_certs = cafile
+
+            if capath is not None:
+                raise SSLError("CA directories not supported in older Pythons")
+
+        def set_ciphers(self, cipher_suite):
+            if not self.supports_set_ciphers:
+                raise TypeError(
+                    'Your version of Python does not support setting '
+                    'a custom cipher suite. Please upgrade to Python '
+                    '2.7, 3.2, or later if you need this functionality.'
+                )
+            self.ciphers = cipher_suite
+
+        def wrap_socket(self, socket, server_hostname=None, server_side=False):
+            warnings.warn(
+                'A true SSLContext object is not available. This prevents '
+                'urllib3 from configuring SSL appropriately and may cause '
+                'certain SSL connections to fail. You can upgrade to a newer '
+                'version of Python to solve this. For more information, see '
+                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+                '#ssl-warnings',
+                InsecurePlatformWarning
+            )
+            kwargs = {
+                'keyfile': self.keyfile,
+                'certfile': self.certfile,
+                'ca_certs': self.ca_certs,
+                'cert_reqs': self.verify_mode,
+                'ssl_version': self.protocol,
+                'server_side': server_side,
+            }
+            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
+                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+            else:  # Platform-specific: Python 2.6
+                return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+    """
+    Checks if given fingerprint matches the supplied certificate.
+
+    :param cert:
+        Certificate as bytes object.
+    :param fingerprint:
+        Fingerprint as string of hexdigits, can be interspersed by colons.
+    """
+
+    fingerprint = fingerprint.replace(':', '').lower()
+    digest_length = len(fingerprint)
+    hashfunc = HASHFUNC_MAP.get(digest_length)
+    if not hashfunc:
+        raise SSLError(
+            'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+    # We need encode() here for py32; works on py2 and py33.
+    fingerprint_bytes = unhexlify(fingerprint.encode())
+
+    cert_digest = hashfunc(cert).digest()
+
+    if not _const_compare_digest(cert_digest, fingerprint_bytes):
+        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+                       .format(fingerprint, hexlify(cert_digest)))
+
+
+def resolve_cert_reqs(candidate):
+    """
+    Resolves the argument to a numeric constant, which can be passed to
+    the wrap_socket function/method from the ssl module.
+    Defaults to :data:`ssl.CERT_NONE`.
+    If given a string it is assumed to be the name of the constant in the
+    :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+    If it's neither `None` nor a string we assume it is already the numeric
+    constant which can directly be passed to wrap_socket.
+ """ + if candidate is None: + return CERT_NONE + + if isinstance(candidate, str): + res = getattr(ssl, candidate, None) + if res is None: + res = getattr(ssl, 'CERT_' + candidate) + return res + + return candidate + + +def resolve_ssl_version(candidate): + """ + like resolve_cert_reqs + """ + if candidate is None: + return PROTOCOL_SSLv23 + + if isinstance(candidate, str): + res = getattr(ssl, candidate, None) + if res is None: + res = getattr(ssl, 'PROTOCOL_' + candidate) + return res + + return candidate + + +def create_urllib3_context(ssl_version=None, cert_reqs=None, + options=None, ciphers=None): + """All arguments have the same meaning as ``ssl_wrap_socket``. + + By default, this function does a lot of the same work that + ``ssl.create_default_context`` does on Python 3.4+. It: + + - Disables SSLv2, SSLv3, and compression + - Sets a restricted set of server ciphers + + If you wish to enable SSLv3, you can do:: + + from urllib3.util import ssl_ + context = ssl_.create_urllib3_context() + context.options &= ~ssl_.OP_NO_SSLv3 + + You can do the same to enable compression (substituting ``COMPRESSION`` + for ``SSLv3`` in the last line above). + + :param ssl_version: + The desired protocol version to use. This will default to + PROTOCOL_SSLv23 which will negotiate the highest protocol that both + the server and your installation of OpenSSL support. + :param cert_reqs: + Whether to require the certificate verification. This defaults to + ``ssl.CERT_REQUIRED``. + :param options: + Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, + ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. + :param ciphers: + Which cipher suites to allow the server to select. + :returns: + Constructed SSLContext object with specified options + :rtype: SSLContext + """ + context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + + # Setting the default here, as we may have no ssl module on import + cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs + + if options is None: + options = 0 + # SSLv2 is easily broken and is considered harmful and dangerous + options |= OP_NO_SSLv2 + # SSLv3 has several problems and is now dangerous + options |= OP_NO_SSLv3 + # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ + # (issue #309) + options |= OP_NO_COMPRESSION + + context.options |= options + + if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 + context.set_ciphers(ciphers or DEFAULT_CIPHERS) + + context.verify_mode = cert_reqs + if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 + # We do our own verification, including fingerprints and alternative + # hostnames. So disable it here + context.check_hostname = False + return context + + +def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, + ca_certs=None, server_hostname=None, + ssl_version=None, ciphers=None, ssl_context=None, + ca_cert_dir=None): + """ + All arguments except for server_hostname, ssl_context, and ca_cert_dir have + the same meaning as they do when using :func:`ssl.wrap_socket`. + + :param server_hostname: + When SNI is supported, the expected hostname of the certificate + :param ssl_context: + A pre-made :class:`SSLContext` object. If none is provided, one will + be created using :func:`create_urllib3_context`. + :param ciphers: + A string of ciphers we wish the client to support. This is not + supported on Python 2.6 as the ssl module does not support it. 
+ :param ca_cert_dir: + A directory containing CA certificates in multiple separate files, as + supported by OpenSSL's -CApath flag or the capath argument to + SSLContext.load_verify_locations(). + """ + context = ssl_context + if context is None: + # Note: This branch of code and all the variables in it are no longer + # used by urllib3 itself. We should consider deprecating and removing + # this code. + context = create_urllib3_context(ssl_version, cert_reqs, + ciphers=ciphers) + + if ca_certs or ca_cert_dir: + try: + context.load_verify_locations(ca_certs, ca_cert_dir) + except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2 + raise SSLError(e) + # Py33 raises FileNotFoundError which subclasses OSError + # These are not equivalent unless we check the errno attribute + except OSError as e: # Platform-specific: Python 3.3 and beyond + if e.errno == errno.ENOENT: + raise SSLError(e) + raise + elif getattr(context, 'load_default_certs', None) is not None: + # try to load OS default certs; works well on Windows (require Python3.4+) + context.load_default_certs() + + if certfile: + context.load_cert_chain(certfile, keyfile) + if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI + return context.wrap_socket(sock, server_hostname=server_hostname) + + warnings.warn( + 'An HTTPS request has been made, but the SNI (Subject Name ' + 'Indication) extension to TLS is not available on this platform. ' + 'This may cause the server to present an incorrect TLS ' + 'certificate, which can cause validation failures. You can upgrade to ' + 'a newer version of Python to solve this. For more information, see ' + 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' + '#ssl-warnings', + SNIMissingWarning + ) + return context.wrap_socket(sock) diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py new file mode 100644 index 000000000..4041cf9b9 --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py @@ -0,0 +1,243 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +# The default socket timeout, used by httplib to indicate that no timeout was +# specified by the user +from socket import _GLOBAL_DEFAULT_TIMEOUT +import time + +from ..exceptions import TimeoutStateError + +# A sentinel value to indicate that no timeout was specified by the user in +# urllib3 +_Default = object() + + +# Use time.monotonic if available. +current_time = getattr(time, "monotonic", time.time) + + +class Timeout(object): + """ Timeout configuration. + + Timeouts can be defined as a default for a pool:: + + timeout = Timeout(connect=2.0, read=7.0) + http = PoolManager(timeout=timeout) + response = http.request('GET', 'http://example.com/') + + Or per-request (which overrides the default for the pool):: + + response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) + + Timeouts can be disabled by setting all the parameters to ``None``:: + + no_timeout = Timeout(connect=None, read=None) + response = http.request('GET', 'http://example.com/, timeout=no_timeout) + + + :param total: + This combines the connect and read timeouts into one; the read timeout + will be set to the time leftover from the connect attempt. In the + event that both a connect timeout and a total are specified, or a read + timeout and a total are specified, the shorter timeout will be applied. + + Defaults to None. 
+ + :type total: integer, float, or None + + :param connect: + The maximum amount of time to wait for a connection attempt to a server + to succeed. Omitting the parameter will default the connect timeout to + the system default, probably `the global default timeout in socket.py + <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. + None will set an infinite timeout for connection attempts. + + :type connect: integer, float, or None + + :param read: + The maximum amount of time to wait between consecutive + read operations for a response from the server. Omitting + the parameter will default the read timeout to the system + default, probably `the global default timeout in socket.py + <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. + None will set an infinite timeout. + + :type read: integer, float, or None + + .. note:: + + Many factors can affect the total amount of time for urllib3 to return + an HTTP response. + + For example, Python's DNS resolver does not obey the timeout specified + on the socket. Other factors that can affect total request time include + high CPU load, high swap, the program running at a low priority level, + or other behaviors. + + In addition, the read and total timeouts only measure the time between + read operations on the socket connecting the client and the server, + not the total amount of time for the request to return a complete + response. For most requests, the timeout is raised because the server + has not sent the first byte in the specified time. This is not always + the case; if a server streams one byte every fifteen seconds, a timeout + of 20 seconds will not trigger, even though the request will take + several minutes to complete. + + If your goal is to cut off any request after a set amount of wall clock + time, consider having a second "watcher" thread to cut off a slow + request. + """ + + #: A sentinel object representing the default timeout value + DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT + + def __init__(self, total=None, connect=_Default, read=_Default): + self._connect = self._validate_timeout(connect, 'connect') + self._read = self._validate_timeout(read, 'read') + self.total = self._validate_timeout(total, 'total') + self._start_connect = None + + def __str__(self): + return '%s(connect=%r, read=%r, total=%r)' % ( + type(self).__name__, self._connect, self._read, self.total) + + @classmethod + def _validate_timeout(cls, value, name): + """ Check that a timeout attribute is valid. + + :param value: The timeout value to validate + :param name: The name of the timeout attribute to validate. This is + used to specify in error messages. + :return: The validated and casted version of the given value. + :raises ValueError: If it is a numeric value less than or equal to + zero, or the type is not an integer, float, or None. + """ + if value is _Default: + return cls.DEFAULT_TIMEOUT + + if value is None or value is cls.DEFAULT_TIMEOUT: + return value + + if isinstance(value, bool): + raise ValueError("Timeout cannot be a boolean value. It must " + "be an int, float or None.") + try: + float(value) + except (TypeError, ValueError): + raise ValueError("Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value)) + + try: + if value <= 0: + raise ValueError("Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than or equal to 0." 
% (name, value)) + except TypeError: # Python 3 + raise ValueError("Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value)) + + return value + + @classmethod + def from_float(cls, timeout): + """ Create a new Timeout from a legacy timeout value. + + The timeout value used by httplib.py sets the same timeout on the + connect(), and recv() socket requests. This creates a :class:`Timeout` + object that sets the individual timeouts to the ``timeout`` value + passed to this function. + + :param timeout: The legacy timeout value. + :type timeout: integer, float, sentinel default object, or None + :return: Timeout object + :rtype: :class:`Timeout` + """ + return Timeout(read=timeout, connect=timeout) + + def clone(self): + """ Create a copy of the timeout object + + Timeout properties are stored per-pool but each request needs a fresh + Timeout object to ensure each one has its own start/stop configured. + + :return: a copy of the timeout object + :rtype: :class:`Timeout` + """ + # We can't use copy.deepcopy because that will also create a new object + # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to + # detect the user default. + return Timeout(connect=self._connect, read=self._read, + total=self.total) + + def start_connect(self): + """ Start the timeout clock, used during a connect() attempt + + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to start a timer that has been started already. + """ + if self._start_connect is not None: + raise TimeoutStateError("Timeout timer has already been started.") + self._start_connect = current_time() + return self._start_connect + + def get_connect_duration(self): + """ Gets the time elapsed since the call to :meth:`start_connect`. + + :return: Elapsed time. + :rtype: float + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to get duration for a timer that hasn't been started. + """ + if self._start_connect is None: + raise TimeoutStateError("Can't get connect duration for timer " + "that has not started.") + return current_time() - self._start_connect + + @property + def connect_timeout(self): + """ Get the value to use when setting a connection timeout. + + This will be a positive float or integer, the value None + (never timeout), or the default system timeout. + + :return: Connect timeout. + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + """ + if self.total is None: + return self._connect + + if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: + return self.total + + return min(self._connect, self.total) + + @property + def read_timeout(self): + """ Get the value for the read timeout. + + This assumes some time has elapsed in the connection timeout and + computes the read timeout appropriately. + + If self.total is set, the read timeout is dependent on the amount of + time taken by the connect timeout. If the connection time has not been + established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be + raised. + + :return: Value to use for the read timeout. + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` + has not yet been called on this object. + """ + if (self.total is not None and + self.total is not self.DEFAULT_TIMEOUT and + self._read is not None and + self._read is not self.DEFAULT_TIMEOUT): + # In case the connect timeout has not yet been established. 
+ if self._start_connect is None: + return self._read + return max(0, min(self.total - self.get_connect_duration(), + self._read)) + elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: + return max(0, self.total - self.get_connect_duration()) + else: + return self._read diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py new file mode 100644 index 000000000..99fd6534a --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py @@ -0,0 +1,231 @@ +# SPDX-License-Identifier: MIT +from __future__ import absolute_import +from collections import namedtuple + +from ..exceptions import LocationParseError + + +url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] + +# We only want to normalize urls with an HTTP(S) scheme. +# urllib3 infers URLs without a scheme (None) to be http. +NORMALIZABLE_SCHEMES = ('http', 'https', None) + + +class Url(namedtuple('Url', url_attrs)): + """ + Datastructure for representing an HTTP URL. Used as a return value for + :func:`parse_url`. Both the scheme and host are normalized as they are + both case-insensitive according to RFC 3986. + """ + __slots__ = () + + def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, + query=None, fragment=None): + if path and not path.startswith('/'): + path = '/' + path + if scheme: + scheme = scheme.lower() + if host and scheme in NORMALIZABLE_SCHEMES: + host = host.lower() + return super(Url, cls).__new__(cls, scheme, auth, host, port, path, + query, fragment) + + @property + def hostname(self): + """For backwards-compatibility with urlparse. We're nice like that.""" + return self.host + + @property + def request_uri(self): + """Absolute path including the query string.""" + uri = self.path or '/' + + if self.query is not None: + uri += '?' + self.query + + return uri + + @property + def netloc(self): + """Network location including host and port""" + if self.port: + return '%s:%d' % (self.host, self.port) + return self.host + + @property + def url(self): + """ + Convert self into a url + + This function should more or less round-trip with :func:`.parse_url`. The + returned url may not be exactly the same as the url inputted to + :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls + with a blank port will have : removed). + + Example: :: + + >>> U = parse_url('http://google.com/mail/') + >>> U.url + 'http://google.com/mail/' + >>> Url('http', 'username:password', 'host.com', 80, + ... '/path', 'query', 'fragment').url + 'http://username:password@host.com:80/path?query#fragment' + """ + scheme, auth, host, port, path, query, fragment = self + url = '' + + # We use "is not None" we want things to happen with empty strings (or 0 port) + if scheme is not None: + url += scheme + '://' + if auth is not None: + url += auth + '@' + if host is not None: + url += host + if port is not None: + url += ':' + str(port) + if path is not None: + url += path + if query is not None: + url += '?' + query + if fragment is not None: + url += '#' + fragment + + return url + + def __str__(self): + return self.url + + +def split_first(s, delims): + """ + Given a string and an iterable of delimiters, split on the first found + delimiter. Return two split parts and the matched delimiter. + + If not found, then the first part is the full input string. 
+ + Example:: + + >>> split_first('foo/bar?baz', '?/=') + ('foo', 'bar?baz', '/') + >>> split_first('foo/bar?baz', '123') + ('foo/bar?baz', '', None) + + Scales linearly with number of delims. Not ideal for large number of delims. + """ + min_idx = None + min_delim = None + for d in delims: + idx = s.find(d) + if idx < 0: + continue + + if min_idx is None or idx < min_idx: + min_idx = idx + min_delim = d + + if min_idx is None or min_idx < 0: + return s, '', None + + return s[:min_idx], s[min_idx + 1:], min_delim + + +def parse_url(url): + """ + Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is + performed to parse incomplete urls. Fields not provided will be None. + + Partly backwards-compatible with :mod:`urlparse`. + + Example:: + + >>> parse_url('http://google.com/mail/') + Url(scheme='http', host='google.com', port=None, path='/mail/', ...) + >>> parse_url('google.com:80') + Url(scheme=None, host='google.com', port=80, path=None, ...) + >>> parse_url('/foo?bar') + Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) + """ + + # While this code has overlap with stdlib's urlparse, it is much + # simplified for our needs and less annoying. + # Additionally, this implementations does silly things to be optimal + # on CPython. + + if not url: + # Empty + return Url() + + scheme = None + auth = None + host = None + port = None + path = None + fragment = None + query = None + + # Scheme + if '://' in url: + scheme, url = url.split('://', 1) + + # Find the earliest Authority Terminator + # (http://tools.ietf.org/html/rfc3986#section-3.2) + url, path_, delim = split_first(url, ['/', '?', '#']) + + if delim: + # Reassemble the path + path = delim + path_ + + # Auth + if '@' in url: + # Last '@' denotes end of auth part + auth, url = url.rsplit('@', 1) + + # IPv6 + if url and url[0] == '[': + host, url = url.split(']', 1) + host += ']' + + # Port + if ':' in url: + _host, port = url.split(':', 1) + + if not host: + host = _host + + if port: + # If given, ports must be integers. No whitespace, no plus or + # minus prefixes, no non-integer digits such as ^2 (superscript). + if not port.isdigit(): + raise LocationParseError(url) + try: + port = int(port) + except ValueError: + raise LocationParseError(url) + else: + # Blank ports are cool, too. (rfc3986#section-3.2.3) + port = None + + elif not host and url: + host = url + + if not path: + return Url(scheme, auth, host, port, path, query, fragment) + + # Fragment + if '#' in path: + path, fragment = path.split('#', 1) + + # Query + if '?' in path: + path, query = path.split('?', 1) + + return Url(scheme, auth, host, port, path, query, fragment) + + +def get_host(url): + """ + Deprecated. Use :func:`parse_url` instead. + """ + p = parse_url(url) + return p.scheme or 'http', p.hostname, p.port diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py new file mode 100644 index 000000000..21e72979c --- /dev/null +++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: MIT +from .selectors import ( + HAS_SELECT, + DefaultSelector, + EVENT_READ, + EVENT_WRITE +) + + +def _wait_for_io_events(socks, events, timeout=None): + """ Waits for IO events to be available from a list of sockets + or optionally a single socket if passed in. Returns a list of + sockets that can be interacted with immediately. 
""" + if not HAS_SELECT: + raise ValueError('Platform does not have a selector') + if not isinstance(socks, list): + # Probably just a single socket. + if hasattr(socks, "fileno"): + socks = [socks] + # Otherwise it might be a non-list iterable. + else: + socks = list(socks) + with DefaultSelector() as selector: + for sock in socks: + selector.register(sock, events) + return [key[0].fileobj for key in + selector.select(timeout) if key[1] & events] + + +def wait_for_read(socks, timeout=None): + """ Waits for reading to be available from a list of sockets + or optionally a single socket if passed in. Returns a list of + sockets that can be read from immediately. """ + return _wait_for_io_events(socks, EVENT_READ, timeout) + + +def wait_for_write(socks, timeout=None): + """ Waits for writing to be available from a list of sockets + or optionally a single socket if passed in. Returns a list of + sockets that can be written to immediately. """ + return _wait_for_io_events(socks, EVENT_WRITE, timeout) diff --git a/collectors/python.d.plugin/rabbitmq/Makefile.inc b/collectors/python.d.plugin/rabbitmq/Makefile.inc new file mode 100644 index 000000000..7e67ef512 --- /dev/null +++ b/collectors/python.d.plugin/rabbitmq/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += rabbitmq/rabbitmq.chart.py +dist_pythonconfig_DATA += rabbitmq/rabbitmq.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += rabbitmq/README.md rabbitmq/Makefile.inc + diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md new file mode 100644 index 000000000..22d367c4d --- /dev/null +++ b/collectors/python.d.plugin/rabbitmq/README.md @@ -0,0 +1,56 @@ +# rabbitmq + +Module monitor rabbitmq performance and health metrics. + +Following charts are drawn: + +1. **Queued Messages** + * ready + * unacknowledged + +2. **Message Rates** + * ack + * redelivered + * deliver + * publish + +3. **Global Counts** + * channels + * consumers + * connections + * queues + * exchanges + +4. **File Descriptors** + * used descriptors + +5. **Socket Descriptors** + * used descriptors + +6. **Erlang processes** + * used processes + +7. **Erlang run queue** + * Erlang run queue + +8. **Memory** + * free memory in megabytes + +9. **Disk Space** + * free disk space in gigabytes + +### configuration + +```yaml +socket: + name : 'local' + host : '127.0.0.1' + port : 15672 + user : 'guest' + pass : 'guest' + +``` + +When no configuration file is found, module tries to connect to: `localhost:15672`. 
+ +--- diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py new file mode 100644 index 000000000..8298b4032 --- /dev/null +++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +# Description: rabbitmq netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +from collections import namedtuple +from json import loads +from socket import gethostbyname, gaierror +from threading import Thread +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from bases.FrameworkServices.UrlService import UrlService + +# default module values (can be overridden per job in `config`) +update_every = 1 +priority = 60000 +retries = 60 + +METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) + +NODE_STATS = [ + 'fd_used', + 'mem_used', + 'sockets_used', + 'proc_used', + 'disk_free', + 'run_queue' +] + +OVERVIEW_STATS = [ + 'object_totals.channels', + 'object_totals.consumers', + 'object_totals.connections', + 'object_totals.queues', + 'object_totals.exchanges', + 'queue_totals.messages_ready', + 'queue_totals.messages_unacknowledged', + 'message_stats.ack', + 'message_stats.redeliver', + 'message_stats.deliver', + 'message_stats.publish' +] + +ORDER = [ + 'queued_messages', + 'message_rates', + 'global_counts', + 'file_descriptors', + 'socket_descriptors', + 'erlang_processes', + 'erlang_run_queue', + 'memory', + 'disk_space' +] + +CHARTS = { + 'file_descriptors': { + 'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'], + 'lines': [ + ['fd_used', 'used', 'absolute'] + ] + }, + 'memory': { + 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'], + 'lines': [ + ['mem_used', 'used', 'absolute', 1, 1024 << 10] + ] + }, + 'disk_space': { + 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'], + 'lines': [ + ['disk_free', 'free', 'absolute', 1, 1024 ** 3] + ] + }, + 'socket_descriptors': { + 'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'], + 'lines': [ + ['sockets_used', 'used', 'absolute'] + ] + }, + 'erlang_processes': { + 'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'], + 'lines': [ + ['proc_used', 'used', 'absolute'] + ] + }, + 'erlang_run_queue': { + 'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'], + 'lines': [ + ['run_queue', 'length', 'absolute'] + ] + }, + 'global_counts': { + 'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'], + 'lines': [ + ['object_totals_channels', 'channels', 'absolute'], + ['object_totals_consumers', 'consumers', 'absolute'], + ['object_totals_connections', 'connections', 'absolute'], + ['object_totals_queues', 'queues', 'absolute'], + ['object_totals_exchanges', 'exchanges', 'absolute'] + ] + }, + 'queued_messages': { + 'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'], + 'lines': [ + ['queue_totals_messages_ready', 'ready', 'absolute'], + ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute'] + ] + }, + 'message_rates': { + 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'], + 'lines': [ + ['message_stats_ack', 'ack', 'incremental'], + ['message_stats_redeliver', 'redeliver', 'incremental'], + 
['message_stats_deliver', 'deliver', 'incremental'], + ['message_stats_publish', 'publish', 'incremental'] + ] + } +} + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.host = self.configuration.get('host', '127.0.0.1') + self.port = self.configuration.get('port', 15672) + self.scheme = self.configuration.get('scheme', 'http') + + def check(self): + # We can't start if <host> AND <port> not specified + if not (self.host and self.port): + self.error('Host is not defined in the module configuration file') + return False + + # Hostname -> ip address + try: + self.host = gethostbyname(self.host) + except gaierror as error: + self.error(str(error)) + return False + + # Add handlers (auth, self signed cert accept) + self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme, + host=self.host, + port=self.port) + # Add methods + api_node = self.url + '/nodes' + api_overview = self.url + '/overview' + self.methods = [METHODS(get_data=self._get_overview_stats, + url=api_node, + stats=NODE_STATS), + METHODS(get_data=self._get_overview_stats, + url=api_overview, + stats=OVERVIEW_STATS)] + return UrlService.check(self) + + def _get_data(self): + threads = list() + queue = Queue() + result = dict() + + for method in self.methods: + th = Thread(target=method.get_data, + args=(queue, method.url, method.stats)) + th.start() + threads.append(th) + + for thread in threads: + thread.join() + result.update(queue.get()) + + return result or None + + def _get_overview_stats(self, queue, url, stats): + """ + Format data received from http request + :return: dict + """ + + raw_data = self._get_raw_data(url) + + if not raw_data: + return queue.put(dict()) + data = loads(raw_data) + data = data[0] if isinstance(data, list) else data + + to_netdata = fetch_data(raw_data=data, metrics=stats) + return queue.put(to_netdata) + + +def fetch_data(raw_data, metrics): + data = dict() + for metric in metrics: + value = raw_data + metrics_list = metric.split('.') + try: + for m in metrics_list: + value = value[m] + except KeyError: + continue + data['_'.join(metrics_list)] = value + return data diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf new file mode 100644 index 000000000..3f90da8a2 --- /dev/null +++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf @@ -0,0 +1,82 @@ +# netdata python.d.plugin configuration for rabbitmq +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. 
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, the rabbitmq plugin also supports the following:
+#
+# host: 'ipaddress'           # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port'                # Rabbitmq port. Default: 15672
+# scheme: 'scheme'            # http or https. Default: http
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+  host: '127.0.0.1'
+  user: 'guest'
+  pass: 'guest'
diff --git a/collectors/python.d.plugin/redis/Makefile.inc b/collectors/python.d.plugin/redis/Makefile.inc
new file mode 100644
index 000000000..6aab08977
--- /dev/null
+++ b/collectors/python.d.plugin/redis/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later

+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += redis/redis.chart.py
+dist_pythonconfig_DATA += redis/redis.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += redis/README.md redis/Makefile.inc
+
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
new file mode 100644
index 000000000..8d21df0ca
--- /dev/null
+++ b/collectors/python.d.plugin/redis/README.md
@@ -0,0 +1,42 @@
+# redis
+
+This module gets INFO data from a redis instance.
+
+The following charts are drawn:
+
+1. **Operations** per second
+ * operations
+
+2. **Hit rate** in percent
+ * rate
+
+3. **Memory utilization** in kilobytes
+ * total
+ * lua
+
+4. **Database keys**
+ * lines are created dynamically, one per database
+
+5. **Clients**
+ * connected
+ * blocked
+
+6. **Slaves**
+ * connected
+
+### configuration
+
+```yaml
+socket:
+  name   : 'local'
+  socket : '/var/lib/redis/redis.sock'
+
+localhost:
+  name   : 'local'
+  host   : 'localhost'
+  port   : 6379
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
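+
+Internally the module sends a plain `INFO` command over the socket and parses
+the reply; the hit rate chart, for example, is derived from `keyspace_hits`
+and `keyspace_misses`. A minimal sketch of the same calculation, assuming an
+unauthenticated redis on `localhost:6379` (a single `recv()` is good enough
+for a quick check, though a real client should read until the reply is
+complete):
+
+```python
+import re
+import socket
+
+s = socket.create_connection(('127.0.0.1', 6379))
+s.sendall(b'INFO\r\n')
+info = s.recv(65536).decode('utf-8', 'replace')
+s.close()
+
+# Same key:value pattern the module itself uses to parse INFO output.
+stats = dict(re.findall(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)', info))
+hits = int(stats.get('keyspace_hits', 0))
+misses = int(stats.get('keyspace_misses', 0))
+print('hit rate: %.1f%%' % (100.0 * hits / max(1, hits + misses)))
+```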
+ +--- diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py new file mode 100644 index 000000000..37d55ebfe --- /dev/null +++ b/collectors/python.d.plugin/redis/redis.chart.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Description: redis netdata python.d module +# Author: Pawel Krupa (paulfantom) +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +import re + +from copy import deepcopy + +from bases.FrameworkServices.SocketService import SocketService + +REDIS_ORDER = [ + 'operations', + 'hit_rate', + 'memory', + 'keys_redis', + 'eviction', + 'net', + 'connections', + 'clients', + 'slaves', + 'persistence', + 'bgsave_now', + 'bgsave_health', + 'uptime', +] + +PIKA_ORDER = [ + 'operations', + 'hit_rate', + 'memory', + 'keys_pika', + 'connections', + 'clients', + 'slaves', + 'uptime', +] + + +CHARTS = { + 'operations': { + 'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'], + 'lines': [ + ['total_commands_processed', 'commands', 'incremental'], + ['instantaneous_ops_per_sec', 'operations', 'absolute'] + ] + }, + 'hit_rate': { + 'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'], + 'lines': [ + ['hit_rate', 'rate', 'absolute'] + ] + }, + 'memory': { + 'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'], + 'lines': [ + ['used_memory', 'total', 'absolute', 1, 1024], + ['used_memory_lua', 'lua', 'absolute', 1, 1024] + ] + }, + 'net': { + 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'], + 'lines': [ + ['total_net_input_bytes', 'in', 'incremental', 8, 1024], + ['total_net_output_bytes', 'out', 'incremental', -8, 1024] + ] + }, + 'keys_redis': { + 'options': [None, 'Keys per Database', 'keys', 'keys', 'redis.keys', 'line'], + 'lines': [] + }, + 'keys_pika': { + 'options': [None, 'Keys', 'keys', 'keys', 'redis.keys', 'line'], + 'lines': [ + ['kv_keys', 'kv', 'absolute'], + ['hash_keys', 'hash', 'absolute'], + ['list_keys', 'list', 'absolute'], + ['zset_keys', 'zset', 'absolute'], + ['set_keys', 'set', 'absolute'] + ] + }, + 'eviction': { + 'options': [None, 'Evicted Keys', 'keys', 'keys', 'redis.eviction', 'line'], + 'lines': [ + ['evicted_keys', 'evicted', 'absolute'] + ] + }, + 'connections': { + 'options': [None, 'Connections', 'connections/s', 'connections', 'redis.connections', 'line'], + 'lines': [ + ['total_connections_received', 'received', 'incremental', 1], + ['rejected_connections', 'rejected', 'incremental', -1] + ] + }, + 'clients': { + 'options': [None, 'Clients', 'clients', 'connections', 'redis.clients', 'line'], + 'lines': [ + ['connected_clients', 'connected', 'absolute', 1], + ['blocked_clients', 'blocked', 'absolute', -1] + ] + }, + 'slaves': { + 'options': [None, 'Slaves', 'slaves', 'replication', 'redis.slaves', 'line'], + 'lines': [ + ['connected_slaves', 'connected', 'absolute'] + ] + }, + 'persistence': { + 'options': [None, 'Persistence Changes Since Last Save', 'changes', 'persistence', + 'redis.rdb_changes', 'line'], + 'lines': [ + ['rdb_changes_since_last_save', 'changes', 'absolute'] + ] + }, + 'bgsave_now': { + 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence', + 'redis.bgsave_now', 'absolute'], + 'lines': [ + ['rdb_bgsave_in_progress', 'rdb save', 'absolute'] + ] + }, + 'bgsave_health': { + 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence', + 'redis.bgsave_health', 'line'], + 'lines': [ + 
['rdb_last_bgsave_status', 'rdb save', 'absolute'] + ] + }, + 'uptime': { + 'options': [None, 'Uptime', 'seconds', 'uptime', 'redis.uptime', 'line'], + 'lines': [ + ['uptime_in_seconds', 'uptime', 'absolute'] + ] + } +} + + +def copy_chart(name): + return {name: deepcopy(CHARTS[name])} + + +RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)') + + +class Service(SocketService): + def __init__(self, configuration=None, name=None): + SocketService.__init__(self, configuration=configuration, name=name) + self._keep_alive = True + + self.order = list() + self.definitions = dict() + + self.host = self.configuration.get('host', 'localhost') + self.port = self.configuration.get('port', 6379) + self.unix_socket = self.configuration.get('socket') + p = self.configuration.get('pass') + + self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None + self.request = 'INFO\r\n'.encode() + self.bgsave_time = 0 + + def do_auth(self): + resp = self._get_raw_data(request=self.auth_request) + if not resp: + return False + if resp.strip() != '+OK': + self.error('invalid password') + return False + return True + + def get_raw_and_parse(self): + if self.auth_request and not self.do_auth(): + return None + + resp = self._get_raw_data() + + if not resp: + return None + + parsed = RE.findall(resp) + + if not parsed: + self.error('response is invalid/empty') + return None + + return dict((k.replace(' ', '_'), v) for k, v in parsed) + + def get_data(self): + """ + Get data from socket + :return: dict + """ + data = self.get_raw_and_parse() + + if not data: + return None + + try: + data['hit_rate'] = ( + (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses'])) + ) + except (KeyError, ZeroDivisionError): + data['hit_rate'] = 0 + + if data.get('redis_version') and data.get('rdb_bgsave_in_progress'): + self.get_data_redis_specific(data) + + return data + + def get_data_redis_specific(self, data): + if data['rdb_bgsave_in_progress'] != '0': + self.bgsave_time += self.update_every + else: + self.bgsave_time = 0 + + data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok' else 1 + data['rdb_bgsave_in_progress'] = self.bgsave_time + + def check(self): + """ + Parse configuration, check if redis is available, and dynamically create chart lines data + :return: boolean + """ + data = self.get_raw_and_parse() + + if not data: + return False + + self.order = PIKA_ORDER if data.get('pika_version') else REDIS_ORDER + + for n in self.order: + self.definitions.update(copy_chart(n)) + + if data.get('redis_version'): + for k in data: + if k.startswith('db'): + self.definitions['keys_redis']['lines'].append([k, None, 'absolute']) + + return True + + def _check_raw_data(self, data): + """ + Check if all data has been gathered from socket. 
+ Parse first line containing message length and check against received message + :param data: str + :return: boolean + """ + length = len(data) + supposed = data.split('\n')[0][1:-1] + offset = len(supposed) + 4 # 1 dollar sign, 1 newline character + 1 ending sequence '\r\n' + if not supposed.isdigit(): + return True + supposed = int(supposed) + + if length - offset >= supposed: + self.debug('received full response from redis') + return True + + self.debug('waiting for more data from redis') + return False diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf new file mode 100644 index 000000000..6363f6da7 --- /dev/null +++ b/collectors/python.d.plugin/redis/redis.conf @@ -0,0 +1,112 @@ +# netdata python.d.plugin configuration for redis +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters.
These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, redis also supports the following: +# +# socket: 'path/to/redis.sock' +# +# or +# host: 'IP or HOSTNAME' # the host to connect to +# port: PORT # the port to connect to +# +# and +# pass: 'password' # the redis password to use for AUTH command +# + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +socket1: + name : 'local' + socket : '/tmp/redis.sock' + # pass : '' + +socket2: + name : 'local' + socket : '/var/run/redis/redis.sock' + # pass : '' + +socket3: + name : 'local' + socket : '/var/lib/redis/redis.sock' + # pass : '' + +localhost: + name : 'local' + host : 'localhost' + port : 6379 + # pass : '' + +localipv4: + name : 'local' + host : '127.0.0.1' + port : 6379 + # pass : '' + +localipv6: + name : 'local' + host : '::1' + port : 6379 + # pass : '' + diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc new file mode 100644 index 000000000..dec604464 --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += rethinkdbs/rethinkdbs.chart.py +dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc + diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md new file mode 100644 index 000000000..5d357fa49 --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/README.md @@ -0,0 +1,34 @@ +# rethinkdbs + +This module monitors RethinkDB health metrics. + +The following charts are drawn: + +1. **Connected Servers** + * connected + * missing + +2. **Active Clients** + * active + +3. **Queries** per second + * queries + +4. **Documents** per second + * documents + +### configuration + +```yaml + +localhost: + name : 'local' + host : '127.0.0.1' + port : 28015 + user : "user" + password : "pass" +``` + +When no configuration file is found, the module tries to connect to `127.0.0.1:28015`.
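A minimal standalone sketch of the collection cycle, using the same pre-2.4 `rethinkdb` driver API that the module itself imports; the host, port and credentials below are the module defaults, and the printed fields are illustrative:

```python
# Sketch only: read the RethinkDB 'stats' system table the way the module does.
import rethinkdb as rdb

conn = rdb.connect(host='127.0.0.1', port=28015, user='admin', password=None, timeout=2)

# 'stats' holds one 'cluster' document plus one document per server and table;
# the module charts the cluster document and every 'server' document.
for doc in rdb.db('rethinkdb').table('stats').run(conn):
    kind = doc['id'][0]  # e.g. 'cluster', 'server', 'table'
    if kind == 'cluster':
        print('cluster queries/s:', doc['query_engine']['queries_per_sec'])
    elif kind == 'server':
        print('server:', doc['server'], 'error' if doc.get('error') else 'ok')

conn.close()
```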
+ +--- diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py new file mode 100644 index 000000000..127e9ad4b --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py @@ -0,0 +1,235 @@ +# -*- coding: utf-8 -*- +# Description: rethinkdb netdata python.d module +# Author: Ilya Mashchenko (l2isbad) +# SPDX-License-Identifier: GPL-3.0-or-later + +try: + import rethinkdb as rdb + HAS_RETHINKDB = True +except ImportError: + HAS_RETHINKDB = False + +from bases.FrameworkServices.SimpleService import SimpleService + +ORDER = [ + 'cluster_connected_servers', + 'cluster_clients_active', + 'cluster_queries', + 'cluster_documents', +] + + +def cluster_charts(): + return { + 'cluster_connected_servers': { + 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers', + 'stacked'], + 'lines': [ + ['cluster_servers_connected', 'connected'], + ['cluster_servers_missing', 'missing'], + ] + }, + 'cluster_clients_active': { + 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active', + 'line'], + 'lines': [ + ['cluster_clients_active', 'active'], + ] + }, + 'cluster_queries': { + 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'], + 'lines': [ + ['cluster_queries_per_sec', 'queries'], + ] + }, + 'cluster_documents': { + 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'], + 'lines': [ + ['cluster_read_docs_per_sec', 'reads'], + ['cluster_written_docs_per_sec', 'writes'], + ] + }, + } + + +def server_charts(n): + o = [ + '{0}_client_connections'.format(n), + '{0}_clients_active'.format(n), + '{0}_queries'.format(n), + '{0}_documents'.format(n), + ] + f = 'server {0}'.format(n) + + c = { + o[0]: { + 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'], + 'lines': [ + ['{0}_client_connections'.format(n), 'connections'], + ] + }, + o[1]: { + 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'], + 'lines': [ + ['{0}_clients_active'.format(n), 'active'], + ] + }, + o[2]: { + 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'], + 'lines': [ + ['{0}_queries_total'.format(n), 'queries', 'incremental'], + ] + }, + o[3]: { + 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'], + 'lines': [ + ['{0}_read_docs_total'.format(n), 'reads', 'incremental'], + ['{0}_written_docs_total'.format(n), 'writes', 'incremental'], + ] + }, + } + + return o, c + + +class Cluster: + def __init__(self, raw): + self.raw = raw + + def data(self): + qe = self.raw['query_engine'] + + return { + 'cluster_clients_active': qe['clients_active'], + 'cluster_queries_per_sec': qe['queries_per_sec'], + 'cluster_read_docs_per_sec': qe['read_docs_per_sec'], + 'cluster_written_docs_per_sec': qe['written_docs_per_sec'], + 'cluster_servers_connected': 0, + 'cluster_servers_missing': 0, + } + + +class Server: + def __init__(self, raw): + self.name = raw['server'] + self.raw = raw + + def error(self): + return self.raw.get('error') + + def data(self): + qe = self.raw['query_engine'] + + d = { + 'client_connections': qe['client_connections'], + 'clients_active': qe['clients_active'], + 'queries_total': qe['queries_total'], + 'read_docs_total': qe['read_docs_total'], + 'written_docs_total': qe['written_docs_total'], + } + + return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d) 
+ + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = list(ORDER) + self.definitions = cluster_charts() + + self.host = self.configuration.get('host', '127.0.0.1') + self.port = self.configuration.get('port', 28015) + self.user = self.configuration.get('user', 'admin') + self.password = self.configuration.get('password') + self.timeout = self.configuration.get('timeout', 2) + + self.conn = None + self.alive = True + + def check(self): + if not HAS_RETHINKDB: + self.error('"rethinkdb" module is needed to use rethinkdbs.py') + return False + + if not self.connect(): + return None + + stats = self.get_stats() + + if not stats: + return None + + for v in stats[1:]: + if get_id(v) == 'server': + o, c = server_charts(v['server']) + self.order.extend(o) + self.definitions.update(c) + + return True + + def get_data(self): + if not self.is_alive(): + return None + + stats = self.get_stats() + + if not stats: + return None + + data = dict() + + # cluster + data.update(Cluster(stats[0]).data()) + + # servers + for v in stats[1:]: + if get_id(v) != 'server': + continue + + s = Server(v) + + if s.error(): + data['cluster_servers_missing'] += 1 + else: + data['cluster_servers_connected'] += 1 + data.update(s.data()) + + return data + + def get_stats(self): + try: + return list(rdb.db('rethinkdb').table('stats').run(self.conn).items) + except rdb.errors.ReqlError: + self.alive = False + return None + + def connect(self): + try: + self.conn = rdb.connect( + host=self.host, + port=self.port, + user=self.user, + password=self.password, + timeout=self.timeout, + ) + self.alive = True + return True + except rdb.errors.ReqlError as error: + self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error)) + return False + + def reconnect(self): + # The connection is already closed after rdb.errors.ReqlError, + # so we do not need to call conn.close() + if self.connect(): + return True + return False + + def is_alive(self): + if not self.alive: + return self.reconnect() + return True + + +def get_id(v): + return v['id'][0] diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf new file mode 100644 index 000000000..73544fc2e --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf @@ -0,0 +1,78 @@ +# netdata python.d.plugin configuration for rethinkdb +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. 
+# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, rethinkdb also supports the following: +# +# host: IP or HOSTNAME # default is 'localhost' +# port: PORT # default is 28015 +# user: USERNAME # default is 'admin' +# password: PASSWORD # not set by default +# timeout: TIMEOUT # default is 2 + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +local: + name: 'local' + host: 'localhost' diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc new file mode 100644 index 000000000..891193e6d --- /dev/null +++ b/collectors/python.d.plugin/retroshare/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += retroshare/retroshare.chart.py +dist_pythonconfig_DATA += retroshare/retroshare.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc + diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md new file mode 100644 index 000000000..e95095c65 --- /dev/null +++ b/collectors/python.d.plugin/retroshare/README.md @@ -0,0 +1 @@ +# retroshare diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py new file mode 100644 index 000000000..1d8e35050 --- /dev/null +++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +# Description: RetroShare netdata python.d module +# Authors: sehraf +# SPDX-License-Identifier: GPL-3.0-or-later + +import json + +from bases.FrameworkServices.UrlService import UrlService + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +# charts order (can be overridden if you want less charts, or different order) +ORDER = ['bandwidth', 'peers', 'dht'] + +CHARTS = { + 'bandwidth': { + 'options': [None, 'RetroShare Bandwidth', 'kB/s', 
'RetroShare', 'retroshare.bandwidth', 'area'], + 'lines': [ + ['bandwidth_up_kb', 'Upload'], + ['bandwidth_down_kb', 'Download'] + ] + }, + 'peers': { + 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'], + 'lines': [ + ['peers_all', 'All friends'], + ['peers_connected', 'Connected friends'] + ] + }, + 'dht': { + 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'], + 'lines': [ + ['dht_size_all', 'DHT nodes estimated'], + ['dht_size_rs', 'RS nodes estimated'] + ] + } +} + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.baseurl = self.configuration.get('url', 'http://localhost:9090') + self.order = ORDER + self.definitions = CHARTS + + def _get_stats(self): + """ + Format data received from http request + :return: dict + """ + try: + raw = self._get_raw_data() + parsed = json.loads(raw) + if str(parsed['returncode']) != 'ok': + return None + except (TypeError, ValueError): + return None + + return parsed['data'][0] + + def _get_data(self): + """ + Get data from API + :return: dict + """ + self.url = self.baseurl + '/api/v2/stats' + data = self._get_stats() + if data is None: + return None + + data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1 + if data['dht_active'] is False: + data['dht_size_all'] = None + data['dht_size_rs'] = None + + return data diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf new file mode 100644 index 000000000..9c92583f7 --- /dev/null +++ b/collectors/python.d.plugin/retroshare/retroshare.conf @@ -0,0 +1,74 @@ +# netdata python.d.plugin configuration for RetroShare +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. 
+# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, RetroShare also supports the following: +# +# - url: 'url' # the URL to the WebUI +# +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +localhost: + name: 'local' + url: 'http://localhost:9090' diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc new file mode 100644 index 000000000..230a8ba43 --- /dev/null +++ b/collectors/python.d.plugin/samba/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += samba/samba.chart.py +dist_pythonconfig_DATA += samba/samba.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += samba/README.md samba/Makefile.inc + diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md new file mode 100644 index 000000000..44610d373 --- /dev/null +++ b/collectors/python.d.plugin/samba/README.md @@ -0,0 +1,67 @@ +# samba + +Performance metrics of Samba file sharing. + +**Requirements:** +* `smbstatus` program +* `sudo` program +* `smbd` must be compiled with profiling enabled +* `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` +* `netdata` user needs to be able to sudo the `smbstatus` program without a password + +It produces the following charts: + +1. **Syscall R/Ws** in kilobytes/s + * sendfile + * recvfile + +2. **Smb2 R/Ws** in kilobytes/s + * readout + * writein + * readin + * writeout + +3. **Smb2 Create/Close** in operations/s + * create + * close + +4. **Smb2 Info** in operations/s + * getinfo + * setinfo + +5. **Smb2 Find** in operations/s + * find + +6. **Smb2 Notify** in operations/s + * notify + +7. **Smb2 Lesser Ops** as counters + * tcon + * negprot + * tdis + * cancel + * logoff + * flush + * lock + * keepalive + * break + * sessetup + +### prerequisite +This module uses `smbstatus` which can only be executed by root. It uses +`sudo` and assumes that it is configured such that the `netdata` user can +execute `smbstatus` as root without a password. + +Add to `sudoers`: + + netdata ALL=(root) NOPASSWD: /path/to/smbstatus + +### configuration + + **samba** is disabled by default. It should be explicitly enabled in `python.d.conf`.
+ +```yaml +samba: yes +``` + +--- diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py new file mode 100644 index 000000000..b2278de9e --- /dev/null +++ b/collectors/python.d.plugin/samba/samba.chart.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# Description: samba netdata python.d module +# Author: Christopher Cox <chris_cox@endlessnow.com> +# SPDX-License-Identifier: GPL-3.0-or-later +# +# The netdata user needs to be able to sudo the smbstatus program +# without a password: +# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P +# +# This makes calls to smbstatus -P +# +# This just looks at a couple of values out of syscall, and some from smb2. +# +# The Lesser Ops chart is merely a display of current counter values. They +# didn't seem to change much to me. However, if you notice something changing +# a lot there, bring one or more out into its own chart and make it incremental +# (like find and notify... good examples). + +import re + +from bases.collection import find_binary +from bases.FrameworkServices.ExecutableService import ExecutableService + + +disabled_by_default = True + +# default module values (can be overridden per job in `config`) +update_every = 5 +priority = 60000 +retries = 60 + +ORDER = [ + 'syscall_rw', + 'smb2_rw', + 'smb2_create_close', + 'smb2_info', + 'smb2_find', + 'smb2_notify', + 'smb2_sm_count' +] + +CHARTS = { + 'syscall_rw': { + 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'], + 'lines': [ + ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024], + ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024] + ] + }, + 'smb2_rw': { + 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'], + 'lines': [ + ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024], + ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024], + ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024], + ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024] + ] + }, + 'smb2_create_close': { + 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'], + 'lines': [ + ['smb2_create_count', 'create', 'incremental', 1, 1], + ['smb2_close_count', 'close', 'incremental', -1, 1] + ] + }, + 'smb2_info': { + 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'], + 'lines': [ + ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1], + ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1] + ] + }, + 'smb2_find': { + 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'], + 'lines': [ + ['smb2_find_count', 'find', 'incremental', 1, 1] + ] + }, + 'smb2_notify': { + 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'], + 'lines': [ + ['smb2_notify_count', 'notify', 'incremental', 1, 1] + ] + }, + 'smb2_sm_count': { + 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'], + 'lines': [ + ['smb2_tcon_count', 'tcon', 'absolute', 1, 1], + ['smb2_negprot_count', 'negprot', 'absolute', 1, 1], + ['smb2_tdis_count', 'tdis', 'absolute', 1, 1], + ['smb2_cancel_count', 'cancel', 'absolute', 1, 1], + ['smb2_logoff_count', 'logoff', 'absolute', 1, 1], + ['smb2_flush_count', 'flush', 'absolute', 1, 1], + ['smb2_lock_count', 'lock', 'absolute', 1, 1], + ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1], + ['smb2_break_count', 'break', 'absolute', 1, 1], + ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1] + ] + } +} + + +class Service(ExecutableService): + def
__init__(self, configuration=None, name=None): + ExecutableService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)') + + def check(self): + sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus') + + if not (sudo_binary and smbstatus_binary): + self.error("Can\'t locate 'sudo' or 'smbstatus' binary") + return False + + self.command = [sudo_binary, '-v'] + err = self._get_raw_data(stderr=True) + if err: + self.error(''.join(err)) + return False + + self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P']) + + return ExecutableService.check(self) + + def _get_data(self): + """ + Format data received from shell command + :return: dict + """ + raw_data = self._get_raw_data() + if not raw_data: + return None + + parsed = self.rgx_smb2.findall(' '.join(raw_data)) + + return dict(parsed) or None diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf new file mode 100644 index 000000000..ee513c60f --- /dev/null +++ b/collectors/python.d.plugin/samba/samba.conf @@ -0,0 +1,62 @@ +# netdata python.d.plugin configuration for samba +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +update_every: 5 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc new file mode 100644 index 000000000..5fb26e1c8 --- /dev/null +++ b/collectors/python.d.plugin/sensors/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += sensors/sensors.chart.py +dist_pythonconfig_DATA += sensors/sensors.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += sensors/README.md sensors/Makefile.inc + diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md new file mode 100644 index 000000000..eb1642d90 --- /dev/null +++ b/collectors/python.d.plugin/sensors/README.md @@ -0,0 +1,17 @@ +# sensors + +System sensors information. + +Charts are created dynamically. + +### configuration + +For detailed configuration information please read [`sensors.conf`](sensors.conf) file. + +### possible issues + +There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed. +We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827). +Please join this discussion for help. 
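To preview which chips and features the module will pick up, a short sketch using netdata's bundled `lm_sensors` binding (the same `third_party/lm_sensors.py` the module imports) can help; run it with that directory on `PYTHONPATH`, and note that the `cleanup()` teardown call is an assumption carried over from the upstream lm-sensors bindings:

```python
# Sketch: enumerate sensor chips and their current readings via the bundled binding.
from third_party import lm_sensors as sensors

sensors.init()
try:
    for chip in sensors.ChipIterator():
        chip_name = sensors.chip_snprintf_name(chip)
        for feature in sensors.FeatureIterator(chip):
            value = None
            # the first sub-feature carries the current reading
            for sf in sensors.SubFeatureIterator(chip, feature):
                value = sensors.get_value(chip, sf.number)
                break
            print(chip_name, sensors.get_label(chip, feature), value)
finally:
    sensors.cleanup()
```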
+ +--- diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py new file mode 100644 index 000000000..69d2bfe99 --- /dev/null +++ b/collectors/python.d.plugin/sensors/sensors.chart.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# Description: sensors netdata python.d plugin +# Author: Pawel Krupa (paulfantom) +# SPDX-License-Identifier: GPL-3.0-or-later + +from bases.FrameworkServices.SimpleService import SimpleService +from third_party import lm_sensors as sensors + +# default module values (can be overridden per job in `config`) +# update_every = 2 + +ORDER = ['temperature', 'fan', 'voltage', 'current', 'power', 'energy', 'humidity'] + +# This is a prototype of chart definition which is used to dynamically create self.definitions +CHARTS = { + 'temperature': { + 'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'], + 'lines': [ + [None, None, 'absolute', 1, 1000] + ] + }, + 'voltage': { + 'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'], + 'lines': [ + [None, None, 'absolute', 1, 1000] + ] + }, + 'current': { + 'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'], + 'lines': [ + [None, None, 'absolute', 1, 1000] + ] + }, + 'power': { + 'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'], + 'lines': [ + [None, None, 'absolute', 1, 1000000] + ] + }, + 'fan': { + 'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'], + 'lines': [ + [None, None, 'absolute', 1, 1000] + ] + }, + 'energy': { + 'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'], + 'lines': [ + [None, None, 'incremental', 1, 1000000] + ] + }, + 'humidity': { + 'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'], + 'lines': [ + [None, None, 'absolute', 1, 1000] + ] + } +} + +LIMITS = { + 'temperature': [-127, 1000], + 'voltage': [-127, 127], + 'current': [-127, 127], + 'fan': [0, 65535] +} + +TYPE_MAP = { + 0: 'voltage', + 1: 'fan', + 2: 'temperature', + 3: 'power', + 4: 'energy', + 5: 'current', + 6: 'humidity', + 7: 'max_main', + 16: 'vid', + 17: 'intrusion', + 18: 'max_other', + 24: 'beep_enable' +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = list() + self.definitions = dict() + self.chips = list() + + def get_data(self): + data = dict() + try: + for chip in sensors.ChipIterator(): + prefix = sensors.chip_snprintf_name(chip) + for feature in sensors.FeatureIterator(chip): + sfi = sensors.SubFeatureIterator(chip, feature) + for sf in sfi: + val = sensors.get_value(chip, sf.number) + break + type_name = TYPE_MAP[feature.type] + if type_name in LIMITS: + limit = LIMITS[type_name] + if val < limit[0] or val > limit[1]: + continue + data[prefix + '_' + str(feature.name.decode())] = int(val * 1000) + except Exception as error: + self.error(error) + return None + + return data or None + + def create_definitions(self): + for sensor in ORDER: + for chip in sensors.ChipIterator(): + chip_name = sensors.chip_snprintf_name(chip) + if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]): + continue + for feature in sensors.FeatureIterator(chip): + sfi = sensors.SubFeatureIterator(chip, feature) + vals = [sensors.get_value(chip, sf.number) for sf in sfi] + if vals[0] == 0: + continue + if TYPE_MAP[feature.type] == sensor: + # create chart + 
name = chip_name + '_' + TYPE_MAP[feature.type] + if name not in self.order: + self.order.append(name) + chart_def = list(CHARTS[sensor]['options']) + chart_def[1] = chip_name + chart_def[1] + self.definitions[name] = {'options': chart_def} + self.definitions[name]['lines'] = [] + line = list(CHARTS[sensor]['lines'][0]) + line[0] = chip_name + '_' + str(feature.name.decode()) + line[1] = sensors.get_label(chip, feature) + self.definitions[name]['lines'].append(line) + + def check(self): + try: + sensors.init() + except Exception as error: + self.error(error) + return False + + self.create_definitions() + + return True diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf new file mode 100644 index 000000000..83bbffd7d --- /dev/null +++ b/collectors/python.d.plugin/sensors/sensors.conf @@ -0,0 +1,63 @@ +# netdata python.d.plugin configuration for sensors +# +# This file is in YaML format. Generally the format is: +# +# name: value +# + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# Limit the number of sensor types. +# Comment the ones you want to disable. +# Also, re-arranging this list controls the order of the charts at the +# netdata dashboard. + +types: + - temperature + - fan + - voltage + - current + - power + - energy + - humidity + +# ---------------------------------------------------------------------- +# Limit the number of sensor chips. +# Uncomment the first line (chips:) and add chip names below it. +# Chip names are matched by prefix (anything that starts like that). +# You can find the chip names using the sensors command.
+ +#chips: +# - i8k +# - coretemp +# +# chip names can be found using the sensors shell command +# the prefix is matched (anything that starts like that) +# +#---------------------------------------------------------------------- + diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc new file mode 100644 index 000000000..dc1d0f3fb --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += smartd_log/smartd_log.chart.py +dist_pythonconfig_DATA += smartd_log/smartd_log.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc + diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md new file mode 100644 index 000000000..121a63573 --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/README.md @@ -0,0 +1,38 @@ +# smartd_log + +This module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes. + +It produces the following charts (you can add additional attributes in the module configuration file): + +1. **Read Error Rate** attribute 1 + +2. **Start/Stop Count** attribute 4 + +3. **Reallocated Sectors Count** attribute 5 + +4. **Seek Error Rate** attribute 7 + +5. **Power-On Hours Count** attribute 9 + +6. **Power Cycle Count** attribute 12 + +7. **Load/Unload Cycles** attribute 193 + +8. **Temperature** attribute 194 + +9. **Current Pending Sectors** attribute 197 + +10. **Off-Line Uncorrectable** attribute 198 + +11. **Write Error Rate** attribute 200 + +### configuration + +```yaml +local: + log_path : '/var/log/smartd/' +``` + +If no configuration is given, the module will attempt to read log files from the `/var/log/smartd/` directory.
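To make the log format concrete, here is a sketch of the parsing the module applies to the newest line of each `.csv` log; the sample line below is illustrative, not a verbatim smartd record:

```python
# Sketch: extract (attribute id, normalized, raw) triplets the way the module does.
import re

REGEX = re.compile(r'(\d+);(\d+);(\d+)')  # attribute id; normalized value; raw value

sample = '2018-11-07 13:22:44;\t1;100;0;\t194;28;28;\t197;100;0;'
attrs = {attr: (norm, raw) for attr, norm, raw in REGEX.findall(sample)}
print(attrs['194'])  # ('28', '28'): normalized and raw temperature
```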
+ +--- diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py new file mode 100644 index 000000000..21dbccecc --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py @@ -0,0 +1,353 @@ +# -*- coding: utf-8 -*- +# Description: smart netdata python.d module +# Author: l2isbad, vorph1 +# SPDX-License-Identifier: GPL-3.0-or-later + +import os +import re + +from collections import namedtuple +from time import time + +from bases.collection import read_last_line +from bases.FrameworkServices.SimpleService import SimpleService + +# charts order (can be overridden if you want less charts, or different order) +ORDER = ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'] + +SMART_ATTR = { + '1': 'Read Error Rate', + '2': 'Throughput Performance', + '3': 'Spin-Up Time', + '4': 'Start/Stop Count', + '5': 'Reallocated Sectors Count', + '6': 'Read Channel Margin', + '7': 'Seek Error Rate', + '8': 'Seek Time Performance', + '9': 'Power-On Hours Count', + '10': 'Spin-up Retries', + '11': 'Calibration Retries', + '12': 'Power Cycle Count', + '13': 'Soft Read Error Rate', + '100': 'Erase/Program Cycles', + '103': 'Translation Table Rebuild', + '108': 'Unknown (108)', + '170': 'Reserved Block Count', + '171': 'Program Fail Count', + '172': 'Erase Fail Count', + '173': 'Wear Leveller Worst Case Erase Count', + '174': 'Unexpected Power Loss', + '175': 'Program Fail Count', + '176': 'Erase Fail Count', + '177': 'Wear Leveling Count', + '178': 'Used Reserved Block Count', + '179': 'Used Reserved Block Count', + '180': 'Unused Reserved Block Count', + '181': 'Program Fail Count', + '182': 'Erase Fail Count', + '183': 'SATA Downshifts', + '184': 'End-to-End error', + '185': 'Head Stability', + '186': 'Induced Op-Vibration Detection', + '187': 'Reported Uncorrectable Errors', + '188': 'Command Timeout', + '189': 'High Fly Writes', + '190': 'Temperature', + '191': 'G-Sense Errors', + '192': 'Power-Off Retract Cycles', + '193': 'Load/Unload Cycles', + '194': 'Temperature', + '195': 'Hardware ECC Recovered', + '196': 'Reallocation Events', + '197': 'Current Pending Sectors', + '198': 'Off-line Uncorrectable', + '199': 'UDMA CRC Error Rate', + '200': 'Write Error Rate', + '201': 'Soft Read Errors', + '202': 'Data Address Mark Errors', + '203': 'Run Out Cancel', + '204': 'Soft ECC Corrections', + '205': 'Thermal Asperity Rate', + '206': 'Flying Height', + '207': 'Spin High Current', + '209': 'Offline Seek Performance', + '220': 'Disk Shift', + '221': 'G-Sense Error Rate', + '222': 'Loaded Hours', + '223': 'Load/Unload Retries', + '224': 'Load Friction', + '225': 'Load/Unload Cycles', + '226': 'Load-in Time', + '227': 'Torque Amplification Count', + '228': 'Power-Off Retracts', + '230': 'GMR Head Amplitude', + '231': 'Temperature', + '232': 'Available Reserved Space', + '233': 'Media Wearout Indicator', + '240': 'Head Flying Hours', + '241': 'Total LBAs Written', + '242': 'Total LBAs Read', + '250': 'Read Error Retry Rate' +} + +LIMIT = namedtuple('LIMIT', ['min', 'max']) + +LIMITS = { + '194': LIMIT(0, 200) +} + +RESCAN_INTERVAL = 60 + +REGEX = re.compile( + '(\d+);' # attribute + '(\d+);' # normalized value + '(\d+)', # raw value + re.X +) + + +def chart_template(chart_name): + units, attr_id = chart_name.split('_')[-2:] + title = '{value_type} {description}'.format(value_type=units.capitalize(), + description=SMART_ATTR[attr_id]) + family = SMART_ATTR[attr_id].lower() + + return { + chart_name: { + 'options': 
[None, title, units, family, 'smartd_log.' + chart_name, 'line'], + 'lines': [] + } + } + + +def handle_os_error(method): + def on_call(*args): + try: + return method(*args) + except OSError: + return None + return on_call + + +class SmartAttribute(object): + def __init__(self, idx, normalized, raw): + self.id = idx + self.normalized = normalized + self._raw = raw + + @property + def raw(self): + if self.id in LIMITS: + limit = LIMITS[self.id] + if limit.min <= int(self._raw) <= limit.max: + return self._raw + return None + return self._raw + + @raw.setter + def raw(self, value): + self._raw = value + + +class DiskLogFile: + def __init__(self, path): + self.path = path + self.size = os.path.getsize(path) + + @handle_os_error + def is_changed(self): + new_size = os.path.getsize(self.path) + old_size, self.size = self.size, new_size + + return new_size != old_size and new_size + + @staticmethod + @handle_os_error + def is_valid(log_file, exclude): + return all([log_file.endswith('.csv'), + not [p for p in exclude if p in log_file], + os.access(log_file, os.R_OK), + os.path.getsize(log_file)]) + + +class Disk: + def __init__(self, full_path, age): + self.log_file = DiskLogFile(full_path) + self.name = os.path.basename(full_path).split('.')[-3] + self.age = int(age) + self.status = True + self.attributes = dict() + + self.get_attributes() + + def __eq__(self, other): + if isinstance(other, Disk): + return self.name == other.name + return self.name == other + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(repr(self)) + + @handle_os_error + def is_active(self): + return (time() - os.path.getmtime(self.log_file.path)) / 60 < self.age + + @handle_os_error + def get_attributes(self): + last_line = read_last_line(self.log_file.path) + self.attributes = dict((attr, SmartAttribute(attr, normalized, raw)) for attr, normalized, raw + in REGEX.findall(last_line)) + return True + + def data(self): + data = dict() + for attr in self.attributes.values(): + data['_'.join([self.name, 'normalized', attr.id])] = attr.normalized + if attr.raw is not None: + data['_'.join([self.name, 'raw', attr.id])] = attr.raw + return data + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.log_path = self.configuration.get('log_path', '/var/log/smartd') + self.raw = self.configuration.get('raw_values', True) + self.exclude = self.configuration.get('exclude_disks', str()).split() + self.age = self.configuration.get('age', 30) + + self.runs = 0 + self.disks = list() + self.order = list() + self.definitions = dict() + + def check(self): + self.disks = self.scan() + + if not self.disks: + return None + + user_defined_sa = self.configuration.get('smart_attributes') + + if user_defined_sa: + order = user_defined_sa.split() or ORDER + else: + order = ORDER + + self.create_charts(order) + + return True + + def get_data(self): + self.runs += 1 + + if self.runs % RESCAN_INTERVAL == 0: + self.cleanup_and_rescan() + + data = dict() + + for disk in self.disks: + + if not disk.status: + continue + + changed = disk.log_file.is_changed() + + # True = changed, False = unchanged, None = Exception + if changed is None: + disk.status = False + continue + + if changed: + success = disk.get_attributes() + if not success: + disk.status = False + continue + + data.update(disk.data()) + + return data or None + + def create_charts(self, order): + for attr in order: + raw_name, normalized_name = 
'attr_id_raw_' + attr, 'attr_id_normalized_' + attr + raw, normalized = chart_template(raw_name), chart_template(normalized_name) + self.order.extend([normalized_name, raw_name]) + self.definitions.update(raw) + self.definitions.update(normalized) + + for disk in self.disks: + if attr not in disk.attributes: + self.debug("'{disk}' has no attribute '{attr_id}'".format(disk=disk.name, + attr_id=attr)) + continue + normalized[normalized_name]['lines'].append(['_'.join([disk.name, 'normalized', attr]), disk.name]) + + if not self.raw: + continue + + if disk.attributes[attr].raw is not None: + raw[raw_name]['lines'].append(['_'.join([disk.name, 'raw', attr]), disk.name]) + continue + self.debug("'{disk}' attribute '{attr_id}' value not in {limits}".format(disk=disk.name, + attr_id=attr, + limits=LIMITS[attr])) + + def cleanup_and_rescan(self): + self.cleanup() + new_disks = self.scan(only_new=True) + + for disk in new_disks: + valid = False + + for chart in self.charts: + value_type, idx = chart.id.split('_')[2:] + + if idx in disk.attributes: + valid = True + dimension_id = '_'.join([disk.name, value_type, idx]) + + if dimension_id in chart: + chart.hide_dimension(dimension_id=dimension_id, reverse=True) + else: + chart.add_dimension([dimension_id, disk.name]) + if valid: + self.disks.append(disk) + + def cleanup(self): + for disk in self.disks: + + if not disk.is_active(): + disk.status = False + if not disk.status: + for chart in self.charts: + dimension_id = '_'.join([disk.name, chart.id[8:]]) + chart.hide_dimension(dimension_id=dimension_id) + + self.disks = [disk for disk in self.disks if disk.status] + + def scan(self, only_new=None): + new_disks = list() + for f in os.listdir(self.log_path): + full_path = os.path.join(self.log_path, f) + + if DiskLogFile.is_valid(full_path, self.exclude): + disk = Disk(full_path, self.age) + + active = disk.is_active() + if active is None: + continue + if active: + if not only_new: + new_disks.append(disk) + else: + if disk not in self.disks: + new_disks.append(disk) + else: + if not only_new: + self.debug("'{disk}' not updated in the last {age} minutes, " + "skipping it.".format(disk=disk.name, age=self.age)) + return new_disks diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf new file mode 100644 index 000000000..3fab3f1c0 --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf @@ -0,0 +1,90 @@ +# netdata python.d.plugin configuration for smartd log +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. 
+# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, smartd_log also supports the following: +# +# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd +# raw_values: yes # enable/disable raw values charts. Enabled by default. +# smart_attributes: '1 2 3 4 44' # smart attributes charts. Default are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200']. +# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it. +# +# ---------------------------------------------------------------------- +# Additional information +# The plugin reads smartd log files (written when smartd runs with the -A option). +# You need to add '-i 600 -A /var/log/smartd/' to /etc/default/smartmontools (see man smartd) to pass these options to smartd on startup +# Then restart the smartd service and check the log directory +# ls /var/log/smartd/ +# CDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv WDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv ZDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv +# +# Smartd APPENDS to its logs at every run. It is NOT RECOMMENDED to set the '-i' option below 60 sec. +# It is STRONGLY RECOMMENDED to create a logrotate conf file for the smartd logs +# +# RAW vs NORMALIZED values +# "Normalized value", commonly referred to as just "value". This is the most universal measurement, on the scale from 0 (bad) to some maximum (good) value. +# Maximum values are typically 100, 200 or 253. Rule of thumb is: high values are good, low values are bad. +# +# "Raw value" - the value of the attribute as it is tracked by the device, before any normalization takes place. +# Some raw numbers provide valuable insight when properly interpreted. +# Raw values are typically listed in hexadecimal numbers. The raw value has a different structure for different vendors and is often not meaningful as a decimal number.
+# +# ---------------------------------------------------------------------- diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc new file mode 100644 index 000000000..f9fa8b6b0 --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += spigotmc/spigotmc.chart.py +dist_pythonconfig_DATA += spigotmc/spigotmc.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc + diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md new file mode 100644 index 000000000..ae5602587 --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/README.md @@ -0,0 +1,22 @@ +# spigotmc + +This module does some really basic monitoring for Spigot Minecraft servers. + +It provides two charts, one tracking server-side ticks-per-second in +1, 5 and 15 minute averages, and one tracking the number of currently +active users. + +This is not compatible with Spigot plugins which change the format of +the data returned by the `tps` or `list` console commands. + +### configuration + +```yaml +host: localhost +port: 25575 +password: pass +``` + +By default, a connection to port 25575 on the local system is attempted with an empty password. + +--- diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py new file mode 100644 index 000000000..a5e5ee0ee --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# Description: spigotmc netdata python.d module +# Author: Austin S. Hemmelgarn (Ferroin) +# SPDX-License-Identifier: GPL-3.0-or-later + +import socket +import platform + +from bases.FrameworkServices.SimpleService import SimpleService + +from third_party import mcrcon + +# Update only every 5 seconds because collection takes in excess of +# 100ms sometimes, and most people won't care about second-by-second data.
+update_every = 5 + +PRECISION = 100 + +ORDER = ['tps', 'users'] + +CHARTS = { + 'tps': { + 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'], + 'lines': [ + ['tps1', '1 Minute Average', 'absolute', 1, PRECISION], + ['tps5', '5 Minute Average', 'absolute', 1, PRECISION], + ['tps15', '15 Minute Average', 'absolute', 1, PRECISION] + ] + }, + 'users': { + 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'], + 'lines': [ + ['users', 'Users', 'absolute', 1, 1] + ] + } +} + + +class Service(SimpleService): + def __init__(self, configuration=None, name=None): + SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS + self.host = self.configuration.get('host', 'localhost') + self.port = self.configuration.get('port', 25575) + self.password = self.configuration.get('password', '') + self.console = mcrcon.MCRcon() + self.alive = True + + def check(self): + if platform.system() != 'Linux': + self.error('Only supported on Linux.') + return False + try: + self.connect() + except (mcrcon.MCRconException, socket.error) as err: + self.error('Error connecting.') + self.error(repr(err)) + return False + return True + + def connect(self): + self.console.connect(self.host, self.port, self.password) + + def reconnect(self): + try: + try: + self.console.disconnect() + except mcrcon.MCRconException: + pass + self.console.connect(self.host, self.port, self.password) + self.alive = True + except (mcrcon.MCRconException, socket.error) as err: + self.error('Error connecting.') + self.error(repr(err)) + return False + return True + + def is_alive(self): + if (not self.alive) or \ + self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1: + return self.reconnect() + return True + + def _get_data(self): + if not self.is_alive(): + return None + data = {} + try: + raw = self.console.command('tps') + # The above command returns a string that looks like this: + # '§6TPS from last 1m, 5m, 15m: §a19.99, §a19.99, §a19.99\n' + # The values we care about are the three numbers after the : + tmp = raw.split(':')[1].split(',') + data['tps1'] = float(tmp[0].lstrip(u' §a*')) * PRECISION + data['tps5'] = float(tmp[1].lstrip(u' §a*')) * PRECISION + data['tps15'] = float(tmp[2].lstrip(u' §a*').rstrip()) * PRECISION + except mcrcon.MCRconException: + self.error('Unable to fetch TPS values.') + except socket.error: + self.error('Connection is dead.') + self.alive = False + return None + except (TypeError, LookupError): + self.error('Unable to process TPS values.') + try: + raw = self.console.command('list') + # The above command returns a string that looks like this: + # 'There are 0/20 players online:' + # We care about the first number here. + data['users'] = int(raw.split()[2].split('/')[0]) + except mcrcon.MCRconException: + self.error('Unable to fetch user counts.') + except socket.error: + self.error('Connection is dead.') + self.alive = False + return None + except (TypeError, LookupError): + self.error('Unable to process user counts.') + return data diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf new file mode 100644 index 000000000..3ba492def --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf @@ -0,0 +1,68 @@ +# netdata python.d.plugin configuration for spigotmc +# +# This file is in YaML format. 
Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, spigotmc supports the following:
+#
+# host: localhost    # The host to connect to. Defaults to the local system.
+# port: 25575        # The port the remote console is listening on.
+# password: ''       # The remote console password. Must be set correctly.
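For reference, the TPS parsing performed by `spigotmc.chart.py` above can be reproduced standalone. This is a minimal sketch, assuming the raw string format the `tps` console command returns (as documented in the module's own comments); the sample values are illustrative:

```python
# -*- coding: utf-8 -*-
# Minimal sketch of the TPS parsing done in spigotmc.chart.py.
PRECISION = 100

# Sample response of the `tps` console command (format from the module docs).
raw = u'\u00a76TPS from last 1m, 5m, 15m: \u00a7a19.99, \u00a7a19.99, \u00a7a19.99\n'

# The values of interest are the three numbers after the colon; lstrip()
# removes the leading '\u00a7a' colour code (and any '*' marker) from each.
tmp = raw.split(':')[1].split(',')
data = {
    'tps1': float(tmp[0].lstrip(u' \u00a7a*')) * PRECISION,
    'tps5': float(tmp[1].lstrip(u' \u00a7a*')) * PRECISION,
    'tps15': float(tmp[2].lstrip(u' \u00a7a*').rstrip()) * PRECISION,
}
print(data)  # TPS averages scaled by PRECISION (100)
```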
diff --git a/collectors/python.d.plugin/springboot/Makefile.inc b/collectors/python.d.plugin/springboot/Makefile.inc
new file mode 100644
index 000000000..06775f937
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later

+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT

+# install these files
+dist_python_DATA += springboot/springboot.chart.py
+dist_pythonconfig_DATA += springboot/springboot.conf

+# do not install these files, but include them in the distribution
+dist_noinst_DATA += springboot/README.md springboot/Makefile.inc

diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
new file mode 100644
index 000000000..008436a4f
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -0,0 +1,129 @@
+# springboot
+
+This module will monitor one or more Java Spring Boot applications, depending on configuration.
+
+It produces the following charts:
+
+1. **Response Codes** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+ * others
+
+2. **Threads**
+ * daemon
+ * total
+
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
+ * Copy
+ * MarkSweep
+ * ...
+
+4. **Heap Memory Usage** in KB
+ * used
+ * committed
+
+### configuration
+
+Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
+
+---
+
+# Monitoring Java Spring Boot Applications
+
+Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in the Spring Boot library.
+
+The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
+* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+* set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+You can create custom metrics by adding and injecting a PublicMetrics implementation in your application.
+Here is an example of adding custom metrics:
+```java
+package com.example;
+
+import org.springframework.boot.actuate.endpoint.PublicMetrics;
+import org.springframework.boot.actuate.metrics.Metric;
+import org.springframework.stereotype.Service;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.ArrayList;
+import java.util.Collection;
+
+@Service
+public class HeapPoolMetrics implements PublicMetrics {
+
+    private static final String PREFIX = "mempool.";
+    private static final String KEY_EDEN = PREFIX + "eden";
+    private static final String KEY_SURVIVOR = PREFIX + "survivor";
+    private static final String KEY_TENURED = PREFIX + "tenured";
+
+    @Override
+    public Collection<Metric<?>> metrics() {
+        Collection<Metric<?>> result = new ArrayList<>(4);
+        for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
+            String poolName = mem.getName();
+            String name = null;
+            if (poolName.indexOf("Eden Space") != -1) {
+                name = KEY_EDEN;
+            } else if (poolName.indexOf("Survivor Space") != -1) {
+                name = KEY_SURVIVOR;
+            } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
+                name = KEY_TENURED;
+            }
+
+            if (name != null) {
+                result.add(newMemoryMetric(name, mem.getUsage().getMax()));
+                result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
+                result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
+                result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
+            }
+        }
+        return result;
+    }
+
+    private Metric<Long> newMemoryMetric(String name, long bytes) {
+        return new Metric<>(name, bytes / 1024);
+    }
+}
+```
+
+Please refer to [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
+
+## Using netdata springboot module
+
+The springboot module is enabled by default. By default it looks up `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot applications. You can change this by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system run `/etc/netdata/edit-config python.d/springboot.conf`).
+
+This module defines some common charts, and you can add custom charts by changing the configuration.
+
+The configuration format looks like this:
+```yaml
+<id>:
+  name: '<name>'
+  url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
+  user: '<username>' # optional
+  pass: '<password>' # optional
+  defaults:
+    [<chart-id>]: true|false
+  extras:
+  - id: '<chart-id>'
+    options:
+      title: '***'
+      units: '***'
+      family: '***'
+      context: 'springboot.***'
+      charttype: 'stacked' | 'area' | 'line'
+    lines:
+    - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
+    - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
+```
+
+By default, it creates `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts.
+You can disable the default charts by setting `defaults.<chart-id>: false`.
+
+The dimension names of extra charts must use `_` in place of `.` (e.g. the metric `myapp.ok` becomes the dimension `myapp_ok`).
+
+Please check [springboot.conf](springboot.conf) for more examples.
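The `.` to `_` mapping above is exactly the transformation the chart module applies to every Actuator metric key (see `springboot.chart.py` below); a minimal sketch, with hypothetical metric keys:

```python
# Sketch of how springboot.chart.py derives netdata dimension ids from
# Actuator metric keys: every '.' becomes '_'. The sample keys are
# hypothetical.
for key in ('mem.free', 'mempool.eden.used', 'myapp.ok'):
    print('{0} -> {1}'.format(key, key.replace('.', '_')))
# mem.free -> mem_free
# mempool.eden.used -> mempool_eden_used
# myapp.ok -> myapp_ok
```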
\ No newline at end of file
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
new file mode 100644
index 000000000..7df37e1d0
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# Description: springboot netdata python.d module
+# Author: Wing924
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+
+DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
+
+DEFAULT_CHARTS = {
+    'response_code': {
+        'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
+        'lines': [
+            ["resp_other", 'Other', 'incremental'],
+            ["resp_1xx", '1xx', 'incremental'],
+            ["resp_2xx", '2xx', 'incremental'],
+            ["resp_3xx", '3xx', 'incremental'],
+            ["resp_4xx", '4xx', 'incremental'],
+            ["resp_5xx", '5xx', 'incremental'],
+        ]
+    },
+    'threads': {
+        'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
+        'lines': [
+            ["threads_daemon", 'daemon', 'absolute'],
+            ["threads", 'total', 'absolute'],
+        ]
+    },
+    'gc_time': {
+        'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
+        'lines': [
+            ["gc_copy_time", 'Copy', 'incremental'],
+            ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
+            ["gc_parnew_time", 'ParNew', 'incremental'],
+            ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
+            ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
+            ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
+            ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
+            ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
+        ]
+    },
+    'gc_ope': {
+        'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
+        'lines': [
+            ["gc_copy_count", 'Copy', 'incremental'],
+            ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
+            ["gc_parnew_count", 'ParNew', 'incremental'],
+            ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
+            ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
+            ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
+            ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
+            ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
+        ]
+    },
+    'heap': {
+        'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
+        'lines': [
+            ["heap_committed", 'committed', "absolute"],
+            ["heap_used", 'used', "absolute"],
+        ]
+    }
+}
+
+
+class ExtraChartError(ValueError):
+    pass
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.url = self.configuration.get('url', "http://localhost:8080/metrics")
+        self._setup_charts()
+
+    def _get_data(self):
+        """
+        Format data received from http request
+        :return: dict
+        """
+        raw_data = self._get_raw_data()
+        if not raw_data:
+            return None
+
+        try:
+            data = json.loads(raw_data)
+        except ValueError:
+            self.debug('%s is not a valid JSON page' % self.url)
+            return None
+
+        result = {
+            'resp_1xx': 0,
+            'resp_2xx': 0,
+            'resp_3xx': 0,
+            'resp_4xx': 0,
+            'resp_5xx': 0,
+            'resp_other': 0,
+        }
+
+        # Use items() so this works on both Python 2 and Python 3
+        # (dicts have no iteritems() on Python 3).
+        for key, value in data.items():
+            # The Actuator exposes HTTP response counters as
+            # 'counter.status.<code>.<path>'; bucket them by the first digit
+            # of the status code. Everything else passes through with '.'
+            # replaced by '_' to form a valid dimension id.
+            
if 'counter.status.' in key: + status_type = key[15:16] + 'xx' + if status_type[0] not in '12345': + status_type = 'other' + result['resp_' + status_type] += value + else: + result[key.replace('.', '_')] = value + + return result or None + + def _setup_charts(self): + self.order = [] + self.definitions = {} + defaults = self.configuration.get('defaults', {}) + + for chart in DEFAULT_ORDER: + if defaults.get(chart, True): + self.order.append(chart) + self.definitions[chart] = DEFAULT_CHARTS[chart] + + for extra in self.configuration.get('extras', []): + self._add_extra_chart(extra) + self.order.append(extra['id']) + + def _add_extra_chart(self, chart): + chart_id = chart.get('id', None) or self.die('id is not defined in extra chart') + options = chart.get('options', None) or self.die('option is not defined in extra chart: %s' % chart_id) + lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id) + + title = options.get('title', None) or self.die('title is missing: %s' % chart_id) + units = options.get('units', None) or self.die('units is missing: %s' % chart_id) + family = options.get('family', title) + context = options.get('context', 'springboot.' + title) + charttype = options.get('charttype', 'line') + + result = { + 'options': [None, title, units, family, context, charttype], + 'lines': [], + } + + for line in lines: + dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id) + name = line.get('name', dimension) + algorithm = line.get('algorithm', 'absolute') + multiplier = line.get('multiplier', 1) + divisor = line.get('divisor', 1) + result['lines'].append([dimension, name, algorithm, multiplier, divisor]) + + self.definitions[chart_id] = result + + @staticmethod + def die(error_message): + raise ExtraChartError(error_message) diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf new file mode 100644 index 000000000..40b5fb437 --- /dev/null +++ b/collectors/python.d.plugin/springboot/springboot.conf @@ -0,0 +1,120 @@ +# netdata python.d.plugin configuration for springboot +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. 
+# autodetection_retry: 0

+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/metrics'  # the URL of the spring boot actuator metrics
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# defaults:
+#   [chart_id]: true | false  # enables/disables default charts, defaults true.
+# extras: {}                  # defines extra charts to monitor, please see the example below
+#   - id: [chart_id]
+#     options: {}
+#     lines: []
+#
+# If all defaults are disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Configuration example
+# ---------------------
+# example:
+#   name: 'example'
+#   url: 'http://localhost:8080/metrics'
+#   defaults:
+#     response_code: true
+#     threads: true
+#     gc_time: true
+#     gc_ope: true
+#     heap: false
+#   extras:
+#   - id: 'heap'
+#     options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' }
+#     lines:
+#       - { dimension: 'mem_free', name: 'free'}
+#       - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1}
+#       - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1}
+#       - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1}
+#   - id: 'heap_eden'
+#     options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
+#     lines:
+#       - { dimension: 'mempool_eden_used', name: 'used'}
+#       - { dimension: 'mempool_eden_committed', name: 'committed'}
+#   - id: 'heap_survivor'
+#     options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
+#     lines:
+#       - { dimension: 'mempool_survivor_used', name: 'used'}
+#       - { dimension: 'mempool_survivor_committed', name: 'committed'}
+#   - id: 'heap_tenured'
+#     options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
+#     lines:
+#       - { dimension: 'mempool_tenured_used', name: 'used'}
+#       - { dimension: 'mempool_tenured_committed', name: 'committed'}


+local:
+  name: 'local'
+  url: 'http://localhost:8080/metrics'

+local_ip:
+  name: 'local'
+  url: 'http://127.0.0.1:8080/metrics'
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
new file mode 100644
index 000000000..76ecff81e
--- /dev/null
+++ b/collectors/python.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later

+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT

+# install these files
+dist_python_DATA += squid/squid.chart.py
+dist_pythonconfig_DATA += squid/squid.conf

+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc

diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 100644
index 000000000..9c9b62f27
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1,38 @@
+# squid
+
+This module will monitor one or more squid instances depending on configuration.
+
+It produces the following charts:
+
+1. **Client Bandwidth** in kilobits/s
+ * in
+ * out
+ * hits
+
+2. **Client Requests** in requests/s
+ * requests
+ * hits
+ * errors
+
+3. **Server Bandwidth** in kilobits/s
+ * in
+ * out
+
+4. **Server Requests** in requests/s
+ * requests
+ * errors
+
+### configuration
+
+```yaml
+priority : 50000
+
+local:
+  request : 'cache_object://localhost:3128/counters'
+  host    : 'localhost'
+  port    : 3128
+```
+
+Without any configuration, the module will try to autodetect where squid presents its `counters` data.
+
+---
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
new file mode 100644
index 000000000..fd54168f0
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Description: squid netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
+
+CHARTS = {
+    'clients_net': {
+        'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
+        'lines': [
+            ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+            ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+            ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+        ]
+    },
+    'clients_requests': {
+        'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
+        'lines': [
+            ['client_http_requests', 'requests', 'incremental'],
+            ['client_http_hits', 'hits', 'incremental'],
+            ['client_http_errors', 'errors', 'incremental', -1, 1]
+        ]
+    },
+    'servers_net': {
+        'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
+        'lines': [
+            ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+            ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+        ]
+    },
+    'servers_requests': {
+        'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
+        'lines': [
+            ['server_all_requests', 'requests', 'incremental'],
+            ['server_all_errors', 'errors', 'incremental', -1, 1]
+        ]
+    }
+}
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self._keep_alive = True
+        self.request = ''
+        self.host = 'localhost'
+        self.port = 3128
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Get data via http request
+        :return: dict
+        """
+        response = self._get_raw_data()
+
+        data = dict()
+        try:
+            raw = ''
+            for tmp in response.split('\r\n'):
+                if tmp.startswith('sample_time'):
+                    raw = tmp
+                    break
+
+            if raw.startswith('<'):
+                # A leading '<' means squid replied with an HTML/XML error
+                # page instead of cache-manager counters output.
+                
self.error('invalid data received') + return None + + for row in raw.split('\n'): + if row.startswith(('client', 'server.all')): + tmp = row.split('=') + data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1]) + + except (ValueError, AttributeError, TypeError): + self.error('invalid data received') + return None + + if not data: + self.error('no data received') + return None + return data + + def _check_raw_data(self, data): + header = data[:1024].lower() + + if 'connection: keep-alive' in header: + self._keep_alive = True + else: + self._keep_alive = False + + if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response + self.debug('received full response from squid') + return True + + self.debug('waiting more data from squid') + return False + + def check(self): + """ + Parse essential configuration, autodetect squid configuration (if needed), and check if data is available + :return: boolean + """ + self._parse_config() + # format request + req = self.request.decode() + if not req.startswith('GET'): + req = 'GET ' + req + if not req.endswith(' HTTP/1.1\r\n\r\n'): + req += ' HTTP/1.1\r\n\r\n' + self.request = req.encode() + if self._get_data() is not None: + return True + else: + return False diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf new file mode 100644 index 000000000..564187f00 --- /dev/null +++ b/collectors/python.d.plugin/squid/squid.conf @@ -0,0 +1,169 @@ +# netdata python.d.plugin configuration for squid +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are: +# +# job_name: +# name: myname # the JOB's name as it will appear at the +# # dashboard (by default is the job_name) +# # JOBs sharing a name are mutually exclusive +# update_every: 1 # the JOB's data collection frequency +# priority: 60000 # the JOB's order on the dashboard +# retries: 60 # the JOB's number of restoration attempts +# autodetection_retry: 0 # the JOB's re-check interval in seconds +# +# Additionally to the above, squid also supports the following: +# +# host : 'IP or HOSTNAME' # the host to connect to +# port : PORT # the port to connect to +# request: 'URL' # the URL to request from squid +# + +# ---------------------------------------------------------------------- +# SQUID CONFIGURATION +# +# See: +# http://wiki.squid-cache.org/Features/CacheManager +# +# In short, add to your squid configuration these: +# +# http_access allow localhost manager +# http_access deny manager +# +# To remotely monitor a squid: +# +# acl managerAdmin src 192.0.2.1 +# http_access allow localhost manager +# http_access allow managerAdmin manager +# http_access deny manager +# + +# ---------------------------------------------------------------------- +# AUTO-DETECTION JOBS +# only one of them will run (they have the same name) + +tcp3128old: + name : 'local' + host : 'localhost' + port : 3128 + request : 'cache_object://localhost:3128/counters' + +tcp8080old: + name : 'local' + host : 'localhost' + port : 8080 + request : 'cache_object://localhost:3128/counters' + +tcp3128new: + name : 'local' + host : 'localhost' + port : 3128 + request : '/squid-internal-mgr/counters' + +tcp8080new: + name : 'local' + host : 'localhost' + port : 8080 + request : '/squid-internal-mgr/counters' + +# IPv4 + +tcp3128oldipv4: + name : 'local' + host : '127.0.0.1' + port : 3128 + request : 'cache_object://127.0.0.1:3128/counters' + +tcp8080oldipv4: + name : 'local' + host : '127.0.0.1' + port : 8080 + request : 'cache_object://127.0.0.1:3128/counters' + +tcp3128newipv4: + name : 'local' + host : '127.0.0.1' + port : 3128 + request : '/squid-internal-mgr/counters' + +tcp8080newipv4: + name : 'local' + host : '127.0.0.1' + port : 8080 + request : '/squid-internal-mgr/counters' + +# IPv6 + +tcp3128oldipv6: + name : 'local' + host : '::1' + port : 3128 + request : 'cache_object://[::1]:3128/counters' + +tcp8080oldipv6: + name : 'local' + host : '::1' + port : 8080 + request : 'cache_object://[::1]:3128/counters' + +tcp3128newipv6: + name : 'local' + host : '::1' + port : 3128 + request : '/squid-internal-mgr/counters' + +tcp8080newipv6: + name : 'local' + host : '::1' + port : 8080 + request : '/squid-internal-mgr/counters' + diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc new file mode 100644 index 000000000..940a7835e --- /dev/null +++ b/collectors/python.d.plugin/tomcat/Makefile.inc @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_python_DATA += tomcat/tomcat.chart.py +dist_pythonconfig_DATA += tomcat/tomcat.conf + +# do not install these files, but include them in the distribution +dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc + diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md new file mode 100644 index 000000000..e548bd338 --- /dev/null +++ b/collectors/python.d.plugin/tomcat/README.md @@ 
-0,0 +1,33 @@
+# tomcat
+
+Presents memory utilization of tomcat containers.
+
+Charts:
+
+1. **Requests** per second
+ * accesses
+
+2. **Volume** in KB/s
+ * volume
+
+3. **Threads**
+ * current
+ * busy
+
+4. **JVM Free Memory** in MB
+ * jvm
+
+### configuration
+
+```yaml
+localhost:
+  name : 'local'
+  url  : 'http://127.0.0.1:8080/manager/status?XML=true'
+  user : 'tomcat_username'
+  pass : 'secret_tomcat_password'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any credentials, so it will probably fail.
+
+---
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
new file mode 100644
index 000000000..3c2d0ed40
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Description: tomcat netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
+
+CHARTS = {
+    'accesses': {
+        'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
+        'lines': [
+            ['requestCount', 'accesses', 'incremental'],
+            ['errorCount', 'errors', 'incremental'],
+        ]
+    },
+    'bandwidth': {
+        'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+        'lines': [
+            ['bytesSent', 'sent', 'incremental', 1, 1024],
+            ['bytesReceived', 'received', 'incremental', 1, 1024],
+        ]
+    },
+    'processing_time': {
+        'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
+        'lines': [
+            ['processingTime', 'processing time', 'incremental', 1, 1000]
+        ]
+    },
+    'threads': {
+        'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
+        'lines': [
+            ['currentThreadCount', 'current', 'absolute'],
+            ['currentThreadsBusy', 'busy', 'absolute']
+        ]
+    },
+    'jvm': {
+        'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
+        'lines': [
+            ['free', 'free', 'absolute', 1, 1048576],
+            ['eden_used', 'eden', 'absolute', 1, 1048576],
+            ['survivor_used', 'survivor', 'absolute', 1, 1048576],
+            ['tenured_used', 'tenured', 'absolute', 1, 1048576],
+            ['code_cache_used', 'code cache', 'absolute', 1, 1048576],
+            ['compressed_used', 'compressed', 'absolute', 1, 1048576],
+            ['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
+        ]
+    },
+    'jvm_eden': {
+        'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
+        'lines': [
+            ['eden_used', 'used', 'absolute', 1, 1048576],
+            ['eden_committed', 'committed', 'absolute', 1, 1048576],
+            ['eden_max', 'max', 'absolute', 1, 1048576]
+        ]
+    },
+    'jvm_survivor': {
+        'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
+        'lines': [
+            ['survivor_used', 'used', 'absolute', 1, 1048576],
+            ['survivor_committed', 'committed', 'absolute', 1, 1048576],
+            ['survivor_max', 'max', 'absolute', 1, 1048576]
+        ]
+    },
+    'jvm_tenured': {
+        'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
+        'lines': [
+            ['tenured_used', 'used', 'absolute', 1, 1048576],
+            ['tenured_committed', 'committed', 'absolute', 1, 1048576],
+            ['tenured_max', 'max', 'absolute', 1, 1048576]
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+        self.connector_name = self.configuration.get('connector_name', None)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Format data received from http request
+        :return: dict
+        """
+        data = None
+        raw_data = self._get_raw_data()
+        if raw_data:
+            try:
+                xml = ET.fromstring(raw_data)
+            except ET.ParseError:
+                self.debug('%s is not a valid XML page. Please add "?XML=true" to tomcat status page.' % self.url)
+                return None
+            data = {}
+
+            jvm = xml.find('jvm')
+
+            connector = None
+            if self.connector_name:
+                for conn in xml.findall('connector'):
+                    if self.connector_name in conn.get('name'):
+                        connector = conn
+                        break
+            else:
+                connector = xml.find('connector')
+
+            memory = jvm.find('memory')
+            data['free'] = memory.get('free')
+            data['total'] = memory.get('total')
+
+            for pool in jvm.findall('memorypool'):
+                name = pool.get('name')
+                if 'Eden Space' in name:
+                    data['eden_used'] = pool.get('usageUsed')
+                    data['eden_committed'] = pool.get('usageCommitted')
+                    data['eden_max'] = pool.get('usageMax')
+                elif 'Survivor Space' in name:
+                    data['survivor_used'] = pool.get('usageUsed')
+                    data['survivor_committed'] = pool.get('usageCommitted')
+                    data['survivor_max'] = pool.get('usageMax')
+                elif 'Tenured Gen' in name or 'Old Gen' in name:
+                    data['tenured_used'] = pool.get('usageUsed')
+                    data['tenured_committed'] = pool.get('usageCommitted')
+                    data['tenured_max'] = pool.get('usageMax')
+                elif name == 'Code Cache':
+                    data['code_cache_used'] = pool.get('usageUsed')
+                    data['code_cache_committed'] = pool.get('usageCommitted')
+                    data['code_cache_max'] = pool.get('usageMax')
+                elif name == 'Compressed':
+                    data['compressed_used'] = pool.get('usageUsed')
+                    data['compressed_committed'] = pool.get('usageCommitted')
+                    data['compressed_max'] = pool.get('usageMax')
+                elif name == 'Metaspace':
+                    data['metaspace_used'] = pool.get('usageUsed')
+                    data['metaspace_committed'] = pool.get('usageCommitted')
+                    data['metaspace_max'] = pool.get('usageMax')
+
+            if connector:
+                thread_info = connector.find('threadInfo')
+                data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
+                data['currentThreadCount'] = thread_info.get('currentThreadCount')
+
+                request_info = connector.find('requestInfo')
+                data['processingTime'] = request_info.get('processingTime')
+                data['requestCount'] = request_info.get('requestCount')
+                data['errorCount'] = request_info.get('errorCount')
+                data['bytesReceived'] = request_info.get('bytesReceived')
+                data['bytesSent'] = request_info.get('bytesSent')
+
+        return data or None
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 000000000..c63f06cfa
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).

+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.

+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1

+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000

+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60

+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0

+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, tomcat also supports the following:
+#
+# url: 'URL'  # the URL of the tomcat status page
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if you have multiple connectors, the following are supported:
+#
+# connector_name: 'ajp-bio-8009'  # default is null, which uses the first connector in the status XML
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)

+localhost:
+  name : 'local'
+  url  : 'http://localhost:8080/manager/status?XML=true'

+localipv4:
+  name : 'local'
+  url  : 'http://127.0.0.1:8080/manager/status?XML=true'

+localipv6:
+  name : 'local'
+  url  : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
new file mode 100644
index 000000000..926d56dda
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later

+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT

+# install these files
+dist_python_DATA += traefik/traefik.chart.py
+dist_pythonconfig_DATA += traefik/traefik.conf

+# do not install these files, but include them in the distribution
+dist_noinst_DATA += traefik/README.md traefik/Makefile.inc

diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 000000000..9b4a18208
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,54 @@
+# traefik
+
+This module uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Responses** by codes
+ * 2xx (successful)
+ * 5xx (internal server errors)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 1xx (informational)
+ * other (non-standard responses)
+
+3. **Detailed Response Codes** in requests/s (number of responses for each response code family individually)
+
+4. **Requests**/s
+ * request statistics
+
+5. **Total response time**
+ * sum of all response times
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+ * Traefik server uptime
+
+### configuration
+
+Needs only the `url` to the server's `health` endpoint.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 1
+priority     : 60000
+
+local:
+  url     : 'http://localhost:8080/health'
+  retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/health`.
+
+---
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
new file mode 100644
index 000000000..dc8933220
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Description: traefik netdata python.d module
+# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+from collections import defaultdict
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+    'response_statuses',
+    'response_codes',
+    'detailed_response_codes',
+    'requests',
+    'total_response_time',
+    'average_response_time',
+    'average_response_time_per_iteration',
+    'uptime'
+]
+
+CHARTS = {
+    'response_statuses': {
+        'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
+        'lines': [
+            ['successful_requests', 'success', 'incremental'],
+            ['server_errors', 'error', 'incremental'],
+            ['redirects', 'redirect', 'incremental'],
+            ['bad_requests', 'bad', 'incremental'],
+            ['other_requests', 'other', 'incremental']
+        ]
+    },
+    'response_codes': {
+        'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
+        'lines': [
+            ['2xx', None, 'incremental'],
+            ['5xx', None, 'incremental'],
+            ['3xx', None, 'incremental'],
+            ['4xx', None, 'incremental'],
+            ['1xx', None, 'incremental'],
+            ['other', None, 'incremental']
+        ]
+    },
+    'detailed_response_codes': {
+        'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+                    'stacked'],
+        'lines': []
+    },
+    'requests': {
+        'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
+        'lines': [
+            ['total_count', 'requests', 'incremental']
+        ]
+    },
+    'total_response_time': {
+        'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
+        'lines': [
+            ['total_response_time_sec', 'response', 'absolute', 1, 10000]
+        ]
+    },
+    'average_response_time':
{ + 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'], + 'lines': [ + ['average_response_time_sec', 'response', 'absolute', 1, 1000] + ] + }, + 'average_response_time_per_iteration': { + 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings', + 'traefik.average_response_time_per_iteration', 'line'], + 'lines': [ + ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000] + ] + }, + 'uptime': { + 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'], + 'lines': [ + ['uptime_sec', 'uptime', 'absolute'] + ] + } +} + +HEALTH_STATS = [ + 'uptime_sec', + 'average_response_time_sec', + 'total_response_time_sec', + 'total_count', + 'total_status_code_count' +] + + +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.url = self.configuration.get('url', 'http://localhost:8080/health') + self.order = ORDER + self.definitions = CHARTS + self.data = { + 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0, + 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0, + '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0, + 'average_response_time_per_iteration_sec': 0 + } + self.last_total_response_time = 0 + self.last_total_count = 0 + + def _get_data(self): + data = self._get_raw_data() + + if not data: + return None + + data = loads(data) + + self.get_data_per_code_status(raw_data=data) + + self.get_data_per_code_family(raw_data=data) + + self.get_data_per_code(raw_data=data) + + self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS)) + + self.data['average_response_time_sec'] *= 1000000 + self.data['total_response_time_sec'] *= 10000 + if data['total_count'] != self.last_total_count: + self.data['average_response_time_per_iteration_sec'] = \ + (data['total_response_time_sec'] - self.last_total_response_time) * \ + 1000000 / (data['total_count'] - self.last_total_count) + else: + self.data['average_response_time_per_iteration_sec'] = 0 + self.last_total_response_time = data['total_response_time_sec'] + self.last_total_count = data['total_count'] + + return self.data or None + + def get_data_per_code_status(self, raw_data): + data = defaultdict(int) + for code, value in raw_data['total_status_code_count'].items(): + code_prefix = code[0] + if code_prefix == '1' or code_prefix == '2' or code == '304': + data['successful_requests'] += value + elif code_prefix == '3': + data['redirects'] += value + elif code_prefix == '4': + data['bad_requests'] += value + elif code_prefix == '5': + data['server_errors'] += value + else: + data['other_requests'] += value + self.data.update(data) + + def get_data_per_code_family(self, raw_data): + data = defaultdict(int) + for code, value in raw_data['total_status_code_count'].items(): + code_prefix = code[0] + if code_prefix == '1': + data['1xx'] += value + elif code_prefix == '2': + data['2xx'] += value + elif code_prefix == '3': + data['3xx'] += value + elif code_prefix == '4': + data['4xx'] += value + elif code_prefix == '5': + data['5xx'] += value + else: + data['other'] += value + self.data.update(data) + + def get_data_per_code(self, raw_data): + for code, value in raw_data['total_status_code_count'].items(): + if self.charts: + if code not in self.data: + self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental']) + self.data[code] = value + + +def fetch_data_(raw_data, metrics): + data = dict() + + for 
metric in metrics: + value = raw_data + metrics_list = metric.split('.') + try: + for m in metrics_list: + value = value[m] + except KeyError: + continue + data['_'.join(metrics_list)] = value + + return data diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf new file mode 100644 index 000000000..909b9e549 --- /dev/null +++ b/collectors/python.d.plugin/traefik/traefik.conf @@ -0,0 +1,79 @@ +# netdata python.d.plugin configuration for traefik health data API +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. +# +# Any number of jobs is supported. +# +# All python.d.plugin JOBS (for all its modules) support a set of +# predefined parameters. 
These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 10             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, the traefik plugin also supports the following:
+#
+# url: '<scheme>://<host>:<port>/<health_page_api>'
+#   # http://localhost:8080/health
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+  url: 'http://localhost:8080/health'
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/unbound/Makefile.inc
new file mode 100644
index 000000000..59c306aed
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later

+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT

+# install these files
+dist_python_DATA += unbound/unbound.chart.py
+dist_pythonconfig_DATA += unbound/unbound.conf

+# do not install these files, but include them in the distribution
+dist_noinst_DATA += unbound/README.md unbound/Makefile.inc

diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
new file mode 100644
index 000000000..3b4fa16fd
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -0,0 +1,76 @@
+# unbound
+
+Monitoring uses the remote control interface to fetch statistics.
+
+Provides the following charts:
+
+1. **Queries Processed**
+ * Ratelimited
+ * Cache Misses
+ * Cache Hits
+ * Expired
+ * Prefetched
+ * Recursive
+
+2. **Request List**
+ * Average Size
+ * Max Size
+ * Overwritten Requests
+ * Overruns
+ * Current Size
+ * User Requests
+
+3. **Recursion Timings**
+ * Average recursion processing time
+ * Median recursion processing time
+
+If extended stats are enabled, also provides:
+
+4. **Cache Sizes**
+ * Message Cache
+ * RRset Cache
+ * Infra Cache
+ * DNSSEC Key Cache
+ * DNSCrypt Shared Secret Cache
+ * DNSCrypt Nonce Cache
+
+### configuration
+
+Unbound must be manually configured to enable the remote-control protocol.
+Check the Unbound documentation for info on how to do this. Additionally,
+if you want to take advantage of the autodetection this plugin offers,
+you will need to make sure your `unbound.conf` file only uses spaces for
+indentation (the default config shipped by most distributions uses tabs
+instead of spaces).
+
+Once you have the Unbound control protocol enabled, you need to make sure
+that either the certificate and key are readable by Netdata (if you're
+using the regular control interface), or that the socket is accessible
+to Netdata (if you're using a UNIX socket for the control interface).
+
+By default, for the local system, everything can be auto-detected
+assuming Unbound is configured correctly and has been told to listen
+on the loopback interface or a UNIX socket. This is done by looking
+up info in the Unbound config file specified by the `ubconf` key.
+
+To enable extended stats for a given job, add `extended: yes` to the
+definition.
+
+You can also enable per-thread charts for a given job by adding
+`per_thread: yes` to the definition. Note that the number of threads
+is only checked on startup.
+
+A basic local configuration with extended statistics and per-thread
+charts looks like this:
+
+```yaml
+local:
+  ubconf: /etc/unbound/unbound.conf
+  extended: yes
+  per_thread: yes
+```
+
+While it's a bit more complicated to set up correctly, it is recommended
+that you use a UNIX socket, as it provides far better performance.
+
+---
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
new file mode 100644
index 000000000..52fcbf7e2
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Description: unbound netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import sys
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.loaders import YamlOrderedLoader
+
+PRECISION = 1000
+
+ORDER = ['queries', 'recursion', 'reqlist']
+
+CHARTS = {
+    'queries': {
+        'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
+        'lines': [
+            ['ratelimit', 'ratelimited', 'absolute', 1, 1],
+            ['cachemiss', 'cache_miss', 'absolute', 1, 1],
+            ['cachehit', 'cache_hit', 'absolute', 1, 1],
+            ['expired', 'expired', 'absolute', 1, 1],
+            ['prefetch', 'prefetched', 'absolute', 1, 1],
+            ['recursive', 'recursive', 'absolute', 1, 1]
+        ]
+    },
+    'recursion': {
+        'options': [None, 'Recursion Timings', 'seconds', 'Unbound', 'unbound.recursion', 'line'],
+        'lines': [
+            ['recursive_avg', 'average', 'absolute', 1, PRECISION],
+            ['recursive_med', 'median', 'absolute', 1, PRECISION]
+        ]
+    },
+    'reqlist': {
+        'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
+        'lines': [
+            ['reqlist_avg', 'average_size', 'absolute', 1, 1],
+            ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
+            ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+            ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+            ['reqlist_current', 'current_size', 'absolute', 1, 1],
+            ['reqlist_user', 'user_requests', 'absolute', 1, 1]
+        ]
+    }
+}
+
+# These get added too if we are told to use extended stats.
+EXTENDED_ORDER = ['cache']
+
+EXTENDED_CHARTS = {
+    'cache': {
+        'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
+        'lines': [
+            ['cache_message', 'message_cache', 'absolute', 1, 1],
+            ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
+            ['cache_infra', 'infra_cache', 'absolute', 1, 1],
+            ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
+            ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
+            ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
+        ]
+    }
+}
+
+# This is used as a template for the per-thread charts.
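+# Chart titles and dimension ids below contain '{longname}' and
+# '{shortname}' placeholders; _get_perthread_info() further down fills
+# them in for each thread (e.g. 'Thread 0' / 'thread0').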
+PER_THREAD_CHARTS = {
+    '_queries': {
+        'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
+                    'unbound.threads.queries', 'line'],
+        'lines': [
+            ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
+            ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
+            ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
+            ['{shortname}_expired', 'expired', 'absolute', 1, 1],
+            ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
+            ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
+        ]
+    },
+    '_recursion': {
+        'options': [None, '{longname} Recursion Timings', 'seconds', 'Recursive Timings',
+                    'unbound.threads.recursion', 'line'],
+        'lines': [
+            ['{shortname}_recursive_avg', 'average', 'absolute', 1, PRECISION],
+            ['{shortname}_recursive_med', 'median', 'absolute', 1, PRECISION]
+        ]
+    },
+    '_reqlist': {
+        'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
+        'lines': [
+            ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
+            ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
+            ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+            ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+            ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
+            ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
+        ]
+    }
+}
+
+
+# This maps the Unbound stat names to our names and precision requirements.
+STAT_MAP = {
+    'total.num.queries_ip_ratelimited': ('ratelimit', 1),
+    'total.num.cachehits': ('cachehit', 1),
+    'total.num.cachemiss': ('cachemiss', 1),
+    'total.num.zero_ttl': ('expired', 1),
+    'total.num.prefetch': ('prefetch', 1),
+    'total.num.recursivereplies': ('recursive', 1),
+    'total.requestlist.avg': ('reqlist_avg', 1),
+    'total.requestlist.max': ('reqlist_max', 1),
+    'total.requestlist.overwritten': ('reqlist_overwritten', 1),
+    'total.requestlist.exceeded': ('reqlist_exceeded', 1),
+    'total.requestlist.current.all': ('reqlist_current', 1),
+    'total.requestlist.current.user': ('reqlist_user', 1),
+    'total.recursion.time.avg': ('recursive_avg', PRECISION),
+    'total.recursion.time.median': ('recursive_med', PRECISION),
+    'msg.cache.count': ('cache_message', 1),
+    'rrset.cache.count': ('cache_rrset', 1),
+    'infra.cache.count': ('cache_infra', 1),
+    'key.cache.count': ('cache_key', 1),
+    'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
+    'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
+}
+
+# Same as above, but for per-thread stats.
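+# For example, the Unbound stat 'thread0.num.cachehits' becomes the
+# 'thread0_cachehit' dimension, collected at a precision of 1.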
+PER_THREAD_STAT_MAP = { + '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1), + '{shortname}.num.cachehits': ('{shortname}_cachehit', 1), + '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1), + '{shortname}.num.zero_ttl': ('{shortname}_expired', 1), + '{shortname}.num.prefetch': ('{shortname}_prefetch', 1), + '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1), + '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1), + '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1), + '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1), + '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1), + '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1), + '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1), + '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION), + '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION) +} + + +# Used to actually generate per-thread charts. +def _get_perthread_info(thread): + sname = 'thread{0}'.format(thread) + lname = 'Thread {0}'.format(thread) + charts = dict() + order = [] + statmap = dict() + + for item in PER_THREAD_CHARTS: + cname = '{0}{1}'.format(sname, item) + chart = deepcopy(PER_THREAD_CHARTS[item]) + chart['options'][1] = chart['options'][1].format(longname=lname) + + for index, line in enumerate(chart['lines']): + chart['lines'][index][0] = line[0].format(shortname=sname) + + order.append(cname) + charts[cname] = chart + + for key, value in PER_THREAD_STAT_MAP.items(): + statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1]) + + return (charts, order, statmap) + + +class Service(SocketService): + def __init__(self, configuration=None, name=None): + # The unbound control protocol is always TLS encapsulated + # unless it's used over a UNIX socket, so enable TLS _before_ + # doing the normal SocketService initialization. 
+        configuration['tls'] = True
+        self.port = 8953
+        SocketService.__init__(self, configuration, name)
+        self.ext = self.configuration.get('extended', None)
+        self.ubconf = self.configuration.get('ubconf', None)
+        self.perthread = self.configuration.get('per_thread', False)
+        self.threads = None
+        self.order = deepcopy(ORDER)
+        self.definitions = deepcopy(CHARTS)
+        self.request = 'UBCT1 stats\n'
+        self.statmap = deepcopy(STAT_MAP)
+        self._parse_config()
+        self._auto_config()
+        self.debug('Extended stats: {0}'.format(self.ext))
+        self.debug('Per-thread stats: {0}'.format(self.perthread))
+        if self.ext:
+            self.order = self.order + EXTENDED_ORDER
+            self.definitions.update(EXTENDED_CHARTS)
+        if self.unix_socket:
+            self.debug('Using unix socket: {0}'.format(self.unix_socket))
+        else:
+            self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
+            self.debug('Using key: {0}'.format(self.key))
+            self.debug('Using certificate: {0}'.format(self.cert))
+
+    def _auto_config(self):
+        if self.ubconf and os.access(self.ubconf, os.R_OK):
+            self.debug('Unbound config: {0}'.format(self.ubconf))
+            conf = YamlOrderedLoader.load_config_from_file(self.ubconf)[0]
+            if self.ext is None:
+                if 'extended-statistics' in conf['server']:
+                    self.ext = conf['server']['extended-statistics']
+            if 'remote-control' in conf:
+                if conf['remote-control'].get('control-use-cert', False):
+                    self.key = self.key or conf['remote-control'].get('control-key-file')
+                    self.cert = self.cert or conf['remote-control'].get('control-cert-file')
+                    self.port = self.port or conf['remote-control'].get('control-port')
+                else:
+                    self.unix_socket = self.unix_socket or conf['remote-control'].get('control-interface')
+        else:
+            self.debug('Unbound configuration not found.')
+        if not self.key:
+            self.key = '/etc/unbound/unbound_control.key'
+        if not self.cert:
+            self.cert = '/etc/unbound/unbound_control.pem'
+        if not self.port:
+            self.port = 8953
+
+    def _generate_perthread_charts(self):
+        tmporder = list()
+        for thread in range(0, self.threads):
+            charts, order, statmap = _get_perthread_info(thread)
+            tmporder.extend(order)
+            self.definitions.update(charts)
+            self.statmap.update(statmap)
+        self.order.extend(sorted(tmporder))
+
+    def check(self):
+        # Check if authentication is working.
+        self._connect()
+        result = bool(self._sock)
+        self._disconnect()
+        # If auth works, and we need per-thread charts, query the server
+        # to see how many threads it's using. This somewhat abuses the
+        # SocketService API to get the data we need.
+        if result and self.perthread:
+            tmp = self.request
+            if sys.version_info[0] < 3:
+                self.request = 'UBCT1 status\n'
+            else:
+                self.request = b'UBCT1 status\n'
+            raw = self._get_raw_data()
+            for line in raw.splitlines():
+                if line.startswith('threads'):
+                    self.threads = int(line.split()[1])
+                    self._generate_perthread_charts()
+                    break
+            if self.threads is None:
+                self.info('Unable to auto-detect thread count, disabling per-thread stats.')
+                self.perthread = False
+            self.request = tmp
+        return result
+
+    @staticmethod
+    def _check_raw_data(data):
+        # The server will close the connection when it's done sending
+        # data, so just keep looping until that happens.
+        return False
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        data = dict()
+        tmp = dict()
+        for line in raw.splitlines():
+            stat = line.split('=')
+            tmp[stat[0]] = stat[1]
+        for item in self.statmap:
+            if item in tmp:
+                data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
+        return data
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
new file mode 100644
index 000000000..46c4b097f
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for unbound
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, unbound also supports the following:
+#
+#     host: localhost  # The host to connect to.
+#     port: 8953       # What port to use (defaults to 8953)
+#     socket: /path/to/socket  # A path to a UNIX socket to use instead
+#                              # of a TCP connection
+#     tls_key_file: /path/to/key    # The key file to use for authentication
+#     tls_cert_file: /path/to/cert  # The certificate to use for authentication
+#     extended: false    # Whether to collect extended stats or not
+#     per_thread: false  # Whether to show charts for per-thread stats
+#
+# In addition to the above, you can set the following to try and
+# auto-detect most settings based on the unbound configuration:
+#
+#     ubconf: /etc/unbound/unbound.conf
+#
+# Note that the SSL key and certificate need to be readable by the user
+# unbound runs as, if you're using the regular control interface.
+# If you're using a UNIX socket, that has to be readable by the netdata user.

+# The following should work for most users if they have unbound configured
+# correctly.
+local:
+    ubconf: /etc/unbound/unbound.conf
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
new file mode 100644
index 000000000..75d96de0e
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += uwsgi/uwsgi.chart.py
+dist_pythonconfig_DATA += uwsgi/uwsgi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
new file mode 100644
index 000000000..a062710df
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -0,0 +1,37 @@
+# uwsgi
+
+This module monitors uwsgi performance metrics.
+
+https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+
+Lines are created dynamically based on the number of workers.
+
+The following charts are drawn:
+
+1. **Requests**
+ * requests per second
+ * transmitted data
+ * average request time
+
+2. **Memory**
+ * rss
+ * vsz
+
+3. **Exceptions**
+4. **Harakiris**
+5. **Respawns**
+
+### configuration
+
+```yaml
+socket:
+  name   : 'local'
+  socket : '/tmp/stats.socket'
+
+localhost:
+  name : 'local'
+  host : 'localhost'
+  port : 1717
+```
+
+When no configuration file is found, the module tries to connect to TCP/IP socket: `localhost:1717`.
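+
+The module reads from uwsgi's stats server, which has to be enabled in the
+uwsgi configuration. A minimal sketch (the socket path is only an example
+and has to match the `socket` option of the job above):
+
+```ini
+[uwsgi]
+; expose the uwsgi stats server on a UNIX socket
+stats = /tmp/stats.socket
+```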
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
new file mode 100644
index 000000000..5ebcfb55b
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# Description: uwsgi netdata python.d module
+# Author: Robbert Segeren (robbert-ef)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from copy import deepcopy
+from bases.FrameworkServices.SocketService import SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+ORDER = [
+    'requests',
+    'tx',
+    'avg_rt',
+    'memory_rss',
+    'memory_vsz',
+    'exceptions',
+    'harakiri',
+    'respawn',
+]
+
+DYNAMIC_CHARTS = [
+    'requests',
+    'tx',
+    'avg_rt',
+    'memory_rss',
+    'memory_vsz',
+]
+
+# NOTE: lines are created dynamically in the `_get_data()` method
+CHARTS = {
+    'requests': {
+        'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
+        'lines': [
+            ['requests', 'requests', 'incremental']
+        ]
+    },
+    'tx': {
+        'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'],
+        'lines': [
+            ['tx', 'tx', 'incremental']
+        ]
+    },
+    'avg_rt': {
+        'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'],
+        'lines': [
+            ['avg_rt', 'avg_rt', 'absolute']
+        ]
+    },
+    'memory_rss': {
+        'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+        'lines': [
+            ['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024]
+        ]
+    },
+    'memory_vsz': {
+        'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+        'lines': [
+            ['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024]
+        ]
+    },
+    'exceptions': {
+        'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
+        'lines': [
+            ['exceptions', 'exceptions', 'incremental']
+        ]
+    },
+    'harakiri': {
+        'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
+        'lines': [
+            ['harakiri_count', 'harakiris', 'incremental']
+        ]
+    },
+    'respawn': {
+        'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
+        'lines': [
+            ['respawn_count', 'respawns', 'incremental']
+        ]
+    },
+}
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        super(Service, self).__init__(configuration=configuration, name=name)
+        self.url = self.configuration.get('host', 'localhost')
+        self.port = self.configuration.get('port', 1717)
+        self.order = ORDER
+        self.definitions = deepcopy(CHARTS)
+
+        # Clear dynamic dimensions; these are added during `_get_data()` to allow adding workers at run-time
+        for chart in DYNAMIC_CHARTS:
+            self.definitions[chart]['lines'] = []
+
+        self.last_result = {}
+        self.workers = []
+
+    def read_data(self):
+        """
+        Read data from socket and parse as JSON.
+        :return: (dict) stats
+        """
+        raw_data = self._get_raw_data()
+        if not raw_data:
+            return None
+        try:
+            return json.loads(raw_data)
+        except ValueError as err:
+            self.error(err)
+            return None
+
+    def check(self):
+        """
+        Parse configuration and check if we can read data.
+        :return: boolean
+        """
+        self._parse_config()
+        return bool(self.read_data())
+
+    def add_worker_dimensions(self, key):
+        """
+        Helper to add dimensions for a worker.
+ :param key: (int or str) worker identifier + :return: + """ + for chart in DYNAMIC_CHARTS: + for line in CHARTS[chart]['lines']: + dimension_id = '{}_{}'.format(line[0], key) + dimension_name = str(key) + + dimension = [dimension_id, dimension_name] + line[2:] + self.charts[chart].add_dimension(dimension) + + @staticmethod + def _check_raw_data(data): + # The server will close the connection when it's done sending + # data, so just keep looping until that happens. + return False + + def _get_data(self): + """ + Read data from socket + :return: dict + """ + stats = self.read_data() + if not stats: + return None + + result = { + 'exceptions': 0, + 'harakiri_count': 0, + 'respawn_count': 0, + } + + for worker in stats['workers']: + key = worker['pid'] + + # Add dimensions for new workers + if key not in self.workers: + self.add_worker_dimensions(key) + self.workers.append(key) + + result['requests_{}'.format(key)] = worker['requests'] + result['tx_{}'.format(key)] = worker['tx'] + result['avg_rt_{}'.format(key)] = worker['avg_rt'] + + # avg_rt is not reset by uwsgi, so reset here + if self.last_result.get('requests_{}'.format(key)) == worker['requests']: + result['avg_rt_{}'.format(key)] = 0 + + result['memory_rss_{}'.format(key)] = worker['rss'] + result['memory_vsz_{}'.format(key)] = worker['vsz'] + + result['exceptions'] += worker['exceptions'] + result['harakiri_count'] += worker['harakiri_count'] + result['respawn_count'] += worker['respawn_count'] + + self.last_result = result + return result diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf new file mode 100644 index 000000000..be1c2ada3 --- /dev/null +++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf @@ -0,0 +1,94 @@ +# netdata python.d.plugin configuration for uwsgi +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. +# autodetection_retry: 0 + +# ---------------------------------------------------------------------- +# JOBS (data collection sources) +# +# The default JOBS share the same *name*. JOBS with the same name +# are mutually exclusive. Only one of them will be allowed running at +# any time. This allows autodetection to try several alternatives and +# pick the one that works. 
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, uwsgi also supports the following:
+#
+#     socket: 'path/to/uwsgistats.sock'
+#
+# or
+#     host: 'IP or HOSTNAME'  # the host to connect to
+#     port: PORT              # the port to connect to
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+socket:
+    name   : 'local'
+    socket : '/tmp/stats.socket'
+
+localhost:
+    name : 'local'
+    host : 'localhost'
+    port : 1717
+
+localipv4:
+    name : 'local'
+    host : '127.0.0.1'
+    port : 1717
+
+localipv6:
+    name : 'local'
+    host : '::1'
+    port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 000000000..2469b0592
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 000000000..96c7cafaa
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,69 @@
+# varnish
+
+This module uses the `varnishstat` command to provide varnish cache statistics.
+
+It produces:
+
+1. **Connections Statistics** in connections/s
+ * accepted
+ * dropped
+
+2. **Client Requests** in requests/s
+ * received
+
+3. **All History Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+4. **Current Poll Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+5. **Expired Objects** in expired/s
+ * objects
+
+6. **Least Recently Used Nuked Objects** in nuked/s
+ * objects
+
+7. **Number Of Threads In All Pools** in threads
+ * threads
+
+8. **Threads Statistics** in threads/s
+ * created
+ * failed
+ * limited
+
+9. **Current Queue Length** in requests
+ * in queue
+
+10. **Backend Connections Statistics** in connections/s
+ * successful
+ * unhealthy
+ * reused
+ * closed
+ * recycled
+ * failed
+
+11. **Requests To The Backend** in requests/s
+ * received
+
+12. **ESI Statistics** in problems/s
+ * errors
+ * warnings
+
+13. **Memory Usage** in MB
+ * free
+ * allocated
+
+14. **Uptime** in seconds
+ * uptime
+
+### configuration
+
+No configuration is needed.
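+
+The module simply runs `varnishstat -1` and parses its output, so the only
+requirement is that the netdata user is able to execute `varnishstat`. If no
+charts appear, you can verify access from a shell (a sketch; the netdata
+user name may differ on your system):
+
+```sh
+# run varnishstat the same way the module does, as the netdata user
+sudo -u netdata varnishstat -1 | head
+```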
+ +--- diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py new file mode 100644 index 000000000..d889c2b33 --- /dev/null +++ b/collectors/python.d.plugin/varnish/varnish.chart.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- +# Description: varnish netdata python.d module +# Author: l2isbad +# SPDX-License-Identifier: GPL-3.0-or-later + +import re + +from bases.collection import find_binary +from bases.FrameworkServices.ExecutableService import ExecutableService + +# default module values (can be overridden per job in `config`) +# update_every = 2 +priority = 60000 +retries = 60 + +ORDER = [ + 'session_connections', + 'client_requests', + 'all_time_hit_rate', + 'current_poll_hit_rate', + 'cached_objects_expired', + 'cached_objects_nuked', + 'threads_total', + 'threads_statistics', + 'threads_queue_len', + 'backend_connections', + 'backend_requests', + 'esi_statistics', + 'memory_usage', + 'uptime' +] + +CHARTS = { + 'session_connections': { + 'options': [None, 'Connections Statistics', 'connections/s', + 'client metrics', 'varnish.session_connection', 'line'], + 'lines': [ + ['sess_conn', 'accepted', 'incremental'], + ['sess_dropped', 'dropped', 'incremental'] + ] + }, + 'client_requests': { + 'options': [None, 'Client Requests', 'requests/s', + 'client metrics', 'varnish.client_requests', 'line'], + 'lines': [ + ['client_req', 'received', 'incremental'] + ] + }, + 'all_time_hit_rate': { + 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance', + 'varnish.all_time_hit_rate', 'stacked'], + 'lines': [ + ['cache_hit', 'hit', 'percentage-of-absolute-row'], + ['cache_miss', 'miss', 'percentage-of-absolute-row'], + ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']] + }, + 'current_poll_hit_rate': { + 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance', + 'varnish.current_poll_hit_rate', 'stacked'], + 'lines': [ + ['cache_hit', 'hit', 'percentage-of-incremental-row'], + ['cache_miss', 'miss', 'percentage-of-incremental-row'], + ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row'] + ] + }, + 'cached_objects_expired': { + 'options': [None, 'Expired Objects', 'expired/s', 'cache performance', + 'varnish.cached_objects_expired', 'line'], + 'lines': [ + ['n_expired', 'objects', 'incremental'] + ] + }, + 'cached_objects_nuked': { + 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance', + 'varnish.cached_objects_nuked', 'line'], + 'lines': [ + ['n_lru_nuked', 'objects', 'incremental'] + ] + }, + 'threads_total': { + 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics', + 'varnish.threads_total', 'line'], + 'lines': [ + ['threads', None, 'absolute'] + ] + }, + 'threads_statistics': { + 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics', + 'varnish.threads_statistics', 'line'], + 'lines': [ + ['threads_created', 'created', 'incremental'], + ['threads_failed', 'failed', 'incremental'], + ['threads_limited', 'limited', 'incremental'] + ] + }, + 'threads_queue_len': { + 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics', + 'varnish.threads_queue_len', 'line'], + 'lines': [ + ['thread_queue_len', 'in queue'] + ] + }, + 'backend_connections': { + 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics', + 'varnish.backend_connections', 'line'], + 'lines': [ + ['backend_conn', 'successful', 'incremental'], + ['backend_unhealthy', 
'unhealthy', 'incremental'],
+            ['backend_reuse', 'reused', 'incremental'],
+            ['backend_toolate', 'closed', 'incremental'],
+            ['backend_recycle', 'recycled', 'incremental'],
+            ['backend_fail', 'failed', 'incremental']
+        ]
+    },
+    'backend_requests': {
+        'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
+                    'varnish.backend_requests', 'line'],
+        'lines': [
+            ['backend_req', 'sent', 'incremental']
+        ]
+    },
+    'esi_statistics': {
+        'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
+        'lines': [
+            ['esi_errors', 'errors', 'incremental'],
+            ['esi_warnings', 'warnings', 'incremental']
+        ]
+    },
+    'memory_usage': {
+        'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+        'lines': [
+            ['memory_free', 'free', 'absolute', 1, 1 << 20],
+            ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
+    },
+    'uptime': {
+        'lines': [
+            ['uptime', None, 'absolute']
+        ],
+        'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
+    }
+}
+
+
+class Parser:
+    _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
+    _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)')
+    _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
+
+    def __init__(self):
+        self.re_default = None
+        self.re_backend = None
+
+    def init(self, data):
+        data = ''.join(data)
+        parsed_main = Parser._default.findall(data)
+        if parsed_main:
+            self.re_default = Parser._default
+
+        parsed_backend = Parser._backend_new.findall(data)
+        if parsed_backend:
+            self.re_backend = Parser._backend_new
+        else:
+            parsed_backend = Parser._backend_old.findall(data)
+            if parsed_backend:
+                self.re_backend = Parser._backend_old
+
+    def server_stats(self, data):
+        return self.re_default.findall(''.join(data))
+
+    def backend_stats(self, data):
+        return self.re_backend.findall(''.join(data))
+
+
+class Service(ExecutableService):
+    def __init__(self, configuration=None, name=None):
+        ExecutableService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        varnishstat = find_binary('varnishstat')
+        self.command = [varnishstat, '-1'] if varnishstat else None
+        self.parser = Parser()
+
+    def check(self):
+        if not self.command:
+            self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata")
+            return False
+
+        # STDOUT is not empty
+        reply = self._get_raw_data()
+        if not reply:
+            self.error("No output from 'varnishstat'. Not enough privileges?")
+            return False
+
+        self.parser.init(reply)
+
+        # Output is parsable
+        if not self.parser.re_default:
+            self.error("Can't parse the output...")
+            return False
+
+        if self.parser.re_backend:
+            backends = [b[0] for b in self.parser.backend_stats(reply)[::2]]
+            self.create_backends_charts(backends)
+        return True
+
+    def get_data(self):
+        """
+        Format data received from shell command
+        :return: dict
+        """
+        raw = self._get_raw_data()
+        if not raw:
+            return None
+
+        data = dict()
+        server_stats = self.parser.server_stats(raw)
+        if not server_stats:
+            return None
+
+        if self.parser.re_backend:
+            backend_stats = self.parser.backend_stats(raw)
+            data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats))
+
+        data.update(dict((param, value) for _, param, value in server_stats))
+
+        # varnish 5 uses default.g_bytes and default.g_space
+        data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+        data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
+
+        return data
+
+    def create_backends_charts(self, backends):
+        for backend in backends:
+            chart_name = ''.join([backend, '_response_statistics'])
+            title = 'Backend "{0}"'.format(backend.capitalize())
+            hdr_bytes = ''.join([backend, '_beresp_hdrbytes'])
+            body_bytes = ''.join([backend, '_beresp_bodybytes'])
+
+            chart = {
+                chart_name:
+                    {
+                        'options': [None, title, 'kilobits/s', 'backend response statistics',
+                                    'varnish.backend', 'area'],
+                        'lines': [
+                            [hdr_bytes, 'header', 'incremental', 8, 1000],
+                            [body_bytes, 'body', 'incremental', -8, 1000]
+                        ]
+                    }
+            }
+
+            self.order.insert(0, chart_name)
+            self.definitions.update(chart)
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
new file mode 100644
index 000000000..4b069d514
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -0,0 +1,64 @@
+# netdata python.d.plugin configuration for varnish
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
new file mode 100644
index 000000000..bddf146f5
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += w1sensor/w1sensor.chart.py
+dist_pythonconfig_DATA += w1sensor/w1sensor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 100644
index 000000000..b18f08351
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1,13 @@
+# w1sensor
+
+Monitors data from 1-Wire sensors.
+On Linux these are supported by the wire, w1_gpio, and w1_therm kernel modules.
+Currently temperature sensors are supported and automatically detected.
+
+Charts are created dynamically based on the number of detected sensors.
+
+### configuration
+
+For detailed configuration information please read the [`w1sensor.conf`](w1sensor.conf) file.
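+
+As a sketch, sensors can be given human readable names in `w1sensor.conf`
+via the `name_<1-Wire id>` job option (the job name and the identifiers
+below are only examples):
+
+```yaml
+sensors:
+  name_00000022276e: 'Machine room'
+  name_00000022298f: 'Rack 12'
+```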
+
+---
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
new file mode 100644
index 000000000..493c4a135
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Description: 1-wire temperature monitor netdata python.d module
+# Author: Diomidis Spinellis <http://www.spinellis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+# Location where 1-Wire devices can be found
+W1_DIR = '/sys/bus/w1/devices/'
+
+# Lines matching the following regular expression contain a temperature value
+RE_TEMP = re.compile(r' t=(\d+)')
+
+ORDER = ['temp']
+
+CHARTS = {
+    'temp': {
+        'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
+        'lines': []
+    }
+}
+
+# Known and supported family members
+# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
+THERM_FAMILY = {
+    '10': 'W1_THERM_DS18S20',
+    '22': 'W1_THERM_DS1822',
+    '28': 'W1_THERM_DS18B20',
+    '3b': 'W1_THERM_DS1825',
+    '42': 'W1_THERM_DS28EA00',
+}
+
+
+class Service(SimpleService):
+    """Provide netdata service for 1-Wire sensors"""
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.probes = []
+
+    def check(self):
+        """Auto-detect available 1-Wire sensors, setting line definitions
+        and probes to be monitored."""
+        try:
+            file_names = os.listdir(W1_DIR)
+        except OSError as err:
+            self.error(err)
+            return False
+
+        lines = []
+        for file_name in file_names:
+            if file_name[2] != '-':
+                continue
+            if file_name[0:2] not in THERM_FAMILY:
+                continue
+
+            self.probes.append(file_name)
+            identifier = file_name[3:]
+            name = identifier
+            config_name = self.configuration.get('name_' + identifier)
+            if config_name:
+                name = config_name
+            lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
+                          1, 10])
+        self.definitions['temp']['lines'] = lines
+        return len(self.probes) > 0
+
+    def get_data(self):
+        """Return data read from sensors."""
+        data = dict()
+
+        for file_name in self.probes:
+            file_path = W1_DIR + file_name + '/w1_slave'
+            identifier = file_name[3:]
+            try:
+                with open(file_path, 'r') as device_file:
+                    for line in device_file:
+                        matched = RE_TEMP.search(line)
+                        if matched:
+                            # Round to one decimal digit to filter out noise
+                            value = round(int(matched.group(1)) / 1000., 1)
+                            value = int(value * 10)
+                            data['w1sensor_temp_' + identifier] = value
+            except (OSError, IOError) as err:
+                self.error(err)
+                continue
+        return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
new file mode 100644
index 000000000..a4aed8dd7
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for w1sensor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 5         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, w1sensor also supports the following:
+#
+#     name_<1-Wire id>: '<human readable name>'
+# This allows associating a human readable name with a sensor's 1-Wire
+# identifier. Example:
+#     name_00000022276e: 'Machine room'
+#     name_00000022298f: 'Rack 12'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/web_log/Makefile.inc b/collectors/python.d.plugin/web_log/Makefile.inc
new file mode 100644
index 000000000..893115992
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += web_log/web_log.chart.py
+dist_pythonconfig_DATA += web_log/web_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += web_log/README.md web_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
new file mode 100644
index 000000000..6e8ea1dd5
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -0,0 +1,64 @@
+# web_log
+
+Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
+
+It produces the following charts:
+
+1. **Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Bandwidth** KB/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+5. **Timings** ms (request processing time)
+ * min (the shortest request processing time)
+ * max (the longest request processing time)
+ * average (the average request processing time)
+
+6. **Requests per url** requests/s (configured by user)
+
+7. **Http Methods** requests/s (requests per http method)
+
+8. **Http Versions** requests/s (requests per http version)
+
+9. **IP protocols** requests/s (requests per ip protocol version)
+
+10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
+
+11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
+
+### configuration
+
+```yaml
+nginx_log:
+  name  : 'nginx_log'
+  path  : '/var/log/nginx/access.log'
+
+apache_log:
+  name  : 'apache_log'
+  path  : '/var/log/apache/other_vhosts_access.log'
+  categories:
+      cacti : 'cacti.*'
+      observium : 'observium'
+```
+
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+
+---
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
new file mode 100644
index 000000000..20e15f4cb
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -0,0 +1,1194 @@
+# -*- coding: utf-8 -*-
+# Description: web log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import bisect
+import re
+import os
+
+from collections import namedtuple, defaultdict
+from copy import deepcopy
+
+try:
+    from itertools import filterfalse
+except ImportError:
+    from itertools import ifilter as filter
+    from itertools import ifilterfalse as filterfalse
+
+try:
+    from sys import maxint
+except ImportError:
+    from sys import maxsize as maxint
+
+from bases.collection import read_last_line
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER_APACHE_CACHE = ['apache_cache']
+
+ORDER_WEB = [
+    'response_statuses',
+    'response_codes',
+    'bandwidth',
+    'response_time',
+    'response_time_hist',
+    'response_time_upstream',
+    'response_time_upstream_hist',
+    'requests_per_url',
+    'requests_per_user_defined',
+    'http_method',
+    'vhost',
+    'port',
+    'http_version',
+    'requests_per_ipproto',
+    'clients',
+    'clients_all'
+]
+
+ORDER_SQUID = [
+    'squid_response_statuses',
+    'squid_response_codes',
+    'squid_detailed_response_codes',
+    'squid_method',
+    'squid_mime_type',
+    'squid_hier_code',
+    'squid_transport_methods',
+    'squid_transport_errors',
+    'squid_code',
+    'squid_handling_opts',
+    'squid_object_types',
+    'squid_cache_events',
+    'squid_bytes',
+    'squid_duration',
+    'squid_clients',
+    'squid_clients_all'
+]
+
+CHARTS_WEB = {
+    'response_codes': {
+        'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
+        'lines': [
+            ['2xx', None, 'incremental'],
+            ['5xx', None, 'incremental'],
+            ['3xx', None, 'incremental'],
+            ['4xx', None, 'incremental'],
+            ['1xx', None, 'incremental'],
+            ['0xx', 
'other', 'incremental'], + ['unmatched', None, 'incremental'] + ] + }, + 'bandwidth': { + 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'], + 'lines': [ + ['resp_length', 'received', 'incremental', 8, 1000], + ['bytes_sent', 'sent', 'incremental', -8, 1000] + ] + }, + 'response_time': { + 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'], + 'lines': [ + ['resp_time_min', 'min', 'incremental', 1, 1000], + ['resp_time_max', 'max', 'incremental', 1, 1000], + ['resp_time_avg', 'avg', 'incremental', 1, 1000] + ] + }, + 'response_time_hist': { + 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'], + 'lines': [] + }, + 'response_time_upstream': { + 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings', + 'web_log.response_time_upstream', 'area'], + 'lines': [ + ['resp_time_upstream_min', 'min', 'incremental', 1, 1000], + ['resp_time_upstream_max', 'max', 'incremental', 1, 1000], + ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000] + ] + }, + 'response_time_upstream_hist': { + 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', + 'web_log.response_time_upstream_hist', 'line'], + 'lines': [] + }, + 'clients': { + 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'], + 'lines': [ + ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1], + ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1] + ] + }, + 'clients_all': { + 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'], + 'lines': [ + ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1], + ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1] + ] + }, + 'http_method': { + 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'], + 'lines': [ + ['GET', 'GET', 'incremental', 1, 1] + ] + }, + 'http_version': { + 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions', + 'web_log.http_version', 'stacked'], + 'lines': [] + }, + 'requests_per_ipproto': { + 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto', + 'stacked'], + 'lines': [ + ['req_ipv4', 'ipv4', 'incremental', 1, 1], + ['req_ipv6', 'ipv6', 'incremental', 1, 1] + ] + }, + 'response_statuses': { + 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'], + 'lines': [ + ['successful_requests', 'success', 'incremental', 1, 1], + ['server_errors', 'error', 'incremental', 1, 1], + ['redirects', 'redirect', 'incremental', 1, 1], + ['bad_requests', 'bad', 'incremental', 1, 1], + ['other_requests', 'other', 'incremental', 1, 1] + ] + }, + 'requests_per_url': { + 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'], + 'lines': [ + ['url_pattern_other', 'other', 'incremental', 1, 1] + ] + }, + 'requests_per_user_defined': { + 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined', + 'web_log.requests_per_user_defined', 'stacked'], + 'lines': [ + ['user_pattern_other', 'other', 'incremental', 1, 1] + ] + }, + 'port': { + 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'], + 'lines': [ + ['port_80', 'http', 'incremental', 1, 1], + ['port_443', 'https', 'incremental', 1, 1] + ] + }, + 'vhost': { + 'options': [None, 'Requests Per Vhost', 'requests/s', 
'vhost', 'web_log.vhost', 'stacked'], + 'lines': [] + } +} + +CHARTS_APACHE_CACHE = { + 'apache_cache': { + 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache', + 'stacked'], + 'lines': [ + ['hit', 'cache', 'percentage-of-absolute-row'], + ['miss', None, 'percentage-of-absolute-row'], + ['other', None, 'percentage-of-absolute-row'] + ] + } +} + +CHARTS_SQUID = { + 'squid_duration': { + 'options': [None, 'Elapsed Time The Transaction Busied The Cache', + 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'], + 'lines': [ + ['duration_min', 'min', 'incremental', 1, 1000], + ['duration_max', 'max', 'incremental', 1, 1000], + ['duration_avg', 'avg', 'incremental', 1, 1000] + ] + }, + 'squid_bytes': { + 'options': [None, 'Amount Of Data Delivered To The Clients', + 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'], + 'lines': [ + ['bytes', 'sent', 'incremental', 8, 1000] + ] + }, + 'squid_response_statuses': { + 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses', + 'stacked'], + 'lines': [ + ['successful_requests', 'success', 'incremental', 1, 1], + ['server_errors', 'error', 'incremental', 1, 1], + ['redirects', 'redirect', 'incremental', 1, 1], + ['bad_requests', 'bad', 'incremental', 1, 1], + ['other_requests', 'other', 'incremental', 1, 1] + ] + }, + 'squid_response_codes': { + 'options': [None, 'Response Codes', 'responses/s', 'squid_responses', + 'web_log.squid_response_codes', 'stacked'], + 'lines': [ + ['2xx', None, 'incremental'], + ['5xx', None, 'incremental'], + ['3xx', None, 'incremental'], + ['4xx', None, 'incremental'], + ['1xx', None, 'incremental'], + ['0xx', None, 'incremental'], + ['other', None, 'incremental'], + ['unmatched', None, 'incremental'] + ] + }, + 'squid_code': { + 'options': [None, 'Responses Per Cache Result Of The Request', + 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'], + 'lines': [] + }, + 'squid_detailed_response_codes': { + 'options': [None, 'Detailed Response Codes', + 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'], + 'lines': [] + }, + 'squid_hier_code': { + 'options': [None, 'Responses Per Hierarchy Code', + 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'], + 'lines': [] + }, + 'squid_method': { + 'options': [None, 'Requests Per Method', + 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'], + 'lines': [] + }, + 'squid_mime_type': { + 'options': [None, 'Requests Per MIME Type', + 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'], + 'lines': [] + }, + 'squid_clients': { + 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients', + 'web_log.squid_clients', 'stacked'], + 'lines': [ + ['unique_ipv4', 'ipv4', 'incremental'], + ['unique_ipv6', 'ipv6', 'incremental'] + ] + }, + 'squid_clients_all': { + 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients', + 'web_log.squid_clients_all', 'stacked'], + 'lines': [ + ['unique_tot_ipv4', 'ipv4', 'absolute'], + ['unique_tot_ipv6', 'ipv6', 'absolute'] + ] + }, + 'squid_transport_methods': { + 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport', + 'web_log.squid_transport_methods', 'stacked'], + 'lines': [] + }, + 'squid_transport_errors': { + 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport', + 'web_log.squid_transport_errors', 'stacked'], + 'lines': [] + }, + 
'squid_handling_opts': {
+        'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
+                    'web_log.squid_handling_opts', 'stacked'],
+        'lines': []
+    },
+    'squid_object_types': {
+        'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
+                    'web_log.squid_object_types', 'stacked'],
+        'lines': []
+    },
+    'squid_cache_events': {
+        'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
+                    'web_log.squid_cache_events', 'stacked'],
+        'lines': []
+    }
+}
+
+NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
+
+DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+
+SQUID_CODES = {
+    'TCP': 'squid_transport_methods',
+    'UDP': 'squid_transport_methods',
+    'NONE': 'squid_transport_methods',
+    'CLIENT': 'squid_handling_opts',
+    'IMS': 'squid_handling_opts',
+    'ASYNC': 'squid_handling_opts',
+    'SWAPFAIL': 'squid_handling_opts',
+    'REFRESH': 'squid_handling_opts',
+    'SHARED': 'squid_handling_opts',
+    'REPLY': 'squid_handling_opts',
+    'NEGATIVE': 'squid_object_types',
+    'STALE': 'squid_object_types',
+    'OFFLINE': 'squid_object_types',
+    'INVALID': 'squid_object_types',
+    'FAIL': 'squid_object_types',
+    'MODIFIED': 'squid_object_types',
+    'UNMODIFIED': 'squid_object_types',
+    'REDIRECT': 'squid_object_types',
+    'HIT': 'squid_cache_events',
+    'MEM': 'squid_cache_events',
+    'MISS': 'squid_cache_events',
+    'DENIED': 'squid_cache_events',
+    'NOFETCH': 'squid_cache_events',
+    'TUNNEL': 'squid_cache_events',
+    'ABORTED': 'squid_transport_errors',
+    'TIMEOUT': 'squid_transport_errors'
+}
+
+REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:\.\d)?)')
+
+MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
+
+
+class Service(LogService):
+    def __init__(self, configuration=None, name=None):
+        """
+        :param configuration:
+        :param name:
+        """
+        LogService.__init__(self, configuration=configuration, name=name)
+        self.configuration = configuration
+        self.log_path = self.configuration.get('path')
+        self.job = None
+
+    def check(self):
+        """
+        :return: bool
+
+        1. "log_path" is specified in the module configuration file
+        2. "log_path" must be readable by netdata user and must exist
+        3. "log_path" must not be empty. We need at least 1 line to find an appropriate pattern to parse
+        4. other checks depend on the log "type"
+        """
+
+        log_type = self.configuration.get('type', 'web')
+        log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
+
+        if log_type not in log_types:
+            self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
+                                                                                  types=log_types.keys()))
+            return False
+
+        if not self.log_path:
+            self.error('log path is not specified')
+            return False
+
+        if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
+            self.error('{log_file} is not readable or does not exist'.format(log_file=self.log_path))
+            return False
+
+        if not os.path.getsize(self.log_path):
+            self.error('{log_file} is empty'.format(log_file=self.log_path))
+            return False
+
+        self.job = log_types[log_type](self)
+        if self.job.check():
+            self.order = self.job.order
+            self.definitions = self.job.definitions
+            return True
+        return False
+
+    def _get_data(self):
+        return self.job.get_data(self._get_raw_data())
+
+
+class Web:
+    def __init__(self, service):
+        self.service = service
+        self.order = ORDER_WEB[:]
+        self.definitions = deepcopy(CHARTS_WEB)
+        self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+        self.storage = dict()
+        self.data = {
+            'bytes_sent': 0,
+            'resp_length': 0,
+            'resp_time_min': 0,
+            'resp_time_max': 0,
+            'resp_time_avg': 0,
+            'resp_time_upstream_min': 0,
+            'resp_time_upstream_max': 0,
+            'resp_time_upstream_avg': 0,
+            'unique_cur_ipv4': 0,
+            'unique_cur_ipv6': 0,
+            '2xx': 0,
+            '5xx': 0,
+            '3xx': 0,
+            '4xx': 0,
+            '1xx': 0,
+            '0xx': 0,
+            'unmatched': 0,
+            'req_ipv4': 0,
+            'req_ipv6': 0,
+            'unique_tot_ipv4': 0,
+            'unique_tot_ipv6': 0,
+            'successful_requests': 0,
+            'redirects': 0,
+            'bad_requests': 0,
+            'server_errors': 0,
+            'other_requests': 0,
+            'GET': 0
+        }
+
+    def __getattr__(self, item):
+        return getattr(self.service, item)
+
+    def check(self):
+        last_line = read_last_line(self.log_path)
+        if not last_line:
+            return False
+        # Custom_log_format or predefined log format.
+        if self.configuration.get('custom_log_format'):
+            match_dict, error = self.find_regex_custom(last_line)
+        else:
+            match_dict, error = self.find_regex(last_line)
+
+        # "match_dict" is None if there are any problems
+        if match_dict is None:
+            self.error(error)
+            return False
+
+        self.storage['unique_all_time'] = list()
+        self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
+        self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
+
+        self.create_web_charts(match_dict)  # Create charts
+        self.info('Collected data: %s' % list(match_dict.keys()))
+        return True
+
+    def create_web_charts(self, match_dict):
+        """
+        :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
+        :return:
+        Create/remove additional charts depending on the 'match_dict' keys and configuration file options
+        """
+        if 'resp_time' not in match_dict:
+            self.order.remove('response_time')
+            self.order.remove('response_time_hist')
+        if 'resp_time_upstream' not in match_dict:
+            self.order.remove('response_time_upstream')
+            self.order.remove('response_time_upstream_hist')
+
+        # Add 'response_time_hist' and 'response_time_upstream_hist' charts if specified in the configuration
+        histogram = self.configuration.get('histogram', None)
+        if isinstance(histogram, list):
+            self.storage['bucket_index'] = histogram[:]
+            self.storage['bucket_index'].append(maxint)
+            self.storage['buckets'] = [0] * (len(histogram) + 1)
+            self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
+            hist_lines = self.definitions['response_time_hist']['lines']
+            upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
+            for i, le in enumerate(histogram):
+                hist_key = 'response_time_hist_%d' % i
+                upstream_hist_key = 'response_time_upstream_hist_%d' % i
+                hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
+                upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
+
+            hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+            upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+        elif histogram is not None:
+            self.error('expected a histogram list, but got {0}'.format(type(histogram)))
+
+        if not self.configuration.get('all_time', True):
+            self.order.remove('clients_all')
+
+        # Add 'detailed_response_codes' chart if specified in the configuration
+        if self.configuration.get('detailed_response_codes', True):
+            if self.configuration.get('detailed_response_aggregate', True):
+                codes = DET_RESP_AGGR[:1]
+            else:
+                codes = DET_RESP_AGGR[1:]
+
+            for code in codes:
+                self.order.append('detailed_response_codes%s' % code)
+                self.definitions['detailed_response_codes%s' % code] = {
+                    'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+                                'web_log.detailed_response_codes%s' % code, 'stacked'],
+                    'lines': []
+                }
+
+        # Add 'requests_per_url' chart if specified in the configuration
+        if self.storage['url_pattern']:
+            for elem in self.storage['url_pattern']:
+                dim = [elem.description, elem.description[12:], 'incremental']
+                self.definitions['requests_per_url']['lines'].append(dim)
+                self.data[elem.description] = 0
+            self.data['url_pattern_other'] = 0
+        else:
+            self.order.remove('requests_per_url')
+
+        # Add 'requests_per_user_defined' chart if specified in the configuration
+        if self.storage['user_pattern'] and 'user_defined' in match_dict:
+            for elem in self.storage['user_pattern']:
+                dim = [elem.description, elem.description[13:], 'incremental']
+                self.definitions['requests_per_user_defined']['lines'].append(dim)
+                self.data[elem.description] = 0
+            self.data['user_pattern_other'] = 0
+        else:
+            self.order.remove('requests_per_user_defined')
+
+    def get_data(self, raw_data=None):
+        """
+        Parses new log lines
+        :return: dict OR None
+        None if _get_raw_data method fails; a dict in all other cases.
+ """ + if not raw_data: + return None if raw_data is None else self.data + + filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter) + + unique_current = set() + timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0)) + + for line in filtered_data: + match = self.storage['regex'].search(line) + if match: + match_dict = match.groupdict() + try: + code = match_dict['code'][0] + 'xx' + self.data[code] += 1 + except KeyError: + self.data['0xx'] += 1 + # detailed response code + if self.configuration.get('detailed_response_codes', True): + self.get_data_per_response_codes_detailed(code=match_dict['code']) + # response statuses + self.get_data_per_statuses(code=match_dict['code']) + # requests per user defined pattern + if self.storage['user_pattern'] and 'user_defined' in match_dict: + self.get_data_per_pattern(row=match_dict['user_defined'], + other='user_pattern_other', + pattern=self.storage['user_pattern']) + # method, url, http version + self.get_data_from_request_field(match_dict=match_dict) + # bandwidth sent + bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0 + self.data['bytes_sent'] += int(bytes_sent) + # request processing time and bandwidth received + if 'resp_length' in match_dict: + resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0 + self.data['resp_length'] += int(resp_length) + if 'resp_time' in match_dict: + resp_time = self.storage['func_resp_time'](float(match_dict['resp_time'])) + get_timings(timings=timings['resp_time'], time=resp_time) + if 'bucket_index' in self.storage: + get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000) + if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-': + resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream'])) + get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream) + if 'bucket_index' in self.storage: + get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time / 1000) + # requests per ip proto + proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4' + self.data['req_' + proto] += 1 + # unique clients ips + if self.configuration.get('all_time', True): + if address_not_in_pool(pool=self.storage['unique_all_time'], + address=match_dict['address'], + pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']): + self.data['unique_tot_' + proto] += 1 + if match_dict['address'] not in unique_current: + self.data['unique_cur_' + proto] += 1 + unique_current.add(match_dict['address']) + else: + self.data['unmatched'] += 1 + + # timings + for elem in timings: + self.data[elem + '_min'] += timings[elem]['minimum'] + self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count'] + self.data[elem + '_max'] += timings[elem]['maximum'] + + # histogram + if 'bucket_index' in self.storage: + buckets = self.storage['buckets'] + upstream_buckets = self.storage['upstream_buckets'] + for i in range(0, len(self.storage['bucket_index'])): + hist_key = 'response_time_hist_%d' % i + upstream_hist_key = 'response_time_upstream_hist_%d' % i + self.data[hist_key] = buckets[i] + self.data[upstream_hist_key] = upstream_buckets[i] + + return self.data + + def find_regex(self, last_line): + """ + :param last_line: str: literally last line from log file + :return: tuple where: + [0]: dict or None: match_dict or None + [1]: str: error description + We need to find appropriate pattern for current 
+
+    def find_regex(self, last_line):
+        """
+        :param last_line: str: literally last line from log file
+        :return: tuple where:
+        [0]: dict or None: match_dict or None
+        [1]: str: error description
+        We need to find an appropriate pattern for the current log file.
+        The logic is to run a regex search through the string for all predefined
+        patterns until we find a match or fail.
+        """
+        # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
+        # 5. Bytes sent 6. Response length 7. Response process time
+        default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+                             r' -.*?"(?P<request>[^"]*)"'
+                             r' (?P<code>[1-9]\d{2})'
+                             r' (?P<bytes_sent>\d+|-)')
+
+        apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+                                       r' -.*?"(?P<request>[^"]*)"'
+                                       r' (?P<code>[1-9]\d{2})'
+                                       r' (?P<bytes_sent>\d+|-)'
+                                       r' (?P<resp_length>\d+|-)'
+                                       r' (?P<resp_time>\d+) ')
+
+        apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+                                       r' -.*?"(?P<request>[^"]*)"'
+                                       r' (?P<code>[1-9]\d{2})'
+                                       r' (?P<bytes_sent>\d+|-)'
+                                       r' .*?'
+                                       r' (?P<resp_length>\d+|-)'
+                                       r' (?P<resp_time>\d+)'
+                                       r'(?: |$)')
+
+        nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+                                      r' -.*?"(?P<request>[^"]*)"'
+                                      r' (?P<code>[1-9]\d{2})'
+                                      r' (?P<bytes_sent>\d+)'
+                                      r' (?P<resp_length>\d+)'
+                                      r' (?P<resp_time>\d+\.\d+) ')
+
+        nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+                                       r' -.*?"(?P<request>[^"]*)"'
+                                       r' (?P<code>[1-9]\d{2})'
+                                       r' (?P<bytes_sent>\d+)'
+                                       r' (?P<resp_length>\d+)'
+                                       r' (?P<resp_time>\d+\.\d+)'
+                                       r' (?P<resp_time_upstream>[\d.-]+) ')
+
+        nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+                                      r' -.*?"(?P<request>[^"]*)"'
+                                      r' (?P<code>[1-9]\d{2})'
+                                      r' (?P<bytes_sent>\d+)'
+                                      r' .*?'
+                                      r' (?P<resp_length>\d+)'
+                                      r' (?P<resp_time>\d+\.\d+)')
+
+        def func_usec(time):
+            return time
+
+        def func_sec(time):
+            return time * 1000000
+
+        r_regex = [apache_ext_insert, apache_ext_append,
+                   nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
+                   default]
+        r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
+        regex_function = zip(r_regex, r_function)
+
+        match_dict = dict()
+        for regex, func in regex_function:
+            match = regex.search(last_line)
+            if match:
+                self.storage['regex'] = regex
+                self.storage['func_resp_time'] = func
+                match_dict = match.groupdict()
+                break
+
+        return find_regex_return(match_dict=match_dict or None,
+                                 msg='Unknown log format. You need to use "custom_log_format" feature.')
+
+    def find_regex_custom(self, last_line):
+        """
+        :param last_line: str: literally last line from log file
+        :return: tuple where:
+        [0]: dict or None: match_dict or None
+        [1]: str: error description
+
+        We are here only if "custom_log_format" is in the job configuration. We need to make sure that:
+        1. "custom_log_format" is a dict
+        2. "pattern" is in "custom_log_format" and the pattern is a <str> instance
+        3. if "time_multiplier" is in "custom_log_format" it must be an <int> or <float> instance
+
+        If all parameters are OK we need to make sure that:
+        1. the pattern search succeeds
+        2. the pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+        If the pattern search succeeds we need to make sure that:
+        1. all mandatory keys ['address', 'code', 'bytes_sent'] are in "match_dict"
+
+        If this is True we need to make sure that:
+        1. all mandatory key values from "match_dict" have the correct format
+        ("code" is an integer, "bytes_sent" is an integer or "-", etc)
+
+        If non mandatory keys are in "match_dict" we need to make sure that:
+        1. all non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
+        ("resp_length" is an integer or "-", "resp_time" is an integer or a float)
+        """
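+        # Example of a configuration this method validates (mirrors the sample
+        # documented in web_log.conf):
+        #   custom_log_format:
+        #     pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+)'
+        #     time_multiplier: 1000000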
+        if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
+            return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+        pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
+        if not (pattern and isinstance(pattern, str)):
+            return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+        resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
+
+        if not isinstance(resp_time_func, (int, float)):
+            return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
+
+        try:
+            regex = re.compile(pattern)
+        except re.error as error:
+            return find_regex_return(msg='Pattern compile error: %s' % str(error))
+
+        match = regex.search(last_line)
+        if not match:
+            return find_regex_return(msg='Custom log: pattern search FAILED')
+
+        match_dict = match.groupdict() or None
+        if match_dict is None:
+            return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+                                         ' (you need to use ?P<subgroup_name>)')
+        mandatory_dict = {'address': r'[\w.:-]+',
+                          'code': r'[1-9]\d{2}',
+                          'bytes_sent': r'\d+|-'}
+        optional_dict = {'resp_length': r'\d+|-',
+                         'resp_time': r'[\d.]+',
+                         'resp_time_upstream': r'[\d.-]+',
+                         'method': r'[A-Z]+',
+                         'http_version': r'\d(?:\.\d)?'}
+
+        mandatory_values = set(mandatory_dict) - set(match_dict)
+        if mandatory_values:
+            return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+                                         % list(mandatory_values))
+        for key in mandatory_dict:
+            if not re.search(mandatory_dict[key], match_dict[key]):
+                return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+                                             % (key, match_dict[key]))
+
+        optional_values = set(optional_dict) & set(match_dict)
+        for key in optional_values:
+            if not re.search(optional_dict[key], match_dict[key]):
+                return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+                                             % (key, match_dict[key]))
+
+        dot_in_time = '.' in match_dict.get('resp_time', '')
+        if dot_in_time:
+            self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
+        else:
+            self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
+
+        self.storage['regex'] = regex
+        return find_regex_return(match_dict=match_dict)
+
+    def get_data_from_request_field(self, match_dict):
+        if match_dict.get('request'):
+            match_dict = REQUEST_REGEX.search(match_dict['request'])
+            if match_dict:
+                match_dict = match_dict.groupdict()
+            else:
+                return
+        # requests per url
+        if match_dict.get('url') and self.storage['url_pattern']:
+            self.get_data_per_pattern(row=match_dict['url'],
+                                      other='url_pattern_other',
+                                      pattern=self.storage['url_pattern'])
+        # requests per http method
+        if match_dict.get('method'):
+            if match_dict['method'] not in self.data:
+                self.charts['http_method'].add_dimension([match_dict['method'],
+                                                          match_dict['method'],
+                                                          'incremental'])
+                self.data[match_dict['method']] = 0
+            self.data[match_dict['method']] += 1
+        # requests per http version
+        if match_dict.get('http_version'):
+            dim_id = match_dict['http_version'].replace('.', '_')
+            if dim_id not in self.data:
+                self.charts['http_version'].add_dimension([dim_id,
+                                                           match_dict['http_version'],
+                                                           'incremental'])
+                self.data[dim_id] = 0
+            self.data[dim_id] += 1
+        # requests per port number
+        if match_dict.get('port'):
+            if match_dict['port'] not in self.data:
+                self.charts['port'].add_dimension([match_dict['port'],
+                                                   match_dict['port'],
+                                                   'incremental'])
+                self.data[match_dict['port']] = 0
+            self.data[match_dict['port']] += 1
+        # requests per vhost
+        if match_dict.get('vhost'):
+            dim_id = match_dict['vhost'].replace('.', '_')
+            if dim_id not in self.data:
+                self.charts['vhost'].add_dimension([dim_id,
+                                                    match_dict['vhost'],
+                                                    'incremental'])
+                self.data[dim_id] = 0
+            self.data[dim_id] += 1
+
+    def get_data_per_response_codes_detailed(self, code):
+        """
+        :param code: str: CODE from parsed line. Ex.: '202', '499'
+        :return:
+        Calls the add_dimension method if the value is found for the first time
+        """
+        if code not in self.data:
+            if self.configuration.get('detailed_response_aggregate', True):
+                self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+                self.data[code] = 0
+            else:
+                code_index = int(code[0]) if int(code[0]) < 6 else 6
+                chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
+                self.charts[chart_key].add_dimension([code, code, 'incremental'])
+                self.data[code] = 0
+        self.data[code] += 1
+
+    def get_data_per_pattern(self, row, other, pattern):
+        """
+        :param row: str:
+        :param other: str:
+        :param pattern: named tuple: (['pattern_description', 'regular expression'])
+        :return:
+        Scan through the string looking for the first user defined pattern that
+        produces a match
+        """
+        match = None
+        for elem in pattern:
+            if elem.func(row):
+                self.data[elem.description] += 1
+                match = True
+                break
+        if not match:
+            self.data[other] += 1
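+
+    # Example (illustrative): with the configuration 'categories: {cacti: "cacti.*"}',
+    # a request for '/cacti/graph_view.php' increments self.data['url_pattern_cacti'],
+    # while a URL matching no category increments self.data['url_pattern_other'].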
+
+    def get_data_per_statuses(self, code):
+        """
+        :param code: str: response status code. Ex.: '202', '499'
+        :return:
+        """
+        code_class = code[0]
+        if code_class == '2' or code == '304' or code_class == '1':
+            self.data['successful_requests'] += 1
+        elif code_class == '3':
+            self.data['redirects'] += 1
+        elif code_class == '4':
+            self.data['bad_requests'] += 1
+        elif code_class == '5':
+            self.data['server_errors'] += 1
+        else:
+            self.data['other_requests'] += 1
+
+
+class ApacheCache:
+    def __init__(self, service):
+        self.service = service
+        self.order = ORDER_APACHE_CACHE
+        self.definitions = CHARTS_APACHE_CACHE
+
+    @staticmethod
+    def check():
+        return True
+
+    @staticmethod
+    def get_data(raw_data=None):
+        data = dict(hit=0, miss=0, other=0)
+        if not raw_data:
+            return None if raw_data is None else data
+
+        for line in raw_data:
+            if 'cache hit' in line:
+                data['hit'] += 1
+            elif 'cache miss' in line:
+                data['miss'] += 1
+            else:
+                data['other'] += 1
+        return data
+
+
+class Squid:
+    def __init__(self, service):
+        self.service = service
+        self.order = ORDER_SQUID
+        self.definitions = CHARTS_SQUID
+        self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+        self.storage = dict()
+        self.data = {
+            'duration_max': 0,
+            'duration_avg': 0,
+            'duration_min': 0,
+            'bytes': 0,
+            '0xx': 0,
+            '1xx': 0,
+            '2xx': 0,
+            '3xx': 0,
+            '4xx': 0,
+            '5xx': 0,
+            'other': 0,
+            'unmatched': 0,
+            'unique_ipv4': 0,
+            'unique_ipv6': 0,
+            'unique_tot_ipv4': 0,
+            'unique_tot_ipv6': 0,
+            'successful_requests': 0,
+            'redirects': 0,
+            'bad_requests': 0,
+            'server_errors': 0,
+            'other_requests': 0
+        }
+
+    def __getattr__(self, item):
+        return getattr(self.service, item)
+
+    def check(self):
+        last_line = read_last_line(self.log_path)
+        if not last_line:
+            return False
+        self.storage['unique_all_time'] = list()
+        self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
+                                           r' (?P<client_address>[\da-f.:]+)'
+                                           r' (?P<squid_code>[A-Z_]+)/'
+                                           r'(?P<http_code>[0-9]+)'
+                                           r' (?P<bytes>[0-9]+)'
+                                           r' (?P<method>[A-Z_]+)'
+                                           r' (?P<url>[^ ]+)'
+                                           r' (?P<user>[^ ]+)'
+                                           r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
+                                           r' (?P<mime_type>[A-Za-z-]*)')
+
+        match = self.storage['regex'].search(last_line)
+        if not match:
+            self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
+            return False
+        self.storage['dynamic'] = {
+            'http_code': {
+                'chart': 'squid_detailed_response_codes',
+                'func_dim_id': None,
+                'func_dim': None
+            },
+            'hier_code': {
+                'chart': 'squid_hier_code',
+                'func_dim_id': None,
+                'func_dim': lambda v: v.replace('HIER_', '')
+            },
+            'method': {
+                'chart': 'squid_method',
+                'func_dim_id': None,
+                'func_dim': None
+            },
+            'mime_type': {
+                'chart': 'squid_mime_type',
+                'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
+                'func_dim': None
+            }
+        }
+        if not self.configuration.get('all_time', True):
+            self.order.remove('squid_clients_all')
+        return True
+
+    def get_data(self, raw_data=None):
+        if not raw_data:
+            return None if raw_data is None else self.data
+
+        filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+        unique_ip = set()
+        timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+        for row in filtered_data:
+            match = self.storage['regex'].search(row)
+            if match:
+                match = match.groupdict()
+                if match['duration'] != '0':
+                    get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
+                try:
+                    self.data[match['http_code'][0] + 'xx'] += 1
+                except KeyError:
+                    self.data['other'] += 1
+
+                self.get_data_per_statuses(match['http_code'])
+
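+                # squid_code is the Squid result code, e.g. 'TCP_MISS' or 'TCP_HIT'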
+                self.get_data_per_squid_code(match['squid_code'])
+
+                self.data['bytes'] += int(match['bytes'])
+
+                proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
+                # unique clients ips
+                if self.configuration.get('all_time', True):
+                    if address_not_in_pool(pool=self.storage['unique_all_time'],
+                                           address=match['client_address'],
+                                           pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+                        self.data['unique_tot_' + proto] += 1
+
+                if match['client_address'] not in unique_ip:
+                    self.data['unique_' + proto] += 1
+                    unique_ip.add(match['client_address'])
+
+                for key, values in self.storage['dynamic'].items():
+                    if match[key] == '-':
+                        continue
+                    dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
+                    if dimension_id not in self.data:
+                        dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
+                        self.charts[values['chart']].add_dimension([dimension_id,
+                                                                    dimension,
+                                                                    'incremental'])
+                        self.data[dimension_id] = 0
+                    self.data[dimension_id] += 1
+            else:
+                self.data['unmatched'] += 1
+
+        for elem in timings:
+            self.data[elem + '_min'] += timings[elem]['minimum']
+            self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+            self.data[elem + '_max'] += timings[elem]['maximum']
+        return self.data
+
+    def get_data_per_statuses(self, code):
+        """
+        :param code: str: response status code. Ex.: '202', '499'
+        :return:
+        """
+        code_class = code[0]
+        if code_class == '2' or code == '304' or code_class == '1' or code == '000':
+            self.data['successful_requests'] += 1
+        elif code_class == '3':
+            self.data['redirects'] += 1
+        elif code_class == '4':
+            self.data['bad_requests'] += 1
+        elif code_class == '5' or code_class == '6':
+            self.data['server_errors'] += 1
+        else:
+            self.data['other_requests'] += 1
+
+    def get_data_per_squid_code(self, code):
+        """
+        :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
+        :return:
+        """
+        if code not in self.data:
+            self.charts['squid_code'].add_dimension([code, code, 'incremental'])
+            self.data[code] = 0
+        self.data[code] += 1
+
+        for tag in code.split('_'):
+            try:
+                chart_key = SQUID_CODES[tag]
+            except KeyError:
+                continue
+            dimension_id = '_'.join(['code_detailed', tag])
+            if dimension_id not in self.data:
+                self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
+                self.data[dimension_id] = 0
+            self.data[dimension_id] += 1
+
+
+def get_timings(timings, time):
+    """
+    :param timings:
+    :param time:
+    :return:
+    """
+    if timings['minimum'] is None:
+        timings['minimum'] = time
+    if time > timings['maximum']:
+        timings['maximum'] = time
+    elif time < timings['minimum']:
+        timings['minimum'] = time
+    timings['summary'] += time
+    timings['count'] += 1
+
+
+def get_hist(index, buckets, time):
+    """
+    :param index: histogram index (Ex. [10, 50, 100, 150, ...])
+    :param buckets: histogram buckets
+    :param time: time
+    :return: None
+    """
+    for i in range(len(index) - 1, -1, -1):
+        if time <= index[i]:
+            buckets[i] += 1
+        else:
+            break
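+
+
+# Example (illustrative): with index=[10, 50, 100, maxint], a 35ms response
+# increments the buckets for 50, 100 and maxint (every edge >= the value),
+# which is what makes the resulting histogram cumulative.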
+ """ + index = bisect.bisect_left(pool, address) + if index < pool_size: + if pool[index] == address: + return False + bisect.insort_left(pool, address) + return True + bisect.insort_left(pool, address) + return True + + +def find_regex_return(match_dict=None, msg='Generic error message'): + """ + :param match_dict: dict: re.search.groupdict() or None + :param msg: str: error description + :return: tuple: + """ + return match_dict, msg + + +def check_patterns(string, dimension_regex_dict): + """ + :param string: str: + :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'} + :return: list of named tuples or None: + We need to make sure all patterns are valid regular expressions + """ + if not hasattr(dimension_regex_dict, 'keys'): + return None + + result = list() + + def valid_pattern(pattern): + """ + :param pattern: str + :return: re.compile(pattern) or None + """ + if not isinstance(pattern, str): + return False + try: + return re.compile(pattern) + except re.error: + return False + + def func_search(pattern): + def closure(v): + return pattern.search(v) + + return closure + + for dimension, regex in dimension_regex_dict.items(): + valid = valid_pattern(regex) + if isinstance(dimension, str) and valid_pattern: + func = func_search(valid) + result.append(NAMED_PATTERN(description='_'.join([string, dimension]), + func=func)) + return result or None + + +def filter_data(raw_data, pre_filter): + """ + :param raw_data: + :param pre_filter: + :return: + """ + + if not pre_filter: + return raw_data + filtered = raw_data + for elem in pre_filter: + if elem.description == 'filter_include': + filtered = filter(elem.func, filtered) + elif elem.description == 'filter_exclude': + filtered = filterfalse(elem.func, filtered) + return filtered diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf new file mode 100644 index 000000000..a67957aef --- /dev/null +++ b/collectors/python.d.plugin/web_log/web_log.conf @@ -0,0 +1,206 @@ +# netdata python.d.plugin configuration for web log +# +# This file is in YaML format. Generally the format is: +# +# name: value +# +# There are 2 sections: +# - global variables +# - one or more JOBS +# +# JOBS allow you to collect values from multiple sources. +# Each source will have its own set of charts. +# +# JOB parameters have to be indented (using spaces only, example below). + +# ---------------------------------------------------------------------- +# Global Variables +# These variables set the defaults for all JOBs, however each JOB +# may define its own, overriding the defaults. + +# update_every sets the default data collection frequency. +# If unset, the python.d.plugin default is used. +# update_every: 1 + +# priority controls the order of charts at the netdata dashboard. +# Lower numbers move the charts towards the top of the page. +# If unset, the default for python.d.plugin is used. +# priority: 60000 + +# retries sets the number of retries to be made in case of failures. +# If unset, the default for python.d.plugin is used. +# Attempts to restore the service are made once every update_every +# and only if the module has collected values in the past. +# retries: 60 + +# autodetection_retry sets the job re-check interval in seconds. +# The job is not deleted if check fails. +# Attempts to start the job are made once every autodetection_retry. +# This feature is disabled by default. 
+# autodetection_retry: 0

+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.

+# ----------------------------------------------------------------------
+# PLUGIN CONFIGURATION
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, web_log also supports the following:
+#
+#     path: 'PATH'                        # the path to the web server log file
+#     path: 'PATH[0-9]*[0-9]'             # log files with a date suffix are also supported
+#     detailed_response_codes: yes/no     # default: yes. Additional chart where response codes are not grouped
+#     detailed_response_aggregate: yes/no # default: yes. Aggregate detailed response codes in one chart (no = one chart per response class)
+#     all_time : yes/no                   # default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+#     filter:                             # filter with regex
+#          include: 'REGEX'               # only rows that match the regex
+#          exclude: 'REGEX'               # all rows except those that match the regex
+#     categories:                         # requests per url chart configuration
+#          cacti: 'cacti.*'               # name(dimension): REGEX to match
+#          observium: 'observium.*'       # name(dimension): REGEX to match
+#          stub_status: 'stub_status'     # name(dimension): REGEX to match
+#     user_defined:                       # requests per pattern in <user_defined> field (custom_log_format)
+#          cacti: 'cacti.*'               # name(dimension): REGEX to match
+#          observium: 'observium.*'       # name(dimension): REGEX to match
+#          stub_status: 'stub_status'     # name(dimension): REGEX to match
+#     custom_log_format:                  # define a custom log format
+#          pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
+#          time_multiplier: 1000000       # type <int>/<float> - convert time to microseconds
+#     histogram: [1,3,10,30,100, ...]     # type list of int - cumulative histogram of response time in milliseconds
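+
+# For example (a hypothetical job; adjust the name and path to your setup),
+# a job using a custom log format could be defined like this:
+#
+# my_custom_job:
+#   name: 'my_app'
+#   path: '/var/log/my_app/access.log'
+#   custom_log_format:
+#     pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+)'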
+
+# ----------------------------------------------------------------------
+# WEB SERVER CONFIGURATION
+#
+# Make sure the web server log directory and the web server log files
+# can be read by user 'netdata'.
+#
+# To enable the timings chart and the requests size dimension, the
+# web server needs to log them. This is how to add them:
+#
+# nginx:
+#   log_format netdata '$remote_addr - $remote_user [$time_local] '
+#                      '"$request" $status $body_bytes_sent '
+#                      '$request_length $request_time $upstream_response_time '
+#                      '"$http_referer" "$http_user_agent"';
+#   access_log /var/log/nginx/access.log netdata;
+#
+# apache (you need mod_logio enabled):
+#   LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+#   LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata
+#   CustomLog "/var/log/apache2/access.log" netdata

+# ----------------------------------------------------------------------
+# VHOST AND PORT
+# if you want to graph the requests/sec per virtual host and per port (to check the number of requests in http vs https)

+# in apache: (%v gives the hostname, %p the port number)
+#   LogFormat "%v %p %h %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+#
+# and in the apache_vhosts_log section of this file, add:
+#   custom_log_format:
+#     pattern: '(?P<vhost>[a-zA-Z\d.-_]+) (?P<port>\d+) (?P<address>[\da-f.:]+) \[.*\] "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'

+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them per web server will run (when they have the same name)


+# -------------------------------------------
+# nginx log on various distros

+# debian, arch
+nginx_log:
+  name: 'nginx'
+  path: '/var/log/nginx/access.log'

+# gentoo
+nginx_log2:
+  name: 'nginx'
+  path: '/var/log/nginx/localhost.access_log'


+# -------------------------------------------
+# apache log on various distros

+# debian
+apache_log:
+  name: 'apache'
+  path: '/var/log/apache2/access.log'

+# gentoo
+apache_log2:
+  name: 'apache'
+  path: '/var/log/apache2/access_log'

+# arch
+apache_log3:
+  name: 'apache'
+  path: '/var/log/httpd/access_log'

+# debian
+apache_vhosts_log:
+  name: 'apache_vhosts'
+  path: '/var/log/apache2/other_vhosts_access.log'


+# -------------------------------------------
+# gunicorn log on various distros

+gunicorn_log:
+  name: 'gunicorn'
+  path: '/var/log/gunicorn/access.log'

+gunicorn_log2:
+  name: 'gunicorn'
+  path: '/var/log/gunicorn/gunicorn-access.log'

+# -------------------------------------------
+# Apache Cache
+apache_cache:
+  name: 'apache_cache'
+  type: 'apache_cache'
+  path: '/var/log/apache/cache.log'

+apache2_cache:
+  name: 'apache_cache'
+  type: 'apache_cache'
+  path: '/var/log/apache2/cache.log'

+httpd_cache:
+  name: 'apache_cache'
+  type: 'apache_cache'
+  path: '/var/log/httpd/cache.log'

+# -------------------------------------------
+# Squid

+# debian/ubuntu
+squid_log1:
+  name: 'squid'
+  type: 'squid'
+  path: '/var/log/squid3/access.log'

+#gentoo
+squid_log2:
+  name: 'squid'
+  type: 'squid'
+  path: '/var/log/squid/access.log'
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
new file mode 100644
index 000000000..7f09bacd7
--- /dev/null
+++ b/collectors/statsd.plugin/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+    README.md \
+    $(NULL)
+
+statsdconfigdir=$(libconfigdir)/statsd.d
+dist_statsdconfig_DATA = \
+    $(top_srcdir)/installer/.keep \
+    example.conf \
+    $(NULL)
+
+userstatsdconfigdir=$(configdir)/statsd.d
+dist_userstatsdconfig_DATA = \
+
$(top_srcdir)/installer/.keep \ + $(NULL) + diff --git a/collectors/statsd.plugin/Makefile.in b/collectors/statsd.plugin/Makefile.in new file mode 100644 index 000000000..5c16a86d1 --- /dev/null +++ b/collectors/statsd.plugin/Makefile.in @@ -0,0 +1,556 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = collectors/statsd.plugin +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \ + $(dist_userstatsdconfig_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = 
$(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(statsdconfigdir)" \ + "$(DESTDIR)$(userstatsdconfigdir)" +DATA = $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \ + $(dist_userstatsdconfig_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ 
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +statsdconfigdir = $(libconfigdir)/statsd.d +dist_statsdconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + example.conf \ + $(NULL) + +userstatsdconfigdir = $(configdir)/statsd.d +dist_userstatsdconfig_DATA = \ + $(top_srcdir)/installer/.keep \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) 
&& $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \ + done + +uninstall-dist_statsdconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir) +install-dist_userstatsdconfigDATA: $(dist_userstatsdconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(userstatsdconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(userstatsdconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userstatsdconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(userstatsdconfigdir)" || exit $$?; \ + done + +uninstall-dist_userstatsdconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(userstatsdconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find 
"$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(statsdconfigdir)" "$(DESTDIR)$(userstatsdconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_statsdconfigDATA \ + install-dist_userstatsdconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_statsdconfigDATA \ + uninstall-dist_userstatsdconfigDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_statsdconfigDATA \ + install-dist_userstatsdconfigDATA install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dist_statsdconfigDATA \ + uninstall-dist_userstatsdconfigDATA + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. 
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
new file mode 100644
index 000000000..6ef038343
--- /dev/null
+++ b/collectors/statsd.plugin/README.md
@@ -0,0 +1,523 @@
+# Netdata Statsd
+
+statsd is a system to collect data from any application. Applications send metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases.
+
+There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics into any application framework. This makes statsd quite popular for custom application metrics.
+
+## netdata statsd
+
+netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases.
+
+netdata statsd is inside netdata (an internal plugin, running inside the netdata daemon), it is configured via `netdata.conf` and by default listens on the standard statsd ports (tcp and udp 8125 - yes, the netdata statsd server supports both tcp and udp at the same time).
+
+Since statsd is embedded in netdata, it means you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation.
+
+netdata statsd is fast. It can collect more than **1,200,000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded, one thread collects metrics, another one updates the charts from the collected data).
+
+## metrics supported by netdata
+
+netdata fully supports the statsd protocol. All statsd client libraries can be used with netdata too.
+
+- **Gauges**
+
+  The application sends `name:value|g`, where `value` is any **decimal/fractional** number, statsd reports the latest value collected and the number of times it was updated (events).
+
+  The application may increment or decrement a previous value, by setting the first character of the value to ` + ` or ` - ` (so, the only way to set a gauge to an absolute negative value, is to first set it to zero).
+
+  Sampling rate is supported (check below).
+
+  When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
+
+- **Counters** and **Meters**
+
+  The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events that occurred, statsd reports the **rate** and the number of times it was updated (events).
+
+  `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
+
+  For counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
+
+  Sampling rate is supported (check below).
+
+  When a counter or meter is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+- **Timers** and **Histograms**
+
+  The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number, statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median** and **standard deviation** and the total number of times it was updated (events).
+
+  For timers use `|ms`, for histograms use `|h`. The only difference between the two is the `units` of the charts (timers report milliseconds).
+
+  Sampling rate is supported (check below).
+
+  When a timer or histogram is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+- **Sets**
+
+  The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed), statsd reports the number of unique values sent and the number of times it was updated (events).
+
+  Sampling rate is **not** supported for Sets. `value` is always considered text.
+
+  When a set is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+#### Sampling Rates
+
+The application may append `|@sampling_rate`, where `sampling_rate` is a number from `0.0` to `1.0`, to have statsd extrapolate the value and predict the total for the whole period. So, if the application reports to statsd a value for 1/10th of the time, it can append `|@0.1` to the metrics it sends to statsd.
+
+#### Overlapping metrics
+
+netdata statsd maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently.
+
+#### Multiple metrics per packet
+
+netdata accepts multiple metrics per packet if each is terminated with `\n`.
+
+#### TCP packets
+
+netdata listens for both TCP and UDP packets. For TCP though, it is important to always append `\n` to each metric. netdata uses this to detect if a metric is split into multiple TCP packets. On disconnect, even the remaining buffer (not terminated with `\n`) is processed.
+
+#### UDP packets
+
+When sending multiple packets over UDP, it is important not to exceed the network MTU (usually 1500 bytes minus a few bytes for the headers). netdata will accept UDP packets up to 9000 bytes, but the underlying network will not exceed the MTU.
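+
+As an illustration of the wire format described above, here is a minimal Python sketch (the metric names are made up) that sends a few metrics to a local netdata statsd over UDP - any statsd client library would produce equivalent packets:
+
+```
+# minimal statsd UDP sender (illustrative sketch)
+import socket
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+# several metrics in one packet, each terminated with \n (keep packets below the MTU)
+payload = '\n'.join([
+    'myapp.cache.memory:120|g',     # gauge: report a value
+    'myapp.files.downloaded:1|c',   # counter: count an event
+    'myapp.response.time:12.5|ms',  # timer: duration in milliseconds
+    'myapp.user:john|s',            # set: count unique occurrences
+    'myapp.requests:1|c|@0.1',      # counter sampled at 1/10th of the time
+])
+sock.sendto(payload.encode(), ('localhost', 8125))
+```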
+
+## configuration
+
+This is the statsd configuration at `/etc/netdata/netdata.conf`:
+
+```
+[statsd]
+    # enabled = yes
+    # decimal detail = 1000
+    # update every (flushInterval) = 1
+    # udp messages to process at once = 10
+    # create private charts for metrics matching = *
+    # max private charts allowed = 200
+    # max private charts hard limit = 1000
+    # private charts memory mode = save
+    # private charts history = 3996
+    # histograms and timers percentile (percentThreshold) = 95.00000
+    # add dimension for number of events received = yes
+    # gaps on gauges (deleteGauges) = no
+    # gaps on counters (deleteCounters) = no
+    # gaps on meters (deleteMeters) = no
+    # gaps on sets (deleteSets) = no
+    # gaps on histograms (deleteHistograms) = no
+    # gaps on timers (deleteTimers) = no
+    # listen backlog = 4096
+    # default port = 8125
+    # bind to = udp:localhost:8125 tcp:localhost:8125
+```
+
+### statsd main config options
+- `enabled = yes|no`
+
+  controls if statsd will be enabled for this netdata. The default is enabled.
+
+- `default port = 8125`
+
+  controls the default port statsd will use; the `bind to` option below also allows defining ports per listener.
+
+- `bind to = udp:localhost tcp:localhost`
+
+  is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be ` * ` (to listen on all IPs) or even a hostname.
+
+- `update every (flushInterval) = 1` seconds, controls the frequency statsd will push the collected metrics to netdata charts.
+
+- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. netdata collects metrics using signed 64 bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
+
+The rest of the settings are discussed below.
+
+## statsd charts
+
+netdata can visualize statsd collected metrics in 2 ways:
+
+1. Each metric gets its own **private chart**. This is the default and does not require any configuration (although there are a few options to tweak).
+
+2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of charts, special configuration is required, to define the chart title, type, units, its dimensions, etc.
+
+### private metric charts
+
+Private charts are controlled with `create private charts for metrics matching = *`. This setting accepts a space separated list of simple patterns (use `*` as wildcard, prepend a pattern with `!` for a negative match, the order of patterns is important).
+
+So to render charts for all `myapp.*` metrics, except `myapp.*.badmetric`, use:
+
+```
+create private charts for metrics matching = !myapp.*.badmetric myapp.*
+```
+
+The default is to render private charts for all metrics.
+
+The `memory mode` of the round robin database and the `history` of private metric charts are controlled with `private charts memory mode` and `private charts history`. The defaults for both settings are the global netdata settings. So, you need to edit them only when you want statsd to use different settings compared to the global ones.
+
+If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the netdata dashboard (this is a web browser issue we need to address at the netdata UI). So, netdata has a protection to stop creating charts when `max private charts allowed = 200` (soft limit) is reached.
+
+The metrics above this soft limit are still processed by netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, netdata will still generate charts, but they will automatically be created with `memory mode = none` (netdata will not maintain a database for them). These metrics will be sent to backend time-series databases, if the backend configuration is set to `as collected`.
+
+Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
+
+Example private charts (automatically generated without any configuration):
+
+#### counters
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
+- statsd increments the counter by the `INTEGER` number supplied (positive, or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131553/4a26d19c-3aa3-11e7-94e8-c53b5ed6ebc3.png)
+
+#### gauges
+
+- Scope: **report the value of something** (e.g. cache memory used by the application server)
+- Format: `name:FLOAT|g`
+- statsd remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with ` + ` or ` - `.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131575/5d54e6f0-3aa3-11e7-9099-bc4440cd4592.png)
+
+#### histograms
+
+- Scope: **statistics on the size of events** (e.g. statistics on the sizes of files downloaded)
+- Format: `name:FLOAT|h`
+- statsd maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131587/704de72a-3aa3-11e7-9ea9-0d2bb778c150.png)
+
+The same chart with `sum` unselected, to show the detail of the dimensions supported:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131598/8076443a-3aa3-11e7-9ffa-ea535aee9c9f.png)
+
+#### meters
+
+This is identical to `counter`.
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|m` or `name|m` or just `name`
+- statsd increments the counter by the `INTEGER` number supplied (positive, or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131605/8fdf5a06-3aa3-11e7-963f-7ecf207d1dbc.png)
+
+#### sets
+
+- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
+- Format: `name:TEXT|s`
+- statsd maintains a unique index of all values supplied, and reports the unique entries in it.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131612/9eaa7b1a-3aa3-11e7-903b-d881e9a35be2.png)
+
+#### timers
+
+- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
+- Format: `name:FLOAT|ms`
+- statsd maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131620/acbea6a4-3aa3-11e7-8bdd-4a8996847767.png)
+
+The same chart with the `sum` unselected:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131629/bc34f2d2-3aa3-11e7-8a07-f2fc94ba4352.png)
+
+
+
+### synthetic statsd charts
+
+Using synthetic charts, you can create dedicated sections on the dashboard to render the charts. You can control everything: the main menu, the submenus, the charts, the dimensions on each chart, etc.
+
+Synthetic charts are organized in
+
+- **applications** (i.e. entries at the main menu of the netdata dashboard)
+- **charts for each application** (grouped in families - i.e. submenus at the dashboard menu)
+- **statsd metrics for each chart** (i.e. dimensions of the charts)
+
+For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
+
+So, to create the statsd application `myapp`, you can create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
+
+```
+[app]
+    name = myapp
+    metrics = myapp.*
+    private charts = no
+    gaps when not collected = no
+    memory mode = ram
+    history = 60
+
+[dictionary]
+    m1 = metric1
+    m2 = metric2
+
+# replace 'mychart' with the chart id
+# the chart will be named: myapp.mychart
+[mychart]
+    name = mychart
+    title = my chart title
+    family = my family
+    context = chart.context
+    units = tests/s
+    priority = 91000
+    type = area
+    dimension = myapp.metric1 m1
+    dimension = myapp.metric2 m2
+```
+
+Using the above configuration, `myapp` should get its own section on the dashboard, having one chart with 2 dimensions.
+
+`[app]` starts a new application definition. The supported settings in this section are:
+
+- `name` defines the name of the app.
+- `metrics` is a netdata simple pattern (space separated patterns, using `*` for wildcard, possibly starting with `!` for negative match). This pattern should match all the possible statsd metrics that will be participating in the application `myapp`.
+- `private charts = yes|no`, enables or disables private charts for the metrics matched.
+- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, when metrics are not collected.
+- `memory mode` sets the memory mode for all charts of the application. The default is the global default for netdata (not the global default for statsd private charts).
+- `history` sets the size of the round robin database for this application. The default is the global default for netdata (not the global default for statsd private charts).
+
+`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Dimension names can also be defined on each `dimension` line; however, using the dictionary they can be declared globally, once for each app, and the dictionary is the only way to rename dimensions when using patterns. Of course the dictionary can be empty or missing.
+
+Then, you can add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
+
+You can add any number of metrics to a chart, using `dimension` lines. These lines accept 6 space separated parameters:
+
+1. the metric name, as it is collected (it has to be matched by the `metrics = ` pattern of the app)
+2. the dimension name, as it should be shown on the chart
+3. an optional selector (type) of the value to be shown (see below)
+4. an optional multiplier
+5. an optional divider
+6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension, but not show it on the dashboard. This is usually needed to have the values available for percentage calculation, or to use them in alarms.
+
+So, the format is this:
+
+```
+dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
+```
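+
+For example, a dimension that feeds the 95th percentile of a timer metric into a chart, hidden from the dashboard but still available for alarms, could look like this (`myapp.response_time` is a hypothetical metric name):
+
+```
+# 'myapp.response_time' is a hypothetical timer metric
+dimension = myapp.response_time latency percentile 1 1 'hidden'
+```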
+
+`pattern` is a keyword. When set, `METRIC` is expected to be a netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions.
+
+`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
+
+`TYPE` can be:
+
+- `events` to show the number of events received by statsd for this metric
+- `last` to show the last value, as calculated at the flush interval of the metric (the default)
+
+Then for histograms and timers the following types are also supported:
+
+- `min`, show the minimum value
+- `max`, show the maximum value
+- `sum`, show the sum of all values
+- `average` (same as `last`)
+- `percentile`, show the 95th percentile (or any other percentile, as configured in the statsd global config)
+- `median`, show the median of all values (i.e. sort all values and get the middle value)
+- `stddev`, show the standard deviation of the values
+
+#### example synthetic charts
+
+statsd metrics: `foo` and `bar`.
+
+Contents of file `/etc/netdata/statsd.d/foobar.conf`:
+
+```
+[app]
+    name = foobarapp
+    metrics = foo bar
+    private charts = yes
+
+[foobar_chart1]
+    title = Hey, foo and bar together
+    family = foobar_family
+    context = foobarapp.foobars
+    units = foobars
+    type = area
+    dimension = foo 'foo me' last 1 1
+    dimension = bar 'bar me' last 1 1
+```
+
+I sent to statsd: `foo:10|g` and `bar:20|g`.
+
+I got these private charts:
+
+![screenshot from 2017-08-03 23-28-19](https://user-images.githubusercontent.com/2662304/28942295-7c3a73a8-78a3-11e7-88e5-a9a006bb7465.png)
+
+and this synthetic chart:
+
+![screenshot from 2017-08-03 23-29-14](https://user-images.githubusercontent.com/2662304/28942317-958a2c68-78a3-11e7-853f-32850141dd36.png)
+
+#### dictionary to name dimensions
+
+The `[dictionary]` section accepts any number of `name = value` pairs.
+
+netdata uses this dictionary as follows:
+
+1. When a `dimension` has a non-empty `NAME`, that name is looked up in the dictionary.
+
+2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original statsd metric name is looked up in the dictionary.
+
+3. If any of the above succeeds, netdata uses the `value` from the dictionary to set the name of the dimension. The dimension will have the original statsd metric name as its ID, and the dictionary value as its name.
+
+So, you can use the dictionary in 2 ways:
+
+1. set `dimension = myapp.metric1 ''` and have at the dictionary `myapp.metric1 = metric1 name`
+2. set `dimension = myapp.metric1 'm1'` and have at the dictionary `m1 = metric1 name`
+
+In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms you can use either of the 2, as `${myapp.metric1}` or `${metric1 name}`.
+
+> keep in mind that if you add the same statsd metric to a chart multiple times, netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
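+
+As a sketch of such an alarm (illustrative only - the exact thresholds and ids depend on your setup), a netdata health entry using the dimension from the `myapp.mychart` example above might look like this:
+
+```
+# illustrative sketch - adjust the chart id, dimension and threshold to your setup
+ alarm: myapp_metric1_high
+    on: myapp.mychart
+lookup: average -1m of myapp.metric1
+  warn: $this > 100
+```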
+
+#### dimension patterns
+
+netdata allows adding multiple dimensions to a chart, by matching the statsd metrics with a netdata simple pattern.
+
+Assume we have an API that provides statsd metrics for each response code per method it supports, like these:
+
+```
+myapp.api.get.200
+myapp.api.get.400
+myapp.api.get.500
+myapp.api.del.200
+myapp.api.del.400
+myapp.api.del.500
+myapp.api.post.200
+myapp.api.post.400
+myapp.api.post.500
+myapp.api.all.200
+myapp.api.all.400
+myapp.api.all.500
+```
+
+To add all response codes of `myapp.api.get` to a chart, use this:
+
+```
+[api_get_responses]
+    ...
+    dimension = pattern 'myapp.api.get.*' '' last 1 1
+```
+
+The above will add dimensions named `200`, `400` and `500` (yes, netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this:
+
+```
+[dictionary]
+    get.200 = 200 ok
+    get.400 = 400 bad request
+    get.500 = 500 cannot connect to db
+
+[api_get_responses]
+    ...
+    dimension = pattern 'myapp.api.get.*' 'get.' last 1 1
+```
+
+Note that we added a `NAME` to the dimension line, with value `get.`. This is prefixed to the wildcarded part of the metric name, to compose the key for looking up the dictionary. So `500` became `get.500`, which was looked up in the dictionary to find the value `500 cannot connect to db`. This way we can have different dimension names for each of the API methods (i.e. `get.500 = 500 cannot connect to db` while `post.500 = 500 cannot write to disk`).
+
+To add all API methods to a chart, do this:
+
+```
+[ok_by_method]
+    ...
+    dimension = pattern 'myapp.api.*.200' '' last 1 1
+```
+
+The above will add `get`, `post`, `del` and `all` to the chart.
+
+If `all` is not wanted (a `stacked` chart does not need the `all` dimension, since the sum of the dimensions provides the total), the line should be:
+
+```
+[ok_by_method]
+    ...
+    dimension = pattern '!myapp.api.all.* myapp.api.*.200' '' last 1 1
+```
+
+With the above, all methods except `all` will be added to the chart.
+
+To automatically rename the methods, use this:
+
+```
+[dictionary]
+    method.get = GET
+    method.post = ADD
+    method.del = DELETE
+
+[ok_by_method]
+    ...
+    dimension = pattern '!myapp.api.all.* myapp.api.*.200' 'method.' last 1 1
+```
+
+Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
+
+
+## interpolation
+
+~~If you send just one value to statsd, you will notice that the chart is created but no value is shown. The reason is that netdata interpolates all values at second boundaries. For incremental values (`counters` and `meters` in statsd terminology), if you send 10 at 00:00:00.500, 20 at 00:00:01.500 and 30 at 00:00:02.500, netdata will show 15 at 00:00:01 and 25 at 00:00:02.~~
+
+~~This interpolation is automatic and global in netdata for all charts, for incremental values. This means that for the chart to start showing values you need to send 2 values across 2 flush intervals.~~
+
+~~(although this is required for incremental values, netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all netdata dimensions).~~
+
+(statsd metrics do not lose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411))
+
+## sending statsd metrics from shell scripts
+
+You can send/update statsd metrics from shell scripts. You can use this feature to visualize, in netdata, automated jobs you run on your servers.
+
+The command you need to run is:
+
+```sh
+echo "NAME:VALUE|TYPE" | nc -u --send-only localhost 8125
+```
+
+Where:
+
+- `NAME` is the metric name
+- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything)
+- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type.
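+
+An optional sampling rate can also follow the type, as `NAME:VALUE|TYPE|@RATE` - counters and timers are scaled by `1/RATE` (see `statsd_process_metric()` in `statsd.c` later in this patch). For example, to count an event that is only reported for 1 in 10 occurrences:
+
+```sh
+# 1 event, sampled at 10% - statsd will add 10 to the counter (1 / 0.1)
+echo "requests:1|c|@0.1" | nc -u --send-only localhost 8125
+```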
+
+So, to set `metric1` as gauge to value `10`, use:
+
+```sh
+echo "metric1:10|g" | nc -u --send-only localhost 8125
+```
+
+To increment `metric2` by `10`, as a counter, use:
+
+```sh
+echo "metric2:10|c" | nc -u --send-only localhost 8125
+```
+
+You can send multiple metrics like this:
+
+```sh
+# send multiple metrics via UDP
+printf "metric1:10|g\nmetric2:10|c\n" | nc -u --send-only localhost 8125
+```
+
+Remember, for UDP communication each packet should not exceed the MTU. So, if you plan to push many metrics at once, prefer TCP communication:
+
+```sh
+# send multiple metrics via TCP
+printf "metric1:10|g\nmetric2:10|c\n" | nc --send-only localhost 8125
+```
+
+You can also use this little function to take care of all the details:
+
+```sh
+#!/usr/bin/env bash
+
+STATSD_HOST="localhost"
+STATSD_PORT="8125"
+statsd() {
+    local udp="-u" all="${*}"
+
+    # if the string length of all parameters given is above 1000, use TCP
+    [ "${#all}" -gt 1000 ] && udp=
+
+    while [ ! -z "${1}" ]
+    do
+        # use a '%s' format, so that metric values cannot be
+        # misinterpreted as printf format specifiers
+        printf '%s\n' "${1}"
+        shift
+    done | nc ${udp} --send-only ${STATSD_HOST} ${STATSD_PORT} || return 1
+
+    return 0
+}
+```
+
+You can use it like this:
+
+```sh
+# first, source it in your script
+source statsd.sh
+
+# then, at any point:
+statsd "metric1:10|g" "metric2:10|c" ...
+```
+
+The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
diff --git a/collectors/statsd.plugin/example.conf b/collectors/statsd.plugin/example.conf
new file mode 100644
index 000000000..2c7de6c7b
--- /dev/null
+++ b/collectors/statsd.plugin/example.conf
@@ -0,0 +1,64 @@
+# statsd synthetic charts configuration
+
+# You can add many .conf files in /etc/netdata/statsd.d/,
+# one for each of your apps.
+
+# start a new app - you can add many apps in the same file
+[app]
+    # give a name for this app
+    # this controls the main menu on the dashboard
+    # and will be the prefix for all charts of the app
+    name = myexampleapp
+
+    # match all the metrics of the app
+    metrics = myexampleapp.*
+
+    # shall private charts of these metrics be created?
+    private charts = no
+
+    # shall gaps be shown when metrics are not collected?
+    gaps when not collected = no
+
+    # the memory mode for the charts of this app: none|ram|map|save
+    # the default is to use the global memory mode
+    #memory mode = ram
+
+    # the history size for the charts of this app, in seconds
+    # the default is to use the global history
+    #history = 3600
+
+# create a chart
+# this is its id - the chart will be named myexampleapp.myexamplechart
+[myexamplechart]
+    # a name for the chart, similar to the id (each chart has both an id and a name)
+    name = myexamplechart
+
+    # the chart title
+    title = my chart title
+
+    # the submenu of the dashboard
+    family = my family
+
+    # the context for alarm templates
+    context = chart.context
+
+    # the units of the chart
+    units = tests/s
+
+    # the sorting priority of the chart on the dashboard
+    priority = 91000
+
+    # the type of chart to create: line | area | stacked
+    type = area
+
+    # one or more dimensions for the chart
+    # type = events | last | min | max | sum | average | percentile | median | stddev
+    # events = the number of events for this metric
+    # last = the last value collected
+    # all the others are only valid for histograms and timers
+    dimension = myexampleapp.metric1 avg average 1 1
+    dimension = myexampleapp.metric1 lower min 1 1
+    dimension = myexampleapp.metric1 upper max 1 1
+    dimension = myexampleapp.metric2 other last 1 1
+
+# You can add as many charts as needed
diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
new file mode 100644
index 000000000..c92bfd1c2
--- /dev/null
+++ b/collectors/statsd.plugin/statsd.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "statsd.h"
+
+#define STATSD_CHART_PREFIX "statsd"
+
+#define PLUGIN_STATSD_NAME "statsd.plugin"
+
+// --------------------------------------------------------------------------------------
+
+// #define STATSD_MULTITHREADED 1
+
+#ifdef STATSD_MULTITHREADED
+// DO NOT ENABLE MULTITHREADING - IT IS NOT WELL TESTED
+#define STATSD_AVL_TREE avl_tree_lock
+#define STATSD_AVL_INSERT avl_insert_lock
+#define STATSD_AVL_SEARCH avl_search_lock
+#define STATSD_AVL_INDEX_INIT { .avl_tree = { NULL, statsd_metric_compare }, .rwlock = AVL_LOCK_INITIALIZER }
+#define STATSD_FIRST_PTR_MUTEX netdata_mutex_t first_mutex
+#define STATSD_FIRST_PTR_MUTEX_INIT .first_mutex = NETDATA_MUTEX_INITIALIZER
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index) netdata_mutex_lock(&((index)->first_mutex))
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) netdata_mutex_unlock(&((index)->first_mutex))
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_DEFAULT
+#else
+#define STATSD_AVL_TREE avl_tree
+#define STATSD_AVL_INSERT avl_insert
+#define STATSD_AVL_SEARCH avl_search
+#define STATSD_AVL_INDEX_INIT { .root = NULL, .compar = statsd_metric_compare }
+#define STATSD_FIRST_PTR_MUTEX
+#define STATSD_FIRST_PTR_MUTEX_INIT
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index)
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index)
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_SINGLE_THREADED
+#endif
+
+#define STATSD_DECIMAL_DETAIL 1000 // floating point values get multiplied by this, with the same divisor
+
+// --------------------------------------------------------------------------------------------------------------------
+// data specific to each metric type
+
+typedef struct statsd_metric_gauge {
+    LONG_DOUBLE value;
+} STATSD_METRIC_GAUGE;
+
+typedef struct statsd_metric_counter { // counter and meter
+    long long value;
+} STATSD_METRIC_COUNTER;
+
+typedef struct statsd_histogram_extensions {
+    netdata_mutex_t mutex;
+
+    // average is stored in
metric->last + collected_number last_min; + collected_number last_max; + collected_number last_percentile; + collected_number last_median; + collected_number last_stddev; + collected_number last_sum; + + int zeroed; + + RRDDIM *rd_min; + RRDDIM *rd_max; + RRDDIM *rd_percentile; + RRDDIM *rd_median; + RRDDIM *rd_stddev; + RRDDIM *rd_sum; + + size_t size; + size_t used; + LONG_DOUBLE *values; // dynamic array of values collected +} STATSD_METRIC_HISTOGRAM_EXTENSIONS; + +typedef struct statsd_metric_histogram { // histogram and timer + STATSD_METRIC_HISTOGRAM_EXTENSIONS *ext; +} STATSD_METRIC_HISTOGRAM; + +typedef struct statsd_metric_set { + DICTIONARY *dict; + size_t unique; +} STATSD_METRIC_SET; + + +// -------------------------------------------------------------------------------------------------------------------- +// this is a metric - for all types of metrics + +typedef enum statsd_metric_options { + STATSD_METRIC_OPTION_NONE = 0x00000000, // no options set + STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED = 0x00000001, // do not update the chart dimension, when this metric is not collected + STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED = 0x00000002, // render a private chart for this metric + STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED = 0x00000004, // the metric has been checked if it should get private chart or not + STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT = 0x00000008, // show the count of events for this private chart + STATSD_METRIC_OPTION_CHECKED_IN_APPS = 0x00000010, // set when this metric has been checked against apps + STATSD_METRIC_OPTION_USED_IN_APPS = 0x00000020, // set when this metric is used in apps + STATSD_METRIC_OPTION_CHECKED = 0x00000040, // set when the charting thread checks this metric for use in charts (its usefulness) + STATSD_METRIC_OPTION_USEFUL = 0x00000080, // set when the charting thread finds the metric useful (i.e. 
used in a chart)
+} STATS_METRIC_OPTIONS;
+
+typedef enum statsd_metric_type {
+    STATSD_METRIC_TYPE_GAUGE,
+    STATSD_METRIC_TYPE_COUNTER,
+    STATSD_METRIC_TYPE_METER,
+    STATSD_METRIC_TYPE_TIMER,
+    STATSD_METRIC_TYPE_HISTOGRAM,
+    STATSD_METRIC_TYPE_SET
+} STATSD_METRIC_TYPE;
+
+
+typedef struct statsd_metric {
+    avl avl; // indexing - has to be first
+
+    const char *name; // the name of the metric
+    uint32_t hash; // hash of the name
+
+    STATSD_METRIC_TYPE type;
+
+    // metadata about data collection
+    collected_number events; // the number of times this metric has been collected (never resets)
+    size_t count; // the number of times this metric has been collected since the last flush
+
+    // the actual collected data
+    union {
+        STATSD_METRIC_GAUGE gauge;
+        STATSD_METRIC_COUNTER counter;
+        STATSD_METRIC_HISTOGRAM histogram;
+        STATSD_METRIC_SET set;
+    };
+
+    // chart related members
+    STATS_METRIC_OPTIONS options; // STATSD_METRIC_OPTION_* (bitfield)
+    char reset; // set to 1 by the charting thread to instruct the collector thread(s) to reset this metric
+    collected_number last; // the last value sent to netdata
+    RRDSET *st; // the private chart of this metric
+    RRDDIM *rd_value; // the dimension of this metric value
+    RRDDIM *rd_count; // the dimension for the number of events received
+
+    // linking, used for walking through all metrics
+    struct statsd_metric *next;
+    struct statsd_metric *next_useful;
+} STATSD_METRIC;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// each type of metric has its own index
+
+typedef struct statsd_index {
+    char *name; // the name of the index of metrics
+    size_t events; // the number of events processed for this index
+    size_t metrics; // the number of metrics in this index
+    size_t useful; // the number of useful metrics in this index
+
+    STATSD_AVL_TREE index; // the AVL tree
+
+    STATSD_METRIC *first; // the linked list of metrics (new metrics are added in front)
+    STATSD_METRIC *first_useful; // the linked list of useful metrics (new metrics are added in front)
+    STATSD_FIRST_PTR_MUTEX; // when multi-threading is enabled, a lock to protect the linked list
+
+    STATS_METRIC_OPTIONS default_options; // default options for all metrics in this index
+} STATSD_INDEX;
+
+static int statsd_metric_compare(void* a, void* b);
+
+// --------------------------------------------------------------------------------------------------------------------
+// synthetic charts
+
+typedef enum statsd_app_chart_dimension_value_type {
+    STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_LAST,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_SUM,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_MIN,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_MAX,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN,
+    STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV
+} STATSD_APP_CHART_DIM_VALUE_TYPE;
+
+typedef struct statsd_app_chart_dimension {
+    const char *name; // the name of this dimension
+    const char *metric; // the source metric name of this dimension
+    uint32_t metric_hash; // hash for fast string comparisons
+
+    SIMPLE_PATTERN *metric_pattern; // set when the 'metric' is a simple pattern
+
+    collected_number multiplier; // the multiplier of the dimension
+    collected_number divisor; // the divisor of the dimension
+    RRDDIM_FLAGS flags; // the RRDDIM flags for this dimension
+
+    STATSD_APP_CHART_DIM_VALUE_TYPE value_type; // which value to use of the source
metric + + RRDDIM *rd; // a pointer to the RRDDIM that has been created for this dimension + collected_number *value_ptr; // a pointer to the source metric value + RRD_ALGORITHM algorithm; // the algorithm of this dimension + + struct statsd_app_chart_dimension *next; // the next dimension for this chart +} STATSD_APP_CHART_DIM; + +typedef struct statsd_app_chart { + const char *source; + const char *id; + const char *name; + const char *title; + const char *family; + const char *context; + const char *units; + long priority; + RRDSET_TYPE chart_type; + STATSD_APP_CHART_DIM *dimensions; + size_t dimensions_count; + size_t dimensions_linked_count; + + RRDSET *st; + struct statsd_app_chart *next; +} STATSD_APP_CHART; + +typedef struct statsd_app { + const char *name; + SIMPLE_PATTERN *metrics; + STATS_METRIC_OPTIONS default_options; + RRD_MEMORY_MODE rrd_memory_mode; + DICTIONARY *dict; + long rrd_history_entries; + + const char *source; + STATSD_APP_CHART *charts; + struct statsd_app *next; +} STATSD_APP; + +// -------------------------------------------------------------------------------------------------------------------- +// global statsd data + +struct collection_thread_status { + int status; + size_t max_sockets; + + netdata_thread_t thread; + struct rusage rusage; + RRDSET *st_cpu; + RRDDIM *rd_user; + RRDDIM *rd_system; +}; + +static struct statsd { + STATSD_INDEX gauges; + STATSD_INDEX counters; + STATSD_INDEX timers; + STATSD_INDEX histograms; + STATSD_INDEX meters; + STATSD_INDEX sets; + size_t unknown_types; + size_t socket_errors; + size_t tcp_socket_connects; + size_t tcp_socket_disconnects; + size_t tcp_socket_connected; + size_t tcp_socket_reads; + size_t tcp_packets_received; + size_t tcp_bytes_read; + size_t udp_socket_reads; + size_t udp_packets_received; + size_t udp_bytes_read; + + int enabled; + int update_every; + SIMPLE_PATTERN *charts_for; + + size_t tcp_idle_timeout; + collected_number decimal_detail; + size_t private_charts; + size_t max_private_charts; + size_t max_private_charts_hard; + RRD_MEMORY_MODE private_charts_memory_mode; + long private_charts_rrd_history_entries; + unsigned int private_charts_hidden:1; + + STATSD_APP *apps; + size_t recvmmsg_size; + size_t histogram_increase_step; + double histogram_percentile; + char *histogram_percentile_str; + + int threads; + struct collection_thread_status *collection_threads_status; + + LISTEN_SOCKETS sockets; +} statsd = { + .enabled = 1, + .max_private_charts = 200, + .max_private_charts_hard = 1000, + .private_charts_hidden = 0, + .recvmmsg_size = 10, + .decimal_detail = STATSD_DECIMAL_DETAIL, + + .gauges = { + .name = "gauge", + .events = 0, + .metrics = 0, + .index = STATSD_AVL_INDEX_INIT, + .default_options = STATSD_METRIC_OPTION_NONE, + .first = NULL, + STATSD_FIRST_PTR_MUTEX_INIT + }, + .counters = { + .name = "counter", + .events = 0, + .metrics = 0, + .index = STATSD_AVL_INDEX_INIT, + .default_options = STATSD_METRIC_OPTION_NONE, + .first = NULL, + STATSD_FIRST_PTR_MUTEX_INIT + }, + .timers = { + .name = "timer", + .events = 0, + .metrics = 0, + .index = STATSD_AVL_INDEX_INIT, + .default_options = STATSD_METRIC_OPTION_NONE, + .first = NULL, + STATSD_FIRST_PTR_MUTEX_INIT + }, + .histograms = { + .name = "histogram", + .events = 0, + .metrics = 0, + .index = STATSD_AVL_INDEX_INIT, + .default_options = STATSD_METRIC_OPTION_NONE, + .first = NULL, + STATSD_FIRST_PTR_MUTEX_INIT + }, + .meters = { + .name = "meter", + .events = 0, + .metrics = 0, + .index = STATSD_AVL_INDEX_INIT, + .default_options = 
STATSD_METRIC_OPTION_NONE, + .first = NULL, + STATSD_FIRST_PTR_MUTEX_INIT + }, + .sets = { + .name = "set", + .events = 0, + .metrics = 0, + .index = STATSD_AVL_INDEX_INIT, + .default_options = STATSD_METRIC_OPTION_NONE, + .first = NULL, + STATSD_FIRST_PTR_MUTEX_INIT + }, + + .tcp_idle_timeout = 600, + + .apps = NULL, + .histogram_percentile = 95.0, + .histogram_increase_step = 10, + .threads = 0, + .collection_threads_status = NULL, + .sockets = { + .config = &netdata_config, + .config_section = CONFIG_SECTION_STATSD, + .default_bind_to = "udp:localhost tcp:localhost", + .default_port = STATSD_LISTEN_PORT, + .backlog = STATSD_LISTEN_BACKLOG + }, +}; + + +// -------------------------------------------------------------------------------------------------------------------- +// statsd index management - add/find metrics + +static int statsd_metric_compare(void* a, void* b) { + if(((STATSD_METRIC *)a)->hash < ((STATSD_METRIC *)b)->hash) return -1; + else if(((STATSD_METRIC *)a)->hash > ((STATSD_METRIC *)b)->hash) return 1; + else return strcmp(((STATSD_METRIC *)a)->name, ((STATSD_METRIC *)b)->name); +} + +static inline STATSD_METRIC *statsd_metric_index_find(STATSD_INDEX *index, const char *name, uint32_t hash) { + STATSD_METRIC tmp; + tmp.name = name; + tmp.hash = (hash)?hash:simple_hash(tmp.name); + + return (STATSD_METRIC *)STATSD_AVL_SEARCH(&index->index, (avl *)&tmp); +} + +static inline STATSD_METRIC *statsd_find_or_add_metric(STATSD_INDEX *index, const char *name, STATSD_METRIC_TYPE type) { + debug(D_STATSD, "searching for metric '%s' under '%s'", name, index->name); + + uint32_t hash = simple_hash(name); + + STATSD_METRIC *m = statsd_metric_index_find(index, name, hash); + if(unlikely(!m)) { + debug(D_STATSD, "Creating new %s metric '%s'", index->name, name); + + m = (STATSD_METRIC *)callocz(sizeof(STATSD_METRIC), 1); + m->name = strdupz(name); + m->hash = hash; + m->type = type; + m->options = index->default_options; + + if(type == STATSD_METRIC_TYPE_HISTOGRAM || type == STATSD_METRIC_TYPE_TIMER) { + m->histogram.ext = callocz(sizeof(STATSD_METRIC_HISTOGRAM_EXTENSIONS), 1); + netdata_mutex_init(&m->histogram.ext->mutex); + } + STATSD_METRIC *n = (STATSD_METRIC *)STATSD_AVL_INSERT(&index->index, (avl *)m); + if(unlikely(n != m)) { + freez((void *)m->histogram.ext); + freez((void *)m->name); + freez((void *)m); + m = n; + } + else { + STATSD_FIRST_PTR_MUTEX_LOCK(index); + index->metrics++; + m->next = index->first; + index->first = m; + STATSD_FIRST_PTR_MUTEX_UNLOCK(index); + } + } + + index->events++; + return m; +} + + +// -------------------------------------------------------------------------------------------------------------------- +// statsd parsing numbers + +static inline LONG_DOUBLE statsd_parse_float(const char *v, LONG_DOUBLE def) { + LONG_DOUBLE value; + + if(likely(v && *v)) { + char *e = NULL; + value = str2ld(v, &e); + if(unlikely(e && *e)) + error("STATSD: excess data '%s' after value '%s'", e, v); + } + else + value = def; + + return value; +} + +static inline LONG_DOUBLE statsd_parse_sampling_rate(const char *v) { + LONG_DOUBLE sampling_rate = statsd_parse_float(v, 1.0); + if(unlikely(isless(sampling_rate, 0.001))) sampling_rate = 0.001; + if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0; + return sampling_rate; +} + +static inline long long statsd_parse_int(const char *v, long long def) { + long long value; + + if(likely(v && *v)) { + char *e = NULL; + value = str2ll(v, &e); + if(unlikely(e && *e)) + error("STATSD: excess data '%s' after value 
'%s'", e, v); + } + else + value = def; + + return value; +} + + +// -------------------------------------------------------------------------------------------------------------------- +// statsd processors per metric type + +static inline void statsd_reset_metric(STATSD_METRIC *m) { + m->reset = 0; + m->count = 0; +} + +static inline int value_is_zinit(const char *value) { + return (value && *value == 'z' && *++value == 'i' && *++value == 'n' && *++value == 'i' && *++value == 't' && *++value == '\0'); +} + +#define is_metric_checked(m) ((m)->options & STATSD_METRIC_OPTION_CHECKED) +#define is_metric_useful_for_collection(m) (!is_metric_checked(m) || ((m)->options & STATSD_METRIC_OPTION_USEFUL)) + +static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) { + if(!is_metric_useful_for_collection(m)) return; + + if(unlikely(!value || !*value)) { + error("STATSD: metric '%s' of type gauge, with empty value is ignored.", m->name); + return; + } + + if(unlikely(m->reset)) { + // no need to reset anything specific for gauges + statsd_reset_metric(m); + } + + if(unlikely(value_is_zinit(value))) { + // magic loading of metric, without affecting anything + } + else { + if (unlikely(*value == '+' || *value == '-')) + m->gauge.value += statsd_parse_float(value, 1.0) / statsd_parse_sampling_rate(sampling); + else + m->gauge.value = statsd_parse_float(value, 1.0); + + m->events++; + m->count++; + } +} + +static inline void statsd_process_counter_or_meter(STATSD_METRIC *m, const char *value, const char *sampling) { + if(!is_metric_useful_for_collection(m)) return; + + // we accept empty values for counters + + if(unlikely(m->reset)) statsd_reset_metric(m); + + if(unlikely(value_is_zinit(value))) { + // magic loading of metric, without affecting anything + } + else { + m->counter.value += llrintl((LONG_DOUBLE) statsd_parse_int(value, 1) / statsd_parse_sampling_rate(sampling)); + + m->events++; + m->count++; + } +} + +#define statsd_process_counter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling) +#define statsd_process_meter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling) + +static inline void statsd_process_histogram_or_timer(STATSD_METRIC *m, const char *value, const char *sampling, const char *type) { + if(!is_metric_useful_for_collection(m)) return; + + if(unlikely(!value || !*value)) { + error("STATSD: metric of type %s, with empty value is ignored.", type); + return; + } + + if(unlikely(m->reset)) { + m->histogram.ext->used = 0; + statsd_reset_metric(m); + } + + if(unlikely(value_is_zinit(value))) { + // magic loading of metric, without affecting anything + } + else { + LONG_DOUBLE v = statsd_parse_float(value, 1.0); + LONG_DOUBLE sampling_rate = statsd_parse_sampling_rate(sampling); + if(unlikely(isless(sampling_rate, 0.01))) sampling_rate = 0.01; + if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0; + + long long samples = llrintl(1.0 / sampling_rate); + while(samples-- > 0) { + + if(unlikely(m->histogram.ext->used == m->histogram.ext->size)) { + netdata_mutex_lock(&m->histogram.ext->mutex); + m->histogram.ext->size += statsd.histogram_increase_step; + m->histogram.ext->values = reallocz(m->histogram.ext->values, sizeof(LONG_DOUBLE) * m->histogram.ext->size); + netdata_mutex_unlock(&m->histogram.ext->mutex); + } + + m->histogram.ext->values[m->histogram.ext->used++] = v; + } + + m->events++; + m->count++; + } +} + +#define statsd_process_timer(m, value, sampling) 
statsd_process_histogram_or_timer(m, value, sampling, "timer") +#define statsd_process_histogram(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "histogram") + +static inline void statsd_process_set(STATSD_METRIC *m, const char *value) { + if(!is_metric_useful_for_collection(m)) return; + + if(unlikely(!value || !*value)) { + error("STATSD: metric of type set, with empty value is ignored."); + return; + } + + if(unlikely(m->reset)) { + if(likely(m->set.dict)) { + dictionary_destroy(m->set.dict); + m->set.dict = NULL; + } + statsd_reset_metric(m); + } + + if (unlikely(!m->set.dict)) { + m->set.dict = dictionary_create(STATSD_DICTIONARY_OPTIONS | DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE); + m->set.unique = 0; + } + + if(unlikely(value_is_zinit(value))) { + // magic loading of metric, without affecting anything + } + else { + void *t = dictionary_get(m->set.dict, value); + if (unlikely(!t)) { + dictionary_set(m->set.dict, value, NULL, 1); + m->set.unique++; + } + + m->events++; + m->count++; + } +} + + +// -------------------------------------------------------------------------------------------------------------------- +// statsd parsing + +static void statsd_process_metric(const char *name, const char *value, const char *type, const char *sampling, const char *tags) { + (void)tags; + + debug(D_STATSD, "STATSD: raw metric '%s', value '%s', type '%s', sampling '%s', tags '%s'", name?name:"(null)", value?value:"(null)", type?type:"(null)", sampling?sampling:"(null)", tags?tags:"(null)"); + + if(unlikely(!name || !*name)) return; + if(unlikely(!type || !*type)) type = "m"; + + char t0 = type[0], t1 = type[1]; + + if(unlikely(t0 == 'g' && t1 == '\0')) { + statsd_process_gauge( + statsd_find_or_add_metric(&statsd.gauges, name, STATSD_METRIC_TYPE_GAUGE), + value, sampling); + } + else if(unlikely((t0 == 'c' || t0 == 'C') && t1 == '\0')) { + // etsy/statsd uses 'c' + // brubeck uses 'C' + statsd_process_counter( + statsd_find_or_add_metric(&statsd.counters, name, STATSD_METRIC_TYPE_COUNTER), + value, sampling); + } + else if(unlikely(t0 == 'm' && t1 == '\0')) { + statsd_process_meter( + statsd_find_or_add_metric(&statsd.meters, name, STATSD_METRIC_TYPE_METER), + value, sampling); + } + else if(unlikely(t0 == 'h' && t1 == '\0')) { + statsd_process_histogram( + statsd_find_or_add_metric(&statsd.histograms, name, STATSD_METRIC_TYPE_HISTOGRAM), + value, sampling); + } + else if(unlikely(t0 == 's' && t1 == '\0')) { + statsd_process_set( + statsd_find_or_add_metric(&statsd.sets, name, STATSD_METRIC_TYPE_SET), + value); + } + else if(unlikely(t0 == 'm' && t1 == 's' && type[2] == '\0')) { + statsd_process_timer( + statsd_find_or_add_metric(&statsd.timers, name, STATSD_METRIC_TYPE_TIMER), + value, sampling); + } + else { + statsd.unknown_types++; + error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type); + } +} + +static inline const char *statsd_parse_skip_up_to(const char *s, char d1, char d2) { + char c; + + for(c = *s; c && c != d1 && c != d2 && c != '\r' && c != '\n'; c = *++s) ; + + return s; +} + +const char *statsd_parse_skip_spaces(const char *s) { + char c; + + for(c = *s; c && ( c == ' ' || c == '\t' || c == '\r' || c == '\n' ); c = *++s) ; + + return s; +} + +static inline const char *statsd_parse_field_trim(const char *start, char *end) { + if(unlikely(!start)) { + start = end; + return start; + } + + while(start <= end && (*start == ' ' || *start == '\t')) + start++; + + *end = '\0'; + end--; + while(end >= start && 
(*end == ' ' || *end == '\t')) + *end-- = '\0'; + + return start; +} + +static inline size_t statsd_process(char *buffer, size_t size, int require_newlines) { + buffer[size] = '\0'; + debug(D_STATSD, "RECEIVED: %zu bytes: '%s'", size, buffer); + + const char *s = buffer; + while(*s) { + const char *name = NULL, *value = NULL, *type = NULL, *sampling = NULL, *tags = NULL; + char *name_end = NULL, *value_end = NULL, *type_end = NULL, *sampling_end = NULL, *tags_end = NULL; + + s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '|'); + if(name == name_end) { + s = statsd_parse_skip_spaces(s); + continue; + } + + if(likely(*s == ':')) + s = value_end = (char *) statsd_parse_skip_up_to(value = ++s, '|', '|'); + + if(likely(*s == '|')) + s = type_end = (char *) statsd_parse_skip_up_to(type = ++s, '|', '@'); + + if(likely(*s == '|' || *s == '@')) { + s = sampling_end = (char *) statsd_parse_skip_up_to(sampling = ++s, '|', '#'); + if(*sampling == '@') sampling++; + } + + if(likely(*s == '|' || *s == '#')) { + s = tags_end = (char *) statsd_parse_skip_up_to(tags = ++s, '|', '|'); + if(*tags == '#') tags++; + } + + // skip everything until the end of the line + while(*s && *s != '\n') s++; + + if(unlikely(require_newlines && *s != '\n' && s > buffer)) { + // move the remaining data to the beginning + size -= (name - buffer); + memmove(buffer, name, size); + return size; + } + else + s = statsd_parse_skip_spaces(s); + + statsd_process_metric( + statsd_parse_field_trim(name, name_end) + , statsd_parse_field_trim(value, value_end) + , statsd_parse_field_trim(type, type_end) + , statsd_parse_field_trim(sampling, sampling_end) + , statsd_parse_field_trim(tags, tags_end) + ); + } + + return 0; +} + + +// -------------------------------------------------------------------------------------------------------------------- +// statsd pollfd interface + +#define STATSD_TCP_BUFFER_SIZE 65536 // minimize tcp reads +#define STATSD_UDP_BUFFER_SIZE 9000 // this should be up to MTU + +typedef enum { + STATSD_SOCKET_DATA_TYPE_TCP, + STATSD_SOCKET_DATA_TYPE_UDP +} STATSD_SOCKET_DATA_TYPE; + +struct statsd_tcp { + STATSD_SOCKET_DATA_TYPE type; + size_t size; + size_t len; + char buffer[]; +}; + +#ifdef HAVE_RECVMMSG +struct statsd_udp { + int *running; + STATSD_SOCKET_DATA_TYPE type; + size_t size; + struct iovec *iovecs; + struct mmsghdr *msgs; +}; +#else +struct statsd_udp { + int *running; + STATSD_SOCKET_DATA_TYPE type; + char buffer[STATSD_UDP_BUFFER_SIZE]; +}; +#endif + +// new TCP client connected +static void *statsd_add_callback(POLLINFO *pi, short int *events, void *data) { + (void)pi; + (void)data; + + *events = POLLIN; + + struct statsd_tcp *t = (struct statsd_tcp *)callocz(sizeof(struct statsd_tcp) + STATSD_TCP_BUFFER_SIZE, 1); + t->type = STATSD_SOCKET_DATA_TYPE_TCP; + t->size = STATSD_TCP_BUFFER_SIZE - 1; + statsd.tcp_socket_connects++; + statsd.tcp_socket_connected++; + + return t; +} + +// TCP client disconnected +static void statsd_del_callback(POLLINFO *pi) { + struct statsd_tcp *t = pi->data; + + if(likely(t)) { + if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) { + if(t->len != 0) { + statsd.socket_errors++; + error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. 
Trying to process it.", t->buffer); + statsd_process(t->buffer, t->len, 0); + } + statsd.tcp_socket_disconnects++; + statsd.tcp_socket_connected--; + } + else + error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP); + + freez(t); + } +} + +// Receive data +static int statsd_rcv_callback(POLLINFO *pi, short int *events) { + *events = POLLIN; + + int fd = pi->fd; + + switch(pi->socktype) { + case SOCK_STREAM: { + struct statsd_tcp *d = (struct statsd_tcp *)pi->data; + if(unlikely(!d)) { + error("STATSD: internal error: expected TCP data pointer is NULL"); + statsd.socket_errors++; + return -1; + } + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) { + error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type); + statsd.socket_errors++; + return -1; + } +#endif + + int ret = 0; + ssize_t rc; + do { + rc = recv(fd, &d->buffer[d->len], d->size - d->len, MSG_DONTWAIT); + if (rc < 0) { + // read failed + if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { + error("STATSD: recv() on TCP socket %d failed.", fd); + statsd.socket_errors++; + ret = -1; + } + } + else if (!rc) { + // connection closed + debug(D_STATSD, "STATSD: client disconnected."); + ret = -1; + } + else { + // data received + d->len += rc; + statsd.tcp_socket_reads++; + statsd.tcp_bytes_read += rc; + } + + if(likely(d->len > 0)) { + statsd.tcp_packets_received++; + d->len = statsd_process(d->buffer, d->len, 1); + } + + if(unlikely(ret == -1)) + return -1; + + } while (rc != -1); + break; + } + + case SOCK_DGRAM: { + struct statsd_udp *d = (struct statsd_udp *)pi->data; + if(unlikely(!d)) { + error("STATSD: internal error: expected UDP data pointer is NULL"); + statsd.socket_errors++; + return -1; + } + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) { + error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP); + statsd.socket_errors++; + return -1; + } +#endif + +#ifdef HAVE_RECVMMSG + ssize_t rc; + do { + rc = recvmmsg(fd, d->msgs, (unsigned int)d->size, MSG_DONTWAIT, NULL); + if (rc < 0) { + // read failed + if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { + error("STATSD: recvmmsg() on UDP socket %d failed.", fd); + statsd.socket_errors++; + return -1; + } + } else if (rc) { + // data received + statsd.udp_socket_reads++; + statsd.udp_packets_received += rc; + + size_t i; + for (i = 0; i < (size_t)rc; ++i) { + size_t len = (size_t)d->msgs[i].msg_len; + statsd.udp_bytes_read += len; + statsd_process(d->msgs[i].msg_hdr.msg_iov->iov_base, len, 0); + } + } + } while (rc != -1); + +#else // !HAVE_RECVMMSG + ssize_t rc; + do { + rc = recv(fd, d->buffer, STATSD_UDP_BUFFER_SIZE - 1, MSG_DONTWAIT); + if (rc < 0) { + // read failed + if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { + error("STATSD: recv() on UDP socket %d failed.", fd); + statsd.socket_errors++; + return -1; + } + } else if (rc) { + // data received + statsd.udp_socket_reads++; + statsd.udp_packets_received++; + statsd.udp_bytes_read += rc; + statsd_process(d->buffer, (size_t) rc, 0); + } + } while (rc != -1); +#endif + + break; + } + + default: { + error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd); + statsd.socket_errors++; + return -1; + } + } + + return 0; +} + +static int statsd_snd_callback(POLLINFO *pi, short 
int *events) { + (void)pi; + (void)events; + + error("STATSD: snd_callback() called, but we never requested to send data to statsd clients."); + return -1; +} + +static void statsd_timer_callback(void *timer_data) { + struct collection_thread_status *status = timer_data; + getrusage(RUSAGE_THREAD, &status->rusage); +} + +// -------------------------------------------------------------------------------------------------------------------- +// statsd child thread to collect metrics from network + +void statsd_collector_thread_cleanup(void *data) { + struct statsd_udp *d = data; + *d->running = 0; + + info("cleaning up..."); + +#ifdef HAVE_RECVMMSG + size_t i; + for (i = 0; i < d->size; i++) + freez(d->iovecs[i].iov_base); + + freez(d->iovecs); + freez(d->msgs); +#endif + + freez(d); +} + +void *statsd_collector_thread(void *ptr) { + struct collection_thread_status *status = ptr; + status->status = 1; + + info("STATSD collector thread started with taskid %d", gettid()); + + struct statsd_udp *d = callocz(sizeof(struct statsd_udp), 1); + d->running = &status->status; + + netdata_thread_cleanup_push(statsd_collector_thread_cleanup, d); + +#ifdef HAVE_RECVMMSG + d->type = STATSD_SOCKET_DATA_TYPE_UDP; + d->size = statsd.recvmmsg_size; + d->iovecs = callocz(sizeof(struct iovec), d->size); + d->msgs = callocz(sizeof(struct mmsghdr), d->size); + + size_t i; + for (i = 0; i < d->size; i++) { + d->iovecs[i].iov_base = mallocz(STATSD_UDP_BUFFER_SIZE); + d->iovecs[i].iov_len = STATSD_UDP_BUFFER_SIZE - 1; + d->msgs[i].msg_hdr.msg_iov = &d->iovecs[i]; + d->msgs[i].msg_hdr.msg_iovlen = 1; + } +#endif + + poll_events(&statsd.sockets + , statsd_add_callback + , statsd_del_callback + , statsd_rcv_callback + , statsd_snd_callback + , statsd_timer_callback + , NULL + , (void *)d + , 0 // tcp request timeout, 0 = disabled + , statsd.tcp_idle_timeout // tcp idle timeout, 0 = disabled + , statsd.update_every * 1000 + , ptr // timer_data + , status->max_sockets + ); + + netdata_thread_cleanup_pop(1); + return NULL; +} + + +// -------------------------------------------------------------------------------------------------------------------- +// statsd applications configuration files parsing + +#define STATSD_CONF_LINE_MAX 8192 + +static STATSD_APP_CHART_DIM_VALUE_TYPE string2valuetype(const char *type, size_t line, const char *filename) { + if(!type || !*type) type = "last"; + + if(!strcmp(type, "events")) return STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS; + else if(!strcmp(type, "last")) return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST; + else if(!strcmp(type, "min")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MIN; + else if(!strcmp(type, "max")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MAX; + else if(!strcmp(type, "sum")) return STATSD_APP_CHART_DIM_VALUE_TYPE_SUM; + else if(!strcmp(type, "average")) return STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE; + else if(!strcmp(type, "median")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN; + else if(!strcmp(type, "stddev")) return STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV; + else if(!strcmp(type, "percentile")) return STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE; + + error("STATSD: invalid type '%s' at line %zu of file '%s'. 
Using 'last'.", type, line, filename); + return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST; +} + +static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) { + switch(type) { + case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: return "events"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: return "last"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: return "min"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: return "max"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: return "sum"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: return "average"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: return "median"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: return "stddev"; + case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: return "percentile"; + } + + return "unknown"; +} + +static STATSD_APP_CHART_DIM *add_dimension_to_app_chart( + STATSD_APP *app + , STATSD_APP_CHART *chart + , const char *metric_name + , const char *dim_name + , collected_number multiplier + , collected_number divisor + , RRDDIM_FLAGS flags + , STATSD_APP_CHART_DIM_VALUE_TYPE value_type +) { + STATSD_APP_CHART_DIM *dim = callocz(sizeof(STATSD_APP_CHART_DIM), 1); + + dim->metric = strdupz(metric_name); + dim->metric_hash = simple_hash(dim->metric); + + dim->name = strdupz((dim_name)?dim_name:""); + dim->multiplier = multiplier; + dim->divisor = divisor; + dim->value_type = value_type; + dim->flags = flags; + + if(!dim->multiplier) + dim->multiplier = 1; + + if(!dim->divisor) + dim->divisor = 1; + + // append it to the list of dimension + STATSD_APP_CHART_DIM *tdim; + for(tdim = chart->dimensions; tdim && tdim->next ; tdim = tdim->next) ; + if(!tdim) { + dim->next = chart->dimensions; + chart->dimensions = dim; + } + else { + dim->next = tdim->next; + tdim->next = dim; + } + chart->dimensions_count++; + + debug(D_STATSD, "Added dimension '%s' to chart '%s' of app '%s', for metric '%s', with type %u, multiplier " COLLECTED_NUMBER_FORMAT ", divisor " COLLECTED_NUMBER_FORMAT, + dim->name, chart->id, app->name, dim->metric, dim->value_type, dim->multiplier, dim->divisor); + + return dim; +} + +static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHART *chart, DICTIONARY *dict) { + debug(D_STATSD, "STATSD configuration reading file '%s'", filename); + + char *buffer = mallocz(STATSD_CONF_LINE_MAX + 1); + + FILE *fp = fopen(filename, "r"); + if(!fp) { + error("STATSD: cannot open file '%s'.", filename); + freez(buffer); + return -1; + } + + size_t line = 0; + char *s; + while(fgets(buffer, STATSD_CONF_LINE_MAX, fp) != NULL) { + buffer[STATSD_CONF_LINE_MAX] = '\0'; + line++; + + s = trim(buffer); + if (!s || *s == '#') { + debug(D_STATSD, "STATSD: ignoring line %zu of file '%s', it is empty.", line, filename); + continue; + } + + debug(D_STATSD, "STATSD: processing line %zu of file '%s': %s", line, filename, buffer); + + if(*s == 'i' && strncmp(s, "include", 7) == 0) { + s = trim(&s[7]); + if(s && *s) { + char *tmp; + if(*s == '/') + tmp = strdupz(s); + else { + // the file to be included is relative to current file + // find the directory name from the file we already read + char *filename2 = strdupz(filename); // copy filename, since dirname() will change it + char *dir = dirname(filename2); // find the directory part of the filename + tmp = strdupz_path_subpath(dir, s); // compose the new filename to read; + freez(filename2); // free the filename we copied + } + statsd_readfile(tmp, app, chart, dict); + freez(tmp); + } + else + error("STATSD: ignoring line %zu of file '%s', include filename is empty", line, 
filename); + + continue; + } + + int len = (int) strlen(s); + if (*s == '[' && s[len - 1] == ']') { + // new section + s[len - 1] = '\0'; + s++; + + if (!strcmp(s, "app")) { + // a new app + app = callocz(sizeof(STATSD_APP), 1); + app->name = strdupz("unnamed"); + app->rrd_memory_mode = localhost->rrd_memory_mode; + app->rrd_history_entries = localhost->rrd_history_entries; + + app->next = statsd.apps; + statsd.apps = app; + chart = NULL; + dict = NULL; + + { + char lineandfile[FILENAME_MAX + 1]; + snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename); + app->source = strdupz(lineandfile); + } + } + else if(app) { + if(!strcmp(s, "dictionary")) { + if(!app->dict) + app->dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED); + + dict = app->dict; + } + else { + dict = NULL; + + // a new chart + chart = callocz(sizeof(STATSD_APP_CHART), 1); + netdata_fix_chart_id(s); + chart->id = strdupz(s); + chart->name = strdupz(s); + chart->title = strdupz("Statsd chart"); + chart->context = strdupz(s); + chart->family = strdupz("overview"); + chart->units = strdupz("value"); + chart->priority = NETDATA_CHART_PRIO_STATSD_PRIVATE; + chart->chart_type = RRDSET_TYPE_LINE; + + chart->next = app->charts; + app->charts = chart; + + { + char lineandfile[FILENAME_MAX + 1]; + snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename); + chart->source = strdupz(lineandfile); + } + } + } + else + error("STATSD: ignoring line %zu ('%s') of file '%s', [app] is not defined.", line, s, filename); + + continue; + } + + if(!app) { + error("STATSD: ignoring line %zu ('%s') of file '%s', it is outside all sections.", line, s, filename); + continue; + } + + char *name = s; + char *value = strchr(s, '='); + if(!value) { + error("STATSD: ignoring line %zu ('%s') of file '%s', there is no = in it.", line, s, filename); + continue; + } + *value = '\0'; + value++; + + name = trim(name); + value = trim(value); + + if(!name || *name == '#') { + error("STATSD: ignoring line %zu of file '%s', name is empty.", line, filename); + continue; + } + if(!value) { + debug(D_CONFIG, "STATSD: ignoring line %zu of file '%s', value is empty.", line, filename); + continue; + } + + if(unlikely(dict)) { + // parse [dictionary] members + + dictionary_set(dict, name, value, strlen(value) + 1); + } + else if(!chart) { + // parse [app] members + + if(!strcmp(name, "name")) { + freez((void *)app->name); + netdata_fix_chart_name(value); + app->name = strdupz(value); + } + else if (!strcmp(name, "metrics")) { + simple_pattern_free(app->metrics); + app->metrics = simple_pattern_create(value, NULL, SIMPLE_PATTERN_EXACT); + } + else if (!strcmp(name, "private charts")) { + if (!strcmp(value, "yes") || !strcmp(value, "on")) + app->default_options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + else + app->default_options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + } + else if (!strcmp(name, "gaps when not collected")) { + if (!strcmp(value, "yes") || !strcmp(value, "on")) + app->default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + } + else if (!strcmp(name, "memory mode")) { + app->rrd_memory_mode = rrd_memory_mode_id(value); + } + else if (!strcmp(name, "history")) { + app->rrd_history_entries = atol(value); + if (app->rrd_history_entries < 5) + app->rrd_history_entries = 5; + } + else { + error("STATSD: ignoring line %zu ('%s') of file '%s'. 
Unknown keyword for the [app] section.", line, name, filename); + continue; + } + } + else { + // parse [chart] members + + if(!strcmp(name, "name")) { + freez((void *)chart->name); + netdata_fix_chart_id(value); + chart->name = strdupz(value); + } + else if(!strcmp(name, "title")) { + freez((void *)chart->title); + chart->title = strdupz(value); + } + else if (!strcmp(name, "family")) { + freez((void *)chart->family); + chart->family = strdupz(value); + } + else if (!strcmp(name, "context")) { + freez((void *)chart->context); + netdata_fix_chart_id(value); + chart->context = strdupz(value); + } + else if (!strcmp(name, "units")) { + freez((void *)chart->units); + chart->units = strdupz(value); + } + else if (!strcmp(name, "priority")) { + chart->priority = atol(value); + } + else if (!strcmp(name, "type")) { + chart->chart_type = rrdset_type_id(value); + } + else if (!strcmp(name, "dimension")) { + // metric [name [type [multiplier [divisor]]]] + char *words[10]; + pluginsd_split_words(value, words, 10); + + int pattern = 0; + size_t i = 0; + char *metric_name = words[i++]; + + if(strcmp(metric_name, "pattern") == 0) { + metric_name = words[i++]; + pattern = 1; + } + + char *dim_name = words[i++]; + char *type = words[i++]; + char *multipler = words[i++]; + char *divisor = words[i++]; + char *options = words[i++]; + + RRDDIM_FLAGS flags = RRDDIM_FLAG_NONE; + if(options && *options) { + if(strstr(options, "hidden") != NULL) flags |= RRDDIM_FLAG_HIDDEN; + if(strstr(options, "noreset") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS; + if(strstr(options, "nooverflow") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS; + } + + if(!pattern) { + if(app->dict) { + if(dim_name && *dim_name) { + char *n = dictionary_get(app->dict, dim_name); + if(n) dim_name = n; + } + else { + dim_name = dictionary_get(app->dict, metric_name); + } + } + + if(!dim_name || !*dim_name) + dim_name = metric_name; + } + + STATSD_APP_CHART_DIM *dim = add_dimension_to_app_chart( + app + , chart + , metric_name + , dim_name + , (multipler && *multipler)?str2l(multipler):1 + , (divisor && *divisor)?str2l(divisor):1 + , flags + , string2valuetype(type, line, filename) + ); + + if(pattern) + dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT); + } + else { + error("STATSD: ignoring line %zu ('%s') of file '%s'. 
Unknown keyword for the [%s] section.", line, name, filename, chart->id); + continue; + } + } + } + + freez(buffer); + fclose(fp); + return 0; +} + +static int statsd_file_callback(const char *filename, void *data) { + (void)data; + return statsd_readfile(filename, NULL, NULL, NULL); +} + +static inline void statsd_readdir(const char *user_path, const char *stock_path, const char *subpath) { + recursive_config_double_dir_load(user_path, stock_path, subpath, statsd_file_callback, NULL, 0); +} + +// -------------------------------------------------------------------------------------------------------------------- +// send metrics to netdata - in private charts - called from the main thread + +// extract chart type and chart id from metric name +static inline void statsd_get_metric_type_and_id(STATSD_METRIC *m, char *type, char *id, const char *defid, size_t len) { + char *s; + + snprintfz(type, len, "%s_%s_%s", STATSD_CHART_PREFIX, defid, m->name); + for(s = type; *s ;s++) + if(unlikely(*s == '.')) break; + + if(*s == '.') { + *s++ = '\0'; + strncpyz(id, s, len); + } + else { + strncpyz(id, defid, len); + } + + netdata_fix_chart_id(type); + netdata_fix_chart_id(id); +} + +static inline RRDSET *statsd_private_rrdset_create( + STATSD_METRIC *m + , const char *type + , const char *id + , const char *name + , const char *family + , const char *context + , const char *title + , const char *units + , long priority + , int update_every + , RRDSET_TYPE chart_type +) { + RRD_MEMORY_MODE memory_mode = statsd.private_charts_memory_mode; + long history = statsd.private_charts_rrd_history_entries; + + if(unlikely(statsd.private_charts >= statsd.max_private_charts)) { + debug(D_STATSD, "STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts has been reached.", m->name); + info("STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts (%zu) has been reached. 
Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts); + memory_mode = RRD_MEMORY_MODE_NONE; + history = 5; + } + + statsd.private_charts++; + RRDSET *st = rrdset_create_custom( + localhost // host + , type // type + , id // id + , name // name + , family // family + , context // context + , title // title + , units // units + , PLUGIN_STATSD_NAME // plugin + , "private_chart" // module + , priority // priority + , update_every // update every + , chart_type // chart type + , memory_mode // memory mode + , history // history + ); + rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST); + + if(statsd.private_charts_hidden) + rrdset_flag_set(st, RRDSET_FLAG_HIDDEN); + + // rrdset_flag_set(st, RRDSET_FLAG_DEBUG); + return st; +} + +static inline void statsd_private_chart_gauge(STATSD_METRIC *m) { + debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name); + + if(unlikely(!m->st)) { + char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; + statsd_get_metric_type_and_id(m, type, id, "gauge", RRD_ID_LENGTH_MAX); + + char context[RRD_ID_LENGTH_MAX + 1]; + snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_gauge.%s", m->name); + + char title[RRD_ID_LENGTH_MAX + 1]; + snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for gauge %s", m->name); + + m->st = statsd_private_rrdset_create( + m + , type + , id + , NULL // name + , "gauges" // family (submenu) + , context // context + , title // title + , "value" // units + , NETDATA_CHART_PRIO_STATSD_PRIVATE + , statsd.update_every + , RRDSET_TYPE_LINE + ); + + m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + + if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) + m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(m->st); + + rrddim_set_by_pointer(m->st, m->rd_value, m->last); + + if(m->rd_count) + rrddim_set_by_pointer(m->st, m->rd_count, m->events); + + rrdset_done(m->st); +} + +static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) { + debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name); + + if(unlikely(!m->st)) { + char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; + statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX); + + char context[RRD_ID_LENGTH_MAX + 1]; + snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name); + + char title[RRD_ID_LENGTH_MAX + 1]; + snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name); + + m->st = statsd_private_rrdset_create( + m + , type + , id + , NULL // name + , family // family (submenu) + , context // context + , title // title + , "events/s" // units + , NETDATA_CHART_PRIO_STATSD_PRIVATE + , statsd.update_every + , RRDSET_TYPE_AREA + ); + + m->rd_value = rrddim_add(m->st, dim, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) + m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(m->st); + + rrddim_set_by_pointer(m->st, m->rd_value, m->last); + + if(m->rd_count) + rrddim_set_by_pointer(m->st, m->rd_count, m->events); + + rrdset_done(m->st); +} + +static inline void statsd_private_chart_set(STATSD_METRIC *m) { + debug(D_STATSD, "updating private chart for set metric '%s'", m->name); + + if(unlikely(!m->st)) { + char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; + 
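// the chart's type and id are derived from the metric name: + // statsd_get_metric_type_and_id() (defined above) splits it at its first dot + 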
statsd_get_metric_type_and_id(m, type, id, "set", RRD_ID_LENGTH_MAX); + + char context[RRD_ID_LENGTH_MAX + 1]; + snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_set.%s", m->name); + + char title[RRD_ID_LENGTH_MAX + 1]; + snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for set %s", m->name); + + m->st = statsd_private_rrdset_create( + m + , type + , id + , NULL // name + , "sets" // family (submenu) + , context // context + , title // title + , "entries" // units + , NETDATA_CHART_PRIO_STATSD_PRIVATE + , statsd.update_every + , RRDSET_TYPE_LINE + ); + + m->rd_value = rrddim_add(m->st, "set", "set size", 1, 1, RRD_ALGORITHM_ABSOLUTE); + + if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) + m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(m->st); + + rrddim_set_by_pointer(m->st, m->rd_value, m->last); + + if(m->rd_count) + rrddim_set_by_pointer(m->st, m->rd_count, m->events); + + rrdset_done(m->st); +} + +static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) { + debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name); + + if(unlikely(!m->st)) { + char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; + statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX); + + char context[RRD_ID_LENGTH_MAX + 1]; + snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name); + + char title[RRD_ID_LENGTH_MAX + 1]; + snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name); + + m->st = statsd_private_rrdset_create( + m + , type + , id + , NULL // name + , family // family (submenu) + , context // context + , title // title + , units // units + , NETDATA_CHART_PRIO_STATSD_PRIVATE + , statsd.update_every + , RRDSET_TYPE_AREA + ); + + m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + m->rd_value = rrddim_add(m->st, "average", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); + + if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) + m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(m->st); + + rrddim_set_by_pointer(m->st, m->histogram.ext->rd_min, m->histogram.ext->last_min); + rrddim_set_by_pointer(m->st, m->histogram.ext->rd_max, m->histogram.ext->last_max); + rrddim_set_by_pointer(m->st, m->histogram.ext->rd_percentile, m->histogram.ext->last_percentile); + rrddim_set_by_pointer(m->st, m->histogram.ext->rd_median, m->histogram.ext->last_median); + rrddim_set_by_pointer(m->st, m->histogram.ext->rd_stddev, m->histogram.ext->last_stddev); + rrddim_set_by_pointer(m->st, m->histogram.ext->rd_sum, m->histogram.ext->last_sum); + rrddim_set_by_pointer(m->st, m->rd_value, m->last); + + if(m->rd_count) + rrddim_set_by_pointer(m->st, m->rd_count, m->events); + + rrdset_done(m->st); +} 
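For context, the private-chart functions above are driven by ordinary statsd events arriving over UDP or TCP. A minimal sketch of feeding them from a shell - assuming a netdata statsd server on the default port 8125 (see `STATSD_LISTEN_PORT` in `statsd.h` below) and using hypothetical `myapp.*` metric names:

```sh
# one event per basic metric type, in the statsd wire format name:value|type
# the metric names and values here are made up for illustration
echo "myapp.requests:1|c"       | nc -u -w 1 localhost 8125   # counter
echo "myapp.queue_depth:42|g"   | nc -u -w 1 localhost 8125   # gauge
echo "myapp.response_time:7|ms" | nc -u -w 1 localhost 8125   # timer
echo "myapp.logged_in:alice|s"  | nc -u -w 1 localhost 8125   # set
```

For private charts, the metric name is split at its first dot, as `statsd_get_metric_type_and_id()` above implements: the leading part is folded into the chart type and the remainder becomes the chart id.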
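The timer/histogram flush below reduces each collected series to min, max, average, median, standard deviation, sum, and one configurable percentile: the series is sorted, the first `floor(len * percentile / 100)` samples are kept, and the largest of them is reported (falling back to the first sample when that count is zero). A rough shell restatement of just that percentile rule, using made-up sample values:

```sh
# 95th percentile of five made-up samples, mirroring the C logic:
# pct_len = floor(n * p / 100); report the pct_len-th smallest sample
printf '%s\n' 9 1 5 3 7 | sort -n | awk -v p=95 '
    { v[NR] = $1 }
    END {
        n = int(NR * p / 100);   # pct_len in the C code
        if (n < 1) n = 1;        # clamp: fall back to the smallest sample
        print v[n];              # prints 7 here
    }'
```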
+ +// -------------------------------------------------------------------------------------------------------------------- +// statsd flush metrics + +static inline void statsd_flush_gauge(STATSD_METRIC *m) { + debug(D_STATSD, "flushing gauge metric '%s'", m->name); + + int updated = 0; + if(unlikely(!m->reset && m->count)) { + m->last = (collected_number) (m->gauge.value * statsd.decimal_detail); + + m->reset = 1; + updated = 1; + } + + if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) + statsd_private_chart_gauge(m); +} + +static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) { + debug(D_STATSD, "flushing %s metric '%s'", dim, m->name); + + int updated = 0; + if(unlikely(!m->reset && m->count)) { + m->last = m->counter.value; + + m->reset = 1; + updated = 1; + } + + if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) + statsd_private_chart_counter_or_meter(m, dim, family); +} + +static inline void statsd_flush_counter(STATSD_METRIC *m) { + statsd_flush_counter_or_meter(m, "counter", "counters"); +} + +static inline void statsd_flush_meter(STATSD_METRIC *m) { + statsd_flush_counter_or_meter(m, "meter", "meters"); +} + +static inline void statsd_flush_set(STATSD_METRIC *m) { + debug(D_STATSD, "flushing set metric '%s'", m->name); + + int updated = 0; + if(unlikely(!m->reset && m->count)) { + m->last = (collected_number)m->set.unique; + + m->reset = 1; + updated = 1; + } + else { + m->last = 0; + } + + if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) + statsd_private_chart_set(m); +} + +static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) { + debug(D_STATSD, "flushing %s metric '%s'", dim, m->name); + + int updated = 0; + if(unlikely(!m->reset && m->count && m->histogram.ext->used > 0)) { + netdata_mutex_lock(&m->histogram.ext->mutex); + + size_t len = m->histogram.ext->used; + LONG_DOUBLE *series = m->histogram.ext->values; + sort_series(series, len); + + m->histogram.ext->last_min = (collected_number)roundl(series[0] * statsd.decimal_detail); + m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * statsd.decimal_detail); + m->last = (collected_number)roundl(average(series, len) * statsd.decimal_detail); + m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * statsd.decimal_detail); + m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * statsd.decimal_detail); + m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * statsd.decimal_detail); + + size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0); + if(pct_len < 1) + m->histogram.ext->last_percentile = (collected_number)(series[0] * statsd.decimal_detail); + else + m->histogram.ext->last_percentile = (collected_number)roundl(series[pct_len - 1] * statsd.decimal_detail); + + netdata_mutex_unlock(&m->histogram.ext->mutex); + + debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " 
COLLECTED_NUMBER_FORMAT, + dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum); + + m->histogram.ext->zeroed = 0; + m->reset = 1; + updated = 1; + } + else if(unlikely(!m->histogram.ext->zeroed)) { + // reset the metrics + // if we collected anything, they will be updated below + // this ensures that we report zeros if nothing is collected + + m->histogram.ext->last_min = 0; + m->histogram.ext->last_max = 0; + m->last = 0; + m->histogram.ext->last_median = 0; + m->histogram.ext->last_stddev = 0; + m->histogram.ext->last_sum = 0; + m->histogram.ext->last_percentile = 0; + + m->histogram.ext->zeroed = 1; + } + + if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))) + statsd_private_chart_timer_or_histogram(m, dim, family, units); +} + +static inline void statsd_flush_timer(STATSD_METRIC *m) { + statsd_flush_timer_or_histogram(m, "timer", "timers", "milliseconds"); +} + +static inline void statsd_flush_histogram(STATSD_METRIC *m) { + statsd_flush_timer_or_histogram(m, "histogram", "histograms", "value"); +} + +static inline RRD_ALGORITHM statsd_algorithm_for_metric(STATSD_METRIC *m) { + switch(m->type) { + default: + case STATSD_METRIC_TYPE_GAUGE: + case STATSD_METRIC_TYPE_SET: + case STATSD_METRIC_TYPE_TIMER: + case STATSD_METRIC_TYPE_HISTOGRAM: + return RRD_ALGORITHM_ABSOLUTE; + + case STATSD_METRIC_TYPE_METER: + case STATSD_METRIC_TYPE_COUNTER: + return RRD_ALGORITHM_INCREMENTAL; + } +} + +static inline void link_metric_to_app_dimension(STATSD_APP *app, STATSD_METRIC *m, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) { + if(dim->value_type == STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS) { + dim->value_ptr = &m->events; + dim->algorithm = RRD_ALGORITHM_INCREMENTAL; + } + else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) { + dim->algorithm = RRD_ALGORITHM_ABSOLUTE; + dim->divisor *= statsd.decimal_detail; + + switch(dim->value_type) { + case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: + // will never match - added to avoid warning + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: + case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: + dim->value_ptr = &m->last; + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: + dim->value_ptr = &m->histogram.ext->last_sum; + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: + dim->value_ptr = &m->histogram.ext->last_min; + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: + dim->value_ptr = &m->histogram.ext->last_max; + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: + dim->value_ptr = &m->histogram.ext->last_median; + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: + dim->value_ptr = &m->histogram.ext->last_percentile; + break; + + case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: + dim->value_ptr = &m->histogram.ext->last_stddev; + break; + } + } + else { + if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST) + error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name); + + dim->value_ptr = &m->last; + dim->algorithm = statsd_algorithm_for_metric(m); + + if(m->type == STATSD_METRIC_TYPE_GAUGE) + dim->divisor *= statsd.decimal_detail; + } + + if(unlikely(chart->st && dim->rd)) { + rrddim_set_algorithm(chart->st, dim->rd, dim->algorithm); + 
rrddim_set_multiplier(chart->st, dim->rd, dim->multiplier); + rrddim_set_divisor(chart->st, dim->rd, dim->divisor); + } + + chart->dimensions_linked_count++; + m->options |= STATSD_METRIC_OPTION_USED_IN_APPS; + debug(D_STATSD, "metric '%s' of type %u linked with app '%s', chart '%s', dimension '%s', algorithm '%s'", m->name, m->type, app->name, chart->id, dim->name, rrd_algorithm_name(dim->algorithm)); +} + +static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC *m) { + (void)index; + + STATSD_APP *app; + for(app = statsd.apps; app ;app = app->next) { + if(unlikely(simple_pattern_matches(app->metrics, m->name))) { + debug(D_STATSD, "metric '%s' matches app '%s'", m->name, app->name); + + // the metric should get the options from the app + + if(app->default_options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED) + m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + else + m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + + if(app->default_options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED) + m->options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + else + m->options &= ~STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED; + + // check if there is a chart in this app, willing to get this metric + STATSD_APP_CHART *chart; + for(chart = app->charts; chart; chart = chart->next) { + + STATSD_APP_CHART_DIM *dim; + for(dim = chart->dimensions; dim ; dim = dim->next) { + if(unlikely(dim->metric_pattern)) { + size_t dim_name_len = strlen(dim->name); + size_t wildcarded_len = dim_name_len + strlen(m->name) + 1; + char wildcarded[wildcarded_len]; + + strcpy(wildcarded, dim->name); + char *ws = &wildcarded[dim_name_len]; + + if(simple_pattern_matches_extract(dim->metric_pattern, m->name, ws, wildcarded_len - dim_name_len)) { + + char *final_name = NULL; + + if(app->dict) { + if(likely(*wildcarded)) { + // use the name of the wildcarded string + final_name = dictionary_get(app->dict, wildcarded); + } + + if(unlikely(!final_name)) { + // use the name of the metric + final_name = dictionary_get(app->dict, m->name); + } + } + + if(unlikely(!final_name)) + final_name = wildcarded; + + add_dimension_to_app_chart( + app + , chart + , m->name + , final_name + , dim->multiplier + , dim->divisor + , dim->flags + , dim->value_type + ); + + // the new dimension is appended to the list + // so, it will be matched and linked later too + } + } + else if(!dim->value_ptr && dim->metric_hash == m->hash && !strcmp(dim->metric, m->name)) { + // we have a match - this metric should be linked to this dimension + link_metric_to_app_dimension(app, m, chart, dim); + } + } + + } + } + } +} + +static inline RRDDIM *statsd_add_dim_to_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) { + (void)app; + + // allow the same statsd metric to be added multiple times to the same chart + + STATSD_APP_CHART_DIM *tdim; + size_t count_same_metric = 0, count_same_metric_value_type = 0; + size_t pos_same_metric_value_type = 0; + + for (tdim = chart->dimensions; tdim && tdim->next; tdim = tdim->next) { + if (dim->metric_hash == tdim->metric_hash && !strcmp(dim->metric, tdim->metric)) { + count_same_metric++; + + if(dim->value_type == tdim->value_type) { + count_same_metric_value_type++; + if (tdim == dim) + pos_same_metric_value_type = count_same_metric_value_type; + } + } + } + + if(count_same_metric > 1) { + // the same metric is found multiple times + + size_t len = strlen(dim->metric) + 100; + 
char metric[ len + 1 ]; + + if(count_same_metric_value_type > 1) { + // the same metric, with the same value type, is added multiple times + snprintfz(metric, len, "%s_%s%zu", dim->metric, valuetype2string(dim->value_type), pos_same_metric_value_type); + } + else { + // the same metric, with different value type is added + snprintfz(metric, len, "%s_%s", dim->metric, valuetype2string(dim->value_type)); + } + + dim->rd = rrddim_add(chart->st, metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm); + if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags; + return dim->rd; + } + + dim->rd = rrddim_add(chart->st, dim->metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm); + if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags; + return dim->rd; +} + +static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart) { + debug(D_STATSD, "updating chart '%s' for app '%s'", chart->id, app->name); + + if(!chart->st) { + chart->st = rrdset_create_custom( + localhost // host + , app->name // type + , chart->id // id + , chart->name // name + , chart->family // family + , chart->context // context + , chart->title // title + , chart->units // units + , PLUGIN_STATSD_NAME // plugin + , chart->source // module + , chart->priority // priority + , statsd.update_every // update every + , chart->chart_type // chart type + , app->rrd_memory_mode // memory mode + , app->rrd_history_entries // history + ); + + rrdset_flag_set(chart->st, RRDSET_FLAG_STORE_FIRST); + // rrdset_flag_set(chart->st, RRDSET_FLAG_DEBUG); + } + else rrdset_next(chart->st); + + STATSD_APP_CHART_DIM *dim; + for(dim = chart->dimensions; dim ;dim = dim->next) { + if(likely(!dim->metric_pattern)) { + if (unlikely(!dim->rd)) + statsd_add_dim_to_app_chart(app, chart, dim); + + if (unlikely(dim->value_ptr)) { + debug(D_STATSD, "updating dimension '%s' (%s) of chart '%s' (%s) for app '%s' with value " COLLECTED_NUMBER_FORMAT, dim->name, dim->rd->id, chart->id, chart->st->id, app->name, *dim->value_ptr); + rrddim_set_by_pointer(chart->st, dim->rd, *dim->value_ptr); + } + } + } + + rrdset_done(chart->st); + debug(D_STATSD, "completed update of chart '%s' for app '%s'", chart->id, app->name); +} + +static inline void statsd_update_all_app_charts(void) { + // debug(D_STATSD, "updating app charts"); + + STATSD_APP *app; + for(app = statsd.apps; app ;app = app->next) { + // debug(D_STATSD, "updating charts for app '%s'", app->name); + + STATSD_APP_CHART *chart; + for(chart = app->charts; chart ;chart = chart->next) { + if(unlikely(chart->dimensions_linked_count)) { + statsd_update_app_chart(app, chart); + } + } + } + + // debug(D_STATSD, "completed update of app charts"); +} + +const char *statsd_metric_type_string(STATSD_METRIC_TYPE type) { + switch(type) { + case STATSD_METRIC_TYPE_COUNTER: return "counter"; + case STATSD_METRIC_TYPE_GAUGE: return "gauge"; + case STATSD_METRIC_TYPE_HISTOGRAM: return "histogram"; + case STATSD_METRIC_TYPE_METER: return "meter"; + case STATSD_METRIC_TYPE_SET: return "set"; + case STATSD_METRIC_TYPE_TIMER: return "timer"; + default: return "unknown"; + } +} + +static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_metric)(STATSD_METRIC *)) { + STATSD_METRIC *m; + + // find the useful metrics (incremental = each time we are called, we check the new metrics only) + for(m = index->first; m ; m = m->next) { + // since we add new metrics at the beginning + // check for useful charts, until the point we last checked + 
if(unlikely(is_metric_checked(m))) break; + + if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) { + log_access("NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name); + check_if_metric_is_for_app(index, m); + m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS; + } + + if(unlikely(!(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED))) { + if(unlikely(statsd.private_charts >= statsd.max_private_charts_hard)) { + debug(D_STATSD, "STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts has been reached.", m->name); + info("STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts); + m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + } + else { + if (simple_pattern_matches(statsd.charts_for, m->name)) { + debug(D_STATSD, "STATSD: metric '%s' will be charted.", m->name); + m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + } else { + debug(D_STATSD, "STATSD: metric '%s' will not be charted.", m->name); + m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; + } + } + + m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED; + } + + // mark it as checked + m->options |= STATSD_METRIC_OPTION_CHECKED; + + // check if it is used in charts + if((m->options & (STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED|STATSD_METRIC_OPTION_USED_IN_APPS)) && !(m->options & STATSD_METRIC_OPTION_USEFUL)) { + m->options |= STATSD_METRIC_OPTION_USEFUL; + index->useful++; + m->next_useful = index->first_useful; + index->first_useful = m; + } + } + + // flush all the useful metrics + for(m = index->first_useful; m ; m = m->next_useful) { + flush_metric(m); + } +} + + +// -------------------------------------------------------------------------------------- +// statsd main thread + +static int statsd_listen_sockets_setup(void) { + return listen_sockets_setup(&statsd.sockets); +} + +static void statsd_main_cleanup(void *data) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + info("cleaning up..."); + + if (statsd.collection_threads_status) { + int i; + for (i = 0; i < statsd.threads; i++) { + if(statsd.collection_threads_status[i].status) { + info("STATSD: stopping data collection thread %d...", i + 1); + netdata_thread_cancel(statsd.collection_threads_status[i].thread); + } + else { + info("STATSD: data collection thread %d found stopped.", i + 1); + } + } + } + + info("STATSD: closing sockets..."); + listen_sockets_close(&statsd.sockets); + + info("STATSD: cleanup completed."); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *statsd_main(void *ptr) { + netdata_thread_cleanup_push(statsd_main_cleanup, ptr); + + // ---------------------------------------------------------------------------------------------------------------- + // statsd configuration + + statsd.enabled = config_get_boolean(CONFIG_SECTION_STATSD, "enabled", statsd.enabled); + + statsd.update_every = default_rrd_update_every; + statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); + if(statsd.update_every < default_rrd_update_every) { + error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. 
Using %d", statsd.update_every, default_rrd_update_every); + statsd.update_every = default_rrd_update_every; + } + +#ifdef HAVE_RECVMMSG + statsd.recvmmsg_size = (size_t)config_get_number(CONFIG_SECTION_STATSD, "udp messages to process at once", (long long)statsd.recvmmsg_size); +#endif + + statsd.charts_for = simple_pattern_create(config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT); + statsd.max_private_charts = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts allowed", (long long)statsd.max_private_charts); + statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts * 5); + statsd.private_charts_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_STATSD, "private charts memory mode", rrd_memory_mode_name(default_rrd_memory_mode))); + statsd.private_charts_rrd_history_entries = (int)config_get_number(CONFIG_SECTION_STATSD, "private charts history", default_rrd_history_entries); + statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); + statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout); + statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden); + + statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile); + if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) { + error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile); + statsd.histogram_percentile = 95.0; + } + { + char buffer[100 + 1]; + snprintf(buffer, 100, "%0.1f%%", statsd.histogram_percentile); + statsd.histogram_percentile_str = strdupz(buffer); + } + + if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 1)) { + statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; + statsd.counters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; + statsd.meters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; + statsd.sets.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; + statsd.histograms.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; + statsd.timers.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; + } + + if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on gauges (deleteGauges)", 0)) + statsd.gauges.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on counters (deleteCounters)", 0)) + statsd.counters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on meters (deleteMeters)", 0)) + statsd.meters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on sets (deleteSets)", 0)) + statsd.sets.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on histograms (deleteHistograms)", 0)) + statsd.histograms.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + 
if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on timers (deleteTimers)", 0)) + statsd.timers.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; + + size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_STATSD, "statsd server max TCP sockets", (long long int)(rlimit_nofile.rlim_cur / 4)); + +#ifdef STATSD_MULTITHREADED + statsd.threads = (int)config_get_number(CONFIG_SECTION_STATSD, "threads", processors); + if(statsd.threads < 1) { + error("STATSD: Invalid number of threads %d, using %d", statsd.threads, processors); + statsd.threads = processors; + config_set_number(CONFIG_SECTION_STATSD, "collector threads", statsd.threads); + } +#else + statsd.threads = 1; +#endif + + // read custom application definitions + statsd_readdir(netdata_configured_user_config_dir, netdata_configured_stock_config_dir, "statsd.d"); + + // ---------------------------------------------------------------------------------------------------------------- + // statsd setup + + if(!statsd.enabled) return NULL; + + statsd_listen_sockets_setup(); + if(!statsd.sockets.opened) { + error("STATSD: No statsd sockets to listen to. statsd will be disabled."); + goto cleanup; + } + + statsd.collection_threads_status = callocz((size_t)statsd.threads, sizeof(struct collection_thread_status)); + + int i; + for(i = 0; i < statsd.threads ;i++) { + statsd.collection_threads_status[i].max_sockets = max_sockets / statsd.threads; + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STATSD_COLLECTOR[%d]", i + 1); + netdata_thread_create(&statsd.collection_threads_status[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, statsd_collector_thread, &statsd.collection_threads_status[i]); + } + + // ---------------------------------------------------------------------------------------------------------------- + // statsd monitoring charts + + RRDSET *st_metrics = rrdset_create_localhost( + "netdata" + , "statsd_metrics" + , NULL + , "statsd" + , NULL + , "Metrics in the netdata statsd database" + , "metrics" + , PLUGIN_STATSD_NAME + , "stats" + , 132010 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + RRDDIM *rd_metrics_gauge = rrddim_add(st_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_metrics_counter = rrddim_add(st_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_metrics_timer = rrddim_add(st_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_metrics_meter = rrddim_add(st_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_metrics_histogram = rrddim_add(st_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_metrics_set = rrddim_add(st_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + RRDSET *st_useful_metrics = rrdset_create_localhost( + "netdata" + , "statsd_useful_metrics" + , NULL + , "statsd" + , NULL + , "Useful metrics in the netdata statsd database" + , "metrics" + , PLUGIN_STATSD_NAME + , "stats" + , 132010 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + RRDDIM *rd_useful_metrics_gauge = rrddim_add(st_useful_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_useful_metrics_counter = rrddim_add(st_useful_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_useful_metrics_timer = rrddim_add(st_useful_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_useful_metrics_meter = rrddim_add(st_useful_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_useful_metrics_histogram = 
rrddim_add(st_useful_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + RRDDIM *rd_useful_metrics_set = rrddim_add(st_useful_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + RRDSET *st_events = rrdset_create_localhost( + "netdata" + , "statsd_events" + , NULL + , "statsd" + , NULL + , "Events processed by the netdata statsd server" + , "events/s" + , PLUGIN_STATSD_NAME + , "stats" + , 132011 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + RRDDIM *rd_events_gauge = rrddim_add(st_events, "gauges", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_counter = rrddim_add(st_events, "counters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_timer = rrddim_add(st_events, "timers", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_meter = rrddim_add(st_events, "meters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_histogram = rrddim_add(st_events, "histograms", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_set = rrddim_add(st_events, "sets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_unknown = rrddim_add(st_events, "unknown", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_events_errors = rrddim_add(st_events, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + RRDSET *st_reads = rrdset_create_localhost( + "netdata" + , "statsd_reads" + , NULL + , "statsd" + , NULL + , "Read operations made by the netdata statsd server" + , "reads/s" + , PLUGIN_STATSD_NAME + , "stats" + , 132012 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + RRDDIM *rd_reads_tcp = rrddim_add(st_reads, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_reads_udp = rrddim_add(st_reads, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + RRDSET *st_bytes = rrdset_create_localhost( + "netdata" + , "statsd_bytes" + , NULL + , "statsd" + , NULL + , "Bytes read by the netdata statsd server" + , "kilobits/s" + , PLUGIN_STATSD_NAME + , "stats" + , 132013 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + + RRDSET *st_packets = rrdset_create_localhost( + "netdata" + , "statsd_packets" + , NULL + , "statsd" + , NULL + , "Network packets processed by the netdata statsd server" + , "packets/s" + , PLUGIN_STATSD_NAME + , "stats" + , 132014 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + RRDDIM *rd_packets_tcp = rrddim_add(st_packets, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_packets_udp = rrddim_add(st_packets, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + RRDSET *st_tcp_connects = rrdset_create_localhost( + "netdata" + , "tcp_connects" + , NULL + , "statsd" + , NULL + , "statsd server TCP connects and disconnects" + , "events" + , PLUGIN_STATSD_NAME + , "stats" + , 132015 + , statsd.update_every + , RRDSET_TYPE_LINE + ); + RRDDIM *rd_tcp_connects = rrddim_add(st_tcp_connects, "connects", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_tcp_disconnects = rrddim_add(st_tcp_connects, "disconnects", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + RRDSET *st_tcp_connected = rrdset_create_localhost( + "netdata" + , "tcp_connected" + , NULL + , "statsd" + , NULL + , "statsd server TCP connected sockets" + , "connected" + , PLUGIN_STATSD_NAME + , "stats" + , 132016 + , statsd.update_every + , RRDSET_TYPE_LINE + ); + RRDDIM *rd_tcp_connected = rrddim_add(st_tcp_connected, 
"connected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + RRDSET *st_pcharts = rrdset_create_localhost( + "netdata" + , "private_charts" + , NULL + , "statsd" + , NULL + , "Private metric charts created by the netdata statsd server" + , "charts" + , PLUGIN_STATSD_NAME + , "stats" + , 132020 + , statsd.update_every + , RRDSET_TYPE_AREA + ); + RRDDIM *rd_pcharts = rrddim_add(st_pcharts, "charts", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + RRDSET *stcpu_thread = rrdset_create_localhost( + "netdata" + , "plugin_statsd_charting_cpu" + , NULL + , "statsd" + , "netdata.statsd_cpu" + , "NetData statsd charting thread CPU usage" + , "milliseconds/s" + , PLUGIN_STATSD_NAME + , "stats" + , 132001 + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + + RRDDIM *rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + RRDDIM *rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + struct rusage thread; + + for(i = 0; i < statsd.threads ;i++) { + char id[100 + 1]; + char title[100 + 1]; + + snprintfz(id, 100, "plugin_statsd_collector%d_cpu", i + 1); + snprintfz(title, 100, "NetData statsd collector thread No %d CPU usage", i + 1); + + statsd.collection_threads_status[i].st_cpu = rrdset_create_localhost( + "netdata" + , id + , NULL + , "statsd" + , "netdata.statsd_cpu" + , title + , "milliseconds/s" + , PLUGIN_STATSD_NAME + , "stats" + , 132002 + i + , statsd.update_every + , RRDSET_TYPE_STACKED + ); + + statsd.collection_threads_status[i].rd_user = rrddim_add(statsd.collection_threads_status[i].st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + statsd.collection_threads_status[i].rd_system = rrddim_add(statsd.collection_threads_status[i].st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + + // ---------------------------------------------------------------------------------------------------------------- + // statsd thread to turn metrics into charts + + usec_t step = statsd.update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + while(!netdata_exit) { + usec_t hb_dt = heartbeat_next(&hb, step); + + statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge); + statsd_flush_index_metrics(&statsd.counters, statsd_flush_counter); + statsd_flush_index_metrics(&statsd.meters, statsd_flush_meter); + statsd_flush_index_metrics(&statsd.timers, statsd_flush_timer); + statsd_flush_index_metrics(&statsd.histograms, statsd_flush_histogram); + statsd_flush_index_metrics(&statsd.sets, statsd_flush_set); + + statsd_update_all_app_charts(); + + getrusage(RUSAGE_THREAD, &thread); + + if(unlikely(netdata_exit)) + break; + + if(likely(hb_dt)) { + rrdset_next(st_metrics); + rrdset_next(st_useful_metrics); + rrdset_next(st_events); + rrdset_next(st_reads); + rrdset_next(st_bytes); + rrdset_next(st_packets); + rrdset_next(st_tcp_connects); + rrdset_next(st_tcp_connected); + rrdset_next(st_pcharts); + rrdset_next(stcpu_thread); + for(i = 0; i < statsd.threads ;i++) + rrdset_next(statsd.collection_threads_status[i].st_cpu); + } + + rrddim_set_by_pointer(st_metrics, rd_metrics_gauge, (collected_number)statsd.gauges.metrics); + rrddim_set_by_pointer(st_metrics, rd_metrics_counter, (collected_number)statsd.counters.metrics); + rrddim_set_by_pointer(st_metrics, rd_metrics_timer, (collected_number)statsd.timers.metrics); + rrddim_set_by_pointer(st_metrics, rd_metrics_meter, (collected_number)statsd.meters.metrics); + rrddim_set_by_pointer(st_metrics, rd_metrics_histogram, (collected_number)statsd.histograms.metrics); + 
rrddim_set_by_pointer(st_metrics, rd_metrics_set, (collected_number)statsd.sets.metrics); + rrdset_done(st_metrics); + + rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_gauge, (collected_number)statsd.gauges.useful); + rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_counter, (collected_number)statsd.counters.useful); + rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_timer, (collected_number)statsd.timers.useful); + rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_meter, (collected_number)statsd.meters.useful); + rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_histogram, (collected_number)statsd.histograms.useful); + rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_set, (collected_number)statsd.sets.useful); + rrdset_done(st_useful_metrics); + + rrddim_set_by_pointer(st_events, rd_events_gauge, (collected_number)statsd.gauges.events); + rrddim_set_by_pointer(st_events, rd_events_counter, (collected_number)statsd.counters.events); + rrddim_set_by_pointer(st_events, rd_events_timer, (collected_number)statsd.timers.events); + rrddim_set_by_pointer(st_events, rd_events_meter, (collected_number)statsd.meters.events); + rrddim_set_by_pointer(st_events, rd_events_histogram, (collected_number)statsd.histograms.events); + rrddim_set_by_pointer(st_events, rd_events_set, (collected_number)statsd.sets.events); + rrddim_set_by_pointer(st_events, rd_events_unknown, (collected_number)statsd.unknown_types); + rrddim_set_by_pointer(st_events, rd_events_errors, (collected_number)statsd.socket_errors); + rrdset_done(st_events); + + rrddim_set_by_pointer(st_reads, rd_reads_tcp, (collected_number)statsd.tcp_socket_reads); + rrddim_set_by_pointer(st_reads, rd_reads_udp, (collected_number)statsd.udp_socket_reads); + rrdset_done(st_reads); + + rrddim_set_by_pointer(st_bytes, rd_bytes_tcp, (collected_number)statsd.tcp_bytes_read); + rrddim_set_by_pointer(st_bytes, rd_bytes_udp, (collected_number)statsd.udp_bytes_read); + rrdset_done(st_bytes); + + rrddim_set_by_pointer(st_packets, rd_packets_tcp, (collected_number)statsd.tcp_packets_received); + rrddim_set_by_pointer(st_packets, rd_packets_udp, (collected_number)statsd.udp_packets_received); + rrdset_done(st_packets); + + rrddim_set_by_pointer(st_tcp_connects, rd_tcp_connects, (collected_number)statsd.tcp_socket_connects); + rrddim_set_by_pointer(st_tcp_connects, rd_tcp_disconnects, (collected_number)statsd.tcp_socket_disconnects); + rrdset_done(st_tcp_connects); + + rrddim_set_by_pointer(st_tcp_connected, rd_tcp_connected, (collected_number)statsd.tcp_socket_connected); + rrdset_done(st_tcp_connected); + + rrddim_set_by_pointer(st_pcharts, rd_pcharts, (collected_number)statsd.private_charts); + rrdset_done(st_pcharts); + + rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); + rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); + rrdset_done(stcpu_thread); + + for(i = 0; i < statsd.threads ;i++) { + rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_user, statsd.collection_threads_status[i].rusage.ru_utime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_utime.tv_usec); + rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_system, statsd.collection_threads_status[i].rusage.ru_stime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_stime.tv_usec); 
+ rrdset_done(statsd.collection_threads_status[i].st_cpu); + } + } + +cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/statsd.plugin/statsd.h b/collectors/statsd.plugin/statsd.h new file mode 100644 index 000000000..b741be76d --- /dev/null +++ b/collectors/statsd.plugin/statsd.h @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STATSD_H +#define NETDATA_STATSD_H 1 + +#include "../../daemon/common.h" + +#define STATSD_LISTEN_PORT 8125 +#define STATSD_LISTEN_BACKLOG 4096 + +#define NETDATA_PLUGIN_HOOK_STATSD \ + { \ + .name = "STATSD", \ + .config_section = NULL, \ + .config_name = NULL, \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = statsd_main \ + }, + + +extern void *statsd_main(void *ptr); + +#endif //NETDATA_STATSD_H diff --git a/collectors/tc.plugin/Makefile.am b/collectors/tc.plugin/Makefile.am new file mode 100644 index 000000000..f77e67d91 --- /dev/null +++ b/collectors/tc.plugin/Makefile.am @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + tc-qos-helper.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_plugins_SCRIPTS = \ + tc-qos-helper.sh \ + $(NULL) + +dist_noinst_DATA = \ + tc-qos-helper.sh.in \ + README.md \ + $(NULL) diff --git a/collectors/tc.plugin/Makefile.in b/collectors/tc.plugin/Makefile.in new file mode 100644 index 000000000..d336e1f0d --- /dev/null +++ b/collectors/tc.plugin/Makefile.in @@ -0,0 +1,562 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ + $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ + $(dist_noinst_DATA) +subdir = collectors/tc.plugin +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + 
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pluginsdir)" +SCRIPTS = $(dist_plugins_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ 
+abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + tc-qos-helper.sh \ + $(NULL) + +SUFFIXES = .in +dist_plugins_SCRIPTS = \ + tc-qos-helper.sh \ + $(NULL) + +dist_noinst_DATA = \ + tc-qos-helper.sh.in \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pluginsdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_pluginsSCRIPTS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_pluginsSCRIPTS + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_pluginsSCRIPTS install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-dist_pluginsSCRIPTS + +.in: + if sed \ + -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ + -e 's#[@]sbindir_POST@#$(sbindir)#g' \ + -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ + -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ + $< > $@.tmp; then \ + mv "$@.tmp" "$@"; \ + else \ + rm -f "$@.tmp"; \ + false; \ + fi + +# Tell 
versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
new file mode 100644
index 000000000..6670c491f
--- /dev/null
+++ b/collectors/tc.plugin/README.md
@@ -0,0 +1,183 @@
+## tc.plugin
+
+Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)**!
+
+![qos](https://cloud.githubusercontent.com/assets/2662304/14439411/b7f36254-0033-11e6-93f0-c739bb6a1c3a.gif)
+
+Netdata monitors `tc` QoS classes for all interfaces.
+
+If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/), it will also collect
+interface and class names.
+
+There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin
+in `C` code - the shell script only provides the command netdata runs to get the `tc` output).
+
+The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state
+machine was needed to keep track of all the `tc` classes, including the pseudo classes tc
+dynamically creates.
+
+## Motivation
+
+One category of metrics missing from Linux monitoring is bandwidth consumption per open
+socket (inbound and outbound traffic). So, you cannot tell how much bandwidth your web server,
+your database server, your backup, your ssh sessions, etc. are using.
+
+To solve this problem, the most *adventurous* Linux monitoring tools install kernel modules to
+capture all traffic, analyze it and provide reports per application. This is a lot of work, it
+is CPU intensive, and it carries a fair degree of risk, since the kernel modules involved may
+affect the stability of the whole system. Not to mention that such solutions are probably
+better suited for a core Linux router in your network.
+
+Others use NFACCT, the netfilter accounting module which is already part of the Linux firewall.
+However, this would require configuring a firewall on every system on which you want to measure
+bandwidth.
+
+QoS monitoring attempts to solve this in a much cleaner way.
+
+## Introduction to QoS
+
+One of the features the Linux kernel has, but which is rarely used, is its ability to
+**apply QoS on traffic**. Even more interesting is that it can apply QoS to **both inbound and
+outbound traffic**.
+
+QoS is about two features:
+
+1. **Classify traffic**
+
+   Classification is the process of organizing traffic in groups, called **classes**.
+   Classification can evaluate every aspect of network packets, like source and destination ports,
+   source and destination IPs, netfilter marks, etc.
+
+   When you classify traffic, you just assign a label to it. For example: **I call `web server`
+   the traffic from and to my server's tcp/80 and tcp/443, while I call `web surfing` all other
+   tcp/80 and tcp/443 traffic**. You can use any combination you like; there is no limit.
+
+2. **Apply traffic shaping rules to these classes**
+
+   Traffic shaping controls how the network interface bandwidth should be shared among the
+   classes. For monitoring alone we do not need this second feature - classification is
+   enough for monitoring everything.
+
+The key reasons for applying QoS on all servers (even cloud ones) are:
+
+ - **ensure administrative tasks (like ssh, dns, etc.) will always have a small but guaranteed
+   bandwidth.** QoS can guarantee that services like ssh, dns, ntp, etc. will always have a small
+   supply of bandwidth.
+   So, no matter what happens, you will be able to ssh to your server and DNS will always work.
+
+ - **ensure other administrative tasks will not monopolize all the available bandwidth.**
+   Services like backups, file copies, database dumps, etc. can easily monopolize all the
+   available bandwidth. It is common, for example, for a nightly backup or a huge file transfer
+   to negatively influence the end-user experience. QoS can fix that.
+
+ - **ensure each end-user connection will get a fair cut of the available bandwidth.**
+   Several QoS queuing disciplines in Linux do this automatically, without any configuration from
+   you. The result is that new sockets are favored over older ones, so users get a snappier
+   experience while others are transferring large amounts of traffic.
+
+ - **protect the servers from DDoS attacks.**
+   When your system is under a DDoS attack, it will receive a lot more traffic than it can
+   handle, and your applications will probably crash. Setting a limit on the inbound traffic
+   using QoS will protect your servers (by throttling the requests) and, depending on the size
+   of the attack, may allow your legitimate users to access the server while the attack is
+   taking place.
+
+Once **traffic classification** is applied, netdata can visualize the bandwidth consumption per
+class in real-time (no configuration is needed for netdata - it will figure it out).
+
+QoS is extremely light. You configure it once, and that is it. It will not bother you again
+and it will not use any noticeable CPU resources, especially on application and database servers.
+
+## QoS in Linux? Have you lost your mind?
+
+Yes, I know... but no, I have not!
+
+Of course, `tc` is probably **the most undocumented, complicated and unfriendly** command in Linux.
+
+For example, to match a simple port range in `tc`, e.g. all the high ports from 1025 to 65535
+inclusive, you have to match all of these:
+
+```
+1025/0xffff 1026/0xfffe 1028/0xfffc 1032/0xfff8 1040/0xfff0
+1056/0xffe0 1088/0xffc0 1152/0xff80 1280/0xff00 1536/0xfe00
+2048/0xf800 4096/0xf000 8192/0xe000 16384/0xc000 32768/0x8000
+```
+
+I know what you are thinking right now! **And I agree!**
+
+This is why I wrote **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool to
+simplify QoS management in Linux.
+
+The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**.
+Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)**
+to learn how to write your own QoS configuration.
+
+With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone
+to use QoS in Linux**. Just install the package `firehol`. It should already be available for your
+distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**.
+After that, you will have the `fireqos` command.
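+
+For comparison, this is roughly what the raw `tc` commands for the port-range example above
+look like (a sketch only - the device `eth0`, the parent `1:` and the classid `1:20` are
+illustrative; FireQOS generates and runs such commands for you):
+
+```sh
+# attach one u32 filter per port/mask pair of the list above, classifying
+# all high destination ports (1025-65535) into class 1:20 of device eth0
+for m in 1025/0xffff 1026/0xfffe 1028/0xfffc 1032/0xfff8 1040/0xfff0 \
+         1056/0xffe0 1088/0xffc0 1152/0xff80 1280/0xff00 1536/0xfe00 \
+         2048/0xf800 4096/0xf000 8192/0xe000 16384/0xc000 32768/0x8000; do
+    port="${m%/*}"; mask="${m#*/}"
+    tc filter add dev eth0 parent 1: protocol ip prio 10 \
+        u32 match ip dport "${port}" "${mask}" flowid 1:20
+done
+```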
+
+This is the file `/etc/firehol/fireqos.conf` we use at the netdata demo site:
+
+```sh
+    # configure the netdata ports
+    server_netdata_ports="tcp/19999"
+
+    interface eth0 world bidirectional ethernet balanced rate 50Mbit
+       class arp
+          match arp
+
+       class icmp
+          match icmp
+
+       class dns commit 1Mbit
+          server dns
+          client dns
+
+       class ntp
+          server ntp
+          client ntp
+
+       class ssh commit 2Mbit
+          server ssh
+          client ssh
+
+       class rsync commit 2Mbit max 10Mbit
+          server rsync
+          client rsync
+
+       class web_server commit 40Mbit
+          server http
+          server netdata
+
+       class client
+          client surfing
+
+       class nms commit 1Mbit
+          match input src 10.2.3.5
+```
+
+Nothing more is needed. You just run `fireqos start` to apply this configuration, restart netdata
+and you have real-time visualization of the bandwidth consumption of your applications. FireQOS is
+not a daemon. It just converts the configuration to `tc` commands, runs them, and exits.
+
+**IMPORTANT**: If you copy this configuration to apply it to your system, please adapt the
+speeds - experiment in non-production environments to learn the tool, before applying it on
+your servers.
+
+And this is what you are going to get:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/14436322/c91d90a4-0024-11e6-9fb1-57cdef1580df.png)
+
+---
+
+## More examples
+
+This is QoS on a Linux router. Note these features:
+
+1. It is real-time (per-second updates)
+2. QoS really works in Linux - see how the `background` traffic is squeezed when `surfing` needs it.
+
+![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif)
+
diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
new file mode 100644
index 000000000..083cc2986
--- /dev/null
+++ b/collectors/tc.plugin/plugin_tc.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_tc.h"
+
+#define RRD_TYPE_TC "tc"
+#define PLUGIN_TC_NAME "tc.plugin"
+
+// ----------------------------------------------------------------------------
+// /sbin/tc processor
+// this requires the script plugins.d/tc-qos-helper.sh
+
+#define TC_LINE_MAX 1024
+
+struct tc_class {
+    avl avl;
+
+    char *id;
+    uint32_t hash;
+
+    char *name;
+
+    char *leafid;
+    uint32_t leaf_hash;
+
+    char *parentid;
+    uint32_t parent_hash;
+
+    char hasparent;
+    char isleaf;
+    char isqdisc;
+    char render;
+
+    unsigned long long bytes;
+    unsigned long long packets;
+    unsigned long long dropped;
+    unsigned long long overlimits;
+    unsigned long long requeues;
+    unsigned long long lended;
+    unsigned long long borrowed;
+    unsigned long long giants;
+    unsigned long long tokens;
+    unsigned long long ctokens;
+
+    RRDDIM *rd_bytes;
+    RRDDIM *rd_packets;
+    RRDDIM *rd_dropped;
+    RRDDIM *rd_tokens;
+    RRDDIM *rd_ctokens;
+
+    char name_updated;
+    char updated;   // set when this class has been updated in the current pass
+    int unupdated;  // the number of consecutive times this has been found not updated
+
+    struct tc_class *next;
+    struct tc_class *prev;
+};
+
+struct tc_device {
+    avl avl;
+
+    char *id;
+    uint32_t hash;
+
+    char *name;
+    char *family;
+
+    char name_updated;
+    char family_updated;
+
+    char enabled;
+    char enabled_bytes;
+    char enabled_packets;
+    char enabled_dropped;
+    char enabled_tokens;
+    char enabled_ctokens;
+    char enabled_all_classes_qdiscs;
+
+    RRDSET *st_bytes;
+    RRDSET *st_packets;
+    RRDSET *st_dropped;
+    RRDSET *st_tokens;
+    RRDSET *st_ctokens;
+
+    avl_tree classes_index;
+
+    struct tc_class *classes;
+    struct tc_class *last_class;
+
+    struct tc_device *next;
+    struct tc_device *prev;
+};
+
+
+struct tc_device *tc_device_root = NULL;
+
+// ----------------------------------------------------------------------------
+// tc_device index
+
+static int tc_device_compare(void* a, void* b) {
+    if(((struct tc_device *)a)->hash < ((struct tc_device *)b)->hash) return -1;
+    else if(((struct tc_device *)a)->hash > ((struct tc_device *)b)->hash) return 1;
+    else return strcmp(((struct tc_device *)a)->id, ((struct tc_device *)b)->id);
+}
+
+avl_tree tc_device_root_index = {
+        NULL,
+        tc_device_compare
+};
+
+#define tc_device_index_add(st) (struct tc_device *)avl_insert(&tc_device_root_index, (avl *)(st))
+#define tc_device_index_del(st) (struct tc_device *)avl_remove(&tc_device_root_index, (avl *)(st))
+
+static inline struct tc_device *tc_device_index_find(const char *id, uint32_t hash) {
+    struct tc_device tmp;
+    tmp.id = (char *)id;
+    tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+    return (struct tc_device *)avl_search(&(tc_device_root_index), (avl *)&tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// tc_class index
+
+static int tc_class_compare(void* a, void* b) {
+    if(((struct tc_class *)a)->hash < ((struct tc_class *)b)->hash) return -1;
+    else if(((struct tc_class *)a)->hash > ((struct tc_class *)b)->hash) return 1;
+    else return strcmp(((struct tc_class *)a)->id, ((struct tc_class *)b)->id);
+}
+
+#define tc_class_index_add(st, rd) (struct tc_class *)avl_insert(&((st)->classes_index), (avl *)(rd))
+#define tc_class_index_del(st, rd) (struct tc_class *)avl_remove(&((st)->classes_index), (avl *)(rd))
+
+static inline struct tc_class *tc_class_index_find(struct tc_device *st, const char *id, uint32_t hash) {
+    struct tc_class tmp;
+    tmp.id = (char *)id;
+    tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+    return (struct tc_class *)avl_search(&(st->classes_index), (avl *) &tmp);
+}
+
+// ----------------------------------------------------------------------------
+
+static inline void tc_class_free(struct tc_device *n, struct tc_class *c) {
+    if(c == n->classes) {
+        if(likely(c->next))
+            n->classes = c->next;
+        else
+            n->classes = c->prev;
+    }
+
+    if(c == n->last_class) {
+        if(unlikely(c->next))
+            n->last_class = c->next;
+        else
+            n->last_class = c->prev;
+    }
+
+    if(c->next) c->next->prev = c->prev;
+    if(c->prev) c->prev->next = c->next;
+
+    debug(D_TC_LOOP, "Removing from device '%s' class '%s', parentid '%s', leafid '%s', unupdated=%d", n->id, c->id, c->parentid?c->parentid:"", c->leafid?c->leafid:"", c->unupdated);
+
+    if(unlikely(tc_class_index_del(n, c) != c))
+        error("plugin_tc: INTERNAL ERROR: attempt to remove class '%s' from device '%s': removed a different class", c->id, n->id);
+
+    freez(c->id);
+    freez(c->name);
+    freez(c->leafid);
+    freez(c->parentid);
+    freez(c);
+}
+
+static inline void tc_device_classes_cleanup(struct tc_device *d) {
+    static int cleanup_every = 999;
+
+    if(unlikely(cleanup_every > 0)) {
+        cleanup_every = (int) config_get_number("plugin:tc", "cleanup unused classes every", 120);
+        if(cleanup_every < 0) cleanup_every = -cleanup_every;
+    }
+
+    d->name_updated = 0;
+    d->family_updated = 0;
+
+    struct tc_class *c = d->classes;
+    while(c) {
+        if(unlikely(cleanup_every && c->unupdated >= cleanup_every)) {
+            struct tc_class *nc = c->next;
+            tc_class_free(d, c);
+            c = nc;
+        }
+        else {
+            c->updated = 0;
+            c->name_updated = 0;
+
+            c = c->next;
+        }
+    }
+}
+
+static inline void tc_device_commit(struct tc_device *d) {
+    static int enable_new_interfaces = -1, enable_bytes = -1,
enable_packets = -1, enable_dropped = -1, enable_tokens = -1, enable_ctokens = -1, enabled_all_classes_qdiscs = -1; + + if(unlikely(enable_new_interfaces == -1)) { + enable_new_interfaces = config_get_boolean_ondemand("plugin:tc", "enable new interfaces detected at runtime", CONFIG_BOOLEAN_YES); + enable_bytes = config_get_boolean_ondemand("plugin:tc", "enable traffic charts for all interfaces", CONFIG_BOOLEAN_AUTO); + enable_packets = config_get_boolean_ondemand("plugin:tc", "enable packets charts for all interfaces", CONFIG_BOOLEAN_AUTO); + enable_dropped = config_get_boolean_ondemand("plugin:tc", "enable dropped charts for all interfaces", CONFIG_BOOLEAN_AUTO); + enable_tokens = config_get_boolean_ondemand("plugin:tc", "enable tokens charts for all interfaces", CONFIG_BOOLEAN_NO); + enable_ctokens = config_get_boolean_ondemand("plugin:tc", "enable ctokens charts for all interfaces", CONFIG_BOOLEAN_NO); + enabled_all_classes_qdiscs = config_get_boolean_ondemand("plugin:tc", "enable show all classes and qdiscs for all interfaces", CONFIG_BOOLEAN_NO); + } + + if(unlikely(d->enabled == (char)-1)) { + char var_name[CONFIG_MAX_NAME + 1]; + snprintfz(var_name, CONFIG_MAX_NAME, "qos for %s", d->id); + + d->enabled = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_new_interfaces); + + snprintfz(var_name, CONFIG_MAX_NAME, "traffic chart for %s", d->id); + d->enabled_bytes = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_bytes); + + snprintfz(var_name, CONFIG_MAX_NAME, "packets chart for %s", d->id); + d->enabled_packets = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_packets); + + snprintfz(var_name, CONFIG_MAX_NAME, "dropped packets chart for %s", d->id); + d->enabled_dropped = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_dropped); + + snprintfz(var_name, CONFIG_MAX_NAME, "tokens chart for %s", d->id); + d->enabled_tokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_tokens); + + snprintfz(var_name, CONFIG_MAX_NAME, "ctokens chart for %s", d->id); + d->enabled_ctokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_ctokens); + + snprintfz(var_name, CONFIG_MAX_NAME, "show all classes for %s", d->id); + d->enabled_all_classes_qdiscs = (char)config_get_boolean_ondemand("plugin:tc", var_name, enabled_all_classes_qdiscs); + } + + // we only need to add leaf classes + struct tc_class *c, *x /*, *root = NULL */; + unsigned long long bytes_sum = 0, packets_sum = 0, dropped_sum = 0, tokens_sum = 0, ctokens_sum = 0; + int active_nodes = 0, updated_classes = 0, updated_qdiscs = 0; + + // prepare all classes + // we set reasonable defaults for the rest of the code below + + for(c = d->classes ; c ; c = c->next) { + c->render = 0; // do not render this class + + c->isleaf = 1; // this is a leaf class + c->hasparent = 0; // without a parent + + if(unlikely(!c->updated)) + c->unupdated++; // increase its unupdated counter + else { + c->unupdated = 0; // reset its unupdated counter + + // count how many of each kind + if(c->isqdisc) + updated_qdiscs++; + else + updated_classes++; + } + } + + if(unlikely(!d->enabled || (!updated_classes && !updated_qdiscs))) { + debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. It is not enabled/updated.", d->name?d->name:d->id); + tc_device_classes_cleanup(d); + return; + } + + if(unlikely(updated_classes && updated_qdiscs)) { + error("TC: device '%s' has active both classes (%d) and qdiscs (%d). 
Will render only qdiscs.", d->id, updated_classes, updated_qdiscs);
+
+        // set all classes to !updated
+        for(c = d->classes ; c ; c = c->next)
+            if(unlikely(!c->isqdisc && c->updated))
+                c->updated = 0;
+
+        updated_classes = 0;
+    }
+
+    // mark the classes as leaves and parents
+    //
+    // TC is hierarchical:
+    //  - classes can have other classes in them
+    //  - the same is true for qdiscs (i.e. qdiscs have classes, which have other qdiscs)
+    //
+    // we need to present a chart with leaf nodes only, so that the sum
+    // of all dimensions of the chart will be the total utilization
+    // of the interface.
+    //
+    // here we try to find the ones we need to report
+    // by default all nodes are marked with: isleaf = 1 (see above)
+    //
+    // so, here we remove the isleaf flag from the nodes in the middle
+    // and we add the hasparent flag to the leaf nodes whose parent we found
+    if(likely(!d->enabled_all_classes_qdiscs)) {
+        for(c = d->classes; c; c = c->next) {
+            if(unlikely(!c->updated)) continue;
+
+            //debug(D_TC_LOOP, "TC: In device '%s', %s '%s' has leafid: '%s' and parentid '%s'.",
+            //    d->id,
+            //    c->isqdisc?"qdisc":"class",
+            //    c->id,
+            //    c->leafid?c->leafid:"NULL",
+            //    c->parentid?c->parentid:"NULL");
+
+            // find if c is leaf or not
+            for(x = d->classes; x; x = x->next) {
+                if(unlikely(!x->updated || c == x || !x->parentid)) continue;
+
+                // classes have both parentid and leafid
+                // qdiscs have only parentid
+                // the following works for both (it is an OR)
+
+                if((c->hash == x->parent_hash && strcmp(c->id, x->parentid) == 0) ||
+                   (c->leafid && c->leaf_hash == x->parent_hash && strcmp(c->leafid, x->parentid) == 0)) {
+                    // debug(D_TC_LOOP, "TC: In device '%s', %s '%s' (leafid: '%s') has as leaf %s '%s' (parentid: '%s').", d->name?d->name:d->id, c->isqdisc?"qdisc":"class", c->name?c->name:c->id, c->leafid?c->leafid:c->id, x->isqdisc?"qdisc":"class", x->name?x->name:x->id, x->parentid?x->parentid:x->id);
+                    c->isleaf = 0;
+                    x->hasparent = 1;
+                }
+            }
+        }
+    }
+
+    for(c = d->classes ; c ; c = c->next) {
+        if(unlikely(!c->updated)) continue;
+
+        // debug(D_TC_LOOP, "TC: device '%s', %s '%s' isleaf=%d, hasparent=%d", d->id, (c->isqdisc)?"qdisc":"class", c->id, c->isleaf, c->hasparent);
+
+        if(unlikely((c->isleaf && c->hasparent) || d->enabled_all_classes_qdiscs)) {
+            c->render = 1;
+            active_nodes++;
+            bytes_sum += c->bytes;
+            packets_sum += c->packets;
+            dropped_sum += c->dropped;
+            tokens_sum += c->tokens;
+            ctokens_sum += c->ctokens;
+        }
+
+        //if(unlikely(!c->hasparent)) {
+        //    if(root) error("TC: multiple root class/qdisc for device '%s' (old: '%s', new: '%s')", d->id, root->id, c->id);
+        //    root = c;
+        //    debug(D_TC_LOOP, "TC: found root class/qdisc '%s'", root->id);
+        //}
+    }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    // dump all the list to see what we know
+
+    if(unlikely(debug_flags & D_TC_LOOP)) {
+        for(c = d->classes ; c ; c = c->next) {
+            if(c->render) debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, OK", d->name, c->id);
+            else debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, IGNORE (updated: %d, isleaf: %d, hasparent: %d, parent: %s)", d->name?d->name:d->id, c->id, c->updated, c->isleaf, c->hasparent, c->parentid?c->parentid:"(unset)");
+        }
+    }
+#endif
+
+    if(unlikely(!active_nodes)) {
+        debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. No useful classes/qdiscs.", d->name?d->name:d->id);
+        tc_device_classes_cleanup(d);
+        return;
+    }
+
+    debug(D_TC_LOOP, "TC: evaluating TC device '%s'.
enabled = %d/%d (bytes: %d/%d, packets: %d/%d, dropped: %d/%d, tokens: %d/%d, ctokens: %d/%d, all_classes_qdiscs: %d/%d), classes: (bytes = %llu, packets = %llu, dropped = %llu, tokens = %llu, ctokens = %llu).", + d->name?d->name:d->id, + d->enabled, enable_new_interfaces, + d->enabled_bytes, enable_bytes, + d->enabled_packets, enable_packets, + d->enabled_dropped, enable_dropped, + d->enabled_tokens, enable_tokens, + d->enabled_ctokens, enable_ctokens, + d->enabled_all_classes_qdiscs, enabled_all_classes_qdiscs, + bytes_sum, + packets_sum, + dropped_sum, + tokens_sum, + ctokens_sum + ); + + // -------------------------------------------------------------------- + // bytes + + if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && bytes_sum)) { + d->enabled_bytes = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_bytes)) + d->st_bytes = rrdset_create_localhost( + RRD_TYPE_TC + , d->id + , d->name ? d->name : d->id + , d->family ? d->family : d->id + , RRD_TYPE_TC ".qos" + , "Class Usage" + , "kilobits/s" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_TC_QOS + , localhost->rrd_update_every + , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED + ); + + else { + rrdset_next(d->st_bytes); + if(unlikely(d->name_updated)) rrdset_set_name(d->st_bytes, d->name); + + // TODO + // update the family + } + + for(c = d->classes ; c ; c = c->next) { + if(unlikely(!c->render)) continue; + + if(unlikely(!c->rd_bytes)) + c->rd_bytes = rrddim_add(d->st_bytes, c->id, c->name?c->name:c->id, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + else if(unlikely(c->name_updated)) + rrddim_set_name(d->st_bytes, c->rd_bytes, c->name); + + rrddim_set_by_pointer(d->st_bytes, c->rd_bytes, c->bytes); + } + rrdset_done(d->st_bytes); + } + + // -------------------------------------------------------------------- + // packets + + if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && packets_sum)) { + d->enabled_packets = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_packets)) { + char id[RRD_ID_LENGTH_MAX + 1]; + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_packets", d->id); + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id); + + d->st_packets = rrdset_create_localhost( + RRD_TYPE_TC + , id + , name + , d->family ? d->family : d->id + , RRD_TYPE_TC ".qos_packets" + , "Class Packets" + , "packets/s" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_TC_QOS_PACKETS + , localhost->rrd_update_every + , d->enabled_all_classes_qdiscs ? 
RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED + ); + } + else { + rrdset_next(d->st_packets); + + if(unlikely(d->name_updated)) { + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id); + rrdset_set_name(d->st_packets, name); + } + + // TODO + // update the family + } + + for(c = d->classes ; c ; c = c->next) { + if(unlikely(!c->render)) continue; + + if(unlikely(!c->rd_packets)) + c->rd_packets = rrddim_add(d->st_packets, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL); + else if(unlikely(c->name_updated)) + rrddim_set_name(d->st_packets, c->rd_packets, c->name); + + rrddim_set_by_pointer(d->st_packets, c->rd_packets, c->packets); + } + rrdset_done(d->st_packets); + } + + // -------------------------------------------------------------------- + // dropped + + if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && dropped_sum)) { + d->enabled_dropped = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_dropped)) { + char id[RRD_ID_LENGTH_MAX + 1]; + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_dropped", d->id); + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id); + + d->st_dropped = rrdset_create_localhost( + RRD_TYPE_TC + , id + , name + , d->family ? d->family : d->id + , RRD_TYPE_TC ".qos_dropped" + , "Class Dropped Packets" + , "packets/s" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_TC_QOS_DROPPED + , localhost->rrd_update_every + , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED + ); + } + else { + rrdset_next(d->st_dropped); + + if(unlikely(d->name_updated)) { + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id); + rrdset_set_name(d->st_dropped, name); + } + + // TODO + // update the family + } + + for(c = d->classes ; c ; c = c->next) { + if(unlikely(!c->render)) continue; + + if(unlikely(!c->rd_dropped)) + c->rd_dropped = rrddim_add(d->st_dropped, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL); + else if(unlikely(c->name_updated)) + rrddim_set_name(d->st_dropped, c->rd_dropped, c->name); + + rrddim_set_by_pointer(d->st_dropped, c->rd_dropped, c->dropped); + } + rrdset_done(d->st_dropped); + } + + // -------------------------------------------------------------------- + // tokens + + if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && tokens_sum)) { + d->enabled_tokens = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_tokens)) { + char id[RRD_ID_LENGTH_MAX + 1]; + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_tokens", d->id); + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id); + + d->st_tokens = rrdset_create_localhost( + RRD_TYPE_TC + , id + , name + , d->family ? 
d->family : d->id + , RRD_TYPE_TC ".qos_tokens" + , "Class Tokens" + , "tokens" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_TC_QOS_TOCKENS + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + } + else { + rrdset_next(d->st_tokens); + + if(unlikely(d->name_updated)) { + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id); + rrdset_set_name(d->st_tokens, name); + } + + // TODO + // update the family + } + + for(c = d->classes ; c ; c = c->next) { + if(unlikely(!c->render)) continue; + + if(unlikely(!c->rd_tokens)) { + c->rd_tokens = rrddim_add(d->st_tokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else if(unlikely(c->name_updated)) + rrddim_set_name(d->st_tokens, c->rd_tokens, c->name); + + rrddim_set_by_pointer(d->st_tokens, c->rd_tokens, c->tokens); + } + rrdset_done(d->st_tokens); + } + + // -------------------------------------------------------------------- + // ctokens + + if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && ctokens_sum)) { + d->enabled_ctokens = CONFIG_BOOLEAN_YES; + + if(unlikely(!d->st_ctokens)) { + char id[RRD_ID_LENGTH_MAX + 1]; + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_ctokens", d->id); + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id); + + d->st_ctokens = rrdset_create_localhost( + RRD_TYPE_TC + , id + , name + , d->family ? d->family : d->id + , RRD_TYPE_TC ".qos_ctokens" + , "Class cTokens" + , "ctokens" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_TC_QOS_CTOCKENS + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + } + else { + debug(D_TC_LOOP, "TC: Updating _ctokens chart for device '%s'", d->name?d->name:d->id); + rrdset_next(d->st_ctokens); + + if(unlikely(d->name_updated)) { + char name[RRD_ID_LENGTH_MAX + 1]; + snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id); + rrdset_set_name(d->st_ctokens, name); + } + + // TODO + // update the family + } + + for(c = d->classes ; c ; c = c->next) { + if(unlikely(!c->render)) continue; + + if(unlikely(!c->rd_ctokens)) + c->rd_ctokens = rrddim_add(d->st_ctokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE); + else if(unlikely(c->name_updated)) + rrddim_set_name(d->st_ctokens, c->rd_ctokens, c->name); + + rrddim_set_by_pointer(d->st_ctokens, c->rd_ctokens, c->ctokens); + } + rrdset_done(d->st_ctokens); + } + + tc_device_classes_cleanup(d); +} + +static inline void tc_device_set_class_name(struct tc_device *d, char *id, char *name) { + if(unlikely(!name || !*name)) return; + + struct tc_class *c = tc_class_index_find(d, id, 0); + if(likely(c)) { + if(likely(c->name)) { + if(!strcmp(c->name, name)) return; + freez(c->name); + c->name = NULL; + } + + if(likely(name && *name && strcmp(c->id, name) != 0)) { + debug(D_TC_LOOP, "TC: Setting device '%s', class '%s' name to '%s'", d->id, id, name); + c->name = strdupz(name); + c->name_updated = 1; + } + } +} + +static inline void tc_device_set_device_name(struct tc_device *d, char *name) { + if(unlikely(!name || !*name)) return; + + if(d->name) { + if(!strcmp(d->name, name)) return; + freez(d->name); + d->name = NULL; + } + + if(likely(name && *name && strcmp(d->id, name) != 0)) { + debug(D_TC_LOOP, "TC: Setting device '%s' name to '%s'", d->id, name); + d->name = strdupz(name); + d->name_updated = 1; + } +} + +static inline void tc_device_set_device_family(struct tc_device *d, char *family) { + freez(d->family); + d->family = NULL; + + 
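+    // the device family is used as the "family" of this device's charts
+    // (the dashboard grouping); it is set from the SETDEVICEGROUP line
+    // emitted by tc-qos-helper.sh (see tc_main() below)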
if(likely(family && *family && strcmp(d->id, family) != 0)) {
+        debug(D_TC_LOOP, "TC: Setting device '%s' family to '%s'", d->id, family);
+        d->family = strdupz(family);
+        d->family_updated = 1;
+    }
+    // no need for null termination - it is already null
+}
+
+static inline struct tc_device *tc_device_create(char *id)
+{
+    struct tc_device *d = tc_device_index_find(id, 0);
+
+    if(!d) {
+        debug(D_TC_LOOP, "TC: Creating device '%s'", id);
+
+        d = callocz(1, sizeof(struct tc_device));
+
+        d->id = strdupz(id);
+        d->hash = simple_hash(d->id);
+        d->enabled = (char)-1;
+
+        avl_init(&d->classes_index, tc_class_compare);
+        if(unlikely(tc_device_index_add(d) != d))
+            error("plugin_tc: INTERNAL ERROR: adding device '%s' to the index, found a different device already indexed.", d->id);
+
+        if(!tc_device_root) {
+            tc_device_root = d;
+        }
+        else {
+            d->next = tc_device_root;
+            tc_device_root->prev = d;
+            tc_device_root = d;
+        }
+    }
+
+    return(d);
+}
+
+static inline struct tc_class *tc_class_add(struct tc_device *n, char *id, char qdisc, char *parentid, char *leafid)
+{
+    struct tc_class *c = tc_class_index_find(n, id, 0);
+
+    if(!c) {
+        debug(D_TC_LOOP, "TC: Creating in device '%s', class id '%s', parentid '%s', leafid '%s'", n->id, id, parentid?parentid:"", leafid?leafid:"");
+
+        c = callocz(1, sizeof(struct tc_class));
+
+        if(unlikely(!n->classes))
+            n->classes = c;
+
+        else if(likely(n->last_class)) {
+            n->last_class->next = c;
+            c->prev = n->last_class;
+        }
+
+        n->last_class = c;
+
+        c->id = strdupz(id);
+        c->hash = simple_hash(c->id);
+
+        c->isqdisc = qdisc;
+        if(parentid && *parentid) {
+            c->parentid = strdupz(parentid);
+            c->parent_hash = simple_hash(c->parentid);
+        }
+
+        if(leafid && *leafid) {
+            c->leafid = strdupz(leafid);
+            c->leaf_hash = simple_hash(c->leafid);
+        }
+
+        if(unlikely(tc_class_index_add(n, c) != c))
+            error("plugin_tc: INTERNAL ERROR: attempt to index class '%s' on device '%s': already exists", c->id, n->id);
+    }
+    return(c);
+}
+
+static inline void tc_device_free(struct tc_device *n)
+{
+    if(n->next) n->next->prev = n->prev;
+    if(n->prev) n->prev->next = n->next;
+    if(tc_device_root == n) {
+        if(n->next) tc_device_root = n->next;
+        else tc_device_root = n->prev;
+    }
+
+    if(unlikely(tc_device_index_del(n) != n))
+        error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", n->id);
+
+    while(n->classes) tc_class_free(n, n->classes);
+
+    freez(n->id);
+    freez(n->name);
+    freez(n->family);
+    freez(n);
+}
+
+static inline void tc_device_free_all()
+{
+    while(tc_device_root)
+        tc_device_free(tc_device_root);
+}
+
+#define PLUGINSD_MAX_WORDS 20
+
+static inline int tc_space(char c) {
+    switch(c) {
+        case ' ':
+        case '\t':
+        case '\r':
+        case '\n':
+            return 1;
+
+        default:
+            return 0;
+    }
+}
+
+static inline void tc_split_words(char *str, char **words, int max_words) {
+    char *s = str;
+    int i = 0;
+
+    // skip all white space
+    while(tc_space(*s)) s++;
+
+    // store the first word
+    words[i++] = s;
+
+    // while we have something
+    while(*s) {
+        // if it is a space
+        if(unlikely(tc_space(*s))) {
+
+            // terminate the word
+            *s++ = '\0';
+
+            // skip all white space
+            while(tc_space(*s)) s++;
+
+            // if we reached the end, stop
+            if(!*s) break;
+
+            // store the next word
+            if(i < max_words) words[i++] = s;
+            else break;
+        }
+        else s++;
+    }
+
+    // terminate the words
+    while(i < max_words) words[i++] = NULL;
+}
+
+static pid_t tc_child_pid = 0;
+
+static void tc_main_cleanup(void *ptr) {
+    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+    static_thread->enabled
= NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + if(tc_child_pid) { + info("TC: killing with SIGTERM tc-qos-helper process %d", tc_child_pid); + if(killpid(tc_child_pid, SIGTERM) != -1) { + siginfo_t info; + + info("TC: waiting for tc plugin child process pid %d to exit...", tc_child_pid); + waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED); + // info("TC: finished tc plugin child process pid %d.", tc_child_pid); + } + + tc_child_pid = 0; + } + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *tc_main(void *ptr) { + netdata_thread_cleanup_push(tc_main_cleanup, ptr); + + struct rusage thread; + + char command[FILENAME_MAX + 1]; + char *words[PLUGINSD_MAX_WORDS] = { NULL }; + + uint32_t BEGIN_HASH = simple_hash("BEGIN"); + uint32_t END_HASH = simple_hash("END"); + uint32_t QDISC_HASH = simple_hash("qdisc"); + uint32_t CLASS_HASH = simple_hash("class"); + uint32_t SENT_HASH = simple_hash("Sent"); + uint32_t LENDED_HASH = simple_hash("lended:"); + uint32_t TOKENS_HASH = simple_hash("tokens:"); + uint32_t SETDEVICENAME_HASH = simple_hash("SETDEVICENAME"); + uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP"); + uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME"); + uint32_t WORKTIME_HASH = simple_hash("WORKTIME"); +#ifdef DETACH_PLUGINS_FROM_NETDATA + uint32_t MYPID_HASH = simple_hash("MYPID"); +#endif + uint32_t first_hash; + + snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_plugins_dir); + char *tc_script = config_get("plugin:tc", "script to run to get tc values", command); + + while(!netdata_exit) { + FILE *fp; + struct tc_device *device = NULL; + struct tc_class *class = NULL; + + snprintfz(command, TC_LINE_MAX, "exec %s %d", tc_script, localhost->rrd_update_every); + debug(D_TC_LOOP, "executing '%s'", command); + + fp = mypopen(command, (pid_t *)&tc_child_pid); + if(unlikely(!fp)) { + error("TC: Cannot popen(\"%s\", \"r\").", command); + goto cleanup; + } + + char buffer[TC_LINE_MAX+1] = ""; + while(fgets(buffer, TC_LINE_MAX, fp) != NULL) { + if(unlikely(netdata_exit)) break; + + buffer[TC_LINE_MAX] = '\0'; + // debug(D_TC_LOOP, "TC: read '%s'", buffer); + + tc_split_words(buffer, words, PLUGINSD_MAX_WORDS); + + if(unlikely(!words[0] || !*words[0])) { + // debug(D_TC_LOOP, "empty line"); + continue; + } + // else debug(D_TC_LOOP, "First word is '%s'", words[0]); + + first_hash = simple_hash(words[0]); + + if(unlikely(device && ((first_hash == CLASS_HASH && strcmp(words[0], "class") == 0) || (first_hash == QDISC_HASH && strcmp(words[0], "qdisc") == 0)))) { + // debug(D_TC_LOOP, "CLASS line on class id='%s', parent='%s', parentid='%s', leaf='%s', leafid='%s'", words[2], words[3], words[4], words[5], words[6]); + + char *type = words[1]; // the class/qdisc type: htb, fq_codel, etc + char *id = words[2]; // the class/qdisc major:minor + char *parent = words[3]; // the word 'parent' or 'root' + char *parentid = words[4]; // parentid + char *leaf = words[5]; // the word 'leaf' + char *leafid = words[6]; // leafid + + int parent_is_root = 0; + int parent_is_parent = 0; + if(likely(parent)) { + parent_is_parent = !strcmp(parent, "parent"); + + if(!parent_is_parent) + parent_is_root = !strcmp(parent, "root"); + } + + if(likely(type && id && (parent_is_root || parent_is_parent))) { + char qdisc = 0; + + if(first_hash == QDISC_HASH) { + qdisc = 1; + + if(!strcmp(type, "ingress")) { + // we don't want to get the ingress qdisc + // there should be an IFB interface for this + + class = NULL; + continue; + } + + if(parent_is_parent 
&& parentid) {
+                            // eliminate the minor number from parentid
+                            // why: parentid is the id of the parent class
+                            // but major: is also the id of the parent qdisc
+
+                            char *s = parentid;
+                            while(*s && *s != ':') s++;
+                            if(*s == ':') s[1] = '\0';
+                        }
+                    }
+
+                    if(parent_is_root) {
+                        parentid = NULL;
+                        leafid = NULL;
+                    }
+                    else if(!leaf || strcmp(leaf, "leaf") != 0)
+                        leafid = NULL;
+
+                    char leafbuf[20 + 1] = "";
+                    if(leafid && leafid[strlen(leafid) - 1] == ':') {
+                        strncpyz(leafbuf, leafid, 20 - 1);
+                        strcat(leafbuf, "1");
+                        leafid = leafbuf;
+                    }
+
+                    class = tc_class_add(device, id, qdisc, parentid, leafid);
+                }
+                else {
+                    // clear the last class
+                    class = NULL;
+                }
+            }
+            else if(unlikely(first_hash == END_HASH && strcmp(words[0], "END") == 0)) {
+                // debug(D_TC_LOOP, "END line");
+
+                if(likely(device)) {
+                    netdata_thread_disable_cancelability();
+                    tc_device_commit(device);
+                    // tc_device_free(device);
+                    netdata_thread_enable_cancelability();
+                }
+
+                device = NULL;
+                class = NULL;
+            }
+            else if(unlikely(first_hash == BEGIN_HASH && strcmp(words[0], "BEGIN") == 0)) {
+                // debug(D_TC_LOOP, "BEGIN line on device '%s'", words[1]);
+
+                if(likely(words[1] && *words[1])) {
+                    device = tc_device_create(words[1]);
+                }
+                else {
+                    // tc_device_free(device);
+                    device = NULL;
+                }
+
+                class = NULL;
+            }
+            else if(unlikely(device && class && first_hash == SENT_HASH && strcmp(words[0], "Sent") == 0)) {
+                // debug(D_TC_LOOP, "SENT line '%s'", words[1]);
+                if(likely(words[1] && *words[1])) {
+                    class->bytes = str2ull(words[1]);
+                    class->updated = 1;
+                }
+                else {
+                    class->updated = 0;
+                }
+
+                if(likely(words[3] && *words[3]))
+                    class->packets = str2ull(words[3]);
+
+                if(likely(words[6] && *words[6]))
+                    class->dropped = str2ull(words[6]);
+
+                if(likely(words[8] && *words[8]))
+                    class->overlimits = str2ull(words[8]);
+
+                if(likely(words[10] && *words[10]))
+                    class->requeues = str2ull(words[10]);
+            }
+            else if(unlikely(device && class && class->updated && first_hash == LENDED_HASH && strcmp(words[0], "lended:") == 0)) {
+                // debug(D_TC_LOOP, "LENDED line '%s'", words[1]);
+                if(likely(words[1] && *words[1]))
+                    class->lended = str2ull(words[1]);
+
+                if(likely(words[3] && *words[3]))
+                    class->borrowed = str2ull(words[3]);
+
+                if(likely(words[5] && *words[5]))
+                    class->giants = str2ull(words[5]);
+            }
+            else if(unlikely(device && class && class->updated && first_hash == TOKENS_HASH && strcmp(words[0], "tokens:") == 0)) {
+                // debug(D_TC_LOOP, "TOKENS line '%s'", words[1]);
+                if(likely(words[1] && *words[1]))
+                    class->tokens = str2ull(words[1]);
+
+                if(likely(words[3] && *words[3]))
+                    class->ctokens = str2ull(words[3]);
+            }
+            else if(unlikely(device && first_hash == SETDEVICENAME_HASH && strcmp(words[0], "SETDEVICENAME") == 0)) {
+                // debug(D_TC_LOOP, "SETDEVICENAME line '%s'", words[1]);
+                if(likely(words[1] && *words[1]))
+                    tc_device_set_device_name(device, words[1]);
+            }
+            else if(unlikely(device && first_hash == SETDEVICEGROUP_HASH && strcmp(words[0], "SETDEVICEGROUP") == 0)) {
+                // debug(D_TC_LOOP, "SETDEVICEGROUP line '%s'", words[1]);
+                if(likely(words[1] && *words[1]))
+                    tc_device_set_device_family(device, words[1]);
+            }
+            else if(unlikely(device && first_hash == SETCLASSNAME_HASH && strcmp(words[0], "SETCLASSNAME") == 0)) {
+                // debug(D_TC_LOOP, "SETCLASSNAME line '%s' '%s'", words[1], words[2]);
+                char *id = words[1];
+                char *path = words[2];
+                if(likely(id && *id && path && *path))
+                    tc_device_set_class_name(device, id, path);
+            }
+            else if(unlikely(first_hash == WORKTIME_HASH && strcmp(words[0], "WORKTIME") == 0))
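+            // WORKTIME is sent by tc-qos-helper.sh once per loop; words[1] is the
+            // time (in milliseconds) the helper needed to collect the data - it is
+            // charted as the script run time, and the same event triggers an update
+            // of the chart showing this thread's own CPU usage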
{ + // debug(D_TC_LOOP, "WORKTIME line '%s' '%s'", words[1], words[2]); + getrusage(RUSAGE_THREAD, &thread); + + static RRDSET *stcpu = NULL; + static RRDDIM *rd_user = NULL, *rd_system = NULL; + + if(unlikely(!stcpu)) { + stcpu = rrdset_create_localhost( + "netdata" + , "plugin_tc_cpu" + , NULL + , "tc.helper" + , NULL + , "NetData TC CPU usage" + , "milliseconds/s" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_NETDATA_TC_CPU + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); + rd_user = rrddim_add(stcpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rd_system = rrddim_add(stcpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else rrdset_next(stcpu); + + rrddim_set_by_pointer(stcpu, rd_user , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); + rrddim_set_by_pointer(stcpu, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); + rrdset_done(stcpu); + + static RRDSET *sttime = NULL; + static RRDDIM *rd_run_time = NULL; + + if(unlikely(!sttime)) { + sttime = rrdset_create_localhost( + "netdata" + , "plugin_tc_time" + , NULL + , "tc.helper" + , NULL + , "NetData TC script execution" + , "milliseconds/run" + , PLUGIN_TC_NAME + , NULL + , NETDATA_CHART_PRIO_NETDATA_TC_TIME + , localhost->rrd_update_every + , RRDSET_TYPE_AREA + ); + rd_run_time = rrddim_add(sttime, "run_time", "run time", 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(sttime); + + rrddim_set_by_pointer(sttime, rd_run_time, str2ll(words[1], NULL)); + rrdset_done(sttime); + + } +#ifdef DETACH_PLUGINS_FROM_NETDATA + else if(unlikely(first_hash == MYPID_HASH && (strcmp(words[0], "MYPID") == 0))) { + // debug(D_TC_LOOP, "MYPID line '%s'", words[1]); + char *id = words[1]; + pid_t pid = atol(id); + + if(likely(pid)) tc_child_pid = pid; + + debug(D_TC_LOOP, "TC: Child PID is %d.", tc_child_pid); + } +#endif + //else { + // debug(D_TC_LOOP, "IGNORED line"); + //} + } + + // fgets() failed or loop broke + int code = mypclose(fp, (pid_t)tc_child_pid); + tc_child_pid = 0; + + if(unlikely(device)) { + // tc_device_free(device); + device = NULL; + class = NULL; + } + + if(unlikely(netdata_exit)) { + tc_device_free_all(); + goto cleanup; + } + + if(code == 1 || code == 127) { + // 1 = DISABLE + // 127 = cannot even run it + error("TC: tc-qos-helper.sh exited with code %d. 
Disabling it.", code); + + tc_device_free_all(); + goto cleanup; + } + + sleep((unsigned int) localhost->rrd_update_every); + } + +cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/tc.plugin/plugin_tc.h b/collectors/tc.plugin/plugin_tc.h new file mode 100644 index 000000000..c64658415 --- /dev/null +++ b/collectors/tc.plugin/plugin_tc.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGIN_TC_H +#define NETDATA_PLUGIN_TC_H 1 + +#include "../../daemon/common.h" + +#if (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_TC \ + { \ + .name = "PLUGIN[tc]", \ + .config_section = CONFIG_SECTION_PLUGINS, \ + .config_name = "tc", \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = tc_main \ + }, + +extern void *tc_main(void *ptr); + +#else // (TARGET_OS == OS_LINUX) + +#define NETDATA_PLUGIN_HOOK_LINUX_TC + +#endif // (TARGET_OS == OS_LINUX) + + +#endif /* NETDATA_PLUGIN_TC_H */ + diff --git a/collectors/tc.plugin/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh new file mode 100644 index 000000000..b49d1f509 --- /dev/null +++ b/collectors/tc.plugin/tc-qos-helper.sh @@ -0,0 +1,315 @@ +#!/usr/bin/env bash + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This script is a helper to allow netdata collect tc data. +# tc output parsing has been implemented in C, inside netdata +# This script allows setting names to dimensions. + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + + +# ----------------------------------------------------------------------------- +# logging functions + +PROGRAM_FILE="$0" +PROGRAM_NAME="$(basename $0)" +PROGRAM_NAME="${PROGRAM_NAME/.plugin}" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=0 +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- +# find /var/run/fireqos + +# the default +fireqos_run_dir="/var/run/fireqos" + +function realdir { + local r="$1" + local t=$(readlink "$r") + + while [ "$t" ] + do + r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t") + t=$(readlink "$r") + done + + dirname "$r" +} + +if [ ! -d "${fireqos_run_dir}" ] + then + + # the fireqos executable - we will use it to find its config + fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)" + + if [ ! -z "${fireqos}" ] + then + + fireqos_exec_dir="$(realdir ${fireqos})" + + if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ] + then + + LOCALSTATEDIR= + source "${fireqos_exec_dir}/install.config" + + if [ -d "${LOCALSTATEDIR}/run/fireqos" ] + then + fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos" + else + warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)." 
+ fi + else + warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'." + fi + else + warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/wiki/You-should-install-QoS-on-all-your-servers" + fi +fi + +# ----------------------------------------------------------------------------- + +[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" + +plugins_dir="${NETDATA_PLUGINS_DIR}" +tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)" + + +# ----------------------------------------------------------------------------- +# user configuration + +# time in seconds to refresh QoS class/qdisc names +qos_get_class_names_every=120 + +# time in seconds to exit - netdata will restart the script +qos_exit_every=3600 + +# what to use? classes or qdiscs? +tc_show="qdisc" # can also be "class" + + +# ----------------------------------------------------------------------------- +# check if we have a valid number for interval + +t=${1} +update_every=$((t)) +[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY} +[ $((update_every)) -lt 1 ] && update_every=1 + + +# ----------------------------------------------------------------------------- +# allow the user to override our defaults + +for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf" +do + if [ -f "${CONFIG}" ] + then + info "Loading config file '${CONFIG}'..." + source "${CONFIG}" + [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi +done + +case "${tc_show}" in + qdisc|class) + ;; + + *) + error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'." + tc_show="qdisc" + ;; +esac + + +# ----------------------------------------------------------------------------- +# default sleep function + +LOOPSLEEPMS_LASTWORK=0 +loopsleepms() { + sleep $1 +} + +# if found and included, this file overwrites loopsleepms() +# with a high resolution timer function for precise looping. +. "${plugins_dir}/loopsleepms.sh.inc" + + +# ----------------------------------------------------------------------------- +# final checks we can run + +if [ -z "${tc}" -o ! -x "${tc}" ] + then + fatal "cannot find command 'tc' in this system." 
+fi + +tc_devices= +fix_names= + +# ----------------------------------------------------------------------------- + +setclassname() { + if [ "${tc_show}" = "qdisc" ] + then + echo "SETCLASSNAME $4 $2" + else + echo "SETCLASSNAME $3 $2" + fi +} + +show_tc_cls() { + [ "${tc_show}" = "qdisc" ] && return 1 + + local x="${1}" + + if [ -f /etc/iproute2/tc_cls ] + then + local classid name rest + while read classid name rest + do + [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue + setclassname "" "${name}" "${classid}" + done </etc/iproute2/tc_cls + return 0 + fi + return 1 +} + +show_fireqos_names() { + local x="${1}" name n interface_dev interface_classes interface_classes_monitor + + if [ -f "${fireqos_run_dir}/ifaces/${x}" ] + then + name="$(<"${fireqos_run_dir}/ifaces/${x}")" + echo "SETDEVICENAME ${name}" + + interface_dev= + interface_classes= + interface_classes_monitor= + source "${fireqos_run_dir}/${name}.conf" + for n in ${interface_classes_monitor} + do + setclassname ${n//|/ } + done + [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}" + + return 0 + fi + + return 1 +} + +show_tc() { + local x="${1}" + + echo "BEGIN ${x}" + + # netdata can parse the output of tc + ${tc} -s ${tc_show} show dev ${x} + + # check FireQOS names for classes + if [ ! -z "${fix_names}" ] + then + show_fireqos_names "${x}" || show_tc_cls "${x}" + fi + + echo "END ${x}" +} + +find_tc_devices() { + local count=0 devs= dev rest l + + # find all the devices in the system + # without forking + while IFS=":| " read dev rest + do + count=$((count + 1)) + [ ${count} -le 2 ] && continue + devs="${devs} ${dev}" + done </proc/net/dev + + # from all the devices find the ones + # that have QoS defined + # unfortunately, one fork per device cannot be avoided + tc_devices= + for dev in ${devs} + do + l="$(${tc} class show dev ${dev} 2>/dev/null)" + [ ! -z "${l}" ] && tc_devices="${tc_devices} ${dev}" + done +} + +# update devices and class names +# once every 2 minutes +names_every=$((qos_get_class_names_every / update_every)) + +# exit this script every hour +# it will be restarted automatically +exit_after=$((qos_exit_every / update_every)) + +c=0 +gc=0 +while [ 1 ] +do + fix_names= + c=$((c + 1)) + gc=$((gc + 1)) + + if [ ${c} -le 1 -o ${c} -ge ${names_every} ] + then + c=1 + fix_names="YES" + find_tc_devices + fi + + for d in ${tc_devices} + do + show_tc ${d} + done + + echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" + + loopsleepms ${update_every} + + [ ${gc} -gt ${exit_after} ] && exit 0 +done diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in new file mode 100755 index 000000000..6f6b0a591 --- /dev/null +++ b/collectors/tc.plugin/tc-qos-helper.sh.in @@ -0,0 +1,315 @@ +#!/usr/bin/env bash + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This script is a helper to allow netdata collect tc data. +# tc output parsing has been implemented in C, inside netdata +# This script allows setting names to dimensions. 
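+#
+# The script speaks a simple line protocol on stdout, parsed by plugin_tc.c
+# (a descriptive summary of the code below):
+#
+#   BEGIN <device>             start of the tc output for one interface
+#   ...                        the raw 'tc -s qdisc|class show dev <device>' output
+#   SETDEVICENAME <name>       the device name, as found by FireQOS
+#   SETDEVICEGROUP <group>     the family (chart group) for the device
+#   SETCLASSNAME <id> <name>   a human-friendly name for a class id
+#   END <device>               end of the tc output for this interface
+#   WORKTIME <ms>              once per loop, the time the iteration took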
+ +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + + +# ----------------------------------------------------------------------------- +# logging functions + +PROGRAM_FILE="$0" +PROGRAM_NAME="$(basename $0)" +PROGRAM_NAME="${PROGRAM_NAME/.plugin}" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=0 +debug() { + [ $debug -eq 1 ] && log DEBUG "${@}" +} + +# ----------------------------------------------------------------------------- +# find /var/run/fireqos + +# the default +fireqos_run_dir="/var/run/fireqos" + +function realdir { + local r="$1" + local t=$(readlink "$r") + + while [ "$t" ] + do + r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t") + t=$(readlink "$r") + done + + dirname "$r" +} + +if [ ! -d "${fireqos_run_dir}" ] + then + + # the fireqos executable - we will use it to find its config + fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)" + + if [ ! -z "${fireqos}" ] + then + + fireqos_exec_dir="$(realdir ${fireqos})" + + if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ] + then + + LOCALSTATEDIR= + source "${fireqos_exec_dir}/install.config" + + if [ -d "${LOCALSTATEDIR}/run/fireqos" ] + then + fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos" + else + warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)." + fi + else + warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'." + fi + else + warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/wiki/You-should-install-QoS-on-all-your-servers" + fi +fi + +# ----------------------------------------------------------------------------- + +[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" + +plugins_dir="${NETDATA_PLUGINS_DIR}" +tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)" + + +# ----------------------------------------------------------------------------- +# user configuration + +# time in seconds to refresh QoS class/qdisc names +qos_get_class_names_every=120 + +# time in seconds to exit - netdata will restart the script +qos_exit_every=3600 + +# what to use? classes or qdiscs? 
+tc_show="qdisc" # can also be "class" + + +# ----------------------------------------------------------------------------- +# check if we have a valid number for interval + +t=${1} +update_every=$((t)) +[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY} +[ $((update_every)) -lt 1 ] && update_every=1 + + +# ----------------------------------------------------------------------------- +# allow the user to override our defaults + +for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf" +do + if [ -f "${CONFIG}" ] + then + info "Loading config file '${CONFIG}'..." + source "${CONFIG}" + [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi +done + +case "${tc_show}" in + qdisc|class) + ;; + + *) + error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'." + tc_show="qdisc" + ;; +esac + + +# ----------------------------------------------------------------------------- +# default sleep function + +LOOPSLEEPMS_LASTWORK=0 +loopsleepms() { + sleep $1 +} + +# if found and included, this file overwrites loopsleepms() +# with a high resolution timer function for precise looping. +. "${plugins_dir}/loopsleepms.sh.inc" + + +# ----------------------------------------------------------------------------- +# final checks we can run + +if [ -z "${tc}" -o ! -x "${tc}" ] + then + fatal "cannot find command 'tc' in this system." +fi + +tc_devices= +fix_names= + +# ----------------------------------------------------------------------------- + +setclassname() { + if [ "${tc_show}" = "qdisc" ] + then + echo "SETCLASSNAME $4 $2" + else + echo "SETCLASSNAME $3 $2" + fi +} + +show_tc_cls() { + [ "${tc_show}" = "qdisc" ] && return 1 + + local x="${1}" + + if [ -f /etc/iproute2/tc_cls ] + then + local classid name rest + while read classid name rest + do + [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue + setclassname "" "${name}" "${classid}" + done </etc/iproute2/tc_cls + return 0 + fi + return 1 +} + +show_fireqos_names() { + local x="${1}" name n interface_dev interface_classes interface_classes_monitor + + if [ -f "${fireqos_run_dir}/ifaces/${x}" ] + then + name="$(<"${fireqos_run_dir}/ifaces/${x}")" + echo "SETDEVICENAME ${name}" + + interface_dev= + interface_classes= + interface_classes_monitor= + source "${fireqos_run_dir}/${name}.conf" + for n in ${interface_classes_monitor} + do + setclassname ${n//|/ } + done + [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}" + + return 0 + fi + + return 1 +} + +show_tc() { + local x="${1}" + + echo "BEGIN ${x}" + + # netdata can parse the output of tc + ${tc} -s ${tc_show} show dev ${x} + + # check FireQOS names for classes + if [ ! -z "${fix_names}" ] + then + show_fireqos_names "${x}" || show_tc_cls "${x}" + fi + + echo "END ${x}" +} + +find_tc_devices() { + local count=0 devs= dev rest l + + # find all the devices in the system + # without forking + while IFS=":| " read dev rest + do + count=$((count + 1)) + [ ${count} -le 2 ] && continue + devs="${devs} ${dev}" + done </proc/net/dev + + # from all the devices find the ones + # that have QoS defined + # unfortunately, one fork per device cannot be avoided + tc_devices= + for dev in ${devs} + do + l="$(${tc} class show dev ${dev} 2>/dev/null)" + [ ! 
-z "${l}" ] && tc_devices="${tc_devices} ${dev}" + done +} + +# update devices and class names +# once every 2 minutes +names_every=$((qos_get_class_names_every / update_every)) + +# exit this script every hour +# it will be restarted automatically +exit_after=$((qos_exit_every / update_every)) + +c=0 +gc=0 +while [ 1 ] +do + fix_names= + c=$((c + 1)) + gc=$((gc + 1)) + + if [ ${c} -le 1 -o ${c} -ge ${names_every} ] + then + c=1 + fix_names="YES" + find_tc_devices + fi + + for d in ${tc_devices} + do + show_tc ${d} + done + + echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" + + loopsleepms ${update_every} + + [ ${gc} -gt ${exit_after} ] && exit 0 +done diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am deleted file mode 100644 index d79bb5ab8..000000000 --- a/conf.d/Makefile.am +++ /dev/null @@ -1,160 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_config_DATA = \ - apps_groups.conf \ - charts.d.conf \ - fping.conf \ - node.d.conf \ - python.d.conf \ - health_alarm_notify.conf \ - health_email_recipients.conf \ - stream.conf \ - $(NULL) - -nodeconfigdir=$(configdir)/node.d -dist_nodeconfig_DATA = \ - node.d/README.md \ - node.d/fronius.conf.md \ - node.d/named.conf.md \ - node.d/sma_webbox.conf.md \ - node.d/snmp.conf.md \ - node.d/stiebeleltron.conf.md \ - $(NULL) - -pythonconfigdir=$(configdir)/python.d -dist_pythonconfig_DATA = \ - python.d/apache.conf \ - python.d/beanstalk.conf \ - python.d/bind_rndc.conf \ - python.d/ceph.conf \ - python.d/chrony.conf \ - python.d/couchdb.conf \ - python.d/cpufreq.conf \ - python.d/dns_query_time.conf \ - python.d/dnsdist.conf \ - python.d/dovecot.conf \ - python.d/elasticsearch.conf \ - python.d/example.conf \ - python.d/exim.conf \ - python.d/fail2ban.conf \ - python.d/freeradius.conf \ - python.d/go_expvar.conf \ - python.d/haproxy.conf \ - python.d/hddtemp.conf \ - python.d/httpcheck.conf \ - python.d/icecast.conf \ - python.d/ipfs.conf \ - python.d/isc_dhcpd.conf \ - python.d/mdstat.conf \ - python.d/memcached.conf \ - python.d/mongodb.conf \ - python.d/mysql.conf \ - python.d/nginx.conf \ - python.d/nginx_plus.conf \ - python.d/nsd.conf \ - python.d/ntpd.conf \ - python.d/ovpn_status_log.conf \ - python.d/phpfpm.conf \ - python.d/portcheck.conf \ - python.d/postfix.conf \ - python.d/postgres.conf \ - python.d/powerdns.conf \ - python.d/rabbitmq.conf \ - python.d/redis.conf \ - python.d/retroshare.conf \ - python.d/samba.conf \ - python.d/sensors.conf \ - python.d/springboot.conf \ - python.d/squid.conf \ - python.d/smartd_log.conf \ - python.d/tomcat.conf \ - python.d/traefik.conf \ - python.d/varnish.conf \ - python.d/web_log.conf \ - $(NULL) - -healthconfigdir=$(configdir)/health.d - -dist_healthconfig_DATA = \ - health.d/apache.conf \ - health.d/backend.conf \ - health.d/beanstalkd.conf \ - health.d/bind_rndc.conf \ - health.d/btrfs.conf \ - health.d/ceph.conf \ - health.d/cpu.conf \ - health.d/couchdb.conf \ - health.d/disks.conf \ - health.d/elasticsearch.conf \ - health.d/entropy.conf \ - health.d/fping.conf \ - health.d/fronius.conf \ - health.d/haproxy.conf \ - health.d/httpcheck.conf \ - health.d/ipc.conf \ - health.d/ipfs.conf \ - health.d/ipmi.conf \ - health.d/isc_dhcpd.conf \ - health.d/lighttpd.conf \ - health.d/mdstat.conf \ - health.d/memcached.conf \ - health.d/memory.conf \ - health.d/mongodb.conf \ - health.d/mysql.conf \ - health.d/named.conf \ - health.d/net.conf \ - health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ - 
health.d/portcheck.conf \ - health.d/postgres.conf \ - health.d/qos.conf \ - health.d/ram.conf \ - health.d/redis.conf \ - health.d/retroshare.conf \ - health.d/softnet.conf \ - health.d/squid.conf \ - health.d/stiebeleltron.conf \ - health.d/swap.conf \ - health.d/tcp_conn.conf \ - health.d/tcp_listen.conf \ - health.d/tcp_mem.conf \ - health.d/tcp_orphans.conf \ - health.d/tcp_resets.conf \ - health.d/udp_errors.conf \ - health.d/varnish.conf \ - health.d/web_log.conf \ - health.d/zfs.conf \ - $(NULL) - -chartsconfigdir=$(configdir)/charts.d -dist_chartsconfig_DATA = \ - charts.d/apache.conf \ - charts.d/apcupsd.conf \ - charts.d/cpufreq.conf \ - charts.d/exim.conf \ - charts.d/libreswan.conf \ - charts.d/load_average.conf \ - charts.d/mysql.conf \ - charts.d/nut.conf \ - charts.d/phpfpm.conf \ - charts.d/sensors.conf \ - charts.d/tomcat.conf \ - charts.d/ap.conf \ - charts.d/cpu_apps.conf \ - charts.d/example.conf \ - charts.d/hddtemp.conf \ - charts.d/mem_apps.conf \ - charts.d/nginx.conf \ - charts.d/opensips.conf \ - charts.d/postfix.conf \ - charts.d/squid.conf \ - $(NULL) - -statsdconfigdir=$(configdir)/statsd.d -dist_statsdconfig_DATA = \ - statsd.d/example.conf \ - $(NULL) diff --git a/conf.d/Makefile.in b/conf.d/Makefile.in deleted file mode 100644 index 48ce51191..000000000 --- a/conf.d/Makefile.in +++ /dev/null @@ -1,788 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = conf.d -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_chartsconfig_DATA) $(dist_config_DATA) \ - $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \ - $(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - 
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(chartsconfigdir)" \ - "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" \ - "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" \ - "$(DESTDIR)$(statsdconfigdir)" -DATA = $(dist_chartsconfig_DATA) $(dist_config_DATA) \ - $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \ - $(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = 
@PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_config_DATA = \ - apps_groups.conf \ - charts.d.conf \ - fping.conf \ - node.d.conf \ - python.d.conf \ - health_alarm_notify.conf \ - health_email_recipients.conf \ - stream.conf \ - $(NULL) - -nodeconfigdir = $(configdir)/node.d -dist_nodeconfig_DATA = \ - node.d/README.md \ - node.d/fronius.conf.md \ - node.d/named.conf.md \ - node.d/sma_webbox.conf.md \ - node.d/snmp.conf.md \ - node.d/stiebeleltron.conf.md \ - $(NULL) - -pythonconfigdir = $(configdir)/python.d -dist_pythonconfig_DATA = \ - python.d/apache.conf \ - python.d/beanstalk.conf \ - python.d/bind_rndc.conf \ - python.d/ceph.conf \ - python.d/chrony.conf \ - python.d/couchdb.conf \ - python.d/cpufreq.conf \ - python.d/dns_query_time.conf \ - python.d/dnsdist.conf \ - python.d/dovecot.conf \ - python.d/elasticsearch.conf \ - python.d/example.conf \ - python.d/exim.conf \ - python.d/fail2ban.conf \ - python.d/freeradius.conf \ - python.d/go_expvar.conf \ - python.d/haproxy.conf \ - python.d/hddtemp.conf \ - python.d/httpcheck.conf \ - python.d/icecast.conf \ - python.d/ipfs.conf \ - python.d/isc_dhcpd.conf \ - python.d/mdstat.conf \ - python.d/memcached.conf \ - python.d/mongodb.conf \ - python.d/mysql.conf \ - python.d/nginx.conf \ - python.d/nginx_plus.conf \ - python.d/nsd.conf \ - python.d/ntpd.conf \ - python.d/ovpn_status_log.conf \ - python.d/phpfpm.conf \ - python.d/portcheck.conf \ - python.d/postfix.conf \ - python.d/postgres.conf \ - python.d/powerdns.conf \ - python.d/rabbitmq.conf \ - python.d/redis.conf \ - python.d/retroshare.conf \ - python.d/samba.conf \ - 
python.d/sensors.conf \ - python.d/springboot.conf \ - python.d/squid.conf \ - python.d/smartd_log.conf \ - python.d/tomcat.conf \ - python.d/traefik.conf \ - python.d/varnish.conf \ - python.d/web_log.conf \ - $(NULL) - -healthconfigdir = $(configdir)/health.d -dist_healthconfig_DATA = \ - health.d/apache.conf \ - health.d/backend.conf \ - health.d/beanstalkd.conf \ - health.d/bind_rndc.conf \ - health.d/btrfs.conf \ - health.d/ceph.conf \ - health.d/cpu.conf \ - health.d/couchdb.conf \ - health.d/disks.conf \ - health.d/elasticsearch.conf \ - health.d/entropy.conf \ - health.d/fping.conf \ - health.d/fronius.conf \ - health.d/haproxy.conf \ - health.d/httpcheck.conf \ - health.d/ipc.conf \ - health.d/ipfs.conf \ - health.d/ipmi.conf \ - health.d/isc_dhcpd.conf \ - health.d/lighttpd.conf \ - health.d/mdstat.conf \ - health.d/memcached.conf \ - health.d/memory.conf \ - health.d/mongodb.conf \ - health.d/mysql.conf \ - health.d/named.conf \ - health.d/net.conf \ - health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ - health.d/portcheck.conf \ - health.d/postgres.conf \ - health.d/qos.conf \ - health.d/ram.conf \ - health.d/redis.conf \ - health.d/retroshare.conf \ - health.d/softnet.conf \ - health.d/squid.conf \ - health.d/stiebeleltron.conf \ - health.d/swap.conf \ - health.d/tcp_conn.conf \ - health.d/tcp_listen.conf \ - health.d/tcp_mem.conf \ - health.d/tcp_orphans.conf \ - health.d/tcp_resets.conf \ - health.d/udp_errors.conf \ - health.d/varnish.conf \ - health.d/web_log.conf \ - health.d/zfs.conf \ - $(NULL) - -chartsconfigdir = $(configdir)/charts.d -dist_chartsconfig_DATA = \ - charts.d/apache.conf \ - charts.d/apcupsd.conf \ - charts.d/cpufreq.conf \ - charts.d/exim.conf \ - charts.d/libreswan.conf \ - charts.d/load_average.conf \ - charts.d/mysql.conf \ - charts.d/nut.conf \ - charts.d/phpfpm.conf \ - charts.d/sensors.conf \ - charts.d/tomcat.conf \ - charts.d/ap.conf \ - charts.d/cpu_apps.conf \ - charts.d/example.conf \ - charts.d/hddtemp.conf \ - charts.d/mem_apps.conf \ - charts.d/nginx.conf \ - charts.d/opensips.conf \ - charts.d/postfix.conf \ - charts.d/squid.conf \ - $(NULL) - -statsdconfigdir = $(configdir)/statsd.d -dist_statsdconfig_DATA = \ - statsd.d/example.conf \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu conf.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu conf.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \ - done - -uninstall-dist_chartsconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_configDATA: $(dist_config_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(configdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(configdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(configdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(configdir)" || exit $$?; \ - done - -uninstall-dist_configDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir) -install-dist_healthconfigDATA: $(dist_healthconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(healthconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(healthconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(healthconfigdir)" || exit $$?; \ - done - -uninstall-dist_healthconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(healthconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \ - $(MKDIR_P) 
"$(DESTDIR)$(nodeconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \ - done - -uninstall-dist_nodeconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \ - done - -uninstall-dist_pythonconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \ - done - -uninstall-dist_statsdconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(statsdconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_chartsconfigDATA install-dist_configDATA \ - install-dist_healthconfigDATA install-dist_nodeconfigDATA \ - install-dist_pythonconfigDATA install-dist_statsdconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_chartsconfigDATA \ - uninstall-dist_configDATA uninstall-dist_healthconfigDATA \ - uninstall-dist_nodeconfigDATA uninstall-dist_pythonconfigDATA \ - uninstall-dist_statsdconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_chartsconfigDATA \ - install-dist_configDATA install-dist_healthconfigDATA \ - install-dist_nodeconfigDATA install-dist_pythonconfigDATA \ - install-dist_statsdconfigDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic 
mostlyclean mostlyclean-generic pdf \
-	pdf-am ps ps-am tags-am uninstall uninstall-am \
-	uninstall-dist_chartsconfigDATA uninstall-dist_configDATA \
-	uninstall-dist_healthconfigDATA uninstall-dist_nodeconfigDATA \
-	uninstall-dist_pythonconfigDATA \
-	uninstall-dist_statsdconfigDATA
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/conf.d/apps_groups.conf b/conf.d/apps_groups.conf
deleted file mode 100644
index 4356e4910..000000000
--- a/conf.d/apps_groups.conf
+++ /dev/null
@@ -1,282 +0,0 @@
-#
-# apps.plugin process grouping
-#
-# The apps.plugin displays charts with information about the processes running.
-# This config allows grouping processes together, so that several processes
-# will be reported as one.
-#
-# Only groups in this file are reported. All other processes will be reported
-# as 'other'.
-#
-# For each process given, its whole process tree will be grouped, not just
-# the process matched. The plugin will include both parents and children.
-#
-# The format is:
-#
-# group: process1 process2 process3 ...
-#
-# Each group can be given multiple times, to add more processes to it.
-#
-# The process names are the ones returned by:
-#
-# - ps -e or /proc/PID/stat
-# - in case of substring mode (see below): /proc/PID/cmdline
-#
-# To add process names with spaces, enclose them in quotes (single or double).
-# example: 'Plex Media Serv' "my other process".
-#
-# Wildcard support:
-# You can add an asterisk (*) at the beginning and/or the end of a process:
-#
-# *name suffix mode: will search for processes ending with 'name'
-# (/proc/PID/stat)
-#
-# name* prefix mode: will search for processes beginning with 'name'
-# (/proc/PID/stat)
-#
-# *name* substring mode: will search for 'name' in the whole command line
-# (/proc/PID/cmdline)
-#
-# If you enter even just one *name* (substring), apps.plugin will process
-# /proc/PID/cmdline for all processes, just once (when they are first seen).
-#
-# To add processes with single quotes, enclose them in double quotes.
-# example: "process with this ' single quote"
-#
-# To add processes with double quotes, enclose them in single quotes:
-# example: 'process with this " double quote'
-#
-# If a group or process name starts with a -, the dimension will be hidden
-# (cpu chart only).
-#
-# If a process starts with a +, debugging will be enabled for it
-# (debugging produces a lot of output - do not enable it in production systems)
-#
-# You can add any number of groups you like. Only the ones found running will
-# affect the charts generated. However, producing charts with hundreds of
-# dimensions may slow down your web browser.
-#
-# The order of the entries in this list is important: the first that matches
-# a process is used, so put important ones at the top. Processes not matched
-# by any row will inherit the group from their parents or children.
-#
-# The order also controls the order of the dimensions on the generated charts
-# (although applications started after apps.plugin is started will be appended
-# to the existing list of dimensions the netdata daemon maintains).
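-#
-# A short, purely hypothetical example tying the rules above together:
-#
-# workers: gunicorn* 'celery worker' *uwsgi*
-# -archival: *backup* logrotate
-#
-# 'gunicorn*' is prefix mode, 'celery worker' is quoted because it contains
-# a space, '*uwsgi*' is substring mode (matched against /proc/PID/cmdline),
-# and 'archival' starts with a -, so its cpu dimension would be hidden.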
- -# ----------------------------------------------------------------------------- -# NETDATA processes accounting - -# netdata main process -netdata: netdata - -# netdata known plugins -# plugins not defined here will be accumulated in netdata, above -apps.plugin: apps.plugin -freeipmi.plugin: freeipmi.plugin -charts.d.plugin: *charts.d.plugin* -node.d.plugin: *node.d.plugin* -python.d.plugin: *python.d.plugin* -tc-qos-helper: *tc-qos-helper.sh* -fping: fping - -# ----------------------------------------------------------------------------- -# authentication/authorization related servers - -auth: radius* openldap* ldap* -fail2ban: fail2ban* - -# ----------------------------------------------------------------------------- -# web/ftp servers - -httpd: apache* httpd nginx* lighttpd -proxy: squid* c-icap squidGuard varnish* -php: php* -ftpd: proftpd in.tftpd vsftpd -uwsgi: uwsgi -unicorn: *unicorn* -puma: *puma* - -# ----------------------------------------------------------------------------- -# database servers - -sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* -nosql: mongod redis* memcached *couchdb* -timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* - -# ----------------------------------------------------------------------------- -# email servers - -email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam unbound tlsmgr postfwd2 postscreen postfix smtp* lmtp* - -# ----------------------------------------------------------------------------- -# network, routing, VPN - -ppp: ppp* -vpn: openvpn pptp* cjdroute gvpe tincd -wifi: hostapd wpa_supplicant -routing: ospfd* ospf6d* bgpd isisd ripd ripngd pimd ldpd zebra vtysh bird* - -# ----------------------------------------------------------------------------- -# high availability and balancers - -camo: *camo* -balancer: ipvs_* haproxy -ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd - -# ----------------------------------------------------------------------------- -# telephony - -pbx: asterisk safe_asterisk *vicidial* -sip: opensips* stund - -# ----------------------------------------------------------------------------- -# chat - -chat: irssi *vines* *prosody* murmurd - -# ----------------------------------------------------------------------------- -# monitoring - -logs: ulogd* syslog* rsyslog* logrotate systemd-journald rotatelogs -nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid watchdog tailon nrpe -splunk: splunkd -azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent* - -# ----------------------------------------------------------------------------- -# storage, file systems and file servers - -ceph: ceph-mds ceph-mgr ceph-mon ceph-osd radosgw* rbd-* -samba: smbd nmbd winbindd -nfs: rpcbind rpc.* nfs* -zfs: spl_* z_* txg_* zil_* arc_* l2arc* -btrfs: btrfs* -iscsi: iscsid iscsi_eh - -# ----------------------------------------------------------------------------- -# containers & virtual machines - -containers: lxc* docker* -VMs: vbox* VBox* qemu* - -# ----------------------------------------------------------------------------- -# ssh servers and clients - -ssh: ssh* scp - -# ----------------------------------------------------------------------------- -# print servers and clients - -print: cups* lpd lpq - -# ----------------------------------------------------------------------------- -# time servers and clients - -time: ntp* systemd-timesyncd - -# 
----------------------------------------------------------------------------- -# dhcp servers and clients - -dhcp: *dhcp* - -# ----------------------------------------------------------------------------- -# name servers and clients - -named: named rncd dig -dnsdist: dnsdist - -# ----------------------------------------------------------------------------- -# installation / compilation / debugging - -build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf -build: git gdb valgrind* - -# ----------------------------------------------------------------------------- -# antivirus - -antivirus: clam* *clam - -# ----------------------------------------------------------------------------- -# torrent clients - -torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent* - -# ----------------------------------------------------------------------------- -# backup servers and clients - -backup: rsync bacula* - -# ----------------------------------------------------------------------------- -# cron - -cron: cron* atd anacron systemd-cron* - -# ----------------------------------------------------------------------------- -# UPS - -ups: upsmon upsd */nut/* - -# ----------------------------------------------------------------------------- -# media players, servers, clients - -media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd -media: mpd minidlnad mt-daapd avahi* Plex* - -# ----------------------------------------------------------------------------- -# java applications - -hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode* -hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode* -hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode* -hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController* - -yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager* -yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager* -yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer* - -sparkworker: *org.apache.spark.deploy.worker.Worker* -sparkmaster: *org.apache.spark.deploy.master.Master* - -hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer* -hbaserest: *org.apache.hadoop.hbase.rest.RESTServer* -hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer* -hbasemaster: *org.apache.hadoop.hbase.master.HMaster* - -zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain* - -hive2: *org.apache.hive.service.server.HiveServer2* -hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore* - -solr: *solr.install.dir* - -airflow: *airflow* - -# ----------------------------------------------------------------------------- -# X - -X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar -X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim -X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit* - -# ----------------------------------------------------------------------------- -# Kernel / System - -ksmd: ksmd - -system: systemd* udisks* udevd* *udevd connmand ipv6_addrconf dbus-* rtkit* -system: inetd xinetd mdadm polkitd acpid uuidd packagekitd upowerd colord -system: accounts-daemon rngd haveged - -kernel: kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod -kernel: fsnotify_mark kthrotld deferwq scsi_* - -# ----------------------------------------------------------------------------- -# other application servers - -kafka: *kafka.Kafka* - -rabbitmq: *rabbitmq* - -sidekiq: *sidekiq* -java: java -ipfs: ipfs 
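-
-# -----------------------------------------------------------------------------
-# (hypothetical illustration only: more groups can be appended here; a JVM
-# service would be matched by a class-name substring from /proc/PID/cmdline,
-# exactly like the hadoop/spark/hbase entries above)
-#
-# flinkjobmanager: *org.apache.flink.runtime.jobmanager.JobManager*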
diff --git a/conf.d/charts.d.conf b/conf.d/charts.d.conf
deleted file mode 100644
index acb2a6fae..000000000
--- a/conf.d/charts.d.conf
+++ /dev/null
@@ -1,63 +0,0 @@
-# This is the configuration for charts.d.plugin
-
-# Each of its collectors can read configuration either from this file
-# or a NAME.conf file (where NAME is the collector name).
-# The collector-specific file has higher precedence.
-
-# This file is a shell script too.
-
-# -----------------------------------------------------------------------------
-
-# number of seconds to run without restart
-# after this time, charts.d.plugin will exit
-# netdata will restart it, but a small gap
-# will appear in the charts.d.plugin charts.
-#restart_timeout=$[3600 * 4]
-
-# when making iterations, charts.d can loop more frequently
-# to prevent plugins missing iterations.
-# this is a percentage relative to update_every to align its
-# iterations.
-# The minimum is 10%, the maximum 100%.
-# So, if update_every is 1 second and time_divisor is 50,
-# charts.d will iterate every 500ms.
-# Charts will be called to collect data only if the time
-# passed since they last collected data is equal to or
-# above their update_every.
-#time_divisor=50
-
-# -----------------------------------------------------------------------------
-
-# the default enable/disable for all charts.d collectors
-# the default is "yes"
-# enable_all_charts="yes"
-
-# BY DEFAULT ENABLED MODULES
-# ap=yes
-# nut=yes
-# opensips=yes
-
-# -----------------------------------------------------------------------------
-# THESE NEED TO BE SET TO "force" TO BE ENABLED
-
-# Nothing useful.
-# Just an example charts.d plugin you can use as a template.
-# example=force
-
-# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
-# apache=force
-# cpufreq=force
-# exim=force
-# hddtemp=force
-# mysql=force
-# nginx=force
-# phpfpm=force
-# postfix=force
-# sensors=force
-# squid=force
-# tomcat=force
-
-# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
-# cpu_apps=force
-# mem_apps=force
-# load_average=force
diff --git a/conf.d/charts.d/ap.conf b/conf.d/charts.d/ap.conf
deleted file mode 100644
index 38fc157ce..000000000
--- a/conf.d/charts.d/ap.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# nothing fancy to configure.
-# this module will run
-# iw dev - to find wireless devices in AP mode
-# iw ${dev} station dump - to get connected clients
-# based on the above, it generates several charts
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#ap_update_every=
-
-# the charts priority on the dashboard
-#ap_priority=6900
-
-# the number of retries to do in case of failure
-# before disabling the module
-#ap_retries=10
diff --git a/conf.d/charts.d/apache.conf b/conf.d/charts.d/apache.conf
deleted file mode 100644
index 50914cf32..000000000
--- a/conf.d/charts.d/apache.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the URL to download apache status info
-#apache_url="http://127.0.0.1:80/server-status?auto"
-#apache_curl_opts=
-
-# convert apache floating point values
-# to integer using this multiplier
-# this only affects precision - the values
-# will be in the proper units
-#apache_decimal_detail=1000000
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#apache_update_every=
-
-# the charts priority on the dashboard
-#apache_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apache_retries=10
diff --git a/conf.d/charts.d/apcupsd.conf b/conf.d/charts.d/apcupsd.conf
deleted file mode 100644
index 679c0d61b..000000000
--- a/conf.d/charts.d/apcupsd.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# add all your APC UPSes in this array - uncomment it too
-#declare -A apcupsd_sources=(
-# ["local"]="127.0.0.1:3551"
-#)
-
-# how long to wait for apcupsd to respond
-#apcupsd_timeout=3
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#apcupsd_update_every=10
-
-# the charts priority on the dashboard
-#apcupsd_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apcupsd_retries=10
diff --git a/conf.d/charts.d/cpu_apps.conf b/conf.d/charts.d/cpu_apps.conf
deleted file mode 100644
index 850cd0c6f..000000000
--- a/conf.d/charts.d/cpu_apps.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# apps.plugin can do better
-
-#cpu_apps_apps=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#cpu_apps_update_every=2
-
-# the number of retries to do in case of failure
-# before disabling the module
-#cpu_apps_retries=10
diff --git a/conf.d/charts.d/cpufreq.conf b/conf.d/charts.d/cpufreq.conf
deleted file mode 100644
index 7130555af..000000000
--- a/conf.d/charts.d/cpufreq.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#cpufreq_sys_dir="/sys/devices"
-#cpufreq_sys_depth=10
-#cpufreq_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#cpufreq_update_every=
-
-# the charts priority on the dashboard
-#cpufreq_priority=10000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#cpufreq_retries=10
diff --git a/conf.d/charts.d/example.conf b/conf.d/charts.d/example.conf
deleted file mode 100644
index 6232ca584..000000000
--- a/conf.d/charts.d/example.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# to enable this chart, you have to set this to 12345
-# (just a demonstration for something that needs to be checked)
-#example_magic_number=12345
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#example_update_every=
-
-# the charts priority on the dashboard
-#example_priority=150000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#example_retries=10
diff --git a/conf.d/charts.d/exim.conf b/conf.d/charts.d/exim.conf
deleted file mode 100644
index f96ac4dbb..000000000
--- a/conf.d/charts.d/exim.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the exim command to run
-# if empty, it will use the one found in the system path
-#exim_command=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#exim_update_every=5
-
-# the charts priority on the dashboard
-#exim_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#exim_retries=10
diff --git a/conf.d/charts.d/hddtemp.conf b/conf.d/charts.d/hddtemp.conf
deleted file mode 100644
index b6037b40e..000000000
--- a/conf.d/charts.d/hddtemp.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#hddtemp_host="localhost"
-#hddtemp_port="7634"
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#hddtemp_update_every=3
-
-# the charts priority on the dashboard
-#hddtemp_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#hddtemp_retries=10
diff --git a/conf.d/charts.d/libreswan.conf b/conf.d/charts.d/libreswan.conf
deleted file mode 100644
index 9b3ee77b7..000000000
--- a/conf.d/charts.d/libreswan.conf
+++ /dev/null
@@ -1,29 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#libreswan_update_every=1
-
-# the charts priority on the dashboard
-#libreswan_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#libreswan_retries=10
-
-# set to 1, to run ipsec with sudo (the default)
-# set to 0, to run ipsec without sudo
-#libreswan_sudo=1
-
-# TO ALLOW NETDATA TO RUN ipsec AS ROOT
-# CREATE THE FILE: /etc/sudoers.d/netdata
-# WITH THESE 2 LINES (uncommented of course):
-#
-# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
-# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
diff --git a/conf.d/charts.d/load_average.conf b/conf.d/charts.d/load_average.conf
deleted file mode 100644
index 68979275f..000000000
--- a/conf.d/charts.d/load_average.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# netdata can collect this metric already
-
-#load_average_enabled=0
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#load_average_update_every=5
-
-# the charts priority on the dashboard
-#load_average_priority=100
-
-# the number of retries to do in case of failure
-# before disabling the module
-#load_average_retries=10
diff --git a/conf.d/charts.d/mem_apps.conf b/conf.d/charts.d/mem_apps.conf
deleted file mode 100644
index 75d24dc3e..000000000
--- a/conf.d/charts.d/mem_apps.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# apps.plugin can do better
-
-#mem_apps_apps=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#mem_apps_update_every=2
-
-# the number of retries to do in case of failure
-# before disabling the module
-#mem_apps_retries=10
diff --git a/conf.d/charts.d/mysql.conf b/conf.d/charts.d/mysql.conf
deleted file mode 100644
index 683e4af35..000000000
--- a/conf.d/charts.d/mysql.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#mysql_cmds[name]=""
-#mysql_opts[name]=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#mysql_update_every=2
-
-# the charts priority on the dashboard
-#mysql_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#mysql_retries=10
diff --git a/conf.d/charts.d/nginx.conf b/conf.d/charts.d/nginx.conf
deleted file mode 100644
index c46100a58..000000000
--- a/conf.d/charts.d/nginx.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#nginx_url="http://127.0.0.1:80/stub_status"
-#nginx_curl_opts=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#nginx_update_every=
-
-# the charts priority on the dashboard
-#nginx_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#nginx_retries=10
diff --git a/conf.d/charts.d/nut.conf b/conf.d/charts.d/nut.conf
deleted file mode 100644
index d477ddd34..000000000
--- a/conf.d/charts.d/nut.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# a space separated list of UPS names
-# if empty, the list returned by 'upsc -l' will be used
-#nut_ups=
-
-# how much time in seconds to wait for nut to respond
-#nut_timeout=2
-
-# set this to 1, to enable another chart showing the number
-# of UPS clients connected to upsd
-#nut_clients_chart=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#nut_update_every=2
-
-# the charts priority on the dashboard
-#nut_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#nut_retries=10
diff --git a/conf.d/charts.d/opensips.conf b/conf.d/charts.d/opensips.conf
deleted file mode 100644
index e25111dce..000000000
--- a/conf.d/charts.d/opensips.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-#opensips_opts="fifo get_statistics all"
-#opensips_cmd=
-#opensips_timeout=2
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#opensips_update_every=5
-
-# the charts priority on the dashboard
-#opensips_priority=80000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#opensips_retries=10
diff --git a/conf.d/charts.d/phpfpm.conf b/conf.d/charts.d/phpfpm.conf
deleted file mode 100644
index e4dd0231b..000000000
--- a/conf.d/charts.d/phpfpm.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# first, you need to enable the php-fpm status page in php-fpm.conf
-# second, you need to add a status location in nginx.conf
-# for details, see https://easyengine.io/tutorials/php/fpm-status-page/
-#phpfpm_urls[name]=""
-#phpfpm_curl_opts[name]=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#phpfpm_update_every=
-
-# the charts priority on the dashboard
-#phpfpm_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#phpfpm_retries=10
-
diff --git a/conf.d/charts.d/postfix.conf b/conf.d/charts.d/postfix.conf
deleted file mode 100644
index b77817bd6..000000000
--- a/conf.d/charts.d/postfix.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the postqueue command
-# if empty, it will use the one found in the system path
-#postfix_postqueue=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#postfix_update_every=15
-
-# the charts priority on the dashboard
-#postfix_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#postfix_retries=10
-
diff --git a/conf.d/charts.d/sensors.conf b/conf.d/charts.d/sensors.conf
deleted file mode 100644
index bcb28807d..000000000
--- a/conf.d/charts.d/sensors.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
diff --git a/conf.d/charts.d/postfix.conf b/conf.d/charts.d/postfix.conf
deleted file mode 100644
index b77817bd6..000000000
--- a/conf.d/charts.d/postfix.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the postqueue command
-# if empty, it will use the one found in the system path
-#postfix_postqueue=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#postfix_update_every=15
-
-# the charts priority on the dashboard
-#postfix_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#postfix_retries=10
-
diff --git a/conf.d/charts.d/sensors.conf b/conf.d/charts.d/sensors.conf
deleted file mode 100644
index bcb28807d..000000000
--- a/conf.d/charts.d/sensors.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the directory where the kernel keeps sensor data
-#sensors_sys_dir="/sys/devices"
-
-# how deep in the tree to check for sensor data
-#sensors_sys_depth=10
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave it to 1, it is faster
-#sensors_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#sensors_update_every=
-
-# the charts priority on the dashboard
-#sensors_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#sensors_retries=10
-
diff --git a/conf.d/charts.d/squid.conf b/conf.d/charts.d/squid.conf
deleted file mode 100644
index 19e928f25..000000000
--- a/conf.d/charts.d/squid.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#squid_host=
-#squid_port=
-#squid_url=
-#squid_timeout=2
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#squid_update_every=2
-
-# the charts priority on the dashboard
-#squid_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#squid_retries=10
-
diff --git a/conf.d/charts.d/tomcat.conf b/conf.d/charts.d/tomcat.conf
deleted file mode 100644
index e9f3eefa9..000000000
--- a/conf.d/charts.d/tomcat.conf
+++ /dev/null
@@ -1,38 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the URL to download tomcat status info
-# usually http://localhost:8080/manager/status?XML=true
-#tomcat_url=""
-#tomcat_curl_opts=""
-
-# set tomcat username/password here
-#tomcat_user=""
-#tomcat_password=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#tomcat_update_every=1
-
-# the charts priority on the dashboard
-#tomcat_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#tomcat_retries=10
-
-# convert tomcat floating point values
-# to integer using this multiplier
-# this only affects precision - the values
-# will be in the proper units
-#tomcat_decimal_detail=1000000
-
-# used by volume chart to convert bytes to KB
-#tomcat_decimal_KB_detail=1000
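A filled-in sketch of the tomcat variables above, using the URL shape the comment suggests and hypothetical credentials:

    tomcat_url="http://localhost:8080/manager/status?XML=true"
    tomcat_user="netdata"        # hypothetical user with the manager-status role
    tomcat_password="changeme"   # hypothetical password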
diff --git a/conf.d/fping.conf b/conf.d/fping.conf
deleted file mode 100644
index 63a7f7acd..000000000
--- a/conf.d/fping.conf
+++ /dev/null
@@ -1,44 +0,0 @@
-# no need for shebang - this file is sourced from fping.plugin
-
-# fping.plugin requires a recent version of fping.
-#
-# You can install it on your system by running:
-#
-#       /usr/libexec/netdata/plugins.d/fping.plugin install
-
-# -----------------------------------------------------------------------------
-# configuration options
-
-# The fping binary to use. We need one that can output netdata friendly info
-# (supporting: -N). If you have multiple versions, put here the full filename
-# of the right one
-
-#fping="/usr/local/bin/fping"
-
-
-# a space separated list of hosts to fping
-# we suggest putting names here and the IPs of these names in /etc/hosts
-
-hosts=""
-
-
-# The update frequency of the chart - the default is inherited from netdata
-
-#update_every=2
-
-
-# The time in milliseconds (1 sec = 1000 ms) to ping the hosts
-# by default 5 pings per host per iteration
-# fping will not allow this to be below 20ms
-
-#ping_every="200"
-
-
-# other fping options - defaults:
-# -R        = send packets with random data
-# -b 56     = the number of bytes per packet
-# -i 1      = 1 ms when sending packets to other hosts (switching hosts)
-# -r 0      = never retry packets
-# -t 5000   = per packet timeout at 5000 ms
-
-#fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
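A minimal working sketch of this file, with hypothetical targets; note that `hosts=` is the one setting shipped uncommented, so it has to be filled in before the plugin does anything:

    fping="/usr/local/bin/fping"
    hosts="gateway dns1 www.example.com"   # hypothetical hosts to ping
    update_every=2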
diff --git a/conf.d/health.d/apache.conf b/conf.d/health.d/apache.conf
deleted file mode 100644
index 0c98b8778..000000000
--- a/conf.d/health.d/apache.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# make sure apache is running
-
-template: apache_last_collected_secs
-      on: apache.requests
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: webmaster
-
diff --git a/conf.d/health.d/backend.conf b/conf.d/health.d/backend.conf
deleted file mode 100644
index 7af100d8f..000000000
--- a/conf.d/health.d/backend.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-
-# make sure we are sending data to the backend
-
-   alarm: backend_last_buffering
-      on: netdata.backend_metrics
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful buffering of backend data
-      to: dba
-
-   alarm: backend_metrics_sent
-      on: netdata.backend_metrics
-   units: %
-    calc: abs($sent) * 100 / abs($buffered)
-   every: 10s
-    warn: $this != 100
-   delay: down 5m multiplier 1.5 max 1h
-    info: percentage of metrics sent to the backend server
-      to: dba
-
-   alarm: backend_metrics_lost
-      on: netdata.backend_metrics
-   units: metrics
-    calc: abs($lost)
-   every: 10s
-    crit: ($this != 0) || ($status == $CRITICAL && abs($sent) == 0)
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of metrics lost due to repeating failures to contact the backend server
-      to: dba
-
-# this chart has been removed from netdata
-#   alarm: backend_slow
-#      on: netdata.backend_latency
-#   units: %
-#    calc: $latency * 100 / ($update_every * 1000)
-#   every: 10s
-#    warn: $this > 50
-#    crit: $this > 100
-#   delay: down 5m multiplier 1.5 max 1h
-#    info: the percentage of time between iterations needed by the backend to process the data sent by netdata
-#      to: dba
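Nearly every health file below repeats the warn/crit idiom seen above: the threshold is a ternary on `$status`, so the value that raises an alarm is stricter than the value that keeps it raised, which prevents flapping around the boundary. A worked reading of the `last_collected` pair, assuming `$update_every` is 10 seconds:

    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
          # healthy: warn only after 5 x 10s = 50s without a successful collection
          # already warning: stay warning while the gap still exceeds 10s
    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
          # healthy or warning: escalate after 60 x 10s = 600s
          # already critical: stay critical while the gap still exceeds 10s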
diff --git a/conf.d/health.d/beanstalkd.conf b/conf.d/health.d/beanstalkd.conf
deleted file mode 100644
index 30dc27328..000000000
--- a/conf.d/health.d/beanstalkd.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-# get the number of buried jobs in all queues
-
-template: server_buried_jobs
-      on: beanstalk.current_jobs
-    calc: $buried
-   units: jobs
-   every: 10s
-    warn: $this > 0
-    crit: $this > 10
-   delay: up 0 down 5m multiplier 1.2 max 1h
-    info: the number of buried jobs aggregated across all tubes
-      to: sysadmin
-
-# get the number of buried jobs per queue
-
-#template: tube_buried_jobs
-#      on: beanstalk.jobs
-#    calc: $buried
-#   units: jobs
-#   every: 10s
-#    warn: $this > 0
-#    crit: $this > 10
-#   delay: up 0 down 5m multiplier 1.2 max 1h
-#    info: the number of jobs buried per tube
-#      to: sysadmin
-
-# get the current number of tubes
-
-#template: number_of_tubes
-#      on: beanstalk.current_tubes
-#    calc: $tubes
-#   every: 10s
-#    warn: $this < 5
-#   delay: up 0 down 5m multiplier 1.2 max 1h
-#    info: the current number of tubes on the server
-#      to: sysadmin
diff --git a/conf.d/health.d/bind_rndc.conf b/conf.d/health.d/bind_rndc.conf
deleted file mode 100644
index 4145e77cd..000000000
--- a/conf.d/health.d/bind_rndc.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-template: bind_rndc_stats_file_size
-      on: bind_rndc.stats_size
-   units: megabytes
-   every: 60
-    calc: $stats_size
-    warn: $this > 512
-    crit: $this > 1024
-    info: the bind stats file is very large! consider creating a logrotate conf file for it
-      to: sysadmin
diff --git a/conf.d/health.d/btrfs.conf b/conf.d/health.d/btrfs.conf
deleted file mode 100644
index b27aa544f..000000000
--- a/conf.d/health.d/btrfs.conf
+++ /dev/null
@@ -1,57 +0,0 @@
-
-template: btrfs_allocated
-      on: btrfs.disk
-      os: *
-   hosts: *
-families: *
-    calc: 100 - ($unallocated * 100 / ($unallocated + $data_used + $data_free + $meta_used + $meta_free + $sys_used + $sys_free))
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (90) : (95))
-    crit: $this > (($status == $CRITICAL) ? (95) : (98))
-   delay: up 1m down 15m multiplier 1.5 max 1h
-    info: the percentage of allocated BTRFS physical disk space
-      to: sysadmin
-
-template: btrfs_data
-      on: btrfs.data
-      os: *
-   hosts: *
-families: *
-    calc: $used * 100 / ($used + $free)
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98
-    crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98
-   delay: up 1m down 15m multiplier 1.5 max 1h
-    info: the percentage of used BTRFS data space
-      to: sysadmin
-
-template: btrfs_metadata
-      on: btrfs.metadata
-      os: *
-   hosts: *
-families: *
-    calc: ($used + $reserved) * 100 / ($used + $free + $reserved)
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98
-    crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98
-   delay: up 1m down 15m multiplier 1.5 max 1h
-    info: the percentage of used BTRFS metadata space
-      to: sysadmin
-
-template: btrfs_system
-      on: btrfs.system
-      os: *
-   hosts: *
-families: *
-    calc: $used * 100 / ($used + $free)
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98
-    crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98
-   delay: up 1m down 15m multiplier 1.5 max 1h
-    info: the percentage of used BTRFS system space
-      to: sysadmin
-
diff --git a/conf.d/health.d/ceph.conf b/conf.d/health.d/ceph.conf
deleted file mode 100644
index de16f7b6f..000000000
--- a/conf.d/health.d/ceph.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-# low ceph disk available
-
-template: cluster_space_usage
-      on: ceph.general_usage
-    calc: $avail * 100 / ($avail + $used)
-   units: %
-   every: 10s
-    warn: $this < 10
-    crit: $this < 1
-   delay: down 5m multiplier 1.2 max 1h
-    info: ceph disk space is almost full
-      to: sysadmin
-
diff --git a/conf.d/health.d/couchdb.conf b/conf.d/health.d/couchdb.conf
deleted file mode 100644
index 4a2895280..000000000
--- a/conf.d/health.d/couchdb.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-
-# make sure couchdb is running
-
-template: couchdb_last_collected_secs
-      on: couchdb.request_methods
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: dba
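Note how `btrfs_data`, `btrfs_metadata` and `btrfs_system` above all gate their thresholds on `$btrfs_allocated`: the current value of one alarm is visible, by name, to the others, so the usage alarms stay quiet until the allocation alarm says the pool is nearly exhausted. A minimal sketch of the same gating, with hypothetical context and dimension names:

    template: pool_allocated
          on: example.pool
        calc: $allocated * 100 / ($allocated + $unallocated)

    template: pool_data_full
          on: example.pool
        calc: $used * 100 / ($used + $free)
        warn: ($this > 90) && ($pool_allocated > 98)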
diff --git a/conf.d/health.d/cpu.conf b/conf.d/health.d/cpu.conf
deleted file mode 100644
index fa8189856..000000000
--- a/conf.d/health.d/cpu.conf
+++ /dev/null
@@ -1,55 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-template: 10min_cpu_usage
-      on: system.cpu
-      os: linux
-   hosts: *
-  lookup: average -10m unaligned of user,system,softirq,irq,guest
-   units: %
-   every: 1m
-    warn: $this > (($status >= $WARNING) ? (75) : (85))
-    crit: $this > (($status == $CRITICAL) ? (85) : (95))
-   delay: down 15m multiplier 1.5 max 1h
-    info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal)
-      to: sysadmin
-
-template: 10min_cpu_iowait
-      on: system.cpu
-      os: linux
-   hosts: *
-  lookup: average -10m unaligned of iowait
-   units: %
-   every: 1m
-    warn: $this > (($status >= $WARNING) ? (20) : (40))
-    crit: $this > (($status == $CRITICAL) ? (40) : (50))
-   delay: down 15m multiplier 1.5 max 1h
-    info: average CPU wait I/O for the last 10 minutes
-      to: sysadmin
-
-template: 20min_steal_cpu
-      on: system.cpu
-      os: linux
-   hosts: *
-  lookup: average -20m unaligned of steal
-   units: %
-   every: 5m
-    warn: $this > (($status >= $WARNING) ? (5) : (10))
-    crit: $this > (($status == $CRITICAL) ? (20) : (30))
-   delay: down 1h multiplier 1.5 max 2h
-    info: average CPU steal time for the last 20 minutes
-      to: sysadmin
-
-## FreeBSD
-template: 10min_cpu_usage
-      on: system.cpu
-      os: freebsd
-   hosts: *
-  lookup: average -10m unaligned of user,system,interrupt
-   units: %
-   every: 1m
-    warn: $this > (($status >= $WARNING) ? (75) : (85))
-    crit: $this > (($status == $CRITICAL) ? (85) : (95))
-   delay: down 15m multiplier 1.5 max 1h
-    info: average cpu utilization for the last 10 minutes (excluding nice)
-      to: sysadmin
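Two templates may share the name `10min_cpu_usage` above because each carries an `os:` selector; netdata instantiates a template only where its `os` and `hosts` patterns match, so per-platform variants coexist. A stripped-down sketch of the idiom, with hypothetical dimensions:

    template: cpu_busy
          os: linux
      lookup: average -10m unaligned of user,system

    template: cpu_busy
          os: freebsd
      lookup: average -10m unaligned of user,system,interrupt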
diff --git a/conf.d/health.d/disks.conf b/conf.d/health.d/disks.conf
deleted file mode 100644
index 26f85848a..000000000
--- a/conf.d/health.d/disks.conf
+++ /dev/null
@@ -1,167 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-
-# -----------------------------------------------------------------------------
-# low disk space
-
-# checking the latest collected values
-# raise an alarm if the disk is low on
-# available disk space
-
-template: disk_space_usage
-      on: disk.space
-      os: linux freebsd
-   hosts: *
-families: *
-    calc: $used * 100 / ($avail + $used)
-   units: %
-   every: 1m
-    warn: $this > (($status >= $WARNING ) ? (80) : (90))
-    crit: $this > (($status == $CRITICAL) ? (90) : (98))
-   delay: up 1m down 15m multiplier 1.5 max 1h
-    info: current disk space usage
-      to: sysadmin
-
-template: disk_inode_usage
-      on: disk.inodes
-      os: linux freebsd
-   hosts: *
-families: *
-    calc: $used * 100 / ($avail + $used)
-   units: %
-   every: 1m
-    warn: $this > (($status >= $WARNING) ? (80) : (90))
-    crit: $this > (($status == $CRITICAL) ? (90) : (98))
-   delay: up 1m down 15m multiplier 1.5 max 1h
-    info: current disk inode usage
-      to: sysadmin
-
-
-# -----------------------------------------------------------------------------
-# disk fill rate
-
-# calculate the rate the disk fills,
-# using as base the available space change
-# during the last hour
-
-# this is just a calculation - it has no alarm
-# we will use it in the next template to find
-# the hours remaining
-
-template: disk_fill_rate
-      on: disk.space
-      os: linux freebsd
-   hosts: *
-families: *
-  lookup: min -10m at -50m unaligned of avail
-    calc: ($this - $avail) / (($now - $after) / 3600)
-   every: 1m
-   units: GB/hour
-    info: average rate the disk fills up (positive), or frees up (negative) space, for the last hour
-
-
-# calculate the hours remaining
-# if the disk continues to fill
-# at this rate
-
-template: out_of_disk_space_time
-      on: disk.space
-      os: linux freebsd
-   hosts: *
-families: *
-    calc: ($disk_fill_rate > 0) ? ($avail / $disk_fill_rate) : (inf)
-   units: hours
-   every: 10s
-    warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
-    crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
-   delay: down 15m multiplier 1.2 max 1h
-    info: estimated time the disk will run out of space, if the system continues to add data with the rate of the last hour
-      to: sysadmin
-
-
-# -----------------------------------------------------------------------------
-# disk inode fill rate
-
-# calculate the rate the disk inodes are allocated,
-# using as base the available inodes change
-# during the last hour
-
-# this is just a calculation - it has no alarm
-# we will use it in the next template to find
-# the hours remaining
-
-template: disk_inode_rate
-      on: disk.inodes
-      os: linux freebsd
-   hosts: *
-families: *
-  lookup: min -10m at -50m unaligned of avail
-    calc: ($this - $avail) / (($now - $after) / 3600)
-   every: 1m
-   units: inodes/hour
-    info: average rate at which disk inodes are allocated (positive), or freed (negative), for the last hour
-
-# calculate the hours remaining
-# if the disk inodes are allocated
-# at this rate
-
-template: out_of_disk_inodes_time
-      on: disk.inodes
-      os: linux freebsd
-   hosts: *
-families: *
-    calc: ($disk_inode_rate > 0) ? ($avail / $disk_inode_rate) : (inf)
-   units: hours
-   every: 10s
-    warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
-    crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
-   delay: down 15m multiplier 1.2 max 1h
-    info: estimated time the disk will run out of inodes, if the system continues to allocate inodes with the rate of the last hour
-      to: sysadmin
-
-
-# -----------------------------------------------------------------------------
-# disk congestion
-
-# raise an alarm if the disk is congested
-# by calculating the average disk utilization
-# for the last 10 minutes
-
-template: 10min_disk_utilization
-      on: disk.util
-      os: linux freebsd
-   hosts: *
-families: *
-  lookup: average -10m unaligned
-   units: %
-   every: 1m
-   green: 90
-     red: 98
-    warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1))
-    crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1))
-   delay: down 15m multiplier 1.2 max 1h
-    info: the percentage of time the disk was busy, during the last 10 minutes
-      to: sysadmin
-
-
-# raise an alarm if the average disk backlog
-# exceeds the green threshold (2000ms)
-# for 10 minutes
-# (i.e. the disk cannot catch up)
-
-template: 10min_disk_backlog
-      on: disk.backlog
-      os: linux
-   hosts: *
-families: *
-  lookup: average -10m unaligned
-   units: ms
-   every: 1m
-   green: 2000
-     red: 5000
-    warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1))
-    crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1))
-   delay: down 15m multiplier 1.2 max 1h
-    info: average of the kernel estimated disk backlog, for the last 10 minutes
-      to: sysadmin
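The two-step fill-rate pattern above is worth unpacking with numbers (hypothetical): `disk_fill_rate` compares the minimum of `avail` in a 10-minute window centered roughly an hour ago against the current `avail`, normalized to GB per hour, and `out_of_disk_space_time` divides the space left by that rate:

    avail about an hour ago : 110 GB
    avail now               : 90 GB
    $now - $after           : ~3600 s
    disk_fill_rate          = (110 - 90) / 1 = 20 GB/hour
    out_of_disk_space_time  = 90 / 20 = 4.5 hours  -> warning (under the healthy 8-hour bar)

A negative rate (the disk is being freed) makes the estimate negative, and the `$this > 0` guard keeps the alarm silent in that case.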
diff --git a/conf.d/health.d/elasticsearch.conf b/conf.d/health.d/elasticsearch.conf
deleted file mode 100644
index dffd40965..000000000
--- a/conf.d/health.d/elasticsearch.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-   alarm: elasticsearch_last_collected
-      on: elasticsearch_local.cluster_health_status
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-    info: number of seconds since the last successful data collection
-      to: sysadmin
diff --git a/conf.d/health.d/entropy.conf b/conf.d/health.d/entropy.conf
deleted file mode 100644
index 66d44ec13..000000000
--- a/conf.d/health.d/entropy.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-
-# check if entropy is too low
-# the alarm is checked every 5 minutes
-# and examines the last 10 minutes of data
-
-   alarm: lowest_entropy
-      on: system.entropy
-      os: linux
-   hosts: *
-  lookup: min -10m unaligned
-   units: entries
-   every: 5m
-    warn: $this < (($status >= $WARNING) ? (200) : (100))
-   delay: down 1h multiplier 1.5 max 2h
-    info: minimum entries in the random numbers pool in the last 10 minutes
-      to: silent
diff --git a/conf.d/health.d/fping.conf b/conf.d/health.d/fping.conf
deleted file mode 100644
index 43658fef6..000000000
--- a/conf.d/health.d/fping.conf
+++ /dev/null
@@ -1,53 +0,0 @@
-
-template: fping_last_collected_secs
-families: *
-      on: fping.latency
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sysadmin
-
-template: host_reachable
-families: *
-      on: fping.latency
-    calc: $average != nan
-   units: up/down
-   every: 10s
-    crit: $this == 0
-    info: states if the remote host is reachable
-   delay: down 30m multiplier 1.5 max 2h
-      to: sysadmin
-
-template: host_latency
-families: *
-      on: fping.latency
-  lookup: average -10s unaligned of average
-   units: ms
-   every: 10s
-   green: 500
-     red: 1000
-    warn: $this > $green OR $max > $red
-    crit: $this > $red
-    info: average round trip delay during the last 10 seconds
-   delay: down 30m multiplier 1.5 max 2h
-      to: sysadmin
-
-template: packet_loss
-families: *
-      on: fping.quality
-  lookup: average -10m unaligned of returned
-    calc: 100 - $this
-   green: 1
-     red: 10
-   units: %
-   every: 10s
-    warn: $this > $green
-    crit: $this > $red
-    info: packet loss percentage
-   delay: down 30m multiplier 1.5 max 2h
-      to: sysadmin
-
diff --git a/conf.d/health.d/fronius.conf b/conf.d/health.d/fronius.conf
deleted file mode 100644
index cdf6c8fcb..000000000
--- a/conf.d/health.d/fronius.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-template: fronius_last_collected_secs
-families: *
-      on: fronius.power
-    calc: $now - $last_collected_t
-   every: 10s
-   units: seconds ago
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sitemgr
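The `families: *` lines above make each template apply to every family of its context, which for fping means every pinged host. Narrowing the pattern lets a subset of hosts carry different thresholds; a sketch, assuming netdata's simple patterns and a hypothetical naming scheme:

    template: host_latency_dc1
    families: dc1-*
          on: fping.latency
      lookup: average -10s unaligned of average
        warn: $this > 200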
diff --git a/conf.d/health.d/haproxy.conf b/conf.d/health.d/haproxy.conf
deleted file mode 100644
index e49c70d48..000000000
--- a/conf.d/health.d/haproxy.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-template: haproxy_backend_server_status
-      on: haproxy_hs.down
-   units: failed servers
-   every: 10s
-  lookup: average -10s
-    crit: $this > 0
-    info: number of failed haproxy backend servers
-      to: sysadmin
-
-template: haproxy_backend_status
-      on: haproxy_hb.down
-   units: failed backends
-   every: 10s
-  lookup: average -10s
-    crit: $this > 0
-    info: number of failed haproxy backends
-      to: sysadmin
-
-template: haproxy_last_collected
-      on: haproxy_hb.down
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-    info: number of seconds since the last successful data collection
-      to: sysadmin
diff --git a/conf.d/health.d/httpcheck.conf b/conf.d/health.d/httpcheck.conf
deleted file mode 100644
index 0ddf35eab..000000000
--- a/conf.d/health.d/httpcheck.conf
+++ /dev/null
@@ -1,99 +0,0 @@
-template: httpcheck_last_collected_secs
-families: *
-      on: httpcheck.status
-    calc: $now - $last_collected_t
-   every: 10s
-   units: seconds ago
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sysadmin
-
-# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
-template: web_service_up
-families: *
-      on: httpcheck.status
-  lookup: average -1m unaligned percentage of success
-    calc: ($this < 75) ? (0) : ($this)
-   every: 5s
-   units: up/down
-    info: at least 75% verified responses during the last 60 seconds, ideal for badges
-      to: silent
-
-template: web_service_bad_content
-families: *
-      on: httpcheck.status
-  lookup: average -5m unaligned percentage of bad_content
-   every: 10s
-   units: %
-    warn: $this >= 10 AND $this < 40
-    crit: $this >= 40
-   delay: down 5m multiplier 1.5 max 1h
-    info: average of unexpected http response content during the last 5 minutes
- options: no-clear-notification
-      to: webmaster
-
-template: web_service_bad_status
-families: *
-      on: httpcheck.status
-  lookup: average -5m unaligned percentage of bad_status
-   every: 10s
-   units: %
-    warn: $this >= 10 AND $this < 40
-    crit: $this >= 40
-   delay: down 5m multiplier 1.5 max 1h
-    info: average of unexpected http status during the last 5 minutes
- options: no-clear-notification
-      to: webmaster
-
-template: web_service_timeouts
-families: *
-      on: httpcheck.status
-  lookup: average -5m unaligned percentage of timeout
-   every: 10s
-   units: %
-    info: average of timeouts during the last 5 minutes
-
-template: no_web_service_connections
-families: *
-      on: httpcheck.status
-  lookup: average -5m unaligned percentage of no_connection
-   every: 10s
-   units: %
-    info: average of failed requests during the last 5 minutes
-
-# combined timeout & no connection alarm
-template: web_service_unreachable
-families: *
-      on: httpcheck.status
-    calc: ($no_web_service_connections >= $web_service_timeouts) ? ($no_web_service_connections) : ($web_service_timeouts)
-   units: %
-   every: 10s
-    warn: ($no_web_service_connections >= 10 OR $web_service_timeouts >= 10) AND ($no_web_service_connections < 40 OR $web_service_timeouts < 40)
-    crit: $no_web_service_connections >= 40 OR $web_service_timeouts >= 40
-   delay: down 5m multiplier 1.5 max 1h
-    info: average of failed requests either due to timeouts or no connection during the last 5 minutes
- options: no-clear-notification
-      to: webmaster
-
-template: 1h_web_service_response_time
-families: *
-      on: httpcheck.responsetime
-  lookup: average -1h unaligned of time
-   every: 30s
-   units: ms
-    info: average response time over the last hour
-
-template: web_service_slow
-families: *
-      on: httpcheck.responsetime
-  lookup: average -3m unaligned of time
-   units: ms
-   every: 10s
-    warn: ($this > ($1h_web_service_response_time * 2) )
-    crit: ($this > ($1h_web_service_response_time * 3) )
-    info: average response time over the last 3 minutes, compared to the average over the last hour
-   delay: down 5m multiplier 1.5 max 1h
- options: no-clear-notification
-      to: webmaster
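`web_service_up` above is built for badges rather than notifications: the lookup yields the percentage of successful checks over the last minute, and the calc snaps anything under 75% to zero, so a badge driven by it reads cleanly as up or down. Worked, with hypothetical counts:

    55 of 60 checks succeeded -> lookup = 91.7 -> calc keeps 91.7 (up)
    40 of 60 checks succeeded -> lookup = 66.7 -> calc returns 0 (down)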
diff --git a/conf.d/health.d/ipc.conf b/conf.d/health.d/ipc.conf
deleted file mode 100644
index 03cf264d8..000000000
--- a/conf.d/health.d/ipc.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-   alarm: semaphores_used
-      on: system.ipc_semaphores
-      os: linux
-   hosts: *
-    calc: $semaphores * 100 / $ipc.semaphores.max
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (70) : (80))
-    crit: $this > (($status == $CRITICAL) ? (70) : (90))
-   delay: down 5m multiplier 1.5 max 1h
-    info: the percentage of IPC semaphores used
-      to: sysadmin
-
-   alarm: semaphore_arrays_used
-      on: system.ipc_semaphore_arrays
-      os: linux
-   hosts: *
-    calc: $arrays * 100 / $ipc.semaphores.arrays.max
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (70) : (80))
-    crit: $this > (($status == $CRITICAL) ? (70) : (90))
-   delay: down 5m multiplier 1.5 max 1h
-    info: the percentage of IPC semaphore arrays used
-      to: sysadmin
diff --git a/conf.d/health.d/ipfs.conf b/conf.d/health.d/ipfs.conf
deleted file mode 100644
index 3f77572d6..000000000
--- a/conf.d/health.d/ipfs.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-
-template: ipfs_datastore_usage
-      on: ipfs.repo_size
-    calc: $size * 100 / $avail
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (80) : (90))
-    crit: $this > (($status == $CRITICAL) ? (90) : (98))
-   delay: down 15m multiplier 1.5 max 1h
-    info: the ipfs datastore is close to running out of space
-      to: sysadmin
diff --git a/conf.d/health.d/ipmi.conf b/conf.d/health.d/ipmi.conf
deleted file mode 100644
index c25581964..000000000
--- a/conf.d/health.d/ipmi.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-   alarm: ipmi_sensors_states
-      on: ipmi.sensors_states
-    calc: $warning + $critical
-   units: sensors
-   every: 10s
-    warn: $this > 0
-    crit: $critical > 0
-   delay: up 5m down 15m multiplier 1.5 max 1h
-    info: the number of IPMI sensors in a non-nominal state
-      to: sysadmin
-
-   alarm: ipmi_events
-      on: ipmi.events
-    calc: $events
-   units: events
-   every: 10s
-    warn: $this > 0
-   delay: up 5m down 15m multiplier 1.5 max 1h
-    info: the number of events in the IPMI System Event Log (SEL)
-      to: sysadmin
diff --git a/conf.d/health.d/isc_dhcpd.conf b/conf.d/health.d/isc_dhcpd.conf
deleted file mode 100644
index 8054656ff..000000000
--- a/conf.d/health.d/isc_dhcpd.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-template: isc_dhcpd_leases_size
-      on: isc_dhcpd.leases_total
-   units: KB
-   every: 60
-    calc: $leases_size
-    warn: $this > 3072
-    crit: $this > 6144
-   delay: up 2m down 5m
-    info: the dhcpd.leases file is too big! the module can slow down your server
-      to: sysadmin
diff --git a/conf.d/health.d/lighttpd.conf b/conf.d/health.d/lighttpd.conf
deleted file mode 100644
index 915907a4a..000000000
--- a/conf.d/health.d/lighttpd.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# make sure lighttpd is running
-
-template: lighttpd_last_collected_secs
-      on: lighttpd.requests
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: webmaster
-
diff --git a/conf.d/health.d/mdstat.conf b/conf.d/health.d/mdstat.conf
deleted file mode 100644
index c9e7d20db..000000000
--- a/conf.d/health.d/mdstat.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-template: mdstat_disks
-      on: md.disks
-   units: failed devices
-   every: 10s
-    calc: $total - $inuse
-    crit: $this > 0
-    info: the array is degraded!
-      to: sysadmin
-
-template: mdstat_last_collected
-      on: md.disks
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-    info: number of seconds since the last successful data collection
-      to: sysadmin
diff --git a/conf.d/health.d/memcached.conf b/conf.d/health.d/memcached.conf
deleted file mode 100644
index d248ef57a..000000000
--- a/conf.d/health.d/memcached.conf
+++ /dev/null
@@ -1,52 +0,0 @@
-
-# make sure memcached is running
-
-template: memcached_last_collected_secs
-      on: memcached.cache
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: dba
-
-
-# detect if memcached cache is full
-
-template: memcached_cache_memory_usage
-      on: memcached.cache
-    calc: $used * 100 / ($used + $available)
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (70) : (80))
-    crit: $this > (($status == $CRITICAL) ? (80) : (90))
-   delay: up 0 down 15m multiplier 1.5 max 1h
-    info: current cache memory usage
-      to: dba
-
-
-# find the rate memcached cache is filling
-
-template: cache_fill_rate
-      on: memcached.cache
-  lookup: min -10m at -50m unaligned of available
-    calc: ($this - $available) / (($now - $after) / 3600)
-   units: KB/hour
-   every: 1m
-    info: average rate the cache fills up (positive), or frees up (negative) space, for the last hour
-
-
-# find the hours remaining until memcached cache is full
-
-template: out_of_cache_space_time
-      on: memcached.cache
-    calc: ($cache_fill_rate > 0) ? ($available / $cache_fill_rate) : (inf)
-   units: hours
-   every: 10s
-    warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
-    crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
-   delay: down 15m multiplier 1.5 max 1h
-    info: estimated time the cache will run out of space, if the system continues to add data with the rate of the last hour
-      to: dba
diff --git a/conf.d/health.d/memory.conf b/conf.d/health.d/memory.conf
deleted file mode 100644
index 4a0e6e522..000000000
--- a/conf.d/health.d/memory.conf
+++ /dev/null
@@ -1,38 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-   alarm: 1hour_ecc_memory_correctable
-      on: mem.ecc_ce
-      os: linux
-   hosts: *
-  lookup: sum -10m unaligned
-   units: errors
-   every: 1m
-    warn: $this > 0
-   delay: down 1h multiplier 1.5 max 1h
-    info: number of ECC correctable errors during the last 10 minutes
-      to: sysadmin
-
-   alarm: 1hour_ecc_memory_uncorrectable
-      on: mem.ecc_ue
-      os: linux
-   hosts: *
-  lookup: sum -10m unaligned
-   units: errors
-   every: 1m
-    crit: $this > 0
-   delay: down 1h multiplier 1.5 max 1h
-    info: number of ECC uncorrectable errors during the last 10 minutes
-      to: sysadmin
-
-   alarm: 1hour_memory_hw_corrupted
-      on: mem.hwcorrupt
-      os: linux
-   hosts: *
-    calc: $HardwareCorrupted
-   units: MB
-   every: 10s
-    warn: $this > 0
-   delay: down 1h multiplier 1.5 max 1h
-    info: amount of memory corrupted due to a hardware failure
-      to: sysadmin
diff --git a/conf.d/health.d/mongodb.conf b/conf.d/health.d/mongodb.conf
deleted file mode 100644
index a80cb3112..000000000
--- a/conf.d/health.d/mongodb.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-
-# make sure mongodb is running
-
-template: mongodb_last_collected_secs
-      on: mongodb.read_operations
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: dba
diff --git a/conf.d/health.d/mysql.conf b/conf.d/health.d/mysql.conf
deleted file mode 100644
index 1eeb993f0..000000000
--- a/conf.d/health.d/mysql.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# make sure mysql is running
-
-template: mysql_last_collected_secs
-      on: mysql.queries
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: dba
-
-
-# -----------------------------------------------------------------------------
-# slow queries
-
-template: mysql_10s_slow_queries
-      on: mysql.queries
-  lookup: sum -10s of slow_queries
-   units: slow queries
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (5) : (10))
-    crit: $this > (($status == $CRITICAL) ? (10) : (20))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of mysql slow queries over the last 10 seconds
-      to: dba
-
-
-# -----------------------------------------------------------------------------
-# lock waits
-
-template: mysql_10s_table_locks_immediate
-      on: mysql.table_locks
-  lookup: sum -10s absolute of immediate
-   units: immediate locks
-   every: 10s
-    info: number of table immediate locks over the last 10 seconds
-      to: dba
-
-template: mysql_10s_table_locks_waited
-      on: mysql.table_locks
-  lookup: sum -10s absolute of waited
-   units: waited locks
-   every: 10s
-    info: number of table waited locks over the last 10 seconds
-      to: dba
-
-template: mysql_10s_waited_locks_ratio
-      on: mysql.table_locks
-    calc: ( ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate) > 0 ) ? (($mysql_10s_table_locks_waited * 100) / ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate)) : 0
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (10) : (25))
-    crit: $this > (($status == $CRITICAL) ? (25) : (50))
-   delay: down 30m multiplier 1.5 max 1h
-    info: the ratio of mysql waited table locks, for the last 10 seconds
-      to: dba
-
-
-# -----------------------------------------------------------------------------
-# replication
-
-template: mysql_replication
-      on: mysql.slave_status
-    calc: ($sql_running == -1 OR $io_running == -1)?0:1
-   units: ok/failed
-   every: 10s
-    crit: $this == 0
-   delay: down 5m multiplier 1.5 max 1h
-    info: checks if mysql replication has stopped
-      to: dba
-
-template: mysql_replication_lag
-      on: mysql.slave_behind
-    calc: $seconds
-   units: seconds
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (5) : (10))
-    crit: $this > (($status == $CRITICAL) ? (10) : (30))
-   delay: down 15m multiplier 1.5 max 1h
-    info: the number of seconds mysql replication is behind its master
-      to: dba
-
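The waited-locks ratio above guards against division by zero with its outer ternary. A worked pass with hypothetical 10-second sums:

    immediate locks : 70
    waited locks    : 30
    ratio           = 30 * 100 / (30 + 70) = 30 %
                      -> warning (healthy bar is 25), critical only from 50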
diff --git a/conf.d/health.d/named.conf b/conf.d/health.d/named.conf
deleted file mode 100644
index 4fc65c8ee..000000000
--- a/conf.d/health.d/named.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# make sure named is running
-
-template: named_last_collected_secs
-      on: named.global_queries
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: domainadmin
-
diff --git a/conf.d/health.d/net.conf b/conf.d/health.d/net.conf
deleted file mode 100644
index 22a88927d..000000000
--- a/conf.d/health.d/net.conf
+++ /dev/null
@@ -1,122 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-# -----------------------------------------------------------------------------
-# dropped packets
-
-# check if an interface is dropping packets
-# the alarm is checked every 1 minute
-# and examines the last 10 minutes of data
-
-template: inbound_packets_dropped
-      on: net.drops
-      os: linux
-   hosts: *
-families: *
-  lookup: sum -10m unaligned absolute of inbound
-   units: packets
-   every: 1m
-    warn: $this >= 5
-   delay: down 1h multiplier 1.5 max 2h
-    info: interface inbound dropped packets in the last 10 minutes
-      to: sysadmin
-
-template: outbound_packets_dropped
-      on: net.drops
-      os: linux
-   hosts: *
-families: *
-  lookup: sum -10m unaligned absolute of outbound
-   units: packets
-   every: 1m
-    warn: $this >= 5
-   delay: down 1h multiplier 1.5 max 2h
-    info: interface outbound dropped packets in the last 10 minutes
-      to: sysadmin
-
-template: inbound_packets_dropped_ratio
-      on: net.packets
-      os: linux
-   hosts: *
-families: *
-  lookup: sum -10m unaligned absolute of received
-    calc: (($inbound_packets_dropped != nan AND $this > 0) ? ($inbound_packets_dropped * 100 / $this) : (0))
-   units: %
-   every: 1m
-    warn: $this >= 0.1
-    crit: $this >= 2
-   delay: down 1h multiplier 1.5 max 2h
-    info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes
-      to: sysadmin
-
-template: outbound_packets_dropped_ratio
-      on: net.packets
-      os: linux
-   hosts: *
-families: *
-  lookup: sum -10m unaligned absolute of sent
-    calc: (($outbound_packets_dropped != nan AND $this > 0) ? ($outbound_packets_dropped * 100 / $this) : (0))
-   units: %
-   every: 1m
-    warn: $this >= 0.1
-    crit: $this >= 2
-   delay: down 1h multiplier 1.5 max 2h
-    info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes
-      to: sysadmin
-
-
-# -----------------------------------------------------------------------------
-# FIFO errors
-
-# check if an interface is having FIFO
-# buffer errors
-# the alarm is checked every 1 minute
-# and examines the last 10 minutes of data
-
-template: 10min_fifo_errors
-      on: net.fifo
-      os: linux
-   hosts: *
-families: *
-  lookup: sum -10m unaligned absolute
-   units: errors
-   every: 1m
-    warn: $this > 0
-   delay: down 1h multiplier 1.5 max 2h
-    info: interface fifo errors in the last 10 minutes
-      to: sysadmin
-
-
-# -----------------------------------------------------------------------------
-# check for packet storms
-
-# 1. calculate the rate packets are received in 1m: 1m_received_packets_rate
-# 2. do the same for the last 10s
-# 3. raise an alarm if the latter is 10x or 20x the former
-#    we assume the minimum packet storm should at least have
-#    10000 packets/s, average of the last 10 seconds
-
-template: 1m_received_packets_rate
-      on: net.packets
-      os: linux freebsd
-   hosts: *
-families: *
-  lookup: average -1m of received
-   units: packets
-   every: 10s
-    info: the average number of packets received during the last minute
-
-template: 10s_received_packets_storm
-      on: net.packets
-      os: linux freebsd
-   hosts: *
-families: *
-  lookup: average -10s of received
-    calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
-   every: 10s
-   units: %
-    warn: $this > (($status >= $WARNING)?(200):(5000))
-    crit: $this > (($status >= $WARNING)?(5000):(6000))
- options: no-clear-notification
-    info: the % of the rate of received packets in the last 10 seconds, compared to the rate of the last minute (clear notification for this alarm will not be sent)
-      to: sysadmin
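A worked pass through the storm detector, with hypothetical traffic:

    1m baseline     : 2000 packets/s   (floor of 1000 not applied)
    last 10 seconds : 50000 packets/s
    calc            = 50000 * 100 / 2000 = 2500 %   -> silent (healthy trigger is 5000 %)
    at 120000 packets/s the ratio is 6000 % and the alarm fires

The `($1m_received_packets_rate < 1000) ? (1000)` floor keeps a near-idle interface from alarming on a trivial burst.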
diff --git a/conf.d/health.d/netfilter.conf b/conf.d/health.d/netfilter.conf
deleted file mode 100644
index fa1732b33..000000000
--- a/conf.d/health.d/netfilter.conf
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-   alarm: netfilter_last_collected_secs
-      on: netfilter.conntrack_sockets
-      os: linux
-   hosts: *
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sysadmin
-
-   alarm: netfilter_conntrack_full
-      on: netfilter.conntrack_sockets
-      os: linux
-   hosts: *
-  lookup: max -10s unaligned of connections
-    calc: $this * 100 / $netfilter.conntrack.max
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (70) : (80))
-    crit: $this > (($status == $CRITICAL) ? (80) : (90))
-   delay: down 5m multiplier 1.5 max 1h
-    info: the number of connections tracked by the netfilter connection tracker, as a percentage of the connection tracker table size
-      to: sysadmin
diff --git a/conf.d/health.d/nginx.conf b/conf.d/health.d/nginx.conf
deleted file mode 100644
index a686c3d99..000000000
--- a/conf.d/health.d/nginx.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# make sure nginx is running
-
-template: nginx_last_collected_secs
-      on: nginx.requests
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: webmaster
-
diff --git a/conf.d/health.d/nginx_plus.conf b/conf.d/health.d/nginx_plus.conf
deleted file mode 100644
index 5a171a76d..000000000
--- a/conf.d/health.d/nginx_plus.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# make sure nginx_plus is running
-
-template: nginx_plus_last_collected_secs
-      on: nginx_plus.requests_total
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: webmaster
-
diff --git a/conf.d/health.d/portcheck.conf b/conf.d/health.d/portcheck.conf
deleted file mode 100644
index f42b63d30..000000000
--- a/conf.d/health.d/portcheck.conf
+++ /dev/null
@@ -1,48 +0,0 @@
-template: portcheck_last_collected_secs
-families: *
-      on: portcheck.status
-    calc: $now - $last_collected_t
-   every: 10s
-   units: seconds ago
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sysadmin
-
-# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
-template: service_reachable
-families: *
-      on: portcheck.status
-  lookup: average -1m unaligned percentage of success
-    calc: ($this < 75) ? (0) : ($this)
-   every: 5s
-   units: up/down
-    info: at least 75% successful connections during the last 60 seconds, ideal for badges
-      to: silent
-
-template: connection_timeouts
-families: *
-      on: portcheck.status
-  lookup: average -5m unaligned percentage of timeout
-   every: 10s
-   units: %
-    warn: $this >= 10 AND $this < 40
-    crit: $this >= 40
-   delay: down 5m multiplier 1.5 max 1h
-    info: average of timeouts during the last 5 minutes
- options: no-clear-notification
-      to: sysadmin
-
-template: connection_fails
-families: *
-      on: portcheck.status
-  lookup: average -5m unaligned percentage of no_connection
-   every: 10s
-   units: %
-    warn: $this >= 10 AND $this < 40
-    crit: $this >= 40
-   delay: down 5m multiplier 1.5 max 1h
-    info: average of failed connections during the last 5 minutes
- options: no-clear-notification
-      to: sysadmin
diff --git a/conf.d/health.d/postgres.conf b/conf.d/health.d/postgres.conf
deleted file mode 100644
index 4e0583b85..000000000
--- a/conf.d/health.d/postgres.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-
-# make sure postgres is running
-
-template: postgres_last_collected_secs
-      on: postgres.db_stat_transactions
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: dba
diff --git a/conf.d/health.d/qos.conf b/conf.d/health.d/qos.conf
deleted file mode 100644
index 7290d15ff..000000000
--- a/conf.d/health.d/qos.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-# check if a QoS class is dropping packets
-# the alarm is checked every 30 seconds
-# and examines the last 10 minutes of data
-
-#template: 10min_qos_packet_drops
-#      on: tc.qos_dropped
-#      os: linux
-#   hosts: *
-#  lookup: sum -10m unaligned absolute
-#   every: 30s
-#    warn: $this > 0
-#   delay: up 0 down 30m multiplier 1.5 max 1h
-#   units: packets
-#    info: dropped packets in the last 10 minutes
-#      to: sysadmin
diff --git a/conf.d/health.d/ram.conf b/conf.d/health.d/ram.conf
deleted file mode 100644
index b6dc5f945..000000000
--- a/conf.d/health.d/ram.conf
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-   alarm: used_ram_to_ignore
-      on: system.ram
-      os: linux
-   hosts: *
-    calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
-   every: 10s
-    info: the amount of memory reported as used that can actually resize itself based on system needs (e.g. the ZFS ARC)
-
-   alarm: ram_in_use
-      on: system.ram
-      os: linux
-   hosts: *
-#    calc: $used * 100 / ($used + $cached + $free)
-    calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (80) : (90))
-    crit: $this > (($status == $CRITICAL) ? (90) : (98))
-   delay: down 15m multiplier 1.5 max 1h
-    info: system RAM used
-      to: sysadmin
-
-   alarm: ram_available
-      on: mem.available
-      os: linux
-   hosts: *
-    calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
-   units: %
-   every: 10s
-    warn: $this < (($status >= $WARNING) ? ( 5) : (10))
-    crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
-   delay: down 15m multiplier 1.5 max 1h
-    info: estimated amount of RAM available for userspace processes, without causing swapping
-      to: sysadmin
-
-## FreeBSD
-   alarm: ram_in_use
-      on: system.ram
-      os: freebsd
-   hosts: *
-    calc: (($active + $wired) - $used_ram_to_ignore) * 100 / (($active + $wired) - $used_ram_to_ignore + $cached + $free)
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (80) : (90))
-    crit: $this > (($status == $CRITICAL) ? (90) : (98))
-   delay: down 15m multiplier 1.5 max 1h
-    info: system RAM usage
-      to: sysadmin
-
-   alarm: ram_available
-      on: system.ram
-      os: freebsd
-   hosts: *
-    calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $buffers)
-   units: %
-   every: 10s
-    warn: $this < (($status >= $WARNING) ? ( 5) : (10))
-    crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
-   delay: down 15m multiplier 1.5 max 1h
-    info: estimated amount of RAM available for userspace processes, without causing swapping
-      to: sysadmin
diff --git a/conf.d/health.d/redis.conf b/conf.d/health.d/redis.conf
deleted file mode 100644
index c08a884a6..000000000
--- a/conf.d/health.d/redis.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# make sure redis is running
-
-template: redis_last_collected_secs
-      on: redis.operations
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: dba
-
-template: redis_bgsave_broken
-families: *
-      on: redis.bgsave_health
-   every: 10s
-    crit: $rdb_last_bgsave_status != 0
-   units: ok/failed
-    info: states if redis bgsave is working
-   delay: down 5m multiplier 1.5 max 1h
-      to: dba
-
-template: redis_bgsave_slow
-families: *
-      on: redis.bgsave_now
-   every: 10s
-    warn: $rdb_bgsave_in_progress > 600
-    crit: $rdb_bgsave_in_progress > 1200
-   units: seconds
-    info: the time redis needs to save its database
-   delay: down 5m multiplier 1.5 max 1h
-      to: dba
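`used_ram_to_ignore` above is a helper: it carries no warn or crit of its own and only publishes a value that the following alarms subtract in their calcs, while expressions like `$system.ram.used` show that an alarm can read dimensions of charts other than its own. A minimal sketch of the idiom, with hypothetical chart and dimension names:

       alarm: cache_to_ignore
          on: example.ram
        calc: ($example.cache.size = nan) ? (0) : ($example.cache.size)
        info: reclaimable cache memory that should not count as used

       alarm: ram_pressure
          on: example.ram
        calc: ($used - $cache_to_ignore) * 100 / ($used + $free)
       units: %
        warn: $this > 90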
diff --git a/conf.d/health.d/retroshare.conf b/conf.d/health.d/retroshare.conf
deleted file mode 100644
index 2344b60ec..000000000
--- a/conf.d/health.d/retroshare.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# make sure RetroShare is running
-
-template: retroshare_last_collected_secs
-      on: retroshare.peers
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sysadmin
-
-# make sure the DHT is fine when active
-
-template: retroshare_dht_working
-      on: retroshare.dht
-    calc: $dht_size_all
-   units: peers
-   every: 1m
-    warn: $this < (($status >= $WARNING) ? (120) : (100))
-    crit: $this < (($status == $CRITICAL) ? (10) : (1))
-   delay: up 0 down 15m multiplier 1.5 max 1h
-    info: checks if the DHT has enough peers to operate
-      to: sysadmin
diff --git a/conf.d/health.d/softnet.conf b/conf.d/health.d/softnet.conf
deleted file mode 100644
index 77c804bfd..000000000
--- a/conf.d/health.d/softnet.conf
+++ /dev/null
@@ -1,40 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-# check for common /proc/net/softnet_stat errors
-
-   alarm: 10min_netdev_backlog_exceeded
-      on: system.softnet_stat
-      os: linux
-   hosts: *
-  lookup: sum -10m unaligned absolute of dropped
-   units: packets
-   every: 1m
-    warn: $this > 0
-   delay: down 1h multiplier 1.5 max 2h
-    info: number of packets dropped in the last 10min, because sysctl net.core.netdev_max_backlog was exceeded (this can be a cause for dropped packets)
-      to: sysadmin
-
-   alarm: 10min_netdev_budget_ran_outs
-      on: system.softnet_stat
-      os: linux
-   hosts: *
-  lookup: sum -10m unaligned absolute of squeezed
-   units: events
-   every: 1m
-    warn: $this > (($status >= $WARNING) ? (0) : (10))
-   delay: down 1h multiplier 1.5 max 2h
-    info: number of times, during the last 10min, ksoftirqd ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs, with work remaining (this can be a cause for dropped packets)
-      to: silent
-
-   alarm: 10min_netisr_backlog_exceeded
-      on: system.softnet_stat
-      os: freebsd
-   hosts: *
-  lookup: sum -10m unaligned absolute of qdrops
-   units: packets
-   every: 1m
-    warn: $this > 0
-   delay: down 1h multiplier 1.5 max 2h
-    info: number of drops in the last 10min, because sysctl net.route.netisr_maxqlen was exceeded (this can be a cause for dropped packets)
-      to: sysadmin
diff --git a/conf.d/health.d/squid.conf b/conf.d/health.d/squid.conf
deleted file mode 100644
index 06cc9678f..000000000
--- a/conf.d/health.d/squid.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# make sure squid is running
-
-template: squid_last_collected_secs
-      on: squid.clients_requests
-    calc: $now - $last_collected_t
-   units: seconds ago
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: proxyadmin
-
diff --git a/conf.d/health.d/stiebeleltron.conf b/conf.d/health.d/stiebeleltron.conf
deleted file mode 100644
index e0361eb20..000000000
--- a/conf.d/health.d/stiebeleltron.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-template: stiebeleltron_last_collected_secs
-families: *
-      on: stiebeleltron.heating.hc1
-    calc: $now - $last_collected_t
-   every: 10s
-   units: seconds ago
-    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-   delay: down 5m multiplier 1.5 max 1h
-    info: number of seconds since the last successful data collection
-      to: sitemgr
diff --git a/conf.d/health.d/swap.conf b/conf.d/health.d/swap.conf
deleted file mode 100644
index f920b0807..000000000
--- a/conf.d/health.d/swap.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-
-# you can disable an alarm notification by setting the 'to' line to: silent
-
-   alarm: 30min_ram_swapped_out
-      on: system.swapio
-      os: linux freebsd
-   hosts: *
-  lookup: sum -30m unaligned absolute of out
-          # we have to convert KB to MB by dividing $this (i.e. the result of the lookup) by 1024
-    calc: $this / 1024 * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
-   units: % of RAM
-   every: 1m
-    warn: $this > (($status >= $WARNING) ? (10) : (20))
-    crit: $this > (($status == $CRITICAL) ? (20) : (30))
-   delay: up 0 down 15m multiplier 1.5 max 1h
-    info: the amount of memory swapped out in the last 30 minutes, as a percentage of the system RAM
-      to: sysadmin
-
-   alarm: ram_in_swap
-      on: system.swap
-      os: linux
-   hosts: *
-    calc: $used * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
-   units: % of RAM
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (15) : (20))
-    crit: $this > (($status == $CRITICAL) ? (40) : (50))
-   delay: up 30s down 15m multiplier 1.5 max 1h
-    info: the swap memory used, as a percentage of the system RAM
-      to: sysadmin
-
-   alarm: used_swap
-      on: system.swap
-      os: linux freebsd
-   hosts: *
-    calc: $used * 100 / ( $used + $free )
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING) ? (80) : (90))
-    crit: $this > (($status == $CRITICAL) ? (90) : (98))
-   delay: up 30s down 15m multiplier 1.5 max 1h
-    info: the percentage of swap memory used
-      to: sysadmin
diff --git a/conf.d/health.d/tcp_conn.conf b/conf.d/health.d/tcp_conn.conf
deleted file mode 100644
index 7aa9a9800..000000000
--- a/conf.d/health.d/tcp_conn.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-
-#
-# ${tcp_max_connections} may be nan or -1 if the system
-# supports dynamic threshold for TCP connections.
-# In this case, the alarm will always be zero.
-#
-
-   alarm: tcp_connections
-      on: ipv4.tcpsock
-      os: linux
-   hosts: *
-    calc: (${tcp_max_connections} > 0) ? ( ${connections} * 100 / ${tcp_max_connections} ) : 0
-   units: %
-   every: 10s
-    warn: $this > (($status >= $WARNING ) ? ( 60 ) : ( 80 ))
-    crit: $this > (($status >= $CRITICAL) ? ( 80 ) : ( 90 ))
-   delay: up 0 down 5m multiplier 1.5 max 1h
-    info: the percentage of IPv4 TCP connections over the max allowed
-      to: sysadmin
diff --git a/conf.d/health.d/tcp_listen.conf b/conf.d/health.d/tcp_listen.conf
deleted file mode 100644
index 957964ae4..000000000
--- a/conf.d/health.d/tcp_listen.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# -----------------------------------------------------------------------------
-# tcp listen sockets issues
-
-   alarm: 1m_ipv4_tcp_listen_overflows
-      on: ipv4.tcplistenissues
-      os: linux freebsd
-   hosts: *
-  lookup: sum -60s unaligned absolute of ListenOverflows
-   units: overflows
-   every: 10s
-    crit: $this > 0
-   delay: up 0 down 5m multiplier 1.5 max 1h
-    info: the number of TCP listen socket overflows during the last minute
-      to: sysadmin
-
-   alarm: 1m_ipv4_tcp_listen_drops
-      on: ipv4.tcplistenissues
-      os: linux
-   hosts: *
-  lookup: sum -60s unaligned absolute of ListenDrops
-   units: drops
-   every: 10s
-    crit: $this > 0
-   delay: up 0 down 5m multiplier 1.5 max 1h
-    info: the number of TCP listen socket drops during the last minute
-      to: sysadmin
-
diff --git a/conf.d/health.d/tcp_mem.conf b/conf.d/health.d/tcp_mem.conf
deleted file mode 100644
index 6927d5765..000000000
--- a/conf.d/health.d/tcp_mem.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# check
-# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html
-#
-# We give a warning when TCP is under memory pressure
-# and a critical when TCP is 90% of its upper memory limit
-#
-
-   alarm: tcp_memory
-      on: ipv4.sockstat_tcp_mem
-      os: linux
-   hosts: *
-    calc: ${mem} * 100 / ${tcp_mem_high}
-   units: %
-   every: 10s
-    warn: ${mem} > (($status >= $WARNING ) ? ( ${tcp_mem_pressure} * 0.8 ) : ( ${tcp_mem_pressure} ))
-    crit: ${mem} > (($status >= $CRITICAL ) ? ( ${tcp_mem_pressure} ) : ( ${tcp_mem_high} * 0.9 ))
-   delay: up 0 down 5m multiplier 1.5 max 1h
-    info: the amount of TCP memory as a percentage of its max memory limit
-      to: sysadmin
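A worked reading of `tcp_memory`, with hypothetical sysctl values (net.ipv4.tcp_mem = 93000 124000 186000 pages, so ${tcp_mem_pressure} = 124000 and ${tcp_mem_high} = 186000):

    healthy         : warn once ${mem} > 124000 pages, crit once ${mem} > 167400 (90% of the limit)
    already warning : the warn bar drops to 0.8 * 124000 = 99200 pages, so the alarm
                      only clears once usage falls well below the pressure point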
( 25 ) : ( 50 )) - delay: up 0 down 5m multiplier 1.5 max 1h - info: the percentage of orphan IPv4 TCP sockets over the max allowed (this may lead to too-many-orphans errors) - to: sysadmin diff --git a/conf.d/health.d/tcp_resets.conf b/conf.d/health.d/tcp_resets.conf deleted file mode 100644 index 91dad3c6a..000000000 --- a/conf.d/health.d/tcp_resets.conf +++ /dev/null @@ -1,67 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# ----------------------------------------------------------------------------- - - alarm: ipv4_tcphandshake_last_collected_secs - on: ipv4.tcphandshake - os: linux freebsd - hosts: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: up 0 down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# ----------------------------------------------------------------------------- -# tcp resets this host sends - - alarm: 1m_ipv4_tcp_resets_sent - on: ipv4.tcphandshake - os: linux - hosts: * - lookup: average -1m at -10s unaligned absolute of OutRsts - units: tcp resets/s - every: 10s - info: average TCP RESETS this host is sending, over the last minute - - alarm: 10s_ipv4_tcp_resets_sent - on: ipv4.tcphandshake - os: linux - hosts: * - lookup: average -10s unaligned absolute of OutRsts - units: tcp resets/s - every: 10s - warn: $this > ((($1m_ipv4_tcp_resets_sent < 5)?(5):($1m_ipv4_tcp_resets_sent)) * (($status >= $WARNING) ? (1) : (20))) - delay: up 0 down 60m multiplier 1.2 max 2h - options: no-clear-notification - info: average TCP RESETS this host is sending, over the last 10 seconds (this can be an indication that a port scan is being made, or that a service running on this host has crashed; clear notification for this alarm will not be sent) - to: sysadmin - -# ----------------------------------------------------------------------------- -# tcp resets this host receives - - alarm: 1m_ipv4_tcp_resets_received - on: ipv4.tcphandshake - os: linux freebsd - hosts: * - lookup: average -1m at -10s unaligned absolute of AttemptFails - units: tcp resets/s - every: 10s - info: average TCP RESETS this host is receiving, over the last minute - - alarm: 10s_ipv4_tcp_resets_received - on: ipv4.tcphandshake - os: linux freebsd - hosts: * - lookup: average -10s unaligned absolute of AttemptFails - units: tcp resets/s - every: 10s - warn: $this > ((($1m_ipv4_tcp_resets_received < 5)?(5):($1m_ipv4_tcp_resets_received)) * (($status >= $WARNING) ? (1) : (10))) - delay: up 0 down 60m multiplier 1.2 max 2h - options: no-clear-notification - info: average TCP RESETS this host is receiving, over the last 10 seconds (this can be an indication that a service this host needs has crashed; clear notification for this alarm will not be sent) - to: sysadmin diff --git a/conf.d/health.d/udp_errors.conf b/conf.d/health.d/udp_errors.conf deleted file mode 100644 index 382b39658..000000000 --- a/conf.d/health.d/udp_errors.conf +++ /dev/null @@ -1,49 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# ----------------------------------------------------------------------------- - - alarm: ipv4_udperrors_last_collected_secs - on: ipv4.udperrors - os: linux freebsd - hosts: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ?
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: up 0 down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# ----------------------------------------------------------------------------- -# UDP receive buffer errors - - alarm: 1m_ipv4_udp_receive_buffer_errors - on: ipv4.udperrors - os: linux freebsd - hosts: * - lookup: sum -1m unaligned absolute of RcvbufErrors - units: errors - every: 10s - warn: $this > 0 - crit: $this > 100 - info: number of UDP receive buffer errors during the last minute - delay: up 0 down 60m multiplier 1.2 max 2h - to: sysadmin - -# ----------------------------------------------------------------------------- -# UDP send buffer errors - - alarm: 1m_ipv4_udp_send_buffer_errors - on: ipv4.udperrors - os: linux - hosts: * - lookup: sum -1m unaligned absolute of SndbufErrors - units: errors - every: 10s - warn: $this > 0 - crit: $this > 100 - info: number of UDP send buffer errors during the last minute - delay: up 0 down 60m multiplier 1.2 max 2h - to: sysadmin diff --git a/conf.d/health.d/varnish.conf b/conf.d/health.d/varnish.conf deleted file mode 100644 index cca7446b4..000000000 --- a/conf.d/health.d/varnish.conf +++ /dev/null @@ -1,9 +0,0 @@ - alarm: varnish_last_collected - on: varnish.uptime - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/conf.d/health.d/web_log.conf b/conf.d/health.d/web_log.conf deleted file mode 100644 index d8be88b47..000000000 --- a/conf.d/health.d/web_log.conf +++ /dev/null @@ -1,163 +0,0 @@ - -# make sure we can collect web log data - -template: last_collected_secs - on: web_log.response_codes -families: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - - -# ----------------------------------------------------------------------------- -# high level response code alarms - -# the following alarms trigger only when there are enough data. -# we assume there are enough data when: -# -# $1m_requests > 120 -# -# i.e. when there are at least 120 requests during the last minute - -template: 1m_requests - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned - calc: ($this == 0)?(1):($this) - units: requests - every: 10s - info: the sum of all HTTP requests over the last minute - -template: 1m_successful - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of successful_requests - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this < (($status >= $WARNING ) ? ( 95 ) : ( 85 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this < (($status == $CRITICAL) ? 
( 85 ) : ( 75 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of successful HTTP responses (1xx, 2xx, 304) over the last minute - to: webmaster - -template: 1m_redirects - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of redirects - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING ) ? ( 1 ) : ( 20 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 20 ) : ( 30 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of HTTP redirects (3xx except 304) over the last minute - to: webmaster - -template: 1m_bad_requests - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of bad_requests - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 30 ) : ( 50 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of HTTP bad requests (4xx) over the last minute - to: webmaster - -template: 1m_internal_errors - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of server_errors - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 1 ) : ( 2 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 2 ) : ( 5 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of HTTP internal server errors (5xx), over the last minute - to: webmaster - - -# ----------------------------------------------------------------------------- -# web slow - -# the following alarms trigger only when there are enough data. -# we assume there are enough data when: -# -# $1m_requests > 120 -# -# i.e. when there are at least 120 requests during the last minute - -template: 10m_response_time - on: web_log.response_time -families: * - lookup: average -10m unaligned of avg - units: ms - every: 30s - info: the average time to respond to HTTP requests, over the last 10 minutes - -template: web_slow - on: web_log.response_time -families: * - lookup: average -1m unaligned of avg - units: ms - every: 10s - green: 500 - red: 1000 - warn: ($1m_requests > 120) ? ($this > $green && $this > ($10m_response_time * 2) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > $red && $this > ($10m_response_time * 4) ) : ( 0 ) - delay: down 15m multiplier 1.5 max 1h - info: the average time to respond to HTTP requests, over the last 1 minute - options: no-clear-notification - to: webmaster - -# ----------------------------------------------------------------------------- -# web too many or too few requests - -# the following alarms trigger only when there are enough data. -# we assume there are enough data when: -# -# $5m_successful_old > 120 -# -# i.e. 
when there were at least 120 requests during the 5 minutes starting -# at -10m and ending at -5m - -template: 5m_successful_old - on: web_log.response_statuses -families: * - lookup: average -5m at -5m unaligned of successful_requests - units: requests/s - every: 30s - info: average rate of successful HTTP requests over the last 5 minutes - -template: 5m_successful - on: web_log.response_statuses -families: * - lookup: average -5m unaligned of successful_requests - units: requests/s - every: 30s - info: average successful HTTP requests over the last 5 minutes - -template: 5m_requests_ratio - on: web_log.response_codes -families: * - calc: ($5m_successful_old > 0)?($5m_successful * 100 / $5m_successful_old):(100) - units: % - every: 30s - warn: ($5m_successful_old > 120) ? ($this > 200 OR $this < 50) : (0) - crit: ($5m_successful_old > 120) ? ($this > 400 OR $this < 25) : (0) - delay: down 15m multiplier 1.5 max 1h -options: no-clear-notification - info: the percentage of successful web requests over the last 5 minutes, \ - compared with the previous 5 minutes \ - (clear notification for this alarm will not be sent) - to: webmaster - diff --git a/conf.d/health.d/zfs.conf b/conf.d/health.d/zfs.conf deleted file mode 100644 index af73824e6..000000000 --- a/conf.d/health.d/zfs.conf +++ /dev/null @@ -1,10 +0,0 @@ - - alarm: zfs_memory_throttle - on: zfs.memory_ops - lookup: sum -10m unaligned absolute of throttled - units: events - every: 1m - warn: $this > 0 - delay: down 1h multiplier 1.5 max 2h - info: the number of times ZFS had to limit the ARC growth in the last 10 minutes - to: sysadmin diff --git a/conf.d/health_alarm_notify.conf b/conf.d/health_alarm_notify.conf deleted file mode 100755 index 0a95931ec..000000000 --- a/conf.d/health_alarm_notify.conf +++ /dev/null @@ -1,708 +0,0 @@ -# Configuration for alarm notifications -# -# This configuration is used by: alarm-notify.sh -# changes take effect immediately (the next alarm will use them). -# -# alarm-notify.sh can send: -# - e-mails (using the sendmail command), -# - push notifications to your mobile phone (pushover.net), -# - messages to your slack team (slack.com), -# - messages to your alerta server (alerta.io), -# - messages to your flock team (flock.com), -# - messages to your discord guild (discordapp.com), -# - messages to your telegram chat / group chat (telegram.org) -# - sms messages to your cell phone or any sms enabled device (twilio.com) -# - sms messages to your cell phone or any sms enabled device (messagebird.com) -# - notifications to users on pagerduty.com -# - messages to your irc channel on your selected network -# -# The 'to' line given at netdata alarms defines a *role*, so that many -# people can be notified for each role. -# -# This file is a BASH script itself. -# -# -#------------------------------------------------------------------------------ -# proxy configuration -# -# If you need to send curl based notifications (pushover, pushbullet, slack, alerta, -# flock, discord, telegram) via a proxy, set these to your proxy address: -#export http_proxy="http://10.0.0.1:3128/" -#export https_proxy="http://10.0.0.1:3128/" - - -#------------------------------------------------------------------------------ -# notifications images -# -# Images in notifications need to be downloaded from an Internet facing site. -# To allow notification providers fetch the icons/images, by default we set -# the URL of the global public netdata registry. 
-# If you have an Internet facing netdata (or you have copied the images/ folder -# of netdata to your web server), set its URL here, to fetch the notification -# images from it. -#images_base_url="http://my.public.netdata.server:19999" - - -#------------------------------------------------------------------------------ -# external commands - -# The full path to the sendmail command. -# If empty, the system $PATH will be searched for it. -# If not found, email notifications will be disabled (silently). -sendmail="" - -# The full path of the curl command. -# If empty, the system $PATH will be searched for it. -# If not found, most notifications will be silently disabled. -curl="" - -# The full path of the nc command. -# If empty, the system $PATH will be searched for it. -# If not found, irc notifications will be silently disabled. -nc="" - -#------------------------------------------------------------------------------ -# extra options for external commands -# -# In some cases, you may need to change what options get passed to an -# external command. Such cases are covered here. - -# Extra options to pass to curl. In most cases, you shouldn't need to add anything -# to this. If you're having issues with HTTPS connections, you might try adding -# '--insecure' here, but be warned that it will make it much easier for -# third-parties to block notification delivery, and may allow disclosure -# of potentially sensitive information. -#curl_options="--insecure" - -#------------------------------------------------------------------------------ -# NOTE ABOUT RECIPIENTS -# -# When you define recipients (all types): -# -# - emails addresses -# - pushover user tokens -# - telegram chat ids -# - slack channels -# - alerta environment -# - flock rooms -# - discord channels -# - hipchat rooms -# - sms phone numbers -# - pagerduty.com (pd) services -# - irc channels -# -# You can append |critical to limit the notifications to be sent. -# -# In these examples, the first recipient receives all the alarms -# while the second one receives only the critical ones: -# -# email : "user1@example.com user2@example.com|critical" -# pushover : "2987343...9437837 8756278...2362736|critical" -# telegram : "111827421 112746832|critical" -# slack : "alarms disasters|critical" -# alerta : "alarms disasters|critical" -# flock : "alarms disasters|critical" -# discord : "alarms disasters|critical" -# twilio : "+15555555555 +17777777777|critical" -# messagebird: "+15555555555 +17777777777|critical" -# kavenegar : "09155555555 09177777777|critical" -# pd : "<pd_service_key_1> <pd_service_key_2>|critical" -# irc : "<irc_channel_1> <irc_channel_2>|critical" -# -# If a recipient is set to empty string, the default recipient of the given -# notification method (email, pushover, telegram, slack, alerta, etc) will be used. -# To disable a notification, use the recipient called: disabled -# This works for all notification methods (including the default recipients). - - -#------------------------------------------------------------------------------ -# email global notification options - -# multiple recipients can be given like this: -# "admin1@example.com admin2@example.com ..." 
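To make the recipient syntax described above concrete, here is a minimal sketch (the addresses are placeholders; the `role_recipients_email` array itself is defined in the RECIPIENTS PER ROLE section at the end of this file):

```sh
# hypothetical example: the sysadmin role notifies two addresses;
# the second one only receives CRITICAL alarms
role_recipients_email[sysadmin]="admin1@example.com admin2@example.com|critical"

# the special recipient "disabled" turns a method off for a role
role_recipients_email[webmaster]="disabled"
```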
- -# the email address sending email notifications -# the default is the system user netdata runs as (usually: netdata) -# The following formats are supported: -# EMAIL_SENDER="user@domain" -# EMAIL_SENDER="User Name <user@domain>" -# EMAIL_SENDER="'User Name' <user@domain>" -# EMAIL_SENDER="\"User Name\" <user@domain>" -EMAIL_SENDER="" - -# enable/disable sending emails -SEND_EMAIL="YES" - -# if a role recipient is not configured, an email will be sent to: -DEFAULT_RECIPIENT_EMAIL="root" -# to receive only critical alarms, set it to "root|critical" - -# Optionally specify the encoding to list in the Content-Type header. -# This doesn't change what encoding the e-mail is sent with, just what -# the headers say it was encoded as. -# This shouldn't need to be changed as it will almost always be -# autodetected from the environment. -#EMAIL_CHARSET="UTF-8" - - -#------------------------------------------------------------------------------ -# pushover (pushover.net) global notification options - -# multiple recipients can be given like this: -# "USERTOKEN1 USERTOKEN2 ..." - -# enable/disable sending pushover notifications -SEND_PUSHOVER="YES" - -# Login to pushover.net to get your pushover app token. -# You need only one for all your netdata servers (or you can have one for -# each of your netdata - your call). -# Without an app token, netdata cannot send pushover notifications. -PUSHOVER_APP_TOKEN="" - -# if a role's recipients are not configured, a notification will be sent to -# this pushover user token (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_PUSHOVER="" - - -#------------------------------------------------------------------------------ -# pushbullet (pushbullet.com) push notification options - -# multiple recipients can be given like this: -# "user1@email.com user2@mail.com" - -# enable/disable sending pushbullet notifications -SEND_PUSHBULLET="YES" - -# Sign up and log in to pushbullet.com -# To get your Access Token, go to https://www.pushbullet.com/#settings/account -# Create a new access token and paste it below. -# Then just set the recipients' emails. -# Please note that if the email in the DEFAULT_RECIPIENT_PUSHBULLET does -# not have a pushbullet account, the pushbullet service will send an email -# to that address instead. - -# Without an access token, netdata cannot send pushbullet notifications. -PUSHBULLET_ACCESS_TOKEN="" -DEFAULT_RECIPIENT_PUSHBULLET="" - -# Device iden of the sending device. Optional. -PUSHBULLET_SOURCE_DEVICE="" - - -#------------------------------------------------------------------------------ -# Twilio (twilio.com) SMS options - -# multiple recipients can be given like this: -# "+15555555555 +17777777777" - -# enable/disable sending twilio SMS -SEND_TWILIO="YES" - -# Sign up for a free trial and select an SMS-capable Twilio number -# To get your Account SID and Token, go to https://www.twilio.com/console -# Place your sid, token and number below. -# Then just set the recipients' phone numbers. -# The trial account is only allowed to use the number specified when set up. - -# Without an account sid and token, netdata cannot send Twilio text messages.
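For illustration, a filled-in Twilio section might look like the following sketch; the SID, token and numbers are placeholders, not working credentials:

```sh
# placeholder values - substitute the ones from your Twilio console
TWILIO_ACCOUNT_SID="ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
TWILIO_ACCOUNT_TOKEN="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
TWILIO_NUMBER="+15005550006"
# the second recipient only receives CRITICAL alarms
DEFAULT_RECIPIENT_TWILIO="+15555555555 +17777777777|critical"
```

Whichever method you configure, you can exercise the whole chain with a test notification (assuming a default install layout; the plugin path may differ on your system): `su -s /bin/bash netdata -c '/usr/libexec/netdata/plugins.d/alarm-notify.sh test sysadmin'`.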
-TWILIO_ACCOUNT_SID="" -TWILIO_ACCOUNT_TOKEN="" -TWILIO_NUMBER="" -DEFAULT_RECIPIENT_TWILIO="" - - -#------------------------------------------------------------------------------ -# Messagebird (messagebird.com) SMS options - -# multiple recipients can be given like this: -# "+15555555555 +17777777777" - -# enable/disable sending messagebird SMS -SEND_MESSAGEBIRD="YES" - -# to get an access key, create a free account at https://www.messagebird.com -# verify and activate the account (no CC info needed) -# log in to your account and enter your phone number to get some free credits -# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)' -# click 'Add access key' and fill in data (you want a live key to send SMS) - -# Without an access key, netdata cannot send Messagebird text messages. -MESSAGEBIRD_ACCESS_KEY="" -MESSAGEBIRD_NUMBER="" -DEFAULT_RECIPIENT_MESSAGEBIRD="" - - -#------------------------------------------------------------------------------ -# Kavenegar (Kavenegar.com) SMS options - -# multiple recipients can be given like this: -# "09155555555 09177777777" - -# enable/disable sending kavenegar SMS -SEND_KAVENEGAR="YES" - -# to get an access key, after selecting and purchasing your desired service -# at http://kavenegar.com/pricing.html -# log in to your account, go to your dashboard and open the account settings page -# at https://panel.kavenegar.com/Client/setting/account, then copy your API key -# (you can also generate a new API key there). -# The Kavenegar sender number can be found and selected in the same place. - -# Without an API key, netdata cannot send KAVENEGAR text messages. -KAVENEGAR_API_KEY="" -KAVENEGAR_SENDER="" -DEFAULT_RECIPIENT_KAVENEGAR="" - - -#------------------------------------------------------------------------------ -# telegram (telegram.org) global notification options - -# To get your chat ID send the command /my_id to telegram bot @get_id. -# Users also need to open a query with the bot (see below). - -# note: multiple recipients can be given like this: -# "CHAT_ID_1 CHAT_ID_2 ..." - -# enable/disable sending telegram messages -SEND_TELEGRAM="YES" - -# Contact the bot @BotFather to create a new bot and receive a bot token. -# Without it, netdata cannot send telegram messages. -TELEGRAM_BOT_TOKEN="" - -# If a role's recipients are not configured, a message will be sent to -# this chat id (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_TELEGRAM="" - - -#------------------------------------------------------------------------------ -# slack (slack.com) global notification options - -# multiple recipients can be given like this: -# "CHANNEL1 CHANNEL2 ..." - -# enable/disable sending slack notifications -SEND_SLACK="YES" - -# Login to slack.com and create an incoming webhook. You need only one for all -# your netdata servers (or you can have one for each of your netdata). -# Without it, netdata cannot send slack notifications. -# Get yours from: https://api.slack.com/incoming-webhooks -SLACK_WEBHOOK_URL="" - -# if a role's recipients are not configured, a notification will be sent to -# this slack channel (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_SLACK="" - - -#------------------------------------------------------------------------------ -# alerta (alerta.io) global notification options - -# multiple recipients (Environments) can be given like this: -# "Production Development ..."
- -# enable/disable sending alerta notifications -SEND_ALERTA="YES" - -# set your alerta server API url here -# this is the API url you defined when you installed the Alerta server; -# it is the same for all users. Do not include a trailing slash. -# ALERTA_WEBHOOK_URL="https://<server>/alerta/api" -ALERTA_WEBHOOK_URL="" - -# Log in to your Alerta server with an administrative user and create an API KEY -# with write permissions. -ALERTA_API_KEY="" - -# you can define environments in /etc/alertad.conf option ALLOWED_ENVIRONMENTS -# standard environments are Production and Development -# if a role's recipients are not configured, a notification will be sent to -# this Environment (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_ALERTA="" - - -#------------------------------------------------------------------------------ -# flock (flock.com) global notification options - -# enable/disable sending flock notifications -SEND_FLOCK="YES" - -# Login to flock.com and create an incoming webhook. You need only one for all -# your netdata servers (or you can have one for each of your netdata). -# Without it, netdata cannot send flock notifications. -FLOCK_WEBHOOK_URL="" - -# if a role recipient is not configured, no notification will be sent -DEFAULT_RECIPIENT_FLOCK="" - - -#------------------------------------------------------------------------------ -# discord (discordapp.com) global notification options - -# multiple recipients can be given like this: -# "CHANNEL1 CHANNEL2 ..." - -# enable/disable sending discord notifications -SEND_DISCORD="YES" - -# Create a webhook by following the official documentation - -# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks -DISCORD_WEBHOOK_URL="" - -# if a role's recipients are not configured, a notification will be sent to -# this discord channel (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_DISCORD="" - - -#------------------------------------------------------------------------------ -# hipchat global notification options - -# multiple recipients can be given like this: -# "ROOM1 ROOM2 ..." - -# enable/disable sending hipchat notifications -SEND_HIPCHAT="YES" - -# define hipchat server -HIPCHAT_SERVER="api.hipchat.com" - -# api.hipchat.com authorization token -# Without this, netdata cannot send hipchat notifications. -HIPCHAT_AUTH_TOKEN="" - -# if a role's recipients are not configured, a notification will be sent to -# this hipchat room (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_HIPCHAT="" - - -#------------------------------------------------------------------------------ -# kafka notification options - -# enable/disable sending kafka notifications -SEND_KAFKA="YES" - -# The URL to POST kafka alarm data to. It should be the full URL. -KAFKA_URL="" - -# The IP to be used in the kafka message as the sender. -KAFKA_SENDER_IP="" - - -#------------------------------------------------------------------------------ -# pagerduty.com notification options -# -# pagerduty.com notifications require the pagerduty agent to be installed and -# a "Generic API" pagerduty service. -# https://www.pagerduty.com/docs/guides/agent-install-guide/ - -# multiple recipients can be given like this: -# "<pd_service_key_1> <pd_service_key_2> ..." - -# enable/disable sending pagerduty notifications -SEND_PD="YES" - -# if a role's recipients are not configured, a notification will be sent to -# the "Generic API" pagerduty.com service that uses this service key.
-# (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_PD="" - - -#------------------------------------------------------------------------------ -# irc notification options -# -# irc notifications require only the nc utility to be installed. - -# multiple recipients can be given like this: -# "<irc_channel_1> <irc_channel_2> ..." - -# enable/disable sending irc notifications -SEND_IRC="YES" - -# if a role's recipients are not configured, a notification will not be sent. -# (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_IRC="" - -# The irc network to which the recipients belong. It must be the full network. -# e.g. "irc.freenode.net" -IRC_NETWORK="" - -# The irc nickname which is required to send the notification. It must not be -# an already registered name as the connection's MODE is defined as a 'guest'. -IRC_NICKNAME="" - -# The irc realname which is required in order to make the connection and is an -# extra identifier. -IRC_REALNAME="" - - -#------------------------------------------------------------------------------ -# custom notifications -# - -# enable/disable sending custom notifications -SEND_CUSTOM="YES" - -# if a role's recipients are not configured, use the following. -# (empty = do not send a notification for unconfigured roles) -DEFAULT_RECIPIENT_CUSTOM="" - -# The custom_sender() is a custom function to do whatever you need to do -custom_sender() { - # variables you can use: - # ${host} the host generated this event - # ${url_host} same as ${host} but URL encoded - # ${unique_id} the unique id of this event - # ${alarm_id} the unique id of the alarm that generated this event - # ${event_id} the incremental id of the event, for this alarm id - # ${when} the timestamp this event occurred - # ${name} the name of the alarm, as given in netdata health.d entries - # ${url_name} same as ${name} but URL encoded - # ${chart} the name of the chart (type.id) - # ${url_chart} same as ${chart} but URL encoded - # ${family} the family of the chart - # ${url_family} same as ${family} but URL encoded - # ${status} the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL - # ${old_status} the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL - # ${value} the current value of the alarm - # ${old_value} the previous value of the alarm - # ${src} the line number and file the alarm has been configured - # ${duration} the duration in seconds of the previous alarm state - # ${duration_txt} same as ${duration} for humans - # ${non_clear_duration} the total duration in seconds this is/was non-clear - # ${non_clear_duration_txt} same as ${non_clear_duration} for humans - # ${units} the units of the value - # ${info} a short description of the alarm - # ${value_string} friendly value (with units) - # ${old_value_string} friendly old value (with units) - # ${image} the URL of an image to represent the status of the alarm - # ${color} a color in #AABBCC format for the alarm - # ${goto_url} the URL the user can click to see the netdata dashboard - - # these are more human friendly: - # ${alarm} like "name = value units" - # ${status_message} like "needs attention", "recovered", "is critical" - # ${severity} like "Escalated to CRITICAL", "Recovered from WARNING" - # ${raised_for} like "(alarm was raised for 10 minutes)" - - # example human readable SMS - local msg="${host} ${status_message}: ${alarm} ${raised_for}" - - # limit it to 160 characters and encode it for use in a URL - urlencode 
"${msg:0:160}" >/dev/null; msg="${REPLY}" - - # a space separated list of the recipients to send alarms to - to="${1}" - - info "not sending custom notification to ${to}, for ${status} of '${host}.${chart}.${name}' - custom_sender() is not configured." -} - - -############################################################################### -# RECIPIENTS PER ROLE - -# ----------------------------------------------------------------------------- -# generic system alarms -# CPU, disks, network interfaces, entropy, etc - -role_recipients_email[sysadmin]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[sysadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[sysadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[sysadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[sysadmin]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[sysadmin]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[sysadmin]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[sysadmin]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[sysadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[sysadmin]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[sysadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[sysadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[sysadmin]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[sysadmin]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[sysadmin]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# DNS related alarms - -role_recipients_email[domainadmin]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[domainadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[domainadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[domainadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[domainadmin]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[domainadmin]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[domainadmin]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[domainadmin]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[domainadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[domainadmin]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[domainadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[domainadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[domainadmin]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[domainadmin]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[domainadmin]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# database servers alarms -# mysql, redis, memcached, postgres, etc - -role_recipients_email[dba]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[dba]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[dba]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[dba]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[dba]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[dba]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[dba]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[dba]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[dba]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[dba]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[dba]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - 
-role_recipients_kavenegar[dba]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[dba]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[dba]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[dba]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# web servers alarms -# apache, nginx, lighttpd, etc - -role_recipients_email[webmaster]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[webmaster]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[webmaster]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[webmaster]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[webmaster]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[webmaster]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[webmaster]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[webmaster]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[webmaster]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[webmaster]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[webmaster]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[webmaster]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[webmaster]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[webmaster]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[webmaster]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# proxy servers alarms -# squid, etc - -role_recipients_email[proxyadmin]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[proxyadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[proxyadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[proxyadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[proxyadmin]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[proxyadmin]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[proxyadmin]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[proxyadmin]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[proxyadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[proxyadmin]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[proxyadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[proxyadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[proxyadmin]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[proxyadmin]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[proxyadmin]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# peripheral devices -# UPS, photovoltaics, etc - -role_recipients_email[sitemgr]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[sitemgr]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[sitemgr]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[sitemgr]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[sitemgr]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[sitemgr]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[sitemgr]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[sitemgr]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[sitemgr]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[sitemgr]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[sitemgr]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[sitemgr]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[sitemgr]="${DEFAULT_RECIPIENT_PD}" - 
-role_recipients_custom[sitemgr]="${DEFAULT_RECIPIENT_CUSTOM}" - diff --git a/conf.d/health_email_recipients.conf b/conf.d/health_email_recipients.conf deleted file mode 100644 index f56c6c64a..000000000 --- a/conf.d/health_email_recipients.conf +++ /dev/null @@ -1,2 +0,0 @@ -# OBSOLETE FILE -# REPLACED WITH health_alarm_notify.conf diff --git a/conf.d/node.d.conf b/conf.d/node.d.conf deleted file mode 100644 index 95aec99ce..000000000 --- a/conf.d/node.d.conf +++ /dev/null @@ -1,39 +0,0 @@ -{ - "___help_1": "Default options for node.d.plugin - this is a JSON file.", - "___help_2": "Use http://jsonlint.com/ to verify it is valid JSON.", - "___help_3": "------------------------------------------------------------", - - "___help_update_every": "Minimum data collection frequency for all node.d/*.node.js modules. Set it to 0 to inherit it from netdata.", - "update_every": 0, - - "___help_modules_enable_autodetect": "Enable/disable auto-detection for node.d/*.node.js modules that support it.", - "modules_enable_autodetect": true, - - "___help_modules_enable_all": "Enable all node.d/*.node.js modules by default.", - "modules_enable_all": true, - - "___help_modules": "Enable/disable the following modules. Give only XXX for node.d/XXX.node.js", - "modules": { - "named": { - "enabled": true - }, - "sma_webbox": { - "enabled": true - }, - "snmp": { - "enabled": true - } - }, - - "___help_paths": "Paths that control the operation of node.d.plugin", - "paths": { - "___help_plugins": "The full path to the modules javascript node.d/ directory", - "plugins": null, - - "___help_config": "The full path to the modules configs node.d/ directory", - "config": null, - - "___help_modules": "Array of paths to add to node.js when searching for node_modules", - "modules": [] - } -} diff --git a/conf.d/node.d/README.md b/conf.d/node.d/README.md deleted file mode 100644 index 45e3d02a6..000000000 --- a/conf.d/node.d/README.md +++ /dev/null @@ -1,7 +0,0 @@ -`node.d.plugin` modules accept configuration in JSON format. - -Unfortunately, JSON files do not accept comments. So, the best way to describe them is to have markdown text files with instructions. - -JSON has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain configuration file cannot be loaded, we suggest to verify it at [http://jsonlint.com/](http://jsonlint.com/). - -The files in this directory, provide usable examples for configuring each `node.d.plugin` module. diff --git a/conf.d/node.d/fronius.conf.md b/conf.d/node.d/fronius.conf.md deleted file mode 100644 index 622086b27..000000000 --- a/conf.d/node.d/fronius.conf.md +++ /dev/null @@ -1,67 +0,0 @@ -[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m) - -The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M: - -- Datalogger version: 240.162630 -- Software version: 3.7.4-6 -- Hardware version: 2.4D - -Other products and versions may work, but without any guarantees. - -Example netdata configuration for node.d/fronius.conf. Copy this section to fronius.conf and change name/ip. -The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 should be okay too. You can modify this per server. 
-```json -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "name": "solar", - "hostname": "symo.ip.or.dns", - "update_every": 5, - "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" - } - ] -} -``` - -The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this: -```json -{ - "Head" : { - "RequestArguments" : {}, - "Status" : { - "Code" : 0, - "Reason" : "", - "UserMessage" : "" - }, - "Timestamp" : "2017-07-05T12:35:12+02:00" - }, - "Body" : { - "Data" : { - "Site" : { - "Mode" : "meter", - "P_Grid" : -6834.549847, - "P_Load" : -1271.450153, - "P_Akku" : null, - "P_PV" : 8106, - "rel_SelfConsumption" : 15.685297, - "rel_Autonomy" : 100, - "E_Day" : 35020, - "E_Year" : 5826076, - "E_Total" : 14788870, - "Meter_Location" : "grid" - }, - "Inverters" : { - "1" : { - "DT" : 123, - "P" : 8106, - "E_Day" : 35020, - "E_Year" : 5826076, - "E_Total" : 14788870 - } - } - } - } -} -``` diff --git a/conf.d/node.d/named.conf.md b/conf.d/node.d/named.conf.md deleted file mode 100644 index fa843dd58..000000000 --- a/conf.d/node.d/named.conf.md +++ /dev/null @@ -1,344 +0,0 @@ -# ISC Bind Statistics - -Using this netdata collector, you can monitor one or more ISC Bind servers. - -The source code for this plugin is [here](https://github.com/firehol/netdata/blob/master/node.d/named.node.js). - -## Example netdata charts - -Depending on the number of views your bind has, you may get a large number of charts. -Here it is with just one view: - -![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png) -![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png) - -## How it works - -The plugin will execute (from within node.js) the equivalent of: - -```sh -curl "http://localhost:8888/json/v1/server" -``` - -Here is a sample of the output this command produces.
- -```js -{ - "json-stats-version":"1.0", - "boot-time":"2016-01-31T08:20:48Z", - "config-time":"2016-01-31T09:28:03Z", - "current-time":"2016-02-02T22:22:20Z", - "opcodes":{ - "QUERY":247816, - "IQUERY":0, - "STATUS":0, - "RESERVED3":0, - "NOTIFY":0, - "UPDATE":3813, - "RESERVED6":0, - "RESERVED7":0, - "RESERVED8":0, - "RESERVED9":0, - "RESERVED10":0, - "RESERVED11":0, - "RESERVED12":0, - "RESERVED13":0, - "RESERVED14":0, - "RESERVED15":0 - }, - "qtypes":{ - "A":89519, - "NS":863, - "CNAME":1, - "SOA":1, - "PTR":116779, - "MX":276, - "TXT":198, - "AAAA":39324, - "SRV":850, - "ANY":5 - }, - "nsstats":{ - "Requestv4":251630, - "ReqEdns0":1255, - "ReqTSIG":3813, - "ReqTCP":57, - "AuthQryRej":1455, - "RecQryRej":122, - "Response":245918, - "TruncatedResp":44, - "RespEDNS0":1255, - "RespTSIG":3813, - "QrySuccess":205159, - "QryAuthAns":119495, - "QryNoauthAns":120770, - "QryNxrrset":32711, - "QrySERVFAIL":262, - "QryNXDOMAIN":2395, - "QryRecursion":40885, - "QryDuplicate":5712, - "QryFailure":1577, - "UpdateDone":2514, - "UpdateFail":1299, - "UpdateBadPrereq":1276, - "QryUDP":246194, - "QryTCP":45, - "OtherOpt":101 - }, - "views":{ - "local":{ - "resolver":{ - "stats":{ - "Queryv4":74577, - "Responsev4":67032, - "NXDOMAIN":601, - "SERVFAIL":5, - "FORMERR":7, - "EDNS0Fail":7, - "Truncated":3071, - "Lame":4, - "Retry":11826, - "QueryTimeout":1838, - "GlueFetchv4":6864, - "GlueFetchv4Fail":30, - "QryRTT10":112, - "QryRTT100":42900, - "QryRTT500":23275, - "QryRTT800":534, - "QryRTT1600":97, - "QryRTT1600+":20, - "BucketSize":31, - "REFUSED":13 - }, - "qtypes":{ - "A":64931, - "NS":870, - "CNAME":185, - "PTR":5, - "MX":49, - "TXT":149, - "AAAA":7972, - "SRV":416 - }, - "cache":{ - "A":40356, - "NS":8032, - "CNAME":14477, - "PTR":2, - "MX":21, - "TXT":32, - "AAAA":3301, - "SRV":94, - "DS":237, - "RRSIG":2301, - "NSEC":126, - "!A":52, - "!NS":4, - "!TXT":1, - "!AAAA":3797, - "!SRV":9, - "NXDOMAIN":590 - }, - "cachestats":{ - "CacheHits":1085188, - "CacheMisses":109, - "QueryHits":464755, - "QueryMisses":55624, - "DeleteLRU":0, - "DeleteTTL":42615, - "CacheNodes":5188, - "CacheBuckets":2079, - "TreeMemTotal":2326026, - "TreeMemInUse":1508075, - "HeapMemMax":132096, - "HeapMemTotal":393216, - "HeapMemInUse":132096 - }, - "adb":{ - "nentries":1021, - "entriescnt":3157, - "nnames":1021, - "namescnt":3022 - } - } - }, - "public":{ - "resolver":{ - "stats":{ - "BucketSize":31 - }, - "qtypes":{ - }, - "cache":{ - }, - "cachestats":{ - "CacheHits":0, - "CacheMisses":0, - "QueryHits":0, - "QueryMisses":0, - "DeleteLRU":0, - "DeleteTTL":0, - "CacheNodes":0, - "CacheBuckets":64, - "TreeMemTotal":287392, - "TreeMemInUse":29608, - "HeapMemMax":1024, - "HeapMemTotal":262144, - "HeapMemInUse":1024 - }, - "adb":{ - "nentries":1021, - "nnames":1021 - } - } - }, - "_bind":{ - "resolver":{ - "stats":{ - "BucketSize":31 - }, - "qtypes":{ - }, - "cache":{ - }, - "cachestats":{ - "CacheHits":0, - "CacheMisses":0, - "QueryHits":0, - "QueryMisses":0, - "DeleteLRU":0, - "DeleteTTL":0, - "CacheNodes":0, - "CacheBuckets":64, - "TreeMemTotal":287392, - "TreeMemInUse":29608, - "HeapMemMax":1024, - "HeapMemTotal":262144, - "HeapMemInUse":1024 - }, - "adb":{ - "nentries":1021, - "nnames":1021 - } - } - } - } -} -``` - - -From this output it collects: - -- Global Received Requests by IP version (IPv4, IPv6) -- Global Successful Queries -- Current Recursive Clients -- Global Queries by IP Protocol (TCP, UDP) -- Global Queries Analysis -- Global Received Updates -- Global Query Failures -- Global Query Failures Analysis -- Other 
Global Server Statistics -- Global Incoming Requests by OpCode -- Global Incoming Requests by Query Type -- Global Socket Statistics (will only work if the url is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and probably will account for 0.5% CPU overhead alone, per bind server added) -- Per View Statistics (the following set will be added for each bind view): - - View, Resolver Active Queries - - View, Resolver Statistics - - View, Resolver Round Trip Timings - - View, Requests by Query Type - -## Configuration - -The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents: - -```js -{ - "enable_autodetect": true, - "update_every": 5, - "servers": [ - { - "name": "bind1", - "url": "http://127.0.0.1:8888/json/v1/server", - "update_every": 1 - }, - { - "name": "bind2", - "url": "http://10.1.2.3:8888/json/v1/server", - "update_every": 2 - } - ] -} -``` - -You can add any number of bind servers. - -If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server` which, if successful, will be added too. - -### XML instead of JSON, from bind - -The collector can also accept bind URLs that return XML output. This might be required if you cannot have bind 9.10+ with JSON, but you have a version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for versions supported. - -In such cases, use a URL like this: - -```sh -curl "http://localhost:8888/xml/v3/server" -``` - -Only `xml` and `v3` have been tested. - -Keep in mind though, that XML parsing is done using javascript code, which requires a triple conversion: - -1. from XML to JSON using a javascript XML parser (**CPU intensive**), -2. which is then transformed to emulate the JSON output of bind (**CPU intensive** - and yes, the JSON converted from XML is different from the native JSON - even bind produces different names for various attributes), -3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON). - -In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON. - -**So, if you can use the JSON output of bind, prefer it over XML**. Also keep in mind that bind itself will use more CPU when generating XML instead of JSON. - -The XML interface of bind is not autodetected. -You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this: - -```js -{ - "enable_autodetect": false, - "update_every": 1, - "servers": [ - { - "name": "local", - "url": "http://localhost:8888/xml/v3/server", - "update_every": 1 - } - ] -} -``` - -Of course, you can monitor more than one bind server. Each one can be configured with either JSON or XML output. - -## Auto-detection - -Auto-detection is controlled by `enable_autodetect` in the config file. The default is enabled, so that if the collector can connect to `http://localhost:8888/json/v1/server` to receive bind statistics, that server will automatically be monitored. - -## Bind (named) configuration - -To use this plugin, you have to have bind v9.10+ properly compiled to provide statistics in `JSON` format.
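If you are unsure how your `named` binary was built, one way to check (an assumption: that your build prints its configure options, as stock ISC builds do) is:

```sh
# look for JSON (libjson) and XML (libxml2) statistics support
named -V | grep -o -e '--with-libjson[^ ]*' -e '--with-libxml2[^ ]*'
```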
- -For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0). - -Normally, you will need something like this in your `named.conf`: - -``` -statistics-channels { - inet 127.0.0.1 port 8888 allow { 127.0.0.1; }; - inet ::1 port 8888 allow { ::1; }; -}; -``` - -(use the IPv4 or IPv6 line depending on what you are using, you can also use both) - -Verify it works by running the following command (the collector is written in node.js and will query your bind server directly, but if this command works, the collector should be able to work too): - -```sh -curl "http://localhost:8888/json/v1/server" -``` - diff --git a/conf.d/node.d/sma_webbox.conf.md b/conf.d/node.d/sma_webbox.conf.md deleted file mode 100644 index 19fdc9dd3..000000000 --- a/conf.d/node.d/sma_webbox.conf.md +++ /dev/null @@ -1,25 +0,0 @@ - -[SMA Sunny Webbox](http://www.solar-is-future.com/sma-technology-for-our-future/products/sunny-webbox/index.html) - -Example netdata configuration for node.d/sma_webbox.conf - -The module supports any number of name servers, like this: - -```json -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "name": "plant1", - "hostname": "10.0.1.1", - "update_every": 10 - }, - { - "name": "plant2", - "hostname": "10.0.2.1", - "update_every": 15 - } - ] -} -``` diff --git a/conf.d/node.d/snmp.conf.md b/conf.d/node.d/snmp.conf.md deleted file mode 100644 index 6b496f7a8..000000000 --- a/conf.d/node.d/snmp.conf.md +++ /dev/null @@ -1,359 +0,0 @@ -# SNMP Data Collector - -Using this collector, netdata can collect data from any SNMP device. - -This collector supports: - -- any number of SNMP devices -- each SNMP device can be used to collect data for any number of charts -- each chart may have any number of dimensions -- each SNMP device may have a different update frequency -- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches). - -The source code of the plugin is [here](https://github.com/firehol/netdata/blob/master/node.d/snmp.node.js). - -## Configuration - -You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following. - -In this example: - - - the SNMP device is `10.11.12.8`. - - the SNMP community is `public`. - - we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`). - - we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`. 
- -```js -{ - "enable_autodetect": false, - "update_every": 5, - "max_request_size": 100, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 10, - "max_request_size": 50, - "options": { "timeout": 10000 }, - "charts": { - "snmp_switch.bandwidth_port1": { - "title": "Switch Bandwidth for port 1", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "family": "ports", - "dimensions": { - "in": { - "oid": "1.3.6.1.2.1.2.2.1.10.1", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": "1.3.6.1.2.1.2.2.1.16.1", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - }, - "snmp_switch.bandwidth_port2": { - "title": "Switch Bandwidth for port 2", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "family": "ports", - "dimensions": { - "in": { - "oid": "1.3.6.1.2.1.2.2.1.10.2", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": "1.3.6.1.2.1.2.2.1.16.2", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -``` - -`update_every` is the update frequency for each server, in seconds. - -`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number if you get `TooBig` errors in netdata error.log. - -`family` sets the name of the submenu of the dashboard each chart will appear under. - -If you need to define many charts using incremental OIDs, you can use something like this: - -This is like the previous example, but the `multiply_range` option will multiply the current chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`. - -Each of the 24 new charts will have its id (1-24) appended at: - -1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24` -2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24` -3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24` -4. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order) - -```js -{ - "enable_autodetect": false, - "update_every": 10, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 10, - "options": { "timeout": 20000 }, - "charts": { - "snmp_switch.bandwidth_port": { - "title": "Switch Bandwidth for port ", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "family": "ports", - "multiply_range": [ 1, 24 ], - "dimensions": { - "in": { - "oid": "1.3.6.1.2.1.2.2.1.10.", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": "1.3.6.1.2.1.2.2.1.16.", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -``` - -The `options` given for each server are: - - - `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms. - - `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`). - - `transport`, the default is `udp4`. - - `port`, the port of the SNMP device to connect to. The default is `161`. - - `retries`, the number of attempts to make to fetch the data. The default is `1`.
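To sanity-check these options outside the collector, you can issue the same kind of request with the net-snmp command line tools (a sketch; the host, community and OID are the ones used in the examples above and must be adapted to your device):

```sh
# SNMP v1, 10s timeout, 1 retry, default port 161 -
# mirrors the collector options described above
snmpget -v 1 -c public -t 10 -r 1 10.11.12.8:161 \
    1.3.6.1.2.1.2.2.1.10.1    # ifInOctets of port 1
```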
- -## Retrieving names from SNMP - -You can append a value retrieved from SNMP to the title, by adding `titleoid` to the chart. - -You can set a dimension name to a value retrieved from SNMP, by adding `oidname` to the dimension. - -Both of the above will participate in `multiply_range`. - - -## Testing the configuration - -To test it, you can run: - -```sh -/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp -``` - -The above will run it on your console and you will be able to see what netdata sees, but also any errors. You can get a very detailed output by appending `debug` to the command line. - -If it works, restart netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard after a few seconds). - -## Data collection speed - -Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second. If you run `node.d.plugin` in `debug` mode, it will report the time it took for the SNMP device to respond. My switch, for example, needs 7-8 seconds to respond for the traffic on 24 ports (48 OIDs, in/out). - -Also, if you use many SNMP clients on the same SNMP device at the same time, values may be skipped. This is a problem of the SNMP device, not this collector. - -## Finding OIDs - -Use `snmpwalk`, like this: - -```sh -snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8 -``` - -- `-t 20` is the timeout in seconds -- `-v 1` is the SNMP version -- `-O fn` will display full OIDs in numeric format (you may want to run it also without this option to see human readable output of OIDs) -- `-c public` is the SNMP community -- `10.11.12.8` is the SNMP device - -Keep in mind that `snmpwalk` outputs the OIDs with a dot in front of them. You should remove this dot when adding OIDs to the configuration file of this collector. - -## Example: Linksys SRW2024P - -This is what I use for my Linksys SRW2024P. It creates: - -1. A chart for power consumption (it is a PoE switch) -2. Two charts for packets received (total packets received and packets received with errors) -3. One chart for packets output -4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles. - -This switch also reports various other metrics, like snmp, packets per port, etc. Unfortunately it does not report CPU utilization or backplane utilization. - -This switch has a very slow SNMP processor. To respond, it needs about 8 seconds, so I have set the refresh frequency (`update_every`) to 15 seconds.
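If you want to estimate how slow your own device is before choosing `update_every`, timing a walk of the interface counters gives a rough number (a sketch, reusing the host and community from the examples above):

```sh
# time a walk of the inbound octet counters of all ports
time snmpwalk -t 20 -v 1 -c public 10.11.12.8 1.3.6.1.2.1.2.2.1.10
```

The full configuration I use for this switch follows.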
- -```js -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 15, - "options": { "timeout": 20000, "version": 1 }, - "charts": { - "snmp_switch.power": { - "title": "Switch Power Supply", - "units": "watts", - "type": "line", - "priority": 10, - "family": "power", - "dimensions": { - "supply": { - "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1", - "algorithm": "absolute", - "multiplier": 1, - "divisor": 1, - "offset": 0 - }, - "used": { - "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1", - "algorithm": "absolute", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.input": { - "title": "Switch Packets Input", - "units": "packets/s", - "type": "area", - "priority": 20, - "family": "IP", - "dimensions": { - "receives": { - "oid": ".1.3.6.1.2.1.4.3.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "discards": { - "oid": ".1.3.6.1.2.1.4.8.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.input_errors": { - "title": "Switch Received Packets with Errors", - "units": "packets/s", - "type": "line", - "priority": 30, - "family": "IP", - "dimensions": { - "bad_header": { - "oid": ".1.3.6.1.2.1.4.4.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "bad_address": { - "oid": ".1.3.6.1.2.1.4.5.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "unknown_protocol": { - "oid": ".1.3.6.1.2.1.4.7.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.output": { - "title": "Switch Output Packets", - "units": "packets/s", - "type": "line", - "priority": 40, - "family": "IP", - "dimensions": { - "requests": { - "oid": ".1.3.6.1.2.1.4.10.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "discards": { - "oid": ".1.3.6.1.2.1.4.11.0", - "algorithm": "incremental", - "multiplier": -1, - "divisor": 1, - "offset": 0 - } - , "no_route": { - "oid": ".1.3.6.1.2.1.4.12.0", - "algorithm": "incremental", - "multiplier": -1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.bandwidth_port": { - "title": "Switch Bandwidth for port ", - "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.", - "units": "kilobits/s", - "type": "area", - "priority": 100, - "family": "ports", - "multiply_range": [ 1, 24 ], - "dimensions": { - "in": { - "oid": ".1.3.6.1.2.1.2.2.1.10.", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - } - , "out": { - "oid": ".1.3.6.1.2.1.2.2.1.16.", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -``` diff --git a/conf.d/node.d/stiebeleltron.conf.md b/conf.d/node.d/stiebeleltron.conf.md deleted file mode 100644 index 6ae5aa1c7..000000000 --- a/conf.d/node.d/stiebeleltron.conf.md +++ /dev/null @@ -1,453 +0,0 @@ -[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html) - -Original author: BrainDoctor (github) - -The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG. 
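-
-For illustration, this is the extraction idea in plain Node.js (a sketch only; the HTML fragment is invented, and the real markup depends on your ISG firmware and interface language):
-
-```js
-// match the label, then capture the first value (the ISG prints decimal commas)
-var html = '<td>OUTSIDE TEMPERATURE</td><td class="value">12,3</td>';
-var m = html.match(/OUTSIDE TEMPERATURE<\/td>\s*<td.*>(-?[0-9]+,[0-9]+).*<\/td>/);
-if (m) {
-    // the first capture group holds the value; convert the comma before parsing
-    console.log(parseFloat(m[1].replace(',', '.'))); // prints 12.3
-}
-```
-
-In the JSON configuration below, the same pattern has to be written as an escaped string (e.g. `<\\\/td>` and `\\s*`), which is what the escaping step described under "How to update the config" takes care of.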
-
-### Testing
-
-This plugin has been tested within the following environment:
- * ISG version: 8.5.6
- * MFG version: 12
- * Controller version: 9
- * July (summer time, not much activity)
- * Interface language: English
- * login- and password-less ISG web access (without HTTPS it's useless anyway)
- * Heatpump model: WPL 25 I-2
- * Hot water boiler model: 820 WT 1
-
-So, if the language is set to English, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
-
-In my case, the ISG is relatively slow to respond (at least 1s, but also up to 4s). Collecting metrics every 10s is more than enough for me.
-
-### How to update the config
-
-* The dimensions support variable digits; the default is `1`. Most of the values printed by the ISG use 1 digit, some use 2.
-* The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. The default is `1`.
-* The test string for the regex is always the whole HTML output from the URL. For each parameter you need a regular expression that extracts the value from the HTML source in the first capture group.
-  Recommended: [regexr.com](https://regexr.com) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
-
-The charts are generated using the configuration below, so if your installation is in another language or reports other metrics, just adapt the structure or the regexes.
-
-### Configuration template
-```json
-{
-    "enable_autodetect": false,
-    "update_every": 10,
-    "pages": [
-        {
-            "name": "System",
-            "id": "system",
-            "url": "http://machine.ip.or.dns/?s=1,0",
-            "update_every": 10,
-            "categories": [
-                {
-                    "id": "eletricreheating",
-                    "name": "electric reheating",
-                    "charts": [
-                        {
-                            "title": "Dual Mode Reheating Temperature",
-                            "id": "reheatingtemp",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Heating",
-                                    "id": "dualmodeheatingtemp",
-                                    "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                },
-                                {
-                                    "name": "Hot Water",
-                                    "id": "dualmodehotwatertemp",
-                                    "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        }
-                    ]
-                },
-                {
-                    "id": "roomtemp",
-                    "name": "room temperature",
-                    "charts": [
-                        {
-                            "title": "Heat Circuit 1",
-                            "id": "hc1",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actual",
-                                    "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                },
-                                {
-                                    "name": "Set",
-                                    "id": "set",
-                                    "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Heat Circuit 2",
-                            "id": "hc2",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 2,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actual",
-                                    "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                },
-                                {
-                                    "name": "Set",
-                                    "id": "set",
-                                    "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                }
-                            ]
-                        }
-                    ]
-                },
-                {
-                    "id": "heating",
-                    "name": "heating",
-                    "charts": [
-                        {
-                            "title": "Heat Circuit 1",
-                            "id": "hc1",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actual",
-                                    "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                },
-                                {
-                                    "name": "Set",
-                                    "id": "set",
-                                    "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Heat Circuit 2",
-                            "id": "hc2",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 2,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actual",
-                                    "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                },
-                                {
-                                    "name": "Set",
-                                    "id": "set",
-                                    "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Flow Temperature",
-                            "id": "flowtemp",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 3,
-                            "dimensions": [
-                                {
-                                    "name": "Heating",
-                                    "id": "heating",
-                                    "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                },
-                                {
-                                    "name": "Reheating",
-                                    "id": "reheating",
-                                    "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Buffer Temperature",
-                            "id": "buffertemp",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 4,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actual",
-                                    "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                },
-                                {
-                                    "name": "Set",
-                                    "id": "set",
-                                    "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Fixed Temperature",
-                            "id": "fixedtemp",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 5,
-                            "dimensions": [
-                                {
-                                    "name": "Set",
-                                    "id": "setfixed",
-                                    "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Pre-flow Temperature",
-                            "id": "preflowtemp",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 6,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actualreturn",
-                                    "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        }
-                    ]
-                },
-                {
-                    "id": "hotwater",
-                    "name": "hot water",
-                    "charts": [
-                        {
-                            "title": "Hot Water Temperature",
-                            "id": "hotwatertemp",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Actual",
-                                    "id": "actual",
-                                    "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                },
-                                {
-                                    "name": "Set",
-                                    "id": "set",
-                                    "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        }
-                    ]
-                },
-                {
-                    "id": "general",
-                    "name": "general",
-                    "charts": [
-                        {
-                            "title": "Outside Temperature",
-                            "id": "outside",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Outside temperature",
-                                    "id": "outsidetemp",
-                                    "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Condenser Temperature",
-                            "id": "condenser",
-                            "unit": "Celsius",
-                            "type": "line",
-                            "prio": 2,
-                            "dimensions": [
-                                {
-                                    "name": "Condenser",
-                                    "id": "condenser",
-                                    "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Heating Circuit Pressure",
-                            "id": "heatingcircuit",
-                            "unit": "bar",
-                            "type": "line",
-                            "prio": 3,
-                            "dimensions": [
-                                {
-                                    "name": "Heating Circuit",
-                                    "id": "heatingcircuit",
-                                    "digits": 2,
-                                    "regex": "PRESSURE HTG CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Flow Rate",
-                            "id": "flowrate",
-                            "unit": "liters/min",
-                            "type": "line",
-                            "prio": 4,
-                            "dimensions": [
-                                {
-                                    "name": "Flow Rate",
-                                    "id": "flowrate",
-                                    "digits": 2,
-                                    "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Output",
-                            "id": "output",
-                            "unit": "%",
-                            "type": "line",
-                            "prio": 5,
-                            "dimensions": [
-                                {
-                                    "name": "Heat Pump",
-                                    "id": "outputheatpump",
-                                    "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
-                                },
-                                {
-                                    "name": "Water Pump",
-                                    "id": "intpumprate",
-                                    "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
-                                }
-                            ]
-                        }
-                    ]
-                }
-            ]
-        },
-        {
-            "name": "Heat Pump",
-            "id": "heatpump",
-            "url": "http://machine.ip.or.dns/?s=1,1",
-            "update_every": 10,
-            "categories": [
-                {
-                    "id": "runtime",
-                    "name": "runtime",
-                    "charts": [
-                        {
-                            "title": "Compressor",
-                            "id": "compressor",
-                            "unit": "h",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Heating",
-                                    "id": "heating",
-                                    "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                },
-                                {
-                                    "name": "Hot Water",
-                                    "id": "hotwater",
-                                    "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Reheating",
-                            "id": "reheating",
-                            "unit": "h",
-                            "type": "line",
-                            "prio": 2,
-                            "dimensions": [
-                                {
-                                    "name": "Reheating 1",
-                                    "id": "rh1",
-                                    "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                },
-                                {
-                                    "name": "Reheating 2",
-                                    "id": "rh2",
-                                    "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                }
-                            ]
-                        }
-                    ]
-                },
-                {
-                    "id": "processdata",
-                    "name": "process data",
-                    "charts": [
-                        {
-                            "title": "Remaining Compressor Rest Time",
-                            "id": "remaincomp",
-                            "unit": "s",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Timer",
-                                    "id": "timer",
-                                    "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                }
-                            ]
-                        }
-                    ]
-                },
-                {
-                    "id": "energy",
-                    "name": "energy",
-                    "charts": [
-                        {
-                            "title": "Compressor Today",
-                            "id": "compressorday",
-                            "unit": "kWh",
-                            "type": "line",
-                            "prio": 1,
-                            "dimensions": [
-                                {
-                                    "name": "Heating",
-                                    "id": "heating",
-                                    "digits": 3,
-                                    "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                },
-                                {
-                                    "name": "Hot Water",
-                                    "id": "hotwater",
-                                    "digits": 3,
-                                    "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                }
-                            ]
-                        },
-                        {
-                            "title": "Compressor Total",
-                            "id": "compressortotal",
-                            "unit": "MWh",
-                            "type": "line",
-                            "prio": 2,
-                            "dimensions": [
-                                {
-                                    "name": "Heating",
-                                    "id": "heating",
-                                    "digits": 3,
-                                    "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                },
-                                {
-                                    "name": "Hot Water",
-                                    "id": "hotwater",
-                                    "digits": 3,
-                                    "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
-                                }
-                            ]
-                        }
-                    ]
-                }
-            ]
-        }
-    ]
-}
-```
diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf
deleted file mode 100644
index bb57738bb..000000000
--- a/conf.d/python.d.conf
+++ /dev/null
@@ -1,79 +0,0 @@
-# netdata python.d.plugin configuration
-#
-# This file is in YaML format.
-# Generally the format is:
-#
-# name: value
-#
-
-# Enable / disable the whole python.d.plugin (all its modules)
-enabled: yes
-
-# ----------------------------------------------------------------------
-# Enable / Disable python.d.plugin modules
-#default_run: yes
-#
-# If "default_run" = "yes" the default for all modules is enabled (yes).
-# Setting any of these to "no" will disable it.
-#
-# If "default_run" = "no" the default for all modules is disabled (no).
-# Setting any of these to "yes" will enable it.
-
-# apache_cache has been replaced by web_log
-apache_cache: no
-# apache: yes
-# beanstalk: yes
-# bind_rndc: yes
-# ceph: yes
-chrony: no
-# couchdb: yes
-# cpufreq: yes
-# cpuidle: yes
-# dns_query_time: yes
-# dnsdist: yes
-# dovecot: yes
-# elasticsearch: yes
-
-# this is just an example
-example: no
-
-# exim: yes
-# fail2ban: yes
-# freeradius: yes
-
-# gunicorn_log has been replaced by web_log
-gunicorn_log: no
-go_expvar: no
-# haproxy: yes
-# hddtemp: yes
-# icecast: yes
-# ipfs: yes
-# isc_dhcpd: yes
-# mdstat: yes
-# memcached: yes
-# mongodb: yes
-# mysql: yes
-# nginx: yes
-# nginx_plus: yes
-# nsd: yes
-# ntpd: yes
-
-# nginx_log has been replaced by web_log
-nginx_log: no
-# ovpn_status_log: yes
-# phpfpm: yes
-# postfix: yes
-# postgres: yes
-# powerdns: yes
-# rabbitmq: yes
-# redis: yes
-# retroshare: yes
-# sensors: yes
-# samba: yes
-# smartd_log: yes
-# squid: yes
-# springboot: yes
-# tomcat: yes
-# varnish: yes
-# web_log: yes
diff --git a/conf.d/python.d/apache.conf b/conf.d/python.d/apache.conf
deleted file mode 100644
index 3bbc3f786..000000000
--- a/conf.d/python.d/apache.conf
+++ /dev/null
@@ -1,87 +0,0 @@
-# netdata python.d.plugin configuration for apache
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, apache also supports the following:
-#
-# url: 'URL'      # the URL to fetch apache's mod_status stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
-  name : 'local'
-  url  : 'http://localhost/server-status?auto'
-
-localipv4:
-  name : 'local'
-  url  : 'http://127.0.0.1/server-status?auto'
-
-localipv6:
-  name : 'local'
-  url  : 'http://[::1]/server-status?auto'
diff --git a/conf.d/python.d/beanstalk.conf b/conf.d/python.d/beanstalk.conf
deleted file mode 100644
index 940801877..000000000
--- a/conf.d/python.d/beanstalk.conf
+++ /dev/null
@@ -1,80 +0,0 @@
-# netdata python.d.plugin configuration for beanstalk
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# chart_cleanup sets the default chart cleanup interval in iterations.
-# A chart is marked as obsolete if it has not been updated
-# 'chart_cleanup' iterations in a row.
-# When a plugin sends the obsolete flag, the charts are not deleted
-# from netdata immediately.
-# They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to backends) and deleted one hour
-# later (configurable from netdata.conf).
-# chart_cleanup: 10
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#     chart_cleanup: 10       # the JOB's chart cleanup interval in iterations
-#
-# Additionally to the above, beanstalk also supports the following:
-#
-# host: 'host'     # Server ip address or hostname. Default: 127.0.0.1
-# port: port       # Beanstalkd port. Default: 11300
-#
-# ----------------------------------------------------------------------
diff --git a/conf.d/python.d/bind_rndc.conf b/conf.d/python.d/bind_rndc.conf
deleted file mode 100644
index 71958ff98..000000000
--- a/conf.d/python.d/bind_rndc.conf
+++ /dev/null
@@ -1,112 +0,0 @@
-# netdata python.d.plugin configuration for bind_rndc
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, bind_rndc also supports the following:
-#
-# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats'
-#------------------------------------------------------------------------------------------------------------------
-# IMPORTANT Information
-#
-# BIND APPENDS to the stats file on EVERY RUN. It is NOT RECOMMENDED to set update_every below 30 sec.
-# It is STRONGLY RECOMMENDED to create a bind-rndc conf file for logrotate.
-#
-# To set up your BIND to dump stats do the following:
-#
-# 1. add to 'named.conf.options' options {}:
-#    statistics-file "/var/log/bind/named.stats";
-#
-# 2. Create a bind/ directory in /var/log
-#    cd /var/log/ && mkdir bind
-#
-# 3. Change the owner of the directory to the 'bind' user
-#    chown bind bind/
-#
-# 4. RELOAD (NOT restart) BIND
-#    systemctl reload bind9.service
-#
-# 5. Run 'rndc stats' as root to dump the stats (BIND will create named.stats in the new directory)
-#
-#
-# To ALLOW NETDATA TO RUN 'rndc stats', change the group of '/etc/bind/rndc.key' to netdata:
-#    chown :netdata rndc.key
-#
-# Last BUT NOT least, create bind-rndc.conf in logrotate.d/.
-# A working one:
-#    /var/log/bind/named.stats {
-#
-#        daily
-#        rotate 4
-#        compress
-#        delaycompress
-#        create 0644 bind bind
-#        missingok
-#        postrotate
-#            rndc reload > /dev/null
-#        endscript
-#    }
-#
-# To test your logrotate conf file, run as root:
-#
-#    logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
-#
-# ----------------------------------------------------------------------
diff --git a/conf.d/python.d/ceph.conf b/conf.d/python.d/ceph.conf
deleted file mode 100644
index 78ac1e251..000000000
--- a/conf.d/python.d/ceph.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for ceph stats
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 10        # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, ceph plugin also supports the following:
-#
-# config_file: 'config_file'   # Ceph config file.
-# keyring_file: 'keyring_file' # Ceph keyring file. The netdata user must be added to the ceph group
-#                              # and the keyring file must be group-readable.
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-config_file: '/etc/ceph/ceph.conf'
-keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-
diff --git a/conf.d/python.d/chrony.conf b/conf.d/python.d/chrony.conf
deleted file mode 100644
index 9ac906b5f..000000000
--- a/conf.d/python.d/chrony.conf
+++ /dev/null
@@ -1,79 +0,0 @@
-# netdata python.d.plugin configuration for chrony
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, chrony also supports the following:
-#
-# command: 'chronyc tracking' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# REQUIRED chrony CONFIGURATION
-#
-# netdata will query chrony as user netdata.
-# Verify that user netdata is allowed to call 'chronyc tracking'.
-# Check cmdallow in chrony.conf.
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS

-local:
-  command: 'chronyc -n tracking'
diff --git a/conf.d/python.d/couchdb.conf b/conf.d/python.d/couchdb.conf
deleted file mode 100644
index 5f6e75cff..000000000
--- a/conf.d/python.d/couchdb.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# netdata python.d.plugin configuration for couchdb
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# By default, CouchDB only updates its stats every 10 seconds.
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, the couchdb plugin also supports the following:
-#
-# host: 'ipaddress'          # Server ip address or hostname. Default: 127.0.0.1
-# port: 'port'               # CouchDB port. Default: 5984
-# scheme: 'scheme'           # http or https. Default: http
-# node: 'couchdb@127.0.0.1'  # CouchDB node name. Same as -name vm.args argument.
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# if db-specific stats are desired, place their names in databases:
-#   databases: 'npm-registry animaldb'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-localhost:
-  name: 'local'
-  host: '127.0.0.1'
-  port: '5984'
-  node: 'couchdb@127.0.0.1'
-  scheme: 'http'
-#  user: 'admin'
-#  pass: 'password'
diff --git a/conf.d/python.d/cpufreq.conf b/conf.d/python.d/cpufreq.conf
deleted file mode 100644
index 0890245d9..000000000
--- a/conf.d/python.d/cpufreq.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-# netdata python.d.plugin configuration for cpufreq
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# The directory to search for the file scaling_cur_freq
-sys_dir: "/sys/devices"
diff --git a/conf.d/python.d/dns_query_time.conf b/conf.d/python.d/dns_query_time.conf
deleted file mode 100644
index d32c6db83..000000000
--- a/conf.d/python.d/dns_query_time.conf
+++ /dev/null
@@ -1,71 +0,0 @@
-# netdata python.d.plugin configuration for dns_query_time
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, dns_query_time also supports the following:
-#
-# dns_servers: 'dns servers'  # List of dns servers to query
-# domains: 'domains'          # List of domains
-# aggregate: yes/no           # Aggregate all servers in one chart or not
-# response_timeout: 4         # DNS query response timeout (the query value is set to -100 if the response time exceeds response_timeout)
-#
-# ----------------------------------------------------------------------
\ No newline at end of file
diff --git a/conf.d/python.d/dnsdist.conf b/conf.d/python.d/dnsdist.conf
deleted file mode 100644
index aec58b8e1..000000000
--- a/conf.d/python.d/dnsdist.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-# netdata python.d.plugin configuration for dnsdist
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-#update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -#retries: 600000 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -#autodetection_retry: 1 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# -# Additionally to the above, dnsdist also supports the following: -# -# url: 'URL' # the URL to fetch dnsdist performance statistics -# user: 'username' # username for basic auth -# pass: 'password' # password for basic auth -# header: -# X-API-Key: 'Key' # API key -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -# localhost: -# name : 'local' -# url : 'http://127.0.0.1:5053/jsonstat?command=stats' -# user : 'username' -# pass : 'password' -# header: -# X-API-Key: 'dnsdist-api-key' - - diff --git a/conf.d/python.d/dovecot.conf b/conf.d/python.d/dovecot.conf deleted file mode 100644 index 56c394991..000000000 --- a/conf.d/python.d/dovecot.conf +++ /dev/null @@ -1,96 +0,0 @@ -# netdata python.d.plugin configuration for dovecot -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. 
-# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, dovecot also supports the following: -# -# socket: 'path/to/dovecot/stats' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - host : 'localhost' - port : 24242 - -localipv4: - name : 'local' - host : '127.0.0.1' - port : 24242 - -localipv6: - name : 'local' - host : '::1' - port : 24242 - -localsocket: - name : 'local' - socket : '/var/run/dovecot/stats' - diff --git a/conf.d/python.d/elasticsearch.conf b/conf.d/python.d/elasticsearch.conf deleted file mode 100644 index 213843bf9..000000000 --- a/conf.d/python.d/elasticsearch.conf +++ /dev/null @@ -1,83 +0,0 @@ -# netdata python.d.plugin configuration for elasticsearch stats -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. 
-# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, elasticsearch plugin also supports the following: -# -# host: 'ipaddress' # Server ip address or hostname. -# port: 'port' # Port on which elasticsearch listen. -# cluster_health: False/True # Calls to cluster health elasticsearch API. Enabled by default. -# cluster_stats: False/True # Calls to cluster stats elasticsearch API. Enabled by default. -# -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -local: - host: '127.0.0.1' - port: '9200' diff --git a/conf.d/python.d/example.conf b/conf.d/python.d/example.conf deleted file mode 100644 index e7fed9b50..000000000 --- a/conf.d/python.d/example.conf +++ /dev/null @@ -1,70 +0,0 @@ -# netdata python.d.plugin configuration for example -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. 
This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, example also supports the following: -# -# - none -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) diff --git a/conf.d/python.d/exim.conf b/conf.d/python.d/exim.conf deleted file mode 100644 index 2add7b2cb..000000000 --- a/conf.d/python.d/exim.conf +++ /dev/null @@ -1,93 +0,0 @@ -# netdata python.d.plugin configuration for exim -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# exim is slow, so once every 10 seconds -update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, exim also supports the following: -# -# command: 'exim -bpc' # the command to run -# - -# ---------------------------------------------------------------------- -# REQUIRED exim CONFIGURATION -# -# netdata will query exim as user netdata. -# By default exim will refuse to respond. -# -# To allow querying exim as non-admin user, please set the following -# to your exim configuration: -# -# queue_list_requires_admin = false -# -# Your exim configuration should be in -# -# /etc/exim/exim4.conf -# or -# /etc/exim4/conf.d/main/000_local_options -# -# Please consult your distribution information to find the exact file. - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS - -local: - command: 'exim -bpc' diff --git a/conf.d/python.d/fail2ban.conf b/conf.d/python.d/fail2ban.conf deleted file mode 100644 index 60ca87231..000000000 --- a/conf.d/python.d/fail2ban.conf +++ /dev/null @@ -1,70 +0,0 @@ -# netdata python.d.plugin configuration for fail2ban -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, fail2ban also supports the following: -# -# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log' -# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local' -# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/' -# exclude: 'jails you want to exclude from autodetection' # Default: none -#------------------------------------------------------------------------------------------------------------------ diff --git a/conf.d/python.d/freeradius.conf b/conf.d/python.d/freeradius.conf deleted file mode 100644 index 3336d4c49..000000000 --- a/conf.d/python.d/freeradius.conf +++ /dev/null @@ -1,82 +0,0 @@ -# netdata python.d.plugin configuration for freeradius -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, freeradius also supports the following:
-#
-#     host: 'host'        # Default: 'localhost'. Server IP address or hostname.
-#     port: 'port'        # Default: '18121'. Port on which the freeradius server listens (type = status).
-#     secret: 'secret'    # Default: 'adminsecret'.
-#     acct: yes/no        # Default: no. Freeradius accounting statistics.
-#     proxy_auth: yes/no  # Default: no. Freeradius proxy authentication statistics.
-#     proxy_acct: yes/no  # Default: no. Freeradius proxy accounting statistics.
-#
-# ------------------------------------------------------------------------------------------------------------------
-# Freeradius server configuration:
-# The configuration for the status server is automatically created in the sites-available directory.
-# By default, the server is enabled and can be queried from every client.
-# FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
-# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
-#     cd sites-enabled
-#     ln -s ../sites-available/status status
-# and restart/reload your FreeRADIUS server.
-# ------------------------------------------------------------------------------------------------------------------
diff --git a/conf.d/python.d/go_expvar.conf b/conf.d/python.d/go_expvar.conf
deleted file mode 100644
index c352b1674..000000000
--- a/conf.d/python.d/go_expvar.conf
+++ /dev/null
@@ -1,110 +0,0 @@
-# netdata python.d.plugin configuration for go_expvar
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# Any number of jobs is supported.
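(Editor's note, referring back to the freeradius section above: a minimal job sketch built only from the documented parameters and their stated defaults — this snippet is illustrative and not part of the original file:

    local:
        host: 'localhost'      # documented default
        port: '18121'          # documented default status port
        secret: 'adminsecret'  # documented default
        acct: no               # accounting statistics off, per default

Any of the documented predefined parameters, listed next, could be added to such a job as well.)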
-# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, this plugin also supports the following: -# -# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint -# -# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include -# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# collect_memstats: true # enables charts for Go runtime's memory statistics -# extra_charts: {} # defines extra data/charts to monitor, please see the example below -# -# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to -# collect. -# -# Please visit the module wiki page for more information on how to use the extra_charts variable: -# -# https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar -# -# Configuration example -# --------------------- - -#app1: -# name : 'app1' -# url : 'http://127.0.0.1:8080/debug/vars' -# collect_memstats: true -# extra_charts: -# - id: "runtime_goroutines" -# options: -# name: num_goroutines -# title: "runtime: number of goroutines" -# units: goroutines -# family: runtime -# context: expvar.runtime.goroutines -# chart_type: line -# lines: -# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines} -# - id: "foo_counters" -# options: -# name: counters -# title: "some random counters" -# units: awesomeness -# family: counters -# context: expvar.foo.counters -# chart_type: line -# lines: -# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1} -# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2} - diff --git a/conf.d/python.d/haproxy.conf b/conf.d/python.d/haproxy.conf deleted file mode 100644 index a40dd76a5..000000000 --- a/conf.d/python.d/haproxy.conf +++ /dev/null @@ -1,85 +0,0 @@ -# netdata python.d.plugin configuration for haproxy -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. 
-# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, haproxy also supports the following: -# -# IMPORTANT: socket MUST BE readable AND writable by netdata user -# -# socket: 'path/to/haproxy/sock' -# -# OR -# url: 'http://<ip.address>:<port>/<url>;csv;norefresh' -# [user: USERNAME] only if stats auth is used -# [pass: PASSWORD] only if stats auth is used - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -#via_url: -# user : 'admin' -# pass : 'password' -# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh' - -#via_socket: -# socket: '/var/run/haproxy/admin.sock' diff --git a/conf.d/python.d/hddtemp.conf b/conf.d/python.d/hddtemp.conf deleted file mode 100644 index 9165798a2..000000000 --- a/conf.d/python.d/hddtemp.conf +++ /dev/null @@ -1,97 +0,0 @@ -# netdata python.d.plugin configuration for hddtemp -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. 
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, hddtemp also supports the following:
-#
-#     host: 'IP or HOSTNAME' # the host to connect to
-#     port: PORT             # the port to connect to
-#
-
-# By default this module will try to autodetect disks
-# (autodetection works only for disks whose names start with "sd").
-# However, this can be overridden by setting the `devices` variable
-# to an array of desired disks. Example for two disks:
-#
-# devices:
-#   - sda
-#   - sdb
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
-    name: 'local'
-    host: 'localhost'
-    port: 7634
-
-localipv4:
-    name: 'local'
-    host: '127.0.0.1'
-    port: 7634
-
-localipv6:
-    name: 'local'
-    host: '::1'
-    port: 7634
diff --git a/conf.d/python.d/httpcheck.conf b/conf.d/python.d/httpcheck.conf
deleted file mode 100644
index 058e057a6..000000000
--- a/conf.d/python.d/httpcheck.conf
+++ /dev/null
@@ -1,99 +0,0 @@
-# netdata python.d.plugin configuration for httpcheck
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the httpcheck default of 3 seconds is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# chart_cleanup sets the default chart cleanup interval in iterations.
-# A chart is marked as obsolete if it has not been updated
-# 'chart_cleanup' iterations in a row.
-# They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to backends) and deleted one hour
-# later (configurable from netdata.conf).
-# -- For this plugin, cleanup MUST be disabled, otherwise we lose response -# time charts -chart_cleanup: 0 - -# Autodetection and retries do not work for this plugin - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# ------------------------------- -# ATTENTION: Any valid configuration will be accepted, even if initial connection fails! -# ------------------------------- -# -# There is intentionally no default config, e.g. for 'localhost' - -# job_name: -# name: myname # [optional] the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 3 # [optional] the JOB's data collection frequency -# priority: 60000 # [optional] the JOB's order on the dashboard -# retries: 60 # [optional] the JOB's number of restoration attempts -# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5s) -# url: 'http[s]://host-ip-or-dns[:port][path]' -# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80 -# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to / -# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be -# # followed (default). -# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the -# # status chart, however: The response time will still be > 0, since the -# # host responded with something. -# # If redirect is enabled, the accepted status will be checked against the redirected page. -# - 200 # Multiple status codes are possible. If you specify 'status_accepted', you would still -# # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status' -# # if code is 200. Do specify numerical entries such as 200, not 'OK'. -# regex: None # [optional] If the status code is accepted, the content of the response will be searched for this -# # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled, -# # the regex will be matched to the redirected page, not the initial 3xx response. - -# Simple example: -# -# jira: -# url: 'https://jira.localdomain/' - - -# Complex example: -# -# cool_website: -# url: 'http://cool.website:8080/home' -# status_accepted: -# - 200 -# - 204 -# regex: <title>My cool website!<\/title> -# timeout: 2 - -# This plugin is intended for simple cases. Currently, the accuracy of the response time is low and should be used as reference only. - diff --git a/conf.d/python.d/icecast.conf b/conf.d/python.d/icecast.conf deleted file mode 100644 index a900d06d3..000000000 --- a/conf.d/python.d/icecast.conf +++ /dev/null @@ -1,83 +0,0 @@ -# netdata python.d.plugin configuration for icecast -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
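(Editor's note on the httpcheck module above: its examples never combine the documented redirect and status_accepted options. A hypothetical job that treats a 301 itself as the healthy response, instead of following it, might look like this — the job name and URL are placeholders, not part of the original file:

    moved_check:
        url: 'http://example.org/old-path'
        redirect: no          # do not follow the 3xx response
        status_accepted:
          - 301               # the redirect status itself counts as OK

With redirect disabled, the 301 is matched directly against status_accepted rather than against the redirected page.)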
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, icecast also supports the following: -# -# url: 'URL' # the URL to fetch icecast's stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost:8443/status-json.xsl' - -localipv4: - name : 'local' - url : 'http://127.0.0.1:8443/status-json.xsl' \ No newline at end of file diff --git a/conf.d/python.d/ipfs.conf b/conf.d/python.d/ipfs.conf deleted file mode 100644 index c247c1b7a..000000000 --- a/conf.d/python.d/ipfs.conf +++ /dev/null @@ -1,74 +0,0 @@ -# netdata python.d.plugin configuration for ipfs -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. 
-# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, ipfs also supports the following: -# -# url: 'URL' # URL to the IPFS API -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost:5001' diff --git a/conf.d/python.d/isc_dhcpd.conf b/conf.d/python.d/isc_dhcpd.conf deleted file mode 100644 index 4a4c4a5e3..000000000 --- a/conf.d/python.d/isc_dhcpd.conf +++ /dev/null @@ -1,81 +0,0 @@ -# netdata python.d.plugin configuration for isc dhcpd leases -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. 
-# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, isc_dhcpd supports the following: -# -# leases_path: 'PATH' # the path to dhcpd.leases file -# pools: -# office: '192.168.2.0/24' # name(dimension): pool in CIDR format -# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format -# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format -# -#----------------------------------------------------------------------- -# IMPORTANT notes -# -# 1. Make sure leases file is readable by netdata. -# 2. Current implementation works only with 'default' db-time-format -# (weekday year/month/day hour:minute:second). -# This is the default, so it will work in most cases. -# 3. Pools MUST BE in CIDR format. -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/mdstat.conf b/conf.d/python.d/mdstat.conf deleted file mode 100644 index 66a2f153c..000000000 --- a/conf.d/python.d/mdstat.conf +++ /dev/null @@ -1,32 +0,0 @@ -# netdata python.d.plugin configuration for mdstat -# -# This file is in YaML format. Generally the format is: -# -# name: value -# - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 diff --git a/conf.d/python.d/memcached.conf b/conf.d/python.d/memcached.conf deleted file mode 100644 index 85c3daf65..000000000 --- a/conf.d/python.d/memcached.conf +++ /dev/null @@ -1,92 +0,0 @@ -# netdata python.d.plugin configuration for memcached -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. 
-# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, memcached also supports the following: -# -# socket: 'path/to/memcached.sock' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - host : 'localhost' - port : 11211 - -localipv4: - name : 'local' - host : '127.0.0.1' - port : 11211 - -localipv6: - name : 'local' - host : '::1' - port : 11211 - diff --git a/conf.d/python.d/mongodb.conf b/conf.d/python.d/mongodb.conf deleted file mode 100644 index 62faef68d..000000000 --- a/conf.d/python.d/mongodb.conf +++ /dev/null @@ -1,84 +0,0 @@ -# netdata python.d.plugin configuration for mongodb -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. 
-# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, mongodb also supports the following: -# -# host: 'IP or HOSTNAME' # type <str> the host to connect to -# port: PORT # type <int> the port to connect to -# -# in all cases, the following can also be set: -# -# user: 'username' # the mongodb username to use -# pass: 'password' # the mongodb password to use -# - -# ---------------------------------------------------------------------- -# to connect to the mongodb on localhost, without a password: -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -local: - name : 'local' - host : '127.0.0.1' - port : 27017 diff --git a/conf.d/python.d/mysql.conf b/conf.d/python.d/mysql.conf deleted file mode 100644 index b5956a2c6..000000000 --- a/conf.d/python.d/mysql.conf +++ /dev/null @@ -1,286 +0,0 @@ -# netdata python.d.plugin configuration for mysql -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. 
-# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, mysql also supports the following: -# -# socket: 'path/to/mysql.sock' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# in all cases, the following can also be set: -# -# user: 'username' # the mysql username to use -# pass: 'password' # the mysql password to use -# - -# ---------------------------------------------------------------------- -# mySQL CONFIGURATION -# -# netdata does not need any privilege - only the ability to connect -# to the mysql server (netdata will not be able to see any data). -# -# Execute these commands to give the local user 'netdata' the ability -# to connect to the mysql server on localhost, without a password: -# -# > create user 'netdata'@'localhost'; -# > grant usage on *.* to 'netdata'@'localhost'; -# > flush privileges; -# -# with the above statements, netdata will be able to gather mysql -# statistics, without the ability to see or alter any data or affect -# mysql operation in any way. No change is required below. 
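(Editor's note, for illustration only: with the grants above in place, a custom job for a remote MySQL server could be sketched as below, using only the parameters documented earlier. The job name and host are hypothetical and not part of the original file:

    remote:
        name : 'remote'
        host : '10.0.0.5'   # placeholder address
        port : '3306'
        user : 'netdata'
        pass : ''           # empty, matching the passwordless grant above

This mirrors the auto-detection jobs further down, which probe the common local sockets and TCP endpoints.)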
-# -# If you need to monitor mysql replication too, use this instead: -# -# > create user 'netdata'@'localhost'; -# > grant replication client on *.* to 'netdata'@'localhost'; -# > flush privileges; -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -mycnf1: - name : 'local' - 'my.cnf' : '/etc/my.cnf' - -mycnf2: - name : 'local' - 'my.cnf' : '/etc/mysql/my.cnf' - -debiancnf: - name : 'local' - 'my.cnf' : '/etc/mysql/debian.cnf' - -socket1: - name : 'local' - # user : '' - # pass : '' - socket : '/var/run/mysqld/mysqld.sock' - -socket2: - name : 'local' - # user : '' - # pass : '' - socket : '/var/run/mysqld/mysql.sock' - -socket3: - name : 'local' - # user : '' - # pass : '' - socket : '/var/lib/mysql/mysql.sock' - -socket4: - name : 'local' - # user : '' - # pass : '' - socket : '/tmp/mysql.sock' - -tcp: - name : 'local' - # user : '' - # pass : '' - host : 'localhost' - port : '3306' - # keep in mind port might be ignored by mysql, if host = 'localhost' - # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 - -tcpipv4: - name : 'local' - # user : '' - # pass : '' - host : '127.0.0.1' - port : '3306' - -tcpipv6: - name : 'local' - # user : '' - # pass : '' - host : '::1' - port : '3306' - - -# Now we try the same as above with user: root -# A few systems configure mysql to accept passwordless -# root access. - -mycnf1_root: - name : 'local' - user : 'root' - 'my.cnf' : '/etc/my.cnf' - -mycnf2_root: - name : 'local' - user : 'root' - 'my.cnf' : '/etc/mysql/my.cnf' - -socket1_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/var/run/mysqld/mysqld.sock' - -socket2_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/var/run/mysqld/mysql.sock' - -socket3_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/var/lib/mysql/mysql.sock' - -socket4_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/tmp/mysql.sock' - -tcp_root: - name : 'local' - user : 'root' - # pass : '' - host : 'localhost' - port : '3306' - # keep in mind port might be ignored by mysql, if host = 'localhost' - # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 - -tcpipv4_root: - name : 'local' - user : 'root' - # pass : '' - host : '127.0.0.1' - port : '3306' - -tcpipv6_root: - name : 'local' - user : 'root' - # pass : '' - host : '::1' - port : '3306' - - -# Now we try the same as above with user: netdata - -mycnf1_netdata: - name : 'local' - user : 'netdata' - 'my.cnf' : '/etc/my.cnf' - -mycnf2_netdata: - name : 'local' - user : 'netdata' - 'my.cnf' : '/etc/mysql/my.cnf' - -socket1_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/var/run/mysqld/mysqld.sock' - -socket2_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/var/run/mysqld/mysql.sock' - -socket3_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/var/lib/mysql/mysql.sock' - -socket4_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/tmp/mysql.sock' - -tcp_netdata: - name : 'local' - user : 'netdata' - # pass : '' - host : 'localhost' - port : '3306' - # keep in mind port might be ignored by mysql, if host = 'localhost' - # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 - -tcpipv4_netdata: - name : 'local' - user : 'netdata' 
- # pass : '' - host : '127.0.0.1' - port : '3306' - -tcpipv6_netdata: - name : 'local' - user : 'netdata' - # pass : '' - host : '::1' - port : '3306' - diff --git a/conf.d/python.d/nginx.conf b/conf.d/python.d/nginx.conf deleted file mode 100644 index 71c521066..000000000 --- a/conf.d/python.d/nginx.conf +++ /dev/null @@ -1,109 +0,0 @@ -# netdata python.d.plugin configuration for nginx -# -# You must have ngx_http_stub_status_module configured on your nginx server for this -# plugin to work. The following is an example config. -# It must be located inside a server { } block. -# -# location /stub_status { -# stub_status; -# # Security: Only allow access from the IP below. -# allow 192.168.1.200; -# # Deny anyone else -# deny all; -# } -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, this plugin also supports the following: -# -# url: 'URL' # the URL to fetch nginx's status stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# Example -# -# RemoteNginx: -# name : 'Reverse_Proxy' -# url : 'http://yourdomain.com/stub_status' -# -# "RemoteNginx" will show up in Netdata logs. "Reverse Proxy" will show up in the menu -# in the nginx section. 
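(Editor's note, building on the example above: because only JOBs that share a name are mutually exclusive, jobs with distinct names run concurrently. A hypothetical sketch for monitoring two nginx instances at once — addresses and names are placeholders, not part of the original file:

    frontend:
        name : 'frontend'
        url  : 'http://10.0.0.10/stub_status'

    backend:
        name : 'backend'
        url  : 'http://10.0.0.11/stub_status'

The auto-detection jobs below, by contrast, all share the name 'local', so only one of them will run.)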
- -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost/stub_status' - -localipv4: - name : 'local' - url : 'http://127.0.0.1/stub_status' - -localipv6: - name : 'local' - url : 'http://[::1]/stub_status' - diff --git a/conf.d/python.d/nginx_plus.conf b/conf.d/python.d/nginx_plus.conf deleted file mode 100644 index 7b5c8f43f..000000000 --- a/conf.d/python.d/nginx_plus.conf +++ /dev/null @@ -1,87 +0,0 @@ -# netdata python.d.plugin configuration for nginx_plus -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, nginx_plus also supports the following: -# -# url: 'URL' # the URL to fetch nginx_plus's stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost/status' - -localipv4: - name : 'local' - url : 'http://127.0.0.1/status' - -localipv6: - name : 'local' - url : 'http://[::1]/status' diff --git a/conf.d/python.d/nsd.conf b/conf.d/python.d/nsd.conf deleted file mode 100644 index 078e97216..000000000 --- a/conf.d/python.d/nsd.conf +++ /dev/null @@ -1,93 +0,0 @@ -# netdata python.d.plugin configuration for nsd -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# nsd-control is slow, so once every 30 seconds -# update_every: 30 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, nsd also supports the following:
-#
-#     command: 'nsd-control stats_noreset'  # the command to run
-#
-
-# ----------------------------------------------------------------------
-# IMPORTANT Information
-#
-# Netdata must have permission to run the `nsd-control stats_noreset` command
-#
-#  - Example-1 (use "sudo")
-#    1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
-#       Defaults:netdata !requiretty
-#       netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
-#    2. /etc/netdata/python.d/nsd.conf
-#       local:
-#           update_every: 30
-#           command: 'sudo /usr/sbin/nsd-control stats_noreset'
-#
-#  - Example-2 (add the "netdata" user to the "nsd" group)
-#       usermod -aG nsd netdata
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
-    update_every: 30
-    command: 'nsd-control stats_noreset'
diff --git a/conf.d/python.d/ntpd.conf b/conf.d/python.d/ntpd.conf
deleted file mode 100644
index 7adc4074b..000000000
--- a/conf.d/python.d/ntpd.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# netdata python.d.plugin configuration for ntpd
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters.
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# -# Additionally to the above, ntp also supports the following: -# -# host: 'localhost' # the host to query -# port: '123' # the UDP port where `ntpd` listens -# show_peers: no # use `yes` to show peer charts. enabling this -# # option is recommended only for debugging, as -# # it could possibly imply memory leaks if the -# # peers change frequently. -# peer_filter: '127\..*' # regex to exclude peers -# # by default local peers are hidden -# # use `''` to show all peers. -# peer_rescan: 60 # interval (>0) to check for new/changed peers -# # use `1` to check on every update -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name: 'local' - host: 'localhost' - port: '123' - show_peers: no - -localhost_ipv4: - name: 'local' - host: '127.0.0.1' - port: '123' - show_peers: no - -localhost_ipv6: - name: 'local' - host: '::1' - port: '123' - show_peers: no diff --git a/conf.d/python.d/ovpn_status_log.conf b/conf.d/python.d/ovpn_status_log.conf deleted file mode 100644 index 907f014f5..000000000 --- a/conf.d/python.d/ovpn_status_log.conf +++ /dev/null @@ -1,95 +0,0 @@ -# netdata python.d.plugin configuration for openvpn status log -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, openvpn status log also supports the following: -# -# log_path: 'PATH' # the path to the openvpn status log file -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -# IMPORTANT information -# -# 1. If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files -# so that multiple instances do not overwrite each other's output files. -# 2. Make sure the NETDATA USER CAN READ openvpn-status.log -# -# * cd into the directory with openvpn-status.log and run the following commands as root -# * #chown :netdata openvpn-status.log && chmod 640 openvpn-status.log -# * To check permissions and group membership run -# * #ls -l openvpn-status.log -# -rw-r----- 1 root netdata 359 dec 21 21:22 openvpn-status.log -# -# 3. The update_every interval MUST MATCH the interval on which OpenVPN writes operational status to the log file. -# If it does not, the traffic chart WILL DISPLAY WRONG values -# -# The default OpenVPN update interval is 10 seconds on Debian 8 -# # ps -C openvpn -o command= -# /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf -# -# -#default: -# log_path: '/var/log/openvpn-status.log' -# -# ---------------------------------------------------------------------- \ No newline at end of file diff --git a/conf.d/python.d/phpfpm.conf b/conf.d/python.d/phpfpm.conf deleted file mode 100644 index 08688e2fa..000000000 --- a/conf.d/python.d/phpfpm.conf +++ /dev/null @@ -1,90 +0,0 @@ -# netdata python.d.plugin configuration for PHP-FPM -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default.
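-# As a sketch (the value 30 here is only an illustration, not a shipped default), -# 'autodetection_retry: 30' would re-check a failed job every 30 seconds, while -# the commented default below leaves the feature disabled.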
-# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, PHP-FPM also supports the following: -# -# url: 'URL' # the URL to fetch PHP-FPM's status stats -# # Be sure to include ?full&json at the end of the url -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : "http://localhost/status?full&json" - -localipv4: - name : 'local' - url : "http://127.0.0.1/status?full&json" - -localipv6: - name : 'local' - url : "http://::1/status?full&json" - diff --git a/conf.d/python.d/portcheck.conf b/conf.d/python.d/portcheck.conf deleted file mode 100644 index b3dd8bd3f..000000000 --- a/conf.d/python.d/portcheck.conf +++ /dev/null @@ -1,70 +0,0 @@ -# netdata python.d.plugin configuration for portcheck -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# chart_cleanup sets the default chart cleanup interval in iterations. -# A chart is marked as obsolete if it has not been updated -# 'chart_cleanup' iterations in a row. -# They will be hidden immediately (not offered to dashboard viewer, -# streamed upstream and archived to backends) and deleted one hour -# later (configurable from netdata.conf). -# -- For this plugin, cleanup MUST be disabled, otherwise we lose the latency chart -chart_cleanup: 0 - -# Autodetection and retries do not work for this plugin - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time.
This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# ------------------------------- -# ATTENTION: Any valid configuration will be accepted, even if the initial connection fails! -# ------------------------------- -# -# There is intentionally no default config for 'localhost' - -# job_name: -# name: myname # [optional] the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # [optional] the JOB's data collection frequency -# priority: 60000 # [optional] the JOB's order on the dashboard -# retries: 60 # [optional] the JOB's number of restoration attempts -# timeout: 1 # [optional] the socket timeout when connecting -# host: 'dns or ip' # [required] the remote host address, as an IPv4/IPv6 address or a DNS name. -# port: 22 # [required] the port number to check. Specify an integer, not a service name. - -# Be aware that frequent checks may be mistaken for a port scan and blocked. The portcheck plugin is meant for simple use cases. -# Currently the latency measurement has low accuracy and should be used as a reference only. - diff --git a/conf.d/python.d/postfix.conf b/conf.d/python.d/postfix.conf deleted file mode 100644 index e0d5a5f83..000000000 --- a/conf.d/python.d/postfix.conf +++ /dev/null @@ -1,74 +0,0 @@ -# netdata python.d.plugin configuration for postfix -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# postfix is slow, so once every 10 seconds -update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters.
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, postfix also supports the following: -# -# command: 'postqueue -p' # the command to run -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS - -local: - command: 'postqueue -p' diff --git a/conf.d/python.d/postgres.conf b/conf.d/python.d/postgres.conf deleted file mode 100644 index b69ca3717..000000000 --- a/conf.d/python.d/postgres.conf +++ /dev/null @@ -1,124 +0,0 @@ -# netdata python.d.plugin configuration for postgresql -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# A single connection is required in order to pull statistics. 
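-# For example, a minimal sketch of such a connection job, using the options -# documented below (all names and values here are placeholders, not shipped -# defaults): -# -# example_db: -# name : 'example' -# database : 'example_db_name' -# user : 'example_user' -# password : 'example_pass' -# host : 'localhost' -# port : 5432 -#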
-# -# Connections can be configured with the following options: -# -# database : 'example_db_name' -# user : 'example_user' -# password : 'example_pass' -# host : 'localhost' -# port : 5432 -# -# Additionally, the following options allow selective disabling of charts -# -# table_stats : false -# index_stats : false -# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all others will be excluded from charts) -# -# Postgres permissions are configured in its pg_hba.conf file. You can -# "trust" local clients to allow netdata to connect, or you can create -# a postgres user for netdata and add its password below to allow -# netdata to connect. -# -# Supported Postgres versions are: -# - 9.3 (without autovacuum) -# - 9.4 -# - 9.5 -# - 9.6 -# - 10 -# -# Superuser access is needed for these charts: -# Write-Ahead Logs -# Archive Write-Ahead Logs -# -# Autovacuum charts are available since Postgres 9.4 -# ---------------------------------------------------------------------- - -socket: - name : 'local' - user : 'postgres' - database : 'postgres' - -tcp: - name : 'local' - database : 'postgres' - user : 'postgres' - host : 'localhost' - port : 5432 - -tcpipv4: - name : 'local' - database : 'postgres' - user : 'postgres' - host : '127.0.0.1' - port : 5432 - -tcpipv6: - name : 'local' - database : 'postgres' - user : 'postgres' - host : '::1' - port : 5432 - diff --git a/conf.d/python.d/powerdns.conf b/conf.d/python.d/powerdns.conf deleted file mode 100644 index ca6200df1..000000000 --- a/conf.d/python.d/powerdns.conf +++ /dev/null @@ -1,78 +0,0 @@ -# netdata python.d.plugin configuration for powerdns -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters.
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, powerdns also supports the following: -# -# url: 'URL' # the URL to fetch powerdns performance statistics -# header: -# X-API-Key: 'Key' # API key -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -# localhost: -# name : 'local' -# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics' -# header: -# X-API-Key: 'change_me' diff --git a/conf.d/python.d/rabbitmq.conf b/conf.d/python.d/rabbitmq.conf deleted file mode 100644 index 3f90da8a2..000000000 --- a/conf.d/python.d/rabbitmq.conf +++ /dev/null @@ -1,82 +0,0 @@ -# netdata python.d.plugin configuration for rabbitmq -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters.
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, rabbitmq plugin also supports the following: -# -# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1 -# port: 'port' # Rabbitmq port. Default: 15672 -# scheme: 'scheme' # http or https. Default: http -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -local: - host: '127.0.0.1' - user: 'guest' - pass: 'guest' diff --git a/conf.d/python.d/redis.conf b/conf.d/python.d/redis.conf deleted file mode 100644 index 6363f6da7..000000000 --- a/conf.d/python.d/redis.conf +++ /dev/null @@ -1,112 +0,0 @@ -# netdata python.d.plugin configuration for redis -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, redis also supports the following: -# -# socket: 'path/to/redis.sock' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# and -# pass: 'password' # the redis password to use for AUTH command -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -socket1: - name : 'local' - socket : '/tmp/redis.sock' - # pass : '' - -socket2: - name : 'local' - socket : '/var/run/redis/redis.sock' - # pass : '' - -socket3: - name : 'local' - socket : '/var/lib/redis/redis.sock' - # pass : '' - -localhost: - name : 'local' - host : 'localhost' - port : 6379 - # pass : '' - -localipv4: - name : 'local' - host : '127.0.0.1' - port : 6379 - # pass : '' - -localipv6: - name : 'local' - host : '::1' - port : 6379 - # pass : '' - diff --git a/conf.d/python.d/retroshare.conf b/conf.d/python.d/retroshare.conf deleted file mode 100644 index 9c92583f7..000000000 --- a/conf.d/python.d/retroshare.conf +++ /dev/null @@ -1,74 +0,0 @@ -# netdata python.d.plugin configuration for RetroShare -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, RetroShare also supports the following: -# -# - url: 'url' # the URL to the WebUI -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name: 'local' - url: 'http://localhost:9090' diff --git a/conf.d/python.d/samba.conf b/conf.d/python.d/samba.conf deleted file mode 100644 index ee513c60f..000000000 --- a/conf.d/python.d/samba.conf +++ /dev/null @@ -1,62 +0,0 @@ -# netdata python.d.plugin configuration for samba -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file diff --git a/conf.d/python.d/sensors.conf b/conf.d/python.d/sensors.conf deleted file mode 100644 index 83bbffd7d..000000000 --- a/conf.d/python.d/sensors.conf +++ /dev/null @@ -1,63 +0,0 @@ -# netdata python.d.plugin configuration for sensors -# -# This file is in YaML format. Generally the format is: -# -# name: value -# - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# Limit the number of sensor types. -# Comment the ones you want to disable. -# Also, re-arranging this list controls the order of the charts at the -# netdata dashboard. - -types: - - temperature - - fan - - voltage - - current - - power - - energy - - humidity - -# ---------------------------------------------------------------------- -# Limit the number of sensor chips. -# Uncomment the first line (chips:) and add chip names below it. -# Chip names are matched by prefix (anything that starts with the given string). -# You can find the chip names using the sensors command. - -#chips: -# - i8k -# - coretemp -# -#---------------------------------------------------------------------- - diff --git a/conf.d/python.d/smartd_log.conf b/conf.d/python.d/smartd_log.conf deleted file mode 100644 index 3fab3f1c0..000000000 --- a/conf.d/python.d/smartd_log.conf +++ /dev/null @@ -1,90 +0,0 @@ -# netdata python.d.plugin configuration for smartd log -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults.
- -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, smartd_log also supports the following: -# -# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd -# raw_values: yes # enable/disable raw values charts. Enabled by default. -# smart_attributes: '1 2 3 4 44' # smart attributes charts. Defaults are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200']. -# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it. -# -# ---------------------------------------------------------------------- -# Additional information -# The plugin reads smartd log files (written with the '-A' option). -# You need to add '-i 600 -A /var/log/smartd/' to /etc/default/smartmontools (see man smartd) to pass these additional options to smartd on startup. -# Then restart the smartd service and check the log directory: -# ls /var/log/smartd/ -# CDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv WDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv ZDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv -# -# Smartd APPENDS to the logs at every run. It is NOT RECOMMENDED to set the '-i' option below 60 sec. -# It is STRONGLY RECOMMENDED to create a logrotate configuration for the smartd log files. -# -# RAW vs NORMALIZED values -# "Normalized value", commonly referred to as just "value". This is the most universal measurement, on a scale from 0 (bad) to some maximum (good) value. -# Maximum values are typically 100, 200 or 253. The rule of thumb is: high values are good, low values are bad. -# -# "Raw value" - the value of the attribute as it is tracked by the device, before any normalization takes place. -# Some raw numbers provide valuable insight when properly interpreted. -# Raw values are typically listed in hexadecimal numbers.
The raw value has a different structure for different vendors and is often not meaningful as a decimal number. -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/springboot.conf b/conf.d/python.d/springboot.conf deleted file mode 100644 index 40b5fb437..000000000 --- a/conf.d/python.d/springboot.conf +++ /dev/null @@ -1,120 +0,0 @@ -# netdata python.d.plugin configuration for springboot -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, this plugin also supports the following: -# -# url: 'http://127.0.0.1/metrics' # the URL of the spring boot actuator metrics -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# defaults: -# [chart_id]: true | false # enables/disables default charts, defaults true. -# extras: {} # defines extra charts to monitor, please see the example below -# - id: [chart_id] -# options: {} -# lines: [] -# -# If all default charts are disabled and no extra charts are defined, this module will disable itself, as it has no data to -# collect.
-# -# Configuration example -# --------------------- -# example: -# name: 'example' -# url: 'http://localhost:8080/metrics' -# defaults: -# response_code: true -# threads: true -# gc_time: true -# gc_ope: true -# heap: false -# extras: -# - id: 'heap' -# options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' } -# lines: -# - { dimension: 'mem_free', name: 'free'} -# - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1} -# - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1} -# - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1} -# - id: 'heap_eden' -# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' } -# lines: -# - { dimension: 'mempool_eden_used', name: 'used'} -# - { dimension: 'mempool_eden_committed', name: 'committed'} -# - id: 'heap_survivor' -# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' } -# lines: -# - { dimension: 'mempool_survivor_used', name: 'used'} -# - { dimension: 'mempool_survivor_committed', name: 'committed'} -# - id: 'heap_tenured' -# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' } -# lines: -# - { dimension: 'mempool_tenured_used', name: 'used'} -# - { dimension: 'mempool_tenured_committed', name: 'committed'} - - -local: - name: 'local' - url: 'http://localhost:8080/metrics' - -local_ip: - name: 'local' - url: 'http://127.0.0.1:8080/metrics' diff --git a/conf.d/python.d/squid.conf b/conf.d/python.d/squid.conf deleted file mode 100644 index 564187f00..000000000 --- a/conf.d/python.d/squid.conf +++ /dev/null @@ -1,169 +0,0 @@ -# netdata python.d.plugin configuration for squid -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*.
JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, squid also supports the following: -# -# host : 'IP or HOSTNAME' # the host to connect to -# port : PORT # the port to connect to -# request: 'URL' # the URL to request from squid -# - -# ---------------------------------------------------------------------- -# SQUID CONFIGURATION -# -# See: -# http://wiki.squid-cache.org/Features/CacheManager -# -# In short, add to your squid configuration these: -# -# http_access allow localhost manager -# http_access deny manager -# -# To remotely monitor a squid: -# -# acl managerAdmin src 192.0.2.1 -# http_access allow localhost manager -# http_access allow managerAdmin manager -# http_access deny manager -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -tcp3128old: - name : 'local' - host : 'localhost' - port : 3128 - request : 'cache_object://localhost:3128/counters' - -tcp8080old: - name : 'local' - host : 'localhost' - port : 8080 - request : 'cache_object://localhost:3128/counters' - -tcp3128new: - name : 'local' - host : 'localhost' - port : 3128 - request : '/squid-internal-mgr/counters' - -tcp8080new: - name : 'local' - host : 'localhost' - port : 8080 - request : '/squid-internal-mgr/counters' - -# IPv4 - -tcp3128oldipv4: - name : 'local' - host : '127.0.0.1' - port : 3128 - request : 'cache_object://127.0.0.1:3128/counters' - -tcp8080oldipv4: - name : 'local' - host : '127.0.0.1' - port : 8080 - request : 'cache_object://127.0.0.1:3128/counters' - -tcp3128newipv4: - name : 'local' - host : '127.0.0.1' - port : 3128 - request : '/squid-internal-mgr/counters' - -tcp8080newipv4: - name : 'local' - host : '127.0.0.1' - port : 8080 - request : '/squid-internal-mgr/counters' - -# IPv6 - -tcp3128oldipv6: - name : 'local' - host : '::1' - port : 3128 - request : 'cache_object://[::1]:3128/counters' - -tcp8080oldipv6: - name : 'local' - host : '::1' - port : 8080 - request : 'cache_object://[::1]:3128/counters' - -tcp3128newipv6: - name : 'local' - host : '::1' - port : 3128 - request : '/squid-internal-mgr/counters' - -tcp8080newipv6: - name : 'local' - host : '::1' - port : 8080 - request : '/squid-internal-mgr/counters' - diff --git a/conf.d/python.d/tomcat.conf b/conf.d/python.d/tomcat.conf deleted file mode 100644 index c63f06cfa..000000000 --- a/conf.d/python.d/tomcat.conf +++ /dev/null @@ -1,91 +0,0 @@ -# netdata python.d.plugin configuration for tomcat -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. 
-# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, tomcat also supports the following: -# -# url: 'URL' # the URL to fetch tomcat's status stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# if you have multiple connectors, the following are supported: -# -# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost:8080/manager/status?XML=true' - -localipv4: - name : 'local' - url : 'http://127.0.0.1:8080/manager/status?XML=true' - -localipv6: - name : 'local' - url : 'http://[::1]:8080/manager/status?XML=true' diff --git a/conf.d/python.d/traefik.conf b/conf.d/python.d/traefik.conf deleted file mode 100644 index 909b9e549..000000000 --- a/conf.d/python.d/traefik.conf +++ /dev/null @@ -1,79 +0,0 @@ -# netdata python.d.plugin configuration for traefik health data API -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below).
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 10 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, traefik plugin also supports the following: -# -# url: '<scheme>://<host>:<port>/<health_page_api>' -# # http://localhost:8080/health -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -local: - url: 'http://localhost:8080/health' diff --git a/conf.d/python.d/varnish.conf b/conf.d/python.d/varnish.conf deleted file mode 100644 index 4b069d514..000000000 --- a/conf.d/python.d/varnish.conf +++ /dev/null @@ -1,64 +0,0 @@ -# netdata python.d.plugin configuration for varnish -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. 
-# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/web_log.conf b/conf.d/python.d/web_log.conf deleted file mode 100644 index c185f8d85..000000000 --- a/conf.d/python.d/web_log.conf +++ /dev/null @@ -1,195 +0,0 @@ -# netdata python.d.plugin configuration for web log -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. 
-
-# ----------------------------------------------------------------------
-# PLUGIN CONFIGURATION
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# In addition to the above, web_log also supports the following:
-#
-# path: 'PATH'                        # the path to the web server log file
-# path: 'PATH[0-9]*[0-9]'             # log files with a date suffix are also supported
-# detailed_response_codes: yes/no     # default: yes. Additional chart where response codes are not grouped
-# detailed_response_aggregate: yes/no # default: yes. If 'no', the detailed response codes charts are not aggregated
-# all_time: yes/no                    # default: yes. All-time unique client IPs chart (50000 addresses ~ 400KB)
-# filter:                             # filter with regex
-#     include: 'REGEX'                # only rows that match the regex
-#     exclude: 'REGEX'                # all rows except those that match the regex
-# categories:                         # requests-per-URL chart configuration
-#     cacti: 'cacti.*'                # name(dimension): REGEX to match
-#     observium: 'observium.*'        # name(dimension): REGEX to match
-#     stub_status: 'stub_status'      # name(dimension): REGEX to match
-# user_defined:                       # requests per pattern in <user_defined> field (custom_log_format)
-#     cacti: 'cacti.*'                # name(dimension): REGEX to match
-#     observium: 'observium.*'        # name(dimension): REGEX to match
-#     stub_status: 'stub_status'      # name(dimension): REGEX to match
-# custom_log_format:                  # define a custom log format
-#     pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
-#     time_multiplier: 1000000        # type <int> - convert time to microseconds
-#     histogram: [1,3,10,30,100, ...] # type list of int - cumulative histogram of response time in milliseconds
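To make the custom pattern above concrete, here is a hypothetical log line (not from the original file) together with the fields its named groups would capture; note that the pattern expects a trailing space after the response time:

# 192.168.1.10 - - "GET /index.html" 200 512 300 0.123 
#     address=192.168.1.10, method=GET, url=/index.html,
#     code=200, bytes_sent=512, resp_length=300, resp_time=0.123
# with time_multiplier: 1000000, a resp_time of 0.123 (seconds) becomes 123000 microseconds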
-
-# ----------------------------------------------------------------------
-# WEB SERVER CONFIGURATION
-#
-# Make sure the web server log directory and the web server log files
-# can be read by user 'netdata'.
-#
-# To enable the timings chart and the requests size dimension, the
-# web server needs to log them. This is how to add them:
-#
-# nginx:
-#   log_format netdata '$remote_addr - $remote_user [$time_local] '
-#                      '"$request" $status $body_bytes_sent '
-#                      '$request_length $request_time $upstream_response_time '
-#                      '"$http_referer" "$http_user_agent"';
-#   access_log /var/log/nginx/access.log netdata;
-#
-# apache (you need mod_logio enabled):
-#   LogFormat "%v %h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
-#   LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata
-#   CustomLog "/var/log/apache2/access.log" netdata
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them per web server will run (when they have the same name)
-
-
-# -------------------------------------------
-# nginx log on various distros
-
-# debian, arch
-nginx_log:
-  name: 'nginx'
-  path: '/var/log/nginx/access.log'
-
-# gentoo
-nginx_log2:
-  name: 'nginx'
-  path: '/var/log/nginx/localhost.access_log'
-
-
-# -------------------------------------------
-# apache log on various distros
-
-# debian
-apache_log:
-  name: 'apache'
-  path: '/var/log/apache2/access.log'
-
-# gentoo
-apache_log2:
-  name: 'apache'
-  path: '/var/log/apache2/access_log'
-
-# arch
-apache_log3:
-  name: 'apache'
-  path: '/var/log/httpd/access_log'
-
-# debian
-apache_vhosts_log:
-  name: 'apache_vhosts'
-  path: '/var/log/apache2/other_vhosts_access.log'
-
-
-# -------------------------------------------
-# gunicorn log on various distros
-
-gunicorn_log:
-  name: 'gunicorn'
-  path: '/var/log/gunicorn/access.log'
-
-gunicorn_log2:
-  name: 'gunicorn'
-  path: '/var/log/gunicorn/gunicorn-access.log'
-
-# -------------------------------------------
-# Apache Cache
-apache_cache:
-  name: 'apache_cache'
-  type: 'apache_cache'
-  path: '/var/log/apache/cache.log'
-
-apache2_cache:
-  name: 'apache_cache'
-  type: 'apache_cache'
-  path: '/var/log/apache2/cache.log'
-
-httpd_cache:
-  name: 'apache_cache'
-  type: 'apache_cache'
-  path: '/var/log/httpd/cache.log'
-
-# -------------------------------------------
-# Squid
-
-# debian/ubuntu
-squid_log1:
-  name: 'squid'
-  type: 'squid'
-  path: '/var/log/squid3/access.log'
-
-# gentoo
-squid_log2:
-  name: 'squid'
-  type: 'squid'
-  path: '/var/log/squid/access.log'
diff --git a/conf.d/statsd.d/example.conf b/conf.d/statsd.d/example.conf
deleted file mode 100644
index 0af9dd27d..000000000
--- a/conf.d/statsd.d/example.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-# statsd synthetic charts configuration
-
-# You can add many .conf files, one for each of your apps
-
-# start a new app - you can add many apps in the same file
-[app]
-    # give a name for this app
-    # this controls the main menu on the dashboard
-    # and will be the prefix for all charts of the app
-    name = myapp
-
-    # match all the metrics of the app
-    metrics = myapp.*
-
-    # shall private charts of these metrics be created?
-    private charts = no
-
-    # shall gaps be shown when metrics are not collected?
-    gaps when not collected = no
-
-    # the memory mode for the charts of this app: none|map|save|ram
-    # the default is to use the global memory mode
-    #memory mode = ram
-
-    # the history size for the charts of this app, in seconds
-    # the default is to use the global history
-    #history = 3600
-
-
-
-# create a chart
-# this is its id - the chart will be named myapp.mychart
-[mychart]
-    # a name for the chart, similar to the id (2 names for each chart)
-    name = mychart
-
-    # the chart title
-    title = my chart title
-
-    # the submenu of the dashboard
-    family = my family
-
-    # the context for alarm templates
-    context = chart.context
-
-    # the units of the chart
-    units = tests/s
-
-    # the sorting priority of the chart on the dashboard
-    priority = 91000
-
-    # the type of chart to create: line | area | stacked
-    type = area
-
-    # one or more dimensions for the chart
-    # type = events | last | min | max | sum | average | percentile | median | stddev
-    # events = the number of events for this metric
-    # last = the last value collected
-    # all the others are only valid for histograms and timers
-    dimension = myapp.metric1 avg average 1 1
-    dimension = myapp.metric1 lower min 1 1
-    dimension = myapp.metric1 upper max 1 1
-    dimension = myapp.metric2 other last 1 1
-
-# You can add as many charts as needed
diff --git a/conf.d/stream.conf b/conf.d/stream.conf
deleted file mode 100644
index d0c9a8b18..000000000
--- a/conf.d/stream.conf
+++ /dev/null
@@ -1,179 +0,0 @@
-# netdata configuration for aggregating data from remote hosts
-#
-# API keys authorize a pair of sending-receiving netdata servers.
-# Once their communication is authorized, they can exchange metrics for any
-# number of hosts.
-#
-# You can generate API keys with the Linux command: uuidgen
-
-
-# -----------------------------------------------------------------------------
-# 1. ON SLAVE NETDATA - THE ONE THAT WILL BE SENDING METRICS
-
-[stream]
-    # Enable this on slaves to have them send metrics.
-    enabled = no
-
-    # Where is the receiving netdata?
-    # A space separated list of:
-    #
-    #      [PROTOCOL:]HOST[%INTERFACE][:PORT]
-    #
-    # If many are given, the first available will get the metrics.
-    #
-    # PROTOCOL  = tcp, udp, or unix (only tcp and unix are supported by masters)
-    # HOST      = an IPv4 or IPv6 address, a hostname, or a unix domain socket path.
-    #             IPv6 addresses should be given in brackets [ip:address]
-    # INTERFACE = the network interface to use (only for IPv6)
-    # PORT      = the port number or service name (/etc/services)
-    #
-    # This communication is not HTTP (it cannot be proxied by web proxies).
-    destination =
-
-    # The API_KEY to use (as the sender)
-    api key =
-
-    # The timeout to connect and send metrics
-    timeout seconds = 60
-
-    # If the destination line above does not specify a port, use this
-    default port = 19999
-
-    # The buffer to use for sending metrics.
-    # 1MB is good for 10-20 seconds of data, so increase this
-    # if you expect higher latency.
-    buffer size bytes = 1048576
-
-    # If the connection fails, or it disconnects,
-    # retry after that many seconds.
-    reconnect delay seconds = 5
-
-    # Attempt to sync the clock of the master with the clock of the
-    # slave for that many iterations, when starting.
-    initial clock resync iterations = 60
-
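A filled-in slave section might look like the following sketch; the destination address and the API key below are hypothetical placeholders (the key would come from uuidgen, as noted above):

[stream]
    enabled = yes
    destination = tcp:192.0.2.7:19999
    api key = 11111111-2222-3333-4444-555555555555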
-# -----------------------------------------------------------------------------
-# 2. ON MASTER NETDATA - THE ONE THAT WILL BE RECEIVING METRICS
-
-# You can have one API key per slave,
-# or the same API key for all slaves.
-#
-# netdata searches for options in this order:
-#
-# a) master netdata settings (netdata.conf)
-# b) [API_KEY] section (below, settings for the API key)
-# c) [MACHINE_GUID] section (below, settings for each machine)
-#
-# You can combine the above (the more specific setting will be used).
-
-# API key authentication
-# If the key is not listed here, the sender will not be able to push metrics.
-
-# [API_KEY] is [YOUR-API-KEY], e.g. [11111111-2222-3333-4444-555555555555]
-[API_KEY]
-    # Default settings for this API key
-
-    # You can disable the API key by setting this to: no
-    # The default (for unknown API keys) is: no
-    enabled = no
-
-    # A list of simple patterns matching the IPs of the servers that
-    # will be pushing metrics using this API key.
-    # The metrics are received via the API port, so the same IPs
-    # should also be matched at netdata.conf [web].allow connections from
-    allow from = *
-
-    # The default history in entries, for all hosts using this API key.
-    # You can also set it per host below.
-    # If you don't set it here, the history size of the central netdata
-    # will be used.
-    default history = 3600
-
-    # The default memory mode to be used for all hosts using this API key.
-    # You can also set it per host below.
-    # If you don't set it here, the memory mode of netdata.conf will be used.
-    # Valid modes:
-    #    save     save on exit, load on start
-    #    map      like swap (continuously syncing to disks)
-    #    ram      keep it in RAM, don't touch the disk
-    #    none     no database at all (use this on headless proxies)
-    default memory mode = ram
-
-    # Shall we enable health monitoring for the hosts using this API key?
-    # 3 possible values:
-    #    yes      enable alarms
-    #    no       do not enable alarms
-    #    auto     enable alarms, only when the sending netdata is connected
-    # You can also set it per host, below.
-    # The default is the same as in netdata.conf
-    health enabled by default = auto
-
-    # postpone alarms for a short period after the sender is connected
-    default postpone alarms on connect seconds = 60
-
-    # allow or deny multiple connections for the same host?
-    # If you are sure all your netdata installations have their own machine GUID,
-    # set this to 'allow', since it allows faster reconnects.
-    # When set to 'deny', new connections for a host will not be
-    # accepted until an existing connection is cleared.
-    multiple connections = allow
-
-    # need to route metrics differently? set these.
-    # the defaults are the ones at the [stream] section
-    #default proxy enabled = yes | no
-    #default proxy destination = IP:PORT IP:PORT ...
-    #default proxy api key = API_KEY
-
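On the master, the matching section for the hypothetical key sketched earlier could then be enabled and restricted to the slaves' network (both the key and the pattern are placeholders):

[11111111-2222-3333-4444-555555555555]
    enabled = yes
    allow from = 10.*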
-# -----------------------------------------------------------------------------
-# 3. PER SENDING HOST SETTINGS, ON MASTER NETDATA
-#    THIS IS OPTIONAL - YOU DON'T NEED IT
-
-# This section exists to give you finer control of the master settings for each
-# slave host, when the same API key is used by many netdata slaves / proxies.
-#
-# Each netdata has a unique GUID - generated the first time netdata starts.
-# You can find it at /var/lib/netdata/registry/netdata.public.unique.id
-# (at the slave).
-#
-# The host sending data will have one. If the host is not ephemeral,
-# you can give settings for each sending host here.
-
-[MACHINE_GUID]
-    # enable this host: yes | no
-    # When disabled, the master will not receive metrics for this host.
-    # THIS IS NOT A SECURITY MECHANISM - AN ATTACKER CAN SET ANY OTHER GUID.
-    # Use only the API key for security.
-    enabled = no
-
-    # A list of simple patterns matching the IPs of the servers that
-    # will be pushing metrics using this MACHINE GUID.
-    # The metrics are received via the API port, so the same IPs
-    # should also be matched at netdata.conf [web].allow connections from
-    # and at stream.conf [API_KEY].allow from
-    allow from = *
-
-    # The number of entries in the database
-    history = 3600
-
-    # The memory mode of the database: save | map | ram | none
-    memory mode = save
-
-    # Health / alarms control: yes | no | auto
-    health enabled = yes
-
-    # postpone alarms when the sender connects
-    postpone alarms on connect seconds = 60
-
-    # allow or deny multiple connections for the same host?
-    # If you are sure all your netdata installations have their own machine GUID,
-    # set this to 'allow', since it allows faster reconnects.
-    # When set to 'deny', new connections for a host will not be
-    # accepted until an existing connection is cleared.
-    multiple connections = allow
-
-    # need to route metrics differently?
-    #proxy enabled = yes | no
-    #proxy destination = IP:PORT IP:PORT ...
-    #proxy api key = API_KEY
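A per-host override is just another section keyed by the slave's GUID; as a sketch, the GUID below is a hypothetical value read from /var/lib/netdata/registry/netdata.public.unique.id on that slave:

[22222222-3333-4444-5555-666666666666]
    enabled = yes
    history = 86400
    memory mode = save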
diff --git a/config.h.in b/config.h.in
index 1e817fb3a..fd7bc5eb8 100644
--- a/config.h.in
+++ b/config.h.in
@@ -59,6 +59,9 @@
  */
 #undef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
 
+/* Define to 1 if you have the `getpriority' function. */
+#undef HAVE_GETPRIORITY
+
 /* Define to 1 if you have the <inttypes.h> header file. */
 #undef HAVE_INTTYPES_H
 
@@ -102,6 +105,12 @@
 /* Define to 1 if you have the <resolv.h> header file. */
 #undef HAVE_RESOLV_H
 
+/* Define to 1 if you have the `sched_getparam' function. */
+#undef HAVE_SCHED_GETPARAM
+
+/* Define to 1 if you have the `sched_getscheduler' function. */
+#undef HAVE_SCHED_GETSCHEDULER
+
 /* Define to 1 if you have the `sched_get_priority_max' function. */
 #undef HAVE_SCHED_GET_PRIORITY_MAX
 
@@ -114,6 +123,9 @@
 /* Define 1 if you have setns() function */
 #undef HAVE_SETNS
 
+/* Define to 1 if you have the `setpriority' function. */
+#undef HAVE_SETPRIORITY
+
 /* Define to 1 if you have the <stdint.h> header file. */
 #undef HAVE_STDINT_H
 
@@ -132,15 +144,27 @@
 /* Define to 1 if the system has the type `struct timespec'. */
 #undef HAVE_STRUCT_TIMESPEC
 
+/* Define to 1 if you have the <sys/mount.h> header file. */
+#undef HAVE_SYS_MOUNT_H
+
 /* Define to 1 if you have the <sys/prctl.h> header file. */
 #undef HAVE_SYS_PRCTL_H
 
+/* Define to 1 if you have the <sys/statfs.h> header file. */
+#undef HAVE_SYS_STATFS_H
+
+/* Define to 1 if you have the <sys/statvfs.h> header file. */
+#undef HAVE_SYS_STATVFS_H
+
 /* Define to 1 if you have the <sys/stat.h> header file. */
 #undef HAVE_SYS_STAT_H
 
 /* Define to 1 if you have the <sys/types.h> header file. */
 #undef HAVE_SYS_TYPES_H
 
+/* Define to 1 if you have the <sys/vfs.h> header file. */
+#undef HAVE_SYS_VFS_H
+
 /* Define to 1 if you have the <unistd.h> header file. */
 #undef HAVE_UNISTD_H
diff --git a/configs.signatures b/configs.signatures
index 1db5050d6..afc8dbe56 100644
--- a/configs.signatures
+++ b/configs.signatures
@@ -1,4 +1,5 @@
 declare -A configs_signatures=(
+ ['00049600c2f9237e6c46b8b0f703c13c']='health.d/bcache.conf'
 ['00403e687213f3b7db9bf4563a5a92cc']='python.d/isc_dhcpd.conf'
 ['0056936ce99788ed9ae1c611c87aa6d8']='apps_groups.conf'
 ['007fc019fb32e952b509d455c016a002']='health.d/tcp_resets.conf'
@@ -8,8 +9,10 @@ declare -A configs_signatures=(
 ['0147c7e8f8f57e37c5dade4e8aacacf9']='python.d/example.conf'
 ['017036c1dc32c9312b2704b839bd078f']='python.d/haproxy.conf'
 ['01c54057e0ca55b5bb49df1662d6b8c3']='python.d/web_log.conf'
+ ['024f4a6a431bcbc6acdb4184aa9661f3']='python.d/httpcheck.conf'
 ['02fa10fa85ab88e9723998de48d1aca0']='health.d/disks.conf'
 ['0314f0f1f88773c0ed9e9a908335e7ca']='health.d/tcp_mem.conf'
+ ['032ee2b3b6cb200bfdf1a0698c2457d6']='health.d/boinc.conf'
 ['03510c8b3a2a6e8535320cfb9ebef06a']='python.d/httpcheck.conf'
 ['036dc300bd7b0e0ef229b9822686d63e']='python.d/isc_dhcpd.conf'
 ['0388b873d0d7e47c19005b7241db77d8']='python.d/tomcat.conf'
@@ -18,7 +21,9 @@ declare -A configs_signatures=(
 ['043f0a35dde85837fabeb85b990a41c1']='health.d/swap.conf'
 ['044496086420b531487d3c57600ca673']='apps_groups.conf'
 ['0529b679d3c0e7e6332753c7f6484731']='health.d/net.conf'
+ ['054a2eece27ee2f5928b8167f5989b65']='python.d/dockerd.conf'
 ['057d12aaff0467e64529e839a258806b']='health.d/entropy.conf'
+ ['05809c6662ba39f19cbec90234433d62']='health_alarm_notify.conf'
 ['059d98d0c562e1c81653d1e64673deab']='python.d/web_log.conf'
 ['05a8f39f134850c1e8d6267dbe706273']='health.d/web_log.conf'
 ['061c45b0e34170d357e47883166ecf40']='python.d/nginx.conf'
@@ -31,12 +36,14 @@ declare -A configs_signatures=(
 ['084ee72d64760f2641b0720e79c922f3']='health.d/cpu.conf'
 ['0856124b1eecf01681b4fdf4e21efb3f']='health.d/net.conf'
 ['0862e7cf3d32ef48795702c6aefd27e0']='python.d/fail2ban.conf'
+ ['08de9ae0765a4161abe24c09e47b3454']='python.d/go_expvar.conf'
 ['08ff5218f938fc48e09e718821169d14']='health.d/redis.conf'
 ['091572888425bc3b8b559c3c53367ec7']='apps_groups.conf'
 ['09225283977a6584f8063016091cc4f2']='health.d/tcp_resets.conf'
 ['09264cec953ae1c4c2985e6446abb386']='health.d/mysql.conf'
 ['093540fdc2a228e976ce5d48a3adf9fc']='health.d/disks.conf'
 ['09e030d26be08a13fa3560e47fa27825']='apps_groups.conf'
+ ['0a5bc649295ba08f7e22e175901c1380']='python.d/unbound.conf'
 ['0a7039ecc7a86b480d9d499b12b02763']='python.d/freeradius.conf'
 ['0ad10fa896346202aee99384b0ec968d']='health.d/cpu.conf'
 ['0b6903f981cdb018c17802ac4e295609']='health.d/btrfs.conf'
@@ -58,8 +65,11 @@ declare -A configs_signatures=(
 ['111401c47f94015cd12ad0b8a4393c4f']='health.d/softnet.conf'
 ['111ead4b350593dd69b6f7ac0307b49b']='python.d/httpcheck.conf'
 ['12a4c7803ae79506a14ea784fea60dce']='health.d/net.conf'
+ ['12d27b9f4d1696c2d49a77ed71d68e6f']='python.d/w1sensor.conf'
+ ['12e57bea1127933a4fe49ce2e9674f4d']='statsd.d/example.conf'
 ['13141998a5d71308d9c119834c27bfd3']='python.d.conf'
 ['13ccf65fd879795f0fcea89ade27c2d0']='health.d/swap.conf'
+ ['13e861a3d2f3075de883994ab54df658']='health.d/megacli.conf'
 ['1423e4f8c25a66c316be37da0d5c9c54']='health.d/btrfs.conf'
 ['142a5b693d34b0308bb0b8aec71fad79']='python.d/postfix.conf'
 ['14783e051650442ec9e2ed38d81d667e']='charts.d/exim.conf'
@@ -72,6 +82,7 @@ declare -A configs_signatures=(
 ['178281aa2241d4a3e6b798bb9c4ae577']='python.d/haproxy.conf'
 ['17dc745a76ab4c37ee31a3224f644fc1']='charts.d/postfix.conf'
 ['18710ef6523cef8630d644ab270bfe02']='health.d/varnish.conf'
+ ['18c46f55d45a17d60c72877807d9a3d2']='health.d/udp_errors.conf'
 ['18ee1c6197a4381b1c1631ef6129824f']='apps_groups.conf'
 ['1972e48345e6c3f0d65f94a03317622b']='health_alarm_notify.conf'
 ['1bc518219377499d1d05d6ee770f1a87']='python.d/go_expvar.conf'
@@ -81,6 +92,7 @@ declare -A configs_signatures=(
 ['1c71a8792c5c0ed035dd97af93a04838']='health_alarm_notify.conf'
 ['1d6efba856acaaaf3b50bc6d66611b92']='python.d/web_log.conf'
 ['1e09f326178acf07d361c08a44d8b1f3']='python.d/rabbitmq.conf'
+ ['1e0bc6a0ff701d16225383e5de76585b']='python.d/spigotmc.conf'
 ['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
 ['1eb0bc80934a3166fcde4d153c476d14']='health.d/fping.conf'
 ['1ef0fd38e7969c023bc3fa6d89eaf6d6']='python.d/mdstat.conf'
@@ -95,6 +107,7 @@ declare -A configs_signatures=(
 ['225792e33ddeea72992ffa5ab36d505f']='python.d/ntpd.conf'
 ['22952dbf42647c583b005054b23b545f']='health.d/disks.conf'
 ['22ceb822983134a7ca67343241f30341']='health.d/disks.conf'
+ ['2320314191d0f8e7548b9273b77ac5e3']='apps_groups.conf'
 ['2385e5d35b440619621c4af62492d91b']='health.d/disks.conf'
 ['23989e04f9c7694b7eab646f8949cd52']='python.d/portcheck.conf'
 ['23a5afe5260a7ad388e447709cb009df']='python.d/web_log.conf'
@@ -104,7 +117,9 @@ declare -A configs_signatures=(
 ['24d02e4086fd60943c45d8de2e52a4fb']='python.d/springboot.conf'
 ['254de8ec49602bea2da3631676d7cfec']='health.d/cpu.conf'
 ['256a7f06f7e579a61752fc64418cffe5']='charts.d/nut.conf'
+ ['25a35a7c3c6092a839865e9be250c024']='health.d/ram.conf'
 ['262f98b3d88b98978cb08d566ce85a9d']='charts.d/squid.conf'
+ ['27a1dbd43abc7394dcd72efe797ee9af']='python.d.conf'
 ['2827de41cf34a91b7a8e4d8724f59668']='health.d/net.conf'
 ['28df44a90e8ea4c6156314c03e88bf44']='health.d/softnet.conf'
 ['292c6cbbb5c819bb91f87c02a45890c1']='health.d/swap.conf'
@@ -116,20 +131,25 @@ declare -A configs_signatures=(
 ['2a0794fd43eadf30a51805bc9ba2c64d']='python.d/hddtemp.conf'
 ['2acae80dbdbe536a160f5b216bac84bc']='python.d/samba.conf'
 ['2ad55a5d1e885cf142849a78d4b00401']='health.d/net.conf'
+ ['2b0106e89ce622da2869cb0d201246d1']='python.d/unbound.conf'
 ['2bbbebf52f84fd27fbefecd2a8a8076f']='health.d/memcached.conf'
 ['2c2b7e8df922b2bf121fb7db32bbc3bd']='health.d/udp_errors.conf'
 ['2d1d7498c72f4245cf32902c2b7e71e0']='health.d/entropy.conf'
 ['2da0a2e7117292ece11d69723a294bd7']='python.d/mongodb.conf'
 ['2ee5df033fe9c65a45566b6760b856e3']='python.d/web_log.conf'
 ['2f05e09b69ea20cda56d8f8b6fd3e86d']='health.d/couchdb.conf'
+ ['2f13a6b7d11eda826ff26569b2a77080']='health.d/apcupsd.conf'
 ['2f3a8e33df83f14e0af8ca2465697215']='python.d/exim.conf'
 ['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
 ['2fa8fb929fd597f2ab97b6efc540a043']='health_alarm_notify.conf'
 ['307ac41f6c67fcf007d6f7135fac314c']='stream.conf'
 ['312b4b8e2805e19cf9be554b319567d6']='health.d/softnet.conf'
+ ['31471ee7eb6cfbb412587a837ffcfe6f']='python.d.conf'
 ['3161290af7c1909768253e714ea2c3de']='python.d/ceph.conf'
 ['318bb45755726a25120bb33413d4b582']='health.d/net.conf'
 ['318db50a701442890c269ab547041e97']='health.d/tcp_orphans.conf'
+ ['31e4058cfe0a01dd9ce4ae425fd7b4f1']='python.d/web_log.conf'
+ ['322ec5e7095912221110623c9d7130cf']='health_alarm_notify.conf'
 ['325617412a628e3bc776e3fbb777a2a6']='health.d/redis.conf'
 ['326e1477131e0f73304711135f70a2a5']='health.d/memcached.conf'
 ['32fde0057c790964f2c743cb3c9aad29']='health.d/nginx.conf'
@@ -160,6 +180,7 @@ declare -A configs_signatures=(
 ['39b65042cafdd9b849a44ec81aa66cac']='health_alarm_notify.conf'
 ['39f9422b0f0c3eec11a31aff79d89514']='health.d/retroshare.conf'
 ['3a04a3bc66c49d0c24f65a44fd9caa80']='python.d/postgres.conf'
+ ['3a0f1f988d2111ba003199deca722d9b']='health_alarm_notify.conf'
 ['3a278ef6c66c122a407a0236c251119d']='python.d/nginx_plus.conf'
 ['3af522d65b50a5e447607ffb28c81ff5']='apps_groups.conf'
 ['3b1bfa40a4ff6a200bb2fc00bc51a664']='apps_groups.conf'
@@ -167,10 +188,12 @@ declare -A configs_signatures=(
 ['3bc2776623889744a98178bad6fb3b79']='health.d/disks.conf'
 ['3bc2c4423b19779d49ee7935b2ea1431']='health.d/stiebeleltron.conf'
 ['3bc65e997ab59b9de390fdf63d77f5e1']='python.d/postgres.conf'
+ ['3c60691eb05d4d5bf78a41ed46303bb6']='python.d.conf'
 ['3c9c47163e9d4dbcb0079b6232398f2f']='apps_groups.conf'
 ['3ca696189911fb38a0319ddd71e9a395']='python.d/phpfpm.conf'
 ['3cc6255457d4cba881ae0554ae5d9190']='health.d/squid.conf'
 ['3d974ac9fdaa44d4527d6503bec35e34']='stream.conf'
+ ['3d9b33da0f40c2ceecd006ddfd44fd14']='python.d.conf'
 ['3f170e3343cd784983b019163393f5af']='health.d/nginx.conf'
 ['3f7b669fde5c63bd55cb6dd88866d306']='python.d/ceph.conf'
 ['3fbe85671efd5d07e51584ab8262b48b']='health.d/tcp_listen.conf'
@@ -183,14 +206,17 @@ declare -A configs_signatures=(
 ['42ad0e70b1365b6e7244cc305dbaa529']='health_alarm_notify.conf'
 ['42bf1c7c64ed77038a0aa094d792a9e2']='python.d/mysql.conf'
 ['4332dee96e4f38fc73c962df3494ab7c']='health_alarm_notify.conf'
+ ['43739017b6195a6abec14a70fe0df224']='python.d/rethinkdbs.conf'
 ['43ebb7f224c3b232d8ad044d7e9508b6']='health.d/net.conf'
 ['43ef8c1e77054f53f9be9f381eb6cd67']='python.d/portcheck.conf'
 ['4401f0c6a101d35d2cb833e7b0aeb421']='health.d/qos.conf'
 ['444e20cf75e2cd019e8d412d5d1f4a7f']='charts.d/cpu_apps.conf'
 ['4461bfacf9a3da47770fb3ca31f4c91f']='health.d/net.conf'
 ['450667c552ab7a7d8d4a2c214fdacca5']='health.d/entropy.conf'
+ ['459e57e6acb389f4243f695a1e53ab2b']='health.d/boinc.conf'
 ['45a77ac36ba9f1898144b902de17204b']='health.d/memcached.conf'
 ['46798cda21e1a5faa769abf4e5d27c48']='health.d/disks.conf'
+ ['46dfa2b6a7e7c76532e00c1344d5d171']='python.d/logind.conf'
 ['46ef6c1b638e40a7dfd62defdc5f99a3']='health.d/retroshare.conf'
 ['47180421d580baeaedf8c0ef3d647fb5']='python.d/hddtemp.conf'
 ['48195c5c8c0476a49b714b4c76bdb570']='python.d/squid.conf'
@@ -205,6 +231,7 @@ declare -A configs_signatures=(
 ['4d91ee6fe4c887ea3865ef36ac63da3c']='health.d/mysql.conf'
 ['4da1c0f009d87995ed66d84fae07f09a']='health.d/memory.conf'
 ['4dee2390e0bc89938dafa34a390dcf36']='charts.d/squid.conf'
+ ['4e07ea46dd54eb0bbb4f1c0982a71973']='python.d/cpuidle.conf'
 ['4e37502fdf1944d094dd8be1e1f5e9e6']='health.d/cpu.conf'
 ['4e59e91d800059183028bbb44cf5afd2']='health.d/httpcheck.conf'
 ['4e995acb0d6fd77403a2a9dca984b55b']='charts.d.conf'
@@ -221,6 +248,7 @@ declare -A configs_signatures=(
 ['5278ebbae19c60db600f0a119cb3664e']='python.d/apache.conf'
 ['52d230aff57850a5aacc4e0420fcd8f5']='python.d.conf'
 ['52d4131cf9df84e2550b1a5d899ec61d']='health.d/swap.conf'
+ ['5306b64a1e6baacd9de721e1f56961a8']='health_alarm_notify.conf'
 ['53160707fdc6ce46c195b1b55bb0bcb1']='health.d/swap.conf'
 ['535e5113b07b0fc6f3abd59546c276f6']='charts.d.conf'
 ['5379cdc26d7725e2b0d688d785816cef']='python.d/mysql.conf'
@@ -232,18 +260,23 @@ declare -A configs_signatures=(
 ['55608bdd908a3806df1468f6ee318b2b']='health.d/qos.conf'
 ['5598b83e915e31f68027afe324a427cd']='apps_groups.conf'
 ['55cc7e3fe365a77f8e92d01d7a428276']='health.d/ram.conf'
+ ['56324751bae48308f155c079ee7ed43f']='python.d/megacli.conf'
 ['565f11c38ae6bd5cc9d3c2adb542bc1b']='health.d/softnet.conf'
 ['5664a814f9351b55da76edd472169a73']='health_alarm_notify.conf'
 ['56b689031cdcf138064825f31474b37d']='apps_groups.conf'
+ ['56d072c4756b898d0c91f143b89e366b']='python.d.conf'
 ['573398335c0c71c075fa57f702bce287']='health.d/disks.conf'
+ ['579c7c41c756745f57afd11c94a879d7']='python.d.conf'
 ['57be306944cb09b7f024079728fd04b9']='apps_groups.conf'
 ['5829812db29598db5857c9f433e96fef']='python.d/apache.conf'
+ ['58439d9c1253e33c74b399e6853143ab']='health.d/apcupsd.conf'
 ['5855dd70d71c8497e5591b0690162c9c']='health.d/tcp_resets.conf'
 ['58660dfcc260f77deec94b328b3838e8']='health_alarm_notify.conf'
 ['58e835b7176865ec5a6f59f7aba832bf']='health.d/named.conf'
 ['598f9814966a9e2fe48e8218151d3fa6']='stream.conf'
 ['59dded33e3adfe622f36c557a4f4bed7']='health.d/net.conf'
 ['59dea5e3872e5fe4e6c535b216c516b4']='health.d/disks.conf'
+ ['5b5588b00d6829908c2c5ea3220cfa1c']='health.d/load.conf'
 ['5b917d894bb6a755d59264e9d48e9d56']='fping.conf'
 ['5bbef0708f5eff4d4a53aaf35fc48a62']='health.d/disks.conf'
 ['5bf51bb24fb41db9b1e448bd060d3f8c']='apps_groups.conf'
@@ -256,6 +289,7 @@ declare -A configs_signatures=(
 ['5f109df927d5f20409c81f4bfca0c83e']='python.d/web_log.conf'
 ['5ff1bcaa58695754e2f6980bfe19f579']='health.d/entropy.conf'
 ['609c6c57605033da96ea65e50c90201c']='charts.d/apache.conf'
+ ['60a13375b3072300bd7552cb5ee9762b']='health.d/netfilter.conf'
 ['611130db85bad90f966b52055147c81e']='python.d/httpcheck.conf'
 ['61b7ed36f35e7bd930f5f7f91694a112']='charts.d/postfix.conf'
 ['621f10b257a11add5ff5aff41e9662e3']='health.d/memcached.conf'
@@ -282,12 +316,14 @@ declare -A configs_signatures=(
 ['66c068eaa3672fbe4e2448e330b3511c']='python.d/web_log.conf'
 ['66dfe138058ca26a31a118007eb31f35']='health.d/nginx.conf'
 ['6814b9bc84483db428f6a479ba221855']='python.d/mysql.conf'
+ ['6848e78a5a1c349c6c42d6245d6530ad']='python.d/boinc.conf'
 ['68607aef1802ed3dc0cd593bf6073beb']='python.d/postfix.conf'
 ['6a18f61a595c0d48c3363bcc0dbfa6b9']='health_alarm_notify.conf'
 ['6a47af861ad3dd112124c37fbf09672b']='apps_groups.conf'
 ['6aa4507f86657383917a0407f2a9cc0d']='python.d.conf'
 ['6acad8ce5c33e642742825db0eb9bb56']='python.d/web_log.conf'
 ['6b39de5d85db45115db236347a6896d4']='health.d/named.conf'
+ ['6b598533309e08d71023e46801d45d7e']='apps_groups.conf'
 ['6bb278bd9e171c4cb5c0fe639231288b']='python.d/web_log.conf'
 ['6bf0de6e3b251b765b10a71d8c5c319d']='python.d/apache.conf'
 ['6c9f2f0abe49a6f1a69db052ebcef1bf']='python.d/elasticsearch.conf'
@@ -297,6 +333,7 @@ declare -A configs_signatures=(
 ['6e8366993709652fe7fc00e5d6a0a136']='charts.d/mysql.conf'
 ['6ea958ca521e0514af57c08b518d8c5c']='health.d/backend.conf'
 ['6f303ccfdc21c7b122758cea8c15e249']='python.d.conf'
+ ['6f54474c885234af0c792d135644d230']='python.d.conf'
 ['7005feb3eb5d06416d07cdf7e7c54425']='python.d/ntpd.conf'
 ['70105b1744a8e13f49083d7f1981aea2']='python.d/ipfs.conf'
 ['707a63f53f4b32e01d134ae90ba94aad']='health_alarm_notify.conf'
@@ -306,6 +343,7 @@ declare -A configs_signatures=(
 ['7120cba2f55b1c0a97a0e10d4f6ef751']='health.d/ipmi.conf'
 ['72246c32511197d87b004e67e4c8da36']='python.d/portcheck.conf'
 ['729b3e24a72f7d566fd429617d51a21b']='health.d/web_log.conf'
+ ['72ea87f658483f47c38994291af488e8']='health_alarm_notify.conf'
 ['73125ae64d5c6e9361944cd9bd14844e']='python.d/exim.conf'
 ['731a1fcfe9b2da1b9d685056a59541b8']='python.d/hddtemp.conf'
 ['73a8e10dfe4183aca751e9e2a80dabe3']='node.d.conf'
@@ -323,8 +361,10 @@ declare -A configs_signatures=(
 ['769aa4cdcdc3d78d0328d1f9e4edcdf9']='python.d/mysql.conf'
 ['76a0c1b21e49850442a43efddb15a81e']='health.d/tcp_orphans.conf'
 ['76a31091f42f2be1fab3bb56bb7ea400']='health_alarm_notify.conf'
+ ['76edb4cc11935aadaff53129c63457aa']='python.d.conf'
 ['777f4da70f461ef675bde07fb3644312']='python.d/redis.conf'
 ['777f55a95c5c25cf6176fece1ebbf4b8']='apps_groups.conf'
+ ['77b256144293ebfabad31779a5326948']='python.d/phpfpm.conf'
 ['7808ba2ca26bd0642270740cf6a8ee59']='charts.d/mem_apps.conf'
 ['7830066c46a7e5f9682b8d3f4566b4e5']='python.d/cpufreq.conf'
 ['78bb08809dffcb62e9bc493840f9c039']='python.d/squid.conf'
@@ -341,6 +381,7 @@ declare -A configs_signatures=(
 ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
 ['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf'
 ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf'
+ ['7fb8184d56a27040e73261ed9c6fc76f']='health_alarm_notify.conf'
 ['80266bddd3df374923c750a6de91d120']='health.d/apache.conf'
 ['803a7f9dcb942eeac0fd764b9e3e38ca']='fping.conf'
 ['80d242d619eb7e91cebfdbf58d79b0f8']='health.d/disks.conf'
@@ -349,6 +390,7 @@ declare -A configs_signatures=(
 ['81255035f6d53534938085df72cdef23']='health.d/nginx.conf'
 ['8170ba3ae507cf9322bd60350348552e']='health.d/net.conf'
 ['81af92c7050873c7de2fb42e0c3f04f4']='python.d/tomcat.conf'
+ ['81f7c857a9a7bcf12f500166bd6c7499']='health.d/linux_power_supply.conf'
 ['81fd16f29d5f3d422fe1cee82dc8ed9d']='health.d/cpu.conf'
 ['8213d921b6a8382e27052fb42d81db3d']='python.d/freeradius.conf'
 ['8214bb8f4b005aa4691fcd38f7331e8f']='health.d/swap.conf'
@@ -362,12 +404,14 @@ declare -A configs_signatures=(
 ['846ce94bfeeb90c0dc6a89e8d25f1a68']='health.d/named.conf'
 ['846f6039460aa317f165d91a54cd8b07']='health.d/stiebeleltron.conf'
 ['8490f690d97adacc4e2096df82e7e8a5']='charts.d/cpufreq.conf'
+ ['86a0d8d5619b5134e2d050805b45d6c3']='python.d/unbound.conf'
 ['87155bea7383028b0c1846c802cfdd81']='python.d/mdstat.conf'
 ['871bbeea33b83ea9755600b6d574919c']='python.d/web_log.conf'
 ['87224d2f2b87646f3c0d38cc1eb30112']='python.d/nsd.conf'
 ['87615ae5ac2412d853c717383fa53781']='python.d/chrony.conf'
 ['87642c568093daf3b2c30c5beffe2225']='python.d/elasticsearch.conf'
 ['8810140ce9c09af1d18b9602c4003904']='health_alarm_notify.conf'
+ ['886975ecb9a4e856151dc71024b122e6']='health.d/apcupsd.conf'
 ['8891fb423f6b987281d7913bb6c1c024']='health.d/ipc.conf'
 ['88e3b51b6b3fe8f317df82a2d4fbb990']='python.d.conf'
 ['88f77865f75c9fb61c97d700bd4561ee']='python.d/mysql.conf'
@@ -377,15 +421,18 @@ declare -A configs_signatures=(
 ['8a1b95d375992d7b11330a0ac46f369c']='health.d/disks.conf'
 ['8a66a3085ad8892a002ff39b18b2cb07']='python.d/fail2ban.conf'
 ['8abc7f66746b201b5b0af45c419d53bc']='health.d/bind_rndc.conf'
+ ['8b834a0f343a8e620dbb639270a84cce']='health.d/mdstat.conf'
 ['8c0f037f8ad506c41acdbc4f9f6cead6']='health_alarm_notify.conf'
 ['8c1d41e2c88aeca78bc319ed74c8748c']='python.d/phpfpm.conf'
 ['8d0552371a7c9725a04196fa560813d1']='health.d/cpu.conf'
 ['8d24873bb25c195026918f15626310ea']='health.d/softnet.conf'
+ ['8d736f551571675244d853bb2f53b3da']='health.d/load.conf'
 ['8dc0bd0a70b5117454bd5f5b98f91c2c']='health.d/disks.conf'
 ['8dc6a32b8e2995cbdd527c621a72c4fb']='health.d/ram.conf'
 ['8ec636a4f96158044d2cec0fd1ff8452']='python.d/rabbitmq.conf'
 ['8ed596c4f6f85b24a890cfe95f10ce9a']='python.d/ntpd.conf'
 ['8f4f925c1e97dd164007495ec5135ffc']='health.d/fping.conf'
+ ['8f520e787d995943e61a777c826bddf7']='python.d/litespeed.conf'
 ['8f7b734ea0f89abf8acbb47c50234477']='health.d/web_log.conf'
 ['8fd472a854b0996327e8ed3562161182']='health_alarm_notify.conf'
 ['919911d13901d60a7580f5dfd7fc87bb']='health.d/ram.conf'
@@ -397,7 +444,9 @@ declare -A configs_signatures=(
 ['92024bbe088e55251665fb666305ff66']='python.d/mysql.conf'
 ['920574fcfe56d5c9c11a583905e9db62']='health.d/tcp_conn.conf'
 ['9347bcce0b3574ac5193d43248d2e3cc']='python.d/chrony.conf'
+ ['93c7c00103f63ea3b4eea1951dd16c95']='health_alarm_notify.conf'
 ['94bb961f83ec724cf86239328f73a3db']='health.d/redis.conf'
+ ['94e567bbefd37db0c55d880ff61188a6']='health_alarm_notify.conf'
 ['9542f80def48ba105190f6cdaa18248e']='health.d/mysql.conf'
 ['95a27691df972832a5e7626ae59b0af6']='python.d/portcheck.conf'
 ['96997c8bf3a65b9eac848cafa8c127d2']='python.d/portcheck.conf'
@@ -406,14 +455,17 @@ declare -A configs_signatures=(
 ['97f337eb96213f3ede05e522e3743a6c']='python.d/memcached.conf'
 ['98e4dd6ba71bf76767bc59c63a51b617']='apps_groups.conf'
 ['98f6f917138949228b9fb88c61e5aea8']='charts.d/cpufreq.conf'
+ ['9962e20641036566279ac800861b7963']='python.d.conf'
 ['99a3de85d1e7826ed64a5f8576712e5d']='python.d.conf'
 ['99b06e68f1da5917ae4cf60e901439f6']='health.d/ram.conf'
 ['99b6030ce25c8fee4598179c0f95fb0b']='health.d/redis.conf'
 ['99c1617448abbdc493976ab9bda5ce02']='apps_groups.conf'
 ['9a525125e705ca5a3146b3399be4510a']='python.d/nginx_plus.conf'
+ ['9a89bbdf5d9732b9a29a0aa82714059b']='health.d/dockerd.conf'
 ['9a8a459a3841b78d4c6ef07428ad2fe1']='health.d/entropy.conf'
 ['9b6eee7f2febb29efac2b7ea9fcab9be']='charts.d/nut.conf'
 ['9c0185ceff15415bc59b2ce2c1f04367']='apps_groups.conf'
+ ['9c457056c9ee0d50f9717da647bbd444']='health_alarm_notify.conf'
 ['9c8ddfa810d83ae58c8614ee5229e66b']='health.d/disks.conf'
 ['9c981c75bdf4b1637f7113e7e45eb2bf']='health.d/memcached.conf'
 ['9d304e41e32721224a743f25534263d9']='python.d/retroshare.conf'
@@ -449,6 +501,7 @@ declare -A configs_signatures=(
 ['a731b7b164f42717c1c9a778ee637ff3']='health.d/memcached.conf'
 ['a7320c6f26191b9599ec3bc4be007a93']='health.d/swap.conf'
 ['a752e51d923e15add4a11fa8f3be935a']='health_email_recipients.conf'
+ ['a78d59c2ad14a17b9b8c7fa5d796b427']='python.d.conf'
 ['a7cceeafb1e6ef1ead503ab65f687902']='apps_groups.conf'
 ['a8167dafeac0b66696a1d9b08e815cda']='health.d/disks.conf'
 ['a837986be634fd7648bcdf939019424a']='apps_groups.conf'
@@ -468,6 +521,7 @@ declare -A configs_signatures=(
 ['ab3902bf769ed35219691c95a3954ebb']='python.d/portcheck.conf'
 ['abaf2e021f9f6ee5d1c4e4726f47348e']='health.d/ipc.conf'
 ['abe1a80ac6d6f97bd324e72f31e8256e']='health.d/ram.conf'
+ ['ac8a91f0297bf7ebb8970f8cae4b3477']='health.d/ipc.conf'
 ['acaa6731a272f6d251afb357e99b518f']='apps_groups.conf'
 ['ad15b251b93f8b16bb33ec508f44a598']='health.d/netfilter.conf'
 ['ade389c1b6efe0cff47c33e662731f0a']='python.d/squid.conf'
@@ -490,8 +544,10 @@ declare -A configs_signatures=(
 ['b32164929eda7449a9677044e11151bf']='python.d.conf'
 ['b3d48935ab7f44a57d40ad349df0033d']='python.d/postgres.conf'
 ['b3fc4749b132e55ac0d3a0f92859237e']='health.d/tcp_resets.conf'
+ ['b44e33ba5c7a7306b467ac9c9b698895']='health.d/bcache.conf'
 ['b4825f731cc7eb03b374eade14a453c1']='health.d/net.conf'
 ['b5246eed059e33e0903a819fa5460ce0']='python.d/ipfs.conf'
+ ['b544e5934ac79a9548b5af6756c042a6']='apps_groups.conf'
 ['b5b5a8d6d991fb1cef8d80afa23ba114']='python.d/cpufreq.conf'
 ['b636e5e603f9d93e52c7577ac8c6bf0c']='health.d/entropy.conf'
 ['b68706bb8101ef85192db92f865a5d80']='health_alarm_notify.conf'
@@ -505,6 +561,7 @@ declare -A configs_signatures=(
 ['b8969be5b3ceb4a99477937119bd4323']='python.d.conf'
 ['b8aff60806fb6829a4e72a824e655375']='health.d/beanstalkd.conf'
 ['b8b87574fd496a66ede884c5336493bd']='python.d/phpfpm.conf'
+ ['b8ca1449d142b7f1cd202d875d400882']='health.d/apcupsd.conf'
 ['b915126262d08aa9da81de539a58a3fb']='python.d/redis.conf'
 ['ba11ea2d2f632b2de4b1224bcdc54f07']='python.d/smartd_log.conf'
 ['bb51112d01ff20053196a57632df8962']='apps_groups.conf'
@@ -515,8 +572,11 @@ declare -A configs_signatures=(
 ['bda5517ea01640cfdfa0a27549619d6a']='health.d/memcached.conf'
 ['bdec19a255367f22b6fb652d0bef6bad']='python.d/httpcheck.conf'
 ['bf66f113b2dd8d8fb444cbd5650f284c']='health_alarm_notify.conf'
+ ['bfa2f469e83cf2961963841e143049e6']='health.d/tcp_listen.conf'
 ['bfd35a87c77c3a1dbe218fd02b529208']='charts.d/example.conf'
+ ['bff38dfe6c879f93ac49b77990fce1cc']='python.d/ipfs.conf'
 ['c004430f55310ae9ed489c4905ed02cb']='charts.d/apache.conf'
+ ['c0385cdf9e87aca01f5dee2a5d89c467']='health_alarm_notify.conf'
 ['c080e006f544c949baca33cc24a9c126']='health_alarm_notify.conf'
 ['c0c4c63384ef408f0715331e7615aa60']='python.d/ceph.conf'
 ['c132d2e257fc4df2925be7ad75100d5b']='health.d/entropy.conf'
@@ -534,6 +594,7 @@ declare -A configs_signatures=(
 ['c84fd3292710091802e443c8e688dee1']='health_alarm_notify.conf'
 ['c878060687b85c46006e9041f3632d88']='health_alarm_notify.conf'
 ['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
+ ['c8e339491a83df22decbdf5f1f8a037f']='python.d.conf'
 ['c94cb4f4eeaa13c1dcee6248deb01829']='python.d/postgres.conf'
 ['c9a16df512b4a9ce7fa65f5a69bda20a']='python.d/web_log.conf'
 ['c9b792755de59d842ba95f8c315d94c8']='health.d/swap.conf'
@@ -553,12 +614,16 @@ declare -A configs_signatures=(
 ['ccde91d209aeb02c4a6be0e43a8d92b3']='health.d/apache.conf'
 ['cce5176664d29d137fa7575b77de01e4']='health.d/tcp_resets.conf'
 ['cd08e5534c94bf1f2cd28396c76b8bbc']='health.d/ram.conf'
+ ['cd15a9a77a46d66ca0beb55f2acb7538']='health.d/mysql.conf'
 ['cd9a7de356d6424c4a71d87053726c86']='python.d/bind_rndc.conf'
 ['cdd504812ff93073c57d02209d4d0f69']='health.d/cpu.conf'
+ ['cde652b15742e377e98e79fb9eb2acab']='health_alarm_notify.conf'
 ['ce0fa3485a0d8d3aa80b25ab0c70cc5a']='charts.d/apcupsd.conf'
 ['ce2e8768964a936f58c4c2144aee8a01']='health_alarm_notify.conf'
 ['ce3b65eac6c472b21905f7f72104f4c9']='python.d/nginx.conf'
+ ['ce937f8b9ab7820b61ce9fcde6b946e8']='charts.d/nut.conf'
 ['cf2c9096b3a8c506a3ec76fa52574395']='charts.d/phpfpm.conf'
+ ['cf46545065f7698c4d529fdc77955274']='python.d/puppet.conf'
 ['cf48dfd828af70bea04db7a809f94358']='health.d/haproxy.conf'
 ['cf8b87ede2d3233b6f55f4690af7fb08']='python.d/smartd_log.conf'
 ['cfecf298bdafaa7e0a3a263548e82132']='python.d/sensors.conf'
@@ -579,9 +644,11 @@ declare -A configs_signatures=(
 ['d56c28ece8354850011f213d94d02fe0']='python.d/hddtemp.conf'
 ['d5dab509d8792f795bece27de39dd476']='health.d/mysql.conf'
 ['d69eba15d3e968187a938a7b98e22dda']='python.d.conf'
+ ['d6cd34c96e47a8a63732a6f1512f5c39']='python.d/ovpn_status_log.conf'
 ['d712df81b17971884443a4a9bc996c9e']='health_alarm_notify.conf'
 ['d74dc63fbe631dab9a2ff1b0f5d71719']='python.d/hddtemp.conf'
 ['d7e0bd12d4a60a761dcab3531a841711']='python.d/phpfpm.conf'
+ ['d86e0502e394e0a16c0ca574db462653']='health.d/megacli.conf'
 ['d8dc489e32f7114c6298fce94e86a8ef']='health.d/entropy.conf'
 ['d9036091e2232fc2b8bfa8c7484dea28']='apps_groups.conf'
 ['d9258e671d0d0b6498af1ce16ef030d2']='apps_groups.conf'
@@ -592,6 +659,7 @@ declare -A configs_signatures=(
 ['dc0d2b96378f290eec3fcf98b89ad824']='python.d/cpufreq.conf'
 ['dc9c2a66778623a759706c14c3d91983']='health.d/net.conf'
 ['dd220677c42c487549952048ee1f7750']='python.d/postgres.conf'
+ ['dd221c29dfb7c5586fc906748aa7c831']='health.d/tcp_listen.conf'
 ['dd7764507804a2296bfd091a58ad4ad7']='health.d/memcached.conf'
 ['dd8254ef74509a3e38cb2838e30f7e63']='health.d/disks.conf'
 ['ddda2bb1c88be03b637d3285406f7910']='health.d/named.conf'
@@ -620,9 +688,11 @@ declare -A configs_signatures=(
 ['e3e0c742427c9609ce923e845a0c8532']='health.d/ceph.conf'
 ['e3e5bc57335c489f01b8559f5c70e112']='python.d/squid.conf'
 ['e40947d22f7ed5359f12fc89e3512963']='python.d/dovecot.conf'
+ ['e445de5a4d6953bddec36d85b1b2771e']='python.d/linux_power_supply.conf'
 ['e449e5582279742496550df14b6fca95']='health.d/entropy.conf'
 ['e4ed13f996434ac17b40a2228c96283b']='python.d/tomcat.conf'
 ['e5f32f54d6d6728f21f9ac26f37d6573']='python.d/example.conf'
+ ['e707ad89a146004ae281d66a4e01e5c1']='health.d/load.conf'
 ['e70a7ee4999f30c6ceb75f31088a3a34']='python.d/powerdns.conf'
 ['e734c5951a8764d4d9de046dd7cf7407']='health.d/softnet.conf'
 ['e7ae3f2b00b9e5178acfe4f5e46228b7']='health.d/tcp_resets.conf'
@@ -644,6 +714,7 @@ declare -A configs_signatures=(
 ['edb48efc8f446624001e07d04f6cad1a']='apps_groups.conf'
 ['ee5343881744e6a97e6ee5cdd329cfb8']='health.d/retroshare.conf'
 ['eee974cea7534aeed2d38bcf0edf3f9e']='python.d/springboot.conf'
+ ['ef067629c7456cb934f110ce15200131']='stream.conf'
 ['ef1861bf5725d91e773cbdba05687597']='python.d.conf'
 ['ef9916ea144878a9f37cbb6b1b29da10']='health.d/squid.conf'
 ['f075be84c5bfac7e34de2a091841360c']='statsd.d/example.conf'
@@ -651,14 +722,18 @@ declare -A configs_signatures=(
 ['f1446cb3f1a905ee06defa2aa15ee806']='python.d/web_log.conf'
 ['f1682835e3414f60284c13bf1662e50f']='health.d/web_log.conf'
 ['f1f114647ed185c4812c361b1d870b44']='python.d/sensors.conf'
+ ['f2622abcee86b514976a053b528553d4']='python.d/web_log.conf'
 ['f2f1b8656f5011e965ac45b818cf668d']='apps_groups.conf'
+ ['f3c56a769ffdf811ec13467d8f7cd3c0']='python.d/apache.conf'
 ['f42389e5497a28205ba6fef4f716db4f']='python.d/nginx.conf'
 ['f42df9f13abfae2426519c6728b34882']='charts.d/example.conf'
+ ['f4609cbd5e748f41ad4f1ea3d2dcfde0']='python.d.conf'
 ['f4c5d88c34d3fb853498124177cc77f1']='python.d.conf'
 ['f5736e0b2945182cb659cb0713eff923']='apps_groups.conf'
 ['f66e5236ba1245bb2e5fd99191f114c6']='charts.d/hddtemp.conf'
 ['f68ac0fca6b4ffc96097779344cabac6']='health.d/tcp_listen.conf'
 ['f6c6656f900ff52d159dca12d624016a']='python.d/postgres.conf'
+ ['f72e44a305567c9b21a244ebd6da6800']='health_alarm_notify.conf'
 ['f7401a6e7c7d4fe2e0e2be7f7f523275']='health.d/web_log.conf'
 ['f7a99e94231beda85c6254912d8d31c1']='python.d/tomcat.conf'
 ['f82924563e41d99cdae5431f0af69155']='python.d.conf'
@@ -672,14 +747,17 @@ declare -A configs_signatures=(
 ['fbdb6f5d3906d3d8ea4e28f6ba6965a6']='python.d/go_expvar.conf'
 ['fc11bd9255ac382f442f31c1f1a32532']='health_alarm_notify.conf'
 ['fc40b83f173bc4676d686867a8369a62']='python.d/dns_query_time.conf'
+ ['fc64f44eb19a8b5a88ba8dc28de355f6']='python.d/puppet.conf'
 ['fc987459f82e251e31c41d822e5e8202']='python.d/nsd.conf'
 ['fd3164e6e8cb6726706267eae49aa082']='health_alarm_notify.conf'
 ['fdd11640ba626cc2064c2fe3ea3eee4c']='health.d/cpu.conf'
 ['fde44f62c8d7e52f09705cd273fae6b1']='charts.d/tomcat.conf'
 ['fdea185e0e52b459b48852aa37f20e0f']='apps_groups.conf'
 ['fe069e4d6579ecdda7f36ac2318ffefc']='python.d/exim.conf'
+ ['fe2b15369de13b83b18e2ff5c7594a57']='python.d/monit.conf'
 ['fe478efe2e721724edb1fe2ef1addf93']='health_alarm_notify.conf'
 ['feb8bcf828aa2529a7ee4a140feeb12d']='health.d/net.conf'
 ['ff1b3d8ae8b2149c711d8da9b7a9c4bd']='health_alarm_notify.conf'
+ ['ff3f0a9b1bf488a5075850cc16de3c26']='python.d/monit.conf'
 ['ff940c5396f16d05deb5c5859832ee48']='health.d/swap.conf'
 )
diff --git a/configure b/configure
index 054590f7f..cbcc0a1ee 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for netdata 1.10.0.
+# Generated by GNU Autoconf 2.69 for netdata 1.11.0_rolling.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,12 +577,12 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='netdata'
 PACKAGE_TARNAME='netdata'
-PACKAGE_VERSION='1.10.0'
-PACKAGE_STRING='netdata 1.10.0'
+PACKAGE_VERSION='1.11.0_rolling'
+PACKAGE_STRING='netdata 1.11.0_rolling'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
-ac_unique_file="src/main.c"
+ac_unique_file="daemon/main.c"
 # Factoring default headers for most tests.
 ac_includes_default="\
 #include <stdio.h>
@@ -640,6 +640,7 @@ OPTIONAL_MATH_CLFAGS
 webdir
 pluginsdir
 logdir
+libconfigdir
 configdir
 pythondir
 nodedir
@@ -647,6 +648,7 @@ chartsdir
 cachedir
 registrydir
 varlibdir
+build_target
 ENABLE_PLUGIN_CGROUP_NETWORK_FALSE
 ENABLE_PLUGIN_CGROUP_NETWORK_TRUE
 ENABLE_PLUGIN_NFACCT_FALSE
@@ -1374,7 +1376,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures netdata 1.10.0 to adapt to many kinds of systems.
+\`configure' configures netdata 1.11.0_rolling to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1444,7 +1446,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of netdata 1.10.0:";;
+     short | recursive ) echo "Configuration of netdata 1.11.0_rolling:";;
   esac
   cat <<\_ACEOF
@@ -1587,7 +1589,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-netdata configure 1.10.0
+netdata configure 1.11.0_rolling
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2365,7 +2367,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by netdata $as_me 1.10.0, which was
+It was created by netdata $as_me 1.11.0_rolling, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -2646,6 +2648,10 @@ $as_echo "$as_me: creating cache $cache_file" >&6;}
 fi
 as_fn_append ac_header_list " sys/prctl.h"
+as_fn_append ac_header_list " sys/vfs.h"
+as_fn_append ac_header_list " sys/statfs.h"
+as_fn_append ac_header_list " sys/statvfs.h"
+as_fn_append ac_header_list " sys/mount.h"
 as_fn_append ac_func_list " accept4"
 as_fn_append ac_header_list " linux/netfilter/nfnetlink_conntrack.h"
 # Check that the precious variables saved in the cache have kept the same
@@ -2746,7 +2752,7 @@ $as_echo "$as_me: ***************** MAINTAINER MODE *****************" >&6;}
 	PACKAGE_BUILT_DATE=$(date '+%d %b %Y')
 fi
 
-PACKAGE_RPM_VERSION="1.10.0"
+PACKAGE_RPM_VERSION="1.10.1"
 
@@ -2787,6 +2793,10 @@
 ac_config_headers="$ac_config_headers config.h"
 
+
+
+
+
 am__api_version='1.14'
 
 # Find a good install program.  We prefer a C program (faster),
@@ -3273,7 +3283,7 @@ fi
 # Define the identity of the package.
  PACKAGE='netdata'
- VERSION='1.10.0'
+ VERSION='1.11.0_rolling'
 
 cat >>confdefs.h <<_ACEOF
@@ -3450,6 +3460,47 @@ END
 as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
   fi
 fi
+
+# Check whether --enable-silent-rules was given.
+if test "${enable_silent_rules+set}" = set; then :
+  enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+  yes) AM_DEFAULT_VERBOSITY=0;;
+   no) AM_DEFAULT_VERBOSITY=1;;
+    *) AM_DEFAULT_VERBOSITY=0;;
+esac
+am_make=${MAKE-make}
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
+if ${am_cv_make_support_nested_variables+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if $as_echo 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+	@$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+  am_cv_make_support_nested_variables=yes
+else
+  am_cv_make_support_nested_variables=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+$as_echo "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+    AM_V='$(V)'
+  AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+  AM_V=$AM_DEFAULT_VERBOSITY
+  AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
+
+
 # Make sure we can run config.sub.
 $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
   as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
@@ -4568,6 +4619,7 @@ fi
 
 
 
+
 if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
 	if test -n "$ac_tool_prefix"; then
   # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args.
@@ -5635,7 +5687,7 @@ _ACEOF
 fi
 done
 
-for ac_func in sched_setscheduler sched_get_priority_min sched_get_priority_max nice
+for ac_func in sched_setscheduler sched_getscheduler sched_getparam sched_get_priority_min sched_get_priority_max getpriority setpriority nice
 do :
   as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
 ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@@ -6102,6 +6154,18 @@ done
 
+
+
+
+
+
+
+
+
+
+
+
+
 
 if test "${enable_accept4}" != "no"; then
@@ -6131,14 +6195,17 @@ $as_echo_n "checking operating system... " >&6; }
 case "$host_os" in
 freebsd*)
     build_target=freebsd
+    build_target_id=2
     CFLAGS="${CFLAGS} -I/usr/local/include"
     ;;
 darwin*)
     build_target=macos
+    build_target_id=3
     LDFLAGS="${LDFLAGS} -framework CoreFoundation -framework IOKit"
     ;;
 *)
     build_target=linux
+    build_target_id=1
    ;;
 esac
 
@@ -6166,8 +6233,8 @@ else
   LINUX_FALSE=
 fi
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${build_target}" >&5
-$as_echo "${build_target}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${build_target} with id ${build_target_id}" >&5
+$as_echo "${build_target} with id ${build_target_id}" >&6; }
 
 
 # -----------------------------------------------------------------------------
@@ -7974,26 +8041,42 @@ cat >>confdefs.h <<_ACEOF
 _ACEOF
 
 
-varlibdir="\$(localstatedir)/lib/netdata"
+varlibdir="${localstatedir}/lib/netdata"
+registrydir="${localstatedir}/lib/netdata/registry"
+cachedir="${localstatedir}/cache/netdata"
+chartsdir="${libexecdir}/netdata/charts.d"
+nodedir="${libexecdir}/netdata/node.d"
+pythondir="${libexecdir}/netdata/python.d"
+configdir="${sysconfdir}/netdata"
+libconfigdir="${libdir}/netdata/conf.d"
+logdir="${localstatedir}/log/netdata"
+pluginsdir="${libexecdir}/netdata/plugins.d"
 
-registrydir="\$(localstatedir)/lib/netdata/registry"
-cachedir="\$(localstatedir)/cache/netdata"
-chartsdir="\$(libexecdir)/netdata/charts.d"
-nodedir="\$(libexecdir)/netdata/node.d"
-pythondir="\$(libexecdir)/netdata/python.d"
-configdir="\$(sysconfdir)/netdata"
-logdir="\$(localstatedir)/log/netdata"
-pluginsdir="\$(libexecdir)/netdata/plugins.d"
+CPPFLAGS="\
+    -DTARGET_OS=${build_target_id} \
+    -DVARLIB_DIR=\"\\\"${varlibdir}\\\"\" \
+    -DCACHE_DIR=\"\\\"${cachedir}\\\"\" \
+    -DCONFIG_DIR=\"\\\"${configdir}\\\"\" \
+    -DLIBCONFIG_DIR=\"\\\"${libconfigdir}\\\"\" \
+    -DLOG_DIR=\"\\\"${logdir}\\\"\" \
+    -DPLUGINS_DIR=\"\\\"${pluginsdir}\\\"\" \
+    -DRUN_DIR=\"\\\"${localstatedir}/run/netdata\\\"\" \
+    -DWEB_DIR=\"\\\"${webdir}\\\"\" \
+"
 
@@ -8007,7 +8090,7 @@ pluginsdir="\$(libexecdir)/netdata/plugins.d"
 
-ac_config_files="$ac_config_files Makefile charts.d/Makefile conf.d/Makefile netdata.spec python.d/Makefile node.d/Makefile plugins.d/Makefile src/Makefile system/Makefile web/Makefile diagrams/Makefile makeself/Makefile contrib/Makefile tests/Makefile"
+ac_config_files="$ac_config_files Makefile netdata.spec backends/graphite/Makefile backends/json/Makefile backends/Makefile backends/opentsdb/Makefile backends/prometheus/Makefile collectors/Makefile collectors/apps.plugin/Makefile collectors/cgroups.plugin/Makefile collectors/charts.d.plugin/Makefile collectors/checks.plugin/Makefile collectors/diskspace.plugin/Makefile collectors/fping.plugin/Makefile collectors/freebsd.plugin/Makefile collectors/freeipmi.plugin/Makefile collectors/idlejitter.plugin/Makefile collectors/macos.plugin/Makefile collectors/nfacct.plugin/Makefile collectors/node.d.plugin/Makefile collectors/plugins.d/Makefile collectors/proc.plugin/Makefile collectors/python.d.plugin/Makefile collectors/statsd.plugin/Makefile collectors/tc.plugin/Makefile contrib/Makefile daemon/Makefile database/Makefile diagrams/Makefile health/Makefile health/notifications/Makefile libnetdata/Makefile libnetdata/adaptive_resortable_list/Makefile libnetdata/avl/Makefile libnetdata/buffer/Makefile libnetdata/clocks/Makefile libnetdata/config/Makefile libnetdata/dictionary/Makefile libnetdata/eval/Makefile libnetdata/locks/Makefile libnetdata/log/Makefile libnetdata/popen/Makefile libnetdata/procfile/Makefile libnetdata/simple_pattern/Makefile libnetdata/socket/Makefile libnetdata/statistical/Makefile libnetdata/storage_number/Makefile libnetdata/threads/Makefile libnetdata/url/Makefile makeself/Makefile registry/Makefile streaming/Makefile system/Makefile tests/Makefile web/Makefile web/api/Makefile web/api/badges/Makefile web/api/exporters/Makefile web/api/exporters/shell/Makefile web/api/exporters/prometheus/Makefile web/api/formatters/Makefile web/api/formatters/csv/Makefile web/api/formatters/json/Makefile web/api/formatters/ssv/Makefile web/api/formatters/value/Makefile web/api/queries/Makefile web/api/queries/average/Makefile web/api/queries/des/Makefile web/api/queries/incremental_sum/Makefile web/api/queries/max/Makefile web/api/queries/median/Makefile web/api/queries/min/Makefile web/api/queries/ses/Makefile web/api/queries/stddev/Makefile web/api/queries/sum/Makefile web/gui/Makefile web/server/Makefile web/server/single/Makefile web/server/multi/Makefile web/server/static/Makefile"
 
 cat >confcache <<\_ACEOF
 # This file is a shell script that caches the results of configure
@@ -8575,7 +8658,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by netdata $as_me 1.10.0, which was
+This file was extended by netdata $as_me 1.11.0_rolling, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES = $CONFIG_FILES
@@ -8641,7 +8724,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-netdata config.status 1.10.0
+netdata config.status 1.11.0_rolling
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
@@ -8773,19 +8856,85 @@ do
     "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
     "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
-    "charts.d/Makefile") CONFIG_FILES="$CONFIG_FILES charts.d/Makefile" ;;
-    "conf.d/Makefile") CONFIG_FILES="$CONFIG_FILES conf.d/Makefile" ;;
     "netdata.spec") CONFIG_FILES="$CONFIG_FILES netdata.spec" ;;
-    "python.d/Makefile") CONFIG_FILES="$CONFIG_FILES python.d/Makefile" ;;
-    "node.d/Makefile") CONFIG_FILES="$CONFIG_FILES node.d/Makefile" ;;
-    "plugins.d/Makefile") CONFIG_FILES="$CONFIG_FILES plugins.d/Makefile" ;;
-    "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
-    "system/Makefile") CONFIG_FILES="$CONFIG_FILES system/Makefile" ;;
-    "web/Makefile") CONFIG_FILES="$CONFIG_FILES web/Makefile" ;;
+    "backends/graphite/Makefile") CONFIG_FILES="$CONFIG_FILES backends/graphite/Makefile" ;;
+    "backends/json/Makefile") CONFIG_FILES="$CONFIG_FILES backends/json/Makefile" ;;
+    "backends/Makefile") CONFIG_FILES="$CONFIG_FILES backends/Makefile" ;;
+    "backends/opentsdb/Makefile") CONFIG_FILES="$CONFIG_FILES backends/opentsdb/Makefile" ;;
+    "backends/prometheus/Makefile") CONFIG_FILES="$CONFIG_FILES backends/prometheus/Makefile" ;;
+    "collectors/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/Makefile" ;;
+    "collectors/apps.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/apps.plugin/Makefile" ;;
+    "collectors/cgroups.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/cgroups.plugin/Makefile" ;;
+    "collectors/charts.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/charts.d.plugin/Makefile" ;;
+    "collectors/checks.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/checks.plugin/Makefile" ;;
+    "collectors/diskspace.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/diskspace.plugin/Makefile" ;;
+    "collectors/fping.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/fping.plugin/Makefile" ;;
+    "collectors/freebsd.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/freebsd.plugin/Makefile" ;;
+    "collectors/freeipmi.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/freeipmi.plugin/Makefile" ;;
+    "collectors/idlejitter.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/idlejitter.plugin/Makefile" ;;
+    "collectors/macos.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/macos.plugin/Makefile" ;;
+    "collectors/nfacct.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/nfacct.plugin/Makefile" ;;
+    "collectors/node.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/node.d.plugin/Makefile" ;;
+    "collectors/plugins.d/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/plugins.d/Makefile" ;;
+    "collectors/proc.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/proc.plugin/Makefile" ;;
+    "collectors/python.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/python.d.plugin/Makefile" ;;
+    "collectors/statsd.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/statsd.plugin/Makefile" ;;
+    "collectors/tc.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/tc.plugin/Makefile" ;;
+    "contrib/Makefile") CONFIG_FILES="$CONFIG_FILES contrib/Makefile" ;;
+    "daemon/Makefile") CONFIG_FILES="$CONFIG_FILES daemon/Makefile" ;;
+    "database/Makefile") CONFIG_FILES="$CONFIG_FILES database/Makefile" ;;
     "diagrams/Makefile") CONFIG_FILES="$CONFIG_FILES diagrams/Makefile" ;;
+    "health/Makefile") CONFIG_FILES="$CONFIG_FILES health/Makefile" ;;
+    "health/notifications/Makefile") CONFIG_FILES="$CONFIG_FILES health/notifications/Makefile" ;;
+    "libnetdata/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/Makefile" ;;
+    "libnetdata/adaptive_resortable_list/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/adaptive_resortable_list/Makefile" ;;
+    "libnetdata/avl/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/avl/Makefile" ;;
+    "libnetdata/buffer/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/buffer/Makefile" ;;
+    "libnetdata/clocks/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/clocks/Makefile" ;;
+    "libnetdata/config/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/config/Makefile" ;;
+    "libnetdata/dictionary/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/dictionary/Makefile" ;;
+    "libnetdata/eval/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/eval/Makefile" ;;
+    "libnetdata/locks/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/locks/Makefile" ;;
+    "libnetdata/log/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/log/Makefile" ;;
+    "libnetdata/popen/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/popen/Makefile" ;;
+    "libnetdata/procfile/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/procfile/Makefile" ;;
+    "libnetdata/simple_pattern/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/simple_pattern/Makefile" ;;
+    "libnetdata/socket/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/socket/Makefile" ;;
+    "libnetdata/statistical/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/statistical/Makefile" ;;
+    "libnetdata/storage_number/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/storage_number/Makefile" ;;
+    "libnetdata/threads/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/threads/Makefile" ;;
+    "libnetdata/url/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/url/Makefile" ;;
     "makeself/Makefile") CONFIG_FILES="$CONFIG_FILES makeself/Makefile" ;;
-    "contrib/Makefile") CONFIG_FILES="$CONFIG_FILES contrib/Makefile" ;;
+    "registry/Makefile") CONFIG_FILES="$CONFIG_FILES registry/Makefile" ;;
+    "streaming/Makefile") CONFIG_FILES="$CONFIG_FILES streaming/Makefile" ;;
+    "system/Makefile") CONFIG_FILES="$CONFIG_FILES system/Makefile" ;;
     "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;;
+    "web/Makefile") CONFIG_FILES="$CONFIG_FILES web/Makefile" ;;
+    "web/api/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/Makefile" ;;
+    "web/api/badges/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/badges/Makefile" ;;
+    "web/api/exporters/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/Makefile" ;;
+    "web/api/exporters/shell/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/shell/Makefile" ;;
+    "web/api/exporters/prometheus/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/prometheus/Makefile" ;;
+    "web/api/formatters/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/Makefile" ;;
+    "web/api/formatters/csv/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/csv/Makefile" ;;
+    "web/api/formatters/json/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/json/Makefile" ;;
+    "web/api/formatters/ssv/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/ssv/Makefile" ;;
+    "web/api/formatters/value/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/value/Makefile" ;;
+    "web/api/queries/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/Makefile" ;;
+    "web/api/queries/average/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/average/Makefile" ;;
+    "web/api/queries/des/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/des/Makefile" ;;
+    "web/api/queries/incremental_sum/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/incremental_sum/Makefile" ;;
+    "web/api/queries/max/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/max/Makefile" ;;
+    "web/api/queries/median/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/median/Makefile" ;;
+    "web/api/queries/min/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/min/Makefile" ;;
+    "web/api/queries/ses/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/ses/Makefile" ;;
+    "web/api/queries/stddev/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/stddev/Makefile" ;;
+    "web/api/queries/sum/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/sum/Makefile" ;;
+    "web/gui/Makefile") CONFIG_FILES="$CONFIG_FILES web/gui/Makefile" ;;
+    "web/server/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/Makefile" ;;
+    "web/server/single/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/single/Makefile" ;;
+    "web/server/multi/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/multi/Makefile" ;;
+    "web/server/static/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/static/Makefile" ;;
 
   *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
   esac
diff --git a/configure.ac b/configure.ac
index d41ae9d63..2a7337379 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,16 +1,17 @@
 #
 # Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
 #
 AC_PREREQ(2.60)
 
 define([VERSION_MAJOR], [1])
 define([VERSION_MINOR], [10])
-define([VERSION_FIX], [0])
+define([VERSION_FIX], [1])
 define([VERSION_NUMBER], VERSION_MAJOR[.]VERSION_MINOR[.]VERSION_FIX)
-define([VERSION_SUFFIX], [])
+define([VERSION_SUFFIX], [_rolling])
 
 dnl Set to "1" for a first RPM release of a new version
-PACKAGE_RPM_RELEASE="1"
+PACKAGE_RPM_RELEASE="0.0.$(echo VERSION_SUFFIX | sed s/^_//)"
 
 # We do not use m4_esyscmd_s to support older autoconf.
 define([VERSION_STRING], m4_esyscmd(git describe 2>/dev/null | sed 's/^v//' | tr -d '\n'))
@@ -34,11 +35,19 @@ AC_SUBST([PACKAGE_RPM_RELEASE])
 
 AC_CONFIG_AUX_DIR([.])
 AC_CONFIG_HEADERS([config.h])
-AC_CONFIG_MACRO_DIR([m4])
-AC_CONFIG_SRCDIR([src/main.c])
-AM_INIT_AUTOMAKE([tar-pax])
+AC_CONFIG_MACRO_DIR([build/m4])
+AC_CONFIG_SRCDIR([daemon/main.c])
+define([AUTOMATE_INIT_OPTIONS], [tar-pax subdir-objects])
+m4_ifdef([AM_SILENT_RULES], [
+    define([AUTOMATE_INIT_OPTIONS], [tar-pax silent-rules subdir-objects])
+    ])
+AM_INIT_AUTOMAKE(AUTOMATE_INIT_OPTIONS)
+m4_ifdef([AM_SILENT_RULES], [
+    AM_SILENT_RULES([yes])
+    ])
 AC_CANONICAL_HOST
 AC_PROG_CC
+AM_PROG_CC_C_O
 AC_PROG_INSTALL
 PKG_PROG_PKG_CONFIG
 AC_USE_SYSTEM_EXTENSIONS
@@ -131,7 +140,7 @@ AX_GCC_FUNC_ATTRIBUTE([warn_unused_result])
 AC_CHECK_TYPES([struct timespec, clockid_t], [], [], [[#include <time.h>]])
 AC_SEARCH_LIBS([clock_gettime], [rt posix4])
 AC_CHECK_FUNCS([clock_gettime])
-AC_CHECK_FUNCS([sched_setscheduler sched_get_priority_min sched_get_priority_max nice])
+AC_CHECK_FUNCS([sched_setscheduler sched_getscheduler sched_getparam sched_get_priority_min sched_get_priority_max getpriority setpriority nice])
 AC_CHECK_FUNCS([recvmmsg])
 
 AC_TYPE_INT8_T
@@ -153,6 +162,10 @@ AC_HEADER_MAJOR
 AC_HEADER_RESOLV
 
 AC_CHECK_HEADERS_ONCE([sys/prctl.h])
+AC_CHECK_HEADERS_ONCE([sys/vfs.h])
+AC_CHECK_HEADERS_ONCE([sys/statfs.h])
+AC_CHECK_HEADERS_ONCE([sys/statvfs.h])
+AC_CHECK_HEADERS_ONCE([sys/mount.h])
 
 if test "${enable_accept4}" != "no"; then
     AC_CHECK_FUNCS_ONCE(accept4)
@@ -165,21 +178,24 @@ AC_MSG_CHECKING([operating system])
 case "$host_os" in
 freebsd*)
     build_target=freebsd
+    build_target_id=2
     CFLAGS="${CFLAGS} -I/usr/local/include"
     ;;
 darwin*)
     build_target=macos
+    build_target_id=3
     LDFLAGS="${LDFLAGS} -framework CoreFoundation -framework IOKit"
     ;;
 *)
     build_target=linux
+    build_target_id=1
     ;;
 esac
 
 AM_CONDITIONAL([FREEBSD], [test "${build_target}" = "freebsd"])
 AM_CONDITIONAL([MACOS], [test "${build_target}" = "macos"])
 AM_CONDITIONAL([LINUX], [test "${build_target}" = "linux"])
-AC_MSG_RESULT([${build_target}])
+AC_MSG_RESULT([${build_target} with id ${build_target_id}])
 
 
 # -----------------------------------------------------------------------------
@@ -481,17 +497,42 @@ AC_MSG_RESULT([${enable_lto}])
 
 AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileged])
 
-AC_SUBST([varlibdir], ["\$(localstatedir)/lib/netdata"])
-AC_SUBST([registrydir], ["\$(localstatedir)/lib/netdata/registry"])
-AC_SUBST([cachedir], ["\$(localstatedir)/cache/netdata"])
-AC_SUBST([chartsdir], ["\$(libexecdir)/netdata/charts.d"])
-AC_SUBST([nodedir], ["\$(libexecdir)/netdata/node.d"])
-AC_SUBST([pythondir], ["\$(libexecdir)/netdata/python.d"])
-AC_SUBST([configdir], ["\$(sysconfdir)/netdata"])
-AC_SUBST([logdir], ["\$(localstatedir)/log/netdata"]) -AC_SUBST([pluginsdir], ["\$(libexecdir)/netdata/plugins.d"]) +varlibdir="${localstatedir}/lib/netdata" +registrydir="${localstatedir}/lib/netdata/registry" +cachedir="${localstatedir}/cache/netdata" +chartsdir="${libexecdir}/netdata/charts.d" +nodedir="${libexecdir}/netdata/node.d" +pythondir="${libexecdir}/netdata/python.d" +configdir="${sysconfdir}/netdata" +libconfigdir="${libdir}/netdata/conf.d" +logdir="${localstatedir}/log/netdata" +pluginsdir="${libexecdir}/netdata/plugins.d" + +AC_SUBST([build_target]) +AC_SUBST([varlibdir]) +AC_SUBST([registrydir]) +AC_SUBST([cachedir]) +AC_SUBST([chartsdir]) +AC_SUBST([nodedir]) +AC_SUBST([pythondir]) +AC_SUBST([configdir]) +AC_SUBST([libconfigdir]) +AC_SUBST([logdir]) +AC_SUBST([pluginsdir]) AC_SUBST([webdir]) +CPPFLAGS="\ + -DTARGET_OS=${build_target_id} \ + -DVARLIB_DIR=\"\\\"${varlibdir}\\\"\" \ + -DCACHE_DIR=\"\\\"${cachedir}\\\"\" \ + -DCONFIG_DIR=\"\\\"${configdir}\\\"\" \ + -DLIBCONFIG_DIR=\"\\\"${libconfigdir}\\\"\" \ + -DLOG_DIR=\"\\\"${logdir}\\\"\" \ + -DPLUGINS_DIR=\"\\\"${pluginsdir}\\\"\" \ + -DRUN_DIR=\"\\\"${localstatedir}/run/netdata\\\"\" \ + -DWEB_DIR=\"\\\"${webdir}\\\"\" \ +" + AC_SUBST([OPTIONAL_MATH_CLFAGS]) AC_SUBST([OPTIONAL_MATH_LIBS]) AC_SUBST([OPTIONAL_NFACCT_CLFAGS]) @@ -507,19 +548,85 @@ AC_SUBST([OPTIONAL_IPMIMONITORING_LIBS]) AC_CONFIG_FILES([ Makefile - charts.d/Makefile - conf.d/Makefile netdata.spec - python.d/Makefile - node.d/Makefile - plugins.d/Makefile - src/Makefile - system/Makefile - web/Makefile + backends/graphite/Makefile + backends/json/Makefile + backends/Makefile + backends/opentsdb/Makefile + backends/prometheus/Makefile + collectors/Makefile + collectors/apps.plugin/Makefile + collectors/cgroups.plugin/Makefile + collectors/charts.d.plugin/Makefile + collectors/checks.plugin/Makefile + collectors/diskspace.plugin/Makefile + collectors/fping.plugin/Makefile + collectors/freebsd.plugin/Makefile + collectors/freeipmi.plugin/Makefile + collectors/idlejitter.plugin/Makefile + collectors/macos.plugin/Makefile + collectors/nfacct.plugin/Makefile + collectors/node.d.plugin/Makefile + collectors/plugins.d/Makefile + collectors/proc.plugin/Makefile + collectors/python.d.plugin/Makefile + collectors/statsd.plugin/Makefile + collectors/tc.plugin/Makefile + contrib/Makefile + daemon/Makefile + database/Makefile diagrams/Makefile + health/Makefile + health/notifications/Makefile + libnetdata/Makefile + libnetdata/adaptive_resortable_list/Makefile + libnetdata/avl/Makefile + libnetdata/buffer/Makefile + libnetdata/clocks/Makefile + libnetdata/config/Makefile + libnetdata/dictionary/Makefile + libnetdata/eval/Makefile + libnetdata/locks/Makefile + libnetdata/log/Makefile + libnetdata/popen/Makefile + libnetdata/procfile/Makefile + libnetdata/simple_pattern/Makefile + libnetdata/socket/Makefile + libnetdata/statistical/Makefile + libnetdata/storage_number/Makefile + libnetdata/threads/Makefile + libnetdata/url/Makefile makeself/Makefile - contrib/Makefile + registry/Makefile + streaming/Makefile + system/Makefile tests/Makefile + web/Makefile + web/api/Makefile + web/api/badges/Makefile + web/api/exporters/Makefile + web/api/exporters/shell/Makefile + web/api/exporters/prometheus/Makefile + web/api/formatters/Makefile + web/api/formatters/csv/Makefile + web/api/formatters/json/Makefile + web/api/formatters/ssv/Makefile + web/api/formatters/value/Makefile + web/api/queries/Makefile + web/api/queries/average/Makefile + web/api/queries/des/Makefile + 
web/api/queries/incremental_sum/Makefile + web/api/queries/max/Makefile + web/api/queries/median/Makefile + web/api/queries/min/Makefile + web/api/queries/ses/Makefile + web/api/queries/stddev/Makefile + web/api/queries/sum/Makefile + web/gui/Makefile + web/server/Makefile + web/server/single/Makefile + web/server/multi/Makefile + web/server/static/Makefile ]) AC_OUTPUT diff --git a/contrib/Makefile.am b/contrib/Makefile.am index d9250179b..80d80d371 100644 --- a/contrib/Makefile.am +++ b/contrib/Makefile.am @@ -1,4 +1,6 @@ -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ README.md \ @@ -22,7 +24,6 @@ dist_noinst_DATA = \ dist_noinst_SCRIPTS = \ debian/netdata.init \ - nc-backend.sh \ $(NULL) debian/changelog: diff --git a/contrib/Makefile.in b/contrib/Makefile.in index 094053096..789a7bfd9 100644 --- a/contrib/Makefile.in +++ b/contrib/Makefile.in @@ -14,6 +14,8 @@ @SET_MAKE@ +# SPDX-License-Identifier: GPL-3.0-or-later + VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' @@ -83,14 +85,16 @@ subdir = contrib DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d @@ -217,6 +221,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -238,6 +243,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ @@ -287,7 +293,6 @@ dist_noinst_DATA = \ dist_noinst_SCRIPTS = \ debian/netdata.init \ - nc-backend.sh \ $(NULL) all: all-am diff --git a/contrib/debian/changelog b/contrib/debian/changelog new file mode 100644 index 000000000..730844070 --- /dev/null +++ b/contrib/debian/changelog @@ -0,0 +1,3 @@ +netdata (1.11.0~rolling) UNRELEASED; urgency=medium + * Latest release + -- Netdata Team <> Fri, 02 Nov 2018 14:25:18 +0000 diff --git a/contrib/debian/compat b/contrib/debian/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/contrib/debian/compat @@ -0,0 +1 @@ +9 diff --git a/contrib/debian/control b/contrib/debian/control new file mode 100644 index 000000000..0f4f1bc05 --- /dev/null +++ b/contrib/debian/control @@ -0,0 +1,25 @@ +Source: netdata +Build-Depends: 
debhelper (>= 9), + dh-autoreconf, + dh-systemd (>= 1.5), + dpkg-dev (>= 1.13.19), + zlib1g-dev, + uuid-dev +Section: net +Priority: optional +Maintainer: Costa Tsaousis <costa@tsaousis.gr> +Standards-Version: 3.9.6 +Homepage: https://github.com/netdata/netdata/wiki + +Package: netdata +Architecture: any +Depends: adduser, + libcap2-bin (>= 1:2.0), + lsb-base (>= 3.1-23.2), + ${misc:Depends}, + ${shlibs:Depends} +Description: real-time charts for system monitoring + Netdata is a daemon that collects data in realtime (per second) + and presents a web site to view and analyze them. The presentation + is also real-time and full of interactive charts that precisely + render all collected values. diff --git a/contrib/debian/control.wheezy b/contrib/debian/control.wheezy new file mode 100644 index 000000000..cde1d560c --- /dev/null +++ b/contrib/debian/control.wheezy @@ -0,0 +1,25 @@ +Source: netdata +Build-Depends: debhelper (>= 9), + dh-autoreconf, + pkg-config, + dpkg-dev (>= 1.13.19), + zlib1g-dev, + uuid-dev +Section: net +Priority: optional +Maintainer: Costa Tsaousis <costa@tsaousis.gr> +Standards-Version: 3.9.6 +Homepage: https://github.com/netdata/netdata/wiki + +Package: netdata +Architecture: any +Depends: adduser, + libcap2-bin (>= 1:2.0), + lsb-base (>= 3.1-23.2), + ${misc:Depends}, + ${shlibs:Depends} +Description: real-time charts for system monitoring + Netdata is a daemon that collects data in realtime (per second) + and presents a web site to view and analyze them. The presentation + is also real-time and full of interactive charts that precisely + render all collected values. diff --git a/contrib/debian/copyright b/contrib/debian/copyright new file mode 100644 index 000000000..085580ea1 --- /dev/null +++ b/contrib/debian/copyright @@ -0,0 +1,10 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: Netdata +Upstream-Contact: Costa Tsaousis <costa@tsaousis.gr> +Source: https://github.com/netdata/netdata + +Files: * +Copyright: 2014-2016, Costa Tsaousis +License: GPL-3+ + On Debian systems, the complete text of the GNU General Public + License version 3 can be found in /usr/share/common-licenses/GPL-3. diff --git a/contrib/debian/netdata.conf b/contrib/debian/netdata.conf new file mode 100644 index 000000000..a963d80b7 --- /dev/null +++ b/contrib/debian/netdata.conf @@ -0,0 +1,16 @@ +# NetData Configuration + +# The current full configuration can be retrieved from the running +# server at the URL +# +# http://localhost:19999/netdata.conf +# +# for example: +# +# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# + +[global] + run as user = netdata + web files owner = root + web files group = netdata diff --git a/contrib/debian/netdata.default b/contrib/debian/netdata.default new file mode 100644 index 000000000..9e7f8ae6e --- /dev/null +++ b/contrib/debian/netdata.default @@ -0,0 +1,5 @@ +# Extra arguments to pass to netdata +# +#EXTRA_OPTS="" +#uncomment following line if you are building a wheezy-package +#EXTRA_OPTS="-P /var/run/netdata.pid" diff --git a/contrib/debian/netdata.docs b/contrib/debian/netdata.docs new file mode 100644 index 000000000..56631abf1 --- /dev/null +++ b/contrib/debian/netdata.docs @@ -0,0 +1 @@ +ChangeLog diff --git a/contrib/debian/netdata.init b/contrib/debian/netdata.init new file mode 100755 index 000000000..c1b2b74de --- /dev/null +++ b/contrib/debian/netdata.init @@ -0,0 +1,56 @@ +#!/bin/sh +# Start/stop the netdata daemon. 
+# +### BEGIN INIT INFO +# Provides: netdata +# Required-Start: $remote_fs +# Required-Stop: $remote_fs +# Should-Start: $network +# Should-Stop: $network +# Default-Start: 2 3 4 5 +# Default-Stop: +# Short-Description: Real-time charts for system monitoring +# Description: Netdata is a daemon that collects data in realtime (per second) +# and presents a web site to view and analyze them. The presentation +# is also real-time and full of interactive charts that precisely +# render all collected values. +### END INIT INFO + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +DESC="netdata daemon" +NAME=netdata +DAEMON=/usr/sbin/netdata +PIDFILE=/var/run/netdata/netdata.pid +SCRIPTNAME=/etc/init.d/"$NAME" + +test -f $DAEMON || exit 0 + +. /lib/lsb/init-functions + +[ -r /etc/default/netdata ] && . /etc/default/netdata + +case "$1" in +start) log_daemon_msg "Starting real-time system monitoring" "netdata" + start_daemon -p $PIDFILE $DAEMON $EXTRA_OPTS + log_end_msg $? + ;; +stop) log_daemon_msg "Stopping real-time system monitoring" "netdata" + killproc -p $PIDFILE $DAEMON + RETVAL=$? + [ $RETVAL -eq 0 ] && [ -e "$PIDFILE" ] && rm -f $PIDFILE + log_end_msg $RETVAL + # wait for plugins to exit + sleep 1 + ;; +restart|force-reload) log_daemon_msg "Restarting real-time system monitoring" "netdata" + $0 stop + $0 start + ;; +status) + status_of_proc -p $PIDFILE $DAEMON $NAME && exit 0 || exit $? + ;; +*) log_action_msg "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" + exit 2 + ;; +esac +exit 0 diff --git a/contrib/debian/netdata.install b/contrib/debian/netdata.install new file mode 100644 index 000000000..45d42b635 --- /dev/null +++ b/contrib/debian/netdata.install @@ -0,0 +1 @@ +debian/netdata.conf /etc/netdata/ diff --git a/contrib/debian/netdata.lintian-overrides b/contrib/debian/netdata.lintian-overrides new file mode 100644 index 000000000..45b2d868f --- /dev/null +++ b/contrib/debian/netdata.lintian-overrides @@ -0,0 +1,16 @@ + +# See Debian policy 10.9. apps.plugin has extra capabilities, so don't let +# normal users run it. +netdata: non-standard-executable-perm usr/lib/*/netdata/plugins.d/apps.plugin 0754 != 0755 + + +# FontAwesome is at least in the fonts-font-awesome package, but this is +# not available in wheezy. glyphicons-halflings-regular isn't currently in +# a Debian package. Therefore don't complain about shipping them with netdata +# for the time being. +netdata: duplicate-font-file usr/share/netdata/fonts/* +netdata: font-in-non-font-package usr/share/netdata/fonts/* + +# Files here are marked as conffiles so that local updates to the html files +# isn't clobbered on upgrade. +netdata: non-etc-file-marked-as-conffile var/lib/netdata/www/* diff --git a/contrib/debian/netdata.postinst.in b/contrib/debian/netdata.postinst.in new file mode 100644 index 000000000..29615f541 --- /dev/null +++ b/contrib/debian/netdata.postinst.in @@ -0,0 +1,41 @@ +#! /bin/sh + +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group netdata >/dev/null; then + addgroup --quiet --system netdata + fi + + if ! getent passwd netdata >/dev/null; then + adduser --quiet --system --ingroup netdata --home /var/lib/netdata --no-create-home netdata + fi + + if ! dpkg-statoverride --list /var/lib/netdata >/dev/null 2>&1; then + dpkg-statoverride --update --add netdata netdata 0755 /var/lib/netdata + fi + + if ! dpkg-statoverride --list /var/lib/netdata/www >/dev/null 2>&1; then + dpkg-statoverride --update --add root netdata 0755 /var/lib/netdata/www + fi + + if ! 
dpkg-statoverride --list /var/cache/netdata >/dev/null 2>&1; then + dpkg-statoverride --update --add netdata netdata 0755 /var/cache/netdata + fi + + fi + + dpkg-statoverride --update --add --force root netdata 0775 /var/lib/netdata/registry + chown -R root:netdata /usr/share/netdata/* + chown -R root:netdata /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d + setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d/apps.plugin + +#PERMS# + ;; +esac + +#DEBHELPER# + +exit 0 diff --git a/contrib/debian/netdata.postrm b/contrib/debian/netdata.postrm new file mode 100644 index 000000000..4ab4eeadd --- /dev/null +++ b/contrib/debian/netdata.postrm @@ -0,0 +1,43 @@ +#! /bin/sh + +set -e + +case "$1" in + remove) + ;; + + purge) + if dpkg-statoverride --list | grep -qw /var/cache/netdata; then + dpkg-statoverride --remove /var/cache/netdata + fi + + if dpkg-statoverride --list | grep -qw /var/lib/netdata/www; then + dpkg-statoverride --remove /var/lib/netdata/www + fi + + if dpkg-statoverride --list | grep -qw /var/lib/netdata; then + dpkg-statoverride --remove /var/lib/netdata + fi + + if getent passwd netdata >/dev/null; then + if [ -x /usr/sbin/deluser ]; then + deluser --quiet --system netdata || echo "Unable to remove netdata user" + fi + fi + + if getent group netdata >/dev/null; then + if [ -x /usr/sbin/delgroup ]; then + delgroup --quiet --system netdata || echo "Unable to remove netdata group" + fi + fi + + ;; + + *) + ;; +esac + +#DEBHELPER# + +exit 0 + diff --git a/contrib/debian/netdata.service b/contrib/debian/netdata.service new file mode 100644 index 000000000..e5d3a3863 --- /dev/null +++ b/contrib/debian/netdata.service @@ -0,0 +1,14 @@ +[Unit] +Description=netdata real-time system monitoring +After=network.target + +[Service] +Type=simple +EnvironmentFile=-/etc/default/netdata +ExecStart=/usr/sbin/netdata -D $EXTRA_OPTS +TimeoutStopSec=30 +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/debian/rules b/contrib/debian/rules new file mode 100755 index 000000000..ec4ec4182 --- /dev/null +++ b/contrib/debian/rules @@ -0,0 +1,87 @@ +#!/usr/bin/make -f + +# Find the arch we are building for, as this determines +# the location of plugins in /usr/lib +DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH) +TOP = $(CURDIR)/debian/netdata + +%: + # For jessie and beyond + # + dh $@ --with autoreconf,systemd + + # For wheezy or other non-systemd distributions use the following. You + # should also see contrib/README.md which gives details of updates to + # make to debian/control. + # + #dh $@ --with autoreconf + +override_dh_auto_configure: + dh_auto_configure -- --with-math --with-webdir=/var/lib/netdata/www + +debian/%.postinst: debian/%.postinst.in + sed 's/@DEB_HOST_MULTIARCH@/$(DEB_HOST_MULTIARCH)/g' $< > $@ + +override_dh_install: debian/netdata.postinst + dh_install + + # Remove unneeded .keep files + # + find "$(TOP)" -name .keep -exec rm '{}' ';' + + # Move files that local user shouldn't be editing to /usr/share/netdata + # + mkdir -p "$(TOP)/usr/share/netdata" + for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type d -printf '%f '); do \ + echo Relocating $$D; \ + mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/$$D"; \ + ln -s "/usr/share/netdata/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \ + done + + # Update postinst to set correct group for www files on installation. + # Should probably be dpkg-statoverride really, but that gets *really* + # messy. 
We also set all web files in /var as conffiles so an upgrade + # doesn't splat them. + # + for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type f -printf '%f '); do \ + echo Updating postinst for $$D; \ + sed -i "s/^#PERMS#/chgrp netdata \/var\/lib\/netdata\/www\/$$D\n#PERMS#/g" \ + $(CURDIR)/debian/netdata.postinst; \ + echo "/var/lib/netdata/www/$$D" >> $(CURDIR)/debian/netdata.conffiles; \ + done + sed -i "/^#PERMS#/d" $(CURDIR)/debian/netdata.postinst + +override_dh_installdocs: + dh_installdocs + + # Docs should not be under /usr/lib + # + mv $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/plugins.d/README.md \ + $(TOP)/usr/share/doc/netdata/README.plugins.md + mv $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/charts.d/README.md \ + $(TOP)/usr/share/doc/netdata/README.charts.md + + # This doc is currently empty, so no point installing it. + # + rm $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/node.d/README.md + +override_dh_fixperms: + dh_fixperms + + # apps.plugin should only be runnable by the netdata user. It will be + # given extra capabilities in the postinst script. + # + chmod 0754 $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/plugins.d/apps.plugin + +override_dh_installlogrotate: + cp system/netdata.logrotate debian/netdata.logrotate + dh_installlogrotate + +override_dh_clean: + dh_clean + + # Tidy up copied/generated files + # + -[ -r $(CURDIR)/debian/netdata.logrotate ] && rm $(CURDIR)/debian/netdata.logrotate + -[ -r $(CURDIR)/debian/netdata.postinst ] && rm $(CURDIR)/debian/netdata.postinst + -[ -r $(CURDIR)/debian/netdata.conffiles ] && rm $(CURDIR)/debian/netdata.conffiles diff --git a/contrib/debian/source/format b/contrib/debian/source/format new file mode 100644 index 000000000..89ae9db8f --- /dev/null +++ b/contrib/debian/source/format @@ -0,0 +1 @@ +3.0 (native) diff --git a/contrib/nc-backend.sh b/contrib/nc-backend.sh deleted file mode 100755 index aac5c20bb..000000000 --- a/contrib/nc-backend.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env bash - -MODE="${1}" -MY_PORT="${2}" -BACKEND_HOST="${3}" -BACKEND_PORT="${4}" -FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}" - -log() { - logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}" -} - -mync() { - local ret - - log "Running: nc ${*}" - nc "${@}" - ret=$? - - log "nc stopped with return code ${ret}." - - return ${ret} -} - -listen_save_replay_forever() { - local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended - - while [ 1 ] - do - log "Starting nc to listen on port ${port} and save metrics to ${file}" - - started=$(date +%s) - mync -l -p "${port}" | tee -a -p --output-error=exit "${file}" - ended=$(date +%s) - - if [ -s "${file}" ] - then - if [ ! -z "${real_backend_host}" -a ! -z "${real_backend_port}" ] - then - log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}" - - mync "${real_backend_host}" "${real_backend_port}" <"${file}" - ret=$? - - if [ ${ret} -eq 0 ] - then - log "Successfuly sent the metrics to ${real_backend_host}:${real_backend_port}" - mv "${file}" "${file}.old" - touch "${file}" - else - log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}" - fi - else - log "No backend configured - appending more data to ${file}" - fi - fi - - # prevent a CPU hungry infinite loop - # if nc cannot listen to port - if [ $((ended - started)) -lt 5 ] - then - log "nc has been stopped too fast." 
- delay=30 - else - delay=1 - fi - - log "Waiting ${delay} seconds before listening again for data." - sleep ${delay} - done -} - -if [ "${MODE}" = "start" ] - then - - # start the listener, in exclusive mode - # only one can use the same file/port at a time - { - flock -n 9 - if [ $? -ne 0 ] - then - log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?" - exit 2 - fi - - # save our PID to the lock file - echo "$$" >"${FILE}.lock" - - listen_save_replay_forever "${FILE}" ${MY_PORT} ${BACKEND_HOST} ${BACKEND_PORT} - ret=$? - - log "listener exited." - exit ${ret} - - } 9>>"${FILE}.lock" - - # we can only get here if ${FILE}.lock cannot be created - log "Cannot create file ${FILE}." - exit 3 - -elif [ "${MODE}" = "stop" ] - then - - { - flock -n 9 - if [ $? -ne 0 ] - then - pid=$(<${FILE}.lock) - log "Killing process ${pid}..." - kill -TERM -${pid} - exit 0 - fi - - log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?" - exit 4 - - } 9<"${FILE}.lock" - - log "File ${FILE}.lock does not exist. Is a collector running?" - exit 5 - -else - - cat <<EOF -Usage: - - "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT] - - PORT The port this script will listen - (configure netdata to use this as a second backend) - - BACKEND_HOST The real backend host - BACKEND_PORT The real backend port - - This script can act as fallback backend for netdata. - It will receive metrics from netdata, save them to - ${FILE} - and once netdata reconnects to the real-backend, this script - will push all metrics collected to the real-backend too and - wait for a failure to happen again. - - Only one netdata can connect to this script at a time. - If you need fallback for multiple netdata, run this script - multiple times with different ports. - - You can run me in the background with this: - - screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT] -EOF - exit 1 -fi diff --git a/contrib/rhel/build-netdata-rpm.sh b/contrib/rhel/build-netdata-rpm.sh index 051969091..927318fb1 100755 --- a/contrib/rhel/build-netdata-rpm.sh +++ b/contrib/rhel/build-netdata-rpm.sh @@ -3,12 +3,13 @@ # docker run -it --rm centos:6.9 /bin/sh # yum -y install rpm-build redhat-rpm-config yum-utils autoconf automake curl gcc git libmnl-devel libuuid-devel make pkgconfig zlib-devel -cd $(dirname $0)/../../ || exit 1 +cd "$(dirname "$0")/../../" || exit 1 +# shellcheck disable=SC1091 source "installer/functions.sh" || exit 1 set -e -run ./autogen.sh +run autoreconf -ivf run ./configure --enable-maintainer-mode run make dist @@ -27,7 +28,7 @@ then fi srpm=$(run rpmbuild -ts "${tgz}" | cut -d ' ' -f 2) -if [ -z "${srpm}" -o ! -f "${srpm}" ] +if [ -z "${srpm}" ] || [ ! -f "${srpm}" ] then echo >&2 "Cannot find the generated SRPM file '${srpm}'" exit 1 diff --git a/coverity-scan.sh b/coverity-scan.sh index 5ca093140..1bf0a5804 100755 --- a/coverity-scan.sh +++ b/coverity-scan.sh @@ -1,47 +1,67 @@ #!/usr/bin/env bash +# shellcheck disable=SC2235 -cpus=$(grep ^processor </proc/cpuinfo| wc -l) +# To run this script you need to provide API token. This can be done either by: +# - Putting token in ".coverity-token" file +# - Assigning token value to COVERITY_SCAN_TOKEN environment variable +# Additionally script can install coverity tool on your computer. 
To do this just set environment variable INSTALL_COVERITY to "true" + +cpus=$(grep -c ^processor </proc/cpuinfo) [ -z "${cpus}" ] && cpus=1 -token= -[ -f .coverity-token ] && token="$(<.coverity-token)" -[ -z "${token}" ] && \ - echo >&2 "Save the coverity token to .coverity-token" && \ +token="${COVERITY_SCAN_TOKEN}" +([ -z "${token}" ] && [ -f .coverity-token ]) && token="$(<.coverity-token)" +if [ -z "${token}" ]; then + echo >&2 "Save the coverity token to .coverity-token or export it as COVERITY_SCAN_TOKEN." exit 1 +fi -echo >&2 "Coverity token: ${token}" - +# shellcheck disable=SC2230 covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)" -[ -z "${covbuild}" -a -f .coverity-build ] && covbuild="$(<.coverity-build)" -[ -z "${covbuild}" ] && \ - echo "Save command the full filename of cov-build in .coverity-build" && \ - exit 1 +([ -z "${covbuild}" ] && [ -f .coverity-build ]) && covbuild="$(<.coverity-build)" +if [ -z "${covbuild}" ]; then + echo "Cannot find 'cov-build' binary in \$PATH." + if [ "${INSTALL_COVERITY}" != "" ]; then + echo "Installing coverity..." + mkdir /tmp/coverity + curl -SL --data "token=${token}&project=netdata%2Fnetdata" https://scan.coverity.com/download/linux64 > /tmp/coverity_tool.tar.gz + tar -x -C /tmp/coverity/ -f /tmp/coverity_tool.tar.gz + sudo mv /tmp/coverity/cov-analysis-linux64-2017.07 /opt/coverity + export PATH=${PATH}:/opt/coverity/bin/ + # shellcheck disable=SC2230 + covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)" + else + echo "Save command the full filename of cov-build in .coverity-build" + exit 1 + fi +fi -[ ! -x "${covbuild}" ] && \ - echo "The command ${covbuild} is not executable. Save command the full filename of cov-build in .coverity-build" && \ +if [ ! -x "${covbuild}" ]; then + echo "The command ${covbuild} is not executable. Save command the full filename of cov-build in .coverity-build" exit 1 +fi -version="$(cat config.h | grep "^#define PACKAGE_VERSION" | cut -d '"' -f 2)" +version="$(grep "^#define PACKAGE_VERSION" config.h | cut -d '"' -f 2)" echo >&2 "Working on netdata version: ${version}" echo >&2 "Cleaning up old builds..." -make clean || exit 1 +make clean || echo "Nothing to clean" -[ -d "cov-int" ] && \ - rm -rf "cov-int" +[ -d "cov-int" ] && rm -rf "cov-int" -[ -f netdata-coverity-analysis.tgz ] && \ - rm netdata-coverity-analysis.tgz +[ -f netdata-coverity-analysis.tgz ] && rm netdata-coverity-analysis.tgz +autoreconf -ivf +./configure --enable-plugin-nfacct --enable-plugin-freeipmi "${covbuild}" --dir cov-int make -j${cpus} || exit 1 echo >&2 "Compressing data..." tar czvf netdata-coverity-analysis.tgz cov-int || exit 1 -echo >&2 "Sending analysis..." +echo >&2 "Sending analysis for version ${version} ..." curl --progress-bar --form token="${token}" \ --form email=costa@tsaousis.gr \ --form file=@netdata-coverity-analysis.tgz \ --form version="${version}" \ --form description="netdata, real-time performance monitoring, done right." \ - https://scan.coverity.com/builds?project=firehol%2Fnetdata + https://scan.coverity.com/builds?project=netdata%2Fnetdata diff --git a/cppcheck.sh b/cppcheck.sh index 841f01d26..ebbeeaf8f 100755 --- a/cppcheck.sh +++ b/cppcheck.sh @@ -2,25 +2,24 @@ # echo >>/tmp/cppcheck.log "cppcheck ${*}" +# shellcheck disable=SC2230 cppcheck=$(which cppcheck 2>/dev/null || command -v cppcheck 2>/dev/null) [ -z "${cppcheck}" ] && echo >&2 "install cppcheck." 
&& exit 1 -[ -x "/home/costa/src/cppcheck.git/cppcheck" ] && \ - cppcheck="/home/costa/src/cppcheck.git/cppcheck" - processors=$(grep -c ^processor /proc/cpuinfo) [ $(( processors )) -lt 1 ] && processors=1 base="$(dirname "${0}")" [ "${base}" = "." ] && base="${PWD}" -cd "${base}/src" || exit 1 +cd "${base}" || exit 1 [ ! -d "cppcheck-build" ] && mkdir "cppcheck-build" file="${1}" shift -[ "${file}" = "${base}" -o -z "${file}" ] && file="${base}/src" +# shellcheck disable=SC2235 +([ "${file}" = "${base}" ] || [ -z "${file}" ]) && file="${base}" "${cppcheck}" \ -j ${processors} \ diff --git a/daemon/Makefile.am b/daemon/Makefile.am new file mode 100644 index 000000000..bdd02774c --- /dev/null +++ b/daemon/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/daemon/Makefile.in b/daemon/Makefile.in new file mode 100644 index 000000000..8c7ad21a5 --- /dev/null +++ b/daemon/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = daemon +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = 
@CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ 
+psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu daemon/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu daemon/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/daemon/README.md b/daemon/README.md new file mode 100644 index 000000000..2a5ebfa0c --- /dev/null +++ b/daemon/README.md @@ -0,0 +1,445 @@ +# Netdata daemon + + + +## Command line options + +Normally you don't need to supply any command line arguments to netdata. + +If you do though, they override the configuration equivalent options. + +To get a list of all command line parameters supported, run: + +```sh +netdata -h +``` + +The program will print the supported command line parameters. 
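+
+For example, a quick way to test configuration changes is to run the daemon in the foreground with an explicit configuration file. This is only a sketch combining the `-D` and `-c` options documented below:
+
+```sh
+# run netdata in the foreground, loading a specific configuration file
+netdata -D -c /etc/netdata/netdata.conf
+```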
+
+The command line options of the netdata 1.10.0 version are the following:
+```
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ Copyright (C) 2016-2017, Costa Tsaousis <costa@tsaousis.gr>
+ Released under GNU General Public License v3 or later.
+ All rights reserved.
+
+ Home Page : https://my-netdata.io
+ Source Code: https://github.com/netdata/netdata
+ Wiki / Docs: https://github.com/netdata/netdata/wiki
+ Support : https://github.com/netdata/netdata/issues
+ License : https://github.com/netdata/netdata/blob/master/LICENSE.md
+
+ Twitter : https://twitter.com/linuxnetdata
+ Facebook : https://www.facebook.com/linuxnetdata/
+
+
+ SYNOPSIS: netdata [options]
+
+ Options:
+
+ -c filename Configuration file to load.
+ Default: /etc/netdata/netdata.conf
+
+ -D Do not fork. Run in the foreground.
+ Default: run in the background
+
+ -h Display this help message.
+
+ -P filename File to save a pid while running.
+ Default: do not save pid to a file
+
+ -i IP The IP address to listen to.
+ Default: all IP addresses IPv4 and IPv6
+
+ -p port API/Web port to use.
+ Default: 19999
+
+ -s path Prefix for /proc and /sys (for containers).
+ Default: no prefix
+
+ -t seconds The internal clock of netdata.
+ Default: 1
+
+ -u username Run as user.
+ Default: netdata
+
+ -v Print netdata version and exit.
+
+ -V Print netdata version and exit.
+
+ -W options See Advanced options below.
+
+
+ Advanced options:
+
+ -W stacksize=N Set the stacksize (in bytes).
+
+ -W debug_flags=N Set runtime tracing to debug.log.
+
+ -W unittest Run internal unittests and exit.
+
+ -W set section option value
+ set netdata.conf option from the command line.
+
+ -W simple-pattern pattern string
+ Check if string matches pattern and exit.
+
+
+ Signals netdata handles:
+
+ - HUP Close and reopen log files.
+ - USR1 Save internal DB to disk.
+ - USR2 Reload health configuration.
+```
+
+## Log files
+
+netdata uses 3 log files:
+
+1. `error.log`
+2. `access.log`
+3. `debug.log`
+
+Any of them can be disabled by setting it to `/dev/null` or `none` in `netdata.conf`.
+By default `error.log` and `access.log` are enabled. `debug.log` is only enabled if
+debugging/tracing is also enabled (netdata needs to be compiled with debugging enabled).
+
+Log files are stored in `/var/log/netdata/` by default.
+
+#### error.log
+
+The `error.log` is the `stderr` of the netdata daemon and all external plugins run by netdata.
+
+So if any process, in the netdata process tree, writes anything to its standard error,
+it will appear in `error.log`.
+
+For most netdata programs (including standard external plugins shipped by netdata), the
+following lines may appear:
+
+tag|description
+:---:|:----
+`INFO`|Something important the user should know.
+`ERROR`|Something that might disable a part of netdata.<br/>The log line includes `errno` (if it is not zero).
+`FATAL`|Something prevented a program from running.<br/>The log line includes `errno` (if it is not zero) and the program exited.
+
+So, when auto-detection of data collection fails, `ERROR` lines are logged and the relevant modules
+are disabled, but the program continues to run.
+
+When a netdata program cannot run at all, a `FATAL` line is logged.
+
+#### access.log
+
+The `access.log` logs web requests. The format is:
+
+```txt
+DATE: ID: (sent/all = SENT_BYTES/ALL_BYTES bytes PERCENT_COMPRESSION%, prep/sent/total PREP_TIME/SENT_TIME/TOTAL_TIME ms): ACTION CODE URL
+```
+
+where:
+
+ - `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata.
+ - `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header.
+ - `ALL_BYTES` is the number of bytes of the response, before compression.
+ - `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression.
+ - `PREP_TIME` is the time in milliseconds needed to prepare the response.
+ - `SENT_TIME` is the time in milliseconds needed to send the response to the client.
+ - `TOTAL_TIME` is the total time the request was inside netdata (from the first byte of the request to the last byte of the response).
+ - `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call).
+
+
+#### debug.log
+
+See [debugging](#debugging).
+
+
+## OOM Score
+
+netdata runs with `OOMScore = 1000`. This means netdata will be the first to be killed when your
+server runs out of memory.
+
+You can set the netdata OOM score in `netdata.conf`, like this:
+
+```
+[global]
+    OOM score = 1000
+```
+
+netdata logs its OOM score when it starts:
+
+```sh
+# grep OOM /var/log/netdata/error.log
+2017-10-15 03:47:31: netdata INFO : Adjusted my Out-Of-Memory (OOM) score from 0 to 1000.
+```
+
+#### OOM score and systemd
+
+netdata will not be able to lower its OOM score below zero, when it is started as the `netdata`
+user (systemd case).
+
+To allow netdata to control its OOM score in such cases, you will need to edit
+`netdata.service` and set:
+
+```
+[Service]
+# The minimum netdata Out-Of-Memory (OOM) score.
+# netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
+# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
+OOMScoreAdjust=-1000
+```
+
+Run `systemctl daemon-reload` to reload these changes.
+
+The above sets an OOM score of `-1000` for netdata, so that netdata can increase it via
+`netdata.conf`.
+
+If you want to control it entirely via systemd, you can set in `netdata.conf`:
+
+```
+[global]
+    OOM score = keep
+```
+
+Using the above, whatever OOM score you have set at `netdata.service` will be maintained by netdata.
+
+
+## netdata process scheduling policy
+
+By default netdata runs with the `idle` process scheduling policy, so that it uses CPU resources only when there is idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps on the charts.
+
+You can set the netdata scheduling policy in `netdata.conf`, like this:
+
+```
+[global]
+    process scheduling policy = idle
+```
+
+You can use the following:
+
+policy|description
+:-----:|:--------
+`idle`|use CPU only when there is spare - this is lower than nice 19 - it is the default for netdata and it is so low that netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps on the charts.
+`other`<br/>or<br/>`nice`|this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for netdata.
+`batch`|This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions.
+`fifo`|`fifo` can be used only with static priorities higher than 0, which means that when a `fifo` thread becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing.
+`rr`|a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum.
+`keep`<br/>or<br/>`none`|do not set scheduling policy, priority or nice level - i.e. keep running with whatever is already set (e.g. by systemd).
+
+For more information see `man sched`.
+
+### scheduling priority for `rr` and `fifo`
+
+Once the policy is set to one of `rr` or `fifo`, the following will appear:
+
+```
+[global]
+    process scheduling priority = 0
+```
+
+These priorities are usually from 0 to 99. Higher numbers make the process more important.
+
+### nice level for policies `other` or `batch`
+
+When the policy is set to `other`, `nice`, or `batch`, the following will appear:
+
+```
+[global]
+    process nice level = 19
+```
+
+## scheduling settings and systemd
+
+netdata will not be able to set its scheduling policy and priority to more important values when it is started as the `netdata` user (systemd case).
+
+You can set these settings at `/etc/systemd/system/netdata.service`:
+
+```
+[Service]
+# By default netdata switches to scheduling policy idle, which makes it use CPU, only
+# when there is spare available.
+# Valid policies: other (the system default) | batch | idle | fifo | rr
+#CPUSchedulingPolicy=other
+
+# This sets the maximum scheduling priority netdata can set (for policies: rr and fifo).
+# netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
+# Priority gets values 1 (lowest) to 99 (highest).
+#CPUSchedulingPriority=1
+
+# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata.
+# netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
+#Nice=0
+```
+
+Run `systemctl daemon-reload` to reload these changes.
+
+Now, tell netdata to keep these settings, as set by systemd, by editing `netdata.conf` and setting:
+
+```
+[global]
+    process scheduling policy = keep
+```
+
+Using the above, whatever scheduling settings you have set at `netdata.service` will be maintained by netdata.
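+
+To verify which policy and priority the running daemon actually got, you can ask the kernel directly. This is just a quick sanity check and assumes `chrt` (from util-linux) and `pidof` are available:
+
+```sh
+# print the current scheduling policy and priority of the running netdata
+chrt -p $(pidof -s netdata)
+```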
+
+
+#### Example 1: netdata with nice -1 on non-systemd systems
+
+On a system that is not based on systemd, to make netdata run with nice level -1 (a little higher than the default for all programs), edit `netdata.conf` and set:
+
+```
+[global]
+  process scheduling policy = other
+  process nice level = -1
+```
+
+then execute this to restart netdata:
+
+```sh
+sudo service netdata restart
+```
+
+
+#### Example 2: netdata with nice -1 on systemd systems
+
+On a system that is based on systemd, to make netdata run with nice level -1 (a little higher than the default for all programs), edit `netdata.conf` and set:
+
+```
+[global]
+  process scheduling policy = keep
+```
+
+edit `/etc/systemd/system/netdata.service` and set:
+
+```
+[Service]
+CPUSchedulingPolicy=other
+Nice=-1
+```
+
+then execute:
+
+```sh
+sudo systemctl daemon-reload
+sudo systemctl restart netdata
+```
+
+## virtual memory
+
+You may notice that netdata's virtual memory size, as reported by `ps` or `/proc/pid/status`
+(or even netdata's applications virtual memory chart), is unrealistically high.
+
+For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB.
+Similar values may be reported for netdata plugins too.
+
+Take this example: a netdata installation with default settings on Ubuntu 16.04 LTS.
+The top chart is **real memory used**, while the bottom one is **virtual memory**:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/19013772/5eb7173e-87e3-11e6-8f2b-a2ccfeb06faf.png)
+
+#### why does this happen?
+
+The system memory allocator allocates virtual memory arenas, one per running thread.
+On Linux systems this defaults to 16MB per thread on 64 bit machines. So, if you take the
+difference between real and virtual memory and divide it by 16MB, you will roughly get the
+number of threads running.
+
+The system does this for speed. Having a separate memory arena for each thread allows the
+threads to run in parallel on multi-core systems, without any locks between them.
+
+This behavior is system specific. For example, the chart above when running netdata on Alpine
+Linux (which uses **musl** instead of **glibc**) looks like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/19013807/7cf5878e-87e4-11e6-9651-082e68701eab.png)
+
+#### can we do anything to lower it?
+
+Since netdata already uses minimal memory allocations while it runs (i.e. it allocates its memory
+on start, so that it does not perform memory allocations while it repeatedly collects data), it already
+instructs the system memory allocator to minimize the memory arenas for each thread. We have also
+added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
+to allow you to tweak these settings.
+
+However, even when we instruct the memory allocator to use just one arena, it seems to allocate
+an arena per thread anyway.
+
+netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly like the
+glibc memory allocator in this respect.
+
+#### Is this a problem?
+
+No, it is not.
+
+Linux reserves real memory (physical RAM) in pages (on x86 machines pages are 4KB each).
+So even if the system memory allocator is allocating huge amounts of virtual memory,
+only the 4KB pages that are actually used reserve physical RAM. The **real memory** chart
+in the netdata applications section shows the amount of physical memory these pages occupy (it
+accounts for whole pages, even if only parts of them are actually used).
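+
+If you want to verify this on your own system, you can compare the number of netdata threads with the memory figures reported for it (a rough sketch, assuming the 16MB-per-arena glibc default mentioned above):
+
+```sh
+# number of threads and memory figures of the running netdata daemon
+grep -E 'Threads|VmSize|VmRSS' /proc/$(pidof -s netdata)/status
+
+# glibc also honors the MALLOC_ARENA_MAX environment variable, so you can
+# experiment with it when starting netdata manually in the foreground, e.g.:
+#   MALLOC_ARENA_MAX=1 netdata -D
+# although, as noted above, an arena per thread may still be allocated.
+```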
+
+
+## Debugging
+
+When you compile netdata with debugging:
+
+1. compiler optimizations for your CPU are disabled (netdata will run somewhat slower)
+
+2. a lot of code is added all over netdata to log debug messages to `/var/log/netdata/debug.log`. However, nothing is printed by default. netdata allows you to select which sections of netdata you want to trace. Tracing is activated via the config option `debug flags`, which accepts a hex number to enable or disable specific sections. You can find the supported options at [log.h](https://github.com/netdata/netdata/blob/master/libnetdata/log/log.h). They are the `D_*` defines. The value `0xffffffffffffffff` enables all possible debug flags.
+
+Once netdata is compiled with debugging and tracing is enabled for a few sections, the file `/var/log/netdata/debug.log` will contain the messages.
+
+> Do not forget to disable tracing (`debug flags = 0`) when you are done tracing. The file `debug.log` can grow very fast.
+
+#### compiling netdata with debugging
+
+To compile netdata with debugging, use this:
+
+```sh
+# step into the netdata source directory
+cd /usr/src/netdata.git
+
+# run the installer with debugging enabled
+CFLAGS="-O1 -ggdb -DNETDATA_INTERNAL_CHECKS=1" ./netdata-installer.sh
+```
+
+The above will compile and install netdata with debugging info embedded. You can now use `debug flags` to set the section(s) you need to trace.
+
+#### debugging crashes
+
+We have done our best to make netdata crash-free. If, however, netdata crashes on your system, it would be very helpful to provide stack traces of the crash. Without them, it will be almost impossible to find the issue (the code base is too large to find such an issue by just observing it).
+
+To provide stack traces, **you need to have netdata compiled with debugging**. There is no need to enable any tracing (`debug flags`).
+
+Then you need to be in one of the following 2 cases:
+
+1. netdata crashes and you have a core dump
+
+2. you can reproduce the crash
+
+If you are not in one of these cases, you need to find a way to be (e.g. if your system does not produce core dumps, check your distro documentation on how to enable them).
+
+#### netdata crashes and you have a core dump
+
+> you need to have netdata compiled with debugging info for this to work (check above)
+
+Run the following command and post the output on a github issue (a non-interactive variant is shown at the end of this section).
+
+```sh
+gdb $(which netdata) /path/to/core/dump
+```
+
+#### you can reproduce a netdata crash on your system
+
+> you need to have netdata compiled with debugging info for this to work (check above)
+
+Install the package `valgrind` and run:
+
+```sh
+valgrind $(which netdata) -D
+```
+
+netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump the stack trace on your console. Open a new github issue and post the output.
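+
+For the core dump case above, `gdb` can also print the full stack trace of all threads non-interactively, which is convenient for attaching to an issue (a sketch - replace the core path with your own):
+
+```sh
+# print a backtrace of every thread from the core dump and exit
+gdb -batch -ex 'thread apply all bt' $(which netdata) /path/to/core/dump
+```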
+ diff --git a/daemon/common.c b/daemon/common.c new file mode 100644 index 000000000..e278cdf7c --- /dev/null +++ b/daemon/common.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +char *netdata_configured_hostname = NULL; +char *netdata_configured_user_config_dir = CONFIG_DIR; +char *netdata_configured_stock_config_dir = LIBCONFIG_DIR; +char *netdata_configured_log_dir = LOG_DIR; +char *netdata_configured_plugins_dir = NULL; +char *netdata_configured_web_dir = WEB_DIR; +char *netdata_configured_cache_dir = CACHE_DIR; +char *netdata_configured_varlib_dir = VARLIB_DIR; +char *netdata_configured_home_dir = CACHE_DIR; +char *netdata_configured_host_prefix = NULL; +char *netdata_configured_timezone = NULL; + diff --git a/daemon/common.h b/daemon/common.h new file mode 100644 index 000000000..d912a30e7 --- /dev/null +++ b/daemon/common.h @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_COMMON_H +#define NETDATA_COMMON_H 1 + +#include "../libnetdata/libnetdata.h" + +// ---------------------------------------------------------------------------- +// shortcuts for the default netdata configuration + +#define config_load(filename, overwrite_used) appconfig_load(&netdata_config, filename, overwrite_used) +#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value) +#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value) +#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value) +#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value) +#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value) + +#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value) +#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value) +#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value) +#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value) +#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value) + +#define config_exists(section, name) appconfig_exists(&netdata_config, section, name) +#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new) + +#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed) + + +// ---------------------------------------------------------------------------- +// netdata include files + +#include "global_statistics.h" + +// the netdata database +#include "database/rrd.h" + +// the netdata webserver(s) +#include "web/server/web_server.h" + +// streaming metrics between netdata servers +#include "streaming/rrdpush.h" + +// health monitoring and alarm notifications +#include "health/health.h" + +// the netdata registry +// the registry is actually an API feature +#include "registry/registry.h" + +// backends for archiving the metrics +#include "backends/backends.h" + +// the netdata API +#include "web/api/web_api_v1.h" + +// all data collection plugins +#include "collectors/all.h" + +// netdata unit tests +#include "unit_test.h" + +// the netdata deamon +#include 
"daemon.h" +#include "main.h" +#include "signals.h" + +// global netdata daemon variables +extern char *netdata_configured_hostname; +extern char *netdata_configured_user_config_dir; +extern char *netdata_configured_stock_config_dir; +extern char *netdata_configured_log_dir; +extern char *netdata_configured_plugins_dir_base; +extern char *netdata_configured_plugins_dir; +extern char *netdata_configured_web_dir; +extern char *netdata_configured_cache_dir; +extern char *netdata_configured_varlib_dir; +extern char *netdata_configured_home_dir; +extern char *netdata_configured_host_prefix; +extern char *netdata_configured_timezone; + +#endif /* NETDATA_COMMON_H */ diff --git a/daemon/daemon.c b/daemon/daemon.c new file mode 100644 index 000000000..4ad082b95 --- /dev/null +++ b/daemon/daemon.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" +#include <sched.h> + +char pidfile[FILENAME_MAX + 1] = ""; + +static void chown_open_file(int fd, uid_t uid, gid_t gid) { + if(fd == -1) return; + + struct stat buf; + + if(fstat(fd, &buf) == -1) { + error("Cannot fstat() fd %d", fd); + return; + } + + if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { + if(fchown(fd, uid, gid) == -1) + error("Cannot fchown() fd %d.", fd); + } +} + +void create_needed_dir(const char *dir, uid_t uid, gid_t gid) +{ + // attempt to create the directory + if(mkdir(dir, 0755) == 0) { + // we created it + + // chown it to match the required user + if(chown(dir, uid, gid) == -1) + error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid); + } + else if(errno != EEXIST) + // log an error only if the directory does not exist + error("Cannot create directory '%s'", dir); +} + +int become_user(const char *username, int pid_fd) { + int am_i_root = (getuid() == 0)?1:0; + + struct passwd *pw = getpwnam(username); + if(!pw) { + error("User %s is not present.", username); + return -1; + } + + uid_t uid = pw->pw_uid; + gid_t gid = pw->pw_gid; + + create_needed_dir(netdata_configured_cache_dir, uid, gid); + create_needed_dir(netdata_configured_varlib_dir, uid, gid); + + if(pidfile[0]) { + if(chown(pidfile, uid, gid) == -1) + error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid); + } + + int ngroups = (int)sysconf(_SC_NGROUPS_MAX); + gid_t *supplementary_groups = NULL; + if(ngroups > 0) { + supplementary_groups = mallocz(sizeof(gid_t) * ngroups); + if(getgrouplist(username, gid, supplementary_groups, &ngroups) == -1) { + if(am_i_root) + error("Cannot get supplementary groups of user '%s'.", username); + + ngroups = 0; + } + } + + chown_open_file(STDOUT_FILENO, uid, gid); + chown_open_file(STDERR_FILENO, uid, gid); + chown_open_file(stdaccess_fd, uid, gid); + chown_open_file(pid_fd, uid, gid); + + if(supplementary_groups && ngroups > 0) { + if(setgroups((size_t)ngroups, supplementary_groups) == -1) { + if(am_i_root) + error("Cannot set supplementary groups for user '%s'", username); + } + ngroups = 0; + } + + if(supplementary_groups) + freez(supplementary_groups); + +#ifdef __APPLE__ + if(setregid(gid, gid) != 0) { +#else + if(setresgid(gid, gid, gid) != 0) { +#endif /* __APPLE__ */ + error("Cannot switch to user's %s group (gid: %u).", username, gid); + return -1; + } + +#ifdef __APPLE__ + if(setreuid(uid, uid) != 0) { +#else + if(setresuid(uid, uid, uid) != 0) { +#endif /* __APPLE__ */ + error("Cannot switch to user %s (uid: %u).", username, uid); + return -1; + } + + if(setgid(gid) != 0) { + error("Cannot switch to user's 
%s group (gid: %u).", username, gid); + return -1; + } + if(setegid(gid) != 0) { + error("Cannot effectively switch to user's %s group (gid: %u).", username, gid); + return -1; + } + if(setuid(uid) != 0) { + error("Cannot switch to user %s (uid: %u).", username, uid); + return -1; + } + if(seteuid(uid) != 0) { + error("Cannot effectively switch to user %s (uid: %u).", username, uid); + return -1; + } + + return(0); +} + +#ifndef OOM_SCORE_ADJ_MAX +#define OOM_SCORE_ADJ_MAX (1000) +#endif +#ifndef OOM_SCORE_ADJ_MIN +#define OOM_SCORE_ADJ_MIN (-1000) +#endif + +static void oom_score_adj(void) { + char buf[30 + 1]; + long long int old_score, wanted_score = OOM_SCORE_ADJ_MAX, final_score = 0; + + // read the existing score + if(read_single_signed_number_file("/proc/self/oom_score_adj", &old_score)) { + error("Out-Of-Memory (OOM) score setting is not supported on this system."); + return; + } + + if(old_score != 0) + wanted_score = old_score; + + // check the environment + char *s = getenv("OOMScoreAdjust"); + if(!s || !*s) { + snprintfz(buf, 30, "%d", (int)wanted_score); + s = buf; + } + + // check netdata.conf configuration + s = config_get(CONFIG_SECTION_GLOBAL, "OOM score", s); + if(s && *s && (isdigit(*s) || *s == '-' || *s == '+')) + wanted_score = atoll(s); + else if(s && !strcmp(s, "keep")) { + info("Out-Of-Memory (OOM) kept as-is (running with %d)", (int) old_score); + return; + } + else { + info("Out-Of-Memory (OOM) score not changed due to non-numeric setting: '%s' (running with %d)", s, (int)old_score); + return; + } + + if(wanted_score < OOM_SCORE_ADJ_MIN) { + error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN); + wanted_score = OOM_SCORE_ADJ_MIN; + } + + if(wanted_score > OOM_SCORE_ADJ_MAX) { + error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX); + wanted_score = OOM_SCORE_ADJ_MAX; + } + + if(old_score == wanted_score) { + info("Out-Of-Memory (OOM) score is already set to the wanted value %d", (int)old_score); + return; + } + + int written = 0; + int fd = open("/proc/self/oom_score_adj", O_WRONLY); + if(fd != -1) { + snprintfz(buf, 30, "%d", (int)wanted_score); + ssize_t len = strlen(buf); + if(len > 0 && write(fd, buf, (size_t)len) == len) written = 1; + close(fd); + + if(written) { + if(read_single_signed_number_file("/proc/self/oom_score_adj", &final_score)) + error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score); + else if(final_score == wanted_score) + info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score); + else + error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score); + } + else + error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score); + } + else + error("Failed to adjust my Out-Of-Memory (OOM) score. 
Cannot open /proc/self/oom_score_adj for writing."); +} + +static void process_nice_level(void) { +#ifdef HAVE_NICE + int nice_level = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process nice level", 19); + if(nice(nice_level) == -1) error("Cannot set netdata CPU nice level to %d.", nice_level); + else debug(D_SYSTEM, "Set netdata nice level to %d.", nice_level); +#endif // HAVE_NICE +}; + +#define SCHED_FLAG_NONE 0x00 +#define SCHED_FLAG_PRIORITY_CONFIGURABLE 0x01 // the priority is user configurable +#define SCHED_FLAG_KEEP_AS_IS 0x04 // do not attempt to set policy, priority or nice() +#define SCHED_FLAG_USE_NICE 0x08 // use nice() after setting this policy + +struct sched_def { + char *name; + int policy; + int priority; + uint8_t flags; +} scheduler_defaults[] = { + + // the order of array members is important! + // the first defined is the default used by netdata + + // the available members are important too! + // these are all the possible scheduling policies supported by netdata + +#ifdef SCHED_IDLE + { "idle", SCHED_IDLE, 0, SCHED_FLAG_NONE }, +#endif + +#ifdef SCHED_OTHER + { "other", SCHED_OTHER, 0, SCHED_FLAG_USE_NICE }, + { "nice", SCHED_OTHER, 0, SCHED_FLAG_USE_NICE }, +#endif + +#ifdef SCHED_RR + { "rr", SCHED_RR, 0, SCHED_FLAG_PRIORITY_CONFIGURABLE }, +#endif + +#ifdef SCHED_FIFO + { "fifo", SCHED_FIFO, 0, SCHED_FLAG_PRIORITY_CONFIGURABLE }, +#endif + +#ifdef SCHED_BATCH + { "batch", SCHED_BATCH, 0, SCHED_FLAG_USE_NICE }, +#endif + + // do not change the scheduling priority + { "keep", 0, 0, SCHED_FLAG_KEEP_AS_IS }, + { "none", 0, 0, SCHED_FLAG_KEEP_AS_IS }, + + // array termination + { NULL, 0, 0, 0 } +}; + + +#ifdef HAVE_SCHED_GETSCHEDULER +static void sched_getscheduler_report(void) { + int sched = sched_getscheduler(0); + if(sched == -1) { + error("Cannot get my current process scheduling policy."); + return; + } + else { + int i; + for(i = 0 ; scheduler_defaults[i].name ; i++) { + if(scheduler_defaults[i].policy == sched) { + if(scheduler_defaults[i].flags & SCHED_FLAG_PRIORITY_CONFIGURABLE) { + struct sched_param param; + if(sched_getparam(0, ¶m) == -1) { + error("Cannot get the process scheduling priority for my policy '%s'", scheduler_defaults[i].name); + return; + } + else { + info("Running with process scheduling policy '%s', priority %d", scheduler_defaults[i].name, param.sched_priority); + } + } + else if(scheduler_defaults[i].flags & SCHED_FLAG_USE_NICE) { + #ifdef HAVE_GETPRIORITY + int n = getpriority(PRIO_PROCESS, 0); + info("Running with process scheduling policy '%s', nice level %d", scheduler_defaults[i].name, n); + #else // !HAVE_GETPRIORITY + info("Running with process scheduling policy '%s'", scheduler_defaults[i].name); + #endif // !HAVE_GETPRIORITY + } + else { + info("Running with process scheduling policy '%s'", scheduler_defaults[i].name); + } + + return; + } + } + } +} +#else // !HAVE_SCHED_GETSCHEDULER +static void sched_getscheduler_report(void) { +#ifdef HAVE_GETPRIORITY + info("Running with priority %d", getpriority(PRIO_PROCESS, 0)); +#endif // HAVE_GETPRIORITY +} +#endif // !HAVE_SCHED_GETSCHEDULER + +#ifdef HAVE_SCHED_SETSCHEDULER + +static void sched_setscheduler_set(void) { + + if(scheduler_defaults[0].name) { + const char *name = scheduler_defaults[0].name; + int policy = scheduler_defaults[0].policy, priority = scheduler_defaults[0].priority; + uint8_t flags = scheduler_defaults[0].flags; + int found = 0; + + // read the configuration + name = config_get(CONFIG_SECTION_GLOBAL, "process scheduling policy", name); + int i; + for(i = 
0 ; scheduler_defaults[i].name ; i++) { + if(!strcmp(name, scheduler_defaults[i].name)) { + found = 1; + policy = scheduler_defaults[i].policy; + priority = scheduler_defaults[i].priority; + flags = scheduler_defaults[i].flags; + + if(flags & SCHED_FLAG_KEEP_AS_IS) + goto report; + + if(flags & SCHED_FLAG_PRIORITY_CONFIGURABLE) + priority = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process scheduling priority", priority); + +#ifdef HAVE_SCHED_GET_PRIORITY_MIN + errno = 0; + if(priority < sched_get_priority_min(policy)) { + error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy)); + priority = sched_get_priority_min(policy); + } +#endif +#ifdef HAVE_SCHED_GET_PRIORITY_MAX + errno = 0; + if(priority > sched_get_priority_max(policy)) { + error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy)); + priority = sched_get_priority_max(policy); + } +#endif + break; + } + } + + if(!found) { + error("Unknown scheduling policy '%s' - falling back to nice", name); + goto fallback; + } + + const struct sched_param param = { + .sched_priority = priority + }; + + errno = 0; + i = sched_setscheduler(0, policy, ¶m); + if(i != 0) { + error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. Falling back to nice.", name, policy, priority); + } + else { + info("Adjusted netdata scheduling policy to %s (%d), with priority %d.", name, policy, priority); + if(!(flags & SCHED_FLAG_USE_NICE)) + goto report; + } + } + +fallback: + process_nice_level(); + +report: + sched_getscheduler_report(); +} +#else // !HAVE_SCHED_SETSCHEDULER +static void sched_setscheduler_set(void) { + process_nice_level(); +} +#endif // !HAVE_SCHED_SETSCHEDULER + +int become_daemon(int dont_fork, const char *user) +{ + if(!dont_fork) { + int i = fork(); + if(i == -1) { + perror("cannot fork"); + exit(1); + } + if(i != 0) { + exit(0); // the parent + } + + // become session leader + if (setsid() < 0) { + perror("Cannot become session leader."); + exit(2); + } + + // fork() again + i = fork(); + if(i == -1) { + perror("cannot fork"); + exit(1); + } + if(i != 0) { + exit(0); // the parent + } + } + + // generate our pid file + int pidfd = -1; + if(pidfile[0]) { + pidfd = open(pidfile, O_WRONLY | O_CREAT, 0644); + if(pidfd >= 0) { + if(ftruncate(pidfd, 0) != 0) + error("Cannot truncate pidfile '%s'.", pidfile); + + char b[100]; + sprintf(b, "%d\n", getpid()); + ssize_t i = write(pidfd, b, strlen(b)); + if(i <= 0) + error("Cannot write pidfile '%s'.", pidfile); + } + else error("Failed to open pidfile '%s'.", pidfile); + } + + // Set new file permissions + umask(0007); + + // adjust my Out-Of-Memory score + oom_score_adj(); + + // never become a problem + sched_setscheduler_set(); + + if(user && *user) { + if(become_user(user, pidfd) != 0) { + error("Cannot become user '%s'. 
Continuing as we are.", user); + } + else debug(D_SYSTEM, "Successfully became user '%s'.", user); + } + else { + create_needed_dir(netdata_configured_cache_dir, getuid(), getgid()); + create_needed_dir(netdata_configured_varlib_dir, getuid(), getgid()); + } + + if(pidfd != -1) + close(pidfd); + + return(0); +} diff --git a/daemon/daemon.h b/daemon/daemon.h new file mode 100644 index 000000000..412691107 --- /dev/null +++ b/daemon/daemon.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DAEMON_H +#define NETDATA_DAEMON_H 1 + +extern int become_user(const char *username, int pid_fd); + +extern int become_daemon(int dont_fork, const char *user); + +extern void netdata_cleanup_and_exit(int i); + +extern char pidfile[]; + +#endif /* NETDATA_DAEMON_H */ diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c new file mode 100644 index 000000000..68933e195 --- /dev/null +++ b/daemon/global_statistics.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +#define GLOBAL_STATS_RESET_WEB_USEC_MAX 0x01 + + +static struct global_statistics { + volatile uint16_t connected_clients; + + volatile uint64_t web_requests; + volatile uint64_t web_usec; + volatile uint64_t web_usec_max; + volatile uint64_t bytes_received; + volatile uint64_t bytes_sent; + volatile uint64_t content_size; + volatile uint64_t compressed_content_size; + + volatile uint64_t web_client_count; + + volatile uint64_t rrdr_queries_made; + volatile uint64_t rrdr_db_points_read; + volatile uint64_t rrdr_result_points_generated; +} global_statistics = { + .connected_clients = 0, + .web_requests = 0, + .web_usec = 0, + .bytes_received = 0, + .bytes_sent = 0, + .content_size = 0, + .compressed_content_size = 0, + .web_client_count = 1, + + .rrdr_queries_made = 0, + .rrdr_db_points_read = 0, + .rrdr_result_points_generated = 0, +}; + +#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) +#else +netdata_mutex_t global_statistics_mutex = NETDATA_MUTEX_INITIALIZER; + +static inline void global_statistics_lock(void) { + netdata_mutex_lock(&global_statistics_mutex); +} + +static inline void global_statistics_unlock(void) { + netdata_mutex_unlock(&global_statistics_mutex); +} +#endif + + +void rrdr_query_completed(uint64_t db_points_read, uint64_t result_points_generated) { +#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) + __atomic_fetch_add(&global_statistics.rrdr_queries_made, 1, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.rrdr_db_points_read, db_points_read, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, result_points_generated, __ATOMIC_SEQ_CST); +#else + #warning NOT using atomic operations - using locks for global statistics + if (web_server_is_multithreaded) + global_statistics_lock(); + + global_statistics.rrdr_queries_made++; + global_statistics.rrdr_db_points_read += db_points_read; + global_statistics.rrdr_result_points_generated += result_points_generated; + + if (web_server_is_multithreaded) + global_statistics_unlock(); +#endif +} + +void finished_web_request_statistics(uint64_t dt, + uint64_t bytes_received, + uint64_t bytes_sent, + uint64_t content_size, + uint64_t compressed_content_size) { +#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) + uint64_t old_web_usec_max = global_statistics.web_usec_max; + while(dt > old_web_usec_max) + __atomic_compare_exchange(&global_statistics.web_usec_max, &old_web_usec_max, &dt, 1, 
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + + __atomic_fetch_add(&global_statistics.web_requests, 1, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.web_usec, dt, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.bytes_received, bytes_received, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.bytes_sent, bytes_sent, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.content_size, content_size, __ATOMIC_SEQ_CST); + __atomic_fetch_add(&global_statistics.compressed_content_size, compressed_content_size, __ATOMIC_SEQ_CST); +#else +#warning NOT using atomic operations - using locks for global statistics + if (web_server_is_multithreaded) + global_statistics_lock(); + + if (dt > global_statistics.web_usec_max) + global_statistics.web_usec_max = dt; + + global_statistics.web_requests++; + global_statistics.web_usec += dt; + global_statistics.bytes_received += bytes_received; + global_statistics.bytes_sent += bytes_sent; + global_statistics.content_size += content_size; + global_statistics.compressed_content_size += compressed_content_size; + + if (web_server_is_multithreaded) + global_statistics_unlock(); +#endif +} + +uint64_t web_client_connected(void) { +#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) + __atomic_fetch_add(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST); + uint64_t id = __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_SEQ_CST); +#else + if (web_server_is_multithreaded) + global_statistics_lock(); + + global_statistics.connected_clients++; + uint64_t id = global_statistics.web_client_count++; + + if (web_server_is_multithreaded) + global_statistics_unlock(); +#endif + + return id; +} + +void web_client_disconnected(void) { +#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) + __atomic_fetch_sub(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST); +#else + if (web_server_is_multithreaded) + global_statistics_lock(); + + global_statistics.connected_clients--; + + if (web_server_is_multithreaded) + global_statistics_unlock(); +#endif +} + + +static inline void global_statistics_copy(struct global_statistics *gs, uint8_t options) { +#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) + gs->connected_clients = __atomic_fetch_add(&global_statistics.connected_clients, 0, __ATOMIC_SEQ_CST); + gs->web_requests = __atomic_fetch_add(&global_statistics.web_requests, 0, __ATOMIC_SEQ_CST); + gs->web_usec = __atomic_fetch_add(&global_statistics.web_usec, 0, __ATOMIC_SEQ_CST); + gs->web_usec_max = __atomic_fetch_add(&global_statistics.web_usec_max, 0, __ATOMIC_SEQ_CST); + gs->bytes_received = __atomic_fetch_add(&global_statistics.bytes_received, 0, __ATOMIC_SEQ_CST); + gs->bytes_sent = __atomic_fetch_add(&global_statistics.bytes_sent, 0, __ATOMIC_SEQ_CST); + gs->content_size = __atomic_fetch_add(&global_statistics.content_size, 0, __ATOMIC_SEQ_CST); + gs->compressed_content_size = __atomic_fetch_add(&global_statistics.compressed_content_size, 0, __ATOMIC_SEQ_CST); + gs->web_client_count = __atomic_fetch_add(&global_statistics.web_client_count, 0, __ATOMIC_SEQ_CST); + + gs->rrdr_queries_made = __atomic_fetch_add(&global_statistics.rrdr_queries_made, 0, __ATOMIC_SEQ_CST); + gs->rrdr_db_points_read = __atomic_fetch_add(&global_statistics.rrdr_db_points_read, 0, __ATOMIC_SEQ_CST); + gs->rrdr_result_points_generated = __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, 0, __ATOMIC_SEQ_CST); + + if(options & GLOBAL_STATS_RESET_WEB_USEC_MAX) { + 
uint64_t n = 0; + __atomic_compare_exchange(&global_statistics.web_usec_max, &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); + } +#else + global_statistics_lock(); + + memcpy(gs, (const void *)&global_statistics, sizeof(struct global_statistics)); + + if (options & GLOBAL_STATS_RESET_WEB_USEC_MAX) + global_statistics.web_usec_max = 0; + + global_statistics_unlock(); +#endif +} + +void global_statistics_charts(void) { + static unsigned long long old_web_requests = 0, + old_web_usec = 0, + old_content_size = 0, + old_compressed_content_size = 0; + + static collected_number compression_ratio = -1, + average_response_time = -1; + + struct global_statistics gs; + struct rusage me, thread; + + global_statistics_copy(&gs, GLOBAL_STATS_RESET_WEB_USEC_MAX); + getrusage(RUSAGE_THREAD, &thread); + getrusage(RUSAGE_SELF, &me); + + { + static RRDSET *st_cpu_thread = NULL; + static RRDDIM *rd_cpu_thread_user = NULL, + *rd_cpu_thread_system = NULL; + +#ifdef __FreeBSD__ + if (unlikely(!st_cpu_thread)) { + st_cpu_thread = rrdset_create_localhost( + "netdata" + , "plugin_freebsd_cpu" + , NULL + , "freebsd" + , NULL + , "NetData FreeBSD Plugin CPU usage" + , "milliseconds/s" + , "netdata" + , "stats" + , 132000 + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); +#else + if (unlikely(!st_cpu_thread)) { + st_cpu_thread = rrdset_create_localhost( + "netdata" + , "plugin_proc_cpu" + , NULL + , "proc" + , NULL + , "NetData Proc Plugin CPU usage" + , "milliseconds/s" + , "netdata" + , "stats" + , 132000 + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); +#endif + + rd_cpu_thread_user = rrddim_add(st_cpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rd_cpu_thread_system = rrddim_add(st_cpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_cpu_thread); + + rrddim_set_by_pointer(st_cpu_thread, rd_cpu_thread_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); + rrddim_set_by_pointer(st_cpu_thread, rd_cpu_thread_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); + rrdset_done(st_cpu_thread); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_cpu = NULL; + static RRDDIM *rd_cpu_user = NULL, + *rd_cpu_system = NULL; + + if (unlikely(!st_cpu)) { + st_cpu = rrdset_create_localhost( + "netdata" + , "server_cpu" + , NULL + , "netdata" + , NULL + , "NetData CPU usage" + , "milliseconds/s" + , "netdata" + , "stats" + , 130000 + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); + + rd_cpu_user = rrddim_add(st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rd_cpu_system = rrddim_add(st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_cpu); + + rrddim_set_by_pointer(st_cpu, rd_cpu_user, me.ru_utime.tv_sec * 1000000ULL + me.ru_utime.tv_usec); + rrddim_set_by_pointer(st_cpu, rd_cpu_system, me.ru_stime.tv_sec * 1000000ULL + me.ru_stime.tv_usec); + rrdset_done(st_cpu); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_clients = NULL; + static RRDDIM *rd_clients = NULL; + + if (unlikely(!st_clients)) { + st_clients = rrdset_create_localhost( + "netdata" + , "clients" + , NULL + , "netdata" + , NULL + , "NetData Web Clients" + , "connected clients" + , "netdata" + , "stats" + , 130200 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_clients = rrddim_add(st_clients, "clients", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else + 
rrdset_next(st_clients); + + rrddim_set_by_pointer(st_clients, rd_clients, gs.connected_clients); + rrdset_done(st_clients); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_reqs = NULL; + static RRDDIM *rd_requests = NULL; + + if (unlikely(!st_reqs)) { + st_reqs = rrdset_create_localhost( + "netdata" + , "requests" + , NULL + , "netdata" + , NULL + , "NetData Web Requests" + , "requests/s" + , "netdata" + , "stats" + , 130300 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_requests = rrddim_add(st_reqs, "requests", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_reqs); + + rrddim_set_by_pointer(st_reqs, rd_requests, (collected_number) gs.web_requests); + rrdset_done(st_reqs); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_bytes = NULL; + static RRDDIM *rd_in = NULL, + *rd_out = NULL; + + if (unlikely(!st_bytes)) { + st_bytes = rrdset_create_localhost( + "netdata" + , "net" + , NULL + , "netdata" + , NULL + , "NetData Network Traffic" + , "kilobits/s" + , "netdata" + , "stats" + , 130000 + , localhost->rrd_update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_bytes, "in", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_bytes, "out", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_bytes); + + rrddim_set_by_pointer(st_bytes, rd_in, (collected_number) gs.bytes_received); + rrddim_set_by_pointer(st_bytes, rd_out, (collected_number) gs.bytes_sent); + rrdset_done(st_bytes); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_duration = NULL; + static RRDDIM *rd_average = NULL, + *rd_max = NULL; + + if (unlikely(!st_duration)) { + st_duration = rrdset_create_localhost( + "netdata" + , "response_time" + , NULL + , "netdata" + , NULL + , "NetData API Response Time" + , "ms/request" + , "netdata" + , "stats" + , 130400 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_average = rrddim_add(st_duration, "average", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + rd_max = rrddim_add(st_duration, "max", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_duration); + + uint64_t gweb_usec = gs.web_usec; + uint64_t gweb_requests = gs.web_requests; + + uint64_t web_usec = (gweb_usec >= old_web_usec) ? gweb_usec - old_web_usec : 0; + uint64_t web_requests = (gweb_requests >= old_web_requests) ? 
gweb_requests - old_web_requests : 0; + + old_web_usec = gweb_usec; + old_web_requests = gweb_requests; + + if (web_requests) + average_response_time = (collected_number) (web_usec / web_requests); + + if (unlikely(average_response_time != -1)) + rrddim_set_by_pointer(st_duration, rd_average, average_response_time); + else + rrddim_set_by_pointer(st_duration, rd_average, 0); + + rrddim_set_by_pointer(st_duration, rd_max, ((gs.web_usec_max)?(collected_number)gs.web_usec_max:average_response_time)); + rrdset_done(st_duration); + } + + // ---------------------------------------------------------------- + + { + static RRDSET *st_compression = NULL; + static RRDDIM *rd_savings = NULL; + + if (unlikely(!st_compression)) { + st_compression = rrdset_create_localhost( + "netdata" + , "compression_ratio" + , NULL + , "netdata" + , NULL + , "NetData API Responses Compression Savings Ratio" + , "percentage" + , "netdata" + , "stats" + , 130500 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_savings = rrddim_add(st_compression, "savings", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + else + rrdset_next(st_compression); + + // since we don't lock here to read the global statistics + // read the smaller value first + unsigned long long gcompressed_content_size = gs.compressed_content_size; + unsigned long long gcontent_size = gs.content_size; + + unsigned long long compressed_content_size = gcompressed_content_size - old_compressed_content_size; + unsigned long long content_size = gcontent_size - old_content_size; + + old_compressed_content_size = gcompressed_content_size; + old_content_size = gcontent_size; + + if (content_size && content_size >= compressed_content_size) + compression_ratio = ((content_size - compressed_content_size) * 100 * 1000) / content_size; + + if (compression_ratio != -1) + rrddim_set_by_pointer(st_compression, rd_savings, compression_ratio); + + rrdset_done(st_compression); + } + + // ---------------------------------------------------------------- + + if(gs.rrdr_queries_made) { + static RRDSET *st_rrdr_queries = NULL; + static RRDDIM *rd_queries = NULL; + + if (unlikely(!st_rrdr_queries)) { + st_rrdr_queries = rrdset_create_localhost( + "netdata" + , "queries" + , NULL + , "queries" + , NULL + , "NetData API Queries" + , "queries/s" + , "netdata" + , "stats" + , 130500 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rd_queries = rrddim_add(st_rrdr_queries, "queries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_rrdr_queries); + + rrddim_set_by_pointer(st_rrdr_queries, rd_queries, (collected_number)gs.rrdr_queries_made); + + rrdset_done(st_rrdr_queries); + } + + // ---------------------------------------------------------------- + + if(gs.rrdr_db_points_read || gs.rrdr_result_points_generated) { + static RRDSET *st_rrdr_points = NULL; + static RRDDIM *rd_points_read = NULL; + static RRDDIM *rd_points_generated = NULL; + + if (unlikely(!st_rrdr_points)) { + st_rrdr_points = rrdset_create_localhost( + "netdata" + , "db_points" + , NULL + , "queries" + , NULL + , "NetData API Points" + , "points/s" + , "netdata" + , "stats" + , 130501 + , localhost->rrd_update_every + , RRDSET_TYPE_AREA + ); + + rd_points_read = rrddim_add(st_rrdr_points, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_points_generated = rrddim_add(st_rrdr_points, "generated", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st_rrdr_points); + + rrddim_set_by_pointer(st_rrdr_points, rd_points_read, (collected_number)gs.rrdr_db_points_read); 
+ rrddim_set_by_pointer(st_rrdr_points, rd_points_generated, (collected_number)gs.rrdr_result_points_generated); + + rrdset_done(st_rrdr_points); + } +} diff --git a/daemon/global_statistics.h b/daemon/global_statistics.h new file mode 100644 index 000000000..9dd7db51a --- /dev/null +++ b/daemon/global_statistics.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_GLOBAL_STATISTICS_H +#define NETDATA_GLOBAL_STATISTICS_H 1 + +#include "common.h" + +// ---------------------------------------------------------------------------- +// global statistics + +extern void rrdr_query_completed(uint64_t db_points_read, uint64_t result_points_generated); + +extern void finished_web_request_statistics(uint64_t dt, + uint64_t bytes_received, + uint64_t bytes_sent, + uint64_t content_size, + uint64_t compressed_content_size); + +extern uint64_t web_client_connected(void); +extern void web_client_disconnected(void); +extern void global_statistics_charts(void); + +#endif /* NETDATA_GLOBAL_STATISTICS_H */ diff --git a/daemon/main.c b/daemon/main.c new file mode 100644 index 000000000..b2c4c80bf --- /dev/null +++ b/daemon/main.c @@ -0,0 +1,1098 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +struct config netdata_config = { + .sections = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { + .avl_tree = { + .root = NULL, + .compar = appconfig_section_compare + }, + .rwlock = AVL_LOCK_INITIALIZER + } +}; + +void netdata_cleanup_and_exit(int ret) { + // enabling this, is wrong + // because the threads will be cancelled while cleaning up + // netdata_exit = 1; + + error_log_limit_unlimited(); + info("EXIT: netdata prepares to exit with code %d...", ret); + + // cleanup/save the database and exit + info("EXIT: cleaning up the database..."); + rrdhost_cleanup_all(); + + if(!ret) { + // exit cleanly + + // stop everything + info("EXIT: stopping master threads..."); + cancel_main_threads(); + + // free the database + info("EXIT: freeing database memory..."); + rrdhost_free_all(); + } + + // unlink the pid + if(pidfile[0]) { + info("EXIT: removing netdata PID file '%s'...", pidfile); + if(unlink(pidfile) != 0) + error("EXIT: cannot unlink pidfile '%s'.", pidfile); + } + + info("EXIT: all done - netdata is now exiting - bye bye..."); + exit(ret); +} + +struct netdata_static_thread static_threads[] = { + + NETDATA_PLUGIN_HOOK_CHECKS + NETDATA_PLUGIN_HOOK_FREEBSD + NETDATA_PLUGIN_HOOK_MACOS + + // linux internal plugins + NETDATA_PLUGIN_HOOK_LINUX_NFACCT + NETDATA_PLUGIN_HOOK_LINUX_PROC + NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE + NETDATA_PLUGIN_HOOK_LINUX_CGROUPS + NETDATA_PLUGIN_HOOK_LINUX_TC + + NETDATA_PLUGIN_HOOK_IDLEJITTER + NETDATA_PLUGIN_HOOK_STATSD + + // common plugins for all systems + {"BACKENDS", NULL, NULL, 1, NULL, NULL, backends_main}, + {"WEB_SERVER[multi]", NULL, NULL, 1, NULL, NULL, socket_listen_main_multi_threaded}, + {"WEB_SERVER[single]", NULL, NULL, 0, NULL, NULL, socket_listen_main_single_threaded}, + {"WEB_SERVER[static1]", NULL, NULL, 0, NULL, NULL, socket_listen_main_static_threaded}, + {"STREAM", NULL, NULL, 0, NULL, NULL, rrdpush_sender_thread}, + + NETDATA_PLUGIN_HOOK_PLUGINSD + NETDATA_PLUGIN_HOOK_HEALTH + + {NULL, NULL, NULL, 0, NULL, NULL, NULL} +}; + +void web_server_threading_selection(void) { + web_server_mode = web_server_mode_id(config_get(CONFIG_SECTION_WEB, "mode", web_server_mode_name(web_server_mode))); + + int multi_threaded = (web_server_mode == WEB_SERVER_MODE_MULTI_THREADED); + int single_threaded = (web_server_mode 
== WEB_SERVER_MODE_SINGLE_THREADED); + int static_threaded = (web_server_mode == WEB_SERVER_MODE_STATIC_THREADED); + + int i; + for (i = 0; static_threads[i].name; i++) { + if (static_threads[i].start_routine == socket_listen_main_multi_threaded) + static_threads[i].enabled = multi_threaded; + + if (static_threads[i].start_routine == socket_listen_main_single_threaded) + static_threads[i].enabled = single_threaded; + + if (static_threads[i].start_routine == socket_listen_main_static_threaded) + static_threads[i].enabled = static_threaded; + } +} + +void web_server_config_options(void) { + web_client_timeout = (int) config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", web_client_timeout); + web_client_first_request_timeout = (int) config_get_number(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout); + web_client_streaming_rate_t = config_get_number(CONFIG_SECTION_WEB, "accept a streaming request every seconds", web_client_streaming_rate_t); + + respect_web_browser_do_not_track_policy = config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy); + web_x_frame_options = config_get(CONFIG_SECTION_WEB, "x-frame-options response header", ""); + if(!*web_x_frame_options) web_x_frame_options = NULL; + + web_allow_connections_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow connections from", "localhost *"), NULL, SIMPLE_PATTERN_EXACT); + web_allow_dashboard_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow dashboard from", "localhost *"), NULL, SIMPLE_PATTERN_EXACT); + web_allow_badges_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow badges from", "*"), NULL, SIMPLE_PATTERN_EXACT); + web_allow_registry_from = simple_pattern_create(config_get(CONFIG_SECTION_REGISTRY, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT); + web_allow_streaming_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow streaming from", "*"), NULL, SIMPLE_PATTERN_EXACT); + web_allow_netdataconf_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow netdata.conf from", "localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*"), NULL, SIMPLE_PATTERN_EXACT); + +#ifdef NETDATA_WITH_ZLIB + web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip); + + char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default"); + if(!strcmp(s, "default")) + web_gzip_strategy = Z_DEFAULT_STRATEGY; + else if(!strcmp(s, "filtered")) + web_gzip_strategy = Z_FILTERED; + else if(!strcmp(s, "huffman only")) + web_gzip_strategy = Z_HUFFMAN_ONLY; + else if(!strcmp(s, "rle")) + web_gzip_strategy = Z_RLE; + else if(!strcmp(s, "fixed")) + web_gzip_strategy = Z_FIXED; + else { + error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s); + web_gzip_strategy = Z_DEFAULT_STRATEGY; + } + + web_gzip_level = (int)config_get_number(CONFIG_SECTION_WEB, "gzip compression level", 3); + if(web_gzip_level < 1) { + error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 1 (fastest compression).", web_gzip_level); + web_gzip_level = 1; + } + else if(web_gzip_level > 9) { + error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). 
Proceeding with level 9 (best compression).", web_gzip_level); + web_gzip_level = 9; + } +#endif /* NETDATA_WITH_ZLIB */ +} + + +int killpid(pid_t pid, int signal) +{ + int ret = -1; + debug(D_EXIT, "Request to kill pid %d", pid); + + errno = 0; + if(kill(pid, 0) == -1) { + switch(errno) { + case ESRCH: + error("Request to kill pid %d, but it is not running.", pid); + break; + + case EPERM: + error("Request to kill pid %d, but I do not have enough permissions.", pid); + break; + + default: + error("Request to kill pid %d, but I received an error.", pid); + break; + } + } + else { + errno = 0; + ret = kill(pid, signal); + if(ret == -1) { + switch(errno) { + case ESRCH: + error("Cannot kill pid %d, but it is not running.", pid); + break; + + case EPERM: + error("Cannot kill pid %d, but I do not have enough permissions.", pid); + break; + + default: + error("Cannot kill pid %d, but I received an error.", pid); + break; + } + } + } + + return ret; +} + +void cancel_main_threads() { + error_log_limit_unlimited(); + + int i, found = 0; + usec_t max = 5 * USEC_PER_SEC, step = 100000; + for (i = 0; static_threads[i].name != NULL ; i++) { + if(static_threads[i].enabled == NETDATA_MAIN_THREAD_RUNNING) { + info("EXIT: Stopping master thread: %s", static_threads[i].name); + netdata_thread_cancel(*static_threads[i].thread); + found++; + } + } + + while(found && max > 0) { + max -= step; + info("Waiting %d threads to finish...", found); + sleep_usec(step); + found = 0; + for (i = 0; static_threads[i].name != NULL ; i++) { + if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED) + found++; + } + } + + if(found) { + for (i = 0; static_threads[i].name != NULL ; i++) { + if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED) + error("Master thread %s takes too long to exit. Giving up...", static_threads[i].name); + } + } + else + info("All threads finished."); +} + +struct option_def option_definitions[] = { + // opt description arg name default value + { 'c', "Configuration file to load.", "filename", CONFIG_DIR "/" CONFIG_FILENAME}, + { 'D', "Do not fork. Run in the foreground.", NULL, "run in the background"}, + { 'd', "Fork. Run in the background.", NULL, "run in the background"}, + { 'h', "Display this help message.", NULL, NULL}, + { 'P', "File to save a pid while running.", "filename", "do not save pid to a file"}, + { 'i', "The IP address to listen to.", "IP", "all IP addresses IPv4 and IPv6"}, + { 'p', "API/Web port to use.", "port", "19999"}, + { 's', "Prefix for /proc and /sys (for containers).", "path", "no prefix"}, + { 't', "The internal clock of netdata.", "seconds", "1"}, + { 'u', "Run as user.", "username", "netdata"}, + { 'v', "Print netdata version and exit.", NULL, NULL}, + { 'V', "Print netdata version and exit.", NULL, NULL}, + { 'W', "See Advanced options below.", "options", NULL}, +}; + +int help(int exitcode) { + FILE *stream; + if(exitcode == 0) + stream = stdout; + else + stream = stderr; + + int num_opts = sizeof(option_definitions) / sizeof(struct option_def); + int i; + int max_len_arg = 0; + + // Compute maximum argument length + for( i = 0; i < num_opts; i++ ) { + if(option_definitions[i].arg_name) { + int len_arg = (int)strlen(option_definitions[i].arg_name); + if(len_arg > max_len_arg) max_len_arg = len_arg; + } + } + + if(max_len_arg > 30) max_len_arg = 30; + if(max_len_arg < 20) max_len_arg = 20; + + fprintf(stream, "%s", "\n" + " ^\n" + " |.-. .-. .-. .-. . netdata \n" + " | '-' '-' '-' '-' real-time performance monitoring, done right! 
\n" + " +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->\n" + "\n" + " Copyright (C) 2016-2017, Costa Tsaousis <costa@tsaousis.gr>\n" + " Released under GNU General Public License v3 or later.\n" + " All rights reserved.\n" + "\n" + " Home Page : https://my-netdata.io\n" + " Source Code: https://github.com/netdata/netdata\n" + " Wiki / Docs: https://github.com/netdata/netdata/wiki\n" + " Support : https://github.com/netdata/netdata/issues\n" + " License : https://github.com/netdata/netdata/blob/master/LICENSE.md\n" + "\n" + " Twitter : https://twitter.com/linuxnetdata\n" + " Facebook : https://www.facebook.com/linuxnetdata/\n" + "\n" + "\n" + ); + + fprintf(stream, " SYNOPSIS: netdata [options]\n"); + fprintf(stream, "\n"); + fprintf(stream, " Options:\n\n"); + + // Output options description. + for( i = 0; i < num_opts; i++ ) { + fprintf(stream, " -%c %-*s %s", option_definitions[i].val, max_len_arg, option_definitions[i].arg_name ? option_definitions[i].arg_name : "", option_definitions[i].description); + if(option_definitions[i].default_value) { + fprintf(stream, "\n %c %-*s Default: %s\n", ' ', max_len_arg, "", option_definitions[i].default_value); + } else { + fprintf(stream, "\n"); + } + fprintf(stream, "\n"); + } + + fprintf(stream, "\n Advanced options:\n\n" + " -W stacksize=N Set the stacksize (in bytes).\n\n" + " -W debug_flags=N Set runtime tracing to debug.log.\n\n" + " -W unittest Run internal unittests and exit.\n\n" + " -W set section option value\n" + " set netdata.conf option from the command line.\n\n" + " -W simple-pattern pattern string\n" + " Check if string matches pattern and exit.\n\n" + ); + + fprintf(stream, "\n Signals netdata handles:\n\n" + " - HUP Close and reopen log files.\n" + " - USR1 Save internal DB to disk.\n" + " - USR2 Reload health configuration.\n" + "\n" + ); + + fflush(stream); + return exitcode; +} + +// TODO: Remove this function with the nix major release. +void remove_option(int opt_index, int *argc, char **argv) { + int i; + + // remove the options. 
+ do { + *argc = *argc - 1; + for(i = opt_index; i < *argc; i++) { + argv[i] = argv[i+1]; + } + i = opt_index; + } while(argv[i][0] != '-' && opt_index >= *argc); +} + +static const char *verify_required_directory(const char *dir) { + if(chdir(dir) == -1) + fatal("Cannot cd to directory '%s'", dir); + + DIR *d = opendir(dir); + if(!d) + fatal("Cannot examine the contents of directory '%s'", dir); + closedir(d); + + return dir; +} + +void log_init(void) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/debug.log", netdata_configured_log_dir); + stdout_filename = config_get(CONFIG_SECTION_GLOBAL, "debug log", filename); + + snprintfz(filename, FILENAME_MAX, "%s/error.log", netdata_configured_log_dir); + stderr_filename = config_get(CONFIG_SECTION_GLOBAL, "error log", filename); + + snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir); + stdaccess_filename = config_get(CONFIG_SECTION_GLOBAL, "access log", filename); + + error_log_throttle_period = config_get_number(CONFIG_SECTION_GLOBAL, "errors flood protection period", error_log_throttle_period); + error_log_errors_per_period = (unsigned long)config_get_number(CONFIG_SECTION_GLOBAL, "errors to trigger flood protection", (long long int)error_log_errors_per_period); + error_log_errors_per_period_backup = error_log_errors_per_period; + + setenv("NETDATA_ERRORS_THROTTLE_PERIOD", config_get(CONFIG_SECTION_GLOBAL, "errors flood protection period" , ""), 1); + setenv("NETDATA_ERRORS_PER_PERIOD", config_get(CONFIG_SECTION_GLOBAL, "errors to trigger flood protection", ""), 1); +} + +static void backwards_compatible_config() { + // allow existing configurations to work with the current version of netdata + + if(config_exists(CONFIG_SECTION_GLOBAL, "multi threaded web server")) { + int mode = config_get_boolean(CONFIG_SECTION_GLOBAL, "multi threaded web server", 1); + web_server_mode = (mode)?WEB_SERVER_MODE_MULTI_THREADED:WEB_SERVER_MODE_SINGLE_THREADED; + } + + // move [global] options to the [web] section + config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog", + CONFIG_SECTION_WEB, "listen backlog"); + + config_move(CONFIG_SECTION_GLOBAL, "bind socket to IP", + CONFIG_SECTION_WEB, "bind to"); + + config_move(CONFIG_SECTION_GLOBAL, "bind to", + CONFIG_SECTION_WEB, "bind to"); + + config_move(CONFIG_SECTION_GLOBAL, "port", + CONFIG_SECTION_WEB, "default port"); + + config_move(CONFIG_SECTION_GLOBAL, "default port", + CONFIG_SECTION_WEB, "default port"); + + config_move(CONFIG_SECTION_GLOBAL, "disconnect idle web clients after seconds", + CONFIG_SECTION_WEB, "disconnect idle clients after seconds"); + + config_move(CONFIG_SECTION_GLOBAL, "respect web browser do not track policy", + CONFIG_SECTION_WEB, "respect do not track policy"); + + config_move(CONFIG_SECTION_GLOBAL, "web x-frame-options header", + CONFIG_SECTION_WEB, "x-frame-options response header"); + + config_move(CONFIG_SECTION_GLOBAL, "enable web responses gzip compression", + CONFIG_SECTION_WEB, "enable gzip compression"); + + config_move(CONFIG_SECTION_GLOBAL, "web compression strategy", + CONFIG_SECTION_WEB, "gzip compression strategy"); + + config_move(CONFIG_SECTION_GLOBAL, "web compression level", + CONFIG_SECTION_WEB, "gzip compression level"); + + config_move(CONFIG_SECTION_GLOBAL, "web files owner", + CONFIG_SECTION_WEB, "web files owner"); + + config_move(CONFIG_SECTION_GLOBAL, "web files group", + CONFIG_SECTION_WEB, "web files group"); + + config_move(CONFIG_SECTION_BACKEND, "opentsdb host tags", + 
CONFIG_SECTION_BACKEND, "host tags"); +} + +static void get_netdata_configured_variables() { + backwards_compatible_config(); + + // ------------------------------------------------------------------------ + // get the hostname + + char buf[HOSTNAME_MAX + 1]; + if(gethostname(buf, HOSTNAME_MAX) == -1) + error("Cannot get machine hostname."); + + netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf); + debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname); + + // ------------------------------------------------------------------------ + // get default database size + + default_rrd_history_entries = (int) config_get_number(CONFIG_SECTION_GLOBAL, "history", align_entries_to_pagesize(default_rrd_memory_mode, RRD_DEFAULT_HISTORY_ENTRIES)); + + long h = align_entries_to_pagesize(default_rrd_memory_mode, default_rrd_history_entries); + if(h != default_rrd_history_entries) { + config_set_number(CONFIG_SECTION_GLOBAL, "history", h); + default_rrd_history_entries = (int)h; + } + + if(default_rrd_history_entries < 5 || default_rrd_history_entries > RRD_HISTORY_ENTRIES_MAX) { + error("Invalid history entries %d given. Defaulting to %d.", default_rrd_history_entries, RRD_DEFAULT_HISTORY_ENTRIES); + default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES; + } + + // ------------------------------------------------------------------------ + // get default database update frequency + + default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_GLOBAL, "update every", UPDATE_EVERY); + if(default_rrd_update_every < 1 || default_rrd_update_every > 600) { + error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY_MAX); + default_rrd_update_every = UPDATE_EVERY; + } + + // ------------------------------------------------------------------------ + // get system paths + + netdata_configured_user_config_dir = config_get(CONFIG_SECTION_GLOBAL, "config directory", netdata_configured_user_config_dir); + netdata_configured_stock_config_dir = config_get(CONFIG_SECTION_GLOBAL, "stock config directory", netdata_configured_stock_config_dir); + netdata_configured_log_dir = config_get(CONFIG_SECTION_GLOBAL, "log directory", netdata_configured_log_dir); + netdata_configured_web_dir = config_get(CONFIG_SECTION_GLOBAL, "web files directory", netdata_configured_web_dir); + netdata_configured_cache_dir = config_get(CONFIG_SECTION_GLOBAL, "cache directory", netdata_configured_cache_dir); + netdata_configured_varlib_dir = config_get(CONFIG_SECTION_GLOBAL, "lib directory", netdata_configured_varlib_dir); + netdata_configured_home_dir = config_get(CONFIG_SECTION_GLOBAL, "home directory", netdata_configured_home_dir); + + { + char plugins_dirs[(FILENAME_MAX * 2) + 1]; + snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR); + netdata_configured_plugins_dir_base = strdupz(config_get(CONFIG_SECTION_GLOBAL, "plugins directory", plugins_dirs)); + quoted_strings_splitter(netdata_configured_plugins_dir_base, plugin_directories, PLUGINSD_MAX_DIRECTORIES, config_isspace); + netdata_configured_plugins_dir = plugin_directories[0]; + } + + // ------------------------------------------------------------------------ + // get default memory mode for the database + + default_rrd_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_GLOBAL, "memory mode", rrd_memory_mode_name(default_rrd_memory_mode))); + + // 
------------------------------------------------------------------------ + + netdata_configured_host_prefix = config_get(CONFIG_SECTION_GLOBAL, "host access prefix", ""); + verify_netdata_host_prefix(); + + // -------------------------------------------------------------------- + // get KSM settings + +#ifdef MADV_MERGEABLE + enable_ksm = config_get_boolean(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)", enable_ksm); +#endif + + // -------------------------------------------------------------------- + // get various system parameters + + get_system_HZ(); + get_system_cpus(); + get_system_pid_max(); +} + +static void get_system_timezone(void) { + // avoid flood calls to stat(/etc/localtime) + // http://stackoverflow.com/questions/4554271/how-to-avoid-excessive-stat-etc-localtime-calls-in-strftime-on-linux + const char *tz = getenv("TZ"); + if(!tz || !*tz) + setenv("TZ", config_get(CONFIG_SECTION_GLOBAL, "TZ environment variable", ":/etc/localtime"), 0); + + char buffer[FILENAME_MAX + 1] = ""; + const char *timezone = NULL; + ssize_t ret; + + // use the TZ variable + if(tz && *tz && *tz != ':') { + timezone = tz; + // info("TIMEZONE: using TZ variable '%s'", timezone); + } + + // use the contents of /etc/timezone + if(!timezone && !read_file("/etc/timezone", buffer, FILENAME_MAX)) { + timezone = buffer; + // info("TIMEZONE: using the contents of /etc/timezone: '%s'", timezone); + } + + // read the link /etc/localtime + if(!timezone) { + ret = readlink("/etc/localtime", buffer, FILENAME_MAX); + + if(ret > 0) { + buffer[ret] = '\0'; + + char *cmp = "/usr/share/zoneinfo/"; + size_t cmp_len = strlen(cmp); + + char *s = strstr(buffer, cmp); + if (s && s[cmp_len]) { + timezone = &s[cmp_len]; + // info("TIMEZONE: using the link of /etc/localtime: '%s'", timezone); + } + } + else + buffer[0] = '\0'; + } + + // find the timezone from strftime() + if(!timezone) { + time_t t; + struct tm *tmp, tmbuf; + + t = now_realtime_sec(); + tmp = localtime_r(&t, &tmbuf); + + if (tmp != NULL) { + if(strftime(buffer, FILENAME_MAX, "%Z", tmp) == 0) + buffer[0] = '\0'; + else { + buffer[FILENAME_MAX] = '\0'; + timezone = buffer; + // info("TIMEZONE: using strftime(): '%s'", timezone); + } + } + } + + if(timezone && *timezone) { + // make sure it does not have illegal characters + // info("TIMEZONE: fixing '%s'", timezone); + + size_t len = strlen(timezone); + char tmp[len + 1]; + char *d = tmp; + *d = '\0'; + + while(*timezone) { + if(isalnum(*timezone) || *timezone == '_' || *timezone == '/') + *d++ = *timezone++; + else + timezone++; + } + *d = '\0'; + strncpyz(buffer, tmp, len); + timezone = buffer; + // info("TIMEZONE: fixed as '%s'", timezone); + } + + if(!timezone || !*timezone) + timezone = "unknown"; + + netdata_configured_timezone = config_get(CONFIG_SECTION_GLOBAL, "timezone", timezone); +} + +void set_global_environment() { + { + char b[16]; + snprintfz(b, 15, "%d", default_rrd_update_every); + setenv("NETDATA_UPDATE_EVERY", b, 1); + } + + setenv("NETDATA_HOSTNAME" , netdata_configured_hostname, 1); + setenv("NETDATA_CONFIG_DIR" , verify_required_directory(netdata_configured_user_config_dir), 1); + setenv("NETDATA_USER_CONFIG_DIR" , verify_required_directory(netdata_configured_user_config_dir), 1); + setenv("NETDATA_STOCK_CONFIG_DIR" , verify_required_directory(netdata_configured_stock_config_dir), 1); + setenv("NETDATA_PLUGINS_DIR" , verify_required_directory(netdata_configured_plugins_dir), 1); + setenv("NETDATA_WEB_DIR" , verify_required_directory(netdata_configured_web_dir), 1); + 
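
The timezone detection in get_system_timezone() above tries, in order: a TZ environment variable that names a zone directly, the contents of /etc/timezone, the /etc/localtime symlink (when it points into the zoneinfo database), and finally the abbreviated name strftime("%Z") reports, falling back to "unknown". A minimal standalone sketch of the same fallback chain — detect_timezone() and its buffer sizes are illustrative, not netdata's API, and the /etc/timezone step is omitted for brevity:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    // illustrative helper: walk the same fallback chain as above
    static const char *detect_timezone(void) {
        static char buffer[256];
        const char *tz = getenv("TZ");

        // 1. a TZ variable that names a zone directly (not a ":/path" reference)
        if(tz && *tz && *tz != ':')
            return tz;

        // 2. the /etc/localtime symlink, when it points inside the zoneinfo db
        ssize_t ret = readlink("/etc/localtime", buffer, sizeof(buffer) - 1);
        if(ret > 0) {
            buffer[ret] = '\0';
            const char *prefix = "/usr/share/zoneinfo/";
            char *s = strstr(buffer, prefix);
            if(s && s[strlen(prefix)])
                return s + strlen(prefix);
        }

        // 3. last resort: the abbreviated name strftime() reports (e.g. "CET")
        time_t t = time(NULL);
        struct tm tmbuf;
        if(localtime_r(&t, &tmbuf) != NULL && strftime(buffer, sizeof(buffer), "%Z", &tmbuf) != 0)
            return buffer;

        return "unknown";
    }

    int main(void) {
        printf("timezone: %s\n", detect_timezone());
        return 0;
    }
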
setenv("NETDATA_CACHE_DIR" , verify_required_directory(netdata_configured_cache_dir), 1); + setenv("NETDATA_LIB_DIR" , verify_required_directory(netdata_configured_varlib_dir), 1); + setenv("NETDATA_LOG_DIR" , verify_required_directory(netdata_configured_log_dir), 1); + setenv("HOME" , verify_required_directory(netdata_configured_home_dir), 1); + setenv("NETDATA_HOST_PREFIX" , netdata_configured_host_prefix, 1); + + get_system_timezone(); + + // set the path we need + char path[1024 + 1], *p = getenv("PATH"); + if(!p) p = "/bin:/usr/bin"; + snprintfz(path, 1024, "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); + setenv("PATH", config_get(CONFIG_SECTION_PLUGINS, "PATH environment variable", path), 1); + + // python options + p = getenv("PYTHONPATH"); + if(!p) p = ""; + setenv("PYTHONPATH", config_get(CONFIG_SECTION_PLUGINS, "PYTHONPATH environment variable", p), 1); + + // disable buffering for python plugins + setenv("PYTHONUNBUFFERED", "1", 1); + + // switch to standard locale for plugins + setenv("LC_ALL", "C", 1); +} + +static int load_netdata_conf(char *filename, char overwrite_used) { + errno = 0; + + int ret = 0; + + if(filename && *filename) { + ret = config_load(filename, overwrite_used); + if(!ret) + error("CONFIG: cannot load config file '%s'.", filename); + } + else { + filename = strdupz_path_subpath(netdata_configured_user_config_dir, "netdata.conf"); + + ret = config_load(filename, overwrite_used); + if(!ret) { + info("CONFIG: cannot load user config '%s'. Will try the stock version.", filename); + freez(filename); + + filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "netdata.conf"); + ret = config_load(filename, overwrite_used); + if(!ret) + info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename); + } + + freez(filename); + } + + return ret; +} + +int main(int argc, char **argv) { + int i; + int config_loaded = 0; + int dont_fork = 0; + size_t default_stacksize; + + // set the name for logging + program_name = "netdata"; + + // parse depercated options + // TODO: Remove this block with the next major release. 
+ { + i = 1; + while(i < argc) { + if(strcmp(argv[i], "-pidfile") == 0 && (i+1) < argc) { + strncpyz(pidfile, argv[i+1], FILENAME_MAX); + fprintf(stderr, "%s: deprecated option -- %s -- please use -P instead.\n", argv[0], argv[i]); + remove_option(i, &argc, argv); + } + else if(strcmp(argv[i], "-nodaemon") == 0 || strcmp(argv[i], "-nd") == 0) { + dont_fork = 1; + fprintf(stderr, "%s: deprecated option -- %s -- please use -D instead.\n ", argv[0], argv[i]); + remove_option(i, &argc, argv); + } + else if(strcmp(argv[i], "-ch") == 0 && (i+1) < argc) { + config_set(CONFIG_SECTION_GLOBAL, "host access prefix", argv[i+1]); + fprintf(stderr, "%s: deprecated option -- %s -- please use -s instead.\n", argv[0], argv[i]); + remove_option(i, &argc, argv); + } + else if(strcmp(argv[i], "-l") == 0 && (i+1) < argc) { + config_set(CONFIG_SECTION_GLOBAL, "history", argv[i+1]); + fprintf(stderr, "%s: deprecated option -- %s -- This option will be removed with V2.*.\n", argv[0], argv[i]); + remove_option(i, &argc, argv); + } + else i++; + } + } + + // parse options + { + int num_opts = sizeof(option_definitions) / sizeof(struct option_def); + char optstring[(num_opts * 2) + 1]; + + int string_i = 0; + for( i = 0; i < num_opts; i++ ) { + optstring[string_i] = option_definitions[i].val; + string_i++; + if(option_definitions[i].arg_name) { + optstring[string_i] = ':'; + string_i++; + } + } + // terminate optstring + optstring[string_i] ='\0'; + optstring[(num_opts *2)] ='\0'; + + int opt; + while( (opt = getopt(argc, argv, optstring)) != -1 ) { + switch(opt) { + case 'c': + if(load_netdata_conf(optarg, 1) != 1) { + error("Cannot load configuration file %s.", optarg); + return 1; + } + else { + debug(D_OPTIONS, "Configuration loaded from %s.", optarg); + config_loaded = 1; + } + break; + case 'D': + dont_fork = 1; + break; + case 'd': + dont_fork = 0; + break; + case 'h': + return help(0); + case 'i': + config_set(CONFIG_SECTION_WEB, "bind to", optarg); + break; + case 'P': + strncpy(pidfile, optarg, FILENAME_MAX); + pidfile[FILENAME_MAX] = '\0'; + break; + case 'p': + config_set(CONFIG_SECTION_GLOBAL, "default port", optarg); + break; + case 's': + config_set(CONFIG_SECTION_GLOBAL, "host access prefix", optarg); + break; + case 't': + config_set(CONFIG_SECTION_GLOBAL, "update every", optarg); + break; + case 'u': + config_set(CONFIG_SECTION_GLOBAL, "run as user", optarg); + break; + case 'v': + case 'V': + printf("%s %s\n", program_name, program_version); + return 0; + case 'W': + { + char* stacksize_string = "stacksize="; + char* debug_flags_string = "debug_flags="; + + if(strcmp(optarg, "unittest") == 0) { + if(unit_test_buffer()) return 1; + if(unit_test_str2ld()) return 1; + get_netdata_configured_variables(); + default_rrd_update_every = 1; + default_rrd_memory_mode = RRD_MEMORY_MODE_RAM; + default_health_enabled = 0; + rrd_init("unittest"); + default_rrdpush_enabled = 0; + if(run_all_mockup_tests()) return 1; + if(unit_test_storage()) return 1; + fprintf(stderr, "\n\nALL TESTS PASSED\n\n"); + return 0; + } + else if(strcmp(optarg, "simple-pattern") == 0) { + if(optind + 2 > argc) { + fprintf(stderr, "%s", "\nUSAGE: -W simple-pattern 'pattern' 'string'\n\n" + " Checks if 'pattern' matches the given 'string'.\n" + " - 'pattern' can be one or more space separated words.\n" + " - each 'word' can contain one or more asterisks.\n" + " - words starting with '!' 
give negative matches.\n"
+                                " - words are processed left to right.\n"
+                                "\n"
+                                "Examples:\n"
+                                "\n"
+                                " > match all veth interfaces, except veth0:\n"
+                                "\n"
+                                "   -W simple-pattern '!veth0 veth*' 'veth12'\n"
+                                "\n"
+                                "\n"
+                                " > match all *.ext files directly in /path/:\n"
+                                "   (this will not match *.ext files in a subdir of /path/)\n"
+                                "\n"
+                                "   -W simple-pattern '!/path/*/*.ext /path/*.ext' '/path/test.ext'\n"
+                                "\n"
+                        );
+                        return 1;
+                    }
+
+                    const char *haystack = argv[optind];
+                    const char *needle = argv[optind + 1];
+                    size_t len = strlen(needle) + 1;
+                    char wildcarded[len];
+
+                    SIMPLE_PATTERN *p = simple_pattern_create(haystack, NULL, SIMPLE_PATTERN_EXACT);
+                    int ret = simple_pattern_matches_extract(p, needle, wildcarded, len);
+                    simple_pattern_free(p);
+
+                    if(ret) {
+                        fprintf(stdout, "RESULT: MATCHED - pattern '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
+                        return 0;
+                    }
+                    else {
+                        fprintf(stdout, "RESULT: NOT MATCHED - pattern '%s' does not match '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
+                        return 1;
+                    }
+                }
+                else if(strncmp(optarg, stacksize_string, strlen(stacksize_string)) == 0) {
+                    optarg += strlen(stacksize_string);
+                    config_set(CONFIG_SECTION_GLOBAL, "pthread stack size", optarg);
+                }
+                else if(strncmp(optarg, debug_flags_string, strlen(debug_flags_string)) == 0) {
+                    optarg += strlen(debug_flags_string);
+                    config_set(CONFIG_SECTION_GLOBAL, "debug flags", optarg);
+                    debug_flags = strtoull(optarg, NULL, 0);
+                }
+                else if(strcmp(optarg, "set") == 0) {
+                    if(optind + 3 > argc) {
+                        fprintf(stderr, "%s", "\nUSAGE: -W set 'section' 'key' 'value'\n\n"
+                                " Overwrites settings of netdata.conf.\n"
+                                "\n"
+                                " These options interact with: -c netdata.conf\n"
+                                " If -c netdata.conf is given on the command line\n"
+                                " before -W set..., netdata.conf may overwrite the\n"
+                                " command line parameters.\n"
+                                " If -c netdata.conf is given after -W set... (or is\n"
+                                " missing), the command line parameters cannot be\n"
+                                " overwritten."
+                                "\n"
+                        );
+                        return 1;
+                    }
+                    const char *section = argv[optind];
+                    const char *key = argv[optind + 1];
+                    const char *value = argv[optind + 2];
+                    optind += 3;
+
+                    // set this one as the default
+                    // only if it is not already set in the config file
+                    // so the caller can use -c netdata.conf before or
+                    // after this parameter to prevent or allow overwriting
+                    // variables at netdata.conf
+                    config_set_default(section, key, value);
+
+                    // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
+                }
+                else if(strcmp(optarg, "get") == 0) {
+                    if(optind + 3 > argc) {
+                        fprintf(stderr, "%s", "\nUSAGE: -W get 'section' 'key' 'default_value'\n\n"
+                                " Prints settings of netdata.conf.\n"
+                                "\n"
+                                " These options interact with: -c netdata.conf\n"
+                                " -c netdata.conf has to be given before -W get.\n"
+                                "\n"
+                        );
+                        return 1;
+                    }
+
+                    if(!config_loaded) {
+                        fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE before -W get. Using default config.\n");
+                        load_netdata_conf(NULL, 0);
+                    }
+
+                    backwards_compatible_config();
+                    get_netdata_configured_variables();
+
+                    const char *section = argv[optind];
+                    const char *key = argv[optind + 1];
+                    const char *def = argv[optind + 2];
+                    const char *value = config_get(section, key, def);
+                    printf("%s\n", value);
+                    return 0;
+                }
+                else {
+                    fprintf(stderr, "Unknown -W parameter '%s'\n", optarg);
+                    return help(1);
+                }
+            }
+            break;
+
+            default: /* ? */
+                fprintf(stderr, "Unknown parameter '%c'\n", opt);
+                return help(1);
+        }
+    }
+}
+
+#ifdef _SC_OPEN_MAX
+    // close all open file descriptors, except the standard ones
+    // the caller may have left open files (lxc-attach has this issue)
+    {
+        int fd;
+        for(fd = (int) (sysconf(_SC_OPEN_MAX) - 1); fd > 2; fd--)
+            if(fd_is_valid(fd)) close(fd);
+    }
+#endif
+
+    if(!config_loaded)
+        load_netdata_conf(NULL, 0);
+
+    // ------------------------------------------------------------------------
+    // initialize netdata
+    {
+        char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1");
+        if(pmax && *pmax)
+            setenv("MALLOC_ARENA_MAX", pmax, 1);
+
+#if defined(HAVE_C_MALLOPT)
+        i = (int)config_get_number(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for netdata", 1);
+        if(i > 0)
+            mallopt(M_ARENA_MAX, i); // apply the configured arena limit
+#endif
+
+        // prepare configuration environment variables for the plugins
+
+        get_netdata_configured_variables();
+        set_global_environment();
+
+        // work while we are cd'd into config_dir,
+        // to allow the plugins to refer to their config
+        // files using relative filenames
+        if(chdir(netdata_configured_user_config_dir) == -1)
+            fatal("Cannot cd to '%s'", netdata_configured_user_config_dir);
+    }
+
+    char *user = NULL;
+
+    {
+        // --------------------------------------------------------------------
+        // get the debugging flags from the configuration file
+
+        char *flags = config_get(CONFIG_SECTION_GLOBAL, "debug flags", "0x0000000000000000");
+        setenv("NETDATA_DEBUG_FLAGS", flags, 1);
+
+        debug_flags = strtoull(flags, NULL, 0);
+        debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags);
+
+        if(debug_flags != 0) {
+            struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
+            if(setrlimit(RLIMIT_CORE, &rl) != 0)
+                error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+
+#ifdef HAVE_SYS_PRCTL_H
+            prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+#endif
+        }
+
+
+        // --------------------------------------------------------------------
+        // get log filenames and settings
+
+        log_init();
+        error_log_limit_unlimited();
+
+
+        // --------------------------------------------------------------------
+        // setup process signals
+
+        // block signals while initializing threads;
+        // the threads spawned from now on inherit the blocked signal mask.
+ signals_block(); + + // setup the signals we want to use + signals_init(); + + // setup threads configs + default_stacksize = netdata_threads_init(); + + + // -------------------------------------------------------------------- + // check which threads are enabled and initialize them + + for (i = 0; static_threads[i].name != NULL ; i++) { + struct netdata_static_thread *st = &static_threads[i]; + + if(st->config_name) + st->enabled = config_get_boolean(st->config_section, st->config_name, st->enabled); + + if(st->enabled && st->init_routine) + st->init_routine(); + } + + + // -------------------------------------------------------------------- + // get the user we should run + + // IMPORTANT: this is required before web_files_uid() + if(getuid() == 0) { + user = config_get(CONFIG_SECTION_GLOBAL, "run as user", NETDATA_USER); + } + else { + struct passwd *passwd = getpwuid(getuid()); + user = config_get(CONFIG_SECTION_GLOBAL, "run as user", (passwd && passwd->pw_name)?passwd->pw_name:""); + } + + // -------------------------------------------------------------------- + // create the listening sockets + + web_client_api_v1_init(); + web_server_threading_selection(); + + if(web_server_mode != WEB_SERVER_MODE_NONE) + api_listen_sockets_setup(); + } + + // initialize the log files + open_all_log_files(); + +#ifdef NETDATA_INTERNAL_CHECKS + if(debug_flags != 0) { + struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; + if(setrlimit(RLIMIT_CORE, &rl) != 0) + error("Cannot request unlimited core dumps for debugging... Proceeding anyway..."); +#ifdef HAVE_SYS_PRCTL_H + prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); +#endif + } +#endif /* NETDATA_INTERNAL_CHECKS */ + + // get the max file limit + if(getrlimit(RLIMIT_NOFILE, &rlimit_nofile) != 0) + error("getrlimit(RLIMIT_NOFILE) failed"); + else + info("resources control: allowed file descriptors: soft = %zu, max = %zu", (size_t)rlimit_nofile.rlim_cur, (size_t)rlimit_nofile.rlim_max); + + // fork, switch user, create pid file, set process priority + if(become_daemon(dont_fork, user) == -1) + fatal("Cannot daemonize myself."); + + info("netdata started on pid %d.", getpid()); + + // IMPORTANT: these have to run once, while single threaded + // but after we have switched user + web_files_uid(); + web_files_gid(); + + netdata_threads_init_after_fork((size_t)config_get_number(CONFIG_SECTION_GLOBAL, "pthread stack size", (long)default_stacksize)); + + // ------------------------------------------------------------------------ + // initialize rrd, registry, health, rrdpush, etc. + + rrd_init(netdata_configured_hostname); + + + // ------------------------------------------------------------------------ + // enable log flood protection + + error_log_limit_reset(); + + + // ------------------------------------------------------------------------ + // spawn the threads + + web_server_config_options(); + + for (i = 0; static_threads[i].name != NULL ; i++) { + struct netdata_static_thread *st = &static_threads[i]; + + if(st->enabled) { + st->thread = mallocz(sizeof(netdata_thread_t)); + debug(D_SYSTEM, "Starting thread %s.", st->name); + netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, st); + } + else debug(D_SYSTEM, "Not starting thread %s.", st->name); + } + + info("netdata initialization completed. 
Enjoy real-time performance monitoring!");
+
+
+    // ------------------------------------------------------------------------
+    // unblock signals
+
+    signals_unblock();
+
+    // ------------------------------------------------------------------------
+    // Handle signals
+
+    signals_handle();
+
+    // should never reach this point
+    // but we need it for rpmlint #2752
+    return 1;
+}
diff --git a/daemon/main.h b/daemon/main.h
new file mode 100644
index 000000000..cb0bde6a9
--- /dev/null
+++ b/daemon/main.h
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_MAIN_H
+#define NETDATA_MAIN_H 1
+
+#include "common.h"
+
+extern struct config netdata_config;
+
+#define NETDATA_MAIN_THREAD_RUNNING   CONFIG_BOOLEAN_YES
+#define NETDATA_MAIN_THREAD_EXITING   (CONFIG_BOOLEAN_YES + 1)
+#define NETDATA_MAIN_THREAD_EXITED    CONFIG_BOOLEAN_NO
+
+/**
+ * This struct contains information about command line options.
+ */
+struct option_def {
+    /** The option character */
+    const char val;
+    /** Short description of what the option does */
+    const char *description;
+    /** Name of the argument displayed in SYNOPSIS
+        (NULL when the option takes no argument) */
+    const char *arg_name;
+    /** Default value if not set */
+    const char *default_value;
+};
+
+struct netdata_static_thread {
+    char *name;                         // the name of the thread as it should appear in the logs
+
+    char *config_section;               // the section of netdata.conf to check if this is enabled or not
+    char *config_name;                  // the name of the config option to check if it is true or false
+
+    volatile sig_atomic_t enabled;      // the current status of the thread
+
+    netdata_thread_t *thread;           // internal use, to maintain a pointer to the created thread
+
+    void (*init_routine) (void);        // an initialization function to run before spawning the thread
+    void *(*start_routine) (void *);    // the threaded worker
+};
+
+extern void cancel_main_threads(void);
+extern int killpid(pid_t pid, int signal);
+extern void netdata_cleanup_and_exit(int ret) NORETURN;
+
+#endif /* NETDATA_MAIN_H */
diff --git a/daemon/signals.c b/daemon/signals.c
new file mode 100644
index 000000000..71f271887
--- /dev/null
+++ b/daemon/signals.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+
+typedef enum signal_action {
+    NETDATA_SIGNAL_END_OF_LIST,
+    NETDATA_SIGNAL_IGNORE,
+    NETDATA_SIGNAL_EXIT_CLEANLY,
+    NETDATA_SIGNAL_SAVE_DATABASE,
+    NETDATA_SIGNAL_LOG_ROTATE,
+    NETDATA_SIGNAL_RELOAD_HEALTH,
+    NETDATA_SIGNAL_FATAL,
+} SIGNAL_ACTION;
+
+static struct {
+    int signo;              // the signal
+    const char *name;       // the name of the signal
+    size_t count;           // the number of signals received
+    SIGNAL_ACTION action;   // the action to take
+} signals_waiting[] = {
+    { SIGPIPE, "SIGPIPE", 0, NETDATA_SIGNAL_IGNORE        },
+    { SIGINT , "SIGINT",  0, NETDATA_SIGNAL_EXIT_CLEANLY  },
+    { SIGQUIT, "SIGQUIT", 0, NETDATA_SIGNAL_EXIT_CLEANLY  },
+    { SIGTERM, "SIGTERM", 0, NETDATA_SIGNAL_EXIT_CLEANLY  },
+    { SIGHUP,  "SIGHUP",  0, NETDATA_SIGNAL_LOG_ROTATE    },
+    { SIGUSR1, "SIGUSR1", 0, NETDATA_SIGNAL_SAVE_DATABASE },
+    { SIGUSR2, "SIGUSR2", 0, NETDATA_SIGNAL_RELOAD_HEALTH },
+    { SIGBUS,  "SIGBUS",  0, NETDATA_SIGNAL_FATAL         },
+
+    // terminator
+    { 0, "NONE", 0, NETDATA_SIGNAL_END_OF_LIST }
+};
+
+static void signal_handler(int signo) {
+    // find the entry in the list
+    int i;
+    for(i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST ; i++) {
+        if(unlikely(signals_waiting[i].signo == signo)) {
+            signals_waiting[i].count++;
+
+            if(signals_waiting[i].action == NETDATA_SIGNAL_FATAL) {
+                char buffer[200 + 1];
+                snprintfz(buffer, 200, "\nSIGNAL HANDLER: received: %s. Oops! This is bad!\n", signals_waiting[i].name);
+                if(write(STDERR_FILENO, buffer, strlen(buffer)) == -1) {
+                    // nothing to do - we cannot write, and there is no way to complain about it
+                    ;
+                }
+            }
+
+            return;
+        }
+    }
+}
+
+void signals_block(void) {
+    sigset_t sigset;
+    sigfillset(&sigset);
+
+    if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) != 0)
+        error("SIGNAL: Could not block signals for threads");
+}
+
+void signals_unblock(void) {
+    sigset_t sigset;
+    sigfillset(&sigset);
+
+    if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) {
+        error("SIGNAL: Could not unblock signals for threads");
+    }
+}
+
+void signals_init(void) {
+    // Catch signals which we want to use
+    struct sigaction sa;
+    sa.sa_flags = 0;
+
+    // ignore all signals while we run in a signal handler
+    sigfillset(&sa.sa_mask);
+
+    int i;
+    for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+        if(signals_waiting[i].action == NETDATA_SIGNAL_IGNORE)
+            sa.sa_handler = SIG_IGN;
+        else
+            sa.sa_handler = signal_handler;
+
+        if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
+            error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name);
+    }
+}
+
+void signals_reset(void) {
+    struct sigaction sa;
+    sigemptyset(&sa.sa_mask);
+    sa.sa_handler = SIG_DFL;
+    sa.sa_flags = 0;
+
+    int i;
+    for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+        if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
+            error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name);
+    }
+}
+
+void signals_handle(void) {
+    while(1) {
+
+        // pause() causes the calling process (or thread) to sleep until a signal
+        // is delivered that either terminates the process or causes the invocation
+        // of a signal-catching function.
+        if(pause() == -1 && errno == EINTR) {
+
+            // loop once, but keep looping while signals are coming in
+            // this is needed because a few operations may take some time
+            // so we need to check for new signals before pausing again
+            int found = 1;
+            while(found) {
+                found = 0;
+
+                // execute the actions of the signals
+                int i;
+                for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+                    if (signals_waiting[i].count) {
+                        found = 1;
+                        signals_waiting[i].count = 0;
+                        const char *name = signals_waiting[i].name;
+
+                        switch (signals_waiting[i].action) {
+                            case NETDATA_SIGNAL_RELOAD_HEALTH:
+                                error_log_limit_unlimited();
+                                info("SIGNAL: Received %s. Reloading HEALTH configuration...", name);
+                                health_reload();
+                                error_log_limit_reset();
+                                break;
+
+                            case NETDATA_SIGNAL_SAVE_DATABASE:
+                                error_log_limit_unlimited();
+                                info("SIGNAL: Received %s. Saving databases...", name);
+                                rrdhost_save_all();
+                                info("Databases saved.");
+                                error_log_limit_reset();
+                                break;
+
+                            case NETDATA_SIGNAL_LOG_ROTATE:
+                                error_log_limit_unlimited();
+                                info("SIGNAL: Received %s. Reopening all log files...", name);
+                                reopen_all_log_files();
+                                error_log_limit_reset();
+                                break;
+
+                            case NETDATA_SIGNAL_EXIT_CLEANLY:
+                                error_log_limit_unlimited();
+                                info("SIGNAL: Received %s. Cleaning up to exit...", name);
+                                netdata_cleanup_and_exit(0);
+                                exit(0);
+
+                            case NETDATA_SIGNAL_FATAL:
+                                fatal("SIGNAL: Received %s. netdata now exits.", name);
+
+                            default:
+                                info("SIGNAL: Received %s. No signal handler configured.
Ignoring it.", name); + break; + } + } + } + } + } + else + error("SIGNAL: pause() returned but it was not interrupted by a signal."); + } +} diff --git a/daemon/signals.h b/daemon/signals.h new file mode 100644 index 000000000..e7e64365d --- /dev/null +++ b/daemon/signals.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SIGNALS_H +#define NETDATA_SIGNALS_H 1 + +extern void signals_init(void); +extern void signals_block(void); +extern void signals_unblock(void); +extern void signals_reset(void); +extern void signals_handle(void) NORETURN; + +#endif //NETDATA_SIGNALS_H diff --git a/daemon/unit_test.c b/daemon/unit_test.c new file mode 100644 index 000000000..9978647b4 --- /dev/null +++ b/daemon/unit_test.c @@ -0,0 +1,1412 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +static int check_number_printing(void) { + struct { + calculated_number n; + const char *correct; + } values[] = { + { .n = 0, .correct = "0" }, + { .n = 0.0000001, .correct = "0.0000001" }, + { .n = 0.00000009, .correct = "0.0000001" }, + { .n = 0.000000001, .correct = "0" }, + { .n = 99.99999999999999999, .correct = "100" }, + { .n = -99.99999999999999999, .correct = "-100" }, + { .n = 123.4567890123456789, .correct = "123.456789" }, + { .n = 9999.9999999, .correct = "9999.9999999" }, + { .n = -9999.9999999, .correct = "-9999.9999999" }, + { .n = 0, .correct = NULL }, + }; + + char netdata[50], system[50]; + int i, failed = 0; + for(i = 0; values[i].correct ; i++) { + print_calculated_number(netdata, values[i].n); + snprintfz(system, 49, "%0.12" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)values[i].n); + + int ok = 1; + if(strcmp(netdata, values[i].correct) != 0) { + ok = 0; + failed++; + } + + fprintf(stderr, "'%s' (system) printed as '%s' (netdata): %s\n", system, netdata, ok?"OK":"FAILED"); + } + + if(failed) return 1; + return 0; +} + +static int check_rrdcalc_comparisons(void) { + RRDCALC_STATUS a, b; + + // make sure calloc() sets the status to UNINITIALIZED + memset(&a, 0, sizeof(RRDCALC_STATUS)); + if(a != RRDCALC_STATUS_UNINITIALIZED) { + fprintf(stderr, "%s is not zero.\n", rrdcalc_status2string(RRDCALC_STATUS_UNINITIALIZED)); + return 1; + } + + a = RRDCALC_STATUS_REMOVED; + b = RRDCALC_STATUS_UNDEFINED; + if(!(a < b)) { + fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); + return 1; + } + + a = RRDCALC_STATUS_UNDEFINED; + b = RRDCALC_STATUS_UNINITIALIZED; + if(!(a < b)) { + fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); + return 1; + } + + a = RRDCALC_STATUS_UNINITIALIZED; + b = RRDCALC_STATUS_CLEAR; + if(!(a < b)) { + fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); + return 1; + } + + a = RRDCALC_STATUS_CLEAR; + b = RRDCALC_STATUS_RAISED; + if(!(a < b)) { + fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); + return 1; + } + + a = RRDCALC_STATUS_RAISED; + b = RRDCALC_STATUS_WARNING; + if(!(a < b)) { + fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); + return 1; + } + + a = RRDCALC_STATUS_WARNING; + b = RRDCALC_STATUS_CRITICAL; + if(!(a < b)) { + fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); + return 1; + } + + fprintf(stderr, "RRDCALC_STATUSes are sortable.\n"); + + return 0; +} + +int check_storage_number(calculated_number n, int debug) { + char buffer[100]; + uint32_t 
flags = SN_EXISTS; + + storage_number s = pack_storage_number(n, flags); + calculated_number d = unpack_storage_number(s); + + if(!does_storage_number_exist(s)) { + fprintf(stderr, "Exists flags missing for number " CALCULATED_NUMBER_FORMAT "!\n", n); + return 5; + } + + calculated_number ddiff = d - n; + calculated_number dcdiff = ddiff * 100.0 / n; + + if(dcdiff < 0) dcdiff = -dcdiff; + + size_t len = (size_t)print_calculated_number(buffer, d); + calculated_number p = str2ld(buffer, NULL); + calculated_number pdiff = n - p; + calculated_number pcdiff = pdiff * 100.0 / n; + if(pcdiff < 0) pcdiff = -pcdiff; + + if(debug) { + fprintf(stderr, + CALCULATED_NUMBER_FORMAT " original\n" + CALCULATED_NUMBER_FORMAT " packed and unpacked, (stored as 0x%08X, diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n" + "%s printed after unpacked (%zu bytes)\n" + CALCULATED_NUMBER_FORMAT " re-parsed from printed (diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n\n", + n, + d, s, ddiff, dcdiff, + buffer, len, + p, pdiff, pcdiff + ); + if(len != strlen(buffer)) fprintf(stderr, "ERROR: printed number %s is reported to have length %zu but it has %zu\n", buffer, len, strlen(buffer)); + if(dcdiff > ACCURACY_LOSS) fprintf(stderr, "WARNING: packing number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, dcdiff); + if(pcdiff > ACCURACY_LOSS) fprintf(stderr, "WARNING: re-parsing the packed, unpacked and printed number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, pcdiff); + } + + if(len != strlen(buffer)) return 1; + if(dcdiff > ACCURACY_LOSS) return 3; + if(pcdiff > ACCURACY_LOSS) return 4; + return 0; +} + +calculated_number storage_number_min(calculated_number n) { + calculated_number r = 1, last; + + do { + last = n; + n /= 2.0; + storage_number t = pack_storage_number(n, SN_EXISTS); + r = unpack_storage_number(t); + } while(r != 0.0 && r != last); + + return last; +} + +void benchmark_storage_number(int loop, int multiplier) { + int i, j; + calculated_number n, d; + storage_number s; + unsigned long long user, system, total, mine, their; + + char buffer[100]; + + struct rusage now, last; + + fprintf(stderr, "\n\nBenchmarking %d numbers, please wait...\n\n", loop); + + // ------------------------------------------------------------------------ + + fprintf(stderr, "SYSTEM LONG DOUBLE SIZE: %zu bytes\n", sizeof(calculated_number)); + fprintf(stderr, "NETDATA FLOATING POINT SIZE: %zu bytes\n", sizeof(storage_number)); + + mine = (calculated_number)sizeof(storage_number) * (calculated_number)loop; + their = (calculated_number)sizeof(calculated_number) * (calculated_number)loop; + + if(mine > their) { + fprintf(stderr, "\nNETDATA NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES MORE MEMORY. 
Sorry!\n", (LONG_DOUBLE)(mine / their)); + } + else { + fprintf(stderr, "\nNETDATA INTERNAL FLOATING POINT ARITHMETICS NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES LESS MEMORY.\n", (LONG_DOUBLE)(their / mine)); + } + + fprintf(stderr, "\nNETDATA FLOATING POINT\n"); + fprintf(stderr, "MIN POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", storage_number_min(1)); + fprintf(stderr, "MAX POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_POSITIVE_MAX); + fprintf(stderr, "MIN NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_NEGATIVE_MIN); + fprintf(stderr, "MAX NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", -storage_number_min(1)); + fprintf(stderr, "Maximum accuracy loss: " CALCULATED_NUMBER_FORMAT "%%\n\n\n", (calculated_number)ACCURACY_LOSS); + + // ------------------------------------------------------------------------ + + fprintf(stderr, "INTERNAL LONG DOUBLE PRINTING: "); + getrusage(RUSAGE_SELF, &last); + + // do the job + for(j = 1; j < 11 ;j++) { + n = STORAGE_NUMBER_POSITIVE_MIN * j; + + for(i = 0; i < loop ;i++) { + n *= multiplier; + if(n > STORAGE_NUMBER_POSITIVE_MAX) n = STORAGE_NUMBER_POSITIVE_MIN; + + print_calculated_number(buffer, n); + } + } + + getrusage(RUSAGE_SELF, &now); + user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec; + system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec; + total = user + system; + mine = total; + + fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0)); + + // ------------------------------------------------------------------------ + + fprintf(stderr, "SYSTEM LONG DOUBLE PRINTING: "); + getrusage(RUSAGE_SELF, &last); + + // do the job + for(j = 1; j < 11 ;j++) { + n = STORAGE_NUMBER_POSITIVE_MIN * j; + + for(i = 0; i < loop ;i++) { + n *= multiplier; + if(n > STORAGE_NUMBER_POSITIVE_MAX) n = STORAGE_NUMBER_POSITIVE_MIN; + snprintfz(buffer, 100, CALCULATED_NUMBER_FORMAT, n); + } + } + + getrusage(RUSAGE_SELF, &now); + user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec; + system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec; + total = user + system; + their = total; + + fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0)); + + if(mine > total) { + fprintf(stderr, "NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0)); + } + else { + fprintf(stderr, "NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0)); + } + + // ------------------------------------------------------------------------ + + fprintf(stderr, "\nINTERNAL LONG DOUBLE PRINTING WITH PACK / UNPACK: "); + getrusage(RUSAGE_SELF, &last); + + // do the job + for(j = 1; j < 11 ;j++) { + n = STORAGE_NUMBER_POSITIVE_MIN * j; + + for(i = 0; i < loop ;i++) { + n *= multiplier; + if(n > STORAGE_NUMBER_POSITIVE_MAX) n = STORAGE_NUMBER_POSITIVE_MIN; + + s = pack_storage_number(n, 1); + d = unpack_storage_number(s); 
+ print_calculated_number(buffer, d); + } + } + + getrusage(RUSAGE_SELF, &now); + user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec; + system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec; + total = user + system; + mine = total; + + fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0)); + + if(mine > their) { + fprintf(stderr, "WITH PACKING UNPACKING NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0)); + } + else { + fprintf(stderr, "EVEN WITH PACKING AND UNPACKING, NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0)); + } + + // ------------------------------------------------------------------------ + +} + +static int check_storage_number_exists() { + uint32_t flags = SN_EXISTS; + + + for(flags = 0; flags < 7 ; flags++) { + if(get_storage_number_flags(flags << 24) != flags << 24) { + fprintf(stderr, "Flag 0x%08x is not checked correctly. It became 0x%08x\n", flags << 24, get_storage_number_flags(flags << 24)); + return 1; + } + } + + flags = SN_EXISTS; + calculated_number n = 0.0; + + storage_number s = pack_storage_number(n, flags); + calculated_number d = unpack_storage_number(s); + if(get_storage_number_flags(s) != flags) { + fprintf(stderr, "Wrong flags. Given %08x, Got %08x!\n", flags, get_storage_number_flags(s)); + return 1; + } + if(n != d) { + fprintf(stderr, "Wrong number returned. Expected " CALCULATED_NUMBER_FORMAT ", returned " CALCULATED_NUMBER_FORMAT "!\n", n, d); + return 1; + } + + return 0; +} + +int unit_test_storage() +{ + if(check_storage_number_exists()) return 0; + + calculated_number c, a = 0; + int i, j, g, r = 0; + + for(g = -1; g <= 1 ; g++) { + a = 0; + + if(!g) continue; + + for(j = 0; j < 9 ;j++) { + a += 0.0000001; + c = a * g; + for(i = 0; i < 21 ;i++, c *= 10) { + if(c > 0 && c < STORAGE_NUMBER_POSITIVE_MIN) continue; + if(c < 0 && c > STORAGE_NUMBER_NEGATIVE_MAX) continue; + + if(check_storage_number(c, 1)) return 1; + } + } + } + + benchmark_storage_number(1000000, 2); + return r; +} + +int unit_test_str2ld() { + char *values[] = { + "1.2345678", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10", + "hello", "1wrong", "nan", "inf", NULL + }; + + int i; + for(i = 0; values[i] ; i++) { + char *e_mine = "hello", *e_sys = "world"; + LONG_DOUBLE mine = str2ld(values[i], &e_mine); + LONG_DOUBLE sys = strtold(values[i], &e_sys); + + if(isnan(mine)) { + if(!isnan(sys)) { + fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys); + return -1; + } + } + else if(isinf(mine)) { + if(!isinf(sys)) { + fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys); + return -1; + } + } + else if(mine != sys && abs(mine-sys) > 0.000001) { + fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ", delta %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys, sys-mine); + return -1; + } + + if(e_mine != e_sys) { + fprintf(stderr, "Value '%s' is parsed correctly, but endptr is not right\n", values[i]); + return 
-1; + } + + fprintf(stderr, "str2ld() parsed value '%s' exactly the same way with strtold(), returned %" LONG_DOUBLE_MODIFIER " vs %" LONG_DOUBLE_MODIFIER "\n", values[i], mine, sys); + } + + return 0; +} + +int unit_test_buffer() { + BUFFER *wb = buffer_create(1); + char string[2048 + 1]; + char final[9000 + 1]; + int i; + + for(i = 0; i < 2048; i++) + string[i] = (char)((i % 24) + 'a'); + string[2048] = '\0'; + + const char *fmt = "string1: %s\nstring2: %s\nstring3: %s\nstring4: %s"; + buffer_sprintf(wb, fmt, string, string, string, string); + snprintfz(final, 9000, fmt, string, string, string, string); + + const char *s = buffer_tostring(wb); + + if(buffer_strlen(wb) != strlen(final) || strcmp(s, final) != 0) { + fprintf(stderr, "\nbuffer_sprintf() is faulty.\n"); + fprintf(stderr, "\nstring : %s (length %zu)\n", string, strlen(string)); + fprintf(stderr, "\nbuffer : %s (length %zu)\n", s, buffer_strlen(wb)); + fprintf(stderr, "\nexpected: %s (length %zu)\n", final, strlen(final)); + buffer_free(wb); + return -1; + } + + fprintf(stderr, "buffer_sprintf() works as expected.\n"); + buffer_free(wb); + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- + +struct feed_values { + unsigned long long microseconds; + collected_number value; +}; + +struct test { + char name[100]; + char description[1024]; + + int update_every; + unsigned long long multiplier; + unsigned long long divisor; + RRD_ALGORITHM algorithm; + + unsigned long feed_entries; + unsigned long result_entries; + struct feed_values *feed; + calculated_number *results; + + collected_number *feed2; + calculated_number *results2; +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test1 +// test absolute values stored + +struct feed_values test1_feed[] = { + { 0, 10 }, + { 1000000, 20 }, + { 1000000, 30 }, + { 1000000, 40 }, + { 1000000, 50 }, + { 1000000, 60 }, + { 1000000, 70 }, + { 1000000, 80 }, + { 1000000, 90 }, + { 1000000, 100 }, +}; + +calculated_number test1_results[] = { + 20, 30, 40, 50, 60, 70, 80, 90, 100 +}; + +struct test test1 = { + "test1", // name + "test absolute values stored at exactly second boundaries", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_ABSOLUTE, // algorithm + 10, // feed entries + 9, // result entries + test1_feed, // feed + test1_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test2 +// test absolute values stored in the middle of second boundaries + +struct feed_values test2_feed[] = { + { 500000, 10 }, + { 1000000, 20 }, + { 1000000, 30 }, + { 1000000, 40 }, + { 1000000, 50 }, + { 1000000, 60 }, + { 1000000, 70 }, + { 1000000, 80 }, + { 1000000, 90 }, + { 1000000, 100 }, +}; + +calculated_number test2_results[] = { + 20, 30, 40, 50, 60, 70, 80, 90, 100 +}; + +struct test test2 = { + "test2", // name + "test absolute values stored in the middle of second boundaries", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_ABSOLUTE, // algorithm + 10, // feed entries + 9, // result entries + test2_feed, // feed + test2_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test3 + +struct feed_values test3_feed[] = { + { 0, 10 
}, + { 1000000, 20 }, + { 1000000, 30 }, + { 1000000, 40 }, + { 1000000, 50 }, + { 1000000, 60 }, + { 1000000, 70 }, + { 1000000, 80 }, + { 1000000, 90 }, + { 1000000, 100 }, +}; + +calculated_number test3_results[] = { + 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +struct test test3 = { + "test3", // name + "test incremental values stored at exactly second boundaries", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 9, // result entries + test3_feed, // feed + test3_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test4 + +struct feed_values test4_feed[] = { + { 500000, 10 }, + { 1000000, 20 }, + { 1000000, 30 }, + { 1000000, 40 }, + { 1000000, 50 }, + { 1000000, 60 }, + { 1000000, 70 }, + { 1000000, 80 }, + { 1000000, 90 }, + { 1000000, 100 }, +}; + +calculated_number test4_results[] = { + 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +struct test test4 = { + "test4", // name + "test incremental values stored in the middle of second boundaries", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 9, // result entries + test4_feed, // feed + test4_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test5 + +struct feed_values test5_feed[] = { + { 500000, 1000 }, + { 1000000, 2000 }, + { 1000000, 2000 }, + { 1000000, 2000 }, + { 1000000, 3000 }, + { 1000000, 2000 }, + { 1000000, 2000 }, + { 1000000, 2000 }, + { 1000000, 2000 }, + { 1000000, 2000 }, +}; + +calculated_number test5_results[] = { + 1000, 500, 0, 500, 500, 0, 0, 0, 0 +}; + +struct test test5 = { + "test5", // name + "test incremental values ups and downs", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 9, // result entries + test5_feed, // feed + test5_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test6 + +struct feed_values test6_feed[] = { + { 250000, 1000 }, + { 250000, 2000 }, + { 250000, 3000 }, + { 250000, 4000 }, + { 250000, 5000 }, + { 250000, 6000 }, + { 250000, 7000 }, + { 250000, 8000 }, + { 250000, 9000 }, + { 250000, 10000 }, + { 250000, 11000 }, + { 250000, 12000 }, + { 250000, 13000 }, + { 250000, 14000 }, + { 250000, 15000 }, + { 250000, 16000 }, +}; + +calculated_number test6_results[] = { + 4000, 4000, 4000, 4000 +}; + +struct test test6 = { + "test6", // name + "test incremental values updated within the same second", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 16, // feed entries + 4, // result entries + test6_feed, // feed + test6_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test7 + +struct feed_values test7_feed[] = { + { 500000, 1000 }, + { 2000000, 2000 }, + { 2000000, 3000 }, + { 2000000, 4000 }, + { 2000000, 5000 }, + { 2000000, 6000 }, + { 2000000, 7000 }, + { 2000000, 8000 }, + { 2000000, 9000 }, + { 2000000, 10000 }, +}; + +calculated_number test7_results[] = { + 500, 500, 500, 500, 500, 500, 
500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500 +}; + +struct test test7 = { + "test7", // name + "test incremental values updated in long durations", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 18, // result entries + test7_feed, // feed + test7_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test8 + +struct feed_values test8_feed[] = { + { 500000, 1000 }, + { 2000000, 2000 }, + { 2000000, 3000 }, + { 2000000, 4000 }, + { 2000000, 5000 }, + { 2000000, 6000 }, +}; + +calculated_number test8_results[] = { + 1250, 2000, 2250, 3000, 3250, 4000, 4250, 5000, 5250, 6000 +}; + +struct test test8 = { + "test8", // name + "test absolute values updated in long durations", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_ABSOLUTE, // algorithm + 6, // feed entries + 10, // result entries + test8_feed, // feed + test8_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test9 + +struct feed_values test9_feed[] = { + { 250000, 1000 }, + { 250000, 2000 }, + { 250000, 3000 }, + { 250000, 4000 }, + { 250000, 5000 }, + { 250000, 6000 }, + { 250000, 7000 }, + { 250000, 8000 }, + { 250000, 9000 }, + { 250000, 10000 }, + { 250000, 11000 }, + { 250000, 12000 }, + { 250000, 13000 }, + { 250000, 14000 }, + { 250000, 15000 }, + { 250000, 16000 }, +}; + +calculated_number test9_results[] = { + 4000, 8000, 12000, 16000 +}; + +struct test test9 = { + "test9", // name + "test absolute values updated within the same second", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_ABSOLUTE, // algorithm + 16, // feed entries + 4, // result entries + test9_feed, // feed + test9_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test10 + +struct feed_values test10_feed[] = { + { 500000, 1000 }, + { 600000, 1000 + 600 }, + { 200000, 1600 + 200 }, + { 1000000, 1800 + 1000 }, + { 200000, 2800 + 200 }, + { 2000000, 3000 + 2000 }, + { 600000, 5000 + 600 }, + { 400000, 5600 + 400 }, + { 900000, 6000 + 900 }, + { 1000000, 6900 + 1000 }, +}; + +calculated_number test10_results[] = { + 1000, 1000, 1000, 1000, 1000, 1000, 1000 +}; + +struct test test10 = { + "test10", // name + "test incremental values updated in short and long durations", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 7, // result entries + test10_feed, // feed + test10_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test11 + +struct feed_values test11_feed[] = { + { 0, 10 }, + { 1000000, 20 }, + { 1000000, 30 }, + { 1000000, 40 }, + { 1000000, 50 }, + { 1000000, 60 }, + { 1000000, 70 }, + { 1000000, 80 }, + { 1000000, 90 }, + { 1000000, 100 }, +}; + +collected_number test11_feed2[] = { + 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 +}; + +calculated_number test11_results[] = { + 50, 50, 50, 50, 50, 50, 50, 50, 50 +}; + +calculated_number test11_results2[] = { + 50, 50, 50, 50, 50, 50, 50, 50, 50 +}; + +struct test test11 = { 
+ "test11", // name + "test percentage-of-incremental-row with equal values", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm + 10, // feed entries + 9, // result entries + test11_feed, // feed + test11_results, // results + test11_feed2, // feed2 + test11_results2 // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test12 + +struct feed_values test12_feed[] = { + { 0, 10 }, + { 1000000, 20 }, + { 1000000, 30 }, + { 1000000, 40 }, + { 1000000, 50 }, + { 1000000, 60 }, + { 1000000, 70 }, + { 1000000, 80 }, + { 1000000, 90 }, + { 1000000, 100 }, +}; + +collected_number test12_feed2[] = { + 10*3, 20*3, 30*3, 40*3, 50*3, 60*3, 70*3, 80*3, 90*3, 100*3 +}; + +calculated_number test12_results[] = { + 25, 25, 25, 25, 25, 25, 25, 25, 25 +}; + +calculated_number test12_results2[] = { + 75, 75, 75, 75, 75, 75, 75, 75, 75 +}; + +struct test test12 = { + "test12", // name + "test percentage-of-incremental-row with equal values", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm + 10, // feed entries + 9, // result entries + test12_feed, // feed + test12_results, // results + test12_feed2, // feed2 + test12_results2 // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test13 + +struct feed_values test13_feed[] = { + { 500000, 1000 }, + { 600000, 1000 + 600 }, + { 200000, 1600 + 200 }, + { 1000000, 1800 + 1000 }, + { 200000, 2800 + 200 }, + { 2000000, 3000 + 2000 }, + { 600000, 5000 + 600 }, + { 400000, 5600 + 400 }, + { 900000, 6000 + 900 }, + { 1000000, 6900 + 1000 }, +}; + +calculated_number test13_results[] = { + 83.3333300, 100, 100, 100, 100, 100, 100 +}; + +struct test test13 = { + "test13", // name + "test incremental values updated in short and long durations", + 1, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm + 10, // feed entries + 7, // result entries + test13_feed, // feed + test13_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test14 + +struct feed_values test14_feed[] = { + { 0, 0x015397dc42151c41ULL }, + { 13573000, 0x015397e612e3ff5dULL }, + { 29969000, 0x015397f905ecdaa8ULL }, + { 29958000, 0x0153980c2a6cb5e4ULL }, + { 30054000, 0x0153981f4032fb83ULL }, + { 34952000, 0x015398355efadaccULL }, + { 25046000, 0x01539845ba4b09f8ULL }, + { 29947000, 0x0153985948bf381dULL }, + { 30054000, 0x0153986c5b9c27e2ULL }, + { 29942000, 0x0153987f888982d0ULL }, +}; + +calculated_number test14_results[] = { + 23.1383300, 21.8515600, 21.8804600, 21.7788000, 22.0112200, 22.4386100, 22.0906100, 21.9150800 +}; + +struct test test14 = { + "test14", // name + "issue #981 with real data", + 30, // update_every + 8, // multiplier + 1000000000, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 8, // result entries + test14_feed, // feed + test14_results, // results + NULL, // feed2 + NULL // results2 +}; + +struct feed_values test14b_feed[] = { + { 0, 0 }, + { 13573000, 13573000 }, + { 29969000, 13573000 + 29969000 }, + { 29958000, 13573000 + 29969000 + 29958000 }, + { 30054000, 13573000 + 29969000 + 29958000 + 30054000 }, + { 34952000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 }, + { 
25046000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 }, + { 29947000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 }, + { 30054000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 }, + { 29942000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 + 29942000 }, +}; + +calculated_number test14b_results[] = { + 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000 +}; + +struct test test14b = { + "test14b", // name + "issue #981 with dummy data", + 30, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 8, // result entries + test14b_feed, // feed + test14b_results, // results + NULL, // feed2 + NULL // results2 +}; + +struct feed_values test14c_feed[] = { + { 29000000, 29000000 }, + { 1000000, 29000000 + 1000000 }, + { 30000000, 29000000 + 1000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, + { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, +}; + +calculated_number test14c_results[] = { + 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000 +}; + +struct test test14c = { + "test14c", // name + "issue #981 with dummy data, checking for late start", + 30, // update_every + 1, // multiplier + 1, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 9, // result entries + test14c_feed, // feed + test14c_results, // results + NULL, // feed2 + NULL // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- +// test15 + +struct feed_values test15_feed[] = { + { 0, 1068066388 }, + { 1008752, 1068822698 }, + { 993809, 1069573072 }, + { 995911, 1070324135 }, + { 1014562, 1071078166 }, + { 994684, 1071831349 }, + { 993128, 1072235739 }, + { 1010332, 1072958871 }, + { 1003394, 1073707019 }, + { 995201, 1074460255 }, +}; + +collected_number test15_feed2[] = { + 178825286, 178825286, 178825286, 178825286, 178825498, 178825498, 179165652, 179202964, 179203282, 179204130 +}; + +calculated_number test15_results[] = { + 5857.4080000, 5898.4540000, 5891.6590000, 5806.3160000, 5914.2640000, 3202.2630000, 5589.6560000, 5822.5260000, 5911.7520000 +}; + +calculated_number test15_results2[] = { + 0.0000000, 0.0000000, 0.0024944, 1.6324779, 0.0212777, 2655.1890000, 290.5387000, 5.6733610, 6.5960220 +}; + +struct test test15 = { + "test15", // name + "test incremental with 2 dimensions", + 1, // update_every + 8, // multiplier + 1024, // divisor + RRD_ALGORITHM_INCREMENTAL, // algorithm + 10, // feed entries + 9, // result entries + test15_feed, // feed + test15_results, // results + test15_feed2, // feed2 + test15_results2 // results2 +}; + +// -------------------------------------------------------------------------------------------------------------------- + +int run_test(struct test *test) +{ + fprintf(stderr, "\nRunning test 
'%s':\n%s\n", test->name, test->description); + + default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; + default_rrd_update_every = test->update_every; + + char name[101]; + snprintfz(name, 100, "unittest-%s", test->name); + + // create the chart + RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1 + , test->update_every, RRDSET_TYPE_LINE); + RRDDIM *rd = rrddim_add(st, "dim1", NULL, test->multiplier, test->divisor, test->algorithm); + + RRDDIM *rd2 = NULL; + if(test->feed2) + rd2 = rrddim_add(st, "dim2", NULL, test->multiplier, test->divisor, test->algorithm); + + rrdset_flag_set(st, RRDSET_FLAG_DEBUG); + + // feed it with the test data + time_t time_now = 0, time_start = now_realtime_sec(); + unsigned long c; + collected_number last = 0; + for(c = 0; c < test->feed_entries; c++) { + if(debug_flags) fprintf(stderr, "\n\n"); + + if(c) { + time_now += test->feed[c].microseconds; + fprintf(stderr, " > %s: feeding position %lu, after %0.3f seconds (%0.3f seconds from start), delta " CALCULATED_NUMBER_FORMAT ", rate " CALCULATED_NUMBER_FORMAT "\n", + test->name, c+1, + (float)test->feed[c].microseconds / 1000000.0, + (float)time_now / 1000000.0, + ((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor, + (((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor) / (calculated_number)test->feed[c].microseconds * (calculated_number)1000000); + + // rrdset_next_usec_unfiltered(st, test->feed[c].microseconds); + st->usec_since_last_update = test->feed[c].microseconds; + } + else { + fprintf(stderr, " > %s: feeding position %lu\n", test->name, c+1); + } + + fprintf(stderr, " >> %s with value " COLLECTED_NUMBER_FORMAT "\n", rd->name, test->feed[c].value); + rrddim_set(st, "dim1", test->feed[c].value); + last = test->feed[c].value; + + if(rd2) { + fprintf(stderr, " >> %s with value " COLLECTED_NUMBER_FORMAT "\n", rd2->name, test->feed2[c]); + rrddim_set(st, "dim2", test->feed2[c]); + } + + rrdset_done(st); + + // align the first entry to second boundary + if(!c) { + fprintf(stderr, " > %s: fixing first collection time to be %llu microseconds to second boundary\n", test->name, test->feed[c].microseconds); + rd->last_collected_time.tv_usec = st->last_collected_time.tv_usec = st->last_updated.tv_usec = test->feed[c].microseconds; + // time_start = st->last_collected_time.tv_sec; + } + } + + // check the result + int errors = 0; + + if(st->counter != test->result_entries) { + fprintf(stderr, " %s stored %zu entries, but we were expecting %lu, ### E R R O R ###\n", test->name, st->counter, test->result_entries); + errors++; + } + + unsigned long max = (st->counter < test->result_entries)?st->counter:test->result_entries; + for(c = 0 ; c < max ; c++) { + calculated_number v = unpack_storage_number(rd->values[c]); + calculated_number n = test->results[c]; + int same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0; + fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n", + test->name, rd->name, c+1, + (rrdset_first_entry_t(st) + c * st->update_every) - time_start, + n, v, (same)?"OK":"### E R R O R ###"); + + if(!same) errors++; + + if(rd2) { + v = unpack_storage_number(rd2->values[c]); + n = test->results2[c]; + same = 
(calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0; + fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n", + test->name, rd2->name, c+1, + (rrdset_first_entry_t(st) + c * st->update_every) - time_start, + n, v, (same)?"OK":"### E R R O R ###"); + if(!same) errors++; + } + } + + return errors; +} + +static int test_variable_renames(void) { + fprintf(stderr, "Creating chart\n"); + RRDSET *st = rrdset_create_localhost("chart", "ID", NULL, "family", "context", "Unit Testing", "a value", "unittest", NULL, 1, 1, RRDSET_TYPE_LINE); + fprintf(stderr, "Created chart with id '%s', name '%s'\n", st->id, st->name); + + fprintf(stderr, "Creating dimension DIM1\n"); + RRDDIM *rd1 = rrddim_add(st, "DIM1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + fprintf(stderr, "Created dimension with id '%s', name '%s'\n", rd1->id, rd1->name); + + fprintf(stderr, "Creating dimension DIM2\n"); + RRDDIM *rd2 = rrddim_add(st, "DIM2", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + fprintf(stderr, "Created dimension with id '%s', name '%s'\n", rd2->id, rd2->name); + + fprintf(stderr, "Renaming chart to CHARTNAME1\n"); + rrdset_set_name(st, "CHARTNAME1"); + fprintf(stderr, "Renamed chart with id '%s' to name '%s'\n", st->id, st->name); + + fprintf(stderr, "Renaming chart to CHARTNAME2\n"); + rrdset_set_name(st, "CHARTNAME2"); + fprintf(stderr, "Renamed chart with id '%s' to name '%s'\n", st->id, st->name); + + fprintf(stderr, "Renaming dimension DIM1 to DIM1NAME1\n"); + rrddim_set_name(st, rd1, "DIM1NAME1"); + fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd1->id, rd1->name); + + fprintf(stderr, "Renaming dimension DIM1 to DIM1NAME2\n"); + rrddim_set_name(st, rd1, "DIM1NAME2"); + fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd1->id, rd1->name); + + fprintf(stderr, "Renaming dimension DIM2 to DIM2NAME1\n"); + rrddim_set_name(st, rd2, "DIM2NAME1"); + fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd2->id, rd2->name); + + fprintf(stderr, "Renaming dimension DIM2 to DIM2NAME2\n"); + rrddim_set_name(st, rd2, "DIM2NAME2"); + fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd2->id, rd2->name); + + BUFFER *buf = buffer_create(1); + health_api_v1_chart_variables2json(st, buf); + fprintf(stderr, "%s", buffer_tostring(buf)); + buffer_free(buf); + return 1; +} + +int check_strdupz_path_subpath() { + + struct strdupz_path_subpath_checks { + const char *path; + const char *subpath; + const char *result; + } checks[] = { + { "", "", "." 
}, + { "/", "", "/" }, + { "/etc/netdata", "", "/etc/netdata" }, + { "/etc/netdata///", "", "/etc/netdata" }, + { "/etc/netdata///", "health.d", "/etc/netdata/health.d" }, + { "/etc/netdata///", "///health.d", "/etc/netdata/health.d" }, + { "/etc/netdata", "///health.d", "/etc/netdata/health.d" }, + { "", "///health.d", "./health.d" }, + { "/", "///health.d", "/health.d" }, + + // terminator + { NULL, NULL, NULL } + }; + + size_t i; + for(i = 0; checks[i].result ; i++) { + char *s = strdupz_path_subpath(checks[i].path, checks[i].subpath); + fprintf(stderr, "strdupz_path_subpath(\"%s\", \"%s\") = \"%s\": ", checks[i].path, checks[i].subpath, s); + if(!s || strcmp(s, checks[i].result) != 0) { + freez(s); + fprintf(stderr, "FAILED\n"); + return 1; + } + else { + freez(s); + fprintf(stderr, "OK\n"); + } + } + + return 0; +} + +int run_all_mockup_tests(void) +{ + if(check_strdupz_path_subpath()) + return 1; + + if(check_number_printing()) + return 1; + + if(check_rrdcalc_comparisons()) + return 1; + + if(!test_variable_renames()) + return 1; + + if(run_test(&test1)) + return 1; + + if(run_test(&test2)) + return 1; + + if(run_test(&test3)) + return 1; + + if(run_test(&test4)) + return 1; + + if(run_test(&test5)) + return 1; + + if(run_test(&test6)) + return 1; + + if(run_test(&test7)) + return 1; + + if(run_test(&test8)) + return 1; + + if(run_test(&test9)) + return 1; + + if(run_test(&test10)) + return 1; + + if(run_test(&test11)) + return 1; + + if(run_test(&test12)) + return 1; + + if(run_test(&test13)) + return 1; + + if(run_test(&test14)) + return 1; + + if(run_test(&test14b)) + return 1; + + if(run_test(&test14c)) + return 1; + + if(run_test(&test15)) + return 1; + + + + return 0; +} + +int unit_test(long delay, long shift) +{ + static int repeat = 0; + repeat++; + + char name[101]; + snprintfz(name, 100, "unittest-%d-%ld-%ld", repeat, delay, shift); + + //debug_flags = 0xffffffff; + default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; + default_rrd_update_every = 1; + + int do_abs = 1; + int do_inc = 1; + int do_abst = 0; + int do_absi = 0; + + RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1, 1 + , RRDSET_TYPE_LINE); + rrdset_flag_set(st, RRDSET_FLAG_DEBUG); + + RRDDIM *rdabs = NULL; + RRDDIM *rdinc = NULL; + RRDDIM *rdabst = NULL; + RRDDIM *rdabsi = NULL; + + if(do_abs) rdabs = rrddim_add(st, "absolute", "absolute", 1, 1, RRD_ALGORITHM_ABSOLUTE); + if(do_inc) rdinc = rrddim_add(st, "incremental", "incremental", 1, 1, RRD_ALGORITHM_INCREMENTAL); + if(do_abst) rdabst = rrddim_add(st, "percentage-of-absolute-row", "percentage-of-absolute-row", 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL); + if(do_absi) rdabsi = rrddim_add(st, "percentage-of-incremental-row", "percentage-of-incremental-row", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + + long increment = 1000; + collected_number i = 0; + + unsigned long c, dimensions = 0; + RRDDIM *rd; + for(rd = st->dimensions ; rd ; rd = rd->next) dimensions++; + + for(c = 0; c < 20 ;c++) { + i += increment; + + fprintf(stderr, "\n\nLOOP = %lu, DELAY = %ld, VALUE = " COLLECTED_NUMBER_FORMAT "\n", c, delay, i); + if(c) { + // rrdset_next_usec_unfiltered(st, delay); + st->usec_since_last_update = delay; + } + if(do_abs) rrddim_set(st, "absolute", i); + if(do_inc) rrddim_set(st, "incremental", i); + if(do_abst) rrddim_set(st, "percentage-of-absolute-row", i); + if(do_absi) rrddim_set(st, "percentage-of-incremental-row", i); + + if(!c) { + now_realtime_timeval(&st->last_collected_time); + 
st->last_collected_time.tv_usec = shift; + } + + // prevent it from deleting the dimensions + for(rd = st->dimensions ; rd ; rd = rd->next) + rd->last_collected_time.tv_sec = st->last_collected_time.tv_sec; + + rrdset_done(st); + } + + unsigned long oincrement = increment; + increment = increment * st->update_every * 1000000 / delay; + fprintf(stderr, "\n\nORIGINAL INCREMENT: %lu, INCREMENT %ld, DELAY %ld, SHIFT %ld\n", oincrement * 10, increment * 10, delay, shift); + + int ret = 0; + storage_number sn; + calculated_number cn, v; + for(c = 0 ; c < st->counter ; c++) { + fprintf(stderr, "\nPOSITION: c = %lu, EXPECTED VALUE %lu\n", c, (oincrement + c * increment + increment * (1000000 - shift) / 1000000 )* 10); + + for(rd = st->dimensions ; rd ; rd = rd->next) { + sn = rd->values[c]; + cn = unpack_storage_number(sn); + fprintf(stderr, "\t %s " CALCULATED_NUMBER_FORMAT " (PACKED AS " STORAGE_NUMBER_FORMAT ") -> ", rd->id, cn, sn); + + if(rd == rdabs) v = + ( oincrement + // + (increment * (1000000 - shift) / 1000000) + + (c + 1) * increment + ); + + else if(rd == rdinc) v = (c?(increment):(increment * (1000000 - shift) / 1000000)); + else if(rd == rdabst) v = oincrement / dimensions / 10; + else if(rd == rdabsi) v = oincrement / dimensions / 10; + else v = 0; + + if(v == cn) fprintf(stderr, "passed.\n"); + else { + fprintf(stderr, "ERROR! (expected " CALCULATED_NUMBER_FORMAT ")\n", v); + ret = 1; + } + } + } + + if(ret) + fprintf(stderr, "\n\nUNIT TEST(%ld, %ld) FAILED\n\n", delay, shift); + + return ret; +} diff --git a/daemon/unit_test.h b/daemon/unit_test.h new file mode 100644 index 000000000..0023c8de9 --- /dev/null +++ b/daemon/unit_test.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_UNIT_TEST_H +#define NETDATA_UNIT_TEST_H 1 + +extern int unit_test_storage(void); +extern int unit_test(long delay, long shift); +extern int run_all_mockup_tests(void); +extern int unit_test_str2ld(void); +extern int unit_test_buffer(void); + +#endif /* NETDATA_UNIT_TEST_H */ diff --git a/database/Makefile.am b/database/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/database/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/database/Makefile.in b/database/Makefile.in new file mode 100644 index 000000000..4f5b710c5 --- /dev/null +++ b/database/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = database +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = 
@CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ 
+psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu database/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu database/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/database/README.md b/database/README.md new file mode 100644 index 000000000..8f5e3a6df --- /dev/null +++ b/database/README.md @@ -0,0 +1,206 @@ +# netdata database + +Although `netdata` does all its calculations using `long double`, it stores all values using +a [custom-made 32-bit number](../libnetdata/storage_number/). + +So, for each dimension of a chart, netdata will need: `4 bytes for the value * the entries +of its history`. It will not store any other data for each value in the time series database. 
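+
+For example, you can check the arithmetic of this formula yourself (a quick sketch, using the default history and a dimension count discussed below):
+
+```sh
+# RAM needed = 4 bytes per value x history entries x dimensions
+entries=3600
+dimensions=1000
+echo "$(( entries * 4 * dimensions )) bytes"   # 14400000 bytes, i.e. ~14.4MB
+```
+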
+Since all its values are stored in a time series with a fixed step, the time each value
+corresponds to can be calculated at run time, using the position of the value in the round
+robin database.
+
+The default history is 3,600 entries, thus it will need 14.4KB for each chart dimension.
+If you need 1,000 dimensions, they will occupy just 14.4MB.
+
+Of course, 3,600 entries is a very short history, especially if the data collection frequency is
+set to 1 second. You will have just one hour of data.
+
+For a day of data and 1,000 dimensions, you will need: 86,400 seconds * 4 bytes * 1,000
+dimensions = 345MB of RAM.
+
+Currently the only option you have to lower this number is to use
+**[Memory Deduplication - Kernel Same Page Merging - KSM](#ksm)**.
+
+## Memory modes
+
+Currently netdata supports 5 memory modes:
+
+1. `ram`, data are purely in memory. Data are never saved on disk. This mode uses `mmap()` and
+   supports [KSM](#ksm).
+
+2. `save` (the default), data are only in RAM while netdata runs and are saved to / loaded from
+   disk on netdata restart. It also uses `mmap()` and supports [KSM](#ksm).
+
+3. `map`, data are in memory mapped files. This works like swap. Keep in mind though, this
+   will generate a constant write load on your disk. When netdata writes data to its memory, the
+   Linux kernel marks the related memory pages as dirty and automatically starts updating them on
+   disk. Unfortunately, we cannot control how frequently this happens. The Linux kernel uses
+   exactly the same algorithm it uses for its swap memory. Check below for additional information
+   on running a dedicated central netdata server. This mode uses `mmap()` but does not support
+   [KSM](#ksm).
+
+4. `none`, without a database (collected metrics can only be streamed to another netdata).
+
+5. `alloc`, like `ram` but it uses `calloc()` and does not support [KSM](#ksm). This mode is the
+   fallback for all others except `none`.
+
+You can select the memory mode by editing `netdata.conf` and setting:
+
+```
+[global]
+    # ram, save (the default, save on exit, load on start), map (swap like)
+    memory mode = save
+
+    # the directory where data are saved
+    cache directory = /var/cache/netdata
+```
+
+## Running netdata in embedded devices
+
+Embedded devices usually have very limited RAM resources available.
+
+There are 2 settings for you to tweak:
+
+1. `update every`, which controls the data collection frequency
+2. `history`, which controls the size of the database in RAM
+
+By default `update every = 1` and `history = 3600`. This gives you an hour of data with per
+second updates.
+
+If you set `update every = 2` and `history = 1800`, you will still have an hour of data, but
+collected once every 2 seconds. This will **cut in half** both CPU and RAM resources consumed
+by netdata. Of course, experiment a bit. On very weak devices you might have to use
+`update every = 5` and `history = 720` (still 1 hour of data, but 1/5 of the CPU and RAM resources).
+
+You can also disable [data collection plugins](../collectors) you don't need.
+Disabling such plugins will also free both CPU and RAM resources.
+
+## Running a dedicated central netdata server
+
+netdata allows streaming data between netdata nodes. This allows us to have a central netdata
+server that will maintain the entire database for all nodes, and will also run health checks/alarms
+for all nodes.
+
+For this central netdata, memory size can be a problem. Fortunately, netdata supports several
+memory modes. What is interesting for this setup is `memory mode = map`.
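+
+On the central server, the relevant `netdata.conf` setting would look like this (a sketch of the same `[global]` section shown earlier):
+
+```
+[global]
+    memory mode = map
+```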
+
+In this mode, the database of netdata is stored in memory mapped files. netdata continues to read
+and write the database in memory, but the kernel automatically loads and saves memory pages from/to
+disk.
+
+**We suggest _not_ using this mode on nodes that run other applications.** There will always be
+dirty memory to be synced, and this syncing process may influence the way other applications work.
+This mode, however, is ideal when we need a central netdata server that would normally need huge
+amounts of memory. Using memory mode `map` we can overcome all memory restrictions.
+
+There are a few kernel options that provide finer control over the way this syncing works. But
+before explaining them, a brief introduction to how the netdata database works is needed.
+
+For each chart, netdata maps the following files:
+
+1. `chart/main.db`, this is the file that maintains chart information. Every time data are collected
+   for a chart, this file is updated.
+
+2. `chart/dimension_name.db`, this is the file for each dimension. At its beginning there is a
+   header, followed by the round robin database where metrics are stored.
+
+So, every time netdata collects data, the following pages will become dirty:
+
+1. the chart file
+2. the header part of all dimension files
+3. if the collected metrics are stored far enough in the dimension file, another page will
+   become dirty, for each dimension
+
+Each page in Linux is 4KB. So, with 200 charts and 1000 dimensions, there will be 1200 to 2200
+dirty 4KB pages every second. Of course, 1200 of them will always be dirty (the 200 chart files and
+the 1000 dimension headers) and 1000 will be dirty for about 1000 seconds each (4 bytes per metric,
+4KB per page, so about 1000 seconds, or 16 minutes, per page).
+
+Fortunately, the Linux kernel does not sync all this data every second. The frequency at which it
+is synced is controlled by `/proc/sys/vm/dirty_expire_centisecs`, or the
+`sysctl` `vm.dirty_expire_centisecs`. The default on most systems is 3000 (30 seconds).
+
+On a busy server centralizing metrics from 20+ servers you will experience this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23834750/429ab0dc-0764-11e7-821a-d7908bc881ac.png)
+
+As you can see, there is quite some stress (this is `iowait`) every 30 seconds.
+
+A simple solution is to increase this time to 10 minutes (60000). This is the same system with the
+setting increased to 10 minutes:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23834784/d2304f72-0764-11e7-8389-fb830ffd973a.png)
+
+Of course, setting this to 10 minutes means that data on disk might be up to 10 minutes old if you
+get an abnormal shutdown.
+
+There are 2 more options to tweak:
+
+1. `dirty_background_ratio`, by default `10`.
+2. `dirty_ratio`, by default `20`.
+
+These control the amount of memory that should be dirty for disk syncing to be triggered.
+On dedicated netdata servers, you can use `80` and `90` respectively, so that all RAM is given
+to netdata.
+
+With these settings, you can expect a little `iowait` spike once every 10 minutes and, in case
+of a system crash, data on disk will be up to 10 minutes old.
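+
+To experiment with these values at runtime, before persisting anything, you can set them with `sysctl -w` (a sketch, using the values discussed above):
+
+```sh
+sysctl -w vm.dirty_expire_centisecs=60000
+sysctl -w vm.dirty_background_ratio=80
+sysctl -w vm.dirty_ratio=90
+```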
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23835030/ba4bf506-0768-11e7-9bc6-3b23e080c69f.png)
+
+To have these settings automatically applied on boot, create the file `/etc/sysctl.d/netdata-memory.conf` with these contents:
+
+```
+vm.dirty_expire_centisecs = 60000
+vm.dirty_background_ratio = 80
+vm.dirty_ratio = 90
+vm.dirty_writeback_centisecs = 0
+```
+
+## KSM
+
+Netdata offers all of its round robin database to the kernel for deduplication.
+
+In the past KSM has been criticized for consuming a lot of CPU resources.
+Although this is true when KSM is used for deduplicating certain applications, it is not true for
+netdata, since the netdata memory is written very infrequently (if you have 24 hours of metrics in
+netdata, each byte of the in-memory database will be updated just once per day).
+
+KSM is a solution that will provide 60+% memory savings to netdata.
+
+#### Enable KSM in kernel
+
+You need to run a kernel compiled with:
+
+```sh
+CONFIG_KSM=y
+```
+
+When KSM is enabled in the kernel, it is merely made available; the user still has to enable it.
+
+So, if you build a kernel with `CONFIG_KSM=y` you will just get a few files in `/sys/kernel/mm/ksm`. Nothing else happens. There is no performance penalty (apart, presumably, from the memory this code occupies in the kernel).
+
+The files that `CONFIG_KSM=y` offers include:
+
+- `/sys/kernel/mm/ksm/run`, by default `0`. You have to set this to `1` for the kernel to spawn `ksmd`.
+- `/sys/kernel/mm/ksm/sleep_millisecs`, by default `20`. How long `ksmd` should sleep between runs that evaluate memory for deduplication.
+- `/sys/kernel/mm/ksm/pages_to_scan`, by default `100`. The number of pages `ksmd` will evaluate on each run.
+
+So, by default `ksmd` is disabled. It will not harm performance, and the user/admin can control the CPU resources they are willing to let `ksmd` use.
+
+#### Run `ksmd` kernel daemon
+
+To activate / run `ksmd` you need to run:
+
+```sh
+echo 1 >/sys/kernel/mm/ksm/run
+echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs
+```
+
+With these settings, `ksmd` does not even appear in the running process list (it will run once per second and evaluate 100 pages for de-duplication).
+
+Put the above lines in your boot sequence (`/etc/rc.local` or equivalent) to have `ksmd` run at boot.
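+
+To verify that `ksmd` is actually merging pages, you can read the counters KSM exposes in the same directory (a quick check, not netdata-specific):
+
+```sh
+cat /sys/kernel/mm/ksm/pages_shared    # merged pages currently in use
+cat /sys/kernel/mm/ksm/pages_sharing   # page references deduplicated into them (the savings)
+```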
+ +## Monitoring Kernel Memory de-duplication performance + +Netdata will create charts for kernel memory de-duplication performance, like this: + +![image](https://cloud.githubusercontent.com/assets/2662304/11998786/eb23ae54-aab6-11e5-94d4-e848e8a5c56a.png) diff --git a/database/rrd.c b/database/rrd.c new file mode 100644 index 000000000..119efa62e --- /dev/null +++ b/database/rrd.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +#define NETDATA_RRD_INTERNALS 1 + +#include "rrd.h" + +// ---------------------------------------------------------------------------- +// globals + +/* +// if not zero it gives the time (in seconds) to remove un-updated dimensions +// DO NOT ENABLE +// if dimensions are removed, the chart generation will have to run again +int rrd_delete_unupdated_dimensions = 0; +*/ + +int default_rrd_update_every = UPDATE_EVERY; +int default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES; +RRD_MEMORY_MODE default_rrd_memory_mode = RRD_MEMORY_MODE_SAVE; +int gap_when_lost_iterations_above = 1; + + +// ---------------------------------------------------------------------------- +// RRD - memory modes + +inline const char *rrd_memory_mode_name(RRD_MEMORY_MODE id) { + switch(id) { + case RRD_MEMORY_MODE_RAM: + return RRD_MEMORY_MODE_RAM_NAME; + + case RRD_MEMORY_MODE_MAP: + return RRD_MEMORY_MODE_MAP_NAME; + + case RRD_MEMORY_MODE_NONE: + return RRD_MEMORY_MODE_NONE_NAME; + + case RRD_MEMORY_MODE_SAVE: + return RRD_MEMORY_MODE_SAVE_NAME; + + case RRD_MEMORY_MODE_ALLOC: + return RRD_MEMORY_MODE_ALLOC_NAME; + } + + return RRD_MEMORY_MODE_SAVE_NAME; +} + +RRD_MEMORY_MODE rrd_memory_mode_id(const char *name) { + if(unlikely(!strcmp(name, RRD_MEMORY_MODE_RAM_NAME))) + return RRD_MEMORY_MODE_RAM; + + else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_MAP_NAME))) + return RRD_MEMORY_MODE_MAP; + + else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_NONE_NAME))) + return RRD_MEMORY_MODE_NONE; + + else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_ALLOC_NAME))) + return RRD_MEMORY_MODE_ALLOC; + + return RRD_MEMORY_MODE_SAVE; +} + + +// ---------------------------------------------------------------------------- +// RRD - algorithms types + +RRD_ALGORITHM rrd_algorithm_id(const char *name) { + if(strcmp(name, RRD_ALGORITHM_INCREMENTAL_NAME) == 0) + return RRD_ALGORITHM_INCREMENTAL; + + else if(strcmp(name, RRD_ALGORITHM_ABSOLUTE_NAME) == 0) + return RRD_ALGORITHM_ABSOLUTE; + + else if(strcmp(name, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME) == 0) + return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL; + + else if(strcmp(name, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME) == 0) + return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL; + + else + return RRD_ALGORITHM_ABSOLUTE; +} + +const char *rrd_algorithm_name(RRD_ALGORITHM algorithm) { + switch(algorithm) { + case RRD_ALGORITHM_ABSOLUTE: + default: + return RRD_ALGORITHM_ABSOLUTE_NAME; + + case RRD_ALGORITHM_INCREMENTAL: + return RRD_ALGORITHM_INCREMENTAL_NAME; + + case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: + return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME; + + case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: + return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME; + } +} + + +// ---------------------------------------------------------------------------- +// RRD - chart types + +inline RRDSET_TYPE rrdset_type_id(const char *name) { + if(unlikely(strcmp(name, RRDSET_TYPE_AREA_NAME) == 0)) + return RRDSET_TYPE_AREA; + + else if(unlikely(strcmp(name, RRDSET_TYPE_STACKED_NAME) == 0)) + return RRDSET_TYPE_STACKED; + + else // if(unlikely(strcmp(name, RRDSET_TYPE_LINE_NAME) == 0)) + 
return RRDSET_TYPE_LINE; +} + +const char *rrdset_type_name(RRDSET_TYPE chart_type) { + switch(chart_type) { + case RRDSET_TYPE_LINE: + default: + return RRDSET_TYPE_LINE_NAME; + + case RRDSET_TYPE_AREA: + return RRDSET_TYPE_AREA_NAME; + + case RRDSET_TYPE_STACKED: + return RRDSET_TYPE_STACKED_NAME; + } +} + + +// ---------------------------------------------------------------------------- +// RRD - cache directory + +char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section) { + char *ret = NULL; + + char b[FILENAME_MAX + 1]; + char n[FILENAME_MAX + 1]; + rrdset_strncpyz_name(b, id, FILENAME_MAX); + + snprintfz(n, FILENAME_MAX, "%s/%s", host->cache_dir, b); + ret = config_get(config_section, "cache directory", n); + + if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) { + int r = mkdir(ret, 0775); + if(r != 0 && errno != EEXIST) + error("Cannot create directory '%s'", ret); + } + + return ret; +} diff --git a/database/rrd.h b/database/rrd.h new file mode 100644 index 000000000..19eb100cd --- /dev/null +++ b/database/rrd.h @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRD_H +#define NETDATA_RRD_H 1 + +// forward typedefs +typedef struct rrdhost RRDHOST; +typedef struct rrddim RRDDIM; +typedef struct rrdset RRDSET; +typedef struct rrdvar RRDVAR; +typedef struct rrdsetvar RRDSETVAR; +typedef struct rrddimvar RRDDIMVAR; +typedef struct rrdcalc RRDCALC; +typedef struct rrdcalctemplate RRDCALCTEMPLATE; +typedef struct alarm_entry ALARM_ENTRY; + +#include "../daemon/common.h" +#include "web/api/queries/query.h" +#include "rrdvar.h" +#include "rrdsetvar.h" +#include "rrddimvar.h" +#include "rrdcalc.h" +#include "rrdcalctemplate.h" + +#define UPDATE_EVERY 1 +#define UPDATE_EVERY_MAX 3600 + +#define RRD_DEFAULT_HISTORY_ENTRIES 3600 +#define RRD_HISTORY_ENTRIES_MAX (86400*365) + +extern int default_rrd_update_every; +extern int default_rrd_history_entries; +extern int gap_when_lost_iterations_above; + +#define RRD_ID_LENGTH_MAX 200 + +#define RRDSET_MAGIC "NETDATA RRD SET FILE V019" +#define RRDDIMENSION_MAGIC "NETDATA RRD DIMENSION FILE V019" + +typedef long long total_number; +#define TOTAL_NUMBER_FORMAT "%lld" + +// ---------------------------------------------------------------------------- +// chart types + +typedef enum rrdset_type { + RRDSET_TYPE_LINE = 0, + RRDSET_TYPE_AREA = 1, + RRDSET_TYPE_STACKED = 2 +} RRDSET_TYPE; + +#define RRDSET_TYPE_LINE_NAME "line" +#define RRDSET_TYPE_AREA_NAME "area" +#define RRDSET_TYPE_STACKED_NAME "stacked" + +RRDSET_TYPE rrdset_type_id(const char *name); +const char *rrdset_type_name(RRDSET_TYPE chart_type); + + +// ---------------------------------------------------------------------------- +// memory mode + +typedef enum rrd_memory_mode { + RRD_MEMORY_MODE_NONE = 0, + RRD_MEMORY_MODE_RAM = 1, + RRD_MEMORY_MODE_MAP = 2, + RRD_MEMORY_MODE_SAVE = 3, + RRD_MEMORY_MODE_ALLOC = 4 +} RRD_MEMORY_MODE; + +#define RRD_MEMORY_MODE_NONE_NAME "none" +#define RRD_MEMORY_MODE_RAM_NAME "ram" +#define RRD_MEMORY_MODE_MAP_NAME "map" +#define RRD_MEMORY_MODE_SAVE_NAME "save" +#define RRD_MEMORY_MODE_ALLOC_NAME "alloc" + +extern RRD_MEMORY_MODE default_rrd_memory_mode; + +extern const char *rrd_memory_mode_name(RRD_MEMORY_MODE id); +extern RRD_MEMORY_MODE rrd_memory_mode_id(const char *name); + + +// ---------------------------------------------------------------------------- +// algorithms types + +typedef enum rrd_algorithm { + RRD_ALGORITHM_ABSOLUTE = 0, + 
RRD_ALGORITHM_INCREMENTAL = 1, + RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL = 2, + RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL = 3 +} RRD_ALGORITHM; + +#define RRD_ALGORITHM_ABSOLUTE_NAME "absolute" +#define RRD_ALGORITHM_INCREMENTAL_NAME "incremental" +#define RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME "percentage-of-incremental-row" +#define RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME "percentage-of-absolute-row" + +extern RRD_ALGORITHM rrd_algorithm_id(const char *name); +extern const char *rrd_algorithm_name(RRD_ALGORITHM algorithm); + +// ---------------------------------------------------------------------------- +// RRD FAMILY + +struct rrdfamily { + avl avl; + + const char *family; + uint32_t hash_family; + + size_t use_count; + + avl_tree_lock rrdvar_root_index; +}; +typedef struct rrdfamily RRDFAMILY; + + +// ---------------------------------------------------------------------------- +// flags +// use this for configuration flags, not for state control +// flags are set/unset in a manner that is not thread safe +// and may lead to missing information. + +typedef enum rrddim_flags { + RRDDIM_FLAG_NONE = 0, + RRDDIM_FLAG_HIDDEN = (1 << 0), // this dimension will not be offered to callers + RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1) // do not offer RESET or OVERFLOW info to callers +} RRDDIM_FLAGS; + +#ifdef HAVE_C___ATOMIC +#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag)) +#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST) +#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST) +#else +#define rrddim_flag_check(rd, flag) ((rd)->flags & (flag)) +#define rrddim_flag_set(rd, flag) (rd)->flags |= (flag) +#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~(flag) +#endif + + +// ---------------------------------------------------------------------------- +// RRD DIMENSION - this is a metric + +struct rrddim { + // ------------------------------------------------------------------------ + // binary indexing structures + + avl avl; // the binary index - this has to be first member! 
+
+    // ------------------------------------------------------------------------
+    // the dimension definition
+
+    const char *id; // the id of this dimension (for internal identification)
+    const char *name; // the name of this dimension (as presented to user)
+ // this is a pointer to the config structure
+ // since the config always has a higher priority
+ // (the user overwrites the name of the charts)
+ // DO NOT FREE THIS - IT IS ALLOCATED IN CONFIG
+
+    RRD_ALGORITHM algorithm; // the algorithm that is applied to add new collected values
+    RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for this dimension
+
+    collected_number multiplier; // the multiplier of the collected values
+    collected_number divisor; // the divisor of the collected values
+
+    uint32_t flags; // configuration flags for the dimension
+
+    // ------------------------------------------------------------------------
+    // members for temporary data we need for calculations
+
+    uint32_t hash; // a simple hash of the id, to speed up searching / indexing
+ // instead of strcmp() every item in the binary index
+ // we first compare the hashes
+
+    uint32_t hash_name; // a simple hash of the name
+
+    char *cache_filename; // the filename we load/save from/to this set
+
+    size_t collections_counter; // the number of times we added values to this rrddim
+    size_t unused[10];
+
+    unsigned int updated:1; // 1 when the dimension has been updated since the last processing
+    unsigned int exposed:1; // 1 when we have sent this dimension to the central netdata
+
+    struct timeval last_collected_time; // when was this dimension last updated
+ // this is the actual date/time we updated the last_collected_value
+ // THIS IS DIFFERENT FROM THE SAME MEMBER OF RRDSET
+
+    calculated_number calculated_value; // the current calculated value, after applying the algorithm - resets to zero after being used
+    calculated_number last_calculated_value; // the last calculated value processed
+
+    calculated_number last_stored_value; // the last value as stored in the database (after interpolation)
+
+    collected_number collected_value; // the current value, as collected - resets to 0 after being used
+    collected_number last_collected_value; // the last value that was collected, after being processed
+
+    // the *_volume members are used to calculate the accuracy of the rounding done by the
+    // storage number - they are printed to debug.log when debug is enabled for a set.
+    calculated_number collected_volume; // the sum of all collected values so far
+    calculated_number stored_volume; // the sum of all stored values so far
+
+    struct rrddim *next; // linking of dimensions within the same data set
+    struct rrdset *rrdset;
+
+    // ------------------------------------------------------------------------
+    // members for checking the data when loading from disk
+
+    long entries; // how many entries this dimension has in ram
+ // this is the same as the entries of the data set
+ // we set it here, to check the data when we load it from disk.
+
+    int update_every; // every how many seconds is this updated
+
+    size_t memsize; // the memory allocated for this dimension
+
+    char magic[sizeof(RRDDIMENSION_MAGIC) + 1]; // a string to be saved, used to identify our data file
+
+    struct rrddimvar *variables;
+
+    // ------------------------------------------------------------------------
+    // the values stored in this dimension, using our floating point numbers
+
+    storage_number values[]; // the array of values - THIS HAS TO BE THE LAST MEMBER
+};
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrddim_foreach_read(rd, st) \
+    for((rd) = (st)->dimensions, rrdset_check_rdlock(st); (rd) ; (rd) = (rd)->next)
+
+#define rrddim_foreach_write(rd, st) \
+    for((rd) = (st)->dimensions, rrdset_check_wrlock(st); (rd) ; (rd) = (rd)->next)
+
+
+// ----------------------------------------------------------------------------
+// RRDSET - this is a chart
+
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrdset_flags {
+    RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
+    RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
+ // (the master data set should be the one that has the same family and is not detail)
+    RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
+    RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
+    RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends
+    RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends
+    RRDSET_FLAG_UPSTREAM_SEND = 1 << 6, // if set, this chart should be sent upstream (streaming)
+    RRDSET_FLAG_UPSTREAM_IGNORE = 1 << 7, // if set, this chart should not be sent upstream (streaming)
+    RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata master (streaming)
+    RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
+    RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or divisors)
+    RRDSET_FLAG_HOMEGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if its dimensions are homogeneous
+    RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
+    RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
+} RRDSET_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
+#define rrdset_flag_check(st, flag) ((st)->flags & (flag))
+#define rrdset_flag_set(st, flag) (st)->flags |= (flag)
+#define rrdset_flag_clear(st, flag) (st)->flags &= ~(flag)
+#endif
+#define rrdset_flag_check_noatomic(st, flag) ((st)->flags & (flag))
+
+struct rrdset {
+    // ------------------------------------------------------------------------
+    // binary indexing structures
+
+    avl avl; // the index, with key the id - this has to be first!
+    avl avlname; // the index, with key the name
+
+    // ------------------------------------------------------------------------
+    // the set configuration
+
+    char id[RRD_ID_LENGTH_MAX + 1]; // id of the data set
+
+    const char *name; // the name of this chart (as presented to user)
+ // this is a pointer to the config structure
+ // since the config always has a higher priority
+ // (the user overwrites the name of the charts)
+
+    char *config_section; // the config section for the chart
+
+    char *type; // the type of graph RRD_TYPE_* (a category, for determining graphing options)
+    char *family; // grouping sets under the same family
+    char *title; // title shown to user
+    char *units; // units of measurement
+
+    char *context; // the template of this data set
+    uint32_t hash_context; // the hash of the chart's context
+
+    RRDSET_TYPE chart_type; // line, area, stacked
+
+    int update_every; // every how many seconds is this updated?
+
+    long entries; // total number of entries in the data set
+
+    long current_entry; // the entry that is currently being updated
+ // it goes around in a round-robin fashion
+
+    RRDSET_FLAGS flags; // configuration flags
+
+    int gap_when_lost_iterations_above; // after how many lost iterations a gap should be stored
+ // netdata will interpolate values for gaps lower than this
+
+    long priority; // the sorting priority of this chart
+
+
+    // ------------------------------------------------------------------------
+    // members for temporary data we need for calculations
+
+    RRD_MEMORY_MODE rrd_memory_mode; // the memory mode of this chart
+
+    char *cache_dir; // the directory to store dimensions
+    char cache_filename[FILENAME_MAX+1]; // the filename to store this set
+
+    netdata_rwlock_t rrdset_rwlock; // protects dimensions linked list
+
+    size_t counter; // the number of times we added values to this database
+    size_t counter_done; // the number of times rrdset_done() has been called
+
+    time_t last_accessed_time; // the last time this RRDSET has been accessed
+    time_t upstream_resync_time; // the timestamp up to which we should resync clock upstream
+
+    char *plugin_name; // the name of the plugin that generated this
+    char *module_name; // the name of the plugin module that generated this
+
+    size_t unused[6];
+
+    uint32_t hash; // a simple hash on the id, to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+    uint32_t hash_name; // a simple hash on the name
+
+    usec_t usec_since_last_update; // the time in microseconds since the last collection of data
+
+    struct timeval last_updated; // when this data set was last updated (updated every time the rrd_stats_done() function runs)
+    struct timeval last_collected_time; // when did this data set last collect values
+
+    total_number collected_total; // used internally to calculate percentages
+    total_number last_collected_total; // used internally to calculate percentages
+
+    RRDFAMILY *rrdfamily; // pointer to RRDFAMILY this chart belongs to
+    RRDHOST *rrdhost; // pointer to RRDHOST this chart belongs to
+
+    struct rrdset *next; // linking of rrdsets
+
+    // ------------------------------------------------------------------------
+    // local variables
+
+    calculated_number green; // green threshold for this chart
+    calculated_number red; // red threshold for this chart
+
+    avl_tree_lock rrdvar_root_index; // RRDVAR index for this chart
+    RRDSETVAR *variables; // RRDSETVAR linked list for this chart (one RRDSETVAR, many RRDVARs)
+    RRDCALC *alarms; 
// RRDCALC linked list for this chart + + // ------------------------------------------------------------------------ + // members for checking the data when loading from disk + + unsigned long memsize; // how much mem we have allocated for this (without dimensions) + + char magic[sizeof(RRDSET_MAGIC) + 1]; // our magic + + // ------------------------------------------------------------------------ + // the dimensions + + avl_tree_lock dimensions_index; // the root of the dimensions index + RRDDIM *dimensions; // the actual data for every dimension + +}; + +#define rrdset_rdlock(st) netdata_rwlock_rdlock(&((st)->rrdset_rwlock)) +#define rrdset_wrlock(st) netdata_rwlock_wrlock(&((st)->rrdset_rwlock)) +#define rrdset_unlock(st) netdata_rwlock_unlock(&((st)->rrdset_rwlock)) + + +// ---------------------------------------------------------------------------- +// these loop macros make sure the linked list is accessed with the right lock + +#define rrdset_foreach_read(st, host) \ + for((st) = (host)->rrdset_root, rrdhost_check_rdlock(host); st ; (st) = (st)->next) + +#define rrdset_foreach_write(st, host) \ + for((st) = (host)->rrdset_root, rrdhost_check_wrlock(host); st ; (st) = (st)->next) + + +// ---------------------------------------------------------------------------- +// RRDHOST flags +// use this for configuration flags, not for state control +// flags are set/unset in a manner that is not thread safe +// and may lead to missing information. + +typedef enum rrdhost_flags { + RRDHOST_FLAG_ORPHAN = 1 << 0, // this host is orphan (not receiving data) + RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts + RRDHOST_FLAG_DELETE_ORPHAN_HOST = 1 << 2, // delete the entire host when orphan + RRDHOST_FLAG_BACKEND_SEND = 1 << 3, // send it to backends + RRDHOST_FLAG_BACKEND_DONT_SEND = 1 << 4, // don't send it to backends +} RRDHOST_FLAGS; + +#ifdef HAVE_C___ATOMIC +#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag)) +#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST) +#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST) +#else +#define rrdhost_flag_check(host, flag) ((host)->flags & (flag)) +#define rrdhost_flag_set(host, flag) (host)->flags |= (flag) +#define rrdhost_flag_clear(host, flag) (host)->flags &= ~(flag) +#endif + +#ifdef NETDATA_INTERNAL_CHECKS +#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \ + debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, st->name, ##args); } while(0) +#else +#define rrdset_debug(st, fmt, args...) 
debug_dummy()
+#endif
+
+// ----------------------------------------------------------------------------
+// Health data
+
+struct alarm_entry {
+    uint32_t unique_id;
+    uint32_t alarm_id;
+    uint32_t alarm_event_id;
+
+    time_t when;
+    time_t duration;
+    time_t non_clear_duration;
+
+    char *name;
+    uint32_t hash_name;
+
+    char *chart;
+    uint32_t hash_chart;
+
+    char *family;
+
+    char *exec;
+    char *recipient;
+    time_t exec_run_timestamp;
+    int exec_code;
+
+    char *source;
+    char *units;
+    char *info;
+
+    calculated_number old_value;
+    calculated_number new_value;
+
+    char *old_value_string;
+    char *new_value_string;
+
+    RRDCALC_STATUS old_status;
+    RRDCALC_STATUS new_status;
+
+    uint32_t flags;
+
+    int delay;
+    time_t delay_up_to_timestamp;
+
+    uint32_t updated_by_id;
+    uint32_t updates_id;
+
+    struct alarm_entry *next;
+};
+
+
+typedef struct alarm_log {
+    uint32_t next_log_id;
+    uint32_t next_alarm_id;
+    unsigned int count;
+    unsigned int max;
+    ALARM_ENTRY *alarms;
+    netdata_rwlock_t alarm_log_rwlock;
+} ALARM_LOG;
+
+
+// ----------------------------------------------------------------------------
+// RRD HOST
+
+struct rrdhost {
+    avl avl; // the index of hosts
+
+    // ------------------------------------------------------------------------
+    // host information
+
+    char *hostname; // the hostname of this host
+    uint32_t hash_hostname; // the hostname hash
+
+    char *registry_hostname; // the registry hostname for this host
+
+    char machine_guid[GUID_LEN + 1]; // the unique ID of this host
+    uint32_t hash_machine_guid; // the hash of the unique ID
+
+    const char *os; // the O/S type of the host
+    const char *tags; // tags for this host
+    const char *timezone; // the timezone of the host
+
+    RRDHOST_FLAGS flags; // flags about this RRDHOST
+
+    int rrd_update_every; // the update frequency of the host
+    long rrd_history_entries; // the number of history entries for the host's charts
+    RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for the charts of this host
+
+    char *cache_dir; // the directory to save RRD cache files
+    char *varlib_dir; // the directory to save health log
+
+    char *program_name; // the program name that collects metrics for this host
+    char *program_version; // the program version that collects metrics for this host
+
+    // ------------------------------------------------------------------------
+    // streaming of data to remote hosts - rrdpush
+
+    unsigned int rrdpush_send_enabled:1; // 1 when this host sends metrics to another netdata
+    char *rrdpush_send_destination; // where to send metrics to
+    char *rrdpush_send_api_key; // the api key at the receiving netdata
+
+    // the following is state information for the thread
+    // streaming metrics from this netdata to an upstream netdata
+    volatile unsigned int rrdpush_sender_spawn:1; // 1 when the sender thread has been spawned
+    netdata_thread_t rrdpush_sender_thread; // the sender thread
+
+    volatile unsigned int rrdpush_sender_connected:1; // 1 when the sender is ready to push metrics
+    int rrdpush_sender_socket; // the fd of the socket to the remote host, or -1
+
+    volatile unsigned int rrdpush_sender_error_shown:1; // 1 when we have logged a communication error
+    volatile unsigned int rrdpush_sender_join:1; // 1 when we have to join the sending thread
+
+    SIMPLE_PATTERN *rrdpush_send_charts_matching; // pattern to match the charts to be sent
+
+    // metrics may be collected asynchronously
+    // these synchronize all the threads willing to write to our sending buffer
+    netdata_mutex_t rrdpush_sender_buffer_mutex; // 
exclusive access to rrdpush_sender_buffer + int rrdpush_sender_pipe[2]; // collector to sender thread signaling + BUFFER *rrdpush_sender_buffer; // collector fills it, sender sends it + + + // ------------------------------------------------------------------------ + // streaming of data from remote hosts - rrdpush + + volatile size_t connected_senders; // when remote hosts are streaming to this + // host, this is the counter of connected clients + + time_t senders_disconnected_time; // the time the last sender was disconnected + + // ------------------------------------------------------------------------ + // health monitoring options + + unsigned int health_enabled:1; // 1 when this host has health enabled + time_t health_delay_up_to; // a timestamp to delay alarms processing up to + char *health_default_exec; // the full path of the alarms notifications program + char *health_default_recipient; // the default recipient for all alarms + char *health_log_filename; // the alarms event log filename + size_t health_log_entries_written; // the number of alarm events written to the alarms event log + FILE *health_log_fp; // the FILE pointer to the open alarms event log file + + // all RRDCALCs are primarily allocated and linked here + // RRDCALCs may be linked to charts at any point + // (charts may or may not exist when these are loaded) + RRDCALC *alarms; + + ALARM_LOG health_log; // alarms historical events (event log) + uint32_t health_last_processed_id; // the last processed health id from the log + uint32_t health_max_unique_id; // the max alarm log unique id given for the host + uint32_t health_max_alarm_id; // the max alarm id given for the host + + // templates of alarms + // these are used to create alarms when charts that match them + // are created or renamed + RRDCALCTEMPLATE *templates; + + + // ------------------------------------------------------------------------ + // the charts of the host + + RRDSET *rrdset_root; // the host charts + + + // ------------------------------------------------------------------------ + // locks + + netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list) + + // ------------------------------------------------------------------------ + // indexes + + avl_tree_lock rrdset_root_index; // the host's charts index (by id) + avl_tree_lock rrdset_root_index_name; // the host's charts index (by name) + + avl_tree_lock rrdfamily_root_index; // the host's chart families index + avl_tree_lock rrdvar_root_index; // the host's chart variables index + + struct rrdhost *next; +}; +extern RRDHOST *localhost; + +#define rrdhost_rdlock(host) netdata_rwlock_rdlock(&((host)->rrdhost_rwlock)) +#define rrdhost_wrlock(host) netdata_rwlock_wrlock(&((host)->rrdhost_rwlock)) +#define rrdhost_unlock(host) netdata_rwlock_unlock(&((host)->rrdhost_rwlock)) + +// ---------------------------------------------------------------------------- +// these loop macros make sure the linked list is accessed with the right lock + +#define rrdhost_foreach_read(var) \ + for((var) = localhost, rrd_check_rdlock(); var ; (var) = (var)->next) + +#define rrdhost_foreach_write(var) \ + for((var) = localhost, rrd_check_wrlock(); var ; (var) = (var)->next) + + +// ---------------------------------------------------------------------------- +// global lock for all RRDHOSTs + +extern netdata_rwlock_t rrd_rwlock; + +#define rrd_rdlock() netdata_rwlock_rdlock(&rrd_rwlock) +#define rrd_wrlock() netdata_rwlock_wrlock(&rrd_rwlock) +#define rrd_unlock() 
netdata_rwlock_unlock(&rrd_rwlock) + +// ---------------------------------------------------------------------------- + +extern size_t rrd_hosts_available; +extern time_t rrdhost_free_orphan_time; + +extern void rrd_init(char *hostname); + +extern RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash); +extern RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash); + +extern RRDHOST *rrdhost_find_or_create( + const char *hostname + , const char *registry_hostname + , const char *guid + , const char *os + , const char *timezone + , const char *tags + , const char *program_name + , const char *program_version + , int update_every + , long history + , RRD_MEMORY_MODE mode + , unsigned int health_enabled + , unsigned int rrdpush_enabled + , char *rrdpush_destination + , char *rrdpush_api_key + , char *rrdpush_send_charts_matching +); + +#if defined(NETDATA_INTERNAL_CHECKS) && defined(NETDATA_VERIFY_LOCKS) +extern void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line); +extern void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line); +extern void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line); +extern void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line); +extern void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line); +extern void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line); + +#define rrdhost_check_rdlock(host) __rrdhost_check_rdlock(host, __FILE__, __FUNCTION__, __LINE__) +#define rrdhost_check_wrlock(host) __rrdhost_check_wrlock(host, __FILE__, __FUNCTION__, __LINE__) +#define rrdset_check_rdlock(st) __rrdset_check_rdlock(st, __FILE__, __FUNCTION__, __LINE__) +#define rrdset_check_wrlock(st) __rrdset_check_wrlock(st, __FILE__, __FUNCTION__, __LINE__) +#define rrd_check_rdlock() __rrd_check_rdlock(__FILE__, __FUNCTION__, __LINE__) +#define rrd_check_wrlock() __rrd_check_wrlock(__FILE__, __FUNCTION__, __LINE__) + +#else +#define rrdhost_check_rdlock(host) (void)0 +#define rrdhost_check_wrlock(host) (void)0 +#define rrdset_check_rdlock(st) (void)0 +#define rrdset_check_wrlock(st) (void)0 +#define rrd_check_rdlock() (void)0 +#define rrd_check_wrlock() (void)0 +#endif + +// ---------------------------------------------------------------------------- +// RRDSET functions + +extern int rrdset_set_name(RRDSET *st, const char *name); + +extern RRDSET *rrdset_create_custom(RRDHOST *host + , const char *type + , const char *id + , const char *name + , const char *family + , const char *context + , const char *title + , const char *units + , const char *plugin + , const char *module + , long priority + , int update_every + , RRDSET_TYPE chart_type + , RRD_MEMORY_MODE memory_mode + , long history_entries); + +#define rrdset_create(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \ + rrdset_create_custom(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type, (host)->rrd_memory_mode, (host)->rrd_history_entries) + +#define rrdset_create_localhost(type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \ + rrdset_create(localhost, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) + +extern void 
rrdhost_free_all(void); +extern void rrdhost_save_all(void); +extern void rrdhost_cleanup_all(void); + +extern void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected); +extern void rrdhost_free(RRDHOST *host); +extern void rrdhost_save_charts(RRDHOST *host); +extern void rrdhost_delete_charts(RRDHOST *host); + +extern int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now); + +extern void rrdset_update_heterogeneous_flag(RRDSET *st); + +extern RRDSET *rrdset_find(RRDHOST *host, const char *id); +#define rrdset_find_localhost(id) rrdset_find(localhost, id) + +extern RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id); +#define rrdset_find_bytype_localhost(type, id) rrdset_find_bytype(localhost, type, id) + +extern RRDSET *rrdset_find_byname(RRDHOST *host, const char *name); +#define rrdset_find_byname_localhost(name) rrdset_find_byname(localhost, name) + +extern void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds); +extern void rrdset_next_usec(RRDSET *st, usec_t microseconds); +#define rrdset_next(st) rrdset_next_usec(st, 0ULL) + +extern void rrdset_done(RRDSET *st); + +extern void rrdset_is_obsolete(RRDSET *st); +extern void rrdset_isnot_obsolete(RRDSET *st); + +// checks if the RRDSET should be offered to viewers +#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE) +#define rrdset_is_available_for_backends(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions) + +// get the total duration in seconds of the round robin database +#define rrdset_duration(st) ((time_t)( (((st)->counter >= ((unsigned long)(st)->entries))?(unsigned long)(st)->entries:(st)->counter) * (st)->update_every )) + +// get the timestamp of the last entry in the round robin database +#define rrdset_last_entry_t(st) ((time_t)(((st)->last_updated.tv_sec))) + +// get the timestamp of first entry in the round robin database +#define rrdset_first_entry_t(st) ((time_t)(rrdset_last_entry_t(st) - rrdset_duration(st))) + +// get the last slot updated in the round robin database +#define rrdset_last_slot(st) ((size_t)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1)) + +// get the first / oldest slot updated in the round robin database +// #define rrdset_first_slot(st) ((size_t)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? 
((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) )) + +// return the slot that has the oldest value + +static inline size_t rrdset_first_slot(RRDSET *st) { + if(st->counter >= (size_t)st->entries) { + // the database has been rotated at least once + // the oldest entry is the one that will be next + // overwritten by data collection + return (size_t)st->current_entry; + } + + // we have not rotated the db yet + // so slot 0 is the first entry + return 0; +} + +// get the slot of the round robin database, for the given timestamp (t) +// it always returns a valid slot, although it may not be the one for the requested time if that time is outside the round robin database +static inline size_t rrdset_time2slot(RRDSET *st, time_t t) { + size_t ret = 0; + + if(t >= rrdset_last_entry_t(st)) { + // the requested time is after the last entry we have + ret = rrdset_last_slot(st); + } + else { + if(t <= rrdset_first_entry_t(st)) { + // the requested time is before the first entry we have + ret = rrdset_first_slot(st); + } + else { + if(rrdset_last_slot(st) >= ((rrdset_last_entry_t(st) - t) / (size_t)(st->update_every))) + ret = rrdset_last_slot(st) - ((rrdset_last_entry_t(st) - t) / (size_t)(st->update_every)); + else + ret = rrdset_last_slot(st) - ((rrdset_last_entry_t(st) - t) / (size_t)(st->update_every)) + (unsigned long)st->entries; + } + } + + if(unlikely(ret >= (size_t)st->entries)) { + error("INTERNAL ERROR: rrdset_time2slot() on %s returns values outside entries", st->name); + ret = (size_t)(st->entries - 1); + } + + return ret; +} + +// get the timestamp of a specific slot in the round robin database +static inline time_t rrdset_slot2time(RRDSET *st, size_t slot) { + time_t ret; + + if(slot >= (size_t)st->entries) { + error("INTERNAL ERROR: caller of rrdset_slot2time() gives invalid slot %zu", slot); + slot = (size_t)st->entries - 1; + } + + if(slot > rrdset_last_slot(st)) { + ret = rrdset_last_entry_t(st) - (size_t)st->update_every * (rrdset_last_slot(st) - slot + (size_t)st->entries); + } + else { + ret = rrdset_last_entry_t(st) - (size_t)st->update_every * (rrdset_last_slot(st) - slot); + } + + if(unlikely(ret < rrdset_first_entry_t(st))) { + error("INTERNAL ERROR: rrdset_slot2time() on %s returns time too far in the past", st->name); + ret = rrdset_first_entry_t(st); + } + + if(unlikely(ret > rrdset_last_entry_t(st))) { + error("INTERNAL ERROR: rrdset_slot2time() on %s returns time into the future", st->name); + ret = rrdset_last_entry_t(st); + } + + return ret; +} + +// ---------------------------------------------------------------------------- +// RRD DIMENSION functions + +extern RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode); +#define rrddim_add(st, id, name, multiplier, divisor, algorithm) rrddim_add_custom(st, id, name, multiplier, divisor, algorithm, (st)->rrd_memory_mode) + +extern int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name); +extern int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm); +extern int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier); +extern int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor); + +extern RRDDIM *rrddim_find(RRDSET *st, const char *id); + +extern int rrddim_hide(RRDSET *st, const char *id); +extern int rrddim_unhide(RRDSET *st, const char *id); + +extern collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value); 
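// ----------------------------------------------------------------------------
// Editorial sketch (not part of the upstream patch): how the chart and
// dimension calls declared in this header are typically combined by a
// collector. The chart/dimension ids, priority and value below are
// hypothetical; only the call sequence follows the declarations above.
static inline void example_collect_one_value(collected_number value) {
    static RRDSET *st = NULL;
    static RRDDIM *rd = NULL;

    if(unlikely(!st)) {
        // first iteration: create the chart and its single dimension
        st = rrdset_create_localhost(
                "example"                       // type
                , "random"                      // id
                , NULL                          // name
                , "random"                      // family
                , NULL                          // context
                , "A hypothetical example chart"
                , "value"                       // units
                , "example.plugin"              // plugin
                , NULL                          // module
                , 1000                          // priority
                , 1                             // update every 1 second
                , RRDSET_TYPE_LINE);
        rd = rrddim_add(st, "value", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
    }
    else
        rrdset_next(st);                        // declare a new iteration

    rrddim_set_by_pointer(st, rd, value);       // store the collected value
    rrdset_done(st);                            // interpolate and commit it
}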
+extern collected_number rrddim_set(RRDSET *st, const char *id, collected_number value); + +extern long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries); + +// ---------------------------------------------------------------------------- +// RRD internal functions + +#ifdef NETDATA_RRD_INTERNALS + +extern avl_tree_lock rrdhost_root_index; + +extern char *rrdset_strncpyz_name(char *to, const char *from, size_t length); +extern char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section); + +extern void rrddim_free(RRDSET *st, RRDDIM *rd); + +extern int rrddim_compare(void* a, void* b); +extern int rrdset_compare(void* a, void* b); +extern int rrdset_compare_name(void* a, void* b); +extern int rrdfamily_compare(void *a, void *b); + +extern RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id); +extern void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc); + +#define rrdset_index_add(host, st) (RRDSET *)avl_insert_lock(&((host)->rrdset_root_index), (avl *)(st)) +#define rrdset_index_del(host, st) (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index), (avl *)(st)) +extern RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st); + +extern void rrdset_free(RRDSET *st); +extern void rrdset_reset(RRDSET *st); +extern void rrdset_save(RRDSET *st); +extern void rrdset_delete(RRDSET *st); + +extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host); + +#endif /* NETDATA_RRD_INTERNALS */ + + +#endif /* NETDATA_RRD_H */ diff --git a/database/rrdcalc.c b/database/rrdcalc.c new file mode 100644 index 000000000..7f6a896b6 --- /dev/null +++ b/database/rrdcalc.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_HEALTH_INTERNALS +#include "rrd.h" + +// ---------------------------------------------------------------------------- +// RRDCALC management + +inline const char *rrdcalc_status2string(RRDCALC_STATUS status) { + switch(status) { + case RRDCALC_STATUS_REMOVED: + return "REMOVED"; + + case RRDCALC_STATUS_UNDEFINED: + return "UNDEFINED"; + + case RRDCALC_STATUS_UNINITIALIZED: + return "UNINITIALIZED"; + + case RRDCALC_STATUS_CLEAR: + return "CLEAR"; + + case RRDCALC_STATUS_RAISED: + return "RAISED"; + + case RRDCALC_STATUS_WARNING: + return "WARNING"; + + case RRDCALC_STATUS_CRITICAL: + return "CRITICAL"; + + default: + error("Unknown alarm status %d", status); + return "UNKNOWN"; + } +} + +static void rrdsetcalc_link(RRDSET *st, RRDCALC *rc) { + RRDHOST *host = st->rrdhost; + + debug(D_HEALTH, "Health linking alarm '%s.%s' to chart '%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, st->id, host->hostname); + + rc->last_status_change = now_realtime_sec(); + rc->rrdset = st; + + rc->rrdset_next = st->alarms; + rc->rrdset_prev = NULL; + + if(rc->rrdset_next) + rc->rrdset_next->rrdset_prev = rc; + + st->alarms = rc; + + if(rc->update_every < rc->rrdset->update_every) { + error("Health alarm '%s.%s' has update every %d, less than chart update every %d. 
Setting alarm update frequency to %d.", rc->rrdset->id, rc->name, rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every); + rc->update_every = rc->rrdset->update_every; + } + + if(!isnan(rc->green) && isnan(st->green)) { + debug(D_HEALTH, "Health alarm '%s.%s' green threshold set from " CALCULATED_NUMBER_FORMAT_AUTO " to " CALCULATED_NUMBER_FORMAT_AUTO ".", rc->rrdset->id, rc->name, rc->rrdset->green, rc->green); + st->green = rc->green; + } + + if(!isnan(rc->red) && isnan(st->red)) { + debug(D_HEALTH, "Health alarm '%s.%s' red threshold set from " CALCULATED_NUMBER_FORMAT_AUTO " to " CALCULATED_NUMBER_FORMAT_AUTO ".", rc->rrdset->id, rc->name, rc->rrdset->red, rc->red); + st->red = rc->red; + } + + rc->local = rrdvar_create_and_index("local", &st->rrdvar_root_index, rc->name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_LOCAL_VAR, &rc->value); + rc->family = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rc->name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_FAMILY_VAR, &rc->value); + + char fullname[RRDVAR_MAX_LENGTH + 1]; + snprintfz(fullname, RRDVAR_MAX_LENGTH, "%s.%s", st->id, rc->name); + rc->hostid = rrdvar_create_and_index("host", &host->rrdvar_root_index, fullname, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_HOST_CHARTID_VAR, &rc->value); + + snprintfz(fullname, RRDVAR_MAX_LENGTH, "%s.%s", st->name, rc->name); + rc->hostname = rrdvar_create_and_index("host", &host->rrdvar_root_index, fullname, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_HOST_CHARTNAME_VAR, &rc->value); + + if(rc->hostid && !rc->hostname) + rc->hostid->options |= RRDVAR_OPTION_RRDCALC_HOST_CHARTNAME_VAR; + + if(!rc->units) rc->units = strdupz(st->units); + + { + time_t now = now_realtime_sec(); + health_alarm_log( + host, + rc->id, + rc->next_event_id++, + now, + rc->name, + rc->rrdset->id, + rc->rrdset->family, + rc->exec, + rc->recipient, + now - rc->last_status_change, + rc->old_value, + rc->value, + rc->status, + RRDCALC_STATUS_UNINITIALIZED, + rc->source, + rc->units, + rc->info, + 0, + 0 + ); + } +} + +static inline int rrdcalc_is_matching_this_rrdset(RRDCALC *rc, RRDSET *st) { + if( (rc->hash_chart == st->hash && !strcmp(rc->chart, st->id)) || + (rc->hash_chart == st->hash_name && !strcmp(rc->chart, st->name))) + return 1; + + return 0; +} + +// this has to be called while the RRDHOST is locked +inline void rrdsetcalc_link_matching(RRDSET *st) { + RRDHOST *host = st->rrdhost; + // debug(D_HEALTH, "find matching alarms for chart '%s'", st->id); + + RRDCALC *rc; + for(rc = host->alarms; rc ; rc = rc->next) { + if(unlikely(rc->rrdset)) + continue; + + if(unlikely(rrdcalc_is_matching_this_rrdset(rc, st))) + rrdsetcalc_link(st, rc); + } +} + +// this has to be called while the RRDHOST is locked +inline void rrdsetcalc_unlink(RRDCALC *rc) { + RRDSET *st = rc->rrdset; + + if(!st) { + debug(D_HEALTH, "Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rc->chart?rc->chart:"NOCHART", rc->name); + error("Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rc->chart?rc->chart:"NOCHART", rc->name); + return; + } + + RRDHOST *host = st->rrdhost; + + { + time_t now = now_realtime_sec(); + health_alarm_log( + host, + rc->id, + rc->next_event_id++, + now, + rc->name, + rc->rrdset->id, + rc->rrdset->family, + rc->exec, + rc->recipient, + now - rc->last_status_change, + rc->old_value, + rc->value, + rc->status, + RRDCALC_STATUS_REMOVED, + rc->source, + rc->units, + rc->info, + 0, + 0 + ); + } + + debug(D_HEALTH, "Health unlinking alarm '%s.%s' 
from chart '%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, st->id, host->hostname); + + // unlink it + if(rc->rrdset_prev) + rc->rrdset_prev->rrdset_next = rc->rrdset_next; + + if(rc->rrdset_next) + rc->rrdset_next->rrdset_prev = rc->rrdset_prev; + + if(st->alarms == rc) + st->alarms = rc->rrdset_next; + + rc->rrdset_prev = rc->rrdset_next = NULL; + + rrdvar_free(host, &st->rrdvar_root_index, rc->local); + rc->local = NULL; + + rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rc->family); + rc->family = NULL; + + rrdvar_free(host, &host->rrdvar_root_index, rc->hostid); + rc->hostid = NULL; + + rrdvar_free(host, &host->rrdvar_root_index, rc->hostname); + rc->hostname = NULL; + + rc->rrdset = NULL; + + // RRDCALC will remain in RRDHOST + // so that if the matching chart is found in the future + // it will be applied automatically +} + +RRDCALC *rrdcalc_find(RRDSET *st, const char *name) { + RRDCALC *rc; + uint32_t hash = simple_hash(name); + + for( rc = st->alarms; rc ; rc = rc->rrdset_next ) { + if(unlikely(rc->hash == hash && !strcmp(rc->name, name))) + return rc; + } + + return NULL; +} + +inline int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, uint32_t hash_chart, uint32_t hash_name) { + RRDCALC *rc; + + if(unlikely(!chart)) { + error("attempt to find RRDCALC '%s' without giving a chart name", name); + return 1; + } + + if(unlikely(!hash_chart)) hash_chart = simple_hash(chart); + if(unlikely(!hash_name)) hash_name = simple_hash(name); + + // make sure it does not already exist + for(rc = host->alarms; rc ; rc = rc->next) { + if (unlikely(rc->chart && rc->hash == hash_name && rc->hash_chart == hash_chart && !strcmp(name, rc->name) && !strcmp(chart, rc->chart))) { + debug(D_HEALTH, "Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname); + info("Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname); + return 1; + } + } + + return 0; +} + +inline uint32_t rrdcalc_get_unique_id(RRDHOST *host, const char *chart, const char *name, uint32_t *next_event_id) { + if(chart && name) { + uint32_t hash_chart = simple_hash(chart); + uint32_t hash_name = simple_hash(name); + + // re-use old IDs, by looking them up in the alarm log + ALARM_ENTRY *ae; + for(ae = host->health_log.alarms; ae ;ae = ae->next) { + if(unlikely(ae->hash_name == hash_name && ae->hash_chart == hash_chart && !strcmp(name, ae->name) && !strcmp(chart, ae->chart))) { + if(next_event_id) *next_event_id = ae->alarm_event_id + 1; + return ae->alarm_id; + } + } + } + + return host->health_log.next_alarm_id++; +} + +inline void rrdcalc_create_part2(RRDHOST *host, RRDCALC *rc) { + rrdhost_check_rdlock(host); + + if(rc->calculation) { + rc->calculation->status = &rc->status; + rc->calculation->this = &rc->value; + rc->calculation->after = &rc->db_after; + rc->calculation->before = &rc->db_before; + rc->calculation->rrdcalc = rc; + } + + if(rc->warning) { + rc->warning->status = &rc->status; + rc->warning->this = &rc->value; + rc->warning->after = &rc->db_after; + rc->warning->before = &rc->db_before; + rc->warning->rrdcalc = rc; + } + + if(rc->critical) { + rc->critical->status = &rc->status; + rc->critical->this = &rc->value; + rc->critical->after = &rc->db_after; + rc->critical->before = &rc->db_before; + rc->critical->rrdcalc = rc; + } + + // link it to the host + if(likely(host->alarms)) { + // append it + RRDCALC *t; + for(t = host->alarms; t && t->next ; t = t->next) ; + t->next = rc; + } + else { + host->alarms = rc; + } + + // link it to its 
chart + RRDSET *st; + rrdset_foreach_read(st, host) { + if(rrdcalc_is_matching_this_rrdset(rc, st)) { + rrdsetcalc_link(st, rc); + break; + } + } +} + +inline RRDCALC *rrdcalc_create(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *chart) { + + debug(D_HEALTH, "Health creating dynamic alarm (from template) '%s.%s'", chart, rt->name); + + if(rrdcalc_exists(host, chart, rt->name, 0, 0)) + return NULL; + + RRDCALC *rc = callocz(1, sizeof(RRDCALC)); + rc->next_event_id = 1; + rc->id = rrdcalc_get_unique_id(host, chart, rt->name, &rc->next_event_id); + rc->name = strdupz(rt->name); + rc->hash = simple_hash(rc->name); + rc->chart = strdupz(chart); + rc->hash_chart = simple_hash(rc->chart); + + if(rt->dimensions) rc->dimensions = strdupz(rt->dimensions); + + rc->green = rt->green; + rc->red = rt->red; + rc->value = NAN; + rc->old_value = NAN; + + rc->delay_up_duration = rt->delay_up_duration; + rc->delay_down_duration = rt->delay_down_duration; + rc->delay_max_duration = rt->delay_max_duration; + rc->delay_multiplier = rt->delay_multiplier; + + rc->group = rt->group; + rc->after = rt->after; + rc->before = rt->before; + rc->update_every = rt->update_every; + rc->options = rt->options; + + if(rt->exec) rc->exec = strdupz(rt->exec); + if(rt->recipient) rc->recipient = strdupz(rt->recipient); + if(rt->source) rc->source = strdupz(rt->source); + if(rt->units) rc->units = strdupz(rt->units); + if(rt->info) rc->info = strdupz(rt->info); + + if(rt->calculation) { + rc->calculation = expression_parse(rt->calculation->source, NULL, NULL); + if(!rc->calculation) + error("Health alarm '%s.%s': failed to parse calculation expression '%s'", chart, rt->name, rt->calculation->source); + } + if(rt->warning) { + rc->warning = expression_parse(rt->warning->source, NULL, NULL); + if(!rc->warning) + error("Health alarm '%s.%s': failed to re-parse warning expression '%s'", chart, rt->name, rt->warning->source); + } + if(rt->critical) { + rc->critical = expression_parse(rt->critical->source, NULL, NULL); + if(!rc->critical) + error("Health alarm '%s.%s': failed to re-parse critical expression '%s'", chart, rt->name, rt->critical->source); + } + + debug(D_HEALTH, "Health runtime added alarm '%s.%s': exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f", + (rc->chart)?rc->chart:"NOCHART", + rc->name, + (rc->exec)?rc->exec:"DEFAULT", + (rc->recipient)?rc->recipient:"DEFAULT", + rc->green, + rc->red, + (int)rc->group, + rc->after, + rc->before, + rc->options, + (rc->dimensions)?rc->dimensions:"NONE", + rc->update_every, + (rc->calculation)?rc->calculation->parsed_as:"NONE", + (rc->warning)?rc->warning->parsed_as:"NONE", + (rc->critical)?rc->critical->parsed_as:"NONE", + rc->source, + rc->delay_up_duration, + rc->delay_down_duration, + rc->delay_max_duration, + rc->delay_multiplier + ); + + rrdcalc_create_part2(host, rc); + return rc; +} + +void rrdcalc_free(RRDCALC *rc) { + if(unlikely(!rc)) return; + + expression_free(rc->calculation); + expression_free(rc->warning); + expression_free(rc->critical); + + freez(rc->name); + freez(rc->chart); + freez(rc->family); + freez(rc->dimensions); + freez(rc->exec); + freez(rc->recipient); + freez(rc->source); + freez(rc->units); + freez(rc->info); + freez(rc); +} + +void rrdcalc_unlink_and_free(RRDHOST *host, RRDCALC *rc) { + 
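    // detach the alarm from its chart first (rrdsetcalc_unlink() also logs a
    // REMOVED event), then take it out of the host's alarms list and free it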
if(unlikely(!rc)) return; + + debug(D_HEALTH, "Health removing alarm '%s.%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname); + + // unlink it from RRDSET + if(rc->rrdset) rrdsetcalc_unlink(rc); + + // unlink it from RRDHOST + if(unlikely(rc == host->alarms)) + host->alarms = rc->next; + + else { + RRDCALC *t; + for(t = host->alarms; t && t->next != rc; t = t->next) ; + if(t) { + t->next = rc->next; + rc->next = NULL; + } + else + error("Cannot unlink alarm '%s.%s' from host '%s': not found", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname); + } + + rrdcalc_free(rc); +} diff --git a/database/rrdcalc.h b/database/rrdcalc.h new file mode 100644 index 000000000..0c7cd0aa1 --- /dev/null +++ b/database/rrdcalc.h @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrd.h" + +#ifndef NETDATA_RRDCALC_H +#define NETDATA_RRDCALC_H 1 + +// calculated variables (defined in health configuration) +// These aggregate time-series data at fixed intervals +// (defined in their update_every member below) +// They increase the overhead of netdata. +// +// These calculations are allocated and linked (->next) +// under RRDHOST. +// They are also linked to an RRDSET (only when the matching +// chart is found, via ->rrdset_next and ->rrdset_prev). +// This doubly-linked list is maintained sorted at all times, +// with RRDSET.alarms pointing to the next RRDCALC to be +// processed. + +#define RRDCALC_FLAG_DB_ERROR 0x00000001 +#define RRDCALC_FLAG_DB_NAN 0x00000002 +/* #define RRDCALC_FLAG_DB_STALE 0x00000004 */ +#define RRDCALC_FLAG_CALC_ERROR 0x00000008 +#define RRDCALC_FLAG_WARN_ERROR 0x00000010 +#define RRDCALC_FLAG_CRIT_ERROR 0x00000020 +#define RRDCALC_FLAG_RUNNABLE 0x00000040 +#define RRDCALC_FLAG_NO_CLEAR_NOTIFICATION 0x80000000 + +struct rrdcalc { + uint32_t id; // the unique id of this alarm + uint32_t next_event_id; // the next event id that will be used for this alarm + + char *name; // the name of this alarm + uint32_t hash; + + char *exec; // the command to execute when this alarm switches state + char *recipient; // the recipient of the alarm (the first parameter to exec) + + char *chart; // the chart id this should be linked to + uint32_t hash_chart; + + char *source; // the source of this alarm + char *units; // the units of the alarm + char *info; // a short description of the alarm + + int update_every; // update frequency for the alarm + + // the red and green thresholds of this alarm (to be set on the chart) + calculated_number green; + calculated_number red; + + // ------------------------------------------------------------------------ + // database lookup settings + + char *dimensions; // the chart dimensions + RRDR_GROUPING group; // grouping method: average, max, etc. 
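    // note: after/before express the lookup window in seconds relative to
    // now (negative values look into the past), as parsed from the lookup:
    // line of the health configuration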
+ int before; // ending point in time-series + int after; // starting point in time-series + uint32_t options; // calculation options + + // ------------------------------------------------------------------------ + // expressions related to the alarm + + EVAL_EXPRESSION *calculation; // expression to calculate the value of the alarm + EVAL_EXPRESSION *warning; // expression to check the warning condition + EVAL_EXPRESSION *critical; // expression to check the critical condition + + // ------------------------------------------------------------------------ + // notification delay settings + + int delay_up_duration; // duration to delay notifications when the alarm status rises + int delay_down_duration; // duration to delay notifications when the alarm status falls + int delay_max_duration; // the absolute max delay to apply to this alarm + float delay_multiplier; // multiplier for all delays when alarms switch status + // while now < delay_up_to + + // ------------------------------------------------------------------------ + // runtime information + + RRDCALC_STATUS status; // the current status of the alarm + + calculated_number value; // the current value of the alarm + calculated_number old_value; // the previous value of the alarm + + uint32_t rrdcalc_flags; // check RRDCALC_FLAG_* + + time_t last_updated; // the last update timestamp of the alarm + time_t next_update; // the next update timestamp of the alarm + time_t last_status_change; // the timestamp of the last time this alarm changed status + + time_t db_after; // the first timestamp evaluated by the db lookup + time_t db_before; // the last timestamp evaluated by the db lookup + + time_t delay_up_to_timestamp; // the timestamp up to which we should delay notifications + int delay_up_current; // the current up notification delay duration + int delay_down_current; // the current down notification delay duration + int delay_last; // the last delay we used + + // ------------------------------------------------------------------------ + // variables this alarm exposes to the rest of the alarms + + RRDVAR *local; + RRDVAR *family; + RRDVAR *hostid; + RRDVAR *hostname; + + // ------------------------------------------------------------------------ + // the chart this alarm is linked to + + struct rrdset *rrdset; + + // linking of this alarm on its chart + struct rrdcalc *rrdset_next; + struct rrdcalc *rrdset_prev; + + struct rrdcalc *next; +}; + +#define RRDCALC_HAS_DB_LOOKUP(rc) ((rc)->after) + +extern void rrdsetcalc_link_matching(RRDSET *st); +extern void rrdsetcalc_unlink(RRDCALC *rc); +extern RRDCALC *rrdcalc_find(RRDSET *st, const char *name); + +extern const char *rrdcalc_status2string(RRDCALC_STATUS status); + +extern void rrdcalc_free(RRDCALC *rc); +extern void rrdcalc_unlink_and_free(RRDHOST *host, RRDCALC *rc); + +extern int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, uint32_t hash_chart, uint32_t hash_name); +extern uint32_t rrdcalc_get_unique_id(RRDHOST *host, const char *chart, const char *name, uint32_t *next_event_id); +extern RRDCALC *rrdcalc_create(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *chart); +extern void rrdcalc_create_part2(RRDHOST *host, RRDCALC *rc); + +#endif //NETDATA_RRDCALC_H diff --git a/database/rrdcalctemplate.c b/database/rrdcalctemplate.c new file mode 100644 index 000000000..ba7e7ec94 --- /dev/null +++ b/database/rrdcalctemplate.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_HEALTH_INTERNALS +#include "rrd.h" + +// 
---------------------------------------------------------------------------- +// RRDCALCTEMPLATE management + +void rrdcalctemplate_link_matching(RRDSET *st) { + RRDHOST *host = st->rrdhost; + RRDCALCTEMPLATE *rt; + + for(rt = host->templates; rt ; rt = rt->next) { + if(rt->hash_context == st->hash_context && !strcmp(rt->context, st->context) + && (!rt->family_pattern || simple_pattern_matches(rt->family_pattern, st->family))) { + RRDCALC *rc = rrdcalc_create(host, rt, st->id); + if(unlikely(!rc)) + info("Health tried to create alarm from template '%s' on chart '%s' of host '%s', but it failed", rt->name, st->id, host->hostname); + +#ifdef NETDATA_INTERNAL_CHECKS + else if(rc->rrdset != st) + error("Health alarm '%s.%s' should be linked to chart '%s', but it is not", rc->chart?rc->chart:"NOCHART", rc->name, st->id); +#endif + } + } +} + +inline void rrdcalctemplate_free(RRDCALCTEMPLATE *rt) { + if(unlikely(!rt)) return; + + expression_free(rt->calculation); + expression_free(rt->warning); + expression_free(rt->critical); + + freez(rt->family_match); + simple_pattern_free(rt->family_pattern); + + freez(rt->name); + freez(rt->exec); + freez(rt->recipient); + freez(rt->context); + freez(rt->source); + freez(rt->units); + freez(rt->info); + freez(rt->dimensions); + freez(rt); +} + +inline void rrdcalctemplate_unlink_and_free(RRDHOST *host, RRDCALCTEMPLATE *rt) { + if(unlikely(!rt)) return; + + debug(D_HEALTH, "Health removing template '%s' of host '%s'", rt->name, host->hostname); + + if(host->templates == rt) { + host->templates = rt->next; + } + else { + RRDCALCTEMPLATE *t; + for (t = host->templates; t && t->next != rt; t = t->next ) ; + if(t) { + t->next = rt->next; + rt->next = NULL; + } + else + error("Cannot find RRDCALCTEMPLATE '%s' linked in host '%s'", rt->name, host->hostname); + } + + rrdcalctemplate_free(rt); +} + + diff --git a/database/rrdcalctemplate.h b/database/rrdcalctemplate.h new file mode 100644 index 000000000..2235ecaa1 --- /dev/null +++ b/database/rrdcalctemplate.h @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDCALCTEMPLATE_H +#define NETDATA_RRDCALCTEMPLATE_H 1 + +#include "rrd.h" + +// RRDCALCTEMPLATE +// these are to be applied to charts found dynamically +// based on their context. +struct rrdcalctemplate { + char *name; + uint32_t hash_name; + + char *exec; + char *recipient; + + char *context; + uint32_t hash_context; + + char *family_match; + SIMPLE_PATTERN *family_pattern; + + char *source; // the source of this alarm + char *units; // the units of the alarm + char *info; // a short description of the alarm + + int update_every; // update frequency for the alarm + + // the red and green threshold of this alarm (to be set to the chart) + calculated_number green; + calculated_number red; + + // ------------------------------------------------------------------------ + // database lookup settings + + char *dimensions; // the chart dimensions + RRDR_GROUPING group; // grouping method: average, max, etc. 
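    // the lookup settings below are copied into every RRDCALC that
    // rrdcalc_create() instantiates from this template (see rrdcalc.c)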
+ int before; // ending point in time-series + int after; // starting point in time-series + uint32_t options; // calculation options + + // ------------------------------------------------------------------------ + // notification delay settings + + int delay_up_duration; // duration to delay notifications when alarm raises + int delay_down_duration; // duration to delay notifications when alarm lowers + int delay_max_duration; // the absolute max delay to apply to this alarm + float delay_multiplier; // multiplier for all delays when alarms switch status + + // ------------------------------------------------------------------------ + // expressions related to the alarm + + EVAL_EXPRESSION *calculation; + EVAL_EXPRESSION *warning; + EVAL_EXPRESSION *critical; + + struct rrdcalctemplate *next; +}; + +#define RRDCALCTEMPLATE_HAS_CALCULATION(rt) ((rt)->after) + +extern void rrdcalctemplate_link_matching(RRDSET *st); + +extern void rrdcalctemplate_free(RRDCALCTEMPLATE *rt); +extern void rrdcalctemplate_unlink_and_free(RRDHOST *host, RRDCALCTEMPLATE *rt); + +#endif //NETDATA_RRDCALCTEMPLATE_H diff --git a/database/rrddim.c b/database/rrddim.c new file mode 100644 index 000000000..95e485106 --- /dev/null +++ b/database/rrddim.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_RRD_INTERNALS +#include "rrd.h" + +// ---------------------------------------------------------------------------- +// RRDDIM index + +int rrddim_compare(void* a, void* b) { + if(((RRDDIM *)a)->hash < ((RRDDIM *)b)->hash) return -1; + else if(((RRDDIM *)a)->hash > ((RRDDIM *)b)->hash) return 1; + else return strcmp(((RRDDIM *)a)->id, ((RRDDIM *)b)->id); +} + +#define rrddim_index_add(st, rd) (RRDDIM *)avl_insert_lock(&((st)->dimensions_index), (avl *)(rd)) +#define rrddim_index_del(st,rd ) (RRDDIM *)avl_remove_lock(&((st)->dimensions_index), (avl *)(rd)) + +static inline RRDDIM *rrddim_index_find(RRDSET *st, const char *id, uint32_t hash) { + RRDDIM tmp = { + .id = id, + .hash = (hash)?hash:simple_hash(id) + }; + return (RRDDIM *)avl_search_lock(&(st->dimensions_index), (avl *) &tmp); +} + + +// ---------------------------------------------------------------------------- +// RRDDIM - find a dimension + +inline RRDDIM *rrddim_find(RRDSET *st, const char *id) { + debug(D_RRD_CALLS, "rrddim_find() for chart %s, dimension %s", st->name, id); + + return rrddim_index_find(st, id, 0); +} + + +// ---------------------------------------------------------------------------- +// RRDDIM rename a dimension + +inline int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name) { + if(unlikely(!name || !*name || !strcmp(rd->name, name))) + return 0; + + debug(D_RRD_CALLS, "rrddim_set_name() from %s.%s to %s.%s", st->name, rd->name, st->name, name); + + char varname[CONFIG_MAX_NAME + 1]; + snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id); + rd->name = config_set_default(st->config_section, varname, name); + rd->hash_name = simple_hash(rd->name); + rrddimvar_rename_all(rd); + rd->exposed = 0; + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + return 1; +} + +inline int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm) { + if(unlikely(rd->algorithm == algorithm)) + return 0; + + debug(D_RRD_CALLS, "Updating algorithm of dimension '%s/%s' from %s to %s", st->id, rd->name, rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm)); + rd->algorithm = algorithm; + rd->exposed = 0; + rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); + rrdset_flag_clear(st, 
RRDSET_FLAG_UPSTREAM_EXPOSED); + return 1; +} + +inline int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier) { + if(unlikely(rd->multiplier == multiplier)) + return 0; + + debug(D_RRD_CALLS, "Updating multiplier of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->multiplier, multiplier); + rd->multiplier = multiplier; + rd->exposed = 0; + rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + return 1; +} + +inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor) { + if(unlikely(rd->divisor == divisor)) + return 0; + + debug(D_RRD_CALLS, "Updating divisor of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->divisor, divisor); + rd->divisor = divisor; + rd->exposed = 0; + rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + return 1; +} + +// ---------------------------------------------------------------------------- +// RRDDIM create a dimension + +RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode) { + rrdset_wrlock(st); + + rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + + RRDDIM *rd = rrddim_find(st, id); + if(unlikely(rd)) { + debug(D_RRD_CALLS, "Cannot create rrd dimension '%s/%s', it already exists.", st->id, name?name:"<NONAME>"); + + rrddim_set_name(st, rd, name); + rrddim_set_algorithm(st, rd, algorithm); + rrddim_set_multiplier(st, rd, multiplier); + rrddim_set_divisor(st, rd, divisor); + + rrdset_unlock(st); + return rd; + } + + RRDHOST *host = st->rrdhost; + char filename[FILENAME_MAX + 1]; + char fullfilename[FILENAME_MAX + 1]; + + char varname[CONFIG_MAX_NAME + 1]; + unsigned long size = sizeof(RRDDIM) + (st->entries * sizeof(storage_number)); + + debug(D_RRD_CALLS, "Adding dimension '%s/%s'.", st->id, id); + + rrdset_strncpyz_name(filename, id, FILENAME_MAX); + snprintfz(fullfilename, FILENAME_MAX, "%s/%s.db", st->cache_dir, filename); + + if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP || memory_mode == RRD_MEMORY_MODE_RAM) { + rd = (RRDDIM *)mymmap( + (memory_mode == RRD_MEMORY_MODE_RAM)?NULL:fullfilename + , size + , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE) + , 1 + ); + + if(likely(rd)) { + // we have a file mapped for rd + + memset(&rd->avl, 0, sizeof(avl)); + rd->id = NULL; + rd->name = NULL; + rd->cache_filename = NULL; + rd->variables = NULL; + rd->next = NULL; + rd->rrdset = NULL; + rd->exposed = 0; + + struct timeval now; + now_realtime_timeval(&now); + + if(memory_mode == RRD_MEMORY_MODE_RAM) { + memset(rd, 0, size); + } + else { + int reset = 0; + + if(strcmp(rd->magic, RRDDIMENSION_MAGIC) != 0) { + info("Initializing file %s.", fullfilename); + memset(rd, 0, size); + reset = 1; + } + else if(rd->memsize != size) { + error("File %s does not have the desired size, expected %lu but found %lu. Clearing it.", fullfilename, size, rd->memsize); + memset(rd, 0, size); + reset = 1; + } + else if(rd->update_every != st->update_every) { + error("File %s does not have the same update frequency, expected %d but found %d. 
Clearing it.", fullfilename, st->update_every, rd->update_every); + memset(rd, 0, size); + reset = 1; + } + else if(dt_usec(&now, &rd->last_collected_time) > (rd->entries * rd->update_every * USEC_PER_SEC)) { + info("File %s is too old (last collected %llu seconds ago, but the database is %ld seconds). Clearing it.", fullfilename, dt_usec(&now, &rd->last_collected_time) / USEC_PER_SEC, rd->entries * rd->update_every); + memset(rd, 0, size); + reset = 1; + } + + if(!reset) { + if(rd->algorithm != algorithm) { + info("File %s does not have the expected algorithm (expected %u '%s', found %u '%s'). Previous values may be wrong.", + fullfilename, algorithm, rrd_algorithm_name(algorithm), rd->algorithm, rrd_algorithm_name(rd->algorithm)); + } + + if(rd->multiplier != multiplier) { + info("File %s does not have the expected multiplier (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, multiplier, rd->multiplier); + } + + if(rd->divisor != divisor) { + info("File %s does not have the expected divisor (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, divisor, rd->divisor); + } + } + } + + // make sure we have the right memory mode + // even if we cleared the memory + rd->rrd_memory_mode = memory_mode; + } + } + + if(unlikely(!rd)) { + // if we didn't manage to get a mmap'd dimension, just create one + rd = callocz(1, size); + rd->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC; + } + + rd->memsize = size; + + strcpy(rd->magic, RRDDIMENSION_MAGIC); + + rd->id = strdupz(id); + rd->hash = simple_hash(rd->id); + + rd->cache_filename = strdupz(fullfilename); + + snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id); + rd->name = config_get(st->config_section, varname, (name && *name)?name:rd->id); + rd->hash_name = simple_hash(rd->name); + + snprintfz(varname, CONFIG_MAX_NAME, "dim %s algorithm", rd->id); + rd->algorithm = rrd_algorithm_id(config_get(st->config_section, varname, rrd_algorithm_name(algorithm))); + + snprintfz(varname, CONFIG_MAX_NAME, "dim %s multiplier", rd->id); + rd->multiplier = config_get_number(st->config_section, varname, multiplier); + + snprintfz(varname, CONFIG_MAX_NAME, "dim %s divisor", rd->id); + rd->divisor = config_get_number(st->config_section, varname, divisor); + if(!rd->divisor) rd->divisor = 1; + + rd->entries = st->entries; + rd->update_every = st->update_every; + + if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)) + rd->collections_counter = 1; + else + rd->collections_counter = 0; + + rd->updated = 0; + rd->flags = 0x00000000; + + rd->calculated_value = 0; + rd->last_calculated_value = 0; + rd->collected_value = 0; + rd->last_collected_value = 0; + rd->collected_volume = 0; + rd->stored_volume = 0; + rd->last_stored_value = 0; + rd->values[st->current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS); + rd->last_collected_time.tv_sec = 0; + rd->last_collected_time.tv_usec = 0; + rd->rrdset = st; + + // append this dimension + if(!st->dimensions) + st->dimensions = rd; + else { + RRDDIM *td = st->dimensions; + + if(td->algorithm != rd->algorithm || abs(td->multiplier) != abs(rd->multiplier) || abs(td->divisor) != abs(rd->divisor)) { + if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) { + #ifdef NETDATA_INTERNAL_CHECKS + info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs 
'%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").", + rd->name, + st->name, + host->hostname, + rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(td->algorithm), + rd->multiplier, td->multiplier, + rd->divisor, td->divisor + ); + #endif + rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS); + } + } + + for(; td->next; td = td->next) ; + td->next = rd; + } + + if(host->health_enabled) { + rrddimvar_create(rd, RRDVAR_TYPE_CALCULATED, NULL, NULL, &rd->last_stored_value, RRDVAR_OPTION_DEFAULT); + rrddimvar_create(rd, RRDVAR_TYPE_COLLECTED, NULL, "_raw", &rd->last_collected_value, RRDVAR_OPTION_DEFAULT); + rrddimvar_create(rd, RRDVAR_TYPE_TIME_T, NULL, "_last_collected_t", &rd->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT); + } + + if(unlikely(rrddim_index_add(st, rd) != rd)) + error("RRDDIM: INTERNAL ERROR: attempt to index duplicate dimension '%s' on chart '%s'", rd->id, st->id); + + rrdset_unlock(st); + return(rd); +} + +// ---------------------------------------------------------------------------- +// RRDDIM remove / free a dimension + +void rrddim_free(RRDSET *st, RRDDIM *rd) +{ + debug(D_RRD_CALLS, "rrddim_free() %s.%s", st->name, rd->name); + + if(rd == st->dimensions) + st->dimensions = rd->next; + else { + RRDDIM *i; + for (i = st->dimensions; i && i->next != rd; i = i->next) ; + + if (i && i->next == rd) + i->next = rd->next; + else + error("Request to free dimension '%s.%s' but it is not linked.", st->id, rd->name); + } + rd->next = NULL; + + while(rd->variables) + rrddimvar_free(rd->variables); + + if(unlikely(rrddim_index_del(st, rd) != rd)) + error("RRDDIM: INTERNAL ERROR: attempt to remove from index dimension '%s' on chart '%s', removed a different dimension.", rd->id, st->id); + + // free(rd->annotations); + + switch(rd->rrd_memory_mode) { + case RRD_MEMORY_MODE_SAVE: + case RRD_MEMORY_MODE_MAP: + case RRD_MEMORY_MODE_RAM: + debug(D_RRD_CALLS, "Unmapping dimension '%s'.", rd->name); + freez((void *)rd->id); + freez(rd->cache_filename); + munmap(rd, rd->memsize); + break; + + case RRD_MEMORY_MODE_ALLOC: + case RRD_MEMORY_MODE_NONE: + debug(D_RRD_CALLS, "Removing dimension '%s'.", rd->name); + freez((void *)rd->id); + freez(rd->cache_filename); + freez(rd); + break; + } +} + + +// ---------------------------------------------------------------------------- +// RRDDIM - set dimension options + +int rrddim_hide(RRDSET *st, const char *id) { + debug(D_RRD_CALLS, "rrddim_hide() for chart %s, dimension %s", st->name, id); + + RRDHOST *host = st->rrdhost; + + RRDDIM *rd = rrddim_find(st, id); + if(unlikely(!rd)) { + error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname); + return 1; + } + + rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN); + return 0; +} + +int rrddim_unhide(RRDSET *st, const char *id) { + debug(D_RRD_CALLS, "rrddim_unhide() for chart %s, dimension %s", st->name, id); + + RRDHOST *host = st->rrdhost; + RRDDIM *rd = rrddim_find(st, id); + if(unlikely(!rd)) { + error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname); + return 1; + } + + rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN); + return 0; +} + + +// ---------------------------------------------------------------------------- +// RRDDIM - collect values for a dimension + +inline collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value) { + debug(D_RRD_CALLS, 
"rrddim_set_by_pointer() for chart %s, dimension %s, value " COLLECTED_NUMBER_FORMAT, st->name, rd->name, value); + + now_realtime_timeval(&rd->last_collected_time); + rd->collected_value = value; + rd->updated = 1; + + rd->collections_counter++; + + // fprintf(stderr, "%s.%s %llu " COLLECTED_NUMBER_FORMAT " dt %0.6f" " rate " CALCULATED_NUMBER_FORMAT "\n", st->name, rd->name, st->usec_since_last_update, value, (float)((double)st->usec_since_last_update / (double)1000000), (calculated_number)((value - rd->last_collected_value) * (calculated_number)rd->multiplier / (calculated_number)rd->divisor * 1000000.0 / (calculated_number)st->usec_since_last_update)); + + return rd->last_collected_value; +} + +collected_number rrddim_set(RRDSET *st, const char *id, collected_number value) { + RRDHOST *host = st->rrdhost; + RRDDIM *rd = rrddim_find(st, id); + if(unlikely(!rd)) { + error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname); + return 0; + } + + return rrddim_set_by_pointer(st, rd, value); +} diff --git a/database/rrddimvar.c b/database/rrddimvar.c new file mode 100644 index 000000000..3c2ed75e5 --- /dev/null +++ b/database/rrddimvar.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_HEALTH_INTERNALS +#include "rrd.h" + +// ---------------------------------------------------------------------------- +// RRDDIMVAR management +// DIMENSION VARIABLES + +#define RRDDIMVAR_ID_MAX 1024 + +static inline void rrddimvar_free_variables(RRDDIMVAR *rs) { + RRDDIM *rd = rs->rrddim; + RRDSET *st = rd->rrdset; + RRDHOST *host = st->rrdhost; + + // CHART VARIABLES FOR THIS DIMENSION + + rrdvar_free(host, &st->rrdvar_root_index, rs->var_local_id); + rs->var_local_id = NULL; + + rrdvar_free(host, &st->rrdvar_root_index, rs->var_local_name); + rs->var_local_name = NULL; + + // FAMILY VARIABLES FOR THIS DIMENSION + + rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_id); + rs->var_family_id = NULL; + + rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_name); + rs->var_family_name = NULL; + + rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_contextid); + rs->var_family_contextid = NULL; + + rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_contextname); + rs->var_family_contextname = NULL; + + // HOST VARIABLES FOR THIS DIMENSION + + rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartidid); + rs->var_host_chartidid = NULL; + + rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartidname); + rs->var_host_chartidname = NULL; + + rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartnameid); + rs->var_host_chartnameid = NULL; + + rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartnamename); + rs->var_host_chartnamename = NULL; + + // KEYS + + freez(rs->key_id); + rs->key_id = NULL; + + freez(rs->key_name); + rs->key_name = NULL; + + freez(rs->key_fullidid); + rs->key_fullidid = NULL; + + freez(rs->key_fullidname); + rs->key_fullidname = NULL; + + freez(rs->key_contextid); + rs->key_contextid = NULL; + + freez(rs->key_contextname); + rs->key_contextname = NULL; + + freez(rs->key_fullnameid); + rs->key_fullnameid = NULL; + + freez(rs->key_fullnamename); + rs->key_fullnamename = NULL; +} + +static inline void rrddimvar_create_variables(RRDDIMVAR *rs) { + rrddimvar_free_variables(rs); + + RRDDIM *rd = rs->rrddim; + RRDSET *st = rd->rrdset; + RRDHOST *host = st->rrdhost; + + char buffer[RRDDIMVAR_ID_MAX + 1]; + + // 
KEYS + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s%s%s", rs->prefix, rd->id, rs->suffix); + rs->key_id = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s%s%s", rs->prefix, rd->name, rs->suffix); + rs->key_name = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->id, rs->key_id); + rs->key_fullidid = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->id, rs->key_name); + rs->key_fullidname = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->context, rs->key_id); + rs->key_contextid = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->context, rs->key_name); + rs->key_contextname = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->name, rs->key_id); + rs->key_fullnameid = strdupz(buffer); + + snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->name, rs->key_name); + rs->key_fullnamename = strdupz(buffer); + + // CHART VARIABLES FOR THIS DIMENSION + // ----------------------------------- + // + // dimensions are available as: + // - $id + // - $name + + rs->var_local_id = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->key_id, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_local_name = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->key_name, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + + // FAMILY VARIABLES FOR THIS DIMENSION + // ----------------------------------- + // + // dimensions are available as: + // - $id (only the first, when multiple overlap) + // - $name (only the first, when multiple overlap) + // - $chart-context.id + // - $chart-context.name + + rs->var_family_id = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_id, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_family_name = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_name, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_family_contextid = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_contextid, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_family_contextname = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_contextname, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + + // HOST VARIABLES FOR THIS DIMENSION + // ----------------------------------- + // + // dimensions are available as: + // - $chart-id.id + // - $chart-id.name + // - $chart-name.id + // - $chart-name.name + + rs->var_host_chartidid = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullidid, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_host_chartidname = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullidname, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_host_chartnameid = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullnameid, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); + rs->var_host_chartnamename = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullnamename, rs->type, RRDVAR_OPTION_DEFAULT, rs->value); +} + +RRDDIMVAR *rrddimvar_create(RRDDIM *rd, RRDVAR_TYPE type, const char *prefix, const char *suffix, void *value, RRDVAR_OPTIONS options) { + RRDSET *st = rd->rrdset; + (void)st; + + debug(D_VARIABLES, "RRDDIMSET create for chart id '%s' name '%s', dimension id '%s', name '%s%s%s'", st->id, st->name, rd->id, (prefix)?prefix:"", rd->name, (suffix)?suffix:""); + + if(!prefix) prefix = ""; + if(!suffix) suffix = ""; + + RRDDIMVAR *rs = (RRDDIMVAR *)callocz(1, sizeof(RRDDIMVAR)); + 
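    // record the identity of the new variable set and link it at the head of
    // the dimension's variables list; the individual RRDVARs are created and
    // indexed by rrddimvar_create_variables() below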
+    rs->prefix = strdupz(prefix);
+    rs->suffix = strdupz(suffix);
+
+    rs->type = type;
+    rs->value = value;
+    rs->options = options;
+    rs->rrddim = rd;
+
+    rs->next = rd->variables;
+    rd->variables = rs;
+
+    rrddimvar_create_variables(rs);
+
+    return rs;
+}
+
+void rrddimvar_rename_all(RRDDIM *rd) {
+    RRDSET *st = rd->rrdset;
+    (void)st;
+
+    debug(D_VARIABLES, "RRDDIMSET rename for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name);
+
+    RRDDIMVAR *rs, *next = rd->variables;
+    while((rs = next)) {
+        next = rs->next;
+        rrddimvar_create_variables(rs);
+    }
+}
+
+void rrddimvar_free(RRDDIMVAR *rs) {
+    RRDDIM *rd = rs->rrddim;
+    RRDSET *st = rd->rrdset;
+    debug(D_VARIABLES, "RRDDIMSET free for chart id '%s' name '%s', dimension id '%s', name '%s', prefix='%s', suffix='%s'", st->id, st->name, rd->id, rd->name, rs->prefix, rs->suffix);
+
+    rrddimvar_free_variables(rs);
+
+    if(rd->variables == rs) {
+        debug(D_VARIABLES, "RRDDIMSET removing first entry for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name);
+        rd->variables = rs->next;
+    }
+    else {
+        debug(D_VARIABLES, "RRDDIMSET removing non-first entry for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name);
+        RRDDIMVAR *t;
+        for (t = rd->variables; t && t->next != rs; t = t->next) ;
+        if(!t) error("RRDDIMVAR '%s' not found in dimension '%s/%s' variables linked list", rs->key_name, st->id, rd->id);
+        else t->next = rs->next;
+    }
+
+    freez(rs->prefix);
+    freez(rs->suffix);
+    freez(rs);
+}
+
diff --git a/database/rrddimvar.h b/database/rrddimvar.h
new file mode 100644
index 000000000..3494824be
--- /dev/null
+++ b/database/rrddimvar.h
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDDIMVAR_H
+#define NETDATA_RRDDIMVAR_H 1
+
+#include "rrd.h"
+
+// variables linked to individual dimensions
+// We link variables to point to the values that are already
+// calculated / processed by the normal data collection process.
+// This means there will be no speed penalty for using
+// these variables.
+struct rrddimvar {
+    char *prefix;
+    char *suffix;
+
+    char *key_id;                   // dimension id
+    char *key_name;                 // dimension name
+    char *key_contextid;            // context + dimension id
+    char *key_contextname;          // context + dimension name
+    char *key_fullidid;             // chart type.chart id + dimension id
+    char *key_fullidname;           // chart type.chart id + dimension name
+    char *key_fullnameid;           // chart type.chart name + dimension id
+    char *key_fullnamename;         // chart type.chart name + dimension name
+
+    RRDVAR_TYPE type;
+    void *value;
+
+    RRDVAR_OPTIONS options;
+
+    RRDVAR *var_local_id;
+    RRDVAR *var_local_name;
+
+    RRDVAR *var_family_id;
+    RRDVAR *var_family_name;
+    RRDVAR *var_family_contextid;
+    RRDVAR *var_family_contextname;
+
+    RRDVAR *var_host_chartidid;
+    RRDVAR *var_host_chartidname;
+    RRDVAR *var_host_chartnameid;
+    RRDVAR *var_host_chartnamename;
+
+    struct rrddim *rrddim;
+
+    struct rrddimvar *next;
+};
+
+
+extern void rrddimvar_rename_all(RRDDIM *rd);
+extern RRDDIMVAR *rrddimvar_create(RRDDIM *rd, RRDVAR_TYPE type, const char *prefix, const char *suffix, void *value, RRDVAR_OPTIONS options);
+extern void rrddimvar_free(RRDDIMVAR *rs);
+
+
+
+#endif //NETDATA_RRDDIMVAR_H
diff --git a/database/rrdfamily.c b/database/rrdfamily.c
new file mode 100644
index 000000000..f75f0adc3
--- /dev/null
+++ b/database/rrdfamily.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_RRD_INTERNALS
+#include
"rrd.h" + +// ---------------------------------------------------------------------------- +// RRDFAMILY index + +int rrdfamily_compare(void *a, void *b) { + if(((RRDFAMILY *)a)->hash_family < ((RRDFAMILY *)b)->hash_family) return -1; + else if(((RRDFAMILY *)a)->hash_family > ((RRDFAMILY *)b)->hash_family) return 1; + else return strcmp(((RRDFAMILY *)a)->family, ((RRDFAMILY *)b)->family); +} + +#define rrdfamily_index_add(host, rc) (RRDFAMILY *)avl_insert_lock(&((host)->rrdfamily_root_index), (avl *)(rc)) +#define rrdfamily_index_del(host, rc) (RRDFAMILY *)avl_remove_lock(&((host)->rrdfamily_root_index), (avl *)(rc)) + +static RRDFAMILY *rrdfamily_index_find(RRDHOST *host, const char *id, uint32_t hash) { + RRDFAMILY tmp; + tmp.family = id; + tmp.hash_family = (hash)?hash:simple_hash(tmp.family); + + return (RRDFAMILY *)avl_search_lock(&(host->rrdfamily_root_index), (avl *) &tmp); +} + +RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id) { + RRDFAMILY *rc = rrdfamily_index_find(host, id, 0); + if(!rc) { + rc = callocz(1, sizeof(RRDFAMILY)); + + rc->family = strdupz(id); + rc->hash_family = simple_hash(rc->family); + + // initialize the variables index + avl_init_lock(&rc->rrdvar_root_index, rrdvar_compare); + + RRDFAMILY *ret = rrdfamily_index_add(host, rc); + if(ret != rc) + error("RRDFAMILY: INTERNAL ERROR: Expected to INSERT RRDFAMILY '%s' into index, but inserted '%s'.", rc->family, (ret)?ret->family:"NONE"); + } + + rc->use_count++; + return rc; +} + +void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc) { + rc->use_count--; + if(!rc->use_count) { + RRDFAMILY *ret = rrdfamily_index_del(host, rc); + if(ret != rc) + error("RRDFAMILY: INTERNAL ERROR: Expected to DELETE RRDFAMILY '%s' from index, but deleted '%s'.", rc->family, (ret)?ret->family:"NONE"); + else { + debug(D_RRD_CALLS, "RRDFAMILY: Cleaning up remaining family variables for host '%s', family '%s'", host->hostname, rc->family); + rrdvar_free_remaining_variables(host, &rc->rrdvar_root_index); + + freez((void *) rc->family); + freez(rc); + } + } +} + diff --git a/database/rrdhost.c b/database/rrdhost.c new file mode 100644 index 000000000..43aa2daa2 --- /dev/null +++ b/database/rrdhost.c @@ -0,0 +1,743 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_RRD_INTERNALS +#include "rrd.h" + +RRDHOST *localhost = NULL; +size_t rrd_hosts_available = 0; +netdata_rwlock_t rrd_rwlock = NETDATA_RWLOCK_INITIALIZER; + +time_t rrdset_free_obsolete_time = 3600; +time_t rrdhost_free_orphan_time = 3600; + +// ---------------------------------------------------------------------------- +// RRDHOST index + +int rrdhost_compare(void* a, void* b) { + if(((RRDHOST *)a)->hash_machine_guid < ((RRDHOST *)b)->hash_machine_guid) return -1; + else if(((RRDHOST *)a)->hash_machine_guid > ((RRDHOST *)b)->hash_machine_guid) return 1; + else return strcmp(((RRDHOST *)a)->machine_guid, ((RRDHOST *)b)->machine_guid); +} + +avl_tree_lock rrdhost_root_index = { + .avl_tree = { NULL, rrdhost_compare }, + .rwlock = AVL_LOCK_INITIALIZER +}; + +RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash) { + debug(D_RRDHOST, "Searching in index for host with guid '%s'", guid); + + RRDHOST tmp; + strncpyz(tmp.machine_guid, guid, GUID_LEN); + tmp.hash_machine_guid = (hash)?hash:simple_hash(tmp.machine_guid); + + return (RRDHOST *)avl_search_lock(&(rrdhost_root_index), (avl *) &tmp); +} + +RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash) { + if(unlikely(!strcmp(hostname, "localhost"))) + return localhost; + + 
if(unlikely(!hash)) hash = simple_hash(hostname); + + rrd_rdlock(); + RRDHOST *host; + rrdhost_foreach_read(host) { + if(unlikely((hash == host->hash_hostname && !strcmp(hostname, host->hostname)))) { + rrd_unlock(); + return host; + } + } + rrd_unlock(); + + return NULL; +} + +#define rrdhost_index_add(rrdhost) (RRDHOST *)avl_insert_lock(&(rrdhost_root_index), (avl *)(rrdhost)) +#define rrdhost_index_del(rrdhost) (RRDHOST *)avl_remove_lock(&(rrdhost_root_index), (avl *)(rrdhost)) + + +// ---------------------------------------------------------------------------- +// RRDHOST - internal helpers + +static inline void rrdhost_init_tags(RRDHOST *host, const char *tags) { + if(host->tags && tags && !strcmp(host->tags, tags)) + return; + + void *old = (void *)host->tags; + host->tags = (tags && *tags)?strdupz(tags):NULL; + freez(old); +} + +static inline void rrdhost_init_hostname(RRDHOST *host, const char *hostname) { + if(host->hostname && hostname && !strcmp(host->hostname, hostname)) + return; + + void *old = host->hostname; + host->hostname = strdupz(hostname?hostname:"localhost"); + host->hash_hostname = simple_hash(host->hostname); + freez(old); +} + +static inline void rrdhost_init_os(RRDHOST *host, const char *os) { + if(host->os && os && !strcmp(host->os, os)) + return; + + void *old = (void *)host->os; + host->os = strdupz(os?os:"unknown"); + freez(old); +} + +static inline void rrdhost_init_timezone(RRDHOST *host, const char *timezone) { + if(host->timezone && timezone && !strcmp(host->timezone, timezone)) + return; + + void *old = (void *)host->timezone; + host->timezone = strdupz((timezone && *timezone)?timezone:"unknown"); + freez(old); +} + +static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_guid) { + strncpy(host->machine_guid, machine_guid, GUID_LEN); + host->machine_guid[GUID_LEN] = '\0'; + host->hash_machine_guid = simple_hash(host->machine_guid); +} + + +// ---------------------------------------------------------------------------- +// RRDHOST - add a host + +RRDHOST *rrdhost_create(const char *hostname, + const char *registry_hostname, + const char *guid, + const char *os, + const char *timezone, + const char *tags, + const char *program_name, + const char *program_version, + int update_every, + long entries, + RRD_MEMORY_MODE memory_mode, + unsigned int health_enabled, + unsigned int rrdpush_enabled, + char *rrdpush_destination, + char *rrdpush_api_key, + char *rrdpush_send_charts_matching, + int is_localhost +) { + debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid); + + rrd_check_wrlock(); + + RRDHOST *host = callocz(1, sizeof(RRDHOST)); + + host->rrd_update_every = (update_every > 0)?update_every:1; + host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries); + host->rrd_memory_mode = memory_mode; + host->health_enabled = (memory_mode == RRD_MEMORY_MODE_NONE)? 0 : health_enabled; + host->rrdpush_send_enabled = (rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key) ? 
1 : 0; + host->rrdpush_send_destination = (host->rrdpush_send_enabled)?strdupz(rrdpush_destination):NULL; + host->rrdpush_send_api_key = (host->rrdpush_send_enabled)?strdupz(rrdpush_api_key):NULL; + host->rrdpush_send_charts_matching = simple_pattern_create(rrdpush_send_charts_matching, NULL, SIMPLE_PATTERN_EXACT); + + host->rrdpush_sender_pipe[0] = -1; + host->rrdpush_sender_pipe[1] = -1; + host->rrdpush_sender_socket = -1; + + netdata_mutex_init(&host->rrdpush_sender_buffer_mutex); + netdata_rwlock_init(&host->rrdhost_rwlock); + + rrdhost_init_hostname(host, hostname); + rrdhost_init_machine_guid(host, guid); + rrdhost_init_os(host, os); + rrdhost_init_timezone(host, timezone); + rrdhost_init_tags(host, tags); + + host->program_name = strdupz((program_name && *program_name)?program_name:"unknown"); + host->program_version = strdupz((program_version && *program_version)?program_version:"unknown"); + host->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname); + + avl_init_lock(&(host->rrdset_root_index), rrdset_compare); + avl_init_lock(&(host->rrdset_root_index_name), rrdset_compare_name); + avl_init_lock(&(host->rrdfamily_root_index), rrdfamily_compare); + avl_init_lock(&(host->rrdvar_root_index), rrdvar_compare); + + if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete obsolete charts files", 1)) + rrdhost_flag_set(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS); + + if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete orphan hosts files", 1) && !is_localhost) + rrdhost_flag_set(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST); + + + // ------------------------------------------------------------------------ + // initialize health variables + + host->health_log.next_log_id = 1; + host->health_log.next_alarm_id = 1; + host->health_log.max = 1000; + host->health_log.next_log_id = + host->health_log.next_alarm_id = (uint32_t)now_realtime_sec(); + + long n = config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", host->health_log.max); + if(n < 10) { + error("Host '%s': health configuration has invalid max log entries %ld. 
Using default %u", host->hostname, n, host->health_log.max); + config_set_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", (long)host->health_log.max); + } + else + host->health_log.max = (unsigned int)n; + + netdata_rwlock_init(&host->health_log.alarm_log_rwlock); + + char filename[FILENAME_MAX + 1]; + + if(is_localhost) { + + host->cache_dir = strdupz(netdata_configured_cache_dir); + host->varlib_dir = strdupz(netdata_configured_varlib_dir); + + } + else { + // this is not localhost - append our GUID to localhost path + + snprintfz(filename, FILENAME_MAX, "%s/%s", netdata_configured_cache_dir, host->machine_guid); + host->cache_dir = strdupz(filename); + + if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) { + int r = mkdir(host->cache_dir, 0775); + if(r != 0 && errno != EEXIST) + error("Host '%s': cannot create directory '%s'", host->hostname, host->cache_dir); + } + + snprintfz(filename, FILENAME_MAX, "%s/%s", netdata_configured_varlib_dir, host->machine_guid); + host->varlib_dir = strdupz(filename); + + if(host->health_enabled) { + int r = mkdir(host->varlib_dir, 0775); + if(r != 0 && errno != EEXIST) + error("Host '%s': cannot create directory '%s'", host->hostname, host->varlib_dir); + } + + } + + if(host->health_enabled) { + snprintfz(filename, FILENAME_MAX, "%s/health", host->varlib_dir); + int r = mkdir(filename, 0775); + if(r != 0 && errno != EEXIST) + error("Host '%s': cannot create directory '%s'", host->hostname, filename); + } + + snprintfz(filename, FILENAME_MAX, "%s/health/health-log.db", host->varlib_dir); + host->health_log_filename = strdupz(filename); + + snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_plugins_dir); + host->health_default_exec = strdupz(config_get(CONFIG_SECTION_HEALTH, "script to execute on alarm", filename)); + host->health_default_recipient = strdupz("root"); + + + // ------------------------------------------------------------------------ + // load health configuration + + if(host->health_enabled) { + health_alarm_log_load(host); + health_alarm_log_open(host); + + rrdhost_wrlock(host); + health_readdir(host, health_user_config_dir(), health_stock_config_dir(), NULL); + rrdhost_unlock(host); + } + + + // ------------------------------------------------------------------------ + // link it and add it to the index + + if(is_localhost) { + host->next = localhost; + localhost = host; + } + else { + if(localhost) { + host->next = localhost->next; + localhost->next = host; + } + else localhost = host; + } + + RRDHOST *t = rrdhost_index_add(host); + + if(t != host) { + error("Host '%s': cannot add host with machine guid '%s' to index. 
It already exists as host '%s' with machine guid '%s'.", host->hostname, host->machine_guid, t->hostname, t->machine_guid); + rrdhost_free(host); + host = NULL; + } + else { + info("Host '%s' (at registry as '%s') with guid '%s' initialized" + ", os '%s'" + ", timezone '%s'" + ", tags '%s'" + ", program_name '%s'" + ", program_version '%s'" + ", update every %d" + ", memory mode %s" + ", history entries %ld" + ", streaming %s" + " (to '%s' with api key '%s')" + ", health %s" + ", cache_dir '%s'" + ", varlib_dir '%s'" + ", health_log '%s'" + ", alarms default handler '%s'" + ", alarms default recipient '%s'" + , host->hostname + , host->registry_hostname + , host->machine_guid + , host->os + , host->timezone + , (host->tags)?host->tags:"" + , host->program_name + , host->program_version + , host->rrd_update_every + , rrd_memory_mode_name(host->rrd_memory_mode) + , host->rrd_history_entries + , host->rrdpush_send_enabled?"enabled":"disabled" + , host->rrdpush_send_destination?host->rrdpush_send_destination:"" + , host->rrdpush_send_api_key?host->rrdpush_send_api_key:"" + , host->health_enabled?"enabled":"disabled" + , host->cache_dir + , host->varlib_dir + , host->health_log_filename + , host->health_default_exec + , host->health_default_recipient + ); + } + + rrd_hosts_available++; + + return host; +} + +RRDHOST *rrdhost_find_or_create( + const char *hostname + , const char *registry_hostname + , const char *guid + , const char *os + , const char *timezone + , const char *tags + , const char *program_name + , const char *program_version + , int update_every + , long history + , RRD_MEMORY_MODE mode + , unsigned int health_enabled + , unsigned int rrdpush_enabled + , char *rrdpush_destination + , char *rrdpush_api_key + , char *rrdpush_send_charts_matching +) { + debug(D_RRDHOST, "Searching for host '%s' with guid '%s'", hostname, guid); + + rrd_wrlock(); + RRDHOST *host = rrdhost_find_by_guid(guid, 0); + if(!host) { + host = rrdhost_create( + hostname + , registry_hostname + , guid + , os + , timezone + , tags + , program_name + , program_version + , update_every + , history + , mode + , health_enabled + , rrdpush_enabled + , rrdpush_destination + , rrdpush_api_key + , rrdpush_send_charts_matching + , 0 + ); + } + else { + host->health_enabled = health_enabled; + + if(strcmp(host->hostname, hostname) != 0) { + info("Host '%s' has been renamed to '%s'. If this is not intentional it may mean multiple hosts are using the same machine_guid.", host->hostname, hostname); + char *t = host->hostname; + host->hostname = strdupz(hostname); + host->hash_hostname = simple_hash(host->hostname); + freez(t); + } + + if(strcmp(host->program_name, program_name) != 0) { + info("Host '%s' switched program name from '%s' to '%s'", host->hostname, host->program_name, program_name); + char *t = host->program_name; + host->program_name = strdupz(program_name); + freez(t); + } + + if(strcmp(host->program_version, program_version) != 0) { + info("Host '%s' switched program version from '%s' to '%s'", host->hostname, host->program_version, program_version); + char *t = host->program_version; + host->program_version = strdupz(program_version); + freez(t); + } + + if(host->rrd_update_every != update_every) + error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. 
Restart netdata here to apply the new settings.", host->hostname, host->rrd_update_every, update_every);
+
+        if(host->rrd_history_entries < history)
+            error("Host '%s' has history of %ld entries, but the wanted one is %ld entries. Restart netdata here to apply the new settings.", host->hostname, host->rrd_history_entries, history);
+
+        if(host->rrd_memory_mode != mode)
+            error("Host '%s' has memory mode '%s', but the wanted one is '%s'. Restart netdata here to apply the new settings.", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode));
+
+        // update host tags
+        rrdhost_init_tags(host, tags);
+    }
+
+    rrdhost_cleanup_orphan_hosts_nolock(host);
+
+    rrd_unlock();
+
+    return host;
+}
+
+inline int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now) {
+    if(host != protected
+       && host != localhost
+       && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN)
+       && !host->connected_senders
+       && host->senders_disconnected_time
+       && host->senders_disconnected_time + rrdhost_free_orphan_time < now)
+        return 1;
+
+    return 0;
+}
+
+void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected) {
+    time_t now = now_realtime_sec();
+
+    RRDHOST *host;
+
+restart_after_removal:
+    rrdhost_foreach_write(host) {
+        if(rrdhost_should_be_removed(host, protected, now)) {
+            info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", host->hostname, host->machine_guid);
+
+            if(rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST))
+                rrdhost_delete_charts(host);
+            else
+                rrdhost_save_charts(host);
+
+            rrdhost_free(host);
+            goto restart_after_removal;
+        }
+    }
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST global / startup initialization
+
+void rrd_init(char *hostname) {
+    rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time);
+    gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above);
+    if(gap_when_lost_iterations_above < 1)
+        gap_when_lost_iterations_above = 1;
+
+    health_init();
+    registry_init();
+    rrdpush_init();
+
+    debug(D_RRDHOST, "Initializing localhost with hostname '%s'", hostname);
+    rrd_wrlock();
+    localhost = rrdhost_create(
+            hostname
+            , registry_get_this_machine_hostname()
+            , registry_get_this_machine_guid()
+            , os_type
+            , netdata_configured_timezone
+            , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
+            , program_name
+            , program_version
+            , default_rrd_update_every
+            , default_rrd_history_entries
+            , default_rrd_memory_mode
+            , default_health_enabled
+            , default_rrdpush_enabled
+            , default_rrdpush_destination
+            , default_rrdpush_api_key
+            , default_rrdpush_send_charts_matching
+            , 1
+    );
+    rrd_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - lock validations
+// these are only used when NETDATA_INTERNAL_CHECKS is set
+
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
+    debug(D_RRDHOST, "Checking read lock on host '%s'", host->hostname);
+
+    int ret = netdata_rwlock_trywrlock(&host->rrdhost_rwlock);
+    if(ret == 0)
+        fatal("RRDHOST '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file);
+}
+
+void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
+    debug(D_RRDHOST, "Checking write lock on host '%s'", host->hostname);
+
+    int ret = netdata_rwlock_tryrdlock(&host->rrdhost_rwlock);
+    if(ret == 0)
+        fatal("RRDHOST '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file);
+}
+
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line) {
+    debug(D_RRDHOST, "Checking read lock on all RRDs");
+
+    int ret = netdata_rwlock_trywrlock(&rrd_rwlock);
+    if(ret == 0)
+        fatal("RRDs should be read-locked, but they are not, at function %s() at line %lu of file '%s'", function, line, file);
+}
+
+void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line) {
+    debug(D_RRDHOST, "Checking write lock on all RRDs");
+
+    int ret = netdata_rwlock_tryrdlock(&rrd_rwlock);
+    if(ret == 0)
+        fatal("RRDs should be write-locked, but they are not, at function %s() at line %lu of file '%s'", function, line, file);
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - free
+
+void rrdhost_free(RRDHOST *host) {
+    if(!host) return;
+
+    info("Freeing all memory for host '%s'...", host->hostname);
+
+    rrd_check_wrlock();     // make sure the RRDs are write locked
+
+    // stop a possibly running thread
+    rrdpush_sender_thread_stop(host);
+
+    rrdhost_wrlock(host);   // lock this RRDHOST
+
+    // ------------------------------------------------------------------------
+    // release its children resources
+
+    while(host->rrdset_root)
+        rrdset_free(host->rrdset_root);
+
+    while(host->alarms)
+        rrdcalc_unlink_and_free(host, host->alarms);
+
+    while(host->templates)
+        rrdcalctemplate_unlink_and_free(host, host->templates);
+
+    debug(D_RRD_CALLS, "RRDHOST: Cleaning up remaining host variables for host '%s'", host->hostname);
+    rrdvar_free_remaining_variables(host, &host->rrdvar_root_index);
+
+    health_alarm_log_free(host);
+
+    // ------------------------------------------------------------------------
+    // remove it from the indexes
+
+    if(rrdhost_index_del(host) != host)
+        error("RRDHOST '%s' removed from index, deleted the wrong entry.", host->hostname);
+
+
+    // ------------------------------------------------------------------------
+    // unlink it from the hosts list
+
+    if(host == localhost) {
+        localhost = host->next;
+    }
+    else {
+        // find the previous one
+        RRDHOST *h;
+        for(h = localhost; h && h->next != host ; h = h->next) ;
+
+        // bypass it
+        if(h) h->next = host->next;
+        else error("Request to free RRDHOST '%s': cannot find it", host->hostname);
+    }
+
+    // ------------------------------------------------------------------------
+    // free it
+
+    freez((void *)host->tags);
+    freez((void *)host->os);
+    freez((void *)host->timezone);
+    freez(host->program_version);
+    freez(host->program_name);
+    freez(host->cache_dir);
+    freez(host->varlib_dir);
+    freez(host->rrdpush_send_api_key);
+    freez(host->rrdpush_send_destination);
+    freez(host->health_default_exec);
+    freez(host->health_default_recipient);
+    freez(host->health_log_filename);
+    freez(host->hostname);
+    freez(host->registry_hostname);
+    simple_pattern_free(host->rrdpush_send_charts_matching);
+    rrdhost_unlock(host);
+    netdata_rwlock_destroy(&host->health_log.alarm_log_rwlock);
+    netdata_rwlock_destroy(&host->rrdhost_rwlock);
+    freez(host);
+
+    rrd_hosts_available--;
+}
+
+void rrdhost_free_all(void) {
+    rrd_wrlock();
+    while(localhost) rrdhost_free(localhost);
+    rrd_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save host files
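+//
+// these walks take the host write lock, plus a read lock per chart, and
+// let rrdset_save() / rrdset_delete() act only on charts and dimensions
+// whose memory mode keeps files on disk ("save" for saving; "save" and
+// "map" for deletion)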
+
+void rrdhost_save_charts(RRDHOST *host) {
+    if(!host) return;
+
+    info("Saving/Closing database of host '%s'...", host->hostname);
+
+    RRDSET *st;
+
+    // we get a write lock
+    // to ensure only one thread is saving the database
+    rrdhost_wrlock(host);
+
+    rrdset_foreach_write(st, host) {
+        rrdset_rdlock(st);
+        rrdset_save(st);
+        rrdset_unlock(st);
+    }
+
+    rrdhost_unlock(host);
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - delete host files
+
+void rrdhost_delete_charts(RRDHOST *host) {
+    if(!host) return;
+
+    info("Deleting database of host '%s'...", host->hostname);
+
+    RRDSET *st;
+
+    // we get a write lock
+    // to ensure only one thread is deleting the database
+    rrdhost_wrlock(host);
+
+    rrdset_foreach_write(st, host) {
+        rrdset_rdlock(st);
+        rrdset_delete(st);
+        rrdset_unlock(st);
+    }
+
+    recursively_delete_dir(host->cache_dir, "left over host");
+
+    rrdhost_unlock(host);
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - cleanup host files
+
+void rrdhost_cleanup_charts(RRDHOST *host) {
+    if(!host) return;
+
+    info("Cleaning up database of host '%s'...", host->hostname);
+
+    RRDSET *st;
+    uint32_t rrdhost_delete_obsolete_charts = rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS);
+
+    // we get a write lock
+    // to ensure only one thread is cleaning up the database
+    rrdhost_wrlock(host);
+
+    rrdset_foreach_write(st, host) {
+        rrdset_rdlock(st);
+
+        if(rrdhost_delete_obsolete_charts && rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))
+            rrdset_delete(st);
+        else
+            rrdset_save(st);
+
+        rrdset_unlock(st);
+    }
+
+    rrdhost_unlock(host);
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save all hosts to disk
+
+void rrdhost_save_all(void) {
+    info("Saving database [%zu host(s)]...", rrd_hosts_available);
+
+    rrd_rdlock();
+
+    RRDHOST *host;
+    rrdhost_foreach_read(host)
+        rrdhost_save_charts(host);
+
+    rrd_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save or delete all hosts from disk
+
+void rrdhost_cleanup_all(void) {
+    info("Cleaning up database [%zu host(s)]...", rrd_hosts_available);
+
+    rrd_rdlock();
+
+    RRDHOST *host;
+    rrdhost_foreach_read(host) {
+        if(host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS) && !host->connected_senders)
+            rrdhost_delete_charts(host);
+        else
+            rrdhost_cleanup_charts(host);
+    }
+
+    rrd_unlock();
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save or delete the obsolete charts of a host from disk
+
+void rrdhost_cleanup_obsolete_charts(RRDHOST *host) {
+    time_t now = now_realtime_sec();
+
+    RRDSET *st;
+
+    uint32_t rrdhost_delete_obsolete_charts = rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS);
+
+restart_after_removal:
+    rrdset_foreach_write(st, host) {
+        if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)
+                    && st->last_accessed_time + rrdset_free_obsolete_time < now
+                    && st->last_updated.tv_sec + rrdset_free_obsolete_time < now
+                    && st->last_collected_time.tv_sec + rrdset_free_obsolete_time < now
+        )) {
+
+            rrdset_rdlock(st);
+
+            if(rrdhost_delete_obsolete_charts)
+                rrdset_delete(st);
+            else
+                rrdset_save(st);
+
+            rrdset_unlock(st);
+
+            rrdset_free(st);
+            goto restart_after_removal;
+        }
+    }
+}
diff --git a/database/rrdset.c b/database/rrdset.c
new file mode 100644
index 000000000..3f5ba73b6
--- /dev/null
+++ b/database/rrdset.c
@@ -0,0
+1,1621 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_RRD_INTERNALS +#include "rrd.h" + +void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line) { + debug(D_RRD_CALLS, "Checking read lock on chart '%s'", st->id); + + int ret = netdata_rwlock_trywrlock(&st->rrdset_rwlock); + if(ret == 0) + fatal("RRDSET '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file); +} + +void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line) { + debug(D_RRD_CALLS, "Checking write lock on chart '%s'", st->id); + + int ret = netdata_rwlock_tryrdlock(&st->rrdset_rwlock); + if(ret == 0) + fatal("RRDSET '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file); +} + + +// ---------------------------------------------------------------------------- +// RRDSET index + +int rrdset_compare(void* a, void* b) { + if(((RRDSET *)a)->hash < ((RRDSET *)b)->hash) return -1; + else if(((RRDSET *)a)->hash > ((RRDSET *)b)->hash) return 1; + else return strcmp(((RRDSET *)a)->id, ((RRDSET *)b)->id); +} + +static RRDSET *rrdset_index_find(RRDHOST *host, const char *id, uint32_t hash) { + RRDSET tmp; + strncpyz(tmp.id, id, RRD_ID_LENGTH_MAX); + tmp.hash = (hash)?hash:simple_hash(tmp.id); + + return (RRDSET *)avl_search_lock(&(host->rrdset_root_index), (avl *) &tmp); +} + +// ---------------------------------------------------------------------------- +// RRDSET name index + +#define rrdset_from_avlname(avlname_ptr) ((RRDSET *)((avlname_ptr) - offsetof(RRDSET, avlname))) + +int rrdset_compare_name(void* a, void* b) { + RRDSET *A = rrdset_from_avlname(a); + RRDSET *B = rrdset_from_avlname(b); + + // fprintf(stderr, "COMPARING: %s with %s\n", A->name, B->name); + + if(A->hash_name < B->hash_name) return -1; + else if(A->hash_name > B->hash_name) return 1; + else return strcmp(A->name, B->name); +} + +RRDSET *rrdset_index_add_name(RRDHOST *host, RRDSET *st) { + void *result; + // fprintf(stderr, "ADDING: %s (name: %s)\n", st->id, st->name); + result = avl_insert_lock(&host->rrdset_root_index_name, (avl *) (&st->avlname)); + if(result) return rrdset_from_avlname(result); + return NULL; +} + +RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st) { + void *result; + // fprintf(stderr, "DELETING: %s (name: %s)\n", st->id, st->name); + result = (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index_name), (avl *)(&st->avlname)); + if(result) return rrdset_from_avlname(result); + return NULL; +} + + +// ---------------------------------------------------------------------------- +// RRDSET - find charts + +static inline RRDSET *rrdset_index_find_name(RRDHOST *host, const char *name, uint32_t hash) { + void *result = NULL; + RRDSET tmp; + tmp.name = name; + tmp.hash_name = (hash)?hash:simple_hash(tmp.name); + + // fprintf(stderr, "SEARCHING: %s\n", name); + result = avl_search_lock(&host->rrdset_root_index_name, (avl *) (&(tmp.avlname))); + if(result) { + RRDSET *st = rrdset_from_avlname(result); + if(strcmp(st->magic, RRDSET_MAGIC) != 0) + error("Search for RRDSET %s returned an invalid RRDSET %s (name %s)", name, st->id, st->name); + + // fprintf(stderr, "FOUND: %s\n", name); + return rrdset_from_avlname(result); + } + // fprintf(stderr, "NOT FOUND: %s\n", name); + return NULL; +} + +inline RRDSET *rrdset_find(RRDHOST *host, const char *id) { + debug(D_RRD_CALLS, "rrdset_find() for chart '%s' in host 
'%s'", id, host->hostname); + RRDSET *st = rrdset_index_find(host, id, 0); + return(st); +} + +inline RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id) { + debug(D_RRD_CALLS, "rrdset_find_bytype() for chart '%s.%s' in host '%s'", type, id, host->hostname); + + char buf[RRD_ID_LENGTH_MAX + 1]; + strncpyz(buf, type, RRD_ID_LENGTH_MAX - 1); + strcat(buf, "."); + int len = (int) strlen(buf); + strncpyz(&buf[len], id, (size_t) (RRD_ID_LENGTH_MAX - len)); + + return(rrdset_find(host, buf)); +} + +inline RRDSET *rrdset_find_byname(RRDHOST *host, const char *name) { + debug(D_RRD_CALLS, "rrdset_find_byname() for chart '%s' in host '%s'", name, host->hostname); + RRDSET *st = rrdset_index_find_name(host, name, 0); + return(st); +} + +// ---------------------------------------------------------------------------- +// RRDSET - rename charts + +char *rrdset_strncpyz_name(char *to, const char *from, size_t length) { + char c, *p = to; + + while (length-- && (c = *from++)) { + if(c != '.' && !isalnum(c)) + c = '_'; + + *p++ = c; + } + + *p = '\0'; + + return to; +} + +int rrdset_set_name(RRDSET *st, const char *name) { + if(unlikely(st->name && !strcmp(st->name, name))) + return 1; + + RRDHOST *host = st->rrdhost; + + debug(D_RRD_CALLS, "rrdset_set_name() old: '%s', new: '%s'", st->name?st->name:"", name); + + char b[CONFIG_MAX_VALUE + 1]; + char n[RRD_ID_LENGTH_MAX + 1]; + + snprintfz(n, RRD_ID_LENGTH_MAX, "%s.%s", st->type, name); + rrdset_strncpyz_name(b, n, CONFIG_MAX_VALUE); + + if(rrdset_index_find_name(host, b, 0)) { + error("RRDSET: chart name '%s' on host '%s' already exists.", b, host->hostname); + return 0; + } + + if(st->name) { + rrdset_index_del_name(host, st); + st->name = config_set_default(st->config_section, "name", b); + st->hash_name = simple_hash(st->name); + rrdsetvar_rename_all(st); + } + else { + st->name = config_get(st->config_section, "name", b); + st->hash_name = simple_hash(st->name); + } + + rrdset_wrlock(st); + RRDDIM *rd; + rrddim_foreach_write(rd, st) + rrddimvar_rename_all(rd); + rrdset_unlock(st); + + if(unlikely(rrdset_index_add_name(host, st) != st)) + error("RRDSET: INTERNAL ERROR: attempted to index duplicate chart name '%s'", st->name); + + rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_SEND); + rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_IGNORE); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + + return 1; +} + +inline void rrdset_is_obsolete(RRDSET *st) { + if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { + rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + + // the chart will not get more updates (data collection) + // so, we have to push its definition now + rrdset_push_chart_definition_now(st); + } +} + +inline void rrdset_isnot_obsolete(RRDSET *st) { + if(unlikely((rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { + rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + + // the chart will be pushed upstream automatically + // due to data collection + } +} + +inline void rrdset_update_heterogeneous_flag(RRDSET *st) { + RRDHOST *host = st->rrdhost; + (void)host; + + RRDDIM *rd; + + rrdset_flag_clear(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); + + RRD_ALGORITHM algorithm = st->dimensions->algorithm; + collected_number multiplier = abs(st->dimensions->multiplier); + collected_number divisor = abs(st->dimensions->divisor); + + 
rrddim_foreach_read(rd, st) { + if(algorithm != rd->algorithm || multiplier != abs(rd->multiplier) || divisor != abs(rd->divisor)) { + if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) { + #ifdef NETDATA_INTERNAL_CHECKS + info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").", + rd->name, + st->name, + host->hostname, + rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm), + rd->multiplier, multiplier, + rd->divisor, divisor + ); + #endif + rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS); + } + return; + } + } + + rrdset_flag_clear(st, RRDSET_FLAG_HETEROGENEOUS); +} + +// ---------------------------------------------------------------------------- +// RRDSET - reset a chart + +void rrdset_reset(RRDSET *st) { + debug(D_RRD_CALLS, "rrdset_reset() %s", st->name); + + st->last_collected_time.tv_sec = 0; + st->last_collected_time.tv_usec = 0; + st->last_updated.tv_sec = 0; + st->last_updated.tv_usec = 0; + st->current_entry = 0; + st->counter = 0; + st->counter_done = 0; + + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + rd->last_collected_time.tv_sec = 0; + rd->last_collected_time.tv_usec = 0; + rd->collections_counter = 0; + // memset(rd->values, 0, rd->entries * sizeof(storage_number)); + } +} + +// ---------------------------------------------------------------------------- +// RRDSET - helpers for rrdset_create() + +inline long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries) { + if(unlikely(entries < 5)) entries = 5; + if(unlikely(entries > RRD_HISTORY_ENTRIES_MAX)) entries = RRD_HISTORY_ENTRIES_MAX; + + if(unlikely(mode == RRD_MEMORY_MODE_NONE || mode == RRD_MEMORY_MODE_ALLOC)) + return entries; + + long page = (size_t)sysconf(_SC_PAGESIZE); + long size = sizeof(RRDDIM) + entries * sizeof(storage_number); + if(unlikely(size % page)) { + size -= (size % page); + size += page; + + long n = (size - sizeof(RRDDIM)) / sizeof(storage_number); + return n; + } + + return entries; +} + +static inline void last_collected_time_align(RRDSET *st) { + st->last_collected_time.tv_sec -= st->last_collected_time.tv_sec % st->update_every; + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))) + st->last_collected_time.tv_usec = 0; + else + st->last_collected_time.tv_usec = 500000; +} + +static inline void last_updated_time_align(RRDSET *st) { + st->last_updated.tv_sec -= st->last_updated.tv_sec % st->update_every; + st->last_updated.tv_usec = 0; +} + +// ---------------------------------------------------------------------------- +// RRDSET - free a chart + +void rrdset_free(RRDSET *st) { + if(unlikely(!st)) return; + + RRDHOST *host = st->rrdhost; + + rrdhost_check_wrlock(host); // make sure we have a write lock on the host + rrdset_wrlock(st); // lock this RRDSET + + // info("Removing chart '%s' ('%s')", st->id, st->name); + + // ------------------------------------------------------------------------ + // remove it from the indexes + + if(unlikely(rrdset_index_del(host, st) != st)) + error("RRDSET: INTERNAL ERROR: attempt to remove from index chart '%s', removed a different chart.", st->id); + + rrdset_index_del_name(host, st); + + // ------------------------------------------------------------------------ + // free its children structures + + while(st->variables) rrdsetvar_free(st->variables); + while(st->alarms) 
rrdsetcalc_unlink(st->alarms); + while(st->dimensions) rrddim_free(st, st->dimensions); + + rrdfamily_free(host, st->rrdfamily); + + debug(D_RRD_CALLS, "RRDSET: Cleaning up remaining chart variables for host '%s', chart '%s'", host->hostname, st->id); + rrdvar_free_remaining_variables(host, &st->rrdvar_root_index); + + // ------------------------------------------------------------------------ + // unlink it from the host + + if(st == host->rrdset_root) { + host->rrdset_root = st->next; + } + else { + // find the previous one + RRDSET *s; + for(s = host->rrdset_root; s && s->next != st ; s = s->next) ; + + // bypass it + if(s) s->next = st->next; + else error("Request to free RRDSET '%s': cannot find it under host '%s'", st->id, host->hostname); + } + + rrdset_unlock(st); + + // ------------------------------------------------------------------------ + // free it + + netdata_rwlock_destroy(&st->rrdset_rwlock); + + // free directly allocated members + freez(st->config_section); + freez(st->plugin_name); + freez(st->module_name); + + switch(st->rrd_memory_mode) { + case RRD_MEMORY_MODE_SAVE: + case RRD_MEMORY_MODE_MAP: + case RRD_MEMORY_MODE_RAM: + debug(D_RRD_CALLS, "Unmapping stats '%s'.", st->name); + munmap(st, st->memsize); + break; + + case RRD_MEMORY_MODE_ALLOC: + case RRD_MEMORY_MODE_NONE: + freez(st); + break; + } +} + +void rrdset_save(RRDSET *st) { + rrdset_check_rdlock(st); + + // info("Saving chart '%s' ('%s')", st->id, st->name); + + if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) { + debug(D_RRD_STATS, "Saving stats '%s' to '%s'.", st->name, st->cache_filename); + memory_file_save(st->cache_filename, st, st->memsize); + } + + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE)) { + debug(D_RRD_STATS, "Saving dimension '%s' to '%s'.", rd->name, rd->cache_filename); + memory_file_save(rd->cache_filename, rd, rd->memsize); + } + } +} + +void rrdset_delete(RRDSET *st) { + RRDDIM *rd; + + rrdset_check_rdlock(st); + + info("Deleting chart '%s' ('%s') from disk...", st->id, st->name); + + if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) { + info("Deleting chart header file '%s'.", st->cache_filename); + if(unlikely(unlink(st->cache_filename) == -1)) + error("Cannot delete chart header file '%s'", st->cache_filename); + } + + rrddim_foreach_read(rd, st) { + if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) { + info("Deleting dimension file '%s'.", rd->cache_filename); + if(unlikely(unlink(rd->cache_filename) == -1)) + error("Cannot delete dimension file '%s'", rd->cache_filename); + } + } + + recursively_delete_dir(st->cache_dir, "left-over chart"); +} + +// ---------------------------------------------------------------------------- +// RRDSET - create a chart + +static inline RRDSET *rrdset_find_on_create(RRDHOST *host, const char *fullid) { + RRDSET *st = rrdset_find(host, fullid); + if(unlikely(st)) { + rrdset_isnot_obsolete(st); + debug(D_RRD_CALLS, "RRDSET '%s', already exists.", fullid); + return st; + } + + return NULL; +} + +RRDSET *rrdset_create_custom( + RRDHOST *host + , const char *type + , const char *id + , const char *name + , const char *family + , const char *context + , const char *title + , const char *units + , const char *plugin + , const char *module + , long priority + , int update_every + , RRDSET_TYPE chart_type + , RRD_MEMORY_MODE memory_mode + , long history_entries +) { + if(!type || !type[0]) { + fatal("Cannot 
create rrd stats without a type: id '%s', name '%s', family '%s', context '%s', title '%s', units '%s', plugin '%s', module '%s'." + , (id && *id)?id:"<unset>" + , (name && *name)?name:"<unset>" + , (family && *family)?family:"<unset>" + , (context && *context)?context:"<unset>" + , (title && *title)?title:"<unset>" + , (units && *units)?units:"<unset>" + , (plugin && *plugin)?plugin:"<unset>" + , (module && *module)?module:"<unset>" + ); + return NULL; + } + + if(!id || !id[0]) { + fatal("Cannot create rrd stats without an id: type '%s', name '%s', family '%s', context '%s', title '%s', units '%s', plugin '%s', module '%s'." + , type + , (name && *name)?name:"<unset>" + , (family && *family)?family:"<unset>" + , (context && *context)?context:"<unset>" + , (title && *title)?title:"<unset>" + , (units && *units)?units:"<unset>" + , (plugin && *plugin)?plugin:"<unset>" + , (module && *module)?module:"<unset>" + ); + return NULL; + } + + // ------------------------------------------------------------------------ + // check if it already exists + + char fullid[RRD_ID_LENGTH_MAX + 1]; + snprintfz(fullid, RRD_ID_LENGTH_MAX, "%s.%s", type, id); + + RRDSET *st = rrdset_find_on_create(host, fullid); + if(st) { + rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + return st; + } + + rrdhost_wrlock(host); + + st = rrdset_find_on_create(host, fullid); + if(st) { + rrdhost_unlock(host); + rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + return st; + } + + char fullfilename[FILENAME_MAX + 1]; + + // ------------------------------------------------------------------------ + // compose the config_section for this chart + + char config_section[RRD_ID_LENGTH_MAX + 1]; + if(host == localhost) + strcpy(config_section, fullid); + else + snprintfz(config_section, RRD_ID_LENGTH_MAX, "%s/%s", host->machine_guid, fullid); + + // ------------------------------------------------------------------------ + // get the options from the config, we need to create it + + long rentries = config_get_number(config_section, "history", history_entries); + long entries = align_entries_to_pagesize(memory_mode, rentries); + if(entries != rentries) entries = config_set_number(config_section, "history", entries); + + if(memory_mode == RRD_MEMORY_MODE_NONE && entries != rentries) + entries = config_set_number(config_section, "history", 10); + + int enabled = config_get_boolean(config_section, "enabled", 1); + if(!enabled) entries = 5; + + unsigned long size = sizeof(RRDSET); + char *cache_dir = rrdset_cache_dir(host, fullid, config_section); + + time_t now = now_realtime_sec(); + + // ------------------------------------------------------------------------ + // load it or allocate it + + debug(D_RRD_CALLS, "Creating RRD_STATS for '%s.%s'.", type, id); + + snprintfz(fullfilename, FILENAME_MAX, "%s/main.db", cache_dir); + if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP || memory_mode == RRD_MEMORY_MODE_RAM) { + st = (RRDSET *) mymmap( + (memory_mode == RRD_MEMORY_MODE_RAM)?NULL:fullfilename + , size + , ((memory_mode == RRD_MEMORY_MODE_MAP) ? 
MAP_SHARED : MAP_PRIVATE) + , 0 + ); + + if(st) { + memset(&st->avl, 0, sizeof(avl)); + memset(&st->avlname, 0, sizeof(avl)); + memset(&st->rrdvar_root_index, 0, sizeof(avl_tree_lock)); + memset(&st->dimensions_index, 0, sizeof(avl_tree_lock)); + memset(&st->rrdset_rwlock, 0, sizeof(netdata_rwlock_t)); + + st->name = NULL; + st->config_section = NULL; + st->type = NULL; + st->family = NULL; + st->title = NULL; + st->units = NULL; + st->context = NULL; + st->cache_dir = NULL; + st->plugin_name = NULL; + st->module_name = NULL; + st->dimensions = NULL; + st->rrdfamily = NULL; + st->rrdhost = NULL; + st->next = NULL; + st->variables = NULL; + st->alarms = NULL; + st->flags = 0x00000000; + + if(memory_mode == RRD_MEMORY_MODE_RAM) { + memset(st, 0, size); + } + else { + if(strcmp(st->magic, RRDSET_MAGIC) != 0) { + info("Initializing file %s.", fullfilename); + memset(st, 0, size); + } + else if(strcmp(st->id, fullid) != 0) { + error("File %s contents are not for chart %s. Clearing it.", fullfilename, fullid); + // munmap(st, size); + // st = NULL; + memset(st, 0, size); + } + else if(st->memsize != size || st->entries != entries) { + error("File %s does not have the desired size. Clearing it.", fullfilename); + memset(st, 0, size); + } + else if(st->update_every != update_every) { + error("File %s does not have the desired update frequency. Clearing it.", fullfilename); + memset(st, 0, size); + } + else if((now - st->last_updated.tv_sec) > update_every * entries) { + error("File %s is too old. Clearing it.", fullfilename); + memset(st, 0, size); + } + else if(st->last_updated.tv_sec > now + update_every) { + error("File %s refers to the future by %zd secs. Resetting it to now.", fullfilename, (ssize_t)(st->last_updated.tv_sec - now)); + st->last_updated.tv_sec = now; + } + + // make sure the database is aligned + if(st->last_updated.tv_sec) { + st->update_every = update_every; + last_updated_time_align(st); + } + } + + // make sure we have the right memory mode + // even if we cleared the memory + st->rrd_memory_mode = memory_mode; + } + } + + if(unlikely(!st)) { + st = callocz(1, size); + st->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? 
RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC; + } + + st->plugin_name = plugin?strdupz(plugin):NULL; + st->module_name = module?strdupz(module):NULL; + + st->config_section = strdupz(config_section); + st->rrdhost = host; + st->memsize = size; + st->entries = entries; + st->update_every = update_every; + + if(st->current_entry >= st->entries) st->current_entry = 0; + + strcpy(st->cache_filename, fullfilename); + strcpy(st->magic, RRDSET_MAGIC); + + strcpy(st->id, fullid); + st->hash = simple_hash(st->id); + + st->cache_dir = cache_dir; + + st->chart_type = rrdset_type_id(config_get(st->config_section, "chart type", rrdset_type_name(chart_type))); + st->type = config_get(st->config_section, "type", type); + + st->family = config_get(st->config_section, "family", family?family:st->type); + json_fix_string(st->family); + + st->units = config_get(st->config_section, "units", units?units:""); + json_fix_string(st->units); + + st->context = config_get(st->config_section, "context", context?context:st->id); + json_fix_string(st->context); + st->hash_context = simple_hash(st->context); + + st->priority = config_get_number(st->config_section, "priority", priority); + if(enabled) + rrdset_flag_set(st, RRDSET_FLAG_ENABLED); + else + rrdset_flag_clear(st, RRDSET_FLAG_ENABLED); + + rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); + rrdset_flag_clear(st, RRDSET_FLAG_DEBUG); + rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); + rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_SEND); + rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_IGNORE); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE); + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK); + + // if(!strcmp(st->id, "disk_util.dm-0")) { + // st->debug = 1; + // error("enabled debugging for '%s'", st->id); + // } + // else error("not enabled debugging for '%s'", st->id); + + st->green = NAN; + st->red = NAN; + + st->last_collected_time.tv_sec = 0; + st->last_collected_time.tv_usec = 0; + st->counter_done = 0; + + st->gap_when_lost_iterations_above = (int) (gap_when_lost_iterations_above + 2); + + st->last_accessed_time = 0; + st->upstream_resync_time = 0; + + avl_init_lock(&st->dimensions_index, rrddim_compare); + avl_init_lock(&st->rrdvar_root_index, rrdvar_compare); + + netdata_rwlock_init(&st->rrdset_rwlock); + + if(name && *name && rrdset_set_name(st, name)) + // we did set the name + ; + else + // could not use the name, use the id + rrdset_set_name(st, id); + + st->title = config_get(st->config_section, "title", title); + json_fix_string(st->title); + + st->rrdfamily = rrdfamily_create(host, st->family); + + st->next = host->rrdset_root; + host->rrdset_root = st; + + if(host->health_enabled) { + rrdsetvar_create(st, "last_collected_t", RRDVAR_TYPE_TIME_T, &st->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT); + rrdsetvar_create(st, "collected_total_raw", RRDVAR_TYPE_TOTAL, &st->last_collected_total, RRDVAR_OPTION_DEFAULT); + rrdsetvar_create(st, "green", RRDVAR_TYPE_CALCULATED, &st->green, RRDVAR_OPTION_DEFAULT); + rrdsetvar_create(st, "red", RRDVAR_TYPE_CALCULATED, &st->red, RRDVAR_OPTION_DEFAULT); + rrdsetvar_create(st, "update_every", RRDVAR_TYPE_INT, &st->update_every, RRDVAR_OPTION_DEFAULT); + } + + if(unlikely(rrdset_index_add(host, st) != st)) + error("RRDSET: INTERNAL ERROR: attempt to index duplicate chart '%s'", st->id); + + rrdsetcalc_link_matching(st); + rrdcalctemplate_link_matching(st); + + rrdhost_cleanup_obsolete_charts(host); + + 
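+    // the host write lock taken after the first rrdset_find_on_create()
+    // has been held across allocation, indexing and alarm linking, so two
+    // collectors racing to create the same chart id serialize on it and
+    // the loser finds the chart already indexed by the second lookup above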
rrdhost_unlock(host); + + return(st); +} + + +// ---------------------------------------------------------------------------- +// RRDSET - data collection iteration control + +inline void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds) { + if(unlikely(!st->last_collected_time.tv_sec || !microseconds || (rrdset_flag_check_noatomic(st, RRDSET_FLAG_SYNC_CLOCK)))) { + // call the full next_usec() function + rrdset_next_usec(st, microseconds); + return; + } + + st->usec_since_last_update = microseconds; +} + +inline void rrdset_next_usec(RRDSET *st, usec_t microseconds) { + struct timeval now; + now_realtime_timeval(&now); + + #ifdef NETDATA_INTERNAL_CHECKS + char *discard_reason = NULL; + usec_t discarded = microseconds; + #endif + + if(unlikely(rrdset_flag_check_noatomic(st, RRDSET_FLAG_SYNC_CLOCK))) { + // the chart needs to be re-synced to current time + rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK); + + // discard the microseconds supplied + microseconds = 0; + + #ifdef NETDATA_INTERNAL_CHECKS + if(!discard_reason) discard_reason = "SYNC CLOCK FLAG"; + #endif + } + + if(unlikely(!st->last_collected_time.tv_sec)) { + // the first entry + microseconds = st->update_every * USEC_PER_SEC; + #ifdef NETDATA_INTERNAL_CHECKS + if(!discard_reason) discard_reason = "FIRST DATA COLLECTION"; + #endif + } + else if(unlikely(!microseconds)) { + // no dt given by the plugin + microseconds = dt_usec(&now, &st->last_collected_time); + #ifdef NETDATA_INTERNAL_CHECKS + if(!discard_reason) discard_reason = "NO USEC GIVEN BY COLLECTOR"; + #endif + } + else { + // microseconds has the time since the last collection + susec_t since_last_usec = dt_usec_signed(&now, &st->last_collected_time); + + if(unlikely(since_last_usec < 0)) { + // oops! the database is in the future + info("RRD database for chart '%s' on host '%s' is %0.5" LONG_DOUBLE_MODIFIER " secs in the future (counter #%zu, update #%zu). Adjusting it to current time.", st->id, st->rrdhost->hostname, (LONG_DOUBLE)-since_last_usec / USEC_PER_SEC, st->counter, st->counter_done); + + st->last_collected_time.tv_sec = now.tv_sec - st->update_every; + st->last_collected_time.tv_usec = now.tv_usec; + last_collected_time_align(st); + + st->last_updated.tv_sec = now.tv_sec - st->update_every; + st->last_updated.tv_usec = now.tv_usec; + last_updated_time_align(st); + + microseconds = st->update_every * USEC_PER_SEC; + #ifdef NETDATA_INTERNAL_CHECKS + if(!discard_reason) discard_reason = "COLLECTION TIME IN FUTURE"; + #endif + } + else if(unlikely((usec_t)since_last_usec > (usec_t)(st->update_every * 5 * USEC_PER_SEC))) { + // oops! the database is too far behind + info("RRD database for chart '%s' on host '%s' is %0.5" LONG_DOUBLE_MODIFIER " secs in the past (counter #%zu, update #%zu). 
Adjusting it to current time.", st->id, st->rrdhost->hostname, (LONG_DOUBLE)since_last_usec / USEC_PER_SEC, st->counter, st->counter_done); + + microseconds = (usec_t)since_last_usec; + #ifdef NETDATA_INTERNAL_CHECKS + if(!discard_reason) discard_reason = "COLLECTION TIME TOO FAR IN THE PAST"; + #endif + } + +#ifdef NETDATA_INTERNAL_CHECKS + if(since_last_usec > 0 && (susec_t)microseconds < since_last_usec) { + static __thread susec_t min_delta = USEC_PER_SEC * 3600, permanent_min_delta = 0; + static __thread time_t last_t = 0; + + // the first time initialize it so that it will make the check later + if(last_t == 0) last_t = now.tv_sec + 60; + + susec_t delta = since_last_usec - (susec_t)microseconds; + if(delta < min_delta) min_delta = delta; + + if(now.tv_sec >= last_t + 60) { + last_t = now.tv_sec; + + if(min_delta > permanent_min_delta) { + info("MINIMUM MICROSECONDS DELTA of thread %d increased from %lld to %lld (+%lld)", gettid(), permanent_min_delta, min_delta, min_delta - permanent_min_delta); + permanent_min_delta = min_delta; + } + + min_delta = USEC_PER_SEC * 3600; + } + } +#endif + } + + #ifdef NETDATA_INTERNAL_CHECKS + debug(D_RRD_CALLS, "rrdset_next_usec() for chart %s with microseconds %llu", st->name, microseconds); + rrdset_debug(st, "NEXT: %llu microseconds", microseconds); + + if(discarded && discarded != microseconds) + info("host '%s', chart '%s': discarded data collection time of %llu usec, replaced with %llu usec, reason: '%s'", st->rrdhost->hostname, st->id, discarded, microseconds, discard_reason?discard_reason:"UNDEFINED"); + + #endif + + st->usec_since_last_update = microseconds; +} + + +// ---------------------------------------------------------------------------- +// RRDSET - process the collected values for all dimensions of a chart + +static inline usec_t rrdset_init_last_collected_time(RRDSET *st) { + now_realtime_timeval(&st->last_collected_time); + last_collected_time_align(st); + + usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "initialized last collected time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_collect_ut / USEC_PER_SEC); + #endif + + return last_collect_ut; +} + +static inline usec_t rrdset_update_last_collected_time(RRDSET *st) { + usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; + usec_t ut = last_collect_ut + st->usec_since_last_update; + st->last_collected_time.tv_sec = (time_t) (ut / USEC_PER_SEC); + st->last_collected_time.tv_usec = (suseconds_t) (ut % USEC_PER_SEC); + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "updated last collected time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_collect_ut / USEC_PER_SEC); + #endif + + return last_collect_ut; +} + +static inline usec_t rrdset_init_last_updated_time(RRDSET *st) { + // copy the last collected time to last updated time + st->last_updated.tv_sec = st->last_collected_time.tv_sec; + st->last_updated.tv_usec = st->last_collected_time.tv_usec; + + if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)) + st->last_updated.tv_sec -= st->update_every; + + last_updated_time_align(st); + + usec_t last_updated_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "initialized last updated time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_updated_ut / USEC_PER_SEC); + #endif + + return last_updated_ut; +} + +static inline void 
rrdset_done_push_exclusive(RRDSET *st) { +// usec_t update_every_ut = st->update_every * USEC_PER_SEC; // st->update_every in microseconds +// +// if(unlikely(st->usec_since_last_update > update_every_ut * remote_clock_resync_iterations)) { +// error("Chart '%s' was last collected %llu usec before. Resetting it.", st->id, st->usec_since_last_update); +// rrdset_reset(st); +// st->usec_since_last_update = update_every_ut; +// } + + if(unlikely(!st->last_collected_time.tv_sec)) { + // it is the first entry + // set the last_collected_time to now + rrdset_init_last_collected_time(st); + } + else { + // it is not the first entry + // calculate the proper last_collected_time, using usec_since_last_update + rrdset_update_last_collected_time(st); + } + + st->counter_done++; + + rrdset_rdlock(st); + rrdset_done_push(st); + rrdset_unlock(st); +} + + +static inline size_t rrdset_done_interpolate( + RRDSET *st + , usec_t update_every_ut + , usec_t last_stored_ut + , usec_t next_store_ut + , usec_t last_collect_ut + , usec_t now_collect_ut + , char store_this_entry + , uint32_t storage_flags +) { + RRDDIM *rd; + + size_t stored_entries = 0; // the number of entries we have stored in the db, during this call to rrdset_done() + + usec_t first_ut = last_stored_ut, last_ut = 0; + (void)first_ut; + + ssize_t iterations = (ssize_t)((now_collect_ut - last_stored_ut) / (update_every_ut)); + if((now_collect_ut % (update_every_ut)) == 0) iterations++; + + size_t counter = st->counter; + long current_entry = st->current_entry; + + for( ; next_store_ut <= now_collect_ut ; last_collect_ut = next_store_ut, next_store_ut += update_every_ut, iterations-- ) { + + #ifdef NETDATA_INTERNAL_CHECKS + if(iterations < 0) { error("INTERNAL CHECK: %s: iterations calculation wrapped! first_ut = %llu, last_stored_ut = %llu, next_store_ut = %llu, now_collect_ut = %llu", st->name, first_ut, last_stored_ut, next_store_ut, now_collect_ut); } + rrdset_debug(st, "last_stored_ut = %0.3" LONG_DOUBLE_MODIFIER " (last updated time)", (LONG_DOUBLE)last_stored_ut/USEC_PER_SEC); + rrdset_debug(st, "next_store_ut = %0.3" LONG_DOUBLE_MODIFIER " (next interpolation point)", (LONG_DOUBLE)next_store_ut/USEC_PER_SEC); + #endif + + last_ut = next_store_ut; + + rrddim_foreach_read(rd, st) { + calculated_number new_value; + + switch(rd->algorithm) { + case RRD_ALGORITHM_INCREMENTAL: + new_value = (calculated_number) + ( rd->calculated_value + * (calculated_number)(next_store_ut - last_collect_ut) + / (calculated_number)(now_collect_ut - last_collect_ut) + ); + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC2 INC " + CALCULATED_NUMBER_FORMAT " = " + CALCULATED_NUMBER_FORMAT + " * (%llu - %llu)" + " / (%llu - %llu)" + , rd->name + , new_value + , rd->calculated_value + , next_store_ut, last_collect_ut + , now_collect_ut, last_collect_ut + ); + #endif + + rd->calculated_value -= new_value; + new_value += rd->last_calculated_value; + rd->last_calculated_value = 0; + new_value /= (calculated_number)st->update_every; + + if(unlikely(next_store_ut - last_stored_ut < update_every_ut)) { + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: COLLECTION POINT IS SHORT " CALCULATED_NUMBER_FORMAT " - EXTRAPOLATING", + rd->name + , (calculated_number)(next_store_ut - last_stored_ut) + ); + #endif + + new_value = new_value * (calculated_number)(st->update_every * USEC_PER_SEC) / (calculated_number)(next_store_ut - last_stored_ut); + } + break; + + case RRD_ALGORITHM_ABSOLUTE: + case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: + case 
RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: + default: + if(iterations == 1) { + // this is the last iteration + // do not interpolate + // just show the calculated value + + new_value = rd->calculated_value; + } + else { + // we have missed an update + // interpolate in the middle values + + new_value = (calculated_number) + ( ( (rd->calculated_value - rd->last_calculated_value) + * (calculated_number)(next_store_ut - last_collect_ut) + / (calculated_number)(now_collect_ut - last_collect_ut) + ) + + rd->last_calculated_value + ); + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC2 DEF " + CALCULATED_NUMBER_FORMAT " = (((" + "(" CALCULATED_NUMBER_FORMAT " - " CALCULATED_NUMBER_FORMAT ")" + " * %llu" + " / %llu) + " CALCULATED_NUMBER_FORMAT + , rd->name + , new_value + , rd->calculated_value, rd->last_calculated_value + , (next_store_ut - first_ut) + , (now_collect_ut - first_ut), rd->last_calculated_value + ); + #endif + } + break; + } + + if(unlikely(!store_this_entry)) { + rd->values[current_entry] = SN_EMPTY_SLOT; //pack_storage_number(0, SN_NOT_EXISTS); + continue; + } + + if(likely(rd->updated && rd->collections_counter > 1 && iterations < st->gap_when_lost_iterations_above)) { + rd->values[current_entry] = pack_storage_number(new_value, storage_flags ); + rd->last_stored_value = new_value; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: STORE[%ld] " + CALCULATED_NUMBER_FORMAT " = " CALCULATED_NUMBER_FORMAT + , rd->name + , current_entry + , unpack_storage_number(rd->values[current_entry]), new_value + ); + #endif + + } + else { + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING " + , rd->name + , current_entry + ); + #endif + + rd->values[current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS); + rd->last_stored_value = NAN; + } + + stored_entries++; + + #ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) { + calculated_number t1 = new_value * (calculated_number)rd->multiplier / (calculated_number)rd->divisor; + calculated_number t2 = unpack_storage_number(rd->values[current_entry]); + + calculated_number accuracy = accuracy_loss(t1, t2); + debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " FLAGS=0x%08x (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)" + , st->id, rd->name + , current_entry + , t2 + , get_storage_number_flags(rd->values[current_entry]) + , t1 + , accuracy + , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : "" + ); + + rd->collected_volume += t1; + rd->stored_volume += t2; + + accuracy = accuracy_loss(rd->collected_volume, rd->stored_volume); + debug(D_RRD_STATS, "%s/%s: VOLUME[%ld] = " CALCULATED_NUMBER_FORMAT ", calculated = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s" + , st->id, rd->name + , current_entry + , rd->stored_volume + , rd->collected_volume + , accuracy + , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : "" + ); + } + #endif + } + // reset the storage flags for the next point, if any; + storage_flags = SN_EXISTS; + + counter++; + current_entry = ((current_entry + 1) >= st->entries) ? 
0 : current_entry + 1; + last_stored_ut = next_store_ut; + } + + st->counter = counter; + st->current_entry = current_entry; + + if(likely(last_ut)) { + st->last_updated.tv_sec = (time_t) (last_ut / USEC_PER_SEC); + st->last_updated.tv_usec = 0; + } + + return stored_entries; +} + +static inline void rrdset_done_fill_the_gap(RRDSET *st) { + usec_t update_every_ut = st->update_every * USEC_PER_SEC; + usec_t now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; + + long c = 0, entries = st->entries; + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + usec_t next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC; + long current_entry = st->current_entry; + + for(c = 0; c < entries && next_store_ut <= now_collect_ut ; next_store_ut += update_every_ut, c++) { + rd->values[current_entry] = SN_EMPTY_SLOT; + current_entry = ((current_entry + 1) >= entries) ? 0 : current_entry + 1; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING (FILLED THE GAP)", rd->name, current_entry); + #endif + } + } + + if(c > 0) { + c--; + st->last_updated.tv_sec += c * st->update_every; + + st->current_entry += c; + if(st->current_entry >= st->entries) + st->current_entry -= st->entries; + } +} + +void rrdset_done(RRDSET *st) { + if(unlikely(netdata_exit)) return; + + if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) { + if(unlikely(st->rrdhost->rrdpush_send_enabled)) + rrdset_done_push_exclusive(st); + + return; + } + + debug(D_RRD_CALLS, "rrdset_done() for chart %s", st->name); + + RRDDIM *rd; + + char + store_this_entry = 1, // boolean: 1 = store this entry, 0 = don't store this entry + first_entry = 0; // boolean: 1 = this is the first entry seen for this chart, 0 = all other entries + + usec_t + last_collect_ut, // the timestamp in microseconds, of the last collected value + now_collect_ut, // the timestamp in microseconds, of this collected value (this is NOW) + last_stored_ut, // the timestamp in microseconds, of the last stored entry in the db + next_store_ut, // the timestamp in microseconds, of the next entry to store in the db + update_every_ut = st->update_every * USEC_PER_SEC; // st->update_every in microseconds + + netdata_thread_disable_cancelability(); + + // a read lock is OK here + rrdset_rdlock(st); + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) { + error("Chart '%s' has the OBSOLETE flag set, but it is collected.", st->id); + rrdset_isnot_obsolete(st); + } + + // check if the chart has a long time to be updated + if(unlikely(st->usec_since_last_update > st->entries * update_every_ut)) { + info("host '%s', chart %s: took too long to be updated (counter #%zu, update #%zu, %0.3" LONG_DOUBLE_MODIFIER " secs). 
Resetting it.", st->rrdhost->hostname, st->name, st->counter, st->counter_done, (LONG_DOUBLE)st->usec_since_last_update / USEC_PER_SEC); + rrdset_reset(st); + st->usec_since_last_update = update_every_ut; + store_this_entry = 0; + first_entry = 1; + } + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "microseconds since last update: %llu", st->usec_since_last_update); + #endif + + // set last_collected_time + if(unlikely(!st->last_collected_time.tv_sec)) { + // it is the first entry + // set the last_collected_time to now + last_collect_ut = rrdset_init_last_collected_time(st) - update_every_ut; + + // the first entry should not be stored + store_this_entry = 0; + first_entry = 1; + } + else { + // it is not the first entry + // calculate the proper last_collected_time, using usec_since_last_update + last_collect_ut = rrdset_update_last_collected_time(st); + } + + // if this set has not been updated in the past + // we fake the last_update time to be = now - usec_since_last_update + if(unlikely(!st->last_updated.tv_sec)) { + // it has never been updated before + // set a fake last_updated, in the past using usec_since_last_update + rrdset_init_last_updated_time(st); + + // the first entry should not be stored + store_this_entry = 0; + first_entry = 1; + } + + // check if we will re-write the entire data set + if(unlikely(dt_usec(&st->last_collected_time, &st->last_updated) > st->entries * update_every_ut)) { + info("%s: too old data (last updated at %ld.%ld, last collected at %ld.%ld). Resetting it. Will not store the next entry.", st->name, st->last_updated.tv_sec, st->last_updated.tv_usec, st->last_collected_time.tv_sec, st->last_collected_time.tv_usec); + rrdset_reset(st); + rrdset_init_last_updated_time(st); + + st->usec_since_last_update = update_every_ut; + + // the first entry should not be stored + store_this_entry = 0; + first_entry = 1; + } + + // these are the 3 variables that will help us in interpolation + // last_stored_ut = the last time we added a value to the storage + // now_collect_ut = the time the current value has been collected + // next_store_ut = the time of the next interpolation point + now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; + last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec; + next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC; + + if(unlikely(!st->counter_done)) { + // if we have not collected metrics this session (st->counter_done == 0) + // and we have collected metrics for this chart in the past (st->counter != 0) + // fill the gap (the chart has been just loaded from disk) + if(unlikely(st->counter)) { + rrdset_done_fill_the_gap(st); + last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec; + next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC; + } + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))) { + store_this_entry = 1; + last_collect_ut = next_store_ut - update_every_ut; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "Fixed first entry."); + #endif + } + else { + store_this_entry = 0; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "Will not store the next entry."); + #endif + } + } + st->counter_done++; + + if(unlikely(st->rrdhost->rrdpush_send_enabled)) + rrdset_done_push(st); + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "last_collect_ut = %0.3" LONG_DOUBLE_MODIFIER " (last collection time)", (LONG_DOUBLE)last_collect_ut/USEC_PER_SEC); + 
rrdset_debug(st, "now_collect_ut = %0.3" LONG_DOUBLE_MODIFIER " (current collection time)", (LONG_DOUBLE)now_collect_ut/USEC_PER_SEC); + rrdset_debug(st, "last_stored_ut = %0.3" LONG_DOUBLE_MODIFIER " (last updated time)", (LONG_DOUBLE)last_stored_ut/USEC_PER_SEC); + rrdset_debug(st, "next_store_ut = %0.3" LONG_DOUBLE_MODIFIER " (next interpolation point)", (LONG_DOUBLE)next_store_ut/USEC_PER_SEC); + #endif + + // calculate totals and count the dimensions + int dimensions = 0; + st->collected_total = 0; + rrddim_foreach_read(rd, st) { + dimensions++; + if(likely(rd->updated)) + st->collected_total += rd->collected_value; + } + + uint32_t storage_flags = SN_EXISTS; + + // process all dimensions to calculate their values + // based on the collected figures only + // at this stage we do not interpolate anything + rrddim_foreach_read(rd, st) { + + if(unlikely(!rd->updated)) { + rd->calculated_value = 0; + continue; + } + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: START " + " last_collected_value = " COLLECTED_NUMBER_FORMAT + " collected_value = " COLLECTED_NUMBER_FORMAT + " last_calculated_value = " CALCULATED_NUMBER_FORMAT + " calculated_value = " CALCULATED_NUMBER_FORMAT + , rd->name + , rd->last_collected_value + , rd->collected_value + , rd->last_calculated_value + , rd->calculated_value + ); + #endif + + switch(rd->algorithm) { + case RRD_ALGORITHM_ABSOLUTE: + rd->calculated_value = (calculated_number)rd->collected_value + * (calculated_number)rd->multiplier + / (calculated_number)rd->divisor; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC ABS/ABS-NO-IN " + CALCULATED_NUMBER_FORMAT " = " + COLLECTED_NUMBER_FORMAT + " * " CALCULATED_NUMBER_FORMAT + " / " CALCULATED_NUMBER_FORMAT + , rd->name + , rd->calculated_value + , rd->collected_value + , (calculated_number)rd->multiplier + , (calculated_number)rd->divisor + ); + #endif + + break; + + case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: + if(unlikely(!st->collected_total)) + rd->calculated_value = 0; + else + // the percentage of the current value + // over the total of all dimensions + rd->calculated_value = + (calculated_number)100 + * (calculated_number)rd->collected_value + / (calculated_number)st->collected_total; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC PCENT-ROW " + CALCULATED_NUMBER_FORMAT " = 100" + " * " COLLECTED_NUMBER_FORMAT + " / " COLLECTED_NUMBER_FORMAT + , rd->name + , rd->calculated_value + , rd->collected_value + , st->collected_total + ); + #endif + + break; + + case RRD_ALGORITHM_INCREMENTAL: + if(unlikely(rd->collections_counter <= 1)) { + rd->calculated_value = 0; + continue; + } + + // if the new is smaller than the old (an overflow, or reset), set the old equal to the new + // to reset the calculation (it will give zero as the calculation for this second) + if(unlikely(rd->last_collected_value > rd->collected_value)) { + debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. 
Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT + , st->name, rd->name + , rd->last_collected_value + , rd->collected_value); + + if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS))) + storage_flags = SN_EXISTS_RESET; + + rd->last_collected_value = rd->collected_value; + } + + rd->calculated_value += + (calculated_number)(rd->collected_value - rd->last_collected_value) + * (calculated_number)rd->multiplier + / (calculated_number)rd->divisor; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC INC PRE " + CALCULATED_NUMBER_FORMAT " = (" + COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT + ")" + " * " CALCULATED_NUMBER_FORMAT + " / " CALCULATED_NUMBER_FORMAT + , rd->name + , rd->calculated_value + , rd->collected_value, rd->last_collected_value + , (calculated_number)rd->multiplier + , (calculated_number)rd->divisor + ); + #endif + + break; + + case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: + if(unlikely(rd->collections_counter <= 1)) { + rd->calculated_value = 0; + continue; + } + + // if the new is smaller than the old (an overflow, or reset), set the old equal to the new + // to reset the calculation (it will give zero as the calculation for this second) + if(unlikely(rd->last_collected_value > rd->collected_value)) { + debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT + , st->name, rd->name + , rd->last_collected_value + , rd->collected_value + ); + + if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS))) + storage_flags = SN_EXISTS_RESET; + + rd->last_collected_value = rd->collected_value; + } + + // the percentage of the current increment + // over the increment of all dimensions together + if(unlikely(st->collected_total == st->last_collected_total)) + rd->calculated_value = 0; + else + rd->calculated_value = + (calculated_number)100 + * (calculated_number)(rd->collected_value - rd->last_collected_value) + / (calculated_number)(st->collected_total - st->last_collected_total); + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC PCENT-DIFF " + CALCULATED_NUMBER_FORMAT " = 100" + " * (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")" + " / (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")" + , rd->name + , rd->calculated_value + , rd->collected_value, rd->last_collected_value + , st->collected_total, st->last_collected_total + ); + #endif + + break; + + default: + // make the default zero, to make sure + // it gets noticed when we add new types + rd->calculated_value = 0; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: CALC " + CALCULATED_NUMBER_FORMAT " = 0" + , rd->name + , rd->calculated_value + ); + #endif + + break; + } + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: PHASE2 " + " last_collected_value = " COLLECTED_NUMBER_FORMAT + " collected_value = " COLLECTED_NUMBER_FORMAT + " last_calculated_value = " CALCULATED_NUMBER_FORMAT + " calculated_value = " CALCULATED_NUMBER_FORMAT + , rd->name + , rd->last_collected_value + , rd->collected_value + , rd->last_calculated_value + , rd->calculated_value + ); + #endif + + } + + // at this point we have all the calculated values ready + // it is now time to interpolate values on a second boundary + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(now_collect_ut < next_store_ut)) { + // this is collected in the same interpolation point + rrdset_debug(st, "THIS IS IN THE SAME INTERPOLATION POINT"); + info("INTERNAL 
CHECK: host '%s', chart '%s' is collected in the same interpolation point: short by %llu microseconds", st->rrdhost->hostname, st->name, next_store_ut - now_collect_ut); + } +#endif + + rrdset_done_interpolate(st + , update_every_ut + , last_stored_ut + , next_store_ut + , last_collect_ut + , now_collect_ut + , store_this_entry + , storage_flags + ); + + st->last_collected_total = st->collected_total; + + rrddim_foreach_read(rd, st) { + if(unlikely(!rd->updated)) + continue; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: setting last_collected_value (old: " COLLECTED_NUMBER_FORMAT ") to last_collected_value (new: " COLLECTED_NUMBER_FORMAT ")", rd->name, rd->last_collected_value, rd->collected_value); + #endif + + rd->last_collected_value = rd->collected_value; + + switch(rd->algorithm) { + case RRD_ALGORITHM_INCREMENTAL: + if(unlikely(!first_entry)) { + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value + rd->calculated_value, rd->calculated_value); + #endif + + rd->last_calculated_value += rd->calculated_value; + } + else { + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "THIS IS THE FIRST POINT"); + #endif + } + break; + + case RRD_ALGORITHM_ABSOLUTE: + case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: + case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value, rd->calculated_value); + #endif + + rd->last_calculated_value = rd->calculated_value; + break; + } + + rd->calculated_value = 0; + rd->collected_value = 0; + rd->updated = 0; + + #ifdef NETDATA_INTERNAL_CHECKS + rrdset_debug(st, "%s: END " + " last_collected_value = " COLLECTED_NUMBER_FORMAT + " collected_value = " COLLECTED_NUMBER_FORMAT + " last_calculated_value = " CALCULATED_NUMBER_FORMAT + " calculated_value = " CALCULATED_NUMBER_FORMAT + , rd->name + , rd->last_collected_value + , rd->collected_value + , rd->last_calculated_value + , rd->calculated_value + ); + #endif + + } + + // ALL DONE ABOUT THE DATA UPDATE + // -------------------------------------------------------------------- + +/* + // find if there are any obsolete dimensions (not updated recently) + if(unlikely(rrd_delete_unupdated_dimensions)) { + + for( rd = st->dimensions; likely(rd) ; rd = rd->next ) + if((rd->last_collected_time.tv_sec + (rrd_delete_unupdated_dimensions * st->update_every)) < st->last_collected_time.tv_sec) + break; + + if(unlikely(rd)) { + RRDDIM *last; + // there is a dimension to free + // upgrade our read lock to a write lock + rrdset_unlock(st); + rrdset_wrlock(st); + + for( rd = st->dimensions, last = NULL ; likely(rd) ; ) { + // remove it only if it has not been updated in rrd_delete_unupdated_dimensions seconds + + if(unlikely((rd->last_collected_time.tv_sec + (rrd_delete_unupdated_dimensions * st->update_every)) < st->last_collected_time.tv_sec)) { + info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rd->name, rd->id, st->name, st->id); + + if(unlikely(!last)) { + st->dimensions = rd->next; + rd->next = NULL; + rrddim_free(st, rd); + rd = st->dimensions; + continue; + } + else { + last->next = rd->next; + rd->next = NULL; + rrddim_free(st, rd); + rd = last->next; + continue; + } + } + + last = rd; + rd = rd->next; + } + + if(unlikely(!st->dimensions)) { + info("Disabling chart %s (%s) since it does not have any dimensions", st->name, st->id); + st->enabled = 0; + } + } + } +*/ + + rrdset_unlock(st); + + netdata_thread_enable_cancelability(); +}
variable); + RRDSETVAR *rs = (RRDSETVAR *)callocz(1, sizeof(RRDSETVAR)); + + rs->variable = strdupz(variable); + rs->hash = simple_hash(rs->variable); + rs->type = type; + rs->value = value; + rs->options = options; + rs->rrdset = st; + + rs->next = st->variables; + st->variables = rs; + + rrdsetvar_create_variables(rs); + + return rs; +} + +void rrdsetvar_rename_all(RRDSET *st) { + debug(D_VARIABLES, "RRDSETVAR rename for chart id '%s' name '%s'", st->id, st->name); + + RRDSETVAR *rs; + for(rs = st->variables; rs ; rs = rs->next) + rrdsetvar_create_variables(rs); + + rrdsetcalc_link_matching(st); +} + +void rrdsetvar_free(RRDSETVAR *rs) { + RRDSET *st = rs->rrdset; + debug(D_VARIABLES, "RRDSETVAR free for chart id '%s' name '%s', variable '%s'", st->id, st->name, rs->variable); + + if(st->variables == rs) { + st->variables = rs->next; + } + else { + RRDSETVAR *t; + for (t = st->variables; t && t->next != rs; t = t->next); + if(!t) error("RRDSETVAR '%s' not found in chart '%s' variables linked list", rs->key_fullname, st->id); + else t->next = rs->next; + } + + rrdsetvar_free_variables(rs); + + freez(rs->variable); + + if(rs->options & RRDVAR_OPTION_ALLOCATED) + freez(rs->value); + + freez(rs); +} + +// -------------------------------------------------------------------------------------------------------------------- +// custom chart variables + +RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name) { + RRDHOST *host = st->rrdhost; + + char *n = strdupz(name); + rrdvar_fix_name(n); + uint32_t hash = simple_hash(n); + + rrdset_wrlock(st); + + // find it + RRDSETVAR *rs; + for(rs = st->variables; rs ; rs = rs->next) { + if(hash == rs->hash && strcmp(n, rs->variable) == 0) { + rrdset_unlock(st); + if(rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR) { + freez(n); + return rs; + } + else { + error("RRDSETVAR: custom variable '%s' on chart '%s' of host '%s' conflicts with an internal chart variable", n, st->id, host->hostname); + freez(n); + return NULL; + } + } + } + + // not found, allocate one + + calculated_number *v = mallocz(sizeof(calculated_number)); + *v = NAN; + + rs = rrdsetvar_create(st, n, RRDVAR_TYPE_CALCULATED, v, RRDVAR_OPTION_ALLOCATED|RRDVAR_OPTION_CUSTOM_CHART_VAR); + rrdset_unlock(st); + + freez(n); + return rs; +} + +void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rs, calculated_number value) { + if(rs->type != RRDVAR_TYPE_CALCULATED || !(rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR) || !(rs->options & RRDVAR_OPTION_ALLOCATED)) { + error("RRDSETVAR: requested to set variable '%s' of chart '%s' on host '%s' to value " CALCULATED_NUMBER_FORMAT " but the variable is not a custom chart one.", rs->variable, rs->rrdset->id, rs->rrdset->rrdhost->hostname, value); + } + else { + calculated_number *v = rs->value; + if(*v != value) { + *v = value; + + // mark the chart to be sent upstream + rrdset_flag_clear(rs->rrdset, RRDSET_FLAG_UPSTREAM_EXPOSED); + } + } +}
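A custom chart variable is a calculated_number owned by the chart itself. A minimal usage sketch, assuming the caller already holds the RRDSET pointer (the variable name is hypothetical):

    RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, "max_connections");
    if(rs) rrdsetvar_custom_chart_variable_set(rs, 100.0);

Setting a new value clears RRDSET_FLAG_UPSTREAM_EXPOSED, so a streaming parent re-receives the chart definition together with the updated variable.

 diff --git a/database/rrdsetvar.h b/database/rrdsetvar.h new file mode 100644 index 000000000..34a26d2f0 --- /dev/null +++ b/database/rrdsetvar.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDSETVAR_H +#define NETDATA_RRDSETVAR_H 1 + +#include "rrd.h" + +// variables linked to charts +// We link variables to point to the values that are already +// calculated / processed by the normal data collection process. +// This means there will be no speed penalty for using +// these variables. + +struct rrdsetvar { + char *variable; // variable name + uint32_t hash; //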
variable name hash + + char *key_fullid; // chart type.chart id.variable + char *key_fullname; // chart type.chart name.variable + + RRDVAR_TYPE type; + void *value; + + RRDVAR_OPTIONS options; + + RRDVAR *var_local; + RRDVAR *var_family; + RRDVAR *var_host; + RRDVAR *var_family_name; + RRDVAR *var_host_name; + + struct rrdset *rrdset; + + struct rrdsetvar *next; +}; + +extern RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name); +extern void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rv, calculated_number value); + +extern void rrdsetvar_rename_all(RRDSET *st); +extern RRDSETVAR *rrdsetvar_create(RRDSET *st, const char *variable, RRDVAR_TYPE type, void *value, RRDVAR_OPTIONS options); +extern void rrdsetvar_free(RRDSETVAR *rs); + +#endif //NETDATA_RRDSETVAR_H diff --git a/database/rrdvar.c b/database/rrdvar.c new file mode 100644 index 000000000..951a38cac --- /dev/null +++ b/database/rrdvar.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define NETDATA_HEALTH_INTERNALS +#include "rrd.h" + +// ---------------------------------------------------------------------------- +// RRDVAR management + +inline int rrdvar_fix_name(char *variable) { + int fixed = 0; + while(*variable) { + if (!isalnum(*variable) && *variable != '.' && *variable != '_') { + *variable++ = '_'; + fixed++; + } + else + variable++; + } + + return fixed; +} + +int rrdvar_compare(void* a, void* b) { + if(((RRDVAR *)a)->hash < ((RRDVAR *)b)->hash) return -1; + else if(((RRDVAR *)a)->hash > ((RRDVAR *)b)->hash) return 1; + else return strcmp(((RRDVAR *)a)->name, ((RRDVAR *)b)->name); +} + +static inline RRDVAR *rrdvar_index_add(avl_tree_lock *tree, RRDVAR *rv) { + RRDVAR *ret = (RRDVAR *)avl_insert_lock(tree, (avl *)(rv)); + if(ret != rv) + debug(D_VARIABLES, "Request to insert RRDVAR '%s' into index failed. Already exists.", rv->name); + + return ret; +} + +static inline RRDVAR *rrdvar_index_del(avl_tree_lock *tree, RRDVAR *rv) { + RRDVAR *ret = (RRDVAR *)avl_remove_lock(tree, (avl *)(rv)); + if(!ret) + error("Request to remove RRDVAR '%s' from index failed. Not Found.", rv->name); + + return ret; +} + +static inline RRDVAR *rrdvar_index_find(avl_tree_lock *tree, const char *name, uint32_t hash) { + RRDVAR tmp; + tmp.name = (char *)name; + tmp.hash = (hash)?hash:simple_hash(tmp.name); + + return (RRDVAR *)avl_search_lock(tree, (avl *)&tmp); +} + +inline void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv) { + (void)host; + + if(!rv) return; + + if(tree) { + debug(D_VARIABLES, "Deleting variable '%s'", rv->name); + if(unlikely(!rrdvar_index_del(tree, rv))) + error("RRDVAR: Attempted to delete variable '%s' from host '%s', but it is not found.", rv->name, host->hostname); + } + + if(rv->options & RRDVAR_OPTION_ALLOCATED) + freez(rv->value); + + freez(rv->name); + freez(rv); +} + +inline RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) { + char *variable = strdupz(name); + rrdvar_fix_name(variable); + uint32_t hash = simple_hash(variable); + + RRDVAR *rv = rrdvar_index_find(tree, variable, hash); + if(unlikely(!rv)) { + debug(D_VARIABLES, "Variable '%s' not found in scope '%s'. 
Creating a new one.", variable, scope); + + rv = callocz(1, sizeof(RRDVAR)); + rv->name = variable; + rv->hash = hash; + rv->type = type; + rv->options = options; + rv->value = value; + rv->last_updated = now_realtime_sec(); + + RRDVAR *ret = rrdvar_index_add(tree, rv); + if(unlikely(ret != rv)) { + debug(D_VARIABLES, "Variable '%s' in scope '%s' already exists", variable, scope); + freez(rv); + freez(variable); + rv = NULL; + } + else + debug(D_VARIABLES, "Variable '%s' created in scope '%s'", variable, scope); + } + else { + debug(D_VARIABLES, "Variable '%s' is already found in scope '%s'.", variable, scope); + + // already exists + freez(variable); + + // this is important + // it must return NULL - not the existing variable - or double-free will happen + rv = NULL; + } + + return rv; +} + +void rrdvar_free_remaining_variables(RRDHOST *host, avl_tree_lock *tree_lock) { + // This is not bulletproof - avl should support some means to destroy it + // with a callback for each item already in the index + + RRDVAR *rv, *last = NULL; + while((rv = (RRDVAR *)tree_lock->avl_tree.root)) { + if(unlikely(rv == last)) { + error("RRDVAR: INTERNAL ERROR: Cannot cleanup tree of RRDVARs"); + break; + } + last = rv; + rrdvar_free(host, tree_lock, rv); + } +} + +// ---------------------------------------------------------------------------- +// CUSTOM HOST VARIABLES + +inline int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback)(void * /*rrdvar*/, void * /*data*/), void *data) { + return avl_traverse_lock(&host->rrdvar_root_index, callback, data); +} + +static RRDVAR *rrdvar_custom_variable_create(const char *scope, avl_tree_lock *tree_lock, const char *name) { + calculated_number *v = callocz(1, sizeof(calculated_number)); + *v = NAN; + + RRDVAR *rv = rrdvar_create_and_index(scope, tree_lock, name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_CUSTOM_HOST_VAR|RRDVAR_OPTION_ALLOCATED, v); + if(unlikely(!rv)) { + freez(v); + debug(D_VARIABLES, "Requested variable '%s' already exists - possibly 2 plugins are updating it at the same time.", name); + + char *variable = strdupz(name); + rrdvar_fix_name(variable); + uint32_t hash = simple_hash(variable); + + // find the existing one to return it + rv = rrdvar_index_find(tree_lock, variable, hash); + + freez(variable); + } + + return rv; +} + +RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name) { + return rrdvar_custom_variable_create("host", &host->rrdvar_root_index, name); +} + +void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value) { + if(rv->type != RRDVAR_TYPE_CALCULATED || !(rv->options & RRDVAR_OPTION_CUSTOM_HOST_VAR) || !(rv->options & RRDVAR_OPTION_ALLOCATED)) + error("requested to set variable '%s' to value " CALCULATED_NUMBER_FORMAT " but the variable is not a custom one.", rv->name, value); + else { + calculated_number *v = rv->value; + if(*v != value) { + *v = value; + + rv->last_updated = now_realtime_sec(); + + // if the host is streaming, send this variable upstream immediately + rrdpush_sender_send_this_host_variable_now(host, rv); + } + } +} + +int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR * /*rv*/, void * /*data*/), void *data) { + return avl_traverse_lock(&host->rrdvar_root_index, (int (*)(void *, void *))callback, data); +}
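A sketch of the intended plugin-side use of the custom host variable API: create the variable once, then update it on every iteration. The variable name is hypothetical; localhost is assumed to be the agent's own RRDHOST:

    static RRDVAR *rv = NULL;
    if(!rv) rv = rrdvar_custom_host_variable_create(localhost, "myplugin.sessions");
    if(rv) rrdvar_custom_host_variable_set(localhost, rv, (calculated_number)sessions);

Because the setter calls rrdpush_sender_send_this_host_variable_now(), an updated value reaches a streaming parent immediately instead of waiting for the next resend of definitions.

 + +// ---------------------------------------------------------------------------- +// RRDVAR lookup + +calculated_number rrdvar2number(RRDVAR *rv) { + switch(rv->type) { + case RRDVAR_TYPE_CALCULATED: { + calculated_number *n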
= (calculated_number *)rv->value; + return *n; + } + + case RRDVAR_TYPE_TIME_T: { + time_t *n = (time_t *)rv->value; + return *n; + } + + case RRDVAR_TYPE_COLLECTED: { + collected_number *n = (collected_number *)rv->value; + return *n; + } + + case RRDVAR_TYPE_TOTAL: { + total_number *n = (total_number *)rv->value; + return *n; + } + + case RRDVAR_TYPE_INT: { + int *n = (int *)rv->value; + return *n; + } + + default: + error("I don't know how to convert RRDVAR type %u to calculated_number", rv->type); + return NAN; + } +} + +int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) { + RRDSET *st = rc->rrdset; + if(!st) return 0; + + RRDHOST *host = st->rrdhost; + RRDVAR *rv; + + rv = rrdvar_index_find(&st->rrdvar_root_index, variable, hash); + if(rv) { + *result = rrdvar2number(rv); + return 1; + } + + rv = rrdvar_index_find(&st->rrdfamily->rrdvar_root_index, variable, hash); + if(rv) { + *result = rrdvar2number(rv); + return 1; + } + + rv = rrdvar_index_find(&host->rrdvar_root_index, variable, hash); + if(rv) { + *result = rrdvar2number(rv); + return 1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// RRDVAR to JSON + +struct variable2json_helper { + BUFFER *buf; + size_t counter; +}; + +static int single_variable2json(void *entry, void *data) { + struct variable2json_helper *helper = (struct variable2json_helper *)data; + RRDVAR *rv = (RRDVAR *)entry; + calculated_number value = rrdvar2number(rv); + + if(unlikely(isnan(value) || isinf(value))) + buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": null", helper->counter?",":"", rv->name); + else + buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": %0.5" LONG_DOUBLE_MODIFIER, helper->counter?",":"", rv->name, (LONG_DOUBLE)value); + + helper->counter++; + + return 0; +} + +void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf) { + RRDHOST *host = st->rrdhost; + + struct variable2json_helper helper = { + .buf = buf, + .counter = 0 + }; + + buffer_sprintf(buf, "{\n\t\"chart\": \"%s\",\n\t\"chart_name\": \"%s\",\n\t\"chart_context\": \"%s\",\n\t\"chart_variables\": {", st->id, st->name, st->context); + avl_traverse_lock(&st->rrdvar_root_index, single_variable2json, (void *)&helper); + buffer_sprintf(buf, "\n\t},\n\t\"family\": \"%s\",\n\t\"family_variables\": {", st->family); + helper.counter = 0; + avl_traverse_lock(&st->rrdfamily->rrdvar_root_index, single_variable2json, (void *)&helper); + buffer_sprintf(buf, "\n\t},\n\t\"host\": \"%s\",\n\t\"host_variables\": {", host->hostname); + helper.counter = 0; + avl_traverse_lock(&host->rrdvar_root_index, single_variable2json, (void *)&helper); + buffer_strcat(buf, "\n\t}\n}\n"); +}
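For reference, the JSON assembled by health_api_v1_chart_variables2json() has the following shape; the chart, family, host and values below are illustrative, not taken from a real run. Values are printed with five decimals, and NaN or infinite values are serialized as null so the output stays valid JSON:

    {
        "chart": "system.cpu",
        "chart_name": "system.cpu",
        "chart_context": "system.cpu",
        "chart_variables": {
            "active_processors": 4.00000,
            "unset_variable": null
        },
        "family": "cpu",
        "family_variables": {},
        "host": "examplehost",
        "host_variables": {}
    }

 diff --git a/database/rrdvar.h b/database/rrdvar.h new file mode 100644 index 000000000..6d1461b2a --- /dev/null +++ b/database/rrdvar.h @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDVAR_H +#define NETDATA_RRDVAR_H 1 + +#include "libnetdata/libnetdata.h" + +extern int rrdvar_compare(void *a, void *b); + +typedef enum rrdvar_type { + RRDVAR_TYPE_CALCULATED = 1, + RRDVAR_TYPE_TIME_T = 2, + RRDVAR_TYPE_COLLECTED = 3, + RRDVAR_TYPE_TOTAL = 4, + RRDVAR_TYPE_INT = 5 +} RRDVAR_TYPE; + +typedef enum rrdvar_options { + RRDVAR_OPTION_DEFAULT = 0, + RRDVAR_OPTION_ALLOCATED = (1 << 0), // the value ptr is allocated (not a reference) + RRDVAR_OPTION_CUSTOM_HOST_VAR = (1 << 1), // this is a custom host variable, not associated with a dimension + RRDVAR_OPTION_CUSTOM_CHART_VAR = (1 << 2), //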
this is a custom chart variable, not associated with a dimension + RRDVAR_OPTION_RRDCALC_LOCAL_VAR = (1 << 3), // this is an alarm variable, attached to a chart + RRDVAR_OPTION_RRDCALC_FAMILY_VAR = (1 << 4), // this is an alarm variable, attached to a family + RRDVAR_OPTION_RRDCALC_HOST_CHARTID_VAR = (1 << 5), // this is an alarm variable, attached to the host, using the chart id + RRDVAR_OPTION_RRDCALC_HOST_CHARTNAME_VAR = (1 << 6), // this is an alarm variable, attached to the host, using the chart name +} RRDVAR_OPTIONS; + +// the variables as stored in the variables indexes +// there are 3 indexes: +// 1. at each chart (RRDSET.rrdvar_root_index) +// 2. at each family (RRDFAMILY.rrdvar_root_index) +// 3. at each host (RRDHOST.rrdvar_root_index) +struct rrdvar { + avl avl; + + char *name; + uint32_t hash; + + RRDVAR_TYPE type; + RRDVAR_OPTIONS options; + + void *value; + + time_t last_updated; +}; + +#define RRDVAR_MAX_LENGTH 1024 + +extern int rrdvar_fix_name(char *variable); + +#include "rrd.h" + +extern RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name); +extern void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value); +extern int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR *rv, void *data), void *data); +extern void rrdvar_free_remaining_variables(RRDHOST *host, avl_tree_lock *tree_lock); + +extern int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback)(void *rrdvar, void *data), void *data); + +extern calculated_number rrdvar2number(RRDVAR *rv); + +extern RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value); + +extern void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv); + +#endif //NETDATA_RRDVAR_H
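Variable names are normalized before they are indexed: rrdvar_fix_name() rewrites every character that is not alphanumeric, '.' or '_' into '_'. A quick illustration with a hypothetical input:

    char name[] = "disk space (/)";
    int fixed = rrdvar_fix_name(name);
    // name is now "disk_space____" and fixed == 5 (the number of characters rewritten)

 diff --git a/diagrams/Makefile.am b/diagrams/Makefile.am index 420bd5246..04f99c8fd 100644 --- a/diagrams/Makefile.am +++ b/diagrams/Makefile.am @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later MAINTAINERCLEANFILES= $(srcdir)/Makefile.in dist_noinst_DATA = \ diff --git a/diagrams/Makefile.in b/diagrams/Makefile.in index 46fbd54b5..5f75902de 100644 --- a/diagrams/Makefile.in +++ b/diagrams/Makefile.in @@ -83,14 +83,16 @@ subdir = diagrams DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d @@ -217,6 +219,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os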
= @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -238,6 +241,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ @@ -264,6 +268,8 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ varlibdir = @varlibdir@ webdir = @webdir@ + +# SPDX-License-Identifier: GPL-3.0-or-later MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ config.puml \ diff --git a/diagrams/build.sh b/diagrams/build.sh index 53f0ea7ab..9b3963e58 100755 --- a/diagrams/build.sh +++ b/diagrams/build.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later path=$(dirname "$0") cd "${path}" || exit 1 diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml index 13c4320e8..7f8008542 100644 --- a/diagrams/netdata-overview.xml +++ b/diagrams/netdata-overview.xml @@ -1 +1 @@ -<mxfile userAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36" version="8.5.0" editor="www.draw.io" type="github"><diagram id="319b92f5-ce0c-3638-1d72-e7e1d323a6f9" name="Page-1">[compressed single-line draw.io payload of the netdata overview diagram omitted]</diagram></mxfile> \ No newline at end of file +<mxfile userAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)
Chrome/70.0.3538.77 Safari/537.36" version="9.3.1" editor="www.draw.io" type="github"><diagram id="319b92f5-ce0c-3638-1d72-e7e1d323a6f9" name="Page-1">7X1Zc9s4vu9nuQ+u6nmgC/vymKU9PVXTc3JO99y55ykFgqCtaUlUS3Icz6e/ADeRICRLMiRTcpKqxKJkCsT/h/++3OBPs+9/XarFw69FZqY3CGTfb/DnG4QghtT+5648V1cExNWF++Ukqz+0ufDb5D+mvgjqq4+TzKx6H1wXxXQ9WfQv6mI+N3rdu6aWy+Kp/7G8mPa/daHuzeDCb1pNh1f/NcnWD/VTULC5/ouZ3D803wxB/U6q9B/3y+JxXn/fDcJ5+ad6e6aae9WfXz2orHjqXMI/3+BPy6JYVz/Nvn8yU7e3zbZVv3e35d123UszXwd+4Z8rs/yv9N9uyxCYqtSSrV6m+7XpZP5H9fphvXY7/cH9Irq7n6wfHtNbXczsi7lZZ2qtej+tl8bY/2ZqtTZL+4MuplP7HcVy5V48qOV6dZvdLqaP95O5+8XHdfWF7bKRbJ+9fabV+rkhh9vRxfDR6qf9ZpZr8z2EC5U2dwCd+//VFDOzXj7bz9W/hYmofq+GK2SivvC0oT6E9TY9dChP62uqBtx9e+/NprudqR6qedkhQyyy5JOleSimPbKk0yLtk2We32YdklS0uHXXPYJgsJMgFrYL9+NkVh6kj+X/H1aL6iwCe0U1L/LJd2Pv+tFRaWJP2N/d430pVpP1pJjb99NivbZPsPnAh+nk3r2xLhbNne2r/qPrafGY3VYb8Gg3zz7B2sKi3gu1Wpm1gx5iDGFAyp8oEpJYmN3hNEepBiwh9qcEQsMTaahKuAFESYiFxvx2Mb+3Xx6AXIXUAOiG4Opgh54dOv8w66di+ccNYlP7yY/Z5Jv98d79+M8vvzVX7f06bwQ++7tluqttn34zbMKd2LRkcet7WM/cVkD749KsJv+peYHD5qKYzNflrtOPN/SzQ+vj2iKylELuF1QNwanJ11uRuVooPZnf/+5efE6IvZJbFNayzMq7+nW9sABnPgRL3/uYeb7pcrsO0BgYAo2AkfCoCKKjWJj5ymkBPUigK2RWDDH7o4AcpQaXzEpUzEqkmiYq5QhJlHOI9XZmdSi8pAB9fHHAhnKQ8iHGML9UjC2e1w/FvIsxXTzqhyztQwwfBLEX8VM+3MdWWfxUTItleatGXdwGsQFUh2gOo9AhLynfWm3F4gaCGALMIIeJZsgwAFGiKMe1tEQpTDIsqISQU5anNQDLm3/sKsDzYm7iIFNAD5m0MTF6yGQB7keuB5lLu/DCYm1p+uAk18j/KHb8DwslCenyP0lNmiCkBU4ZFZxF5H8EeChjOGAHSBFAGR4JykK61tPkj8nmQpIXy+Rvxe99CNFL529HogyUUhZwxEWmuyhTIE8wRCgjQCHJ03gog5CSHswQaBwLHZiJgMkg2EhQtg8vezBqah/GfqRYT3JLaYcRR4KZWa0sBdPJMutjkL1XDPIKgxxTBLqaHoAgoZbhaE7z1MiIGKSc9SBI4BCBUNKAoidiQHDguKrVx29q+mi6aOzQ/+lhsja/WaPLvX5aqkXfyFtZ0fiH6RD5E/8Mf3a0yifTaXO91kjulyqb2N3rfvzTnf0TsOIaY1Dbz1tgW/tYrR4cLMqvXZjlxG6AWf5WmYM1Pgr3au02F7uXJQDL33GvWqefe3E/tbiof7Y4mejmQ+XzNN5HXH/vF7W23zWvDEoA44CB96UebazMnvOLw4DUay4egIZxaFKLh0W+mPXZj7hGLar8UQCWZViZrnwDOkuMptJwI3GWini8pZFcraqOA7wFkQCcTuxMrdxY7nz7Lq+0ufDhy6/ttWV7cbGY1vLL3vJXu6SJ7nrFUv92Tg8LfEvHe7Zaq/Uqczu9LNaFdgrbni65n37/9MX+nmVbCPzz85e/vNo7d/BpqtbenqXuEdrtxrcXJouVqThgeZhKUNvXh/J2poVJc4+32+uZMiLXHhMnISa+SwLE0PEA8k4BwEOuikDAlIA0ioh96RgMQFshtkMv9udj0byRVN7RD07osMX3zZvNXX6zkFEzJwB3n4HQceng9rPFX6osRhD4H9M5dWfA+GrzBD138+5IyKHIFSjFjAWQS43IfCdyELlR0In7FgiTQxaNQMACkWPx84XsXJ+ijdi32/tNLeeT1UOilX7wnCdwdzjhes0O9+BWNdDcsiHVVQ0wJwkkOYJGkBzDiA4W2Fq6rXJAhx5miQPAG4uDJYLymavJFKXKk53wsCjGFeEQVThMOcpIF4dIoyRlMNUMCp4aFtUFA/s4xE10v4vDgP3LzyKcAyrg8fKahOR1vrCSroXkwSL7RTW1ubRaOJwfu8p5E8W2m2ShpJ8731/d+U2WNSvmE3u6+9rOPus5HZfp07PHV3aHrl5QYLrqCBqqLZaPIK1Dik7GUkZZnONKBLnFGBPJgBVO1Fesh2FwCANxcHbiOPjZjq7FwWQxm4z+9JYa62I5KdV55wl++nF8tx1fn6a9E7w7vncJJxgyPOpT26X+74X7unmxfnB02gmQDjpaQp/XVMXFooyrlFE9u+JkZeaZfSeZF5lv6OyO8cXzzkjGsQrYuAZaK5efzcaVvlubDxFHA4iD6GITpoa2hvk+8dzckB9kZ1yKn1uWRoQUmBvRNSJSnSeEk5RlgueppvGMCAboraeLsCYPoJuSFwqcXFIg9wWEZcU3ows/o/sqgylOPlmQWZtUpN1kAcFxnqQYKcyEBCnOI4JM+ikpaMjGZCDvCctLhZiTXL1yATWzCOjDa3eg4Yp9JcIhEEIMJen57HSKktwQnWUQotxE9dkx7vM5HEgX4IGIXnPt8kA45HNqMfQdo8PKJK4HhyX9BMQ0I7QrbgWBMlF2uTDTVKEMRAwr9zNWICGBsDIOcMIjkhQCIBykrLRPcHzOSozTCT3DCpKAmhvJsBruARrFHgB/D/A59wCPYQ8QI2+5B2QMe0CIvwdUnnEP6Cj2APt7QM65B2wUe4AGZ+Gce/D6XMYYewD9PUDn3AMxhj2Q1N+CQM3IybZAjmELEB3AIFDRdao9aBL/XrEHvreaur8hpyMr/0Qyunl/14QYylIaKoOD+PBd2y9f8eVYh33mUKyjyVYEWZtatW+S4n7Rlqf6+d0a5sVypqbDVRwc+KKhZ/mpWN6ruf1Q5jDmlvRQrJx5YrcUVLWibuvn7u1sMnOVosV89ZcXHvgtYj8bWuCmKmdzqWtgvlDrfEDoRzSv24LkQCjIsC2hIC5TAE7DkkK5Z5wHONKpU4C24v91mZGDKHDlUShL3A9OIfZReZIFmu+uxsCdY1Cv1f704cvfjljvaxYbjN2W1f2uo4tlNOvq5Bff3JkCi8nCrA7cvhP6jzZE7p1nFOs8B+Jr5xGO+8hGEQqrHSEbjzi/e8NL7HUWJvPmLDA1c+SYp6tFH1GDs9BeqIACamBU5QPnP0fBQ//T08QV5wGVZUkx
nzr6Kq3Nyh31tQuAr52vs1Ua9hCi5zlYuCFIUp+wRC0m3hl7XcLTCM4UlyElPdTbg584H/sk8uXvk/nj9ziH4HWJQIHzOYb8pJ9malIdSSdIloVutVq3rueeRvu2aUtucW2EBLtXofwl/Kr8pRj2nPBklgz4RnnAnrvAlMHgEbyzRPz42+eXmfhbpQqO/iTaY6fX0/GcPJcwmG6pwsO7E71Of95EE3Lbdd6uJEU3eN5+Vfq/ul3Wfpy2yz5tM6WLVfis7W5acQYnt3zTszb0+e4R/ugmj3Z7IbSdFcoMMD8p1M98mE2yzN2xzVedfb937Whvqw6wqPrf3dZtDbil7vvKHrVQ3lJeInhZ4991hli67fo4L9b6oV7C4anSgbLiPM9kLDsBM9knNhvmG4ZcZ0eY3kPChmI6HU4QOPXOWZPUdHTHviblVgshdMgjc+b2wIONZb/xch3DsA9kmHtsU9l2csgdl4/zeZnz7cjoDPSlUdmqfJLVxCV7g/rhdvYEOE2nzK2HdJ9emSSS1iH6aSCSDTlhexJe2SDznCpGOLDTdJAYq47x02O2cGJVL8Yjxnd0rsC7M0oPcxq1bSi2N66IgHcI4ZGiv7041hYTQURtOgMfjPn46E6tqrKRHOOB+KBlbhfk5FW9LN6kcEz4ORyhvp9XYkqGS6DLbPgR8/lygf9ejfEweJUEvaPwusD6WxwFNEhvhKfL5RnlWdg0dBntaaiWOMbDMKho6B2H18Wl36QpABgch9NlOL5G+1maWbHumGdua83SPujOHnIdVGxSg8Cy2xTrU0mkthQYbHK9yuSopWvAOpm1CV+bhnZ1xkTZSe685co9zEVrTPF2uRBNf/wGgwgMk5kICiUzncb71oiIEXjf/v04W/xt/qHrg4P0lsub2gmHWOWEKz//i1HumykNUDbkpdvWPW17v7WAZ07yDHAeBwnMbx0OQMAYC3kf2gKS1zjnyB7FB29Kd9Q6XzF6Dd2nk8UvW6SO0CYsdVJBCY2UvQih18FMBoxuHmgcdUT24tvHsXQ5eetIx+i5LN5qjZs0hPpCUM1562gN9zJ9ZKAphQgkItAoua9DtvEjWHO6YA3HbxesIXsU4Pwg7JGEFX7I9ZyE3aOs6AdhjxXuwPeon5GyjV7xg7KnoCzEb0jZPToFXAJlxS3BIeLqDIkgcZuqoSBxFe2997nzxcYqenHo7idMcH5GsqMfZH8jsjPyhmTfox/EJZAdgsujO4SeQ661t85B+PH43l5FeHqL5JDub+9io77DP+RiOxlxx+NgeyUzHydxB/1r9iXuMd063z6SqRYdj9VYg5munqeuCCwdcGPIER9h7+YeLbveRvoqb+Mo+jZjv5XOmfs2D/nwlXgsxypksZ9kclYheyVey7EK2UFjrLcUsq+TAwO5/N/Fby+IzBcauEdKAFq7al3thnqaESX/rDtluutgkS59VQJ0BHD6RbrB0FgAmycKjbErcceOVdBAr1rjlIJmHA2EZ8+rP6f9Q8cOmz13KW3SLWXvBMhkSvrzrHOeJ8IQrbVUGYER26QL5OWJBwdao0Bk/aJmqr+AsNWfjxZZfYShq0RYNdXYUIpFd6qxYAQkOcFK44yDnEZEGIS+ZsyYGEIMBKY9NOlA1wCxpckmKw9iu/M4LxVirIRYLgHKex3O3VRClmGSkyzXKuZUQuYXu2AYQBgJJJdd0zyRRbFa3y+ND7LdPWSut9O+q3dyOGQpwz0cMsWSTORAp5SBXEfstE+86FkYh4263nP6oOvBoeuw4lTgRx+Ju716V4xE7pAIgcwp641Yyon9iRicY5lpHFPoCoiGI5YCih0JSF18RTzRYmpZfH/2gHjYIK/rAWLZB81CgxIl+8NHmBtczYlSWNCcZVGH4AyQSEMmRpNo0JPOV8QVJ4vc54eHjfq6IhjCCoYC0r6Zq3maSJhrnQrNJIyoIbbDBVpuSPHtsCBaogAIr2ioYfFtMf/qmjk8rr5OC6+sjB02G+xSzBFZ8zyYw57Fm0udSCoQlAaS1Mh4YJONt3Vj8PIA2GBowGEz/eAa0OYMEguFPsr4YZO/LgJlsmwocpchpk1l9FpTt56imakEZIzKzEiUExjR6KU+S2uy61/wA5Mrgtjc/ucD7DDH3RWJ1dJ7DLHGKEVdM4Nqq91RrBQ1eZZRE1GsNr3NWtUuUMYLYUCqNgHVsTbSCVb1ORSYeXZMWV8nXPlrW23+YakfJt/6KTmni2um7ep7x+VVxeTbi8S3lZWfY4Q190YeskCErO232HM4X1y/j3CPsw9TtZy9vvr0F9cIwXVr+rWTPBYVkw/1N+Dqh2TmfVGL0ld1wt7eTWxb/7FzoJR6ZY4jQ2lkSP73oykf/mcnsl+apRSC5qYLx+eqF8jSZI/NcIibZspE9W/5brKyL6Yd7rq5Q2QUP5nUCWY3T+HuT/ucE98Jz1/VWHp7qsG25IRzwHcwjigE31BB/8jhGxb9v/z++5cbfwjJy9A9Fcx64HpVtuw4weVV+9NgSlOcjqRvj62bwYgcsN+lfxTrSV53NTpM2p9EiN/N++vpgXS3z/0iBTjjfcc24QHjhwX82nGGzwwy7/h4soCDvWxumkY2rgpu0MVmezr+tgR+n8bdHjehzL4YJPdyLSkKNCsKMaa4icAnsgvvSiJO1t4sRX6YY/haPCqy7El2R5HCwEob59Ujte+YapakQmAEFZEZi+jVk147LBzqhsVlQPKdeKDVqSBXLOxmrbK0DzlxhV5iwVCZfQdJnkGluh46iGhCDdVYM86ViZl9BzxAkaZrWhdQUgQABS8SUD0UHZYlfC2MSzA3LdMCTSOZZ7ob9Epzk+TaMq4MwpSjmIH+dkB2w7lYQBsCAc5Fo+jsA22oSZN/r9pQBJoKL3OXhIRRqIaxnqT1uqK2RtX6QcBXcH/PSUMDTRdFwEJpu8WPWV3tsfrdHrYrZvVlujU0UHDYi/qRVCaYGwV1zjRWEZNpBs1CcDDqF0hzjeOeGXIKNhpOEbsD2NvXmzFPrrPAdKBT1TWLKylaB7ehYTdvTlrBwduRdjzOqoNIW/dcGvWRhQD2xT4HQ6MPxwnNbJP6rQA/V8vlpbmfrMq1nq4tzOt618yek80kg5mZP7681NNlg2y2C9cfSjaXeprVq+rbR5og8nLoEgbbOfAotREDZtgMNRgBM1w/Ff9Szx88logalojgbZyoYZenrorHWA56iPvV8ZQPScsClD3CCTGk4pW0Kx2lTMNeuitt3ANn0FWasoAfhD2FfdFMH3oLwo7HR3iVfZMg6HNjCBrPwQn6Jg2pu4cDsSGHflxOnz8unW/JKVsvaDWlT6gkfljH+SAhDbaArV1HgW2/u7tDd3If/edEktOvoW8PR8+pQwMO/BP0uGpV9N/+8euXXap7R0Eu92exnKzM3nq91XyfiuUf528r5Q1YvFvNZ4u+2i13OzTthcli1WUopf/wZeieIJUmhGZwJ+/uIinrXjVVy/m7iYbBab0ySqXLkK+Mx9143TOjWqq2kwoDDioZ8DO3WVevEh9D5+OD3cFp2dnVI/eVTYQ
ftgOXw0YWIbkdRWwPPYMryzbdFMdBmveAo+fF8kkts10c/XrJJmlf3eLNLV4wfqN2hus2g6xpsnRCEiirrT44UbhvY0nPhXhS6dwZtYmLRZnAmlgsJXbFieWMpizTSpzY9sT0bu9YPDEtGccq4GAx0Kr5fB8xHUWf95sPwoC11k76e61Gv48wbrMtd0ljq91/a9V1M89qgfk5nRb6j5ut3aNJpVUun/+f4wa3gvDmwv+WF1AZzy8vfDHLiX0ct9El31it1XLdfE1h19Zcu5tMp5uFdF61msGG/P+q9zRkSmwd0/qJf4Y/N6rGF7W2a5qXd0Wg/tL68Vzagvk+WdcPV7/6394r/7G2YsgaIkttGvpX1+zj3pt195wgYLJ7sxNpHRyFYNRcW5qpWk++md4iQtCqv+GL4+MdICO/+MpPd6seqP61DUKHd8JeA36OvEzMahsGd7LgUM+dj9WSZseSvS9iNSvfujK/QqfJLd37F2CT3LM5ntWqN4e1pdwbZoRsqz+4W02tPd/n13WfwXeXKCKxA/idSXWqaVYmM7M6UQRQlmS5kAZoCAxT8RJFKO7r7xigobBoWp/18kREDFnx1vBbPK4e0kdr9a99DKJ3iUHBUNUJTUBs8l6yEuYwkVmu8xTmiOciIgY9nRgH5tdBEch/xugaMJhNVrpw9lAfgFfZeRSJEl2SKiRIN+uZCpQYSQzFuUmh5jE7PvY5HArVAzVNRi+2q9R2Dmept8we188+vg5rOnoh+MJV+zyFDcbd8g0hFUgo1jKzQjRHWcRUS+pVb7ScqoevkCPmkpo4bsXX2kzN/VLNfHgdlux7EfCyCprrVGZNSgJ5KRw1r+EFMEukUtSCywAGaET21Yza3CEceaj3e5TREW+OrqfJdFL42DqsS+1FYMuyrqrloga5TFlX8QJCJJxbxkVzCjCJ2ZTbY12t6d5lXfh8SeIQjCeZeFTl0fGmiHLo23uhErBgpC5C3KAtLtsn3t+c3w7Bw8cwhvG1B5OIe9qrOlOtlIG9065d/2maMplCI5FK69O+dwLDp093d2VYNxDybd/r4A4Fj0sEpPnZCryhfQdpJFTUerhjYYgzuIfHeTv32Orknxdz0+CpA7QBlzlQjvg8aLbSytxWyFKLydeZmlvozUpa+PQGgMvP7Ew0bdInmzywxjH0Qs76CYJXZZgpkOXtwoVJ/eguzXtZruLlEWsHd9AKpo8Pw2ER8tuPf8rVZLaYmlvn089zJ0y+lS/Uk1lZAvyfk373YIf7bHOTaH/r9Lszb0w/BPrqOX3L5spPn/5iP/2pWDzXXwzs74pQc7OdAdbXLesfDQjtmQJ/m+vbF77N3+Fdmr1Htp5aDne77SMG8qcmX29nprvC+ChS2ouUXhiGM3zLGONEAMIEH7aSDkb14YkK69t+rj35dyCHC5bwPNjFO3yYb8Z9YW7U+rHM+1g5FzpQ88z965qmru3yqreK/KbMC+nzgyGvDJ39EP/cOtiyurZ+KL90YeZJHfIsT8LS2pjJejKrXy7MMi+WVrK69+t1N2YoeNUk5vElr8RCvT9jNTCuhAbHWMYwHeAeieCdvIFaVetsfS96H468b4/UB6L9++7vSOLp3EsL4U0k+eBw+iAGPbjVlnj6zYvh6ADZ98sQP1KT36ZMn1rDV/+xrPG24jdfO9zmPGo89YZgseZcd02zkB5/uMc6QFC6F0HfgQugagavJU1RLxYGRJYwChWQRhqt1cW6APyhf0QOBQaOM2EjgLM9UsDH6wIo8nyiza2D06r6d0hnBDj6KM5ES9+bg4cqbjCIeniLsgAp93ALR5MBFLi/5yOxpe03R+Tq/6+5Ws6Gi/r8gViF/Eykhp5xw/EwYt7k5vXEQ5Rju0fTifchHsrhmDAjmvdHcFEuEgC0NBkD2hLrYsXDoPUVB0M9JOhPjNC6DMIfoYgKaKQEGuJUudaDnVBEhmCCAVOpICTn6eWGIqA3Wk7iYQw9NOkrCkNDe4QiftitnW/wzE3sj2oDnhWyv93q188Ceks5JBBC7MaxelwlohHbQnbojgsV0wA3hhyokNes65kClgmU3rGhA6304D0Xj+5e89VaVctxm1HO4qhe2/eXDofPq7WZVWxk/2Y7vUb3ibvN42JRLB2HGa7a+bSSzKzrgHbY5de7Y+WQG9zoP2ZZ2OuWs+WT+8el2vd2ebEs3Xqr0km5WEw3OSa176/ZhW1bMHjeSfBJO17GbWQx36oxK7NyyFRzp5IuNUXcIU+LskYOuNpjUBU0gvVDGReAycrYHXDv/lSXLFcjVtZub75NVo+Wif6nfL6/7Em88MPkLu1g63NUe+pQmpuncnXF46razFk7P2thSojV6/3UoHBuTFZ/ZbWW+jtc6r89huDTl3/e1I5j9wUri/epKQm/3L90PPSg5fr8KQ3eYxunmE92Hq3eQ2y24NePzar/58OvLbTmjizZZPWH/e9v9l7/5d5Zt8fw6WFSPlt5z+XjfDsG93rA6arobLQj7EuPO0tNlpU+6eBJ+hSChvluVYvVZOcv7bPXPTf9y+f4p79++bvDuJXfH/9yyD5tD8Dt/tXb29uaiC1zrkhUHeOnB7M0Ozjn6eIBx8fAoIijVQ0cynyovQcnwMCIPeyqG51rlqWVvveF3wgfosNS9a+nVgSX2fwIikxkPcvBWaw4ZYpaDglhzJRFSvtV75DCwNhegQLq6CU1y39ppuoq8zF4WDr/tWBQlgME7gwFlKjUZfxr2kwqpyLhGYPKMK10zIoSi2gvAEbg0KCUgaTsIxKhRovByUp/zR70YoDE3Zn/Dk+L/YlQ6vpzu4xaRt7U/Su2Z2bwdpB3S51AnBqS9mM9d/WJZ97uTaB8sjQPxXQngRqa2EO5lRiHpcpfC1sQjJDaqWU5RC+/FuI0IYYZkJKMGppuZwuNP2APxtAgDd8y0P0z7O1hlYXbgHDC4Jb0fhPFwGHAERGKpMRonPv5l0/dJmD9tJ0xasGbaEq9MPg6CLQD524BRFLw5t8eK0qGukooLeyI8opxyISVtQnL9/pMSLxTJuT8x5YJIY00AT39mNIEcUEESBXPTUTdBPnViDxU4HG6mp63wKF+UMv1qotDtdCPi4GOXJWLX1vdGClLXhHWhCDdTyOBCeUppcJIhVIQsSaRMV/HYlLcBgyx4Bi8KJWJQ9nWBHUOCehdD68pe4cISxohYT+bCIpE5QCmGeM6VSgeDPx+z1AG5ok1jZl7MwwitP5redg7JTcqyU0ZShnqZQfkNMFQ85wDA+zxj3nqiU/u4YnngRwgFiOMio9IG7wUDl62XEFZSg3vcXDMTZKizHCVpangUVuueG402XQN7jpvA8z7iOkKAVrul891lUdX4rJHRUo0sD/rbpcnTmGSMparjHAGSMQuT21PiqbQGzRdBbqcOjQGO0YxJj4ipetayG1Pd2UE5AzkrNeSJLOnW6lUYCu1pSA4HrkF75udUPKAfzLg/mIxCjWarJn3SW5aquMYwlSSHrkJpwkDkhmgUg5QxBYhg2K0ELnbfLdeTCRGIh3ZI8Hpiuld6t0YGZ
pD05vjq1ACpTYc5RKjNCI39xOiZKAlTKjl9hFh1wC195igc73UJlXEMzNUpz1VjcA8UZaBpkrY96iMKLulZ2pLGjK1Q37EKNKboPdMcFrGEbCkypiuWS1TkCaC5wYpTQWEOmJXnkYV7xB8L2VNRDnf79qLQstcaKwkJVk/6d7q5nmqjeX3ucxIHo/cnEHfmSbp8HyLQDiZxzCsybv2o1BWEdzkVPX8KI6/U4INSDmWVrRHZOjA68G0abjU89EHPCn48PD0iWLRT5M/JpsLZR/+vxW/VzfdQOsKmwBayJSuN0xdUgnoiwSSAIMpzQ1hKYmo4VuAYI9JWBM+oOQHq2WuKPFp9rz6c+qj7CrbAdLSbYB1rjIhuoxJYZwAbEUQSDHUKqLiwb0x4lbXDzh4Q07BE0V0yDv2E1oEiAoBmmjW4zNW109MxjKaA5zCnMRsCImGbGaY1ctCymeMmA55155CWsYBcCpTo3vptFRnCeS5kjjDXMVMp5XUJzi0ciVgXnI0JDmLEQsge3gLA+NgekVxXrHcsD7O5fncqdlk6h7yFzP9Zhy5+wlBsCmO6/UDcX9uAlWJh1Tc9QbN1PV93UEz7rf6g2Y2M1/oTWfmyy6a96a7NOeoO96lUQ1GUtYnBPFx5w1l2beuT2L/TuA0412GS+5Pa7k5cPbK8DDQPVypw1rRw87CkbWi5fHoTFqiN905Sx3Mwi5ibzn1D0Bzgv1RS3shuyF1F9gt2keCbO6741s95lBkCz/PHMgTIZufHNl7uI0vYYyzW8LoZjj7iTgsMIPzVPO5YRNJvsLEDFY6fwmFoJ9kIyVIE5ZqIA01ipGI3n4syUADb1oTdN2/ISssRqsdeoT79yKoKbGr0bnjIDUu4t7Nu7CETZAGRhOmNRcRM+QE9EgpAglyAZdNc6ZeR8p37dhlpZeOIM4l7s+xQijJUmAIpJnMQEzrWQzPbsBJdzLrmR6RIndFBC+tZyJUnpOew4wancBUAmIwQYRF9OQTKYcEH57wUB6djMKs92txd60ErzJtcg5T0XOXIAKSnBBMOFBSqYjSuZW7G3IHInWBwA3nMcj9rt2hrEy0IVZMY8r6Ce4wyQViltdKnaMsokNceA5xIdmA3DA4rjtGYhV9195QhisBnnMNu/xc5jlMFGc51CbPMIIRvaEc7EHvZupkj95Rzve7TpxkrDK2Ug648sMdjEqSpTIXKYyonjPuR1Vhmzrbm4cU6A0XJRGevevUSUZrioNc95LpJKX2H0AwRcQIFjEzmlNfgAdGNYRGzUbJjGZ7OMEu07gGZT2SFXtI5Mpxaw0b4zrNkgzYHzOTSSQiSmcIfce/lIHDCwPqWNQC6wPHaoBQ+XzVfa6512NzOeuU1D8GelcNy+xPVGFbdu1YtXkYeNOGMKmWniWuf/FEG7/4uxoRtTU7YxS9ANrQ365eAAezGulVR0IyZDUyoDuiKIXYAe5z1ACNNwkuEQR74aVbKOG+caTyt76Y5cRumgsQ7B82bfqX94JLzYizkQSX/NIe6bsK946agr4khKARjSfof8resSvyTQr+mFfwB4EIZXXQgO+ZRFFsr7da9w2CQpwzn5oyQE0S6jh2eMZwgJpH+BkvgppvEhSyKxjkXOEQPUM5V1FakrM9HIkj0QQGaSbxcknC4n5kWVLEl9K+drh3Lgn1fFvtjMBTyPs9PJcjARg/O8LIuBCGfE3haIRB/06+jywmwg5LG31fCGOjQhj3+8YA31u+L8I490xqIPbLhzsGYTzkm+35enSLjI07BxuWgcwMPTyb/uGZMrPe1ILxN0sM5Ub/zNxft55irWrNTe60tQ9x7XlFz1QO8yjaIe+vdJ7sNWr8JHOjmwtPJv06LbqzZtOAs2+f+bMnWnFnqgWwi73xxufuXOLLTfdP57ccFpC1W91zUvIuHIdOyic3zfg3e0Dc66elWvQPpJcPmuc50jokPzKWuoLTSLN4Pc2wzebsdZsNHJEjQhl7+Rf5AU4e/bicPn9cKv2HKQ/A7g0u/Tut3N57/lTrBgqk397d3aE76XE5chPIL+5mA1tBVWYDR6Cf8GQZCjRTwzxg0UfkcFblmGfVmJnOyJldx3Fp7id2/59vvUHwd/6Beqd9p6U7lXc6NYIQUyYlmTqmaWCacCFlBgnSUDXFwT78IiCL90UnDg1IaO7aG8vhl83ELhQ+UBIF588PZGcL3Nt/r44QoAFR+bpG2F1RudH5qha1W3W+A4X561ZYTQCbP29kucP/YUvrH3b+Guk5ZOflnwA7p+7vPiw7/iEifChcUeOb6aWFRBkgEBCue3hULrR+5jz6EvQyP2jjGY9fI7ONA+rHlZNmHWG7OkD53dGF41N54+Szd9sNeOQ7lcbliHUj0wyKlHSksTRYJ9xhAKWpztoR6yeQxgj0vR8HiOPDu3ftxUrEHklnP1jJ7m4sfeFAhlmiJyu3a0qKflDv2APpGc7npR76Qb2opa40MMr5xGL8t//7V3shVdm9S3YD+bKU6e3I06jerFJBv1MLJ+Wbb+yJdvFOZw/KKnlYSpyCMnCf5k1bZZom2BgFIU/TjG4ztPv+bhzJKcc83iICwj40adyfPR1N1u/hk/vBb3Y1ym+i5G9nNkRmKS+OzvE5zGFN/K6GwzBWdmAjKcgz2CtIMRAmjJhUU8wZ0xFHbDDkzdSlm5LibrA2lOh1eNreQc67cwZpZmamlXbMx4PiO/UqMw4qKELF8l7OYYpBgiWERAJOrXIUMefQ7z1DwVDTCuWPHtFT/gAg2k3rKlsWEBiW2+2cms/F49JBQi8ni/UpVTHzfWEFlCnha4Xk1G9JKXZ7RK8YqKgEaoZMpnpF+FjQhKmMi9SQjGu9WyuzL9FAR4vlkcFNynyjpOFhe+1Q4O2Idnf76WjvusyXV2W+Gcm9LpdW80polgKODUdGRuy43Xbb2PTbHroAaKAI8Igq/jMJ1Aejpnbl9iPFepJbqjo8uO02MzUZcKfD3MEd+yAMmRhg3CNdPC7sqtrT3B7rrCdRM8MSZjmUlapKqbZecZOCAHw76X5qv8W3mbZnFdqFTXRzn0MyDDvmT53OGMhr+PTpzv4JcNZt2VsnCZ6xfr0CCjRFoYGyTHpFOuxSpelkPfvTO3sSHHT2rojPV2UzudWNesXeEmFpXyogEFVKoohFUINBOVgGUqRC3XlolBSpt+b9i8fVQ/HNfaYPwd3JbtcLQVDyfMABNll/zqLMEsR4ziA2kMaclMto34pCIOAQDE1jRicK/jXjWA9RNq9SASAlGBCnKs173aMyBBMMmEoFITlPG+/O3lmHrQTeUzqjoHs1AvYg6odKRCDyHPJFH1Fi9hYid15kpitw82Uxnzz68ZGqF+/ezO7cVYsOsEn51morhKHLXQHO1XFnacgARyChWhjEoUgYbzptlIOeOZWccSO1VcEi+oLwYEQR40M/eBPc7IEJXZAk3Ymv1Xpi7BebqWUEcx9l4x50czjKqKQUJzKFlEpELMrSrPE4WtGZOvspZ0DzmPOlOfHUNQ5haKZ8aJDlEeGWkcBsaDksiiezzOYDT
nbxzu99YLgR0xgILgUGCRHM6qxEJAoDWaOQ5ToRkEiuMBGYROwBJ6jf0xO6HnMBHIZ6t1wuuxviUD9YRvfso/DiPdsHo9AKViQTnAOQZ4I5FKpm6KfgVlXMDYGYG40j8kII/WJaEqgPgU248GIDgVtt17ysivWwJ94Z9hCmlFvGRyHKMsVIogGuOaCAkCepxlABnVNgImIPe10wMQx0VWumwl6s1+QF5pcaNV+t1XSAwYtPpT4UgwRSRkmiRC5VhnWiEaG1FAYoTwwgaZYDxqCJaHG0405fkMEkIIPJ4fbrCBmgmtrNUn30IXDx3uMD0UcoxpCQRBqdI2kN3By0nSWRMBaMFpspkPbzEbuEEtTngCjQJBSCQAADRamgG3jtUNOA9sJDxEcQn0CUaCoN0qgkPqqJrwxOhEmx6w4sAIrYbEsQvzmTYKHmTKGwQYSm/gjs4aF9084mr5zz1emM4oZ09No8AsBuXtPBMTQerD08I2mHIji8tVYmtypU9W8PbW4gY/uO+7d/+/1n4tHb7nf4XaSYDK0h+lQxz6HTtBY6cqrYOBJkp5N0aVZPyvMBInDxmfgHs2eXcmg1Q5xJK4N1kkOBGl80QEmmMqqFSEmmRET2zMTAFx3KsgeBoJo/YfIyNcPJUvvQu/wgB4alceVSrjHjknKOEmEVO2Zyk7gx1w5YIhEZyxIuoTEOWpBHlPvMzxYIjW4KuZ5Pmu96St4WcPmZxYOPrYtP/z+QrXEJGAAk4RZ8yCqgCYaV21kkMqM0AShDaQZYpvKIs9cRBz5bo2wIwFBeqjyxw6/fc6LSBDddJeDOHh179MOnoR4V8/Uie6Gbx7l63w9PSbW4/inZHZwZNnHra+C1Eh9I8du/7irUZqOftRgBplLQQXQkMGw0NOLuiE7Eo+WTc/vf96/25crHwXtzD1puye0ZTiQ1yMpk4LhlLaul5HlCMm60VFzmPKZ7EPvRkUCGaXBQEzmxtD5Xy0BHNP1gXIxkrE0D6xkhN02zoaWZFetOB8FiuT5woec84p0N7p1wuNsFO8ZWgcifRHLmVoFvf1oc1i71tPz+6cv4T0tng/un5fIaa1IG3vS0DD3CTd7m+/L+O82CS5noXBDFEXOahWk0C0YSQLkBlrVxGnXkJ/CnNgUajIiAeksiTHBGzayy0fr+XzuYoQkegFskuwEEeAtQ8zra7Kb24IzF8w/lLZFYEoiYG/nhVSEe3XifC3LrqlrtjSFnhHv3tRbaLQdUiPoznh0Wr2U6gqG5IjHG4w0Eelr2BzjYQ3CVJjn16U1BaDhQnGbpQ6I3Nxktz4oXr0QDprcXbwpGJZuzMhbeFG+u3ECAeiiLFV30/fUAX1x0cahIr5fK5BNfjUa71eirVP4EIiIhORaZK2zBEII6BGRyllCsXIGWRkxHLBiF0Nf9W2Og64MPRBbblpDX4N2caKPtR30Mvrf4tsMgJyRJJWQszYzDIKwNEA5YkimkuYaa0jRm9i0dxLcpGYKQBkwQergJMloMrhZuuHBaFAMYHhbrvgoYSkJkIrVWqWIlDHENQ5HyhKtMIEDSVOcR2z+RASdsGs92vSBNwlv89NuAnnnEEMqLp76bC2upjxKmRKaslLFch/DGC2LlImHCUGmE4SBihxxB+/EVBJpZlC/VALDDedA4GI5Vn1eWziujltrPgUDvrOipRB0Frpkq04JWqBM16ihlCdAmtWqYYCiPORhV+i3n+JDntHwgfs+5AM+5jtZcx1Af04QgwBQTJfXrDBgB0jThEmcupitwFtHzCgHEvubTtK/t0F8G+nKJw0XOmZhO6iZczf2BDHVE4b0xFEaSnGcwrxlKk1SFmUpYBvJcYiQzExFS0qsoxyygw7CAFCOXZM1tMHa3WLp1PphBXgp+Zw6ECnAcJdaeIlle8TBVAw4imBCZ4RwqTnJtIgLOGzJD+LCxYNB/4HvbRg247TVrT6uV37egLol/Z9BjCCSIAQJIBb20Fp9K40TDlClkPwNjNrWk0A9bB6LWoSDARTXd2wq9ZeFGSeoH5fsL8DtzW1Xw4yQxmudagh78iM6SPCc8N1SwXJCYJZOoDz+yX/LyES1VR+uzUplarI3+ulQWfR4K35nXqkShdH4qnjrfhEMhrZ2nQiDhioNyyVOMlIxau9kHId2kK/eSdwIS+IiuZ6MFYlpM5n6NEH5ndRwVAqnrmWG5YWXFNgiUhudJhnJAENVAy4h8kA+r02TA6ACh6uFLEsQvIDBz4ng54IIX38LqqGaTEjt7/y4lGtifXedRzeoydm4hmTKWq8zhlUSMZcpBORsbOlNYSB88vIh9tDB8yLK1mS18GL4/dy4EgODEOTRTUAljUrNCbaxForiB1mDgOo/YRQP5KiEN1OmGZuiSK2KE08naWHr6o2xqdeO9YZBb6OUwBVp0MSjsIpOMghQbLXlGIoYUCPI1QhhK5g6EFBq34DWA8MmkX6fFfR+CEF28Y+bY1s/lyBHElLZv9AZ7CZpAjfJUppnCLGZHoc0cr9ZAhjJgm4iAROZReoG/VIUzyKB9XcpuNl9lk9V6azpuKHP3rBpqs74eWya7AzNXkzQMgZfvCbFAITiGLOUTpxm9Eo4wWFBuYfX43e2aa67rbvO4WEyfR4vOcrlfy8V+bZbaB+pu5n01QJUcDRhnwKBu0lN6EvzEHp3X4lQEywCz1VqNl2s2y+tjcbdz82qwiFF/rBFkZGhSXyDHDAvwmblXejoZLxSb9fWxuNvNeTVYRH6XQopDru63QONZQeCqpn0IXLyX8QjTGiKRIJpmuPZ0syZRnbNEpIRh4Bqpwpi5DmjYsYgPJ2mSgG19UR16X6p8Xxbfn1d/Tn0Qvkcfo2VCSYoBcE7GDgiFMTrhWOdZmqNM04jhFurNcoU4kDIKWSBn0O95edEYtBaCGbDB9+hhdB0oJUCoTlulbao8oAlVPINGwxzlMfNuhh2xSCDSEpxaQ64o+WFp1g/2q7LUz/+i7y/X1ZXy84RADTNe4bBJns+Jm6NEsZtpLLKMRww8ezNXIQ6ULtaDqjwUXlHAz0JE+/Ypv/jc1yP93K7r852AUhOUdkdrC53liZYSQZNxiNKIKMTcQyGRQ6OkaXxxsVNrXqpdVLNU+Vzw4kF4DBdkMtGYMVxl/NOmiATRLEFCMZwqTLSMmPEvfA8NHjanDPZu2tUxYbQumtVMLdfZaD001fKGkcc62/36nTQW+gMTWQ4Bee1OmtVicl+sZ35GIn1/mdmWJQqZIC5EXmVm07omRcgMJIAApo2RmcmjJoL5PBGG5pns6i0zUqYYjqA8zlNH/tFyxXZ9/cPwTmIoEFAv8EzglQSewzLaSr/JiNHYLK/vOtrNma8GjJIN5loTMHRhX7t8fnxa3ftxNPoe/dcYicRayRCpymtTl01JIkTiSpW0walWEES0l4WPPxqac8hPNmvuzML5Ca7MfOXaHI+UH24W2D8Ou0fPXg9H9IQzYyE4no4hDltwsD3aS14jN2KCcpzwlCq7BZYbpWlTPpenNDEZk0JzoXEetwVHj/yhpj8yUMSJ4OEZquMQ
fffFV/N98U355529P3cdh66fFFWSaiNKwLU9X2SecCoI4poJrSLaplQOkqKDBZsw1HA7ivy7caXjxbrz8b9a1vrwa5E5QP78/wE=</diagram></mxfile> \ No newline at end of file diff --git a/docker-build.sh b/docker-build.sh deleted file mode 100644 index fd056c390..000000000 --- a/docker-build.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# author : titpetric -# original: https://github.com/titpetric/netdata - -set -e -DEBIAN_FRONTEND=noninteractive - -# some mirrors have issues, i skipped httpredir in favor of an eu mirror - -# install dependencies for build - -apt-get -qq update -apt-get -y install zlib1g-dev uuid-dev libmnl-dev gcc make curl git autoconf autogen automake pkg-config netcat-openbsd jq -apt-get -y install autoconf-archive lm-sensors nodejs python python-mysqldb python-yaml - -# use the provided installer - -./netdata-installer.sh --dont-wait --dont-start-it - -# remove build dependencies - -cd / -rm -rf /netdata.git - -dpkg -P zlib1g-dev uuid-dev libmnl-dev gcc make git autoconf autogen automake pkg-config -apt-get -y autoremove -apt-get clean -rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - - -# symlink access log and error log to stdout/stderr - -ln -sf /dev/stdout /var/log/netdata/access.log -ln -sf /dev/stdout /var/log/netdata/debug.log -ln -sf /dev/stderr /var/log/netdata/error.log diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..4362a273e --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# author : paulfantom + +# Cross-arch building is achieved by specifying ARCH as a build parameter with `--build-arg` option. +# It is automated in `build.sh` script +ARG ARCH=amd64-v3.8 +FROM multiarch/alpine:${ARCH} as builder + +# Install prerequisites +RUN apk --no-cache add alpine-sdk \ + autoconf \ + automake \ + bash \ + build-base \ + curl \ + jq \ + libmnl-dev \ + libuuid \ + lm_sensors \ + netcat-openbsd \ + nodejs \ + pkgconfig \ + py-mysqldb \ + py-psycopg2 \ + py-yaml \ + python \ + util-linux-dev \ + zlib-dev + +# Copy source +COPY . /opt/netdata.git +WORKDIR /opt/netdata.git + +# Install from source +RUN chmod +x netdata-installer.sh && \ + sync && sleep 1 && \ + ./netdata-installer.sh --dont-wait --dont-start-it + +# files to one directory +RUN mkdir -p /app/usr/sbin/ \ + /app/usr/share \ + /app/usr/libexec \ + /app/usr/lib \ + /app/var/cache \ + /app/var/lib \ + /app/etc && \ + mv /usr/share/netdata /app/usr/share/ && \ + mv /usr/libexec/netdata /app/usr/libexec/ && \ + mv /usr/lib/netdata /app/usr/lib/ && \ + mv /var/cache/netdata /app/var/cache/ && \ + mv /var/lib/netdata /app/var/lib/ && \ + mv /etc/netdata /app/etc/ && \ + mv /usr/sbin/netdata /app/usr/sbin/ && \ + mv docker/run.sh /app/usr/sbin/ && \ + chmod +x /app/usr/sbin/run.sh + +##################################################################### +ARG ARCH +FROM multiarch/alpine:${ARCH} + +# Reinstall some prerequisites +RUN apk --no-cache add curl \ + fping \ + jq \ + libuuid \ + lm_sensors \ + netcat-openbsd \ + nodejs \ + py-mysqldb \ + py-psycopg2 \ + py-yaml \ + python + +# Copy files over +COPY --from=builder /app / + +# Configure system +ARG NETDATA_UID=201 +ARG NETDATA_GID=201 +RUN \ + # fping from alpine apk is on a different location. Moving it. 
+    mv /usr/sbin/fping /usr/local/bin/fping && \
+    chmod 4755 /usr/local/bin/fping && \
+    mkdir -p /var/log/netdata && \
+    # Add netdata user
+    addgroup -g ${NETDATA_GID} -S netdata && \
+    adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_UID} -h /etc/netdata -G netdata netdata && \
+    # Apply the permissions as described in
+    # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories
+    chown -R root:netdata /etc/netdata && \
+    chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \
+    chown -R root:netdata /usr/lib/netdata && \
+    chown -R root:netdata /usr/libexec/netdata/plugins.d/apps.plugin /usr/libexec/netdata/plugins.d/cgroup-network && \
+    chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \
+    chmod 0750 /var/lib/netdata /var/cache/netdata && \
+    # Link log files to stdout
+    ln -sf /dev/stdout /var/log/netdata/access.log && \
+    ln -sf /dev/stdout /var/log/netdata/debug.log && \
+    ln -sf /dev/stderr /var/log/netdata/error.log
+
+ENV NETDATA_PORT 19999
+EXPOSE $NETDATA_PORT
+
+ENTRYPOINT ["/usr/sbin/run.sh"]
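For reference, the multi-stage image above can also be built for a single architecture without the helper script that follows; a minimal sketch, assuming it is run from the repository root (the image tag is illustrative):

```sh
# Build only the amd64 variant of the image (run from the repository root).
# The ARCH value must be one of the multiarch/alpine tags that build.sh uses.
docker build --build-arg ARCH="amd64-v3.8" --tag netdata:latest-amd64 --file docker/Dockerfile .
```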
diff --git a/docker/build.sh b/docker/build.sh
new file mode 100755
index 000000000..908468d39
--- /dev/null
+++ b/docker/build.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+# Author : Pawel Krupa (paulfantom)
+# Cross-arch docker build helper script
+# Needs docker in version >18.02 due to usage of manifests
+
+set -e
+
+if [ "$1" == "" ]; then
+    VERSION=$(git tag --points-at)
+else
+    VERSION="$1"
+fi
+if [ "${VERSION}" == "" ]; then
+    VERSION="latest"
+fi
+
+REPOSITORY="${REPOSITORY:-netdata}"
+
+echo "Building $VERSION of netdata container"
+
+declare -A ARCH_MAP
+ARCH_MAP=( ["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64")
+
+docker run --rm --privileged multiarch/qemu-user-static:register --reset
+
+if [ -f Dockerfile ]; then
+    cd ../ || exit 1
+fi
+
+# Build images using multi-arch Dockerfile.
+for ARCH in i386 armhf aarch64 amd64; do
+    docker build --build-arg ARCH="${ARCH}-v3.8" --tag "${REPOSITORY}:${VERSION}-${ARCH}" --file docker/Dockerfile ./ &
+done
+wait
+
+# Create temporary docker CLI config with experimental features enabled (manifests v2 need it)
+mkdir -p /tmp/docker
+echo '{"experimental":"enabled"}' > /tmp/docker/config.json
+
+# Login to docker hub to allow for further operations
+if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PASSWORD+x} ]; then
+    echo "No docker hub username or password specified. Exiting without pushing images to registry"
+    exit 1
+fi
+echo "$DOCKER_PASSWORD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin
+
+# Push images to registry
+for ARCH in amd64 i386 armhf aarch64; do
+    docker --config /tmp/docker push "${REPOSITORY}:${VERSION}-${ARCH}" &
+done
+wait
+
+# Recreate docker manifest
+docker --config /tmp/docker manifest create --amend \
+    "${REPOSITORY}:${VERSION}" \
+    "${REPOSITORY}:${VERSION}-i386" \
+    "${REPOSITORY}:${VERSION}-armhf" \
+    "${REPOSITORY}:${VERSION}-aarch64" \
+    "${REPOSITORY}:${VERSION}-amd64"
+
+# Annotate manifest with CPU architecture information
+for ARCH in i386 armhf aarch64 amd64; do
+    docker --config /tmp/docker manifest annotate "${REPOSITORY}:${VERSION}" "${REPOSITORY}:${VERSION}-${ARCH}" --os linux --arch "${ARCH_MAP[$ARCH]}"
+done
+
+# Push manifest to docker hub
+docker --config /tmp/docker manifest push -p "${REPOSITORY}:${VERSION}"
+
+# Show current manifest (debugging purpose only)
+docker --config /tmp/docker manifest inspect "${REPOSITORY}:${VERSION}"
+
+# Push netdata images to firehol organization
+# TODO: Remove it after we decide to deprecate firehol/netdata docker repo
+if [ "$REPOSITORY" != "netdata" ]; then
+    echo "$OLD_DOCKER_PASSWORD" | docker login -u "$OLD_DOCKER_USERNAME" --password-stdin
+    for ARCH in amd64 i386 armhf aarch64; do
+        docker tag "${REPOSITORY}:${VERSION}-${ARCH}" "firehol/netdata:${ARCH}"
+        docker push "firehol/netdata:${ARCH}"
+    done
+    docker tag "${REPOSITORY}:${VERSION}-amd64" "firehol/netdata:${VERSION}"
+    docker push "firehol/netdata:${VERSION}"
+fi
diff --git a/docker/run.sh b/docker/run.sh
new file mode 100644
index 000000000..b4cf52c7a
--- /dev/null
+++ b/docker/run.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+#set -e
+
+if [ ${PGID+x} ]; then
+    echo "Adding user netdata to group with id ${PGID}"
+    addgroup -g "${PGID}" -S hostgroup 2>/dev/null
+    sed -i "s/${PGID}:$/${PGID}:netdata/g" /etc/group
+fi
+
+exec /usr/sbin/netdata -u netdata -D -s /host -p "${NETDATA_PORT}" "$@"
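The `PGID` hook in the entrypoint above adds the `netdata` user to an existing host group id, so the containerized agent can read group-protected host resources. A plausible invocation follows; the `/host` mounts match the `-s /host` prefix the entrypoint passes to netdata, and the `PGID` value and image tag are hypothetical:

```sh
# Publish the dashboard on the default port and expose host /proc and /sys
# under /host, where the containerized agent is told to look for them.
docker run -d --name netdata \
  -p 19999:19999 \
  -v /proc:/host/proc:ro \
  -v /sys:/host/sys:ro \
  -e PGID=999 \
  netdata:latest-amd64
```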
diff --git a/health/Makefile.am b/health/Makefile.am
new file mode 100644
index 000000000..829a41b3b
--- /dev/null
+++ b/health/Makefile.am
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+    notifications \
+    $(NULL)
+
+CLEANFILES = \
+    $(NULL)
+
+dist_noinst_DATA = \
+    README.md \
+    $(NULL)
+
+userhealthconfigdir=$(configdir)/health.d
+dist_userhealthconfig_DATA = \
+    $(top_srcdir)/installer/.keep \
+    $(NULL)
+
+healthconfigdir=$(libconfigdir)/health.d
+dist_healthconfig_DATA = \
+    $(top_srcdir)/installer/.keep \
+    health.d/adaptec_raid.conf \
+    health.d/apache.conf \
+    health.d/apcupsd.conf \
+    health.d/backend.conf \
+    health.d/bcache.conf \
+    health.d/beanstalkd.conf \
+    health.d/bind_rndc.conf \
+    health.d/boinc.conf \
+    health.d/btrfs.conf \
+    health.d/ceph.conf \
+    health.d/cpu.conf \
+    health.d/couchdb.conf \
+    health.d/disks.conf \
+    health.d/dockerd.conf \
+    health.d/elasticsearch.conf \
+    health.d/entropy.conf \
+    health.d/fping.conf \
+    health.d/fronius.conf \
+    health.d/haproxy.conf \
+    health.d/httpcheck.conf \
+    health.d/ipc.conf \
+    health.d/ipfs.conf \
+    health.d/ipmi.conf \
+    health.d/isc_dhcpd.conf \
+    health.d/lighttpd.conf \
+    health.d/linux_power_supply.conf \
+    health.d/load.conf \
+    health.d/mdstat.conf \
+    health.d/megacli.conf \
+    health.d/memcached.conf \
+    health.d/memory.conf \
+    health.d/mongodb.conf \
+    health.d/mysql.conf \
+    health.d/named.conf \
+    health.d/net.conf \
+    health.d/netfilter.conf \
+    health.d/nginx.conf \
+    health.d/nginx_plus.conf \
+    health.d/portcheck.conf \
+    health.d/postgres.conf \
+    health.d/qos.conf \
+    health.d/ram.conf \
+    health.d/redis.conf \
+    health.d/retroshare.conf \
+    health.d/softnet.conf \
+    health.d/squid.conf \
+    health.d/stiebeleltron.conf \
+    health.d/swap.conf \
+    health.d/tcp_conn.conf \
+    health.d/tcp_listen.conf \
+    health.d/tcp_mem.conf \
+    health.d/tcp_orphans.conf \
+    health.d/tcp_resets.conf \
+    health.d/udp_errors.conf \
+    health.d/varnish.conf \
+    health.d/web_log.conf \
+    health.d/zfs.conf \
+    $(NULL)
diff --git a/health/Makefile.in b/health/Makefile.in
new file mode 100644
index 000000000..811f7f239
--- /dev/null
+++ b/health/Makefile.in
@@ -0,0 +1,800 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
[generated file elided: the remaining ~800 lines of health/Makefile.in are standard automake 1.14.1 boilerplate, consisting of @...@ variable substitutions, recursive build/clean/tags/distdir targets, and install/uninstall rules for the same health.d data files listed in Makefile.am above]
diff --git a/health/README.md b/health/README.md
new file mode 100644
index 000000000..597bd3c32
--- /dev/null
+++ b/health/README.md
@@ -0,0 +1,657 @@
+
+# Health monitoring
+
+Each netdata node runs an independent thread evaluating health monitoring checks.
+This thread has lock-free access to the database, so that it can operate as a watchdog.
+
+Health checks (alarms) are attached to netdata charts, allowing netdata to automatically
+activate an alarm as soon as a chart is created. This is very important for
+netdata, since many charts are dynamically created during runtime (for example, the
+chart tracking network interface packet drops is automatically created when the first
+packet is dropped).
+
+Netdata also supports alarm **templates**, so that an alarm can be attached to all
+the charts of the same context (i.e. all network interfaces, or all disks, or all mysql servers, etc.).
+
+Each alarm can execute a single query to the database, using statistical algorithms against past data,
+but alarms can be combined. So, if you need 2 queries in the database, you can combine
+2 alarms together (both will run a query to the database, and the results can be combined).
+
+Each alarm has unlimited access to all the metrics collected. So, a single alarm can
+use expressions combining the latest value of any number of metrics.
+
+## Health configuration reference
+
+Stock netdata health configuration is in `/usr/lib/netdata/conf.d/health.d`.
+These files can be overridden by copying them to `/etc/netdata/health.d` and editing
+them there (run `/etc/netdata/edit-config` to edit them).
+
+In `/etc/netdata/health.d` you can also put any number of files (in any number of sub-directories)
+with the suffix `.conf` to have them processed by netdata.
+
+Health configuration can be reloaded at any time, without restarting netdata.
+Just send netdata the SIGUSR2 signal, like this:
+
+```sh
+killall -USR2 netdata
+```
+
+### Entities in the health files
+
+There are 2 entities:
+
+1. **alarms**, which are attached to specific charts, and
+
+2. **templates**, which define rules that should be applied to all charts having a
+   specific `context`. You can use this feature to apply **alarms** to all disks,
+   all network interfaces, all mysql databases, all nginx web servers, etc.
+
+Both of these entities have exactly the same format and feature set.
+The only difference is the label `alarm` or `template`.
+
+netdata supports overriding **templates** with **alarms**.
+For example, when a template is defined for a set of charts, an alarm with exactly the
+same name, attached to a chart the template matches, will have higher precedence
+(i.e. netdata will use the alarm on this chart and prevent the template from being applied
+to it).
+
+### The format
+
+The following lines are parsed.
+
+#### alarm line `alarm` or `template`
+
+This line starts an alarm or alarm template.
+
+```
+alarm: NAME
+```
+
+or
+
+```
+template: NAME
+```
+
+This line has to be first on each alarm or template.
+`NAME` can be any name you like; apart from alphanumeric characters, the only symbols allowed are `.` and `_`.
+
+---
+
+#### alarm line `on`
+
+This line defines the data the alarm should be attached to.
+
+For alarms:
+
+```
+on: CHART
+```
+
+For `CHART` you can use the `id` or the `name` of the chart, as shown on the dashboard.
+
+For alarm templates:
+
+```
+on: CONTEXT
+```
+
+`CONTEXT` is the context of a chart, shared by all charts of the same kind. For example the
+charts `mysql_local.net` and `mysql_server2.net` have the same context: `mysql.net`. So, you
+can use this to apply alarms to all `mysql.net` charts.
+
+To find the `CONTEXT` of a chart, hover over its date, above the legend. A tooltip will
+appear with this format: `plugin:module, context`. For example, the bandwidth chart of
+a network interface says:
+
+```
+proc:/proc/net/dev, net.net
+```
+
+So, `plugin = proc`, `module = /proc/net/dev` and `context = net.net`.
+
+---
+
+#### alarm line `os`
+
+This alarm or template will be used only if the O/S of the host loading it matches this
+pattern list. The value is a space separated list of simple patterns (use `*` as wildcard,
+prefix with `!` for a negative match, order is important).
+
+```
+os: linux freebsd macos
+```
+
+---
+
+#### alarm line `hosts`
+
+This alarm or template will be used only if the hostname of the host loading it matches
+this pattern list. The value is a space separated list of simple patterns (use `*` as wildcard,
+prefix with `!` for a negative match, order is important).
+
+```
+hosts: server1 server2 database* !redis3 redis*
+```
+
+The above says: use this alarm on all hosts named `server1`, `server2`, `database*`, and
+all `redis*` except `redis3`.
+
+This is useful when you centralize metrics from multiple hosts to one netdata node.
+
+---
+
+#### alarm line `families`
+
+This line is only used in alarm templates. It filters the charts. So, if you need to apply
+an alarm template to only a few charts of a kind (a few of your disks, a few of your network
+interfaces, or a few of your mysql servers, etc), you can create an alarm template that would
+normally be applied to all of them, and filter them by family.
+
+The format is:
+
+```
+families: SIMPLE PATTERN LIST
+```
+
+A simple patterns list is a list of space separated patterns. Use ` * ` as wildcard and ` ! `
+for a negative match. Processing is left to right, and on the first hit (positive or negative),
+processing stops.
+
+So, `families: *` means match anything, while `families: !bad*pattern* *` means anything
+except `bad*pattern*` (where `*` is a wildcard to match any sequence of characters).
+
+The family of a chart is usually the submenu of the netdata dashboard in which it appears.
+
+---
+
+#### alarm line `lookup`
+
+This line makes a database lookup to find a value. The result of this lookup is available as `$this`.
+
+The format is:
+
+```
+lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS]
+```
+
+Everything works the same as with [badges](../web/api/badges/). In short:
+
+- `METHOD` is one of `average`, `min`, `max`, `sum`, `incremental-sum`.
+  This is required.
+
+- `AFTER` is a relative number of seconds, but it also accepts a single letter for changing
+  the units, like `-1s` = 1 second in the past, `-1m` = 1 minute in the past, `-1h` = 1 hour
+  in the past, `-1d` = 1 day in the past. You need a negative number (i.e. how far in the past
+  to look for the value). **This is required**.
+
+- `at BEFORE` is by default 0 and is not required. Using this you can define the end of the
+  lookup. So data will be evaluated between `AFTER` and `BEFORE`.
+
+- `every DURATION` sets the update frequency of the lookup (it supports single letter units as
+  above too).
+
+- `OPTIONS` is a space separated list of `percentage`, `absolute`, `min2max`, `unaligned`,
+  `match-ids`, `match-names`. Check the badges documentation for more info.
+
+- `of DIMENSIONS` is optional and has to be the last parameter. Dimensions have to be separated
+  by `,` or `|`. The space characters found in dimensions will be kept as-is (a few dimensions
+  have spaces in their names). This accepts netdata simple patterns, and the `match-ids` and
+  `match-names` options affect the searches for dimensions.
+
+The result of the lookup will be available as `$this` and `$NAME` in expressions.
+The timestamps of the timeframe evaluated by the database lookup are available as the variables
+`$after` and `$before` (both are unix timestamps).
+
+---
+
+#### alarm line `calc`
+
+This expression is evaluated just after the `lookup` (if any). Its purpose is to apply some
+calculation before using the value looked up from the db.
+
+You can also have an expression without a lookup, using other variables that are available.
+
+The result of the calculation will be available as `$this` in warning and critical expressions
+(overwriting the `lookup` one).
+
+Format:
+
+```
+calc: EXPRESSION
+```
+
+Check [Expressions](#expressions) for more information.
+
+---
+
+#### alarm line `every`
+
+Sets the update frequency of this alarm. This is the same as the `every DURATION` given
+in the `lookup` lines.
+
+Format:
+
+```
+every: DURATION
+```
+
+`DURATION` accepts `s` for seconds, `m` for minutes, `h` for hours, `d` for days.
+
+---
+
+#### alarm lines `green` and `red`
+
+Set the green and red thresholds of a chart. Both are available as `$green` and `$red` in
+expressions. If multiple alarms define different thresholds, the ones defined by the first
+alarm will be used. These will eventually be visualized on the dashboard, so only one set of
+them is allowed per chart. If you need multiple sets of them in different alarms, use absolute numbers
+instead of `$red` and `$green`.
+
+Format:
+
+```
+green: NUMBER
+red: NUMBER
+```
+
+---
+
+#### alarm lines `warn` and `crit`
+
+These expressions should evaluate to true or false (or, equivalently, non-zero or zero).
+They trigger the alarm. Both are optional.
+
+Format:
+
+```
+warn: EXPRESSION
+crit: EXPRESSION
+```
+Check [Expressions](#expressions) for more information.
+
+---
+
+#### alarm line `to`
+
+This will be the first parameter of the script to be executed when the alarm switches status.
+Its meaning is left up to the `exec` script.
+
+The default `exec` script, `alarm-notify.sh`, uses this field as a space separated list of roles,
+which are then consulted to find the exact recipients per notification method.
+
+Format:
+
+```
+to: ROLE1 ROLE2 ROLE3 ...
+```
+
+---
+
+#### alarm line `exec`
+
+The script that will be executed when the alarm changes status.
+
+Format:
+
+```
+exec: SCRIPT
+```
+
+The default `SCRIPT` is netdata's `alarm-notify.sh`, which supports all the notification
+methods netdata provides, including custom hooks.
+
+---
+
+#### alarm line `delay`
+
+This is used to provide optional hysteresis settings for the notifications, to defend
+against notification floods. These settings do not affect the actual alarm - only the time
+the `exec` script is executed.
+
+Format:
+
+```
+delay: [[[up U] [down D] multiplier M] max X]
+```
+
+- `up U` defines the delay to be applied to a notification for an alarm that raised its status
+  (i.e. CLEAR to WARNING, CLEAR to CRITICAL, WARNING to CRITICAL). For example, with `up 10s` the
+  notification for this event will be sent 10 seconds after the actual event. This is used in
+  the hope that the alarm will get back to its previous state within the duration given. The
+  default `U` is zero.
+
+- `down D` defines the delay to be applied to a notification for an alarm that moves to a lower
+  state (i.e. CRITICAL to WARNING, CRITICAL to CLEAR, WARNING to CLEAR). For example, `down 1m`
+  will delay the notification by 1 minute. This is used to prevent notifications for flapping
+  alarms. The default `D` is zero.
+
+- `multiplier M` multiplies `U` and `D` when an alarm changes state, while a notification is
+  delayed. The default multiplier is `1.0`.
+
+- `max X` defines the maximum absolute notification delay an alarm may get. The default `X`
+  is `max(U * M, D * M)` (i.e. the max duration of `U` or `D` multiplied once with `M`).
+
+  Example:
+
+  `delay: up 10s down 15m multiplier 2 max 1h`
+
+  The time is `00:00:00` and the status of the alarm is CLEAR.
+
+  time of event|new status|delay|notification will be sent|why
+  -------------|----------|:---:|-------------------------|---
+  00:00:01 | WARNING | `up 10s` | 00:00:11 |first state switch
+  00:00:05 | CLEAR | `down 15m x2` | 00:30:05 |the alarm changes state while a notification is delayed, so it was multiplied
+  00:00:06 | WARNING | `up 10s x2 x2` | 00:00:26 |multiplied twice
+  00:00:07 | CLEAR | `down 15m x2 x2 x2` | 00:45:07 |multiplied 3 times
+
+  So:
+  - `U` and `D` are multiplied by `M` every time the alarm changes state (any state, not just
+    their matching one) and a delay is in place.
+  - All are reset to their defaults when the alarm switches state without a delay in place.
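+
+Putting the notification lines together, a sketch of a complete alarm using `to` and
+`delay` could look like this (the name, chart, dimensions and thresholds are illustrative
+only, not a shipped alarm):
+
+```
+ alarm: example_cpu_check
+    on: system.cpu
+lookup: average -10m unaligned of user,system
+ units: %
+ every: 1m
+  warn: $this > 80
+  crit: $this > 95
+ delay: down 15m multiplier 1.5 max 1h
+    to: sysadmin
+```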
+
+---
+
+### Expressions
+
+netdata has an internal [infix expression parser](../libnetdata/eval).
+It parses expressions and creates an internal structure that allows their fast execution.
+
+These operators are supported: `+`, `-`, `*`, `/`, `<`, `<=`, `<>`, `!=`, `>`, `>=`, `&&`, `||`,
+`!`, `AND`, `OR`, `NOT`. Boolean operators result in either `1` (true) or `0` (false).
+
+The conditional evaluation operator `?` is supported too. Using this operator, IF-THEN-ELSE
+conditional statements can be specified. The format is: `(condition) ? (true expression) :
+(false expression)`. So, netdata will first evaluate the `condition` and based on the result
+will either evaluate `true expression` or `false expression`.
+Example: `($this > 0) ? ($avail * 2) : ($used / 2)`.
+Such expressions can also be nested (i.e. `true expression` and `false expression` can
+contain conditional evaluations).
+
+Expressions also support the `abs()` function.
+
+Expressions can have variables. Variables start with `$`. Check below for more information.
+
+There are two special values you can use:
+
+ - `nan`, for example `$this != nan` will check if the variable `this` is available.
+   A variable can be `nan` if the database lookup failed. All calculations (i.e. addition,
+   multiplication, etc) with a `nan` result in a `nan`.
+
+ - `inf`, for example `$this != inf` will check if `this` is not infinite. A value or
+   variable can become infinite when divided by zero. All calculations (i.e. addition,
+   multiplication, etc) with an `inf` result in an `inf`.
+
+---
+
+### Special use of the conditional operator
+
+A common (but not necessarily obvious) use of the conditional evaluation operator is
+to provide [hysteresis](https://en.wikipedia.org/wiki/Hysteresis) around the critical
+or warning thresholds. This usage helps to avoid bogus messages resulting from small
+variations in the value when it is varying regularly but staying close to the threshold
+value, without needing to delay sending messages at all.
+
+An example of such usage from the default CPU usage alarms bundled with netdata is:
+
+```
+warn: $this > (($status >= $WARNING) ? (75) : (85))
+crit: $this > (($status == $CRITICAL) ? (85) : (95))
+```
+
+The above says:
+* If the alarm is currently a warning, then the threshold for being considered a warning
+  is 75, otherwise it's 85.
+
+* If the alarm is currently critical, then the threshold for being considered critical
+  is 85, otherwise it's 95.
+
+Which, in turn, results in the following behavior:
+* While the value is rising, it will trigger a warning when it exceeds 85, and a critical
+  alert when it exceeds 95.
+
+* While the value is falling, it will return to a warning state when it goes below 85,
+  and a normal state when it goes below 75.
+
+* If the value is constantly varying between 80 and 90, then it will trigger a warning the
+  first time it goes above 85, but will remain a warning until it goes below 75 (or goes above 85).
+
+* If the value is constantly varying between 90 and 100, then it will trigger a critical alert
+  the first time it goes above 95, but will remain a critical alert until it goes below 85 (at which
+  point it will return to being a warning).
+
+---
+
+### Variables
+
+netdata supports 3 internal indexes for variables that can be used in health monitoring:
+
+ - **chart local variables**. All the dimensions of the chart are exposed as local variables.
+   The names of all alarms attached to the chart are exposed as variables too.
+
+   Charts also define a few special variables:
+
+   - `$last_collected_t` is the unix timestamp of the last data collection
+   - `$collected_total_raw` is the sum of all the dimensions (their last collected values)
+   - `$update_every` is the update frequency of the chart
+   - `$green` and `$red`, the thresholds defined in alarms (these are per chart - the chart
+     inherits them from the first alarm that defined them)
+
+   Chart dimensions define their last calculated (i.e. interpolated) value, exactly as
+   shown on the charts, but also a variable with their name and the suffix `_raw` that resolves
+   to the last collected value - as collected - and another with the suffix `_last_collected_t`
+   that resolves to the unix timestamp the dimension was last collected (there may be dimensions
+   that fail to be collected while others continue normally).
+
+ - **family variables**. Families are used to group charts together. For example all `eth0`
+   charts have `family = eth0`. This index includes all local variables, but if there are
+   overlapping variables, only the first one is exposed.
+
+ - **host variables**. All the dimensions of all charts, including all alarms, in fullname.
+   Fullname is `CHART.VARIABLE`, where `CHART` is either the chart id or the chart name (both
+   are supported).
+
+ - **special variables** are:
+
+   - `this`, which is resolved to the value of the current alarm.
+
+   - `status`, which is resolved to the current status of the alarm (the current = the last
+     status, i.e. before the current database lookup and the evaluation of the `calc` line).
+     These values can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`,
+     `$WARNING`, `$CRITICAL`. These values are incremental, i.e. `$status > $CLEAR` works as
+     expected.
+
+   - `now`, which is resolved to the current unix timestamp.
+
+You can find all the variables that can be used for a given chart, using
+`http://your.netdata.ip:19999/api/v1/alarm_variables?chart=NAME`.
+This will dump all the indexes from the chart's perspective.
+Example: [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu).
+
+## Alarm Statuses
+
+Alarms can have the following statuses:
+
+ - `REMOVED` - the alarm has been deleted (this happens when a SIGUSR2 is sent to netdata
+   to reload health configuration)
+
+ - `UNINITIALIZED` - the alarm is not initialized yet
+
+ - `UNDEFINED` - the alarm failed to be calculated (i.e. the database lookup failed,
+   a division by zero occurred, etc)
+
+ - `CLEAR` - the alarm is not armed / raised (i.e. is OK)
+
+ - `WARNING` - the warning expression resulted in true or non-zero
+
+ - `CRITICAL` - the critical expression resulted in true or non-zero
+
+The external script will be called for all status changes.
+
+## Examples
+
+Check the **[health.d directory](health.d)** for all alarms shipped with netdata.
+
+Here are a few examples:
+
+### Example 1
+
+A simple check if an apache server is alive:
+
+```
+template: apache_last_collected_secs
+      on: apache.requests
+    calc: $now - $last_collected_t
+   every: 10s
+    warn: $this > ( 5 * $update_every)
+    crit: $this > (10 * $update_every)
+```
+
+The above checks that netdata is able to collect data from apache. In detail:
+
+```
+template: apache_last_collected_secs
+```
+
+The above defines a **template** named `apache_last_collected_secs`.
+The name is important, since `$apache_last_collected_secs` resolves to the result of the `calc` line.
+So, try to give it something descriptive.
+
+```
+      on: apache.requests
+```
+
+The above applies the **template** to all charts that have `context = apache.requests`
+(i.e. all your apache servers).
+
+```
+    calc: $now - $last_collected_t
+```
+
+- `$now` is a standard variable that resolves to the current timestamp.
+
+- `$last_collected_t` is the last data collection timestamp of the chart.
+  So this calculation gives the number of seconds passed since the last data collection.
+
+```
+   every: 10s
+```
+
+The alarm will be evaluated every 10 seconds.
+
+```
+    warn: $this > ( 5 * $update_every)
+    crit: $this > (10 * $update_every)
+```
+
+If these result in non-zero or true, they trigger the alarm.
+
+- `$this` refers to the value of this alarm (i.e. the result of the `calc` line).
+  We could also use `$apache_last_collected_secs`.
+
+- `$update_every` is the update frequency of the chart, in seconds.
+
+So, the warning condition checks if we have not collected data from apache for 5
+iterations and the critical condition checks for 10 iterations.
+
+### Example 2
+
+Check if any of the disks is critically low on disk space:
+
+```
+template: disk_full_percent
+      on: disk.space
+    calc: $used * 100 / ($avail + $used)
+   every: 1m
+    warn: $this > 80
+    crit: $this > 95
+```
+
+`$used` and `$avail` are the `used` and `avail` chart dimensions as shown on the dashboard.
+
+So, the `calc` line finds the percentage of used space. `$this` resolves to this percentage.
+
+### Example 3
+
+Predict if any disk will run out of space in the near future.
+
+We do this in 2 steps:
+
+Calculate the disk fill rate:
+
+```
+    template: disk_fill_rate
+          on: disk.space
+      lookup: max -1s at -30m unaligned of avail
+        calc: ($this - $avail) / (30 * 60)
+       every: 15s
+```
+
+In the `calc` line: `$this` is the result of the `lookup` line (i.e. the free space 30 minutes
+ago) and `$avail` is the current disk free space. So the `calc` line will either have a positive
+number of GB/second if the disk is filling up, or a negative number of GB/second if the disk is
+freeing up space.
+
+There are no `warn` or `crit` lines here. So, this template will just do the calculation and
+nothing more.
+
+Predict the hours after which the disk will run out of space:
+
+```
+    template: disk_full_after_hours
+          on: disk.space
+        calc: $avail / $disk_fill_rate / 3600
+       every: 10s
+        warn: $this > 0 and $this < 48
+        crit: $this > 0 and $this < 24
+```
+
+The `calc` line estimates the time, in hours, until the disk runs out of space. Of course, only
+positive values are interesting for this check, so the warning and critical conditions check
+for positive values below 48 and 24 hours respectively.
+
+Once this alarm triggers we will receive an email like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/17839993/87872b32-6802-11e6-8e08-b2e4afef93bb.png)
+
+### Example 4
+
+Check if any network interface is dropping packets:
+
+```
+template: 30min_packet_drops
+      on: net.drops
+  lookup: sum -30m unaligned absolute
+   every: 10s
+    crit: $this > 0
+```
+
+The `lookup` line will calculate the sum of all dropped packets in the last 30 minutes.
+
+The `crit` line will issue a critical alarm if even a single packet has been dropped.
+
+Note that the drops chart does not exist if a network interface has never dropped a single packet.
+When netdata detects a dropped packet, it will add the chart and it will automatically attach this
+alarm to it.
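+
+Since such a chart (and hence the alarm) appears only after the first drop, you can confirm
+the alarm got attached by querying the alarms API (host and port below are placeholders for
+your own node):
+
+```sh
+# list all alarms attached to charts, including those in CLEAR state
+curl "http://your.netdata.ip:19999/api/v1/alarms?all"
+```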
+ +## Troubleshooting + +You can compile netdata with [debugging](../daemon#debugging) and then set in `netdata.conf`: + +``` +[global] + debug flags = 0x0000000000800000 +``` + +Then check your `/var/log/netdata/debug.log`. It will show you how it works. +Important: this will generate a lot of output in debug.log. + +You can find the context of charts by looking up the chart in either +`http://your.netdata:19999/netdata.conf` or `http://your.netdata:19999/api/v1/charts`. + +You can find how netdata interpreted the expressions by examining the alarm at +`http://your.netdata:19999/api/v1/alarms?all`. For each expression, netdata will return the +expression as given in its config file, and the same expression with additional parentheses +added to indicate the evaluation flow of the expression. + diff --git a/health/health.c b/health/health.c new file mode 100644 index 000000000..ae0c464b1 --- /dev/null +++ b/health/health.c @@ -0,0 +1,750 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "health.h" + +unsigned int default_health_enabled = 1; + +// ---------------------------------------------------------------------------- +// health initialization + +inline char *health_user_config_dir(void) { + char buffer[FILENAME_MAX + 1]; + snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_user_config_dir); + return config_get(CONFIG_SECTION_HEALTH, "health configuration directory", buffer); +} + +inline char *health_stock_config_dir(void) { + char buffer[FILENAME_MAX + 1]; + snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir); + return config_get(CONFIG_SECTION_HEALTH, "stock health configuration directory", buffer); +} + +void health_init(void) { + debug(D_HEALTH, "Health configuration initializing"); + + if(!(default_health_enabled = (unsigned int)config_get_boolean(CONFIG_SECTION_HEALTH, "enabled", default_health_enabled))) { + debug(D_HEALTH, "Health is disabled."); + return; + } +} + +// ---------------------------------------------------------------------------- +// re-load health configuration + +void health_reload_host(RRDHOST *host) { + if(unlikely(!host->health_enabled)) + return; + + char *user_path = health_user_config_dir(); + char *stock_path = health_stock_config_dir(); + + // free all running alarms + rrdhost_wrlock(host); + + while(host->templates) + rrdcalctemplate_unlink_and_free(host, host->templates); + + while(host->alarms) + rrdcalc_unlink_and_free(host, host->alarms); + + rrdhost_unlock(host); + + // invalidate all previous entries in the alarm log + ALARM_ENTRY *t; + for(t = host->health_log.alarms ; t ; t = t->next) { + if(t->new_status != RRDCALC_STATUS_REMOVED) + t->flags |= HEALTH_ENTRY_FLAG_UPDATED; + } + + rrdhost_rdlock(host); + // reset all thresholds to all charts + RRDSET *st; + rrdset_foreach_read(st, host) { + st->green = NAN; + st->red = NAN; + } + rrdhost_unlock(host); + + // load the new alarms + rrdhost_wrlock(host); + health_readdir(host, user_path, stock_path, NULL); + + // link the loaded alarms to their charts + rrdset_foreach_write(st, host) { + rrdsetcalc_link_matching(st); + rrdcalctemplate_link_matching(st); + } + + rrdhost_unlock(host); +} + +void health_reload(void) { + + rrd_rdlock(); + + RRDHOST *host; + rrdhost_foreach_read(host) + health_reload_host(host); + + rrd_unlock(); +} + +// ---------------------------------------------------------------------------- +// health main thread and friends + +static inline RRDCALC_STATUS rrdcalc_value2status(calculated_number n) { + if(isnan(n) || 
isinf(n)) return RRDCALC_STATUS_UNDEFINED; + if(n) return RRDCALC_STATUS_RAISED; + return RRDCALC_STATUS_CLEAR; +} + +#define ALARM_EXEC_COMMAND_LENGTH 8192 + +static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { + ae->flags |= HEALTH_ENTRY_FLAG_PROCESSED; + + if(unlikely(ae->new_status < RRDCALC_STATUS_CLEAR)) { + // do not send notifications for internal statuses + debug(D_HEALTH, "Health not sending notification for alarm '%s.%s' status %s (internal statuses)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); + goto done; + } + + if(unlikely(ae->new_status <= RRDCALC_STATUS_CLEAR && (ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) { + // do not send notifications for disabled statuses + debug(D_HEALTH, "Health not sending notification for alarm '%s.%s' status %s (it has no-clear-notification enabled)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); + // mark it as run, so that we will send the same alarm if it happens again + goto done; + } + + // find the previous notification for the same alarm + // which we have run the exec script + // exception: alarms with HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION set + if(likely(!(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) { + uint32_t id = ae->alarm_id; + ALARM_ENTRY *t; + for(t = ae->next; t ; t = t->next) { + if(t->alarm_id == id && t->flags & HEALTH_ENTRY_FLAG_EXEC_RUN) + break; + } + + if(likely(t)) { + // we have executed this alarm notification in the past + if(t && t->new_status == ae->new_status) { + // don't send the notification for the same status again + debug(D_HEALTH, "Health not sending again notification for alarm '%s.%s' status %s", ae->chart, ae->name + , rrdcalc_status2string(ae->new_status)); + goto done; + } + } + else { + // we have not executed this alarm notification in the past + // so, don't send CLEAR notifications + if(unlikely(ae->new_status == RRDCALC_STATUS_CLEAR)) { + debug(D_HEALTH, "Health not sending notification for first initialization of alarm '%s.%s' status %s" + , ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); + goto done; + } + } + } + + static char command_to_run[ALARM_EXEC_COMMAND_LENGTH + 1]; + pid_t command_pid; + + const char *exec = (ae->exec) ? ae->exec : host->health_default_exec; + const char *recipient = (ae->recipient) ? 
ae->recipient : host->health_default_recipient;
+
+    snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s'",
+              exec,
+              recipient,
+              host->registry_hostname,
+              ae->unique_id,
+              ae->alarm_id,
+              ae->alarm_event_id,
+              (unsigned long)ae->when,
+              ae->name,
+              ae->chart?ae->chart:"NOCHART",
+              ae->family?ae->family:"NOFAMILY",
+              rrdcalc_status2string(ae->new_status),
+              rrdcalc_status2string(ae->old_status),
+              ae->new_value,
+              ae->old_value,
+              ae->source?ae->source:"UNKNOWN",
+              (uint32_t)ae->duration,
+              (uint32_t)ae->non_clear_duration,
+              ae->units?ae->units:"",
+              ae->info?ae->info:"",
+              ae->new_value_string,
+              ae->old_value_string
+    );
+
+    ae->flags |= HEALTH_ENTRY_FLAG_EXEC_RUN;
+    ae->exec_run_timestamp = now_realtime_sec();
+
+    debug(D_HEALTH, "executing command '%s'", command_to_run);
+    FILE *fp = mypopen(command_to_run, &command_pid);
+    if(!fp) {
+        error("HEALTH: Cannot popen(\"%s\", \"r\").", command_to_run);
+        goto done;
+    }
+    debug(D_HEALTH, "HEALTH reading from command (discarding command's output)");
+    char buffer[100 + 1];
+    while(fgets(buffer, 100, fp) != NULL) ;
+    ae->exec_code = mypclose(fp, command_pid);
+    debug(D_HEALTH, "done executing command - returned with code %d", ae->exec_code);
+
+    if(ae->exec_code != 0)
+        ae->flags |= HEALTH_ENTRY_FLAG_EXEC_FAILED;
+
+done:
+    health_alarm_log_save(host, ae);
+}
+
+static inline void health_process_notifications(RRDHOST *host, ALARM_ENTRY *ae) {
+    debug(D_HEALTH, "Health alarm '%s.%s' = " CALCULATED_NUMBER_FORMAT_AUTO " - changed status from %s to %s",
+          ae->chart?ae->chart:"NOCHART", ae->name,
+          ae->new_value,
+          rrdcalc_status2string(ae->old_status),
+          rrdcalc_status2string(ae->new_status)
+    );
+
+    health_alarm_execute(host, ae);
+}
+
+static inline void health_alarm_log_process(RRDHOST *host) {
+    uint32_t first_waiting = (host->health_log.alarms)?host->health_log.alarms->unique_id:0;
+    time_t now = now_realtime_sec();
+
+    netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+    ALARM_ENTRY *ae;
+    for(ae = host->health_log.alarms; ae && ae->unique_id >= host->health_last_processed_id ; ae = ae->next) {
+        if(unlikely(
+            !(ae->flags & HEALTH_ENTRY_FLAG_PROCESSED) &&
+            !(ae->flags & HEALTH_ENTRY_FLAG_UPDATED)
+            )) {
+
+            if(unlikely(ae->unique_id < first_waiting))
+                first_waiting = ae->unique_id;
+
+            if(likely(now >= ae->delay_up_to_timestamp))
+                health_process_notifications(host, ae);
+        }
+    }
+
+    // remember this for the next iteration
+    host->health_last_processed_id = first_waiting;
+
+    netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+    if(host->health_log.count <= host->health_log.max)
+        return;
+
+    // cleanup excess entries in the log
+    netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+
+    ALARM_ENTRY *last = NULL;
+    unsigned int count = host->health_log.max * 2 / 3;
+    for(ae = host->health_log.alarms; ae && count ; count--, last = ae, ae = ae->next) ;
+
+    if(ae && last && last->next == ae)
+        last->next = NULL;
+    else
+        ae = NULL;
+
+    while(ae) {
+        debug(D_HEALTH, "Health removing alarm log entry with id: %u", ae->unique_id);
+
+        ALARM_ENTRY *t = ae->next;
+
+        health_alarm_log_free_one_nochecks_nounlink(ae);
+
+        ae = t;
+        host->health_log.count--;
+    }
+
+    netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+}
+
+static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run) {
+    if(unlikely(!rc->rrdset)) {
+        debug(D_HEALTH, 
"Health not running alarm '%s.%s'. It is not linked to a chart.", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + if(unlikely(rc->next_update > now)) { + if (unlikely(*next_run > rc->next_update)) { + // update the next_run time of the main loop + // to run this alarm precisely the time required + *next_run = rc->next_update; + } + + debug(D_HEALTH, "Health not examining alarm '%s.%s' yet (will do in %d secs).", rc->chart?rc->chart:"NOCHART", rc->name, (int) (rc->next_update - now)); + return 0; + } + + if(unlikely(!rc->update_every)) { + debug(D_HEALTH, "Health not running alarm '%s.%s'. It does not have an update frequency", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + if(unlikely(rrdset_flag_check(rc->rrdset, RRDSET_FLAG_OBSOLETE))) { + debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart has been marked as obsolete", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + if(unlikely(!rrdset_flag_check(rc->rrdset, RRDSET_FLAG_ENABLED))) { + debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart is not enabled", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + if(unlikely(!rc->rrdset->last_collected_time.tv_sec || rc->rrdset->counter_done < 2)) { + debug(D_HEALTH, "Health not running alarm '%s.%s'. Chart is not fully collected yet.", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + int update_every = rc->rrdset->update_every; + time_t first = rrdset_first_entry_t(rc->rrdset); + time_t last = rrdset_last_entry_t(rc->rrdset); + + if(unlikely(now + update_every < first /* || now - update_every > last */)) { + debug(D_HEALTH + , "Health not examining alarm '%s.%s' yet (wanted time is out of bounds - we need %lu but got %lu - %lu)." + , rc->chart ? rc->chart : "NOCHART", rc->name, (unsigned long) now, (unsigned long) first + , (unsigned long) last); + return 0; + } + + if(RRDCALC_HAS_DB_LOOKUP(rc)) { + time_t needed = now + rc->before + rc->after; + + if(needed + update_every < first || needed - update_every > last) { + debug(D_HEALTH + , "Health not examining alarm '%s.%s' yet (not enough data yet - we need %lu but got %lu - %lu)." + , rc->chart ? 
rc->chart : "NOCHART", rc->name, (unsigned long) needed, (unsigned long) first
+              , (unsigned long) last);
+            return 0;
+        }
+    }
+
+    return 1;
+}
+
+static inline int check_if_resumed_from_suspension(void) {
+    static usec_t last_realtime = 0, last_monotonic = 0;
+    usec_t realtime = now_realtime_usec(), monotonic = now_monotonic_usec();
+    int ret = 0;
+
+    // detect if monotonic and realtime have twice the difference
+    // in which case we assume the system was just woken from hibernation
+
+    if(last_realtime && last_monotonic && realtime - last_realtime > 2 * (monotonic - last_monotonic))
+        ret = 1;
+
+    last_realtime = realtime;
+    last_monotonic = monotonic;
+
+    return ret;
+}
+
+static void health_main_cleanup(void *ptr) {
+    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+    static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+    info("cleaning up...");
+
+    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *health_main(void *ptr) {
+    netdata_thread_cleanup_push(health_main_cleanup, ptr);
+
+    int min_run_every = (int)config_get_number(CONFIG_SECTION_HEALTH, "run at least every seconds", 10);
+    if(min_run_every < 1) min_run_every = 1;
+
+    time_t now = now_realtime_sec();
+    time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60);
+
+    unsigned int loop = 0;
+    while(!netdata_exit) {
+        loop++;
+        debug(D_HEALTH, "Health monitoring iteration no %u started", loop);
+
+        int runnable = 0, apply_hibernation_delay = 0;
+        time_t next_run = now + min_run_every;
+        RRDCALC *rc;
+
+        if(unlikely(check_if_resumed_from_suspension())) {
+            apply_hibernation_delay = 1;
+
+            info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension."
+                 , hibernation_delay
+            );
+        }
+
+        rrd_rdlock();
+
+        RRDHOST *host;
+        rrdhost_foreach_read(host) {
+            if(unlikely(!host->health_enabled))
+                continue;
+
+            if(unlikely(apply_hibernation_delay)) {
+
+                info("Postponing health checks for %ld seconds, on host '%s'."
+                     , hibernation_delay
+                     , host->hostname
+                );
+
+                host->health_delay_up_to = now + hibernation_delay;
+            }
+
+            if(unlikely(host->health_delay_up_to)) {
+                if(unlikely(now < host->health_delay_up_to))
+                    continue;
+
+                info("Resuming health checks on host '%s'.", host->hostname);
+                host->health_delay_up_to = 0;
+            }
+
+            rrdhost_rdlock(host);
+
+            // the first loop is to lookup values from the db
+            for(rc = host->alarms; rc; rc = rc->next) {
+                if(unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) {
+                    if(unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))
+                        rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE;
+                    continue;
+                }
+
+                runnable++;
+                rc->old_value = rc->value;
+                rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE;
+
+                // ------------------------------------------------------------
+                // if there is database lookup, do it
+
+                if(unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) {
+                    /* time_t old_db_timestamp = rc->db_before; */
+                    int value_is_null = 0;
+
+                    int ret = rrdset2value_api_v1(rc->rrdset
+                                                  , NULL
+                                                  , &rc->value
+                                                  , rc->dimensions
+                                                  , 1
+                                                  , rc->after
+                                                  , rc->before
+                                                  , rc->group
+                                                  , 0
+                                                  , rc->options
+                                                  , &rc->db_after
+                                                  , &rc->db_before
+                                                  , &value_is_null
+                    );
+
+                    if(unlikely(ret != 200)) {
+                        // database lookup failed
+                        rc->value = NAN;
+                        rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR;
+
+                        debug(D_HEALTH
+                              , "Health on host '%s', alarm '%s.%s': database lookup returned error %d"
+                              , host->hostname
+                              , rc->chart ? 
rc->chart : "NOCHART" + , rc->name + , ret + ); + } + else + rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR; + + /* - RRDCALC_FLAG_DB_STALE not currently used + if (unlikely(old_db_timestamp == rc->db_before)) { + // database is stale + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); + + if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) { + rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE; + error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); + } + } + else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE)) + rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE; + */ + + if(unlikely(value_is_null)) { + // collected value is null + rc->value = NAN; + rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN; + + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)" + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + ); + } + else + rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN; + + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': database lookup gave value " CALCULATED_NUMBER_FORMAT + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + , rc->value + ); + } + + // ------------------------------------------------------------ + // if there is calculation expression, run it + + if(unlikely(rc->calculation)) { + if(unlikely(!expression_evaluate(rc->calculation))) { + // calculation failed + rc->value = NAN; + rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR; + + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s" + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + , rc->calculation->parsed_as + , buffer_tostring(rc->calculation->error_msg) + ); + } + else { + rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR; + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + , rc->calculation->parsed_as + , rc->calculation->result + , buffer_tostring(rc->calculation->error_msg) + , rc->source + ); + + rc->value = rc->calculation->result; + + if(rc->local) rc->local->last_updated = now; + if(rc->family) rc->family->last_updated = now; + if(rc->hostid) rc->hostid->last_updated = now; + if(rc->hostname) rc->hostname->last_updated = now; + } + } + } + rrdhost_unlock(host); + + if(unlikely(runnable && !netdata_exit)) { + rrdhost_rdlock(host); + + for(rc = host->alarms; rc; rc = rc->next) { + if(unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))) + continue; + + RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED; + RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED; + + // -------------------------------------------------------- + // check the warning expression + + if(likely(rc->warning)) { + if(unlikely(!expression_evaluate(rc->warning))) { + // calculation failed + rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR; + + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s" + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + , buffer_tostring(rc->warning->error_msg) + ); + } + else { + rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR; + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': warning expression gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" + , host->hostname + , rc->chart ? 
rc->chart : "NOCHART" + , rc->name + , rc->warning->result + , buffer_tostring(rc->warning->error_msg) + , rc->source + ); + warning_status = rrdcalc_value2status(rc->warning->result); + } + } + + // -------------------------------------------------------- + // check the critical expression + + if(likely(rc->critical)) { + if(unlikely(!expression_evaluate(rc->critical))) { + // calculation failed + rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR; + + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s" + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + , buffer_tostring(rc->critical->error_msg) + ); + } + else { + rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR; + debug(D_HEALTH + , "Health on host '%s', alarm '%s.%s': critical expression gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" + , host->hostname + , rc->chart ? rc->chart : "NOCHART" + , rc->name + , rc->critical->result + , buffer_tostring(rc->critical->error_msg) + , rc->source + ); + critical_status = rrdcalc_value2status(rc->critical->result); + } + } + + // -------------------------------------------------------- + // decide the final alarm status + + RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED; + + switch(warning_status) { + case RRDCALC_STATUS_CLEAR: + status = RRDCALC_STATUS_CLEAR; + break; + + case RRDCALC_STATUS_RAISED: + status = RRDCALC_STATUS_WARNING; + break; + + default: + break; + } + + switch(critical_status) { + case RRDCALC_STATUS_CLEAR: + if(status == RRDCALC_STATUS_UNDEFINED) + status = RRDCALC_STATUS_CLEAR; + break; + + case RRDCALC_STATUS_RAISED: + status = RRDCALC_STATUS_CRITICAL; + break; + + default: + break; + } + + // -------------------------------------------------------- + // check if the new status and the old differ + + if(status != rc->status) { + int delay = 0; + + // apply trigger hysteresis + + if(now > rc->delay_up_to_timestamp) { + rc->delay_up_current = rc->delay_up_duration; + rc->delay_down_current = rc->delay_down_duration; + rc->delay_last = 0; + rc->delay_up_to_timestamp = 0; + } + else { + rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier); + if(rc->delay_up_current > rc->delay_max_duration) + rc->delay_up_current = rc->delay_max_duration; + + rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier); + if(rc->delay_down_current > rc->delay_max_duration) + rc->delay_down_current = rc->delay_max_duration; + } + + if(status > rc->status) + delay = rc->delay_up_current; + else + delay = rc->delay_down_current; + + // COMMENTED: because we do need to send raising alarms + // if(now + delay < rc->delay_up_to_timestamp) + // delay = (int)(rc->delay_up_to_timestamp - now); + + rc->delay_last = delay; + rc->delay_up_to_timestamp = now + delay; + + // add the alarm into the log + + health_alarm_log( + host + , rc->id + , rc->next_event_id++ + , now + , rc->name + , rc->rrdset->id + , rc->rrdset->family + , rc->exec + , rc->recipient + , now - rc->last_status_change + , rc->old_value + , rc->value + , rc->status + , status + , rc->source + , rc->units + , rc->info + , rc->delay_last + , (rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION) ? 
HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0 + ); + + rc->last_status_change = now; + rc->status = status; + } + + rc->last_updated = now; + rc->next_update = now + rc->update_every; + + if(next_run > rc->next_update) + next_run = rc->next_update; + } + + rrdhost_unlock(host); + } + + if(unlikely(netdata_exit)) + break; + + // execute notifications + // and cleanup + health_alarm_log_process(host); + + if(unlikely(netdata_exit)) + break; + + } /* rrdhost_foreach */ + + rrd_unlock(); + + if(unlikely(netdata_exit)) + break; + + now = now_realtime_sec(); + if(now < next_run) { + debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration in %d secs", loop, (int) (next_run - now)); + sleep_usec(USEC_PER_SEC * (usec_t) (next_run - now)); + now = now_realtime_sec(); + } + else + debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration now", loop); + + } // forever + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/health/health.d/adaptec_raid.conf b/health/health.d/adaptec_raid.conf new file mode 100644 index 000000000..a1301ce8a --- /dev/null +++ b/health/health.d/adaptec_raid.conf @@ -0,0 +1,24 @@ + +# logical device status check + +template: adapter_raid_ld_status + on: adapter_raid.ld_status + lookup: max -5s + units: bool + every: 10s + crit: $this > 0 + delay: down 5m multiplier 1.5 max 1h + info: at least 1 logical device is failed or degraded + to: sysadmin + +# physical device state check + +template: adapter_raid_pd_state + on: adapter_raid.pd_state + lookup: max -5s + units: bool + every: 10s + crit: $this > 0 + delay: down 5m multiplier 1.5 max 1h + info: at least 1 physical device is not in online state + to: sysadmin diff --git a/health/health.d/apache.conf b/health/health.d/apache.conf new file mode 100644 index 000000000..0c98b8778 --- /dev/null +++ b/health/health.d/apache.conf @@ -0,0 +1,14 @@ + +# make sure apache is running + +template: apache_last_collected_secs + on: apache.requests + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: webmaster + diff --git a/health/health.d/apcupsd.conf b/health/health.d/apcupsd.conf new file mode 100644 index 000000000..4f86037ba --- /dev/null +++ b/health/health.d/apcupsd.conf @@ -0,0 +1,40 @@ +# you can disable an alarm notification by setting the 'to' line to: silent + +template: 10min_ups_load + on: apcupsd.load + os: * + hosts: * + lookup: average -10m unaligned of percentage + units: % + every: 1m + warn: $this > (($status >= $WARNING) ? (70) : (80)) + crit: $this > (($status == $CRITICAL) ? (85) : (95)) + delay: down 10m multiplier 1.5 max 1h + info: average UPS load for the last 10 minutes + to: sitemgr + +# Discussion in https://github.com/netdata/netdata/pull/3928: +# Fire the alarm as soon as it's going on battery (99% charge) and clear only when full. +template: ups_charge + on: apcupsd.charge + os: * + hosts: * + lookup: average -60s unaligned of charge + units: % + every: 60s + warn: $this < 100 + crit: $this < (($status == $CRITICAL) ? 
(60) : (50)) + delay: down 10m multiplier 1.5 max 1h + info: current UPS charge, averaged over the last 60 seconds to reduce measurement errors + to: sitemgr + +template: apcupsd_last_collected_secs + on: apcupsd.load + calc: $now - $last_collected_t + every: 10s + units: seconds ago + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: sitemgr diff --git a/health/health.d/backend.conf b/health/health.d/backend.conf new file mode 100644 index 000000000..7af100d8f --- /dev/null +++ b/health/health.d/backend.conf @@ -0,0 +1,45 @@ + +# make sure we are sending data to backend + + alarm: backend_last_buffering + on: netdata.backend_metrics + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful buffering of backend data + to: dba + + alarm: backend_metrics_sent + on: netdata.backend_metrics + units: % + calc: abs($sent) * 100 / abs($buffered) + every: 10s + warn: $this != 100 + delay: down 5m multiplier 1.5 max 1h + info: percentage of metrics sent to the backend server + to: dba + + alarm: backend_metrics_lost + on: netdata.backend_metrics + units: metrics + calc: abs($lost) + every: 10s + crit: ($this != 0) || ($status == $CRITICAL && abs($sent) == 0) + delay: down 5m multiplier 1.5 max 1h + info: number of metrics lost due to repeating failures to contact the backend server + to: dba + +# this chart has been removed from netdata +# alarm: backend_slow +# on: netdata.backend_latency +# units: % +# calc: $latency * 100 / ($update_every * 1000) +# every: 10s +# warn: $this > 50 +# crit: $this > 100 +# delay: down 5m multiplier 1.5 max 1h +# info: the percentage of time between iterations needed by the backend time to process the data sent by netdata +# to: dba diff --git a/health/health.d/bcache.conf b/health/health.d/bcache.conf new file mode 100644 index 000000000..f0da9ac5e --- /dev/null +++ b/health/health.d/bcache.conf @@ -0,0 +1,22 @@ + +template: bcache_cache_errors + on: disk.bcache_cache_read_races + lookup: sum -10m unaligned absolute + units: errors + every: 1m + warn: $this > 0 + crit: $this > ( ($status >= $CRITICAL) ? (0) : (10) ) + delay: down 1h multiplier 1.5 max 2h + info: the number of times bcache had issues using the cache, during the last 10 mins (this usually means your SSD cache is failing) + to: sysadmin + +template: bcache_cache_dirty + on: disk.bcache_cache_alloc + calc: $dirty + $metadata + $undefined + units: % + every: 1m + warn: $this > ( ($status >= $WARNING ) ? ( 70 ) : ( 90 ) ) + crit: $this > ( ($status >= $CRITICAL) ? 
( 90 ) : ( 95 ) )
+   delay: up 1m down 1h multiplier 1.5 max 2h
+    info: the percentage of cache space used for dirty and metadata (this usually means your SSD cache is too small)
+      to: sysadmin
diff --git a/health/health.d/beanstalkd.conf b/health/health.d/beanstalkd.conf
new file mode 100644
index 000000000..30dc27328
--- /dev/null
+++ b/health/health.d/beanstalkd.conf
@@ -0,0 +1,36 @@
+# get the number of buried jobs in all queues
+
+template: server_buried_jobs
+      on: beanstalk.current_jobs
+    calc: $buried
+   units: jobs
+   every: 10s
+    warn: $this > 0
+    crit: $this > 10
+   delay: up 0 down 5m multiplier 1.2 max 1h
+    info: the number of buried jobs aggregated across all tubes
+      to: sysadmin
+
+# get the number of buried jobs per queue
+
+#template: tube_buried_jobs
+#      on: beanstalk.jobs
+#    calc: $buried
+#   units: jobs
+#   every: 10s
+#    warn: $this > 0
+#    crit: $this > 10
+#   delay: up 0 down 5m multiplier 1.2 max 1h
+#    info: the number of jobs buried per tube
+#      to: sysadmin
+
+# get the current number of tubes
+
+#template: number_of_tubes
+#      on: beanstalk.current_tubes
+#    calc: $tubes
+#   every: 10s
+#    warn: $this < 5
+#   delay: up 0 down 5m multiplier 1.2 max 1h
+#    info: the current number of tubes on the server
+#      to: sysadmin
diff --git a/health/health.d/bind_rndc.conf b/health/health.d/bind_rndc.conf
new file mode 100644
index 000000000..4145e77cd
--- /dev/null
+++ b/health/health.d/bind_rndc.conf
@@ -0,0 +1,9 @@
+ template: bind_rndc_stats_file_size
+       on: bind_rndc.stats_size
+    units: megabytes
+    every: 60
+     calc: $stats_size
+     warn: $this > 512
+     crit: $this > 1024
+     info: Bind stats file is very large! Consider creating a logrotate conf file for it!
+       to: sysadmin
diff --git a/health/health.d/boinc.conf b/health/health.d/boinc.conf
new file mode 100644
index 000000000..43c588db6
--- /dev/null
+++ b/health/health.d/boinc.conf
@@ -0,0 +1,62 @@
+# Alarms for various BOINC issues.
+
+# Warn on any compute errors encountered.
+template: boinc_compute_errors
+      on: boinc.states
+      os: *
+   hosts: *
+families: *
+  lookup: average -10m unaligned of comperror
+   units: tasks
+   every: 1m
+    warn: $this > 0
+    crit: $this > 1
+   delay: up 1m down 5m multiplier 1.5 max 1h
+    info: the total number of compute errors over the past 10 minutes
+      to: sysadmin
+
+# Warn on lots of upload errors
+template: boinc_upload_errors
+      on: boinc.states
+      os: *
+   hosts: *
+families: *
+  lookup: average -10m unaligned of upload_failed
+   units: tasks
+   every: 1m
+    warn: $this > 0
+    crit: $this > 1
+   delay: up 1m down 5m multiplier 1.5 max 1h
+    info: the average number of failed uploads over the past 10 minutes
+      to: sysadmin
+
+# Warn on the task queue being empty
+template: boinc_total_tasks
+      on: boinc.tasks
+      os: *
+   hosts: *
+families: *
+  lookup: average -10m unaligned of total
+   units: tasks
+   every: 1m
+    warn: $this < 1
+    crit: $this < 0.1
+   delay: up 5m down 10m multiplier 1.5 max 1h
+    info: the total number of locally available tasks
+      to: sysadmin
+
+# Warn on no active tasks with a non-empty queue
+template: boinc_active_tasks
+      on: boinc.tasks
+      os: *
+   hosts: *
+families: *
+  lookup: average -10m unaligned of active
+    calc: ($boinc_total_tasks >= 1) ? 
($this) : (inf) + units: tasks + every: 1m + warn: $this < 1 + crit: $this < 0.1 + delay: up 5m down 10m multiplier 1.5 max 1h + info: the total number of active tasks + to: sysadmin diff --git a/health/health.d/btrfs.conf b/health/health.d/btrfs.conf new file mode 100644 index 000000000..b27aa544f --- /dev/null +++ b/health/health.d/btrfs.conf @@ -0,0 +1,57 @@ + +template: btrfs_allocated + on: btrfs.disk + os: * + hosts: * +families: * + calc: 100 - ($unallocated * 100 / ($unallocated + $data_used + $data_free + $meta_used + $meta_free + $sys_used + $sys_free)) + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (90) : (95)) + crit: $this > (($status == $CRITICAL) ? (95) : (98)) + delay: up 1m down 15m multiplier 1.5 max 1h + info: the percentage of allocated BTRFS physical disk space + to: sysadmin + +template: btrfs_data + on: btrfs.data + os: * + hosts: * +families: * + calc: $used * 100 / ($used + $free) + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98 + crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98 + delay: up 1m down 15m multiplier 1.5 max 1h + info: the percentage of used BTRFS data space + to: sysadmin + +template: btrfs_metadata + on: btrfs.metadata + os: * + hosts: * +families: * + calc: ($used + $reserved) * 100 / ($used + $free + $reserved) + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98 + crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98 + delay: up 1m down 15m multiplier 1.5 max 1h + info: the percentage of used BTRFS metadata space + to: sysadmin + +template: btrfs_system + on: btrfs.system + os: * + hosts: * +families: * + calc: $used * 100 / ($used + $free) + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98 + crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98 + delay: up 1m down 15m multiplier 1.5 max 1h + info: the percentage of used BTRFS system space + to: sysadmin + diff --git a/health/health.d/ceph.conf b/health/health.d/ceph.conf new file mode 100644 index 000000000..de16f7b6f --- /dev/null +++ b/health/health.d/ceph.conf @@ -0,0 +1,13 @@ +# low ceph disk available + +template: cluster_space_usage + on: ceph.general_usage + calc: $avail * 100 / ($avail + $used) + units: % + every: 10s + warn: $this < 10 + crit: $this < 1 + delay: down 5m multiplier 1.2 max 1h + info: ceph disk usage is almost full + to: sysadmin + diff --git a/health/health.d/couchdb.conf b/health/health.d/couchdb.conf new file mode 100644 index 000000000..4a2895280 --- /dev/null +++ b/health/health.d/couchdb.conf @@ -0,0 +1,13 @@ + +# make sure couchdb is running + +template: couchdb_last_collected_secs + on: couchdb.request_methods + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: dba diff --git a/health/health.d/cpu.conf b/health/health.d/cpu.conf new file mode 100644 index 000000000..fa8189856 --- /dev/null +++ b/health/health.d/cpu.conf @@ -0,0 +1,55 @@ + +# you can disable an alarm notification by setting the 'to' line to: silent + +template: 10min_cpu_usage + on: system.cpu + os: linux + hosts: * + lookup: average -10m unaligned of user,system,softirq,irq,guest + units: % + every: 1m + warn: $this > (($status >= $WARNING) ? (75) : (85)) + crit: $this > (($status == $CRITICAL) ? (85) : (95)) + delay: down 15m multiplier 1.5 max 1h + info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal) + to: sysadmin + +template: 10min_cpu_iowait + on: system.cpu + os: linux + hosts: * + lookup: average -10m unaligned of iowait + units: % + every: 1m + warn: $this > (($status >= $WARNING) ? (20) : (40)) + crit: $this > (($status == $CRITICAL) ? (40) : (50)) + delay: down 15m multiplier 1.5 max 1h + info: average CPU wait I/O for the last 10 minutes + to: sysadmin + +template: 20min_steal_cpu + on: system.cpu + os: linux + hosts: * + lookup: average -20m unaligned of steal + units: % + every: 5m + warn: $this > (($status >= $WARNING) ? (5) : (10)) + crit: $this > (($status == $CRITICAL) ? (20) : (30)) + delay: down 1h multiplier 1.5 max 2h + info: average CPU steal time for the last 20 minutes + to: sysadmin + +## FreeBSD +template: 10min_cpu_usage + on: system.cpu + os: freebsd + hosts: * + lookup: average -10m unaligned of user,system,interrupt + units: % + every: 1m + warn: $this > (($status >= $WARNING) ? (75) : (85)) + crit: $this > (($status == $CRITICAL) ? (85) : (95)) + delay: down 15m multiplier 1.5 max 1h + info: average cpu utilization for the last 10 minutes (excluding nice) + to: sysadmin diff --git a/health/health.d/disks.conf b/health/health.d/disks.conf new file mode 100644 index 000000000..26f85848a --- /dev/null +++ b/health/health.d/disks.conf @@ -0,0 +1,167 @@ + +# you can disable an alarm notification by setting the 'to' line to: silent + + +# ----------------------------------------------------------------------------- +# low disk space + +# checking the latest collected values +# raise an alarm if the disk is low on +# available disk space + +template: disk_space_usage + on: disk.space + os: linux freebsd + hosts: * +families: * + calc: $used * 100 / ($avail + $used) + units: % + every: 1m + warn: $this > (($status >= $WARNING ) ? (80) : (90)) + crit: $this > (($status == $CRITICAL) ? (90) : (98)) + delay: up 1m down 15m multiplier 1.5 max 1h + info: current disk space usage + to: sysadmin + +template: disk_inode_usage + on: disk.inodes + os: linux freebsd + hosts: * +families: * + calc: $used * 100 / ($avail + $used) + units: % + every: 1m + warn: $this > (($status >= $WARNING) ? (80) : (90)) + crit: $this > (($status == $CRITICAL) ? 
+
+
+# -----------------------------------------------------------------------------
+# disk fill rate
+
+# calculate the rate the disk fills,
+# using the change in available space
+# during the last hour as the basis
+
+# this is just a calculation - it has no alarm
+# we will use it in the next template to find
+# the hours remaining
+
+template: disk_fill_rate
+      on: disk.space
+      os: linux freebsd
+   hosts: *
+families: *
+  lookup: min -10m at -50m unaligned of avail
+    calc: ($this - $avail) / (($now - $after) / 3600)
+   every: 1m
+   units: GB/hour
+    info: average rate the disk fills up (positive) or frees up (negative) space, over the last hour
+
+
+# calculate the hours remaining
+# if the disk continues to fill
+# at this rate
+
+template: out_of_disk_space_time
+      on: disk.space
+      os: linux freebsd
+   hosts: *
+families: *
+    calc: ($disk_fill_rate > 0) ? ($avail / $disk_fill_rate) : (inf)
+   units: hours
+   every: 10s
+    warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
+    crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
+   delay: down 15m multiplier 1.2 max 1h
+    info: estimated time the disk will run out of space, if the system continues to add data at the rate of the last hour
+      to: sysadmin
+
+
+# -----------------------------------------------------------------------------
+# disk inode fill rate
+
+# calculate the rate disk inodes are allocated,
+# using the change in available inodes
+# during the last hour as the basis
+
+# this is just a calculation - it has no alarm
+# we will use it in the next template to find
+# the hours remaining
+
+template: disk_inode_rate
+      on: disk.inodes
+      os: linux freebsd
+   hosts: *
+families: *
+  lookup: min -10m at -50m unaligned of avail
+    calc: ($this - $avail) / (($now - $after) / 3600)
+   every: 1m
+   units: inodes/hour
+    info: average rate at which disk inodes are allocated (positive) or freed (negative), over the last hour
+
+# calculate the hours remaining
+# if the disk inodes are allocated
+# at this rate
+
+template: out_of_disk_inodes_time
+      on: disk.inodes
+      os: linux freebsd
+   hosts: *
+families: *
+    calc: ($disk_inode_rate > 0) ? ($avail / $disk_inode_rate) : (inf)
+   units: hours
+   every: 10s
+    warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
+    crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
+   delay: down 15m multiplier 1.2 max 1h
+    info: estimated time the disk will run out of inodes, if the system continues to allocate inodes at the rate of the last hour
+      to: sysadmin
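The two pairs of templates above chain together: the fill-rate template turns the change in available space (or inodes) into a rate, and the time-remaining template divides what is left by that rate. The same arithmetic as a small C sketch (numbers illustrative):

    #include <stdio.h>
    #include <math.h>

    int main(void) {
        /* the "min -10m at -50m" lookup: minimum available space seen
         * roughly one hour ago, versus the current available space */
        double avail_then_gb = 120.0;
        double avail_now_gb  = 117.0;
        double window_hours  = 1.0;    /* ($now - $after) / 3600 */

        /* disk_fill_rate: positive while the disk is filling */
        double fill_rate = (avail_then_gb - avail_now_gb) / window_hours;

        /* out_of_disk_space_time: hours left at the current rate */
        double hours_left = (fill_rate > 0) ? (avail_now_gb / fill_rate)
                                            : INFINITY;

        /* 3 GB/hour and ~39 hours here; warn raises below 8 hours
         * and, once raised, clears only above 48 */
        printf("filling at %.1f GB/hour, ~%.0f hours until full\n",
               fill_rate, hours_left);
        return 0;
    }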
+
+
+# -----------------------------------------------------------------------------
+# disk congestion
+
+# raise an alarm if the disk is congested
+# by calculating the average disk utilization
+# for the last 10 minutes
+
+template: 10min_disk_utilization
+      on: disk.util
+      os: linux freebsd
+   hosts: *
+families: *
+  lookup: average -10m unaligned
+   units: %
+   every: 1m
+   green: 90
+     red: 98
+    warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1))
+    crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1))
+   delay: down 15m multiplier 1.2 max 1h
+    info: the percentage of time the disk was busy, during the last 10 minutes
+      to: sysadmin
+
+
+# raise an alarm if the disk backlog
+# is above 2000ms (2s) per second
+# for 10 minutes
+# (i.e. the disk cannot catch up)
+
+template: 10min_disk_backlog
+      on: disk.backlog
+      os: linux
+   hosts: *
+families: *
+  lookup: average -10m unaligned
+   units: ms
+   every: 1m
+   green: 2000
+     red: 5000
+    warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1))
+    crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1))
+   delay: down 15m multiplier 1.2 max 1h
+    info: average of the kernel estimated disk backlog, for the last 10 minutes
+      to: sysadmin
diff --git a/health/health.d/dockerd.conf b/health/health.d/dockerd.conf
new file mode 100644
index 000000000..729906cdb
--- /dev/null
+++ b/health/health.d/dockerd.conf
@@ -0,0 +1,8 @@
+template: docker_unhealthy_containers
+      on: docker.unhealthy_containers
+   units: unhealthy containers
+   every: 10s
+  lookup: average -10s
+    crit: $this > 0
+    info: number of unhealthy containers
+      to: sysadmin
diff --git a/health/health.d/elasticsearch.conf b/health/health.d/elasticsearch.conf
new file mode 100644
index 000000000..dffd40965
--- /dev/null
+++ b/health/health.d/elasticsearch.conf
@@ -0,0 +1,9 @@
+   alarm: elasticsearch_last_collected
+      on: elasticsearch_local.cluster_health_status
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+    info: number of seconds since the last successful data collection
+      to: sysadmin
diff --git a/health/health.d/entropy.conf b/health/health.d/entropy.conf
new file mode 100644
index 000000000..66d44ec13
--- /dev/null
+++ b/health/health.d/entropy.conf
@@ -0,0 +1,16 @@
+
+# check if entropy is too low
+# the alarm is checked every 5 minutes
+# and examines the last 10 minutes of data
+
+   alarm: lowest_entropy
+      on: system.entropy
+      os: linux
+   hosts: *
+  lookup: min -10m unaligned
+   units: entries
+   every: 5m
+    warn: $this < (($status >= $WARNING) ? (200) : (100))
+   delay: down 1h multiplier 1.5 max 2h
+    info: minimum entries in the random numbers pool in the last 10 minutes
+      to: silent
diff --git a/health/health.d/fping.conf b/health/health.d/fping.conf
new file mode 100644
index 000000000..43658fef6
--- /dev/null
+++ b/health/health.d/fping.conf
@@ -0,0 +1,53 @@
+
+template: fping_last_collected_secs
+families: *
+      on: fping.latency
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ?
($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: sysadmin + +template: host_reachable +families: * + on: fping.latency + calc: $average != nan + units: up/down + every: 10s + crit: $this == 0 + info: states if the remote host is reachable + delay: down 30m multiplier 1.5 max 2h + to: sysadmin + +template: host_latency +families: * + on: fping.latency + lookup: average -10s unaligned of average + units: ms + every: 10s + green: 500 + red: 1000 + warn: $this > $green OR $max > $red + crit: $this > $red + info: average round trip delay during the last 10 seconds + delay: down 30m multiplier 1.5 max 2h + to: sysadmin + +template: packet_loss +families: * + on: fping.quality + lookup: average -10m unaligned of returned + calc: 100 - $this + green: 1 + red: 10 + units: % + every: 10s + warn: $this > $green + crit: $this > $red + info: packet loss percentage + delay: down 30m multiplier 1.5 max 2h + to: sysadmin + diff --git a/health/health.d/fronius.conf b/health/health.d/fronius.conf new file mode 100644 index 000000000..cdf6c8fcb --- /dev/null +++ b/health/health.d/fronius.conf @@ -0,0 +1,11 @@ +template: fronius_last_collected_secs +families: * + on: fronius.power + calc: $now - $last_collected_t + every: 10s + units: seconds ago + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: sitemgr diff --git a/health/health.d/haproxy.conf b/health/health.d/haproxy.conf new file mode 100644 index 000000000..e49c70d48 --- /dev/null +++ b/health/health.d/haproxy.conf @@ -0,0 +1,27 @@ +template: haproxy_backend_server_status + on: haproxy_hs.down + units: failed servers + every: 10s + lookup: average -10s + crit: $this > 0 + info: number of failed haproxy backend servers + to: sysadmin + +template: haproxy_backend_status + on: haproxy_hb.down + units: failed backend + every: 10s + lookup: average -10s + crit: $this > 0 + info: number of failed haproxy backends + to: sysadmin + +template: haproxy_last_collected + on: haproxy_hb.down + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + info: number of seconds since the last successful data collection + to: sysadmin diff --git a/health/health.d/httpcheck.conf b/health/health.d/httpcheck.conf new file mode 100644 index 000000000..0ddf35eab --- /dev/null +++ b/health/health.d/httpcheck.conf @@ -0,0 +1,99 @@ +template: httpcheck_last_collected_secs +families: * + on: httpcheck.status + calc: $now - $last_collected_t + every: 10s + units: seconds ago + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: sysadmin + +# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges +template: web_service_up +families: * + on: httpcheck.status + lookup: average -1m unaligned percentage of success + calc: ($this < 75) ? 
(0) : ($this) + every: 5s + units: up/down + info: at least 75% verified responses during last 60 seconds, ideal for badges + to: silent + +template: web_service_bad_content +families: * + on: httpcheck.status + lookup: average -5m unaligned percentage of bad_content + every: 10s + units: % + warn: $this >= 10 AND $this < 40 + crit: $this >= 40 + delay: down 5m multiplier 1.5 max 1h + info: average of unexpected http response content during the last 5 minutes + options: no-clear-notification + to: webmaster + +template: web_service_bad_status +families: * + on: httpcheck.status + lookup: average -5m unaligned percentage of bad_status + every: 10s + units: % + warn: $this >= 10 AND $this < 40 + crit: $this >= 40 + delay: down 5m multiplier 1.5 max 1h + info: average of unexpected http status during the last 5 minutes + options: no-clear-notification + to: webmaster + +template: web_service_timeouts +families: * + on: httpcheck.status + lookup: average -5m unaligned percentage of timeout + every: 10s + units: % + info: average of timeouts during the last 5 minutes + +template: no_web_service_connections +families: * + on: httpcheck.status + lookup: average -5m unaligned percentage of no_connection + every: 10s + units: % + info: average of failed requests during the last 5 minutes + +# combined timeout & no connection alarm +template: web_service_unreachable +families: * + on: httpcheck.status + calc: ($no_web_service_connections >= $web_service_timeouts) ? ($no_web_service_connections) : ($web_service_timeouts) + units: % + every: 10s + warn: ($no_web_service_connections >= 10 OR $web_service_timeouts >= 10) AND ($no_web_service_connections < 40 OR $web_service_timeouts < 40) + crit: $no_web_service_connections >= 40 OR $web_service_timeouts >= 40 + delay: down 5m multiplier 1.5 max 1h + info: average of failed requests either due to timeouts or no connection during the last 5 minutes + options: no-clear-notification + to: webmaster + +template: 1h_web_service_response_time +families: * + on: httpcheck.responsetime + lookup: average -1h unaligned of time + every: 30s + units: ms + info: average response time over the last hour + +template: web_service_slow +families: * + on: httpcheck.responsetime + lookup: average -3m unaligned of time + units: ms + every: 10s + warn: ($this > ($1h_web_service_response_time * 2) ) + crit: ($this > ($1h_web_service_response_time * 3) ) + info: average response time over the last 3 minutes, compared to the average over the last hour + delay: down 5m multiplier 1.5 max 1h + options: no-clear-notification + to: webmaster diff --git a/health/health.d/ipc.conf b/health/health.d/ipc.conf new file mode 100644 index 000000000..989d6e912 --- /dev/null +++ b/health/health.d/ipc.conf @@ -0,0 +1,28 @@ + +# you can disable an alarm notification by setting the 'to' line to: silent + + alarm: semaphores_used + on: system.ipc_semaphores + os: linux + hosts: * + calc: $semaphores * 100 / $ipc_semaphores_max + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (70) : (80)) + crit: $this > (($status == $CRITICAL) ? (70) : (90)) + delay: down 5m multiplier 1.5 max 1h + info: the percentage of IPC semaphores used + to: sysadmin + + alarm: semaphore_arrays_used + on: system.ipc_semaphore_arrays + os: linux + hosts: * + calc: $arrays * 100 / $ipc_semaphores_arrays_max + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (70) : (80)) + crit: $this > (($status == $CRITICAL) ? 
(70) : (90))
+   delay: down 5m multiplier 1.5 max 1h
+    info: the percentage of IPC semaphore arrays used
+      to: sysadmin
diff --git a/health/health.d/ipfs.conf b/health/health.d/ipfs.conf
new file mode 100644
index 000000000..3f77572d6
--- /dev/null
+++ b/health/health.d/ipfs.conf
@@ -0,0 +1,11 @@
+
+template: ipfs_datastore_usage
+      on: ipfs.repo_size
+    calc: $size * 100 / $avail
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (80) : (90))
+    crit: $this > (($status == $CRITICAL) ? (90) : (98))
+   delay: down 15m multiplier 1.5 max 1h
+    info: the IPFS datastore is close to running out of space
+      to: sysadmin
diff --git a/health/health.d/ipmi.conf b/health/health.d/ipmi.conf
new file mode 100644
index 000000000..c25581964
--- /dev/null
+++ b/health/health.d/ipmi.conf
@@ -0,0 +1,20 @@
+   alarm: ipmi_sensors_states
+      on: ipmi.sensors_states
+    calc: $warning + $critical
+   units: sensors
+   every: 10s
+    warn: $this > 0
+    crit: $critical > 0
+   delay: up 5m down 15m multiplier 1.5 max 1h
+    info: the number of IPMI sensors in a non-nominal state
+      to: sysadmin
+
+   alarm: ipmi_events
+      on: ipmi.events
+    calc: $events
+   units: events
+   every: 10s
+    warn: $this > 0
+   delay: up 5m down 15m multiplier 1.5 max 1h
+    info: the number of events in the IPMI System Event Log (SEL)
+      to: sysadmin
diff --git a/health/health.d/isc_dhcpd.conf b/health/health.d/isc_dhcpd.conf
new file mode 100644
index 000000000..8054656ff
--- /dev/null
+++ b/health/health.d/isc_dhcpd.conf
@@ -0,0 +1,10 @@
+template: isc_dhcpd_leases_size
+      on: isc_dhcpd.leases_total
+   units: KB
+   every: 60s
+    calc: $leases_size
+    warn: $this > 3072
+    crit: $this > 6144
+   delay: up 2m down 5m
+    info: the dhcpd.leases file is too big! The module can slow down your server.
+      to: sysadmin
diff --git a/health/health.d/lighttpd.conf b/health/health.d/lighttpd.conf
new file mode 100644
index 000000000..915907a4a
--- /dev/null
+++ b/health/health.d/lighttpd.conf
@@ -0,0 +1,14 @@
+
+# make sure lighttpd is running
+
+template: lighttpd_last_collected_secs
+      on: lighttpd.requests
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: webmaster
+
diff --git a/health/health.d/linux_power_supply.conf b/health/health.d/linux_power_supply.conf
new file mode 100644
index 000000000..27a172a14
--- /dev/null
+++ b/health/health.d/linux_power_supply.conf
@@ -0,0 +1,12 @@
+# Alert on low battery capacity.
+
+template: linux_power_supply_capacity
+      on: power_supply.capacity
+    calc: $capacity
+   units: %
+   every: 10s
+    warn: $this < 10
+    crit: $this < 5
+   delay: up 0 down 5m multiplier 1.2 max 1h
+    info: the percentage remaining capacity of the power supply
+      to: sysadmin
diff --git a/health/health.d/load.conf b/health/health.d/load.conf
new file mode 100644
index 000000000..ee0c54b8e
--- /dev/null
+++ b/health/health.d/load.conf
@@ -0,0 +1,56 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# Calculate the base trigger point for the load average alarms.
+# This is the maximum number of CPUs in the system over the past 1
+# minute, with a special case for a single CPU of setting the trigger at 2.
+   alarm: load_trigger
+      on: system.load
+      os: linux
+   hosts: *
+    calc: ($active_processors == nan or $active_processors == inf or $active_processors < 2) ? ( 2 ) : ( $active_processors )
+   units: cpus
+   every: 1m
+    info: trigger point for load average alarms
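load_trigger normalizes the load thresholds to the machine size; the alarms that follow scale it by fixed factors. A small C sketch of the combined check (illustrative values, not netdata code):

    #include <stdio.h>

    /* load_trigger: the number of active CPUs, but never less than 2,
     * so a single-core box still alarms only above a sane load */
    static double load_trigger(int active_processors) {
        return (active_processors < 2) ? 2.0 : (double)active_processors;
    }

    int main(void) {
        int cpus = 8;
        double load15 = 18.5;                 /* max of load15 over the last minute */
        double trigger = load_trigger(cpus);  /* 8 */

        /* load_average_15: warn above 2x the trigger, crit above 4x
         * (1.75x / 3.5x once the alarm is already raised) */
        if (load15 > 4.0 * trigger)
            printf("CRITICAL: load15 %.1f > %.1f\n", load15, 4.0 * trigger);
        else if (load15 > 2.0 * trigger)
            printf("WARNING: load15 %.1f > %.1f\n", load15, 2.0 * trigger);
        return 0;
    }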
+
+# Send alarms if the load average is unusually high.
+# These intentionally _do not_ calculate the average over the sampled
+# time period because the values being checked already are averages.
+   alarm: load_average_15
+      on: system.load
+      os: linux
+   hosts: *
+  lookup: max -1m unaligned of load15
+   units: load
+   every: 1m
+    warn: $this > (($status >= $WARNING) ? (1.75 * $load_trigger) : (2 * $load_trigger))
+    crit: $this > (($status == $CRITICAL) ? (3.5 * $load_trigger) : (4 * $load_trigger))
+   delay: down 15m multiplier 1.5 max 1h
+    info: fifteen-minute load average
+      to: sysadmin
+
+   alarm: load_average_5
+      on: system.load
+      os: linux
+   hosts: *
+  lookup: max -1m unaligned of load5
+   units: load
+   every: 1m
+    warn: $this > (($status >= $WARNING) ? (3.5 * $load_trigger) : (4 * $load_trigger))
+    crit: $this > (($status == $CRITICAL) ? (7 * $load_trigger) : (8 * $load_trigger))
+   delay: down 15m multiplier 1.5 max 1h
+    info: five-minute load average
+      to: sysadmin
+
+   alarm: load_average_1
+      on: system.load
+      os: linux
+   hosts: *
+  lookup: max -1m unaligned of load1
+   units: load
+   every: 1m
+    warn: $this > (($status >= $WARNING) ? (7 * $load_trigger) : (8 * $load_trigger))
+    crit: $this > (($status == $CRITICAL) ? (14 * $load_trigger) : (16 * $load_trigger))
+   delay: down 15m multiplier 1.5 max 1h
+    info: one-minute load average
+      to: sysadmin
diff --git a/health/health.d/mdstat.conf b/health/health.d/mdstat.conf
new file mode 100644
index 000000000..0f5f2837e
--- /dev/null
+++ b/health/health.d/mdstat.conf
@@ -0,0 +1,27 @@
+template: mdstat_last_collected
+      on: md.disks
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+    info: number of seconds since the last successful data collection
+      to: sysadmin
+
+template: mdstat_disks
+      on: md.disks
+   units: failed devices
+   every: 10s
+    calc: $total - $inuse
+    crit: $this > 0
+    info: the md array is degraded (one or more member devices have failed)
+      to: sysadmin
+
+template: mdstat_mismatch_cnt
+      on: md.mismatch_cnt
+   units: unsynchronized blocks
+    calc: $count
+   every: 10s
+    crit: $this > 0
+    info: the md array has unsynchronized blocks (non-zero mismatch count)
+      to: sysadmin
diff --git a/health/health.d/megacli.conf b/health/health.d/megacli.conf
new file mode 100644
index 000000000..1881a7be1
--- /dev/null
+++ b/health/health.d/megacli.conf
@@ -0,0 +1,48 @@
+   alarm: adapter_state
+      on: megacli.adapter_degraded
+   units: is degraded
+  lookup: sum -10s
+   every: 10s
+    crit: $this > 0
+    info: a MegaRAID adapter is in a degraded state
+      to: sysadmin
+
+template: bbu_relative_charge
+      on: megacli.bbu_relative_charge
+   units: percent
+  lookup: average -10s
+   every: 10s
+    warn: $this <= (($status >= $WARNING) ? (85) : (80))
+    crit: $this <= (($status == $CRITICAL) ?
(50) : (40))
+    info: BBU relative state of charge
+      to: sysadmin
+
+template: bbu_cycle_count
+      on: megacli.bbu_cycle_count
+   units: cycle count
+  lookup: average -10s
+   every: 10s
+    warn: $this >= 100
+    crit: $this >= 500
+    info: BBU cycle count
+      to: sysadmin
+
+   alarm: pd_media_errors
+      on: megacli.pd_media_error
+   units: media errors
+  lookup: sum -10s
+   every: 10s
+    warn: $this > 0
+   delay: down 1m multiplier 2 max 10m
+    info: physical drive media errors
+      to: sysadmin
+
+   alarm: pd_predictive_failures
+      on: megacli.pd_predictive_failure
+   units: predictive failures
+  lookup: sum -10s
+   every: 10s
+    warn: $this > 0
+   delay: down 1m multiplier 2 max 10m
+    info: physical drive predictive failures
+      to: sysadmin
diff --git a/health/health.d/memcached.conf b/health/health.d/memcached.conf
new file mode 100644
index 000000000..d248ef57a
--- /dev/null
+++ b/health/health.d/memcached.conf
@@ -0,0 +1,52 @@
+
+# make sure memcached is running
+
+template: memcached_last_collected_secs
+      on: memcached.cache
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: dba
+
+
+# detect if memcached cache is full
+
+template: memcached_cache_memory_usage
+      on: memcached.cache
+    calc: $used * 100 / ($used + $available)
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (70) : (80))
+    crit: $this > (($status == $CRITICAL) ? (80) : (90))
+   delay: up 0 down 15m multiplier 1.5 max 1h
+    info: current cache memory usage
+      to: dba
+
+
+# find the rate at which the memcached cache is filling
+
+template: cache_fill_rate
+      on: memcached.cache
+  lookup: min -10m at -50m unaligned of available
+    calc: ($this - $available) / (($now - $after) / 3600)
+   units: KB/hour
+   every: 1m
+    info: average rate the cache fills up (positive) or frees up (negative) space, over the last hour
+
+
+# find the hours remaining until the memcached cache is full
+
+template: out_of_cache_space_time
+      on: memcached.cache
+    calc: ($cache_fill_rate > 0) ? ($available / $cache_fill_rate) : (inf)
+   units: hours
+   every: 10s
+    warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
+    crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
+   delay: down 15m multiplier 1.5 max 1h
+    info: estimated time the cache will run out of space, if the system continues to add data at the rate of the last hour
+      to: dba
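This is the same extrapolation pattern as the disk fill-rate templates in disks.conf. As a worked example with illustrative numbers: if $available was 512000 KB an hour ago and is 500000 KB now, cache_fill_rate = (512000 - 500000) / 1 = 12000 KB/hour, and out_of_cache_space_time = 500000 / 12000 ≈ 41.7 hours; that raises nothing on its own (the warning raises below 8 hours) but would keep an already-raised warning active, since the keep threshold is 48 hours.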
diff --git a/health/health.d/memory.conf b/health/health.d/memory.conf
new file mode 100644
index 000000000..4a0e6e522
--- /dev/null
+++ b/health/health.d/memory.conf
@@ -0,0 +1,38 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+   alarm: 1hour_ecc_memory_correctable
+      on: mem.ecc_ce
+      os: linux
+   hosts: *
+  lookup: sum -10m unaligned
+   units: errors
+   every: 1m
+    warn: $this > 0
+   delay: down 1h multiplier 1.5 max 1h
+    info: number of ECC correctable errors during the last 10 minutes
+      to: sysadmin
+
+   alarm: 1hour_ecc_memory_uncorrectable
+      on: mem.ecc_ue
+      os: linux
+   hosts: *
+  lookup: sum -10m unaligned
+   units: errors
+   every: 1m
+    crit: $this > 0
+   delay: down 1h multiplier 1.5 max 1h
+    info: number of ECC uncorrectable errors during the last 10 minutes
+      to: sysadmin
+
+   alarm: 1hour_memory_hw_corrupted
+      on: mem.hwcorrupt
+      os: linux
+   hosts: *
+    calc: $HardwareCorrupted
+   units: MB
+   every: 10s
+    warn: $this > 0
+   delay: down 1h multiplier 1.5 max 1h
+    info: amount of memory corrupted due to a hardware failure
+      to: sysadmin
diff --git a/health/health.d/mongodb.conf b/health/health.d/mongodb.conf
new file mode 100644
index 000000000..a80cb3112
--- /dev/null
+++ b/health/health.d/mongodb.conf
@@ -0,0 +1,13 @@
+
+# make sure mongodb is running
+
+template: mongodb_last_collected_secs
+      on: mongodb.read_operations
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: dba
diff --git a/health/health.d/mysql.conf b/health/health.d/mysql.conf
new file mode 100644
index 000000000..39c401915
--- /dev/null
+++ b/health/health.d/mysql.conf
@@ -0,0 +1,100 @@
+
+# make sure mysql is running
+
+template: mysql_last_collected_secs
+      on: mysql.queries
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: dba
+
+
+# -----------------------------------------------------------------------------
+# slow queries
+
+template: mysql_10s_slow_queries
+      on: mysql.queries
+  lookup: sum -10s of slow_queries
+   units: slow queries
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (5) : (10))
+    crit: $this > (($status == $CRITICAL) ?
(10) : (20)) + delay: down 5m multiplier 1.5 max 1h + info: number of mysql slow queries over the last 10 seconds + to: dba + + +# ----------------------------------------------------------------------------- +# lock waits + +template: mysql_10s_table_locks_immediate + on: mysql.table_locks + lookup: sum -10s absolute of immediate + units: immediate locks + every: 10s + info: number of table immediate locks over the last 10 seconds + to: dba + +template: mysql_10s_table_locks_waited + on: mysql.table_locks + lookup: sum -10s absolute of waited + units: waited locks + every: 10s + info: number of table waited locks over the last 10 seconds + to: dba + +template: mysql_10s_waited_locks_ratio + on: mysql.table_locks + calc: ( ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate) > 0 ) ? (($mysql_10s_table_locks_waited * 100) / ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate)) : 0 + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (10) : (25)) + crit: $this > (($status == $CRITICAL) ? (25) : (50)) + delay: down 30m multiplier 1.5 max 1h + info: the ratio of mysql waited table locks, for the last 10 seconds + to: dba + + +# ----------------------------------------------------------------------------- +# connections + +template: mysql_connections + on: mysql.connections_active + calc: $active * 100 / $limit + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (60) : (70)) + crit: $this > (($status == $CRITICAL) ? (80) : (90)) + delay: down 15m multiplier 1.5 max 1h + info: the ratio of current active connections vs the maximum possible number of connections + to: dba + + +# ----------------------------------------------------------------------------- +# replication + +template: mysql_replication + on: mysql.slave_status + calc: ($sql_running == -1 OR $io_running == -1)?0:1 + units: ok/failed + every: 10s + crit: $this == 0 + delay: down 5m multiplier 1.5 max 1h + info: checks if mysql replication has stopped + to: dba + +template: mysql_replication_lag + on: mysql.slave_behind + calc: $seconds + units: seconds + every: 10s + warn: $this > (($status >= $WARNING) ? (5) : (10)) + crit: $this > (($status == $CRITICAL) ? (10) : (30)) + delay: down 15m multiplier 1.5 max 1h + info: the number of seconds mysql replication is behind this master + to: dba + diff --git a/health/health.d/named.conf b/health/health.d/named.conf new file mode 100644 index 000000000..4fc65c8ee --- /dev/null +++ b/health/health.d/named.conf @@ -0,0 +1,14 @@ + +# make sure named is running + +template: named_last_collected_secs + on: named.global_queries + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: domainadmin + diff --git a/health/health.d/net.conf b/health/health.d/net.conf new file mode 100644 index 000000000..489016dd5 --- /dev/null +++ b/health/health.d/net.conf @@ -0,0 +1,155 @@ + +# you can disable an alarm notification by setting the 'to' line to: silent + +# ----------------------------------------------------------------------------- +# net traffic overflow + + template: 1m_received_traffic_overflow + on: net.net + os: linux + hosts: * + families: * + lookup: average -1m unaligned absolute of received + calc: ($nic_speed_max > 0) ? 
($this * 100 / ($nic_speed_max * 1000)) : ( nan ) + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (80) : (85)) + crit: $this > (($status == $CRITICAL) ? (85) : (90)) + delay: down 1m multiplier 1.5 max 1h + info: interface received bandwidth usage over net device speed max + to: sysadmin + + template: 1m_sent_traffic_overflow + on: net.net + os: linux + hosts: * + families: * + lookup: average -1m unaligned absolute of sent + calc: ($nic_speed_max > 0) ? ($this * 100 / ($nic_speed_max * 1000)) : ( nan ) + units: % + every: 10s + warn: $this > (($status >= $WARNING) ? (80) : (85)) + crit: $this > (($status == $CRITICAL) ? (85) : (90)) + delay: down 1m multiplier 1.5 max 1h + info: interface sent bandwidth usage over net device speed max + to: sysadmin + +# ----------------------------------------------------------------------------- +# dropped packets + +# check if an interface is dropping packets +# the alarm is checked every 1 minute +# and examines the last 10 minutes of data + +template: inbound_packets_dropped + on: net.drops + os: linux + hosts: * +families: * + lookup: sum -10m unaligned absolute of inbound + units: packets + every: 1m + warn: $this >= 5 + delay: down 1h multiplier 1.5 max 2h + info: interface inbound dropped packets in the last 10 minutes + to: sysadmin + +template: outbound_packets_dropped + on: net.drops + os: linux + hosts: * +families: * + lookup: sum -10m unaligned absolute of outbound + units: packets + every: 1m + warn: $this >= 5 + delay: down 1h multiplier 1.5 max 2h + info: interface outbound dropped packets in the last 10 minutes + to: sysadmin + +template: inbound_packets_dropped_ratio + on: net.packets + os: linux + hosts: * +families: * + lookup: sum -10m unaligned absolute of received + calc: (($inbound_packets_dropped != nan AND $this > 0) ? ($inbound_packets_dropped * 100 / $this) : (0)) + units: % + every: 1m + warn: $this >= 0.1 + crit: $this >= 2 + delay: down 1h multiplier 1.5 max 2h + info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes + to: sysadmin + +template: outbound_packets_dropped_ratio + on: net.packets + os: linux + hosts: * +families: * + lookup: sum -10m unaligned absolute of sent + calc: (($outbound_packets_dropped != nan AND $this > 0) ? ($outbound_packets_dropped * 100 / $this) : (0)) + units: % + every: 1m + warn: $this >= 0.1 + crit: $this >= 2 + delay: down 1h multiplier 1.5 max 2h + info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes + to: sysadmin + + +# ----------------------------------------------------------------------------- +# FIFO errors + +# check if an interface is having FIFO +# buffer errors +# the alarm is checked every 1 minute +# and examines the last 10 minutes of data + +template: 10min_fifo_errors + on: net.fifo + os: linux + hosts: * +families: * + lookup: sum -10m unaligned absolute + units: errors + every: 1m + warn: $this > 0 + delay: down 1h multiplier 1.5 max 2h + info: interface fifo errors in the last 10 minutes + to: sysadmin + + +# ----------------------------------------------------------------------------- +# check for packet storms + +# 1. calculate the rate packets are received in 1m: 1m_received_packets_rate +# 2. do the same for the last 10s +# 3. 
raise an alarm if the latter is 10x or 20x the first
+# we assume the minimum packet storm should at least have
+# 10000 packets/s, average of the last 10 seconds
+
+template: 1m_received_packets_rate
+      on: net.packets
+      os: linux freebsd
+   hosts: *
+families: *
+  lookup: average -1m unaligned of received
+   units: packets
+   every: 10s
+    info: the average number of packets received during the last minute
+
+template: 10s_received_packets_storm
+      on: net.packets
+      os: linux freebsd
+   hosts: *
+families: *
+  lookup: average -10s unaligned of received
+    calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
+   every: 10s
+   units: %
+    warn: $this > (($status >= $WARNING)?(200):(5000))
+    crit: $this > (($status >= $WARNING)?(5000):(6000))
+ options: no-clear-notification
+    info: the % of the rate of received packets in the last 10 seconds, compared to the rate of the last minute (clear notification for this alarm will not be sent)
+      to: sysadmin
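The storm check compares the 10-second receive rate against the 1-minute baseline, flooring the baseline at 1000 packets/s so that a nearly idle interface cannot produce huge ratios. The calc line as a C sketch (values illustrative):

    #include <stdio.h>

    int main(void) {
        double rate_1m  = 400.0;    /* avg packets/s over the last minute */
        double rate_10s = 30000.0;  /* avg packets/s over the last 10 seconds */

        /* the baseline is floored at 1000 packets/s, as in the calc line */
        double baseline = (rate_1m < 1000.0) ? 1000.0 : rate_1m;
        double pct = rate_10s * 100.0 / baseline;   /* 3000% here */

        /* warn raises above 5000% (and holds above 200%),
         * crit raises above 6000% (and holds above 5000%) */
        printf("10s rate is %.0f%% of the 1m baseline\n", pct);
        return 0;
    }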
diff --git a/health/health.d/netfilter.conf b/health/health.d/netfilter.conf
new file mode 100644
index 000000000..1d07752cc
--- /dev/null
+++ b/health/health.d/netfilter.conf
@@ -0,0 +1,29 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+   alarm: netfilter_last_collected_secs
+      on: netfilter.conntrack_sockets
+      os: linux
+   hosts: *
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: sysadmin
+
+   alarm: netfilter_conntrack_full
+      on: netfilter.conntrack_sockets
+      os: linux
+   hosts: *
+  lookup: max -10s unaligned of connections
+    calc: $this * 100 / $netfilter_conntrack_max
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (70) : (80))
+    crit: $this > (($status == $CRITICAL) ? (80) : (90))
+   delay: down 5m multiplier 1.5 max 1h
+    info: the number of connections tracked by the netfilter connection tracker, as a percentage of the connection tracker table size
+      to: sysadmin
diff --git a/health/health.d/nginx.conf b/health/health.d/nginx.conf
new file mode 100644
index 000000000..a686c3d99
--- /dev/null
+++ b/health/health.d/nginx.conf
@@ -0,0 +1,14 @@
+
+# make sure nginx is running
+
+template: nginx_last_collected_secs
+      on: nginx.requests
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: webmaster
+
diff --git a/health/health.d/nginx_plus.conf b/health/health.d/nginx_plus.conf
new file mode 100644
index 000000000..5a171a76d
--- /dev/null
+++ b/health/health.d/nginx_plus.conf
@@ -0,0 +1,14 @@
+
+# make sure nginx_plus is running
+
+template: nginx_plus_last_collected_secs
+      on: nginx_plus.requests_total
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: webmaster
+
diff --git a/health/health.d/portcheck.conf b/health/health.d/portcheck.conf
new file mode 100644
index 000000000..f42b63d30
--- /dev/null
+++ b/health/health.d/portcheck.conf
@@ -0,0 +1,48 @@
+template: portcheck_last_collected_secs
+families: *
+      on: portcheck.status
+    calc: $now - $last_collected_t
+   every: 10s
+   units: seconds ago
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: sysadmin
+
+# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
+template: service_reachable
+families: *
+      on: portcheck.status
+  lookup: average -1m unaligned percentage of success
+    calc: ($this < 75) ? (0) : ($this)
+   every: 5s
+   units: up/down
+    info: at least 75% successful connections during last 60 seconds, ideal for badges
+      to: silent
+
+template: connection_timeouts
+families: *
+      on: portcheck.status
+  lookup: average -5m unaligned percentage of timeout
+   every: 10s
+   units: %
+    warn: $this >= 10 AND $this < 40
+    crit: $this >= 40
+   delay: down 5m multiplier 1.5 max 1h
+    info: average of timeouts during the last 5 minutes
+ options: no-clear-notification
+      to: sysadmin
+
+template: connection_fails
+families: *
+      on: portcheck.status
+  lookup: average -5m unaligned percentage of no_connection
+   every: 10s
+   units: %
+    warn: $this >= 10 AND $this < 40
+    crit: $this >= 40
+   delay: down 5m multiplier 1.5 max 1h
+    info: average of failed connections during the last 5 minutes
+ options: no-clear-notification
+      to: sysadmin
diff --git a/health/health.d/postgres.conf b/health/health.d/postgres.conf
new file mode 100644
index 000000000..4e0583b85
--- /dev/null
+++ b/health/health.d/postgres.conf
@@ -0,0 +1,13 @@
+
+# make sure postgres is running
+
+template: postgres_last_collected_secs
+      on: postgres.db_stat_transactions
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ?
($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: dba
diff --git a/health/health.d/qos.conf b/health/health.d/qos.conf
new file mode 100644
index 000000000..7290d15ff
--- /dev/null
+++ b/health/health.d/qos.conf
@@ -0,0 +1,18 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# check if a QoS class is dropping packets
+# the alarm is checked every 30 seconds
+# and examines the last 10 minutes of data
+
+#template: 10min_qos_packet_drops
+#      on: tc.qos_dropped
+#      os: linux
+#   hosts: *
+#  lookup: sum -10m unaligned absolute
+#   every: 30s
+#    warn: $this > 0
+#   delay: up 0 down 30m multiplier 1.5 max 1h
+#   units: packets
+#    info: dropped packets in the last 10 minutes
+#      to: sysadmin
diff --git a/health/health.d/ram.conf b/health/health.d/ram.conf
new file mode 100644
index 000000000..4e437322c
--- /dev/null
+++ b/health/health.d/ram.conf
@@ -0,0 +1,64 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+   alarm: used_ram_to_ignore
+      on: system.ram
+      os: linux freebsd
+   hosts: *
+    calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
+   every: 10s
+    info: the amount of memory reported as used while it can actually be resized to match system needs (e.g. the ZFS ARC)
+
+   alarm: ram_in_use
+      on: system.ram
+      os: linux
+   hosts: *
+#   calc: $used * 100 / ($used + $cached + $free)
+    calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (80) : (90))
+    crit: $this > (($status == $CRITICAL) ? (90) : (98))
+   delay: down 15m multiplier 1.5 max 1h
+    info: system RAM used
+      to: sysadmin
+
+   alarm: ram_available
+      on: mem.available
+      os: linux
+   hosts: *
+    calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
+   units: %
+   every: 10s
+    warn: $this < (($status >= $WARNING) ? ( 5) : (10))
+    crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+   delay: down 15m multiplier 1.5 max 1h
+    info: estimated amount of RAM available for userspace processes, without causing swapping
+      to: sysadmin
+
+## FreeBSD
+   alarm: ram_in_use
+      on: system.ram
+      os: freebsd
+   hosts: *
+    calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive)
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (80) : (90))
+    crit: $this > (($status == $CRITICAL) ? (90) : (98))
+   delay: down 15m multiplier 1.5 max 1h
+    info: system RAM usage
+      to: sysadmin
+
+   alarm: ram_available
+      on: system.ram
+      os: freebsd
+   hosts: *
+    calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers)
+   units: %
+   every: 10s
+    warn: $this < (($status >= $WARNING) ? ( 5) : (10))
+    crit: $this < (($status == $CRITICAL) ?
(10) : ( 5)) + delay: down 15m multiplier 1.5 max 1h + info: estimated amount of RAM available for userspace processes, without causing swapping + to: sysadmin diff --git a/health/health.d/redis.conf b/health/health.d/redis.conf new file mode 100644 index 000000000..c08a884a6 --- /dev/null +++ b/health/health.d/redis.conf @@ -0,0 +1,34 @@ + +# make sure redis is running + +template: redis_last_collected_secs + on: redis.operations + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: dba + +template: redis_bgsave_broken +families: * + on: redis.bgsave_health + every: 10s + crit: $rdb_last_bgsave_status != 0 + units: ok/failed + info: states if redis bgsave is working + delay: down 5m multiplier 1.5 max 1h + to: dba + +template: redis_bgsave_slow +families: * + on: redis.bgsave_now + every: 10s + warn: $rdb_bgsave_in_progress > 600 + crit: $rdb_bgsave_in_progress > 1200 + units: seconds + info: the time redis needs to save its database + delay: down 5m multiplier 1.5 max 1h + to: dba diff --git a/health/health.d/retroshare.conf b/health/health.d/retroshare.conf new file mode 100644 index 000000000..2344b60ec --- /dev/null +++ b/health/health.d/retroshare.conf @@ -0,0 +1,25 @@ +# make sure RetroShare is running + +template: retroshare_last_collected_secs + on: retroshare.peers + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: sysadmin + +# make sure the DHT is fine when active + +template: retroshare_dht_working + on: retroshare.dht + calc: $dht_size_all + units: peers + every: 1m + warn: $this < (($status >= $WARNING) ? (120) : (100)) + crit: $this < (($status == $CRITICAL) ? (10) : (1)) + delay: up 0 down 15m multiplier 1.5 max 1h + info: Checks if the DHT has enough peers to operate + to: sysadmin diff --git a/health/health.d/softnet.conf b/health/health.d/softnet.conf new file mode 100644 index 000000000..77c804bfd --- /dev/null +++ b/health/health.d/softnet.conf @@ -0,0 +1,40 @@ + +# you can disable an alarm notification by setting the 'to' line to: silent + +# check for common /proc/net/softnet_stat errors + + alarm: 10min_netdev_backlog_exceeded + on: system.softnet_stat + os: linux + hosts: * + lookup: sum -10m unaligned absolute of dropped + units: packets + every: 1m + warn: $this > 0 + delay: down 1h multiplier 1.5 max 2h + info: number of packets dropped in the last 10min, because sysctl net.core.netdev_max_backlog was exceeded (this can be a cause for dropped packets) + to: sysadmin + + alarm: 10min_netdev_budget_ran_outs + on: system.softnet_stat + os: linux + hosts: * + lookup: sum -10m unaligned absolute of squeezed + units: events + every: 1m + warn: $this > (($status >= $WARNING) ? 
(0) : (10))
+   delay: down 1h multiplier 1.5 max 2h
+    info: number of times, during the last 10min, ksoftirqd ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs, with work remaining (this can be a cause for dropped packets)
+      to: silent
+
+   alarm: 10min_netisr_backlog_exceeded
+      on: system.softnet_stat
+      os: freebsd
+   hosts: *
+  lookup: sum -10m unaligned absolute of qdrops
+   units: packets
+   every: 1m
+    warn: $this > 0
+   delay: down 1h multiplier 1.5 max 2h
+    info: number of drops in the last 10min, because sysctl net.route.netisr_maxqlen was exceeded (this can be a cause for dropped packets)
+      to: sysadmin
diff --git a/health/health.d/squid.conf b/health/health.d/squid.conf
new file mode 100644
index 000000000..06cc9678f
--- /dev/null
+++ b/health/health.d/squid.conf
@@ -0,0 +1,14 @@
+
+# make sure squid is running
+
+template: squid_last_collected_secs
+      on: squid.clients_requests
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: proxyadmin
+
diff --git a/health/health.d/stiebeleltron.conf b/health/health.d/stiebeleltron.conf
new file mode 100644
index 000000000..e0361eb20
--- /dev/null
+++ b/health/health.d/stiebeleltron.conf
@@ -0,0 +1,11 @@
+template: stiebeleltron_last_collected_secs
+families: *
+      on: stiebeleltron.heating.hc1
+    calc: $now - $last_collected_t
+   every: 10s
+   units: seconds ago
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: sitemgr
diff --git a/health/health.d/swap.conf b/health/health.d/swap.conf
new file mode 100644
index 000000000..f920b0807
--- /dev/null
+++ b/health/health.d/swap.conf
@@ -0,0 +1,43 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+   alarm: 30min_ram_swapped_out
+      on: system.swapio
+      os: linux freebsd
+   hosts: *
+  lookup: sum -30m unaligned absolute of out
+    # we have to convert KB to MB by dividing $this (i.e. the result of the lookup) by 1024
+    calc: $this / 1024 * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
+   units: % of RAM
+   every: 1m
+    warn: $this > (($status >= $WARNING) ? (10) : (20))
+    crit: $this > (($status == $CRITICAL) ? (20) : (30))
+   delay: up 0 down 15m multiplier 1.5 max 1h
+    info: the amount of memory swapped out in the last 30 minutes, as a percentage of the system RAM
+      to: sysadmin
+
+   alarm: ram_in_swap
+      on: system.swap
+      os: linux
+   hosts: *
+    calc: $used * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
+   units: % of RAM
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (15) : (20))
+    crit: $this > (($status == $CRITICAL) ? (40) : (50))
+   delay: up 30s down 15m multiplier 1.5 max 1h
+    info: the swap memory used, as a percentage of the system RAM
+      to: sysadmin
+
+   alarm: used_swap
+      on: system.swap
+      os: linux freebsd
+   hosts: *
+    calc: $used * 100 / ( $used + $free )
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? (80) : (90))
+    crit: $this > (($status == $CRITICAL) ?
(90) : (98))
+   delay: up 30s down 15m multiplier 1.5 max 1h
+    info: the percentage of swap memory used
+      to: sysadmin
diff --git a/health/health.d/tcp_conn.conf b/health/health.d/tcp_conn.conf
new file mode 100644
index 000000000..7aa9a9800
--- /dev/null
+++ b/health/health.d/tcp_conn.conf
@@ -0,0 +1,19 @@
+
+#
+# ${tcp_max_connections} may be nan or -1 if the system
+# supports dynamic threshold for TCP connections.
+# In this case, the alarm will always be zero.
+#
+
+   alarm: tcp_connections
+      on: ipv4.tcpsock
+      os: linux
+   hosts: *
+    calc: (${tcp_max_connections} > 0) ? ( ${connections} * 100 / ${tcp_max_connections} ) : 0
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING ) ? ( 60 ) : ( 80 ))
+    crit: $this > (($status >= $CRITICAL) ? ( 80 ) : ( 90 ))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the percentage of IPv4 TCP connections over the max allowed
+      to: sysadmin
diff --git a/health/health.d/tcp_listen.conf b/health/health.d/tcp_listen.conf
new file mode 100644
index 000000000..552930ab7
--- /dev/null
+++ b/health/health.d/tcp_listen.conf
@@ -0,0 +1,82 @@
+#
+# There are two queues involved when incoming TCP connections are handled
+# (both at the kernel):
+#
+# SYN queue
+# The SYN queue tracks TCP handshakes until connections are fully established.
+# It overflows when too many incoming TCP connection requests hang in the
+# half-open state and the server is not configured to fall back to SYN cookies.
+# Overflows are usually caused by SYN flood DoS attacks (i.e. someone sends
+# lots of SYN packets and never completes the handshakes).
+#
+# Accept queue
+# The accept queue holds fully established TCP connections waiting to be handled
+# by the listening application. It overflows when the server application fails
+# to accept new connections at the rate they are coming in.
+#
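The alarms that follow consume ListenOverflows, ListenDrops, TCPReqQFullDrop and TCPReqQFullDoCookies, counters the kernel exposes on the TcpExt lines of /proc/net/netstat on Linux. A simplified C sketch of reading one such counter directly (illustrative only; this is not netdata's collector code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Read one TcpExt counter (e.g. "ListenOverflows") from
     * /proc/net/netstat, assuming the usual paired header/value
     * "TcpExt:" lines. Returns -1 if not found. */
    static long long tcpext_counter(const char *name) {
        FILE *fp = fopen("/proc/net/netstat", "r");
        if (!fp) return -1;

        char header[4096], values[4096];
        long long result = -1;
        while (fgets(header, sizeof(header), fp)) {
            if (strncmp(header, "TcpExt:", 7) != 0) continue;
            if (!fgets(values, sizeof(values), fp)) break;

            /* walk the header and value lines in lock-step */
            char *hs, *vs;
            char *h = strtok_r(header, " \n", &hs);
            char *v = strtok_r(values, " \n", &vs);
            while (h && v) {
                h = strtok_r(NULL, " \n", &hs);
                v = strtok_r(NULL, " \n", &vs);
                if (h && v && strcmp(h, name) == 0) {
                    result = atoll(v);
                    break;
                }
            }
            break;
        }
        fclose(fp);
        return result;
    }

    int main(void) {
        printf("ListenOverflows since boot: %lld\n",
               tcpext_counter("ListenOverflows"));
        return 0;
    }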
+#
+# -----------------------------------------------------------------------------
+# tcp accept queue (at the kernel)
+
+   alarm: 1m_tcp_accept_queue_overflows
+      on: ip.tcp_accept_queue
+      os: linux
+   hosts: *
+  lookup: sum -60s unaligned absolute of ListenOverflows
+   units: overflows
+   every: 10s
+    crit: $this > 0
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the number of times the TCP accept queue of the kernel overflowed, during the last minute
+      to: sysadmin
+
+# THIS IS TOO GENERIC
+# CHECK: https://github.com/netdata/netdata/issues/3234#issuecomment-423935842
+   alarm: 1m_tcp_accept_queue_drops
+      on: ip.tcp_accept_queue
+      os: linux
+   hosts: *
+  lookup: sum -60s unaligned absolute of ListenDrops
+   units: drops
+   every: 10s
+#    warn: $this > 0
+    crit: $this > (($status == $CRITICAL) ? (0) : (150))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the number of times the TCP accept queue of the kernel dropped packets, during the last minute (includes bogus packets received)
+      to: sysadmin
+
+
+# -----------------------------------------------------------------------------
+# tcp SYN queue (at the kernel)
+
+# When the SYN queue is full, either TcpExtTCPReqQFullDoCookies or
+# TcpExtTCPReqQFullDrop is incremented, depending on whether SYN cookies are
+# enabled or not. In both cases this probably indicates a SYN flood attack,
+# so a notification should be sent.
+
+   alarm: 1m_tcp_syn_queue_drops
+      on: ip.tcp_syn_queue
+      os: linux
+   hosts: *
+  lookup: sum -60s unaligned absolute of TCPReqQFullDrop
+   units: drops
+   every: 10s
+    warn: $this > 0
+    crit: $this > (($status == $CRITICAL) ? (0) : (60))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the number of times the TCP SYN queue of the kernel was full and dropped packets, during the last minute
+      to: sysadmin
+
+   alarm: 1m_tcp_syn_queue_cookies
+      on: ip.tcp_syn_queue
+      os: linux
+   hosts: *
+  lookup: sum -60s unaligned absolute of TCPReqQFullDoCookies
+   units: cookies
+   every: 10s
+    warn: $this > 0
+    crit: $this > (($status == $CRITICAL) ? (0) : (60))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the number of times the TCP SYN queue of the kernel was full and sent SYN cookies, during the last minute
+      to: sysadmin
+
diff --git a/health/health.d/tcp_mem.conf b/health/health.d/tcp_mem.conf
new file mode 100644
index 000000000..6927d5765
--- /dev/null
+++ b/health/health.d/tcp_mem.conf
@@ -0,0 +1,20 @@
+#
+# check
+# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html
+#
+# We give a warning when TCP is under memory pressure
+# and a critical when TCP is at 90% of its upper memory limit
+#
+
+   alarm: tcp_memory
+      on: ipv4.sockstat_tcp_mem
+      os: linux
+   hosts: *
+    calc: ${mem} * 100 / ${tcp_mem_high}
+   units: %
+   every: 10s
+    warn: ${mem} > (($status >= $WARNING ) ? ( ${tcp_mem_pressure} * 0.8 ) : ( ${tcp_mem_pressure} ))
+    crit: ${mem} > (($status >= $CRITICAL ) ? ( ${tcp_mem_pressure} ) : ( ${tcp_mem_high} * 0.9 ))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the amount of TCP memory as a percentage of its max memory limit
+      to: sysadmin
diff --git a/health/health.d/tcp_orphans.conf b/health/health.d/tcp_orphans.conf
new file mode 100644
index 000000000..280d6590f
--- /dev/null
+++ b/health/health.d/tcp_orphans.conf
@@ -0,0 +1,21 @@
+
+#
+# check
+# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html
+#
+# The kernel may penalize orphans by 2x or even 4x
+# so we alarm warning at 25% and critical at 50%
+#
+
+   alarm: tcp_orphans
+      on: ipv4.sockstat_tcp_sockets
+      os: linux
+   hosts: *
+    calc: ${orphan} * 100 / ${tcp_max_orphans}
+   units: %
+   every: 10s
+    warn: $this > (($status >= $WARNING ) ? ( 20 ) : ( 25 ))
+    crit: $this > (($status >= $CRITICAL) ? ( 25 ) : ( 50 ))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: the percentage of orphan IPv4 TCP sockets over the max allowed (this may lead to too-many-orphans errors)
+      to: sysadmin
diff --git a/health/health.d/tcp_resets.conf b/health/health.d/tcp_resets.conf
new file mode 100644
index 000000000..91dad3c6a
--- /dev/null
+++ b/health/health.d/tcp_resets.conf
@@ -0,0 +1,67 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------------------------------------
+
+   alarm: ipv4_tcphandshake_last_collected_secs
+      on: ipv4.tcphandshake
+      os: linux freebsd
+   hosts: *
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ?
($update_every) : (60 * $update_every))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: sysadmin
+
+# -----------------------------------------------------------------------------
+# tcp resets this host sends
+
+   alarm: 1m_ipv4_tcp_resets_sent
+      on: ipv4.tcphandshake
+      os: linux
+   hosts: *
+  lookup: average -1m at -10s unaligned absolute of OutRsts
+   units: tcp resets/s
+   every: 10s
+    info: average TCP RESETS this host is sending, over the last minute
+
+   alarm: 10s_ipv4_tcp_resets_sent
+      on: ipv4.tcphandshake
+      os: linux
+   hosts: *
+  lookup: average -10s unaligned absolute of OutRsts
+   units: tcp resets/s
+   every: 10s
+    warn: $this > ((($1m_ipv4_tcp_resets_sent < 5)?(5):($1m_ipv4_tcp_resets_sent)) * (($status >= $WARNING) ? (1) : (20)))
+   delay: up 0 down 60m multiplier 1.2 max 2h
+ options: no-clear-notification
+    info: average TCP RESETS this host is sending, over the last 10 seconds (this can be an indication that a port scan is in progress, or that a service running on this host has crashed; clear notification for this alarm will not be sent)
+      to: sysadmin
+
+# -----------------------------------------------------------------------------
+# tcp resets this host receives
+
+   alarm: 1m_ipv4_tcp_resets_received
+      on: ipv4.tcphandshake
+      os: linux freebsd
+   hosts: *
+  lookup: average -1m at -10s unaligned absolute of AttemptFails
+   units: tcp resets/s
+   every: 10s
+    info: average TCP RESETS this host is receiving, over the last minute
+
+   alarm: 10s_ipv4_tcp_resets_received
+      on: ipv4.tcphandshake
+      os: linux freebsd
+   hosts: *
+  lookup: average -10s unaligned absolute of AttemptFails
+   units: tcp resets/s
+   every: 10s
+    warn: $this > ((($1m_ipv4_tcp_resets_received < 5)?(5):($1m_ipv4_tcp_resets_received)) * (($status >= $WARNING) ? (1) : (10)))
+   delay: up 0 down 60m multiplier 1.2 max 2h
+ options: no-clear-notification
+    info: average TCP RESETS this host is receiving, over the last 10 seconds (this can be an indication that a service this host needs has crashed; clear notification for this alarm will not be sent)
+      to: sysadmin
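The 10-second reset alarms fire on a multiple of the 1-minute baseline, with the baseline floored at 5 resets/s; the multiplier drops to 1 once the alarm is raised. The received-direction condition as a C sketch (values illustrative):

    #include <stdio.h>

    int main(void) {
        double resets_1m  = 2.0;   /* baseline: avg resets/s over the last minute */
        double resets_10s = 65.0;  /* avg resets/s over the last 10 seconds */
        int already_warning = 0;

        /* the baseline is floored at 5 resets/s; the multiplier is 10
         * to raise (1 to keep) for the "received" direction */
        double baseline  = (resets_1m < 5.0) ? 5.0 : resets_1m;
        double threshold = baseline * (already_warning ? 1.0 : 10.0);

        if (resets_10s > threshold)  /* 65 > 50: the alarm raises */
            printf("WARNING: %.0f resets/s vs threshold %.0f\n",
                   resets_10s, threshold);
        return 0;
    }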
diff --git a/health/health.d/udp_errors.conf b/health/health.d/udp_errors.conf
new file mode 100644
index 000000000..5140228f5
--- /dev/null
+++ b/health/health.d/udp_errors.conf
@@ -0,0 +1,49 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------------------------------------
+
+   alarm: ipv4_udperrors_last_collected_secs
+      on: ipv4.udperrors
+      os: linux freebsd
+   hosts: *
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: up 0 down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: sysadmin
+
+# -----------------------------------------------------------------------------
+# UDP receive buffer errors
+
+   alarm: 1m_ipv4_udp_receive_buffer_errors
+      on: ipv4.udperrors
+      os: linux freebsd
+   hosts: *
+  lookup: sum -1m unaligned absolute of RcvbufErrors
+   units: errors
+   every: 10s
+    warn: $this > 0
+    crit: $this > (($status == $CRITICAL) ? (0) : (100))
+    info: number of UDP receive buffer errors during the last minute
+   delay: up 0 down 60m multiplier 1.2 max 2h
+      to: sysadmin
+
+# -----------------------------------------------------------------------------
+# UDP send buffer errors
+
+   alarm: 1m_ipv4_udp_send_buffer_errors
+      on: ipv4.udperrors
+      os: linux
+   hosts: *
+  lookup: sum -1m unaligned absolute of SndbufErrors
+   units: errors
+   every: 10s
+    warn: $this > 0
+    crit: $this > (($status == $CRITICAL) ? (0) : (100))
+    info: number of UDP send buffer errors during the last minute
+   delay: up 0 down 60m multiplier 1.2 max 2h
+      to: sysadmin
diff --git a/health/health.d/varnish.conf b/health/health.d/varnish.conf
new file mode 100644
index 000000000..cca7446b4
--- /dev/null
+++ b/health/health.d/varnish.conf
@@ -0,0 +1,9 @@
+   alarm: varnish_last_collected
+      on: varnish.uptime
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+    info: number of seconds since the last successful data collection
+      to: sysadmin
diff --git a/health/health.d/web_log.conf b/health/health.d/web_log.conf
new file mode 100644
index 000000000..d8be88b47
--- /dev/null
+++ b/health/health.d/web_log.conf
@@ -0,0 +1,163 @@
+
+# make sure we can collect web log data
+
+template: last_collected_secs
+      on: web_log.response_codes
+families: *
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: webmaster
+
+
+# -----------------------------------------------------------------------------
+# high level response code alarms
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: 1m_requests
+      on: web_log.response_statuses
+families: *
+  lookup: sum -1m unaligned
+    calc: ($this == 0)?(1):($this)
+   units: requests
+   every: 10s
+    info: the sum of all HTTP requests over the last minute
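Note the clamp in 1m_requests: when the sum is zero it becomes 1, so the percentage templates that divide by it can never divide by zero, while the warn/crit gates still require real traffic. A small C sketch (values illustrative):

    #include <stdio.h>

    int main(void) {
        double requests_1m = 60.0;  /* sum of requests over the last minute */
        double successful  = 57.0;

        /* 1m_requests clamps to 1 so the ratio never divides by zero */
        double denom = (requests_1m == 0) ? 1.0 : requests_1m;
        double pct_successful = successful * 100.0 / denom;   /* 95% */

        /* the alarms only engage above 120 requests per minute */
        if (requests_1m > 120 && pct_successful < 85)
            printf("WARNING: only %.1f%% successful\n", pct_successful);
        else
            printf("%.1f%% successful (below the traffic gate, no alarm)\n",
                   pct_successful);
        return 0;
    }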
( 20 ) : ( 30 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP redirects (3xx except 304) over the last minute
+ to: webmaster
+
+template: 1m_bad_requests
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned of bad_requests
+ calc: $this * 100 / $1m_requests
+ units: %
+ every: 10s
+ warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 30 ) : ( 50 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP bad requests (4xx) over the last minute
+ to: webmaster
+
+template: 1m_internal_errors
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned of server_errors
+ calc: $this * 100 / $1m_requests
+ units: %
+ every: 10s
+ warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 1 ) : ( 2 )) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 2 ) : ( 5 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP internal server errors (5xx) over the last minute
+ to: webmaster
+
+
+# -----------------------------------------------------------------------------
+# web slow
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: 10m_response_time
+ on: web_log.response_time
+families: *
+ lookup: average -10m unaligned of avg
+ units: ms
+ every: 30s
+ info: the average time to respond to HTTP requests, over the last 10 minutes
+
+template: web_slow
+ on: web_log.response_time
+families: *
+ lookup: average -1m unaligned of avg
+ units: ms
+ every: 10s
+ green: 500
+ red: 1000
+ warn: ($1m_requests > 120) ? ($this > $green && $this > ($10m_response_time * 2) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > $red && $this > ($10m_response_time * 4) ) : ( 0 )
+ delay: down 15m multiplier 1.5 max 1h
+ info: the average time to respond to HTTP requests, over the last minute
+ options: no-clear-notification
+ to: webmaster
+
+# -----------------------------------------------------------------------------
+# web too many or too few requests
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $5m_successful_old > 120
+#
+# i.e. when there were at least 120 requests during the 5 minutes starting
+# at -10m and ending at -5m
+
+template: 5m_successful_old
+ on: web_log.response_statuses
+families: *
+ lookup: average -5m at -5m unaligned of successful_requests
+ units: requests/s
+ every: 30s
+ info: average rate of successful HTTP requests over the last 5 minutes
+
+template: 5m_successful
+ on: web_log.response_statuses
+families: *
+ lookup: average -5m unaligned of successful_requests
+ units: requests/s
+ every: 30s
+ info: average rate of successful HTTP requests over the last 5 minutes
+
+template: 5m_requests_ratio
+ on: web_log.response_codes
+families: *
+ calc: ($5m_successful_old > 0)?($5m_successful * 100 / $5m_successful_old):(100)
+ units: %
+ every: 30s
+ warn: ($5m_successful_old > 120) ? ($this > 200 OR $this < 50) : (0)
+ crit: ($5m_successful_old > 120) ? ($this > 400 OR $this < 25) : (0)
+ delay: down 15m multiplier 1.5 max 1h
+ options: no-clear-notification
+ info: the percentage of successful web requests over the last 5 minutes, \
+ compared with the previous 5 minutes \
+ (clear notification for this alarm will not be sent)
+ to: webmaster
+
diff --git a/health/health.d/zfs.conf b/health/health.d/zfs.conf
new file mode 100644
index 000000000..af73824e6
--- /dev/null
+++ b/health/health.d/zfs.conf
@@ -0,0 +1,10 @@
+
+ alarm: zfs_memory_throttle
+ on: zfs.memory_ops
+ lookup: sum -10m unaligned absolute of throttled
+ units: events
+ every: 1m
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 2h
+ info: the number of times ZFS had to limit the ARC growth in the last 10 minutes
+ to: sysadmin
diff --git a/health/health.h b/health/health.h
new file mode 100644
index 000000000..ff7a4d9bf
--- /dev/null
+++ b/health/health.h
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_HEALTH_H
+#define NETDATA_HEALTH_H 1
+
+#include "../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_HEALTH \
+ { \
+ .name = "HEALTH", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = health_main \
+ },
+
+extern unsigned int default_health_enabled;
+
+#define HEALTH_ENTRY_FLAG_PROCESSED 0x00000001
+#define HEALTH_ENTRY_FLAG_UPDATED 0x00000002
+#define HEALTH_ENTRY_FLAG_EXEC_RUN 0x00000004
+#define HEALTH_ENTRY_FLAG_EXEC_FAILED 0x00000008
+#define HEALTH_ENTRY_FLAG_SAVED 0x10000000
+#define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000
+
+extern void health_init(void);
+extern void *health_main(void *ptr);
+
+extern void health_reload(void);
+
+extern int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result);
+extern void health_alarms2json(RRDHOST *host, BUFFER *wb, int all);
+extern void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after);
+
+void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf);
+
+extern int health_alarm_log_open(RRDHOST *host);
+extern void health_alarm_log_close(RRDHOST *host);
+extern void health_log_rotate(RRDHOST *host);
+extern void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae);
+extern ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename);
+extern void health_alarm_log_load(RRDHOST *host);
+
+extern void health_alarm_log(
+ RRDHOST *host,
+ uint32_t alarm_id,
+ uint32_t alarm_event_id,
+ time_t when,
+ const char *name,
+ const char *chart,
+ const char *family,
+ const char *exec,
+ const char *recipient,
+ time_t duration,
+ calculated_number old_value,
+ calculated_number new_value,
+ RRDCALC_STATUS old_status,
+ RRDCALC_STATUS new_status,
+ const char *source,
+ const char *units,
+ const char *info,
+ int delay,
+ uint32_t flags
+);
+
+extern void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath);
+extern char *health_user_config_dir(void);
+extern char *health_stock_config_dir(void);
+extern void health_reload_host(RRDHOST *host);
+extern void health_alarm_log_free(RRDHOST *host);
+
+extern void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae);
+
+#endif //NETDATA_HEALTH_H
diff --git a/health/health_config.c b/health/health_config.c
new file mode 100644
index 000000000..d4af9776f
--- /dev/null
+++ b/health/health_config.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "health.h"
+
+#define HEALTH_CONF_MAX_LINE 4096
+
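+// The parser below consumes the same 'key: value' syntax used by the stock
+// health.d files added earlier in this patch. As an illustrative sketch only
+// (the alarm name, chart and thresholds here are made up, not taken from a
+// stock file), a typical entry looks like:
+//
+// alarm: example_alarm
+// on: example.chart
+// lookup: average -1m unaligned of some_dimension
+// every: 10s
+// warn: $this > (($status >= $WARNING) ? (5) : (10))
+//
+// health_readfile() matches each recognized key by hash first (simple_uhash)
+// and then confirms it with a case-insensitive string comparison.
+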
+#define HEALTH_ALARM_KEY "alarm" +#define HEALTH_TEMPLATE_KEY "template" +#define HEALTH_ON_KEY "on" +#define HEALTH_HOST_KEY "hosts" +#define HEALTH_OS_KEY "os" +#define HEALTH_FAMILIES_KEY "families" +#define HEALTH_LOOKUP_KEY "lookup" +#define HEALTH_CALC_KEY "calc" +#define HEALTH_EVERY_KEY "every" +#define HEALTH_GREEN_KEY "green" +#define HEALTH_RED_KEY "red" +#define HEALTH_WARN_KEY "warn" +#define HEALTH_CRIT_KEY "crit" +#define HEALTH_EXEC_KEY "exec" +#define HEALTH_RECIPIENT_KEY "to" +#define HEALTH_UNITS_KEY "units" +#define HEALTH_INFO_KEY "info" +#define HEALTH_DELAY_KEY "delay" +#define HEALTH_OPTIONS_KEY "options" + +static inline int rrdcalc_add_alarm_from_config(RRDHOST *host, RRDCALC *rc) { + if(!rc->chart) { + error("Health configuration for alarm '%s' does not have a chart", rc->name); + return 0; + } + + if(!rc->update_every) { + error("Health configuration for alarm '%s.%s' has no frequency (parameter 'every'). Ignoring it.", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + if(!RRDCALC_HAS_DB_LOOKUP(rc) && !rc->calculation && !rc->warning && !rc->critical) { + error("Health configuration for alarm '%s.%s' is useless (no db lookup, no calculation, no warning and no critical expressions)", rc->chart?rc->chart:"NOCHART", rc->name); + return 0; + } + + if (rrdcalc_exists(host, rc->chart, rc->name, rc->hash_chart, rc->hash)) + return 0; + + rc->id = rrdcalc_get_unique_id(host, rc->chart, rc->name, &rc->next_event_id); + + debug(D_HEALTH, "Health configuration adding alarm '%s.%s' (%u): exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f", + rc->chart?rc->chart:"NOCHART", + rc->name, + rc->id, + (rc->exec)?rc->exec:"DEFAULT", + (rc->recipient)?rc->recipient:"DEFAULT", + rc->green, + rc->red, + (int)rc->group, + rc->after, + rc->before, + rc->options, + (rc->dimensions)?rc->dimensions:"NONE", + rc->update_every, + (rc->calculation)?rc->calculation->parsed_as:"NONE", + (rc->warning)?rc->warning->parsed_as:"NONE", + (rc->critical)?rc->critical->parsed_as:"NONE", + rc->source, + rc->delay_up_duration, + rc->delay_down_duration, + rc->delay_max_duration, + rc->delay_multiplier + ); + + rrdcalc_create_part2(host, rc); + return 1; +} + +static inline int rrdcalctemplate_add_template_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) { + if(unlikely(!rt->context)) { + error("Health configuration for template '%s' does not have a context", rt->name); + return 0; + } + + if(unlikely(!rt->update_every)) { + error("Health configuration for template '%s' has no frequency (parameter 'every'). 
Ignoring it.", rt->name); + return 0; + } + + if(unlikely(!RRDCALCTEMPLATE_HAS_CALCULATION(rt) && !rt->warning && !rt->critical)) { + error("Health configuration for template '%s' is useless (no calculation, no warning and no critical evaluation)", rt->name); + return 0; + } + + RRDCALCTEMPLATE *t, *last = NULL; + for (t = host->templates; t ; last = t, t = t->next) { + if(unlikely(t->hash_name == rt->hash_name + && !strcmp(t->name, rt->name) + && !strcmp(t->family_match?t->family_match:"*", rt->family_match?rt->family_match:"*") + )) { + error("Health configuration template '%s' already exists for host '%s'.", rt->name, host->hostname); + return 0; + } + } + + debug(D_HEALTH, "Health configuration adding template '%s': context '%s', exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f", + rt->name, + (rt->context)?rt->context:"NONE", + (rt->exec)?rt->exec:"DEFAULT", + (rt->recipient)?rt->recipient:"DEFAULT", + rt->green, + rt->red, + (int)rt->group, + rt->after, + rt->before, + rt->options, + (rt->dimensions)?rt->dimensions:"NONE", + rt->update_every, + (rt->calculation)?rt->calculation->parsed_as:"NONE", + (rt->warning)?rt->warning->parsed_as:"NONE", + (rt->critical)?rt->critical->parsed_as:"NONE", + rt->source, + rt->delay_up_duration, + rt->delay_down_duration, + rt->delay_max_duration, + rt->delay_multiplier + ); + + if(likely(last)) { + last->next = rt; + } + else { + rt->next = host->templates; + host->templates = rt; + } + + return 1; +} + +static inline int health_parse_duration(char *string, int *result) { + // make sure it is a number + if(!*string || !(isdigit(*string) || *string == '+' || *string == '-')) { + *result = 0; + return 0; + } + + char *e = NULL; + calculated_number n = str2ld(string, &e); + if(e && *e) { + switch (*e) { + case 'Y': + *result = (int) (n * 86400 * 365); + break; + case 'M': + *result = (int) (n * 86400 * 30); + break; + case 'w': + *result = (int) (n * 86400 * 7); + break; + case 'd': + *result = (int) (n * 86400); + break; + case 'h': + *result = (int) (n * 3600); + break; + case 'm': + *result = (int) (n * 60); + break; + + default: + case 's': + *result = (int) (n); + break; + } + } + else + *result = (int)(n); + + return 1; +} + +static inline int health_parse_delay( + size_t line, const char *filename, char *string, + int *delay_up_duration, + int *delay_down_duration, + int *delay_max_duration, + float *delay_multiplier) { + + char given_up = 0; + char given_down = 0; + char given_max = 0; + char given_multiplier = 0; + + char *s = string; + while(*s) { + char *key = s; + + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + + if(!*key) break; + + char *value = s; + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + + if(!strcasecmp(key, "up")) { + if (!health_parse_duration(value, delay_up_duration)) { + error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); + } + else given_up = 1; + } + else if(!strcasecmp(key, "down")) { + if (!health_parse_duration(value, delay_down_duration)) { + error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); + } + else given_down = 1; + } + else if(!strcasecmp(key, "multiplier")) { 
+ *delay_multiplier = strtof(value, NULL); + if(isnan(*delay_multiplier) || isinf(*delay_multiplier) || islessequal(*delay_multiplier, 0)) { + error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); + } + else given_multiplier = 1; + } + else if(!strcasecmp(key, "max")) { + if (!health_parse_duration(value, delay_max_duration)) { + error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); + } + else given_max = 1; + } + else { + error("Health configuration at line %zu of file '%s': unknown keyword '%s'", + line, filename, key); + } + } + + if(!given_up) + *delay_up_duration = 0; + + if(!given_down) + *delay_down_duration = 0; + + if(!given_multiplier) + *delay_multiplier = 1.0; + + if(!given_max) { + if((*delay_max_duration) < (*delay_up_duration) * (*delay_multiplier)) + *delay_max_duration = (int)((*delay_up_duration) * (*delay_multiplier)); + + if((*delay_max_duration) < (*delay_down_duration) * (*delay_multiplier)) + *delay_max_duration = (int)((*delay_down_duration) * (*delay_multiplier)); + } + + return 1; +} + +static inline uint32_t health_parse_options(const char *s) { + uint32_t options = 0; + char buf[100+1] = ""; + + while(*s) { + buf[0] = '\0'; + + // skip spaces + while(*s && isspace(*s)) + s++; + + // find the next space + size_t count = 0; + while(*s && count < 100 && !isspace(*s)) + buf[count++] = *s++; + + if(buf[0]) { + buf[count] = '\0'; + + if(!strcasecmp(buf, "no-clear-notification") || !strcasecmp(buf, "no-clear")) + options |= RRDCALC_FLAG_NO_CLEAR_NOTIFICATION; + else + error("Ignoring unknown alarm option '%s'", buf); + } + } + + return options; +} + +static inline int health_parse_db_lookup( + size_t line, const char *filename, char *string, + RRDR_GROUPING *group_method, int *after, int *before, int *every, + uint32_t *options, char **dimensions +) { + debug(D_HEALTH, "Health configuration parsing database lookup %zu@%s: %s", line, filename, string); + + if(*dimensions) freez(*dimensions); + *dimensions = NULL; + *after = 0; + *before = 0; + *every = 0; + *options = 0; + + char *s = string, *key; + + // first is the group method + key = s; + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + if(!*s) { + error("Health configuration invalid chart calculation at line %zu of file '%s': expected group method followed by the 'after' time, but got '%s'", + line, filename, key); + return 0; + } + + if((*group_method = web_client_api_request_v1_data_group(key, RRDR_GROUPING_UNDEFINED)) == RRDR_GROUPING_UNDEFINED) { + error("Health configuration at line %zu of file '%s': invalid group method '%s'", + line, filename, key); + return 0; + } + + // then is the 'after' time + key = s; + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + + if(!health_parse_duration(key, after)) { + error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method", + line, filename, key); + return 0; + } + + // sane defaults + *every = abs(*after); + + // now we may have optional parameters + while(*s) { + key = s; + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + if(!*key) break; + + if(!strcasecmp(key, "at")) { + char *value = s; + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + + if (!health_parse_duration(value, before)) { + error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", + line, filename, 
value, key); + } + } + else if(!strcasecmp(key, HEALTH_EVERY_KEY)) { + char *value = s; + while(*s && !isspace(*s)) s++; + while(*s && isspace(*s)) *s++ = '\0'; + + if (!health_parse_duration(value, every)) { + error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", + line, filename, value, key); + } + } + else if(!strcasecmp(key, "absolute") || !strcasecmp(key, "abs") || !strcasecmp(key, "absolute_sum")) { + *options |= RRDR_OPTION_ABSOLUTE; + } + else if(!strcasecmp(key, "min2max")) { + *options |= RRDR_OPTION_MIN2MAX; + } + else if(!strcasecmp(key, "null2zero")) { + *options |= RRDR_OPTION_NULL2ZERO; + } + else if(!strcasecmp(key, "percentage")) { + *options |= RRDR_OPTION_PERCENTAGE; + } + else if(!strcasecmp(key, "unaligned")) { + *options |= RRDR_OPTION_NOT_ALIGNED; + } + else if(!strcasecmp(key, "match-ids") || !strcasecmp(key, "match_ids")) { + *options |= RRDR_OPTION_MATCH_IDS; + } + else if(!strcasecmp(key, "match-names") || !strcasecmp(key, "match_names")) { + *options |= RRDR_OPTION_MATCH_NAMES; + } + else if(!strcasecmp(key, "of")) { + if(*s && strcasecmp(s, "all") != 0) + *dimensions = strdupz(s); + break; + } + else { + error("Health configuration at line %zu of file '%s': unknown keyword '%s'", + line, filename, key); + } + } + + return 1; +} + +static inline char *health_source_file(size_t line, const char *file) { + char buffer[FILENAME_MAX + 1]; + snprintfz(buffer, FILENAME_MAX, "%zu@%s", line, file); + return strdupz(buffer); +} + +static inline void strip_quotes(char *s) { + while(*s) { + if(*s == '\'' || *s == '"') *s = ' '; + s++; + } +} + +static int health_readfile(const char *filename, void *data) { + RRDHOST *host = (RRDHOST *)data; + + debug(D_HEALTH, "Health configuration reading file '%s'", filename); + + static uint32_t + hash_alarm = 0, + hash_template = 0, + hash_os = 0, + hash_on = 0, + hash_host = 0, + hash_families = 0, + hash_calc = 0, + hash_green = 0, + hash_red = 0, + hash_warn = 0, + hash_crit = 0, + hash_exec = 0, + hash_every = 0, + hash_lookup = 0, + hash_units = 0, + hash_info = 0, + hash_recipient = 0, + hash_delay = 0, + hash_options = 0; + + char buffer[HEALTH_CONF_MAX_LINE + 1]; + + if(unlikely(!hash_alarm)) { + hash_alarm = simple_uhash(HEALTH_ALARM_KEY); + hash_template = simple_uhash(HEALTH_TEMPLATE_KEY); + hash_on = simple_uhash(HEALTH_ON_KEY); + hash_os = simple_uhash(HEALTH_OS_KEY); + hash_host = simple_uhash(HEALTH_HOST_KEY); + hash_families = simple_uhash(HEALTH_FAMILIES_KEY); + hash_calc = simple_uhash(HEALTH_CALC_KEY); + hash_lookup = simple_uhash(HEALTH_LOOKUP_KEY); + hash_green = simple_uhash(HEALTH_GREEN_KEY); + hash_red = simple_uhash(HEALTH_RED_KEY); + hash_warn = simple_uhash(HEALTH_WARN_KEY); + hash_crit = simple_uhash(HEALTH_CRIT_KEY); + hash_exec = simple_uhash(HEALTH_EXEC_KEY); + hash_every = simple_uhash(HEALTH_EVERY_KEY); + hash_units = simple_hash(HEALTH_UNITS_KEY); + hash_info = simple_hash(HEALTH_INFO_KEY); + hash_recipient = simple_hash(HEALTH_RECIPIENT_KEY); + hash_delay = simple_uhash(HEALTH_DELAY_KEY); + hash_options = simple_uhash(HEALTH_OPTIONS_KEY); + } + + FILE *fp = fopen(filename, "r"); + if(!fp) { + error("Health configuration cannot read file '%s'.", filename); + return 0; + } + + RRDCALC *rc = NULL; + RRDCALCTEMPLATE *rt = NULL; + + int ignore_this = 0; + size_t line = 0, append = 0; + char *s; + while((s = fgets(&buffer[append], (int)(HEALTH_CONF_MAX_LINE - append), fp)) || append) { + int stop_appending = !s; + line++; + s = trim(buffer); + if(!s || *s == '#') 
continue;
+
+ append = strlen(s);
+ if(!stop_appending && s[append - 1] == '\\') {
+ s[append - 1] = ' ';
+ append = &s[append] - buffer;
+ if(append < HEALTH_CONF_MAX_LINE)
+ continue;
+ else {
+ error("Health configuration has a multi-line statement that is too long at line %zu of file '%s'.", line, filename);
+ }
+ }
+ append = 0;
+
+ char *key = s;
+ while(*s && *s != ':') s++;
+ if(!*s) {
+ error("Health configuration has invalid line %zu of file '%s'. It does not contain a ':'. Ignoring it.", line, filename);
+ continue;
+ }
+ *s = '\0';
+ s++;
+
+ char *value = s;
+ key = trim_all(key);
+ value = trim_all(value);
+
+ if(!key) {
+ error("Health configuration has invalid line %zu of file '%s'. Keyword is empty. Ignoring it.", line, filename);
+ continue;
+ }
+
+ if(!value) {
+ error("Health configuration has invalid line %zu of file '%s'. Value is empty. Ignoring it.", line, filename);
+ continue;
+ }
+
+ uint32_t hash = simple_uhash(key);
+
+ if(hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) {
+ if (rc && (ignore_this || !rrdcalc_add_alarm_from_config(host, rc)))
+ rrdcalc_free(rc);
+
+ if(rt) {
+ if (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))
+ rrdcalctemplate_free(rt);
+
+ rt = NULL;
+ }
+
+ rc = callocz(1, sizeof(RRDCALC));
+ rc->next_event_id = 1;
+ rc->name = strdupz(value);
+ rc->hash = simple_hash(rc->name);
+ rc->source = health_source_file(line, filename);
+ rc->green = NAN;
+ rc->red = NAN;
+ rc->value = NAN;
+ rc->old_value = NAN;
+ rc->delay_multiplier = 1.0;
+
+ if(rrdvar_fix_name(rc->name))
+ error("Health configuration renamed alarm '%s' to '%s'", value, rc->name);
+
+ ignore_this = 0;
+ }
+ else if(hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) {
+ if(rc) {
+ if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc))
+ rrdcalc_free(rc);
+
+ rc = NULL;
+ }
+
+ if(rt && (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)))
+ rrdcalctemplate_free(rt);
+
+ rt = callocz(1, sizeof(RRDCALCTEMPLATE));
+ rt->name = strdupz(value);
+ rt->hash_name = simple_hash(rt->name);
+ rt->source = health_source_file(line, filename);
+ rt->green = NAN;
+ rt->red = NAN;
+ rt->delay_multiplier = 1.0;
+
+ if(rrdvar_fix_name(rt->name))
+ error("Health configuration renamed template '%s' to '%s'", value, rt->name);
+
+ ignore_this = 0;
+ }
+ else if(hash == hash_os && !strcasecmp(key, HEALTH_OS_KEY)) {
+ char *os_match = value;
+ SIMPLE_PATTERN *os_pattern = simple_pattern_create(os_match, NULL, SIMPLE_PATTERN_EXACT);
+
+ if(!simple_pattern_matches(os_pattern, host->os)) {
+ if(rc)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s: host O/S does not match '%s'", host->hostname, rc->name, line, filename, os_match);
+
+ if(rt)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s: host O/S does not match '%s'", host->hostname, rt->name, line, filename, os_match);
+
+ ignore_this = 1;
+ }
+
+ simple_pattern_free(os_pattern);
+ }
+ else if(hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) {
+ char *host_match = value;
+ SIMPLE_PATTERN *host_pattern = simple_pattern_create(host_match, NULL, SIMPLE_PATTERN_EXACT);
+
+ if(!simple_pattern_matches(host_pattern, host->hostname)) {
+ if(rc)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s: hostname does not match '%s'", host->hostname, rc->name, line, filename, host_match);
+
+ if(rt)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s: hostname does not match '%s'", host->hostname, rt->name, line, filename,
host_match); + + ignore_this = 1; + } + + simple_pattern_free(host_pattern); + } + else if(rc) { + if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) { + if(rc->chart) { + if(strcmp(rc->chart, value) != 0) + error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rc->name, key, rc->chart, value, value); + + freez(rc->chart); + } + rc->chart = strdupz(value); + rc->hash_chart = simple_hash(rc->chart); + } + else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) { + health_parse_db_lookup(line, filename, value, &rc->group, &rc->after, &rc->before, + &rc->update_every, + &rc->options, &rc->dimensions); + } + else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { + if(!health_parse_duration(value, &rc->update_every)) + error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' cannot parse duration: '%s'.", + line, filename, rc->name, key, value); + } + else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { + char *e; + rc->green = str2ld(value, &e); + if(e && *e) { + error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rc->name, key, e); + } + } + else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { + char *e; + rc->red = str2ld(value, &e); + if(e && *e) { + error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rc->name, key, e); + } + } + else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { + const char *failed_at = NULL; + int error = 0; + rc->calculation = expression_parse(value, &failed_at, &error); + if(!rc->calculation) { + error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rc->name, key, value, expression_strerror(error), failed_at); + } + } + else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) { + const char *failed_at = NULL; + int error = 0; + rc->warning = expression_parse(value, &failed_at, &error); + if(!rc->warning) { + error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rc->name, key, value, expression_strerror(error), failed_at); + } + } + else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) { + const char *failed_at = NULL; + int error = 0; + rc->critical = expression_parse(value, &failed_at, &error); + if(!rc->critical) { + error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rc->name, key, value, expression_strerror(error), failed_at); + } + } + else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) { + if(rc->exec) { + if(strcmp(rc->exec, value) != 0) + error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rc->name, key, rc->exec, value, value); + + freez(rc->exec); + } + rc->exec = strdupz(value); + } + else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) { + if(rc->recipient) { + if(strcmp(rc->recipient, value) != 0) + error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", + line, filename, rc->name, key, rc->recipient, value, value); + + freez(rc->recipient); + } + rc->recipient = strdupz(value); + } + else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) { + if(rc->units) { + if(strcmp(rc->units, value) != 0) + error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rc->name, key, rc->units, value, value); + + freez(rc->units); + } + rc->units = strdupz(value); + strip_quotes(rc->units); + } + else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) { + if(rc->info) { + if(strcmp(rc->info, value) != 0) + error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rc->name, key, rc->info, value, value); + + freez(rc->info); + } + rc->info = strdupz(value); + strip_quotes(rc->info); + } + else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) { + health_parse_delay(line, filename, value, &rc->delay_up_duration, &rc->delay_down_duration, &rc->delay_max_duration, &rc->delay_multiplier); + } + else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) { + rc->options |= health_parse_options(value); + } + else { + error("Health configuration at line %zu of file '%s' for alarm '%s' has unknown key '%s'.", + line, filename, rc->name, key); + } + } + else if(rt) { + if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) { + if(rt->context) { + if(strcmp(rt->context, value) != 0) + error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rt->name, key, rt->context, value, value); + + freez(rt->context); + } + rt->context = strdupz(value); + rt->hash_context = simple_hash(rt->context); + } + else if(hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) { + freez(rt->family_match); + simple_pattern_free(rt->family_pattern); + + rt->family_match = strdupz(value); + rt->family_pattern = simple_pattern_create(rt->family_match, NULL, SIMPLE_PATTERN_EXACT); + } + else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) { + health_parse_db_lookup(line, filename, value, &rt->group, &rt->after, &rt->before, + &rt->update_every, &rt->options, &rt->dimensions); + } + else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { + if(!health_parse_duration(value, &rt->update_every)) + error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' cannot parse duration: '%s'.", + line, filename, rt->name, key, value); + } + else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { + char *e; + rt->green = str2ld(value, &e); + if(e && *e) { + error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rt->name, key, e); + } + } + else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { + char *e; + rt->red = str2ld(value, &e); + if(e && *e) { + error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rt->name, key, e); + } + } + else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { + const char *failed_at = NULL; + int error = 0; + rt->calculation = expression_parse(value, &failed_at, &error); + if(!rt->calculation) { + error("Health configuration at line %zu 
of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rt->name, key, value, expression_strerror(error), failed_at); + } + } + else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) { + const char *failed_at = NULL; + int error = 0; + rt->warning = expression_parse(value, &failed_at, &error); + if(!rt->warning) { + error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rt->name, key, value, expression_strerror(error), failed_at); + } + } + else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) { + const char *failed_at = NULL; + int error = 0; + rt->critical = expression_parse(value, &failed_at, &error); + if(!rt->critical) { + error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rt->name, key, value, expression_strerror(error), failed_at); + } + } + else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) { + if(rt->exec) { + if(strcmp(rt->exec, value) != 0) + error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rt->name, key, rt->exec, value, value); + + freez(rt->exec); + } + rt->exec = strdupz(value); + } + else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) { + if(rt->recipient) { + if(strcmp(rt->recipient, value) != 0) + error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rt->name, key, rt->recipient, value, value); + + freez(rt->recipient); + } + rt->recipient = strdupz(value); + } + else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) { + if(rt->units) { + if(strcmp(rt->units, value) != 0) + error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rt->name, key, rt->units, value, value); + + freez(rt->units); + } + rt->units = strdupz(value); + strip_quotes(rt->units); + } + else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) { + if(rt->info) { + if(strcmp(rt->info, value) != 0) + error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rt->name, key, rt->info, value, value); + + freez(rt->info); + } + rt->info = strdupz(value); + strip_quotes(rt->info); + } + else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) { + health_parse_delay(line, filename, value, &rt->delay_up_duration, &rt->delay_down_duration, &rt->delay_max_duration, &rt->delay_multiplier); + } + else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) { + rt->options |= health_parse_options(value); + } + else { + error("Health configuration at line %zu of file '%s' for template '%s' has unknown key '%s'.", + line, filename, rt->name, key); + } + } + else { + error("Health configuration at line %zu of file '%s' has unknown key '%s'. 
Expected either '" HEALTH_ALARM_KEY "' or '" HEALTH_TEMPLATE_KEY "'.", + line, filename, key); + } + } + + if(rc && (ignore_this || !rrdcalc_add_alarm_from_config(host, rc))) + rrdcalc_free(rc); + + if(rt && (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))) + rrdcalctemplate_free(rt); + + fclose(fp); + return 1; +} + +void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath) { + if(unlikely(!host->health_enabled)) return; + recursive_config_double_dir_load(user_path, stock_path, subpath, health_readfile, (void *) host, 0); +} diff --git a/health/health_json.c b/health/health_json.c new file mode 100644 index 000000000..a049dc1b2 --- /dev/null +++ b/health/health_json.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "health.h" + +static inline void health_string2json(BUFFER *wb, const char *prefix, const char *label, const char *value, const char *suffix) { + if(value && *value) { + buffer_sprintf(wb, "%s\"%s\":\"", prefix, label); + buffer_strcat_htmlescape(wb, value); + buffer_strcat(wb, "\""); + buffer_strcat(wb, suffix); + } + else + buffer_sprintf(wb, "%s\"%s\":null%s", prefix, label, suffix); +} + +static inline void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) { + buffer_sprintf(wb, + "\n\t{\n" + "\t\t\"hostname\": \"%s\",\n" + "\t\t\"unique_id\": %u,\n" + "\t\t\"alarm_id\": %u,\n" + "\t\t\"alarm_event_id\": %u,\n" + "\t\t\"name\": \"%s\",\n" + "\t\t\"chart\": \"%s\",\n" + "\t\t\"family\": \"%s\",\n" + "\t\t\"processed\": %s,\n" + "\t\t\"updated\": %s,\n" + "\t\t\"exec_run\": %lu,\n" + "\t\t\"exec_failed\": %s,\n" + "\t\t\"exec\": \"%s\",\n" + "\t\t\"recipient\": \"%s\",\n" + "\t\t\"exec_code\": %d,\n" + "\t\t\"source\": \"%s\",\n" + "\t\t\"units\": \"%s\",\n" + "\t\t\"when\": %lu,\n" + "\t\t\"duration\": %lu,\n" + "\t\t\"non_clear_duration\": %lu,\n" + "\t\t\"status\": \"%s\",\n" + "\t\t\"old_status\": \"%s\",\n" + "\t\t\"delay\": %d,\n" + "\t\t\"delay_up_to_timestamp\": %lu,\n" + "\t\t\"updated_by_id\": %u,\n" + "\t\t\"updates_id\": %u,\n" + "\t\t\"value_string\": \"%s\",\n" + "\t\t\"old_value_string\": \"%s\",\n" + , host->hostname + , ae->unique_id + , ae->alarm_id + , ae->alarm_event_id + , ae->name + , ae->chart + , ae->family + , (ae->flags & HEALTH_ENTRY_FLAG_PROCESSED)?"true":"false" + , (ae->flags & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false" + , (unsigned long)ae->exec_run_timestamp + , (ae->flags & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false" + , ae->exec?ae->exec:host->health_default_exec + , ae->recipient?ae->recipient:host->health_default_recipient + , ae->exec_code + , ae->source + , ae->units?ae->units:"" + , (unsigned long)ae->when + , (unsigned long)ae->duration + , (unsigned long)ae->non_clear_duration + , rrdcalc_status2string(ae->new_status) + , rrdcalc_status2string(ae->old_status) + , ae->delay + , (unsigned long)ae->delay_up_to_timestamp + , ae->updated_by_id + , ae->updates_id + , ae->new_value_string + , ae->old_value_string + ); + + health_string2json(wb, "\t\t", "info", ae->info?ae->info:"", ",\n"); + + if(unlikely(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)) { + buffer_strcat(wb, "\t\t\"no_clear_notification\": true,\n"); + } + + buffer_strcat(wb, "\t\t\"value\":"); + buffer_rrd_value(wb, ae->new_value); + buffer_strcat(wb, ",\n"); + + buffer_strcat(wb, "\t\t\"old_value\":"); + buffer_rrd_value(wb, ae->old_value); + buffer_strcat(wb, "\n"); + + buffer_strcat(wb, "\t}"); +} + +void health_alarm_log2json(RRDHOST *host, BUFFER 
*wb, uint32_t after) { + netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); + + buffer_strcat(wb, "["); + + unsigned int max = host->health_log.max; + unsigned int count = 0; + ALARM_ENTRY *ae; + for(ae = host->health_log.alarms; ae && count < max ; count++, ae = ae->next) { + if(ae->unique_id > after) { + if(likely(count)) buffer_strcat(wb, ","); + health_alarm_entry2json_nolock(wb, ae, host); + } + } + + buffer_strcat(wb, "\n]\n"); + + netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); +} + +static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC *rc) { + char value_string[100 + 1]; + format_value_and_unit(value_string, 100, rc->value, rc->units, -1); + + buffer_sprintf(wb, + "\t\t\"%s.%s\": {\n" + "\t\t\t\"id\": %lu,\n" + "\t\t\t\"name\": \"%s\",\n" + "\t\t\t\"chart\": \"%s\",\n" + "\t\t\t\"family\": \"%s\",\n" + "\t\t\t\"active\": %s,\n" + "\t\t\t\"exec\": \"%s\",\n" + "\t\t\t\"recipient\": \"%s\",\n" + "\t\t\t\"source\": \"%s\",\n" + "\t\t\t\"units\": \"%s\",\n" + "\t\t\t\"info\": \"%s\",\n" + "\t\t\t\"status\": \"%s\",\n" + "\t\t\t\"last_status_change\": %lu,\n" + "\t\t\t\"last_updated\": %lu,\n" + "\t\t\t\"next_update\": %lu,\n" + "\t\t\t\"update_every\": %d,\n" + "\t\t\t\"delay_up_duration\": %d,\n" + "\t\t\t\"delay_down_duration\": %d,\n" + "\t\t\t\"delay_max_duration\": %d,\n" + "\t\t\t\"delay_multiplier\": %f,\n" + "\t\t\t\"delay\": %d,\n" + "\t\t\t\"delay_up_to_timestamp\": %lu,\n" + "\t\t\t\"value_string\": \"%s\",\n" + , rc->chart, rc->name + , (unsigned long)rc->id + , rc->name + , rc->chart + , (rc->rrdset && rc->rrdset->family)?rc->rrdset->family:"" + , (rc->rrdset)?"true":"false" + , rc->exec?rc->exec:host->health_default_exec + , rc->recipient?rc->recipient:host->health_default_recipient + , rc->source + , rc->units?rc->units:"" + , rc->info?rc->info:"" + , rrdcalc_status2string(rc->status) + , (unsigned long)rc->last_status_change + , (unsigned long)rc->last_updated + , (unsigned long)rc->next_update + , rc->update_every + , rc->delay_up_duration + , rc->delay_down_duration + , rc->delay_max_duration + , rc->delay_multiplier + , rc->delay_last + , (unsigned long)rc->delay_up_to_timestamp + , value_string + ); + + if(unlikely(rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION)) { + buffer_strcat(wb, "\t\t\t\"no_clear_notification\": true,\n"); + } + + if(RRDCALC_HAS_DB_LOOKUP(rc)) { + if(rc->dimensions && *rc->dimensions) + health_string2json(wb, "\t\t\t", "lookup_dimensions", rc->dimensions, ",\n"); + + buffer_sprintf(wb, + "\t\t\t\"db_after\": %lu,\n" + "\t\t\t\"db_before\": %lu,\n" + "\t\t\t\"lookup_method\": \"%s\",\n" + "\t\t\t\"lookup_after\": %d,\n" + "\t\t\t\"lookup_before\": %d,\n" + "\t\t\t\"lookup_options\": \"", + (unsigned long) rc->db_after, + (unsigned long) rc->db_before, + group_method2string(rc->group), + rc->after, + rc->before + ); + buffer_data_options2string(wb, rc->options); + buffer_strcat(wb, "\",\n"); + } + + if(rc->calculation) { + health_string2json(wb, "\t\t\t", "calc", rc->calculation->source, ",\n"); + health_string2json(wb, "\t\t\t", "calc_parsed", rc->calculation->parsed_as, ",\n"); + } + + if(rc->warning) { + health_string2json(wb, "\t\t\t", "warn", rc->warning->source, ",\n"); + health_string2json(wb, "\t\t\t", "warn_parsed", rc->warning->parsed_as, ",\n"); + } + + if(rc->critical) { + health_string2json(wb, "\t\t\t", "crit", rc->critical->source, ",\n"); + health_string2json(wb, "\t\t\t", "crit_parsed", rc->critical->parsed_as, ",\n"); + } + + buffer_strcat(wb, "\t\t\t\"green\":"); + 
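// note: green, red and the live value may be NAN; buffer_rrd_value() prints "null" for non-finite numbers, keeping the JSON output valid
+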
buffer_rrd_value(wb, rc->green); + buffer_strcat(wb, ",\n"); + + buffer_strcat(wb, "\t\t\t\"red\":"); + buffer_rrd_value(wb, rc->red); + buffer_strcat(wb, ",\n"); + + buffer_strcat(wb, "\t\t\t\"value\":"); + buffer_rrd_value(wb, rc->value); + buffer_strcat(wb, "\n"); + + buffer_strcat(wb, "\t\t}"); +} + +//void health_rrdcalctemplate2json_nolock(BUFFER *wb, RRDCALCTEMPLATE *rt) { +// +//} + +void health_alarms2json(RRDHOST *host, BUFFER *wb, int all) { + int i; + + rrdhost_rdlock(host); + buffer_sprintf(wb, "{\n\t\"hostname\": \"%s\"," + "\n\t\"latest_alarm_log_unique_id\": %u," + "\n\t\"status\": %s," + "\n\t\"now\": %lu," + "\n\t\"alarms\": {\n", + host->hostname, + (host->health_log.next_log_id > 0)?(host->health_log.next_log_id - 1):0, + host->health_enabled?"true":"false", + (unsigned long)now_realtime_sec()); + + RRDCALC *rc; + for(i = 0, rc = host->alarms; rc ; rc = rc->next) { + if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) + continue; + + if(likely(!all && !(rc->status == RRDCALC_STATUS_WARNING || rc->status == RRDCALC_STATUS_CRITICAL))) + continue; + + if(likely(i)) buffer_strcat(wb, ",\n"); + health_rrdcalc2json_nolock(host, wb, rc); + i++; + } + +// buffer_strcat(wb, "\n\t},\n\t\"templates\": {"); +// RRDCALCTEMPLATE *rt; +// for(rt = host->templates; rt ; rt = rt->next) +// health_rrdcalctemplate2json_nolock(wb, rt); + + buffer_strcat(wb, "\n\t}\n}\n"); + rrdhost_unlock(host); +} + + + diff --git a/health/health_log.c b/health/health_log.c new file mode 100644 index 000000000..dd51be2af --- /dev/null +++ b/health/health_log.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "health.h" + +// ---------------------------------------------------------------------------- +// health alarm log load/save +// no need for locking - only one thread is reading / writing the alarms log + +inline int health_alarm_log_open(RRDHOST *host) { + if(host->health_log_fp) + fclose(host->health_log_fp); + + host->health_log_fp = fopen(host->health_log_filename, "a"); + + if(host->health_log_fp) { + if (setvbuf(host->health_log_fp, NULL, _IOLBF, 0) != 0) + error("HEALTH [%s]: cannot set line buffering on health log file '%s'.", host->hostname, host->health_log_filename); + return 0; + } + + error("HEALTH [%s]: cannot open health log file '%s'. 
Health data will be lost in case of netdata or server crash.", host->hostname, host->health_log_filename); + return -1; +} + +inline void health_alarm_log_close(RRDHOST *host) { + if(host->health_log_fp) { + fclose(host->health_log_fp); + host->health_log_fp = NULL; + } +} + +inline void health_log_rotate(RRDHOST *host) { + static size_t rotate_every = 0; + + if(unlikely(rotate_every == 0)) { + rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000); + if(rotate_every < 100) rotate_every = 100; + } + + if(unlikely(host->health_log_entries_written > rotate_every)) { + health_alarm_log_close(host); + + char old_filename[FILENAME_MAX + 1]; + snprintfz(old_filename, FILENAME_MAX, "%s.old", host->health_log_filename); + + if(unlink(old_filename) == -1 && errno != ENOENT) + error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, old_filename); + + if(link(host->health_log_filename, old_filename) == -1 && errno != ENOENT) + error("HEALTH [%s]: cannot move file '%s' to '%s'.", host->hostname, host->health_log_filename, old_filename); + + if(unlink(host->health_log_filename) == -1 && errno != ENOENT) + error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, host->health_log_filename); + + // open it with truncate + host->health_log_fp = fopen(host->health_log_filename, "w"); + + if(host->health_log_fp) + fclose(host->health_log_fp); + else + error("HEALTH [%s]: cannot truncate health log '%s'", host->hostname, host->health_log_filename); + + host->health_log_fp = NULL; + + host->health_log_entries_written = 0; + health_alarm_log_open(host); + } +} + +inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) { + health_log_rotate(host); + + if(likely(host->health_log_fp)) { + if(unlikely(fprintf(host->health_log_fp + , "%c\t%s" + "\t%08x\t%08x\t%08x\t%08x\t%08x" + "\t%08x\t%08x\t%08x" + "\t%08x\t%08x\t%08x" + "\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" + "\t%d\t%d\t%d\t%d" + "\t" CALCULATED_NUMBER_FORMAT_AUTO "\t" CALCULATED_NUMBER_FORMAT_AUTO + "\n" + , (ae->flags & HEALTH_ENTRY_FLAG_SAVED)?'U':'A' + , host->hostname + + , ae->unique_id + , ae->alarm_id + , ae->alarm_event_id + , ae->updated_by_id + , ae->updates_id + + , (uint32_t)ae->when + , (uint32_t)ae->duration + , (uint32_t)ae->non_clear_duration + , (uint32_t)ae->flags + , (uint32_t)ae->exec_run_timestamp + , (uint32_t)ae->delay_up_to_timestamp + + , (ae->name)?ae->name:"" + , (ae->chart)?ae->chart:"" + , (ae->family)?ae->family:"" + , (ae->exec)?ae->exec:"" + , (ae->recipient)?ae->recipient:"" + , (ae->source)?ae->source:"" + , (ae->units)?ae->units:"" + , (ae->info)?ae->info:"" + + , ae->exec_code + , ae->new_status + , ae->old_status + , ae->delay + + , ae->new_value + , ae->old_value + ) < 0)) + error("HEALTH [%s]: failed to save alarm log entry to '%s'. 
Health data may be lost in case of abnormal restart.", host->hostname, host->health_log_filename);
+ else {
+ ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
+ host->health_log_entries_written++;
+ }
+ }
+}
+
+inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename) {
+ errno = 0;
+
+ char *s, *buf = mallocz(65536 + 1);
+ size_t line = 0, len = 0;
+ ssize_t loaded = 0, updated = 0, errored = 0, duplicate = 0;
+
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+ while((s = fgets_trim_len(buf, 65536, fp, &len))) {
+ host->health_log_entries_written++;
+ line++;
+
+ int max_entries = 30, entries = 0;
+ char *pointers[max_entries];
+
+ pointers[entries++] = s++;
+ while(*s) {
+ if(unlikely(*s == '\t')) {
+ *s = '\0';
+ pointers[entries++] = ++s;
+ if(entries >= max_entries) {
+ error("HEALTH [%s]: line %zu of file '%s' has more than %d entries. Ignoring excessive entries.", host->hostname, line, filename, max_entries);
+ break;
+ }
+ }
+ else s++;
+ }
+
+ if(likely(*pointers[0] == 'U' || *pointers[0] == 'A')) {
+ ALARM_ENTRY *ae = NULL;
+
+ if(entries < 26) {
+ error("HEALTH [%s]: line %zu of file '%s' should have at least 26 entries, but it has %d. Ignoring it.", host->hostname, line, filename, entries);
+ errored++;
+ continue;
+ }
+
+ // check that we have valid ids
+ uint32_t unique_id = (uint32_t)strtoul(pointers[2], NULL, 16);
+ if(!unique_id) {
+ error("HEALTH [%s]: line %zu of file '%s' states alarm entry with invalid unique id %u (%s). Ignoring it.", host->hostname, line, filename, unique_id, pointers[2]);
+ errored++;
+ continue;
+ }
+
+ uint32_t alarm_id = (uint32_t)strtoul(pointers[3], NULL, 16);
+ if(!alarm_id) {
+ error("HEALTH [%s]: line %zu of file '%s' states alarm entry for invalid alarm id %u (%s). Ignoring it.", host->hostname, line, filename, alarm_id, pointers[3]);
+ errored++;
+ continue;
+ }
+
+ if(unlikely(*pointers[0] == 'A')) {
+ // make sure it is properly numbered
+ if(unlikely(host->health_log.alarms && unique_id < host->health_log.alarms->unique_id)) {
+ error("HEALTH [%s]: line %zu of file '%s' has alarm log entry %u in wrong order. Ignoring it.", host->hostname, line, filename, unique_id);
+ errored++;
+ continue;
+ }
+
+ ae = callocz(1, sizeof(ALARM_ENTRY));
+ }
+ else if(unlikely(*pointers[0] == 'U')) {
+ // find the original
+ for(ae = host->health_log.alarms; ae; ae = ae->next) {
+ if(unlikely(unique_id == ae->unique_id)) {
+ if(unlikely(*pointers[0] == 'A')) {
+ error("HEALTH [%s]: line %zu of file '%s' adds duplicate alarm log entry %u. Using the latter."
+ , host->hostname, line, filename, unique_id);
+ *pointers[0] = 'U';
+ duplicate++;
+ }
+ break;
+ }
+ else if(unlikely(unique_id > ae->unique_id)) {
+ // no need to continue
+ // the linked list is sorted
+ ae = NULL;
+ break;
+ }
+ }
+ }
+
+ // if not found, skip this line
+ if(unlikely(!ae)) {
+ // error("HEALTH [%s]: line %zu of file '%s' updates alarm log entry with unique id %u, but it is not found.", host->hostname, line, filename, unique_id);
+ continue;
+ }
+
+ // check for a possible host mismatch
+ //if(strcmp(pointers[1], host->hostname))
+ // error("HEALTH [%s]: line %zu of file '%s' provides an alarm for host '%s' but this is named '%s'.", host->hostname, line, filename, pointers[1], host->hostname);
+
+ ae->unique_id = unique_id;
+ ae->alarm_id = alarm_id;
+ ae->alarm_event_id = (uint32_t)strtoul(pointers[4], NULL, 16);
+ ae->updated_by_id = (uint32_t)strtoul(pointers[5], NULL, 16);
+ ae->updates_id = (uint32_t)strtoul(pointers[6], NULL, 16);
+
+ ae->when = (uint32_t)strtoul(pointers[7], NULL, 16);
+ ae->duration = (uint32_t)strtoul(pointers[8], NULL, 16);
+ ae->non_clear_duration = (uint32_t)strtoul(pointers[9], NULL, 16);
+
+ ae->flags = (uint32_t)strtoul(pointers[10], NULL, 16);
+ ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
+
+ ae->exec_run_timestamp = (uint32_t)strtoul(pointers[11], NULL, 16);
+ ae->delay_up_to_timestamp = (uint32_t)strtoul(pointers[12], NULL, 16);
+
+ freez(ae->name);
+ ae->name = strdupz(pointers[13]);
+ ae->hash_name = simple_hash(ae->name);
+
+ freez(ae->chart);
+ ae->chart = strdupz(pointers[14]);
+ ae->hash_chart = simple_hash(ae->chart);
+
+ freez(ae->family);
+ ae->family = strdupz(pointers[15]);
+
+ freez(ae->exec);
+ ae->exec = strdupz(pointers[16]);
+ if(!*ae->exec) { freez(ae->exec); ae->exec = NULL; }
+
+ freez(ae->recipient);
+ ae->recipient = strdupz(pointers[17]);
+ if(!*ae->recipient) { freez(ae->recipient); ae->recipient = NULL; }
+
+ freez(ae->source);
+ ae->source = strdupz(pointers[18]);
+ if(!*ae->source) { freez(ae->source); ae->source = NULL; }
+
+ freez(ae->units);
+ ae->units = strdupz(pointers[19]);
+ if(!*ae->units) { freez(ae->units); ae->units = NULL; }
+
+ freez(ae->info);
+ ae->info = strdupz(pointers[20]);
+ if(!*ae->info) { freez(ae->info); ae->info = NULL; }
+
+ ae->exec_code = str2i(pointers[21]);
+ ae->new_status = str2i(pointers[22]);
+ ae->old_status = str2i(pointers[23]);
+ ae->delay = str2i(pointers[24]);
+
+ ae->new_value = str2l(pointers[25]);
+ ae->old_value = str2l(pointers[26]);
+
+ char value_string[100 + 1];
+ freez(ae->old_value_string);
+ freez(ae->new_value_string);
+ ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1));
+ ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1));
+
+ // add it to host if not already there
+ if(unlikely(*pointers[0] == 'A')) {
+ ae->next = host->health_log.alarms;
+ host->health_log.alarms = ae;
+ loaded++;
+ }
+ else updated++;
+
+ if(unlikely(ae->unique_id > host->health_max_unique_id))
+ host->health_max_unique_id = ae->unique_id;
+
+ if(unlikely(ae->alarm_id >= host->health_max_alarm_id))
+ host->health_max_alarm_id = ae->alarm_id;
+ }
+ else {
+ error("HEALTH [%s]: line %zu of file '%s' is invalid (unrecognized entry type '%s').", host->hostname, line, filename, pointers[0]);
+ errored++;
+ }
+ }
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+ freez(buf);
+
+ if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec();
+
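// if the log yielded no usable ids, seed them from the wall clock so that new unique ids keep increasing across restarts; the alarm id below gets the same fallback
+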
if(!host->health_max_alarm_id) host->health_max_alarm_id = (uint32_t)now_realtime_sec(); + + host->health_log.next_log_id = host->health_max_unique_id + 1; + host->health_log.next_alarm_id = host->health_max_alarm_id + 1; + + debug(D_HEALTH, "HEALTH [%s]: loaded file '%s' with %zd new alarm entries, updated %zd alarms, errors %zd entries, duplicate %zd", host->hostname, filename, loaded, updated, errored, duplicate); + return loaded; +} + +inline void health_alarm_log_load(RRDHOST *host) { + health_alarm_log_close(host); + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s.old", host->health_log_filename); + FILE *fp = fopen(filename, "r"); + if(!fp) + error("HEALTH [%s]: cannot open health file: %s", host->hostname, filename); + else { + health_alarm_log_read(host, fp, filename); + fclose(fp); + } + + host->health_log_entries_written = 0; + fp = fopen(host->health_log_filename, "r"); + if(!fp) + error("HEALTH [%s]: cannot open health file: %s", host->hostname, host->health_log_filename); + else { + health_alarm_log_read(host, fp, host->health_log_filename); + fclose(fp); + } + + health_alarm_log_open(host); +} + + +// ---------------------------------------------------------------------------- +// health alarm log management + +inline void health_alarm_log( + RRDHOST *host, + uint32_t alarm_id, + uint32_t alarm_event_id, + time_t when, + const char *name, + const char *chart, + const char *family, + const char *exec, + const char *recipient, + time_t duration, + calculated_number old_value, + calculated_number new_value, + RRDCALC_STATUS old_status, + RRDCALC_STATUS new_status, + const char *source, + const char *units, + const char *info, + int delay, + uint32_t flags +) { + debug(D_HEALTH, "Health adding alarm log entry with id: %u", host->health_log.next_log_id); + + ALARM_ENTRY *ae = callocz(1, sizeof(ALARM_ENTRY)); + ae->name = strdupz(name); + ae->hash_name = simple_hash(ae->name); + + if(chart) { + ae->chart = strdupz(chart); + ae->hash_chart = simple_hash(ae->chart); + } + + if(family) + ae->family = strdupz(family); + + if(exec) ae->exec = strdupz(exec); + if(recipient) ae->recipient = strdupz(recipient); + if(source) ae->source = strdupz(source); + if(units) ae->units = strdupz(units); + if(info) ae->info = strdupz(info); + + ae->unique_id = host->health_log.next_log_id++; + ae->alarm_id = alarm_id; + ae->alarm_event_id = alarm_event_id; + ae->when = when; + ae->old_value = old_value; + ae->new_value = new_value; + + char value_string[100 + 1]; + ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1)); + ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1)); + + ae->old_status = old_status; + ae->new_status = new_status; + ae->duration = duration; + ae->delay = delay; + ae->delay_up_to_timestamp = when + delay; + + ae->flags |= flags; + + if(ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL) + ae->non_clear_duration += ae->duration; + + // link it + netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock); + ae->next = host->health_log.alarms; + host->health_log.alarms = ae; + host->health_log.count++; + netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); + + // match previous alarms + netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); + ALARM_ENTRY *t; + for(t = host->health_log.alarms ; t ; t = t->next) { + if(t != ae && t->alarm_id == ae->alarm_id) { + if(!(t->flags & HEALTH_ENTRY_FLAG_UPDATED) && 
!t->updated_by_id) { + t->flags |= HEALTH_ENTRY_FLAG_UPDATED; + t->updated_by_id = ae->unique_id; + ae->updates_id = t->unique_id; + + if((t->new_status == RRDCALC_STATUS_WARNING || t->new_status == RRDCALC_STATUS_CRITICAL) && + (t->old_status == RRDCALC_STATUS_WARNING || t->old_status == RRDCALC_STATUS_CRITICAL)) + ae->non_clear_duration += t->non_clear_duration; + + health_alarm_log_save(host, t); + } + + // no need to continue + break; + } + } + netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); + + health_alarm_log_save(host, ae); +} + +inline void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae) { + freez(ae->name); + freez(ae->chart); + freez(ae->family); + freez(ae->exec); + freez(ae->recipient); + freez(ae->source); + freez(ae->units); + freez(ae->info); + freez(ae->old_value_string); + freez(ae->new_value_string); + freez(ae); +} + +inline void health_alarm_log_free(RRDHOST *host) { + rrdhost_check_wrlock(host); + + netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock); + + ALARM_ENTRY *ae; + while((ae = host->health_log.alarms)) { + host->health_log.alarms = ae->next; + health_alarm_log_free_one_nochecks_nounlink(ae); + } + + netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); +} diff --git a/health/notifications/Makefile.am b/health/notifications/Makefile.am new file mode 100644 index 000000000..a5b88f0de --- /dev/null +++ b/health/notifications/Makefile.am @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + alarm-notify.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + health_alarm_notify.conf \ + health_email_recipients.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + alarm-notify.sh \ + alarm-email.sh \ + alarm-test.sh \ + $(NULL) + +dist_noinst_DATA = \ + alarm-notify.sh.in \ + README.md \ + $(NULL) + +include alerta/Makefile.inc +include awssns/Makefile.inc +include discord/Makefile.inc +include email/Makefile.inc +include flock/Makefile.inc +include irc/Makefile.inc +include kavenegar/Makefile.inc +include messagebird/Makefile.inc +include pagerduty/Makefile.inc +include pushbullet/Makefile.inc +include pushover/Makefile.inc +include rocketchat/Makefile.inc +include slack/Makefile.inc +include syslog/Makefile.inc +include telegram/Makefile.inc +include twilio/Makefile.inc +include web/Makefile.inc diff --git a/health/notifications/Makefile.in b/health/notifications/Makefile.in new file mode 100644 index 000000000..05a5814fc --- /dev/null +++ b/health/notifications/Makefile.in @@ -0,0 +1,754 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?)
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(top_srcdir)/build/subst.inc \ + $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc \ + $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc \ + $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc \ + $(srcdir)/kavenegar/Makefile.inc \ + $(srcdir)/messagebird/Makefile.inc \ + $(srcdir)/pagerduty/Makefile.inc \ + $(srcdir)/pushbullet/Makefile.inc \ + $(srcdir)/pushover/Makefile.inc \ + $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc \ + $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc \ + $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc \ + $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_plugins_SCRIPTS) $(dist_libconfig_DATA) \ + $(dist_noinst_DATA) +subdir = health/notifications +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 
+am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pluginsdir)" \ + "$(DESTDIR)$(libconfigdir)" +SCRIPTS = $(dist_plugins_SCRIPTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ 
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + alarm-notify.sh \ + $(NULL) + +SUFFIXES = .in +dist_libconfig_DATA = \ + health_alarm_notify.conf \ + health_email_recipients.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + alarm-notify.sh \ + alarm-email.sh \ + alarm-test.sh \ + $(NULL) + + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files + +# install these files +dist_noinst_DATA = alarm-notify.sh.in README.md $(NULL) \ + alerta/README.md alerta/Makefile.inc $(NULL) awssns/README.md \ + awssns/Makefile.inc $(NULL) discord/README.md \ + discord/Makefile.inc $(NULL) email/README.md \ + email/Makefile.inc $(NULL) flock/README.md flock/Makefile.inc \ + $(NULL) irc/README.md irc/Makefile.inc $(NULL) \ + kavenegar/README.md kavenegar/Makefile.inc $(NULL) \ + messagebird/README.md messagebird/Makefile.inc $(NULL) 
\ + pagerduty/README.md pagerduty/Makefile.inc $(NULL) \ + pushbullet/README.md pushbullet/Makefile.inc $(NULL) \ + pushover/README.md pushover/Makefile.inc $(NULL) \ + rocketchat/README.md rocketchat/Makefile.inc $(NULL) \ + slack/README.md slack/Makefile.inc $(NULL) syslog/README.md \ + syslog/Makefile.inc $(NULL) telegram/README.md \ + telegram/Makefile.inc $(NULL) twilio/README.md \ + twilio/Makefile.inc $(NULL) web/README.md web/Makefile.inc \ + $(NULL) +all: all-am + +.SUFFIXES: +.SUFFIXES: .in +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc $(srcdir)/kavenegar/Makefile.inc $(srcdir)/messagebird/Makefile.inc $(srcdir)/pagerduty/Makefile.inc $(srcdir)/pushbullet/Makefile.inc $(srcdir)/pushover/Makefile.inc $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu health/notifications/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu health/notifications/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/build/subst.inc $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc $(srcdir)/kavenegar/Makefile.inc $(srcdir)/messagebird/Makefile.inc $(srcdir)/pagerduty/Makefile.inc $(srcdir)/pushbullet/Makefile.inc $(srcdir)/pushover/Makefile.inc $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print 
"d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_pluginsSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(SCRIPTS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_libconfigDATA \ + install-dist_pluginsSCRIPTS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_libconfigDATA \ + uninstall-dist_pluginsSCRIPTS + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_libconfigDATA \ + install-dist_pluginsSCRIPTS install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS + +.in: + if sed \ + -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ + -e 's#[@]sbindir_POST@#$(sbindir)#g' \ + -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ + -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' 
\
+	    -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+	    $< > $@.tmp; then \
+	    mv "$@.tmp" "$@"; \
+	else \
+	    rm -f "$@.tmp"; \
+	    false; \
+	fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/health/notifications/README.md b/health/notifications/README.md
new file mode 100644
index 000000000..c06638ade
--- /dev/null
+++ b/health/notifications/README.md
@@ -0,0 +1,60 @@
+# Netdata alarm notifications
+
+The `exec` line in health configuration defines an external script that will be called when
+an alarm is triggered. The default script is **[alarm-notify.sh](alarm-notify.sh.in)**.
+
+You can change the default script globally by editing `/etc/netdata/netdata.conf`.
+
+`alarm-notify.sh` can send notifications:
+
+- to multiple recipients
+- using multiple notification methods
+- filtering severity per recipient
+
+It uses **roles**, for example `sysadmin`, `webmaster`, `dba`, etc.
+
+Each alarm is assigned to one or more roles, using the `to` line of the alarm configuration.
+`alarm-notify.sh` then uses its own configuration file, `/etc/netdata/health_alarm_notify.conf`
+(the default is [here](health_alarm_notify.conf); to edit it on your system run
+`/etc/netdata/edit-config health_alarm_notify.conf`), to find the destination address of the
+notification for each method.
+
+Each role may have one or more destinations.
+
+So, for example, the `sysadmin` role may send:
+
+1. emails to admin1@example.com and admin2@example.com
+2. pushover.net notifications to USERTOKENS `A`, `B` and `C`
+3. pushbullet.com push notifications to admin1@example.com and admin2@example.com
+4. messages to the slack.com channels `#alarms` and `#systems`
+5. messages to the Discord channels `#alarms` and `#systems`
+
+## Configuration
+
+Edit [`/etc/netdata/health_alarm_notify.conf`](health_alarm_notify.conf)
+by running `/etc/netdata/edit-config health_alarm_notify.conf`. It holds:
+
+1. **settings** per **notification method**:
+
+   all notification methods except email require some configuration
+   (i.e. API keys, tokens, destination rooms, channels, etc.)
+
+2. **recipients** per **role** per **notification method**
+
+## Testing Notifications
+
+You can run the following commands by hand to test your alarm notification configuration:
+
+```sh
+# become user netdata
+su -s /bin/bash netdata
+
+# enable debugging info on the console
+export NETDATA_ALARM_NOTIFY_DEBUG=1
+
+# send test alarms to sysadmin
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test
+
+# send test alarms to any role
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"
+```
diff --git a/health/notifications/alarm-email.sh b/health/notifications/alarm-email.sh
new file mode 100755
index 000000000..69c4c3f8d
--- /dev/null
+++ b/health/notifications/alarm-email.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# OBSOLETE - REPLACED WITH
+# alarm-notify.sh
+
+${0/alarm-email.sh/alarm-notify.sh} "${@}"
diff --git a/health/notifications/alarm-notify.sh b/health/notifications/alarm-notify.sh
new file mode 100644
index 000000000..33a59590e
--- /dev/null
+++ b/health/notifications/alarm-notify.sh
@@ -0,0 +1,2378 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
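Before diving into the script, note how the README's role model maps onto configuration: recipients per role per method are plain associative-array entries in `health_alarm_notify.conf`. A hypothetical fragment, where every address, token and channel is an example rather than a shipped default:

```sh
# hypothetical health_alarm_notify.conf entries; all values are examples
role_recipients_email[sysadmin]="admin1@example.com admin2@example.com"
role_recipients_slack[sysadmin]="alarms systems"
role_recipients_pushover[webmaster]="USERTOKEN|critical"  # '|critical' limits this
                                                          # recipient to CRITICAL
                                                          # alarms and their follow-ups
```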
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Script to send alarm notifications for netdata +# +# Features: +# - multiple notification methods +# - multiple roles per alarm +# - multiple recipients per role +# - severity filtering per recipient +# +# Supported notification methods: +# - emails by @ktsaou +# - slack.com notifications by @ktsaou +# - alerta.io notifications by @kattunga +# - discordapp.com notifications by @lowfive +# - pushover.net notifications by @ktsaou +# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070 +# - telegram.org notifications by @hashworks #1002 +# - twilio.com notifications by Levi Blaney @shadycuz #1211 +# - kafka notifications by @ktsaou #1342 +# - pagerduty.com notifications by Jim Cooley @jimcooley #1373 +# - messagebird.com notifications by @tech_no_logical #1453 +# - hipchat notifications by @ktsaou #1561 +# - fleep notifications by @Ferroin +# - custom notifications by @ktsaou +# - syslog messages by @Ferroin +# - Microsoft Team notification by @tioumen + +# ----------------------------------------------------------------------------- +# testing notifications + + +if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ] +then + if [ "${2}" = "test" ] + then + recipient="${1}" + else + recipient="${2}" + fi + + [ -z "${recipient}" ] && recipient="sysadmin" + + id=1 + last="CLEAR" + test_res=0 + for x in "WARNING" "CRITICAL" "CLEAR" + do + echo >&2 + echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}" + + "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" + if [ $? -ne 0 ] + then + echo >&2 "# FAILED" + test_res=1 + else + echo >&2 "# OK" + fi + + last="${x}" + id=$((id + 1)) + done + + exit $test_res +fi + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + +# ----------------------------------------------------------------------------- + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=${NETDATA_ALARM_NOTIFY_DEBUG-0} +debug() { + [ "${debug}" = "1" ] && log DEBUG "${@}" +} + +docurl() { + if [ -z "${curl}" ] + then + error "\${curl} is unset." + return 1 + fi + + if [ "${debug}" = "1" ] + then + echo >&2 "--- BEGIN curl command ---" + printf >&2 "%q " ${curl} "${@}" + echo >&2 + echo >&2 "--- END curl command ---" + + local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX) + local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}") + local ret=$? + echo >&2 "--- BEGIN received response ---" + cat >&2 "${out}" + echo >&2 + echo >&2 "--- END received response ---" + echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}" + rm "${out}" + echo "${code}" + return ${ret} + fi + + ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}" + return $? 
+}
+
+# -----------------------------------------------------------------------------
+# this is to be overwritten by the config file
+
+custom_sender() {
+    info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
+}
+
+
+# -----------------------------------------------------------------------------
+
+# check for BASH v4+ (required for associative arrays)
+[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \
+    fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="/usr/local/var/cache/netdata"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+
+# -----------------------------------------------------------------------------
+# parse command line parameters
+
+roles="${1}"               # the roles that should be notified for this event
+host="${2}"                # the host that generated this event
+unique_id="${3}"           # the unique id of this event
+alarm_id="${4}"            # the unique id of the alarm that generated this event
+event_id="${5}"            # the incremental id of the event, for this alarm id
+when="${6}"                # the timestamp when this event occurred
+name="${7}"                # the name of the alarm, as given in netdata health.d entries
+chart="${8}"               # the name of the chart (type.id)
+family="${9}"              # the family of the chart
+status="${10}"             # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+old_status="${11}"         # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+value="${12}"              # the current value of the alarm
+old_value="${13}"          # the previous value of the alarm
+src="${14}"                # the file and line number where the alarm has been configured
+duration="${15}"           # the duration in seconds of the previous alarm state
+non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
+units="${17}"              # the units of the value
+info="${18}"               # a short description of the alarm
+value_string="${19}"       # friendly value (with units)
+old_value_string="${20}"   # friendly old value (with units)
+
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+this_host=$(hostname -s 2>/dev/null)
+[ -z "${host}" ] && host="${this_host}"
+
+# -----------------------------------------------------------------------------
+# screen out the statuses for which we do not need to send a notification
+
+# don't do anything if this is not WARNING, CRITICAL or CLEAR
+if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ]
+then
+    info "not sending notification for ${status} of '${host}.${chart}.${name}'"
+    exit 1
+fi
+
+# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
+if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ]
+then
+    info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
+    exit 1
+fi
+
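Taken together, the parameter list and the screening above define the script's calling convention; a by-hand invocation supplies all twenty positional values in order, as the test mode earlier in the script also does. Every value here is illustrative:

```sh
# an illustrative by-hand invocation; the twenty fields follow the parameter
# list documented above (roles, host, ids, when, names, statuses, values, ...)
/usr/libexec/netdata/plugins.d/alarm-notify.sh "sysadmin" "myhost" 1 1 1 "$(date +%s)" \
    "test_alarm" "test.chart" "test.family" "WARNING" "CLEAR" 90 50 \
    "health/health.d/example.conf" 120 120 "%" "this is a test alarm" "90%" "50%"
```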
+# -----------------------------------------------------------------------------
+# load configuration
+
+# By default fetch images from the global public registry.
+# This is required by default, since all notification methods need to download
+# images via the Internet, and private registries might not be reachable.
+# This can be overwritten in the configuration file.
+images_base_url="https://registry.my-netdata.io"
+
+# curl options to use
+curl_options=""
+
+# needed commands
+# if empty they will be searched in the system path
+curl=
+sendmail=
+
+# enable / disable features
+SEND_SLACK="YES"
+SEND_MSTEAM="YES"
+SEND_ROCKETCHAT="YES"
+SEND_ALERTA="YES"
+SEND_FLOCK="YES"
+SEND_DISCORD="YES"
+SEND_PUSHOVER="YES"
+SEND_TWILIO="YES"
+SEND_HIPCHAT="YES"
+SEND_MESSAGEBIRD="YES"
+SEND_KAVENEGAR="YES"
+SEND_TELEGRAM="YES"
+SEND_EMAIL="YES"
+SEND_PUSHBULLET="YES"
+SEND_KAFKA="YES"
+SEND_PD="YES"
+SEND_FLEEP="YES"
+SEND_IRC="YES"
+SEND_AWSSNS="YES"
+SEND_SYSLOG="NO"
+SEND_CUSTOM="YES"
+
+# slack configs
+SLACK_WEBHOOK_URL=
+DEFAULT_RECIPIENT_SLACK=
+declare -A role_recipients_slack=()
+
+# Microsoft Team configs
+MSTEAM_WEBHOOK_URL=
+DEFAULT_RECIPIENT_MSTEAM=
+declare -A role_recipients_msteam=()
+
+# rocketchat configs
+ROCKETCHAT_WEBHOOK_URL=
+DEFAULT_RECIPIENT_ROCKETCHAT=
+declare -A role_recipients_rocketchat=()
+
+# alerta configs
+ALERTA_WEBHOOK_URL=
+ALERTA_API_KEY=
+DEFAULT_RECIPIENT_ALERTA=
+declare -A role_recipients_alerta=()
+
+# flock configs
+FLOCK_WEBHOOK_URL=
+DEFAULT_RECIPIENT_FLOCK=
+declare -A role_recipients_flock=()
+
+# discord configs
+DISCORD_WEBHOOK_URL=
+DEFAULT_RECIPIENT_DISCORD=
+declare -A role_recipients_discord=()
+
+# pushover configs
+PUSHOVER_APP_TOKEN=
+DEFAULT_RECIPIENT_PUSHOVER=
+declare -A role_recipients_pushover=()
+
+# pushbullet configs
+PUSHBULLET_ACCESS_TOKEN=
+PUSHBULLET_SOURCE_DEVICE=
+DEFAULT_RECIPIENT_PUSHBULLET=
+declare -A role_recipients_pushbullet=()
+
+# twilio configs
+TWILIO_ACCOUNT_SID=
+TWILIO_ACCOUNT_TOKEN=
+TWILIO_NUMBER=
+DEFAULT_RECIPIENT_TWILIO=
+declare -A role_recipients_twilio=()
+
+# hipchat configs
+HIPCHAT_SERVER=
+HIPCHAT_AUTH_TOKEN=
+DEFAULT_RECIPIENT_HIPCHAT=
+declare -A role_recipients_hipchat=()
+
+# messagebird configs
+MESSAGEBIRD_ACCESS_KEY=
+MESSAGEBIRD_NUMBER=
+DEFAULT_RECIPIENT_MESSAGEBIRD=
+declare -A role_recipients_messagebird=()
+
+# kavenegar configs
+KAVENEGAR_API_KEY=""
+KAVENEGAR_SENDER=""
+DEFAULT_RECIPIENT_KAVENEGAR=""
+declare -A role_recipients_kavenegar=()
+
+# telegram configs
+TELEGRAM_BOT_TOKEN=
+DEFAULT_RECIPIENT_TELEGRAM=
+declare -A role_recipients_telegram=()
+
+# kafka configs
+KAFKA_URL=
+KAFKA_SENDER_IP=
+
+# pagerduty.com configs
+PD_SERVICE_KEY=
+DEFAULT_RECIPIENT_PD=
+declare -A role_recipients_pd=()
+
+# fleep.io configs
+FLEEP_SENDER="${host}"
+DEFAULT_RECIPIENT_FLEEP=
+declare -A role_recipients_fleep=()
+
+# Amazon SNS configs
+DEFAULT_RECIPIENT_AWSSNS=
+AWSSNS_MESSAGE_FORMAT=
+declare -A role_recipients_awssns=()
+
+# syslog configs
+SYSLOG_FACILITY=
+declare -A role_recipients_syslog=()
+
+# custom configs
+DEFAULT_RECIPIENT_CUSTOM=
+declare -A role_recipients_custom=()
+
+# email configs
+EMAIL_SENDER=
+DEFAULT_RECIPIENT_EMAIL="root"
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+EMAIL_THREADING=
+declare -A role_recipients_email=()
+
+# irc configs
+IRC_NICKNAME=
+IRC_REALNAME=
+DEFAULT_RECIPIENT_IRC=
+IRC_NETWORK=
+declare -A role_recipients_irc=()
+
+# load the stock and user configuration files
+# these will overwrite the variables above
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf"
+do
+    if [ -f "${CONFIG}" ]
+    then
+        debug "Loading config file '${CONFIG}'..."
+ source "${CONFIG}" + [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi +done + +# If we didn't autodetect the character set for e-mail and it wasn't +# set by the user, we need to set it to a reasonable default. UTF-8 +# should be correct for almost all modern UNIX systems. +if [ -z ${EMAIL_CHARSET} ] + then + EMAIL_CHARSET="UTF-8" +fi + +# ----------------------------------------------------------------------------- +# filter a recipient based on alarm event severity + +filter_recipient_by_criticality() { + local method="${1}" x="${2}" r s + shift + + r="${x/|*/}" # the recipient + s="${x/*|/}" # the severity required for notifying this recipient + + # no severity filtering for this person + [ "${r}" = "${s}" ] && return 0 + + # the severity is invalid + s="${s^^}" + if [ "${s}" != "CRITICAL" ] + then + error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported." + return 0 + fi + + # create the status tracking directory for this user + [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \ + mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" + + case "${status}" in + CRITICAL) + # make sure he will get future notifications for this alarm too + touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" + debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)" + return 0 + ;; + + WARNING) + if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] + then + # we do not remove the file, so that he will get future notifications of this alarm + debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)" + return 0 + fi + ;; + + *) + if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] + then + # remove the file, so that he will only receive notifications for CRITICAL states for this alarm + rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" + debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)" + return 0 + fi + ;; + esac + + debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification" + return 1 +} + +# ----------------------------------------------------------------------------- +# find the recipients' addresses per method + +declare -A arr_slack=() +declare -A arr_msteam=() +declare -A arr_rocketchat=() +declare -A arr_alerta=() +declare -A arr_flock=() +declare -A arr_discord=() +declare -A arr_pushover=() +declare -A arr_pushbullet=() +declare -A arr_twilio=() +declare -A arr_hipchat=() +declare -A arr_telegram=() +declare -A arr_pd=() +declare -A arr_email=() +declare -A arr_custom=() +declare -A arr_messagebird=() +declare -A arr_kavenegar=() +declare -A arr_fleep=() +declare -A arr_irc=() +declare -A arr_syslog=() +declare -A arr_awssns=() + +# netdata may call us with multiple roles, and roles may have multiple but +# overlapping recipients - so, here we find the unique recipients. 
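The loop that follows implements exactly that. As a minimal, self-contained sketch of the same technique, where the role names, addresses and the `|critical` suffix are examples rather than defaults:

```sh
#!/usr/bin/env bash
# sketch of the de-duplication below: roles are split on commas and each
# recipient is stored once as a key of an associative array (a set)
declare -A arr_email=()
declare -A role_recipients_email=(
    [sysadmin]="admin1@example.com admin2@example.com"
    [webmaster]="admin2@example.com|critical webops@example.com"
)
roles="sysadmin,webmaster"
for x in ${roles//,/ }; do
    for r in ${role_recipients_email[${x}]}; do
        arr_email[${r/|*/}]="1"   # strip an optional '|severity' suffix
    done
done
echo "${!arr_email[@]}"   # the three unique addresses, in no particular order
```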
+for x in ${roles//,/ } +do + # the roles 'silent' and 'disabled' mean: + # don't send a notification for this role + [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue + + # email + a="${role_recipients_email[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_EMAIL}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality email "${r}" && arr_email[${r/|*/}]="1" + done + + # pushover + a="${role_recipients_pushover[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHOVER}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushover "${r}" && arr_pushover[${r/|*/}]="1" + done + + # pushbullet + a="${role_recipients_pushbullet[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHBULLET}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushbullet "${r}" && arr_pushbullet[${r/|*/}]="1" + done + + # twilio + a="${role_recipients_twilio[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TWILIO}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality twilio "${r}" && arr_twilio[${r/|*/}]="1" + done + + # hipchat + a="${role_recipients_hipchat[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_HIPCHAT}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality hipchat "${r}" && arr_hipchat[${r/|*/}]="1" + done + + # messagebird + a="${role_recipients_messagebird[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MESSAGEBIRD}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality messagebird "${r}" && arr_messagebird[${r/|*/}]="1" + done + + # kavenegar + a="${role_recipients_kavenegar[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_KAVENEGAR}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality kavenegar "${r}" && arr_kavenegar[${r/|*/}]="1" + done + + # telegram + a="${role_recipients_telegram[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TELEGRAM}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality telegram "${r}" && arr_telegram[${r/|*/}]="1" + done + + # slack + a="${role_recipients_slack[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SLACK}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1" + done + + # Microsoft Team + a="${role_recipients_msteam[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MSTEAM}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality msteam "${r}" && arr_msteam[${r/|*/}]="1" + done + + # rocketchat + a="${role_recipients_rocketchat[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ROCKETCHAT}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality rocketchat "${r}" && arr_rocketchat[${r/|*/}]="1" + done + + # alerta + a="${role_recipients_alerta[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality alerta "${r}" && arr_alerta[${r/|*/}]="1" + done + + # flock + a="${role_recipients_flock[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLOCK}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality flock "${r}" && arr_flock[${r/|*/}]="1" + done + + # discord + a="${role_recipients_discord[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_DISCORD}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && 
filter_recipient_by_criticality discord "${r}" && arr_discord[${r/|*/}]="1" + done + + # pagerduty.com + a="${role_recipients_pd[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PD}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1" + done + + # fleep.io + a="${role_recipients_fleep[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLEEP}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality fleep "${r}" && arr_fleep[${r/|*/}]="1" + done + + # irc + a="${role_recipients_irc[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && arr_irc[${r/|*/}]="1" + done + + # amazon sns + a="${role_recipients_awssns[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_AWSSNS}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality awssns "${r}" && arr_awssns[${r/|*/}]="1" + done + + # syslog + a="${role_recipients_syslog[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SYSLOG}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality syslog "${r}" && arr_syslog[${r/|*/}]="1" + done + + # custom + a="${role_recipients_custom[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1" + done + +done + +# build the list of slack recipients (channels) +to_slack="${!arr_slack[*]}" +[ -z "${to_slack}" ] && SEND_SLACK="NO" + +# build the list of Microsoft team recipients (channels) +to_msteam="${!arr_msteam[*]}" +[ -z "${to_msteam}" ] && SEND_MSTEAM="NO" + +# build the list of rocketchat recipients (channels) +to_rocketchat="${!arr_rocketchat[*]}" +[ -z "${to_rocketchat}" ] && SEND_ROCKETCHAT="NO" + +# build the list of alerta recipients (channels) +to_alerta="${!arr_alerta[*]}" +[ -z "${to_alerta}" ] && SEND_ALERTA="NO" + +# build the list of flock recipients (channels) +to_flock="${!arr_flock[*]}" +[ -z "${to_flock}" ] && SEND_FLOCK="NO" + +# build the list of discord recipients (channels) +to_discord="${!arr_discord[*]}" +[ -z "${to_discord}" ] && SEND_DISCORD="NO" + +# build the list of pushover recipients (user tokens) +to_pushover="${!arr_pushover[*]}" +[ -z "${to_pushover}" ] && SEND_PUSHOVER="NO" + +# build the list of pushbulet recipients (user tokens) +to_pushbullet="${!arr_pushbullet[*]}" +[ -z "${to_pushbullet}" ] && SEND_PUSHBULLET="NO" + +# build the list of twilio recipients (phone numbers) +to_twilio="${!arr_twilio[*]}" +[ -z "${to_twilio}" ] && SEND_TWILIO="NO" + +# build the list of hipchat recipients (rooms) +to_hipchat="${!arr_hipchat[*]}" +[ -z "${to_hipchat}" ] && SEND_HIPCHAT="NO" + +# build the list of messagebird recipients (phone numbers) +to_messagebird="${!arr_messagebird[*]}" +[ -z "${to_messagebird}" ] && SEND_MESSAGEBIRD="NO" + +# build the list of kavenegar recipients (phone numbers) +to_kavenegar="${!arr_kavenegar[*]}" +[ -z "${to_kavenegar}" ] && SEND_KAVENEGAR="NO" + +# check array of telegram recipients (chat ids) +to_telegram="${!arr_telegram[*]}" +[ -z "${to_telegram}" ] && SEND_TELEGRAM="NO" + +# build the list of pagerduty recipients (service keys) +to_pd="${!arr_pd[*]}" +[ -z "${to_pd}" ] && SEND_PD="NO" + +# build the list of fleep recipients (conversation webhooks) +to_fleep="${!arr_fleep[*]}" +[ -z "${to_fleep}" ] && SEND_FLEEP="NO" + +# build the list of custom recipients 
+to_custom="${!arr_custom[*]}" +[ -z "${to_custom}" ] && SEND_CUSTOM="NO" + +# build the list of email recipients (email addresses) +to_email= +for x in "${!arr_email[@]}" +do + [ ! -z "${to_email}" ] && to_email="${to_email}, " + to_email="${to_email}${x}" +done +[ -z "${to_email}" ] && SEND_EMAIL="NO" + +# build the list of irc recipients (channels) +to_irc="${!arr_irc[*]}" +[ -z "${to_irc}" ] && SEND_IRC="NO" + +# build the list of awssns recipients (facilities, servers, and prefixes) +to_awssns="${!arr_awssns[*]}" +[ -z "${to_awssns}" ] && SEND_AWSSNS="NO" + +# build the list of syslog recipients (facilities, servers, and prefixes) +to_syslog="${!arr_syslog[*]}" +[ -z "${to_syslog}" ] && SEND_SYSLOG="NO" + +# ----------------------------------------------------------------------------- +# verify the delivery methods supported + +# check slack +[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO" + +# check rocketchat +[ -z "${ROCKETCHAT_WEBHOOK_URL}" ] && SEND_ROCKETCHAT="NO" + +# check alerta +[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO" + +# check flock +[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO" + +# check discord +[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO" + +# check pushover +[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO" + +# check pushbullet +[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO" + +# check twilio +[ -z "${TWILIO_ACCOUNT_TOKEN}" -o -z "${TWILIO_ACCOUNT_SID}" -o -z "${TWILIO_NUMBER}" ] && SEND_TWILIO="NO" + +# check hipchat +[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO" + +# check messagebird +[ -z "${MESSAGEBIRD_ACCESS_KEY}" -o -z "${MESSAGEBIRD_NUMBER}" ] && SEND_MESSAGEBIRD="NO" + +# check kavenegar +[ -z "${KAVENEGAR_API_KEY}" -o -z "${KAVENEGAR_SENDER}" ] && SEND_KAVENEGAR="NO" + +# check telegram +[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO" + +# check kafka +[ -z "${KAFKA_URL}" -o -z "${KAFKA_SENDER_IP}" ] && SEND_KAFKA="NO" + +# check irc +[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO" + +# check fleep +[ -z "${FLEEP_SERVER}" -o -z "${FLEEP_SENDER}" ] && SEND_FLEEP="NO" + +# check pagerduty.com +# if we need pd-send, check for the pd-send command +# https://www.pagerduty.com/docs/guides/agent-install-guide/ +if [ "${SEND_PD}" = "YES" ] + then + pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)" + if [ -z "${pd_send}" ] + then + error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications." + SEND_PD="NO" + fi +fi + +# if we need curl, check for the curl command +if [ \( \ + "${SEND_PUSHOVER}" = "YES" \ + -o "${SEND_SLACK}" = "YES" \ + -o "${SEND_ROCKETCHAT}" = "YES" \ + -o "${SEND_ALERTA}" = "YES" \ + -o "${SEND_FLOCK}" = "YES" \ + -o "${SEND_DISCORD}" = "YES" \ + -o "${SEND_HIPCHAT}" = "YES" \ + -o "${SEND_TWILIO}" = "YES" \ + -o "${SEND_MESSAGEBIRD}" = "YES" \ + -o "${SEND_KAVENEGAR}" = "YES" \ + -o "${SEND_TELEGRAM}" = "YES" \ + -o "${SEND_PUSHBULLET}" = "YES" \ + -o "${SEND_KAFKA}" = "YES" \ + -o "${SEND_FLEEP}" = "YES" \ + -o "${SEND_CUSTOM}" = "YES" \ + -o "${SEND_MSTEAM}" = "YES" \ + \) -a -z "${curl}" ] + then + curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" + if [ -z "${curl}" ] + then + error "Cannot find curl command in the system path. Disabling all curl based notifications." 
+ SEND_PUSHOVER="NO" + SEND_PUSHBULLET="NO" + SEND_TELEGRAM="NO" + SEND_SLACK="NO" + SEND_MSTEAM="NO" + SEND_ROCKETCHAT="NO" + SEND_ALERTA="NO" + SEND_FLOCK="NO" + SEND_DISCORD="NO" + SEND_TWILIO="NO" + SEND_HIPCHAT="NO" + SEND_MESSAGEBIRD="NO" + SEND_KAVENEGAR="NO" + SEND_KAFKA="NO" + SEND_FLEEP="NO" + SEND_CUSTOM="NO" + fi +fi + +# if we need sendmail, check for the sendmail command +if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ] + then + sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)" + if [ -z "${sendmail}" ] + then + debug "Cannot find sendmail command in the system path. Disabling email notifications." + SEND_EMAIL="NO" + fi +fi + +# if we need logger, check for the logger command +if [ "${SEND_SYSLOG}" = "YES" -a -z "${logger}" ] + then + logger="$(which logger 2>/dev/null || command -v logger 2>/dev/null)" + if [ -z "${logger}" ] + then + debug "Cannot find logger command in the system path. Disabling syslog notifications." + SEND_SYSLOG="NO" + fi +fi + +# if we need aws, check for the aws command +if [ "${SEND_AWSSNS}" = "YES" -a -z "${aws}" ] + then + aws="$(which aws 2>/dev/null || command -v aws 2>/dev/null)" + if [ -z "${aws}" ] + then + debug "Cannot find aws command in the system path. Disabling Amazon SNS notifications." + SEND_AWSSNS="NO" + fi +fi + +# check that we have at least a method enabled +if [ "${SEND_EMAIL}" != "YES" \ + -a "${SEND_PUSHOVER}" != "YES" \ + -a "${SEND_TELEGRAM}" != "YES" \ + -a "${SEND_SLACK}" != "YES" \ + -a "${SEND_ROCKETCHAT}" != "YES" \ + -a "${SEND_ALERTA}" != "YES" \ + -a "${SEND_FLOCK}" != "YES" \ + -a "${SEND_DISCORD}" != "YES" \ + -a "${SEND_TWILIO}" != "YES" \ + -a "${SEND_HIPCHAT}" != "YES" \ + -a "${SEND_MESSAGEBIRD}" != "YES" \ + -a "${SEND_KAVENEGAR}" != "YES" \ + -a "${SEND_PUSHBULLET}" != "YES" \ + -a "${SEND_KAFKA}" != "YES" \ + -a "${SEND_PD}" != "YES" \ + -a "${SEND_FLEEP}" != "YES" \ + -a "${SEND_CUSTOM}" != "YES" \ + -a "${SEND_IRC}" != "YES" \ + -a "${SEND_AWSSNS}" != "YES" \ + -a "${SEND_SYSLOG}" != "YES" \ + -a "${SEND_MSTEAM}" != "YES" \ + ] + then + fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'." 
+fi + +# ----------------------------------------------------------------------------- +# get the date the alarm happened + +date=$(date --date=@${when} "${date_format}" 2>/dev/null) +[ -z "${date}" ] && date=$(date "${date_format}" 2>/dev/null) +[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null) +[ -z "${date}" ] && date=$(date 2>/dev/null) + +# ---------------------------------------------------------------------------- +# prepare some extra headers if we've been asked to thread e-mails +if [ "${SEND_EMAIL}" == "YES" -a "${EMAIL_THREADING}" == "YES" ] ; then + email_thread_headers="In-Reply-To: <${chart}-${name}@${host}>\nReferences: <${chart}-${name}@${host}>" +else + email_thread_headers= +fi + +# ----------------------------------------------------------------------------- +# function to URL encode a string + +urlencode() { + local string="${1}" strlen encoded pos c o + + strlen=${#string} + for (( pos=0 ; pos<strlen ; pos++ )) + do + c=${string:${pos}:1} + case "${c}" in + [-_.~a-zA-Z0-9]) + o="${c}" + ;; + + *) + printf -v o '%%%02x' "'${c}" + ;; + esac + encoded+="${o}" + done + + REPLY="${encoded}" + echo "${REPLY}" +} + +# ----------------------------------------------------------------------------- +# function to convert a duration in seconds, to a human readable duration +# using DAYS, MINUTES, SECONDS + +duration4human() { + local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret + d=$(( s / 86400 )) + s=$(( s - (d * 86400) )) + h=$(( s / 3600 )) + s=$(( s - (h * 3600) )) + m=$(( s / 60 )) + s=$(( s - (m * 60) )) + + if [ ${d} -gt 0 ] + then + [ ${m} -ge 30 ] && h=$(( h + 1 )) + [ ${d} -gt 1 ] && ds="days" + [ ${h} -gt 1 ] && hs="hours" + if [ ${h} -gt 0 ] + then + ret="${d} ${ds} and ${h} ${hs}" + else + ret="${d} ${ds}" + fi + elif [ ${h} -gt 0 ] + then + [ ${s} -ge 30 ] && m=$(( m + 1 )) + [ ${h} -gt 1 ] && hs="hours" + [ ${m} -gt 1 ] && ms="minutes" + if [ ${m} -gt 0 ] + then + ret="${h} ${hs} and ${m} ${ms}" + else + ret="${h} ${hs}" + fi + elif [ ${m} -gt 0 ] + then + [ ${m} -gt 1 ] && ms="minutes" + [ ${s} -gt 1 ] && ss="seconds" + if [ ${s} -gt 0 ] + then + ret="${m} ${ms} and ${s} ${ss}" + else + ret="${m} ${ms}" + fi + else + [ ${s} -gt 1 ] && ss="seconds" + ret="${s} ${ss}" + fi + + REPLY="${ret}" + echo "${REPLY}" +} + +# ----------------------------------------------------------------------------- +# email sender + +send_email() { + local ret= opts=() sender_email="${EMAIL_SENDER}" sender_name= + if [ "${SEND_EMAIL}" = "YES" ] + then + + if [ ! -z "${EMAIL_SENDER}" ] + then + if [[ "${EMAIL_SENDER}" =~ ^\".*\"\ \<.*\>$ ]] + then + # the name includes double quotes + sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)" + sender_name="$(echo "${EMAIL_SENDER}" | cut -d '"' -f 2)" + elif [[ "${EMAIL_SENDER}" =~ ^\'.*\'\ \<.*\>$ ]] + then + # the name includes single quotes + sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)" + sender_name="$(echo "${EMAIL_SENDER}" | cut -d "'" -f 2)" + elif [[ "${EMAIL_SENDER}" =~ ^.*\ \<.*\>$ ]] + then + # the name does not have any quotes + sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)" + sender_name="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)" + fi + fi + + [ ! -z "${sender_email}" ] && opts+=(-f "${sender_email}") + [ ! 
-z "${sender_name}" ] && opts+=(-F "${sender_name}") + + if [ "${debug}" = "1" ] + then + echo >&2 "--- BEGIN sendmail command ---" + printf >&2 "%q " "${sendmail}" -t "${opts[@]}" + echo >&2 + echo >&2 "--- END sendmail command ---" + fi + + "${sendmail}" -t "${opts[@]}" + ret=$? + + if [ ${ret} -eq 0 ] + then + info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'" + return 0 + else + error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}." + return 1 + fi + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# pushover sender + +send_pushover() { + local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority + + if [ "${SEND_PUSHOVER}" = "YES" -a ! -z "${apptoken}" -a ! -z "${usertokens}" -a ! -z "${title}" -a ! -z "${message}" ] + then + + # https://pushover.net/api + priority=-2 + case "${status}" in + CLEAR) priority=-1;; # low priority: no sound or vibration + WARNING) priority=0;; # normal priority: respect quiet hours + CRITICAL) priority=1;; # high priority: bypass quiet hours + *) priority=-2;; # lowest priority: no notification at all + esac + + for user in ${usertokens} + do + httpcode=$(docurl \ + --form-string "token=${apptoken}" \ + --form-string "user=${user}" \ + --form-string "html=1" \ + --form-string "title=${title}" \ + --form-string "message=${message}" \ + --form-string "timestamp=${when}" \ + --form-string "url=${url}" \ + --form-string "url_title=Open netdata dashboard to view the alarm" \ + --form-string "priority=${priority}" \ + https://api.pushover.net/1/messages.json) + + if [ "${httpcode}" = "200" ] + then + info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# pushbullet sender + +send_pushbullet() { + local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user + if [ "${SEND_PUSHBULLET}" = "YES" -a ! -z "${userapikey}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] + then + #https://docs.pushbullet.com/#create-push + for user in ${recipients} + do + httpcode=$(docurl \ + --header 'Access-Token: '${userapikey}'' \ + --header 'Content-Type: application/json' \ + --data-binary @<(cat <<EOF + {"title": "${title}", + "type": "link", + "email": "${user}", + "body": "$( echo -n ${message})", + "url": "${url}", + "source_device_iden": "${source_device}"} +EOF + ) "https://api.pushbullet.com/v2/pushes" -X POST) + + if [ "${httpcode}" = "200" ] + then + info "sent pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." 
+ fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# kafka sender + +send_kafka() { + local httpcode sent=0 + if [ "${SEND_KAFKA}" = "YES" ] + then + httpcode=$(docurl -X POST \ + --data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \ + "${KAFKA_URL}") + + if [ "${httpcode}" = "204" ] + then + info "sent kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}'" + sent=$((sent + 1)) + else + error "failed to send kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}' with HTTP error code ${httpcode}." + fi + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# pagerduty.com sender + +send_pd() { + local recipients="${1}" sent=0 + unset t + case ${status} in + CLEAR) t='resolve';; + WARNING) t='trigger';; + CRITICAL) t='trigger';; + esac + + if [ ${SEND_PD} = "YES" -a ! -z "${t}" ] + then + for PD_SERVICE_KEY in ${recipients} + do + d="${status} ${name} = ${value_string} - ${host}, ${family}" + ${pd_send} -k ${PD_SERVICE_KEY} \ + -t ${t} \ + -d "${d}" \ + -i ${host}:${chart}:${name} \ + -f 'info'="${info}" \ + -f 'value_w_units'="${value_string}" \ + -f 'when'="${when}" \ + -f 'duration'="${duration}" \ + -f 'roles'="${roles}" \ + -f 'host'="${host}" \ + -f 'unique_id'="${unique_id}" \ + -f 'alarm_id'="${alarm_id}" \ + -f 'event_id'="${event_id}" \ + -f 'name'="${name}" \ + -f 'chart'="${chart}" \ + -f 'family'="${family}" \ + -f 'status'="${status}" \ + -f 'old_status'="${old_status}" \ + -f 'value'="${value}" \ + -f 'old_value'="${old_value}" \ + -f 'src'="${src}" \ + -f 'non_clear_duration'="${non_clear_duration}" \ + -f 'units'="${units}" + retval=$? + if [ ${retval} -eq 0 ] + then + info "sent pagerduty.com notification for host ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}....: ${d}" + sent=$((sent + 1)) + else + error "failed to send pagerduty.com notification for ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}" + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# twilio sender + +send_twilio() { + local accountsid="${1}" accounttoken="${2}" twilionumber="${3}" recipients="${4}" title="${5}" message="${6}" httpcode sent=0 user + if [ "${SEND_TWILIO}" = "YES" -a ! -z "${accountsid}" -a ! -z "${accounttoken}" -a ! -z "${twilionumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] + then + #https://www.twilio.com/packages/labs/code/bash/twilio-sms + for user in ${recipients} + do + httpcode=$(docurl -X POST \ + --data-urlencode "From=${twilionumber}" \ + --data-urlencode "To=${user}" \ + --data-urlencode "Body=${title} ${message}" \ + -u "${accountsid}:${accounttoken}" \ + "https://api.twilio.com/2010-04-01/Accounts/${accountsid}/Messages.json") + + if [ "${httpcode}" = "201" ] + then + info "sent Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." 
+ fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + + +# ----------------------------------------------------------------------------- +# hipchat sender + +send_hipchat() { + local authtoken="${1}" recipients="${2}" message="${3}" httpcode sent=0 room color sender msg_format notify + + # remove <small></small> from the message + message="${message//<small>/}" + message="${message//<\/small>/}" + + if [ "${SEND_HIPCHAT}" = "YES" -a ! -z "${HIPCHAT_SERVER}" -a ! -z "${authtoken}" -a ! -z "${recipients}" -a ! -z "${message}" ] + then + # A label to be shown in addition to the sender's name + # Valid length range: 0 - 64. + sender="netdata" + + # Valid values: html, text. + # Defaults to 'html'. + msg_format="html" + + # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'. + case "${status}" in + WARNING) color="yellow" ;; + CRITICAL) color="red" ;; + CLEAR) color="green" ;; + *) color="gray" ;; + esac + + # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc). + # Each recipient's notification preferences are taken into account. + # Defaults to false. + notify="true" + + for room in ${recipients} + do + httpcode=$(docurl -X POST \ + -H "Content-type: application/json" \ + -H "Authorization: Bearer ${authtoken}" \ + -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \ + "https://${HIPCHAT_SERVER}/v2/room/${room}/notification") + + if [ "${httpcode}" = "204" ] + then + info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'" + sent=$((sent + 1)) + else + error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + + +# ----------------------------------------------------------------------------- +# messagebird sender + +send_messagebird() { + local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user + if [ "${SEND_MESSAGEBIRD}" = "YES" -a ! -z "${accesskey}" -a ! -z "${messagebirdnumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] + then + #https://developers.messagebird.com/docs/messaging + for user in ${recipients} + do + httpcode=$(docurl -X POST \ + --data-urlencode "originator=${messagebirdnumber}" \ + --data-urlencode "recipients=${user}" \ + --data-urlencode "body=${title} ${message}" \ + --data-urlencode "datacoding=auto" \ + -H "Authorization: AccessKey ${accesskey}" \ + "https://rest.messagebird.com/messages") + + if [ "${httpcode}" = "201" ] + then + info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# kavenegar sender + +send_kavenegar() { + local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user + if [ "${SEND_KAVENEGAR}" = "YES" -a ! -z "${API_KEY}" -a ! -z "${kavenegarsender}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! 
-z "${title}" ] + then + # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json + for user in ${recipients} + do + httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \ + --data-urlencode "sender=${kavenegarsender}" \ + --data-urlencode "receptor=${user}" \ + --data-urlencode "message=${title} ${message}") + + if [ "${httpcode}" = "201" ] + then + info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# telegram sender + +send_telegram() { + local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification="" + + if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi + + case "${status}" in + WARNING) emoji="⚠️" ;; + CRITICAL) emoji="🔴" ;; + CLEAR) emoji="✅" ;; + *) emoji="⚪️" ;; + esac + + if [ "${SEND_TELEGRAM}" = "YES" -a ! -z "${bottoken}" -a ! -z "${chatids}" -a ! -z "${message}" ]; + then + for chatid in ${chatids} + do + # https://core.telegram.org/bots/api#sendmessage + httpcode=$(docurl ${disableNotification} \ + --data-urlencode "parse_mode=HTML" \ + --data-urlencode "disable_web_page_preview=true" \ + --data-urlencode "text=${emoji} ${message}" \ + "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}") + + if [ "${httpcode}" = "200" ] + then + info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'" + sent=$((sent + 1)) + elif [ "${httpcode}" = "401" ] + then + error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token." + else + error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# Microsoft Team sender + +send_msteam() { + + local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload + + [ "${SEND_MSTEAM}" != "YES" ] && return 1 + + case "${status}" in + WARNING) icon="${MSTEAM_ICON_WARNING}" && color="${MSTEAM_COLOR_WARNING}";; + CRITICAL) icon="${MSTEAM_ICON_CRITICAL}" && color="${MSTEAM_COLOR_CRITICAL}";; + CLEAR) icon="${MSTEAM_ICON_CLEAR}" && color="${MSTEAM_COLOR_CLEAR}";; + *) icon="${MSTEAM_ICON_DEFAULT}" && color="${MSTEAM_COLOR_DEFAULT}";; + esac + + for channel in ${channels} + do + ## More details are available here regarding the payload syntax options : https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference + ## Online designer : https://acdesignerbeta.azurewebsites.net/ + payload="$(cat <<EOF + { + "@context": "http://schema.org/extensions", + "@type": "MessageCard", + "themeColor": "${color}", + "title": "$icon Alert ${status} from netdata for ${host}", + "text": "${host} ${status_message}, ${chart} (_${family}_), *${alarm}*", + "potentialAction": [ + { + "@type": "OpenUri", + "name": "Netdata", + "targets": [ + { "os": "default", "uri": "${goto_url}" } + ] + } + ] + } +EOF + )" + + # Replacing in the webhook CHANNEL string by the MS Teams channel name from conf file. 
+ webhook="${webhook//CHANNEL/${channel}}" + + httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${webhook}") + + if [ "${httpcode}" = "200" ] + then + info "sent Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${webhook}'" + sent=$((sent + 1)) + else + error "failed to send Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${webhook}', with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + + +# slack sender + +send_slack() { + local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload + + [ "${SEND_SLACK}" != "YES" ] && return 1 + + case "${status}" in + WARNING) color="warning" ;; + CRITICAL) color="danger" ;; + CLEAR) color="good" ;; + *) color="#777777" ;; + esac + + for channel in ${channels} + do + payload="$(cat <<EOF + { + "channel": "#${channel}", + "username": "netdata on ${host}", + "icon_url": "${images_base_url}/images/seo-performance-128.png", + "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*", + "attachments": [ + { + "fallback": "${alarm} - ${chart} (${family}) - ${info}", + "color": "${color}", + "title": "${alarm}", + "title_link": "${goto_url}", + "text": "${info}", + "fields": [ + { + "title": "${chart}", + "short": true + }, + { + "title": "${family}", + "short": true + } + ], + "thumb_url": "${image}", + "footer": "by <${goto_url}|${this_host}>", + "ts": ${when} + } + ] + } +EOF + )" + + httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") + if [ "${httpcode}" = "200" ] + then + info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" + sent=$((sent + 1)) + else + error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + + +# ----------------------------------------------------------------------------- +# rocketchat sender + +send_rocketchat() { + local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload + + [ "${SEND_ROCKETCHAT}" != "YES" ] && return 1 + + case "${status}" in + WARNING) color="warning" ;; + CRITICAL) color="danger" ;; + CLEAR) color="good" ;; + *) color="#777777" ;; + esac + + for channel in ${channels} + do + payload="$(cat <<EOF + { + "channel": "#${channel}", + "alias": "netdata on ${host}", + "avatar": "${images_base_url}/images/seo-performance-128.png", + "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*", + "attachments": [ + { + "color": "${color}", + "title": "${alarm}", + "title_link": "${goto_url}", + "text": "${info}", + "fields": [ + { + "title": "${chart}", + "short": true, + "value": "chart" + }, + { + "title": "${family}", + "short": true, + "value": "family" + } + ], + "thumb_url": "${image}", + "ts": "${when}" + } + ] + } +EOF + )" + + httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") + if [ "${httpcode}" = "200" ] + then + info "sent rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" + sent=$((sent + 1)) + else + error "failed to send rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." 
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# alerta sender

+send_alerta() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel severity content
+
+ [ "${SEND_ALERTA}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) severity="warning" ;;
+ CRITICAL) severity="critical" ;;
+ CLEAR) severity="cleared" ;;
+ *) severity="unknown" ;;
+ esac
+
+ # normalize whitespace in ${info} (unquoted expansion), so it can be
+ # embedded safely in the JSON payload below
+ info=$( echo -n ${info})
+
+ # the "event" property must be unique and repeatable across states, so that
+ # alerta can correlate alerts automatically using the severity value
+ for channel in ${channels}
+ do
+ content="{"
+ content="$content \"environment\": \"${channel}\","
+ content="$content \"service\": [\"${host}\"],"
+ content="$content \"resource\": \"${host}\","
+ content="$content \"event\": \"${name}.${chart} (${family})\","
+ content="$content \"severity\": \"${severity}\","
+ content="$content \"value\": \"${alarm}\","
+ content="$content \"text\": \"${info}\""
+ content="$content }"
+
+
+ httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: Key $ALERTA_API_KEY" -d "$content" )
+
+ if [[ "${httpcode}" = "200" || "${httpcode}" = "201" ]]
+ then
+ info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# flock sender
+
+send_flock() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_FLOCK}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}
+ do
+ httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{
+ \"sendAs\": {
+ \"name\" : \"netdata on ${host}\",
+ \"profileImage\" : \"${images_base_url}/images/seo-performance-128.png\"
+ },
+ \"text\": \"${host} *${status_message}*\",
+ \"timestamp\": \"${when}\",
+ \"attachments\": [
+ {
+ \"description\": \"${chart} (${family}) - ${info}\",
+ \"color\": \"${color}\",
+ \"title\": \"${alarm}\",
+ \"url\": \"${goto_url}\",
+ \"text\": \"${info}\",
+ \"views\": {
+ \"image\": {
+ \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 },
+ \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 },
+ \"filename\": \"${image}\"
+ }
+ }
+ }
+ ]
+ }" )
+ if [ "${httpcode}" = "200" ]
+ then
+ info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi + done + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + +# ----------------------------------------------------------------------------- +# discord sender + +send_discord() { + local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username + + [ "${SEND_DISCORD}" != "YES" ] && return 1 + + case "${status}" in + WARNING) color="warning" ;; + CRITICAL) color="danger" ;; + CLEAR) color="good" ;; + *) color="#777777" ;; + esac + + for channel in ${channels} + do + username="netdata on ${host}" + [ ${#username} -gt 32 ] && username="${username:0:29}..." + + payload="$(cat <<EOF + { + "channel": "#${channel}", + "username": "${username}", + "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*", + "icon_url": "${images_base_url}/images/seo-performance-128.png", + "attachments": [ + { + "color": "${color}", + "title": "${alarm}", + "title_link": "${goto_url}", + "text": "${info}", + "fields": [ + { + "title": "${chart}", + "value": "${family}" + } + ], + "thumb_url": "${image}", + "footer_icon": "${images_base_url}/images/seo-performance-128.png", + "footer": "${this_host}", + "ts": ${when} + } + ] + } +EOF + )" + + httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") + if [ "${httpcode}" = "200" ] + then + info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" + sent=$((sent + 1)) + else + error "failed to send discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + +# ----------------------------------------------------------------------------- +# fleep sender + +send_fleep() { + local httpcode sent=0 webhooks="${1}" data message + if [ "${SEND_FLEEP}" = "YES" ] ; then + message="${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}" + + for hook in "${webhooks}" ; do + data="{ " + data="${data} 'message': '${message}', " + data="${data} 'user': '${FLEEP_SENDER}' " + data="${data} }" + + httpcode=$(docurl -X POST --data "${data}" "https://fleep.io/hook/${hook}") + + if [ "${httpcode}" = "200" ] ; then + info "sent fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}'" + sent=$((sent + 1)) + else + error "failed to send fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# irc sender + +send_irc() { + local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error + + if [ "${SEND_IRC}" = "YES" -a ! -z "${NICKNAME}" -a ! -z "${REALNAME}" -a ! -z "${CHANNELS}" -a ! -z "${NETWORK}" -a ! 
-z "${SERVERNAME}" ] + then + case "${status}" in + WARNING) color="warning" ;; + CRITICAL) color="danger" ;; + CLEAR) color="good" ;; + *) color="#777777" ;; + esac + + for CHANNEL in ${CHANNELS} + do + error=0 + send_alarm=$(echo -e "USER ${NICKNAME} guest ${REALNAME} ${SERVERNAME}\nNICK ${NICKNAME}\nJOIN ${CHANNEL}\nPRIVMSG ${CHANNEL} :${MESSAGE}\nQUIT\n" \ | nc ${NETWORK} 6667) + reply_codes=$(echo ${send_alarm} | cut -d ' ' -f 2 | grep -o '[0-9]*') + for code in ${reply_codes} + do + [ "${code}" -ge 400 -a "${code}" -le 599 ] && error=1 && break + done + + if [ "${error}" -eq 0 ] + then + info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'" + sent=$((sent + 1)) + else + error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}." + fi + done + fi + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + +# ----------------------------------------------------------------------------- +# Amazon SNS sender + +send_awssns() { + local targets="${1}" message='' sent=0 region='' + local default_format="${status} on ${host} at ${date}: ${chart} ${value_string}" + + [ "${SEND_AWSSNS}" = "YES" ] || return 1 + + message=${AWSSNS_MESSAGE_FORMAT:-${default_format}} + + for target in ${targets} ; do + # Extract the region from the target ARN. We need to explicitly specify the region so that it matches up correctly. + region="$(echo ${target} | cut -f 4 -d ':')" + ${aws} sns publish --region "${region}" --subject "${host} ${status_message} - ${name//_/ } - ${chart}" --message "${message}" --target-arn ${target} &>/dev/null + if [ $? = 0 ]; then + info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" + sent=$((sent + 1)) + else + error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" + fi + done + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + +# ----------------------------------------------------------------------------- +# syslog sender + +send_syslog() { + local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}" + local priority='' message='' host='' port='' prefix='' + local temp1='' temp2='' + + [ "${SEND_SYSLOG}" = "YES" ] || return 1 + + if [ "${status}" = "CRITICAL" ] ; then + level='crit' + elif [ "${status}" = "WARNING" ] ; then + level='warning' + fi + + for target in ${targets} ; do + priority="${facility}.${level}" + message='' + host='' + port='' + prefix='' + temp1='' + temp2='' + + prefix=$(echo ${target} | cut -d '/' -f 2) + temp1=$(echo ${target} | cut -d '/' -f 1) + + if [ ${prefix} != ${temp1} ] ; then + if (echo ${temp1} | grep -q '@' ) ; then + temp2=$(echo ${temp1} | cut -d '@' -f 1) + host=$(echo ${temp1} | cut -d '@' -f 2) + + if [ ${temp2} != ${host} ] ; then + priority=${temp2} + fi + + port=$(echo ${host} | rev | cut -d ':' -f 1 | rev) + + if ( echo ${host} | grep -E -q '\[.*\]' ) ; then + if ( echo ${port} | grep -q ']' ) ; then + port='' + else + host=$(echo ${host} | rev | cut -d ':' -f 2- | rev) + fi + else + if [ ${port} = ${host} ] ; then + port='' + else + host=$(echo ${host} | cut -d ':' -f 1) + fi + fi + else + priority=${temp1} + fi + fi + + message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}" + + if [ ${host} ] ; then + logger_options="${logger_options} -n ${host}" + if [ ${port} ] ; then + logger_options="${logger_options} -P ${port}" + fi + fi + + ${logger} -p ${priority} ${logger_options} "${message}" + done + + return $? 
+}
+
+
+# -----------------------------------------------------------------------------
+# prepare the content of the notification
+
+# the URL to send the user to on click
+urlencode "${host}" >/dev/null; url_host="${REPLY}"
+urlencode "${chart}" >/dev/null; url_chart="${REPLY}"
+urlencode "${family}" >/dev/null; url_family="${REPLY}"
+urlencode "${name}" >/dev/null; url_name="${REPLY}"
+goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}"
+
+# the severity of the alarm
+severity="${status}"
+
+# how long the alarm has been raised, in human-readable form
+duration4human ${duration} >/dev/null; duration_txt="${REPLY}"
+duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}"
+raised_for="(was ${old_status,,} for ${duration_txt})"
+
+# the key status message
+status_message="status unknown"
+
+# the color of the alarm
+color="grey"
+
+# the alarm value
+alarm="${name//_/ } = ${value_string}"
+
+# the image of the alarm
+image="${images_base_url}/images/seo-performance-128.png"
+
+# prepare the image, status message and color based on status
+case "${status}" in
+ CRITICAL)
+ image="${images_base_url}/images/alert-128-red.png"
+ status_message="is critical"
+ color="#ca414b"
+ ;;
+
+ WARNING)
+ image="${images_base_url}/images/alert-128-orange.png"
+ status_message="needs attention"
+ color="#ffc107"
+ ;;
+
+ CLEAR)
+ image="${images_base_url}/images/check-mark-2-128-green.png"
+ status_message="recovered"
+ color="#77ca6d"
+ ;;
+esac
+
+if [ "${status}" = "CLEAR" ]
+then
+ severity="Recovered from ${old_status}"
+ if [ ${non_clear_duration} -gt ${duration} ]
+ then
+ raised_for="(alarm was raised for ${non_clear_duration_txt})"
+ fi
+
+ # don't show the value when the status is CLEAR
+ # for certain alarms, this value might not have any meaning
+ alarm="${name//_/ } ${raised_for}"
+
+elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ]
+then
+ severity="Escalated to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]
+ then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ]
+then
+ severity="Demoted to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]
+ then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+else
+ raised_for=
+fi
+
+# prepare HTML versions of elements
+info_html=
+[ ! -z "${info}" ] && info_html=" <small><br/>${info}</small>"
+
+raised_for_html=
+[ ! -z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>"
+
+# -----------------------------------------------------------------------------
+# send the slack notification
+
+# slack aggregates posts from the same username,
+# so we use "netdata on ${host}" as the bot username, to keep hosts distinct
+
+send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}"
+SENT_SLACK=$?
+
+# -----------------------------------------------------------------------------
+# send the Microsoft Teams notification
+
+# Microsoft Teams aggregates posts from the same sender,
+# so the notification title identifies the host, to keep hosts distinct
+
+send_msteam "${MSTEAM_WEBHOOK_URL}" "${to_msteam}"
+SENT_MSTEAM=$?
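+
+# Each sender returns 0 when at least one recipient was notified and 1
+# otherwise; the SENT_* variables collect these exit codes, so the script can
+# exit successfully if any method delivered (see the end of this script).
+# A minimal sketch of the same pattern, with a hypothetical sender name:
+#
+#   send_example "${to_example}"
+#   SENT_EXAMPLE=$?
+#   [ ${SENT_EXAMPLE} -eq 0 ] && info "delivered via example"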
+ +# ----------------------------------------------------------------------------- +# send the rocketchat notification + +# rocketchat aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_rocketchat "${ROCKETCHAT_WEBHOOK_URL}" "${to_rocketchat}" +SENT_ROCKETCHAT=$? + +# ----------------------------------------------------------------------------- +# send the alerta notification + +# alerta aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}" +SENT_ALERTA=$? + +# ----------------------------------------------------------------------------- +# send the flock notification + +# flock aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}" +SENT_FLOCK=$? + +# ----------------------------------------------------------------------------- +# send the discord notification + +# discord aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}" +SENT_DISCORD=$? + +# ----------------------------------------------------------------------------- +# send the pushover notification + +send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" " +<font color=\"${color}\"><b>${alarm}</b></font>${info_html}<br/>  +<small><b>${chart}</b><br/>Chart<br/> </small> +<small><b>${family}</b><br/>Family<br/> </small> +<small><b>${severity}</b><br/>Severity<br/> </small> +<small><b>${date}${raised_for_html}</b><br/>Time<br/> </small> +<a href=\"${goto_url}\">View Netdata</a><br/>  +<small><small>The source of this alarm is line ${src}</small></small> +" + +SENT_PUSHOVER=$? + +# ----------------------------------------------------------------------------- +# send the pushbullet notification + +send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\n +Severity: ${severity}\n +Chart: ${chart}\n +Family: ${family}\n +$(date -d @${when})\n +The source of this alarm is line ${src}" + +SENT_PUSHBULLET=$? + +# ----------------------------------------------------------------------------- +# send the twilio SMS + +send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_TWILIO=$? + +# ----------------------------------------------------------------------------- +# send the messagebird SMS + +send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_MESSAGEBIRD=$? + + +# ----------------------------------------------------------------------------- +# send the kavenegar SMS + +send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_KAVENEGAR=$? 
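+
+# The SMS senders above (twilio, messagebird, kavenegar) all receive the same
+# plain-text body, rendered from the variables prepared earlier:
+#
+#   <host> <status_message> - <name> - <chart>    (the title)
+#   <alarm>
+#   Severity: <severity>
+#   Chart: <chart>
+#   Family: <family>
+#   <info>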
+ + +# ----------------------------------------------------------------------------- +# send the telegram.org message + +# https://core.telegram.org/bots/api#formatting-options +send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - <b>${name//_/ }</b> +${chart} (${family}) +<a href=\"${goto_url}\">${alarm}</a> +<i>${info}</i>" + +SENT_TELEGRAM=$? + + +# ----------------------------------------------------------------------------- +# send the kafka message + +send_kafka +SENT_KAFKA=$? + + +# ----------------------------------------------------------------------------- +# send the pagerduty.com message + +send_pd "${to_pd}" +SENT_PD=$? + +# ----------------------------------------------------------------------------- +# send the fleep message + +send_fleep "${to_fleep}" +SENT_FLEEP=$? + +# ----------------------------------------------------------------------------- +# send the irc message + +send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_IRC=$? + +# ----------------------------------------------------------------------------- +# send the custom message + +send_custom() { + # is it enabled? + [ "${SEND_CUSTOM}" != "YES" ] && return 1 + + # do we have any sender? + [ -z "${1}" ] && return 1 + + # call the custom_sender function + custom_sender "${@}" +} + +send_custom "${to_custom}" +SENT_CUSTOM=$? + + +# ----------------------------------------------------------------------------- +# send hipchat message + +send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \ +${host} ${status_message}<br/> \ +<b>${alarm}</b> ${info_html}<br/> \ +<b>${chart}</b> (family <b>${family}</b>)<br/> \ +<b>${date}${raised_for_html}</b><br/> \ +<a href=\\\"${goto_url}\\\">View netdata dashboard</a> \ +(source of alarm ${src}) \ +" + +SENT_HIPCHAT=$? + + +# ----------------------------------------------------------------------------- +# send the Amazon SNS message + +send_awssns ${to_awssns} + +SENT_AWSSNS=$? + + +# ----------------------------------------------------------------------------- +# send the syslog message + +send_syslog ${to_syslog} + +SENT_SYSLOG=$? 
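+
+# Syslog targets accept the form [facility.level][@host[:port]]/prefix, as
+# parsed by send_syslog() above. Illustrative examples:
+#
+#   netdata                           log locally with the default priority
+#   local6.err/netdata                log locally with an explicit priority
+#   local6.info@loghost:514/netdata   log to a remote syslog server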
+ + +# ----------------------------------------------------------------------------- +# send the email + +send_email <<EOF +To: ${to_email} +Subject: ${host} ${status_message} - ${name//_/ } - ${chart} +MIME-Version: 1.0 +Content-Type: multipart/alternative; boundary="multipart-boundary" +${email_thread_headers} + +This is a MIME-encoded multipart message + +--multipart-boundary +Content-Type: text/plain; encoding=${EMAIL_CHARSET} +Content-Disposition: inline +Content-Transfer-Encoding: 8bit + +${host} ${status_message} + +${alarm} ${info} +${raised_for} + +Chart : ${chart} +Family : ${family} +Severity: ${severity} +URL : ${goto_url} +Source : ${src} +Date : ${date} +Notification generated on ${this_host} + +--multipart-boundary +Content-Type: text/html; encoding=${EMAIL_CHARSET} +Content-Disposition: inline +Content-Transfer-Encoding: 8bit + +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;"> +<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;"> +<table> + <tbody> + <tr> + <td style="vertical-align: top;" valign="top"></td> + <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top"> + <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;"> + <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;"> + <tbody> + <tr> + <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;"> + <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div> + </td> + </tr> + <tr> + <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top"> + <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1> + </td> + </tr> + <tr> + <td style="vertical-align: top;" valign="top"> + <div style="margin: 0; padding: 20px; max-width: 700px;"> + <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px"> + <tbody> + <tr> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top"> + <span>${chart}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> + <span><b>${alarm}</b>${info_html}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span> + </td> + </tr> + <tr> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; 
vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> + <span>${family}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> + <span>${severity}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span> + <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;"> + <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a> + </td> + </tr> + <tr style="text-align: center; margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs) + </td> + </tr> + <tr style="text-align: center; margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by + <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${this_host}</code>. + </td> + </tr> + </tbody> + </table> + </div> + </td> + </tr> + </tbody> + </table> + </div> + </td> + </tr> + </tbody> +</table> +</body> +</html> +--multipart-boundary-- +EOF + +SENT_EMAIL=$? 
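+
+# The e-mail above is a single multipart/alternative MIME message: a
+# text/plain part for plain-text mail clients followed by a text/html part,
+# delimited by the "multipart-boundary" marker and terminated by
+# "--multipart-boundary--". Mail clients render whichever part they support.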
+ +# ----------------------------------------------------------------------------- +# let netdata know + +if [ ${SENT_EMAIL} -eq 0 \ + -o ${SENT_PUSHOVER} -eq 0 \ + -o ${SENT_TELEGRAM} -eq 0 \ + -o ${SENT_SLACK} -eq 0 \ + -o ${SENT_MSTEAM} -eq 0 \ + -o ${SENT_ROCKETCHAT} -eq 0 \ + -o ${SENT_ALERTA} -eq 0 \ + -o ${SENT_FLOCK} -eq 0 \ + -o ${SENT_DISCORD} -eq 0 \ + -o ${SENT_TWILIO} -eq 0 \ + -o ${SENT_HIPCHAT} -eq 0 \ + -o ${SENT_MESSAGEBIRD} -eq 0 \ + -o ${SENT_KAVENEGAR} -eq 0 \ + -o ${SENT_PUSHBULLET} -eq 0 \ + -o ${SENT_KAFKA} -eq 0 \ + -o ${SENT_PD} -eq 0 \ + -o ${SENT_FLEEP} -eq 0 \ + -o ${SENT_IRC} -eq 0 \ + -o ${SENT_AWSSNS} -eq 0 \ + -o ${SENT_CUSTOM} -eq 0 \ + -o ${SENT_SYSLOG} -eq 0 \ + ] + then + # we did send something + exit 0 +fi + +# we did not send anything +exit 1 diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in new file mode 100755 index 000000000..4aef3a521 --- /dev/null +++ b/health/notifications/alarm-notify.sh.in @@ -0,0 +1,2378 @@ +#!/usr/bin/env bash + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Script to send alarm notifications for netdata +# +# Features: +# - multiple notification methods +# - multiple roles per alarm +# - multiple recipients per role +# - severity filtering per recipient +# +# Supported notification methods: +# - emails by @ktsaou +# - slack.com notifications by @ktsaou +# - alerta.io notifications by @kattunga +# - discordapp.com notifications by @lowfive +# - pushover.net notifications by @ktsaou +# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070 +# - telegram.org notifications by @hashworks #1002 +# - twilio.com notifications by Levi Blaney @shadycuz #1211 +# - kafka notifications by @ktsaou #1342 +# - pagerduty.com notifications by Jim Cooley @jimcooley #1373 +# - messagebird.com notifications by @tech_no_logical #1453 +# - hipchat notifications by @ktsaou #1561 +# - fleep notifications by @Ferroin +# - custom notifications by @ktsaou +# - syslog messages by @Ferroin +# - Microsoft Team notification by @tioumen + +# ----------------------------------------------------------------------------- +# testing notifications + + +if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ] +then + if [ "${2}" = "test" ] + then + recipient="${1}" + else + recipient="${2}" + fi + + [ -z "${recipient}" ] && recipient="sysadmin" + + id=1 + last="CLEAR" + test_res=0 + for x in "WARNING" "CRITICAL" "CLEAR" + do + echo >&2 + echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}" + + "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" + if [ $? 
-ne 0 ] + then + echo >&2 "# FAILED" + test_res=1 + else + echo >&2 "# OK" + fi + + last="${x}" + id=$((id + 1)) + done + + exit $test_res +fi + +export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" +export LC_ALL=C + +# ----------------------------------------------------------------------------- + +PROGRAM_NAME="$(basename "${0}")" + +logdate() { + date "+%Y-%m-%d %H:%M:%S" +} + +log() { + local status="${1}" + shift + + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + +} + +warning() { + log WARNING "${@}" +} + +error() { + log ERROR "${@}" +} + +info() { + log INFO "${@}" +} + +fatal() { + log FATAL "${@}" + exit 1 +} + +debug=${NETDATA_ALARM_NOTIFY_DEBUG-0} +debug() { + [ "${debug}" = "1" ] && log DEBUG "${@}" +} + +docurl() { + if [ -z "${curl}" ] + then + error "\${curl} is unset." + return 1 + fi + + if [ "${debug}" = "1" ] + then + echo >&2 "--- BEGIN curl command ---" + printf >&2 "%q " ${curl} "${@}" + echo >&2 + echo >&2 "--- END curl command ---" + + local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX) + local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}") + local ret=$? + echo >&2 "--- BEGIN received response ---" + cat >&2 "${out}" + echo >&2 + echo >&2 "--- END received response ---" + echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}" + rm "${out}" + echo "${code}" + return ${ret} + fi + + ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}" + return $? +} + +# ----------------------------------------------------------------------------- +# this is to be overwritten by the config file + +custom_sender() { + info "not sending custom notification for ${status} of '${host}.${chart}.${name}'" +} + + +# ----------------------------------------------------------------------------- + +# check for BASH v4+ (required for associative arrays) +[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \ + fatal "BASH version 4 or later is required (this is ${BASH_VERSION})." 
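+
+# For example (illustrative), the role-to-recipient maps defined below rely
+# on BASH 4 associative-array syntax:
+#
+#   declare -A role_recipients_email=( [sysadmin]="alice@example.com" )
+#   echo "${role_recipients_email[sysadmin]}"   # -> alice@example.com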
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="@cachedir_POST@"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+
+# -----------------------------------------------------------------------------
+# parse command line parameters
+
+roles="${1}" # the roles that should be notified for this event
+host="${2}" # the host that generated this event
+unique_id="${3}" # the unique id of this event
+alarm_id="${4}" # the unique id of the alarm that generated this event
+event_id="${5}" # the incremental id of the event, for this alarm id
+when="${6}" # the timestamp when this event occurred
+name="${7}" # the name of the alarm, as given in netdata health.d entries
+chart="${8}" # the name of the chart (type.id)
+family="${9}" # the family of the chart
+status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+value="${12}" # the current value of the alarm
+old_value="${13}" # the previous value of the alarm
+src="${14}" # the file and line number where the alarm has been configured
+duration="${15}" # the duration in seconds of the previous alarm state
+non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
+units="${17}" # the units of the value
+info="${18}" # a short description of the alarm
+value_string="${19}" # friendly value (with units)
+old_value_string="${20}" # friendly old value (with units)
+
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+this_host=$(hostname -s 2>/dev/null)
+[ -z "${host}" ] && host="${this_host}"
+
+# -----------------------------------------------------------------------------
+# screen out statuses that do not need a notification
+
+# don't do anything if this is not WARNING, CRITICAL or CLEAR
+if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ]
+then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}'"
+ exit 1
+fi
+
+# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
+if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ]
+then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# load configuration
+
+# By default fetch images from the global public registry.
+# This is the default because all notification methods need to download
+# images via the Internet, and private registries might not be reachable.
+# This can be overridden in the configuration file.
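+# For example, in health_alarm_notify.conf (illustrative):
+#
+#   images_base_url="http://my.public.netdata.server:19999"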
+images_base_url="https://registry.my-netdata.io"
+
+# curl options to use
+curl_options=""
+
+# needed commands
+# if empty, they will be searched for in the system path
+curl=
+sendmail=
+
+# enable / disable features
+SEND_SLACK="YES"
+SEND_MSTEAM="YES"
+SEND_ALERTA="YES"
+SEND_FLOCK="YES"
+SEND_DISCORD="YES"
+SEND_PUSHOVER="YES"
+SEND_TWILIO="YES"
+SEND_HIPCHAT="YES"
+SEND_MESSAGEBIRD="YES"
+SEND_KAVENEGAR="YES"
+SEND_TELEGRAM="YES"
+SEND_EMAIL="YES"
+SEND_PUSHBULLET="YES"
+SEND_KAFKA="YES"
+SEND_PD="YES"
+SEND_FLEEP="YES"
+SEND_IRC="YES"
+SEND_AWSSNS="YES"
+SEND_SYSLOG="NO"
+SEND_CUSTOM="YES"
+
+# slack configs
+SLACK_WEBHOOK_URL=
+DEFAULT_RECIPIENT_SLACK=
+declare -A role_recipients_slack=()
+
+# Microsoft Teams configs
+MSTEAM_WEBHOOK_URL=
+DEFAULT_RECIPIENT_MSTEAM=
+declare -A role_recipients_msteam=()
+
+# rocketchat configs
+ROCKETCHAT_WEBHOOK_URL=
+DEFAULT_RECIPIENT_ROCKETCHAT=
+declare -A role_recipients_rocketchat=()
+
+# alerta configs
+ALERTA_WEBHOOK_URL=
+ALERTA_API_KEY=
+DEFAULT_RECIPIENT_ALERTA=
+declare -A role_recipients_alerta=()
+
+# flock configs
+FLOCK_WEBHOOK_URL=
+DEFAULT_RECIPIENT_FLOCK=
+declare -A role_recipients_flock=()
+
+# discord configs
+DISCORD_WEBHOOK_URL=
+DEFAULT_RECIPIENT_DISCORD=
+declare -A role_recipients_discord=()
+
+# pushover configs
+PUSHOVER_APP_TOKEN=
+DEFAULT_RECIPIENT_PUSHOVER=
+declare -A role_recipients_pushover=()
+
+# pushbullet configs
+PUSHBULLET_ACCESS_TOKEN=
+PUSHBULLET_SOURCE_DEVICE=
+DEFAULT_RECIPIENT_PUSHBULLET=
+declare -A role_recipients_pushbullet=()
+
+# twilio configs
+TWILIO_ACCOUNT_SID=
+TWILIO_ACCOUNT_TOKEN=
+TWILIO_NUMBER=
+DEFAULT_RECIPIENT_TWILIO=
+declare -A role_recipients_twilio=()
+
+# hipchat configs
+HIPCHAT_SERVER=
+HIPCHAT_AUTH_TOKEN=
+DEFAULT_RECIPIENT_HIPCHAT=
+declare -A role_recipients_hipchat=()
+
+# messagebird configs
+MESSAGEBIRD_ACCESS_KEY=
+MESSAGEBIRD_NUMBER=
+DEFAULT_RECIPIENT_MESSAGEBIRD=
+declare -A role_recipients_messagebird=()
+
+# kavenegar configs
+KAVENEGAR_API_KEY=""
+KAVENEGAR_SENDER=""
+DEFAULT_RECIPIENT_KAVENEGAR=""
+declare -A role_recipients_kavenegar=()
+
+# telegram configs
+TELEGRAM_BOT_TOKEN=
+DEFAULT_RECIPIENT_TELEGRAM=
+declare -A role_recipients_telegram=()
+
+# kafka configs
+KAFKA_URL=
+KAFKA_SENDER_IP=
+
+# pagerduty.com configs
+PD_SERVICE_KEY=
+DEFAULT_RECIPIENT_PD=
+declare -A role_recipients_pd=()
+
+# fleep.io configs
+FLEEP_SENDER="${host}"
+DEFAULT_RECIPIENT_FLEEP=
+declare -A role_recipients_fleep=()
+
+# Amazon SNS configs
+DEFAULT_RECIPIENT_AWSSNS=
+AWSSNS_MESSAGE_FORMAT=
+declare -A role_recipients_awssns=()
+
+# syslog configs
+SYSLOG_FACILITY=
+declare -A role_recipients_syslog=()
+
+# custom configs
+DEFAULT_RECIPIENT_CUSTOM=
+declare -A role_recipients_custom=()
+
+# email configs
+EMAIL_SENDER=
+DEFAULT_RECIPIENT_EMAIL="root"
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+EMAIL_THREADING=
+declare -A role_recipients_email=()
+
+# irc configs
+IRC_NICKNAME=
+IRC_REALNAME=
+DEFAULT_RECIPIENT_IRC=
+IRC_NETWORK=
+declare -A role_recipients_irc=()
+
+# load the stock and user configuration files
+# these will overwrite the variables above
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ debug "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi +done + +# If we didn't autodetect the character set for e-mail and it wasn't +# set by the user, we need to set it to a reasonable default. UTF-8 +# should be correct for almost all modern UNIX systems. +if [ -z ${EMAIL_CHARSET} ] + then + EMAIL_CHARSET="UTF-8" +fi + +# ----------------------------------------------------------------------------- +# filter a recipient based on alarm event severity + +filter_recipient_by_criticality() { + local method="${1}" x="${2}" r s + shift + + r="${x/|*/}" # the recipient + s="${x/*|/}" # the severity required for notifying this recipient + + # no severity filtering for this person + [ "${r}" = "${s}" ] && return 0 + + # the severity is invalid + s="${s^^}" + if [ "${s}" != "CRITICAL" ] + then + error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported." + return 0 + fi + + # create the status tracking directory for this user + [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \ + mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" + + case "${status}" in + CRITICAL) + # make sure he will get future notifications for this alarm too + touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" + debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)" + return 0 + ;; + + WARNING) + if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] + then + # we do not remove the file, so that he will get future notifications of this alarm + debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)" + return 0 + fi + ;; + + *) + if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] + then + # remove the file, so that he will only receive notifications for CRITICAL states for this alarm + rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" + debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)" + return 0 + fi + ;; + esac + + debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification" + return 1 +} + +# ----------------------------------------------------------------------------- +# find the recipients' addresses per method + +declare -A arr_slack=() +declare -A arr_msteam=() +declare -A arr_rocketchat=() +declare -A arr_alerta=() +declare -A arr_flock=() +declare -A arr_discord=() +declare -A arr_pushover=() +declare -A arr_pushbullet=() +declare -A arr_twilio=() +declare -A arr_hipchat=() +declare -A arr_telegram=() +declare -A arr_pd=() +declare -A arr_email=() +declare -A arr_custom=() +declare -A arr_messagebird=() +declare -A arr_kavenegar=() +declare -A arr_fleep=() +declare -A arr_irc=() +declare -A arr_syslog=() +declare -A arr_awssns=() + +# netdata may call us with multiple roles, and roles may have multiple but +# overlapping recipients - so, here we find the unique recipients. 
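+#
+# For example (illustrative): with roles="sysadmin,webmaster", both mapped to
+# the same e-mail address, that address becomes a single key in arr_email,
+# so it receives one notification instead of two.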
+for x in ${roles//,/ } +do + # the roles 'silent' and 'disabled' mean: + # don't send a notification for this role + [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue + + # email + a="${role_recipients_email[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_EMAIL}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality email "${r}" && arr_email[${r/|*/}]="1" + done + + # pushover + a="${role_recipients_pushover[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHOVER}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushover "${r}" && arr_pushover[${r/|*/}]="1" + done + + # pushbullet + a="${role_recipients_pushbullet[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHBULLET}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushbullet "${r}" && arr_pushbullet[${r/|*/}]="1" + done + + # twilio + a="${role_recipients_twilio[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TWILIO}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality twilio "${r}" && arr_twilio[${r/|*/}]="1" + done + + # hipchat + a="${role_recipients_hipchat[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_HIPCHAT}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality hipchat "${r}" && arr_hipchat[${r/|*/}]="1" + done + + # messagebird + a="${role_recipients_messagebird[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MESSAGEBIRD}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality messagebird "${r}" && arr_messagebird[${r/|*/}]="1" + done + + # kavenegar + a="${role_recipients_kavenegar[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_KAVENEGAR}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality kavenegar "${r}" && arr_kavenegar[${r/|*/}]="1" + done + + # telegram + a="${role_recipients_telegram[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TELEGRAM}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality telegram "${r}" && arr_telegram[${r/|*/}]="1" + done + + # slack + a="${role_recipients_slack[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SLACK}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1" + done + + # Microsoft Team + a="${role_recipients_msteam[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MSTEAM}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality msteam "${r}" && arr_msteam[${r/|*/}]="1" + done + + # rocketchat + a="${role_recipients_rocketchat[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ROCKETCHAT}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality rocketchat "${r}" && arr_rocketchat[${r/|*/}]="1" + done + + # alerta + a="${role_recipients_alerta[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality alerta "${r}" && arr_alerta[${r/|*/}]="1" + done + + # flock + a="${role_recipients_flock[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLOCK}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality flock "${r}" && arr_flock[${r/|*/}]="1" + done + + # discord + a="${role_recipients_discord[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_DISCORD}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && 
filter_recipient_by_criticality discord "${r}" && arr_discord[${r/|*/}]="1" + done + + # pagerduty.com + a="${role_recipients_pd[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PD}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1" + done + + # fleep.io + a="${role_recipients_fleep[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLEEP}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality fleep "${r}" && arr_fleep[${r/|*/}]="1" + done + + # irc + a="${role_recipients_irc[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && arr_irc[${r/|*/}]="1" + done + + # amazon sns + a="${role_recipients_awssns[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_AWSSNS}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality awssns "${r}" && arr_awssns[${r/|*/}]="1" + done + + # syslog + a="${role_recipients_syslog[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SYSLOG}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality syslog "${r}" && arr_syslog[${r/|*/}]="1" + done + + # custom + a="${role_recipients_custom[${x}]}" + [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}" + for r in ${a//,/ } + do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1" + done + +done + +# build the list of slack recipients (channels) +to_slack="${!arr_slack[*]}" +[ -z "${to_slack}" ] && SEND_SLACK="NO" + +# build the list of Microsoft team recipients (channels) +to_msteam="${!arr_msteam[*]}" +[ -z "${to_msteam}" ] && SEND_MSTEAM="NO" + +# build the list of rocketchat recipients (channels) +to_rocketchat="${!arr_rocketchat[*]}" +[ -z "${to_rocketchat}" ] && SEND_ROCKETCHAT="NO" + +# build the list of alerta recipients (channels) +to_alerta="${!arr_alerta[*]}" +[ -z "${to_alerta}" ] && SEND_ALERTA="NO" + +# build the list of flock recipients (channels) +to_flock="${!arr_flock[*]}" +[ -z "${to_flock}" ] && SEND_FLOCK="NO" + +# build the list of discord recipients (channels) +to_discord="${!arr_discord[*]}" +[ -z "${to_discord}" ] && SEND_DISCORD="NO" + +# build the list of pushover recipients (user tokens) +to_pushover="${!arr_pushover[*]}" +[ -z "${to_pushover}" ] && SEND_PUSHOVER="NO" + +# build the list of pushbulet recipients (user tokens) +to_pushbullet="${!arr_pushbullet[*]}" +[ -z "${to_pushbullet}" ] && SEND_PUSHBULLET="NO" + +# build the list of twilio recipients (phone numbers) +to_twilio="${!arr_twilio[*]}" +[ -z "${to_twilio}" ] && SEND_TWILIO="NO" + +# build the list of hipchat recipients (rooms) +to_hipchat="${!arr_hipchat[*]}" +[ -z "${to_hipchat}" ] && SEND_HIPCHAT="NO" + +# build the list of messagebird recipients (phone numbers) +to_messagebird="${!arr_messagebird[*]}" +[ -z "${to_messagebird}" ] && SEND_MESSAGEBIRD="NO" + +# build the list of kavenegar recipients (phone numbers) +to_kavenegar="${!arr_kavenegar[*]}" +[ -z "${to_kavenegar}" ] && SEND_KAVENEGAR="NO" + +# check array of telegram recipients (chat ids) +to_telegram="${!arr_telegram[*]}" +[ -z "${to_telegram}" ] && SEND_TELEGRAM="NO" + +# build the list of pagerduty recipients (service keys) +to_pd="${!arr_pd[*]}" +[ -z "${to_pd}" ] && SEND_PD="NO" + +# build the list of fleep recipients (conversation webhooks) +to_fleep="${!arr_fleep[*]}" +[ -z "${to_fleep}" ] && SEND_FLEEP="NO" + +# build the list of custom recipients 
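+# (free-form tokens passed to the user-defined custom_sender() function)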
+to_custom="${!arr_custom[*]}"
+[ -z "${to_custom}" ] && SEND_CUSTOM="NO"
+
+# build the list of email recipients (email addresses)
+to_email=
+for x in "${!arr_email[@]}"
+do
+    [ ! -z "${to_email}" ] && to_email="${to_email}, "
+    to_email="${to_email}${x}"
+done
+[ -z "${to_email}" ] && SEND_EMAIL="NO"
+
+# build the list of irc recipients (channels)
+to_irc="${!arr_irc[*]}"
+[ -z "${to_irc}" ] && SEND_IRC="NO"
+
+# build the list of awssns recipients (topic ARNs)
+to_awssns="${!arr_awssns[*]}"
+[ -z "${to_awssns}" ] && SEND_AWSSNS="NO"
+
+# build the list of syslog recipients (facilities, servers, and prefixes)
+to_syslog="${!arr_syslog[*]}"
+[ -z "${to_syslog}" ] && SEND_SYSLOG="NO"
+
+# -----------------------------------------------------------------------------
+# verify the delivery methods supported
+
+# check slack
+[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO"
+
+# check rocketchat
+[ -z "${ROCKETCHAT_WEBHOOK_URL}" ] && SEND_ROCKETCHAT="NO"
+
+# check alerta
+[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO"
+
+# check flock
+[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO"
+
+# check discord
+[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO"
+
+# check pushover
+[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO"
+
+# check pushbullet
+[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO"
+
+# check twilio
+[ -z "${TWILIO_ACCOUNT_TOKEN}" -o -z "${TWILIO_ACCOUNT_SID}" -o -z "${TWILIO_NUMBER}" ] && SEND_TWILIO="NO"
+
+# check hipchat
+[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO"
+
+# check messagebird
+[ -z "${MESSAGEBIRD_ACCESS_KEY}" -o -z "${MESSAGEBIRD_NUMBER}" ] && SEND_MESSAGEBIRD="NO"
+
+# check kavenegar
+[ -z "${KAVENEGAR_API_KEY}" -o -z "${KAVENEGAR_SENDER}" ] && SEND_KAVENEGAR="NO"
+
+# check telegram
+[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO"
+
+# check kafka
+[ -z "${KAFKA_URL}" -o -z "${KAFKA_SENDER_IP}" ] && SEND_KAFKA="NO"
+
+# check irc
+[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO"
+
+# check fleep
+[ -z "${FLEEP_SERVER}" -o -z "${FLEEP_SENDER}" ] && SEND_FLEEP="NO"
+
+# check pagerduty.com
+# if we need pd-send, check for the pd-send command
+# https://www.pagerduty.com/docs/guides/agent-install-guide/
+if [ "${SEND_PD}" = "YES" ]
+then
+    pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)"
+    if [ -z "${pd_send}" ]
+    then
+        error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications."
+        SEND_PD="NO"
+    fi
+fi
+
+# if we need curl, check for the curl command
+if [ \( \
+        "${SEND_PUSHOVER}" = "YES" \
+        -o "${SEND_SLACK}" = "YES" \
+        -o "${SEND_ROCKETCHAT}" = "YES" \
+        -o "${SEND_ALERTA}" = "YES" \
+        -o "${SEND_FLOCK}" = "YES" \
+        -o "${SEND_DISCORD}" = "YES" \
+        -o "${SEND_HIPCHAT}" = "YES" \
+        -o "${SEND_TWILIO}" = "YES" \
+        -o "${SEND_MESSAGEBIRD}" = "YES" \
+        -o "${SEND_KAVENEGAR}" = "YES" \
+        -o "${SEND_TELEGRAM}" = "YES" \
+        -o "${SEND_PUSHBULLET}" = "YES" \
+        -o "${SEND_KAFKA}" = "YES" \
+        -o "${SEND_FLEEP}" = "YES" \
+        -o "${SEND_CUSTOM}" = "YES" \
+        -o "${SEND_MSTEAM}" = "YES" \
+    \) -a -z "${curl}" ]
+then
+    curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
+    if [ -z "${curl}" ]
+    then
+        error "Cannot find curl command in the system path. Disabling all curl based notifications."
+        SEND_PUSHOVER="NO"
+        SEND_PUSHBULLET="NO"
+        SEND_TELEGRAM="NO"
+        SEND_SLACK="NO"
+        SEND_MSTEAM="NO"
+        SEND_ROCKETCHAT="NO"
+        SEND_ALERTA="NO"
+        SEND_FLOCK="NO"
+        SEND_DISCORD="NO"
+        SEND_TWILIO="NO"
+        SEND_HIPCHAT="NO"
+        SEND_MESSAGEBIRD="NO"
+        SEND_KAVENEGAR="NO"
+        SEND_KAFKA="NO"
+        SEND_FLEEP="NO"
+        SEND_CUSTOM="NO"
+    fi
+fi
+
+# if we need sendmail, check for the sendmail command
+if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ]
+then
+    sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)"
+    if [ -z "${sendmail}" ]
+    then
+        debug "Cannot find sendmail command in the system path. Disabling email notifications."
+        SEND_EMAIL="NO"
+    fi
+fi
+
+# if we need logger, check for the logger command
+if [ "${SEND_SYSLOG}" = "YES" -a -z "${logger}" ]
+then
+    logger="$(which logger 2>/dev/null || command -v logger 2>/dev/null)"
+    if [ -z "${logger}" ]
+    then
+        debug "Cannot find logger command in the system path. Disabling syslog notifications."
+        SEND_SYSLOG="NO"
+    fi
+fi
+
+# if we need aws, check for the aws command
+if [ "${SEND_AWSSNS}" = "YES" -a -z "${aws}" ]
+then
+    aws="$(which aws 2>/dev/null || command -v aws 2>/dev/null)"
+    if [ -z "${aws}" ]
+    then
+        debug "Cannot find aws command in the system path. Disabling Amazon SNS notifications."
+        SEND_AWSSNS="NO"
+    fi
+fi
+
+# check that we have at least one method enabled
+if [ "${SEND_EMAIL}" != "YES" \
+    -a "${SEND_PUSHOVER}" != "YES" \
+    -a "${SEND_TELEGRAM}" != "YES" \
+    -a "${SEND_SLACK}" != "YES" \
+    -a "${SEND_ROCKETCHAT}" != "YES" \
+    -a "${SEND_ALERTA}" != "YES" \
+    -a "${SEND_FLOCK}" != "YES" \
+    -a "${SEND_DISCORD}" != "YES" \
+    -a "${SEND_TWILIO}" != "YES" \
+    -a "${SEND_HIPCHAT}" != "YES" \
+    -a "${SEND_MESSAGEBIRD}" != "YES" \
+    -a "${SEND_KAVENEGAR}" != "YES" \
+    -a "${SEND_PUSHBULLET}" != "YES" \
+    -a "${SEND_KAFKA}" != "YES" \
+    -a "${SEND_PD}" != "YES" \
+    -a "${SEND_FLEEP}" != "YES" \
+    -a "${SEND_CUSTOM}" != "YES" \
+    -a "${SEND_IRC}" != "YES" \
+    -a "${SEND_AWSSNS}" != "YES" \
+    -a "${SEND_SYSLOG}" != "YES" \
+    -a "${SEND_MSTEAM}" != "YES" \
+    ]
+then
+    fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
+fi
+
+# -----------------------------------------------------------------------------
+# get the date the alarm happened
+
+date=$(date --date=@${when} "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null)
+[ -z "${date}" ] && date=$(date 2>/dev/null)
+
+# ----------------------------------------------------------------------------
+# prepare some extra headers if we've been asked to thread e-mails
+if [ "${SEND_EMAIL}" == "YES" -a "${EMAIL_THREADING}" == "YES" ] ; then
+    email_thread_headers="In-Reply-To: <${chart}-${name}@${host}>\nReferences: <${chart}-${name}@${host}>"
+else
+    email_thread_headers=
+fi
+
+# -----------------------------------------------------------------------------
+# function to URL encode a string
+
+urlencode() {
+    local string="${1}" strlen encoded pos c o
+
+    strlen=${#string}
+    for (( pos=0 ; pos<strlen ; pos++ ))
+    do
+        c=${string:${pos}:1}
+        case "${c}" in
+            [-_.~a-zA-Z0-9])
+                o="${c}"
+                ;;
+
+            *)
+                printf -v o '%%%02x' "'${c}"
+                ;;
+        esac
+        encoded+="${o}"
+    done
+
+    REPLY="${encoded}"
+    echo "${REPLY}"
+}
+
+# -----------------------------------------------------------------------------
+# function to convert a duration in seconds to a human-readable duration
+# using DAYS, HOURS, MINUTES, SECONDS
+
+duration4human() {
+    local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret
+    d=$(( s / 86400 ))
+    s=$(( s - (d * 86400) ))
+    h=$(( s / 3600 ))
+    s=$(( s - (h * 3600) ))
+    m=$(( s / 60 ))
+    s=$(( s - (m * 60) ))
+
+    if [ ${d} -gt 0 ]
+    then
+        [ ${m} -ge 30 ] && h=$(( h + 1 ))
+        [ ${d} -gt 1 ] && ds="days"
+        [ ${h} -gt 1 ] && hs="hours"
+        if [ ${h} -gt 0 ]
+        then
+            ret="${d} ${ds} and ${h} ${hs}"
+        else
+            ret="${d} ${ds}"
+        fi
+    elif [ ${h} -gt 0 ]
+    then
+        [ ${s} -ge 30 ] && m=$(( m + 1 ))
+        [ ${h} -gt 1 ] && hs="hours"
+        [ ${m} -gt 1 ] && ms="minutes"
+        if [ ${m} -gt 0 ]
+        then
+            ret="${h} ${hs} and ${m} ${ms}"
+        else
+            ret="${h} ${hs}"
+        fi
+    elif [ ${m} -gt 0 ]
+    then
+        [ ${m} -gt 1 ] && ms="minutes"
+        [ ${s} -gt 1 ] && ss="seconds"
+        if [ ${s} -gt 0 ]
+        then
+            ret="${m} ${ms} and ${s} ${ss}"
+        else
+            ret="${m} ${ms}"
+        fi
+    else
+        [ ${s} -gt 1 ] && ss="seconds"
+        ret="${s} ${ss}"
+    fi
+
+    REPLY="${ret}"
+    echo "${REPLY}"
+}
+
+# -----------------------------------------------------------------------------
+# email sender
+
+send_email() {
+    local ret= opts=() sender_email="${EMAIL_SENDER}" sender_name=
+    if [ "${SEND_EMAIL}" = "YES" ]
+    then
+
+        if [ ! -z "${EMAIL_SENDER}" ]
+        then
+            if [[ "${EMAIL_SENDER}" =~ ^\".*\"\ \<.*\>$ ]]
+            then
+                # the name includes double quotes
+                sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+                sender_name="$(echo "${EMAIL_SENDER}" | cut -d '"' -f 2)"
+            elif [[ "${EMAIL_SENDER}" =~ ^\'.*\'\ \<.*\>$ ]]
+            then
+                # the name includes single quotes
+                sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+                sender_name="$(echo "${EMAIL_SENDER}" | cut -d "'" -f 2)"
+            elif [[ "${EMAIL_SENDER}" =~ ^.*\ \<.*\>$ ]]
+            then
+                # the name does not have any quotes
+                sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+                sender_name="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+            fi
+        fi
+
+        [ ! -z "${sender_email}" ] && opts+=(-f "${sender_email}")
+        [ !
-z "${sender_name}" ] && opts+=(-F "${sender_name}") + + if [ "${debug}" = "1" ] + then + echo >&2 "--- BEGIN sendmail command ---" + printf >&2 "%q " "${sendmail}" -t "${opts[@]}" + echo >&2 + echo >&2 "--- END sendmail command ---" + fi + + "${sendmail}" -t "${opts[@]}" + ret=$? + + if [ ${ret} -eq 0 ] + then + info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'" + return 0 + else + error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}." + return 1 + fi + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# pushover sender + +send_pushover() { + local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority + + if [ "${SEND_PUSHOVER}" = "YES" -a ! -z "${apptoken}" -a ! -z "${usertokens}" -a ! -z "${title}" -a ! -z "${message}" ] + then + + # https://pushover.net/api + priority=-2 + case "${status}" in + CLEAR) priority=-1;; # low priority: no sound or vibration + WARNING) priority=0;; # normal priority: respect quiet hours + CRITICAL) priority=1;; # high priority: bypass quiet hours + *) priority=-2;; # lowest priority: no notification at all + esac + + for user in ${usertokens} + do + httpcode=$(docurl \ + --form-string "token=${apptoken}" \ + --form-string "user=${user}" \ + --form-string "html=1" \ + --form-string "title=${title}" \ + --form-string "message=${message}" \ + --form-string "timestamp=${when}" \ + --form-string "url=${url}" \ + --form-string "url_title=Open netdata dashboard to view the alarm" \ + --form-string "priority=${priority}" \ + https://api.pushover.net/1/messages.json) + + if [ "${httpcode}" = "200" ] + then + info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# pushbullet sender + +send_pushbullet() { + local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user + if [ "${SEND_PUSHBULLET}" = "YES" -a ! -z "${userapikey}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] + then + #https://docs.pushbullet.com/#create-push + for user in ${recipients} + do + httpcode=$(docurl \ + --header 'Access-Token: '${userapikey}'' \ + --header 'Content-Type: application/json' \ + --data-binary @<(cat <<EOF + {"title": "${title}", + "type": "link", + "email": "${user}", + "body": "$( echo -n ${message})", + "url": "${url}", + "source_device_iden": "${source_device}"} +EOF + ) "https://api.pushbullet.com/v2/pushes" -X POST) + + if [ "${httpcode}" = "200" ] + then + info "sent pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." 
+ fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# kafka sender + +send_kafka() { + local httpcode sent=0 + if [ "${SEND_KAFKA}" = "YES" ] + then + httpcode=$(docurl -X POST \ + --data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \ + "${KAFKA_URL}") + + if [ "${httpcode}" = "204" ] + then + info "sent kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}'" + sent=$((sent + 1)) + else + error "failed to send kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}' with HTTP error code ${httpcode}." + fi + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# pagerduty.com sender + +send_pd() { + local recipients="${1}" sent=0 + unset t + case ${status} in + CLEAR) t='resolve';; + WARNING) t='trigger';; + CRITICAL) t='trigger';; + esac + + if [ ${SEND_PD} = "YES" -a ! -z "${t}" ] + then + for PD_SERVICE_KEY in ${recipients} + do + d="${status} ${name} = ${value_string} - ${host}, ${family}" + ${pd_send} -k ${PD_SERVICE_KEY} \ + -t ${t} \ + -d "${d}" \ + -i ${host}:${chart}:${name} \ + -f 'info'="${info}" \ + -f 'value_w_units'="${value_string}" \ + -f 'when'="${when}" \ + -f 'duration'="${duration}" \ + -f 'roles'="${roles}" \ + -f 'host'="${host}" \ + -f 'unique_id'="${unique_id}" \ + -f 'alarm_id'="${alarm_id}" \ + -f 'event_id'="${event_id}" \ + -f 'name'="${name}" \ + -f 'chart'="${chart}" \ + -f 'family'="${family}" \ + -f 'status'="${status}" \ + -f 'old_status'="${old_status}" \ + -f 'value'="${value}" \ + -f 'old_value'="${old_value}" \ + -f 'src'="${src}" \ + -f 'non_clear_duration'="${non_clear_duration}" \ + -f 'units'="${units}" + retval=$? + if [ ${retval} -eq 0 ] + then + info "sent pagerduty.com notification for host ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}....: ${d}" + sent=$((sent + 1)) + else + error "failed to send pagerduty.com notification for ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}" + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# twilio sender + +send_twilio() { + local accountsid="${1}" accounttoken="${2}" twilionumber="${3}" recipients="${4}" title="${5}" message="${6}" httpcode sent=0 user + if [ "${SEND_TWILIO}" = "YES" -a ! -z "${accountsid}" -a ! -z "${accounttoken}" -a ! -z "${twilionumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] + then + #https://www.twilio.com/packages/labs/code/bash/twilio-sms + for user in ${recipients} + do + httpcode=$(docurl -X POST \ + --data-urlencode "From=${twilionumber}" \ + --data-urlencode "To=${user}" \ + --data-urlencode "Body=${title} ${message}" \ + -u "${accountsid}:${accounttoken}" \ + "https://api.twilio.com/2010-04-01/Accounts/${accountsid}/Messages.json") + + if [ "${httpcode}" = "201" ] + then + info "sent Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." 
+ fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + + +# ----------------------------------------------------------------------------- +# hipchat sender + +send_hipchat() { + local authtoken="${1}" recipients="${2}" message="${3}" httpcode sent=0 room color sender msg_format notify + + # remove <small></small> from the message + message="${message//<small>/}" + message="${message//<\/small>/}" + + if [ "${SEND_HIPCHAT}" = "YES" -a ! -z "${HIPCHAT_SERVER}" -a ! -z "${authtoken}" -a ! -z "${recipients}" -a ! -z "${message}" ] + then + # A label to be shown in addition to the sender's name + # Valid length range: 0 - 64. + sender="netdata" + + # Valid values: html, text. + # Defaults to 'html'. + msg_format="html" + + # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'. + case "${status}" in + WARNING) color="yellow" ;; + CRITICAL) color="red" ;; + CLEAR) color="green" ;; + *) color="gray" ;; + esac + + # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc). + # Each recipient's notification preferences are taken into account. + # Defaults to false. + notify="true" + + for room in ${recipients} + do + httpcode=$(docurl -X POST \ + -H "Content-type: application/json" \ + -H "Authorization: Bearer ${authtoken}" \ + -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \ + "https://${HIPCHAT_SERVER}/v2/room/${room}/notification") + + if [ "${httpcode}" = "204" ] + then + info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'" + sent=$((sent + 1)) + else + error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + + +# ----------------------------------------------------------------------------- +# messagebird sender + +send_messagebird() { + local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user + if [ "${SEND_MESSAGEBIRD}" = "YES" -a ! -z "${accesskey}" -a ! -z "${messagebirdnumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] + then + #https://developers.messagebird.com/docs/messaging + for user in ${recipients} + do + httpcode=$(docurl -X POST \ + --data-urlencode "originator=${messagebirdnumber}" \ + --data-urlencode "recipients=${user}" \ + --data-urlencode "body=${title} ${message}" \ + --data-urlencode "datacoding=auto" \ + -H "Authorization: AccessKey ${accesskey}" \ + "https://rest.messagebird.com/messages") + + if [ "${httpcode}" = "201" ] + then + info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# kavenegar sender + +send_kavenegar() { + local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user + if [ "${SEND_KAVENEGAR}" = "YES" -a ! -z "${API_KEY}" -a ! -z "${kavenegarsender}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! 
-z "${title}" ] + then + # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json + for user in ${recipients} + do + httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \ + --data-urlencode "sender=${kavenegarsender}" \ + --data-urlencode "receptor=${user}" \ + --data-urlencode "message=${title} ${message}") + + if [ "${httpcode}" = "201" ] + then + info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" + sent=$((sent + 1)) + else + error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# telegram sender + +send_telegram() { + local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification="" + + if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi + + case "${status}" in + WARNING) emoji="⚠️" ;; + CRITICAL) emoji="🔴" ;; + CLEAR) emoji="✅" ;; + *) emoji="⚪️" ;; + esac + + if [ "${SEND_TELEGRAM}" = "YES" -a ! -z "${bottoken}" -a ! -z "${chatids}" -a ! -z "${message}" ]; + then + for chatid in ${chatids} + do + # https://core.telegram.org/bots/api#sendmessage + httpcode=$(docurl ${disableNotification} \ + --data-urlencode "parse_mode=HTML" \ + --data-urlencode "disable_web_page_preview=true" \ + --data-urlencode "text=${emoji} ${message}" \ + "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}") + + if [ "${httpcode}" = "200" ] + then + info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'" + sent=$((sent + 1)) + elif [ "${httpcode}" = "401" ] + then + error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token." + else + error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP error code ${httpcode}." + fi + done + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# Microsoft Team sender + +send_msteam() { + + local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload + + [ "${SEND_MSTEAM}" != "YES" ] && return 1 + + case "${status}" in + WARNING) icon="${MSTEAM_ICON_WARNING}" && color="${MSTEAM_COLOR_WARNING}";; + CRITICAL) icon="${MSTEAM_ICON_CRITICAL}" && color="${MSTEAM_COLOR_CRITICAL}";; + CLEAR) icon="${MSTEAM_ICON_CLEAR}" && color="${MSTEAM_COLOR_CLEAR}";; + *) icon="${MSTEAM_ICON_DEFAULT}" && color="${MSTEAM_COLOR_DEFAULT}";; + esac + + for channel in ${channels} + do + ## More details are available here regarding the payload syntax options : https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference + ## Online designer : https://acdesignerbeta.azurewebsites.net/ + payload="$(cat <<EOF + { + "@context": "http://schema.org/extensions", + "@type": "MessageCard", + "themeColor": "${color}", + "title": "$icon Alert ${status} from netdata for ${host}", + "text": "${host} ${status_message}, ${chart} (_${family}_), *${alarm}*", + "potentialAction": [ + { + "@type": "OpenUri", + "name": "Netdata", + "targets": [ + { "os": "default", "uri": "${goto_url}" } + ] + } + ] + } +EOF + )" + + # Replacing in the webhook CHANNEL string by the MS Teams channel name from conf file. 
+        cur_webhook="${webhook//CHANNEL/${channel}}"
+
+        httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${cur_webhook}")
+
+        if [ "${httpcode}" = "200" ]
+        then
+            info "sent Microsoft Teams notification for: ${host} ${chart}.${name} is ${status} to '${cur_webhook}'"
+            sent=$((sent + 1))
+        else
+            error "failed to send Microsoft Teams notification for: ${host} ${chart}.${name} is ${status} to '${cur_webhook}', with HTTP error code ${httpcode}."
+        fi
+    done
+
+    [ ${sent} -gt 0 ] && return 0
+
+    return 1
+}
+
+
+# slack sender
+
+send_slack() {
+    local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+    [ "${SEND_SLACK}" != "YES" ] && return 1
+
+    case "${status}" in
+        WARNING)  color="warning" ;;
+        CRITICAL) color="danger" ;;
+        CLEAR)    color="good" ;;
+        *)        color="#777777" ;;
+    esac
+
+    for channel in ${channels}
+    do
+        payload="$(cat <<EOF
+        {
+            "channel": "#${channel}",
+            "username": "netdata on ${host}",
+            "icon_url": "${images_base_url}/images/seo-performance-128.png",
+            "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+            "attachments": [
+                {
+                    "fallback": "${alarm} - ${chart} (${family}) - ${info}",
+                    "color": "${color}",
+                    "title": "${alarm}",
+                    "title_link": "${goto_url}",
+                    "text": "${info}",
+                    "fields": [
+                        {
+                            "title": "${chart}",
+                            "short": true
+                        },
+                        {
+                            "title": "${family}",
+                            "short": true
+                        }
+                    ],
+                    "thumb_url": "${image}",
+                    "footer": "by <${goto_url}|${this_host}>",
+                    "ts": ${when}
+                }
+            ]
+        }
+EOF
+        )"
+
+        httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+        if [ "${httpcode}" = "200" ]
+        then
+            info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+            sent=$((sent + 1))
+        else
+            error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+        fi
+    done
+
+    [ ${sent} -gt 0 ] && return 0
+
+    return 1
+}
+
+
+# -----------------------------------------------------------------------------
+# rocketchat sender
+
+send_rocketchat() {
+    local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+    [ "${SEND_ROCKETCHAT}" != "YES" ] && return 1
+
+    case "${status}" in
+        WARNING)  color="warning" ;;
+        CRITICAL) color="danger" ;;
+        CLEAR)    color="good" ;;
+        *)        color="#777777" ;;
+    esac
+
+    for channel in ${channels}
+    do
+        payload="$(cat <<EOF
+        {
+            "channel": "#${channel}",
+            "alias": "netdata on ${host}",
+            "avatar": "${images_base_url}/images/seo-performance-128.png",
+            "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+            "attachments": [
+                {
+                    "color": "${color}",
+                    "title": "${alarm}",
+                    "title_link": "${goto_url}",
+                    "text": "${info}",
+                    "fields": [
+                        {
+                            "title": "${chart}",
+                            "short": true,
+                            "value": "chart"
+                        },
+                        {
+                            "title": "${family}",
+                            "short": true,
+                            "value": "family"
+                        }
+                    ],
+                    "thumb_url": "${image}",
+                    "ts": "${when}"
+                }
+            ]
+        }
+EOF
+        )"
+
+        httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+        if [ "${httpcode}" = "200" ]
+        then
+            info "sent rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+            sent=$((sent + 1))
+        else
+            error "failed to send rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+        fi
+    done
+
+    [ ${sent} -gt 0 ] && return 0
+
+    return 1
+}
+
+# -----------------------------------------------------------------------------
+# alerta sender
+
+send_alerta() {
+    local webhook="${1}" channels="${2}" httpcode sent=0 channel severity content
+
+    [ "${SEND_ALERTA}" != "YES" ] && return 1
+
+    case "${status}" in
+        WARNING)  severity="warning" ;;
+        CRITICAL) severity="critical" ;;
+        CLEAR)    severity="cleared" ;;
+        *)        severity="unknown" ;;
+    esac
+
+    info=$( echo -n ${info})
+
+    # the "event" property must be unique and repeatable across states, so that
+    # alerta can correlate alerts automatically using the severity value
+    for channel in ${channels}
+    do
+        content="{"
+        content="$content \"environment\": \"${channel}\","
+        content="$content \"service\": [\"${host}\"],"
+        content="$content \"resource\": \"${host}\","
+        content="$content \"event\": \"${name}.${chart} (${family})\","
+        content="$content \"severity\": \"${severity}\","
+        content="$content \"value\": \"${alarm}\","
+        content="$content \"text\": \"${info}\""
+        content="$content }"
+
+        httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: Key $ALERTA_API_KEY" -d "$content" )
+
+        if [[ "${httpcode}" = "200" || "${httpcode}" = "201" ]]
+        then
+            info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+            sent=$((sent + 1))
+        else
+            error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+        fi
+    done
+
+    [ ${sent} -gt 0 ] && return 0
+
+    return 1
+}
+
+# -----------------------------------------------------------------------------
+# flock sender
+
+send_flock() {
+    local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+    [ "${SEND_FLOCK}" != "YES" ] && return 1
+
+    case "${status}" in
+        WARNING)  color="warning" ;;
+        CRITICAL) color="danger" ;;
+        CLEAR)    color="good" ;;
+        *)        color="#777777" ;;
+    esac
+
+    for channel in ${channels}
+    do
+        httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{
+            \"sendAs\": {
+                \"name\" : \"netdata on ${host}\",
+                \"profileImage\" : \"${images_base_url}/images/seo-performance-128.png\"
+            },
+            \"text\": \"${host} *${status_message}*\",
+            \"timestamp\": \"${when}\",
+            \"attachments\": [
+                {
+                    \"description\": \"${chart} (${family}) - ${info}\",
+                    \"color\": \"${color}\",
+                    \"title\": \"${alarm}\",
+                    \"url\": \"${goto_url}\",
+                    \"text\": \"${info}\",
+                    \"views\": {
+                        \"image\": {
+                            \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 },
+                            \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 },
+                            \"filename\": \"${image}\"
+                        }
+                    }
+                }
+            ]
+        }" )
+        if [ "${httpcode}" = "200" ]
+        then
+            info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+            sent=$((sent + 1))
+        else
+            error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+        fi
+    done
+
+    [ ${sent} -gt 0 ] && return 0
+
+    return 1
+}
+
+# -----------------------------------------------------------------------------
+# discord sender
+
+send_discord() {
+    local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username
+
+    [ "${SEND_DISCORD}" != "YES" ] && return 1
+
+    case "${status}" in
+        WARNING)  color="warning" ;;
+        CRITICAL) color="danger" ;;
+        CLEAR)    color="good" ;;
+        *)        color="#777777" ;;
+    esac
+
+    for channel in ${channels}
+    do
+        username="netdata on ${host}"
+        [ ${#username} -gt 32 ] && username="${username:0:29}..."
+
+        payload="$(cat <<EOF
+        {
+            "channel": "#${channel}",
+            "username": "${username}",
+            "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+            "icon_url": "${images_base_url}/images/seo-performance-128.png",
+            "attachments": [
+                {
+                    "color": "${color}",
+                    "title": "${alarm}",
+                    "title_link": "${goto_url}",
+                    "text": "${info}",
+                    "fields": [
+                        {
+                            "title": "${chart}",
+                            "value": "${family}"
+                        }
+                    ],
+                    "thumb_url": "${image}",
+                    "footer_icon": "${images_base_url}/images/seo-performance-128.png",
+                    "footer": "${this_host}",
+                    "ts": ${when}
+                }
+            ]
+        }
+EOF
+        )"
+
+        httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+        if [ "${httpcode}" = "200" ]
+        then
+            info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+            sent=$((sent + 1))
+        else
+            error "failed to send discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+        fi
+    done
+
+    [ ${sent} -gt 0 ] && return 0
+
+    return 1
+}
+
+# -----------------------------------------------------------------------------
+# fleep sender
+
+send_fleep() {
+    local httpcode sent=0 webhooks="${1}" data message
+    if [ "${SEND_FLEEP}" = "YES" ] ; then
+        message="${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}"
+
+        # note: the webhook list must not be quoted here, so that the loop
+        # iterates over each conversation webhook separately
+        for hook in ${webhooks} ; do
+            data="{ "
+            data="${data} 'message': '${message}', "
+            data="${data} 'user': '${FLEEP_SENDER}' "
+            data="${data} }"
+
+            httpcode=$(docurl -X POST --data "${data}" "https://fleep.io/hook/${hook}")
+
+            if [ "${httpcode}" = "200" ] ; then
+                info "sent fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}'"
+                sent=$((sent + 1))
+            else
+                error "failed to send fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}' with HTTP error code ${httpcode}."
+            fi
+        done
+
+        [ ${sent} -gt 0 ] && return 0
+    fi
+
+    return 1
+}
+
+# -----------------------------------------------------------------------------
+# irc sender
+
+send_irc() {
+    local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error
+
+    if [ "${SEND_IRC}" = "YES" -a ! -z "${NICKNAME}" -a ! -z "${REALNAME}" -a ! -z "${CHANNELS}" -a ! -z "${NETWORK}" -a !
-z "${SERVERNAME}" ] + then + case "${status}" in + WARNING) color="warning" ;; + CRITICAL) color="danger" ;; + CLEAR) color="good" ;; + *) color="#777777" ;; + esac + + for CHANNEL in ${CHANNELS} + do + error=0 + send_alarm=$(echo -e "USER ${NICKNAME} guest ${REALNAME} ${SERVERNAME}\nNICK ${NICKNAME}\nJOIN ${CHANNEL}\nPRIVMSG ${CHANNEL} :${MESSAGE}\nQUIT\n" \ | nc ${NETWORK} 6667) + reply_codes=$(echo ${send_alarm} | cut -d ' ' -f 2 | grep -o '[0-9]*') + for code in ${reply_codes} + do + [ "${code}" -ge 400 -a "${code}" -le 599 ] && error=1 && break + done + + if [ "${error}" -eq 0 ] + then + info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'" + sent=$((sent + 1)) + else + error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}." + fi + done + fi + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + +# ----------------------------------------------------------------------------- +# Amazon SNS sender + +send_awssns() { + local targets="${1}" message='' sent=0 region='' + local default_format="${status} on ${host} at ${date}: ${chart} ${value_string}" + + [ "${SEND_AWSSNS}" = "YES" ] || return 1 + + message=${AWSSNS_MESSAGE_FORMAT:-${default_format}} + + for target in ${targets} ; do + # Extract the region from the target ARN. We need to explicitly specify the region so that it matches up correctly. + region="$(echo ${target} | cut -f 4 -d ':')" + ${aws} sns publish --region "${region}" --subject "${host} ${status_message} - ${name//_/ } - ${chart}" --message "${message}" --target-arn ${target} &>/dev/null + if [ $? = 0 ]; then + info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" + sent=$((sent + 1)) + else + error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" + fi + done + + [ ${sent} -gt 0 ] && return 0 + + return 1 +} + +# ----------------------------------------------------------------------------- +# syslog sender + +send_syslog() { + local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}" + local priority='' message='' host='' port='' prefix='' + local temp1='' temp2='' + + [ "${SEND_SYSLOG}" = "YES" ] || return 1 + + if [ "${status}" = "CRITICAL" ] ; then + level='crit' + elif [ "${status}" = "WARNING" ] ; then + level='warning' + fi + + for target in ${targets} ; do + priority="${facility}.${level}" + message='' + host='' + port='' + prefix='' + temp1='' + temp2='' + + prefix=$(echo ${target} | cut -d '/' -f 2) + temp1=$(echo ${target} | cut -d '/' -f 1) + + if [ ${prefix} != ${temp1} ] ; then + if (echo ${temp1} | grep -q '@' ) ; then + temp2=$(echo ${temp1} | cut -d '@' -f 1) + host=$(echo ${temp1} | cut -d '@' -f 2) + + if [ ${temp2} != ${host} ] ; then + priority=${temp2} + fi + + port=$(echo ${host} | rev | cut -d ':' -f 1 | rev) + + if ( echo ${host} | grep -E -q '\[.*\]' ) ; then + if ( echo ${port} | grep -q ']' ) ; then + port='' + else + host=$(echo ${host} | rev | cut -d ':' -f 2- | rev) + fi + else + if [ ${port} = ${host} ] ; then + port='' + else + host=$(echo ${host} | cut -d ':' -f 1) + fi + fi + else + priority=${temp1} + fi + fi + + message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}" + + if [ ${host} ] ; then + logger_options="${logger_options} -n ${host}" + if [ ${port} ] ; then + logger_options="${logger_options} -P ${port}" + fi + fi + + ${logger} -p ${priority} ${logger_options} "${message}" + done + + return $? 
+} + + +# ----------------------------------------------------------------------------- +# prepare the content of the notification + +# the url to send the user on click +urlencode "${host}" >/dev/null; url_host="${REPLY}" +urlencode "${chart}" >/dev/null; url_chart="${REPLY}" +urlencode "${family}" >/dev/null; url_family="${REPLY}" +urlencode "${name}" >/dev/null; url_name="${REPLY}" +goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}" + +# the severity of the alarm +severity="${status}" + +# the time the alarm was raised +duration4human ${duration} >/dev/null; duration_txt="${REPLY}" +duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}" +raised_for="(was ${old_status,,} for ${duration_txt})" + +# the key status message +status_message="status unknown" + +# the color of the alarm +color="grey" + +# the alarm value +alarm="${name//_/ } = ${value_string}" + +# the image of the alarm +image="${images_base_url}/images/seo-performance-128.png" + +# prepare the title based on status +case "${status}" in + CRITICAL) + image="${images_base_url}/images/alert-128-red.png" + status_message="is critical" + color="#ca414b" + ;; + + WARNING) + image="${images_base_url}/images/alert-128-orange.png" + status_message="needs attention" + color="#ffc107" + ;; + + CLEAR) + image="${images_base_url}/images/check-mark-2-128-green.png" + status_message="recovered" + color="#77ca6d" + ;; +esac + +if [ "${status}" = "CLEAR" ] +then + severity="Recovered from ${old_status}" + if [ ${non_clear_duration} -gt ${duration} ] + then + raised_for="(alarm was raised for ${non_clear_duration_txt})" + fi + + # don't show the value when the status is CLEAR + # for certain alarms, this value might not have any meaning + alarm="${name//_/ } ${raised_for}" + +elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ] +then + severity="Escalated to ${status}" + if [ ${non_clear_duration} -gt ${duration} ] + then + raised_for="(alarm is raised for ${non_clear_duration_txt})" + fi + +elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ] +then + severity="Demoted to ${status}" + if [ ${non_clear_duration} -gt ${duration} ] + then + raised_for="(alarm is raised for ${non_clear_duration_txt})" + fi + +else + raised_for= +fi + +# prepare HTML versions of elements +info_html= +[ ! -z "${info}" ] && info_html=" <small><br/>${info}</small>" + +raised_for_html= +[ ! -z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>" + +# ----------------------------------------------------------------------------- +# send the slack notification + +# slack aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}" +SENT_SLACK=$? + +# ----------------------------------------------------------------------------- +# send the Microsoft notification + +# Microsoft team aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_msteam "${MSTEAM_WEBHOOK_URL}" "${to_msteam}" +SENT_MSTEAM=$? 
+ +# ----------------------------------------------------------------------------- +# send the rocketchat notification + +# rocketchat aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_rocketchat "${ROCKETCHAT_WEBHOOK_URL}" "${to_rocketchat}" +SENT_ROCKETCHAT=$? + +# ----------------------------------------------------------------------------- +# send the alerta notification + +# alerta aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}" +SENT_ALERTA=$? + +# ----------------------------------------------------------------------------- +# send the flock notification + +# flock aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}" +SENT_FLOCK=$? + +# ----------------------------------------------------------------------------- +# send the discord notification + +# discord aggregates posts from the same username +# so we use "${host} ${status}" as the bot username, to make them diff + +send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}" +SENT_DISCORD=$? + +# ----------------------------------------------------------------------------- +# send the pushover notification + +send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" " +<font color=\"${color}\"><b>${alarm}</b></font>${info_html}<br/>  +<small><b>${chart}</b><br/>Chart<br/> </small> +<small><b>${family}</b><br/>Family<br/> </small> +<small><b>${severity}</b><br/>Severity<br/> </small> +<small><b>${date}${raised_for_html}</b><br/>Time<br/> </small> +<a href=\"${goto_url}\">View Netdata</a><br/>  +<small><small>The source of this alarm is line ${src}</small></small> +" + +SENT_PUSHOVER=$? + +# ----------------------------------------------------------------------------- +# send the pushbullet notification + +send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\n +Severity: ${severity}\n +Chart: ${chart}\n +Family: ${family}\n +$(date -d @${when})\n +The source of this alarm is line ${src}" + +SENT_PUSHBULLET=$? + +# ----------------------------------------------------------------------------- +# send the twilio SMS + +send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_TWILIO=$? + +# ----------------------------------------------------------------------------- +# send the messagebird SMS + +send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_MESSAGEBIRD=$? + + +# ----------------------------------------------------------------------------- +# send the kavenegar SMS + +send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_KAVENEGAR=$? 
+ + +# ----------------------------------------------------------------------------- +# send the telegram.org message + +# https://core.telegram.org/bots/api#formatting-options +send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - <b>${name//_/ }</b> +${chart} (${family}) +<a href=\"${goto_url}\">${alarm}</a> +<i>${info}</i>" + +SENT_TELEGRAM=$? + + +# ----------------------------------------------------------------------------- +# send the kafka message + +send_kafka +SENT_KAFKA=$? + + +# ----------------------------------------------------------------------------- +# send the pagerduty.com message + +send_pd "${to_pd}" +SENT_PD=$? + +# ----------------------------------------------------------------------------- +# send the fleep message + +send_fleep "${to_fleep}" +SENT_FLEEP=$? + +# ----------------------------------------------------------------------------- +# send the irc message + +send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm} +Severity: ${severity} +Chart: ${chart} +Family: ${family} +${info}" + +SENT_IRC=$? + +# ----------------------------------------------------------------------------- +# send the custom message + +send_custom() { + # is it enabled? + [ "${SEND_CUSTOM}" != "YES" ] && return 1 + + # do we have any sender? + [ -z "${1}" ] && return 1 + + # call the custom_sender function + custom_sender "${@}" +} + +send_custom "${to_custom}" +SENT_CUSTOM=$? + + +# ----------------------------------------------------------------------------- +# send hipchat message + +send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \ +${host} ${status_message}<br/> \ +<b>${alarm}</b> ${info_html}<br/> \ +<b>${chart}</b> (family <b>${family}</b>)<br/> \ +<b>${date}${raised_for_html}</b><br/> \ +<a href=\\\"${goto_url}\\\">View netdata dashboard</a> \ +(source of alarm ${src}) \ +" + +SENT_HIPCHAT=$? + + +# ----------------------------------------------------------------------------- +# send the Amazon SNS message + +send_awssns ${to_awssns} + +SENT_AWSSNS=$? + + +# ----------------------------------------------------------------------------- +# send the syslog message + +send_syslog ${to_syslog} + +SENT_SYSLOG=$? 
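+# -----------------------------------------------------------------------------
+# NOTE: send_custom() above relies on a custom_sender() function that the user
+# defines in health_alarm_notify.conf. As a minimal sketch (not part of this
+# script, posting to a hypothetical JSON webhook at example.com), such a hook
+# could reuse the docurl/info/error helpers this script already provides:
+#
+#   custom_sender() {
+#       local httpcode
+#       # ${host}, ${chart}, ${name}, ${status}, ${value_string} and ${goto_url}
+#       # are all set by alarm-notify.sh before send_custom() is called
+#       httpcode=$(docurl -X POST -H "Content-Type: application/json" \
+#           -d "{\"host\": \"${host}\", \"alarm\": \"${name} = ${value_string}\", \"status\": \"${status}\", \"goto\": \"${goto_url}\"}" \
+#           "https://example.com/netdata-webhook")
+#       [ "${httpcode}" = "200" ] \
+#           && info "sent custom notification for: ${host} ${chart}.${name} is ${status}" \
+#           || error "failed to send custom notification for: ${host} ${chart}.${name} (HTTP ${httpcode})."
+#   }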
+ + +# ----------------------------------------------------------------------------- +# send the email + +send_email <<EOF +To: ${to_email} +Subject: ${host} ${status_message} - ${name//_/ } - ${chart} +MIME-Version: 1.0 +Content-Type: multipart/alternative; boundary="multipart-boundary" +${email_thread_headers} + +This is a MIME-encoded multipart message + +--multipart-boundary +Content-Type: text/plain; encoding=${EMAIL_CHARSET} +Content-Disposition: inline +Content-Transfer-Encoding: 8bit + +${host} ${status_message} + +${alarm} ${info} +${raised_for} + +Chart : ${chart} +Family : ${family} +Severity: ${severity} +URL : ${goto_url} +Source : ${src} +Date : ${date} +Notification generated on ${this_host} + +--multipart-boundary +Content-Type: text/html; encoding=${EMAIL_CHARSET} +Content-Disposition: inline +Content-Transfer-Encoding: 8bit + +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;"> +<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;"> +<table> + <tbody> + <tr> + <td style="vertical-align: top;" valign="top"></td> + <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top"> + <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;"> + <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;"> + <tbody> + <tr> + <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;"> + <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div> + </td> + </tr> + <tr> + <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top"> + <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1> + </td> + </tr> + <tr> + <td style="vertical-align: top;" valign="top"> + <div style="margin: 0; padding: 20px; max-width: 700px;"> + <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px"> + <tbody> + <tr> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top"> + <span>${chart}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> + <span><b>${alarm}</b>${info_html}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span> + </td> + </tr> + <tr> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; 
vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> + <span>${family}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> + <span>${severity}</span> + <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span> + <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span> + </td> + </tr> + <tr style="margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;"> + <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a> + </td> + </tr> + <tr style="text-align: center; margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs) + </td> + </tr> + <tr style="text-align: center; margin: 0; padding: 0;"> + <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by + <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${this_host}</code>. + </td> + </tr> + </tbody> + </table> + </div> + </td> + </tr> + </tbody> + </table> + </div> + </td> + </tr> + </tbody> +</table> +</body> +</html> +--multipart-boundary-- +EOF + +SENT_EMAIL=$? 
+
+# -----------------------------------------------------------------------------
+# let netdata know
+
+if [ ${SENT_EMAIL} -eq 0 \
+    -o ${SENT_PUSHOVER} -eq 0 \
+    -o ${SENT_TELEGRAM} -eq 0 \
+    -o ${SENT_SLACK} -eq 0 \
+    -o ${SENT_MSTEAM} -eq 0 \
+    -o ${SENT_ROCKETCHAT} -eq 0 \
+    -o ${SENT_ALERTA} -eq 0 \
+    -o ${SENT_FLOCK} -eq 0 \
+    -o ${SENT_DISCORD} -eq 0 \
+    -o ${SENT_TWILIO} -eq 0 \
+    -o ${SENT_HIPCHAT} -eq 0 \
+    -o ${SENT_MESSAGEBIRD} -eq 0 \
+    -o ${SENT_KAVENEGAR} -eq 0 \
+    -o ${SENT_PUSHBULLET} -eq 0 \
+    -o ${SENT_KAFKA} -eq 0 \
+    -o ${SENT_PD} -eq 0 \
+    -o ${SENT_FLEEP} -eq 0 \
+    -o ${SENT_IRC} -eq 0 \
+    -o ${SENT_AWSSNS} -eq 0 \
+    -o ${SENT_CUSTOM} -eq 0 \
+    -o ${SENT_SYSLOG} -eq 0 \
+    ]
+then
+    # we did send something
+    exit 0
+fi
+
+# we did not send anything
+exit 1
diff --git a/health/notifications/alarm-test.sh b/health/notifications/alarm-test.sh
new file mode 100755
index 000000000..828aa756b
--- /dev/null
+++ b/health/notifications/alarm-test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to test alarm notifications for netdata
+
+dir="$(dirname "${0}")"
+"${dir}/alarm-notify.sh" test "${1}"
+exit $?
diff --git a/health/notifications/alerta/Makefile.inc b/health/notifications/alerta/Makefile.inc
new file mode 100644
index 000000000..32fa08947
--- /dev/null
+++ b/health/notifications/alerta/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    alerta/README.md \
+    alerta/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/alerta/README.md b/health/notifications/alerta/README.md
new file mode 100644
index 000000000..bbed23bac
--- /dev/null
+++ b/health/notifications/alerta/README.md
@@ -0,0 +1,236 @@
+# alerta.io notifications
+
+The alerta monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick ‘at-a-glance’ visualisation. With just one system you can monitor alerts from many other monitoring tools on a single screen.
+
+![](http://docs.alerta.io/en/latest/_images/alerta-screen-shot-3.png)
+
+When receiving alerts from multiple sources you can quickly become overwhelmed. With Alerta, any alert with the same environment and resource is considered a duplicate if it has the same severity. If it has a different severity, it is correlated so that you only see the most recent one. Awesome.
+
+Main site: http://www.alerta.io
+
+We can send Netdata alarms to Alerta, so you can see in one place alerts coming from many Netdata hosts or from a multi-host Netdata configuration.\
+The big advantage over other notification methods is that the main view shows all active alarms with only their latest state, while you can also search the history.
+
+## Setting up an Alerta server with Ubuntu 16.04
+
+Here we will set up a basic Alerta server to test it with Netdata alerts.\
+More advanced configurations are outside the scope of this tutorial.
+
+source: http://alerta.readthedocs.io/en/latest/gettingstarted/tutorial-1-deploy-alerta.html
+
+I recommend setting it up on a separate server, VM, or container.\
+If you have another Nginx or Apache server in your organization, I recommend proxying requests to this new server, as in the sketch below.
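+
+As a minimal sketch (assuming the hypothetical hostname `alerta.internal` for the new Alerta machine), such a proxy block on an existing Nginx server could look like:
+```
+location /alerta/ {
+    proxy_pass http://alerta.internal/alerta/;
+    proxy_set_header Host $host;
+    proxy_set_header X-Real-IP $remote_addr;
+    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+}
+```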
+
+Switch to root to make the setup easier
+```
+sudo su
+cd
+```
+
+Install MongoDB https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/
+```
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
+echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.6.list
+apt-get update
+apt-get install -y mongodb-org
+systemctl enable mongod
+systemctl start mongod
+systemctl status mongod
+```
+
+Install Nginx, the Alerta server, and uWSGI
+```
+apt-get install -y python-pip python-dev nginx
+pip install alerta-server uwsgi
+```
+
+Install the web console
+```
+cd /var/www/html
+mkdir alerta
+cd alerta
+wget -q -O - https://github.com/alerta/angular-alerta-webui/tarball/master | tar zxf -
+mv alerta*/app/* .
+cd
+```
+## Services configuration
+
+Create a WSGI Python file
+```
+nano /var/www/wsgi.py
+```
+fill with
+```
+from alerta import app
+```
+Create a uWSGI configuration file
+```
+nano /etc/uwsgi.ini
+```
+fill with
+```
+[uwsgi]
+chdir = /var/www
+mount = /alerta/api=wsgi.py
+callable = app
+manage-script-name = true
+
+master = true
+processes = 5
+logger = syslog:alertad
+
+socket = /tmp/uwsgi.sock
+chmod-socket = 664
+uid = www-data
+gid = www-data
+vacuum = true
+
+die-on-term = true
+```
+Create a systemd configuration file
+```
+nano /etc/systemd/system/uwsgi.service
+```
+fill with
+```
+[Unit]
+Description=uWSGI service
+
+[Service]
+ExecStart=/usr/local/bin/uwsgi --ini /etc/uwsgi.ini
+
+[Install]
+WantedBy=multi-user.target
+```
+enable the service
+```
+systemctl start uwsgi
+systemctl status uwsgi
+systemctl enable uwsgi
+```
+Configure Nginx to serve Alerta as a uWSGI application on /alerta/api
+```
+nano /etc/nginx/sites-enabled/default
+```
+fill with
+```
+server {
+    listen 80 default_server;
+    listen [::]:80 default_server;
+
+    location /alerta/api { try_files $uri @alerta/api; }
+    location @alerta/api {
+        include uwsgi_params;
+        uwsgi_pass unix:/tmp/uwsgi.sock;
+        proxy_set_header Host $host:$server_port;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    }
+
+    location / {
+        root /var/www/html;
+    }
+}
+```
+restart nginx
+```
+service nginx restart
+```
+## Configure the web console
+```
+nano /var/www/html/config.js
+```
+fill with
+```
+'use strict';
+
+angular.module('config', [])
+    .constant('config', {
+        'endpoint'  : "/alerta/api",
+        'provider'  : "basic",
+        'colors'    : {},
+        'severity'  : {},
+        'audio'     : {}
+    });
+```
+
+## Configure the Alerta server
+
+source: http://alerta.readthedocs.io/en/latest/configuration.html
+
+Create a random string to use as SECRET_KEY
+```
+cat /dev/urandom | tr -dc A-Za-z0-9_\!\@\#\$\%\^\&\*\(\)-+= | head -c 32 && echo
+```
+will output something like
+```
+0pv8Bw7VKfW6avDAz_TqzYPme_fYV%7g
+```
+Edit alertad.conf
+```
+nano /etc/alertad.conf
+```
+fill with (mind the single quotes)
+```
+BASE_URL='/alerta/api'
+AUTH_REQUIRED=True
+SECRET_KEY='0pv8Bw7VKfW6avDAz_TqzYPme_fYV%7g'
+ADMIN_USERS=['<put your email here for future login>']
+```
+
+restart
+```
+systemctl restart uwsgi
+```
+
+* go to the console at http://yourserver/alerta/
+* go to Login -> Create an account
+* use your email to log in, so an administrative account will be created
+
+## Create an API KEY
+
+You need an API KEY to send messages from any source.\
+To create an API KEY, go to Configuration -> Api Keys.\
+Then create an API KEY with write permissions.
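+
+Before configuring Netdata, you can sanity-check the new key by posting a test alert by hand. This is only a sketch; substitute your own server and API KEY (the payload fields mirror the ones Netdata's alarm-notify.sh sends):
+```
+curl -X POST "http://yourserver/alerta/api/alert" \
+     -H "Content-Type: application/json" \
+     -H "Authorization: Key <your API KEY>" \
+     -d '{"environment": "Production", "service": ["test"], "resource": "test", "event": "test.alarm", "severity": "warning", "value": "1", "text": "test alert sent with curl"}'
+```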
+
+## Configure Netdata to send alarms to Alerta
+
+On your system run:
+
+```
+/etc/netdata/edit-config health_alarm_notify.conf
+```
+
+and set
+
+```
+# enable/disable sending alerta notifications
+SEND_ALERTA="YES"
+
+# here set your alerta server API url
+# this is the API url you defined when you installed the Alerta server,
+# it is the same for all users. Do not include a trailing slash.
+ALERTA_WEBHOOK_URL="http://yourserver/alerta/api"
+
+# Login with an administrative user to your Alerta server and create an API KEY
+# with write permissions.
+ALERTA_API_KEY="<the API KEY you created above>"
+
+# you can define environments in /etc/alertad.conf option ALLOWED_ENVIRONMENTS
+# standard environments are Production and Development
+# if a role's recipients are not configured, a notification will be sent to
+# this Environment (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_ALERTA="Production"
+```
+
+## Test alarms
+
+We can test alarms with the standard procedure:
+```
+sudo su -s /bin/bash netdata
+/opt/netdata/netdata-plugins/plugins.d/alarm-notify.sh test
+exit
+```
+The catch is that Netdata will send three alarms, and because the last one is "CLEAR" you will not see them on the main Alerta page; you need to select "closed" alarms in the top-right lookup.
+
+A small change in alarm-notify.sh that lets us test each state one by one would be useful.
\ No newline at end of file
diff --git a/health/notifications/awssns/Makefile.inc b/health/notifications/awssns/Makefile.inc
new file mode 100644
index 000000000..3d8e58ff8
--- /dev/null
+++ b/health/notifications/awssns/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    awssns/README.md \
+    awssns/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/awssns/README.md b/health/notifications/awssns/README.md
new file mode 100644
index 000000000..d040d2d29
--- /dev/null
+++ b/health/notifications/awssns/README.md
@@ -0,0 +1,31 @@
+# Amazon SNS notifications
+
+As part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service', or SNS. Amazon SNS works much like Netdata's own notification system, allowing a single notification to be dispatched to multiple subscribers of different types. Among other things, SNS supports sending notifications to:
+
+* Email addresses.
+* Mobile Phones via SMS.
+* HTTP or HTTPS web hooks.
+* AWS Lambda functions.
+* AWS SQS queues.
+* Mobile applications via push notifications.
+
+To get this working, you will need:
+
+* The Amazon Web Services CLI tools. Most distributions provide these with the package name `awscli`.
+* An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. How to set this up is distribution-specific. `/var/lib/netdata` is the recommended directory (because the permissions will already be correct) if you are using a dedicated user (which is how most distributions work).
+* An Amazon SNS topic to send notifications to with one or more subscribers. The [Getting Started](https://docs.aws.amazon.com/sns/latest/dg/GettingStarted.html) section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.
+* While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications with. This user needs to have programmatic access, and should only allow access to SNS. If you're really paranoid, you can create one for each system or group of systems.
+
+Once you have all the above, run the following command as the user Netdata runs under:
+
+    aws configure
+
+This will prompt you for the access key and secret key for accessing Amazon SNS (as well as the default region and output format, but you can leave those blank because we don't use them).
+
+Once that's done, you're ready to go and can specify the desired topic ARN as a recipient.
+
+Notes:
+
+ * Netdata's native email notification support is far better in almost all respects than its support through Amazon SNS. If you want email notifications, use the native support, not SNS.
+ * If you need to change the notification format for SNS notifications, you can do so by specifying the format in `AWSSNS_MESSAGE_FORMAT` in the configuration. This variable supports all the same variables you can use in custom notifications.
+ * While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.
\ No newline at end of file
diff --git a/health/notifications/discord/Makefile.inc b/health/notifications/discord/Makefile.inc
new file mode 100644
index 000000000..03d0339ab
--- /dev/null
+++ b/health/notifications/discord/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    discord/README.md \
+    discord/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/discord/README.md b/health/notifications/discord/README.md
new file mode 100644
index 000000000..5889ea099
--- /dev/null
+++ b/health/notifications/discord/README.md
@@ -0,0 +1,44 @@
+# Discordapp.com notifications
+
+This is what you will get:
+
+![image](https://cloud.githubusercontent.com/assets/7321975/22215935/b49ede7e-e162-11e6-98d0-ae8541e6b92e.png)
+
+You need:
+
+1. The **incoming webhook URL** as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same webhook on all your netdata servers (or you can have multiple if you like - your decision).
+2. One or more Discord channels to post the messages to.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# sending discord notifications
+
+# note: multiple recipients can be given like this:
+#       "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending discord notifications
+SEND_DISCORD="YES"
+
+# Create a webhook by following the official documentation -
+# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+DISCORD_WEBHOOK_URL="https://discordapp.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this discord channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_DISCORD="alarms"
+
+```
+
+You can define multiple channels like this: `alarms systems`.
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_discord[sysadmin]="systems"
+role_recipients_discord[dba]="databases systems"
+role_recipients_discord[webmaster]="marketing development"
+```
+
+The keywords `systems`, `databases`, `marketing`, `development` are discordapp.com channels (they should already exist within your discord server).
diff --git a/health/notifications/email/Makefile.inc b/health/notifications/email/Makefile.inc
new file mode 100644
index 000000000..62dd18a62
--- /dev/null
+++ b/health/notifications/email/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    email/README.md \
+    email/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/email/README.md b/health/notifications/email/README.md
new file mode 100644
index 000000000..979790ad6
--- /dev/null
+++ b/health/notifications/email/README.md
@@ -0,0 +1,31 @@
+# email notifications
+
+You need a working `sendmail` command for email alerts to work. Almost all MTAs provide a `sendmail` interface.
+
+netdata sends all emails as user `netdata`, so make sure your `sendmail` works for local users.
+
+email notifications look like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/18407294/e9218c68-7714-11e6-8739-e4dd8a498252.png)
+
+## configuration
+
+To edit `health_alarm_notify.conf` on your system run `/etc/netdata/edit-config health_alarm_notify.conf`.
+
+You can configure recipients in [`/etc/netdata/health_alarm_notify.conf`](https://github.com/netdata/netdata/blob/99d44b7d0c4e006b11318a28ba4a7e7d3f9b3bae/conf.d/health_alarm_notify.conf#L101).
+
+You can also configure per-role recipients [in the same file, a few lines below](https://github.com/netdata/netdata/blob/99d44b7d0c4e006b11318a28ba4a7e7d3f9b3bae/conf.d/health_alarm_notify.conf#L313).
+
+Changes to this file do not require a netdata restart.
+
+You can test your configuration by issuing the commands:
+
+```sh
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# send a test alarm
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test [ROLE]
+```
+
+Where `[ROLE]` is the role you want to test. The default (if you don't give a `[ROLE]`) is `sysadmin`.
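+
+If the test notification never arrives, you can first check that `sendmail` itself works for the `netdata` user — a quick manual sketch (`you@example.com` is a placeholder; substitute a real address):
+
+```sh
+# still as user netdata: send a plain test email through the local MTA
+printf 'Subject: netdata sendmail test\n\nIt works.\n' | sendmail you@example.com
+```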
\ No newline at end of file
diff --git a/health/notifications/flock/Makefile.inc b/health/notifications/flock/Makefile.inc
new file mode 100644
index 000000000..fbff309a0
--- /dev/null
+++ b/health/notifications/flock/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    flock/README.md \
+    flock/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/flock/README.md b/health/notifications/flock/README.md
new file mode 100644
index 000000000..33a545eac
--- /dev/null
+++ b/health/notifications/flock/README.md
@@ -0,0 +1,31 @@
+# flock.com notifications
+
+This is what you will get:
+
+
+![Flock](https://i.imgur.com/ok9bRzw.png)
+
+You need:
+
+The **incoming webhook URL** as given by flock.com. You can use the same webhook on all your netdata servers (or you can have multiple if you like - your decision).
+
+Get yours here: https://admin.flock.com/webhooks
+
+Set it in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# sending flock notifications
+
+# enable/disable sending flock notifications
+SEND_FLOCK="YES"
+
+# Login to flock.com and create an incoming webhook.
+# You need only one for all your netdata servers.
+# Without it, netdata cannot send flock notifications.
+FLOCK_WEBHOOK_URL="https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# if a role recipient is not configured, no notification will be sent
+DEFAULT_RECIPIENT_FLOCK="alarms"
+
+```
\ No newline at end of file
diff --git a/health/notifications/health_alarm_notify.conf b/health/notifications/health_alarm_notify.conf
new file mode 100755
index 000000000..9e72aac4d
--- /dev/null
+++ b/health/notifications/health_alarm_notify.conf
@@ -0,0 +1,961 @@
+# Configuration for alarm notifications
+#
+# This configuration is used by: alarm-notify.sh
+# changes take effect immediately (the next alarm will use them).
+#
+# alarm-notify.sh can send:
+# - e-mails (using the sendmail command),
+# - push notifications to your mobile phone (pushover.net),
+# - messages to your slack team (slack.com),
+# - messages to your alerta server (alerta.io),
+# - messages to your flock team (flock.com),
+# - messages to your discord guild (discordapp.com),
+# - messages to your telegram chat / group chat (telegram.org)
+# - sms messages to your cell phone or any sms enabled device (twilio.com)
+# - sms messages to your cell phone or any sms enabled device (messagebird.com)
+# - notifications to users on pagerduty.com
+# - notifications to Amazon SNS topics (aws.amazon.com)
+# - messages to your irc channel on your selected network
+# - messages to a local or remote syslog daemon
+# - messages to Microsoft Teams (through a webhook)
+#
+# The 'to' line given at netdata alarms defines a *role*, so that many
+# people can be notified for each role.
+#
+# This file is a BASH script itself.
+#
+#
+#------------------------------------------------------------------------------
+# proxy configuration
+#
+# If you need to send curl based notifications (pushover, pushbullet, slack, alerta,
+# flock, discord, telegram) via a proxy, set these to your proxy address:
+#export http_proxy="http://10.0.0.1:3128/"
+#export https_proxy="http://10.0.0.1:3128/"
+
+
+#------------------------------------------------------------------------------
+# notifications images
+#
+# Images in notifications need to be downloaded from an Internet facing site.
+# To allow notification providers to fetch the icons/images, by default we set
+# the URL of the global public netdata registry.
+# If you have an Internet facing netdata (or you have copied the images/ folder
+# of netdata to your web server), set its URL here, to fetch the notification
+# images from it.
+#images_base_url="http://my.public.netdata.server:19999"
+
+
+#------------------------------------------------------------------------------
+# date handling
+#
+# You can configure netdata alerts to send dates in any format you want.
+# This uses standard `date` command format strings. See `man date` for
+# more info on what you can put in here. Note that this has to start with a '+', otherwise it won't work.
+#
+# For ISO 8601 dates, use '+%FT%T%z'
+# For RFC 5322 dates, use '+%a, %d %b %Y %H:%M:%S %z'
+# For RFC 3339 dates, use '+%F %T%:z'
+# For RFC 1123 dates, use '+%a, %d %b %Y %H:%M:%S %Z'
+# For RFC 1036 dates, use '+%A, %d-%b-%y %H:%M:%S %Z'
+# For a reasonably local date and time (in that order), use '+%x %X'
+# For the old default behavior (compatible with ANSI C's asctime() function), leave this empty.
+date_format=''
+
+
+#------------------------------------------------------------------------------
+# external commands
+
+# The full path to the sendmail command.
+# If empty, the system $PATH will be searched for it.
+# If not found, email notifications will be disabled (silently).
+sendmail=""
+
+# The full path of the curl command.
+# If empty, the system $PATH will be searched for it.
+# If not found, most notifications will be silently disabled.
+curl=""
+
+# The full path of the nc command.
+# If empty, the system $PATH will be searched for it.
+# If not found, irc notifications will be silently disabled.
+nc=""
+
+# The full path of the logger command.
+# If empty, the system $PATH will be searched for it.
+# If not found, syslog notifications will be silently disabled.
+logger=""
+
+# The full path of the aws command.
+# If empty, the system $PATH will be searched for it.
+# If not found, Amazon SNS notifications will be silently disabled.
+aws=""
+
+#------------------------------------------------------------------------------
+# extra options for external commands
+#
+# In some cases, you may need to change what options get passed to an
+# external command. Such cases are covered here.
+
+# Extra options to pass to curl. In most cases, you shouldn't need to add anything
+# to this. If you're having issues with HTTPS connections, you might try adding
+# '--insecure' here, but be warned that it will make it much easier for
+# third-parties to block notification delivery, and may allow disclosure
+# of potentially sensitive information.
+#curl_options="--insecure"
+
+# Extra options to pass to logger. You shouldn't have to specify anything
+# here in most cases.
+#logger_options=""
+
+#------------------------------------------------------------------------------
+# NOTE ABOUT RECIPIENTS
+#
+# When you define recipients (all types):
+#
+#  - email addresses
+#  - pushover user tokens
+#  - telegram chat ids
+#  - slack channels
+#  - alerta environment
+#  - flock rooms
+#  - discord channels
+#  - hipchat rooms
+#  - sms phone numbers
+#  - pagerduty.com (pd) services
+#  - irc channels
+#
+# You can append |critical to limit the notifications to be sent.
+#
+# In these examples, the first recipient receives all the alarms
+# while the second one receives only the critical ones:
+#
+#  email      : "user1@example.com user2@example.com|critical"
+#  pushover   : "2987343...9437837 8756278...2362736|critical"
+#  telegram   : "111827421 112746832|critical"
+#  slack      : "alarms disasters|critical"
+#  alerta     : "alarms disasters|critical"
+#  flock      : "alarms disasters|critical"
+#  discord    : "alarms disasters|critical"
+#  twilio     : "+15555555555 +17777777777|critical"
+#  messagebird: "+15555555555 +17777777777|critical"
+#  kavenegar  : "09155555555 09177777777|critical"
+#  pd         : "<pd_service_key_1> <pd_service_key_2>|critical"
+#  irc        : "<irc_channel_1> <irc_channel_2>|critical"
+#
+# If a recipient is set to empty string, the default recipient of the given
+# notification method (email, pushover, telegram, slack, alerta, etc) will be used.
+# To disable a notification, use the recipient called: disabled
+# This works for all notification methods (including the default recipients).
+
+
+#------------------------------------------------------------------------------
+# email global notification options
+
+# multiple recipients can be given like this:
+#              "admin1@example.com admin2@example.com ..."
+
+# the email address sending email notifications
+# the default is the system user netdata runs as (usually: netdata)
+# The following formats are supported:
+# EMAIL_SENDER="user@domain"
+# EMAIL_SENDER="User Name <user@domain>"
+# EMAIL_SENDER="'User Name' <user@domain>"
+# EMAIL_SENDER="\"User Name\" <user@domain>"
+EMAIL_SENDER=""
+
+# enable/disable sending emails
+SEND_EMAIL="YES"
+
+# if a role recipient is not configured, an email will be sent to:
+DEFAULT_RECIPIENT_EMAIL="root"
+# to receive only critical alarms, set it to "root|critical"
+
+# Optionally specify the encoding to list in the Content-Type header.
+# This doesn't change what encoding the e-mail is sent with, just what
+# the headers say it was encoded as.
+# This shouldn't need to be changed as it will almost always be
+# autodetected from the environment.
+#EMAIL_CHARSET="UTF-8"
+
+# You can also have netdata add headers to the message that will
+# cause most e-mail clients to treat all notifications for a given
+# chart+alarm+host combination as a single thread. This can help
+# simplify tracking of alarms, as it provides an easy way for scripts
+# to correlate messages and also will cause most clients to group all the
+# messages together. This is off by default.
+#EMAIL_THREADING="YES"
+
+
+#------------------------------------------------------------------------------
+# pushover (pushover.net) global notification options
+
+# multiple recipients can be given like this:
+#                  "USERTOKEN1 USERTOKEN2 ..."
+
+# enable/disable sending pushover notifications
+SEND_PUSHOVER="YES"
+
+# Login to pushover.net to get your pushover app token.
+# You need only one for all your netdata servers (or you can have one for
+# each of your netdata - your call).
+# Without an app token, netdata cannot send pushover notifications.
+PUSHOVER_APP_TOKEN=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this pushover user token (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_PUSHOVER=""
+
+
+#------------------------------------------------------------------------------
+# pushbullet (pushbullet.com) push notification options
+
+# multiple recipients can be given like this:
+#                "user1@email.com user2@mail.com"
+
+# enable/disable sending pushbullet notifications
+SEND_PUSHBULLET="YES"
+
+# Signup and Login to pushbullet.com
+# To get your Access Token, go to https://www.pushbullet.com/#settings/account
+# Create a new access token and paste it below.
+# Then just set the recipients' emails.
+# Please note that if the email in the DEFAULT_RECIPIENT_PUSHBULLET does
+# not have a pushbullet account, the pushbullet service will send an email
+# to that address instead.
+
+# Without an access token, netdata cannot send pushbullet notifications.
+PUSHBULLET_ACCESS_TOKEN=""
+DEFAULT_RECIPIENT_PUSHBULLET=""
+
+# Device iden of the sending device. Optional.
+PUSHBULLET_SOURCE_DEVICE=""
+
+
+#------------------------------------------------------------------------------
+# Twilio (twilio.com) SMS options
+
+# multiple recipients can be given like this:
+#                  "+15555555555 +17777777777"
+
+# enable/disable sending twilio SMS
+SEND_TWILIO="YES"
+
+# Sign up for a free trial and select an SMS-capable Twilio Number
+# To get your Account SID and Token, go to https://www.twilio.com/console
+# Place your sid, token and number below.
+# Then just set the recipients' phone numbers.
+# The trial account is only allowed to use the number specified when set up.
+
+# Without an account sid and token, netdata cannot send Twilio text messages.
+TWILIO_ACCOUNT_SID=""
+TWILIO_ACCOUNT_TOKEN=""
+TWILIO_NUMBER=""
+DEFAULT_RECIPIENT_TWILIO=""
+
+
+#------------------------------------------------------------------------------
+# Messagebird (messagebird.com) SMS options
+
+# multiple recipients can be given like this:
+#                  "+15555555555 +17777777777"
+
+# enable/disable sending messagebird SMS
+SEND_MESSAGEBIRD="YES"
+
+# to get an access key, create a free account at https://www.messagebird.com
+# verify and activate the account (no CC info needed)
+# login to your account and enter your phone number to get some free credits
+# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)'
+# click 'Add access key' and fill in the data (you want a live key to send SMS)
+
+# Without an access key, netdata cannot send Messagebird text messages.
+MESSAGEBIRD_ACCESS_KEY=""
+MESSAGEBIRD_NUMBER=""
+DEFAULT_RECIPIENT_MESSAGEBIRD=""
+
+
+#------------------------------------------------------------------------------
+# Kavenegar (Kavenegar.com) SMS options
+
+# multiple recipients can be given like this:
+#                  "09155555555 09177777777"
+
+# enable/disable sending kavenegar SMS
+SEND_KAVENEGAR="YES"
+
+# to get an access key, after selecting and purchasing your desired service
+# at http://kavenegar.com/pricing.html
+# login to your account, go to your dashboard and, under "My Account" at
+# https://panel.kavenegar.com/Client/setting/account, find the API Key section.
+# Copy your API key; you can also generate a new one there.
+# You can find and select your kavenegar sender number in the same place.
+
+# Without an API key, netdata cannot send KAVENEGAR text messages.
+KAVENEGAR_API_KEY=""
+KAVENEGAR_SENDER=""
+DEFAULT_RECIPIENT_KAVENEGAR=""
+
+
+#------------------------------------------------------------------------------
+# telegram (telegram.org) global notification options
+
+# To get your chat ID send the command /my_id to telegram bot @get_id.
+# Users also need to open a query with the bot (see below).
+
+# note: multiple recipients can be given like this:
+#       "CHAT_ID_1 CHAT_ID_2 ..."
+
+# enable/disable sending telegram messages
+SEND_TELEGRAM="YES"
+
+# Contact the bot @BotFather to create a new bot and receive a bot token.
+# Without it, netdata cannot send telegram messages.
+TELEGRAM_BOT_TOKEN=""
+
+# If a role's recipients are not configured, a message will be sent to
+# this chat id (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_TELEGRAM=""
+
+
+#------------------------------------------------------------------------------
+# slack (slack.com) global notification options
+
+# multiple recipients can be given like this:
+#                  "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending slack notifications
+SEND_SLACK="YES"
+
+# Login to slack.com and create an incoming webhook. You need only one for all
+# your netdata servers (or you can have one for each of your netdata).
+# Without it, netdata cannot send slack notifications.
+# Get yours from: https://api.slack.com/incoming-webhooks
+SLACK_WEBHOOK_URL=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this slack channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_SLACK=""
+
+#------------------------------------------------------------------------------
+# Microsoft Teams (office.com) global notification options
+# More details are available here regarding the payload syntax options:
+# https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference
+# Online designer: https://acdesignerbeta.azurewebsites.net/
+# multiple recipients can be given like this:
+#                  "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending MS Teams notifications
+SEND_MSTEAM="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this MS Teams channel (empty = do not send a notification for unconfigured
+# roles):
+# For MS Teams, the channel name is encoded in the URI after ....IncomingWebhook/___/.....
+# This value will be replaced in the webhook value to publish to several channels in the same Team.
+# To get it working properly, replace the value between the brackets in
+# ....IncomingWebhook/[___]/..... with the literal string "CHANNEL".
+DEFAULT_RECIPIENT_MSTEAM=""
+# Based on the way MS Teams works, put the different channels here like:
+# "CHANNEL1 CHANNEL2 ..."
+# AT LEAST ONE CHANNEL IS MANDATORY
+MSTEAM_WEBHOOK_URL=""
+
+# Define the default color scheme for alerts to MS Teams - icon and color
+# Icons - go to https://emojipedia.org/bomb/
+MSTEAM_ICON_DEFAULT="♡"
+MSTEAM_ICON_CLEAR="💚"
+MSTEAM_ICON_WARNING="⚠️"
+MSTEAM_ICON_CRITICAL="🔥"
+
+# Colors
+MSTEAM_COLOR_DEFAULT="0076D7"
+MSTEAM_COLOR_CLEAR="65A677"
+MSTEAM_COLOR_WARNING="FFA500"
+MSTEAM_COLOR_CRITICAL="D93F3C"
+
+
+#------------------------------------------------------------------------------
+# rocketchat (rocket.chat) global notification options
+
+# multiple recipients can be given like this:
+#                  "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending rocketchat notifications
+SEND_ROCKETCHAT="YES"
+
+# Login to rocket.chat and create an incoming webhook. You need only one for all
+# your netdata servers (or you can have one for each of your netdata).
+# Without it, netdata cannot send rocketchat notifications.
+ROCKETCHAT_WEBHOOK_URL=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this rocketchat channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_ROCKETCHAT=""
+
+
+#------------------------------------------------------------------------------
+# alerta (alerta.io) global notification options
+
+# multiple recipients (Environments) can be given like this:
+#                  "Production Development ..."
+
+# enable/disable sending alerta notifications
+SEND_ALERTA="YES"
+
+# here set your alerta server API url
+# this is the API url you defined when you installed the Alerta server,
+# it is the same for all users. Do not include a trailing slash.
+# ALERTA_WEBHOOK_URL="https://<server>/alerta/api"
+ALERTA_WEBHOOK_URL=""
+
+# Login with an administrative user to your Alerta server and create an API KEY
+# with write permissions.
+ALERTA_API_KEY=""
+
+# you can define environments in /etc/alertad.conf option ALLOWED_ENVIRONMENTS
+# standard environments are Production and Development
+# if a role's recipients are not configured, a notification will be sent to
+# this Environment (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_ALERTA=""
+
+
+#------------------------------------------------------------------------------
+# flock (flock.com) global notification options
+
+# enable/disable sending flock notifications
+SEND_FLOCK="YES"
+
+# Login to flock.com and create an incoming webhook. You need only one for all
+# your netdata servers (or you can have one for each of your netdata).
+# Without it, netdata cannot send flock notifications.
+FLOCK_WEBHOOK_URL=""
+
+# if a role recipient is not configured, no notification will be sent
+DEFAULT_RECIPIENT_FLOCK=""
+
+
+#------------------------------------------------------------------------------
+# discord (discordapp.com) global notification options
+
+# multiple recipients can be given like this:
+#                  "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending discord notifications
+SEND_DISCORD="YES"
+
+# Create a webhook by following the official documentation -
+# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+DISCORD_WEBHOOK_URL=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this discord channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_DISCORD=""
+
+
+#------------------------------------------------------------------------------
+# hipchat global notification options
+
+# multiple recipients can be given like this:
+#                  "ROOM1 ROOM2 ..."
+
+# enable/disable sending hipchat notifications
+SEND_HIPCHAT="YES"
+
+# define hipchat server
+HIPCHAT_SERVER="api.hipchat.com"
+
+# api.hipchat.com authorization token
+# Without this, netdata cannot send hipchat notifications.
+HIPCHAT_AUTH_TOKEN=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this hipchat room (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_HIPCHAT=""
+
+
+#------------------------------------------------------------------------------
+# kafka notification options
+
+# enable/disable sending kafka notifications
+SEND_KAFKA="YES"
+
+# The URL to POST kafka alarm data to. It should be the full URL.
+KAFKA_URL=""
+
+# The IP to be used in the kafka message as the sender.
+KAFKA_SENDER_IP=""
+
+
+#------------------------------------------------------------------------------
+# pagerduty.com notification options
+#
+# pagerduty.com notifications require the pagerduty agent to be installed and
+# a "Generic API" pagerduty service.
+# https://www.pagerduty.com/docs/guides/agent-install-guide/
+
+# multiple recipients can be given like this:
+#                  "<pd_service_key_1> <pd_service_key_2> ..."
+
+# enable/disable sending pagerduty notifications
+SEND_PD="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# the "Generic API" pagerduty.com service that uses this service key.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_PD=""
+
+
+#------------------------------------------------------------------------------
+# fleep notification options
+#
+# To send fleep.io notifications, you will need a webhook for the
+# conversation you want to send to.
+
+# Fleep recipients are specified as the last part of the webhook URL.
+# So, for a webhook URL of: https://fleep.io/hook/IJONmBuuSlWlkb_ttqyXJg, the
+# recipient name would be: 'IJONmBuuSlWlkb_ttqyXJg'.
+
+# enable/disable sending fleep notifications
+SEND_FLEEP="YES"
+
+# if a role's recipients are not configured, a notification will not be sent.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_FLEEP=""
+
+# The user name to label the messages with. If this is unset,
+# the hostname of the system the notification is for will be used.
+FLEEP_SENDER=""
+
+
+#------------------------------------------------------------------------------
+# irc notification options
+#
+# irc notifications require only the nc utility to be installed.
+
+# multiple recipients can be given like this:
+#              "<irc_channel_1> <irc_channel_2> ..."
+
+# enable/disable sending irc notifications
+SEND_IRC="YES"
+
+# if a role's recipients are not configured, a notification will not be sent.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_IRC=""
+
+# The irc network to which the recipients belong. It must be the full network.
+# e.g. "irc.freenode.net"
+IRC_NETWORK=""
+
+# The irc nickname which is required to send the notification. It must not be
+# an already registered name as the connection's MODE is defined as a 'guest'.
+IRC_NICKNAME=""
+
+# The irc realname which is required in order to make the connection and is an
+# extra identifier.
+IRC_REALNAME=""
+
+
+#------------------------------------------------------------------------------
+# syslog notifications
+#
+# syslog notifications only need you to have a working logger command, which
+# should be the case on pretty much any Linux system.
+
+# enable/disable sending syslog notifications
+# NOTE: make sure you have everything else configured the way you want
+# it _before_ turning this on.
+SEND_SYSLOG="NO"
+
+# A note on log levels and facilities:
+#
+# The traditional UNIX syslog mechanism has the concept of both log
+# levels and facilities. A log level indicates the relative severity of
+# the message, while a facility specifies a generic source for the message
+# (for example, the `mail` facility is where sendmail and postfix log
+# their messages). All major syslog daemons have the ability to filter
+# messages based on both log level and facility, and can often also make
+# routing decisions for messages based on both factors.
+#
+# On Linux, the eight log levels in decreasing order of severity are:
+# emerg, alert, crit, err, warning, notice, info, debug
+#
+# By default, warnings will be logged at the warning level, critical
+# alerts at the crit level, and clear notifications at the info level.
+#
+# And the 19 facilities you can log to are:
+# auth, authpriv, cron, daemon, ftp, lpr, mail, news, syslog, user,
+# uucp, local0, local1, local2, local3, local4, local5, local6, and local7
+#
+# By default, netdata alerts will be logged to the local6 facility.
+#
+# Depending on your distribution, this means that either all your
+# netdata alerts will by default end up in the main system log (usually
+# /var/log/messages), or they won't be logged to a file at all.
+# Neither of these are likely to be what you actually want, but any
+# configuration to change that needs to happen in the syslog daemon
+# configuration, not here.
+
+# This controls which facility is used by default for logging. Defaults
+# to local6.
+SYSLOG_FACILITY=''
+
+# If a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+#
+# The recipient format for syslog uses the following format:
+# [[facility.level][@host[:port]]/]prefix
+#
+# `prefix` gets prepended to all log messages generated for
+# that recipient. The prefix is mandatory.
+# 'host' and 'port' can be used to specify a remote syslog server to
+# send messages to. Leave these out if you want messages to be delivered
+# locally. 'host' can be either a hostname or an IP address.
+# IPv6 addresses must have square brackets around them.
+# 'facility' and 'level' are used to override the default logging facility
+# set above and the log level. If one is specified, both must be present.
+#
+# For example, to send messages with a 'netdata' prefix to a syslog
+# daemon listening on port 514 on 'loghost' using the daemon facility and
+# notice log level:
+# DEFAULT_RECIPIENT_SYSLOG='daemon.notice@loghost:514/netdata'
+#
+DEFAULT_RECIPIENT_SYSLOG="netdata"
+
+#------------------------------------------------------------------------------
+# Amazon SNS notifications
+#
+# This method requires potentially complex manual configuration. See the
+# netdata wiki for information on what is needed.
+
+# enable/disable sending Amazon SNS notifications
+SEND_AWSSNS="YES"
+
+# Specify a template for the Amazon SNS notifications. This supports
+# the same set of variables that are usable in the `custom_sender()`
+# function in the custom notification configuration below.
+#
+AWSSNS_MESSAGE_FORMAT="${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+# If a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+#
+# Recipients for AWS SNS notifications are specified as topic ARN's.
+#
+DEFAULT_RECIPIENT_AWSSNS=""
+
+#------------------------------------------------------------------------------
+# custom notifications
+#
+
+# enable/disable sending custom notifications
+SEND_CUSTOM="YES"
+
+# if a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+DEFAULT_RECIPIENT_CUSTOM=""
+
+# The custom_sender() is a custom function to do whatever you need to do
+custom_sender() {
+    # variables you can use:
+    # ${host} the host that generated this event
+    # ${url_host} same as ${host} but URL encoded
+    # ${unique_id} the unique id of this event
+    # ${alarm_id} the unique id of the alarm that generated this event
+    # ${event_id} the incremental id of the event, for this alarm id
+    # ${when} the timestamp this event occurred at
+    # ${name} the name of the alarm, as given in netdata health.d entries
+    # ${url_name} same as ${name} but URL encoded
+    # ${chart} the name of the chart (type.id)
+    # ${url_chart} same as ${chart} but URL encoded
+    # ${family} the family of the chart
+    # ${url_family} same as ${family} but URL encoded
+    # ${status} the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+    # ${old_status} the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+    # ${value} the current value of the alarm
+    # ${old_value} the previous value of the alarm
+    # ${src} the line number and file where the alarm has been configured
+    # ${duration} the duration in seconds of the previous alarm state
+    # ${duration_txt} same as ${duration} for humans
+    # ${non_clear_duration} the total duration in seconds this is/was non-clear
+    # ${non_clear_duration_txt} same as ${non_clear_duration} for humans
+    # ${units} the units of the value
+    # ${info} a short description of the alarm
+    # ${value_string} friendly value (with units)
+    # ${old_value_string} friendly old value (with units)
+    # ${image} the URL of an image to represent the status of the alarm
+    # ${color} a color in #AABBCC format for the alarm
+    # ${goto_url} the URL the user can click to see the netdata dashboard
+
+    # these are more human friendly:
+    # ${alarm} like "name = value units"
+    # ${status_message} like "needs attention", "recovered", "is critical"
+    # ${severity} like "Escalated to CRITICAL", "Recovered from WARNING"
+    # ${raised_for} like "(alarm was raised for 10 minutes)"
+
+    # example human readable SMS
+    local msg="${host} ${status_message}: ${alarm} ${raised_for}"
+
+    # limit it to 160 characters and encode it for use in a URL
+    urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"
+
+    # a space separated list of the recipients to send alarms to
+    to="${1}"
+
+    info "not sending custom notification to ${to}, for ${status} of '${host}.${chart}.${name}' - custom_sender() is not configured."
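+
+    # For illustration only (commented out; an assumption, not the shipped
+    # implementation): a minimal sender that POSTs the human readable message
+    # to a hypothetical HTTP endpoint with curl. Replace
+    # https://example.com/notify with your own service before uncommenting.
+    #local httpcode
+    #httpcode=$(curl --write-out "%{http_code}" --silent --output /dev/null \
+    #    --data-urlencode "payload=${msg}" \
+    #    "https://example.com/notify?to=${to}")
+    #if [ "${httpcode}" = "200" ]; then
+    #    info "sent custom notification for ${status} of '${host}.${chart}.${name}'"
+    #else
+    #    error "failed to send custom notification (HTTP ${httpcode})"
+    #fi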
+} + + +############################################################################### +# RECIPIENTS PER ROLE + +# ----------------------------------------------------------------------------- +# generic system alarms +# CPU, disks, network interfaces, entropy, etc + +role_recipients_email[sysadmin]="${DEFAULT_RECIPIENT_EMAIL}" + +role_recipients_pushover[sysadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" + +role_recipients_pushbullet[sysadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" + +role_recipients_telegram[sysadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" + +role_recipients_slack[sysadmin]="${DEFAULT_RECIPIENT_SLACK}" + +role_recipients_alerta[sysadmin]="${DEFAULT_RECIPIENT_ALERTA}" + +role_recipients_flock[sysadmin]="${DEFAULT_RECIPIENT_FLOCK}" + +role_recipients_discord[sysadmin]="${DEFAULT_RECIPIENT_DISCORD}" + +role_recipients_hipchat[sysadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" + +role_recipients_twilio[sysadmin]="${DEFAULT_RECIPIENT_TWILIO}" + +role_recipients_messagebird[sysadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" + +role_recipients_kavenegar[sysadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" + +role_recipients_pd[sysadmin]="${DEFAULT_RECIPIENT_PD}" + +role_recipients_fleep[sysadmin]="${DEFAULT_RECIPIENT_FLEEP}" + +role_recipients_irc[sysadmin]="${DEFAULT_RECIPIENT_IRC}" + +role_recipients_syslog[sysadmin]="${DEFAULT_RECIPIENT_SYSLOG}" + +role_recipients_awssns[sysadmin]="${DEFAULT_RECIPIENT_AWSSNS}" + +role_recipients_custom[sysadmin]="${DEFAULT_RECIPIENT_CUSTOM}" + +role_recipients_msteam[sysadmin]="${DEFAULT_RECIPIENT_MSTEAM}" + +# ----------------------------------------------------------------------------- +# DNS related alarms + +role_recipients_email[domainadmin]="${DEFAULT_RECIPIENT_EMAIL}" + +role_recipients_pushover[domainadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" + +role_recipients_pushbullet[domainadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" + +role_recipients_telegram[domainadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" + +role_recipients_slack[domainadmin]="${DEFAULT_RECIPIENT_SLACK}" + +role_recipients_alerta[domainadmin]="${DEFAULT_RECIPIENT_ALERTA}" + +role_recipients_flock[domainadmin]="${DEFAULT_RECIPIENT_FLOCK}" + +role_recipients_discord[domainadmin]="${DEFAULT_RECIPIENT_DISCORD}" + +role_recipients_hipchat[domainadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" + +role_recipients_twilio[domainadmin]="${DEFAULT_RECIPIENT_TWILIO}" + +role_recipients_messagebird[domainadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" + +role_recipients_kavenegar[domainadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" + +role_recipients_pd[domainadmin]="${DEFAULT_RECIPIENT_PD}" + +role_recipients_fleep[domainadmin]="${DEFAULT_RECIPIENT_FLEEP}" + +role_recipients_irc[domainadmin]="${DEFAULT_RECIPIENT_IRC}" + +role_recipients_syslog[domainadmin]="${DEFAULT_RECIPIENT_SYSLOG}" + +role_recipients_awssns[domainadmin]="${DEFAULT_RECIPIENT_AWSSNS}" + +role_recipients_custom[domainadmin]="${DEFAULT_RECIPIENT_CUSTOM}" + +role_recipients_msteam[domainadmin]="${DEFAULT_RECIPIENT_MSTEAM}" + +# ----------------------------------------------------------------------------- +# database servers alarms +# mysql, redis, memcached, postgres, etc + +role_recipients_email[dba]="${DEFAULT_RECIPIENT_EMAIL}" + +role_recipients_pushover[dba]="${DEFAULT_RECIPIENT_PUSHOVER}" + +role_recipients_pushbullet[dba]="${DEFAULT_RECIPIENT_PUSHBULLET}" + +role_recipients_telegram[dba]="${DEFAULT_RECIPIENT_TELEGRAM}" + +role_recipients_slack[dba]="${DEFAULT_RECIPIENT_SLACK}" + +role_recipients_alerta[dba]="${DEFAULT_RECIPIENT_ALERTA}" + +role_recipients_flock[dba]="${DEFAULT_RECIPIENT_FLOCK}" 
+
+role_recipients_discord[dba]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[dba]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[dba]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[dba]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[dba]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[dba]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[dba]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[dba]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[dba]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_awssns[dba]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[dba]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[dba]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+# -----------------------------------------------------------------------------
+# web servers alarms
+# apache, nginx, lighttpd, etc
+
+role_recipients_email[webmaster]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_pushover[webmaster]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[webmaster]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[webmaster]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[webmaster]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[webmaster]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[webmaster]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[webmaster]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[webmaster]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[webmaster]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[webmaster]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[webmaster]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[webmaster]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[webmaster]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[webmaster]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[webmaster]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_awssns[webmaster]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[webmaster]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[webmaster]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+# -----------------------------------------------------------------------------
+# proxy servers alarms
+# squid, etc
+
+role_recipients_email[proxyadmin]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_pushover[proxyadmin]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[proxyadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[proxyadmin]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[proxyadmin]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[proxyadmin]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[proxyadmin]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[proxyadmin]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[proxyadmin]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[proxyadmin]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[proxyadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[proxyadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[proxyadmin]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[proxyadmin]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[proxyadmin]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[proxyadmin]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_awssns[proxyadmin]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[proxyadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[proxyadmin]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+# -----------------------------------------------------------------------------
+# peripheral devices
+# UPS, photovoltaics, etc
+
+role_recipients_email[sitemgr]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_pushover[sitemgr]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[sitemgr]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[sitemgr]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[sitemgr]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[sitemgr]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[sitemgr]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[sitemgr]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[sitemgr]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[sitemgr]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[sitemgr]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[sitemgr]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[sitemgr]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[sitemgr]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_syslog[sitemgr]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_awssns[sitemgr]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[sitemgr]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[sitemgr]="${DEFAULT_RECIPIENT_MSTEAM}"
diff --git a/health/notifications/health_email_recipients.conf b/health/notifications/health_email_recipients.conf
new file mode 100644
index 000000000..f56c6c64a
--- /dev/null
+++ b/health/notifications/health_email_recipients.conf
@@ -0,0 +1,2 @@
+# OBSOLETE FILE
+# REPLACED WITH health_alarm_notify.conf
diff --git a/health/notifications/irc/Makefile.inc b/health/notifications/irc/Makefile.inc
new file mode 100644
index 000000000..23be7210a
--- /dev/null
+++ b/health/notifications/irc/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    irc/README.md \
+    irc/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/irc/README.md b/health/notifications/irc/README.md
new file mode 100644
index 000000000..ece879fe6
--- /dev/null
+++ b/health/notifications/irc/README.md
@@ -0,0 +1,73 @@
+# IRC notifications
+
+This is what you will get:
+
+IRCCloud web client:
+![image](https://user-images.githubusercontent.com/31221999/36793487-3735673e-1ca6-11e8-8880-d1d8b6cd3bc0.png)
+
+Irssi terminal client:
+![image](https://user-images.githubusercontent.com/31221999/36793486-3713ada6-1ca6-11e8-8c12-70d956ad801e.png)
+
+
+You need:
+1. The `nc` utility. If you do not set its path, netdata will search for it in your system `$PATH` (a quick reachability check follows this list).
+
+Set the path for `nc` in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# external commands
+#
+# The full path of the nc command.
+# If empty, the system $PATH will be searched for it.
+# If not found, irc notifications will be silently disabled.
+nc="/usr/bin/nc"
+
+```
+
+2. An `IRC_NETWORK` to which your preferred channels belong.
+3. One or more channels (`DEFAULT_RECIPIENT_IRC`) to post the messages to.
+4. An `IRC_NICKNAME` and an `IRC_REALNAME` to identify yourself on IRC.
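+
+Before going further, you may want to confirm that the IRC server is reachable from the netdata host — a quick sketch, using the example network from the configuration below and `nc`'s port-scan mode:
+
+```
+# -z: just scan, do not send data; -v: report the result
+nc -zv irc.freenode.net 6667
+```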
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# irc notification options
+#
+# irc notifications require only the nc utility to be installed.
+
+# multiple recipients can be given like this:
+#              "<irc_channel_1> <irc_channel_2> ..."
+
+# enable/disable sending irc notifications
+SEND_IRC="YES"
+
+# if a role's recipients are not configured, a notification will not be sent.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_IRC="#system-alarms"
+
+# The irc network to which the recipients belong. It must be the full network.
+IRC_NETWORK="irc.freenode.net"
+
+# The irc nickname which is required to send the notification. It must not be
+# an already registered name as the connection's MODE is defined as a 'guest'.
+IRC_NICKNAME="netdata-alarm-user"
+
+# The irc realname which is required in order to make the connection and is an
+# extra identifier.
+IRC_REALNAME="netdata-user"
+
+```
+
+You can define multiple channels like this: `#system-alarms #networking-alarms`.
+You can also filter the notifications like this: `#system-alarms|critical`.
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_irc[sysadmin]="#user-alarms #networking-alarms #system-alarms"
+role_recipients_irc[dba]="#databases-alarms"
+role_recipients_irc[webmaster]="#networking-alarms"
+```
+
+The keywords `#user-alarms`, `#networking-alarms`, `#system-alarms`, `#databases-alarms` are IRC channels which belong to the specified IRC network.
\ No newline at end of file
diff --git a/health/notifications/kavenegar/Makefile.inc b/health/notifications/kavenegar/Makefile.inc
new file mode 100644
index 000000000..6a17c3482
--- /dev/null
+++ b/health/notifications/kavenegar/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    kavenegar/README.md \
+    kavenegar/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/kavenegar/README.md b/health/notifications/kavenegar/README.md
new file mode 100644
index 000000000..e59ad4d4f
--- /dev/null
+++ b/health/notifications/kavenegar/README.md
@@ -0,0 +1,39 @@
+# Kavenegar notifications
+
+[Kavenegar](https://www.kavenegar.com/), an SMS service for software developers based in Iran, provides APIs for sending and receiving SMS and for voice calls.
+
+Will look like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png)
+
+You will need:
+
+1. Sign up and log in to kavenegar.com
+2. Get your APIKEY and Sender from http://panel.kavenegar.com/client/setting/account
+3. Fill in KAVENEGAR_API_KEY="" KAVENEGAR_SENDER=""
+4. Add the recipient phone numbers to DEFAULT_RECIPIENT_KAVENEGAR=""
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# Kavenegar (kavenegar.com) SMS options
+
+# multiple recipients can be given like this:
+#                  "09155555555 09177777777"
+
+# enable/disable sending kavenegar SMS
+SEND_KAVENEGAR="YES"
+
+# to get an access key, after selecting and purchasing your desired service
+# at http://kavenegar.com/pricing.html
+# login to your account, go to your dashboard and, under "My Account" at
+# https://panel.kavenegar.com/Client/setting/account, find the API Key section.
+# Copy your API key; you can also generate a new one there.
+# You can find and select your kavenegar sender number in the same place.
+
+# Without an API key, netdata cannot send KAVENEGAR text messages.
+KAVENEGAR_API_KEY=""
+KAVENEGAR_SENDER=""
+DEFAULT_RECIPIENT_KAVENEGAR=""
+```
\ No newline at end of file
diff --git a/health/notifications/messagebird/Makefile.inc b/health/notifications/messagebird/Makefile.inc
new file mode 100644
index 000000000..8132fecce
--- /dev/null
+++ b/health/notifications/messagebird/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    messagebird/README.md \
+    messagebird/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/messagebird/README.md b/health/notifications/messagebird/README.md
new file mode 100644
index 000000000..e09ba5d38
--- /dev/null
+++ b/health/notifications/messagebird/README.md
@@ -0,0 +1,38 @@
+# Messagebird notifications
+
+Will look like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png)
+
+You will need:
+
+1. Sign up and log in to messagebird.com
+2. Pick an SMS-capable number after sign up to get some free credits
+3. Go to <https://www.messagebird.com/app/settings/developers/access>
+4. Create a new access key under 'API ACCESS (REST)' (you will want a live key)
+5. Fill in MESSAGEBIRD_ACCESS_KEY="XXXXXXXX" MESSAGEBIRD_NUMBER="+XXXXXXXXXXX"
+6. Add the recipient phone numbers to DEFAULT_RECIPIENT_MESSAGEBIRD="+XXXXXXXXXXX"
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# Messagebird (messagebird.com) SMS options
+
+# multiple recipients can be given like this:
+#                  "+15555555555 +17777777777"
+
+# enable/disable sending messagebird SMS
+SEND_MESSAGEBIRD="YES"
+
+# to get an access key, create a free account at https://www.messagebird.com
+# verify and activate the account (no CC info needed)
+# login to your account and enter your phone number to get some free credits
+# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)'
+# click 'Add access key' and fill in the data (you want a live key to send SMS)
+
+# Without an access key, netdata cannot send Messagebird text messages.
+MESSAGEBIRD_ACCESS_KEY="XXXXXXXX"
+MESSAGEBIRD_NUMBER="XXXXXXX"
+DEFAULT_RECIPIENT_MESSAGEBIRD="XXXXXXX"
+
+```
diff --git a/health/notifications/pagerduty/Makefile.inc b/health/notifications/pagerduty/Makefile.inc
new file mode 100644
index 000000000..6012d20e9
--- /dev/null
+++ b/health/notifications/pagerduty/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    pagerduty/README.md \
+    pagerduty/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/pagerduty/README.md b/health/notifications/pagerduty/README.md
new file mode 100644
index 000000000..e35051fd4
--- /dev/null
+++ b/health/notifications/pagerduty/README.md
@@ -0,0 +1,34 @@
+# PagerDuty notifications
+
+[PagerDuty](https://www.pagerduty.com/company/) is the enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.
+
+Here is an example of a PagerDuty dashboard with netdata notifications:
+
+![PagerDuty dashboard with netdata notifications](https://cloud.githubusercontent.com/assets/19278582/21233877/b466a08a-c2a5-11e6-8d66-ee6eed43818f.png)
+
+To have netdata send notifications to PagerDuty, you'll first need to set up a PagerDuty `Generic API` service and install the PagerDuty agent on the host running netdata. See the following guide for details:
+
+https://www.pagerduty.com/docs/guides/agent-install-guide/
+
+During the setup of the `Generic API` PagerDuty service, you'll obtain a `pagerduty service key`. Keep this **service key** handy.
+
+Once the PagerDuty agent is installed on your host and can send notifications from your host to your `Generic API` service on PagerDuty, add the **service key** to `DEFAULT_RECIPIENT_PD` in `health_alarm_notify.conf`:
+
+```
+#------------------------------------------------------------------------------
+# pagerduty.com notification options
+#
+# pagerduty.com notifications require the pagerduty agent to be installed and
+# a "Generic API" pagerduty service.
+# https://www.pagerduty.com/docs/guides/agent-install-guide/
+
+# multiple recipients can be given like this:
+#                  "<pd_service_key_1> <pd_service_key_2> ..."
+
+# enable/disable sending pagerduty notifications
+SEND_PD="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# the "Generic API" pagerduty.com service that uses this service key.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_PD="<service key>"
+```
diff --git a/health/notifications/pushbullet/Makefile.inc b/health/notifications/pushbullet/Makefile.inc
new file mode 100644
index 000000000..693a0ffba
--- /dev/null
+++ b/health/notifications/pushbullet/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    pushbullet/README.md \
+    pushbullet/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/pushbullet/README.md b/health/notifications/pushbullet/README.md
new file mode 100644
index 000000000..827d9301d
--- /dev/null
+++ b/health/notifications/pushbullet/README.md
@@ -0,0 +1,42 @@
+# PushBullet notifications
+
+Will look like this on your browser:
+![image](https://cloud.githubusercontent.com/assets/4300670/19109636/278b1c0c-8aee-11e6-8a09-7fc94fdbfec8.png)
+
+And like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/4300670/19109635/278a1dde-8aee-11e6-9984-0bc87a13312d.png)
+
+You will need:
+
+1. Sign up and log in to pushbullet.com
+2. Get your Access Token: go to https://www.pushbullet.com/#settings/account and create a new one
+3. Fill in PUSHBULLET_ACCESS_TOKEN with that value
+4. Add the recipient emails to DEFAULT_RECIPIENT_PUSHBULLET
+
+Please note: if a recipient does not have a pushbullet account, the pushbullet service will send them an email instead.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# pushbullet (pushbullet.com) push notification options
+
+# multiple recipients can be given like this:
+# "user1@email.com user2@mail.com"
+
+# enable/disable sending pushbullet notifications
+SEND_PUSHBULLET="YES"
+
+# Sign up and log in to pushbullet.com.
+# To get your Access Token, go to https://www.pushbullet.com/#settings/account
+# and create a new access token.
+# Then just set the recipients' emails.
+# Please note that if the email in DEFAULT_RECIPIENT_PUSHBULLET does
+# not have a pushbullet account, the pushbullet service will send an email
+# to that address instead
+
+# Without an access token, netdata cannot send pushbullet notifications.
+PUSHBULLET_ACCESS_TOKEN="o.Sometokenhere"
+DEFAULT_RECIPIENT_PUSHBULLET="admin1@example.com admin3@somemail.com"
+```
\ No newline at end of file
diff --git a/health/notifications/pushover/Makefile.inc b/health/notifications/pushover/Makefile.inc
new file mode 100644
index 000000000..926ac7c35
--- /dev/null
+++ b/health/notifications/pushover/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    pushover/README.md \
+    pushover/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/pushover/README.md b/health/notifications/pushover/README.md
new file mode 100644
index 000000000..284981695
--- /dev/null
+++ b/health/notifications/pushover/README.md
@@ -0,0 +1,17 @@
+
+# PushOver notifications
+
+pushover.net allows you to receive push notifications on your mobile phone.
The service appears to be free for up to 7,500 messages per month.
+
+netdata will send warning messages with priority `0` and critical messages with priority `1`. pushover.net allows you to select do-not-disturb hours. As configured, critical notifications will ring and vibrate your phone even during do-not-disturb hours. All other notifications will be delivered silently.
+
+You need:
+
+1. An APP TOKEN. You can use the same token on all your netdata servers.
+2. A USER TOKEN for each user you are going to send notifications to. This is the actual recipient of the notification.
+
+The configuration is the same as for Slack messages (see the Slack section), using the corresponding pushover options.
+
+pushover.net notifications look like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/18407319/839c10c4-7715-11e6-92c0-12f8215128d3.png)
\ No newline at end of file
diff --git a/health/notifications/rocketchat/Makefile.inc b/health/notifications/rocketchat/Makefile.inc
new file mode 100644
index 000000000..a6fc5d577
--- /dev/null
+++ b/health/notifications/rocketchat/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    rocketchat/README.md \
+    rocketchat/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/rocketchat/README.md b/health/notifications/rocketchat/README.md
new file mode 100644
index 000000000..70c698672
--- /dev/null
+++ b/health/notifications/rocketchat/README.md
@@ -0,0 +1,46 @@
+# Rocket.Chat notifications
+
+This is what you will get:
+![Netdata on RocketChat](https://i.imgur.com/Zu4t3j3.png)
+
+You need:
+
+1. The **incoming webhook URL** as given by RocketChat. You can use the same webhook on all your netdata servers (or you can have multiple if you like - your decision).
+2. One or more channels to post the messages to.
+
+Get them here: https://rocket.chat/docs/administrator-guides/integrations/index.html#how-to-create-a-new-incoming-webhook
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# rocketchat (rocket.chat) global notification options
+
+# multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending rocketchat notifications
+SEND_ROCKETCHAT="YES"
+
+# Log in to rocket.chat and create an incoming webhook. You need only one for
+# all your netdata servers (or you can have one for each of them).
+# Without it, netdata cannot send rocketchat notifications.
+ROCKETCHAT_WEBHOOK_URL="<your_incoming_webhook_url>"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this rocketchat channel (empty = do not send a notification for unconfigured
+# roles).
+DEFAULT_RECIPIENT_ROCKETCHAT="monitoring_alarms"
+
+```
+
+You can define multiple channels like this: `alarms systems`.
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_rocketchat[sysadmin]="systems"
+role_recipients_rocketchat[dba]="databases systems"
+role_recipients_rocketchat[webmaster]="marketing development"
+```
+
+The keywords `systems`, `databases`, `marketing`, `development` are RocketChat channels (they should already exist).
+Both public and private channels can be used, even if they differ from the channel configured in your RocketChat incoming webhook.
diff --git a/health/notifications/slack/Makefile.inc b/health/notifications/slack/Makefile.inc
new file mode 100644
index 000000000..955a8c7ad
--- /dev/null
+++ b/health/notifications/slack/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    slack/README.md \
+    slack/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/slack/README.md b/health/notifications/slack/README.md
new file mode 100644
index 000000000..45be45197
--- /dev/null
+++ b/health/notifications/slack/README.md
@@ -0,0 +1,45 @@
+# Slack.com notifications
+
+This is what you will get:
+![image](https://cloud.githubusercontent.com/assets/2662304/18407116/bbd0fee6-7710-11e6-81cf-58c0defaee2b.png)
+
+You need:
+
+1. The **incoming webhook URL** as given by slack.com. You can use the same webhook on all your netdata servers (or you can have multiple if you like - your decision).
+2. One or more channels to post the messages to.
+
+Get them here: https://api.slack.com/incoming-webhooks
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# sending slack notifications
+
+# note: multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending slack notifications
+SEND_SLACK="YES"
+
+# Log in to slack.com and create an incoming webhook.
+# You need only one for all your netdata servers.
+# Without it, netdata cannot send slack notifications.
+SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# if a role recipient is not configured, a notification will be sent to
+# this slack channel:
+DEFAULT_RECIPIENT_SLACK="alarms"
+
+```
+
+You can define multiple channels like this: `alarms systems`.
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_slack[sysadmin]="systems"
+role_recipients_slack[dba]="databases systems"
+role_recipients_slack[webmaster]="marketing development"
+```
+
+The keywords `systems`, `databases`, `marketing`, `development` are slack.com channels (they should already exist in slack).
diff --git a/health/notifications/syslog/Makefile.inc b/health/notifications/syslog/Makefile.inc
new file mode 100644
index 000000000..1792b9d9c
--- /dev/null
+++ b/health/notifications/syslog/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    syslog/README.md \
+    syslog/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/syslog/README.md b/health/notifications/syslog/README.md
new file mode 100644
index 000000000..fcc2466a6
--- /dev/null
+++ b/health/notifications/syslog/README.md
@@ -0,0 +1,23 @@
+# syslog notifications
+
+You need a working `logger` command for this. This is the case on pretty much every Linux system in existence, and most BSD systems.
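+
+A quick way to verify that `logger` works on your system is shown below (a sketch: `local6.info` matches the facility and level netdata uses by default for informational messages, and the `netdata` tag is arbitrary):
+
+    logger -p local6.info -t netdata "test message"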
+
+Logged messages will look like this:
+
+    netdata WARNING on hostname at Tue Apr 3 09:00:00 EDT 2018: disk_space._ out of disk space time = 5h
+
+## configuration
+
+System log targets are configured as recipients in [`/etc/netdata/health_alarm_notify.conf`](https://github.com/netdata/netdata/blob/36bedc044584dea791fd29455bdcd287c3306cb2/conf.d/health_alarm_notify.conf#L534) (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`).
+
+You can also configure per-role targets in the same file, a bit further down.
+
+Targets are defined as follows:
+
+    [[facility.level][@host[:port]]/]prefix
+
+`prefix` defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.
+
+`facility` and `level` are the standard syslog facility and level options; for more information on them, see your local `logger` and `syslog` documentation. By default, netdata logs to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).
+
+You can send directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).
diff --git a/health/notifications/telegram/Makefile.inc b/health/notifications/telegram/Makefile.inc
new file mode 100644
index 000000000..003996ba1
--- /dev/null
+++ b/health/notifications/telegram/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    telegram/README.md \
+    telegram/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/telegram/README.md b/health/notifications/telegram/README.md
new file mode 100644
index 000000000..cd52fe194
--- /dev/null
+++ b/health/notifications/telegram/README.md
@@ -0,0 +1,19 @@
+# Telegram.org notifications
+
+[Telegram](https://telegram.org/) is a messaging app with a focus on speed and security; it's super-fast, simple and free. You can use Telegram on all your devices at the same time — your messages sync seamlessly across any number of your phones, tablets or computers.
+
+With Telegram, you can send messages, photos, videos and files of any type (doc, zip, mp3, etc), as well as create groups for up to 30,000 people or channels for broadcasting to unlimited audiences. You can write to your phone contacts and find people by their usernames. As a result, Telegram is like SMS and email combined — and can take care of all your personal or business messaging needs.
+
+netdata will send warning messages without vibration.
+
+You need:
+
+1. A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot`. Follow the instructions.
+2. A chat id for every chat you want to send messages to. Contact the [@myidbot](https://t.me/myidbot) bot and send the command `/getid` to get your personal chat id, or invite it into a group and issue the same command to get the group chat id.
+3. Start a conversation with your bot, or invite it into the group you want to send messages to.
+
+The configuration is the same as for Slack (see the Slack section); a minimal sketch of the relevant options follows.
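+
+As a minimal sketch, in `/etc/netdata/health_alarm_notify.conf` (assuming the usual option layout - check the file itself for the exact option names; the token and chat id below are placeholders):
+
+```
+# enable/disable sending telegram messages
+SEND_TELEGRAM="YES"
+
+# the bot token obtained from @BotFather
+TELEGRAM_BOT_TOKEN="<your_bot_token>"
+
+# if a role's recipients are not configured, messages are sent to this chat id
+DEFAULT_RECIPIENT_TELEGRAM="<your_chat_id>"
+```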
+
+Telegram messages look like this:
+
+![image](https://fb.hash.works/ytl/preview.jpg)
diff --git a/health/notifications/twilio/Makefile.inc b/health/notifications/twilio/Makefile.inc
new file mode 100644
index 000000000..5bd00a21b
--- /dev/null
+++ b/health/notifications/twilio/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    twilio/README.md \
+    twilio/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/twilio/README.md b/health/notifications/twilio/README.md
new file mode 100644
index 000000000..ab3dd3c04
--- /dev/null
+++ b/health/notifications/twilio/README.md
@@ -0,0 +1,40 @@
+# Twilio.com notifications
+
+Will look like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png)
+
+You will need:
+
+1. Sign up and log in to twilio.com
+2. Pick an SMS-capable number during sign up.
+3. Get your SID and Token from <https://www.twilio.com/console>
+4. Fill in TWILIO_ACCOUNT_SID="XXXXXXXX" TWILIO_ACCOUNT_TOKEN="XXXXXXXXX" TWILIO_NUMBER="+XXXXXXXXXXX"
+5. Add the recipient phone numbers to DEFAULT_RECIPIENT_TWILIO="+XXXXXXXXXXX"
+
+Please note: if your account is a trial account, you will only be able to send notifications to the number you signed up with.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# Twilio (twilio.com) SMS options
+
+# multiple recipients can be given like this:
+# "+15555555555 +17777777777"
+
+# enable/disable sending twilio SMS
+SEND_TWILIO="YES"
+
+# Sign up for a free trial and select an SMS-capable Twilio number.
+# To get your Account SID and Token, go to https://www.twilio.com/console
+# Place your sid, token and number below.
+# Then just set the recipients' phone numbers.
+# The trial account is only allowed to use the number specified when set up.
+
+# Without an account sid and token, netdata cannot send Twilio text messages.
+TWILIO_ACCOUNT_SID="xxxxxxxxx"
+TWILIO_ACCOUNT_TOKEN="xxxxxxxxxx"
+TWILIO_NUMBER="xxxxxxxxxxx"
+DEFAULT_RECIPIENT_TWILIO="+15555555555"
+```
diff --git a/health/notifications/web/Makefile.inc b/health/notifications/web/Makefile.inc
new file mode 100644
index 000000000..890824313
--- /dev/null
+++ b/health/notifications/web/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    web/README.md \
+    web/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/web/README.md b/health/notifications/web/README.md
new file mode 100644
index 000000000..ba7dac1fe
--- /dev/null
+++ b/health/notifications/web/README.md
@@ -0,0 +1,6 @@
+# Dashboard notifications
+
+The netdata dashboard shows HTML notifications while it is open.
+ +Such web notifications look like this: +![image](https://cloud.githubusercontent.com/assets/2662304/18407279/82bac6a6-7714-11e6-847e-c2e84eeacbfb.png) diff --git a/installer/.keep b/installer/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/installer/functions.sh b/installer/functions.sh index 7e522f0aa..dc2833318 100644 --- a/installer/functions.sh +++ b/installer/functions.sh @@ -1,4 +1,6 @@ # no shebang necessary - this is a library to be sourced +# SPDX-License-Identifier: GPL-3.0-or-later +# shellcheck disable=SC1091,SC1117,SC2002,SC2004,SC2034,SC2046,SC2059,SC2086,SC2129,SC2148,SC2154,SC2155,SC2162,SC2166,SC2181,SC2193 # make sure we have a UID [ -z "${UID}" ] && UID="$(id -u)" @@ -8,8 +10,8 @@ # checking the availability of commands which_cmd() { - which "${1}" 2>/dev/null || \ - command -v "${1}" 2>/dev/null + # shellcheck disable=SC2230 + which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null } check_cmd() { @@ -159,6 +161,29 @@ pidof() { fi } +# ----------------------------------------------------------------------------- +# portable delete recursively interactively + +portable_deletedir_recursively_interactively() { + if [ ! -z "$1" -a -d "$1" ] + then + if [ "$(uname -s)" = "Darwin" ] + then + echo >&2 + read >&2 -p "Press ENTER to recursively delete directory '$1' > " + echo >&2 "Deleting directory '$1' ..." + run rm -R "$1" + else + echo >&2 + echo >&2 "Deleting directory '$1' ..." + run rm -I -R "$1" + fi + else + echo "Directory '$1' does not exist." + fi +} + + # ----------------------------------------------------------------------------- export SYSTEM_CPUS=1 @@ -287,6 +312,7 @@ portable_add_user() { echo >&2 "Adding ${username} user account with home ${homedir} ..." + # shellcheck disable=SC2230 local nologin="$(which nologin 2>/dev/null || command -v nologin 2>/dev/null || echo '/bin/false')" # Linux @@ -415,10 +441,11 @@ iscontainer() { issystemd() { local pids p myns ns systemctl - # if the directory /etc/systemd/system does not exit, it is not systemd - [ ! -d /etc/systemd/system ] && return 1 + # if the directory /lib/systemd/system does not exit, it is not systemd + [ ! -d /lib/systemd/system ] && return 1 # if there is no systemctl command, it is not systemd + # shellcheck disable=SC2230 systemctl=$(which systemctl 2>/dev/null || command -v systemctl 2>/dev/null) [ -z "${systemctl}" -o ! -x "${systemctl}" ] && return 1 @@ -436,7 +463,7 @@ issystemd() { ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)" # if pid of systemd is in our namespace, it is systemd - [ ! -z "${myns}" && "${myns}" = "${ns}" ] && return 0 + [ ! -z "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0 done # else, it is not systemd @@ -478,7 +505,7 @@ install_non_systemd_init() { run update-rc.d netdata defaults && \ run update-rc.d netdata enable && \ return 0 - elif [[ "${key}" =~ ^(amzn-201[567]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).* ]] + elif [[ "${key}" =~ ^(amzn-201[5678]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).* ]] then echo >&2 "Installing init.d file..." run cp system/netdata-init-d /etc/init.d/netdata && \ @@ -511,8 +538,16 @@ install_netdata_service() { if [ "${uname}" = "Darwin" ] then - echo >&2 "hm... 
I don't know how to install a startup script for MacOS X" - return 1 + if [ -f "/Library/LaunchDaemons/com.github.netdata.plist" ] + then + echo >&2 "file '/Library/LaunchDaemons/com.github.netdata.plist' already exists." + return 0 + else + echo >&2 "Installing MacOS X plist file..." + run cp system/netdata.plist /Library/LaunchDaemons/com.github.netdata.plist && \ + run launchctl load /Library/LaunchDaemons/com.github.netdata.plist && \ + return 0 + fi elif [ "${uname}" = "FreeBSD" ] then @@ -528,16 +563,15 @@ install_netdata_service() { NETDATA_START_CMD="systemctl start netdata" NETDATA_STOP_CMD="systemctl stop netdata" - if [ ! -f /etc/systemd/system/netdata.service ] + if [ -d "/lib/systemd/system" ] then echo >&2 "Installing systemd service..." - run cp system/netdata.service /etc/systemd/system/netdata.service && \ + run cp system/netdata.service /lib/systemd/system/netdata.service && \ run systemctl daemon-reload && \ run systemctl enable netdata && \ return 0 else - echo >&2 "file '/etc/systemd/system/netdata.service' already exists." - return 0 + echo >&2 "no '/lib/systemd/system' directory; cannot install netdata.service" fi else install_non_systemd_init @@ -792,31 +826,20 @@ download_netdata_conf() { # ----------------------------------------------------------------------------- # add netdata user and group -NETDATA_ADDED_TO_DOCKER=0 -NETDATA_ADDED_TO_NGINX=0 -NETDATA_ADDED_TO_VARNISH=0 -NETDATA_ADDED_TO_HAPROXY=0 -NETDATA_ADDED_TO_ADM=0 -NETDATA_ADDED_TO_NSD=0 -NETDATA_ADDED_TO_PROXY=0 -NETDATA_ADDED_TO_SQUID=0 -NETDATA_ADDED_TO_CEPH=0 +NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody" +NETDATA_ADDED_TO_GROUPS="" add_netdata_user_and_group() { - local homedir="${1}" + local homedir="${1}" g if [ ${UID} -eq 0 ] then portable_add_group netdata || return 1 portable_add_user netdata "${homedir}" || return 1 - portable_add_user_to_group docker netdata && NETDATA_ADDED_TO_DOCKER=1 - portable_add_user_to_group nginx netdata && NETDATA_ADDED_TO_NGINX=1 - portable_add_user_to_group varnish netdata && NETDATA_ADDED_TO_VARNISH=1 - portable_add_user_to_group haproxy netdata && NETDATA_ADDED_TO_HAPROXY=1 - portable_add_user_to_group adm netdata && NETDATA_ADDED_TO_ADM=1 - portable_add_user_to_group nsd netdata && NETDATA_ADDED_TO_NSD=1 - portable_add_user_to_group proxy netdata && NETDATA_ADDED_TO_PROXY=1 - portable_add_user_to_group squid netdata && NETDATA_ADDED_TO_SQUID=1 - portable_add_user_to_group ceph netdata && NETDATA_ADDED_TO_CEPH=1 + + for g in ${NETDATA_WANTED_GROUPS} + do + portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}" + done [ ~netdata = / ] && cat <<USERMOD diff --git a/kickstart-static64.sh b/kickstart-static64.sh index 1e1b089b8..1fe360345 100755 --- a/kickstart-static64.sh +++ b/kickstart-static64.sh @@ -1,13 +1,19 @@ #!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later +# shellcheck disable=SC1117,SC2016,SC2034,SC2039,SC2059,SC2086,SC2119,SC2120,SC2129,SC2162,SC2166,SC2181 umask 022 +# make sure UID is set +# shellcheck disable=SC2155 +[ -z "${UID}" ] && export UID="$(id -u)" + # --------------------------------------------------------------------------------------------------------------------- # library functions copied from installer/functions.sh which_cmd() { - which "${1}" 2>/dev/null || \ - command -v "${1}" 2>/dev/null + # shellcheck disable=SC2230 + which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null } check_cmd() { @@ -170,7 +176,7 @@ wget="$(which_cmd 
wget)" progress "Checking the latest version of static build..." -BASE='https://raw.githubusercontent.com/firehol/binary-packages/master' +BASE='https://raw.githubusercontent.com/netdata/binary-packages/master' LATEST= if [ ! -z "${curl}" -a -x "${curl}" ] diff --git a/kickstart.sh b/kickstart.sh index cabe0146c..b493802f2 100755 --- a/kickstart.sh +++ b/kickstart.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later # # Run me with: # @@ -23,6 +24,8 @@ # # 3. install netdata +# shellcheck disable=SC1117,SC2016,SC2034,SC2039,SC2059,SC2086,SC2119,SC2120,SC2129,SC2162,SC2166,SC2181 + umask 022 [ -z "${UID}" ] && UID="$(id -u)" @@ -31,8 +34,8 @@ umask 022 # library functions copied from installer/functions.sh which_cmd() { - which "${1}" 2>/dev/null || \ - command -v "${1}" 2>/dev/null + # shellcheck disable=SC2230 + which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null } check_cmd() { @@ -268,7 +271,7 @@ then if [ "${HAS_BASH4}" = "1" ] then tmp="$(mktemp /tmp/netdata-kickstart-XXXXXX)" - url="https://raw.githubusercontent.com/firehol/netdata-demo-site/master/install-required-packages.sh" + url="https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh" progress "Downloading script to detect required packages..." if [ ! -z "${curl}" ] @@ -327,7 +330,7 @@ then if [ ! -d "${SOURCE_DST}/netdata.git" ] then progress "Downloading netdata source code..." - run ${sudo} ${git} clone https://github.com/firehol/netdata.git "${SOURCE_DST}/netdata.git" || fatal "Cannot download netdata source" + run ${sudo} ${git} clone https://github.com/netdata/netdata.git "${SOURCE_DST}/netdata.git" || fatal "Cannot download netdata source" cd "${SOURCE_DST}/netdata.git" || fatal "Cannot cd to netdata source tree" else progress "Updating netdata source code..." diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am new file mode 100644 index 000000000..d2710f0a3 --- /dev/null +++ b/libnetdata/Makefile.am @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + adaptive_resortable_list \ + avl \ + buffer \ + clocks \ + config \ + dictionary \ + eval \ + locks \ + log \ + popen \ + procfile \ + simple_pattern \ + socket \ + statistical \ + storage_number \ + threads \ + url \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/Makefile.in b/libnetdata/Makefile.in new file mode 100644 index 000000000..4ee89a69b --- /dev/null +++ b/libnetdata/Makefile.in @@ -0,0 +1,664 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + 
esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = 
@OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + adaptive_resortable_list \ + avl \ + buffer \ + clocks \ + config \ + dictionary \ + eval \ + locks \ + log \ + popen \ + procfile \ + simple_pattern \ + socket \ + statistical \ + storage_number \ + threads \ + url \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/README.md b/libnetdata/README.md new file mode 100644 index 000000000..545f95984 --- /dev/null +++ b/libnetdata/README.md @@ -0,0 +1,6 @@ +# libnetdata + +`libnetdata` is a collection of library code that is used by all netdata `C` programs. + + + diff --git a/libnetdata/adaptive_resortable_list/Makefile.am b/libnetdata/adaptive_resortable_list/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/adaptive_resortable_list/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/adaptive_resortable_list/Makefile.in b/libnetdata/adaptive_resortable_list/Makefile.in new file mode 100644 index 000000000..229511083 --- /dev/null +++ b/libnetdata/adaptive_resortable_list/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/adaptive_resortable_list +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = 
@AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = 
@localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/adaptive_resortable_list/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/adaptive_resortable_list/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT:
diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md
new file mode 100644
index 000000000..0ba3ec9b5
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/README.md
@@ -0,0 +1,93 @@
+
+# Adaptive Re-sortable List (ARL)
+
+This library allows netdata to read a series of `name - value` pairs
+in the **fastest possible way**.
+
+ARLs are used all over netdata, as they are the most
+CPU-efficient way to process `/proc` files.
+They are used to
+process both vertical (csv-like) and horizontal (one pair per line) `name - value` pairs.
+
+## How ARL works
+
+It maintains a linked list of all `NAME` keywords, sorted in the
+order found in the data source. The linked list is kept
+sorted at all times; the data source may change at any time and the
+linked list will adapt at the next iteration.
+
+### Initialization
+
+During initialization (just once), the caller:
+
+- calls `arl_create()` to create the ARL
+
+- calls `arl_expect()` multiple times to register the expected keywords
+
+The library will call the `processor()` function (given to
+`arl_create()`) for each expected keyword found.
+The default `processor()` expects `dst` to be an `unsigned long long *`.
+
+Each `name` keyword may have a different `processor()` (by calling
+`arl_expect_custom()` instead of `arl_expect()`).
+
+### Data collection iterations
+
+For each iteration through the data source, the caller:
+
+- calls `arl_begin()` to initiate a data collection iteration.
+  This is to be called just ONCE every time the source is re-evaluated.
+
+- calls `arl_check()` for each entry read from the file.
+
+### Cleanup
+
+When the caller exits:
+
+- calls `arl_free()` to destroy the ARL and free all memory.
+
+### Performance
+
+ARL maintains a list of `name` keywords found in the data source (even the ones
+that are not useful for data collection).
+
+If the data source maintains the same order of the `name-value` pairs, for each
+call to `arl_check()` only one `strcmp()` is executed to verify that the
+expected order has not changed; a counter is incremented and a pointer is advanced.
+So, if the data source has 100 `name-value` pairs and their order remains constant
+over time, 100 successful `strcmp()` calls are executed.
+
+In the unlikely event that an iteration sees the data source in a different order,
+a full search of the remaining keywords is made for each out-of-order keyword. But
+this search uses 32-bit hashes, not string comparisons, so it should still be fast.
+
+When all expectations are satisfied (even in the middle of an iteration),
+the call to `arl_check()` returns 1 to signal the caller to stop the loop,
+saving valuable CPU resources for the rest of the data source.
+
+In the following test, we used alternative methods to process a data source
+like `/proc/meminfo` (already tokenized, in memory) **1M times**, extracting
+the same number of expected metrics each time:
+
+test|code|string comparison|number parsing|duration
+:---:|:---:|:---:|:---:|:---:|
+1|if-else-if-else-if|`strcmp()`|`strtoull()`|4630.337 ms
+2|nested loops|inline `simple_hash()` and `strcmp()`|`strtoull()`|1597.481 ms
+3|nested loops|inline `simple_hash()` and `strcmp()`|`str2ull()`|923.523 ms
+4|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`strtoull()`|854.574 ms
+5|if-else-if-else-if|statement expression `simple_hash()` and `strcmp()`|`strtoull()`|912.013 ms
+6|if-continue|inline `simple_hash()` and `strcmp()`|`strtoull()`|842.279 ms
+7|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`str2ull()`|602.837 ms
+8|ARL|ARL|`strtoull()`|350.360 ms
+9|ARL|ARL|`str2ull()`|157.237 ms
+
+Compared to the unoptimized code (test No 1: 4.6 sec):
+
+- before ARL, netdata used the approach of test No **7** (hashing plus a
+  custom `str2ull()`) to achieve 602 ms.
+- the current ARL implementation is test No **9**, which needs only 157 ms
+  (about 29 times faster than the unoptimized code and about 4 times faster
+  than the previously optimized code).
+
+[Check the source code of this test](../../tests/profile/benchmark-value-pairs.c).
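+
+### Example
+
+A minimal sketch of the calling sequence described above. The
+`read_next_pair()` tokenizer used here is hypothetical; producing the
+`name` and `value` tokens from the source is up to the caller:
+
+```c
+// expected metrics - filled in by the default processor()
+static unsigned long long MemTotal = 0, MemFree = 0;
+
+// hypothetical tokenizer: yields one name/value pair per call
+extern int read_next_pair(const char **name, const char **value);
+
+void collect_meminfo(void) {
+    // initialization - just once
+    ARL_BASE *arl = arl_create("meminfo", NULL, 60); // NULL selects the default processor()
+    arl_expect(arl, "MemTotal", &MemTotal);
+    arl_expect(arl, "MemFree", &MemFree);
+
+    // data collection - on every iteration through the source
+    const char *name, *value;
+    arl_begin(arl);
+    while(read_next_pair(&name, &value)) {
+        // arl_check() returns 1 once all expected values have been
+        // found, so the rest of the source can be skipped
+        if(arl_check(arl, name, value))
+            break;
+    }
+
+    // cleanup - when the caller exits
+    arl_free(arl);
+}
+```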
+
+## Limitations
+
+Do not use ARL if a name/keyword may appear more than once in the
+source data.
diff --git a/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
new file mode 100644
index 000000000..7f4c6c53d
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// the default processor() of the ARL
+// can be overwritten at arl_create()
+inline void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
+    (void)name;
+    (void)hash;
+
+    register unsigned long long *d = dst;
+    *d = str2ull(value);
+    // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
+inline void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst) {
+    (void)name;
+    (void)hash;
+
+    register kernel_uint_t *d = dst;
+    *d = str2kernel_uint_t(value);
+    // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, (unsigned long long)*d);
+}
+
+inline void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst) {
+    (void)name;
+    (void)hash;
+
+    register ssize_t *d = dst;
+    *d = (ssize_t)str2ll(value, NULL);
+    // fprintf(stderr, "name '%s' with hash %u and value '%s' is %zd\n", name, hash, value, *d);
+}
+
+// create a new ARL
+ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks) {
+    ARL_BASE *base = callocz(1, sizeof(ARL_BASE));
+
+    base->name = strdupz(name);
+
+    if(!processor)
+        base->processor = arl_callback_str2ull;
+    else
+        base->processor = processor;
+
+    base->rechecks = rechecks;
+
+    return base;
+}
+
+void arl_free(ARL_BASE *arl_base) {
+    if(unlikely(!arl_base))
+        return;
+
+    while(arl_base->head) {
+        ARL_ENTRY *e = arl_base->head;
+        arl_base->head = e->next;
+
+        freez(e->name);
+#ifdef NETDATA_INTERNAL_CHECKS
+        memset(e, 0, sizeof(ARL_ENTRY));
+#endif
+        freez(e);
+    }
+
+    freez(arl_base->name);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    memset(arl_base, 0, sizeof(ARL_BASE));
+#endif
+
+    freez(arl_base);
+}
+
+void arl_begin(ARL_BASE *base) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    if(likely(base->iteration > 10)) {
+        // do these checks after the ARL has been sorted
+
+        if(unlikely(base->relinkings > (base->expected + base->allocated)))
+            info("ARL '%s' has %zu relinkings with %zu expected and %zu allocated entries. Is the source changing so fast?"
+                , base->name, base->relinkings, base->expected, base->allocated);
+
+        if(unlikely(base->slow > base->fast))
+            info("ARL '%s' has %zu fast searches and %zu slow searches. Is the source really changing so fast?"
+ , base->name, base->fast, base->slow); + + /* + if(unlikely(base->iteration % 60 == 0)) { + info("ARL '%s' statistics: iteration %zu, expected %zu, wanted %zu, allocated %zu, fred %zu, relinkings %zu, found %zu, added %zu, fast %zu, slow %zu" + , base->name + , base->iteration + , base->expected + , base->wanted + , base->allocated + , base->fred + , base->relinkings + , base->found + , base->added + , base->fast + , base->slow + ); + // for(e = base->head; e; e = e->next) fprintf(stderr, "%s ", e->name); + // fprintf(stderr, "\n"); + } + */ + } +#endif + + if(unlikely(base->iteration > 0 && (base->added || (base->iteration % base->rechecks) == 0))) { + int wanted_equals_expected = ((base->iteration % base->rechecks) == 0); + + // fprintf(stderr, "\n\narl_begin() rechecking, added %zu, iteration %zu, rechecks %zu, wanted_equals_expected %d\n\n\n", base->added, base->iteration, base->rechecks, wanted_equals_expected); + + base->added = 0; + base->wanted = (wanted_equals_expected)?base->expected:0; + + ARL_ENTRY *e = base->head; + while(e) { + if(e->flags & ARL_ENTRY_FLAG_FOUND) { + + // remove the found flag + e->flags &= ~ARL_ENTRY_FLAG_FOUND; + + // count it in wanted + if(!wanted_equals_expected && e->flags & ARL_ENTRY_FLAG_EXPECTED) + base->wanted++; + + } + else if(e->flags & ARL_ENTRY_FLAG_DYNAMIC && !(base->head == e && !e->next)) { // not last entry + // we can remove this entry + // it is not found, and it was created because + // it was found in the source file + + // remember the next one + ARL_ENTRY *t = e->next; + + // remove it from the list + if(e->next) e->next->prev = e->prev; + if(e->prev) e->prev->next = e->next; + if(base->head == e) base->head = e->next; + + // free it + freez(e->name); + freez(e); + + // count it + base->fred++; + + // continue + e = t; + continue; + } + + e = e->next; + } + } + + if(unlikely(!base->head)) { + // hm... no nodes at all in the list #1700 + // add a fake one to prevent a crash + // this is better than checking for the existence of nodes all the time + arl_expect(base, "a-really-not-existing-source-keyword", NULL); + } + + base->iteration++; + base->next_keyword = base->head; + base->found = 0; + +} + +// register an expected keyword to the ARL +// together with its destination ( i.e. 
the output of the processor() ) +ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst) { + ARL_ENTRY *e = callocz(1, sizeof(ARL_ENTRY)); + e->name = strdupz(keyword); + e->hash = simple_hash(e->name); + e->processor = (processor)?processor:base->processor; + e->dst = dst; + e->flags = ARL_ENTRY_FLAG_EXPECTED; + e->prev = NULL; + e->next = base->head; + + if(base->head) base->head->prev = e; + else base->next_keyword = e; + + base->head = e; + base->expected++; + base->allocated++; + + base->wanted = base->expected; + + return e; +} + +int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value) { + ARL_ENTRY *e; + + uint32_t hash = simple_hash(s); + + // find if it already exists in the data + for(e = base->head; e ; e = e->next) + if(e->hash == hash && !strcmp(e->name, s)) + break; + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(base->next_keyword && e == base->next_keyword)) + fatal("Internal Error: e == base->last"); +#endif + + if(e) { + // found it in the keywords + + base->relinkings++; + + // run the processor for it + if(unlikely(e->dst)) { + e->processor(e->name, hash, value, e->dst); + base->found++; + } + + // unlink it - we will relink it below + if(e->next) e->next->prev = e->prev; + if(e->prev) e->prev->next = e->next; + + // make sure the head is properly linked + if(base->head == e) + base->head = e->next; + } + else { + // not found + + // create it + e = callocz(1, sizeof(ARL_ENTRY)); + e->name = strdupz(s); + e->hash = hash; + e->flags = ARL_ENTRY_FLAG_DYNAMIC; + + base->allocated++; + base->added++; + } + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(base->iteration % 60 == 0 && e->flags & ARL_ENTRY_FLAG_FOUND)) + info("ARL '%s': entry '%s' is already found. 
Did you forget to call arl_begin()?", base->name, s); +#endif + + e->flags |= ARL_ENTRY_FLAG_FOUND; + + // link it here + e->next = base->next_keyword; + if(base->next_keyword) { + e->prev = base->next_keyword->prev; + base->next_keyword->prev = e; + + if(e->prev) + e->prev->next = e; + + if(base->head == base->next_keyword) + base->head = e; + } + else { + e->prev = NULL; + + if(!base->head) + base->head = e; + } + + // prepare the next iteration + base->next_keyword = e->next; + if(unlikely(!base->next_keyword)) + base->next_keyword = base->head; + + if(unlikely(base->found == base->wanted)) { + // fprintf(stderr, "FOUND ALL WANTED 1: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected); + return 1; + } + + return 0; +} diff --git a/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h new file mode 100644 index 000000000..011ee73d9 --- /dev/null +++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +#ifndef NETDATA_ADAPTIVE_RESORTABLE_LIST_H +#define NETDATA_ADAPTIVE_RESORTABLE_LIST_H 1 + +#define ARL_ENTRY_FLAG_FOUND 0x01 // the entry has been found in the source data +#define ARL_ENTRY_FLAG_EXPECTED 0x02 // the entry is expected by the program +#define ARL_ENTRY_FLAG_DYNAMIC 0x04 // the entry was dynamically allocated, from source data + +typedef struct arl_entry { + char *name; // the keywords + uint32_t hash; // the hash of the keyword + + void *dst; // the dst to pass to the processor + + uint8_t flags; // ARL_ENTRY_FLAG_* + + // the processor to do the job + void (*processor)(const char *name, uint32_t hash, const char *value, void *dst); + + // double linked list for fast re-linkings + struct arl_entry *prev, *next; +} ARL_ENTRY; + +typedef struct arl_base { + char *name; + + size_t iteration; // incremented on each iteration (arl_begin()) + size_t found; // the number of expected keywords found in this iteration + size_t expected; // the number of expected keywords + size_t wanted; // the number of wanted keywords + // i.e. the number of keywords found and expected + + size_t relinkings; // the number of relinkings we have made so far + + size_t allocated; // the number of keywords allocated + size_t fred; // the number of keywords cleaned up + + size_t rechecks; // the number of iterations between re-checks of the + // wanted number of keywords + // this is only needed in cases where the source + // is having less lines over time. + + size_t added; // it is non-zero if new keywords have been added + // this is only needed to detect new lines have + // been added to the file, over time. + +#ifdef NETDATA_INTERNAL_CHECKS + size_t fast; // the number of times we have taken the fast path + size_t slow; // the number of times we have taken the slow path +#endif + + // the processor to do the job + void (*processor)(const char *name, uint32_t hash, const char *value, void *dst); + + // the linked list of the keywords + ARL_ENTRY *head; + + // since we keep the list of keywords sorted (as found in the source data) + // this is next keyword that we expect to find in the source data. 
+ ARL_ENTRY *next_keyword; +} ARL_BASE; + +// create a new ARL +extern ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks); + +// free an ARL +extern void arl_free(ARL_BASE *arl_base); + +// register an expected keyword to the ARL +// together with its destination ( i.e. the output of the processor() ) +extern ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst); +#define arl_expect(base, keyword, dst) arl_expect_custom(base, keyword, NULL, dst) + +// an internal call to complete the check() call +extern int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value); + +// begin an ARL iteration +extern void arl_begin(ARL_BASE *base); + +extern void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst); +extern void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst); +extern void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst); + +// check a keyword against the ARL +// this is to be called for each keyword read from source data +// s = the keyword, as collected +// src = the src data to be passed to the processor +// it is defined in the header file in order to be inlined +static inline int arl_check(ARL_BASE *base, const char *keyword, const char *value) { + ARL_ENTRY *e = base->next_keyword; + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely((base->fast + base->slow) % (base->expected + base->allocated) == 0 && (base->fast + base->slow) > (base->expected + base->allocated) * base->iteration)) + info("ARL '%s': Did you forget to call arl_begin()?", base->name); +#endif + + // it should be the first entry (pointed by base->next_keyword) + if(likely(!strcmp(keyword, e->name))) { + // it is + +#ifdef NETDATA_INTERNAL_CHECKS + base->fast++; +#endif + + e->flags |= ARL_ENTRY_FLAG_FOUND; + + // execute the processor + if(unlikely(e->dst)) { + e->processor(e->name, e->hash, value, e->dst); + base->found++; + } + + // be prepared for the next iteration + base->next_keyword = e->next; + if(unlikely(!base->next_keyword)) + base->next_keyword = base->head; + + // stop if we collected all the values for this iteration + if(unlikely(base->found == base->wanted)) { + // fprintf(stderr, "FOUND ALL WANTED 2: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected); + return 1; + } + + return 0; + } + +#ifdef NETDATA_INTERNAL_CHECKS + base->slow++; +#endif + + // we read from source, a not-expected keyword + return arl_find_or_create_and_relink(base, keyword, value); +} + +#endif //NETDATA_ADAPTIVE_RESORTABLE_LIST_H diff --git a/libnetdata/avl/Makefile.am b/libnetdata/avl/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/avl/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/avl/Makefile.in b/libnetdata/avl/Makefile.in new file mode 100644 index 000000000..5982eb85b --- /dev/null +++ b/libnetdata/avl/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. 
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/avl +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = 
$(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ 
+exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/avl/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/avl/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT:
diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md
new file mode 100644
index 000000000..48212a715
--- /dev/null
+++ b/libnetdata/avl/README.md
@@ -0,0 +1,11 @@
+# AVL
+
+AVL is a library for indexing objects in balanced binary (AVL) trees.
+
+`avl_insert()`, `avl_remove()` and `avl_search()` are adaptations
+of the AVL algorithm found in `libavl` v2.0.3, modified so that they do not
+use any memory allocations and their memory footprint is optimized
+(by eliminating unnecessary data members).
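+
+For example, the caller embeds the `avl` node in its own structure and
+supplies a comparison function, so the library never allocates memory
+itself. A minimal sketch follows; the `MY_ITEM` structure and its
+comparator are illustrative, not part of the library:
+
+```c
+#include "avl.h"
+#include <string.h>
+
+typedef struct my_item {
+    avl avl;          // the avl node - kept as the first member, so casts work
+    const char *name; // the caller's payload
+} MY_ITEM;
+
+// comparison function, as expected by avl_init()
+static int compare_name(void *a, void *b) {
+    return strcmp(((MY_ITEM *)a)->name, ((MY_ITEM *)b)->name);
+}
+
+void example(void) {
+    avl_tree index;
+    avl_init(&index, compare_name);
+
+    // the item is caller-allocated; the tree just links it
+    static MY_ITEM cpu = { .name = "cpu" };
+    avl *ret = avl_insert(&index, (avl *)&cpu); // returns &cpu.avl, or a pre-existing equal item
+
+    // search using a temporary key item
+    MY_ITEM key = { .name = "cpu" };
+    MY_ITEM *found = (MY_ITEM *)avl_search(&index, (avl *)&key);
+
+    (void)ret; (void)found; // silence unused-variable warnings in this sketch
+}
+```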
+
+In addition to the above, this version of AVL provides lock-protected
+variants of these operations, as well as traversal functions.
\ No newline at end of file
diff --git a/libnetdata/avl/avl.c b/libnetdata/avl/avl.c
new file mode 100644
index 000000000..c44bef307
--- /dev/null
+++ b/libnetdata/avl/avl.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+/* ------------------------------------------------------------------------- */
+/*
+ * avl_insert(), avl_remove() and avl_search()
+ * are adaptations (by Costa Tsaousis) of the AVL algorithm found in libavl
+ * v2.0.3, so that they do not use any memory allocations and their memory
+ * footprint is optimized (by eliminating non-necessary data members).
+ *
+ * libavl - library for manipulation of binary trees.
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004 Free Software
+ * Foundation, Inc.
+*/
+
+
+/* Search |tree| for an item matching |item|, and return it if found.
+   Otherwise return |NULL|. */
+avl *avl_search(avl_tree *tree, avl *item) {
+    avl *p;
+
+    // assert (tree != NULL && item != NULL);
+
+    for (p = tree->root; p != NULL; ) {
+        int cmp = tree->compar(item, p);
+
+        if (cmp < 0)
+            p = p->avl_link[0];
+        else if (cmp > 0)
+            p = p->avl_link[1];
+        else /* |cmp == 0| */
+            return p;
+    }
+
+    return NULL;
+}
+
+/* Inserts |item| into |tree| and returns a pointer to |item|'s address.
+   If a duplicate item is found in the tree,
+   returns a pointer to the duplicate without inserting |item|.
+ */
+avl *avl_insert(avl_tree *tree, avl *item) {
+    avl *y, *z; /* Top node to update balance factor, and parent. */
+    avl *p, *q; /* Iterator, and parent. */
+    avl *n; /* Newly inserted node. */
+    avl *w; /* New root of rebalanced subtree. */
+    unsigned char dir; /* Direction to descend. */
+
+    unsigned char da[AVL_MAX_HEIGHT]; /* Cached comparison results. */
+    int k = 0; /* Number of cached results.
*/ + + // assert(tree != NULL && item != NULL); + + z = (avl *) &tree->root; + y = tree->root; + dir = 0; + for (q = z, p = y; p != NULL; q = p, p = p->avl_link[dir]) { + int cmp = tree->compar(item, p); + if (cmp == 0) + return p; + + if (p->avl_balance != 0) + z = q, y = p, k = 0; + da[k++] = dir = (unsigned char)(cmp > 0); + } + + n = q->avl_link[dir] = item; + + // tree->avl_count++; + n->avl_link[0] = n->avl_link[1] = NULL; + n->avl_balance = 0; + if (y == NULL) return n; + + for (p = y, k = 0; p != n; p = p->avl_link[da[k]], k++) + if (da[k] == 0) + p->avl_balance--; + else + p->avl_balance++; + + if (y->avl_balance == -2) { + avl *x = y->avl_link[0]; + if (x->avl_balance == -1) { + w = x; + y->avl_link[0] = x->avl_link[1]; + x->avl_link[1] = y; + x->avl_balance = y->avl_balance = 0; + } + else { + // assert (x->avl_balance == +1); + w = x->avl_link[1]; + x->avl_link[1] = w->avl_link[0]; + w->avl_link[0] = x; + y->avl_link[0] = w->avl_link[1]; + w->avl_link[1] = y; + if (w->avl_balance == -1) + x->avl_balance = 0, y->avl_balance = +1; + else if (w->avl_balance == 0) + x->avl_balance = y->avl_balance = 0; + else /* |w->avl_balance == +1| */ + x->avl_balance = -1, y->avl_balance = 0; + w->avl_balance = 0; + } + } + else if (y->avl_balance == +2) { + avl *x = y->avl_link[1]; + if (x->avl_balance == +1) { + w = x; + y->avl_link[1] = x->avl_link[0]; + x->avl_link[0] = y; + x->avl_balance = y->avl_balance = 0; + } + else { + // assert (x->avl_balance == -1); + w = x->avl_link[0]; + x->avl_link[0] = w->avl_link[1]; + w->avl_link[1] = x; + y->avl_link[1] = w->avl_link[0]; + w->avl_link[0] = y; + if (w->avl_balance == +1) + x->avl_balance = 0, y->avl_balance = -1; + else if (w->avl_balance == 0) + x->avl_balance = y->avl_balance = 0; + else /* |w->avl_balance == -1| */ + x->avl_balance = +1, y->avl_balance = 0; + w->avl_balance = 0; + } + } + else return n; + + z->avl_link[y != z->avl_link[0]] = w; + + // tree->avl_generation++; + return n; +} + +/* Deletes from |tree| and returns an item matching |item|. + Returns a null pointer if no matching item found. */ +avl *avl_remove(avl_tree *tree, avl *item) { + /* Stack of nodes. */ + avl *pa[AVL_MAX_HEIGHT]; /* Nodes. */ + unsigned char da[AVL_MAX_HEIGHT]; /* |avl_link[]| indexes. */ + int k; /* Stack pointer. */ + + avl *p; /* Traverses tree to find node to delete. */ + int cmp; /* Result of comparison between |item| and |p|. 
*/ + + // assert (tree != NULL && item != NULL); + + k = 0; + p = (avl *) &tree->root; + for(cmp = -1; cmp != 0; cmp = tree->compar(item, p)) { + unsigned char dir = (unsigned char)(cmp > 0); + + pa[k] = p; + da[k++] = dir; + + p = p->avl_link[dir]; + if(p == NULL) return NULL; + } + + item = p; + + if (p->avl_link[1] == NULL) + pa[k - 1]->avl_link[da[k - 1]] = p->avl_link[0]; + else { + avl *r = p->avl_link[1]; + if (r->avl_link[0] == NULL) { + r->avl_link[0] = p->avl_link[0]; + r->avl_balance = p->avl_balance; + pa[k - 1]->avl_link[da[k - 1]] = r; + da[k] = 1; + pa[k++] = r; + } + else { + avl *s; + int j = k++; + + for (;;) { + da[k] = 0; + pa[k++] = r; + s = r->avl_link[0]; + if (s->avl_link[0] == NULL) break; + + r = s; + } + + s->avl_link[0] = p->avl_link[0]; + r->avl_link[0] = s->avl_link[1]; + s->avl_link[1] = p->avl_link[1]; + s->avl_balance = p->avl_balance; + + pa[j - 1]->avl_link[da[j - 1]] = s; + da[j] = 1; + pa[j] = s; + } + } + + // assert (k > 0); + while (--k > 0) { + avl *y = pa[k]; + + if (da[k] == 0) { + y->avl_balance++; + if (y->avl_balance == +1) break; + else if (y->avl_balance == +2) { + avl *x = y->avl_link[1]; + if (x->avl_balance == -1) { + avl *w; + // assert (x->avl_balance == -1); + w = x->avl_link[0]; + x->avl_link[0] = w->avl_link[1]; + w->avl_link[1] = x; + y->avl_link[1] = w->avl_link[0]; + w->avl_link[0] = y; + if (w->avl_balance == +1) + x->avl_balance = 0, y->avl_balance = -1; + else if (w->avl_balance == 0) + x->avl_balance = y->avl_balance = 0; + else /* |w->avl_balance == -1| */ + x->avl_balance = +1, y->avl_balance = 0; + w->avl_balance = 0; + pa[k - 1]->avl_link[da[k - 1]] = w; + } + else { + y->avl_link[1] = x->avl_link[0]; + x->avl_link[0] = y; + pa[k - 1]->avl_link[da[k - 1]] = x; + if (x->avl_balance == 0) { + x->avl_balance = -1; + y->avl_balance = +1; + break; + } + else x->avl_balance = y->avl_balance = 0; + } + } + } + else + { + y->avl_balance--; + if (y->avl_balance == -1) break; + else if (y->avl_balance == -2) { + avl *x = y->avl_link[0]; + if (x->avl_balance == +1) { + avl *w; + // assert (x->avl_balance == +1); + w = x->avl_link[1]; + x->avl_link[1] = w->avl_link[0]; + w->avl_link[0] = x; + y->avl_link[0] = w->avl_link[1]; + w->avl_link[1] = y; + if (w->avl_balance == -1) + x->avl_balance = 0, y->avl_balance = +1; + else if (w->avl_balance == 0) + x->avl_balance = y->avl_balance = 0; + else /* |w->avl_balance == +1| */ + x->avl_balance = -1, y->avl_balance = 0; + w->avl_balance = 0; + pa[k - 1]->avl_link[da[k - 1]] = w; + } + else { + y->avl_link[0] = x->avl_link[1]; + x->avl_link[1] = y; + pa[k - 1]->avl_link[da[k - 1]] = x; + if (x->avl_balance == 0) { + x->avl_balance = +1; + y->avl_balance = -1; + break; + } + else x->avl_balance = y->avl_balance = 0; + } + } + } + } + + // tree->avl_count--; + // tree->avl_generation++; + return item; +} + +/* ------------------------------------------------------------------------- */ +// below are functions by (C) Costa Tsaousis + +// --------------------------- +// traversing + +int avl_walker(avl *node, int (*callback)(void * /*entry*/, void * /*data*/), void *data) { + int total = 0, ret = 0; + + if(node->avl_link[0]) { + ret = avl_walker(node->avl_link[0], callback, data); + if(ret < 0) return ret; + total += ret; + } + + ret = callback(node, data); + if(ret < 0) return ret; + total += ret; + + if(node->avl_link[1]) { + ret = avl_walker(node->avl_link[1], callback, data); + if (ret < 0) return ret; + total += ret; + } + + return total; +} + +int avl_traverse(avl_tree *tree, int 
(*callback)(void * /*entry*/, void * /*data*/), void *data) { + if(tree->root) + return avl_walker(tree->root, callback, data); + else + return 0; +} + +// --------------------------- +// locks + +void avl_read_lock(avl_tree_lock *t) { +#ifndef AVL_WITHOUT_PTHREADS +#ifdef AVL_LOCK_WITH_MUTEX + netdata_mutex_lock(&t->mutex); +#else + netdata_rwlock_rdlock(&t->rwlock); +#endif +#endif /* AVL_WITHOUT_PTHREADS */ +} + +void avl_write_lock(avl_tree_lock *t) { +#ifndef AVL_WITHOUT_PTHREADS +#ifdef AVL_LOCK_WITH_MUTEX + netdata_mutex_lock(&t->mutex); +#else + netdata_rwlock_wrlock(&t->rwlock); +#endif +#endif /* AVL_WITHOUT_PTHREADS */ +} + +void avl_unlock(avl_tree_lock *t) { +#ifndef AVL_WITHOUT_PTHREADS +#ifdef AVL_LOCK_WITH_MUTEX + netdata_mutex_unlock(&t->mutex); +#else + netdata_rwlock_unlock(&t->rwlock); +#endif +#endif /* AVL_WITHOUT_PTHREADS */ +} + +// --------------------------- +// operations with locking + +void avl_init_lock(avl_tree_lock *tree, int (*compar)(void * /*a*/, void * /*b*/)) { + avl_init(&tree->avl_tree, compar); + +#ifndef AVL_WITHOUT_PTHREADS + int lock; + +#ifdef AVL_LOCK_WITH_MUTEX + lock = netdata_mutex_init(&tree->mutex, NULL); +#else + lock = netdata_rwlock_init(&tree->rwlock); +#endif + + if(lock != 0) + fatal("Failed to initialize AVL mutex/rwlock, error: %d", lock); + +#endif /* AVL_WITHOUT_PTHREADS */ +} + +avl *avl_search_lock(avl_tree_lock *tree, avl *item) { + avl_read_lock(tree); + avl *ret = avl_search(&tree->avl_tree, item); + avl_unlock(tree); + return ret; +} + +avl * avl_remove_lock(avl_tree_lock *tree, avl *item) { + avl_write_lock(tree); + avl *ret = avl_remove(&tree->avl_tree, item); + avl_unlock(tree); + return ret; +} + +avl *avl_insert_lock(avl_tree_lock *tree, avl *item) { + avl_write_lock(tree); + avl * ret = avl_insert(&tree->avl_tree, item); + avl_unlock(tree); + return ret; +} + +int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) { + int ret; + avl_read_lock(tree); + ret = avl_traverse(&tree->avl_tree, callback, data); + avl_unlock(tree); + return ret; +} + +void avl_init(avl_tree *tree, int (*compar)(void * /*a*/, void * /*b*/)) { + tree->root = NULL; + tree->compar = compar; +} + +// ------------------ diff --git a/libnetdata/avl/avl.h b/libnetdata/avl/avl.h new file mode 100644 index 000000000..070bb3d3d --- /dev/null +++ b/libnetdata/avl/avl.h @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later + +#ifndef _AVL_H +#define _AVL_H 1 + +#include "../libnetdata.h" + +/* Maximum AVL tree height. */ +#ifndef AVL_MAX_HEIGHT +#define AVL_MAX_HEIGHT 92 +#endif + +#ifndef AVL_WITHOUT_PTHREADS +#include <pthread.h> + +// #define AVL_LOCK_WITH_MUTEX 1 + +#ifdef AVL_LOCK_WITH_MUTEX +#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER +#else /* AVL_LOCK_WITH_MUTEX */ +#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER +#endif /* AVL_LOCK_WITH_MUTEX */ + +#else /* AVL_WITHOUT_PTHREADS */ +#define AVL_LOCK_INITIALIZER +#endif /* AVL_WITHOUT_PTHREADS */ + +/* Data structures */ + +/* One element of the AVL tree */ +typedef struct avl { + struct avl *avl_link[2]; /* Subtrees. */ + signed char avl_balance; /* Balance factor. 
*/ +} avl; + +/* An AVL tree */ +typedef struct avl_tree { + avl *root; + int (*compar)(void *a, void *b); +} avl_tree; + +typedef struct avl_tree_lock { + avl_tree avl_tree; + +#ifndef AVL_WITHOUT_PTHREADS +#ifdef AVL_LOCK_WITH_MUTEX + netdata_mutex_t mutex; +#else /* AVL_LOCK_WITH_MUTEX */ + netdata_rwlock_t rwlock; +#endif /* AVL_LOCK_WITH_MUTEX */ +#endif /* AVL_WITHOUT_PTHREADS */ +} avl_tree_lock; + +/* Public methods */ + +/* Insert element a into the AVL tree t + * returns the added element a, or a pointer the + * element that is equal to a (as returned by t->compar()) + * a is linked directly to the tree, so it has to + * be properly allocated by the caller. + */ +avl *avl_insert_lock(avl_tree_lock *tree, avl *item) NEVERNULL WARNUNUSED; +avl *avl_insert(avl_tree *tree, avl *item) NEVERNULL WARNUNUSED; + +/* Remove an element a from the AVL tree t + * returns a pointer to the removed element + * or NULL if an element equal to a is not found + * (equal as returned by t->compar()) + */ +avl *avl_remove_lock(avl_tree_lock *tree, avl *item) WARNUNUSED; +avl *avl_remove(avl_tree *tree, avl *item) WARNUNUSED; + +/* Find the element into the tree that equal to a + * (equal as returned by t->compar()) + * returns NULL is no element is equal to a + */ +avl *avl_search_lock(avl_tree_lock *tree, avl *item); +avl *avl_search(avl_tree *tree, avl *item); + +/* Initialize the avl_tree_lock + */ +void avl_init_lock(avl_tree_lock *tree, int (*compar)(void *a, void *b)); +void avl_init(avl_tree *tree, int (*compar)(void *a, void *b)); + + +int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void *entry, void *data), void *data); +int avl_traverse(avl_tree *tree, int (*callback)(void *entry, void *data), void *data); + +#endif /* avl.h */ diff --git a/libnetdata/buffer/Makefile.am b/libnetdata/buffer/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/buffer/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/buffer/Makefile.in b/libnetdata/buffer/Makefile.in new file mode 100644 index 000000000..21bbd3cc9 --- /dev/null +++ b/libnetdata/buffer/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/buffer +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/buffer/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/buffer/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \
+	  fi; \
+	  cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	else \
+	  test -f "$(distdir)/$$file" \
+	  || cp -p $$d/$$file "$(distdir)/$$file" \
+	  || exit 1; \
+	fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md
new file mode 100644
index 000000000..a7cfef89d
--- /dev/null
+++ b/libnetdata/buffer/README.md
@@ -0,0 +1,11 @@
+# BUFFER
+
+`BUFFER` is a convenience library for working with strings in `C`.
+Mainly, `BUFFER`s eliminate the need for tracking the string length, thus providing
+a safe alternative for string operations.
+
+Also, they are super fast at printing and appending data to the string, and their
+`buffer_strlen()` is just a lookup (it does not traverse the string).
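+
+A minimal usage sketch (based only on the API declared in `buffer.h` below;
+the size and strings are arbitrary and error handling is omitted):
+
+```c
+BUFFER *wb = buffer_create(1024);       // initial allocation; grows as needed
+
+buffer_strcat(wb, "hello");             // append a C string
+buffer_sprintf(wb, ", %s!", "world");   // printf-like append, grows as needed
+
+size_t len = buffer_strlen(wb);         // O(1): just reads the stored length
+const char *s = buffer_tostring(wb);    // guaranteed NUL-terminated view
+
+buffer_free(wb);                        // frees the buffer and its data
+```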
+
+Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or
+to backend databases.
\ No newline at end of file
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
new file mode 100644
index 000000000..50672324b
--- /dev/null
+++ b/libnetdata/buffer/buffer.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define BUFFER_OVERFLOW_EOF "EOF"
+
+static inline void buffer_overflow_init(BUFFER *b)
+{
+    b->buffer[b->size] = '\0';
+    strcpy(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define buffer_overflow_check(b) _buffer_overflow_check(b, __FILE__, __FUNCTION__, __LINE__)
+#else
+#define buffer_overflow_check(b)
+#endif
+
+static inline void _buffer_overflow_check(BUFFER *b, const char *file, const char *function, const unsigned long line)
+{
+    if(b->len > b->size) {
+        error("BUFFER: length %zu is above size %zu, at line %lu, at function %s() of file '%s'.", b->len, b->size, line, function, file);
+        b->len = b->size;
+    }
+
+    if(b->buffer[b->size] != '\0' || strcmp(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF) != 0) {
+        error("BUFFER: detected overflow at line %lu, at function %s() of file '%s'.", line, function, file);
+        buffer_overflow_init(b);
+    }
+}
+
+
+void buffer_reset(BUFFER *wb)
+{
+    buffer_flush(wb);
+
+    wb->contenttype = CT_TEXT_PLAIN;
+    wb->options = 0;
+    wb->date = 0;
+    wb->expires = 0;
+
+    buffer_overflow_check(wb);
+}
+
+const char *buffer_tostring(BUFFER *wb)
+{
+    buffer_need_bytes(wb, 1);
+    wb->buffer[wb->len] = '\0';
+
+    buffer_overflow_check(wb);
+
+    return(wb->buffer);
+}
+
+void buffer_char_replace(BUFFER *wb, char from, char to)
+{
+    char *s = wb->buffer, *end = &wb->buffer[wb->len];
+
+    while(s != end) {
+        if(*s == from) *s = to;
+        s++;
+    }
+
+    buffer_overflow_check(wb);
+}
+
+// This trick seems to give an 80% speed increase in 32bit systems
+// print_number_llu_r() will just print the digits up to the
+// point the remaining value fits in 32 bits, and then calls
+// print_number_lu_r() to print the rest with 32 bit arithmetic.
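+//
+// For example, with uvalue = 10000000000 (just above 0xffffffff):
+// print_number_llu_r() emits one digit using 64-bit arithmetic, the
+// remainder (1000000000) then fits in 32 bits, so print_number_lu_r()
+// emits the remaining ten digits with cheaper 32-bit divisions.
+// Both helpers write the digits in reverse order and do not terminate
+// the string; buffer_print_llu() below reverses and terminates the result.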
+
+inline char *print_number_lu_r(char *str, unsigned long uvalue) {
+    char *wstr = str;
+
+    // print each digit
+    do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+    return wstr;
+}
+
+inline char *print_number_llu_r(char *str, unsigned long long uvalue) {
+    char *wstr = str;
+
+    // print each digit
+    do *wstr++ = (char)('0' + (uvalue % 10)); while((uvalue /= 10) && uvalue > (unsigned long long)0xffffffff);
+    if(uvalue) return print_number_lu_r(wstr, uvalue);
+    return wstr;
+}
+
+inline char *print_number_llu_r_smart(char *str, unsigned long long uvalue) {
+#ifdef ENVIRONMENT32
+    if(uvalue > (unsigned long long)0xffffffff)
+        str = print_number_llu_r(str, uvalue);
+    else
+        str = print_number_lu_r(str, uvalue);
+#else
+    do *str++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+    return str;
+}
+
+void buffer_print_llu(BUFFER *wb, unsigned long long uvalue)
+{
+    buffer_need_bytes(wb, 50);
+
+    char *str = &wb->buffer[wb->len];
+    char *wstr = str;
+
+#ifdef ENVIRONMENT32
+    if(uvalue > (unsigned long long)0xffffffff)
+        wstr = print_number_llu_r(wstr, uvalue);
+    else
+        wstr = print_number_lu_r(wstr, uvalue);
+#else
+    do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+    // terminate it
+    *wstr = '\0';
+
+    // reverse it
+    char *begin = str, *end = wstr - 1, aux;
+    while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
+
+    // return the buffer length
+    wb->len += wstr - str;
+}
+
+void buffer_strcat(BUFFER *wb, const char *txt)
+{
+    // buffer_sprintf(wb, "%s", txt);
+
+    if(unlikely(!txt || !*txt)) return;
+
+    buffer_need_bytes(wb, 1);
+
+    char *s = &wb->buffer[wb->len], *start, *end = &wb->buffer[wb->size];
+    size_t len = wb->len;
+
+    start = s;
+    while(*txt && s != end)
+        *s++ = *txt++;
+
+    len += s - start;
+
+    wb->len = len;
+    buffer_overflow_check(wb);
+
+    if(*txt) {
+        debug(D_WEB_BUFFER, "strcat(): increasing web_buffer at position %zu, size = %zu\n", wb->len, wb->size);
+        len = strlen(txt);
+        buffer_increase(wb, len);
+        buffer_strcat(wb, txt);
+    }
+    else {
+        // terminate the string
+        // without increasing the length
+        buffer_need_bytes(wb, (size_t)1);
+        wb->buffer[wb->len] = '\0';
+    }
+}
+
+void buffer_strcat_htmlescape(BUFFER *wb, const char *txt)
+{
+    while(*txt) {
+        switch(*txt) {
+            case '&': buffer_strcat(wb, "&amp;"); break;
+            case '<': buffer_strcat(wb, "&lt;"); break;
+            case '>': buffer_strcat(wb, "&gt;"); break;
+            case '"': buffer_strcat(wb, "&quot;"); break;
+            case '/': buffer_strcat(wb, "&#x2F;"); break;
+            case '\'': buffer_strcat(wb, "&#x27;"); break;
+            default: {
+                buffer_need_bytes(wb, 1);
+                wb->buffer[wb->len++] = *txt;
+            }
+        }
+        txt++;
+    }
+
+    buffer_overflow_check(wb);
+}
+
+void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...)
+{
+    if(unlikely(!fmt || !*fmt)) return;
+
+    buffer_need_bytes(wb, len + 1);
+
+    va_list args;
+    va_start(args, fmt);
+    wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+    va_end(args);
+
+    buffer_overflow_check(wb);
+
+    // the buffer is \0 terminated by vsnprintfz
+}
+
+void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args)
+{
+    if(unlikely(!fmt || !*fmt)) return;
+
+    buffer_need_bytes(wb, 2);
+
+    size_t len = wb->size - wb->len - 1;
+
+    wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+
+    buffer_overflow_check(wb);
+
+    // the buffer is \0 terminated by vsnprintfz
+}
+
+void buffer_sprintf(BUFFER *wb, const char *fmt, ...)
+{ + if(unlikely(!fmt || !*fmt)) return; + + va_list args; + size_t wrote = 0, need = 2, multiplier = 0, len; + + do { + need += wrote + multiplier * WEB_DATA_LENGTH_INCREASE_STEP; + multiplier++; + + debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote); + buffer_need_bytes(wb, need); + + len = wb->size - wb->len - 1; + + va_start(args, fmt); + wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args); + va_end(args); + + } while(wrote >= len); + + wb->len += wrote; + + // the buffer is \0 terminated by vsnprintf +} + + +void buffer_rrd_value(BUFFER *wb, calculated_number value) +{ + buffer_need_bytes(wb, 50); + + if(isnan(value) || isinf(value)) { + buffer_strcat(wb, "null"); + return; + } + else + wb->len += print_calculated_number(&wb->buffer[wb->len], value); + + // terminate it + buffer_need_bytes(wb, 1); + wb->buffer[wb->len] = '\0'; + + buffer_overflow_check(wb); +} + +// generate a javascript date, the fastest possible way... +void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds) +{ + // 10 20 30 = 35 + // 01234567890123456789012345678901234 + // Date(2014,04,01,03,28,20) + + buffer_need_bytes(wb, 30); + + char *b = &wb->buffer[wb->len], *p; + unsigned int *q = (unsigned int *)b; + + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + *q++ = 0x65746144; // "Date" backwards. + #else + *q++ = 0x44617465; // "Date" + #endif + p = (char *)q; + + *p++ = '('; + *p++ = '0' + year / 1000; year %= 1000; + *p++ = '0' + year / 100; year %= 100; + *p++ = '0' + year / 10; + *p++ = '0' + year % 10; + *p++ = ','; + *p = '0' + month / 10; if (*p != '0') p++; + *p++ = '0' + month % 10; + *p++ = ','; + *p = '0' + day / 10; if (*p != '0') p++; + *p++ = '0' + day % 10; + *p++ = ','; + *p = '0' + hours / 10; if (*p != '0') p++; + *p++ = '0' + hours % 10; + *p++ = ','; + *p = '0' + minutes / 10; if (*p != '0') p++; + *p++ = '0' + minutes % 10; + *p++ = ','; + *p = '0' + seconds / 10; if (*p != '0') p++; + *p++ = '0' + seconds % 10; + + unsigned short *r = (unsigned short *)p; + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + *r++ = 0x0029; // ")\0" backwards. + #else + *r++ = 0x2900; // ")\0" + #endif + + wb->len += (size_t)((char *)r - b - 1); + + // terminate it + wb->buffer[wb->len] = '\0'; + buffer_overflow_check(wb); +} + +// generate a date, the fastest possible way... 
+void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds) +{ + // 10 20 30 = 35 + // 01234567890123456789012345678901234 + // 2014-04-01 03:28:20 + + buffer_need_bytes(wb, 36); + + char *b = &wb->buffer[wb->len]; + char *p = b; + + *p++ = '0' + year / 1000; year %= 1000; + *p++ = '0' + year / 100; year %= 100; + *p++ = '0' + year / 10; + *p++ = '0' + year % 10; + *p++ = '-'; + *p++ = '0' + month / 10; + *p++ = '0' + month % 10; + *p++ = '-'; + *p++ = '0' + day / 10; + *p++ = '0' + day % 10; + *p++ = ' '; + *p++ = '0' + hours / 10; + *p++ = '0' + hours % 10; + *p++ = ':'; + *p++ = '0' + minutes / 10; + *p++ = '0' + minutes % 10; + *p++ = ':'; + *p++ = '0' + seconds / 10; + *p++ = '0' + seconds % 10; + *p = '\0'; + + wb->len += (size_t)(p - b); + + // terminate it + wb->buffer[wb->len] = '\0'; + buffer_overflow_check(wb); +} + +BUFFER *buffer_create(size_t size) +{ + BUFFER *b; + + debug(D_WEB_BUFFER, "Creating new web buffer of size %zu.", size); + + b = callocz(1, sizeof(BUFFER)); + b->buffer = mallocz(size + sizeof(BUFFER_OVERFLOW_EOF) + 2); + b->buffer[0] = '\0'; + b->size = size; + b->contenttype = CT_TEXT_PLAIN; + buffer_overflow_init(b); + buffer_overflow_check(b); + + return(b); +} + +void buffer_free(BUFFER *b) { + if(unlikely(!b)) return; + + buffer_overflow_check(b); + + debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size); + + freez(b->buffer); + freez(b); +} + +void buffer_increase(BUFFER *b, size_t free_size_required) { + buffer_overflow_check(b); + + size_t left = b->size - b->len; + + if(left >= free_size_required) return; + + size_t increase = free_size_required - left; + if(increase < WEB_DATA_LENGTH_INCREASE_STEP) increase = WEB_DATA_LENGTH_INCREASE_STEP; + + debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + increase); + + b->buffer = reallocz(b->buffer, b->size + increase + sizeof(BUFFER_OVERFLOW_EOF) + 2); + b->size += increase; + + buffer_overflow_init(b); + buffer_overflow_check(b); +} diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h new file mode 100644 index 000000000..8e431bfd5 --- /dev/null +++ b/libnetdata/buffer/buffer.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_BUFFER_H +#define NETDATA_WEB_BUFFER_H 1 + +#include "../libnetdata.h" + +#define WEB_DATA_LENGTH_INCREASE_STEP 1024 + +typedef struct web_buffer { + size_t size; // allocation size of buffer, in bytes + size_t len; // current data length in buffer, in bytes + char *buffer; // the buffer itself + uint8_t contenttype; // the content type of the data in the buffer + uint8_t options; // options related to the content + time_t date; // the timestamp this content has been generated + time_t expires; // the timestamp this content expires +} BUFFER; + +// options +#define WB_CONTENT_CACHEABLE 1 +#define WB_CONTENT_NO_CACHEABLE 2 + +// content-types +#define CT_APPLICATION_JSON 1 +#define CT_TEXT_PLAIN 2 +#define CT_TEXT_HTML 3 +#define CT_APPLICATION_X_JAVASCRIPT 4 +#define CT_TEXT_CSS 5 +#define CT_TEXT_XML 6 +#define CT_APPLICATION_XML 7 +#define CT_TEXT_XSL 8 +#define CT_APPLICATION_OCTET_STREAM 9 +#define CT_APPLICATION_X_FONT_TRUETYPE 10 +#define CT_APPLICATION_X_FONT_OPENTYPE 11 +#define CT_APPLICATION_FONT_WOFF 12 +#define CT_APPLICATION_FONT_WOFF2 13 +#define CT_APPLICATION_VND_MS_FONTOBJ 14 +#define CT_IMAGE_SVG_XML 15 +#define CT_IMAGE_PNG 16 +#define CT_IMAGE_JPG 17 +#define CT_IMAGE_GIF 18 +#define CT_IMAGE_XICON 19 +#define CT_IMAGE_ICNS 20 +#define 
CT_IMAGE_BMP 21 +#define CT_PROMETHEUS 22 + +#define buffer_cacheable(wb) do { (wb)->options |= WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0) +#define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0) + +#define buffer_strlen(wb) ((wb)->len) +extern const char *buffer_tostring(BUFFER *wb); + +#define buffer_flush(wb) wb->buffer[(wb)->len = 0] = '\0' +extern void buffer_reset(BUFFER *wb); + +extern void buffer_strcat(BUFFER *wb, const char *txt); +extern void buffer_rrd_value(BUFFER *wb, calculated_number value); + +extern void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds); +extern void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds); + +extern BUFFER *buffer_create(size_t size); +extern void buffer_free(BUFFER *b); +extern void buffer_increase(BUFFER *b, size_t free_size_required); + +extern void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) PRINTFLIKE(3, 4); +extern void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args); +extern void buffer_sprintf(BUFFER *wb, const char *fmt, ...) PRINTFLIKE(2,3); +extern void buffer_strcat_htmlescape(BUFFER *wb, const char *txt); + +extern void buffer_char_replace(BUFFER *wb, char from, char to); + +extern char *print_number_lu_r(char *str, unsigned long uvalue); +extern char *print_number_llu_r(char *str, unsigned long long uvalue); +extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue); + +extern void buffer_print_llu(BUFFER *wb, unsigned long long uvalue); + +static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) { + if(unlikely(buffer->size - buffer->len < needed_free_size)) + buffer_increase(buffer, needed_free_size); +} + +#endif /* NETDATA_WEB_BUFFER_H */ diff --git a/libnetdata/clocks/Makefile.am b/libnetdata/clocks/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/clocks/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/clocks/Makefile.in b/libnetdata/clocks/Makefile.in new file mode 100644 index 000000000..118d60317 --- /dev/null +++ b/libnetdata/clocks/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/clocks +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/clocks/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/clocks/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT:
diff --git a/libnetdata/clocks/README.md b/libnetdata/clocks/README.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c
new file mode 100644
index 000000000..f303ccddc
--- /dev/null
+++ b/libnetdata/clocks/clocks.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef HAVE_CLOCK_GETTIME
+inline int clock_gettime(clockid_t clk_id, struct timespec *ts) {
+    struct timeval tv;
+    if(unlikely(gettimeofday(&tv, NULL) == -1)) {
+        error("gettimeofday() failed.");
+        return -1;
+    }
+    ts->tv_sec = tv.tv_sec;
+    ts->tv_nsec = (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC;
+    return 0;
+}
+#endif
+
+static inline time_t now_sec(clockid_t clk_id) {
+    struct timespec ts;
+    if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+        error("clock_gettime(%d, &timespec) failed.", clk_id);
+        return 0;
+    }
+    return ts.tv_sec;
+}
+
+static inline usec_t now_usec(clockid_t clk_id) {
+    struct timespec ts;
+    if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+        error("clock_gettime(%d, &timespec) failed.", clk_id);
+        return 0;
+    }
+    return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+static inline int now_timeval(clockid_t clk_id, struct timeval *tv) {
+    struct timespec ts;
+
+    if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+        error("clock_gettime(%d, &timespec) failed.", clk_id);
+        tv->tv_sec = 0;
+        tv->tv_usec = 0;
+        return -1;
+    }
+
+    tv->tv_sec = ts.tv_sec;
+    tv->tv_usec = (suseconds_t)((ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC);
+    return 0;
+}
+
+inline time_t now_realtime_sec(void) {
+    return now_sec(CLOCK_REALTIME);
+}
+
+inline usec_t now_realtime_usec(void) {
+    return now_usec(CLOCK_REALTIME);
+}
+
+inline int now_realtime_timeval(struct timeval *tv) {
+    return now_timeval(CLOCK_REALTIME, tv);
+}
+
+inline time_t now_monotonic_sec(void) {
+    return now_sec(CLOCK_MONOTONIC);
+}
+
+inline usec_t now_monotonic_usec(void) {
+    return now_usec(CLOCK_MONOTONIC);
+}
+
+inline int now_monotonic_timeval(struct timeval *tv) {
+    return now_timeval(CLOCK_MONOTONIC, tv);
+}
+
+inline time_t now_boottime_sec(void) {
+    return now_sec(CLOCK_BOOTTIME);
+}
+
+inline usec_t now_boottime_usec(void) {
+    return now_usec(CLOCK_BOOTTIME);
+}
+
+inline int now_boottime_timeval(struct timeval *tv) {
+    return now_timeval(CLOCK_BOOTTIME, tv);
+}
+
+inline usec_t timeval_usec(struct timeval *tv) {
+    return (usec_t)tv->tv_sec * USEC_PER_SEC + (tv->tv_usec % USEC_PER_SEC);
+}
+
+inline msec_t timeval_msec(struct timeval *tv) {
+    return (msec_t)tv->tv_sec * MSEC_PER_SEC + ((tv->tv_usec % USEC_PER_SEC) / MSEC_PER_SEC);
+}
+
+inline susec_t dt_usec_signed(struct timeval *now, struct timeval *old) {
+    usec_t ts1 = timeval_usec(now);
+    usec_t ts2 = timeval_usec(old);
+
+    if(likely(ts1 >= ts2)) return (susec_t)(ts1 - ts2);
+    return -((susec_t)(ts2 - ts1));
+}
+
+inline usec_t dt_usec(struct timeval *now, struct timeval *old) {
+    usec_t ts1 = timeval_usec(now);
+    usec_t ts2 = timeval_usec(old);
+    return (ts1 > ts2) ? (ts1 - ts2) : (ts2 - ts1);
+}
+
+inline void heartbeat_init(heartbeat_t *hb)
+{
+    hb->monotonic = hb->realtime = 0ULL;
+}
+
+// waits for the next heartbeat
+// it waits using the monotonic clock
+// it returns the dt using the realtime clock
+
+usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
+    heartbeat_t now;
+    now.monotonic = now_monotonic_usec();
+    now.realtime = now_realtime_usec();
+
+    usec_t next_monotonic = now.monotonic - (now.monotonic % tick) + tick;
+
+    while(now.monotonic < next_monotonic) {
+        sleep_usec(next_monotonic - now.monotonic);
+        now.monotonic = now_monotonic_usec();
+        now.realtime = now_realtime_usec();
+    }
+
+    if(likely(hb->realtime != 0ULL)) {
+        usec_t dt_monotonic = now.monotonic - hb->monotonic;
+        usec_t dt_realtime = now.realtime - hb->realtime;
+
+        hb->monotonic = now.monotonic;
+        hb->realtime = now.realtime;
+
+        if(unlikely(dt_monotonic >= tick + tick / 2)) {
+            errno = 0;
+            error("heartbeat missed %llu monotonic microseconds", dt_monotonic - tick);
+        }
+
+        return dt_realtime;
+    }
+    else {
+        hb->monotonic = now.monotonic;
+        hb->realtime = now.realtime;
+        return 0ULL;
+    }
+}
+
+// returns the elapsed time since the last heartbeat,
+// using the monotonic clock
+
+inline usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb) {
+    if(!hb || !hb->monotonic) return 0ULL;
+    return now_monotonic_usec() - hb->monotonic;
+}
+
+int sleep_usec(usec_t usec) {
+
+#ifndef NETDATA_WITH_USLEEP
+    // we expect microseconds (1.000.000 per second)
+    // but timespec is nanoseconds (1.000.000.000 per second)
+    struct timespec rem, req = {
+        .tv_sec = (time_t) (usec / 1000000),
+        .tv_nsec = (suseconds_t) ((usec % 1000000) * 1000)
+    };
+
+    while (nanosleep(&req, &rem) == -1) {
+        if (likely(errno == EINTR)) {
+            debug(D_SYSTEM, "nanosleep() interrupted (while sleeping for %llu microseconds).", usec);
+            req.tv_sec = rem.tv_sec;
+            req.tv_nsec = rem.tv_nsec;
+        } else {
+            error("Cannot nanosleep() for %llu microseconds.", usec);
+            break;
+        }
+    }
+
+    return 0;
+#else
+    int ret = usleep(usec);
+    if(unlikely(ret == -1 && errno == EINVAL)) {
+        // on certain systems, usec has to be up to 999999
+        if(usec > 999999) {
+            int counter = usec / 999999;
+            while(counter--)
+                usleep(999999);
+
+            usleep(usec % 999999);
+            ret = 0; // the sleep completed in chunks; clear the EINVAL of the first attempt
+        }
+        else {
+            error("Cannot usleep() for %llu microseconds.", usec);
+            return ret;
+        }
+    }
+
+    if(ret != 0)
+        error("usleep() failed for %llu microseconds.", usec);
+
+    return ret;
+#endif
+}
diff --git a/libnetdata/clocks/clocks.h b/libnetdata/clocks/clocks.h
new file mode 100644
index 000000000..d576d86b9
--- /dev/null
+++ b/libnetdata/clocks/clocks.h
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLOCKS_H
+#define NETDATA_CLOCKS_H 1
+
+#include "../libnetdata.h"
+
+#ifndef HAVE_STRUCT_TIMESPEC
+struct timespec {
+    time_t tv_sec;  /* seconds */
+    long tv_nsec;   /* nanoseconds */
+};
+#endif
+
+#ifndef HAVE_CLOCKID_T
+typedef int clockid_t;
+#endif
+
+typedef unsigned long long nsec_t;
+typedef unsigned long long msec_t;
+typedef unsigned long long usec_t;
+typedef long long susec_t;
+
+typedef struct heartbeat {
+    usec_t monotonic;
+    usec_t realtime;
+} heartbeat_t;
+
+/* Linux value is as good as any other */
+#ifndef CLOCK_REALTIME
+#define CLOCK_REALTIME 0
+#endif
+
+#ifndef CLOCK_MONOTONIC
+/* fallback to CLOCK_REALTIME if not available */
+#define CLOCK_MONOTONIC CLOCK_REALTIME
+#endif
+
+#ifndef CLOCK_BOOTTIME
+
+#ifdef CLOCK_UPTIME
+/* CLOCK_BOOTTIME falls back to CLOCK_UPTIME on FreeBSD */
+#define CLOCK_BOOTTIME CLOCK_UPTIME
+#else // CLOCK_UPTIME
+/* CLOCK_BOOTTIME falls back to CLOCK_MONOTONIC */
+#define CLOCK_BOOTTIME CLOCK_MONOTONIC
+#endif // CLOCK_UPTIME
+
+#else // CLOCK_BOOTTIME
+
+#ifdef HAVE_CLOCK_GETTIME
+#define CLOCK_BOOTTIME_IS_AVAILABLE 1 // required for /proc/uptime
+#endif // HAVE_CLOCK_GETTIME
+
+#endif // CLOCK_BOOTTIME
+
+#define NSEC_PER_MSEC 1000000ULL
+
+#define NSEC_PER_SEC 1000000000ULL
+#define NSEC_PER_USEC 1000ULL
+
+#define USEC_PER_SEC 1000000ULL
+#define MSEC_PER_SEC 1000ULL
+
+#define USEC_PER_MS 1000ULL
+
+#ifndef HAVE_CLOCK_GETTIME
+/* Fallback function for POSIX.1-2001 clock_gettime() function.
+ *
+ * We use a realtime clock from gettimeofday(), this will
+ * make systems without clock_gettime() support sensitive
+ * to time jumps or hibernation/suspend side effects.
+ */
+extern int clock_gettime(clockid_t clk_id, struct timespec *ts);
+#endif
+
+/*
+ * Three clocks are available (cf. man 3 clock_gettime):
+ *
+ * REALTIME clock (i.e. wall-clock):
+ * This clock is affected by discontinuous jumps in the system time
+ * (e.g., if the system administrator manually changes the clock), and by the incremental adjustments performed by adjtime(3) and NTP.
+ *
+ * MONOTONIC clock
+ * Clock that cannot be set and represents monotonic time since some unspecified starting point.
+ * This clock is not affected by discontinuous jumps in the system time
+ * (e.g., if the system administrator manually changes the clock), but is affected by the incremental adjustments performed by adjtime(3) and NTP.
+ * If not available on the system, this clock falls back to REALTIME clock.
+ *
+ * BOOTTIME clock
+ * Identical to CLOCK_MONOTONIC, except it also includes any time that the system is suspended.
+ * This allows applications to get a suspend-aware monotonic clock without having to deal with the complications of CLOCK_REALTIME,
+ * which may have discontinuities if the time is changed using settimeofday(2).
+ * If not available on the system, this clock falls back to MONOTONIC clock.
+ *
+ * All now_*_timeval() functions fill the `struct timeval` with the time from the appropriate clock.
+ * Those functions return 0 on success, -1 otherwise, with errno set appropriately.
+ *
+ * All now_*_sec() functions return the time in seconds from the appropriate clock, or 0 on error.
+ * All now_*_usec() functions return the time in microseconds from the appropriate clock, or 0 on error.
+ */
+extern int now_realtime_timeval(struct timeval *tv);
+extern time_t now_realtime_sec(void);
+extern usec_t now_realtime_usec(void);
+
+extern int now_monotonic_timeval(struct timeval *tv);
+extern time_t now_monotonic_sec(void);
+extern usec_t now_monotonic_usec(void);
+
+extern int now_boottime_timeval(struct timeval *tv);
+extern time_t now_boottime_sec(void);
+extern usec_t now_boottime_usec(void);
+
+
+extern usec_t timeval_usec(struct timeval *tv);
+extern msec_t timeval_msec(struct timeval *tv);
+
+extern usec_t dt_usec(struct timeval *now, struct timeval *old);
+extern susec_t dt_usec_signed(struct timeval *now, struct timeval *old);
+
+extern void heartbeat_init(heartbeat_t *hb);
+
+/* Sleeps until next multiple of tick using monotonic clock.
+ * Returns elapsed time in microseconds since previous heartbeat + */ +extern usec_t heartbeat_next(heartbeat_t *hb, usec_t tick); + +/* Returns elapsed time in microseconds since last heartbeat */ +extern usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb); + +extern int sleep_usec(usec_t usec); + +#endif /* NETDATA_CLOCKS_H */ diff --git a/libnetdata/config/Makefile.am b/libnetdata/config/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/config/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/config/Makefile.in b/libnetdata/config/Makefile.in new file mode 100644 index 000000000..940ccb8d8 --- /dev/null +++ b/libnetdata/config/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/config +DIST_COMMON = $(srcdir)/Makefile.in 
$(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ 
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/config/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/config/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md
new file mode 100644
index 000000000..5e170caa9
--- /dev/null
+++ b/libnetdata/config/README.md
@@ -0,0 +1,46 @@
+# netdata ini config files
+
+Configuration files `netdata.conf` and `stream.conf` are netdata ini files.
+
+## Motivation
+
+The whole idea came up when we were evaluating the documentation involved
+in maintaining a complex configuration system. Our intention was to give
+configuration options for everything imaginable. But then, documenting all
+these options would require a tremendous amount of time, and users would
+have to search through endless pages for the option they need.
+
+We concluded that **configuring software like that is a waste of time
+and effort**. Of course there must be plenty of configuration options, but
+the implementation itself should require a lot less effort for both the
+developers and the users.
+
+So, we did this:
+
+1. No configuration is required to run netdata
+2. There are plenty of options to tweak
+3. There is minimal documentation (or none at all)
+
+## Why does this work?
+
+The configuration file is a `name = value` dictionary with `[sections]`.
+Write whatever you like there, as long as it follows this simple format.
+
+Netdata loads this dictionary and then, when the code needs a value from
+it, just looks up the `name` in the dictionary at the proper `section`.
+Everywhere in the code, both the `names` and their
+`default values` are defined, so if something is not found in the configuration
+file, the default is used. The lookup is made using AVL trees and hashes
+(no string comparisons), so it is super fast. Also the `names` of the
+settings can be `my super duper setting that once set to yes, will turn the world upside down = no`
+- so goodbye to most of the documentation involved.
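+
+For illustration, a fragment in this format (the section and option names
+here are just examples):
+
+```
+[some section]
+    # a comment above an option
+    some option name = some value
+    enable the feature = yes
+```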
+
+Next, netdata can generate a valid configuration for the user to edit.
+No need to remember anything or copy and paste settings. Just get the
+configuration from the server (`/netdata.conf` on your netdata server),
+edit it and save it.
+
+Finally, what about options you believe you have set, but misspelled?
+When you download the configuration file from the server, there will be
+a comment above every `name = value` pair the server does not use, so
+you know that whatever you wrote there is not used.
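+
+## For developers: the C API
+
+Below is a minimal sketch of how code consumes these files through the
+`appconfig` API declared in `appconfig.h`. The `struct config` instance
+and the option names here are hypothetical, and the instance is assumed
+to be properly initialized (mutex and AVL index) elsewhere, as netdata
+does at startup:
+
+```c
+#include "libnetdata/libnetdata.h"
+
+static struct config my_config;   // hypothetical; assumed initialized elsewhere
+
+void example(void) {
+    // Loading the ini file; a missing file is not an error - defaults are used.
+    appconfig_load(&my_config, "/etc/netdata/netdata.conf", 0);
+
+    // Each GET registers the option together with its default, so it
+    // shows up (commented out) in the generated netdata.conf even if
+    // the user never set it.
+    char     *dir  = appconfig_get(&my_config, "global", "cache directory", "/var/cache/netdata");
+    long long hist = appconfig_get_number(&my_config, "global", "history", 3600);
+    int       web  = appconfig_get_boolean(&my_config, "web", "enabled", 1);
+
+    (void)dir; (void)hist; (void)web;
+}
+```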
diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c
new file mode 100644
index 000000000..411538446
--- /dev/null
+++ b/libnetdata/config/appconfig.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+// ----------------------------------------------------------------------------
+// definitions
+
+#define CONFIG_VALUE_LOADED  0x01 // has been loaded from the config
+#define CONFIG_VALUE_USED    0x02 // has been accessed from the program
+#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value
+#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default
+
+struct config_option {
+    avl avl;                // the index entry of this entry - this has to be first!
+
+    uint8_t flags;
+    uint32_t hash;          // a simple hash to speed up searching
+                            // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+    char *name;
+    char *value;
+
+    struct config_option *next; // config->mutex protects just this
+};
+
+struct section {
+    avl avl;                // the index entry of this section - this has to be first!
+
+    uint32_t hash;          // a simple hash to speed up searching
+                            // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+    char *name;
+
+    struct section *next;   // global config_mutex protects just this
+
+    struct config_option *values;
+    avl_tree_lock values_index;
+
+    netdata_mutex_t mutex;  // this locks only the writers, to ensure atomic updates
+                            // readers are protected using the rwlock in avl_tree_lock
+};
+
+
+// ----------------------------------------------------------------------------
+// locking
+
+static inline void appconfig_wrlock(struct config *root) {
+    netdata_mutex_lock(&root->mutex);
+}
+
+static inline void appconfig_unlock(struct config *root) {
+    netdata_mutex_unlock(&root->mutex);
+}
+
+static inline void config_section_wrlock(struct section *co) {
+    netdata_mutex_lock(&co->mutex);
+}
+
+static inline void config_section_unlock(struct section *co) {
+    netdata_mutex_unlock(&co->mutex);
+}
+
+
+// ----------------------------------------------------------------------------
+// config name-value index
+
+static int appconfig_option_compare(void *a, void *b) {
+    if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1;
+    else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1;
+    else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name);
+}
+
+#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl *)(cv))
+#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl *)(cv))
+
+static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) {
+    struct config_option tmp;
+    tmp.hash = (hash)?hash:simple_hash(name);
+    tmp.name = (char *)name;
+
+    return (struct config_option *)avl_search_lock(&(co->values_index), (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// config sections index
+
+int appconfig_section_compare(void *a, void *b) {
+    if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1;
+    else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1;
+    else return strcmp(((struct section *)a)->name, ((struct section *)b)->name);
+}
+
+#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl *)(cfg))
+#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl *)(cfg))
+
+static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) {
+    struct section tmp;
+    tmp.hash = (hash)?hash:simple_hash(name);
+    tmp.name = (char *)name;
+
+    return (struct section *)avl_search_lock(&root->index, (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// config section methods
+
+static inline struct section *appconfig_section_find(struct config *root, const char *section) {
+    return appconfig_index_find(root, section, 0);
+}
+
+static inline struct section *appconfig_section_create(struct config *root, const char *section) {
+    debug(D_CONFIG, "Creating section '%s'.", section);
+
+    struct section *co = callocz(1, sizeof(struct section));
+    co->name = strdupz(section);
+    co->hash = simple_hash(co->name);
+    netdata_mutex_init(&co->mutex);
+
+    avl_init_lock(&co->values_index, appconfig_option_compare);
+
+    if(unlikely(appconfig_index_add(root, co) != co))
+        error("INTERNAL ERROR: indexing of section '%s': already exists.", co->name);
+
+    appconfig_wrlock(root);
+    struct section *co2 = root->sections;
+    if(co2) {
+        while (co2->next) co2 = co2->next;
+        co2->next = co;
+    }
+    else root->sections = co;
+    appconfig_unlock(root);
+
+    return co;
+}
+
+
+// ----------------------------------------------------------------------------
+// config name-value methods
+
+static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) {
+    debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name);
+
+    struct config_option *cv = callocz(1, sizeof(struct config_option));
+    cv->name = strdupz(name);
+    cv->hash = simple_hash(cv->name);
+    cv->value = strdupz(value);
+
+    struct config_option *found = appconfig_option_index_add(co, cv);
+    if(found != cv) {
+        error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name);
+        freez(cv->value);
+        freez(cv->name);
+        freez(cv);
+        return found;
+    }
+
+    config_section_wrlock(co);
+    struct config_option *cv2 = co->values;
+    if(cv2) {
+        while (cv2->next) cv2 = cv2->next;
+        cv2->next = cv;
+    }
+    else co->values = cv;
+    config_section_unlock(co);
+
+    return cv;
+}
+
+int appconfig_exists(struct config *root, const char *section, const char *name) {
+    struct config_option *cv;
+
+    debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name);
+
+    struct section *co = appconfig_section_find(root, section);
+    if(!co) return 0;
+
+    cv = appconfig_option_index_find(co, name, 0);
+    if(!cv) return 0;
+
+    return 1;
+}
+
+int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) {
+    struct config_option *cv_old, *cv_new;
+    int ret = -1;
+
+    debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new);
+
+    struct section *co_old = appconfig_section_find(root, section_old);
+    if(!co_old) return ret;
+
+    struct section *co_new = appconfig_section_find(root, section_new);
+    if(!co_new) co_new = appconfig_section_create(root, section_new);
+
+    config_section_wrlock(co_old);
+    if(co_old != co_new)
+        config_section_wrlock(co_new);
+
+    cv_old = appconfig_option_index_find(co_old, name_old, 0);
+    if(!cv_old) goto cleanup;
+
+    cv_new = appconfig_option_index_find(co_new, name_new, 0);
+    if(cv_new) goto cleanup;
+
+    if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old))
+        error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", cv_old->name, co_old->name);
+
+    if(co_old->values == cv_old) {
+        co_old->values = cv_old->next;
+    }
+    else {
+        struct config_option *t;
+        for(t = co_old->values; t && t->next != cv_old ;t = t->next) ;
+        if(!t || t->next != cv_old)
+            error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name);
+        else
+            t->next = cv_old->next;
+    }
+
+    freez(cv_old->name);
+    cv_old->name = strdupz(name_new);
+    cv_old->hash = simple_hash(cv_old->name);
+
+    cv_new = cv_old;
+    cv_new->next = co_new->values;
+    co_new->values = cv_new;
+
+    if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old))
+        error("INTERNAL ERROR: re-indexing of config '%s' in section '%s': already exists.", cv_old->name, co_new->name);
+
+    ret = 0;
+
+cleanup:
+    if(co_old != co_new)
+        config_section_unlock(co_new);
+    config_section_unlock(co_old);
+    return ret;
+}
+
+char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+    struct config_option *cv;
+
+    debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value);
+
+    struct section *co = appconfig_section_find(root, section);
+    if(!co) co = appconfig_section_create(root, section);
+
+    cv = appconfig_option_index_find(co, name, 0);
+    if(!cv) {
+        cv = appconfig_value_create(co, name, default_value);
+        if(!cv) return NULL;
+    }
+    cv->flags |= CONFIG_VALUE_USED;
+
+    if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) {
+        // this is a loaded value from the config file
+        // if it is different from the default, mark it
+        if(!(cv->flags & CONFIG_VALUE_CHECKED)) {
+            if(strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED;
+            cv->flags |= CONFIG_VALUE_CHECKED;
+        }
+    }
+
+    return(cv->value);
+}
+
+long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value)
+{
+    char buffer[100], *s;
+    sprintf(buffer, "%lld", value);
+
+    s = appconfig_get(root, section, name, buffer);
+    if(!s) return value;
+
+    return strtoll(s, NULL, 0);
+}
+
+LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
+{
+    char buffer[100], *s;
+    sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
+
+    s = appconfig_get(root, section, name, buffer);
+    if(!s) return value;
+
+    return str2ld(s, NULL);
+}
+
+int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value)
+{
+    char *s;
+    if(value) s = "yes";
+    else s = "no";
+
+    s = appconfig_get(root, section, name, s);
+    if(!s) return value;
+
if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand")) return 1; + return 0; +} + +int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value) +{ + char *s; + + if(value == CONFIG_BOOLEAN_AUTO) + s = "auto"; + + else if(value == CONFIG_BOOLEAN_NO) + s = "no"; + + else + s = "yes"; + + s = appconfig_get(root, section, name, s); + if(!s) return value; + + if(!strcmp(s, "yes")) + return CONFIG_BOOLEAN_YES; + else if(!strcmp(s, "no")) + return CONFIG_BOOLEAN_NO; + else if(!strcmp(s, "auto") || !strcmp(s, "on demand")) + return CONFIG_BOOLEAN_AUTO; + + return value; +} + +const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value) +{ + struct config_option *cv; + + debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value); + + struct section *co = appconfig_section_find(root, section); + if(!co) return appconfig_set(root, section, name, value); + + cv = appconfig_option_index_find(co, name, 0); + if(!cv) return appconfig_set(root, section, name, value); + + cv->flags |= CONFIG_VALUE_USED; + + if(cv->flags & CONFIG_VALUE_LOADED) + return cv->value; + + if(strcmp(cv->value, value) != 0) { + cv->flags |= CONFIG_VALUE_CHANGED; + + freez(cv->value); + cv->value = strdupz(value); + } + + return cv->value; +} + +const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value) +{ + struct config_option *cv; + + debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value); + + struct section *co = appconfig_section_find(root, section); + if(!co) co = appconfig_section_create(root, section); + + cv = appconfig_option_index_find(co, name, 0); + if(!cv) cv = appconfig_value_create(co, name, value); + cv->flags |= CONFIG_VALUE_USED; + + if(strcmp(cv->value, value) != 0) { + cv->flags |= CONFIG_VALUE_CHANGED; + + freez(cv->value); + cv->value = strdupz(value); + } + + return value; +} + +long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value) +{ + char buffer[100]; + sprintf(buffer, "%lld", value); + + appconfig_set(root, section, name, buffer); + + return value; +} + +LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value) +{ + char buffer[100]; + sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value); + + appconfig_set(root, section, name, buffer); + + return value; +} + +int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value) +{ + char *s; + if(value) s = "yes"; + else s = "no"; + + appconfig_set(root, section, name, s); + + return value; +} + + +// ---------------------------------------------------------------------------- +// config load/save + +int appconfig_load(struct config *root, char *filename, int overwrite_used) +{ + int line = 0; + struct section *co = NULL; + + char buffer[CONFIG_FILE_LINE_MAX + 1], *s; + + if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME; + + debug(D_CONFIG, "CONFIG: opening config file '%s'", filename); + + FILE *fp = fopen(filename, "r"); + if(!fp) { + // info("CONFIG: cannot open file '%s'. 
Using internal defaults.", filename); + return 0; + } + + while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) { + buffer[CONFIG_FILE_LINE_MAX] = '\0'; + line++; + + s = trim(buffer); + if(!s || *s == '#') { + debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename); + continue; + } + + int len = (int) strlen(s); + if(*s == '[' && s[len - 1] == ']') { + // new section + s[len - 1] = '\0'; + s++; + + co = appconfig_section_find(root, s); + if(!co) co = appconfig_section_create(root, s); + + continue; + } + + if(!co) { + // line outside a section + error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename); + continue; + } + + char *name = s; + char *value = strchr(s, '='); + if(!value) { + error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename); + continue; + } + *value = '\0'; + value++; + + name = trim(name); + value = trim(value); + + if(!name || *name == '#') { + error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename); + continue; + } + + if(!value) value = ""; + + struct config_option *cv = appconfig_option_index_find(co, name, 0); + + if(!cv) cv = appconfig_value_create(co, name, value); + else { + if(((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) { + debug(D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name); + freez(cv->value); + cv->value = strdupz(value); + } + else + debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.", line, filename, co->name, cv->name); + } + cv->flags |= CONFIG_VALUE_LOADED; + } + + fclose(fp); + + return 1; +} + +void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) +{ + int i, pri; + struct section *co; + struct config_option *cv; + + for(i = 0; i < 3 ;i++) { + switch(i) { + case 0: + buffer_strcat(wb, + "# netdata configuration\n" + "#\n" + "# You can download the latest version of this file, using:\n" + "#\n" + "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" + "# or\n" + "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" + "#\n" + "# You can uncomment and change any of the options below.\n" + "# The value shown in the commented settings, is the default value.\n" + "#\n" + "\n# global netdata configuration\n"); + break; + + case 1: + buffer_strcat(wb, "\n\n# per plugin configuration\n"); + break; + + case 2: + buffer_strcat(wb, "\n\n# per chart configuration\n"); + break; + } + + appconfig_wrlock(root); + for(co = root->sections; co ; co = co->next) { + if(!strcmp(co->name, CONFIG_SECTION_GLOBAL) + || !strcmp(co->name, CONFIG_SECTION_WEB) + || !strcmp(co->name, CONFIG_SECTION_STATSD) + || !strcmp(co->name, CONFIG_SECTION_PLUGINS) + || !strcmp(co->name, CONFIG_SECTION_REGISTRY) + || !strcmp(co->name, CONFIG_SECTION_HEALTH) + || !strcmp(co->name, CONFIG_SECTION_BACKEND) + || !strcmp(co->name, CONFIG_SECTION_STREAM) + ) + pri = 0; + else if(!strncmp(co->name, "plugin:", 7)) pri = 1; + else pri = 2; + + if(i == pri) { + int loaded = 0; + int used = 0; + int changed = 0; + int count = 0; + + config_section_wrlock(co); + for(cv = co->values; cv ; cv = cv->next) { + used += (cv->flags & CONFIG_VALUE_USED)?1:0; + loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0; + changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0; + count++; + } + config_section_unlock(co); + + if(!count) continue; + if(only_changed && !changed && !loaded) 
continue;
+
+            if(!used) {
+                buffer_sprintf(wb, "\n# section '%s' is not used.", co->name);
+            }
+
+            buffer_sprintf(wb, "\n[%s]\n", co->name);
+
+            config_section_wrlock(co);
+            for(cv = co->values; cv ; cv = cv->next) {
+
+                if(used && !(cv->flags & CONFIG_VALUE_USED)) {
+                    buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name);
+                }
+                buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value);
+            }
+            config_section_unlock(co);
+        }
+    }
+    appconfig_unlock(root);
+    }
+}
diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h
new file mode 100644
index 000000000..6ac666d8b
--- /dev/null
+++ b/libnetdata/config/appconfig.h
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * This section manages ini config files, like netdata.conf and stream.conf
+ *
+ * It is organized like this:
+ *
+ * struct config (i.e. netdata.conf or stream.conf)
+ *   .sections    = a linked list of struct section
+ *   .mutex       = a mutex to protect the above linked list due to multi-threading
+ *   .index       = an AVL tree of struct section
+ *
+ * struct section (i.e. [global] or [health] of netdata.conf)
+ *   .value       = a linked list of struct config_option
+ *   .mutex       = a mutex to protect the above linked list due to multi-threading
+ *   .value_index = an AVL tree of struct config_option
+ *
+ * struct config_option (i.e. a name-value pair for each ini file option)
+ *
+ * The following operations on name-value options are supported:
+ *    SET           to set the value of an option
+ *    SET DEFAULT   to set the value and the default value of an option
+ *    GET           to get the value of an option
+ *    EXISTS        to check if an option exists
+ *    MOVE          to move an option from a section to another section, and/or rename it
+ *
+ * GET and SET operations are provided for the following data types:
+ *    STRING
+ *    NUMBER (long long)
+ *    FLOAT (long double)
+ *    BOOLEAN (false, true)
+ *    BOOLEAN ONDEMAND (false, true, auto)
+ *
+ * GET and SET operations create struct config_option, if it is not already present.
+ * This allows netdata to run even without netdata.conf and stream.conf. The internal
+ * defaults are used to create the structure that should exist in the ini file and the config
+ * file can be downloaded from the server.
+ *
+ * Also 2 operations are supported for the whole config file:
+ *
+ *    LOAD          To load the ini file from disk
+ *    GENERATE      To generate the ini file (this is used to download the ini file from the server)
+ *
+ * For each option (name-value pair), the system maintains 4 flags:
+ *    LOADED        to indicate that the value has been loaded from the file
+ *    USED          to indicate that netdata used the value
+ *    CHANGED       to indicate that the value has been changed from the loaded value or the internal default value
+ *    CHECKED       is used internally for optimization (to avoid a strcmp() every time GET is called).
+ *
+ * TODO:
+ * 1. The linked lists and the mutexes can be removed and the AVL trees can become DICTIONARY.
+ *    This part of the code was written before we added traversal to AVL.
+ *
+ * 2. High level data types could be supported, to simplify the rest of the code:
+ *    MULTIPLE CHOICE  to let the user select one of the supported keywords
+ *                     this would allow users to see the available options in comments
+ *
+ *    SIMPLE PATTERN   to let the user define netdata SIMPLE PATTERNS
+ *
+ * 3. Sorting of options should be supported.
+ * Today, when the ini file is downloaded from the server, the options are shown in the order + * they appear in the linked list (the order they were added, listing changed options first). + * If we remove the linked list, the order they appear in the AVL tree will be used (which is + * random due to simple_hash()). + * Ideally, we support sorting of options when generating the ini file. + * + * 4. There is no free() operation. So, memory is freed on netdata exit. + * + * 5. Avoid memory fragmentation + * Since entries are created from multiple threads and a lot of allocations are required + * for each config_option, fragmentation can be a problem for IoT. + * + * 6. Although this way of managing options is quite flexible and dynamic, it wastes memory + * for the names of the options. Since most of the option names are static, we could provide + * a method to allocate only the dynamic option names. + */ + +#ifndef NETDATA_CONFIG_H +#define NETDATA_CONFIG_H 1 + +#include "../libnetdata.h" + +#define CONFIG_FILENAME "netdata.conf" + +#define CONFIG_SECTION_GLOBAL "global" +#define CONFIG_SECTION_WEB "web" +#define CONFIG_SECTION_STATSD "statsd" +#define CONFIG_SECTION_PLUGINS "plugins" +#define CONFIG_SECTION_REGISTRY "registry" +#define CONFIG_SECTION_HEALTH "health" +#define CONFIG_SECTION_BACKEND "backend" +#define CONFIG_SECTION_STREAM "stream" + +// these are used to limit the configuration names and values lengths +// they are not enforced by config.c functions (they will strdup() all strings, no matter of their length) +#define CONFIG_MAX_NAME 1024 +#define CONFIG_MAX_VALUE 2048 + +struct config { + struct section *sections; + netdata_mutex_t mutex; + avl_tree_lock index; +}; + +#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed) + +#define CONFIG_BOOLEAN_NO 0 // disabled +#define CONFIG_BOOLEAN_YES 1 // enabled + +#ifndef CONFIG_BOOLEAN_AUTO +#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled +#endif + +extern int appconfig_load(struct config *root, char *filename, int overwrite_used); + +extern char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value); +extern long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value); +extern LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value); +extern int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value); +extern int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value); + +extern const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value); +extern const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value); +extern long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value); +extern LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value); +extern int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value); + +extern int appconfig_exists(struct config *root, const char *section, const char *name); +extern int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new); + +extern void 
appconfig_generate(struct config *root, BUFFER *wb, int only_changed); + +extern int appconfig_section_compare(void *a, void *b); + +#endif /* NETDATA_CONFIG_H */ diff --git a/libnetdata/dictionary/Makefile.am b/libnetdata/dictionary/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/dictionary/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/dictionary/Makefile.in b/libnetdata/dictionary/Makefile.in new file mode 100644 index 000000000..bafd64a8a --- /dev/null +++ b/libnetdata/dictionary/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/dictionary +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + 
$(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ 
+SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/dictionary/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/dictionary/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c new file mode 100644 index 000000000..dd94a801d --- /dev/null +++ b/libnetdata/dictionary/dictionary.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +// ---------------------------------------------------------------------------- +// dictionary statistics + +static inline void NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(DICTIONARY *dict) { + if(likely(dict->stats)) + dict->stats->inserts++; +} +static inline void NETDATA_DICTIONARY_STATS_DELETES_PLUS1(DICTIONARY *dict) { + if(likely(dict->stats)) + dict->stats->deletes++; +} +static inline void NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(DICTIONARY *dict) { + if(likely(dict->stats)) + dict->stats->searches++; +} +static inline void NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(DICTIONARY *dict) { + if(likely(dict->stats)) + dict->stats->entries++; +} +static inline void NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(DICTIONARY *dict) { + if(likely(dict->stats)) + dict->stats->entries--; +} + + +// ---------------------------------------------------------------------------- +// dictionary locks + +static inline void dictionary_read_lock(DICTIONARY *dict) { + if(likely(dict->rwlock)) { + // debug(D_DICTIONARY, "Dictionary READ lock"); + netdata_rwlock_rdlock(dict->rwlock); + } +} + +static inline void dictionary_write_lock(DICTIONARY *dict) { + if(likely(dict->rwlock)) { + // debug(D_DICTIONARY, "Dictionary WRITE lock"); + netdata_rwlock_wrlock(dict->rwlock); + } +} + +static inline void dictionary_unlock(DICTIONARY *dict) { + if(likely(dict->rwlock)) { + // debug(D_DICTIONARY, "Dictionary UNLOCK lock"); + netdata_rwlock_unlock(dict->rwlock); + } +} + + +// 
---------------------------------------------------------------------------- +// avl index + +static int name_value_compare(void* a, void* b) { + if(((NAME_VALUE *)a)->hash < ((NAME_VALUE *)b)->hash) return -1; + else if(((NAME_VALUE *)a)->hash > ((NAME_VALUE *)b)->hash) return 1; + else return strcmp(((NAME_VALUE *)a)->name, ((NAME_VALUE *)b)->name); +} + +static inline NAME_VALUE *dictionary_name_value_index_find_nolock(DICTIONARY *dict, const char *name, uint32_t hash) { + NAME_VALUE tmp; + tmp.hash = (hash)?hash:simple_hash(name); + tmp.name = (char *)name; + + NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(dict); + return (NAME_VALUE *)avl_search(&(dict->values_index), (avl *) &tmp); +} + +// ---------------------------------------------------------------------------- +// internal methods + +static NAME_VALUE *dictionary_name_value_create_nolock(DICTIONARY *dict, const char *name, void *value, size_t value_len, uint32_t hash) { + debug(D_DICTIONARY, "Creating name value entry for name '%s'.", name); + + NAME_VALUE *nv = callocz(1, sizeof(NAME_VALUE)); + + if(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE) + nv->name = (char *)name; + else { + nv->name = strdupz(name); + } + + nv->hash = (hash)?hash:simple_hash(nv->name); + + if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) + nv->value = value; + else { + nv->value = mallocz(value_len); + memcpy(nv->value, value, value_len); + } + + // index it + NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(dict); + if(unlikely(avl_insert(&((dict)->values_index), (avl *)(nv)) != (avl *)nv)) + error("dictionary: INTERNAL ERROR: duplicate insertion to dictionary."); + + NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(dict); + + return nv; +} + +static void dictionary_name_value_destroy_nolock(DICTIONARY *dict, NAME_VALUE *nv) { + debug(D_DICTIONARY, "Destroying name value entry for name '%s'.", nv->name); + + NETDATA_DICTIONARY_STATS_DELETES_PLUS1(dict); + if(unlikely(avl_remove(&(dict->values_index), (avl *)(nv)) != (avl *)nv)) + error("dictionary: INTERNAL ERROR: dictionary invalid removal of node."); + + NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(dict); + + if(!(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)) { + debug(D_REGISTRY, "Dictionary freeing value of '%s'", nv->name); + freez(nv->value); + } + + if(!(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)) { + debug(D_REGISTRY, "Dictionary freeing name '%s'", nv->name); + freez(nv->name); + } + + freez(nv); +} + +// ---------------------------------------------------------------------------- +// API - basic methods + +DICTIONARY *dictionary_create(uint8_t flags) { + debug(D_DICTIONARY, "Creating dictionary."); + + DICTIONARY *dict = callocz(1, sizeof(DICTIONARY)); + + if(flags & DICTIONARY_FLAG_WITH_STATISTICS) + dict->stats = callocz(1, sizeof(struct dictionary_stats)); + + if(!(flags & DICTIONARY_FLAG_SINGLE_THREADED)) { + dict->rwlock = callocz(1, sizeof(netdata_rwlock_t)); + netdata_rwlock_init(dict->rwlock); + } + + avl_init(&dict->values_index, name_value_compare); + dict->flags = flags; + + return dict; +} + +void dictionary_destroy(DICTIONARY *dict) { + debug(D_DICTIONARY, "Destroying dictionary."); + + dictionary_write_lock(dict); + + while(dict->values_index.root) + dictionary_name_value_destroy_nolock(dict, (NAME_VALUE *)dict->values_index.root); + + dictionary_unlock(dict); + + if(dict->stats) + freez(dict->stats); + + if(dict->rwlock) { + netdata_rwlock_destroy(dict->rwlock); + freez(dict->rwlock); + } + + freez(dict); +} + +// 
----------------------------------------------------------------------------
+
+void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) {
+    debug(D_DICTIONARY, "SET dictionary entry with name '%s'.", name);
+
+    uint32_t hash = simple_hash(name);
+
+    dictionary_write_lock(dict);
+
+    NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, hash);
+    if(unlikely(!nv)) {
+        debug(D_DICTIONARY, "Dictionary entry with name '%s' not found. Creating a new one.", name);
+
+        nv = dictionary_name_value_create_nolock(dict, name, value, value_len, hash);
+        if(unlikely(!nv))
+            fatal("Cannot create name_value.");
+    }
+    else {
+        debug(D_DICTIONARY, "Dictionary entry with name '%s' found. Changing its value.", name);
+
+        if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) {
+            debug(D_REGISTRY, "Dictionary: linking value to '%s'", name);
+            nv->value = value;
+        }
+        else {
+            debug(D_REGISTRY, "Dictionary: cloning value to '%s'", name);
+
+            // copy the new value without breaking
+            // any other thread accessing the same entry
+            void *new = mallocz(value_len),
+                 *old = nv->value;
+
+            memcpy(new, value, value_len);
+            nv->value = new;
+
+            debug(D_REGISTRY, "Dictionary: freeing old value of '%s'", name);
+            freez(old);
+        }
+    }
+
+    dictionary_unlock(dict);
+
+    return nv->value;
+}
+
+void *dictionary_get(DICTIONARY *dict, const char *name) {
+    debug(D_DICTIONARY, "GET dictionary entry with name '%s'.", name);
+
+    dictionary_read_lock(dict);
+    NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
+    dictionary_unlock(dict);
+
+    if(unlikely(!nv)) {
+        debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
+        return NULL;
+    }
+
+    debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
+    return nv->value;
+}
+
+int dictionary_del(DICTIONARY *dict, const char *name) {
+    int ret;
+
+    debug(D_DICTIONARY, "DEL dictionary entry with name '%s'.", name);
+
+    dictionary_write_lock(dict);
+
+    NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
+    if(unlikely(!nv)) {
+        debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
+        ret = -1;
+    }
+    else {
+        debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
+        dictionary_name_value_destroy_nolock(dict, nv);
+        ret = 0;
+    }
+
+    dictionary_unlock(dict);
+
+    return ret;
+}
+
+
+// ----------------------------------------------------------------------------
+// API - walk through the dictionary
+// the dictionary is locked for reading while this happens
+// do not use other dictionary calls while walking the dictionary - deadlock!
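+//
+// An illustrative sketch (not part of this file's API) of such a walk,
+// using a hypothetical callback that counts the entries:
+//
+//     static int count_entry(void *entry, void *data) {
+//         (void)entry;                 // the stored value of the current item
+//         (*(size_t *)data) += 1;
+//         return 0;                    // a negative return aborts the walk
+//     }
+//
+//     DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_DEFAULT);
+//     dictionary_set(dict, "key", "value", 6);
+//     size_t count = 0;
+//     dictionary_get_all(dict, count_entry, &count);
+//     dictionary_destroy(dict);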
+ +static int dictionary_walker(avl *a, int (*callback)(void *entry, void *data), void *data) { + int total = 0, ret = 0; + + if(a->avl_link[0]) { + ret = dictionary_walker(a->avl_link[0], callback, data); + if(ret < 0) return ret; + total += ret; + } + + ret = callback(((NAME_VALUE *)a)->value, data); + if(ret < 0) return ret; + total += ret; + + if(a->avl_link[1]) { + ret = dictionary_walker(a->avl_link[1], callback, data); + if (ret < 0) return ret; + total += ret; + } + + return total; +} + +int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data), void *data) { + int ret = 0; + + dictionary_read_lock(dict); + + if(likely(dict->values_index.root)) + ret = dictionary_walker(dict->values_index.root, callback, data); + + dictionary_unlock(dict); + + return ret; +} diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h new file mode 100644 index 000000000..61b9bfc61 --- /dev/null +++ b/libnetdata/dictionary/dictionary.h @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DICTIONARY_H +#define NETDATA_DICTIONARY_H 1 + +#include "../libnetdata.h" + +struct dictionary_stats { + unsigned long long inserts; + unsigned long long deletes; + unsigned long long searches; + unsigned long long entries; +}; + +typedef struct name_value { + avl avl; // the index - this has to be first! + + uint32_t hash; // a simple hash to speed up searching + // we first compare hashes, and only if the hashes are equal we do string comparisons + + char *name; + void *value; +} NAME_VALUE; + +typedef struct dictionary { + avl_tree values_index; + + uint8_t flags; + + struct dictionary_stats *stats; + netdata_rwlock_t *rwlock; +} DICTIONARY; + +#define DICTIONARY_FLAG_DEFAULT 0x00000000 +#define DICTIONARY_FLAG_SINGLE_THREADED 0x00000001 +#define DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE 0x00000002 +#define DICTIONARY_FLAG_NAME_LINK_DONT_CLONE 0x00000004 +#define DICTIONARY_FLAG_WITH_STATISTICS 0x00000008 + +extern DICTIONARY *dictionary_create(uint8_t flags); +extern void dictionary_destroy(DICTIONARY *dict); +extern void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) NEVERNULL; +extern void *dictionary_get(DICTIONARY *dict, const char *name); +extern int dictionary_del(DICTIONARY *dict, const char *name); + +extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data); + +#endif /* NETDATA_DICTIONARY_H */ diff --git a/libnetdata/eval/Makefile.am b/libnetdata/eval/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/eval/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/eval/Makefile.in b/libnetdata/eval/Makefile.in new file mode 100644 index 000000000..0b8341b03 --- /dev/null +++ b/libnetdata/eval/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/eval +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 
2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = 
@htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/eval/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/eval/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/libnetdata/eval/README.md b/libnetdata/eval/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/eval/eval.c b/libnetdata/eval/eval.c new file mode 100644 index 000000000..0316edac0 --- /dev/null +++ b/libnetdata/eval/eval.c @@ -0,0 +1,1190 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +// ---------------------------------------------------------------------------- +// data structures for storing the parsed expression in memory + +typedef struct eval_value { + int type; + + union { + calculated_number number; + EVAL_VARIABLE *variable; + struct eval_node *expression; + }; +} EVAL_VALUE; + +typedef struct eval_node { + int id; + unsigned char operator; + int precedence; + + int count; + EVAL_VALUE ops[]; +} EVAL_NODE; + +// these are used for EVAL_NODE.operator +// they are used as internal IDs to identify an operator +// THEY ARE NOT USED FOR PARSING OPERATORS LIKE THAT +#define EVAL_OPERATOR_NOP '\0' +#define EVAL_OPERATOR_EXPRESSION_OPEN '(' +#define EVAL_OPERATOR_EXPRESSION_CLOSE ')' +#define EVAL_OPERATOR_NOT '!' +#define EVAL_OPERATOR_PLUS '+' +#define EVAL_OPERATOR_MINUS '-' +#define EVAL_OPERATOR_AND '&' +#define EVAL_OPERATOR_OR '|' +#define EVAL_OPERATOR_GREATER_THAN_OR_EQUAL 'G' +#define EVAL_OPERATOR_LESS_THAN_OR_EQUAL 'L' +#define EVAL_OPERATOR_NOT_EQUAL '~' +#define EVAL_OPERATOR_EQUAL '=' +#define EVAL_OPERATOR_LESS '<' +#define EVAL_OPERATOR_GREATER '>' +#define EVAL_OPERATOR_MULTIPLY '*' +#define EVAL_OPERATOR_DIVIDE '/' +#define EVAL_OPERATOR_SIGN_PLUS 'P' +#define EVAL_OPERATOR_SIGN_MINUS 'M' +#define EVAL_OPERATOR_ABS 'A' +#define EVAL_OPERATOR_IF_THEN_ELSE '?' + +// ---------------------------------------------------------------------------- +// forward function definitions + +static inline void eval_node_free(EVAL_NODE *op); +static inline EVAL_NODE *parse_full_expression(const char **string, int *error); +static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error); +static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error); +static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error); +static inline void print_parsed_as_constant(BUFFER *out, calculated_number n); + +// ---------------------------------------------------------------------------- +// evaluation of expressions + +static inline calculated_number eval_variable(EVAL_EXPRESSION *exp, EVAL_VARIABLE *v, int *error) { + static uint32_t this_hash = 0, now_hash = 0, after_hash = 0, before_hash = 0, status_hash = 0, removed_hash = 0, uninitialized_hash = 0, undefined_hash = 0, clear_hash = 0, warning_hash = 0, critical_hash = 0; + calculated_number n; + + if(unlikely(this_hash == 0)) { + this_hash = simple_hash("this"); + now_hash = simple_hash("now"); + after_hash = simple_hash("after"); + before_hash = simple_hash("before"); + status_hash = simple_hash("status"); + removed_hash = simple_hash("REMOVED"); + uninitialized_hash = simple_hash("UNINITIALIZED"); + undefined_hash = simple_hash("UNDEFINED"); + clear_hash = simple_hash("CLEAR"); + warning_hash = simple_hash("WARNING"); + critical_hash = simple_hash("CRITICAL"); + } + + if(unlikely(v->hash == this_hash && !strcmp(v->name, "this"))) { + n = (exp->this)?*exp->this:NAN; + buffer_strcat(exp->error_msg, "[ $this = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == after_hash && !strcmp(v->name, "after"))) { + n = 
(exp->after && *exp->after)?*exp->after:NAN; + buffer_strcat(exp->error_msg, "[ $after = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == before_hash && !strcmp(v->name, "before"))) { + n = (exp->before && *exp->before)?*exp->before:NAN; + buffer_strcat(exp->error_msg, "[ $before = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == now_hash && !strcmp(v->name, "now"))) { + n = now_realtime_sec(); + buffer_strcat(exp->error_msg, "[ $now = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == status_hash && !strcmp(v->name, "status"))) { + n = (exp->status)?*exp->status:RRDCALC_STATUS_UNINITIALIZED; + buffer_strcat(exp->error_msg, "[ $status = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == removed_hash && !strcmp(v->name, "REMOVED"))) { + n = RRDCALC_STATUS_REMOVED; + buffer_strcat(exp->error_msg, "[ $REMOVED = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == uninitialized_hash && !strcmp(v->name, "UNINITIALIZED"))) { + n = RRDCALC_STATUS_UNINITIALIZED; + buffer_strcat(exp->error_msg, "[ $UNINITIALIZED = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == undefined_hash && !strcmp(v->name, "UNDEFINED"))) { + n = RRDCALC_STATUS_UNDEFINED; + buffer_strcat(exp->error_msg, "[ $UNDEFINED = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == clear_hash && !strcmp(v->name, "CLEAR"))) { + n = RRDCALC_STATUS_CLEAR; + buffer_strcat(exp->error_msg, "[ $CLEAR = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == warning_hash && !strcmp(v->name, "WARNING"))) { + n = RRDCALC_STATUS_WARNING; + buffer_strcat(exp->error_msg, "[ $WARNING = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(unlikely(v->hash == critical_hash && !strcmp(v->name, "CRITICAL"))) { + n = RRDCALC_STATUS_CRITICAL; + buffer_strcat(exp->error_msg, "[ $CRITICAL = "); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + if(exp->rrdcalc && health_variable_lookup(v->name, v->hash, exp->rrdcalc, &n)) { + buffer_sprintf(exp->error_msg, "[ ${%s} = ", v->name); + print_parsed_as_constant(exp->error_msg, n); + buffer_strcat(exp->error_msg, " ] "); + return n; + } + + *error = EVAL_ERROR_UNKNOWN_VARIABLE; + buffer_sprintf(exp->error_msg, "[ undefined variable '%s' ] ", v->name); + return 0; +} + +static inline calculated_number eval_value(EVAL_EXPRESSION *exp, EVAL_VALUE *v, int *error) { + calculated_number n; + + switch(v->type) { + case EVAL_VALUE_EXPRESSION: + n = eval_node(exp, v->expression, error); + break; + + case EVAL_VALUE_NUMBER: + n = v->number; + break; + + case EVAL_VALUE_VARIABLE: + n = eval_variable(exp, v->variable, error); + break; + + default: + *error = EVAL_ERROR_INVALID_VALUE; + n = 0; + break; + } + + return n; +} + +static inline int is_true(calculated_number n) { + if(isnan(n)) return 0; + if(isinf(n)) return 1; + if(n == 0) return 0; + return 1; +} + 
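An illustrative aside, not part of the upstream patch: every boolean operator that follows (AND, OR, NOT and the ?: conditional) funnels its operands through is_true() above, so the truthiness contract is: NaN is false, any infinity is true, zero is false, everything else is true. A minimal standalone sketch that mirrors is_true() locally (hypothetical file name, compiles on its own with "cc truthiness.c -lm"):

    #include <assert.h>
    #include <math.h>

    // standalone mirror of eval.c's is_true():
    // NaN -> false, +/-inf -> true, zero -> false, anything else -> true
    static int is_true(long double n) {
        if (isnan(n)) return 0;
        if (isinf(n)) return 1;
        if (n == 0) return 0;
        return 1;
    }

    int main(void) {
        assert(is_true(NAN) == 0);        // unset values never satisfy a condition
        assert(is_true(INFINITY) == 1);   // infinities count as true
        assert(is_true(0.0L) == 0);
        assert(is_true(-3.5L) == 1);      // only zero is false, sign is irrelevant
        return 0;
    }

Note also that the comparison operators below are built on isgreaterequal()/isless() and friends, which return 0 when either operand is NaN, so a condition such as "$this > 0" quietly evaluates to false while $this is still unset.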
+calculated_number eval_and(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + return is_true(eval_value(exp, &op->ops[0], error)) && is_true(eval_value(exp, &op->ops[1], error)); +} +calculated_number eval_or(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + return is_true(eval_value(exp, &op->ops[0], error)) || is_true(eval_value(exp, &op->ops[1], error)); +} +calculated_number eval_greater_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + return isgreaterequal(n1, n2); +} +calculated_number eval_less_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + return islessequal(n1, n2); +} +calculated_number eval_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + if(isnan(n1) && isnan(n2)) return 1; + if(isinf(n1) && isinf(n2)) return 1; + if(isnan(n1) || isnan(n2)) return 0; + if(isinf(n1) || isinf(n2)) return 0; + return calculated_number_equal(n1, n2); +} +calculated_number eval_not_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + return !eval_equal(exp, op, error); +} +calculated_number eval_less(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + return isless(n1, n2); +} +calculated_number eval_greater(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + return isgreater(n1, n2); +} +calculated_number eval_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + if(isnan(n1) || isnan(n2)) return NAN; + if(isinf(n1) || isinf(n2)) return INFINITY; + return n1 + n2; +} +calculated_number eval_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + if(isnan(n1) || isnan(n2)) return NAN; + if(isinf(n1) || isinf(n2)) return INFINITY; + return n1 - n2; +} +calculated_number eval_multiply(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + if(isnan(n1) || isnan(n2)) return NAN; + if(isinf(n1) || isinf(n2)) return INFINITY; + return n1 * n2; +} +calculated_number eval_divide(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + calculated_number n1 = eval_value(exp, &op->ops[0], error); + calculated_number n2 = eval_value(exp, &op->ops[1], error); + if(isnan(n1) || isnan(n2)) return NAN; + if(isinf(n1) || isinf(n2)) return INFINITY; + return n1 / n2; +} +calculated_number eval_nop(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + return eval_value(exp, &op->ops[0], error); +} +calculated_number eval_not(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + return !is_true(eval_value(exp, &op->ops[0], error)); +} +calculated_number eval_sign_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { + return eval_value(exp, &op->ops[0], error); +} +calculated_number 
eval_sign_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+    calculated_number n1 = eval_value(exp, &op->ops[0], error);
+    if(isnan(n1)) return NAN;
+    if(isinf(n1)) return INFINITY;
+    return -n1;
+}
+calculated_number eval_abs(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+    calculated_number n1 = eval_value(exp, &op->ops[0], error);
+    if(isnan(n1)) return NAN;
+    if(isinf(n1)) return INFINITY;
+    return fabsl(n1);
+}
+calculated_number eval_if_then_else(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+    if(is_true(eval_value(exp, &op->ops[0], error)))
+        return eval_value(exp, &op->ops[1], error);
+    else
+        return eval_value(exp, &op->ops[2], error);
+}
+
+static struct operator {
+    const char *print_as;
+    char precedence;
+    char parameters;
+    char isfunction;
+    calculated_number (*eval)(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
+} operators[256] = {
+    // this is a random access array
+    // we always access it with a known EVAL_OPERATOR_X
+
+    [EVAL_OPERATOR_AND]                   = { "&&", 2, 2, 0, eval_and },
+    [EVAL_OPERATOR_OR]                    = { "||", 2, 2, 0, eval_or },
+    [EVAL_OPERATOR_GREATER_THAN_OR_EQUAL] = { ">=", 3, 2, 0, eval_greater_than_or_equal },
+    [EVAL_OPERATOR_LESS_THAN_OR_EQUAL]    = { "<=", 3, 2, 0, eval_less_than_or_equal },
+    [EVAL_OPERATOR_NOT_EQUAL]             = { "!=", 3, 2, 0, eval_not_equal },
+    [EVAL_OPERATOR_EQUAL]                 = { "==", 3, 2, 0, eval_equal },
+    [EVAL_OPERATOR_LESS]                  = { "<",  3, 2, 0, eval_less },
+    [EVAL_OPERATOR_GREATER]               = { ">",  3, 2, 0, eval_greater },
+    [EVAL_OPERATOR_PLUS]                  = { "+",  4, 2, 0, eval_plus },
+    [EVAL_OPERATOR_MINUS]                 = { "-",  4, 2, 0, eval_minus },
+    [EVAL_OPERATOR_MULTIPLY]              = { "*",  5, 2, 0, eval_multiply },
+    [EVAL_OPERATOR_DIVIDE]                = { "/",  5, 2, 0, eval_divide },
+    [EVAL_OPERATOR_NOT]                   = { "!",  6, 1, 0, eval_not },
+    [EVAL_OPERATOR_SIGN_PLUS]             = { "+",  6, 1, 0, eval_sign_plus },
+    [EVAL_OPERATOR_SIGN_MINUS]            = { "-",  6, 1, 0, eval_sign_minus },
+    [EVAL_OPERATOR_ABS]                   = { "abs(", 6, 1, 1, eval_abs },
+    [EVAL_OPERATOR_IF_THEN_ELSE]          = { "?",  7, 3, 0, eval_if_then_else },
+    [EVAL_OPERATOR_NOP]                   = { NULL, 8, 1, 0, eval_nop },
+    [EVAL_OPERATOR_EXPRESSION_OPEN]       = { NULL, 8, 1, 0, eval_nop },
+
+    // this should exist in our evaluation list
+    [EVAL_OPERATOR_EXPRESSION_CLOSE]      = { NULL, 99, 1, 0, eval_nop }
+};
+
+#define eval_precedence(operator) (operators[(unsigned char)(operator)].precedence)
+
+static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+    if(unlikely(op->count != operators[op->operator].parameters)) {
+        *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
+        return 0;
+    }
+
+    calculated_number n = operators[op->operator].eval(exp, op, error);
+
+    return n;
+}
+
+// ----------------------------------------------------------------------------
+// parsed-as generation
+
+static inline void print_parsed_as_variable(BUFFER *out, EVAL_VARIABLE *v, int *error) {
+    (void)error;
+    buffer_sprintf(out, "${%s}", v->name);
+}
+
+static inline void print_parsed_as_constant(BUFFER *out, calculated_number n) {
+    if(unlikely(isnan(n))) {
+        buffer_strcat(out, "nan");
+        return;
+    }
+
+    if(unlikely(isinf(n))) {
+        buffer_strcat(out, "inf");
+        return;
+    }
+
+    char b[100+1], *s;
+    snprintfz(b, 100, CALCULATED_NUMBER_FORMAT, n);
+
+    s = &b[strlen(b) - 1];
+    while(s > b && *s == '0') {
+        *s = '\0';
+        s--;
+    }
+
+    if(s > b && *s == '.')
+        *s = '\0';
+
+    buffer_strcat(out, b);
+}
+
+static inline void print_parsed_as_value(BUFFER *out, EVAL_VALUE *v, int *error) {
+    switch(v->type) {
+        case EVAL_VALUE_EXPRESSION:
+            print_parsed_as_node(out, v->expression, error);
+
break; + + case EVAL_VALUE_NUMBER: + print_parsed_as_constant(out, v->number); + break; + + case EVAL_VALUE_VARIABLE: + print_parsed_as_variable(out, v->variable, error); + break; + + default: + *error = EVAL_ERROR_INVALID_VALUE; + break; + } +} + +static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error) { + if(unlikely(op->count != operators[op->operator].parameters)) { + *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS; + return; + } + + if(operators[op->operator].parameters == 1) { + + if(operators[op->operator].print_as) + buffer_sprintf(out, "%s", operators[op->operator].print_as); + + //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN) + // buffer_strcat(out, "("); + + print_parsed_as_value(out, &op->ops[0], error); + + //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN) + // buffer_strcat(out, ")"); + } + + else if(operators[op->operator].parameters == 2) { + buffer_strcat(out, "("); + print_parsed_as_value(out, &op->ops[0], error); + + if(operators[op->operator].print_as) + buffer_sprintf(out, " %s ", operators[op->operator].print_as); + + print_parsed_as_value(out, &op->ops[1], error); + buffer_strcat(out, ")"); + } + else if(op->operator == EVAL_OPERATOR_IF_THEN_ELSE && operators[op->operator].parameters == 3) { + buffer_strcat(out, "("); + print_parsed_as_value(out, &op->ops[0], error); + + if(operators[op->operator].print_as) + buffer_sprintf(out, " %s ", operators[op->operator].print_as); + + print_parsed_as_value(out, &op->ops[1], error); + buffer_strcat(out, " : "); + print_parsed_as_value(out, &op->ops[2], error); + buffer_strcat(out, ")"); + } + + if(operators[op->operator].isfunction) + buffer_strcat(out, ")"); +} + +// ---------------------------------------------------------------------------- +// parsing expressions + +// skip spaces +static inline void skip_spaces(const char **string) { + const char *s = *string; + while(isspace(*s)) s++; + *string = s; +} + +// what character can appear just after an operator keyword +// like NOT AND OR ? +static inline int isoperatorterm_word(const char s) { + if(isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s) + return 1; + + return 0; +} + +// what character can appear just after an operator symbol? +static inline int isoperatorterm_symbol(const char s) { + if(isoperatorterm_word(s) || isalpha(s)) + return 1; + + return 0; +} + +// return 1 if the character should never appear in a variable +static inline int isvariableterm(const char s) { + if(isalnum(s) || s == '.' 
|| s == '_') + return 0; + + return 1; +} + +// ---------------------------------------------------------------------------- +// parse operators + +static inline int parse_and(const char **string) { + const char *s = *string; + + // AND + if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && isoperatorterm_word(s[3])) { + *string = &s[4]; + return 1; + } + + // && + if(s[0] == '&' && s[1] == '&' && isoperatorterm_symbol(s[2])) { + *string = &s[2]; + return 1; + } + + return 0; +} + +static inline int parse_or(const char **string) { + const char *s = *string; + + // OR + if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && isoperatorterm_word(s[2])) { + *string = &s[3]; + return 1; + } + + // || + if(s[0] == '|' && s[1] == '|' && isoperatorterm_symbol(s[2])) { + *string = &s[2]; + return 1; + } + + return 0; +} + +static inline int parse_greater_than_or_equal(const char **string) { + const char *s = *string; + + // >= + if(s[0] == '>' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + *string = &s[2]; + return 1; + } + + return 0; +} + +static inline int parse_less_than_or_equal(const char **string) { + const char *s = *string; + + // <= + if (s[0] == '<' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + *string = &s[2]; + return 1; + } + + return 0; +} + +static inline int parse_greater(const char **string) { + const char *s = *string; + + // > + if(s[0] == '>' && isoperatorterm_symbol(s[1])) { + *string = &s[1]; + return 1; + } + + return 0; +} + +static inline int parse_less(const char **string) { + const char *s = *string; + + // < + if(s[0] == '<' && isoperatorterm_symbol(s[1])) { + *string = &s[1]; + return 1; + } + + return 0; +} + +static inline int parse_equal(const char **string) { + const char *s = *string; + + // == + if(s[0] == '=' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + *string = &s[2]; + return 1; + } + + // = + if(s[0] == '=' && isoperatorterm_symbol(s[1])) { + *string = &s[1]; + return 1; + } + + return 0; +} + +static inline int parse_not_equal(const char **string) { + const char *s = *string; + + // != + if(s[0] == '!' 
&& s[1] == '=' && isoperatorterm_symbol(s[2])) {
+        *string = &s[2];
+        return 1;
+    }
+
+    // <>
+    if(s[0] == '<' && s[1] == '>' && isoperatorterm_symbol(s[2])) {
+        *string = &s[2];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_not(const char **string) {
+    const char *s = *string;
+
+    // NOT
+    if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && isoperatorterm_word(s[3])) {
+        *string = &s[3];
+        return 1;
+    }
+
+    if(s[0] == '!') {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_multiply(const char **string) {
+    const char *s = *string;
+
+    // *
+    if(s[0] == '*' && isoperatorterm_symbol(s[1])) {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_divide(const char **string) {
+    const char *s = *string;
+
+    // /
+    if(s[0] == '/' && isoperatorterm_symbol(s[1])) {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_minus(const char **string) {
+    const char *s = *string;
+
+    // -
+    if(s[0] == '-' && isoperatorterm_symbol(s[1])) {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_plus(const char **string) {
+    const char *s = *string;
+
+    // +
+    if(s[0] == '+' && isoperatorterm_symbol(s[1])) {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_open_subexpression(const char **string) {
+    const char *s = *string;
+
+    // (
+    if(s[0] == '(') {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+#define parse_close_function(x) parse_close_subexpression(x)
+
+static inline int parse_close_subexpression(const char **string) {
+    const char *s = *string;
+
+    // )
+    if(s[0] == ')') {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_variable(const char **string, char *buffer, size_t len) {
+    const char *s = *string;
+
+    // $
+    if(*s == '$') {
+        size_t i = 0;
+        s++;
+
+        if(*s == '{') {
+            // ${variable_name}
+
+            s++;
+            while (*s && *s != '}' && i < len)
+                buffer[i++] = *s++;
+
+            if(*s == '}')
+                s++;
+        }
+        else {
+            // $variable_name
+
+            while (*s && !isvariableterm(*s) && i < len)
+                buffer[i++] = *s++;
+        }
+
+        buffer[i] = '\0';
+
+        if (buffer[0]) {
+            *string = s;
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+static inline int parse_constant(const char **string, calculated_number *number) {
+    char *end = NULL;
+    calculated_number n = str2ld(*string, &end);
+    if(unlikely(!end || *string == end)) {
+        *number = 0;
+        return 0;
+    }
+    *number = n;
+    *string = end;
+    return 1;
+}
+
+static inline int parse_abs(const char **string) {
+    const char *s = *string;
+
+    // ABS
+    if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'B' || s[1] == 'b') && (s[2] == 'S' || s[2] == 's') && s[3] == '(') {
+        *string = &s[3];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_if_then_else(const char **string) {
+    const char *s = *string;
+
+    // ?
+    if(s[0] == '?') {
+        *string = &s[1];
+        return 1;
+    }
+
+    return 0;
+}
+
+static struct operator_parser {
+    unsigned char id;
+    int (*parse)(const char **);
+} operator_parsers[] = {
+    // the order in this list is important!
+    // the first matching will be used
+    // so place the longer of overlapping ones
+    // at the top
+
+    { EVAL_OPERATOR_AND,                   parse_and },
+    { EVAL_OPERATOR_OR,                    parse_or },
+    { EVAL_OPERATOR_GREATER_THAN_OR_EQUAL, parse_greater_than_or_equal },
+    { EVAL_OPERATOR_LESS_THAN_OR_EQUAL,    parse_less_than_or_equal },
+    { EVAL_OPERATOR_NOT_EQUAL,             parse_not_equal },
+    { EVAL_OPERATOR_EQUAL,                 parse_equal },
+    { EVAL_OPERATOR_LESS,                  parse_less },
+    { EVAL_OPERATOR_GREATER,               parse_greater },
+    { EVAL_OPERATOR_PLUS,                  parse_plus },
+    { EVAL_OPERATOR_MINUS,                 parse_minus },
+    { EVAL_OPERATOR_MULTIPLY,              parse_multiply },
+    { EVAL_OPERATOR_DIVIDE,                parse_divide },
+    { EVAL_OPERATOR_IF_THEN_ELSE,          parse_if_then_else },
+
+    /* we should not put in this list the following:
+     *
+     *  - NOT
+     *  - (
+     *  - )
+     *
+     * these are handled in code
+     */
+
+    // termination
+    { EVAL_OPERATOR_NOP, NULL }
+};
+
+static inline unsigned char parse_operator(const char **string, int *precedence) {
+    skip_spaces(string);
+
+    int i;
+    for(i = 0 ; operator_parsers[i].parse != NULL ; i++)
+        if(operator_parsers[i].parse(string)) {
+            if(precedence) *precedence = eval_precedence(operator_parsers[i].id);
+            return operator_parsers[i].id;
+        }
+
+    return EVAL_OPERATOR_NOP;
+}
+
+// ----------------------------------------------------------------------------
+// memory management
+
+static inline EVAL_NODE *eval_node_alloc(int count) {
+    static int id = 1;
+
+    EVAL_NODE *op = callocz(1, sizeof(EVAL_NODE) + (sizeof(EVAL_VALUE) * count));
+
+    op->id = id++;
+    op->operator = EVAL_OPERATOR_NOP;
+    op->precedence = eval_precedence(EVAL_OPERATOR_NOP);
+    op->count = count;
+    return op;
+}
+
+static inline void eval_node_set_value_to_node(EVAL_NODE *op, int pos, EVAL_NODE *value) {
+    if(pos >= op->count)
+        fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count);
+
+    op->ops[pos].type = EVAL_VALUE_EXPRESSION;
+    op->ops[pos].expression = value;
+}
+
+static inline void eval_node_set_value_to_constant(EVAL_NODE *op, int pos, calculated_number value) {
+    if(pos >= op->count)
+        fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count);
+
+    op->ops[pos].type = EVAL_VALUE_NUMBER;
+    op->ops[pos].number = value;
+}
+
+static inline void eval_node_set_value_to_variable(EVAL_NODE *op, int pos, const char *variable) {
+    if(pos >= op->count)
+        fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count);
+
+    op->ops[pos].type = EVAL_VALUE_VARIABLE;
+    op->ops[pos].variable = callocz(1, sizeof(EVAL_VARIABLE));
+    op->ops[pos].variable->name = strdupz(variable);
+    op->ops[pos].variable->hash = simple_hash(op->ops[pos].variable->name);
+}
+
+static inline void eval_variable_free(EVAL_VARIABLE *v) {
+    freez(v->name);
+    freez(v);
+}
+
+static inline void eval_value_free(EVAL_VALUE *v) {
+    switch(v->type) {
+        case EVAL_VALUE_EXPRESSION:
+            eval_node_free(v->expression);
+            break;
+
+        case EVAL_VALUE_VARIABLE:
+            eval_variable_free(v->variable);
+            break;
+
+        default:
+            break;
+    }
+}
+
+static inline void eval_node_free(EVAL_NODE *op) {
+    if(op->count) {
+        int i;
+        for(i = op->count - 1; i >= 0 ;i--)
+            eval_value_free(&op->ops[i]);
+    }
+
+    freez(op);
+}
+
+// ----------------------------------------------------------------------------
+// the parsing logic
+
+// helper function to avoid allocations all over the place
+static inline EVAL_NODE *parse_next_operand_given_its_operator(const char **string, unsigned char operator_type, int *error) {
+    EVAL_NODE *sub = parse_one_full_operand(string, error);
+    if(!sub) return NULL;
+
+    EVAL_NODE *op = eval_node_alloc(1);
+    op->operator = operator_type;
+    eval_node_set_value_to_node(op, 0, sub);
+    return op;
+}
+
+// parse a full operand, including its sign or other associative operator (e.g. NOT)
+static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error) {
+    char variable_buffer[EVAL_MAX_VARIABLE_NAME_LENGTH + 1];
+    EVAL_NODE *op1 = NULL;
+    calculated_number number;
+
+    *error = EVAL_ERROR_OK;
+
+    skip_spaces(string);
+    if(!(**string)) {
+        *error = EVAL_ERROR_MISSING_OPERAND;
+        return NULL;
+    }
+
+    if(parse_not(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_NOT, error);
+        if(!op1) return NULL;
+        op1->precedence = eval_precedence(EVAL_OPERATOR_NOT);
+    }
+    else if(parse_plus(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_PLUS, error);
+        if(!op1) return NULL;
+        op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_PLUS);
+    }
+    else if(parse_minus(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_MINUS, error);
+        if(!op1) return NULL;
+        op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_MINUS);
+    }
+    else if(parse_abs(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_ABS, error);
+        if(!op1) return NULL;
+        op1->precedence = eval_precedence(EVAL_OPERATOR_ABS);
+    }
+    else if(parse_open_subexpression(string)) {
+        EVAL_NODE *sub = parse_full_expression(string, error);
+        if(sub) {
+            op1 = eval_node_alloc(1);
+            op1->operator = EVAL_OPERATOR_EXPRESSION_OPEN;
+            op1->precedence = eval_precedence(EVAL_OPERATOR_EXPRESSION_OPEN);
+            eval_node_set_value_to_node(op1, 0, sub);
+            if(!parse_close_subexpression(string)) {
+                *error = EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION;
+                eval_node_free(op1);
+                return NULL;
+            }
+        }
+    }
+    else if(parse_variable(string, variable_buffer, EVAL_MAX_VARIABLE_NAME_LENGTH)) {
+        op1 = eval_node_alloc(1);
+        op1->operator = EVAL_OPERATOR_NOP;
+        eval_node_set_value_to_variable(op1, 0, variable_buffer);
+    }
+    else if(parse_constant(string, &number)) {
+        op1 = eval_node_alloc(1);
+        op1->operator = EVAL_OPERATOR_NOP;
+        eval_node_set_value_to_constant(op1, 0, number);
+    }
+    else if(**string)
+        *error = EVAL_ERROR_UNKNOWN_OPERAND;
+    else
+        *error = EVAL_ERROR_MISSING_OPERAND;
+
+    return op1;
+}
+
+// parse an operator and the rest of the expression
+// precedence processing is handled here
+static inline EVAL_NODE *parse_rest_of_expression(const char **string, int *error, EVAL_NODE *op1) {
+    EVAL_NODE *op2 = NULL;
+    unsigned char operator;
+    int precedence;
+
+    operator = parse_operator(string, &precedence);
+    skip_spaces(string);
+
+    if(operator != EVAL_OPERATOR_NOP) {
+        op2 = parse_one_full_operand(string, error);
+        if(!op2) {
+            // error is already reported
+            eval_node_free(op1);
+            return NULL;
+        }
+
+        EVAL_NODE *op = eval_node_alloc(operators[operator].parameters);
+        op->operator = operator;
+        op->precedence = precedence;
+
+        if(operator == EVAL_OPERATOR_IF_THEN_ELSE && op->count == 3) {
+            skip_spaces(string);
+
+            if(**string != ':') {
+                eval_node_free(op);
+                eval_node_free(op1);
+                eval_node_free(op2);
+                *error = EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE;
+                return NULL;
+            }
+            (*string)++;
+
+            skip_spaces(string);
+
+            EVAL_NODE *op3 = parse_one_full_operand(string, error);
+            if(!op3) {
+                eval_node_free(op);
+                eval_node_free(op1);
+                eval_node_free(op2);
+                // error is already reported
+                return NULL;
+            }
+
+            eval_node_set_value_to_node(op, 2, op3);
+        }
+
+        eval_node_set_value_to_node(op, 1, op2);
+
+        // precedence processing
+        // if this operator has
a higher precedence compared to its next + // put the next operator on top of us (top = evaluated later) + // function recursion does the rest... + if(op->precedence > op1->precedence && op1->count == 2 && op1->operator != '(' && op1->ops[1].type == EVAL_VALUE_EXPRESSION) { + eval_node_set_value_to_node(op, 0, op1->ops[1].expression); + op1->ops[1].expression = op; + op = op1; + } + else + eval_node_set_value_to_node(op, 0, op1); + + return parse_rest_of_expression(string, error, op); + } + else if(**string == ')') { + ; + } + else if(**string) { + eval_node_free(op1); + op1 = NULL; + *error = EVAL_ERROR_MISSING_OPERATOR; + } + + return op1; +} + +// high level function to parse an expression or a sub-expression +static inline EVAL_NODE *parse_full_expression(const char **string, int *error) { + EVAL_NODE *op1 = parse_one_full_operand(string, error); + if(!op1) { + *error = EVAL_ERROR_MISSING_OPERAND; + return NULL; + } + + return parse_rest_of_expression(string, error, op1); +} + +// ---------------------------------------------------------------------------- +// public API + +int expression_evaluate(EVAL_EXPRESSION *expression) { + expression->error = EVAL_ERROR_OK; + + buffer_reset(expression->error_msg); + expression->result = eval_node(expression, (EVAL_NODE *)expression->nodes, &expression->error); + + if(unlikely(isnan(expression->result))) { + if(expression->error == EVAL_ERROR_OK) + expression->error = EVAL_ERROR_VALUE_IS_NAN; + } + else if(unlikely(isinf(expression->result))) { + if(expression->error == EVAL_ERROR_OK) + expression->error = EVAL_ERROR_VALUE_IS_INFINITE; + } + else if(unlikely(expression->error == EVAL_ERROR_UNKNOWN_VARIABLE)) { + // although there is an unknown variable + // the expression was evaluated successfully + expression->error = EVAL_ERROR_OK; + } + + if(expression->error != EVAL_ERROR_OK) { + expression->result = NAN; + + if(buffer_strlen(expression->error_msg)) + buffer_strcat(expression->error_msg, "; "); + + buffer_sprintf(expression->error_msg, "failed to evaluate expression with error %d (%s)", expression->error, expression_strerror(expression->error)); + return 0; + } + + return 1; +} + +EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error) { + const char *s = string; + int err = EVAL_ERROR_OK; + + EVAL_NODE *op = parse_full_expression(&s, &err); + + if(*s) { + if(op) { + eval_node_free(op); + op = NULL; + } + err = EVAL_ERROR_REMAINING_GARBAGE; + } + + if (failed_at) *failed_at = s; + if (error) *error = err; + + if(!op) { + unsigned long pos = s - string + 1; + error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s); + return NULL; + } + + BUFFER *out = buffer_create(1024); + print_parsed_as_node(out, op, &err); + if(err != EVAL_ERROR_OK) { + error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err)); + eval_node_free(op); + buffer_free(out); + return NULL; + } + + EVAL_EXPRESSION *exp = callocz(1, sizeof(EVAL_EXPRESSION)); + + exp->source = strdupz(string); + exp->parsed_as = strdupz(buffer_tostring(out)); + buffer_free(out); + + exp->error_msg = buffer_create(100); + exp->nodes = (void *)op; + + return exp; +} + +void expression_free(EVAL_EXPRESSION *expression) { + if(!expression) return; + + if(expression->nodes) eval_node_free((EVAL_NODE *)expression->nodes); + freez((void *)expression->source); + freez((void *)expression->parsed_as); + buffer_free(expression->error_msg); + freez(expression); +} + +const 
char *expression_strerror(int error) {
+    switch(error) {
+        case EVAL_ERROR_OK:
+            return "success";
+
+        case EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION:
+            return "missing closing parenthesis";
+
+        case EVAL_ERROR_UNKNOWN_OPERAND:
+            return "unknown operand";
+
+        case EVAL_ERROR_MISSING_OPERAND:
+            return "expected operand";
+
+        case EVAL_ERROR_MISSING_OPERATOR:
+            return "expected operator";
+
+        case EVAL_ERROR_REMAINING_GARBAGE:
+            return "remaining characters after expression";
+
+        case EVAL_ERROR_INVALID_VALUE:
+            return "invalid value structure - internal error";
+
+        case EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS:
+            return "wrong number of operands for operation - internal error";
+
+        case EVAL_ERROR_VALUE_IS_NAN:
+            return "value is unset";
+
+        case EVAL_ERROR_VALUE_IS_INFINITE:
+            return "computed value is infinite";
+
+        case EVAL_ERROR_UNKNOWN_VARIABLE:
+            return "undefined variable";
+
+        case EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE:
+            return "missing second sub-expression of inline conditional";
+
+        default:
+            return "unknown error";
+    }
+}
diff --git a/libnetdata/eval/eval.h b/libnetdata/eval/eval.h
new file mode 100644
index 000000000..57dae9d0b
--- /dev/null
+++ b/libnetdata/eval/eval.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EVAL_H
+#define NETDATA_EVAL_H 1
+
+#include "../libnetdata.h"
+
+#define EVAL_MAX_VARIABLE_NAME_LENGTH 300
+
+typedef enum rrdcalc_status {
+    RRDCALC_STATUS_REMOVED = -2,
+    RRDCALC_STATUS_UNDEFINED = -1,
+    RRDCALC_STATUS_UNINITIALIZED = 0,
+    RRDCALC_STATUS_CLEAR = 1,
+    RRDCALC_STATUS_RAISED = 2,
+    RRDCALC_STATUS_WARNING = 3,
+    RRDCALC_STATUS_CRITICAL = 4
+} RRDCALC_STATUS;
+
+typedef struct eval_variable {
+    char *name;
+    uint32_t hash;
+    struct eval_variable *next;
+} EVAL_VARIABLE;
+
+typedef struct eval_expression {
+    const char *source;
+    const char *parsed_as;
+
+    RRDCALC_STATUS *status;
+    calculated_number *this;
+    time_t *after;
+    time_t *before;
+
+    calculated_number result;
+
+    int error;
+    BUFFER *error_msg;
+
+    // hidden EVAL_NODE *
+    void *nodes;
+
+    // custom data to be used for looking up variables
+    struct rrdcalc *rrdcalc;
+} EVAL_EXPRESSION;
+
+#define EVAL_VALUE_INVALID 0
+#define EVAL_VALUE_NUMBER 1
+#define EVAL_VALUE_VARIABLE 2
+#define EVAL_VALUE_EXPRESSION 3
+
+// parsing and evaluation
+#define EVAL_ERROR_OK 0
+
+// parsing errors
+#define EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION 1
+#define EVAL_ERROR_UNKNOWN_OPERAND 2
+#define EVAL_ERROR_MISSING_OPERAND 3
+#define EVAL_ERROR_MISSING_OPERATOR 4
+#define EVAL_ERROR_REMAINING_GARBAGE 5
+#define EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE 6
+
+// evaluation errors
+#define EVAL_ERROR_INVALID_VALUE 101
+#define EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS 102
+#define EVAL_ERROR_VALUE_IS_NAN 103
+#define EVAL_ERROR_VALUE_IS_INFINITE 104
+#define EVAL_ERROR_UNKNOWN_VARIABLE 105
+
+// parse the given string as an expression and return:
+// a pointer to an expression if it parsed OK
+// NULL on failure, in which case *error holds the error code
+extern EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error);
+
+// free all resources allocated for an expression
+extern void expression_free(EVAL_EXPRESSION *expression);
+
+// convert an error code to a message
+extern const char *expression_strerror(int error);
+
+// evaluate an expression and return
+// 1 = OK, the result is in: expression->result
+// 0 = FAILED, the error message is in: buffer_tostring(expression->error_msg)
+extern int expression_evaluate(EVAL_EXPRESSION *expression);
+
+extern int
health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result); + +#endif //NETDATA_EVAL_H diff --git a/libnetdata/inlined.h b/libnetdata/inlined.h new file mode 100644 index 000000000..6a5994c12 --- /dev/null +++ b/libnetdata/inlined.h @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_INLINED_H +#define NETDATA_INLINED_H 1 + +#include "libnetdata.h" + +#ifdef KERNEL_32BIT +typedef uint32_t kernel_uint_t; +#define str2kernel_uint_t(string) str2uint32_t(string) +#define KERNEL_UINT_FORMAT "%u" +#else +typedef uint64_t kernel_uint_t; +#define str2kernel_uint_t(string) str2uint64_t(string) +#define KERNEL_UINT_FORMAT "%" PRIu64 +#endif + +#define str2pid_t(string) str2uint32_t(string) + + +// for faster execution, allow the compiler to inline +// these functions that are called thousands of times per second + +static inline uint32_t simple_hash(const char *name) { + unsigned char *s = (unsigned char *) name; + uint32_t hval = 0x811c9dc5; + while (*s) { + hval *= 16777619; + hval ^= (uint32_t) *s++; + } + return hval; +} + +static inline uint32_t simple_uhash(const char *name) { + unsigned char *s = (unsigned char *) name; + uint32_t hval = 0x811c9dc5, c; + while ((c = *s++)) { + if (unlikely(c >= 'A' && c <= 'Z')) c += 'a' - 'A'; + hval *= 16777619; + hval ^= c; + } + return hval; +} + +static inline int simple_hash_strcmp(const char *name, const char *b, uint32_t *hash) { + unsigned char *s = (unsigned char *) name; + uint32_t hval = 0x811c9dc5; + int ret = 0; + while (*s) { + if(!ret) ret = *s - *b++; + hval *= 16777619; + hval ^= (uint32_t) *s++; + } + *hash = hval; + return ret; +} + +static inline int str2i(const char *s) { + int n = 0; + char c, negative = (*s == '-'); + + for(c = (negative)?*(++s):*s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + + if(unlikely(negative)) + return -n; + + return n; +} + +static inline long str2l(const char *s) { + long n = 0; + char c, negative = (*s == '-'); + + for(c = (negative)?*(++s):*s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + + if(unlikely(negative)) + return -n; + + return n; +} + +static inline uint32_t str2uint32_t(const char *s) { + uint32_t n = 0; + char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + return n; +} + +static inline uint64_t str2uint64_t(const char *s) { + uint64_t n = 0; + char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + return n; +} + +static inline unsigned long str2ul(const char *s) { + unsigned long n = 0; + char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + return n; +} + +static inline unsigned long long str2ull(const char *s) { + unsigned long long n = 0; + char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + return n; +} + +static inline long long str2ll(const char *s, char **endptr) { + int negative = 0; + + if(unlikely(*s == '-')) { + s++; + negative = 1; + } + else if(unlikely(*s == '+')) + s++; + + long long n = 0; + char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + } + + if(unlikely(endptr)) + *endptr = (char *)s; + + if(unlikely(negative)) + return -n; + else + return n; +} + +static inline long double str2ld(const char *s, char **endptr) { + int negative = 0; + const char *start = s; + unsigned long long integer_part = 0; + unsigned long decimal_part = 0; + size_t decimal_digits = 0; + + 
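+    // fast path: the switch below handles an optional leading sign and the
+    // literal prefixes "nan"/"inf"; plain digits (with an optional decimal
+    // part) are accumulated in the loops that follow, and anything carrying
+    // an exponent falls back to strtold() on the original string.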
switch(*s) { + case '-': + s++; + negative = 1; + break; + + case '+': + s++; + break; + + case 'n': + if(s[1] == 'a' && s[2] == 'n') { + if(endptr) *endptr = (char *)&s[3]; + return NAN; + } + break; + + case 'i': + if(s[1] == 'n' && s[2] == 'f') { + if(endptr) *endptr = (char *)&s[3]; + return INFINITY; + } + break; + + default: + break; + } + + while (*s >= '0' && *s <= '9') { + integer_part = (integer_part * 10) + (*s - '0'); + s++; + } + + if(unlikely(*s == '.')) { + decimal_part = 0; + s++; + + while (*s >= '0' && *s <= '9') { + decimal_part = (decimal_part * 10) + (*s - '0'); + s++; + decimal_digits++; + } + } + + if(unlikely(*s == 'e' || *s == 'E')) + return strtold(start, endptr); + + if(unlikely(endptr)) + *endptr = (char *)s; + + if(unlikely(negative)) { + if(unlikely(decimal_digits)) + return -((long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits)); + else + return -((long double)integer_part); + } + else { + if(unlikely(decimal_digits)) + return (long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits); + else + return (long double)integer_part; + } +} + +#ifdef NETDATA_STRCMP_OVERRIDE +#ifdef strcmp +#undef strcmp +#endif +#define strcmp(a, b) strsame(a, b) +#endif // NETDATA_STRCMP_OVERRIDE + +static inline int strsame(const char *a, const char *b) { + if(unlikely(a == b)) return 0; + while(*a && *a == *b) { a++; b++; } + return *a - *b; +} + +static inline char *strncpyz(char *dst, const char *src, size_t n) { + char *p = dst; + + while (*src && n--) + *dst++ = *src++; + + *dst = '\0'; + + return p; +} + +static inline int read_file(const char *filename, char *buffer, size_t size) { + if(unlikely(!size)) return 3; + + int fd = open(filename, O_RDONLY, 0666); + if(unlikely(fd == -1)) { + buffer[0] = '\0'; + return 1; + } + + ssize_t r = read(fd, buffer, size); + if(unlikely(r == -1)) { + buffer[0] = '\0'; + close(fd); + return 2; + } + buffer[r] = '\0'; + + close(fd); + return 0; +} + +static inline int read_single_number_file(const char *filename, unsigned long long *result) { + char buffer[30 + 1]; + + int ret = read_file(filename, buffer, 30); + if(unlikely(ret)) { + *result = 0; + return ret; + } + + buffer[30] = '\0'; + *result = str2ull(buffer); + return 0; +} + +static inline int read_single_signed_number_file(const char *filename, long long *result) { + char buffer[30 + 1]; + + int ret = read_file(filename, buffer, 30); + if(unlikely(ret)) { + *result = 0; + return ret; + } + + buffer[30] = '\0'; + *result = atoll(buffer); + return 0; +} + +#endif //NETDATA_INLINED_H diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c new file mode 100644 index 000000000..de330cae8 --- /dev/null +++ b/libnetdata/libnetdata.c @@ -0,0 +1,1447 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata.h" + +#ifdef __APPLE__ +#define INHERIT_NONE 0 +#endif /* __APPLE__ */ +#if defined(__FreeBSD__) || defined(__APPLE__) +# define O_NOATIME 0 +# define MADV_DONTFORK INHERIT_NONE +#endif /* __FreeBSD__ || __APPLE__*/ + +struct rlimit rlimit_nofile = { .rlim_cur = 1024, .rlim_max = 1024 }; +int enable_ksm = 1; + +volatile sig_atomic_t netdata_exit = 0; +const char *os_type = NETDATA_OS_TYPE; +const char *program_version = VERSION; + +// ---------------------------------------------------------------------------- +// memory allocation functions that handle failures + +// although netdata does not use memory allocations too often (netdata tries to +// maintain its memory footprint stable during runtime, i.e. 
all buffers are +// allocated during initialization and are adapted to current use throughout +// its lifetime), these can be used to override the default system allocation +// routines. + +#ifdef NETDATA_LOG_ALLOCATIONS +static __thread struct memory_statistics { + volatile ssize_t malloc_calls_made; + volatile ssize_t calloc_calls_made; + volatile ssize_t realloc_calls_made; + volatile ssize_t strdup_calls_made; + volatile ssize_t free_calls_made; + volatile ssize_t memory_calls_made; + volatile ssize_t allocated_memory; + volatile ssize_t mmapped_memory; +} memory_statistics = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +__thread size_t log_thread_memory_allocations = 0; + +static inline void print_allocations(const char *file, const char *function, const unsigned long line, const char *type, size_t size) { + static __thread struct memory_statistics old = { 0, 0, 0, 0, 0, 0, 0, 0 }; + + fprintf(stderr, "%s iteration %zu MEMORY TRACE: %lu@%s : %s : %s : %zu\n", + netdata_thread_tag(), + log_thread_memory_allocations, + line, file, function, + type, size + ); + + fprintf(stderr, "%s iteration %zu MEMORY ALLOCATIONS: (%04lu@%-40.40s:%-40.40s): Allocated %zd KB (%+zd B), mmapped %zd KB (%+zd B): %s : malloc %zd (%+zd), calloc %zd (%+zd), realloc %zd (%+zd), strdup %zd (%+zd), free %zd (%+zd)\n", + netdata_thread_tag(), + log_thread_memory_allocations, + line, file, function, + (memory_statistics.allocated_memory + 512) / 1024, memory_statistics.allocated_memory - old.allocated_memory, + (memory_statistics.mmapped_memory + 512) / 1024, memory_statistics.mmapped_memory - old.mmapped_memory, + type, + memory_statistics.malloc_calls_made, memory_statistics.malloc_calls_made - old.malloc_calls_made, + memory_statistics.calloc_calls_made, memory_statistics.calloc_calls_made - old.calloc_calls_made, + memory_statistics.realloc_calls_made, memory_statistics.realloc_calls_made - old.realloc_calls_made, + memory_statistics.strdup_calls_made, memory_statistics.strdup_calls_made - old.strdup_calls_made, + memory_statistics.free_calls_made, memory_statistics.free_calls_made - old.free_calls_made + ); + + memcpy(&old, &memory_statistics, sizeof(struct memory_statistics)); +} + +static inline void mmap_accounting(size_t size) { + if(log_thread_memory_allocations) { + memory_statistics.memory_calls_made++; + memory_statistics.mmapped_memory += size; + } +} + +void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size) { + if(log_thread_memory_allocations) { + memory_statistics.memory_calls_made++; + memory_statistics.malloc_calls_made++; + memory_statistics.allocated_memory += size; + print_allocations(file, function, line, "malloc()", size); + } + + size_t *n = (size_t *)malloc(sizeof(size_t) + size); + if (unlikely(!n)) fatal("mallocz() cannot allocate %zu bytes of memory.", size); + *n = size; + return (void *)&n[1]; +} + +void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size) { + size = nmemb * size; + + if(log_thread_memory_allocations) { + memory_statistics.memory_calls_made++; + memory_statistics.calloc_calls_made++; + memory_statistics.allocated_memory += size; + print_allocations(file, function, line, "calloc()", size); + } + + size_t *n = (size_t *)calloc(1, sizeof(size_t) + size); + if (unlikely(!n)) fatal("callocz() cannot allocate %zu bytes of memory.", size); + *n = size; + return (void *)&n[1]; +} + +void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size) { + 
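+    // the z-allocators store a size_t header just before the pointer they
+    // hand out; step back to that header to recover the old size, realloc()
+    // header and payload together, then stamp the new size and return &n[1].
+    // a NULL ptr falls through to mallocz_int(), mirroring realloc() semantics.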
if(!ptr) return mallocz_int(file, function, line, size); + + size_t *n = (size_t *)ptr; + n--; + size_t old_size = *n; + + n = realloc(n, sizeof(size_t) + size); + if (unlikely(!n)) fatal("reallocz() cannot allocate %zu bytes of memory (from %zu bytes).", size, old_size); + + if(log_thread_memory_allocations) { + memory_statistics.memory_calls_made++; + memory_statistics.realloc_calls_made++; + memory_statistics.allocated_memory += (size - old_size); + print_allocations(file, function, line, "realloc()", size - old_size); + } + + *n = size; + return (void *)&n[1]; +} + +char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s) { + size_t size = strlen(s) + 1; + + if(log_thread_memory_allocations) { + memory_statistics.memory_calls_made++; + memory_statistics.strdup_calls_made++; + memory_statistics.allocated_memory += size; + print_allocations(file, function, line, "strdup()", size); + } + + size_t *n = (size_t *)malloc(sizeof(size_t) + size); + if (unlikely(!n)) fatal("strdupz() cannot allocate %zu bytes of memory.", size); + + *n = size; + char *t = (char *)&n[1]; + strcpy(t, s); + return t; +} + +void freez_int(const char *file, const char *function, const unsigned long line, void *ptr) { + if(unlikely(!ptr)) return; + + size_t *n = (size_t *)ptr; + n--; + size_t size = *n; + + if(log_thread_memory_allocations) { + memory_statistics.memory_calls_made++; + memory_statistics.free_calls_made++; + memory_statistics.allocated_memory -= size; + print_allocations(file, function, line, "free()", size); + } + + free(n); +} +#else + +char *strdupz(const char *s) { + char *t = strdup(s); + if (unlikely(!t)) fatal("Cannot strdup() string '%s'", s); + return t; +} + +void freez(void *ptr) { + free(ptr); +} + +void *mallocz(size_t size) { + void *p = malloc(size); + if (unlikely(!p)) fatal("Cannot allocate %zu bytes of memory.", size); + return p; +} + +void *callocz(size_t nmemb, size_t size) { + void *p = calloc(nmemb, size); + if (unlikely(!p)) fatal("Cannot allocate %zu bytes of memory.", nmemb * size); + return p; +} + +void *reallocz(void *ptr, size_t size) { + void *p = realloc(ptr, size); + if (unlikely(!p)) fatal("Cannot re-allocate memory to %zu bytes.", size); + return p; +} + +#endif + +// -------------------------------------------------------------------------------------------------------------------- + +void json_escape_string(char *dst, const char *src, size_t size) { + const char *t; + char *d = dst, *e = &dst[size - 1]; + + for(t = src; *t && d < e ;t++) { + if(unlikely(*t == '\\' || *t == '"')) { + if(unlikely(d + 1 >= e)) break; + *d++ = '\\'; + } + *d++ = *t; + } + + *d = '\0'; +} + +void json_fix_string(char *s) { + unsigned char c; + while((c = (unsigned char)*s)) { + if(unlikely(c == '\\')) + *s++ = '/'; + else if(unlikely(c == '"')) + *s++ = '\''; + else if(unlikely(isspace(c) || iscntrl(c))) + *s++ = ' '; + else if(unlikely(!isprint(c) || c > 127)) + *s++ = '_'; + else + s++; + } +} + +unsigned char netdata_map_chart_names[256] = { + [0] = '\0', // + [1] = '_', // + [2] = '_', // + [3] = '_', // + [4] = '_', // + [5] = '_', // + [6] = '_', // + [7] = '_', // + [8] = '_', // + [9] = '_', // + [10] = '_', // + [11] = '_', // + [12] = '_', // + [13] = '_', // + [14] = '_', // + [15] = '_', // + [16] = '_', // + [17] = '_', // + [18] = '_', // + [19] = '_', // + [20] = '_', // + [21] = '_', // + [22] = '_', // + [23] = '_', // + [24] = '_', // + [25] = '_', // + [26] = '_', // + [27] = '_', // + [28] = '_', // + [29] = '_', // + [30] = 
'_', // + [31] = '_', // + [32] = '_', // + [33] = '_', // ! + [34] = '_', // " + [35] = '_', // # + [36] = '_', // $ + [37] = '_', // % + [38] = '_', // & + [39] = '_', // ' + [40] = '_', // ( + [41] = '_', // ) + [42] = '_', // * + [43] = '_', // + + [44] = '.', // , + [45] = '-', // - + [46] = '.', // . + [47] = '/', // / + [48] = '0', // 0 + [49] = '1', // 1 + [50] = '2', // 2 + [51] = '3', // 3 + [52] = '4', // 4 + [53] = '5', // 5 + [54] = '6', // 6 + [55] = '7', // 7 + [56] = '8', // 8 + [57] = '9', // 9 + [58] = '_', // : + [59] = '_', // ; + [60] = '_', // < + [61] = '_', // = + [62] = '_', // > + [63] = '_', // ? + [64] = '_', // @ + [65] = 'a', // A + [66] = 'b', // B + [67] = 'c', // C + [68] = 'd', // D + [69] = 'e', // E + [70] = 'f', // F + [71] = 'g', // G + [72] = 'h', // H + [73] = 'i', // I + [74] = 'j', // J + [75] = 'k', // K + [76] = 'l', // L + [77] = 'm', // M + [78] = 'n', // N + [79] = 'o', // O + [80] = 'p', // P + [81] = 'q', // Q + [82] = 'r', // R + [83] = 's', // S + [84] = 't', // T + [85] = 'u', // U + [86] = 'v', // V + [87] = 'w', // W + [88] = 'x', // X + [89] = 'y', // Y + [90] = 'z', // Z + [91] = '_', // [ + [92] = '/', // backslash + [93] = '_', // ] + [94] = '_', // ^ + [95] = '_', // _ + [96] = '_', // ` + [97] = 'a', // a + [98] = 'b', // b + [99] = 'c', // c + [100] = 'd', // d + [101] = 'e', // e + [102] = 'f', // f + [103] = 'g', // g + [104] = 'h', // h + [105] = 'i', // i + [106] = 'j', // j + [107] = 'k', // k + [108] = 'l', // l + [109] = 'm', // m + [110] = 'n', // n + [111] = 'o', // o + [112] = 'p', // p + [113] = 'q', // q + [114] = 'r', // r + [115] = 's', // s + [116] = 't', // t + [117] = 'u', // u + [118] = 'v', // v + [119] = 'w', // w + [120] = 'x', // x + [121] = 'y', // y + [122] = 'z', // z + [123] = '_', // { + [124] = '_', // | + [125] = '_', // } + [126] = '_', // ~ + [127] = '_', // + [128] = '_', // + [129] = '_', // + [130] = '_', // + [131] = '_', // + [132] = '_', // + [133] = '_', // + [134] = '_', // + [135] = '_', // + [136] = '_', // + [137] = '_', // + [138] = '_', // + [139] = '_', // + [140] = '_', // + [141] = '_', // + [142] = '_', // + [143] = '_', // + [144] = '_', // + [145] = '_', // + [146] = '_', // + [147] = '_', // + [148] = '_', // + [149] = '_', // + [150] = '_', // + [151] = '_', // + [152] = '_', // + [153] = '_', // + [154] = '_', // + [155] = '_', // + [156] = '_', // + [157] = '_', // + [158] = '_', // + [159] = '_', // + [160] = '_', // + [161] = '_', // + [162] = '_', // + [163] = '_', // + [164] = '_', // + [165] = '_', // + [166] = '_', // + [167] = '_', // + [168] = '_', // + [169] = '_', // + [170] = '_', // + [171] = '_', // + [172] = '_', // + [173] = '_', // + [174] = '_', // + [175] = '_', // + [176] = '_', // + [177] = '_', // + [178] = '_', // + [179] = '_', // + [180] = '_', // + [181] = '_', // + [182] = '_', // + [183] = '_', // + [184] = '_', // + [185] = '_', // + [186] = '_', // + [187] = '_', // + [188] = '_', // + [189] = '_', // + [190] = '_', // + [191] = '_', // + [192] = '_', // + [193] = '_', // + [194] = '_', // + [195] = '_', // + [196] = '_', // + [197] = '_', // + [198] = '_', // + [199] = '_', // + [200] = '_', // + [201] = '_', // + [202] = '_', // + [203] = '_', // + [204] = '_', // + [205] = '_', // + [206] = '_', // + [207] = '_', // + [208] = '_', // + [209] = '_', // + [210] = '_', // + [211] = '_', // + [212] = '_', // + [213] = '_', // + [214] = '_', // + [215] = '_', // + [216] = '_', // + [217] = '_', // + [218] = '_', // + [219] = '_', // + [220] = '_', // 
+ [221] = '_', // + [222] = '_', // + [223] = '_', // + [224] = '_', // + [225] = '_', // + [226] = '_', // + [227] = '_', // + [228] = '_', // + [229] = '_', // + [230] = '_', // + [231] = '_', // + [232] = '_', // + [233] = '_', // + [234] = '_', // + [235] = '_', // + [236] = '_', // + [237] = '_', // + [238] = '_', // + [239] = '_', // + [240] = '_', // + [241] = '_', // + [242] = '_', // + [243] = '_', // + [244] = '_', // + [245] = '_', // + [246] = '_', // + [247] = '_', // + [248] = '_', // + [249] = '_', // + [250] = '_', // + [251] = '_', // + [252] = '_', // + [253] = '_', // + [254] = '_', // + [255] = '_' // +}; + +// make sure the supplied string +// is good for a netdata chart/dimension ID/NAME +void netdata_fix_chart_name(char *s) { + while ((*s = netdata_map_chart_names[(unsigned char) *s])) s++; +} + +unsigned char netdata_map_chart_ids[256] = { + [0] = '\0', // + [1] = '_', // + [2] = '_', // + [3] = '_', // + [4] = '_', // + [5] = '_', // + [6] = '_', // + [7] = '_', // + [8] = '_', // + [9] = '_', // + [10] = '_', // + [11] = '_', // + [12] = '_', // + [13] = '_', // + [14] = '_', // + [15] = '_', // + [16] = '_', // + [17] = '_', // + [18] = '_', // + [19] = '_', // + [20] = '_', // + [21] = '_', // + [22] = '_', // + [23] = '_', // + [24] = '_', // + [25] = '_', // + [26] = '_', // + [27] = '_', // + [28] = '_', // + [29] = '_', // + [30] = '_', // + [31] = '_', // + [32] = '_', // + [33] = '_', // ! + [34] = '_', // " + [35] = '_', // # + [36] = '_', // $ + [37] = '_', // % + [38] = '_', // & + [39] = '_', // ' + [40] = '_', // ( + [41] = '_', // ) + [42] = '_', // * + [43] = '_', // + + [44] = '.', // , + [45] = '-', // - + [46] = '.', // . + [47] = '_', // / + [48] = '0', // 0 + [49] = '1', // 1 + [50] = '2', // 2 + [51] = '3', // 3 + [52] = '4', // 4 + [53] = '5', // 5 + [54] = '6', // 6 + [55] = '7', // 7 + [56] = '8', // 8 + [57] = '9', // 9 + [58] = '_', // : + [59] = '_', // ; + [60] = '_', // < + [61] = '_', // = + [62] = '_', // > + [63] = '_', // ? 
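+        // uppercase ASCII letters below are folded to their lowercase equivalents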
+ [64] = '_', // @ + [65] = 'a', // A + [66] = 'b', // B + [67] = 'c', // C + [68] = 'd', // D + [69] = 'e', // E + [70] = 'f', // F + [71] = 'g', // G + [72] = 'h', // H + [73] = 'i', // I + [74] = 'j', // J + [75] = 'k', // K + [76] = 'l', // L + [77] = 'm', // M + [78] = 'n', // N + [79] = 'o', // O + [80] = 'p', // P + [81] = 'q', // Q + [82] = 'r', // R + [83] = 's', // S + [84] = 't', // T + [85] = 'u', // U + [86] = 'v', // V + [87] = 'w', // W + [88] = 'x', // X + [89] = 'y', // Y + [90] = 'z', // Z + [91] = '_', // [ + [92] = '/', // backslash + [93] = '_', // ] + [94] = '_', // ^ + [95] = '_', // _ + [96] = '_', // ` + [97] = 'a', // a + [98] = 'b', // b + [99] = 'c', // c + [100] = 'd', // d + [101] = 'e', // e + [102] = 'f', // f + [103] = 'g', // g + [104] = 'h', // h + [105] = 'i', // i + [106] = 'j', // j + [107] = 'k', // k + [108] = 'l', // l + [109] = 'm', // m + [110] = 'n', // n + [111] = 'o', // o + [112] = 'p', // p + [113] = 'q', // q + [114] = 'r', // r + [115] = 's', // s + [116] = 't', // t + [117] = 'u', // u + [118] = 'v', // v + [119] = 'w', // w + [120] = 'x', // x + [121] = 'y', // y + [122] = 'z', // z + [123] = '_', // { + [124] = '_', // | + [125] = '_', // } + [126] = '_', // ~ + [127] = '_', // + [128] = '_', // + [129] = '_', // + [130] = '_', // + [131] = '_', // + [132] = '_', // + [133] = '_', // + [134] = '_', // + [135] = '_', // + [136] = '_', // + [137] = '_', // + [138] = '_', // + [139] = '_', // + [140] = '_', // + [141] = '_', // + [142] = '_', // + [143] = '_', // + [144] = '_', // + [145] = '_', // + [146] = '_', // + [147] = '_', // + [148] = '_', // + [149] = '_', // + [150] = '_', // + [151] = '_', // + [152] = '_', // + [153] = '_', // + [154] = '_', // + [155] = '_', // + [156] = '_', // + [157] = '_', // + [158] = '_', // + [159] = '_', // + [160] = '_', // + [161] = '_', // + [162] = '_', // + [163] = '_', // + [164] = '_', // + [165] = '_', // + [166] = '_', // + [167] = '_', // + [168] = '_', // + [169] = '_', // + [170] = '_', // + [171] = '_', // + [172] = '_', // + [173] = '_', // + [174] = '_', // + [175] = '_', // + [176] = '_', // + [177] = '_', // + [178] = '_', // + [179] = '_', // + [180] = '_', // + [181] = '_', // + [182] = '_', // + [183] = '_', // + [184] = '_', // + [185] = '_', // + [186] = '_', // + [187] = '_', // + [188] = '_', // + [189] = '_', // + [190] = '_', // + [191] = '_', // + [192] = '_', // + [193] = '_', // + [194] = '_', // + [195] = '_', // + [196] = '_', // + [197] = '_', // + [198] = '_', // + [199] = '_', // + [200] = '_', // + [201] = '_', // + [202] = '_', // + [203] = '_', // + [204] = '_', // + [205] = '_', // + [206] = '_', // + [207] = '_', // + [208] = '_', // + [209] = '_', // + [210] = '_', // + [211] = '_', // + [212] = '_', // + [213] = '_', // + [214] = '_', // + [215] = '_', // + [216] = '_', // + [217] = '_', // + [218] = '_', // + [219] = '_', // + [220] = '_', // + [221] = '_', // + [222] = '_', // + [223] = '_', // + [224] = '_', // + [225] = '_', // + [226] = '_', // + [227] = '_', // + [228] = '_', // + [229] = '_', // + [230] = '_', // + [231] = '_', // + [232] = '_', // + [233] = '_', // + [234] = '_', // + [235] = '_', // + [236] = '_', // + [237] = '_', // + [238] = '_', // + [239] = '_', // + [240] = '_', // + [241] = '_', // + [242] = '_', // + [243] = '_', // + [244] = '_', // + [245] = '_', // + [246] = '_', // + [247] = '_', // + [248] = '_', // + [249] = '_', // + [250] = '_', // + [251] = '_', // + [252] = '_', // + [253] = '_', // + [254] = '_', // + [255] = '_' // 
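+        // [0] stays '\0', so the sanitizer loop below stops at the string terminator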
+};
+
+// make sure the supplied string
+// is good for a netdata chart/dimension ID/NAME
+void netdata_fix_chart_id(char *s) {
+    while ((*s = netdata_map_chart_ids[(unsigned char) *s])) s++;
+}
+
+/*
+// http://stackoverflow.com/questions/7666509/hash-function-for-string
+uint32_t simple_hash(const char *name)
+{
+    const char *s = name;
+    uint32_t hash = 5381;
+    int i;
+
+    while((i = *s++)) hash = ((hash << 5) + hash) + i;
+
+    // fprintf(stderr, "HASH: %lu %s\n", hash, name);
+
+    return hash;
+}
+*/
+
+/*
+// http://isthe.com/chongo/tech/comp/fnv/#FNV-1a
+uint32_t simple_hash(const char *name) {
+    unsigned char *s = (unsigned char *) name;
+    uint32_t hval = 0x811c9dc5;
+
+    // FNV-1a algorithm
+    while (*s) {
+        // multiply by the 32 bit FNV magic prime mod 2^32
+        // NOTE: No need to optimize with left shifts.
+        //       GCC will use the imul instruction anyway.
+        //       Tested with 'gcc -O3 -S'
+        //hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
+        hval *= 16777619;
+
+        // xor the bottom with the current octet
+        hval ^= (uint32_t) *s++;
+    }
+
+    // fprintf(stderr, "HASH: %u = %s\n", hval, name);
+    return hval;
+}
+
+uint32_t simple_uhash(const char *name) {
+    unsigned char *s = (unsigned char *) name;
+    uint32_t hval = 0x811c9dc5, c;
+
+    // FNV-1a algorithm
+    while ((c = *s++)) {
+        if (unlikely(c >= 'A' && c <= 'Z')) c += 'a' - 'A';
+        hval *= 16777619;
+        hval ^= c;
+    }
+    return hval;
+}
+*/
+
+/*
+// http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
+// one-at-a-time hash
+uint32_t simple_hash(const char *name) {
+    unsigned char *s = (unsigned char *)name;
+    uint32_t h = 0;
+
+    while(*s) {
+        h += *s++;
+        h += (h << 10);
+        h ^= (h >> 6);
+    }
+
+    h += (h << 3);
+    h ^= (h >> 11);
+    h += (h << 15);
+
+    // fprintf(stderr, "HASH: %u = %s\n", h, name);
+
+    return h;
+}
+*/
+
+void strreverse(char *begin, char *end) {
+    while (end > begin) {
+        // swap the characters at the two ends and move inwards
+        char aux = *end;
+        *end-- = *begin;
+        *begin++ = aux;
+    }
+}
+
+char *strsep_on_1char(char **ptr, char c) {
+    if(unlikely(!ptr || !*ptr))
+        return NULL;
+
+    // remember the position we started
+    char *s = *ptr;
+
+    // skip separators in front
+    while(*s == c) s++;
+    char *ret = s;
+
+    // find the next separator
+    while(*s++) {
+        if(unlikely(*s == c)) {
+            *s++ = '\0';
+            *ptr = s;
+            return ret;
+        }
+    }
+
+    *ptr = NULL;
+    return ret;
+}
+
+char *mystrsep(char **ptr, char *s) {
+    char *p = "";
+    while (p && !p[0] && *ptr) p = strsep(ptr, s);
+    return (p);
+}
+
+char *trim(char *s) {
+    // skip leading spaces
+    while (*s && isspace(*s)) s++;
+    if (!*s) return NULL;
+
+    // skip trailing spaces
+    // this approach is faster: it writes only one NUL char
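+    // e.g. a buffer containing "  hello world  \n" is edited in place and
+    // a pointer to its "hello world" part is returned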
+    ssize_t l = strlen(s);
+    if (--l >= 0) {
+        char *p = s + l;
+        while (p > s && isspace(*p)) p--;
+        *++p = '\0';
+    }
+
+    if (!*s) return NULL;
+
+    return s;
+}
+
+inline char *trim_all(char *buffer) {
+    char *d = buffer, *s = buffer;
+
+    // skip spaces
+    while(isspace(*s)) s++;
+
+    while(*s) {
+        // copy the non-space part
+        while(*s && !isspace(*s)) *d++ = *s++;
+
+        // add a space if we have to
+        if(*s && isspace(*s)) {
+            *d++ = ' ';
+            s++;
+        }
+
+        // skip spaces
+        while(isspace(*s)) s++;
+    }
+
+    *d = '\0';
+
+    if(d > buffer) {
+        d--;
+        if(isspace(*d)) *d = '\0';
+    }
+
+    if(!buffer[0]) return NULL;
+    return buffer;
+}
+
+static int memory_file_open(const char *filename, size_t size) {
+    // info("memory_file_open('%s', %zu)", filename, size);
+
+    int fd = open(filename, O_RDWR | O_CREAT | O_NOATIME, 0664);
+    if (fd != -1) {
+        if (lseek(fd, size, SEEK_SET) == (off_t) size) {
+            if (write(fd, "", 1) == 1) {
+                if (ftruncate(fd, size))
+                    error("Cannot truncate file '%s' to size %zu. Will use the larger file.", filename, size);
+            }
+            else error("Cannot write to file '%s' at position %zu.", filename, size);
+        }
+        else error("Cannot seek file '%s' to size %zu.", filename, size);
+    }
+    else error("Cannot create/open file '%s'.", filename);
+
+    return fd;
+}
+
+// MAP_SHARED is used for memory mode = map
+static void *memory_file_mmap(const char *filename, size_t size, int flags) {
+    // info("memory_file_mmap('%s', %zu)", filename, size);
+    static int log_madvise = 1;
+
+    int fd = -1;
+    if(filename) {
+        fd = memory_file_open(filename, size);
+        if(fd == -1) return MAP_FAILED;
+    }
+
+    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+    if (mem != MAP_FAILED) {
+#ifdef NETDATA_LOG_ALLOCATIONS
+        mmap_accounting(size);
+#endif
+        // madvise() advice values are enumerations, not bit flags,
+        // so each advice needs its own call
+        int ret = madvise(mem, size, MADV_SEQUENTIAL);
+        ret |= madvise(mem, size, MADV_DONTFORK);
+        if (flags & MAP_SHARED) ret |= madvise(mem, size, MADV_WILLNEED);
+
+        if (ret != 0 && log_madvise) {
+            error("Cannot advise the kernel about shared memory usage.");
+            log_madvise--;
+        }
+    }
+
+    if(fd != -1)
+        close(fd);
+
+    return mem;
+}
+
+#ifdef MADV_MERGEABLE
+static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) {
+    // info("memory_file_mmap_ksm('%s', %zu)", filename, size);
+    static int log_madvise_2 = 1, log_madvise_3 = 1;
+
+    int fd = -1;
+    if(filename) {
+        fd = memory_file_open(filename, size);
+        if(fd == -1) return MAP_FAILED;
+    }
+
+    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0);
+    if (mem != MAP_FAILED) {
+#ifdef NETDATA_LOG_ALLOCATIONS
+        mmap_accounting(size);
+#endif
+        if(fd != -1) {
+            if (lseek(fd, 0, SEEK_SET) == 0) {
+                if (read(fd, mem, size) != (ssize_t) size)
+                    error("Cannot read from file '%s'", filename);
+            }
+            else error("Cannot seek to beginning of file '%s'.", filename);
+        }
+
+        // madvise() advice values are enumerations, not bit flags,
+        // so issue MADV_SEQUENTIAL, MADV_DONTFORK and MADV_MERGEABLE separately
+        if ((madvise(mem, size, MADV_SEQUENTIAL) != 0 || madvise(mem, size, MADV_DONTFORK) != 0) && log_madvise_2) {
+            error("Cannot advise the kernel about the memory usage (MADV_SEQUENTIAL, MADV_DONTFORK) of file '%s'.", filename);
+            log_madvise_2--;
+        }
+
+        if (madvise(mem, size, MADV_MERGEABLE) != 0 && log_madvise_3) {
+            error("Cannot advise the kernel about the memory usage (MADV_MERGEABLE) of file '%s'.", filename);
+            log_madvise_3--;
+        }
+    }
+
+    if(fd != -1)
+        close(fd);
+
+    return mem;
+}
+#else
+static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) {
+    // info("memory_file_mmap_ksm FALLBACK ('%s', %zu)", filename, size);
+
+    if(filename)
+        return memory_file_mmap(filename, size, flags);
+
+    // when KSM is not available and no filename is given (memory mode = ram),
+    // we just report failure
+    return MAP_FAILED;
+}
+#endif
+
+void *mymmap(const char *filename, size_t size, int flags, int ksm) {
+    void *mem = NULL;
+
+    if (filename && (flags & MAP_SHARED || !enable_ksm || !ksm))
+        // memory mode = map | save
+        // when KSM is not enabled
+        // MAP_SHARED is used for memory mode = map (no KSM possible)
+        mem = memory_file_mmap(filename, size, flags);
+
+    else
+        // memory mode = save | ram
+        // when KSM is enabled
+        // for memory mode = ram, the filename is NULL
+        mem = memory_file_mmap_ksm(filename, size, flags);
+
+    if(mem == MAP_FAILED) return NULL;
+
+    errno = 0;
+    return mem;
+}
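+
+// a usage sketch with hypothetical values, for memory mode = map
+// (MAP_SHARED with a backing file; the path below is illustrative only):
+//
+//     void *mem = mymmap("/var/cache/netdata/example.db", 4096, MAP_SHARED, 0);
+//     if(mem) {
+//         // ... use the 4096 mapped bytes; changes are flushed to the file ...
+//     }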
+
+int memory_file_save(const char *filename, void *mem, size_t size) {
+    char tmpfilename[FILENAME_MAX + 1];
+
+    snprintfz(tmpfilename, FILENAME_MAX, "%s.%ld.tmp", filename, (long) getpid());
+
+    int fd = open(tmpfilename, O_RDWR | O_CREAT | O_NOATIME, 0664);
+    if (fd < 0) {
+        error("Cannot create/open file '%s'.", tmpfilename);
+        return -1;
+    }
+
+    if (write(fd, mem, size) != (ssize_t) size) {
+        error("Cannot write %ld bytes to file '%s'.", (long) size, tmpfilename);
+        close(fd);
+        return -1;
+    }
+
+    close(fd);
+
+    if (rename(tmpfilename, filename)) {
+        error("Cannot rename '%s' to '%s'", tmpfilename, filename);
+        return -1;
+    }
+
+    return 0;
+}
+
+int fd_is_valid(int fd) {
+    return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
+}
+
+char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len) {
+    char *s = fgets(buf, (int)buf_size, fp);
+    if (!s) return NULL;
+
+    char *t = s;
+    if (*t != '\0') {
+        // find the string end
+        while (*++t != '\0');
+
+        // trim trailing newlines
+        while (--t > s && *t == '\n')
+            *t = '\0';
+    }
+
+    if (len)
+        *len = t - s + 1;
+
+    return s;
+}
+
+int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args) {
+    int size = vsnprintf(dst, n, fmt, args);
+
+    if (unlikely((size_t) size > n)) {
+        // truncated
+        size = (int)n;
+    }
+
+    dst[size] = '\0';
+    return size;
+}
+
+int snprintfz(char *dst, size_t n, const char *fmt, ...) {
+    va_list args;
+
+    va_start(args, fmt);
+    int ret = vsnprintfz(dst, n, fmt, args);
+    va_end(args);
+
+    return ret;
+}
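+
+// a usage sketch: unlike plain snprintf(), snprintfz() always NUL-terminates,
+// even on truncation, so dst must provide n+1 bytes (names below are hypothetical):
+//
+//     char path[FILENAME_MAX + 1];
+//     snprintfz(path, FILENAME_MAX, "%s/%s", dir, name);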
+
+/*
+// poor man's cycle counting
+static unsigned long tsc;
+void begin_tsc(void) {
+    unsigned long a, d;
+    asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+    tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+unsigned long end_tsc(void) {
+    unsigned long a, d;
+    asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+    return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+*/
+
+int recursively_delete_dir(const char *path, const char *reason) {
+    DIR *dir = opendir(path);
+    if(!dir) {
+        error("Cannot read %s directory to be deleted '%s'", reason?reason:"", path);
+        return -1;
+    }
+
+    int ret = 0;
+    struct dirent *de = NULL;
+    while((de = readdir(dir))) {
+        // skip the '.' and '..' entries
+        if(de->d_type == DT_DIR
+           && (
+                   (de->d_name[0] == '.' && de->d_name[1] == '\0')
+                || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+           ))
+            continue;
+
+        char fullpath[FILENAME_MAX + 1];
+        snprintfz(fullpath, FILENAME_MAX, "%s/%s", path, de->d_name);
+
+        if(de->d_type == DT_DIR) {
+            int r = recursively_delete_dir(fullpath, reason);
+            if(r > 0) ret += r;
+            continue;
+        }
+
+        info("Deleting %s file '%s'", reason?reason:"", fullpath);
+        if(unlikely(unlink(fullpath) == -1))
+            error("Cannot delete %s file '%s'", reason?reason:"", fullpath);
+        else
+            ret++;
+    }
+
+    info("Deleting empty directory '%s'", path);
+    if(unlikely(rmdir(path) == -1))
+        error("Cannot delete empty directory '%s'", path);
+    else
+        ret++;
+
+    closedir(dir);
+
+    return ret;
+}
+
+static int is_virtual_filesystem(const char *path, char **reason) {
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+    (void)path;
+    (void)reason;
+#else
+    struct statfs stat;
+    // f_fsid identifies the mounted file system;
+    // virtual file systems (like procfs and sysfs) report a zero fsid
+
+    if (statfs(path, &stat) == -1) {
+        if(reason) *reason = "failed to statfs()";
+        return -1;
+    }
+
+    if(stat.f_fsid.__val[0] != 0 || stat.f_fsid.__val[1] != 0) {
+        errno = EINVAL;
+        if(reason) *reason = "is not a virtual file system";
+        return -1;
+    }
+#endif
+
+    return 0;
+}
+
+int verify_netdata_host_prefix() {
+    if(!netdata_configured_host_prefix)
+        netdata_configured_host_prefix = "";
+
+    if(!*netdata_configured_host_prefix)
+        return 0;
+
+    char buffer[FILENAME_MAX + 1];
+    char *path = netdata_configured_host_prefix;
+    char *reason = "unknown reason";
+    errno = 0;
+
+    struct stat sb;
+    if (stat(path, &sb) == -1) {
+        reason = "failed to stat()";
+        goto failed;
+    }
+
+    if((sb.st_mode & S_IFMT) != S_IFDIR) {
+        errno = EINVAL;
+        reason = "is not a directory";
+        goto failed;
+    }
+
+    path = buffer;
+    snprintfz(path, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+    if(is_virtual_filesystem(path, &reason) == -1)
+        goto failed;
+
+    snprintfz(path, FILENAME_MAX, "%s/sys", netdata_configured_host_prefix);
+    if(is_virtual_filesystem(path, &reason) == -1)
+        goto failed;
+
+    // the prefix was already verified to be non-empty above
+    info("Using host prefix directory '%s'", netdata_configured_host_prefix);
+
+    return 0;
+
+failed:
+    error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason);
+    netdata_configured_host_prefix = "";
+    return -1;
+}
+
+char *strdupz_path_subpath(const char *path, const char *subpath) {
+    if(unlikely(!path || !*path)) path = ".";
+    if(unlikely(!subpath)) subpath = "";
+
+    // skip trailing slashes in path
+    size_t len = strlen(path);
+    while(len > 0 && path[len - 1] == '/') len--;
+
+    // skip leading slashes in subpath
+    while(subpath[0] == '/') subpath++;
+
+    // if path ended in '/' (or consisted only of slashes) and there is a
+    // subpath to append (or the path would otherwise be empty), keep one
+    // of the original slashes instead of adding a new one
+    char *slash = "/";
+    if(path[len] == '/' && (*subpath || len == 0)) {
+        slash = "";
+        len++;
+    }
+    else if(!*subpath) {
+        // there is no subpath
+        // no need for trailing slash
+        slash = "";
+    }
+
+    char buffer[FILENAME_MAX + 1];
+    snprintfz(buffer, FILENAME_MAX, "%.*s%s%s", (int)len, path, slash, subpath);
+    return strdupz(buffer);
+}
+
+int path_is_dir(const char *path, const char *subpath) {
+    char *s = strdupz_path_subpath(path, subpath);
+
+    // limit the number of symlinks we are willing to resolve
+    size_t max_links = 100;
+
+    int is_dir = 0;
+    struct stat statbuf;
+    while(max_links-- && stat(s, &statbuf) == 0) {
+        if((statbuf.st_mode & S_IFMT) == S_IFDIR) {
+            is_dir = 1;
+            break;
+        }
+        else
if((statbuf.st_mode & S_IFMT) == S_IFLNK) { + char buffer[FILENAME_MAX + 1]; + ssize_t l = readlink(s, buffer, FILENAME_MAX); + if(l > 0) { + buffer[l] = '\0'; + freez(s); + s = strdupz(buffer); + continue; + } + else { + is_dir = 0; + break; + } + } + else { + is_dir = 0; + break; + } + } + + freez(s); + return is_dir; +} + +int path_is_file(const char *path, const char *subpath) { + char *s = strdupz_path_subpath(path, subpath); + + size_t max_links = 100; + + int is_file = 0; + struct stat statbuf; + while(max_links-- && stat(s, &statbuf) == 0) { + if((statbuf.st_mode & S_IFMT) == S_IFREG) { + is_file = 1; + break; + } + else if((statbuf.st_mode & S_IFMT) == S_IFLNK) { + char buffer[FILENAME_MAX + 1]; + ssize_t l = readlink(s, buffer, FILENAME_MAX); + if(l > 0) { + buffer[l] = '\0'; + freez(s); + s = strdupz(buffer); + continue; + } + else { + is_file = 0; + break; + } + } + else { + is_file = 0; + break; + } + } + + freez(s); + return is_file; +} + +void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *subpath, int (*callback)(const char *filename, void *data), void *data, size_t depth) { + if(depth > 3) { + error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, subpath); + return; + } + + char *udir = strdupz_path_subpath(user_path, subpath); + char *sdir = strdupz_path_subpath(stock_path, subpath); + + debug(D_HEALTH, "CONFIG traversing user-config directory '%s', stock config directory '%s'", udir, sdir); + + DIR *dir = opendir(udir); + if (!dir) { + error("CONFIG cannot open user-config directory '%s'.", udir); + } + else { + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR || de->d_type == DT_LNK) { + if( !de->d_name[0] || + (de->d_name[0] == '.' && de->d_name[1] == '\0') || + (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0') + ) { + debug(D_HEALTH, "CONFIG ignoring user-config directory '%s/%s'", udir, de->d_name); + continue; + } + + if(path_is_dir(udir, de->d_name)) { + recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); + continue; + } + } + + if(de->d_type == DT_REG || de->d_type == DT_LNK) { + size_t len = strlen(de->d_name); + if(path_is_file(udir, de->d_name) && + len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { + char *filename = strdupz_path_subpath(udir, de->d_name); + callback(filename, data); + freez(filename); + } + else + debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s'", udir, de->d_name); + } + } + + closedir(dir); + } + + debug(D_HEALTH, "CONFIG traversing stock config directory '%s', user config directory '%s'", sdir, udir); + + dir = opendir(sdir); + if (!dir) { + error("CONFIG cannot open stock config directory '%s'.", sdir); + } + else { + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR || de->d_type == DT_LNK) { + if( !de->d_name[0] || + (de->d_name[0] == '.' && de->d_name[1] == '\0') || + (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') + ) { + debug(D_HEALTH, "CONFIG ignoring stock config directory '%s/%s'", sdir, de->d_name); + continue; + } + + if(path_is_dir(sdir, de->d_name)) { + // we recurse in stock subdirectory, only when there is no corresponding + // user subdirectory - to avoid reading the files twice + + if(!path_is_dir(udir, de->d_name)) + recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); + + continue; + } + } + + if(de->d_type == DT_REG || de->d_type == DT_LNK) { + size_t len = strlen(de->d_name); + if(path_is_file(sdir, de->d_name) && !path_is_file(udir, de->d_name) && + len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { + char *filename = strdupz_path_subpath(sdir, de->d_name); + callback(filename, data); + freez(filename); + } + else + debug(D_HEALTH, "CONFIG ignoring stock config file '%s/%s'", sdir, de->d_name); + } + } + + closedir(dir); + } + + freez(udir); + freez(sdir); +} diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h new file mode 100644 index 000000000..0bac3dc8f --- /dev/null +++ b/libnetdata/libnetdata.h @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LIB_H +#define NETDATA_LIB_H 1 + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#define OS_LINUX 1 +#define OS_FREEBSD 2 +#define OS_MACOS 3 + + +// ---------------------------------------------------------------------------- +// system include files for all netdata C programs + +/* select the memory allocator, based on autoconf findings */ +#if defined(ENABLE_JEMALLOC) + +#if defined(HAVE_JEMALLOC_JEMALLOC_H) +#include <jemalloc/jemalloc.h> +#else // !defined(HAVE_JEMALLOC_JEMALLOC_H) +#include <malloc.h> +#endif // !defined(HAVE_JEMALLOC_JEMALLOC_H) + +#elif defined(ENABLE_TCMALLOC) + +#include <google/tcmalloc.h> + +#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ + +#if !(defined(__FreeBSD__) || defined(__APPLE__)) +#include <malloc.h> +#endif /* __FreeBSD__ || __APPLE__ */ + +#endif /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ + +// ---------------------------------------------------------------------------- + +#if defined(__FreeBSD__) +#include <pthread_np.h> +#define NETDATA_OS_TYPE "freebsd" +#elif defined(__APPLE__) +#define NETDATA_OS_TYPE "macos" +#else +#define NETDATA_OS_TYPE "linux" +#endif /* __FreeBSD__, __APPLE__*/ + +#include <pthread.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <stddef.h> +#include <ctype.h> +#include <string.h> +#include <strings.h> +#include <arpa/inet.h> +#include <netinet/tcp.h> +#include <sys/ioctl.h> +#include <libgen.h> +#include <dirent.h> +#include <fcntl.h> +#include <getopt.h> +#include <grp.h> +#include <pwd.h> +#include <locale.h> +#include <net/if.h> +#include <poll.h> +#include <signal.h> +#include <syslog.h> +#include <sys/mman.h> +#include <sys/resource.h> +#include <sys/socket.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <sys/un.h> +#include <time.h> +#include <unistd.h> +#include <uuid/uuid.h> + +#ifdef HAVE_NETINET_IN_H +#include <netinet/in.h> +#endif + +#ifdef HAVE_RESOLV_H +#include <resolv.h> +#endif + +#ifdef HAVE_NETDB_H +#include <netdb.h> +#endif + +#ifdef HAVE_SYS_PRCTL_H +#include <sys/prctl.h> +#endif + +#ifdef HAVE_SYS_STAT_H +#include <sys/stat.h> +#endif + +#ifdef HAVE_SYS_VFS_H +#include <sys/vfs.h> +#endif + +#ifdef HAVE_SYS_STATFS_H +#include <sys/statfs.h> +#endif + +#ifdef HAVE_SYS_MOUNT_H +#include 
<sys/mount.h> +#endif + +#ifdef HAVE_SYS_STATVFS_H +#include <sys/statvfs.h> +#endif + +// #1408 +#ifdef MAJOR_IN_MKDEV +#include <sys/mkdev.h> +#endif +#ifdef MAJOR_IN_SYSMACROS +#include <sys/sysmacros.h> +#endif + +#ifdef STORAGE_WITH_MATH +#include <math.h> +#include <float.h> +#endif + +#if defined(HAVE_INTTYPES_H) +#include <inttypes.h> +#elif defined(HAVE_STDINT_H) +#include <stdint.h> +#endif + +#ifdef NETDATA_WITH_ZLIB +#include <zlib.h> +#endif + +#ifdef HAVE_CAPABILITY +#include <sys/capability.h> +#endif + + +// ---------------------------------------------------------------------------- +// netdata common definitions + +#if (SIZEOF_VOID_P == 8) +#define ENVIRONMENT64 +#elif (SIZEOF_VOID_P == 4) +#define ENVIRONMENT32 +#else +#error "Cannot detect if this is a 32 or 64 bit CPU" +#endif + +#ifdef __GNUC__ +#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#endif // __GNUC__ + +#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL +#define NEVERNULL __attribute__((returns_nonnull)) +#else +#define NEVERNULL +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE +#define NOINLINE __attribute__((noinline)) +#else +#define NOINLINE +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC +#define MALLOCLIKE __attribute__((malloc)) +#else +#define MALLOCLIKE +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT +#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a))) +#else +#define PRINTFLIKE(f, a) +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN +#define NORETURN __attribute__ ((noreturn)) +#else +#define NORETURN +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT +#define WARNUNUSED __attribute__ ((warn_unused_result)) +#else +#define WARNUNUSED +#endif + +#ifdef abs +#undef abs +#endif +#define abs(x) (((x) < 0)? (-(x)) : (x)) + +#define GUID_LEN 36 + +extern void netdata_fix_chart_id(char *s); +extern void netdata_fix_chart_name(char *s); + +extern void strreverse(char* begin, char* end); +extern char *mystrsep(char **ptr, char *s); +extern char *trim(char *s); // remove leading and trailing spaces; may return NULL +extern char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL + +extern int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args); +extern int snprintfz(char *dst, size_t n, const char *fmt, ...) 
PRINTFLIKE(3, 4); + +// memory allocation functions that handle failures +#ifdef NETDATA_LOG_ALLOCATIONS +extern __thread size_t log_thread_memory_allocations; +#define strdupz(s) strdupz_int(__FILE__, __FUNCTION__, __LINE__, s) +#define callocz(nmemb, size) callocz_int(__FILE__, __FUNCTION__, __LINE__, nmemb, size) +#define mallocz(size) mallocz_int(__FILE__, __FUNCTION__, __LINE__, size) +#define reallocz(ptr, size) reallocz_int(__FILE__, __FUNCTION__, __LINE__, ptr, size) +#define freez(ptr) freez_int(__FILE__, __FUNCTION__, __LINE__, ptr) + +extern char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s); +extern void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size); +extern void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size); +extern void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size); +extern void freez_int(const char *file, const char *function, const unsigned long line, void *ptr); +#else // NETDATA_LOG_ALLOCATIONS +extern char *strdupz(const char *s) MALLOCLIKE NEVERNULL; +extern void *callocz(size_t nmemb, size_t size) MALLOCLIKE NEVERNULL; +extern void *mallocz(size_t size) MALLOCLIKE NEVERNULL; +extern void *reallocz(void *ptr, size_t size) MALLOCLIKE NEVERNULL; +extern void freez(void *ptr); +#endif // NETDATA_LOG_ALLOCATIONS + +extern void json_escape_string(char *dst, const char *src, size_t size); +extern void json_fix_string(char *s); + +extern void *mymmap(const char *filename, size_t size, int flags, int ksm); +extern int memory_file_save(const char *filename, void *mem, size_t size); + +extern int fd_is_valid(int fd); + +extern struct rlimit rlimit_nofile; + +extern int enable_ksm; + +extern char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len); + +extern int verify_netdata_host_prefix(); + +extern int recursively_delete_dir(const char *path, const char *reason); + +extern volatile sig_atomic_t netdata_exit; +extern const char *os_type; + +extern const char *program_version; + +extern char *strdupz_path_subpath(const char *path, const char *subpath); +extern int path_is_dir(const char *path, const char *subpath); +extern int path_is_file(const char *path, const char *subpath); +extern void recursive_config_double_dir_load( + const char *user_path + , const char *stock_path + , const char *subpath + , int (*callback)(const char *filename, void *data) + , void *data + , size_t depth +); + +/* fix for alpine linux */ +#ifndef RUSAGE_THREAD +#ifdef RUSAGE_CHILDREN +#define RUSAGE_THREAD RUSAGE_CHILDREN +#endif +#endif + +#define BITS_IN_A_KILOBIT 1000 + + +extern void netdata_cleanup_and_exit(int ret) NORETURN; +extern char *netdata_configured_host_prefix; + +#include "os.h" +#include "storage_number/storage_number.h" +#include "threads/threads.h" +#include "buffer/buffer.h" +#include "locks/locks.h" +#include "avl/avl.h" +#include "inlined.h" +#include "clocks/clocks.h" +#include "popen/popen.h" +#include "simple_pattern/simple_pattern.h" +#include "socket/socket.h" +#include "config/appconfig.h" +#include "log/log.h" +#include "procfile/procfile.h" +#include "dictionary/dictionary.h" +#include "eval/eval.h" +#include "statistical/statistical.h" +#include "adaptive_resortable_list/adaptive_resortable_list.h" +#include "url/url.h" + +#endif // NETDATA_LIB_H diff --git a/libnetdata/locks/Makefile.am b/libnetdata/locks/Makefile.am new file mode 100644 index 
000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/locks/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/locks/Makefile.in b/libnetdata/locks/Makefile.in new file mode 100644 index 000000000..38520f78e --- /dev/null +++ b/libnetdata/locks/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/locks +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 
\ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = 
@abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/locks/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/locks/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c new file mode 100644 index 000000000..91e226902 --- /dev/null +++ b/libnetdata/locks/locks.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +// ---------------------------------------------------------------------------- +// automatic thread cancelability management, based on locks + +static __thread int netdata_thread_first_cancelability = 0; +static __thread int netdata_thread_lock_cancelability = 0; + +inline void netdata_thread_disable_cancelability(void) { + int old; + int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old); + if(ret != 0) + error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret); + else { + if(!netdata_thread_lock_cancelability) + netdata_thread_first_cancelability = old; + + netdata_thread_lock_cancelability++; + } +} + +inline void netdata_thread_enable_cancelability(void) { + if(netdata_thread_lock_cancelability < 1) { + error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d on thread %s - results will be undefined - please report this!", netdata_thread_lock_cancelability, netdata_thread_tag()); + } + else if(netdata_thread_lock_cancelability == 1) { + int old = 1; + int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old); + if(ret != 0) + error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret); + else { + if(old != PTHREAD_CANCEL_DISABLE) + error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report 
this!", netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, (old == PTHREAD_CANCEL_ENABLE)?"ENABLED":"UNKNOWN", old); + } + + netdata_thread_lock_cancelability = 0; + } + else + netdata_thread_lock_cancelability--; +} + +// ---------------------------------------------------------------------------- +// mutex + +int __netdata_mutex_init(netdata_mutex_t *mutex) { + int ret = pthread_mutex_init(mutex, NULL); + if(unlikely(ret != 0)) + error("MUTEX_LOCK: failed to initialize (code %d).", ret); + return ret; +} + +int __netdata_mutex_lock(netdata_mutex_t *mutex) { + netdata_thread_disable_cancelability(); + + int ret = pthread_mutex_lock(mutex); + if(unlikely(ret != 0)) { + netdata_thread_enable_cancelability(); + error("MUTEX_LOCK: failed to get lock (code %d)", ret); + } + return ret; +} + +int __netdata_mutex_trylock(netdata_mutex_t *mutex) { + netdata_thread_disable_cancelability(); + + int ret = pthread_mutex_trylock(mutex); + if(ret != 0) + netdata_thread_enable_cancelability(); + + return ret; +} + +int __netdata_mutex_unlock(netdata_mutex_t *mutex) { + int ret = pthread_mutex_unlock(mutex); + if(unlikely(ret != 0)) + error("MUTEX_LOCK: failed to unlock (code %d).", ret); + else + netdata_thread_enable_cancelability(); + + return ret; +} + +int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) from %lu@%s, %s()", mutex, line, file, function); + } + + int ret = __netdata_mutex_init(mutex); + + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) from %lu@%s, %s()", mutex, line, file, function); + } + + int ret = __netdata_mutex_lock(mutex); + + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) from %lu@%s, %s()", mutex, line, file, function); + } + + int ret = __netdata_mutex_trylock(mutex); + + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) from %lu@%s, %s()", mutex, line, file, function); + } + + int ret = __netdata_mutex_unlock(mutex); + + debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + + +// 
---------------------------------------------------------------------------- +// r/w lock + +int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) { + int ret = pthread_rwlock_destroy(rwlock); + if(unlikely(ret != 0)) + error("RW_LOCK: failed to destroy lock (code %d)", ret); + return ret; +} + +int __netdata_rwlock_init(netdata_rwlock_t *rwlock) { + int ret = pthread_rwlock_init(rwlock, NULL); + if(unlikely(ret != 0)) + error("RW_LOCK: failed to initialize lock (code %d)", ret); + return ret; +} + +int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) { + netdata_thread_disable_cancelability(); + + int ret = pthread_rwlock_rdlock(rwlock); + if(unlikely(ret != 0)) { + netdata_thread_enable_cancelability(); + error("RW_LOCK: failed to obtain read lock (code %d)", ret); + } + + return ret; +} + +int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) { + netdata_thread_disable_cancelability(); + + int ret = pthread_rwlock_wrlock(rwlock); + if(unlikely(ret != 0)) { + error("RW_LOCK: failed to obtain write lock (code %d)", ret); + netdata_thread_enable_cancelability(); + } + + return ret; +} + +int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) { + int ret = pthread_rwlock_unlock(rwlock); + if(unlikely(ret != 0)) + error("RW_LOCK: failed to release lock (code %d)", ret); + else + netdata_thread_enable_cancelability(); + + return ret; +} + +int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) { + netdata_thread_disable_cancelability(); + + int ret = pthread_rwlock_tryrdlock(rwlock); + if(ret != 0) + netdata_thread_enable_cancelability(); + + return ret; +} + +int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) { + netdata_thread_disable_cancelability(); + + int ret = pthread_rwlock_trywrlock(rwlock); + if(ret != 0) + netdata_thread_enable_cancelability(); + + return ret; +} + + +int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_destroy(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_init(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_rdlock(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int 
netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_wrlock(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_unlock(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_tryrdlock(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} + +int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) { + usec_t start = 0; + (void)start; + + if(unlikely(debug_flags & D_LOCKS)) { + start = now_boottime_usec(); + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function); + } + + int ret = __netdata_rwlock_trywrlock(rwlock); + + debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function); + + return ret; +} diff --git a/libnetdata/locks/locks.h b/libnetdata/locks/locks.h new file mode 100644 index 000000000..850dd7ebc --- /dev/null +++ b/libnetdata/locks/locks.h @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOCKS_H +#define NETDATA_LOCKS_H 1 + +#include "../libnetdata.h" + +typedef pthread_mutex_t netdata_mutex_t; +#define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER + +typedef pthread_rwlock_t netdata_rwlock_t; +#define NETDATA_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER + +extern int __netdata_mutex_init(netdata_mutex_t *mutex); +extern int __netdata_mutex_lock(netdata_mutex_t *mutex); +extern int __netdata_mutex_trylock(netdata_mutex_t *mutex); +extern int __netdata_mutex_unlock(netdata_mutex_t *mutex); + +extern int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock); +extern int __netdata_rwlock_init(netdata_rwlock_t *rwlock); +extern int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock); +extern int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock); +extern int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock); +extern int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock); +extern int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock); + +extern int 
netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex); +extern int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex); +extern int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex); +extern int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex); + +extern int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); +extern int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); +extern int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); +extern int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); +extern int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); +extern int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); +extern int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock); + +extern void netdata_thread_disable_cancelability(void); +extern void netdata_thread_enable_cancelability(void); + +#ifdef NETDATA_INTERNAL_CHECKS + +#define netdata_mutex_init(mutex) netdata_mutex_init_debug(__FILE__, __FUNCTION__, __LINE__, mutex) +#define netdata_mutex_lock(mutex) netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex) +#define netdata_mutex_trylock(mutex) netdata_mutex_trylock_debug(__FILE__, __FUNCTION__, __LINE__, mutex) +#define netdata_mutex_unlock(mutex) netdata_mutex_unlock_debug(__FILE__, __FUNCTION__, __LINE__, mutex) + +#define netdata_rwlock_destroy(rwlock) netdata_rwlock_destroy_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) +#define netdata_rwlock_init(rwlock) netdata_rwlock_init_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) +#define netdata_rwlock_rdlock(rwlock) netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) +#define netdata_rwlock_wrlock(rwlock) netdata_rwlock_wrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) +#define netdata_rwlock_unlock(rwlock) netdata_rwlock_unlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) +#define netdata_rwlock_tryrdlock(rwlock) netdata_rwlock_tryrdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) +#define netdata_rwlock_trywrlock(rwlock) netdata_rwlock_trywrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock) + +#else // !NETDATA_INTERNAL_CHECKS + +#define netdata_mutex_init(mutex) __netdata_mutex_init(mutex) +#define netdata_mutex_lock(mutex) __netdata_mutex_lock(mutex) +#define netdata_mutex_trylock(mutex) __netdata_mutex_trylock(mutex) +#define netdata_mutex_unlock(mutex) __netdata_mutex_unlock(mutex) + +#define netdata_rwlock_destroy(rwlock) __netdata_rwlock_destroy(rwlock) +#define netdata_rwlock_init(rwlock) __netdata_rwlock_init(rwlock) +#define netdata_rwlock_rdlock(rwlock) __netdata_rwlock_rdlock(rwlock) +#define netdata_rwlock_wrlock(rwlock) __netdata_rwlock_wrlock(rwlock) +#define netdata_rwlock_unlock(rwlock) __netdata_rwlock_unlock(rwlock) +#define netdata_rwlock_tryrdlock(rwlock) __netdata_rwlock_tryrdlock(rwlock) +#define 
netdata_rwlock_trywrlock(rwlock) __netdata_rwlock_trywrlock(rwlock) + +#endif // NETDATA_INTERNAL_CHECKS + +#endif //NETDATA_LOCKS_H diff --git a/libnetdata/log/Makefile.am b/libnetdata/log/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/log/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/log/Makefile.in b/libnetdata/log/Makefile.in new file mode 100644 index 000000000..abf812b67 --- /dev/null +++ b/libnetdata/log/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/log +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = 
@UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/log/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/log/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c new file mode 100644 index 000000000..198e98bd9 --- /dev/null +++ b/libnetdata/log/log.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +int web_server_is_multithreaded = 1; + +const char *program_name = ""; +uint64_t debug_flags = DEBUG; + +int access_log_syslog = 1; +int error_log_syslog = 1; +int output_log_syslog = 1; // debug log + +int stdaccess_fd = -1; +FILE *stdaccess = NULL; + +const char *stdaccess_filename = NULL; +const char *stderr_filename = NULL; +const char *stdout_filename = NULL; + +void syslog_init(void) { + static int i = 0; + + if(!i) { + openlog(program_name, LOG_PID, LOG_DAEMON); + i = 1; + } +} + +#define LOG_DATE_LENGTH 26 + +static inline void log_date(char *buffer, size_t len) { + if(unlikely(!buffer || !len)) + return; + + time_t t; + struct tm *tmp, tmbuf; + + t = now_realtime_sec(); + tmp = localtime_r(&t, &tmbuf); + + if (tmp == NULL) { + buffer[0] = '\0'; + return; + } + + if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0)) + buffer[0] = '\0'; + + buffer[len - 1] = '\0'; +} + +static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER; +static inline void log_lock() { + netdata_mutex_lock(&log_mutex); +} +static inline void log_unlock() { + netdata_mutex_unlock(&log_mutex); +} + +static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_syslog, int is_stdaccess, int *fd_ptr) { + int f, devnull = 0; + + if(!filename || !*filename || !strcmp(filename, "none") || !strcmp(filename, "/dev/null")) { + filename = "/dev/null"; + devnull = 1; + } + + if(!strcmp(filename, "syslog")) { + filename = "/dev/null"; + devnull = 1; + syslog_init(); + if(enabled_syslog) *enabled_syslog = 
1; + } + else if(enabled_syslog) *enabled_syslog = 0; + + // don't do anything if the user is willing + // to have the standard one + if(!strcmp(filename, "system")) { + if(fd != -1 && !is_stdaccess) { + if(fd_ptr) *fd_ptr = fd; + return fp; + } + + filename = "stderr"; + } + + if(!strcmp(filename, "stdout")) + f = STDOUT_FILENO; + + else if(!strcmp(filename, "stderr")) + f = STDERR_FILENO; + + else { + f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664); + if(f == -1) { + error("Cannot open file '%s'. Leaving %d to its default.", filename, fd); + if(fd_ptr) *fd_ptr = fd; + return fp; + } + } + + // if there is a level-2 file pointer + // flush it before switching the level-1 fds + if(fp) + fflush(fp); + + if(devnull && is_stdaccess) { + fd = -1; + fp = NULL; + } + + if(fd != f && fd != -1) { + // it automatically closes + int t = dup2(f, fd); + if (t == -1) { + error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename); + close(f); + if(fd_ptr) *fd_ptr = fd; + return fp; + } + // info("dup2() new fd %d to old fd %d for '%s'", f, fd, filename); + close(f); + } + else fd = f; + + if(!fp) { + fp = fdopen(fd, "a"); + if (!fp) + error("Cannot fdopen() fd %d ('%s')", fd, filename); + else { + if (setvbuf(fp, NULL, _IOLBF, 0) != 0) + error("Cannot set line buffering on fd %d ('%s')", fd, filename); + } + } + + if(fd_ptr) *fd_ptr = fd; + return fp; +} + +void reopen_all_log_files() { + if(stdout_filename) + open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL); + + if(stderr_filename) + open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL); + + if(stdaccess_filename) + stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd); +} + +void open_all_log_files() { + // disable stdin + open_log_file(STDIN_FILENO, stdin, "/dev/null", NULL, 0, NULL); + + open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL); + open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL); + stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd); +} + +// ---------------------------------------------------------------------------- +// error log throttling + +time_t error_log_throttle_period = 1200; +unsigned long error_log_errors_per_period = 200; +unsigned long error_log_errors_per_period_backup = 0; + +int error_log_limit(int reset) { + static time_t start = 0; + static unsigned long counter = 0, prevented = 0; + + // fprintf(stderr, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period); + + // do not throttle if the period is 0 + if(error_log_throttle_period == 0) + return 0; + + // prevent all logs if the errors per period is 0 + if(error_log_errors_per_period == 0) +#ifdef NETDATA_INTERNAL_CHECKS + return 0; +#else + return 1; +#endif + + time_t now = now_monotonic_sec(); + if(!start) start = now; + + if(reset) { + if(prevented) { + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + fprintf(stderr, "%s: %s LOG FLOOD PROTECTION reset for process '%s' (prevented %lu logs in the last %ld seconds).\n" + , date + , program_name + , program_name + , prevented + , now - start + ); + } + + start = now; + counter = 0; + prevented = 0; + } + + // detect if we log too much + counter++; + + if(now - start > error_log_throttle_period) { + if(prevented) { + char 
date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + fprintf(stderr, "%s: %s LOG FLOOD PROTECTION resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n" + , date + , program_name + , program_name + , prevented + , error_log_throttle_period + ); + } + + // restart the period accounting + start = now; + counter = 1; + prevented = 0; + + // log this error + return 0; + } + + if(counter > error_log_errors_per_period) { + if(!prevented) { + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + fprintf(stderr, "%s: %s LOG FLOOD PROTECTION too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n" + , date + , program_name + , counter + , now - start + , error_log_errors_per_period + , error_log_throttle_period + , program_name + , start + error_log_throttle_period - now); + } + + prevented++; + + // prevent logging this error +#ifdef NETDATA_INTERNAL_CHECKS + return 0; +#else + return 1; +#endif + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// debug log + +void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) { + va_list args; + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + + va_start( args, fmt ); + printf("%s: %s DEBUG : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function); + vprintf(fmt, args); + va_end( args ); + putchar('\n'); + + if(output_log_syslog) { + va_start( args, fmt ); + vsyslog(LOG_ERR, fmt, args ); + va_end( args ); + } + + fflush(stdout); +} + +// ---------------------------------------------------------------------------- +// info log + +void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) +{ + va_list args; + + // prevent logging too much + if(error_log_limit(0)) return; + + if(error_log_syslog) { + va_start( args, fmt ); + vsyslog(LOG_INFO, fmt, args ); + va_end( args ); + } + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + + log_lock(); + + va_start( args, fmt ); + if(debug_flags) fprintf(stderr, "%s: %s INFO : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function); + else fprintf(stderr, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag()); + vfprintf( stderr, fmt, args ); + va_end( args ); + + fputc('\n', stderr); + + log_unlock(); +} + +// ---------------------------------------------------------------------------- +// error log + +#if defined(STRERROR_R_CHAR_P) +// GLIBC version of strerror_r +static const char *strerror_result(const char *a, const char *b) { (void)b; return a; } +#elif defined(HAVE_STRERROR_R) +// POSIX version of strerror_r +static const char *strerror_result(int a, const char *b) { (void)a; return b; } +#elif defined(HAVE_C__GENERIC) + +// what a trick! +// http://stackoverflow.com/questions/479207/function-overloading-in-c +static const char *strerror_result_int(int a, const char *b) { (void)a; return b; } +static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; } + +#define strerror_result(a, b) _Generic((a), \ + int: strerror_result_int, \ + char *: strerror_result_string \ + )(a, b) + +#else +#error "cannot detect the format of function strerror_r()" +#endif + +void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... 
) { + // save a copy of errno - just in case this function generates a new error + int __errno = errno; + + va_list args; + + // prevent logging too much + if(error_log_limit(0)) return; + + if(error_log_syslog) { + va_start( args, fmt ); + vsyslog(LOG_ERR, fmt, args ); + va_end( args ); + } + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + + log_lock(); + + va_start( args, fmt ); + if(debug_flags) fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function); + else fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag()); + vfprintf( stderr, fmt, args ); + va_end( args ); + + if(__errno) { + char buf[1024]; + fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf)); + errno = 0; + } + else + fputc('\n', stderr); + + log_unlock(); +} + +void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) { + va_list args; + + if(error_log_syslog) { + va_start( args, fmt ); + vsyslog(LOG_CRIT, fmt, args ); + va_end( args ); + } + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + + log_lock(); + + va_start( args, fmt ); + if(debug_flags) fprintf(stderr, "%s: %s FATAL : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function); + else fprintf(stderr, "%s: %s FATAL : %s :", date, program_name, netdata_thread_tag()); + vfprintf( stderr, fmt, args ); + va_end( args ); + + perror(" # "); + fputc('\n', stderr); + + log_unlock(); + + netdata_cleanup_and_exit(1); +} + +// ---------------------------------------------------------------------------- +// access log + +void log_access( const char *fmt, ... 
) { + va_list args; + + if(access_log_syslog) { + va_start( args, fmt ); + vsyslog(LOG_INFO, fmt, args ); + va_end( args ); + } + + if(stdaccess) { + static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER; + + if(web_server_is_multithreaded) + netdata_mutex_lock(&access_mutex); + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH); + fprintf(stdaccess, "%s: ", date); + + va_start( args, fmt ); + vfprintf( stdaccess, fmt, args ); + va_end( args ); + fputc('\n', stdaccess); + + if(web_server_is_multithreaded) + netdata_mutex_unlock(&access_mutex); + } +} diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h new file mode 100644 index 000000000..48e1599a7 --- /dev/null +++ b/libnetdata/log/log.h @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOG_H +#define NETDATA_LOG_H 1 + +#include "../libnetdata.h" + +#define D_WEB_BUFFER 0x0000000000000001 +#define D_WEB_CLIENT 0x0000000000000002 +#define D_LISTENER 0x0000000000000004 +#define D_WEB_DATA 0x0000000000000008 +#define D_OPTIONS 0x0000000000000010 +#define D_PROCNETDEV_LOOP 0x0000000000000020 +#define D_RRD_STATS 0x0000000000000040 +#define D_WEB_CLIENT_ACCESS 0x0000000000000080 +#define D_TC_LOOP 0x0000000000000100 +#define D_DEFLATE 0x0000000000000200 +#define D_CONFIG 0x0000000000000400 +#define D_PLUGINSD 0x0000000000000800 +#define D_CHILDS 0x0000000000001000 +#define D_EXIT 0x0000000000002000 +#define D_CHECKS 0x0000000000004000 +#define D_NFACCT_LOOP 0x0000000000008000 +#define D_PROCFILE 0x0000000000010000 +#define D_RRD_CALLS 0x0000000000020000 +#define D_DICTIONARY 0x0000000000040000 +#define D_MEMORY 0x0000000000080000 +#define D_CGROUP 0x0000000000100000 +#define D_REGISTRY 0x0000000000200000 +#define D_VARIABLES 0x0000000000400000 +#define D_HEALTH 0x0000000000800000 +#define D_CONNECT_TO 0x0000000001000000 +#define D_RRDHOST 0x0000000002000000 +#define D_LOCKS 0x0000000004000000 +#define D_BACKEND 0x0000000008000000 +#define D_STATSD 0x0000000010000000 +#define D_POLLFD 0x0000000020000000 +#define D_STREAM 0x0000000040000000 +#define D_SYSTEM 0x8000000000000000 + +//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS) +//#define DEBUG 0xffffffff +#define DEBUG (0) + +extern int web_server_is_multithreaded; + +extern uint64_t debug_flags; + +extern const char *program_name; + +extern int stdaccess_fd; +extern FILE *stdaccess; + +extern const char *stdaccess_filename; +extern const char *stderr_filename; +extern const char *stdout_filename; + +extern int access_log_syslog; +extern int error_log_syslog; +extern int output_log_syslog; + +extern time_t error_log_throttle_period; +extern unsigned long error_log_errors_per_period, error_log_errors_per_period_backup; +extern int error_log_limit(int reset); + +extern void open_all_log_files(); +extern void reopen_all_log_files(); + +static inline void debug_dummy(void) {} + +#define error_log_limit_reset() do { error_log_errors_per_period = error_log_errors_per_period_backup; error_log_limit(1); } while(0) +#define error_log_limit_unlimited() do { \ + error_log_limit_reset(); \ + error_log_errors_per_period = ((error_log_errors_per_period_backup * 10) < 10000) ? 10000 : (error_log_errors_per_period_backup * 10); \ + } while(0) + +#ifdef NETDATA_INTERNAL_CHECKS +#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0) +#else +#define debug(type, args...) debug_dummy() +#endif + +#define info(args...) 
info_int(__FILE__, __FUNCTION__, __LINE__, ##args) +#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args) +#define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args) +#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args) + +extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5); +extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5); +extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6); +extern void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5); +extern void log_access( const char *fmt, ... ) PRINTFLIKE(1, 2); + +#endif /* NETDATA_LOG_H */ diff --git a/libnetdata/os.c b/libnetdata/os.c new file mode 100644 index 000000000..0248eb627 --- /dev/null +++ b/libnetdata/os.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "os.h" + +// ---------------------------------------------------------------------------- +// system functions +// to retrieve settings of the system + +int processors = 1; +long get_system_cpus(void) { + processors = 1; + +#ifdef __APPLE__ + int32_t tmp_processors; + + if (unlikely(GETSYSCTL_BY_NAME("hw.logicalcpu", tmp_processors))) { + error("Assuming system has %d processors.", processors); + } else { + processors = tmp_processors; + } + + return processors; +#elif __FreeBSD__ + int32_t tmp_processors; + + if (unlikely(GETSYSCTL_BY_NAME("hw.ncpu", tmp_processors))) { + error("Assuming system has %d processors.", processors); + } else { + processors = tmp_processors; + } + + return processors; +#else + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix); + + procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT); + if(!ff) { + error("Cannot open file '%s'. Assuming system has %d processors.", filename, processors); + return processors; + } + + ff = procfile_readall(ff); + if(!ff) { + error("Cannot open file '%s'. 
Assuming system has %d processors.", filename, processors); + return processors; + } + + processors = 0; + unsigned int i; + for(i = 0; i < procfile_lines(ff); i++) { + if(!procfile_linewords(ff, i)) continue; + + if(strncmp(procfile_lineword(ff, i, 0), "cpu", 3) == 0) processors++; + } + processors--; + if(processors < 1) processors = 1; + + procfile_close(ff); + + debug(D_SYSTEM, "System has %d processors.", processors); + return processors; + +#endif /* __APPLE__, __FreeBSD__ */ +} + +pid_t pid_max = 32768; +pid_t get_system_pid_max(void) { +#ifdef __APPLE__ + // As we currently do not know a solution to query pid_max from the os + // we use the number defined in bsd/sys/proc_internal.h in XNU sources + pid_max = 99999; + return pid_max; +#elif __FreeBSD__ + int32_t tmp_pid_max; + + if (unlikely(GETSYSCTL_BY_NAME("kern.pid_max", tmp_pid_max))) { + pid_max = 99999; + error("Assuming system's maximum pid is %d.", pid_max); + } else { + pid_max = tmp_pid_max; + } + + return pid_max; +#else + + static char read = 0; + if(unlikely(read)) return pid_max; + read = 1; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/pid_max", netdata_configured_host_prefix); + + unsigned long long max = 0; + if(read_single_number_file(filename, &max) != 0) { + error("Cannot open file '%s'. Assuming system supports %d pids.", filename, pid_max); + return pid_max; + } + + if(!max) { + error("Cannot parse file '%s'. Assuming system supports %d pids.", filename, pid_max); + return pid_max; + } + + pid_max = (pid_t) max; + return pid_max; + +#endif /* __APPLE__, __FreeBSD__ */ +} + +unsigned int system_hz; +void get_system_HZ(void) { + long ticks; + + if ((ticks = sysconf(_SC_CLK_TCK)) == -1) { + error("Cannot get system clock ticks"); + } + + system_hz = (unsigned int) ticks; +} + +// ===================================================================================================================== +// FreeBSD + +#if (TARGET_OS == OS_FREEBSD) + +int getsysctl_by_name(const char *name, void *ptr, size_t len) { + size_t nlen = len; + + if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) { + error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + return 1; + } + if (unlikely(nlen != len)) { + error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + return 1; + } + return 0; +} + +int getsysctl_simple(const char *name, int *mib, size_t miblen, void *ptr, size_t len) { + size_t nlen = len; + + if (unlikely(!mib[0])) + if (unlikely(getsysctl_mib(name, mib, miblen))) + return 1; + + if (unlikely(sysctl(mib, miblen, ptr, &nlen, NULL, 0) == -1)) { + error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + return 1; + } + if (unlikely(nlen != len)) { + error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + return 1; + } + + return 0; +} + +int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len) { + size_t nlen = *len; + + if (unlikely(!mib[0])) + if (unlikely(getsysctl_mib(name, mib, miblen))) + return 1; + + if (unlikely(sysctl(mib, miblen, ptr, len, NULL, 0) == -1)) { + error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + return 1; + } + if (unlikely(ptr != NULL && nlen != *len)) { + error("FREEBSD: sysctl(%s...) 
expected %lu, got %lu", name, (unsigned long)*len, (unsigned long)nlen); + return 1; + } + + return 0; +} + +int getsysctl_mib(const char *name, int *mib, size_t len) { + size_t nlen = len; + + if (unlikely(sysctlnametomib(name, mib, &nlen) == -1)) { + error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + return 1; + } + if (unlikely(nlen != len)) { + error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + return 1; + } + return 0; +} + + +#endif + + +// ===================================================================================================================== +// MacOS + +#if (TARGET_OS == OS_MACOS) + +int getsysctl_by_name(const char *name, void *ptr, size_t len) { + size_t nlen = len; + + if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) { + error("MACOS: sysctl(%s...) failed: %s", name, strerror(errno)); + return 1; + } + if (unlikely(nlen != len)) { + error("MACOS: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + return 1; + } + return 0; +} + +#endif // (TARGET_OS == OS_MACOS) + diff --git a/libnetdata/os.h b/libnetdata/os.h new file mode 100644 index 000000000..2494174bc --- /dev/null +++ b/libnetdata/os.h @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_OS_H +#define NETDATA_OS_H + +#include "libnetdata.h" + +// ===================================================================================================================== +// Linux + +#if (TARGET_OS == OS_LINUX) + + +// ===================================================================================================================== +// FreeBSD + +#elif (TARGET_OS == OS_FREEBSD) + +#include <sys/sysctl.h> + +#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var)) +extern int getsysctl_by_name(const char *name, void *ptr, size_t len); + +#define GETSYSCTL_MIB(name, mib) getsysctl_mib(name, mib, sizeof(mib)/sizeof(int)) + +extern int getsysctl_mib(const char *name, int *mib, size_t len); + +#define GETSYSCTL_SIMPLE(name, mib, var) getsysctl_simple(name, mib, sizeof(mib)/sizeof(int), &(var), sizeof(var)) +#define GETSYSCTL_WSIZE(name, mib, var, size) getsysctl_simple(name, mib, sizeof(mib)/sizeof(int), var, size) + +extern int getsysctl_simple(const char *name, int *mib, size_t miblen, void *ptr, size_t len); + +#define GETSYSCTL_SIZE(name, mib, size) getsysctl(name, mib, sizeof(mib)/sizeof(int), NULL, &(size)) +#define GETSYSCTL(name, mib, var, size) getsysctl(name, mib, sizeof(mib)/sizeof(int), &(var), &(size)) + +extern int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len); + + +// ===================================================================================================================== +// MacOS + +#elif (TARGET_OS == OS_MACOS) + +#include <sys/sysctl.h> + +#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var)) +extern int getsysctl_by_name(const char *name, void *ptr, size_t len); + + +// ===================================================================================================================== +// unknown O/S + +#else +#error unsupported operating system +#endif + + +// ===================================================================================================================== +// common for all O/S + +extern int processors; +extern long get_system_cpus(void); + +extern pid_t pid_max; +extern pid_t get_system_pid_max(void); + +extern unsigned int system_hz; 
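+// system_hz is filled in by get_system_HZ(), declared below, from
+// sysconf(_SC_CLK_TCK). A typical (hypothetical) use is converting kernel
+// tick counters into seconds, e.g.:
+//
+//     get_system_HZ();
+//     double seconds = (double)ticks / (double)system_hz;
+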
+extern void get_system_HZ(void); + +#endif //NETDATA_OS_H diff --git a/libnetdata/popen/Makefile.am b/libnetdata/popen/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/popen/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/popen/Makefile.in b/libnetdata/popen/Makefile.in new file mode 100644 index 000000000..0a699332f --- /dev/null +++ b/libnetdata/popen/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/popen +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ 
+VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/popen/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/popen/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c new file mode 100644 index 000000000..845363fd2 --- /dev/null +++ b/libnetdata/popen/popen.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +/* +struct mypopen { + pid_t pid; + FILE *fp; + struct mypopen *next; + struct mypopen *prev; +}; + +static struct mypopen *mypopen_root = NULL; + +static void mypopen_add(FILE *fp, pid_t *pid) { + struct mypopen *mp = malloc(sizeof(struct mypopen)); + if(!mp) { + fatal("Cannot allocate %zu bytes", sizeof(struct mypopen)) + return; + } + + mp->fp = fp; + mp->pid = pid; + mp->next = popen_root; + mp->prev = NULL; + if(mypopen_root) mypopen_root->prev = mp; + mypopen_root = mp; +} + +static void mypopen_del(FILE *fp) { + struct mypopen *mp; + + for(mp = mypopen_root; mp; mp = mp->next) + if(mp->fd == fp) break; + + if(!mp) error("Cannot find mypopen() file pointer in open childs."); + else { + if(mp->next) mp->next->prev = mp->prev; + if(mp->prev) mp->prev->next = mp->next; + if(mypopen_root == mp) mypopen_root = mp->next; + free(mp); + } +} +*/ +#define PIPE_READ 0 +#define PIPE_WRITE 1 + +FILE *mypopen(const char *command, volatile pid_t *pidptr) +{ + int pipefd[2]; + + if(pipe(pipefd) == -1) return NULL; + + int pid = fork(); + if(pid == -1) { + close(pipefd[PIPE_READ]); + close(pipefd[PIPE_WRITE]); + return NULL; + } + if(pid != 0) { + // the parent + *pidptr = pid; + close(pipefd[PIPE_WRITE]); + FILE *fp = fdopen(pipefd[PIPE_READ], "r"); + /*mypopen_add(fp, pid);*/ + return(fp); + } + // the child + + // close all files + int i; + for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) + if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); + + // move the pipe 
to stdout
+    if(pipefd[PIPE_WRITE] != STDOUT_FILENO) {
+        dup2(pipefd[PIPE_WRITE], STDOUT_FILENO);
+        close(pipefd[PIPE_WRITE]);
+    }
+
+#ifdef DETACH_PLUGINS_FROM_NETDATA
+    // this was an attempt to detach the child and use the suspend mode of charts.d
+    // unfortunately it does not work as expected.
+
+    // fork again to become session leader
+    pid = fork();
+    if(pid == -1)
+        error("pre-execution of command '%s' on pid %d: Cannot fork 2nd time.", command, getpid());
+
+    if(pid != 0) {
+        // the parent
+        exit(0);
+    }
+
+    // set a new process group id for just this child
+    if( setpgid(0, 0) != 0 )
+        error("pre-execution of command '%s' on pid %d: Cannot set a new process group.", command, getpid());
+
+    if( getpgid(0) != getpid() )
+        error("pre-execution of command '%s' on pid %d: Cannot set a new process group. Process group set is incorrect. Expected %d, found %d", command, getpid(), getpid(), getpgid(0));
+
+    if( setsid() != 0 )
+        error("pre-execution of command '%s' on pid %d: Cannot set session id.", command, getpid());
+
+    fprintf(stdout, "MYPID %d\n", getpid());
+    fflush(NULL);
+#endif
+
+    // reset all signals
+    signals_unblock();
+    signals_reset();
+
+    debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid());
+    execl("/bin/sh", "sh", "-c", command, NULL);
+    exit(1);
+}
+
+FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) {
+    int pipefd[2];
+
+    if(pipe(pipefd) == -1)
+        return NULL;
+
+    int pid = fork();
+    if(pid == -1) {
+        close(pipefd[PIPE_READ]);
+        close(pipefd[PIPE_WRITE]);
+        return NULL;
+    }
+    if(pid != 0) {
+        // the parent
+        *pidptr = pid;
+        close(pipefd[PIPE_WRITE]);
+        FILE *fp = fdopen(pipefd[PIPE_READ], "r");
+        return(fp);
+    }
+    // the child
+
+    // close all files
+    int i;
+    for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--)
+        if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i);
+
+    // move the pipe to stdout
+    if(pipefd[PIPE_WRITE] != STDOUT_FILENO) {
+        dup2(pipefd[PIPE_WRITE], STDOUT_FILENO);
+        close(pipefd[PIPE_WRITE]);
+    }
+
+    execle("/bin/sh", "sh", "-c", command, NULL, env);
+    exit(1);
+}
+
+int mypclose(FILE *fp, pid_t pid) {
+    debug(D_EXIT, "Request to mypclose() on pid %d", pid);
+
+    /*mypopen_del(fp);*/
+
+    // close the pipe fd
+    // this is required in musl
+    // without it the children do not exit
+    close(fileno(fp));
+
+    // close the pipe file pointer
+    fclose(fp);
+
+    errno = 0;
+
+    siginfo_t info;
+    if(waitid(P_PID, (id_t) pid, &info, WEXITED) != -1) {
+        switch(info.si_code) {
+            case CLD_EXITED:
+                if(info.si_status)
+                    error("child pid %d exited with code %d.", info.si_pid, info.si_status);
+                return(info.si_status);
+
+            case CLD_KILLED:
+                error("child pid %d killed by signal %d.", info.si_pid, info.si_status);
+                return(-1);
+
+            case CLD_DUMPED:
+                error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status);
+                return(-2);
+
+            case CLD_STOPPED:
+                error("child pid %d stopped by signal %d.", info.si_pid, info.si_status);
+                return(0);
+
+            case CLD_TRAPPED:
+                error("child pid %d trapped by signal %d.", info.si_pid, info.si_status);
+                return(-4);
+
+            case CLD_CONTINUED:
+                error("child pid %d continued by signal %d.", info.si_pid, info.si_status);
+                return(0);
+
+            default:
+                error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status);
+                return(-5);
+        }
+    }
+    else
+        error("Cannot waitid() for pid %d", pid);
+
+    return 0;
+}
diff --git a/libnetdata/popen/popen.h b/libnetdata/popen/popen.h
new file mode 100644
index 000000000..90d4b829b
---
/dev/null +++ b/libnetdata/popen/popen.h @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_POPEN_H +#define NETDATA_POPEN_H 1 + +#include "../libnetdata.h" + +#define PIPE_READ 0 +#define PIPE_WRITE 1 + +extern FILE *mypopen(const char *command, volatile pid_t *pidptr); +extern FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env); +extern int mypclose(FILE *fp, pid_t pid); + +extern void signals_unblock(void); +extern void signals_reset(void); + +#endif /* NETDATA_POPEN_H */ diff --git a/libnetdata/procfile/Makefile.am b/libnetdata/procfile/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/procfile/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/procfile/Makefile.in b/libnetdata/procfile/Makefile.in new file mode 100644 index 000000000..643e00ba1 --- /dev/null +++ b/libnetdata/procfile/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/procfile +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/procfile/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/procfile/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md
new file mode 100644
index 000000000..279885f93
--- /dev/null
+++ b/libnetdata/procfile/README.md
@@ -0,0 +1,61 @@
+
+# PROCFILE
+
+procfile is a library for reading text data files (i.e. `/proc` files) in the fastest possible way.
+
+## How it works
+
+The library automatically adapts (through the iterations) its memory so that each file
+is read with a single `read()` call.
+
+Then the library splits the file into words, using the supplied separators.
+The library also supports quoted words (i.e. strings within which the separators are ignored).
+
+### Initialization
+
+Initially the caller:
+
+- calls `procfile_open()` to open the file and allocate the structures needed.
+
+### Iterations
+
+For each iteration, the caller:
+
+- calls `procfile_readall()` to read updated contents.
+  This call also rewinds the file (`lseek()` to 0) before reading it.
+
+  For every file, a [BUFFER](../buffer/) is used that is automatically adjusted to fit
+  the entire contents of the file. So the file is read with a single `read()` call
+  (providing atomicity / consistency when the data are read from the kernel).
+
+  Once the data are read, 2 arrays of pointers are updated:
+
+  - a `words` array, pointing to each word in the data read
+  - a `lines` array, pointing to the first word for each line
+
+  This is highly optimized. Both arrays are automatically adjusted to
+  fit all contents and are updated in a single pass on the data.
+
+  The library provides a number of macros:
+
+  - `procfile_lines()` returns the # of lines read
+  - `procfile_linewords()` returns the # of words in the given line
+  - `procfile_word()` returns a pointer to the given word #
+  - `procfile_line()` returns a pointer to the first word of the given line #
+  - `procfile_lineword()` returns a pointer to the given word # of the given line #
+
+### Cleanup
+
+When the caller exits:
+
+- calls `procfile_close()` to close the file and free all memory used.
+
+### Performance
+
+- a **Raspberry Pi 1** (the oldest single-core one) can process 5,000+ `/proc` files per second.
+- a **J1900 Celeron** processor can process 23,000+ `/proc` files per second per core.
+
+To achieve this kind of performance, the library tries to work in batches so that the code
+and the data are inside the processor's caches.
+
+This library is extensively used in netdata and its plugins.
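+
+### Example
+
+A minimal sketch of the iteration loop described above. The file shown is
+illustrative only, and the snippet assumes it runs inside a function
+returning `int`:
+
+```c
+// assumes "../libnetdata.h" is included (it pulls in stdio.h and unistd.h)
+procfile *ff = procfile_open("/proc/stat", NULL, PROCFILE_FLAG_DEFAULT);
+if(!ff) return 1;                          // open failed
+
+int iterations;
+for(iterations = 0; iterations < 10; iterations++) {
+    ff = procfile_readall(ff);             // re-read and re-parse; ff may be reallocated
+    if(!ff) return 1;                      // on error, the file has already been closed
+
+    size_t l, lines = procfile_lines(ff);
+    for(l = 0; l < lines; l++) {
+        size_t w, words = procfile_linewords(ff, l);
+        for(w = 0; w < words; w++)
+            printf("line %zu, word %zu: '%s'\n", l, w, procfile_lineword(ff, l, w));
+    }
+
+    sleep(1);                              // wait for the next iteration
+}
+
+procfile_close(ff);                        // close the file and free all memory
+```
+
+Passing `NULL` as the separators uses the library defaults (see
+`procfile_set_separators()` in `procfile.c`).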
diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c
new file mode 100644
index 000000000..4a812baab
--- /dev/null
+++ b/libnetdata/procfile/procfile.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define PF_PREFIX "PROCFILE"
+
+#define PFWORDS_INCREASE_STEP 200
+#define PFLINES_INCREASE_STEP 10
+#define PROCFILE_INCREMENT_BUFFER 512
+
+int procfile_open_flags = O_RDONLY;
+
+int procfile_adaptive_initial_allocation = 0;
+
+// if adaptive allocation is set, these store the
+// max values we have seen so far
+size_t procfile_max_lines = PFLINES_INCREASE_STEP;
+size_t procfile_max_words = PFWORDS_INCREASE_STEP;
+size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER;
+
+
+// ----------------------------------------------------------------------------
+
+char *procfile_filename(procfile *ff) {
+    if(ff->filename[0]) return ff->filename;
+
+    char buffer[FILENAME_MAX + 1];
+    snprintfz(buffer, FILENAME_MAX, "/proc/self/fd/%d", ff->fd);
+
+    ssize_t l = readlink(buffer, ff->filename, FILENAME_MAX);
+    if(unlikely(l == -1))
+        snprintfz(ff->filename, FILENAME_MAX, "unknown filename for fd %d", ff->fd);
+    else
+        ff->filename[l] = '\0';
+
+    // on non-Linux systems, something like this will be needed
+    // fcntl(ff->fd, F_GETPATH, ff->filename)
+
+    return ff->filename;
+}
+
+// ----------------------------------------------------------------------------
+// An array of words
+
+static inline void pfwords_add(procfile *ff, char *str) {
+    // debug(D_PROCFILE, PF_PREFIX ":   adding word No %d: '%s'", fw->len, str);
+
+    pfwords *fw = ff->words;
+    if(unlikely(fw->len == fw->size)) {
+        // debug(D_PROCFILE, PF_PREFIX ":   expanding words");
+
+        ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
+        fw->size += PFWORDS_INCREASE_STEP;
+    }
+
+    fw->words[fw->len++] = str;
+}
+
+NEVERNULL
+static inline pfwords *pfwords_new(void) {
+    // debug(D_PROCFILE, PF_PREFIX ":   initializing words");
+
+    size_t size = (procfile_adaptive_initial_allocation) ? procfile_max_words : PFWORDS_INCREASE_STEP;
+
+    pfwords *new = mallocz(sizeof(pfwords) + size * sizeof(char *));
+    new->len = 0;
+    new->size = size;
+    return new;
+}
+
+static inline void pfwords_reset(pfwords *fw) {
+    // debug(D_PROCFILE, PF_PREFIX ":   resetting words");
+    fw->len = 0;
+}
+
+static inline void pfwords_free(pfwords *fw) {
+    // debug(D_PROCFILE, PF_PREFIX ":   freeing words");
+
+    freez(fw);
+}
+
+
+// ----------------------------------------------------------------------------
+// An array of lines
+
+NEVERNULL
+static inline size_t *pflines_add(procfile *ff) {
+    // debug(D_PROCFILE, PF_PREFIX ":   adding line %d at word %d", fl->len, first_word);
+
+    pflines *fl = ff->lines;
+    if(unlikely(fl->len == fl->size)) {
+        // debug(D_PROCFILE, PF_PREFIX ":   expanding lines");
+
+        ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
+        fl->size += PFLINES_INCREASE_STEP;
+    }
+
+    ffline *ffl = &fl->lines[fl->len++];
+    ffl->words = 0;
+    ffl->first = ff->words->len;
+
+    return &ffl->words;
+}
+
+NEVERNULL
+static inline pflines *pflines_new(void) {
+    // debug(D_PROCFILE, PF_PREFIX ":   initializing lines");
+
+    size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_lines : PFLINES_INCREASE_STEP;
+
+    pflines *new = mallocz(sizeof(pflines) + size * sizeof(ffline));
+    new->len = 0;
+    new->size = size;
+    return new;
+}
+
+static inline void pflines_reset(pflines *fl) {
+    // debug(D_PROCFILE, PF_PREFIX ":   resetting lines");
+
+    fl->len = 0;
+}
+
+static inline void pflines_free(pflines *fl) {
+    // debug(D_PROCFILE, PF_PREFIX ":   freeing lines");
+
+    freez(fl);
+}
+
+
+// ----------------------------------------------------------------------------
+// The procfile
+
+void procfile_close(procfile *ff) {
+    if(unlikely(!ff)) return;
+
+    debug(D_PROCFILE, PF_PREFIX ": Closing file '%s'", procfile_filename(ff));
+
+    if(likely(ff->lines)) pflines_free(ff->lines);
+    if(likely(ff->words)) pfwords_free(ff->words);
+
+    if(likely(ff->fd != -1)) close(ff->fd);
+    freez(ff);
+}
+
+NOINLINE
+static void procfile_parser(procfile *ff) {
+    // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
+
+    char  *s = ff->data                 // our current position
+        , *e = &ff->data[ff->len]       // the terminating null
+        , *t = ff->data;                // the first character of a word (or quoted / parenthesized string)
+
+    // the lookup array to find our type of character
+    PF_CHAR_TYPE *separators = ff->separators;
+
+    char quote = 0;                     // the quote character - only when in quoted string
+    size_t opened = 0;                  // counts the number of open parenthesis
+
+    size_t *line_words = pflines_add(ff);
+
+    while(s < e) {
+        PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
+
+        // this is faster than a switch()
+        // read more here: http://lazarenko.me/switch/
+        if(likely(ct == PF_CHAR_IS_WORD)) {
+            s++;
+        }
+        else if(likely(ct == PF_CHAR_IS_SEPARATOR)) {
+            if(!quote && !opened) {
+                if (s != t) {
+                    // separator, but we have word before it
+                    *s = '\0';
+                    pfwords_add(ff, t);
+                    (*line_words)++;
+                    t = ++s;
+                }
+                else {
+                    // separator at the beginning
+                    // skip it
+                    t = ++s;
+                }
+            }
+            else {
+                // we are inside a quote or parenthesized string
+                s++;
+            }
+        }
+        else if(likely(ct == PF_CHAR_IS_NEWLINE)) {
+            // end of line
+
+            *s = '\0';
+            pfwords_add(ff, t);
+            (*line_words)++;
+            t = ++s;
+
+            // debug(D_PROCFILE, PF_PREFIX ":   ended line %d with %d words", l, ff->lines->lines[l].words);
+
+            line_words = pflines_add(ff);
+        }
+        else if(likely(ct == PF_CHAR_IS_QUOTE)) {
+            if(unlikely(!quote && s == t)) {
+                // quote opened at the beginning
+                quote = *s;
+                t = ++s;
+            }
+            else if(unlikely(quote && quote == *s)) {
+                // quote closed
+                quote = 0;
+
+                *s = '\0';
+                pfwords_add(ff, t);
+                (*line_words)++;
+                t = ++s;
+            }
+            else
+                s++;
+        }
+        else if(likely(ct == PF_CHAR_IS_OPEN)) {
+            if(s == t) {
+                opened++;
+                t = ++s;
+            }
+            else if(opened) {
+                opened++;
+                s++;
+            }
+            else
+                s++;
+        }
+        else if(likely(ct == PF_CHAR_IS_CLOSE)) {
+            if(opened) {
+                opened--;
+
+                if(!opened) {
+                    *s = '\0';
+                    pfwords_add(ff, t);
+                    (*line_words)++;
+                    t = ++s;
+                }
+                else
+                    s++;
+            }
+            else
+                s++;
+        }
+        else
+            fatal("Internal Error: procfile_parser() does not handle all the cases.");
+    }
+
+    if(likely(s > t && t < e)) {
+        // the last word
+        if(unlikely(ff->len >= ff->size)) {
+            // we are going to lose the last byte
+            s = &ff->data[ff->size - 1];
+        }
+
+        *s = '\0';
+        pfwords_add(ff, t);
+        (*line_words)++;
+        // t = ++s;
+    }
+}
+
+procfile *procfile_readall(procfile *ff) {
+    // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
+
+    ff->len = 0;    // zero the used size
+    ssize_t r = 1;  // read at least once
+    while(r > 0) {
+        ssize_t s = ff->len;
+        ssize_t x = ff->size - s;
+
+        if(unlikely(!x)) {
+            debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
+            ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
+            ff->size += PROCFILE_INCREMENT_BUFFER;
+        }
+
+        debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
+        r = read(ff->fd, &ff->data[s], ff->size - s);
+        if(unlikely(r == -1)) {
+            if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+            procfile_close(ff);
+            return NULL;
+        }
+
+        ff->len += r;
+    }
+
+    // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
+    if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
+        if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+        procfile_close(ff);
+        return NULL;
+    }
+
+    pflines_reset(ff->lines);
+    pfwords_reset(ff->words);
+    procfile_parser(ff);
+
+    if(unlikely(procfile_adaptive_initial_allocation)) {
+        if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
+        if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
+        if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
+    }
+
+    // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
+    return ff;
+}
+
+NOINLINE
+static void procfile_set_separators(procfile *ff, const char *separators) {
+    static PF_CHAR_TYPE def[256];
+    static char initialized = 0;
+
+    if(unlikely(!initialized)) {
+        // this is thread safe
+        // if initialized is zero, multiple threads may be executing
+        // this code at the same time, setting the exact same values in def[]
+        int i = 256;
+        while(i--) {
+            if(unlikely(i == '\n' || i == '\r'))
+                def[i] = PF_CHAR_IS_NEWLINE;
+
+            else if(unlikely(isspace(i) || !isprint(i)))
+                def[i] = PF_CHAR_IS_SEPARATOR;
+
+            else
+                def[i] = PF_CHAR_IS_WORD;
+        }
+
+        initialized = 1;
+    }
+
+    // copy the default
+    PF_CHAR_TYPE *ffs = ff->separators, *ffd = def, *ffe = &def[256];
+    while(ffd != ffe)
+        *ffs++ = *ffd++;
+
+    // set the separators
+    if(unlikely(!separators))
+        separators = " \t=|";
+
+    ffs = ff->separators;
+    const char *s = separators;
+    while(*s)
+        ffs[(int)*s++] = PF_CHAR_IS_SEPARATOR;
+}
+
+void procfile_set_quotes(procfile *ff, const char *quotes) {
+    PF_CHAR_TYPE *ffs = ff->separators;
+
+    // remove all quotes
+    int i = 256;
+    while(i--)
+        if(unlikely(ffs[i] == PF_CHAR_IS_QUOTE))
+            ffs[i] = PF_CHAR_IS_WORD;
+
+    // if nothing given, return
+    if(unlikely(!quotes || !*quotes))
+        return;
+
+    // set the quotes
+    const char *s = quotes;
+    while(*s)
+        ffs[(int)*s++] = PF_CHAR_IS_QUOTE;
+}
+
+void procfile_set_open_close(procfile *ff, const char *open, const char *close) {
+    PF_CHAR_TYPE *ffs = ff->separators;
+
+    // remove all open/close
+    int i = 256;
+    while(i--)
+        if(unlikely(ffs[i] == PF_CHAR_IS_OPEN || ffs[i] == PF_CHAR_IS_CLOSE))
+            ffs[i] = PF_CHAR_IS_WORD;
+
+    // if nothing given, return
+    if(unlikely(!open || !*open || !close || !*close))
+        return;
+
+    // set the openings
+    const char *s = open;
+    while(*s)
+        ffs[(int)*s++] = PF_CHAR_IS_OPEN;
+
+    // set the closings
+    s = close;
+    while(*s)
+        ffs[(int)*s++] = PF_CHAR_IS_CLOSE;
+}
+
+procfile *procfile_open(const char *filename, const char *separators, uint32_t flags) {
+    debug(D_PROCFILE, PF_PREFIX ": Opening file '%s'", filename);
+
+    int fd = open(filename, procfile_open_flags, 0666);
+    if(unlikely(fd == -1)) {
+        if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot open file '%s'", filename);
+        return NULL;
+    }
+
+    // info("PROCFILE: opened '%s' on fd %d", filename, fd);
+
+    size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_allocation : PROCFILE_INCREMENT_BUFFER;
+    procfile *ff = mallocz(sizeof(procfile) + size);
+
+    //strncpyz(ff->filename, filename, FILENAME_MAX);
+    ff->filename[0] = '\0';
+
+    ff->fd = fd;
+    ff->size = size;
+    ff->len = 0;
+    ff->flags = flags;
+
+    ff->lines = pflines_new();
+    ff->words = pfwords_new();
+
+    procfile_set_separators(ff, separators);
+
+    debug(D_PROCFILE, "File '%s' opened.", filename);
+    return ff;
+}
+
+procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags) {
+    if(unlikely(!ff)) return procfile_open(filename, separators, flags);
+
+    if(likely(ff->fd != -1)) {
+        // info("PROCFILE: closing fd %d", ff->fd);
+        close(ff->fd);
+    }
+
+    ff->fd = open(filename, procfile_open_flags, 0666);
+    if(unlikely(ff->fd == -1)) {
+        procfile_close(ff);
+        return NULL;
+    }
+
+    // info("PROCFILE: opened '%s' on fd %d", filename, ff->fd);
+
+    //strncpyz(ff->filename, filename, FILENAME_MAX);
+    ff->filename[0] = '\0';
+    ff->flags = flags;
+
+    // do not do the separators again if NULL is given
+    if(likely(separators)) procfile_set_separators(ff, separators);
+
+    return ff;
+}
+
+// ----------------------------------------------------------------------------
+// example parsing of procfile data
+
+void procfile_print(procfile *ff) {
+    size_t lines = procfile_lines(ff), l;
+    char *s;
+    (void)s;
+
+    debug(D_PROCFILE, "File '%s' with %zu lines and %zu words", procfile_filename(ff), ff->lines->len, ff->words->len);
+
+    for(l = 0; likely(l < lines) ;l++) {
+        size_t words = procfile_linewords(ff, l);
+
+        debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, ff->lines->lines[l].first, ff->lines->lines[l].words);
+
+        size_t w;
+        for(w = 0; likely(w < words) ;w++) {
+            s = procfile_lineword(ff, l, w);
+            debug(D_PROCFILE, "     [%zu.%zu] '%s'", l, w, s);
+        }
+    }
+}
diff --git a/libnetdata/procfile/procfile.h b/libnetdata/procfile/procfile.h
new file mode 100644
index 000000000..b107358ab
--- /dev/null
+++ b/libnetdata/procfile/procfile.h
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROCFILE_H
+#define NETDATA_PROCFILE_H 1
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// An array of words
+
+typedef struct {
+    size_t len;     // used entries
+    size_t size;    // capacity
+    char *words[];  // array of pointers
+} pfwords;
+
+
+// ----------------------------------------------------------------------------
+// An array of lines
+
+typedef struct {
+    size_t words;   // how many words this line has
+    size_t first;   // the id of the first word of this line
+                    // in the words array
+} ffline;
+
+typedef struct {
+    size_t len;     // used entries
+    size_t size;    // capacity
+    ffline lines[]; // array of lines
+} pflines;
+
+
+// ----------------------------------------------------------------------------
+// The procfile
+
+#define PROCFILE_FLAG_DEFAULT             0x00000000
+#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001
+
+typedef enum procfile_separator {
+    PF_CHAR_IS_SEPARATOR,
+    PF_CHAR_IS_NEWLINE,
+    PF_CHAR_IS_WORD,
+    PF_CHAR_IS_QUOTE,
+    PF_CHAR_IS_OPEN,
+    PF_CHAR_IS_CLOSE
+} PF_CHAR_TYPE;
+
+typedef struct {
+    char filename[FILENAME_MAX + 1]; // not populated until procfile_filename() is called
+
+    uint32_t flags;
+    int fd;               // the file descriptor
+    size_t len;           // the bytes we have placed into data
+    size_t size;          // the bytes we have allocated for data
+    pflines *lines;
+    pfwords *words;
+    PF_CHAR_TYPE separators[256];
+    char data[];          // allocated buffer to keep file contents
+} procfile;
+
+// close the proc file and free all related memory
+extern void procfile_close(procfile *ff);
+
+// (re)read and parse the proc file
+extern procfile *procfile_readall(procfile *ff);
+
+// open a /proc or /sys file
+extern procfile *procfile_open(const char *filename, const char *separators, uint32_t flags);
+
+// re-open a file
+// if separators == NULL, the last separators are used
+extern procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags);
+
+// an example walk-through of a parsed procfile
+extern void procfile_print(procfile *ff);
+
+extern void procfile_set_quotes(procfile *ff, const char *quotes);
+extern void procfile_set_open_close(procfile *ff, const char *open, const char *close);
+
+extern char *procfile_filename(procfile *ff);
+
+// ----------------------------------------------------------------------------
+
+// set to the O_XXXX flags, to have procfile_open and procfile_reopen use them when opening proc files
+extern int procfile_open_flags;
+
+// set this to 1, to have procfile adapt its initial buffer allocation to the max allocation used so far
+extern int procfile_adaptive_initial_allocation;
+
+// return the number of lines present
+#define procfile_lines(ff) ((ff)->lines->len)
+
+// return the number of words of the Nth line
+#define procfile_linewords(ff, line) (((line) < procfile_lines(ff)) ? (ff)->lines->lines[(line)].words : 0)
+
+// return the Nth word of the file, or empty string
+#define procfile_word(ff, word) (((word) < (ff)->words->len) ? (ff)->words->words[(word)] : "")
+
+// return the first word of the Nth line, or empty string
+#define procfile_line(ff, line) (((line) < procfile_lines(ff)) ? procfile_word((ff), (ff)->lines->lines[(line)].first) : "")
+
+// return the Nth word of the given line, or empty string
+#define procfile_lineword(ff, line, word) (((line) < procfile_lines(ff) && (word) < procfile_linewords((ff), (line))) ? procfile_word((ff), (ff)->lines->lines[(line)].first + (word)) : "")
+
+#endif /* NETDATA_PROCFILE_H */
diff --git a/libnetdata/simple_pattern/Makefile.am b/libnetdata/simple_pattern/Makefile.am
new file mode 100644
index 000000000..1cb69ed99
--- /dev/null
+++ b/libnetdata/simple_pattern/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+    README.md \
+    $(NULL)
diff --git a/libnetdata/simple_pattern/Makefile.in b/libnetdata/simple_pattern/Makefile.in
new file mode 100644
index 000000000..575850f03
--- /dev/null
+++ b/libnetdata/simple_pattern/Makefile.in
@@ -0,0 +1,464 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+ +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/simple_pattern +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ 
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = 
@localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/simple_pattern/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/simple_pattern/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md
new file mode 100644
index 000000000..22ccf373a
--- /dev/null
+++ b/libnetdata/simple_pattern/README.md
@@ -0,0 +1,36 @@
+## netdata simple patterns
+
+Unix prefers regular expressions. But they are just too hard, too cryptic
+to use, write and understand.
+
+So, netdata supports **simple patterns**.
+
+Simple patterns are a space-separated list of words that can have `*`
+as a wildcard. Each word may use any number of `*`. Simple patterns
+allow **negative** matches by prefixing a word with `!`.
+
+So, `pattern = !*bad* *` will match anything, except all those that
+contain the string `bad`.
+
+Simple patterns are quite powerful: `pattern = *foobar* !foo* !*bar *`
+matches everything containing `foobar`, except strings that start
+with `foo` or end with `bar`.
+
+You can use the netdata command line to check simple patterns,
+like this:
+
+```sh
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world'
+RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world'
+
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world bar'
+RESULT: NOT MATCHED - pattern '*foobar* !foo* !*bar *' does not match 'hello world bar'
+
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world foobar'
+RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world foobar'
+```
+
+netdata stops processing at the first positive or negative match
+(left to right). If a string is matched by neither positive nor
+negative patterns, it is denied at the end.
+
diff --git a/libnetdata/simple_pattern/simple_pattern.c b/libnetdata/simple_pattern/simple_pattern.c
new file mode 100644
index 000000000..57b0aecc8
--- /dev/null
+++ b/libnetdata/simple_pattern/simple_pattern.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+struct simple_pattern {
+    const char *match;
+    size_t len;
+
+    SIMPLE_PREFIX_MODE mode;
+    char negative;
+
+    struct simple_pattern *child;
+
+    struct simple_pattern *next;
+};
+
+static inline struct simple_pattern *parse_pattern(char *str, SIMPLE_PREFIX_MODE default_mode) {
+    // fprintf(stderr, "PARSING PATTERN: '%s'\n", str);
+
+    SIMPLE_PREFIX_MODE mode;
+    struct simple_pattern *child = NULL;
+
+    char *s = str, *c = str;
+
+    // skip asterisks in front
+    while(*c == '*') c++;
+
+    // find the next asterisk
+    while(*c && *c != '*') c++;
+
+    // do we have an asterisk in the middle?
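+    // e.g. a pattern like "a*b*c" is decomposed recursively at this point:
+    // this level keeps the leading "a*" (a PREFIX match on "a"), while the
+    // recursive call below parses "*b*c" into its child chain, a SUBSTRING
+    // match on "b" whose own child is a SUFFIX match on "c"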
+ if(*c == '*' && c[1] != '\0') { + // yes, we have + child = parse_pattern(c, default_mode); + c[1] = '\0'; + } + + // check what this one matches + + size_t len = strlen(s); + if(len >= 2 && *s == '*' && s[len - 1] == '*') { + s[len - 1] = '\0'; + s++; + mode = SIMPLE_PATTERN_SUBSTRING; + } + else if(len >= 1 && *s == '*') { + s++; + mode = SIMPLE_PATTERN_SUFFIX; + } + else if(len >= 1 && s[len - 1] == '*') { + s[len - 1] = '\0'; + mode = SIMPLE_PATTERN_PREFIX; + } + else + mode = default_mode; + + // allocate the structure + struct simple_pattern *m = callocz(1, sizeof(struct simple_pattern)); + if(*s) { + m->match = strdupz(s); + m->len = strlen(m->match); + m->mode = mode; + } + else { + m->mode = SIMPLE_PATTERN_SUBSTRING; + } + + m->child = child; + + return m; +} + +SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode) { + struct simple_pattern *root = NULL, *last = NULL; + + if(unlikely(!list || !*list)) return root; + + int isseparator[256] = { + [' '] = 1 // space + , ['\t'] = 1 // tab + , ['\r'] = 1 // carriage return + , ['\n'] = 1 // new line + , ['\f'] = 1 // form feed + , ['\v'] = 1 // vertical tab + }; + + if (unlikely(separators && *separators)) { + memset(&isseparator[0], 0, sizeof(isseparator)); + while(*separators) isseparator[(unsigned char)*separators++] = 1; + } + + char *buf = mallocz(strlen(list) + 1); + const char *s = list; + + while(s && *s) { + buf[0] = '\0'; + char *c = buf; + + char negative = 0; + + // skip all spaces + while(isseparator[(unsigned char)*s]) + s++; + + if(*s == '!') { + negative = 1; + s++; + } + + // empty string + if(unlikely(!*s)) + break; + + // find the next space + char escape = 0; + while(*s) { + if(*s == '\\' && !escape) { + escape = 1; + s++; + } + else { + if (isseparator[(unsigned char)*s] && !escape) { + s++; + break; + } + + *c++ = *s++; + escape = 0; + } + } + + // terminate our string + *c = '\0'; + + // if we matched the empty string, skip it + if(unlikely(!*buf)) + continue; + + // fprintf(stderr, "FOUND PATTERN: '%s'\n", buf); + struct simple_pattern *m = parse_pattern(buf, default_mode); + m->negative = negative; + + // link it at the end + if(unlikely(!root)) + root = last = m; + else { + last->next = m; + last = m; + } + } + + freez(buf); + return (SIMPLE_PATTERN *)root; +} + +static inline char *add_wildcarded(const char *matched, size_t matched_size, char *wildcarded, size_t *wildcarded_size) { + //if(matched_size) { + // char buf[matched_size + 1]; + // strncpyz(buf, matched, matched_size); + // fprintf(stderr, "ADD WILDCARDED '%s' of length %zu\n", buf, matched_size); + //} + + if(unlikely(wildcarded && *wildcarded_size && matched && *matched && matched_size)) { + size_t wss = *wildcarded_size - 1; + size_t len = (matched_size < wss)?matched_size:wss; + if(likely(len)) { + strncpyz(wildcarded, matched, len); + + *wildcarded_size -= len; + return &wildcarded[len]; + } + } + + return wildcarded; +} + +static inline int match_pattern(struct simple_pattern *m, const char *str, size_t len, char *wildcarded, size_t *wildcarded_size) { + char *s; + + if(m->len <= len) { + switch(m->mode) { + case SIMPLE_PATTERN_SUBSTRING: + if(!m->len) return 1; + if((s = strstr(str, m->match))) { + wildcarded = add_wildcarded(str, s - str, wildcarded, wildcarded_size); + if(!m->child) { + wildcarded = add_wildcarded(&s[m->len], len - (&s[m->len] - str), wildcarded, wildcarded_size); + return 1; + } + return match_pattern(m->child, &s[m->len], len - (s - str) - m->len, wildcarded, 
wildcarded_size);
+            }
+            break;
+
+        case SIMPLE_PATTERN_PREFIX:
+            if(unlikely(strncmp(str, m->match, m->len) == 0)) {
+                if(!m->child) {
+                    wildcarded = add_wildcarded(&str[m->len], len - m->len, wildcarded, wildcarded_size);
+                    return 1;
+                }
+                return match_pattern(m->child, &str[m->len], len - m->len, wildcarded, wildcarded_size);
+            }
+            break;
+
+        case SIMPLE_PATTERN_SUFFIX:
+            if(unlikely(strcmp(&str[len - m->len], m->match) == 0)) {
+                wildcarded = add_wildcarded(str, len - m->len, wildcarded, wildcarded_size);
+                if(!m->child) return 1;
+                return 0;
+            }
+            break;
+
+        case SIMPLE_PATTERN_EXACT:
+        default:
+            if(unlikely(strcmp(str, m->match) == 0)) {
+                if(!m->child) return 1;
+                return 0;
+            }
+            break;
+        }
+    }
+
+    return 0;
+}
+
+int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size) {
+    struct simple_pattern *m, *root = (struct simple_pattern *)list;
+
+    if(unlikely(!root || !str || !*str)) return 0;
+
+    size_t len = strlen(str);
+    for(m = root; m ; m = m->next) {
+        char *ws = wildcarded;
+        size_t wss = wildcarded_size;
+        if(unlikely(ws)) *ws = '\0';
+
+        if (match_pattern(m, str, len, ws, &wss)) {
+
+            //if(ws && wss)
+            //    fprintf(stderr, "FINAL WILDCARDED '%s' of length %zu\n", ws, strlen(ws));
+
+            if (m->negative) return 0;
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+static inline void free_pattern(struct simple_pattern *m) {
+    if(!m) return;
+
+    free_pattern(m->child);
+    free_pattern(m->next);
+    freez((void *)m->match);
+    freez(m);
+}
+
+void simple_pattern_free(SIMPLE_PATTERN *list) {
+    if(!list) return;
+
+    free_pattern(((struct simple_pattern *)list));
+}
diff --git a/libnetdata/simple_pattern/simple_pattern.h b/libnetdata/simple_pattern/simple_pattern.h
new file mode 100644
index 000000000..b96a018ef
--- /dev/null
+++ b/libnetdata/simple_pattern/simple_pattern.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SIMPLE_PATTERN_H
+#define NETDATA_SIMPLE_PATTERN_H
+
+#include "../libnetdata.h"
+
+
+typedef enum {
+    SIMPLE_PATTERN_EXACT,
+    SIMPLE_PATTERN_PREFIX,
+    SIMPLE_PATTERN_SUFFIX,
+    SIMPLE_PATTERN_SUBSTRING
+} SIMPLE_PREFIX_MODE;
+
+typedef void SIMPLE_PATTERN;
+
+// create a simple_pattern from the string given
+// default_mode is used in cases where EXACT matches, without an asterisk,
+// should be considered PREFIX matches.
+extern SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode);
+
+// test if string str is matched by the pattern and fill 'wildcarded' with the parts matched by '*'
+extern int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size);
+
+// test if string str is matched by the pattern
+#define simple_pattern_matches(list, str) simple_pattern_matches_extract(list, str, NULL, 0)
+
+// free a simple_pattern that was created with simple_pattern_create()
+// list can be NULL, in which case, this does nothing.
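+//
+// minimal usage sketch (the pattern and string below are only illustrative):
+//
+//    SIMPLE_PATTERN *p = simple_pattern_create("*foobar* !foo* *", NULL, SIMPLE_PATTERN_EXACT);
+//    if(simple_pattern_matches(p, "hello foobar")) { /* allowed */ }
+//    simple_pattern_free(p);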
+extern void simple_pattern_free(SIMPLE_PATTERN *list); + +#endif //NETDATA_SIMPLE_PATTERN_H diff --git a/libnetdata/socket/Makefile.am b/libnetdata/socket/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/socket/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/socket/Makefile.in b/libnetdata/socket/Makefile.in new file mode 100644 index 000000000..45f13d068 --- /dev/null +++ b/libnetdata/socket/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/socket +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = 
@UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/socket/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/socket/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/socket/README.md b/libnetdata/socket/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c new file mode 100644 index 000000000..c266efeb4 --- /dev/null +++ b/libnetdata/socket/socket.c @@ -0,0 +1,1532 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +// -------------------------------------------------------------------------------------------------------------------- +// various library calls + +#ifdef __gnu_linux__ +#define LARGE_SOCK_SIZE 33554431 // don't ask why - I found it at brubeck source - I guess it is just a large number +#else +#define LARGE_SOCK_SIZE 4096 +#endif + +int sock_setnonblock(int fd) { + int flags; + + flags = fcntl(fd, F_GETFL); + flags |= O_NONBLOCK; + + int ret = fcntl(fd, F_SETFL, flags); + if(ret < 0) + error("Failed to set O_NONBLOCK on socket %d", fd); + + return ret; +} + +int sock_delnonblock(int fd) { + int flags; + + flags = fcntl(fd, F_GETFL); + flags &= ~O_NONBLOCK; + + int ret = fcntl(fd, F_SETFL, flags); + if(ret < 0) + error("Failed to remove O_NONBLOCK on socket %d", fd); + + return ret; +} + +int sock_setreuse(int fd, int reuse) { + int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse)); + + if(ret == -1) + error("Failed to set SO_REUSEADDR on socket %d", fd); + + return ret; +} + +int sock_setreuse_port(int fd, int reuse) { + int ret; + +#ifdef SO_REUSEPORT + ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse)); + if(ret == -1 && errno != ENOPROTOOPT) + error("failed to set SO_REUSEPORT on socket %d", fd); +#else + ret = -1; +#endif + + return ret; +} + +int sock_enlarge_in(int fd) { + int ret, bs = LARGE_SOCK_SIZE; + + ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs)); + + if(ret == -1) + error("Failed to set SO_RCVBUF on socket %d", fd); 
+ + return ret; +} + +int sock_enlarge_out(int fd) { + int ret, bs = LARGE_SOCK_SIZE; + ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs)); + + if(ret == -1) + error("Failed to set SO_SNDBUF on socket %d", fd); + + return ret; +} + + +// -------------------------------------------------------------------------------------------------------------------- + +char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port) { + char buffer[100 + 1]; + + switch(family) { + case AF_INET: + snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port); + break; + + case AF_INET6: + default: + snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port); + break; + + case AF_UNIX: + snprintfz(buffer, 100, "%s:%s", protocol, ip); + break; + } + + return strdupz(buffer); +} + +// -------------------------------------------------------------------------------------------------------------------- +// listening sockets + +int create_listen_socket_unix(const char *path, int listen_backlog) { + int sock; + + debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path); + + sock = socket(AF_UNIX, SOCK_STREAM, 0); + if(sock < 0) { + error("LISTENER: UNIX socket() on path '%s' failed.", path); + return -1; + } + + sock_setnonblock(sock); + sock_enlarge_in(sock); + + struct sockaddr_un name; + memset(&name, 0, sizeof(struct sockaddr_un)); + name.sun_family = AF_UNIX; + strncpy(name.sun_path, path, sizeof(name.sun_path)-1); + + errno = 0; + if (unlink(path) == -1 && errno != ENOENT) + error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path); + + if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { + close(sock); + error("LISTENER: UNIX bind() on path '%s' failed.", path); + return -1; + } + + // we have to chmod this to 0777 so that the client will be able + // to read from and write to this socket. 
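+    // (a chmod() failure below is logged but not fatal - the socket keeps
+    // listening, though some clients may then be unable to connect)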
+ if(chmod(path, 0777) == -1) + error("LISTENER: failed to chmod() socket file '%s'.", path); + + if(listen(sock, listen_backlog) < 0) { + close(sock); + error("LISTENER: UNIX listen() on path '%s' failed.", path); + return -1; + } + + debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path); + return sock; +} + +int create_listen_socket4(int socktype, const char *ip, uint16_t port, int listen_backlog) { + int sock; + + debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype); + + sock = socket(AF_INET, socktype, 0); + if(sock < 0) { + error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + return -1; + } + + sock_setreuse(sock, 1); + sock_setreuse_port(sock, 1); + sock_setnonblock(sock); + sock_enlarge_in(sock); + + struct sockaddr_in name; + memset(&name, 0, sizeof(struct sockaddr_in)); + name.sin_family = AF_INET; + name.sin_port = htons (port); + + int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr); + if(ret != 1) { + error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip); + close(sock); + return -1; + } + + if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { + close(sock); + error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + return -1; + } + + if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) { + close(sock); + error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + return -1; + } + + debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype); + return sock; +} + +int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int port, int listen_backlog) { + int sock; + int ipv6only = 1; + + debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype); + + sock = socket(AF_INET6, socktype, 0); + if (sock < 0) { + error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype); + return -1; + } + + sock_setreuse(sock, 1); + sock_setreuse_port(sock, 1); + sock_setnonblock(sock); + sock_enlarge_in(sock); + + /* IPv6 only */ + if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0) + error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype); + + struct sockaddr_in6 name; + memset(&name, 0, sizeof(struct sockaddr_in6)); + name.sin6_family = AF_INET6; + name.sin6_port = htons ((uint16_t) port); + name.sin6_scope_id = scope_id; + + int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr); + if(ret != 1) { + error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip); + close(sock); + return -1; + } + + name.sin6_scope_id = scope_id; + + if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { + close(sock); + error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + return -1; + } + + if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) { + close(sock); + error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + return -1; + } + + debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype); + return sock; +} + +static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port) { + if(sockets->opened >= 
MAX_LISTEN_FDS) {
+        error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
+        close(fd);
+        return -1;
+    }
+
+    sockets->fds[sockets->opened] = fd;
+    sockets->fds_types[sockets->opened] = socktype;
+    sockets->fds_families[sockets->opened] = family;
+    sockets->fds_names[sockets->opened] = strdup_client_description(family, protocol, ip, port);
+
+    sockets->opened++;
+    return 0;
+}
+
+int listen_sockets_check_is_member(LISTEN_SOCKETS *sockets, int fd) {
+    size_t i;
+    for(i = 0; i < sockets->opened ;i++)
+        if(sockets->fds[i] == fd) return 1;
+
+    return 0;
+}
+
+static inline void listen_sockets_init(LISTEN_SOCKETS *sockets) {
+    size_t i;
+    for(i = 0; i < MAX_LISTEN_FDS ;i++) {
+        sockets->fds[i] = -1;
+        sockets->fds_names[i] = NULL;
+        sockets->fds_types[i] = -1;
+    }
+
+    sockets->opened = 0;
+    sockets->failed = 0;
+}
+
+void listen_sockets_close(LISTEN_SOCKETS *sockets) {
+    size_t i;
+    for(i = 0; i < sockets->opened ;i++) {
+        close(sockets->fds[i]);
+        sockets->fds[i] = -1;
+
+        freez(sockets->fds_names[i]);
+        sockets->fds_names[i] = NULL;
+
+        sockets->fds_types[i] = -1;
+    }
+
+    sockets->opened = 0;
+    sockets->failed = 0;
+}
+
+static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) {
+    int added = 0;
+    struct addrinfo hints;
+    struct addrinfo *result = NULL, *rp = NULL;
+
+    char buffer[strlen(definition) + 1];
+    strcpy(buffer, definition);
+
+    char buffer2[10 + 1];
+    snprintfz(buffer2, 10, "%d", default_port);
+
+    char *ip = buffer, *port = buffer2, *interface = "";
+
+    int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+    const char *protocol_str = "tcp";
+
+    if(strncmp(ip, "tcp:", 4) == 0) {
+        ip += 4;
+        protocol = IPPROTO_TCP;
+        socktype = SOCK_STREAM;
+        protocol_str = "tcp";
+    }
+    else if(strncmp(ip, "udp:", 4) == 0) {
+        ip += 4;
+        protocol = IPPROTO_UDP;
+        socktype = SOCK_DGRAM;
+        protocol_str = "udp";
+    }
+    else if(strncmp(ip, "unix:", 5) == 0) {
+        char *path = ip + 5;
+        socktype = SOCK_STREAM;
+        protocol_str = "unix";
+
+        int fd = create_listen_socket_unix(path, listen_backlog);
+        if (fd == -1) {
+            error("LISTENER: Cannot create unix socket '%s'", path);
+            sockets->failed++;
+        }
+        else {
+            listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0);
+            added++;
+        }
+        return added;
+    }
+
+    char *e = ip;
+    if(*e == '[') {
+        e = ++ip;
+        while(*e && *e != ']') e++;
+        if(*e == ']') {
+            *e = '\0';
+            e++;
+        }
+    }
+    else {
+        while(*e && *e != ':' && *e != '%') e++;
+    }
+
+    if(*e == '%') {
+        *e = '\0';
+        e++;
+        interface = e;
+        while(*e && *e != ':') e++;
+    }
+
+    if(*e == ':') {
+        port = e + 1;
+        *e = '\0';
+    }
+
+    uint32_t scope_id = 0;
+    if(*interface) {
+        scope_id = if_nametoindex(interface);
+        if(!scope_id)
+            error("LISTENER: Cannot find a network interface named '%s'. Continuing without binding to a specific network interface", interface);
+    }
+
+    if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
+        ip = NULL;
+
+    if(!*port)
+        port = buffer2;
+
+    memset(&hints, 0, sizeof(hints));
+    hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
+    hints.ai_socktype = socktype;
+    hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */
+    hints.ai_protocol = protocol;
+    hints.ai_canonname = NULL;
+    hints.ai_addr = NULL;
+    hints.ai_next = NULL;
+
+    int r = getaddrinfo(ip, port, &hints, &result);
+    if (r != 0) {
+        error("LISTENER: getaddrinfo('%s', '%s'): %s", ip, port, gai_strerror(r));
+        return -1;
+    }
+
+    for (rp = result; rp != NULL; rp = rp->ai_next) {
+        int fd = -1;
+        int family;
+
+        char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
+        uint16_t rport = default_port;
+
+        family = rp->ai_addr->sa_family;
+        switch (family) {
+            case AF_INET: {
+                struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
+                inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
+                rport = ntohs(sin->sin_port);
+                // info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+                fd = create_listen_socket4(socktype, rip, rport, listen_backlog);
+                break;
+            }
+
+            case AF_INET6: {
+                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
+                inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
+                rport = ntohs(sin6->sin6_port);
+                // info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+                fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog);
+                break;
+            }
+
+            default:
+                debug(D_LISTENER, "LISTENER: Unknown socket family %d", family);
+                break;
+        }
+
+        if (fd == -1) {
+            error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport);
+            sockets->failed++;
+        }
+        else {
+            listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport);
+            added++;
+        }
+    }
+
+    freeaddrinfo(result);
+
+    return added;
+}
+
+int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
+    listen_sockets_init(sockets);
+
+    sockets->backlog = (int) appconfig_get_number(sockets->config, sockets->config_section, "listen backlog", sockets->backlog);
+
+    long long int old_port = sockets->default_port;
+    long long int new_port = appconfig_get_number(sockets->config, sockets->config_section, "default port", sockets->default_port);
+    if(new_port < 1 || new_port > 65535) {
+        error("LISTENER: Invalid listen port %lld given. Defaulting to %lld.", new_port, old_port);
+        sockets->default_port = (uint16_t) appconfig_set_number(sockets->config, sockets->config_section, "default port", old_port);
+    }
+    else sockets->default_port = (uint16_t)new_port;
+
+    debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port);
+
+    char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to);
+    while(*s) {
+        char *e = s;
+
+        // skip separators, moving both s(tart) and e(nd)
+        while(isspace(*e) || *e == ',') s = ++e;
+
+        // move e(nd) to the first separator
+        while(*e && !isspace(*e) && *e != ',') e++;
+
+        // is there anything?
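+        // (each non-empty token is a full listener definition for bind_to_this(),
+        // e.g. "tcp:127.0.0.1:19999" or "unix:/tmp/netdata.sock" - example values)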
+ if(!*s || s == e) break; + + char buf[e - s + 1]; + strncpyz(buf, s, e - s); + bind_to_this(sockets, buf, sockets->default_port, sockets->backlog); + + s = e; + } + + if(sockets->failed) { + size_t i; + for(i = 0; i < sockets->opened ;i++) + info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]); + } + + return (int)sockets->opened; +} + + +// -------------------------------------------------------------------------------------------------------------------- +// connect to another host/port + +// connect_to_this_unix() +// path the path of the unix socket +// timeout the timeout for establishing a connection + +static inline int connect_to_unix(const char *path, struct timeval *timeout) { + int fd = socket(AF_UNIX, SOCK_STREAM, 0); + if(fd == -1) { + error("Failed to create UNIX socket() for '%s'", path); + return -1; + } + + if(timeout) { + if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0) + error("Failed to set timeout on UNIX socket '%s'", path); + } + + struct sockaddr_un addr; + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1); + + if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) { + error("Cannot connect to UNIX socket on path '%s'.", path); + close(fd); + return -1; + } + + debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path); + + return fd; +} + +// connect_to_this_ip46() +// protocol IPPROTO_TCP, IPPROTO_UDP +// socktype SOCK_STREAM, SOCK_DGRAM +// host the destination hostname or IP address (IPv4 or IPv6) to connect to +// if it resolves to many IPs, all are tried (IPv4 and IPv6) +// scope_id the if_index id of the interface to use for connecting (0 = any) +// (used only under IPv6) +// service the service name or port to connect to +// timeout the timeout for establishing a connection + +static inline int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) { + struct addrinfo hints; + struct addrinfo *ai_head = NULL, *ai = NULL; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; /* Allow IPv4 or IPv6 */ + hints.ai_socktype = socktype; + hints.ai_protocol = protocol; + + int ai_err = getaddrinfo(host, service, &hints, &ai_head); + if (ai_err != 0) { + error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err)); + return -1; + } + + int fd = -1; + for (ai = ai_head; ai != NULL && fd == -1; ai = ai->ai_next) { + + if (ai->ai_family == PF_INET6) { + struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr; + if(pSadrIn6->sin6_scope_id == 0) { + pSadrIn6->sin6_scope_id = scope_id; + } + } + + char hostBfr[NI_MAXHOST + 1]; + char servBfr[NI_MAXSERV + 1]; + + getnameinfo(ai->ai_addr, + ai->ai_addrlen, + hostBfr, + sizeof(hostBfr), + servBfr, + sizeof(servBfr), + NI_NUMERICHOST | NI_NUMERICSERV); + + debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)", + hostBfr, + servBfr, + (unsigned int)ai->ai_flags, + ai->ai_family, + PF_INET, + PF_INET6, + ai->ai_socktype, + SOCK_STREAM, + SOCK_DGRAM, + ai->ai_protocol, + IPPROTO_TCP, + IPPROTO_UDP, + (unsigned long)ai->ai_addrlen, + (unsigned long)sizeof(struct sockaddr_in), + (unsigned long)sizeof(struct 
sockaddr_in6));
+
+        switch (ai->ai_addr->sa_family) {
+            case PF_INET: {
+                struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr;
+                (void)pSadrIn;
+
+                debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'",
+                      pSadrIn->sin_family,
+                      AF_INET,
+                      AF_INET6,
+                      hostBfr,
+                      servBfr);
+                break;
+            }
+
+            case PF_INET6: {
+                struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
+                (void)pSadrIn6;
+
+                debug(D_CONNECT_TO, "ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u",
+                      pSadrIn6->sin6_family,
+                      AF_INET,
+                      AF_INET6,
+                      hostBfr,
+                      servBfr,
+                      pSadrIn6->sin6_flowinfo,
+                      pSadrIn6->sin6_scope_id);
+                break;
+            }
+
+            default: {
+                debug(D_CONNECT_TO, "Unknown protocol family %d.", ai->ai_family);
+                continue;
+            }
+        }
+
+        fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+        if(fd != -1) {
+            if(timeout) {
+                if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
+                    error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr);
+            }
+
+            errno = 0;
+            if(connect(fd, ai->ai_addr, ai->ai_addrlen) < 0) {
+                if(errno == EALREADY || errno == EINPROGRESS) {
+                    info("Waiting for connection to ip %s port %s to be established", hostBfr, servBfr);
+
+                    // wait for the connecting socket itself to become writable
+                    fd_set fds;
+                    FD_ZERO(&fds);
+                    FD_SET(fd, &fds);
+                    int rc = select (fd + 1, NULL, &fds, NULL, timeout);
+
+                    if(rc > 0 && FD_ISSET(fd, &fds)) {
+                        info("connect() to ip %s port %s completed successfully", hostBfr, servBfr);
+                    }
+                    else if(rc == -1) {
+                        error("Failed to connect to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
+                        close(fd);
+                        fd = -1;
+                    }
+                    else {
+                        error("Timed out while connecting to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
+                        close(fd);
+                        fd = -1;
+                    }
+                }
+                else {
+                    error("Failed to connect to '%s', port '%s'", hostBfr, servBfr);
+                    close(fd);
+                    fd = -1;
+                }
+            }
+
+            if(fd != -1)
+                debug(D_CONNECT_TO, "Connected to '%s' on port '%s'.", hostBfr, servBfr);
+        }
+    }
+
+    freeaddrinfo(ai_head);
+
+    return fd;
+}
+
+// connect_to_this()
+//
+// definition format:
+//
+//    [PROTOCOL:]IP[%INTERFACE][:PORT]
+//
+// PROTOCOL  = tcp or udp
+// IP        = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6)
+// INTERFACE = for IPv6 only, the network interface to use
+// PORT      = port number or service name
+
+int connect_to_this(const char *definition, int default_port, struct timeval *timeout) {
+    char buffer[strlen(definition) + 1];
+    strcpy(buffer, definition);
+
+    char default_service[10 + 1];
+    snprintfz(default_service, 10, "%d", default_port);
+
+    char *host = buffer, *service = default_service, *interface = "";
+    int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+    uint32_t scope_id = 0;
+
+    if(strncmp(host, "tcp:", 4) == 0) {
+        host += 4;
+        protocol = IPPROTO_TCP;
+        socktype = SOCK_STREAM;
+    }
+    else if(strncmp(host, "udp:", 4) == 0) {
+        host += 4;
+        protocol = IPPROTO_UDP;
+        socktype = SOCK_DGRAM;
+    }
+    else if(strncmp(host, "unix:", 5) == 0) {
+        char *path = host + 5;
+        return connect_to_unix(path, timeout);
+    }
+
+    char *e = host;
+    if(*e == '[') {
+        e = ++host;
+        while(*e && *e != ']') e++;
+        if(*e == ']') {
+            *e = '\0';
+            e++;
+        }
+    }
+    else {
+        while(*e && *e != ':' && *e != '%') e++;
+    }
+
+    if(*e == '%') {
+        *e = '\0';
+        e++;
+        interface = e;
+        while(*e && *e != ':') e++;
+    }
+
+    if(*e == ':') {
+        *e = '\0';
+        e++;
+        service = e;
+    }
+
+    debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
+
+    if(!*host) {
+        error("Definition '%s' does not specify a host.", definition);
+        return -1;
+    }
+
+    if(*interface) {
+        scope_id = if_nametoindex(interface);
+        if(!scope_id)
+            error("Cannot find a network interface named '%s'. Continuing without binding to a specific network interface", interface);
+    }
+
+    if(!*service)
+        service = default_service;
+
+
+    return connect_to_this_ip46(protocol, socktype, host, scope_id, service, timeout);
+}
+
+int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) {
+    int sock = -1;
+
+    const char *s = destination;
+    while(*s) {
+        const char *e = s;
+
+        // skip separators, moving both s(tart) and e(nd)
+        while(isspace(*e) || *e == ',') s = ++e;
+
+        // move e(nd) to the first separator
+        while(*e && !isspace(*e) && *e != ',') e++;
+
+        // is there anything?
+        if(!*s || s == e) break;
+
+        char buf[e - s + 1];
+        strncpyz(buf, s, e - s);
+        if(reconnects_counter) *reconnects_counter += 1;
+        sock = connect_to_this(buf, default_port, timeout);
+        if(sock != -1) {
+            if(connected_to && connected_to_size) {
+                strncpy(connected_to, buf, connected_to_size);
+                connected_to[connected_to_size - 1] = '\0';
+            }
+            break;
+        }
+        s = e;
+    }
+
+    return sock;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// helpers to send/receive data in one call, in blocking mode, with a timeout
+
+ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
+    for(;;) {
+        struct pollfd fd = {
+                .fd = sockfd,
+                .events = POLLIN,
+                .revents = 0
+        };
+
+        errno = 0;
+        int retval = poll(&fd, 1, timeout * 1000);
+
+        if(retval == -1) {
+            // failed
+
+            if(errno == EINTR || errno == EAGAIN)
+                continue;
+
+            return -1;
+        }
+
+        if(!retval) {
+            // timeout
+            return 0;
+        }
+
+        if(fd.revents & POLLIN) break;
+    }
+
+    return recv(sockfd, buf, len, flags);
+}
+
+ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
+    for(;;) {
+        struct pollfd fd = {
+                .fd = sockfd,
+                .events = POLLOUT,
+                .revents = 0
+        };
+
+        errno = 0;
+        int retval = poll(&fd, 1, timeout * 1000);
+
+        if(retval == -1) {
+            // failed
+
+            if(errno == EINTR || errno == EAGAIN)
+                continue;
+
+            return -1;
+        }
+
+        if(!retval) {
+            // timeout
+            return 0;
+        }
+
+        if(fd.revents & POLLOUT) break;
+    }
+
+    return send(sockfd, buf, len, flags);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// accept4() replacement for systems that do not have one
+
+#ifndef HAVE_ACCEPT4
+int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
+    int fd = accept(sock, addr, addrlen);
+    int newflags = 0;
+
+    if (fd < 0) return fd;
+
+    if (flags & SOCK_NONBLOCK) {
+        newflags |= O_NONBLOCK;
+        flags &= ~SOCK_NONBLOCK;
+    }
+
+#ifdef SOCK_CLOEXEC
+#ifdef O_CLOEXEC
+    if (flags & SOCK_CLOEXEC) {
+        newflags |= O_CLOEXEC;
+        flags &= ~SOCK_CLOEXEC;
+    }
+#endif
+#endif
+
+    if (flags) {
+        close(fd);
+        errno = EINVAL;
+        return -1;
+    }
+
+    if (fcntl(fd, F_SETFL, newflags) < 0) {
+        int saved_errno = errno;
+        close(fd);
+        errno = saved_errno;
+        return -1;
+    }
+
+    return fd;
+}
+#endif
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// accept_socket() - accept a socket and 
store client IP and port + +int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list) { + struct sockaddr_storage sadr; + socklen_t addrlen = sizeof(sadr); + + int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags); + if (likely(nfd >= 0)) { + if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { + error("LISTENER: cannot getnameinfo() on received client connection."); + strncpyz(client_ip, "UNKNOWN", ipsize - 1); + strncpyz(client_port, "UNKNOWN", portsize - 1); + } + + client_ip[ipsize - 1] = '\0'; + client_port[portsize - 1] = '\0'; + + switch (((struct sockaddr *)&sadr)->sa_family) { + case AF_UNIX: + debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd); + // set the port - certain versions of libc return garbage on unix sockets + strncpy(client_port, "UNIX", portsize); + client_port[portsize - 1] = '\0'; + break; + + case AF_INET: + debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd); + break; + + case AF_INET6: + if (strncmp(client_ip, "::ffff:", 7) == 0) { + memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1); + debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd); + } + else + debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd); + break; + + default: + debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd); + break; + } + + if(access_list) { + if(!strcmp(client_ip, "127.0.0.1") || !strcmp(client_ip, "::1")) { + strncpy(client_ip, "localhost", ipsize); + client_ip[ipsize - 1] = '\0'; + } + + if(unlikely(!simple_pattern_matches(access_list, client_ip))) { + errno = 0; + debug(D_LISTENER, "Permission denied for client '%s', port '%s'", client_ip, client_port); + error("DENIED ACCESS to client '%s'", client_ip); + close(nfd); + nfd = -1; + errno = EPERM; + } + } + } +#ifdef HAVE_ACCEPT4 + else if(errno == ENOSYS) + error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. 
Recompile netdata like this: ./configure --disable-accept4 ..."); +#endif + + return nfd; +} + + +// -------------------------------------------------------------------------------------------------------------------- +// poll() based listener +// this should be the fastest possible listener for up to 100 sockets +// above 100, an epoll() interface is needed on Linux + +#define POLL_FDS_INCREASE_STEP 10 + +inline POLLINFO *poll_add_fd(POLLJOB *p + , int fd + , int socktype + , uint32_t flags + , const char *client_ip + , const char *client_port + , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/) + , void (*del_callback)(POLLINFO * /*pi*/) + , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/) + , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/) + , void *data +) { + debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); + + if(unlikely(fd < 0)) return NULL; + + //if(p->limit && p->used >= p->limit) { + // info("Max sockets limit reached (%zu sockets), dropping connection", p->used); + // close(fd); + // return NULL; + //} + + if(unlikely(!p->first_free)) { + size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP; + debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max); + + p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots); + p->inf = reallocz(p->inf, sizeof(POLLINFO) * new_slots); + + // reset all the newly added slots + ssize_t i; + for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) { + debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i); + p->fds[i].fd = -1; + p->fds[i].events = 0; + p->fds[i].revents = 0; + + p->inf[i].p = p; + p->inf[i].slot = (size_t)i; + p->inf[i].flags = 0; + p->inf[i].socktype = -1; + p->inf[i].client_ip = NULL; + p->inf[i].client_port = NULL; + p->inf[i].del_callback = p->del_callback; + p->inf[i].rcv_callback = p->rcv_callback; + p->inf[i].snd_callback = p->snd_callback; + p->inf[i].data = NULL; + + // link them so that the first free will be earlier in the array + // (we loop decrementing i) + p->inf[i].next = p->first_free; + p->first_free = &p->inf[i]; + } + + p->slots = new_slots; + } + + POLLINFO *pi = p->first_free; + p->first_free = p->first_free->next; + + debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); + + struct pollfd *pf = &p->fds[pi->slot]; + pf->fd = fd; + pf->events = POLLIN; + pf->revents = 0; + + pi->fd = fd; + pi->p = p; + pi->socktype = socktype; + pi->flags = flags; + pi->next = NULL; + pi->client_ip = strdupz(client_ip); + pi->client_port = strdupz(client_port); + + pi->del_callback = del_callback; + pi->rcv_callback = rcv_callback; + pi->snd_callback = snd_callback; + + pi->connected_t = now_boottime_sec(); + pi->last_received_t = 0; + pi->last_sent_t = 0; + pi->last_sent_t = 0; + pi->recv_count = 0; + pi->send_count = 0; + + netdata_thread_disable_cancelability(); + p->used++; + if(unlikely(pi->slot > p->max)) + p->max = pi->slot; + + if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) { + pi->data = add_callback(pi, &pf->events, data); + } + + if(pi->flags & POLLINFO_FLAG_SERVER_SOCKET) { + p->min = pi->slot; + } + netdata_thread_enable_cancelability(); + + debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, 
max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); + + return pi; +} + +inline void poll_close_fd(POLLINFO *pi) { + POLLJOB *p = pi->p; + + struct pollfd *pf = &p->fds[pi->slot]; + debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); + + if(unlikely(pf->fd == -1)) return; + + netdata_thread_disable_cancelability(); + + if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) { + pi->del_callback(pi); + + if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) { + if(close(pf->fd) == -1) + error("Failed to close() poll_events() socket %d", pf->fd); + } + } + + pf->fd = -1; + pf->events = 0; + pf->revents = 0; + + pi->fd = -1; + pi->socktype = -1; + pi->flags = 0; + pi->data = NULL; + + pi->del_callback = NULL; + pi->rcv_callback = NULL; + pi->snd_callback = NULL; + + freez(pi->client_ip); + pi->client_ip = NULL; + + freez(pi->client_port); + pi->client_port = NULL; + + pi->next = p->first_free; + p->first_free = pi; + + p->used--; + if(unlikely(p->max == pi->slot)) { + p->max = p->min; + ssize_t i; + for(i = (ssize_t)pi->slot; i > (ssize_t)p->min ;i--) { + if (unlikely(p->fds[i].fd != -1)) { + p->max = (size_t)i; + break; + } + } + } + netdata_thread_enable_cancelability(); + + debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); +} + +void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) { + (void)pi; + (void)events; + (void)data; + + // error("POLLFD: internal error: poll_default_add_callback() called"); + + return NULL; +} + +void poll_default_del_callback(POLLINFO *pi) { + if(pi->data) + error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak"); +} + +int poll_default_rcv_callback(POLLINFO *pi, short int *events) { + *events |= POLLIN; + + char buffer[1024 + 1]; + + ssize_t rc; + do { + rc = recv(pi->fd, buffer, 1024, MSG_DONTWAIT); + if (rc < 0) { + // read failed + if (errno != EWOULDBLOCK && errno != EAGAIN) { + error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc); + return -1; + } + } else if (rc) { + // data received + info("POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d", rc, pi->fd); + } + } while (rc != -1); + + return 0; +} + +int poll_default_snd_callback(POLLINFO *pi, short int *events) { + *events &= ~POLLOUT; + + info("POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d", pi->fd); + return 0; +} + +void poll_default_tmr_callback(void *timer_data) { + (void)timer_data; +} + +static void poll_events_cleanup(void *data) { + POLLJOB *p = (POLLJOB *)data; + + size_t i; + for(i = 0 ; i <= p->max ; i++) { + POLLINFO *pi = &p->inf[i]; + poll_close_fd(pi); + } + + freez(p->fds); + freez(p->inf); +} + +static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, short int revents, time_t now) { + short int events = pf->events; + int fd = pf->fd; + pf->revents = 0; + size_t i = pi->slot; + + if(unlikely(fd == -1)) { + debug(D_POLLFD, "POLLFD: LISTENER: ignoring slot %zu, it does not have an fd", i); + return; + } + + debug(D_POLLFD, "POLLFD: LISTENER: processing events for slot %zu (events = %d, revents = %d)", i, events, revents); + + if(revents & POLLIN || revents & POLLPRI) { + // receiving data 
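+        // (client sockets: payload to read via rcv_callback;
+        // TCP server sockets: a new connection to accept;
+        // UDP server sockets: a datagram to read)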
+
+        pi->last_received_t = now;
+        pi->recv_count++;
+
+        if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
+            // read data from client TCP socket
+            debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", i, fd);
+
+            pf->events = 0;
+            if (pi->rcv_callback(pi, &pf->events) == -1) {
+                poll_close_fd(&p->inf[i]);
+                return;
+            }
+            pf = &p->fds[i];
+            pi = &p->inf[i];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+            // this is common - it is used for web server file copies
+            if(unlikely(!(pf->events & (POLLIN|POLLOUT)))) {
+                error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
+                //poll_close_fd(pi);
+                //return;
+            }
+#endif
+        }
+        else if(likely(pi->flags & POLLINFO_FLAG_SERVER_SOCKET)) {
+            // new connection
+            // debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", i, fd);
+
+            switch(pi->socktype) {
+                case SOCK_STREAM: {
+                    // a TCP socket
+                    // we accept the connection
+
+                    int nfd;
+                    do {
+                        char client_ip[NI_MAXHOST + 1];
+                        char client_port[NI_MAXSERV + 1];
+
+                        debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", i, fd);
+                        nfd = accept_socket(fd, SOCK_NONBLOCK, client_ip, NI_MAXHOST + 1, client_port, NI_MAXSERV + 1, p->access_list);
+                        if (unlikely(nfd < 0)) {
+                            // accept failed
+
+                            debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", i, fd);
+
+                            if(unlikely(errno == EMFILE)) {
+                                error("POLLFD: LISTENER: too many open files - sleeping for 1ms - used by this thread %zu, max for this thread %zu", p->used, p->limit);
+                                usleep(1000); // 1ms
+                            }
+                            else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN))
+                                error("POLLFD: LISTENER: accept() failed.");
+
+                            break;
+                        }
+                        else {
+                            // accept ok
+                            // info("POLLFD: LISTENER: client '[%s]:%s' connected to '%s' on fd %d", client_ip, client_port, sockets->fds_names[i], nfd);
+                            poll_add_fd(p
+                                        , nfd
+                                        , SOCK_STREAM
+                                        , POLLINFO_FLAG_CLIENT_SOCKET
+                                        , client_ip
+                                        , client_port
+                                        , p->add_callback
+                                        , p->del_callback
+                                        , p->rcv_callback
+                                        , p->snd_callback
+                                        , NULL
+                            );
+
+                            // it may have reallocated them, so refresh our pointers
+                            pf = &p->fds[i];
+                            pi = &p->inf[i];
+                        }
+                    } while (nfd >= 0 && (!p->limit || p->used < p->limit));
+                    break;
+                }
+
+                case SOCK_DGRAM: {
+                    // a UDP socket
+                    // we read data from the server socket
+
+                    debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", i, fd);
+
+                    // TODO: access_list is not applied to UDP
+                    // but checking the access list on every UDP packet will destroy
+                    // performance, especially for statsd.
+
+                    pf->events = 0;
+                    pi->rcv_callback(pi, &pf->events);
+                    break;
+                }
+
+                default: {
+                    error("POLLFD: LISTENER: Unknown socktype %d on slot %zu", pi->socktype, pi->slot);
+                    break;
+                }
+            }
+        }
+    }
+
+    if(unlikely(revents & POLLOUT)) {
+        // sending data
+        debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", i, fd);
+
+        pi->last_sent_t = now;
+        pi->send_count++;
+
+        pf->events = 0;
+        if (pi->snd_callback(pi, &pf->events) == -1) {
+            poll_close_fd(&p->inf[i]);
+            return;
+        }
+        pf = &p->fds[i];
+        pi = &p->inf[i];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+        // this is common - it is used for streaming
+        if(unlikely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET && !(pf->events & (POLLIN|POLLOUT)))) {
+            error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
+            //poll_close_fd(pi);
+            //return;
+        }
+#endif
+    }
+
+    if(unlikely(revents & POLLERR)) {
+        error("POLLFD: LISTENER: processing POLLERR events for slot %zu fd %d (events = %d, revents = %d)", i, fd, events, revents);
+        pf->events = 0;
+        poll_close_fd(pi);
+        return;
+    }
+
+    if(unlikely(revents & POLLHUP)) {
+        error("POLLFD: LISTENER: processing POLLHUP events for slot %zu fd %d (events = %d, revents = %d)", i, fd, events, revents);
+        pf->events = 0;
+        poll_close_fd(pi);
+        return;
+    }
+
+    if(unlikely(revents & POLLNVAL)) {
+        error("POLLFD: LISTENER: processing POLLNVAL events for slot %zu fd %d (events = %d, revents = %d)", i, fd, events, revents);
+        pf->events = 0;
+        poll_close_fd(pi);
+        return;
+    }
+}
+
+void poll_events(LISTEN_SOCKETS *sockets
+        , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
+        , void  (*del_callback)(POLLINFO * /*pi*/)
+        , int   (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+        , int   (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+        , void  (*tmr_callback)(void * /*timer_data*/)
+        , SIMPLE_PATTERN *access_list
+        , void *data
+        , time_t tcp_request_timeout_seconds
+        , time_t tcp_idle_timeout_seconds
+        , time_t timer_milliseconds
+        , void *timer_data
+        , size_t max_tcp_sockets
+) {
+    if(!sockets || !sockets->opened) {
+        error("POLLFD: internal error: no listening sockets are opened");
+        return;
+    }
+
+    if(timer_milliseconds <= 0) timer_milliseconds = 0;
+
+    int retval;
+
+    POLLJOB p = {
+            .slots = 0,
+            .used = 0,
+            .max = 0,
+            .limit = max_tcp_sockets,
+            .fds = NULL,
+            .inf = NULL,
+            .first_free = NULL,
+
+            .complete_request_timeout = tcp_request_timeout_seconds,
+            .idle_timeout = tcp_idle_timeout_seconds,
+            .checks_every = (tcp_idle_timeout_seconds / 3) + 1,
+
+            .access_list = access_list,
+
+            .timer_milliseconds = timer_milliseconds,
+            .timer_data = timer_data,
+
+            .add_callback = add_callback?add_callback:poll_default_add_callback,
+            .del_callback = del_callback?del_callback:poll_default_del_callback,
+            .rcv_callback = rcv_callback?rcv_callback:poll_default_rcv_callback,
+            .snd_callback = snd_callback?snd_callback:poll_default_snd_callback,
+            .tmr_callback = tmr_callback?tmr_callback:poll_default_tmr_callback
+    };
+
+    size_t i;
+    for(i = 0; i < sockets->opened ;i++) {
+
+        POLLINFO *pi = poll_add_fd(&p
+                                   , sockets->fds[i]
+                                   , sockets->fds_types[i]
+                                   , POLLINFO_FLAG_SERVER_SOCKET
+                                   , (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN"
+                                   , ""
+                                   , p.add_callback
+                                   , p.del_callback
+                                   , p.rcv_callback
+                                   , p.snd_callback
+                                   , NULL
+        );
+
+        pi->data = data;
+        info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
+    }
+
+    int listen_sockets_active = 1;
+
+    int timeout_ms = 1000; // in milliseconds
+    time_t last_check = now_boottime_sec();
+
+    usec_t timer_usec = timer_milliseconds * USEC_PER_MS;
+    usec_t now_usec = 0, next_timer_usec = 0, last_timer_usec = 0;
+    (void)last_timer_usec;
+
+    if(unlikely(timer_usec)) {
+        now_usec = now_boottime_usec();
+        next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
+    }
+
+    netdata_thread_cleanup_push(poll_events_cleanup, &p);
+
+    while(!netdata_exit) {
+        if(unlikely(timer_usec)) {
+            now_usec = now_boottime_usec();
+
+            if(unlikely(timer_usec && now_usec >= next_timer_usec)) {
+                debug(D_POLLFD, "Calling timer callback after %zu usec", (size_t)(now_usec - last_timer_usec));
+                last_timer_usec = now_usec;
+
p.tmr_callback(p.timer_data); + now_usec = now_boottime_usec(); + next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec; + } + + usec_t dt_usec = next_timer_usec - now_usec; + if(dt_usec > 1000 * USEC_PER_MS) + timeout_ms = 1000; + else + timeout_ms = (int)(dt_usec / USEC_PER_MS); + } + + // enable or disable the TCP listening sockets, based on the current number of sockets used and the limit set + if((listen_sockets_active && (p.limit && p.used >= p.limit)) || (!listen_sockets_active && (!p.limit || p.used < p.limit))) { + listen_sockets_active = !listen_sockets_active; + info("%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)", (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit); + for (i = 0; i <= p.max; i++) { + if(p.inf[i].flags & POLLINFO_FLAG_SERVER_SOCKET && p.inf[i].socktype == SOCK_STREAM) { + p.fds[i].events = (short int) ((listen_sockets_active) ? POLLIN : 0); + } + } + } + + debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets for %zu ms...", p.max + 1, (size_t)timeout_ms); + retval = poll(p.fds, p.max + 1, timeout_ms); + time_t now = now_boottime_sec(); + + if(unlikely(retval == -1)) { + error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1); + break; + } + else if(unlikely(!retval)) { + debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout."); + } + else { + for (i = 0; i <= p.max; i++) { + struct pollfd *pf = &p.fds[i]; + short int revents = pf->revents; + if (unlikely(revents)) + poll_events_process(&p, &p.inf[i], pf, revents, now); + } + } + + if(unlikely(p.checks_every > 0 && now - last_check > p.checks_every)) { + last_check = now; + + // security checks + for(i = 0; i <= p.max; i++) { + POLLINFO *pi = &p.inf[i]; + + if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) { + if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) { + info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' has not sent a complete request in %zu seconds - closing it. " + , i + , pi->fd + , pi->client_ip ? pi->client_ip : "<undefined-ip>" + , pi->client_port ? pi->client_port : "<undefined-port>" + , (size_t) p.complete_request_timeout + ); + poll_close_fd(pi); + } + else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) { + info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' is idle for more than %zu seconds - closing it. " + , i + , pi->fd + , pi->client_ip ? pi->client_ip : "<undefined-ip>" + , pi->client_port ? 
pi->client_port : "<undefined-port>" + , (size_t) p.idle_timeout + ); + poll_close_fd(pi); + } + } + } + } + } + + netdata_thread_cleanup_pop(1); + debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed"); +} diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h new file mode 100644 index 000000000..f5412b63d --- /dev/null +++ b/libnetdata/socket/socket.h @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SOCKET_H +#define NETDATA_SOCKET_H + +#include "../libnetdata.h" + +#ifndef MAX_LISTEN_FDS +#define MAX_LISTEN_FDS 50 +#endif + +typedef struct listen_sockets { + struct config *config; // the config file to use + const char *config_section; // the netdata configuration section to read settings from + const char *default_bind_to; // the default bind to configuration string + uint16_t default_port; // the default port to use + int backlog; // the default listen backlog to use + + size_t opened; // the number of sockets opened + size_t failed; // the number of sockets attempted to open, but failed + int fds[MAX_LISTEN_FDS]; // the open sockets + char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets + int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM) + int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6) +} LISTEN_SOCKETS; + +extern char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port); + +extern int listen_sockets_setup(LISTEN_SOCKETS *sockets); +extern void listen_sockets_close(LISTEN_SOCKETS *sockets); + +extern int connect_to_this(const char *definition, int default_port, struct timeval *timeout); +extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size); + +extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); +extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); + +extern int sock_setnonblock(int fd); +extern int sock_delnonblock(int fd); +extern int sock_setreuse(int fd, int reuse); +extern int sock_setreuse_port(int fd, int reuse); +extern int sock_enlarge_in(int fd); +extern int sock_enlarge_out(int fd); + +extern int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list); + +#ifndef HAVE_ACCEPT4 +extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags); + +#ifndef SOCK_NONBLOCK +#define SOCK_NONBLOCK 00004000 +#endif /* #ifndef SOCK_NONBLOCK */ + +#ifndef SOCK_CLOEXEC +#define SOCK_CLOEXEC 02000000 +#endif /* #ifndef SOCK_CLOEXEC */ + +#endif /* #ifndef HAVE_ACCEPT4 */ + + +// ---------------------------------------------------------------------------- +// poll() based listener + +#define POLLINFO_FLAG_SERVER_SOCKET 0x00000001 +#define POLLINFO_FLAG_CLIENT_SOCKET 0x00000002 +#define POLLINFO_FLAG_DONT_CLOSE 0x00000004 + +typedef struct poll POLLJOB; + +typedef struct pollinfo { + POLLJOB *p; // the parent + size_t slot; // the slot id + + int fd; // the file descriptor + int socktype; // the client socket type + char *client_ip; // the connected client IP + char *client_port; // the connected client port + + time_t connected_t; // the time the socket connected + time_t last_received_t; // the time the socket last received data + time_t last_sent_t; // the time the socket last sent data + + size_t recv_count; // the 
number of times the socket was ready for inbound traffic + size_t send_count; // the number of times the socket was ready for outbound traffic + + uint32_t flags; // internal flags + + // callbacks for this socket + void (*del_callback)(struct pollinfo *pi); + int (*rcv_callback)(struct pollinfo *pi, short int *events); + int (*snd_callback)(struct pollinfo *pi, short int *events); + + // the user data + void *data; + + // linking of free pollinfo structures + // for quickly finding the next available + // this is like a stack, it grows and shrinks + // (with gaps - lower empty slots are preferred) + struct pollinfo *next; +} POLLINFO; + +struct poll { + size_t slots; + size_t used; + size_t min; + size_t max; + + size_t limit; + + time_t complete_request_timeout; + time_t idle_timeout; + time_t checks_every; + + time_t timer_milliseconds; + void *timer_data; + + struct pollfd *fds; + struct pollinfo *inf; + struct pollinfo *first_free; + + SIMPLE_PATTERN *access_list; + + void *(*add_callback)(POLLINFO *pi, short int *events, void *data); + void (*del_callback)(POLLINFO *pi); + int (*rcv_callback)(POLLINFO *pi, short int *events); + int (*snd_callback)(POLLINFO *pi, short int *events); + void (*tmr_callback)(void *timer_data); +}; + +#define pollinfo_from_slot(p, slot) (&((p)->inf[(slot)])) + +extern int poll_default_snd_callback(POLLINFO *pi, short int *events); +extern int poll_default_rcv_callback(POLLINFO *pi, short int *events); +extern void poll_default_del_callback(POLLINFO *pi); +extern void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data); + +extern POLLINFO *poll_add_fd(POLLJOB *p + , int fd + , int socktype + , uint32_t flags + , const char *client_ip + , const char *client_port + , void *(*add_callback)(POLLINFO *pi, short int *events, void *data) + , void (*del_callback)(POLLINFO *pi) + , int (*rcv_callback)(POLLINFO *pi, short int *events) + , int (*snd_callback)(POLLINFO *pi, short int *events) + , void *data +); +extern void poll_close_fd(POLLINFO *pi); + +extern void poll_events(LISTEN_SOCKETS *sockets + , void *(*add_callback)(POLLINFO *pi, short int *events, void *data) + , void (*del_callback)(POLLINFO *pi) + , int (*rcv_callback)(POLLINFO *pi, short int *events) + , int (*snd_callback)(POLLINFO *pi, short int *events) + , void (*tmr_callback)(void *timer_data) + , SIMPLE_PATTERN *access_list + , void *data + , time_t tcp_request_timeout_seconds + , time_t tcp_idle_timeout_seconds + , time_t timer_milliseconds + , void *timer_data + , size_t max_tcp_sockets +); + +#endif //NETDATA_SOCKET_H diff --git a/libnetdata/statistical/Makefile.am b/libnetdata/statistical/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/statistical/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/statistical/Makefile.in b/libnetdata/statistical/Makefile.in new file mode 100644 index 000000000..8209a1ecb --- /dev/null +++ b/libnetdata/statistical/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/statistical +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) 
>/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ 
+htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/statistical/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/statistical/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/statistical/statistical.c b/libnetdata/statistical/statistical.c new file mode 100644 index 000000000..15792fd15 --- /dev/null +++ b/libnetdata/statistical/statistical.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +LONG_DOUBLE default_single_exponential_smoothing_alpha = 0.1; + +void log_series_to_stderr(LONG_DOUBLE *series, size_t entries, calculated_number result, const char *msg) { + const LONG_DOUBLE *value, *end = &series[entries]; + + fprintf(stderr, "%s of %zu entries [ ", msg, entries); + for(value = series; value < end ;value++) { + if(value != series) fprintf(stderr, ", "); + fprintf(stderr, "%" LONG_DOUBLE_MODIFIER, *value); + } + fprintf(stderr, " ] results in " CALCULATED_NUMBER_FORMAT "\n", result); +} + +// -------------------------------------------------------------------------------------------------------------------- + +inline LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count) { + const LONG_DOUBLE *value, *end = &series[entries]; + LONG_DOUBLE sum = 0; + size_t c = 0; + + for(value = series; value < end ; value++) { + if(isnormal(*value)) { + sum += *value; + c++; + } + } + + if(unlikely(!c)) sum = NAN; + if(likely(count)) *count = c; + + return sum; +} + +inline LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries) { + return sum_and_count(series, entries, NULL); +} + +inline LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries) { + size_t count = 0; + LONG_DOUBLE sum = sum_and_count(series, entries, &count); + + if(unlikely(!count)) return NAN; + return sum / (LONG_DOUBLE)count; +} + +// -------------------------------------------------------------------------------------------------------------------- + +LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period) { + if(unlikely(period <= 0)) + return 0.0; + + size_t i, count; + LONG_DOUBLE sum = 0, avg = 0; + LONG_DOUBLE p[period]; + + for(count = 0; count < period ; count++) + p[count] = 0.0; + + for(i = 0, count = 0; i < entries; i++) { + LONG_DOUBLE value = series[i]; + if(unlikely(!isnormal(value))) continue; + + if(unlikely(count < period)) { + sum += value; + avg = (count == period - 1) ? 
sum / (LONG_DOUBLE)period : 0; + } + else { + sum = sum - p[count % period] + value; + avg = sum / (LONG_DOUBLE)period; + } + + p[count % period] = value; + count++; + } + + return avg; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static int qsort_compare(const void *a, const void *b) { + LONG_DOUBLE *p1 = (LONG_DOUBLE *)a, *p2 = (LONG_DOUBLE *)b; + LONG_DOUBLE n1 = *p1, n2 = *p2; + + if(unlikely(isnan(n1) || isnan(n2))) { + if(isnan(n1) && !isnan(n2)) return -1; + if(!isnan(n1) && isnan(n2)) return 1; + return 0; + } + if(unlikely(isinf(n1) || isinf(n2))) { + if(!isinf(n1) && isinf(n2)) return -1; + if(isinf(n1) && !isinf(n2)) return 1; + return 0; + } + + if(unlikely(n1 < n2)) return -1; + if(unlikely(n1 > n2)) return 1; + return 0; +} + +inline void sort_series(LONG_DOUBLE *series, size_t entries) { + qsort(series, entries, sizeof(LONG_DOUBLE), qsort_compare); +} + +inline LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries) { + LONG_DOUBLE *copy = mallocz(sizeof(LONG_DOUBLE) * entries); + memcpy(copy, series, sizeof(LONG_DOUBLE) * entries); + return copy; +} + +LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries) { + if(unlikely(entries == 0)) return NAN; + if(unlikely(entries == 1)) return series[0]; + if(unlikely(entries == 2)) return (series[0] + series[1]) / 2; + + LONG_DOUBLE average; + if(entries % 2 == 0) { + size_t m = entries / 2; + average = (series[m - 1] + series[m]) / 2; + } + else { + average = series[entries / 2]; + } + + return average; +} + +LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries) { + if(unlikely(entries == 0)) return NAN; + if(unlikely(entries == 1)) return series[0]; + + if(unlikely(entries == 2)) + return (series[0] + series[1]) / 2; + + LONG_DOUBLE *copy = copy_series(series, entries); + sort_series(copy, entries); + + LONG_DOUBLE avg = median_on_sorted_series(copy, entries); + + freez(copy); + return avg; +} + +// -------------------------------------------------------------------------------------------------------------------- + +LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period) { + if(entries <= period) + return median(series, entries); + + LONG_DOUBLE *data = copy_series(series, entries); + + size_t i; + for(i = period; i < entries; i++) { + data[i - period] = median(&series[i - period], period); + } + + LONG_DOUBLE avg = median(data, entries - period); + freez(data); + return avg; +} + +// -------------------------------------------------------------------------------------------------------------------- + +// http://stackoverflow.com/a/15150143/4525767 +LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) { + LONG_DOUBLE median = 0.0f; + LONG_DOUBLE average = 0.0f; + size_t i; + + for(i = 0; i < entries ; i++) { + LONG_DOUBLE value = series[i]; + if(unlikely(!isnormal(value))) continue; + + average += ( value - average ) * 0.1f; // rough running average. 
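+ // then pull the median estimate towards the current sample, by 1% of the running average's magnitude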
+ median += copysignl( average * 0.01, value - median ); + } + + return median; +} + +// -------------------------------------------------------------------------------------------------------------------- + +LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) { + if(unlikely(entries == 0)) return NAN; + if(unlikely(entries == 1)) return series[0]; + + const LONG_DOUBLE *value, *end = &series[entries]; + size_t count; + LONG_DOUBLE sum; + + for(count = 0, sum = 0, value = series ; value < end ;value++) { + if(likely(isnormal(*value))) { + count++; + sum += *value; + } + } + + if(unlikely(count == 0)) return NAN; + if(unlikely(count == 1)) return sum; + + LONG_DOUBLE average = sum / (LONG_DOUBLE)count; + + for(count = 0, sum = 0, value = series ; value < end ;value++) { + if(isnormal(*value)) { + count++; + sum += powl(*value - average, 2); + } + } + + if(unlikely(count == 0)) return NAN; + if(unlikely(count == 1)) return average; + + LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count); // remove -1 from count to have a population stddev + LONG_DOUBLE stddev = sqrtl(variance); + return stddev; +} + +// -------------------------------------------------------------------------------------------------------------------- + +LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) { + if(unlikely(entries == 0)) + return NAN; + + if(unlikely(isnan(alpha))) + alpha = default_single_exponential_smoothing_alpha; + + const LONG_DOUBLE *value = series, *end = &series[entries]; + LONG_DOUBLE level = (1.0 - alpha) * (*value); + + for(value++ ; value < end; value++) { + if(likely(isnormal(*value))) + level = alpha * (*value) + (1.0 - alpha) * level; + } + + return level; +} + +LONG_DOUBLE single_exponential_smoothing_reverse(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) { + if(unlikely(entries == 0)) + return NAN; + + if(unlikely(isnan(alpha))) + alpha = default_single_exponential_smoothing_alpha; + + const LONG_DOUBLE *value = &series[entries - 1]; + LONG_DOUBLE level = (1.0 - alpha) * (*value); + + for(value-- ; value >= series; value--) { + if(likely(isnormal(*value))) + level = alpha * (*value) + (1.0 - alpha) * level; + } + + return level; +} + +// -------------------------------------------------------------------------------------------------------------------- + +// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/ +LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast) { + if(unlikely(entries == 0)) + return NAN; + + LONG_DOUBLE level, trend; + + if(unlikely(isnan(alpha))) + alpha = 0.3; + + if(unlikely(isnan(beta))) + beta = 0.05; + + level = series[0]; + + if(likely(entries > 1)) + trend = series[1] - series[0]; + else + trend = 0; + + const LONG_DOUBLE *value = series, *end = &series[entries]; + for(value++ ; value < end; value++) { + if(likely(isnormal(*value))) { + + LONG_DOUBLE last_level = level; + level = alpha * *value + (1.0 - alpha) * (level + trend); + trend = beta * (level - last_level) + (1.0 - beta) * trend; + + } + } + + if(forecast) + *forecast = level + trend; + + return level; +} + +// -------------------------------------------------------------------------------------------------------------------- + +/* + * Based on the R implementation + * + * a: level component + * b: trend component + * s: seasonal component + * + * Additive: + * + * Yhat[t+h] = a[t] + h * b[t] + s[t + 1 + (h - 1) mod p], + * 
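+ * (h is the forecast horizon, p the season length; α, β and γ weight the level, trend and seasonal updates below)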
a[t] = α (Y[t] - s[t-p]) + (1-α) (a[t-1] + b[t-1]) + * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1] + * s[t] = γ (Y[t] - a[t]) + (1-γ) s[t-p] + * + * Multiplicative: + * + * Yhat[t+h] = (a[t] + h * b[t]) * s[t + 1 + (h - 1) mod p], + * a[t] = α (Y[t] / s[t-p]) + (1-α) (a[t-1] + b[t-1]) + * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1] + * s[t] = γ (Y[t] / a[t]) + (1-γ) s[t-p] + */ +static int __HoltWinters( + const LONG_DOUBLE *series, + int entries, // start_time + h + + LONG_DOUBLE alpha, // alpha parameter of Holt-Winters Filter. + LONG_DOUBLE beta, // beta parameter of Holt-Winters Filter. If set to 0, the function will do exponential smoothing. + LONG_DOUBLE gamma, // gamma parameter used for the seasonal component. If set to 0, an non-seasonal model is fitted. + + const int *seasonal, + const int *period, + const LONG_DOUBLE *a, // Start value for level (a[0]). + const LONG_DOUBLE *b, // Start value for trend (b[0]). + LONG_DOUBLE *s, // Vector of start values for the seasonal component (s_1[0] ... s_p[0]) + + /* return values */ + LONG_DOUBLE *SSE, // The final sum of squared errors achieved in optimizing + LONG_DOUBLE *level, // Estimated values for the level component (size entries - t + 2) + LONG_DOUBLE *trend, // Estimated values for the trend component (size entries - t + 2) + LONG_DOUBLE *season // Estimated values for the seasonal component (size entries - t + 2) +) +{ + if(unlikely(entries < 4)) + return 0; + + int start_time = 2; + + LONG_DOUBLE res = 0, xhat = 0, stmp = 0; + int i, i0, s0; + + /* copy start values to the beginning of the vectors */ + level[0] = *a; + if(beta > 0) trend[0] = *b; + if(gamma > 0) memcpy(season, s, *period * sizeof(LONG_DOUBLE)); + + for(i = start_time - 1; i < entries; i++) { + /* indices for period i */ + i0 = i - start_time + 2; + s0 = i0 + *period - 1; + + /* forecast *for* period i */ + xhat = level[i0 - 1] + (beta > 0 ? trend[i0 - 1] : 0); + stmp = gamma > 0 ? 
season[s0 - *period] : (*seasonal != 1); + if (*seasonal == 1) + xhat += stmp; + else + xhat *= stmp; + + /* Sum of Squared Errors */ + res = series[i] - xhat; + *SSE += res * res; + + /* estimate of level *in* period i */ + if (*seasonal == 1) + level[i0] = alpha * (series[i] - stmp) + + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]); + else + level[i0] = alpha * (series[i] / stmp) + + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]); + + /* estimate of trend *in* period i */ + if (beta > 0) + trend[i0] = beta * (level[i0] - level[i0 - 1]) + + (1 - beta) * trend[i0 - 1]; + + /* estimate of seasonal component *in* period i */ + if (gamma > 0) { + if (*seasonal == 1) + season[s0] = gamma * (series[i] - level[i0]) + + (1 - gamma) * stmp; + else + season[s0] = gamma * (series[i] / level[i0]) + + (1 - gamma) * stmp; + } + } + + return 1; +} + +LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast) { + if(unlikely(entries == 0)) + return NAN; + + if(unlikely(isnan(alpha))) + alpha = 0.3; + + if(unlikely(isnan(beta))) + beta = 0.05; + + if(unlikely(isnan(gamma))) + gamma = 0; + + int seasonal = 0; + int period = 0; + LONG_DOUBLE a0 = series[0]; + LONG_DOUBLE b0 = 0; + LONG_DOUBLE s[1] = { 0 }; + + LONG_DOUBLE errors = 0.0; + size_t nb_computations = entries; + LONG_DOUBLE *estimated_level = callocz(nb_computations, sizeof(LONG_DOUBLE)); + LONG_DOUBLE *estimated_trend = callocz(nb_computations, sizeof(LONG_DOUBLE)); + LONG_DOUBLE *estimated_season = callocz(nb_computations, sizeof(LONG_DOUBLE)); + + int ret = __HoltWinters( + series, + (int)entries, + alpha, + beta, + gamma, + &seasonal, + &period, + &a0, + &b0, + s, + &errors, + estimated_level, + estimated_trend, + estimated_season + ); + + LONG_DOUBLE value = estimated_level[nb_computations - 1]; + + if(forecast) + *forecast = 0.0; + + freez(estimated_level); + freez(estimated_trend); + freez(estimated_season); + + if(!ret) + return 0.0; + + return value; +} diff --git a/libnetdata/statistical/statistical.h b/libnetdata/statistical/statistical.h new file mode 100644 index 000000000..586c6b34a --- /dev/null +++ b/libnetdata/statistical/statistical.h @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STATISTICAL_H +#define NETDATA_STATISTICAL_H 1 + +#include "../libnetdata.h" + +#ifndef isnormal +#define isnormal(x) (fpclassify(x) == FP_NORMAL) +#endif + +extern void log_series_to_stderr(LONG_DOUBLE *series, size_t entries, calculated_number result, const char *msg); + +extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries); +extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period); +extern LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries); +extern LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period); +extern LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries); +extern LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries); +extern LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha); +extern LONG_DOUBLE single_exponential_smoothing_reverse(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha); +extern LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast); +extern LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE 
*forecast); +extern LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count); +extern LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries); +extern LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries); +extern LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries); +extern void sort_series(LONG_DOUBLE *series, size_t entries); + +#endif //NETDATA_STATISTICAL_H diff --git a/libnetdata/storage_number/Makefile.am b/libnetdata/storage_number/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/storage_number/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/storage_number/Makefile.in b/libnetdata/storage_number/Makefile.in new file mode 100644 index 000000000..1ab5afd87 --- /dev/null +++ b/libnetdata/storage_number/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/storage_number +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/storage_number/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/storage_number/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md new file mode 100644 index 000000000..206c42b87 --- /dev/null +++ b/libnetdata/storage_number/README.md @@ -0,0 +1,10 @@ +# netdata storage number + +Although `netdata` does all its calculations using `long double`, it stores all values using +a **custom-made 32-bit number**. 
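+
+As a quick illustration (this snippet is not part of netdata itself, and it assumes the
+`libnetdata` headers are available to build against), a value can be packed and unpacked
+with the `pack_storage_number()` / `unpack_storage_number()` pair declared in
+`storage_number.h`, to observe the accuracy loss described below:
+
+```c
+#include "libnetdata/storage_number/storage_number.h"
+#include <stdio.h>
+
+int main(void) {
+    calculated_number original = (calculated_number)123.456789;
+
+    // pack into the custom 32-bit format, flagged as an existing value
+    storage_number packed = pack_storage_number(original, SN_EXISTS);
+    calculated_number restored = unpack_storage_number(packed);
+
+    // accuracy_loss() is a helper macro from storage_number.h, reporting the loss as a percentage
+    printf("original = " CALCULATED_NUMBER_FORMAT ", restored = " CALCULATED_NUMBER_FORMAT ", loss = " CALCULATED_NUMBER_FORMAT "%%\n",
+           original, restored, accuracy_loss(original, restored));
+    return 0;
+}
+```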
+ +This custom-made number can store in 29 bits values from `-167772150000000.0` to `167772150000000.0` +with a precision of 0.00001 (yes, it's a floating point number, meaning that higher integer values +have less decimal precision) and 3 bits for flags. + +This provides an extremely optimized memory footprint with just 0.0001% max accuracy loss. diff --git a/libnetdata/storage_number/storage_number.c b/libnetdata/storage_number/storage_number.c new file mode 100644 index 000000000..db4cb700b --- /dev/null +++ b/libnetdata/storage_number/storage_number.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +storage_number pack_storage_number(calculated_number value, uint32_t flags) +{ + // bit 32 = sign 0:positive, 1:negative + // bit 31 = 0:divide, 1:multiply + // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total) + // bit 27, 26, 25 flags + // bit 24 to bit 1 = the value + + storage_number r = get_storage_number_flags(flags); + if(!value) return r; + + int m = 0; + calculated_number n = value; + + // if the value is negative + // add the sign bit and make it positive + if(n < 0) { + r += (1 << 31); // the sign bit 32 + n = -n; + } + + // make its integer part fit in 0x00ffffff + // by dividing it by 10 up to 7 times + // and increasing the multiplier + while(m < 7 && n > (calculated_number)0x00ffffff) { + n /= 10; + m++; + } + + if(m) { + // the value was too big and we divided it + // so we add a multiplier to unpack it + r += (1 << 30) + (m << 27); // the multiplier m + + if(n > (calculated_number)0x00ffffff) { + #ifdef NETDATA_INTERNAL_CHECKS + error("Number " CALCULATED_NUMBER_FORMAT " is too big.", value); + #endif + r += 0x00ffffff; + return r; + } + } + else { + // 0x0019999e is the number that can be multiplied + // by 10 to give 0x00ffffff + // while the value is below 0x0019999e we can + // multiply it by 10, up to 7 times, increasing + // the multiplier + while(m < 7 && n < (calculated_number)0x0019999e) { + n *= 10; + m++; + } + + // the value was small enough and we multiplied it + // so we add a divider to unpack it + r += (0 << 30) + (m << 27); // the divider m + } + +#ifdef STORAGE_WITH_MATH + // without this there are rounding problems + // example: 0.9 becomes 0.89 + r += lrint((double) n); +#else + r += (storage_number)n; +#endif + + return r; +} + +calculated_number unpack_storage_number(storage_number value) +{ + if(!value) return 0; + + int sign = 0, exp = 0; + + value ^= get_storage_number_flags(value); + + if(value & (1 << 31)) { + sign = 1; + value ^= 1 << 31; + } + + if(value & (1 << 30)) { + exp = 1; + value ^= 1 << 30; + } + + int mul = value >> 27; + value ^= mul << 27; + + calculated_number n = value; + + // fprintf(stderr, "UNPACK: %08X, sign = %d, exp = %d, mul = %d, n = " CALCULATED_NUMBER_FORMAT "\n", value, sign, exp, mul, n); + + while(mul > 0) { + if(exp) n *= 10; + else n /= 10; + mul--; + } + + if(sign) n = -n; + return n; +} + +/* +int print_calculated_number(char *str, calculated_number value) +{ + char *wstr = str; + + int sign = (value < 0) ? 
1 : 0; + if(sign) value = -value; + +#ifdef STORAGE_WITH_MATH + // without llrintl() there are rounding problems + // for example 0.9 becomes 0.89 + unsigned long long uvalue = (unsigned long long int) llrintl(value * (calculated_number)100000); +#else + unsigned long long uvalue = value * (calculated_number)100000; +#endif + + wstr = print_number_llu_r_smart(str, uvalue); + + // make sure we have 6 bytes at least + while((wstr - str) < 6) *wstr++ = '0'; + + // put the sign back + if(sign) *wstr++ = '-'; + + // reverse it + char *begin = str, *end = --wstr, aux; + while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux; + // wstr--; + // strreverse(str, wstr); + + // remove trailing zeros + int decimal = 5; + while(decimal > 0 && *wstr == '0') { + *wstr-- = '\0'; + decimal--; + } + + // terminate it, one position to the right + // to let space for a dot + wstr[2] = '\0'; + + // make space for the dot + int i; + for(i = 0; i < decimal ;i++) { + wstr[1] = wstr[0]; + wstr--; + } + + // put the dot + if(wstr[2] == '\0') { wstr[1] = '\0'; decimal--; } + else wstr[1] = '.'; + + // return the buffer length + return (int) ((wstr - str) + 2 + decimal ); +} +*/ + +int print_calculated_number(char *str, calculated_number value) { + // info("printing number " CALCULATED_NUMBER_FORMAT, value); + char integral_str[50], fractional_str[50]; + + char *wstr = str; + + if(unlikely(value < 0)) { + *wstr++ = '-'; + value = -value; + } + + calculated_number integral, fractional; + +#ifdef STORAGE_WITH_MATH + fractional = calculated_number_modf(value, &integral) * 10000000.0; +#else + fractional = ((unsigned long long)(value * 10000000ULL) % 10000000ULL); +#endif + + unsigned long long integral_int = (unsigned long long)integral; + unsigned long long fractional_int = (unsigned long long)calculated_number_llrint(fractional); + if(unlikely(fractional_int >= 10000000)) { + integral_int += 1; + fractional_int -= 10000000; + } + + // info("integral " CALCULATED_NUMBER_FORMAT " (%llu), fractional " CALCULATED_NUMBER_FORMAT " (%llu)", integral, integral_int, fractional, fractional_int); + + char *istre; + if(unlikely(integral_int == 0)) { + integral_str[0] = '0'; + istre = &integral_str[1]; + } + else + // convert the integral part to string (reversed) + istre = print_number_llu_r_smart(integral_str, integral_int); + + // copy reversed the integral string + istre--; + while( istre >= integral_str ) *wstr++ = *istre--; + + if(likely(fractional_int != 0)) { + // add a dot + *wstr++ = '.'; + + // convert the fractional part to string (reversed) + char *fstre = print_number_llu_r_smart(fractional_str, fractional_int); + + // prepend zeros to reach 7 digits length + int decimal = 7; + int len = (int)(fstre - fractional_str); + while(len < decimal) { + *wstr++ = '0'; + len++; + } + + char *begin = fractional_str; + while(begin < fstre && *begin == '0') begin++; + + // copy reversed the fractional string + fstre--; + while( fstre >= begin ) *wstr++ = *fstre--; + } + + *wstr = '\0'; + // info("printed number '%s'", str); + return (int)(wstr - str); +} diff --git a/libnetdata/storage_number/storage_number.h b/libnetdata/storage_number/storage_number.h new file mode 100644 index 000000000..5353ab60b --- /dev/null +++ b/libnetdata/storage_number/storage_number.h @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STORAGE_NUMBER_H +#define NETDATA_STORAGE_NUMBER_H 1 + +#include "../libnetdata.h" + +#ifdef NETDATA_WITHOUT_LONG_DOUBLE + +#define powl pow +#define modfl modf +#define llrintl 
llrint +#define roundl round +#define sqrtl sqrt +#define copysignl copysign +#define strtold strtod + +typedef double calculated_number; +#define CALCULATED_NUMBER_FORMAT "%0.7f" +#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0f" +#define CALCULATED_NUMBER_FORMAT_AUTO "%f" + +#define LONG_DOUBLE_MODIFIER "f" +typedef double LONG_DOUBLE; + +#else + +typedef long double calculated_number; +#define CALCULATED_NUMBER_FORMAT "%0.7Lf" +#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0Lf" +#define CALCULATED_NUMBER_FORMAT_AUTO "%Lf" + +#define LONG_DOUBLE_MODIFIER "Lf" +typedef long double LONG_DOUBLE; + +#endif + +//typedef long long calculated_number; +//#define CALCULATED_NUMBER_FORMAT "%lld" + +typedef long long collected_number; +#define COLLECTED_NUMBER_FORMAT "%lld" + +/* +typedef long double collected_number; +#define COLLECTED_NUMBER_FORMAT "%0.7Lf" +*/ + +#define calculated_number_modf(x, y) modfl(x, y) +#define calculated_number_llrint(x) llrintl(x) +#define calculated_number_round(x) roundl(x) +#define calculated_number_fabs(x) fabsl(x) +#define calculated_number_epsilon (calculated_number)0.0000001 + +#define calculated_number_equal(a, b) (calculated_number_fabs((a) - (b)) < calculated_number_epsilon) + +typedef uint32_t storage_number; +#define STORAGE_NUMBER_FORMAT "%u" + +#define SN_NOT_EXISTS (0x0 << 24) +#define SN_EXISTS (0x1 << 24) +#define SN_EXISTS_RESET (0x2 << 24) +#define SN_EXISTS_UNDEF1 (0x3 << 24) +#define SN_EXISTS_UNDEF2 (0x4 << 24) +#define SN_EXISTS_UNDEF3 (0x5 << 24) +#define SN_EXISTS_UNDEF4 (0x6 << 24) + +#define SN_FLAGS_MASK (~(0x6 << 24)) + +// extract the flags +#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (2 << 24)) | (((storage_number)(value)) & (4 << 24))) +#define SN_EMPTY_SLOT 0x00000000 + +// checks +#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0) +#define did_storage_number_reset(value) ((get_storage_number_flags(value) == SN_EXISTS_RESET)?1:0) + +storage_number pack_storage_number(calculated_number value, uint32_t flags); +calculated_number unpack_storage_number(storage_number value); + +int print_calculated_number(char *str, calculated_number value); + +#define STORAGE_NUMBER_POSITIVE_MAX (167772150000000.0) +#define STORAGE_NUMBER_POSITIVE_MIN (0.0000001) +#define STORAGE_NUMBER_NEGATIVE_MAX (-0.0000001) +#define STORAGE_NUMBER_NEGATIVE_MIN (-167772150000000.0) + +// accepted accuracy loss +#define ACCURACY_LOSS 0.0001 +#define accuracy_loss(t1, t2) (((t1) == (t2) || (t1) == 0.0 || (t2) == 0.0) ? 0.0 : (100.0 - (((t1) > (t2)) ? ((t2) * 100.0 / (t1) ) : ((t1) * 100.0 / (t2))))) + +#endif /* NETDATA_STORAGE_NUMBER_H */ diff --git a/libnetdata/threads/Makefile.am b/libnetdata/threads/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/threads/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/threads/Makefile.in b/libnetdata/threads/Makefile.in new file mode 100644 index 000000000..0a4460bcc --- /dev/null +++ b/libnetdata/threads/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. 
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/threads +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = 
$(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ 
+exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/threads/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/threads/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
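+
+# The threads.c/threads.h pair added below wraps the pthread API with tagged,
+# logged threads. A minimal usage sketch (the worker function and tag are
+# illustrative, not from upstream):
+#
+#     void *my_worker(void *arg) { (void)arg; return NULL; }
+#
+#     netdata_thread_t thread;
+#     netdata_thread_create(&thread, "MYWORKER", NETDATA_THREAD_OPTION_JOINABLE,
+#                           my_worker, NULL);
+#     netdata_thread_join(thread, NULL);
+#
+# Threads created without NETDATA_THREAD_OPTION_JOINABLE are detached
+# automatically right after creation.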
+.NOEXPORT: diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/threads/threads.c b/libnetdata/threads/threads.c new file mode 100644 index 000000000..133d9a547 --- /dev/null +++ b/libnetdata/threads/threads.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +static size_t default_stacksize = 0, wanted_stacksize = 0; +static pthread_attr_t *attr = NULL; + +// ---------------------------------------------------------------------------- +// per thread data + +typedef struct { + void *arg; + pthread_t *thread; + const char *tag; + void *(*start_routine) (void *); + NETDATA_THREAD_OPTIONS options; +} NETDATA_THREAD; + +static __thread NETDATA_THREAD *netdata_thread = NULL; + +const char *netdata_thread_tag(void) { + return ((netdata_thread && netdata_thread->tag && *netdata_thread->tag)?netdata_thread->tag:"MAIN"); +} + +// ---------------------------------------------------------------------------- +// compatibility library functions + +pid_t gettid(void) { +#ifdef __FreeBSD__ + + return (pid_t)pthread_getthreadid_np(); + +#elif defined(__APPLE__) + + #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060) + uint64_t curthreadid; + pthread_threadid_np(NULL, &curthreadid); + return (pid_t)curthreadid; + #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */ + return (pid_t)pthread_self; + #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED */ + +#else /* __APPLE__*/ + + return (pid_t)syscall(SYS_gettid); + +#endif /* __FreeBSD__, __APPLE__*/ +} + +// ---------------------------------------------------------------------------- +// early initialization + +size_t netdata_threads_init(void) { + int i; + + // -------------------------------------------------------------------- + // get the required stack size of the threads of netdata + + attr = callocz(1, sizeof(pthread_attr_t)); + i = pthread_attr_init(attr); + if(i != 0) + fatal("pthread_attr_init() failed with code %d.", i); + + i = pthread_attr_getstacksize(attr, &default_stacksize); + if(i != 0) + fatal("pthread_attr_getstacksize() failed with code %d.", i); + else + debug(D_OPTIONS, "initial pthread stack size is %zu bytes", default_stacksize); + + return default_stacksize; +} + +// ---------------------------------------------------------------------------- +// late initialization + +void netdata_threads_init_after_fork(size_t stacksize) { + wanted_stacksize = stacksize; + int i; + + // ------------------------------------------------------------------------ + // set default pthread stack size + + if(attr && default_stacksize < wanted_stacksize && wanted_stacksize > 0) { + i = pthread_attr_setstacksize(attr, wanted_stacksize); + if(i != 0) + fatal("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", wanted_stacksize, i); + else + debug(D_SYSTEM, "Successfully set pthread stacksize to %zu bytes", wanted_stacksize); + } +} + + +// ---------------------------------------------------------------------------- +// netdata_thread_create + +static void thread_cleanup(void *ptr) { + if(netdata_thread != ptr) { + NETDATA_THREAD *info = (NETDATA_THREAD *)ptr; + error("THREADS: internal error - thread local variable does not match the one passed to this function. 
Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag); + } + + if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP)) + info("thread with task id %d finished", gettid()); + + freez((void *)netdata_thread->tag); + netdata_thread->tag = NULL; + + freez(netdata_thread); + netdata_thread = NULL; +} + +static void *thread_start(void *ptr) { + netdata_thread = (NETDATA_THREAD *)ptr; + + if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_STARTUP)) + info("thread created with task id %d", gettid()); + + if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0) + error("cannot set pthread cancel type to DEFERRED."); + + if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0) + error("cannot set pthread cancel state to ENABLE."); + + void *ret = NULL; + pthread_cleanup_push(thread_cleanup, ptr); + ret = netdata_thread->start_routine(netdata_thread->arg); + pthread_cleanup_pop(1); + + return ret; +} + +int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg) { + NETDATA_THREAD *info = mallocz(sizeof(NETDATA_THREAD)); + info->arg = arg; + info->thread = thread; + info->tag = strdupz(tag); + info->start_routine = start_routine; + info->options = options; + + int ret = pthread_create(thread, attr, thread_start, info); + if(ret != 0) + error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret); + + else { + if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) { + int ret2 = pthread_detach(*thread); + if (ret2 != 0) + error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2); + } + } + + return ret; +} + +// ---------------------------------------------------------------------------- +// netdata_thread_cancel + +int netdata_thread_cancel(netdata_thread_t thread) { + int ret = pthread_cancel(thread); + if(ret != 0) + error("cannot cancel thread. pthread_cancel() failed with code %d.", ret); + + return ret; +} + +// ---------------------------------------------------------------------------- +// netdata_thread_join + +int netdata_thread_join(netdata_thread_t thread, void **retval) { + int ret = pthread_join(thread, retval); + if(ret != 0) + error("cannot join thread. pthread_join() failed with code %d.", ret); + + return ret; +} + +int netdata_thread_detach(pthread_t thread) { + int ret = pthread_detach(thread); + if(ret != 0) + error("cannot detach thread. 
pthread_detach() failed with code %d.", ret); + + return ret; +} diff --git a/libnetdata/threads/threads.h b/libnetdata/threads/threads.h new file mode 100644 index 000000000..eec6ad0e3 --- /dev/null +++ b/libnetdata/threads/threads.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_THREADS_H +#define NETDATA_THREADS_H 1 + +#include "../libnetdata.h" + +extern pid_t gettid(void); + +typedef enum { + NETDATA_THREAD_OPTION_DEFAULT = 0 << 0, + NETDATA_THREAD_OPTION_JOINABLE = 1 << 0, + NETDATA_THREAD_OPTION_DONT_LOG_STARTUP = 1 << 1, + NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP = 1 << 2, + NETDATA_THREAD_OPTION_DONT_LOG = NETDATA_THREAD_OPTION_DONT_LOG_STARTUP|NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP, +} NETDATA_THREAD_OPTIONS; + +#define netdata_thread_cleanup_push(func, arg) pthread_cleanup_push(func, arg) +#define netdata_thread_cleanup_pop(execute) pthread_cleanup_pop(execute) + +typedef pthread_t netdata_thread_t; + +#define NETDATA_THREAD_TAG_MAX 100 +extern const char *netdata_thread_tag(void); + +extern size_t netdata_threads_init(void); +extern void netdata_threads_init_after_fork(size_t stacksize); + +extern int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg); +extern int netdata_thread_cancel(netdata_thread_t thread); +extern int netdata_thread_join(netdata_thread_t thread, void **retval); +extern int netdata_thread_detach(pthread_t thread); + +#define netdata_thread_self pthread_self +#define netdata_thread_testcancel pthread_testcancel + +#endif //NETDATA_THREADS_H diff --git a/libnetdata/url/Makefile.am b/libnetdata/url/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/libnetdata/url/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/url/Makefile.in b/libnetdata/url/Makefile.in new file mode 100644 index 000000000..8ccc4fd64 --- /dev/null +++ b/libnetdata/url/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = libnetdata/url +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = 
@CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ 
+psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/url/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu libnetdata/url/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
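+
+# The url.c/url.h pair added below implements URL encoding/decoding. A minimal
+# usage sketch (the sample string and buffer are illustrative, not from
+# upstream):
+#
+#     char *encoded = url_encode("hello world & more");  // "hello+world+%26+more"
+#     char decoded[1024];
+#     url_decode_r(decoded, encoded, sizeof(decoded));   // "hello world & more" again
+#     freez(encoded);                                    // url_encode() allocates a copy
+#
+# url_decode_r() replaces non-printable decoded bytes with spaces to avoid
+# HTTP header injection.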
+.NOEXPORT: diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/libnetdata/url/url.c b/libnetdata/url/url.c new file mode 100644 index 000000000..07a9f8069 --- /dev/null +++ b/libnetdata/url/url.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +// ---------------------------------------------------------------------------- +// URL encode / decode +// code from: http://www.geekhideout.com/urlcode.shtml + +/* Converts a hex character to its integer value */ +char from_hex(char ch) { + return (char)(isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10); +} + +/* Converts an integer value to its hex character*/ +char to_hex(char code) { + static char hex[] = "0123456789abcdef"; + return hex[code & 15]; +} + +/* Returns a url-encoded version of str */ +/* IMPORTANT: be sure to free() the returned string after use */ +char *url_encode(char *str) { + char *buf, *pbuf; + + pbuf = buf = mallocz(strlen(str) * 3 + 1); + + while (*str) { + if (isalnum(*str) || *str == '-' || *str == '_' || *str == '.' || *str == '~') + *pbuf++ = *str; + + else if (*str == ' ') + *pbuf++ = '+'; + + else + *pbuf++ = '%', *pbuf++ = to_hex(*str >> 4), *pbuf++ = to_hex(*str & 15); + + str++; + } + *pbuf = '\0'; + + pbuf = strdupz(buf); + freez(buf); + return pbuf; +} + +/* Returns a url-decoded version of str */ +/* IMPORTANT: be sure to free() the returned string after use */ +char *url_decode(char *str) { + size_t size = strlen(str) + 1; + + char *buf = mallocz(size); + return url_decode_r(buf, str, size); +} + +char *url_decode_r(char *to, char *url, size_t size) { + char *s = url, // source + *d = to, // destination + *e = &to[size - 1]; // destination end + + while(*s && d < e) { + if(unlikely(*s == '%')) { + if(likely(s[1] && s[2])) { + char t = from_hex(s[1]) << 4 | from_hex(s[2]); + // avoid HTTP header injection + *d++ = (char)((isprint(t))? t : ' '); + s += 2; + } + } + else if(unlikely(*s == '+')) + *d++ = ' '; + + else + *d++ = *s; + + s++; + } + + *d = '\0'; + + return to; +} diff --git a/libnetdata/url/url.h b/libnetdata/url/url.h new file mode 100644 index 000000000..6cef6d7a8 --- /dev/null +++ b/libnetdata/url/url.h @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_URL_H +#define NETDATA_URL_H 1 + +#include "../libnetdata.h" + +// ---------------------------------------------------------------------------- +// URL encode / decode +// code from: http://www.geekhideout.com/urlcode.shtml + +/* Converts a hex character to its integer value */ +extern char from_hex(char ch); + +/* Converts an integer value to its hex character*/ +extern char to_hex(char code); + +/* Returns a url-encoded version of str */ +/* IMPORTANT: be sure to free() the returned string after use */ +extern char *url_encode(char *str); + +/* Returns a url-decoded version of str */ +/* IMPORTANT: be sure to free() the returned string after use */ +extern char *url_decode(char *str); + +extern char *url_decode_r(char *to, char *url, size_t size); + +#endif /* NETDATA_URL_H */ diff --git a/m4/ax_c___atomic.m4 b/m4/ax_c___atomic.m4 deleted file mode 100644 index dd5ee3d1c..000000000 --- a/m4/ax_c___atomic.m4 +++ /dev/null @@ -1,36 +0,0 @@ -# AC_C___ATOMIC -# ------------- -# Define HAVE_C___ATOMIC if __atomic works. 
-AN_IDENTIFIER([__atomic], [AC_C___ATOMIC]) -AC_DEFUN([AC_C___ATOMIC], -[AC_CACHE_CHECK([for __atomic], ac_cv_c___atomic, -[AC_LINK_IFELSE( - [AC_LANG_SOURCE( - [[int - main (int argc, char **argv) - { - volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2; - __atomic_load_n(&ul1, __ATOMIC_SEQ_CST); - __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST); - __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST); - __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST); - volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2; - __atomic_load_n(&ull1, __ATOMIC_SEQ_CST); - __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST); - __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST); - __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST); - return 0; - } - ]])], - [ac_cv_c___atomic=yes], - [ac_cv_c___atomic=no])]) -if test $ac_cv_c___atomic = yes; then - AC_DEFINE([HAVE_C___ATOMIC], 1, - [Define to 1 if __atomic operations work.]) -fi -])# AC_C___ATOMIC - diff --git a/m4/ax_c__generic.m4 b/m4/ax_c__generic.m4 deleted file mode 100644 index 0c4dd52c6..000000000 --- a/m4/ax_c__generic.m4 +++ /dev/null @@ -1,28 +0,0 @@ -# https://lists.gnu.org/archive/html/autoconf-commit/2012-12/msg00004.html -# AC_C__GENERIC -# ------------- -# Define HAVE_C__GENERIC if _Generic works, a la C11. -AN_IDENTIFIER([_Generic], [AC_C__GENERIC]) -AC_DEFUN([AC_C__GENERIC], -[AC_CACHE_CHECK([for _Generic], ac_cv_c__Generic, -[AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[int - main (int argc, char **argv) - { - int a = _Generic (argc, int: argc = 1); - int *b = &_Generic (argc, default: argc); - char ***c = _Generic (argv, int: argc, default: argv ? &argv : 0); - _Generic (1 ? 0 : b, int: a, default: b) = &argc; - _Generic (a = 1, default: a) = 3; - return a + !b + !c; - } - ]])], - [ac_cv_c__Generic=yes], - [ac_cv_c__Generic=no])]) -if test $ac_cv_c__Generic = yes; then - AC_DEFINE([HAVE_C__GENERIC], 1, - [Define to 1 if C11-style _Generic works.]) -fi -])# AC_C__GENERIC - diff --git a/m4/ax_c_lto.m4 b/m4/ax_c_lto.m4 deleted file mode 100644 index 7e6bc0119..000000000 --- a/m4/ax_c_lto.m4 +++ /dev/null @@ -1,21 +0,0 @@ -# AC_C_LTO -# ------------- -# Define HAVE_LTO if -flto works. -AN_IDENTIFIER([lto], [AC_C_LTO]) -AC_DEFUN([AC_C_LTO], -[AC_CACHE_CHECK([if -flto builds executables], ac_cv_c_lto, -[AC_RUN_IFELSE( - [AC_LANG_SOURCE( - [[#include <stdio.h> - int main(int argc, char **argv) { - return 0; - } - ]])], - [ac_cv_c_lto=yes], - [ac_cv_c_lto=no], - [ac_cv_c_lto=${ac_cv_c_lto_cross_compile}])]) -if test "${ac_cv_c_lto}" = "yes"; then - AC_DEFINE([HAVE_LTO], 1, - [Define to 1 if -flto works.]) -fi -])# AC_C_LTO diff --git a/m4/ax_c_mallinfo.m4 b/m4/ax_c_mallinfo.m4 deleted file mode 100644 index af8d0481e..000000000 --- a/m4/ax_c_mallinfo.m4 +++ /dev/null @@ -1,24 +0,0 @@ -# AC_C_MALLINFO -# ------------- -# Define HAVE_C_MALLINFO if mallinfo() works. 
-AN_IDENTIFIER([mallinfo], [AC_C_MALLINFO]) -AC_DEFUN([AC_C_MALLINFO], -[AC_CACHE_CHECK([for mallinfo], ac_cv_c_mallinfo, -[AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[#include <malloc.h>]], - [[ - struct mallinfo mi = mallinfo(); - /* make sure that fields exists */ - mi.uordblks = 0; - mi.hblkhd = 0; - mi.arena = 0; - ]] - )], - [ac_cv_c_mallinfo=yes], - [ac_cv_c_mallinfo=no])]) -if test $ac_cv_c_mallinfo = yes; then - AC_DEFINE([HAVE_C_MALLINFO], 1, - [Define to 1 if glibc mallinfo exists.]) -fi -])# AC_C_MALLINFO diff --git a/m4/ax_c_mallopt.m4 b/m4/ax_c_mallopt.m4 deleted file mode 100644 index 31c4fdc36..000000000 --- a/m4/ax_c_mallopt.m4 +++ /dev/null @@ -1,20 +0,0 @@ -# AC_C_MALLOPT -# ------------- -# Define HAVE_C_MALLOPT if mallopt() works. -AN_IDENTIFIER([mallopt], [AC_C_MALLOPT]) -AC_DEFUN([AC_C_MALLOPT], -[AC_CACHE_CHECK([for mallopt], ac_cv_c_mallopt, -[AC_LINK_IFELSE( - [AC_LANG_SOURCE( - [[#include <malloc.h> - int main(int argc, char **argv) { - mallopt(M_ARENA_MAX, 1); - } - ]])], - [ac_cv_c_mallopt=yes], - [ac_cv_c_mallopt=no])]) -if test $ac_cv_c_mallopt = yes; then - AC_DEFINE([HAVE_C_MALLOPT], 1, - [Define to 1 if glibc mallopt exists.]) -fi -])# AC_C_MALLOPT diff --git a/m4/ax_c_statement_expressions.m4 b/m4/ax_c_statement_expressions.m4 deleted file mode 100644 index fb259e727..000000000 --- a/m4/ax_c_statement_expressions.m4 +++ /dev/null @@ -1,23 +0,0 @@ -# AC_C_STMT_EXPR -# ------------- -# Define HAVE_STMT_EXPR if compiler has statement expressions. -AN_IDENTIFIER([_Generic], [AC_C_STMT_EXPR]) -AC_DEFUN([AC_C_STMT_EXPR], -[AC_CACHE_CHECK([for statement expressions], ac_cv_c_stmt_expr, -[AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[int - main (int argc, char **argv) - { - int x = ({ int y = 1; y; }); - return x; - } - ]])], - [ac_cv_c_stmt_expr=yes], - [ac_cv_c_stmt_expr=no])]) -if test $ac_cv_c_stmt_expr = yes; then - AC_DEFINE([HAVE_STMT_EXPR], 1, - [Define to 1 if compiler supports statement expressions.]) -fi -])# AC_C_STMT_EXPR - diff --git a/m4/ax_check_compile_flag.m4 b/m4/ax_check_compile_flag.m4 deleted file mode 100644 index 51df0c09a..000000000 --- a/m4/ax_check_compile_flag.m4 +++ /dev/null @@ -1,74 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) -# -# DESCRIPTION -# -# Check whether the given FLAG works with the current language's compiler -# or gives an error. (Warnings, however, are ignored) -# -# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on -# success/failure. -# -# If EXTRA-FLAGS is defined, it is added to the current language's default -# flags (e.g. CFLAGS) when the check is done. The check is thus made with -# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to -# force the compiler to issue an error when a bad flag is given. -# -# INPUT gives an alternative input source to AC_COMPILE_IFELSE. -# -# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this -# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. -# -# LICENSE -# -# Copyright (c) 2008 Guido U. 
Draheim <guidod@gmx.de> -# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com> -# -# This program is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see <http://www.gnu.org/licenses/>. -# -# As a special exception, the respective Autoconf Macro's copyright owner -# gives unlimited permission to copy, distribute and modify the configure -# scripts that are the output of Autoconf when processing the Macro. You -# need not follow the terms of the GNU General Public License when using -# or distributing such scripts, even though portions of the text of the -# Macro appear in them. The GNU General Public License (GPL) does govern -# all other use of the material that constitutes the Autoconf Macro. -# -# This special exception to the GPL applies to versions of the Autoconf -# Macro released by the Autoconf Archive. When you make and distribute a -# modified version of the Autoconf Macro, you may extend this special -# exception to the GPL to apply to your modified version as well. - -#serial 3 - -AC_DEFUN([AX_CHECK_COMPILE_FLAG], -[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX -AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl -AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ - ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS - _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" - AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], - [AS_VAR_SET(CACHEVAR,[yes])], - [AS_VAR_SET(CACHEVAR,[no])]) - _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) -AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], - [m4_default([$2], :)], - [m4_default([$3], :)]) -AS_VAR_POPDEF([CACHEVAR])dnl -])dnl AX_CHECK_COMPILE_FLAGS diff --git a/m4/ax_check_enable_debug.m4 b/m4/ax_check_enable_debug.m4 deleted file mode 100644 index f99d75feb..000000000 --- a/m4/ax_check_enable_debug.m4 +++ /dev/null @@ -1,124 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_check_enable_debug.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_CHECK_ENABLE_DEBUG([enable by default=yes/info/profile/no], [ENABLE DEBUG VARIABLES ...], [DISABLE DEBUG VARIABLES NDEBUG ...], [IS-RELEASE]) -# -# DESCRIPTION -# -# Check for the presence of an --enable-debug option to configure, with -# the specified default value used when the option is not present. Return -# the value in the variable $ax_enable_debug. -# -# Specifying 'yes' adds '-g -O0' to the compilation flags for all -# languages. Specifying 'info' adds '-g' to the compilation flags. -# Specifying 'profile' adds '-g -pg' to the compilation flags and '-pg' to -# the linking flags. Otherwise, nothing is added. -# -# Define the variables listed in the second argument if debug is enabled, -# defaulting to no variables. Defines the variables listed in the third -# argument if debug is disabled, defaulting to NDEBUG. All lists of -# variables should be space-separated. 
-# -# If debug is not enabled, ensure AC_PROG_* will not add debugging flags. -# Should be invoked prior to any AC_PROG_* compiler checks. -# -# IS-RELEASE can be used to change the default to 'no' when making a -# release. Set IS-RELEASE to 'yes' or 'no' as appropriate. By default, it -# uses the value of $ax_is_release, so if you are using the AX_IS_RELEASE -# macro, there is no need to pass this parameter. -# -# AX_IS_RELEASE([git-directory]) -# AX_CHECK_ENABLE_DEBUG() -# -# LICENSE -# -# Copyright (c) 2011 Rhys Ulerich <rhys.ulerich@gmail.com> -# Copyright (c) 2014, 2015 Philip Withnall <philip@tecnocode.co.uk> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. - -#serial 5 - -AC_DEFUN([AX_CHECK_ENABLE_DEBUG],[ - AC_BEFORE([$0],[AC_PROG_CC])dnl - AC_BEFORE([$0],[AC_PROG_CXX])dnl - AC_BEFORE([$0],[AC_PROG_F77])dnl - AC_BEFORE([$0],[AC_PROG_FC])dnl - - AC_MSG_CHECKING(whether to enable debugging) - - ax_enable_debug_default=m4_tolower(m4_normalize(ifelse([$1],,[no],[$1]))) - ax_enable_debug_is_release=m4_tolower(m4_normalize(ifelse([$4],, - [$ax_is_release], - [$4]))) - - # If this is a release, override the default. - AS_IF([test "$ax_enable_debug_is_release" = "yes"], - [ax_enable_debug_default="no"]) - - m4_define(ax_enable_debug_vars,[m4_normalize(ifelse([$2],,,[$2]))]) - m4_define(ax_disable_debug_vars,[m4_normalize(ifelse([$3],,[NDEBUG],[$3]))]) - - AC_ARG_ENABLE(debug, - [AS_HELP_STRING([--enable-debug=]@<:@yes/info/profile/no@:>@,[compile with debugging])], - [],enable_debug=$ax_enable_debug_default) - - # empty mean debug yes - AS_IF([test "x$enable_debug" = "x"], - [enable_debug="yes"]) - - # case of debug - AS_CASE([$enable_debug], - [yes],[ - AC_MSG_RESULT(yes) - CFLAGS="${CFLAGS} -g -O0" - CXXFLAGS="${CXXFLAGS} -g -O0" - FFLAGS="${FFLAGS} -g -O0" - FCFLAGS="${FCFLAGS} -g -O0" - OBJCFLAGS="${OBJCFLAGS} -g -O0" - ], - [info],[ - AC_MSG_RESULT(info) - CFLAGS="${CFLAGS} -g" - CXXFLAGS="${CXXFLAGS} -g" - FFLAGS="${FFLAGS} -g" - FCFLAGS="${FCFLAGS} -g" - OBJCFLAGS="${OBJCFLAGS} -g" - ], - [profile],[ - AC_MSG_RESULT(profile) - CFLAGS="${CFLAGS} -g -pg" - CXXFLAGS="${CXXFLAGS} -g -pg" - FFLAGS="${FFLAGS} -g -pg" - FCFLAGS="${FCFLAGS} -g -pg" - OBJCFLAGS="${OBJCFLAGS} -g -pg" - LDFLAGS="${LDFLAGS} -pg" - ], - [ - AC_MSG_RESULT(no) - dnl Ensure AC_PROG_CC/CXX/F77/FC/OBJC will not enable debug flags - dnl by setting any unset environment flag variables - AS_IF([test "x${CFLAGS+set}" != "xset"], - [CFLAGS=""]) - AS_IF([test "x${CXXFLAGS+set}" != "xset"], - [CXXFLAGS=""]) - AS_IF([test "x${FFLAGS+set}" != "xset"], - [FFLAGS=""]) - AS_IF([test "x${FCFLAGS+set}" != "xset"], - [FCFLAGS=""]) - AS_IF([test "x${OBJCFLAGS+set}" != "xset"], - [OBJCFLAGS=""]) - ]) - - dnl Define various variables if debugging is disabled. - dnl assert.h is a NOP if NDEBUG is defined, so define it by default. 
- AS_IF([test "x$enable_debug" = "xyes"], - [m4_map_args_w(ax_enable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is enabled])])], - [m4_map_args_w(ax_disable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is disabled])])]) - ax_enable_debug=$enable_debug -]) diff --git a/m4/ax_gcc_func_attribute.m4 b/m4/ax_gcc_func_attribute.m4 deleted file mode 100644 index a6d9f6c5f..000000000 --- a/m4/ax_gcc_func_attribute.m4 +++ /dev/null @@ -1,226 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_gcc_func_attribute.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_GCC_FUNC_ATTRIBUTE(ATTRIBUTE) -# -# DESCRIPTION -# -# This macro checks if the compiler supports one of GCC's function -# attributes; many other compilers also provide function attributes with -# the same syntax. Compiler warnings are used to detect supported -# attributes as unsupported ones are ignored by default so quieting -# warnings when using this macro will yield false positives. -# -# The ATTRIBUTE parameter holds the name of the attribute to be checked. -# -# If ATTRIBUTE is supported define HAVE_FUNC_ATTRIBUTE_<ATTRIBUTE>. -# -# The macro caches its result in the ax_cv_have_func_attribute_<attribute> -# variable. -# -# The macro currently supports the following function attributes: -# -# alias -# aligned -# alloc_size -# always_inline -# artificial -# cold -# const -# constructor -# constructor_priority for constructor attribute with priority -# deprecated -# destructor -# dllexport -# dllimport -# error -# externally_visible -# flatten -# format -# format_arg -# gnu_inline -# hot -# ifunc -# leaf -# malloc -# noclone -# noinline -# nonnull -# noreturn -# nothrow -# optimize -# pure -# unused -# used -# visibility -# warning -# warn_unused_result -# weak -# weakref -# -# Unsuppored function attributes will be tested with a prototype returning -# an int and not accepting any arguments and the result of the check might -# be wrong or meaningless so use with care. -# -# LICENSE -# -# Copyright (c) 2013 Gabriele Svelto <gabriele.svelto@gmail.com> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. 
- -#serial 4 - -AC_DEFUN([AX_GCC_FUNC_ATTRIBUTE], [ - AS_VAR_PUSHDEF([ac_var], [ax_cv_have_func_attribute_$1]) - - AC_CACHE_CHECK([for __attribute__(($1))], [ac_var], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - m4_case([$1], - [alias], [ - int foo( void ) { return 0; } - int bar( void ) __attribute__(($1("foo"))); - ], - [aligned], [ - int foo( void ) __attribute__(($1(32))); - ], - [alloc_size], [ - void *foo(int a) __attribute__(($1(1))); - ], - [always_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [artificial], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [cold], [ - int foo( void ) __attribute__(($1)); - ], - [const], [ - int foo( void ) __attribute__(($1)); - ], - [constructor_priority], [ - int foo( void ) __attribute__((__constructor__(65535/2))); - ], - [constructor], [ - int foo( void ) __attribute__(($1)); - ], - [deprecated], [ - int foo( void ) __attribute__(($1(""))); - ], - [destructor], [ - int foo( void ) __attribute__(($1)); - ], - [dllexport], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [dllimport], [ - int foo( void ) __attribute__(($1)); - ], - [error], [ - int foo( void ) __attribute__(($1(""))); - ], - [externally_visible], [ - int foo( void ) __attribute__(($1)); - ], - [flatten], [ - int foo( void ) __attribute__(($1)); - ], - [format], [ - int foo(const char *p, ...) __attribute__(($1(printf, 1, 2))); - ], - [format_arg], [ - char *foo(const char *p) __attribute__(($1(1))); - ], - [gnu_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [hot], [ - int foo( void ) __attribute__(($1)); - ], - [ifunc], [ - int my_foo( void ) { return 0; } - static int (*resolve_foo(void))(void) { return my_foo; } - int foo( void ) __attribute__(($1("resolve_foo"))); - ], - [leaf], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [malloc], [ - void *foo( void ) __attribute__(($1)); - ], - [noclone], [ - int foo( void ) __attribute__(($1)); - ], - [noinline], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [nonnull], [ - int foo(char *p) __attribute__(($1(1))); - ], - [noreturn], [ - void foo( void ) __attribute__(($1)); - ], - [nothrow], [ - int foo( void ) __attribute__(($1)); - ], - [optimize], [ - __attribute__(($1(3))) int foo( void ) { return 0; } - ], - [pure], [ - int foo( void ) __attribute__(($1)); - ], - [returns_nonnull], [ - void *foo( void ) __attribute__(($1)); - ], - [unused], [ - int foo( void ) __attribute__(($1)); - ], - [used], [ - int foo( void ) __attribute__(($1)); - ], - [visibility], [ - int foo_def( void ) __attribute__(($1("default"))); - int foo_hid( void ) __attribute__(($1("hidden"))); - int foo_int( void ) __attribute__(($1("internal"))); - int foo_pro( void ) __attribute__(($1("protected"))); - ], - [warning], [ - int foo( void ) __attribute__(($1(""))); - ], - [warn_unused_result], [ - int foo( void ) __attribute__(($1)); - ], - [weak], [ - int foo( void ) __attribute__(($1)); - ], - [weakref], [ - static int foo( void ) { return 0; } - static int bar( void ) __attribute__(($1("foo"))); - ], - [ - m4_warn([syntax], [Unsupported attribute $1, the test may fail]) - int foo( void ) __attribute__(($1)); - ] - )], []) - ], - dnl GCC doesn't exit with an error if an unknown attribute is - dnl provided but only outputs a warning, so accept the attribute - dnl only if no warning were issued. 
- [AS_IF([test -s conftest.err], - [AS_VAR_SET([ac_var], [no])], - [AS_VAR_SET([ac_var], [yes])])], - [AS_VAR_SET([ac_var], [no])]) - ]) - - AS_IF([test yes = AS_VAR_GET([ac_var])], - [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_FUNC_ATTRIBUTE_$1), 1, - [Define to 1 if the system has the `$1' function attribute])], []) - - AS_VAR_POPDEF([ac_var]) -]) diff --git a/m4/ax_pthread.m4 b/m4/ax_pthread.m4 deleted file mode 100644 index d383ad5c6..000000000 --- a/m4/ax_pthread.m4 +++ /dev/null @@ -1,332 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_pthread.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) -# -# DESCRIPTION -# -# This macro figures out how to build C programs using POSIX threads. It -# sets the PTHREAD_LIBS output variable to the threads library and linker -# flags, and the PTHREAD_CFLAGS output variable to any special C compiler -# flags that are needed. (The user can also force certain compiler -# flags/libs to be tested by setting these environment variables.) -# -# Also sets PTHREAD_CC to any special C compiler that is needed for -# multi-threaded programs (defaults to the value of CC otherwise). (This -# is necessary on AIX to use the special cc_r compiler alias.) -# -# NOTE: You are assumed to not only compile your program with these flags, -# but also link it with them as well. e.g. you should link with -# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS -# -# If you are only building threads programs, you may wish to use these -# variables in your default LIBS, CFLAGS, and CC: -# -# LIBS="$PTHREAD_LIBS $LIBS" -# CFLAGS="$CFLAGS $PTHREAD_CFLAGS" -# CC="$PTHREAD_CC" -# -# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant -# has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name -# (e.g. PTHREAD_CREATE_UNDETACHED on AIX). -# -# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the -# PTHREAD_PRIO_INHERIT symbol is defined when compiling with -# PTHREAD_CFLAGS. -# -# ACTION-IF-FOUND is a list of shell commands to run if a threads library -# is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it -# is not found. If ACTION-IF-FOUND is not specified, the default action -# will define HAVE_PTHREAD. -# -# Please let the authors know if this macro fails on any platform, or if -# you have any other suggestions or comments. This macro was based on work -# by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help -# from M. Frigo), as well as ac_pthread and hb_pthread macros posted by -# Alejandro Forero Cuervo to the autoconf macro repository. We are also -# grateful for the helpful feedback of numerous users. -# -# Updated for Autoconf 2.68 by Daniel Richard G. -# -# LICENSE -# -# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu> -# Copyright (c) 2011 Daniel Richard G. <skunk@iSKUNK.ORG> -# -# This program is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see <http://www.gnu.org/licenses/>. -# -# As a special exception, the respective Autoconf Macro's copyright owner -# gives unlimited permission to copy, distribute and modify the configure -# scripts that are the output of Autoconf when processing the Macro. You -# need not follow the terms of the GNU General Public License when using -# or distributing such scripts, even though portions of the text of the -# Macro appear in them. The GNU General Public License (GPL) does govern -# all other use of the material that constitutes the Autoconf Macro. -# -# This special exception to the GPL applies to versions of the Autoconf -# Macro released by the Autoconf Archive. When you make and distribute a -# modified version of the Autoconf Macro, you may extend this special -# exception to the GPL to apply to your modified version as well. - -#serial 21 - -AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD]) -AC_DEFUN([AX_PTHREAD], [ -AC_REQUIRE([AC_CANONICAL_HOST]) -AC_LANG_PUSH([C]) -ax_pthread_ok=no - -# We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). -# It gets checked for in the link test anyway. - -# First of all, check if the user has set any of the PTHREAD_LIBS, -# etcetera environment variables, and if threads linking works using -# them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) - AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes]) - AC_MSG_RESULT([$ax_pthread_ok]) - if test x"$ax_pthread_ok" = xno; then - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" - fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" -fi - -# We must check for the threads library under a number of different -# names; the ordering is very important because some systems -# (e.g. DEC) have both -lpthread and -lpthreads, where one of the -# libraries is broken (non-POSIX). - -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. - -ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" - -# The ordering *is* (sometimes) important. Some notes on the -# individual items follow: - -# pthreads: AIX (must check this before -lpthread) -# none: in case threads are in libc; should be tried before -Kthread and -# other compiler flags to prevent continual compiler warnings -# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc -# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# ... 
-mt is also the pthreads flag for HP/aCC -# pthread: Linux, etcetera -# --thread-safe: KAI C++ -# pthread-config: use pthread-config program (for GNU Pth library) - -case ${host_os} in - solaris*) - - # On Solaris (at least, for some versions), libc contains stubbed - # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthreads/-mt/ - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: - - ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" - ;; - - darwin*) - ax_pthread_flags="-pthread $ax_pthread_flags" - ;; -esac - -# Clang doesn't consider unrecognized options an error unless we specify -# -Werror. We throw in some extra Clang-specific options to ensure that -# this doesn't happen for GCC, which also accepts -Werror. - -AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags]) -save_CFLAGS="$CFLAGS" -ax_pthread_extra_flags="-Werror" -CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])], - [AC_MSG_RESULT([yes])], - [ax_pthread_extra_flags= - AC_MSG_RESULT([no])]) -CFLAGS="$save_CFLAGS" - -if test x"$ax_pthread_ok" = xno; then -for flag in $ax_pthread_flags; do - - case $flag in - none) - AC_MSG_CHECKING([whether pthreads work without any flags]) - ;; - - -*) - AC_MSG_CHECKING([whether pthreads work with $flag]) - PTHREAD_CFLAGS="$flag" - ;; - - pthread-config) - AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no]) - if test x"$ax_pthread_config" = xno; then continue; fi - PTHREAD_CFLAGS="`pthread-config --cflags`" - PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" - ;; - - *) - AC_MSG_CHECKING([for the pthreads library -l$flag]) - PTHREAD_LIBS="-l$flag" - ;; - esac - - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" - LIBS="$PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" - - # Check for various functions. We must include pthread.h, - # since some functions may be macros. (On the Sequent, we - # need a special flag -Kthread to make this header compile.) - # We check for pthread_join because it is in -lpthread on IRIX - # while pthread_create is in libc. We check for pthread_attr_init - # due to DEC craziness with -lpthreads. We check for - # pthread_cleanup_push because it is one of the few pthread - # functions on Solaris that doesn't have a non-functional libc stub. - # We try pthread_create on general principles. - AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h> - static void routine(void *a) { a = 0; } - static void *start_routine(void *a) { return a; }], - [pthread_t th; pthread_attr_t attr; - pthread_create(&th, 0, start_routine, 0); - pthread_join(th, 0); - pthread_attr_init(&attr); - pthread_cleanup_push(routine, 0); - pthread_cleanup_pop(0) /* ; */])], - [ax_pthread_ok=yes], - []) - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - AC_MSG_RESULT([$ax_pthread_ok]) - if test "x$ax_pthread_ok" = xyes; then - break; - fi - - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" -done -fi - -# Various other checks: -if test "x$ax_pthread_ok" = xyes; then - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - - # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. 
- AC_MSG_CHECKING([for joinable pthread attribute]) - attr_name=unknown - for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do - AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>], - [int attr = $attr; return attr /* ; */])], - [attr_name=$attr; break], - []) - done - AC_MSG_RESULT([$attr_name]) - if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then - AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name], - [Define to necessary symbol if this constant - uses a non-standard name on your system.]) - fi - - AC_MSG_CHECKING([if more special flags are required for pthreads]) - flag=no - case ${host_os} in - aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; - osf* | hpux*) flag="-D_REENTRANT";; - solaris*) - if test "$GCC" = "yes"; then - flag="-D_REENTRANT" - else - # TODO: What about Clang on Solaris? - flag="-mt -D_REENTRANT" - fi - ;; - esac - AC_MSG_RESULT([$flag]) - if test "x$flag" != xno; then - PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" - fi - - AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT], - [ax_cv_PTHREAD_PRIO_INHERIT], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]], - [[int i = PTHREAD_PRIO_INHERIT;]])], - [ax_cv_PTHREAD_PRIO_INHERIT=yes], - [ax_cv_PTHREAD_PRIO_INHERIT=no]) - ]) - AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"], - [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])]) - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - # More AIX lossage: compile with *_r variant - if test "x$GCC" != xyes; then - case $host_os in - aix*) - AS_CASE(["x/$CC"], - [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6], - [#handle absolute path differently from PATH based program lookup - AS_CASE(["x$CC"], - [x/*], - [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])], - [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])]) - ;; - esac - fi -fi - -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" - -AC_SUBST([PTHREAD_LIBS]) -AC_SUBST([PTHREAD_CFLAGS]) -AC_SUBST([PTHREAD_CC]) - -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x"$ax_pthread_ok" = xyes; then - ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1]) - : -else - ax_pthread_ok=no - $2 -fi -AC_LANG_POP -])dnl AX_PTHREAD diff --git a/m4/jemalloc.m4 b/m4/jemalloc.m4 deleted file mode 100644 index bb5dceb81..000000000 --- a/m4/jemalloc.m4 +++ /dev/null @@ -1,88 +0,0 @@ -dnl -------------------------------------------------------- -*- autoconf -*- -dnl Licensed to the Apache Software Foundation (ASF) under one or more -dnl contributor license agreements. See the NOTICE file distributed with -dnl this work for additional information regarding copyright ownership. -dnl The ASF licenses this file to You under the Apache License, Version 2.0 -dnl (the "License"); you may not use this file except in compliance with -dnl the License. You may obtain a copy of the License at -dnl -dnl http://www.apache.org/licenses/LICENSE-2.0 -dnl -dnl Unless required by applicable law or agreed to in writing, software -dnl distributed under the License is distributed on an "AS IS" BASIS, -dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -dnl See the License for the specific language governing permissions and -dnl limitations under the License. 
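
Stepping back to AX_PTHREAD as a whole: per its NOTE above, the discovered flags must be applied to both the compile and the link step. A sketch of the resulting link command for a hypothetical single-file program app.c, with the variable values coming from the macro's AC_SUBST outputs:

    ${PTHREAD_CC} ${CFLAGS} ${PTHREAD_CFLAGS} ${LDFLAGS} -o app app.c ${PTHREAD_LIBS} ${LIBS}

On a typical Linux/gcc system this expands to little more than "gcc -pthread -o app app.c", while on AIX PTHREAD_CC may become the cc_r alias the macro probes for above.
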
- -dnl -dnl jemalloc.m4: Trafficserver's jemalloc autoconf macros -dnl modified to skip other TS_ helpers -dnl - -AC_DEFUN([TS_CHECK_JEMALLOC], [ -AC_ARG_WITH([jemalloc-prefix], - [AS_HELP_STRING([--with-jemalloc-prefix=PREFIX],[Specify the jemalloc prefix [default=""]])], - [ - jemalloc_prefix="$withval" - ],[ - if test "`uname -s`" = "Darwin"; then - jemalloc_prefix="je_" - else - jemalloc_prefix="" - fi - ] -) -AC_DEFINE_UNQUOTED([prefix_jemalloc], [${jemalloc_prefix}], [jemalloc prefix]) - -enable_jemalloc=no -AC_ARG_WITH([jemalloc], [AS_HELP_STRING([--with-jemalloc=DIR], [use a specific jemalloc library])], -[ - if test "$withval" != "no"; then - if test "x${enable_tcmalloc}" = "xyes"; then - AC_MSG_ERROR([Cannot compile with both jemalloc and tcmalloc]) - fi - enable_jemalloc=yes - jemalloc_base_dir="$withval" - case "$withval" in - yes) - jemalloc_base_dir="/usr" - AC_MSG_CHECKING(checking for jemalloc includes standard directories) - ;; - *":"*) - jemalloc_include="`echo $withval |sed -e 's/:.*$//'`" - jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`" - AC_MSG_CHECKING(checking for jemalloc includes in $jemalloc_include libs in $jemalloc_ldflags) - ;; - *) - jemalloc_include="$withval/include" - jemalloc_ldflags="$withval/lib" - AC_MSG_CHECKING(checking for jemalloc includes in $withval) - ;; - esac - fi -]) - -has_jemalloc=0 -if test "$enable_jemalloc" != "no"; then - jemalloc_have_headers=0 - jemalloc_have_libs=0 - if test "$jemalloc_base_dir" != "/usr"; then - CFLAGS="${CFLAGS} -I${jemalloc_include}" - LDFLAGS="${LDFLAGS} -L${jemalloc_ldflags}" - LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -R${jemalloc_ldflags}" - fi - func="${jemalloc_prefix}malloc_stats_print" - AC_CHECK_LIB(jemalloc, ${func}, [jemalloc_have_libs=1]) - if test "$jemalloc_have_libs" != "0"; then - AC_CHECK_HEADERS([jemalloc/jemalloc.h], [jemalloc_have_headers=1]) - fi - if test "$jemalloc_have_headers" != "0"; then - has_jemalloc=1 - LIBS="${LIBS} -ljemalloc" - AC_DEFINE(has_jemalloc, [1], [Link/compile against jemalloc]) - else - AC_MSG_ERROR([Couldn't find a jemalloc installation]) - fi -fi -AC_SUBST(has_jemalloc) -]) diff --git a/m4/tcmalloc.m4 b/m4/tcmalloc.m4 deleted file mode 100644 index e84697dc7..000000000 --- a/m4/tcmalloc.m4 +++ /dev/null @@ -1,58 +0,0 @@ -dnl -------------------------------------------------------- -*- autoconf -*- -dnl Licensed to the Apache Software Foundation (ASF) under one or more -dnl contributor license agreements. See the NOTICE file distributed with -dnl this work for additional information regarding copyright ownership. -dnl The ASF licenses this file to You under the Apache License, Version 2.0 -dnl (the "License"); you may not use this file except in compliance with -dnl the License. You may obtain a copy of the License at -dnl -dnl http://www.apache.org/licenses/LICENSE-2.0 -dnl -dnl Unless required by applicable law or agreed to in writing, software -dnl distributed under the License is distributed on an "AS IS" BASIS, -dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -dnl See the License for the specific language governing permissions and -dnl limitations under the License. - -dnl -dnl tcmalloc.m4: Trafficserver's tcmalloc autoconf macros -dnl modified to skip other TS_ helpers -dnl - -dnl This is kinda fugly, but need a way to both specify a directory and which -dnl of the many tcmalloc libraries to use ... 
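
Taken together, the jemalloc macro above and the tcmalloc macro that follows translate into mutually exclusive ./configure options. Illustrative invocations (all directories are hypothetical):

    ./configure --with-jemalloc=/usr                                      # headers and libs under one prefix
    ./configure --with-jemalloc=/opt/jemalloc/include:/opt/jemalloc/lib   # explicit include:lib pair
    ./configure --with-jemalloc=/usr --with-jemalloc-prefix=je_           # jemalloc built with a je_ symbol prefix
    ./configure --with-tcmalloc=/opt/gperftools --with-tcmalloc-lib=tcmalloc_minimal

Passing both --with-jemalloc and --with-tcmalloc aborts with "Cannot compile with both", since each macro checks the other's enable flag before proceeding.
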
-AC_DEFUN([TS_CHECK_TCMALLOC], [ -AC_ARG_WITH([tcmalloc-lib], - [AS_HELP_STRING([--with-tcmalloc-lib],[specify the tcmalloc library to use [default=tcmalloc]])], - [ - with_tcmalloc_lib="$withval" - ],[ - with_tcmalloc_lib="tcmalloc" - ] -) - -has_tcmalloc=0 -AC_ARG_WITH([tcmalloc], [AS_HELP_STRING([--with-tcmalloc=DIR], [use the tcmalloc library])], -[ - if test "$withval" != "no"; then - if test "x${enable_jemalloc}" = "xyes"; then - AC_MSG_ERROR([Cannot compile with both tcmalloc and jemalloc]) - fi - tcmalloc_have_lib=0 - if test "x$withval" != "xyes" && test "x$withval" != "x"; then - tcmalloc_ldflags="$withval/lib" - LDFLAGS="${LDFLAGS} -L${tcmalloc_ldflags}" - LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -rpath ${tcmalloc_ldflags}" - fi - AC_CHECK_LIB(${with_tcmalloc_lib}, tc_cfree, [tcmalloc_have_lib=1]) - if test "$tcmalloc_have_lib" != "0"; then - LIBS="${LIBS} -l${with_tcmalloc_lib}" - has_tcmalloc=1 - AC_DEFINE(has_tcmalloc, [1], [Link/compile against tcmalloc]) - else - AC_MSG_ERROR([Couldn't find a tcmalloc installation]) - fi - fi -]) -AC_SUBST(has_tcmalloc) -]) diff --git a/makeself/Makefile.am b/makeself/Makefile.am index 923f3cefb..f6f9167a0 100644 --- a/makeself/Makefile.am +++ b/makeself/Makefile.am @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later MAINTAINERCLEANFILES= $(srcdir)/Makefile.in dist_noinst_DATA = \ @@ -10,8 +11,8 @@ dist_noinst_SCRIPTS = \ install-alpine-packages.sh \ post-installer.sh \ jobs/10-prepare-destination.install.sh \ - jobs/50-curl-7.53.1.install.sh \ - jobs/50-bash-4.4.install.sh \ + jobs/50-curl-7.60.0.install.sh \ + jobs/50-bash-4.4.18.install.sh \ jobs/50-fping-4.0.install.sh \ jobs/70-netdata-git.install.sh \ jobs/99-makeself.install.sh \ diff --git a/makeself/Makefile.in b/makeself/Makefile.in index 9c84cc042..8962dbbc1 100644 --- a/makeself/Makefile.in +++ b/makeself/Makefile.in @@ -83,14 +83,16 @@ subdir = makeself DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d @@ -217,6 +219,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -238,6 +241,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ @@ -264,6 +268,8 @@ top_builddir = @top_builddir@ top_srcdir = 
@top_srcdir@ varlibdir = @varlibdir@ webdir = @webdir@ + +# SPDX-License-Identifier: GPL-3.0-or-later MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ $(NULL) @@ -275,8 +281,8 @@ dist_noinst_SCRIPTS = \ install-alpine-packages.sh \ post-installer.sh \ jobs/10-prepare-destination.install.sh \ - jobs/50-curl-7.53.1.install.sh \ - jobs/50-bash-4.4.install.sh \ + jobs/50-curl-7.60.0.install.sh \ + jobs/50-bash-4.4.18.install.sh \ jobs/50-fping-4.0.install.sh \ jobs/70-netdata-git.install.sh \ jobs/99-makeself.install.sh \ diff --git a/makeself/build-x86_64-static.sh b/makeself/build-x86_64-static.sh index 357666093..b0902512c 100755 --- a/makeself/build-x86_64-static.sh +++ b/makeself/build-x86_64-static.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later . $(dirname "$0")/../installer/functions.sh || exit 1 diff --git a/makeself/build.sh b/makeself/build.sh index afa4f545e..e8c1c9ddc 100755 --- a/makeself/build.sh +++ b/makeself/build.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later # ----------------------------------------------------------------------------- # parse command line arguments @@ -32,7 +33,7 @@ cd $(dirname "$0") || exit 1 # download it and run from it if [ ! -f ../netdata-installer.sh ] then - git clone https://github.com/firehol/netdata.git netdata.git || exit 1 + git clone https://github.com/netdata/netdata.git netdata.git || exit 1 cd netdata.git/makeself || exit 1 ./build.sh "$@" exit $? diff --git a/makeself/functions.sh b/makeself/functions.sh index 839fc3226..10b324deb 100755 --- a/makeself/functions.sh +++ b/makeself/functions.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later # ----------------------------------------------------------------------------- @@ -38,7 +39,7 @@ fetch() { if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ] then cd "${NETDATA_MAKESELF_PATH}/tmp" - run tar -zxvpf "${tar}" + run tar -zxpf "${tar}" cd - fi diff --git a/makeself/install-alpine-packages.sh b/makeself/install-alpine-packages.sh index 87cd29669..695be4d4f 100755 --- a/makeself/install-alpine-packages.sh +++ b/makeself/install-alpine-packages.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later # this script should be running in alpine linux # install the required packages diff --git a/makeself/install-or-update.sh b/makeself/install-or-update.sh index eed2bc301..bfcbe720a 100755 --- a/makeself/install-or-update.sh +++ b/makeself/install-or-update.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later . $(dirname "${0}")/functions.sh @@ -23,62 +24,49 @@ do shift done +deleted_stock_configs=0 +if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ] +then -# ----------------------------------------------------------------------------- -progress "Checking new configuration files" - -declare -A configs_signatures=() -. system/configs.signatures - -if [ ! 
-d etc/netdata ] - then - run mkdir -p etc/netdata -fi - -md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" -for x in $(find etc.new -type f) -do - # find it relative filename - f="${x/etc.new\/netdata\//}" - t="${x/etc.new\//etc\/}" - d=$(dirname "${t}") + # ----------------------------------------------------------------------------- + progress "Deleting stock configuration files from user configuration directory" - #echo >&2 "x: ${x}" - #echo >&2 "t: ${t}" - #echo >&2 "d: ${d}" + declare -A configs_signatures=() + source "system/configs.signatures" - if [ ! -d "${d}" ] + if [ ! -d etc/netdata ] then - run mkdir -p "${d}" + run mkdir -p etc/netdata fi - if [ ! -f "${t}" ] - then - run cp "${x}" "${t}" - continue - fi + md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" + for x in $(find etc -type f) + do + # find it relative filename + f="${x/etc\/netdata\//}" - if [ ! -z "${md5sum}" ] - then - # find the checksum of the existing file - md5="$(cat "${t}" | ${md5sum} | cut -d ' ' -f 1)" - #echo >&2 "md5: ${md5}" + # find the stock filename + t="${f/.conf.old/.conf}" + t="${t/.conf.orig/.conf}" - # check if it matches - if [ "${configs_signatures[${md5}]}" = "${f}" ] + if [ ! -z "${md5sum}" ] then - run cp "${x}" "${t}" + # find the checksum of the existing file + md5="$( ${md5sum} <"${x}" | cut -d ' ' -f 1)" + #echo >&2 "md5: ${md5}" + + # check if it matches + if [ "${configs_signatures[${md5}]}" = "${t}" ] + then + # it matches the default + run rm -f "${x}" + deleted_stock_configs=$(( deleted_stock_configs + 1 )) + fi fi - fi - - if ! [[ "${x}" =~ .*\.orig ]] - then - run mv "${x}" "${t}.orig" - fi -done - -run rm -rf etc.new + done + touch "etc/netdata/.installer-cleanup-of-stock-configs-done" +fi # ----------------------------------------------------------------------------- progress "Add user netdata to required user groups" @@ -165,6 +153,26 @@ dir_should_be_link . var/lib/netdata netdata-dbs dir_should_be_link . var/cache/netdata netdata-metrics dir_should_be_link . var/log/netdata netdata-logs +dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig + +if [ ${deleted_stock_configs} -gt 0 ] +then + dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES" +fi + + +# ----------------------------------------------------------------------------- + +progress "create user config directories" + +for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" +do + if [ ! -d "etc/netdata/${x}" ] + then + run mkdir -p "etc/netdata/${x}" || exit 1 + fi +done + # ----------------------------------------------------------------------------- progress "fix permissions" diff --git a/makeself/jobs/10-prepare-destination.install.sh b/makeself/jobs/10-prepare-destination.install.sh index 58c8c25fd..019732636 100755 --- a/makeself/jobs/10-prepare-destination.install.sh +++ b/makeself/jobs/10-prepare-destination.install.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later . $(dirname "${0}")/../functions.sh "${@}" || exit 1 diff --git a/makeself/jobs/50-bash-4.4.18.install.sh b/makeself/jobs/50-bash-4.4.18.install.sh new file mode 100755 index 000000000..000765825 --- /dev/null +++ b/makeself/jobs/50-bash-4.4.18.install.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. 
$(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "bash-4.4.18" "http://ftp.gnu.org/gnu/bash/bash-4.4.18.tar.gz" + +run ./configure \ + --prefix=${NETDATA_INSTALL_PATH} \ + --without-bash-malloc \ + --enable-static-link \ + --enable-net-redirections \ + --enable-array-variables \ + --disable-profiling \ + --disable-nls \ +# --disable-rpath \ +# --enable-alias \ +# --enable-arith-for-command \ +# --enable-array-variables \ +# --enable-brace-expansion \ +# --enable-casemod-attributes \ +# --enable-casemod-expansions \ +# --enable-command-timing \ +# --enable-cond-command \ +# --enable-cond-regexp \ +# --enable-directory-stack \ +# --enable-dparen-arithmetic \ +# --enable-function-import \ +# --enable-glob-asciiranges-default \ +# --enable-help-builtin \ +# --enable-job-control \ +# --enable-net-redirections \ +# --enable-process-substitution \ +# --enable-progcomp \ +# --enable-prompt-string-decoding \ +# --enable-readline \ +# --enable-select \ + + +run make clean +run make -j${SYSTEM_CPUS} + +cat >examples/loadables/Makefile <<EOF +all: +clean: +install: +EOF + +run make install + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/bin/bash +fi diff --git a/makeself/jobs/50-bash-4.4.install.sh b/makeself/jobs/50-bash-4.4.install.sh deleted file mode 100755 index 7868a1e76..000000000 --- a/makeself/jobs/50-bash-4.4.install.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -. $(dirname "${0}")/../functions.sh "${@}" || exit 1 - -fetch "bash-4.4" "http://ftp.gnu.org/gnu/bash/bash-4.4.tar.gz" - -run ./configure \ - --prefix=${NETDATA_INSTALL_PATH} \ - --enable-static-link \ - --disable-nls \ - --without-bash-malloc \ -# --disable-rpath \ -# --enable-alias \ -# --enable-arith-for-command \ -# --enable-array-variables \ -# --enable-brace-expansion \ -# --enable-casemod-attributes \ -# --enable-casemod-expansions \ -# --enable-command-timing \ -# --enable-cond-command \ -# --enable-cond-regexp \ -# --enable-directory-stack \ -# --enable-dparen-arithmetic \ -# --enable-function-import \ -# --enable-glob-asciiranges-default \ -# --enable-help-builtin \ -# --enable-job-control \ -# --enable-net-redirections \ -# --enable-process-substitution \ -# --enable-progcomp \ -# --enable-prompt-string-decoding \ -# --enable-readline \ -# --enable-select \ - - -run make clean -run make -j${SYSTEM_CPUS} - -cat >examples/loadables/Makefile <<EOF -all: -clean: -install: -EOF - -run make install - -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] -then - run strip ${NETDATA_INSTALL_PATH}/bin/bash -fi diff --git a/makeself/jobs/50-curl-7.53.1.install.sh b/makeself/jobs/50-curl-7.53.1.install.sh deleted file mode 100755 index ea80f0d2c..000000000 --- a/makeself/jobs/50-curl-7.53.1.install.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash - -. 
$(dirname "${0}")/../functions.sh "${@}" || exit 1 - -fetch "curl-curl-7_53_1" "https://github.com/curl/curl/archive/curl-7_53_1.tar.gz" - -export LDFLAGS="-static" -export PKG_CONFIG="pkg-config --static" - -run ./buildconf - -run ./configure \ - --prefix=${NETDATA_INSTALL_PATH} \ - --enable-optimize \ - --disable-shared \ - --enable-static \ - --enable-http \ - --enable-proxy \ - --enable-ipv6 \ - --enable-cookies \ - ${NULL} - -# Curl autoconf does not honour the curl_LDFLAGS environment variable -run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile - -run make clean -run make -j${SYSTEM_CPUS} -run make install - -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] -then - run strip ${NETDATA_INSTALL_PATH}/bin/curl -fi diff --git a/makeself/jobs/50-curl-7.60.0.install.sh b/makeself/jobs/50-curl-7.60.0.install.sh new file mode 100755 index 000000000..2b5c8f139 --- /dev/null +++ b/makeself/jobs/50-curl-7.60.0.install.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "curl-curl-7_60_0" "https://github.com/curl/curl/archive/curl-7_60_0.tar.gz" + +export LDFLAGS="-static" +export PKG_CONFIG="pkg-config --static" + +run ./buildconf + +run ./configure \ + --prefix=${NETDATA_INSTALL_PATH} \ + --enable-optimize \ + --disable-shared \ + --enable-static \ + --enable-http \ + --enable-proxy \ + --enable-ipv6 \ + --enable-cookies \ + ${NULL} + +# Curl autoconf does not honour the curl_LDFLAGS environment variable +run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile + +run make clean +run make -j${SYSTEM_CPUS} +run make install + +if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ] +then + run strip ${NETDATA_INSTALL_PATH}/bin/curl +fi diff --git a/makeself/jobs/50-fping-4.0.install.sh b/makeself/jobs/50-fping-4.0.install.sh index 2e22ebf8d..7928f1aa4 100755 --- a/makeself/jobs/50-fping-4.0.install.sh +++ b/makeself/jobs/50-fping-4.0.install.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later . $(dirname "${0}")/../functions.sh "${@}" || exit 1 diff --git a/makeself/jobs/70-netdata-git.install.sh b/makeself/jobs/70-netdata-git.install.sh index fea3a88bd..71ea0f63a 100755 --- a/makeself/jobs/70-netdata-git.install.sh +++ b/makeself/jobs/70-netdata-git.install.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later . ${NETDATA_MAKESELF_PATH}/functions.sh "${@}" || exit 1 @@ -12,13 +13,6 @@ else # export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness" fi -if [ ! -z "${NETDATA_INSTALL_PATH}" -a -d "${NETDATA_INSTALL_PATH}/etc" ] - then - # make sure we don't have an old etc path, so that the installer - # will install all files without examining changes - run mv "${NETDATA_INSTALL_PATH}/etc" "${NETDATA_INSTALL_PATH}/etc.new" -fi - run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \ --dont-wait \ --dont-start-it \ diff --git a/makeself/jobs/99-makeself.install.sh b/makeself/jobs/99-makeself.install.sh index 698f2f92d..0e0d5c9da 100755 --- a/makeself/jobs/99-makeself.install.sh +++ b/makeself/jobs/99-makeself.install.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later . 
$(dirname "${0}")/../functions.sh "${@}" || exit 1 @@ -12,18 +13,18 @@ ORIGIN="$(git config --get remote.origin.url || echo "unknown")" if [[ "${ORIGIN}" =~ ^git@github.com:.*/netdata.*$ ]] then NOWNER="${ORIGIN/git@github.com:/}" - NOWNER="${NOWNER/\/netdata*/}" + NOWNER="$( echo ${NOWNER} | cut -d '/' -f 1 )" elif [[ "${ORIGIN}" =~ ^https://github.com/.*/netdata.*$ ]] then NOWNER="${ORIGIN/https:\/\/github.com\//}" - NOWNER="${NOWNER/\/netdata*/}" + NOWNER="$( echo ${NOWNER} | cut -d '/' -f 1 )" fi # make sure it does not have any slashes in it NOWNER="${NOWNER//\//_}" -if [ "${NOWNER}" = "firehol" ] +if [ "${NOWNER}" = "netdata" ] then NOWNER= else @@ -64,33 +65,13 @@ run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \ cat >"${NETDATA_INSTALL_PATH}/bin/netdata" <<EOF #!${NETDATA_INSTALL_PATH}/bin/bash +export NETDATA_BASH_LOADABLES="DISABLE" export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}" exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}" EOF run chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata" -# ----------------------------------------------------------------------------- -# move etc to protect the destination when unpacked - -if [ ! -z "${NETDATA_INSTALL_PATH}" -a -d "${NETDATA_INSTALL_PATH}/etc" ] - then - if [ -d "${NETDATA_INSTALL_PATH}/etc.new" ] - then - run rm -rf "${NETDATA_INSTALL_PATH}/etc.new" || exit 1 - fi - - run mv "${NETDATA_INSTALL_PATH}/etc" \ - "${NETDATA_INSTALL_PATH}/etc.new" || exit 1 - - if [ -f "${NETDATA_INSTALL_PATH}/etc.new/netdata/netdata.conf" ] - then - # delete the generated netdata.conf, so that the static installer will generate a new one - run rm "${NETDATA_INSTALL_PATH}/etc.new/netdata/netdata.conf" - fi -fi - - # ----------------------------------------------------------------------------- # remove the links to allow untaring the archive diff --git a/makeself/makeself-header.sh b/makeself/makeself-header.sh index 83131a17c..19c1c3f99 100755 --- a/makeself/makeself-header.sh +++ b/makeself/makeself-header.sh @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later cat << EOF > "$archname" #!/bin/sh # This script was generated using Makeself $MS_VERSION diff --git a/makeself/makeself-help-header.txt b/makeself/makeself-help-header.txt index e26490059..6e9e96237 100644 --- a/makeself/makeself-help-header.txt +++ b/makeself/makeself-help-header.txt @@ -26,7 +26,7 @@ # SYSTEM INIT This file will be installed if this system runs with systemd: - - /etc/systemd/system/netdata.service + - /lib/systemd/system/netdata.service or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: @@ -41,6 +41,6 @@ netdata re-distributes a lot of open source software components. Check its full license at: - https://github.com/firehol/netdata/blob/master/LICENSE.md + https://github.com/netdata/netdata/blob/master/LICENSE.md diff --git a/makeself/makeself-license.txt b/makeself/makeself-license.txt index e26490059..6e9e96237 100644 --- a/makeself/makeself-license.txt +++ b/makeself/makeself-license.txt @@ -26,7 +26,7 @@ # SYSTEM INIT This file will be installed if this system runs with systemd: - - /etc/systemd/system/netdata.service + - /lib/systemd/system/netdata.service or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: @@ -41,6 +41,6 @@ netdata re-distributes a lot of open source software components. 
Check its full license at: - https://github.com/firehol/netdata/blob/master/LICENSE.md + https://github.com/netdata/netdata/blob/master/LICENSE.md diff --git a/makeself/makeself.sh b/makeself/makeself.sh index 2ce37a24f..ee89df9a4 100755 --- a/makeself/makeself.sh +++ b/makeself/makeself.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: GPL-3.0-or-later # # Makeself version 2.3.x # by Stephane Peter <megastep@megastep.org> diff --git a/makeself/post-installer.sh b/makeself/post-installer.sh index 10f9863b9..38cc41ef7 100755 --- a/makeself/post-installer.sh +++ b/makeself/post-installer.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: GPL-3.0-or-later # This script is started using the shell of the system # and executes our 'install-or-update.sh' script diff --git a/makeself/run-all-jobs.sh b/makeself/run-all-jobs.sh index 2ad594380..7a35fe648 100755 --- a/makeself/run-all-jobs.sh +++ b/makeself/run-all-jobs.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later LC_ALL=C umask 002 diff --git a/netdata-installer.sh b/netdata-installer.sh index e433cc189..3ca8ad320 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later +# shellcheck disable=SC1090,SC1091,SC1117,SC2002,SC2034,SC2044,SC2046,SC2086,SC2129,SC2162,SC2166,SC2181 export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" uniquepath() { @@ -79,7 +81,9 @@ REINSTALL_COMMAND="$(printf "%q " "$0" "${@}"; printf "\n")" # remove options that shown not be inherited by netdata-updater.sh REINSTALL_COMMAND="${REINSTALL_COMMAND// --dont-wait/}" REINSTALL_COMMAND="${REINSTALL_COMMAND// --dont-start-it/}" +[ "${REINSTALL_COMMAND:0:1}" != "." -a "${REINSTALL_COMMAND:0:1}" != "/" -a -f "./${0}" ] && REINSTALL_COMMAND="./${REINSTALL_COMMAND}" +# shellcheck disable=SC2230 setcap="$(which setcap 2>/dev/null || command -v setcap 2>/dev/null)" ME="$0" @@ -137,6 +141,11 @@ Valid <installer options> are: Enable/disable Link-Time-Optimization Default: enabled + --disable-x86-sse + + Disable SSE instructions + Default: enabled + --zlib-is-really-here --libs-are-really-here @@ -164,48 +173,6 @@ For the plugins, you will at least need: USAGE } -md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" -get_git_config_signatures() { - local x s file md5 - - [ ! -d "conf.d" ] && echo >&2 "Wrong directory." && return 1 - [ -z "${md5sum}" -o ! -x "${md5sum}" ] && echo >&2 "No md5sum command." && return 1 - - echo >configs.signatures.tmp - - for x in $(find conf.d -name \*.conf) - do - x="${x/conf.d\//}" - echo "${x}" - for c in $(git log --follow "conf.d/${x}" | grep ^commit | cut -d ' ' -f 2) - do - git checkout ${c} "conf.d/${x}" || continue - s="$(cat "conf.d/${x}" | ${md5sum} | cut -d ' ' -f 1)" - echo >>configs.signatures.tmp "${s}:${x}" - echo " ${s}" - done - git checkout HEAD "conf.d/${x}" || break - done - - cat configs.signatures.tmp |\ - grep -v "^$" |\ - sort -u |\ - { - echo "declare -A configs_signatures=(" - IFS=":" - while read md5 file - do - echo " ['${md5}']='${file}'" - done - echo ")" - } >configs.signatures - - rm configs.signatures.tmp - - return 0 -} - - while [ ! 
-z "${1}" ] do if [ "$1" = "--install" ] @@ -252,14 +219,14 @@ do then NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto" shift 1 + elif [ "$1" = "--disable-x86-sse" ] + then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse" + shift 1 elif [ "$1" = "--help" -o "$1" = "-h" ] then usage exit 1 - elif [ "$1" = "get_git_config_signatures" ] - then - get_git_config_signatures && exit 0 - exit 1 else echo >&2 echo >&2 "ERROR:" @@ -387,15 +354,6 @@ autotools 2.60 or later is required Sorry, you do not seem to have autotools 2.60 or later, which is required to build from the git sources of netdata. -You can either install a suitable version of autotools and automake -or download a netdata package which does not have these dependencies. - -Source packages where autotools have already been run are available -here: - https://firehol.org/download/netdata/ - -The unsigned/master folder tracks the head of the git tree and released -packages are also available. EOF exit 1 fi @@ -441,7 +399,7 @@ You may need to check these: If you still cannot get it to build, ask for help at github: - https://github.com/firehol/netdata/issues + https://github.com/netdata/netdata/issues EOF @@ -468,7 +426,7 @@ progress "Run autotools to configure the build environment" if [ "$have_autotools" ] then - run ./autogen.sh || exit 1 + run autoreconf -ivf || exit 1 fi run ./configure \ @@ -489,6 +447,7 @@ progress "Cleanup compilation directory" [ -f src/netdata ] && run make clean + # ----------------------------------------------------------------------------- progress "Compile netdata" @@ -540,79 +499,79 @@ if [ -d "${NETDATA_PREFIX}/etc/netdata" ] fi # ----------------------------------------------------------------------------- -progress "Backup existing netdata configuration before installing it" -if [ "${BASH_VERSINFO[0]}" -ge "4" ] +# shellcheck disable=SC2230 +md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" + +deleted_stock_configs=0 +if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ] then - declare -A configs_signatures=() - if [ -f "configs.signatures" ] - then - source "configs.signatures" || echo >&2 "ERROR: Failed to load configs.signatures !" - fi -fi -config_signature_matches() { - local md5="${1}" file="${2}" + progress "Backup existing netdata configuration before installing it" if [ "${BASH_VERSINFO[0]}" -ge "4" ] - then - [ "${configs_signatures[${md5}]}" = "${file}" ] && return 0 - return 1 + then + declare -A configs_signatures=() + if [ -f "configs.signatures" ] + then + source "configs.signatures" || echo >&2 "ERROR: Failed to load configs.signatures !" + fi fi - if [ -f "configs.signatures" ] - then - grep "\['${md5}'\]='${file}'" "configs.signatures" >/dev/null - return $? - fi + config_signature_matches() { + local md5="${1}" file="${2}" - return 1 -} + if [ "${BASH_VERSINFO[0]}" -ge "4" ] + then + [ "${configs_signatures[${md5}]}" = "${file}" ] && return 0 + return 1 + fi -# backup user configurations -installer_backup_suffix="${PID}.${RANDOM}" -for x in $(find -L "${NETDATA_PREFIX}/etc/netdata" -name '*.conf' -type f) -do - if [ -f "${x}" ] - then - # make a backup of the configuration file - cp -p "${x}" "${x}.old" + if [ -f "configs.signatures" ] + then + grep "\['${md5}'\]='${file}'" "configs.signatures" >/dev/null + return $? + fi - if [ -z "${md5sum}" -o ! 
-x "${md5sum}" ] + return 1 + } + + # clean up stock config files from the user configuration directory + for x in $(find -L "${NETDATA_PREFIX}/etc/netdata" -type f) + do + if [ -f "${x}" ] then - # we don't have md5sum - keep it - echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RET}is not known to distribution${TPUT_RESET}. Keeping it." - run cp -a "${x}" "${x}.installer_backup.${installer_backup_suffix}" - else # find it relative filename - f="${x/*\/etc\/netdata\//}" + f="${x/${NETDATA_PREFIX}\/etc\/netdata\//}" - # find its checksum - md5="$(cat "${x}" | ${md5sum} | cut -d ' ' -f 1)" + # find the stock filename + t="${f/.conf.installer_backup.*/.conf}" + t="${t/.conf.old/.conf}" + t="${t/.conf.orig/.conf}" - # copy the original - if [ -f "conf.d/${f}" ] + if [ -z "${md5sum}" -o ! -x "${md5sum}" ] then - cp "conf.d/${f}" "${x}.orig" - fi - - if config_signature_matches "${md5}" "${f}" - then - # it is a stock version - don't keep it - echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version." + # we don't have md5sum - keep it + echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RET}is not known to distribution${TPUT_RESET}. Keeping it." else - # edited by user - keep it - echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} has been edited by user${TPUT_RESET}. Keeping it." - run cp -a "${x}" "${x}.installer_backup.${installer_backup_suffix}" + # find its checksum + md5="$(${md5sum} <"${x}" | cut -d ' ' -f 1)" + + if config_signature_matches "${md5}" "${t}" + then + # it is a stock version - remove it + echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version of '${t}'." + run rm -f "${x}" + deleted_stock_configs=$(( deleted_stock_configs + 1 )) + else + # edited by user - keep it + echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} does not match stock of '${t}'${TPUT_RESET}. Keeping it." 
+ fi fi fi - - elif [ -f "${x}.installer_backup.${installer_backup_suffix}" ] - then - rm -f "${x}.installer_backup.${installer_backup_suffix}" - fi -done - + done +fi +touch "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" # ----------------------------------------------------------------------------- progress "Install netdata" @@ -620,19 +579,6 @@ progress "Install netdata" run make install || exit 1 -# ----------------------------------------------------------------------------- -progress "Restore user edited netdata configuration files" - -for x in $(find -L "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f) -do - if [ -f "${x}.installer_backup.${installer_backup_suffix}" ] - then - run cp -a "${x}.installer_backup.${installer_backup_suffix}" "${x}" && \ - run rm -f "${x}.installer_backup.${installer_backup_suffix}" - fi -done - - # ----------------------------------------------------------------------------- progress "Fix generated files permissions" @@ -679,22 +625,23 @@ config_option() { if [ "${UID}" = "0" ] then NETDATA_USER="$( config_option "global" "run as user" "netdata" )" - NETDATA_GROUP="${NETDATA_USER}" ROOT_USER="root" else NETDATA_USER="${USER}" - NETDATA_GROUP="$(id -g -n ${NETDATA_USER})" ROOT_USER="${NETDATA_USER}" fi +NETDATA_GROUP="$(id -g -n ${NETDATA_USER})" +[ -z "${NETDATA_GROUP}" ] && NETDATA_GROUP="${NETDATA_USER}" # the owners of the web files NETDATA_WEB_USER="$( config_option "web" "web files owner" "${NETDATA_USER}" )" +NETDATA_WEB_GROUP="${NETDATA_GROUP}" if [ "${UID}" = "0" -a "${NETDATA_USER}" != "${NETDATA_WEB_USER}" ] then - NETDATA_GROUP="$(id -g -n ${NETDATA_WEB_USER})" - [ -z "${NETDATA_GROUP}" ] && NETDATA_GROUP="${NETDATA_WEB_USER}" + NETDATA_WEB_GROUP="$(id -g -n ${NETDATA_WEB_USER})" + [ -z "${NETDATA_WEB_GROUP}" ] && NETDATA_WEB_GROUP="${NETDATA_WEB_USER}" fi -NETDATA_WEB_GROUP="$( config_option "web" "web files group" "${NETDATA_GROUP}" )" +NETDATA_WEB_GROUP="$( config_option "web" "web files group" "${NETDATA_WEB_GROUP}" )" # port defport=19999 @@ -705,28 +652,30 @@ NETDATA_LIB_DIR="$( config_option "global" "lib directory" "${NETDATA_PREFIX}/va NETDATA_CACHE_DIR="$( config_option "global" "cache directory" "${NETDATA_PREFIX}/var/cache/netdata" )" NETDATA_WEB_DIR="$( config_option "global" "web files directory" "${NETDATA_PREFIX}/usr/share/netdata/web" )" NETDATA_LOG_DIR="$( config_option "global" "log directory" "${NETDATA_PREFIX}/var/log/netdata" )" -NETDATA_CONF_DIR="$( config_option "global" "config directory" "${NETDATA_PREFIX}/etc/netdata" )" +NETDATA_USER_CONFIG_DIR="$( config_option "global" "config directory" "${NETDATA_PREFIX}/etc/netdata" )" +NETDATA_STOCK_CONFIG_DIR="$( config_option "global" "stock config directory" "${NETDATA_PREFIX}/usr/lib/netdata/conf.d" )" NETDATA_RUN_DIR="${NETDATA_PREFIX}/var/run" cat <<OPTIONSEOF Permissions - - netdata user : ${NETDATA_USER} - - netdata group : ${NETDATA_GROUP} - - web files user : ${NETDATA_WEB_USER} - - web files group : ${NETDATA_WEB_GROUP} - - root user : ${ROOT_USER} + - netdata user : ${NETDATA_USER} + - netdata group : ${NETDATA_GROUP} + - web files user : ${NETDATA_WEB_USER} + - web files group : ${NETDATA_WEB_GROUP} + - root user : ${ROOT_USER} Directories - - netdata conf dir : ${NETDATA_CONF_DIR} - - netdata log dir : ${NETDATA_LOG_DIR} - - netdata run dir : ${NETDATA_RUN_DIR} - - netdata lib dir : ${NETDATA_LIB_DIR} - - netdata web dir : ${NETDATA_WEB_DIR} - - netdata cache dir: ${NETDATA_CACHE_DIR} + - netdata user config dir : 
${NETDATA_USER_CONFIG_DIR} + - netdata stock config dir : ${NETDATA_STOCK_CONFIG_DIR} + - netdata log dir : ${NETDATA_LOG_DIR} + - netdata run dir : ${NETDATA_RUN_DIR} + - netdata lib dir : ${NETDATA_LIB_DIR} + - netdata web dir : ${NETDATA_WEB_DIR} + - netdata cache dir : ${NETDATA_CACHE_DIR} Other - - netdata port : ${NETDATA_PORT} + - netdata port : ${NETDATA_PORT} OPTIONSEOF @@ -741,17 +690,36 @@ fi # --- conf dir ---- -for x in "python.d" "charts.d" "node.d" +for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" do - if [ ! -d "${NETDATA_CONF_DIR}/${x}" ] + if [ ! -d "${NETDATA_USER_CONFIG_DIR}/${x}" ] then - echo >&2 "Creating directory '${NETDATA_CONF_DIR}/${x}'" - run mkdir -p "${NETDATA_CONF_DIR}/${x}" || exit 1 + echo >&2 "Creating directory '${NETDATA_USER_CONFIG_DIR}/${x}'" + run mkdir -p "${NETDATA_USER_CONFIG_DIR}/${x}" || exit 1 fi done -run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_CONF_DIR}" -run find "${NETDATA_CONF_DIR}" -type f -exec chmod 0640 {} \; -run find "${NETDATA_CONF_DIR}" -type d -exec chmod 0755 {} \; +run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_USER_CONFIG_DIR}" +run find "${NETDATA_USER_CONFIG_DIR}" -type f -exec chmod 0640 {} \; +run find "${NETDATA_USER_CONFIG_DIR}" -type d -exec chmod 0755 {} \; +run chmod 755 "${NETDATA_USER_CONFIG_DIR}/edit-config" + +# --- stock conf dir ---- + +[ ! -d "${NETDATA_STOCK_CONFIG_DIR}" ] && mkdir -p "${NETDATA_STOCK_CONFIG_DIR}" + +helplink="000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES" +[ ${deleted_stock_configs} -eq 0 ] && helplink="" +for link in "orig" "${helplink}" +do + if [ ! -z "${link}" ] + then + [ -L "${NETDATA_USER_CONFIG_DIR}/${link}" ] && run rm -f "${NETDATA_USER_CONFIG_DIR}/${link}" + run ln -s "${NETDATA_STOCK_CONFIG_DIR}" "${NETDATA_USER_CONFIG_DIR}/${link}" + fi +done +run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_STOCK_CONFIG_DIR}" +run find "${NETDATA_STOCK_CONFIG_DIR}" -type f -exec chmod 0640 {} \; +run find "${NETDATA_STOCK_CONFIG_DIR}" -type d -exec chmod 0755 {} \; # --- web dir ---- @@ -950,7 +918,7 @@ fi # ----------------------------------------------------------------------------- progress "Check version.txt" -if [ ! -s web/version.txt ] +if [ ! -s webserver/gui/version.txt ] then cat <<VERMSG @@ -962,7 +930,7 @@ Update check on the dashboard, will not work. If you want to have version update check, please re-install it following the procedure in: -https://github.com/firehol/netdata/wiki/Installation +https://github.com/netdata/netdata/wiki/Installation VERMSG fi @@ -990,8 +958,8 @@ To run apps.plugin with escalated capabilities: or, to run apps.plugin as root: - ${TPUT_YELLOW}${TPUT_BOLD}sudo chown root \"${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin\"${TPUT_RESET} - ${TPUT_YELLOW}${TPUT_BOLD}sudo chmod 4755 \"${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin\"${TPUT_RESET} + ${TPUT_YELLOW}${TPUT_BOLD}sudo chown root:${NETDATA_GROUP} \"${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin\"${TPUT_RESET} + ${TPUT_YELLOW}${TPUT_BOLD}sudo chmod 4750 \"${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin\"${TPUT_RESET} apps.plugin is performing a hard-coded function of data collection for all running processes. It cannot be instructed from the netdata daemon to perform @@ -1016,24 +984,17 @@ if [ "\$1" != "--force" ] exit 1 fi +source installer/functions.sh || exit 1 + echo >&2 "Stopping a possibly running netdata..." 
-for p in \$(pidof netdata); do kill \$p; done +for p in \$(pidof netdata); do run kill \$p; done sleep 2 -deletedir() { - if [ ! -z "\$1" -a -d "\$1" ] - then - echo - echo "Deleting directory '\$1' ..." - rm -I -R "\$1" - fi -} - if [ ! -z "${NETDATA_PREFIX}" -a -d "${NETDATA_PREFIX}" ] then # installation prefix was given - deletedir "${NETDATA_PREFIX}" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}" else # installation prefix was NOT given @@ -1041,49 +1002,55 @@ else if [ -f "${NETDATA_PREFIX}/usr/sbin/netdata" ] then echo "Deleting ${NETDATA_PREFIX}/usr/sbin/netdata ..." - rm -i "${NETDATA_PREFIX}/usr/sbin/netdata" + run rm -i "${NETDATA_PREFIX}/usr/sbin/netdata" fi - deletedir "${NETDATA_PREFIX}/etc/netdata" - deletedir "${NETDATA_PREFIX}/usr/share/netdata" - deletedir "${NETDATA_PREFIX}/usr/libexec/netdata" - deletedir "${NETDATA_PREFIX}/var/lib/netdata" - deletedir "${NETDATA_PREFIX}/var/cache/netdata" - deletedir "${NETDATA_PREFIX}/var/log/netdata" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/etc/netdata" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/usr/share/netdata" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/usr/libexec/netdata" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/var/lib/netdata" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/var/cache/netdata" + portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/var/log/netdata" fi if [ -f /etc/logrotate.d/netdata ] then echo "Deleting /etc/logrotate.d/netdata ..." - rm -i /etc/logrotate.d/netdata + run rm -i /etc/logrotate.d/netdata fi if [ -f /etc/systemd/system/netdata.service ] then echo "Deleting /etc/systemd/system/netdata.service ..." - rm -i /etc/systemd/system/netdata.service + run rm -i /etc/systemd/system/netdata.service +fi + +if [ -f /lib/systemd/system/netdata.service ] + then + echo "Deleting /lib/systemd/system/netdata.service ..." + run rm -i /lib/systemd/system/netdata.service fi if [ -f /etc/init.d/netdata ] then echo "Deleting /etc/init.d/netdata ..." - rm -i /etc/init.d/netdata + run rm -i /etc/init.d/netdata fi if [ -f /etc/periodic/daily/netdata-updater ] then echo "Deleting /etc/periodic/daily/netdata-updater ..." - rm -i /etc/periodic/daily/netdata-updater + run rm -i /etc/periodic/daily/netdata-updater fi if [ -f /etc/cron.daily/netdata-updater ] then echo "Deleting /etc/cron.daily/netdata-updater ..." - rm -i /etc/cron.daily/netdata-updater + run rm -i /etc/cron.daily/netdata-updater fi -getent passwd netdata > /dev/null -if [ $? -eq 0 ] +portable_check_user_exists netdata +if [ \$? -eq 0 ] then echo echo "You may also want to remove the user netdata" @@ -1091,8 +1058,8 @@ if [ $? -eq 0 ] echo " userdel netdata" fi -getent group netdata > /dev/null -if [ $? -eq 0 ] +portable_check_group_exists netdata > /dev/null +if [ \$? -eq 0 ] then echo echo "You may also want to remove the group netdata" @@ -1100,86 +1067,17 @@ if [ $? -eq 0 ] echo " groupdel netdata" fi -getent group docker > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_DOCKER}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the docker group" - echo "by running:" - echo " gpasswd -d netdata docker" -fi - -getent group nginx > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_NGINX}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the nginx group" - echo "by running:" - echo " gpasswd -d netdata nginx" -fi - -getent group varnish > /dev/null -if [ $? 
-eq 0 -a "${NETDATA_ADDED_TO_VARNISH}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the varnish group" - echo "by running:" - echo " gpasswd -d netdata varnish" -fi - -getent group haproxy > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_HAPROXY}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the haproxy group" - echo "by running:" - echo " gpasswd -d netdata haproxy" -fi - -getent group adm > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_ADM}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the adm group" - echo "by running:" - echo " gpasswd -d netdata adm" -fi - -getent group nsd > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_NSD}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the nsd group" - echo "by running:" - echo " gpasswd -d netdata nsd" -fi - -getent group proxy > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_PROXY}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the proxy group" - echo "by running:" - echo " gpasswd -d netdata proxy" -fi - -getent group squid > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_SQUID}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the squid group" - echo "by running:" - echo " gpasswd -d netdata squid" -fi - -getent group ceph > /dev/null -if [ $? -eq 0 -a "${NETDATA_ADDED_TO_CEPH}" = "1" ] - then - echo - echo "You may also want to remove the netdata user from the squid group" - echo "by running:" - echo " gpasswd -d netdata ceph" -fi +for g in ${NETDATA_ADDED_TO_GROUPS} +do + portable_check_group_exists \$g > /dev/null + if [ \$? -eq 0 ] + then + echo + echo "You may also want to remove the netdata user from the \$g group" + echo "by running:" + echo " gpasswd -d netdata \$g" + fi +done UNINSTALL chmod 750 netdata-uninstaller.sh @@ -1351,16 +1249,16 @@ REINSTALL if [ "${AUTOUPDATE}" = "1" ] then progress "Installing netdata-updater at cron" - run ln -s "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater" + run ln -fs "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater" else echo >&2 "${TPUT_DIM}Run this to automatically check and install netdata updates once per day:${TPUT_RESET}" echo >&2 - echo >&2 "${TPUT_YELLOW}${TPUT_BOLD}sudo ln -s ${PWD}/netdata-updater.sh ${crondir}/netdata-updater${TPUT_RESET}" + echo >&2 "${TPUT_YELLOW}${TPUT_BOLD}sudo ln -fs ${PWD}/netdata-updater.sh ${crondir}/netdata-updater${TPUT_RESET}" fi else progress "Refreshing netdata-updater at cron" run rm "${crondir}/netdata-updater" - run ln -s "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater" + run ln -fs "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater" fi else [ "${AUTOUPDATE}" = "1" ] && echo >&2 "Cannot figure out the cron directory to install netdata-updater." diff --git a/netdata.cppcheck b/netdata.cppcheck index 49bb443ba..245c7a005 100644 --- a/netdata.cppcheck +++ b/netdata.cppcheck @@ -1,6 +1,5 @@ <?xml version="1.0" encoding="UTF-8"?> <project version="1"> - <root name="/home/costa/src/netdata-ktsaou.git/src"/> <builddir>cppcheck-build</builddir> <includedir> <dir name=".."/> diff --git a/netdata.spec b/netdata.spec index 98ba99c75..544a23f85 100644 --- a/netdata.spec +++ b/netdata.spec @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later %global contentdir %{_datadir}/netdata # This is temporary and should eventually be resolved. 
This bypasses @@ -80,11 +81,11 @@ Recommends: python2-psycopg2 \ Summary: Real-time performance monitoring, done right Name: netdata -Version: 1.10.0 +Version: 1.10.1 Release: 1%{?dist} License: GPLv3+ Group: Applications/System -Source0: https://github.com/firehol/%{name}/releases/download/v1.10.0/%{name}-1.10.0.tar.xz +Source0: https://github.com/netdata/%{name}/releases/download/v1.11.0_rolling/%{name}-1.11.0_rolling.tar.gz URL: http://my-netdata.io BuildRequires: pkgconfig BuildRequires: xz @@ -125,7 +126,7 @@ so that you can get insights of what is happening now and what just happened, on your systems and applications. %prep -%setup -q -n netdata-1.10.0 +%setup -q -n netdata-1.11.0_rolling %build %configure \ @@ -180,19 +181,20 @@ rm -rf "${RPM_BUILD_ROOT}" %defattr(-,root,root) %dir %{_sysconfdir}/%{name} +%dir %{_libdir}/%{name} -%config(noreplace) %{_sysconfdir}/%{name}/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/charts.d/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/health.d/*.conf -#%%config(noreplace) %{_sysconfdir}/%{name}/node.d/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/python.d/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/statsd.d/*.conf -%config(noreplace) %{_sysconfdir}/logrotate.d/%{name} +%config %{_sysconfdir}/%{name}/*.conf +#%config %{_sysconfdir}/%{name}/charts.d/*.conf +#%config %{_sysconfdir}/%{name}/health.d/*.conf +#%config %{_sysconfdir}/%{name}/node.d/*.conf +#%config %{_sysconfdir}/%{name}/python.d/*.conf +#%config %{_sysconfdir}/%{name}/statsd.d/*.conf +%config %{_sysconfdir}/logrotate.d/%{name} -# To be eventually moved to %%_defaultdocdir -%{_sysconfdir}/%{name}/node.d/*.md +%{_libdir}/%{name} %{_libexecdir}/%{name} %{_sbindir}/%{name} +%{_sysconfdir}/%{name}/edit-config %caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0550,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin @@ -213,10 +215,18 @@ rm -rf "${RPM_BUILD_ROOT}" %attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name} %dir %{_datadir}/%{name} + %dir %{_sysconfdir}/%{name}/health.d %dir %{_sysconfdir}/%{name}/python.d %dir %{_sysconfdir}/%{name}/charts.d %dir %{_sysconfdir}/%{name}/node.d +%dir %{_sysconfdir}/%{name}/statsd.d + +%dir %{_libdir}/%{name}/conf.d/health.d +%dir %{_libdir}/%{name}/conf.d/python.d +%dir %{_libdir}/%{name}/conf.d/charts.d +%dir %{_libdir}/%{name}/conf.d/node.d +%dir %{_libdir}/%{name}/conf.d/statsd.d %if %{with systemd} %{_unitdir}/netdata.service @@ -230,70 +240,5 @@ rm -rf "${RPM_BUILD_ROOT}" %{_datadir}/%{name}/web %changelog -* Tue Mar 27 2018 Costa Tsaousis <costa@tsaousis.gr> - 1.10.0-1 - Please check full changelog at github. - https://github.com/firehol/netdata/releases -* Sun Dec 17 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.9.0-1 - Please check full changelog at github. - https://github.com/firehol/netdata/releases -* Sun Sep 17 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.8.0-1 - This is mainly a bugfix release. - Please check full changelog at github. 
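On the `%config(noreplace)` → `%config` change above: with `noreplace`, rpm keeps a locally modified file on upgrade and installs the new version as `.rpmnew`; plain `%config` installs the new file and saves the modified copy as `.rpmsave`. That fits the new layout, where pristine stock configs ship under `%{_libdir}/%{name}/conf.d` and `/etc/netdata` holds only user copies. Standard rpm commands to inspect the result:

```sh
rpmbuild -ba netdata.spec          # rebuild the package from this spec
rpm -qc netdata                    # list the files marked %config
rpm -qlv netdata | grep conf.d     # stock configs under %{_libdir}/netdata/conf.d
```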
-* Sun Jul 16 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.7.0-1 -- netdata is now a fully featured statsd server -- new installation options -- metrics streaming and replication improvements -- backends improvements - prometheus support rewritten -- netdata now monitors ZFS (on Linux and FreeBSD) -- netdata now monitors ElasticSearch -- netdata now monitors RabbitMQ -- netdata now monitors Go applications (via expvar) -- netdata now monitors ipfw (on FreeBSD 11) -- netdata now monitors samba -- netdata now monitors squid logs -- netdata dashboard loading times have been improved significantly -- netdata alarms now support custom hooks -- dozens more improvements and bug fixes -* Mon Mar 20 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.6.0-1 -- central netdata -- monitoring ephemeral nodes -- monitoring ephemeral containers and VM guests -- apps.plugin ported for FreeBSD -- web_log plugin -- JSON backends -- IPMI monitoring -- several new and improved plugins -- several new and improved alarms and notifications -- dozens more improvements and bug fixes -* Sun Jan 22 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.5.0-1 -- FreeBSD, MacOS, FreeNAS -- Backends support -- dozens of new and improved plugins -- dozens of new and improved alarms and notification methods -* Tue Oct 4 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.4.0-1 -- the fastest netdata ever (with a better look too)! -- improved IoT and containers support! -- alarms improved in almost every way! -- Several more improvements, new features and bugfixes. -* Sun Aug 28 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.3.0-1 -- netdata now has health monitoring -- netdata now generates badges -- netdata now has python plugins -- Several more improvements, new features and bugfixes. -* Tue Jul 26 2016 Jason Barnett <J@sonBarnett.com> - 1.2.0-2 -- Added support for EL6 -- Corrected several Requires statements -- Changed default to build without nfacct -- Removed --docdir from configure -* Mon May 16 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.2.0-1 -- netdata is now 30% faster. -- netdata now has a registry (my-netdata menu on the dashboard). -- netdata now monitors Linux containers. -- Several more improvements, new features and bugfixes. -* Wed Apr 20 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.1.0-1 -- Several new features (IPv6, SYNPROXY, Users, Users Groups). -- A lot of bug fixes and optimizations. -* Tue Mar 22 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.0.0-1 -- First public release. * Sun Nov 15 2015 Alon Bar-Lev <alonbl@redhat.com> - 0.0.0-1 - Initial add. diff --git a/netdata.spec.in b/netdata.spec.in index 529dc2383..b54a91e9f 100644 --- a/netdata.spec.in +++ b/netdata.spec.in @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later %global contentdir %{_datadir}/netdata # This is temporary and should eventually be resolved. 
This bypasses @@ -84,7 +85,7 @@ Version: @PACKAGE_RPM_VERSION@ Release: 1%{?dist} License: GPLv3+ Group: Applications/System -Source0: https://github.com/firehol/%{name}/releases/download/v@PACKAGE_VERSION@/%{name}-@PACKAGE_VERSION@.tar.xz +Source0: https://github.com/netdata/%{name}/releases/download/v@PACKAGE_VERSION@/%{name}-@PACKAGE_VERSION@.tar.gz URL: http://my-netdata.io BuildRequires: pkgconfig BuildRequires: xz @@ -180,19 +181,20 @@ rm -rf "${RPM_BUILD_ROOT}" %defattr(-,root,root) %dir %{_sysconfdir}/%{name} +%dir %{_libdir}/%{name} -%config(noreplace) %{_sysconfdir}/%{name}/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/charts.d/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/health.d/*.conf -#%%config(noreplace) %{_sysconfdir}/%{name}/node.d/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/python.d/*.conf -%config(noreplace) %{_sysconfdir}/%{name}/statsd.d/*.conf -%config(noreplace) %{_sysconfdir}/logrotate.d/%{name} +%config %{_sysconfdir}/%{name}/*.conf +#%config %{_sysconfdir}/%{name}/charts.d/*.conf +#%config %{_sysconfdir}/%{name}/health.d/*.conf +#%config %{_sysconfdir}/%{name}/node.d/*.conf +#%config %{_sysconfdir}/%{name}/python.d/*.conf +#%config %{_sysconfdir}/%{name}/statsd.d/*.conf +%config %{_sysconfdir}/logrotate.d/%{name} -# To be eventually moved to %%_defaultdocdir -%{_sysconfdir}/%{name}/node.d/*.md +%{_libdir}/%{name} %{_libexecdir}/%{name} %{_sbindir}/%{name} +%{_sysconfdir}/%{name}/edit-config %caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0550,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin @@ -213,10 +215,18 @@ rm -rf "${RPM_BUILD_ROOT}" %attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name} %dir %{_datadir}/%{name} + %dir %{_sysconfdir}/%{name}/health.d %dir %{_sysconfdir}/%{name}/python.d %dir %{_sysconfdir}/%{name}/charts.d %dir %{_sysconfdir}/%{name}/node.d +%dir %{_sysconfdir}/%{name}/statsd.d + +%dir %{_libdir}/%{name}/conf.d/health.d +%dir %{_libdir}/%{name}/conf.d/python.d +%dir %{_libdir}/%{name}/conf.d/charts.d +%dir %{_libdir}/%{name}/conf.d/node.d +%dir %{_libdir}/%{name}/conf.d/statsd.d %if %{with systemd} %{_unitdir}/netdata.service @@ -230,70 +240,5 @@ rm -rf "${RPM_BUILD_ROOT}" %{_datadir}/%{name}/web %changelog -* Tue Mar 27 2018 Costa Tsaousis <costa@tsaousis.gr> - 1.10.0-1 - Please check full changelog at github. - https://github.com/firehol/netdata/releases -* Sun Dec 17 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.9.0-1 - Please check full changelog at github. - https://github.com/firehol/netdata/releases -* Sun Sep 17 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.8.0-1 - This is mainly a bugfix release. - Please check full changelog at github. 
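`netdata.spec.in` mirrors the same packaging changes in template form; the `@PACKAGE_RPM_VERSION@` / `@PACKAGE_VERSION@` tokens are substituted at configure time (standard autoconf `@token@` processing, presumably via `AC_CONFIG_FILES`):

```sh
autoreconf -ivf
./configure                                          # writes netdata.spec from netdata.spec.in
grep -E '^(Version|Release|Source0):' netdata.spec   # tokens now expanded
```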
-* Sun Jul 16 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.7.0-1 -- netdata is now a fully featured statsd server -- new installation options -- metrics streaming and replication improvements -- backends improvements - prometheus support rewritten -- netdata now monitors ZFS (on Linux and FreeBSD) -- netdata now monitors ElasticSearch -- netdata now monitors RabbitMQ -- netdata now monitors Go applications (via expvar) -- netdata now monitors ipfw (on FreeBSD 11) -- netdata now monitors samba -- netdata now monitors squid logs -- netdata dashboard loading times have been improved significantly -- netdata alarms now support custom hooks -- dozens more improvements and bug fixes -* Mon Mar 20 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.6.0-1 -- central netdata -- monitoring ephemeral nodes -- monitoring ephemeral containers and VM guests -- apps.plugin ported for FreeBSD -- web_log plugin -- JSON backends -- IPMI monitoring -- several new and improved plugins -- several new and improved alarms and notifications -- dozens more improvements and bug fixes -* Sun Jan 22 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.5.0-1 -- FreeBSD, MacOS, FreeNAS -- Backends support -- dozens of new and improved plugins -- dozens of new and improved alarms and notification methods -* Tue Oct 4 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.4.0-1 -- the fastest netdata ever (with a better look too)! -- improved IoT and containers support! -- alarms improved in almost every way! -- Several more improvements, new features and bugfixes. -* Sun Aug 28 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.3.0-1 -- netdata now has health monitoring -- netdata now generates badges -- netdata now has python plugins -- Several more improvements, new features and bugfixes. -* Tue Jul 26 2016 Jason Barnett <J@sonBarnett.com> - 1.2.0-2 -- Added support for EL6 -- Corrected several Requires statements -- Changed default to build without nfacct -- Removed --docdir from configure -* Mon May 16 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.2.0-1 -- netdata is now 30% faster. -- netdata now has a registry (my-netdata menu on the dashboard). -- netdata now monitors Linux containers. -- Several more improvements, new features and bugfixes. -* Wed Apr 20 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.1.0-1 -- Several new features (IPv6, SYNPROXY, Users, Users Groups). -- A lot of bug fixes and optimizations. -* Tue Mar 22 2016 Costa Tsaousis <costa@tsaousis.gr> - 1.0.0-1 -- First public release. * Sun Nov 15 2015 Alon Bar-Lev <alonbl@redhat.com> - 0.0.0-1 - Initial add. 
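The hunks that follow delete the entire top-level `node.d/` tree: the hand-written `Makefile.am`, the automake-generated `Makefile.in`, the module README, and the node.js modules themselves. Presumably these collectors moved under the new `collectors/` hierarchy this patch introduces rather than being dropped; one way to verify from the repository (the `collectors/node.d.plugin/...` destination path is an assumption):

```sh
git show --stat HEAD | grep -E 'node\.d'   # pair each deletion with its new location
git log --follow --oneline -- collectors/node.d.plugin/fronius/fronius.node.js
```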
diff --git a/node.d/Makefile.am b/node.d/Makefile.am deleted file mode 100644 index 7f67faa6a..000000000 --- a/node.d/Makefile.am +++ /dev/null @@ -1,28 +0,0 @@ -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_node_DATA = \ - README.md \ - named.node.js \ - fronius.node.js \ - sma_webbox.node.js \ - snmp.node.js \ - stiebeleltron.node.js \ - $(NULL) - -nodemodulesdir=$(nodedir)/node_modules -dist_nodemodules_DATA = \ - node_modules/netdata.js \ - node_modules/extend.js \ - node_modules/pixl-xml.js \ - node_modules/net-snmp.js \ - node_modules/asn1-ber.js \ - $(NULL) - -nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber -dist_nodemoduleslibber_DATA = \ - node_modules/lib/ber/index.js \ - node_modules/lib/ber/errors.js \ - node_modules/lib/ber/reader.js \ - node_modules/lib/ber/types.js \ - node_modules/lib/ber/writer.js \ - $(NULL) diff --git a/node.d/Makefile.in b/node.d/Makefile.in deleted file mode 100644 index dd572ee8b..000000000 --- a/node.d/Makefile.in +++ /dev/null @@ -1,583 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = 
node.d -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_node_DATA) $(dist_nodemodules_DATA) \ - $(dist_nodemoduleslibber_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodemodulesdir)" \ - "$(DESTDIR)$(nodemoduleslibberdir)" -DATA = $(dist_node_DATA) $(dist_nodemodules_DATA) \ - $(dist_nodemoduleslibber_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = 
@docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_node_DATA = \ - README.md \ - named.node.js \ - fronius.node.js \ - sma_webbox.node.js \ - snmp.node.js \ - stiebeleltron.node.js \ - $(NULL) - -nodemodulesdir = $(nodedir)/node_modules -dist_nodemodules_DATA = \ - node_modules/netdata.js \ - node_modules/extend.js \ - node_modules/pixl-xml.js \ - node_modules/net-snmp.js \ - node_modules/asn1-ber.js \ - $(NULL) - -nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber -dist_nodemoduleslibber_DATA = \ - node_modules/lib/ber/index.js \ - node_modules/lib/ber/errors.js \ - node_modules/lib/ber/reader.js \ - node_modules/lib/ber/types.js \ - node_modules/lib/ber/writer.js \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu node.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu node.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_nodeDATA: $(dist_node_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \ - done - -uninstall-dist_nodeDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir) -install-dist_nodemodulesDATA: $(dist_nodemodules_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \ - done - -uninstall-dist_nodemodulesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir) -install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \ - done - -uninstall-dist_nodemoduleslibberDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - 
list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_nodeDATA install-dist_nodemodulesDATA \ - install-dist_nodemoduleslibberDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_nodeDATA uninstall-dist_nodemodulesDATA \ - uninstall-dist_nodemoduleslibberDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_nodeDATA \ - install-dist_nodemodulesDATA \ - install-dist_nodemoduleslibberDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_nodeDATA uninstall-dist_nodemodulesDATA \ - uninstall-dist_nodemoduleslibberDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/node.d/README.md b/node.d/README.md deleted file mode 100644 index 6818d34de..000000000 --- a/node.d/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# Disclaimer - -Module configurations are written in JSON and **node.js is required**. - -to be edited. - ---- - -The following node.d modules are supported: - -# fronius - -This module collects metrics from the configured solar power installation from Fronius Symo. -See `netdata/conf.d/node.d/fronius.conf.md` for more details. - -**Requirements** - * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`) - * Fronius Symo with network access (http) - -It produces per server: - -1. **Power** - * Current power input from the grid (positive values), output to the grid (negative values), in W - * Current power input from the solar panels, in W - * Current power stored in the accumulator (if present), in W (in theory, untested) - -2. **Consumption** - * Local consumption in W - -3. **Autonomy** - * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than it is needed by local consumption. - * Relative self consumption in %. The lower the better - -4. **Energy** - * The energy produced during the current day, in kWh - * The energy produced during the current year, in kWh - -5. **Inverter** - * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present. 
- - -### configuration - -Sample: - -```json -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "name": "Symo", - "hostname": "symo.ip.or.dns", - "update_every": 5, - "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" - } - ] -} -``` - -If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `5`. - ---- - -# stiebel eltron - -This module collects metrics from the configured heat pump and hot water installation from Stiebel Eltron ISG web. -See `netdata/conf.d/node.d/stiebeleltron.conf.md` for more details. - -**Requirements** - * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`) - * Stiebel Eltron ISG web with network access (http), without password login - -The charts are configurable, however, the provided default configuration collects the following: - -1. **General** - * Outside temperature in C - * Condenser temperature in C - * Heating circuit pressure in bar - * Flow rate in l/min - * Output of water and heat pumps in % - -2. **Heating** - * Heat circuit 1 temperature in C (set/actual) - * Heat circuit 2 temperature in C (set/actual) - * Flow temperature in C (set/actual) - * Buffer temperature in C (set/actual) - * Pre-flow temperature in C - -3. **Hot Water** - * Hot water temperature in C (set/actual) - -4. **Room Temperature** - * Heat circuit 1 room temperature in C (set/actual) - * Heat circuit 2 room temperature in C (set/actual) - -5. **Eletric Reheating** - * Dual Mode Reheating temperature in C (hot water/heating) - -6. **Process Data** - * Remaining compressor rest time in s - -7. **Runtime** - * Compressor runtime hours (hot water/heating) - * Reheating runtime hours (reheating 1/reheating 2) - -8. **Energy** - * Compressor today in kWh (hot water/heating) - * Compressor Total in kWh (hot water/heating) - - -### configuration - -The default configuration is provided in [netdata/conf.d/node.d/stiebeleltron.conf.md](https://github.com/firehol/netdata/blob/master/conf.d/node.d/stiebeleltron.conf.md). Just change the `update_every` (if necessary) and hostnames. **You may have to adapt the configuration to suit your needs and setup** (which might be different). - -If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `10`. - ---- diff --git a/node.d/fronius.node.js b/node.d/fronius.node.js deleted file mode 100644 index fc49e5d38..000000000 --- a/node.d/fronius.node.js +++ /dev/null @@ -1,399 +0,0 @@ -"use strict"; - -// This program will connect to one or more Fronius Symo Inverters. -// to get the Solar Power Generated (current, today). - -// example configuration in netdata/conf.d/node.d/fronius.conf.md - -var url = require("url"); -var http = require("http"); -var netdata = require("netdata"); - -netdata.debug("loaded " + __filename + " plugin"); - -var fronius = { - name: "Fronius", - enable_autodetect: false, - update_every: 5, - base_priority: 60000, - charts: {}, - - powerGridId: "p_grid", - powerPvId: "p_pv", - powerAccuId: "p_akku", // not my typo! 
Using the ID from the AP - consumptionLoadId: "p_load", - autonomyId: "rel_autonomy", - consumptionSelfId: "rel_selfconsumption", - solarConsumptionId: "solar_consumption", - energyTodayId: "e_day", - energyYearId: "e_year", - - createBasicDimension: function (id, name, divisor) { - return { - id: id, // the unique id of the dimension - name: name, // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: divisor, // the divisor - hidden: false // is hidden (boolean) - }; - }, - - // Gets the site power chart. Will be created if not existing. - getSitePowerChart: function (service, suffix) { - var id = this.getChartId(service, suffix); - var chart = fronius.charts[id]; - if (fronius.isDefined(chart)) return chart; - - var dim = {}; - dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "grid", 1); - dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "photovoltaics", 1); - dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "accumulator", 1); - - chart = { - id: id, // the unique id of the chart - name: "", // the unique name of the chart - title: service.name + " Current Site Power", // the title of the chart - units: "W", // the units of the chart dimensions - family: "power", // the family of the chart - context: "fronius.power", // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: fronius.base_priority + 1, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dim - }; - chart = service.chart(id, chart); - fronius.charts[id] = chart; - - return chart; - }, - - // Gets the site consumption chart. Will be created if not existing. - getSiteConsumptionChart: function (service, suffix) { - var id = this.getChartId(service, suffix); - var chart = fronius.charts[id]; - if (fronius.isDefined(chart)) return chart; - var dim = {}; - dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "load", 1); - - chart = { - id: id, // the unique id of the chart - name: "", // the unique name of the chart - title: service.name + " Current Load", // the title of the chart - units: "W", // the units of the chart dimensions - family: "consumption", // the family of the chart - context: "fronius.consumption", // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: fronius.base_priority + 2, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dim - }; - chart = service.chart(id, chart); - fronius.charts[id] = chart; - - return chart; - }, - - // Gets the site consumption chart. Will be created if not existing. 
- getSiteAutonomyChart: function (service, suffix) { - var id = this.getChartId(service, suffix); - var chart = fronius.charts[id]; - if (fronius.isDefined(chart)) return chart; - var dim = {}; - dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "autonomy", 1); - dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "self_consumption", 1); - dim[fronius.solarConsumptionId] = this.createBasicDimension(fronius.solarConsumptionId, "solar_consumption", 1); - - chart = { - id: id, // the unique id of the chart - name: "", // the unique name of the chart - title: service.name + " Current Autonomy", // the title of the chart - units: "%", // the units of the chart dimensions - family: "autonomy", // the family of the chart - context: "fronius.autonomy", // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: fronius.base_priority + 3, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dim - }; - chart = service.chart(id, chart); - fronius.charts[id] = chart; - - return chart; - }, - - // Gets the site energy chart for today. Will be created if not existing. - getSiteEnergyTodayChart: function (service, suffix) { - var chartId = this.getChartId(service, suffix); - var chart = fronius.charts[chartId]; - if (fronius.isDefined(chart)) return chart; - var dim = {}; - dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "today", 1000); - chart = { - id: chartId, // the unique id of the chart - name: "", // the unique name of the chart - title: service.name + " Energy production for today",// the title of the chart - units: "kWh", // the units of the chart dimensions - family: "energy", // the family of the chart - context: "fronius.energy.today", // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: fronius.base_priority + 4, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dim - }; - chart = service.chart(chartId, chart); - fronius.charts[chartId] = chart; - - return chart; - }, - - // Gets the site energy chart for today. Will be created if not existing. - getSiteEnergyYearChart: function (service, suffix) { - var chartId = this.getChartId(service, suffix); - var chart = fronius.charts[chartId]; - if (fronius.isDefined(chart)) return chart; - var dim = {}; - dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "year", 1000); - chart = { - id: chartId, // the unique id of the chart - name: "", // the unique name of the chart - title: service.name + " Energy production for this year",// the title of the chart - units: "kWh", // the units of the chart dimensions - family: "energy", // the family of the chart - context: "fronius.energy.year", // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: fronius.base_priority + 5, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dim - }; - chart = service.chart(chartId, chart); - fronius.charts[chartId] = chart; - - return chart; - }, - - // Gets the inverter power chart. Will be created if not existing. 
- // Needs the array of inverters in order to create a chart with all inverters as dimensions - getInverterPowerChart: function (service, suffix, inverters) { - var chartId = this.getChartId(service, suffix); - var chart = fronius.charts[chartId]; - if (fronius.isDefined(chart)) return chart; - - var dim = {}; - for (var key in inverters) { - if (inverters.hasOwnProperty(key)) { - var name = key; - if (!isNaN(key)) name = "inverter_" + key; - dim[key] = this.createBasicDimension("inverter_" + key, name, 1); - } - } - - chart = { - id: chartId, // the unique id of the chart - name: "", // the unique name of the chart - title: service.name + " Current Inverter Output",// the title of the chart - units: "W", // the units of the chart dimensions - family: "inverters", // the family of the chart - context: "fronius.inverter.output", // the context of the chart - type: netdata.chartTypes.stacked, // the type of the chart - priority: fronius.base_priority + 6, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dim - }; - chart = service.chart(chartId, chart); - fronius.charts[chartId] = chart; - - return chart; - }, - - processResponse: function (service, content) { - var json = fronius.convertToJson(content); - if (json === null) return; - - // add the service - service.commit(); - - var chartDefinitions = fronius.parseCharts(service, json); - var chartCount = chartDefinitions.length; - while (chartCount--) { - var chartObj = chartDefinitions[chartCount]; - service.begin(chartObj.chart); - var dimCount = chartObj.dimensions.length; - while (dimCount--) { - var dim = chartObj.dimensions[dimCount]; - service.set(dim.name, dim.value); - } - service.end(); - } - }, - - parseCharts: function (service, json) { - var site = json.Body.Data.Site; - return [ - this.parsePowerChart(service, site), - this.parseConsumptionChart(service, site), - this.parseAutonomyChart(service, site), - this.parseEnergyTodayChart(service, site), - this.parseEnergyYearChart(service, site), - this.parseInverterChart(service, json.Body.Data.Inverters) - ]; - }, - - parsePowerChart: function (service, site) { - return this.getChart(this.getSitePowerChart(service, "power"), - [ - this.getDimension(this.powerGridId, Math.round(site.P_Grid)), - this.getDimension(this.powerPvId, Math.round(Math.max(site.P_PV, 0))), - this.getDimension(this.powerAccuId, Math.round(site.P_Akku)) - ] - ); - }, - - parseConsumptionChart: function (service, site) { - return this.getChart(this.getSiteConsumptionChart(service, "consumption"), - [this.getDimension(this.consumptionLoadId, Math.round(Math.abs(site.P_Load)))] - ); - }, - - parseAutonomyChart: function (service, site) { - var selfConsumption = site.rel_SelfConsumption; - var solarConsumption = 0; - var load = Math.abs(site.P_Load); - var power = Math.max(site.P_PV, 0); - if (power <= 0) solarConsumption = 0; - else if (load >= power) solarConsumption = 100; - else solarConsumption = 100 / power * load; - return this.getChart(this.getSiteAutonomyChart(service, "autonomy"), - [ - this.getDimension(this.autonomyId, Math.round(site.rel_Autonomy)), - this.getDimension(this.consumptionSelfId, Math.round(selfConsumption === null ? 
100 : selfConsumption)), - this.getDimension(this.solarConsumptionId, Math.round(solarConsumption)) - ] - ); - }, - - parseEnergyTodayChart: function (service, site) { - return this.getChart(this.getSiteEnergyTodayChart(service, "energy.today"), - [this.getDimension(this.energyTodayId, Math.round(Math.max(site.E_Day, 0)))] - ); - }, - - parseEnergyYearChart: function (service, site) { - return this.getChart(this.getSiteEnergyYearChart(service, "energy.year"), - [this.getDimension(this.energyYearId, Math.round(Math.max(site.E_Year, 0)))] - ); - }, - - parseInverterChart: function (service, inverters) { - var dimensions = []; - for (var key in inverters) { - if (inverters.hasOwnProperty(key)) { - dimensions.push(this.getDimension(key, Math.round(inverters[key].P))); - } - } - return this.getChart(this.getInverterPowerChart(service, "inverters.output", inverters), dimensions); - }, - - getDimension: function (name, value) { - return { - name: name, - value: value - }; - }, - - getChart: function (chart, dimensions) { - return { - chart: chart, - dimensions: dimensions - }; - }, - - getChartId: function (service, suffix) { - return "fronius_" + service.name + "." + suffix; - }, - - convertToJson: function (httpBody) { - if (httpBody === null) return null; - var json = httpBody; - // can't parse if it's already a json object, - // the check enables easier testing if the httpBody is already valid JSON. - if (typeof httpBody !== "object") { - try { - json = JSON.parse(httpBody); - } catch (error) { - netdata.error("fronius: Got a response, but it is not valid JSON. Ignoring. Error: " + error.message); - return null; - } - } - return this.isResponseValid(json) ? json : null; - }, - - // some basic validation - isResponseValid: function (json) { - if (this.isUndefined(json.Body)) return false; - if (this.isUndefined(json.Body.Data)) return false; - if (this.isUndefined(json.Body.Data.Site)) return false; - return this.isDefined(json.Body.Data.Inverters); - }, - - // module.serviceExecute() - // this function is called only from this module - // its purpose is to prepare the request and call - // netdata.serviceExecute() - serviceExecute: function (name, uri, update_every) { - netdata.debug(this.name + ": " + name + ": url: " + uri + ", update_every: " + update_every); - - var service = netdata.service({ - name: name, - request: netdata.requestFromURL("http://" + uri), - update_every: update_every, - module: this - }); - service.execute(this.processResponse); - }, - - - configure: function (config) { - if (fronius.isUndefined(config.servers)) return 0; - var added = 0; - var len = config.servers.length; - while (len--) { - var server = config.servers[len]; - if (fronius.isUndefined(server.update_every)) server.update_every = this.update_every; - if (fronius.areUndefined([server.name, server.hostname, server.api_path])) continue; - - var url = server.hostname + server.api_path; - this.serviceExecute(server.name, url, server.update_every); - added++; - } - return added; - }, - - // module.update() - // this is called repeatedly to collect data, by calling - // netdata.serviceExecute() - update: function (service, callback) { - service.execute(function (serv, data) { - service.module.processResponse(serv, data); - callback(); - }); - }, - - isUndefined: function (value) { - return typeof value === "undefined"; - }, - - areUndefined: function (valueArray) { - var i = 0; - for (i; i < valueArray.length; i++) { - if (this.isUndefined(valueArray[i])) return true; - } - return false; - }, - - isDefined: 
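Worked numbers for the `solar_consumption` computation in `parseAutonomyChart` above, with invented inputs: `power = max(P_PV, 0)`, `load = |P_Load|`, and the result is clamped to 100 % once local load meets or exceeds panel output.

```sh
# power = 4000 W, load = 1000 W  ->  100 / 4000 * 1000 = 25 %
# power =  800 W, load = 1000 W  ->  load >= power, so 100 %
awk 'BEGIN { power = 4000; load = 1000;
             sc = (power <= 0) ? 0 : ((load >= power) ? 100 : 100 / power * load);
             printf "solar_consumption = %.0f %%\n", sc }'
```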
function (value) { - return typeof value !== "undefined"; - } -}; - -module.exports = fronius; diff --git a/node.d/named.node.js b/node.d/named.node.js deleted file mode 100644 index 02c890c60..000000000 --- a/node.d/named.node.js +++ /dev/null @@ -1,609 +0,0 @@ -'use strict'; - -// collect statistics from bind (named) v9.10+ -// -// bind statistics documentation at: -// http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/ -// https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics - -// example configuration in /etc/netdata/node.d/named.conf -// the module supports auto-detection if bind is running at localhost - -/* -{ - "enable_autodetect": true, - "update_every": 5, - "servers": [ - { - "name": "bind1", - "url": "http://127.0.0.1:8888/json/v1/server", - "update_every": 1 - }, - { - "name": "bind2", - "url": "http://10.0.0.1:8888/xml/v3/server", - "update_every": 2 - } - ] -} -*/ - -// the following is the bind named.conf configuration required - -/* -statistics-channels { - inet 127.0.0.1 port 8888 allow { 127.0.0.1; }; -}; -*/ - -var url = require('url'); -var http = require('http'); -var XML = require('pixl-xml'); -var netdata = require('netdata'); - -if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, 'plugin'); - -var named = { - name: __filename, - enable_autodetect: true, - update_every: 1, - base_priority: 60000, - charts: {}, - - chartFromMembersCreate: function(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) { - var chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' ' + title_suffix, // the title of the chart - units: units, // the units of the chart dimensions - family: family, // the family of the chart - context: context, // the context of the chart - type: type, // the type of the chart - priority: priority, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: {} - }; - - var found = 0; - var dims = Object.keys(obj); - var len = dims.length; - for(var i = 0; i < len ;i++) { - var x = dims[i]; - - if(typeof(obj[x]) !== 'undefined' && obj[x] !== 0) { - found++; - chart.dimensions[x] = { - id: x, // the unique id of the dimension - name: x, // the name of the dimension - algorithm: algorithm, // the id of the netdata algorithm - multiplier: multiplier, // the multiplier - divisor: divisor, // the divisor - hidden: false // is hidden (boolean) - } - } - } - - if(found === false) - return null; - - chart = service.chart(id, chart); - this.charts[id] = chart; - return chart; - }, - - chartFromMembers: function(service, obj, id_suffix, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) { - var id = 'named_' + service.name + '.' 
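Before enabling the module, the statistics channel from the sample `named.conf` above can be checked by hand (bind 9.10+; port and paths taken from the sample configuration):

```sh
curl -s http://127.0.0.1:8888/json/v1/server | head -c 400; echo   # JSON flavor
curl -s http://127.0.0.1:8888/xml/v3/server  | head -c 400; echo   # XML flavor, converted by xml2js below
```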
+ id_suffix; - var chart = this.charts[id]; - var dims, len, x, i; - - if(typeof chart === 'undefined') { - chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor); - if(chart === null) return false; - } - else { - // check if we need to re-generate the chart - dims = Object.keys(obj); - len = dims.length; - for(i = 0; i < len ;i++) { - x = dims[i]; - if(typeof(chart.dimensions[x]) === 'undefined') { - chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor); - if(chart === null) return false; - break; - } - } - } - - service.begin(chart); - - var found = 0; - dims = Object.keys(obj); - len = dims.length; - for(i = 0; i < len ;i++) { - x = dims[i]; - if(typeof(chart.dimensions[x]) !== 'undefined') { - found++; - service.set(x, obj[x]); - } - } - - service.end(); - - return (found > 0); - }, - - // an index to map values to different charts - lookups: { - nsstats: {}, - resolver_stats: {}, - numfetch: {} - }, - - // transform the XML response of bind - // to the JSON response of bind - xml2js: function(service, data_xml) { - var d = XML.parse(data_xml); - if(d === null) return null; - - var a, aa, alen, alen2; - - var data = {}; - var len = d.server.counters.length; - while(len--) { - a = d.server.counters[len]; - if(typeof a.counter === 'undefined') continue; - if(a.type === 'opcode') a.type = 'opcodes'; - else if(a.type === 'qtype') a.type = 'qtypes'; - else if(a.type === 'nsstat') a.type = 'nsstats'; - aa = data[a.type] = {}; - alen = 0; - alen2 = a.counter.length; - while(alen < alen2) { - aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10); - alen++; - } - } - - data.views = {}; - var vlen = d.views.view.length; - while(vlen--) { - var vname = d.views.view[vlen].name; - data.views[vname] = { resolver: {} }; - len = d.views.view[vlen].counters.length; - while(len--) { - a = d.views.view[vlen].counters[len]; - if(typeof a.counter === 'undefined') continue; - if(a.type === 'resstats') a.type = 'stats'; - else if(a.type === 'resqtype') a.type = 'qtypes'; - else if(a.type === 'adbstat') a.type = 'adb'; - aa = data.views[vname].resolver[a.type] = {}; - alen = 0; - alen2 = a.counter.length; - while(alen < alen2) { - aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10); - alen++; - } - } - } - - return data; - }, - - processResponse: function(service, data) { - if(data !== null) { - var r, x, look, id, chart, keys, len; - - // parse XML or JSON - // pepending on the URL given - if(service.request.path.match(/^\/xml/) !== null) - r = named.xml2js(service, data); - else - r = JSON.parse(data); - - if(typeof r === 'undefined' || r === null) { - service.error("Cannot parse these data: " + data.toString()); - return; - } - - if(service.added !== true) - service.commit(); - - if(typeof r.nsstats !== 'undefined') { - // we split the nsstats object to several others - var global_requests = {}, global_requests_enable = false; - var global_failures = {}, global_failures_enable = false; - var global_failures_detail = {}, global_failures_detail_enable = false; - var global_updates = {}, global_updates_enable = false; - var protocol_queries = {}, protocol_queries_enable = false; - var global_queries = {}, global_queries_enable = false; - var global_queries_success = {}, global_queries_success_enable = false; - var default_enable = false; - var RecursClients = 0; - - // RecursClients is an absolute value - if(typeof 
r.nsstats['RecursClients'] !== 'undefined') { - RecursClients = r.nsstats['RecursClients']; - delete r.nsstats['RecursClients']; - } - - keys = Object.keys(r.nsstats); - len = keys.length; - while(len--) { - x = keys[len]; - - // we maintain an index of the values found - // mapping them to the objects they are split into - - look = named.lookups.nsstats[x]; - if(typeof look === 'undefined') { - // a new value, not found in the index - // index it: - if(x === 'Requestv4') { - named.lookups.nsstats[x] = { - name: 'IPv4', - type: 'global_requests' - }; - } - else if(x === 'Requestv6') { - named.lookups.nsstats[x] = { - name: 'IPv6', - type: 'global_requests' - }; - } - else if(x === 'QryFailure') { - named.lookups.nsstats[x] = { - name: 'failures', - type: 'global_failures' - }; - } - else if(x === 'QryUDP') { - named.lookups.nsstats[x] = { - name: 'UDP', - type: 'protocol_queries' - }; - } - else if(x === 'QryTCP') { - named.lookups.nsstats[x] = { - name: 'TCP', - type: 'protocol_queries' - }; - } - else if(x === 'QrySuccess') { - named.lookups.nsstats[x] = { - name: 'queries', - type: 'global_queries_success' - }; - } - else if(x.match(/QryRej$/) !== null) { - named.lookups.nsstats[x] = { - name: x, - type: 'global_failures_detail' - }; - } - else if(x.match(/^Qry/) !== null) { - named.lookups.nsstats[x] = { - name: x, - type: 'global_queries' - }; - } - else if(x.match(/^Update/) !== null) { - named.lookups.nsstats[x] = { - name: x, - type: 'global_updates' - }; - } - else { - // values not mapped, will remain - // in the default map - named.lookups.nsstats[x] = { - name: x, - type: 'default' - }; - } - - look = named.lookups.nsstats[x]; - // netdata.error('lookup nsstats value: ' + x + ' >>> ' + named.lookups.nsstats[x].type); - } - - switch(look.type) { - case 'global_requests': global_requests[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_requests_enable = true; break; - case 'global_queries': global_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_enable = true; break; - case 'global_queries_success': global_queries_success[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_success_enable = true; break; - case 'global_updates': global_updates[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_updates_enable = true; break; - case 'protocol_queries': protocol_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; protocol_queries_enable = true; break; - case 'global_failures': global_failures[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_enable = true; break; - case 'global_failures_detail': global_failures_detail[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_detail_enable = true; break; - default: default_enable = true; break; - } - } - - if(global_requests_enable === true) - service.module.chartFromMembers(service, global_requests, 'received_requests', 'Bind, Global Received Requests by IP version', 'requests/s', 'requests', 'named.requests', netdata.chartTypes.stacked, named.base_priority + 1, netdata.chartAlgorithms.incremental, 1, 1); - - if(global_queries_success_enable === true) - service.module.chartFromMembers(service, global_queries_success, 'global_queries_success', 'Bind, Global Successful Queries', 'queries/s', 'queries', 'named.queries_success', netdata.chartTypes.line, named.base_priority + 2, netdata.chartAlgorithms.incremental, 1, 1); - - if(protocol_queries_enable === true) - service.module.chartFromMembers(service, protocol_queries, 'protocols_queries', 'Bind, Global Queries by IP Protocol', 
'queries/s', 'queries', 'named.protocol_queries', netdata.chartTypes.stacked, named.base_priority + 3, netdata.chartAlgorithms.incremental, 1, 1); - - if(global_queries_enable === true) - service.module.chartFromMembers(service, global_queries, 'global_queries', 'Bind, Global Queries Analysis', 'queries/s', 'queries', 'named.global_queries', netdata.chartTypes.stacked, named.base_priority + 4, netdata.chartAlgorithms.incremental, 1, 1); - - if(global_updates_enable === true) - service.module.chartFromMembers(service, global_updates, 'received_updates', 'Bind, Global Received Updates', 'updates/s', 'updates', 'named.global_updates', netdata.chartTypes.stacked, named.base_priority + 5, netdata.chartAlgorithms.incremental, 1, 1); - - if(global_failures_enable === true) - service.module.chartFromMembers(service, global_failures, 'query_failures', 'Bind, Global Query Failures', 'failures/s', 'failures', 'named.global_failures', netdata.chartTypes.line, named.base_priority + 6, netdata.chartAlgorithms.incremental, 1, 1); - - if(global_failures_detail_enable === true) - service.module.chartFromMembers(service, global_failures_detail, 'query_failures_detail', 'Bind, Global Query Failures Analysis', 'failures/s', 'failures', 'named.global_failures_detail', netdata.chartTypes.stacked, named.base_priority + 7, netdata.chartAlgorithms.incremental, 1, 1); - - if(default_enable === true) - service.module.chartFromMembers(service, r.nsstats, 'nsstats', 'Bind, Other Global Server Statistics', 'operations/s', 'other', 'named.nsstats', netdata.chartTypes.line, named.base_priority + 8, netdata.chartAlgorithms.incremental, 1, 1); - - // RecursClients chart - id = 'named_' + service.name + '.recursive_clients'; - chart = named.charts[id]; - - if(typeof chart === 'undefined') { - chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' Bind, Current Recursive Clients', // the title of the chart - units: 'clients', // the units of the chart dimensions - family: 'clients', // the family of the chart - context: 'named.recursive_clients', // the context of the chart - type: netdata.chartTypes.line, // the type of the chart - priority: named.base_priority + 1, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: { - 'clients': { - id: 'clients', // the unique id of the dimension - name: '', // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false // is hidden (boolean) - } - } - }; - - chart = service.chart(id, chart); - named.charts[id] = chart; - } - - service.begin(chart); - service.set('clients', RecursClients); - service.end(); - } - - if(typeof r.opcodes !== 'undefined') - service.module.chartFromMembers(service, r.opcodes, 'in_opcodes', 'Bind, Global Incoming Requests by OpCode', 'requests/s', 'requests', 'named.in_opcodes', netdata.chartTypes.stacked, named.base_priority + 9, netdata.chartAlgorithms.incremental, 1, 1); - - if(typeof r.qtypes !== 'undefined') - service.module.chartFromMembers(service, r.qtypes, 'in_qtypes', 'Bind, Global Incoming Requests by Query Type', 'requests/s', 'requests', 'named.in_qtypes', netdata.chartTypes.stacked, named.base_priority + 10, netdata.chartAlgorithms.incremental, 1, 1); - - if(typeof r.sockstats !== 'undefined') - service.module.chartFromMembers(service, r.sockstats, 
'in_sockstats', 'Bind, Global Socket Statistics', 'operations/s', 'sockets', 'named.in_sockstats', netdata.chartTypes.line, named.base_priority + 11, netdata.chartAlgorithms.incremental, 1, 1); - - if(typeof r.views !== 'undefined') { - keys = Object.keys(r.views); - len = keys.length; - while(len--) { - x = keys[len]; - var resolver = r.views[x].resolver; - - if(typeof resolver !== 'undefined') { - if(typeof resolver.stats !== 'undefined') { - var NumFetch = 0; - var key = service.name + '.' + x; - var rtt = {}, rtt_enable = false; - default_enable = false; - - // NumFetch is an absolute value - if(typeof resolver.stats['NumFetch'] !== 'undefined') { - named.lookups.numfetch[key] = true; - NumFetch = resolver.stats['NumFetch']; - delete resolver.stats['NumFetch']; - } - if(typeof resolver.stats['BucketSize'] !== 'undefined') { - delete resolver.stats['BucketSize']; - } - - // split the QryRTT* from the main chart - var ykeys = Object.keys(resolver.stats); - var ylen = ykeys.length; - while(ylen--) { - var y = ykeys[ylen]; - - // we maintain an index of the values found - // mapping them to the objects they are split into - - look = named.lookups.resolver_stats[y]; - if(typeof look === 'undefined') { - if(y.match(/^QryRTT/) !== null) { - named.lookups.resolver_stats[y] = { - name: y, - type: 'rtt' - }; - } - else { - named.lookups.resolver_stats[y] = { - name: y, - type: 'default' - }; - } - - look = named.lookups.resolver_stats[y]; - // netdata.error('lookup resolver stats value: ' + y + ' >>> ' + look.type); - } - - switch(look.type) { - case 'rtt': rtt[look.name] = resolver.stats[y]; delete resolver.stats[y]; rtt_enable = true; break; - default: default_enable = true; break; - } - } - - if(rtt_enable) - service.module.chartFromMembers(service, rtt, 'view_resolver_rtt_' + x, 'Bind, ' + x + ' View, Resolver Round Trip Timings', 'queries/s', 'view_' + x, 'named.resolver_rtt', netdata.chartTypes.stacked, named.base_priority + 12, netdata.chartAlgorithms.incremental, 1, 1); - - if(default_enable) - service.module.chartFromMembers(service, resolver.stats, 'view_resolver_stats_' + x, 'Bind, ' + x + ' View, Resolver Statistics', 'operations/s', 'view_' + x, 'named.resolver_stats', netdata.chartTypes.line, named.base_priority + 13, netdata.chartAlgorithms.incremental, 1, 1); - - // NumFetch chart - if(typeof named.lookups.numfetch[key] !== 'undefined') { - id = 'named_' + service.name + '.view_resolver_numfetch_' + x; - chart = named.charts[id]; - - if(typeof chart === 'undefined') { - chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' Bind, ' + x + ' View, Resolver Active Queries', // the title of the chart - units: 'queries', // the units of the chart dimensions - family: 'view_' + x, // the family of the chart - context: 'named.resolver_active_queries', // the context of the chart - type: netdata.chartTypes.line, // the type of the chart - priority: named.base_priority + 1001, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: { - 'queries': { - id: 'queries', // the unique id of the dimension - name: '', // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false // is hidden (boolean) - } - } - }; - - chart = service.chart(id, chart); - named.charts[id] = chart; - } - - service.begin(chart); - 
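// Editor's note -- a hedged sketch (not in the original source) of the
// three-call submission cycle this module uses for every chart update;
// the 'queries' dimension and the value 42 below are illustrative only:
//
//     service.begin(chart);          // select the chart to update
//     service.set('queries', 42);    // feed one value per dimension
//     service.end();                 // flush the collected values to netdata
//
// The same begin/set/end sequence is used next for the NumFetch value.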
service.set('queries', NumFetch); - service.end(); - } - } - } - - if(typeof resolver.qtypes !== 'undefined') - service.module.chartFromMembers(service, resolver.qtypes, 'view_resolver_qtypes_' + x, 'Bind, ' + x + ' View, Requests by Query Type', 'requests/s', 'view_' + x, 'named.resolver_qtypes', netdata.chartTypes.stacked, named.base_priority + 14, netdata.chartAlgorithms.incremental, 1, 1); - - //if(typeof resolver.cache !== 'undefined') - // service.module.chartFromMembers(service, resolver.cache, 'view_resolver_cache_' + x, 'Bind, ' + x + ' View, Cache Entries', 'entries', 'view_' + x, 'named.resolver_cache', netdata.chartTypes.stacked, named.base_priority + 15, netdata.chartAlgorithms.absolute, 1, 1); - - if(typeof resolver.cachestats['CacheHits'] !== 'undefined' && resolver.cachestats['CacheHits'] > 0) { - id = 'named_' + service.name + '.view_resolver_cachehits_' + x; - chart = named.charts[id]; - - if(typeof chart === 'undefined') { - chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' Bind, ' + x + ' View, Resolver Cache Hits', // the title of the chart - units: 'operations/s', // the units of the chart dimensions - family: 'view_' + x, // the family of the chart - context: 'named.resolver_cache_hits', // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: named.base_priority + 1100, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: { - 'CacheHits': { - id: 'CacheHits', // the unique id of the dimension - name: 'hits', // the name of the dimension - algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false // is hidden (boolean) - }, - 'CacheMisses': { - id: 'CacheMisses', // the unique id of the dimension - name: 'misses', // the name of the dimension - algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm - multiplier: -1, // the multiplier - divisor: 1, // the divisor - hidden: false // is hidden (boolean) - } - } - }; - - chart = service.chart(id, chart); - named.charts[id] = chart; - } - - service.begin(chart); - service.set('CacheHits', resolver.cachestats['CacheHits']); - service.set('CacheMisses', resolver.cachestats['CacheMisses']); - service.end(); - } - - // this is wrong, it contains many types of info: - // 1. CacheHits, CacheMisses - incremental (added above) - // 2. QueryHits, QueryMisses - incremental - // 3. DeleteLRU, DeleteTTL - incremental - // 4. CacheNodes, CacheBuckets - absolute - // 5. TreeMemTotal, TreeMemInUse - absolute - // 6. 
HeapMemMax, HeapMemTotal, HeapMemInUse - absolute - //if(typeof resolver.cachestats !== 'undefined') - // service.module.chartFromMembers(service, resolver.cachestats, 'view_resolver_cachestats_' + x, 'Bind, ' + x + ' View, Cache Statistics', 'requests/s', 'view_' + x, 'named.resolver_cache_stats', netdata.chartTypes.line, named.base_priority + 1001, netdata.chartAlgorithms.incremental, 1, 1); - - //if(typeof resolver.adb !== 'undefined') - // service.module.chartFromMembers(service, resolver.adb, 'view_resolver_adb_' + x, 'Bind, ' + x + ' View, ADB Statistics', 'entries', 'view_' + x, 'named.resolver_adb', netdata.chartTypes.line, named.base_priority + 1002, netdata.chartAlgorithms.absolute, 1, 1); - } - } - } - }, - - // module.serviceExecute() - // this function is called only from this module - // its purpose is to prepare the request and call - // netdata.serviceExecute() - serviceExecute: function(name, a_url, update_every) { - if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': url: ' + a_url + ', update_every: ' + update_every); - var service = netdata.service({ - name: name, - request: netdata.requestFromURL(a_url), - update_every: update_every, - module: this - }); - - service.execute(this.processResponse); - }, - - configure: function(config) { - var added = 0; - - if(this.enable_autodetect === true) { - this.serviceExecute('local', 'http://localhost:8888/json/v1/server', this.update_every); - added++; - } - - if(typeof(config.servers) !== 'undefined') { - var len = config.servers.length; - while(len--) { - if(typeof config.servers[len].update_every === 'undefined') - config.servers[len].update_every = this.update_every; - - this.serviceExecute(config.servers[len].name, config.servers[len].url, config.servers[len].update_every); - added++; - } - } - - return added; - }, - - // module.update() - // this is called repeatedly to collect data, by calling - // netdata.serviceExecute() - update: function(service, callback) { - service.execute(function(serv, data) { - service.module.processResponse(serv, data); - callback(); - }); - } -}; - -module.exports = named; diff --git a/node.d/node_modules/asn1-ber.js b/node.d/node_modules/asn1-ber.js deleted file mode 100644 index 57809f486..000000000 --- a/node.d/node_modules/asn1-ber.js +++ /dev/null @@ -1,6 +0,0 @@ - -var Ber = require('./lib/ber/index') - -exports.Ber = Ber -exports.BerReader = Ber.Reader -exports.BerWriter = Ber.Writer diff --git a/node.d/node_modules/extend.js b/node.d/node_modules/extend.js deleted file mode 100644 index 0fdd8be22..000000000 --- a/node.d/node_modules/extend.js +++ /dev/null @@ -1,87 +0,0 @@ -// https://github.com/justmoon/node-extend - -'use strict'; - -var hasOwn = Object.prototype.hasOwnProperty; -var toStr = Object.prototype.toString; - -var isArray = function isArray(arr) { - if (typeof Array.isArray === 'function') { - return Array.isArray(arr); - } - - return toStr.call(arr) === '[object Array]'; -}; - -var isPlainObject = function isPlainObject(obj) { - if (!obj || toStr.call(obj) !== '[object Object]') { - return false; - } - - var hasOwnConstructor = hasOwn.call(obj, 'constructor'); - var hasIsPrototypeOf = obj.constructor && obj.constructor.prototype && hasOwn.call(obj.constructor.prototype, 'isPrototypeOf'); - // A constructor that is not an own property must be the Object constructor - if (obj.constructor && !hasOwnConstructor && !hasIsPrototypeOf) { - return false; - } - - // Own properties are enumerated first, so to speed up, - // if the last one is own, then all properties are own. 
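// Editor's note -- an illustrative, assumption-flagged sketch of what the
// check above and the for-in below accept and reject (values derived from
// this function's own logic, not from node-extend's test suite):
//
//     isPlainObject({})                   // true
//     isPlainObject({ a: 1 })             // true
//     isPlainObject([])                   // false: toStr gives '[object Array]'
//     isPlainObject(new Date())           // false: toStr gives '[object Date]'
//     isPlainObject(Object.create(null))  // true: no constructor at all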
- var key; - for (key in obj) { /**/ } - - return typeof key === 'undefined' || hasOwn.call(obj, key); -}; - -module.exports = function extend() { - var options, name, src, copy, copyIsArray, clone; - var target = arguments[0]; - var i = 1; - var length = arguments.length; - var deep = false; - - // Handle a deep copy situation - if (typeof target === 'boolean') { - deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; - } else if ((typeof target !== 'object' && typeof target !== 'function') || target == null) { - target = {}; - } - - for (; i < length; ++i) { - options = arguments[i]; - // Only deal with non-null/undefined values - if (options != null) { - // Extend the base object - for (name in options) { - src = target[name]; - copy = options[name]; - - // Prevent never-ending loop - if (target !== copy) { - // Recurse if we're merging plain objects or arrays - if (deep && copy && (isPlainObject(copy) || (copyIsArray = isArray(copy)))) { - if (copyIsArray) { - copyIsArray = false; - clone = src && isArray(src) ? src : []; - } else { - clone = src && isPlainObject(src) ? src : {}; - } - - // Never move original objects, clone them - target[name] = extend(deep, clone, copy); - - // Don't bring in undefined values - } else if (typeof copy !== 'undefined') { - target[name] = copy; - } - } - } - } - } - - // Return the modified object - return target; -}; diff --git a/node.d/node_modules/lib/ber/errors.js b/node.d/node_modules/lib/ber/errors.js deleted file mode 100644 index 0106747e6..000000000 --- a/node.d/node_modules/lib/ber/errors.js +++ /dev/null @@ -1,9 +0,0 @@ - -module.exports = { - InvalidAsn1Error: function(msg) { - var e = new Error() - e.name = 'InvalidAsn1Error' - e.message = msg || '' - return e - } -} diff --git a/node.d/node_modules/lib/ber/index.js b/node.d/node_modules/lib/ber/index.js deleted file mode 100644 index 65985c1e1..000000000 --- a/node.d/node_modules/lib/ber/index.js +++ /dev/null @@ -1,17 +0,0 @@ - -var errors = require('./errors') -var types = require('./types') - -var Reader = require('./reader') -var Writer = require('./writer') - -for (var t in types) - if (types.hasOwnProperty(t)) - exports[t] = types[t] - -for (var e in errors) - if (errors.hasOwnProperty(e)) - exports[e] = errors[e] - -exports.Reader = Reader -exports.Writer = Writer diff --git a/node.d/node_modules/lib/ber/reader.js b/node.d/node_modules/lib/ber/reader.js deleted file mode 100644 index f93d829aa..000000000 --- a/node.d/node_modules/lib/ber/reader.js +++ /dev/null @@ -1,269 +0,0 @@ - -var assert = require('assert'); - -var ASN1 = require('./types'); -var errors = require('./errors'); - - -///--- Globals - -var InvalidAsn1Error = errors.InvalidAsn1Error; - - - -///--- API - -function Reader(data) { - if (!data || !Buffer.isBuffer(data)) - throw new TypeError('data must be a node Buffer'); - - this._buf = data; - this._size = data.length; - - // These hold the "current" state - this._len = 0; - this._offset = 0; -} - -Object.defineProperty(Reader.prototype, 'length', { - enumerable: true, - get: function () { return (this._len); } -}); - -Object.defineProperty(Reader.prototype, 'offset', { - enumerable: true, - get: function () { return (this._offset); } -}); - -Object.defineProperty(Reader.prototype, 'remain', { - get: function () { return (this._size - this._offset); } -}); - -Object.defineProperty(Reader.prototype, 'buffer', { - get: function () { return (this._buf.slice(this._offset)); } -}); - - -/** - * Reads a single byte and advances offset; 
you can pass in `true` to make this - * a "peek" operation (i.e., get the byte, but don't advance the offset). - * - * @param {Boolean} peek true means don't move offset. - * @return {Number} the next byte, null if not enough data. - */ -Reader.prototype.readByte = function(peek) { - if (this._size - this._offset < 1) - return null; - - var b = this._buf[this._offset] & 0xff; - - if (!peek) - this._offset += 1; - - return b; -}; - - -Reader.prototype.peek = function() { - return this.readByte(true); -}; - - -/** - * Reads a (potentially) variable length off the BER buffer. This call is - * not really meant to be called directly, as callers have to manipulate - * the internal buffer afterwards. - * - * As a result of this call, you can call `Reader.length`, until the - * next thing called that does a readLength. - * - * @return {Number} the amount of offset to advance the buffer. - * @throws {InvalidAsn1Error} on bad ASN.1 - */ -Reader.prototype.readLength = function(offset) { - if (offset === undefined) - offset = this._offset; - - if (offset >= this._size) - return null; - - var lenB = this._buf[offset++] & 0xff; - if (lenB === null) - return null; - - if ((lenB & 0x80) == 0x80) { - lenB &= 0x7f; - - if (lenB == 0) - throw InvalidAsn1Error('Indefinite length not supported'); - - if (lenB > 4) - throw InvalidAsn1Error('encoding too long'); - - if (this._size - offset < lenB) - return null; - - this._len = 0; - for (var i = 0; i < lenB; i++) - this._len = (this._len << 8) + (this._buf[offset++] & 0xff); - - } else { - // Wasn't a variable length - this._len = lenB; - } - - return offset; -}; - - -/** - * Parses the next sequence in this BER buffer. - * - * To get the length of the sequence, call `Reader.length`. - * - * @return {Number} the sequence's tag. - */ -Reader.prototype.readSequence = function(tag) { - var seq = this.peek(); - if (seq === null) - return null; - if (tag !== undefined && tag !== seq) - throw InvalidAsn1Error('Expected 0x' + tag.toString(16) + - ': got 0x' + seq.toString(16)); - - var o = this.readLength(this._offset + 1); // stored in `length` - if (o === null) - return null; - - this._offset = o; - return seq; -}; - - -Reader.prototype.readInt = function(tag) { - if (typeof(tag) !== 'number') - tag = ASN1.Integer; - - return this._readTag(tag); -}; - - -Reader.prototype.readBoolean = function(tag) { - if (typeof(tag) !== 'number') - tag = ASN1.Boolean; - - return (this._readTag(tag) === 0 ? false : true); -}; - - -Reader.prototype.readEnumeration = function(tag) { - if (typeof(tag) !== 'number') - tag = ASN1.Enumeration; - - return this._readTag(tag); -}; - - -Reader.prototype.readString = function(tag, retbuf) { - if (!tag) - tag = ASN1.OctetString; - - var b = this.peek(); - if (b === null) - return null; - - if (b !== tag) - throw InvalidAsn1Error('Expected 0x' + tag.toString(16) + - ': got 0x' + b.toString(16)); - - var o = this.readLength(this._offset + 1); // stored in `length` - - if (o === null) - return null; - - if (this.length > this._size - o) - return null; - - this._offset = o; - - if (this.length === 0) - return retbuf ? new Buffer(0) : ''; - - var str = this._buf.slice(this._offset, this._offset + this.length); - this._offset += this.length; - - return retbuf ? 
str : str.toString('utf8'); -}; - -Reader.prototype.readOID = function(tag) { - if (!tag) - tag = ASN1.OID; - - var b = this.readString(tag, true); - if (b === null) - return null; - - var values = []; - var value = 0; - - for (var i = 0; i < b.length; i++) { - var byte = b[i] & 0xff; - - value <<= 7; - value += byte & 0x7f; - if ((byte & 0x80) == 0) { - values.push(value >>> 0); - value = 0; - } - } - - value = values.shift(); - values.unshift(value % 40); - values.unshift((value / 40) >> 0); - - return values.join('.'); -}; - - -Reader.prototype._readTag = function(tag) { - assert.ok(tag !== undefined); - - var b = this.peek(); - - if (b === null) - return null; - - if (b !== tag) - throw InvalidAsn1Error('Expected 0x' + tag.toString(16) + - ': got 0x' + b.toString(16)); - - var o = this.readLength(this._offset + 1); // stored in `length` - if (o === null) - return null; - - if (this.length > 4) - throw InvalidAsn1Error('Integer too long: ' + this.length); - - if (this.length > this._size - o) - return null; - this._offset = o; - - var fb = this._buf[this._offset]; - var value = 0; - - for (var i = 0; i < this.length; i++) { - value <<= 8; - value |= (this._buf[this._offset++] & 0xff); - } - - if ((fb & 0x80) == 0x80 && i !== 4) - value -= (1 << (i * 8)); - - return value >> 0; -}; - - - -///--- Exported API - -module.exports = Reader; diff --git a/node.d/node_modules/lib/ber/types.js b/node.d/node_modules/lib/ber/types.js deleted file mode 100644 index 345824bb1..000000000 --- a/node.d/node_modules/lib/ber/types.js +++ /dev/null @@ -1,34 +0,0 @@ - -module.exports = { - EOC: 0, - Boolean: 1, - Integer: 2, - BitString: 3, - OctetString: 4, - Null: 5, - OID: 6, - ObjectDescriptor: 7, - External: 8, - Real: 9, - Enumeration: 10, - PDV: 11, - Utf8String: 12, - RelativeOID: 13, - Sequence: 16, - Set: 17, - NumericString: 18, - PrintableString: 19, - T61String: 20, - VideotexString: 21, - IA5String: 22, - UTCTime: 23, - GeneralizedTime: 24, - GraphicString: 25, - VisibleString: 26, - GeneralString: 28, - UniversalString: 29, - CharacterString: 30, - BMPString: 31, - Constructor: 32, - Context: 128 -} diff --git a/node.d/node_modules/lib/ber/writer.js b/node.d/node_modules/lib/ber/writer.js deleted file mode 100644 index bf9805886..000000000 --- a/node.d/node_modules/lib/ber/writer.js +++ /dev/null @@ -1,317 +0,0 @@ - -var assert = require('assert'); -var ASN1 = require('./types'); -var errors = require('./errors'); - - -///--- Globals - -var InvalidAsn1Error = errors.InvalidAsn1Error; - -var DEFAULT_OPTS = { - size: 1024, - growthFactor: 8 -}; - - -///--- Helpers - -function merge(from, to) { - assert.ok(from); - assert.equal(typeof(from), 'object'); - assert.ok(to); - assert.equal(typeof(to), 'object'); - - var keys = Object.getOwnPropertyNames(from); - keys.forEach(function(key) { - if (to[key]) - return; - - var value = Object.getOwnPropertyDescriptor(from, key); - Object.defineProperty(to, key, value); - }); - - return to; -} - - - -///--- API - -function Writer(options) { - options = merge(DEFAULT_OPTS, options || {}); - - this._buf = new Buffer(options.size || 1024); - this._size = this._buf.length; - this._offset = 0; - this._options = options; - - // A list of offsets in the buffer where we need to insert - // sequence tag/len pairs. 
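// Editor's note -- a hedged sketch (not in the original source) of how a
// caller typically drives this writer; endSequence() later pops the
// offsets recorded in _seq and back-patches each sequence's tag/length:
//
//     var writer = new Writer();
//     writer.startSequence();   // reserves room, pushes the offset onto _seq
//     writer.writeInt(42);      // illustrative payload
//     writer.endSequence();     // pops _seq and inserts the final length
//     var out = writer.buffer;  // throws while any sequence is still open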
- this._seq = []; -} - -Object.defineProperty(Writer.prototype, 'buffer', { - get: function () { - if (this._seq.length) - throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)'); - - return (this._buf.slice(0, this._offset)); - } -}); - -Writer.prototype.writeByte = function(b) { - if (typeof(b) !== 'number') - throw new TypeError('argument must be a Number'); - - this._ensure(1); - this._buf[this._offset++] = b; -}; - - -Writer.prototype.writeInt = function(i, tag) { - if (typeof(i) !== 'number') - throw new TypeError('argument must be a Number'); - if (typeof(tag) !== 'number') - tag = ASN1.Integer; - - var sz = 4; - - while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) && - (sz > 1)) { - sz--; - i <<= 8; - } - - if (sz > 4) - throw new InvalidAsn1Error('BER ints cannot be > 0xffffffff'); - - this._ensure(2 + sz); - this._buf[this._offset++] = tag; - this._buf[this._offset++] = sz; - - while (sz-- > 0) { - this._buf[this._offset++] = ((i & 0xff000000) >>> 24); - i <<= 8; - } - -}; - - -Writer.prototype.writeNull = function() { - this.writeByte(ASN1.Null); - this.writeByte(0x00); -}; - - -Writer.prototype.writeEnumeration = function(i, tag) { - if (typeof(i) !== 'number') - throw new TypeError('argument must be a Number'); - if (typeof(tag) !== 'number') - tag = ASN1.Enumeration; - - return this.writeInt(i, tag); -}; - - -Writer.prototype.writeBoolean = function(b, tag) { - if (typeof(b) !== 'boolean') - throw new TypeError('argument must be a Boolean'); - if (typeof(tag) !== 'number') - tag = ASN1.Boolean; - - this._ensure(3); - this._buf[this._offset++] = tag; - this._buf[this._offset++] = 0x01; - this._buf[this._offset++] = b ? 0xff : 0x00; -}; - - -Writer.prototype.writeString = function(s, tag) { - if (typeof(s) !== 'string') - throw new TypeError('argument must be a string (was: ' + typeof(s) + ')'); - if (typeof(tag) !== 'number') - tag = ASN1.OctetString; - - var len = Buffer.byteLength(s); - this.writeByte(tag); - this.writeLength(len); - if (len) { - this._ensure(len); - this._buf.write(s, this._offset); - this._offset += len; - } -}; - - -Writer.prototype.writeBuffer = function(buf, tag) { - if (!Buffer.isBuffer(buf)) - throw new TypeError('argument must be a buffer'); - - // If no tag is specified we will assume `buf` already contains tag and length - if (typeof(tag) === 'number') { - this.writeByte(tag); - this.writeLength(buf.length); - } - - this._ensure(buf.length); - buf.copy(this._buf, this._offset, 0, buf.length); - this._offset += buf.length; -}; - - -Writer.prototype.writeStringArray = function(strings, tag) { - if (! 
(strings instanceof Array)) - throw new TypeError('argument must be an Array[String]'); - - var self = this; - strings.forEach(function(s) { - self.writeString(s, tag); - }); -}; - -// This is really to solve DER cases, but whatever for now -Writer.prototype.writeOID = function(s, tag) { - if (typeof(s) !== 'string') - throw new TypeError('argument must be a string'); - if (typeof(tag) !== 'number') - tag = ASN1.OID; - - if (!/^([0-9]+\.){3,}[0-9]+$/.test(s)) - throw new Error('argument is not a valid OID string'); - - function encodeOctet(bytes, octet) { - if (octet < 128) { - bytes.push(octet); - } else if (octet < 16384) { - bytes.push((octet >>> 7) | 0x80); - bytes.push(octet & 0x7F); - } else if (octet < 2097152) { - bytes.push((octet >>> 14) | 0x80); - bytes.push(((octet >>> 7) | 0x80) & 0xFF); - bytes.push(octet & 0x7F); - } else if (octet < 268435456) { - bytes.push((octet >>> 21) | 0x80); - bytes.push(((octet >>> 14) | 0x80) & 0xFF); - bytes.push(((octet >>> 7) | 0x80) & 0xFF); - bytes.push(octet & 0x7F); - } else { - bytes.push(((octet >>> 28) | 0x80) & 0xFF); - bytes.push(((octet >>> 21) | 0x80) & 0xFF); - bytes.push(((octet >>> 14) | 0x80) & 0xFF); - bytes.push(((octet >>> 7) | 0x80) & 0xFF); - bytes.push(octet & 0x7F); - } - } - - var tmp = s.split('.'); - var bytes = []; - bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10)); - tmp.slice(2).forEach(function(b) { - encodeOctet(bytes, parseInt(b, 10)); - }); - - var self = this; - this._ensure(2 + bytes.length); - this.writeByte(tag); - this.writeLength(bytes.length); - bytes.forEach(function(b) { - self.writeByte(b); - }); -}; - - -Writer.prototype.writeLength = function(len) { - if (typeof(len) !== 'number') - throw new TypeError('argument must be a Number'); - - this._ensure(4); - - if (len <= 0x7f) { - this._buf[this._offset++] = len; - } else if (len <= 0xff) { - this._buf[this._offset++] = 0x81; - this._buf[this._offset++] = len; - } else if (len <= 0xffff) { - this._buf[this._offset++] = 0x82; - this._buf[this._offset++] = len >> 8; - this._buf[this._offset++] = len; - } else if (len <= 0xffffff) { - this._buf[this._offset++] = 0x83; - this._buf[this._offset++] = len >> 16; - this._buf[this._offset++] = len >> 8; - this._buf[this._offset++] = len; - } else { - throw new InvalidAsn1Error('Length too long (> 4 bytes)'); - } -}; - -Writer.prototype.startSequence = function(tag) { - if (typeof(tag) !== 'number') - tag = ASN1.Sequence | ASN1.Constructor; - - this.writeByte(tag); - this._seq.push(this._offset); - this._ensure(3); - this._offset += 3; -}; - - -Writer.prototype.endSequence = function() { - var seq = this._seq.pop(); - var start = seq + 3; - var len = this._offset - start; - - if (len <= 0x7f) { - this._shift(start, len, -2); - this._buf[seq] = len; - } else if (len <= 0xff) { - this._shift(start, len, -1); - this._buf[seq] = 0x81; - this._buf[seq + 1] = len; - } else if (len <= 0xffff) { - this._buf[seq] = 0x82; - this._buf[seq + 1] = len >> 8; - this._buf[seq + 2] = len; - } else if (len <= 0xffffff) { - this._shift(start, len, 1); - this._buf[seq] = 0x83; - this._buf[seq + 1] = len >> 16; - this._buf[seq + 2] = len >> 8; - this._buf[seq + 3] = len; - } else { - throw new InvalidAsn1Error('Sequence too long'); - } -}; - - -Writer.prototype._shift = function(start, len, shift) { - assert.ok(start !== undefined); - assert.ok(len !== undefined); - assert.ok(shift); - - this._buf.copy(this._buf, start + shift, start, start + len); - this._offset += shift; -}; - -Writer.prototype._ensure = function(len) { - 
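// Editor's note -- hedged illustration: with the DEFAULT_OPTS above
// (size 1024, growthFactor 8), the first time a write does not fit, the
// backing Buffer is reallocated to 1024 * 8 = 8192 bytes (plus the
// requested length if even that is too small), and the bytes written so
// far are copied across below.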
assert.ok(len); - - if (this._size - this._offset < len) { - var sz = this._size * this._options.growthFactor; - if (sz - this._offset < len) - sz += len; - - var buf = new Buffer(sz); - - this._buf.copy(buf, 0, 0, this._offset); - this._buf = buf; - this._size = sz; - } -}; - - - -///--- Exported API - -module.exports = Writer; diff --git a/node.d/node_modules/net-snmp.js b/node.d/node_modules/net-snmp.js deleted file mode 100644 index ac9a8d350..000000000 --- a/node.d/node_modules/net-snmp.js +++ /dev/null @@ -1,1457 +0,0 @@ - -// Copyright 2013 Stephen Vickers <stephen.vickers.sv@gmail.com> - -var ber = require ("asn1-ber").Ber; -var dgram = require ("dgram"); -var events = require ("events"); -var util = require ("util"); - -/***************************************************************************** - ** Constants - **/ - -function _expandConstantObject (object) { - var keys = []; - for (var key in object) - keys.push (key); - for (var i = 0; i < keys.length; i++) - object[object[keys[i]]] = parseInt (keys[i]); -} - -var ErrorStatus = { - 0: "NoError", - 1: "TooBig", - 2: "NoSuchName", - 3: "BadValue", - 4: "ReadOnly", - 5: "GeneralError", - 6: "NoAccess", - 7: "WrongType", - 8: "WrongLength", - 9: "WrongEncoding", - 10: "WrongValue", - 11: "NoCreation", - 12: "InconsistentValue", - 13: "ResourceUnavailable", - 14: "CommitFailed", - 15: "UndoFailed", - 16: "AuthorizationError", - 17: "NotWritable", - 18: "InconsistentName" -}; - -_expandConstantObject (ErrorStatus); - -var ObjectType = { - 1: "Boolean", - 2: "Integer", - 4: "OctetString", - 5: "Null", - 6: "OID", - 64: "IpAddress", - 65: "Counter", - 66: "Gauge", - 67: "TimeTicks", - 68: "Opaque", - 70: "Counter64", - 128: "NoSuchObject", - 129: "NoSuchInstance", - 130: "EndOfMibView" -}; - -_expandConstantObject (ObjectType); - -ObjectType.Integer32 = ObjectType.Integer; -ObjectType.Counter32 = ObjectType.Counter; -ObjectType.Gauge32 = ObjectType.Gauge; -ObjectType.Unsigned32 = ObjectType.Gauge32; - -var PduType = { - 160: "GetRequest", - 161: "GetNextRequest", - 162: "GetResponse", - 163: "SetRequest", - 164: "Trap", - 165: "GetBulkRequest", - 166: "InformRequest", - 167: "TrapV2", - 168: "Report" -}; - -_expandConstantObject (PduType); - -var TrapType = { - 0: "ColdStart", - 1: "WarmStart", - 2: "LinkDown", - 3: "LinkUp", - 4: "AuthenticationFailure", - 5: "EgpNeighborLoss", - 6: "EnterpriseSpecific" -}; - -_expandConstantObject (TrapType); - -var Version1 = 0; -var Version2c = 1; - -/***************************************************************************** - ** Exception class definitions - **/ - -function ResponseInvalidError (message) { - this.name = "ResponseInvalidError"; - this.message = message; - Error.captureStackTrace(this, ResponseInvalidError); -} -util.inherits (ResponseInvalidError, Error); - -function RequestInvalidError (message) { - this.name = "RequestInvalidError"; - this.message = message; - Error.captureStackTrace(this, RequestInvalidError); -} -util.inherits (RequestInvalidError, Error); - -function RequestFailedError (message, status) { - this.name = "RequestFailedError"; - this.message = message; - this.status = status; - Error.captureStackTrace(this, RequestFailedError); -} -util.inherits (RequestFailedError, Error); - -function RequestTimedOutError (message) { - this.name = "RequestTimedOutError"; - this.message = message; - Error.captureStackTrace(this, RequestTimedOutError); -} -util.inherits (RequestTimedOutError, Error); - 
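// Editor's note -- a hedged usage sketch (not part of the original file)
// showing how a caller might branch on the exception classes above; the
// session object and OID are hypothetical, and Session.prototype.get is
// defined further below in this file:
//
//     session.get (["1.3.6.1.2.1.1.5.0"], function (error, varbinds) {
//         if (error instanceof RequestTimedOutError)
//             console.error ("agent did not respond: " + error.message);
//         else if (error)
//             console.error (error.name + ": " + error.message);
//         else
//             console.log (varbinds[0].oid + " = " + varbinds[0].value);
//     });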
-/***************************************************************************** - ** OID and varbind helper functions - **/ - -function isVarbindError (varbind) { - return !!(varbind.type == ObjectType.NoSuchObject - || varbind.type == ObjectType.NoSuchInstance - || varbind.type == ObjectType.EndOfMibView); -} - -function varbindError (varbind) { - return (ObjectType[varbind.type] || "NotAnError") + ": " + varbind.oid; -} - -function oidFollowsOid (oidString, nextString) { - var oid = {str: oidString, len: oidString.length, idx: 0}; - var next = {str: nextString, len: nextString.length, idx: 0}; - var dotCharCode = ".".charCodeAt (0); - - function getNumber (item) { - var n = 0; - if (item.idx >= item.len) - return null; - while (item.idx < item.len) { - var charCode = item.str.charCodeAt (item.idx++); - if (charCode == dotCharCode) - return n; - n = (n ? (n * 10) : n) + (charCode - 48); - } - return n; - } - - while (1) { - var oidNumber = getNumber (oid); - var nextNumber = getNumber (next); - - if (oidNumber !== null) { - if (nextNumber !== null) { - if (nextNumber > oidNumber) { - return true; - } else if (nextNumber < oidNumber) { - return false; - } - } else { - return true; - } - } else { - return true; - } - } -} - -function oidInSubtree (oidString, nextString) { - var oid = oidString.split ("."); - var next = nextString.split ("."); - - if (oid.length > next.length) - return false; - - for (var i = 0; i < oid.length; i++) { - if (next[i] != oid[i]) - return false; - } - - return true; -} - -/** - ** Some SNMP agents produce integers on the wire such as 00 ff ff ff ff. - ** The ASN.1 BER parser we use throws an error when parsing this, which we - ** believe is correct. So, we decided not to bother the "asn1" developer(s) - ** with this, instead opting to work around it here. - ** - ** If an integer is 5 bytes in length we check if the first byte is 0, and if so - ** simply drop it and parse it like it was a 4 byte integer, otherwise throw - ** an error since the integer is too large. 
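 ** Editor's note -- a hedged example of the case described above: a
 ** Counter32 at its maximum (4294967295) may be BER-encoded as the five
 ** bytes 00 ff ff ff ff, the leading zero keeping the value non-negative.
 ** readUint() below drops that leading zero byte and parses the remaining
 ** four; a five-byte integer whose first byte is non-zero is rejected
 ** with a RangeError.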
- **/ - -function readInt (buffer) { - return readUint (buffer, true); -} - -function readUint (buffer, isSigned) { - buffer.readByte (); - var length = buffer.readByte (); - var value = 0; - var signedBitSet = false; - - if (length > 5) { - throw new RangeError ("Integer too long '" + length + "'"); - } else if (length == 5) { - if (buffer.readByte () !== 0) - throw new RangeError ("Integer too long '" + length + "'"); - length = 4; - } - - for (var i = 0; i < length; i++) { - value *= 256; - value += buffer.readByte (); - - if (isSigned && i <= 0) { - if ((value & 0x80) == 0x80) - signedBitSet = true; - } - } - - if (signedBitSet) - value -= (1 << (i * 8)); - - return value; -} - -function readUint64 (buffer) { - var value = buffer.readString (ObjectType.Counter64, true); - - return value; -} - -function readVarbinds (buffer, varbinds) { - buffer.readSequence (); - - while (1) { - buffer.readSequence (); - var oid = buffer.readOID (); - var type = buffer.peek (); - - if (type == null) - break; - - var value; - - if (type == ObjectType.Boolean) { - value = buffer.readBoolean (); - } else if (type == ObjectType.Integer) { - value = readInt (buffer); - } else if (type == ObjectType.OctetString) { - value = buffer.readString (null, true); - } else if (type == ObjectType.Null) { - buffer.readByte (); - buffer.readByte (); - value = null; - } else if (type == ObjectType.OID) { - value = buffer.readOID (); - } else if (type == ObjectType.IpAddress) { - var bytes = buffer.readString (ObjectType.IpAddress, true); - if (bytes.length != 4) - throw new ResponseInvalidError ("Length '" + bytes.length - + "' of IP address '" + bytes.toString ("hex") - + "' is not 4"); - value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3]; - } else if (type == ObjectType.Counter) { - value = readUint (buffer); - } else if (type == ObjectType.Gauge) { - value = readUint (buffer); - } else if (type == ObjectType.TimeTicks) { - value = readUint (buffer); - } else if (type == ObjectType.Opaque) { - value = buffer.readString (ObjectType.Opaque, true); - } else if (type == ObjectType.Counter64) { - value = readUint64 (buffer); - } else if (type == ObjectType.NoSuchObject) { - buffer.readByte (); - buffer.readByte (); - value = null; - } else if (type == ObjectType.NoSuchInstance) { - buffer.readByte (); - buffer.readByte (); - value = null; - } else if (type == ObjectType.EndOfMibView) { - buffer.readByte (); - buffer.readByte (); - value = null; - } else { - throw new ResponseInvalidError ("Unknown type '" + type - + "' in response"); - } - - varbinds.push ({ - oid: oid, - type: type, - value: value - }); - } -} - -function writeUint (buffer, type, value) { - var b = new Buffer (4); - b.writeUInt32BE (value, 0); - buffer.writeBuffer (b, type); -} - -function writeUint64 (buffer, value) { - buffer.writeBuffer (value, ObjectType.Counter64); -} - -function writeVarbinds (buffer, varbinds) { - buffer.startSequence (); - for (var i = 0; i < varbinds.length; i++) { - buffer.startSequence (); - buffer.writeOID (varbinds[i].oid); - - if (varbinds[i].type && varbinds[i].hasOwnProperty("value")) { - var type = varbinds[i].type; - var value = varbinds[i].value; - - if (type == ObjectType.Boolean) { - buffer.writeBoolean (value ? 
true : false); - } else if (type == ObjectType.Integer) { // also Integer32 - buffer.writeInt (value); - } else if (type == ObjectType.OctetString) { - if (typeof value == "string") - buffer.writeString (value); - else - buffer.writeBuffer (value, ObjectType.OctetString); - } else if (type == ObjectType.Null) { - buffer.writeNull (); - } else if (type == ObjectType.OID) { - buffer.writeOID (value); - } else if (type == ObjectType.IpAddress) { - var bytes = value.split ("."); - if (bytes.length != 4) - throw new RequestInvalidError ("Invalid IP address '" - + value + "'"); - buffer.writeBuffer (new Buffer (bytes), 64); - } else if (type == ObjectType.Counter) { // also Counter32 - writeUint (buffer, ObjectType.Counter, value); - } else if (type == ObjectType.Gauge) { // also Gauge32 & Unsigned32 - writeUint (buffer, ObjectType.Gauge, value); - } else if (type == ObjectType.TimeTicks) { - writeUint (buffer, ObjectType.TimeTicks, value); - } else if (type == ObjectType.Opaque) { - buffer.writeBuffer (value, ObjectType.Opaque); - } else if (type == ObjectType.Counter64) { - writeUint64 (buffer, value); - } else { - throw new RequestInvalidError ("Unknown type '" + type - + "' in request"); - } - } else { - buffer.writeNull (); - } - - buffer.endSequence (); - } - buffer.endSequence (); -} - -/***************************************************************************** - ** PDU class definitions - **/ - -var SimplePdu = function (id, varbinds, options) { - this.id = id; - this.varbinds = varbinds; - this.options = options || {}; -}; - -SimplePdu.prototype.toBuffer = function (buffer) { - buffer.startSequence (this.type); - - buffer.writeInt (this.id); - buffer.writeInt ((this.type == PduType.GetBulkRequest) - ? (this.options.nonRepeaters || 0) - : 0); - buffer.writeInt ((this.type == PduType.GetBulkRequest) - ? 
(this.options.maxRepetitions || 0) - : 0); - - writeVarbinds (buffer, this.varbinds); - - buffer.endSequence (); -}; - -var GetBulkRequestPdu = function () { - this.type = PduType.GetBulkRequest; - GetBulkRequestPdu.super_.apply (this, arguments); -}; - -util.inherits (GetBulkRequestPdu, SimplePdu); - -var GetNextRequestPdu = function () { - this.type = PduType.GetNextRequest; - GetNextRequestPdu.super_.apply (this, arguments); -}; - -util.inherits (GetNextRequestPdu, SimplePdu); - -var GetResponsePdu = function (buffer) { - this.type = PduType.GetResponse; - - buffer.readSequence (this.type); - - this.id = buffer.readInt (); - - this.errorStatus = buffer.readInt (); - this.errorIndex = buffer.readInt (); - - this.varbinds = []; - - readVarbinds (buffer, this.varbinds); -}; - -var GetRequestPdu = function () { - this.type = PduType.GetRequest; - GetRequestPdu.super_.apply (this, arguments); -}; - -util.inherits (GetRequestPdu, SimplePdu); - -var InformRequestPdu = function () { - this.type = PduType.InformRequest; - InformRequestPdu.super_.apply (this, arguments); -}; - -util.inherits (InformRequestPdu, SimplePdu); - -var SetRequestPdu = function () { - this.type = PduType.SetRequest; - SetRequestPdu.super_.apply (this, arguments); -}; - -util.inherits (SetRequestPdu, SimplePdu); - -var TrapPdu = function (typeOrOid, varbinds, options) { - this.type = PduType.Trap; - - this.agentAddr = options.agentAddr || "127.0.0.1"; - this.upTime = options.upTime; - - if (typeof typeOrOid == "string") { - this.generic = TrapType.EnterpriseSpecific; - this.specific = parseInt (typeOrOid.match (/\.(\d+)$/)[1]); - this.enterprise = typeOrOid.replace (/\.(\d+)$/, ""); - } else { - this.generic = typeOrOid; - this.specific = 0; - this.enterprise = "1.3.6.1.4.1"; - } - - this.varbinds = varbinds; -}; - -TrapPdu.prototype.toBuffer = function (buffer) { - buffer.startSequence (this.type); - - buffer.writeOID (this.enterprise); - buffer.writeBuffer (new Buffer (this.agentAddr.split (".")), - ObjectType.IpAddress); - buffer.writeInt (this.generic); - buffer.writeInt (this.specific); - writeUint (buffer, ObjectType.TimeTicks, - this.upTime || Math.floor (process.uptime () * 100)); - - writeVarbinds (buffer, this.varbinds); - - buffer.endSequence (); -}; - -var TrapV2Pdu = function () { - this.type = PduType.TrapV2; - TrapV2Pdu.super_.apply (this, arguments); -}; - -util.inherits (TrapV2Pdu, SimplePdu); - -/***************************************************************************** - ** Message class definitions - **/ - -var RequestMessage = function (version, community, pdu) { - this.version = version; - this.community = community; - this.pdu = pdu; -}; - -RequestMessage.prototype.toBuffer = function () { - if (this.buffer) - return this.buffer; - - var writer = new ber.Writer (); - - writer.startSequence (); - - writer.writeInt (this.version); - writer.writeString (this.community); - - this.pdu.toBuffer (writer); - - writer.endSequence (); - - this.buffer = writer.buffer; - - return this.buffer; -}; - -var ResponseMessage = function (buffer) { - var reader = new ber.Reader (buffer); - - reader.readSequence (); - - this.version = reader.readInt (); - this.community = reader.readString (); - - var type = reader.peek (); - - if (type == PduType.GetResponse) { - this.pdu = new GetResponsePdu (reader); - } else { - throw new ResponseInvalidError ("Unknown PDU type '" + type - + "' in response"); - } -}; - -/***************************************************************************** - ** Session class definition - 
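 ** Editor's note -- a hedged construction sketch (host, community and all
 ** options below are hypothetical; the defaults shown match the
 ** constructor that follows):
 **
 **     var session = new Session ("10.0.0.1", "public", {
 **         version: Version1,   // or Version2c
 **         transport: "udp4",
 **         port: 161,
 **         trapPort: 162,
 **         retries: 1,
 **         timeout: 5000        // milliseconds
 **     });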
**/ - -var Session = function (target, community, options) { - this.target = target || "127.0.0.1"; - this.community = community || "public"; - - this.version = (options && options.version) - ? options.version - : Version1; - - this.transport = (options && options.transport) - ? options.transport - : "udp4"; - this.port = (options && options.port ) - ? options.port - : 161; - this.trapPort = (options && options.trapPort ) - ? options.trapPort - : 162; - - this.retries = (options && (options.retries || options.retries == 0)) - ? options.retries - : 1; - this.timeout = (options && options.timeout) - ? options.timeout - : 5000; - - this.sourceAddress = (options && options.sourceAddress ) - ? options.sourceAddress - : undefined; - this.sourcePort = (options && options.sourcePort ) - ? parseInt(options.sourcePort) - : undefined; - - this.reqs = {}; - this.reqCount = 0; - - this.dgram = dgram.createSocket (this.transport); - this.dgram.unref(); - - var me = this; - this.dgram.on ("message", me.onMsg.bind (me)); - this.dgram.on ("close", me.onClose.bind (me)); - this.dgram.on ("error", me.onError.bind (me)); - - if (this.sourceAddress || this.sourcePort) - this.dgram.bind (this.sourcePort, this.sourceAddress); -}; - -util.inherits (Session, events.EventEmitter); - -Session.prototype.close = function () { - this.dgram.close (); - return this; -}; - -Session.prototype.cancelRequests = function (error) { - var id; - for (id in this.reqs) { - var req = this.reqs[id]; - this.unregisterRequest (req.id); - req.responseCb (error); - } -}; - -function _generateId () { - return Math.floor (Math.random () + Math.random () * 10000000) -} - -Session.prototype.get = function (oids, responseCb) { - function feedCb (req, message) { - var pdu = message.pdu; - var varbinds = []; - - if (req.message.pdu.varbinds.length != pdu.varbinds.length) { - req.responseCb (new ResponseInvalidError ("Requested OIDs do not " - + "match response OIDs")); - } else { - for (var i = 0; i < req.message.pdu.varbinds.length; i++) { - if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) { - req.responseCb (new ResponseInvalidError ("OID '" - + req.message.pdu.varbinds[i].oid - + "' in request at position '" + i + "' does not " - + "match OID '" + pdu.varbinds[i].oid + "' in response " - + "at position '" + i + "'")); - return; - } else { - varbinds.push (pdu.varbinds[i]); - } - } - - req.responseCb (null, varbinds); - } - } - - var pduVarbinds = []; - - for (var i = 0; i < oids.length; i++) { - var varbind = { - oid: oids[i] - }; - pduVarbinds.push (varbind); - } - - this.simpleGet (GetRequestPdu, feedCb, pduVarbinds, responseCb); - - return this; -}; - -Session.prototype.getBulk = function () { - var oids, nonRepeaters, maxRepetitions, responseCb; - - if (arguments.length >= 4) { - oids = arguments[0]; - nonRepeaters = arguments[1]; - maxRepetitions = arguments[2]; - responseCb = arguments[3]; - } else if (arguments.length >= 3) { - oids = arguments[0]; - nonRepeaters = arguments[1]; - maxRepetitions = 10; - responseCb = arguments[2]; - } else { - oids = arguments[0]; - nonRepeaters = 0; - maxRepetitions = 10; - responseCb = arguments[1]; - } - - function feedCb (req, message) { - var pdu = message.pdu; - var varbinds = []; - var i = 0; - - // first walk through and grab non-repeaters - if (pdu.varbinds.length < nonRepeaters) { - req.responseCb (new ResponseInvalidError ("Varbind count in " - + "response '" + pdu.varbinds.length + "' is less than " - + "non-repeaters '" + nonRepeaters + "' in request")); - } else { - for ( ; i 
< nonRepeaters; i++) { - if (isVarbindError (pdu.varbinds[i])) { - varbinds.push (pdu.varbinds[i]); - } else if (! oidFollowsOid (req.message.pdu.varbinds[i].oid, - pdu.varbinds[i].oid)) { - req.responseCb (new ResponseInvalidError ("OID '" - + req.message.pdu.varbinds[i].oid + "' in request at " - + "position '" + i + "' does not precede " - + "OID '" + pdu.varbinds[i].oid + "' in response " - + "at position '" + i + "'")); - return; - } else { - varbinds.push (pdu.varbinds[i]); - } - } - } - - var repeaters = req.message.pdu.varbinds.length - nonRepeaters; - - // secondly walk through and grab repeaters - if (pdu.varbinds.length % (repeaters)) { - req.responseCb (new ResponseInvalidError ("Varbind count in " - + "response '" + pdu.varbinds.length + "' is not a " - + "multiple of repeaters '" + repeaters - + "' plus non-repeaters '" + nonRepeaters + "' in request")); - } else { - while (i < pdu.varbinds.length) { - for (var j = 0; j < repeaters; j++, i++) { - var reqIndex = nonRepeaters + j; - var respIndex = i; - - if (isVarbindError (pdu.varbinds[respIndex])) { - if (! varbinds[reqIndex]) - varbinds[reqIndex] = []; - varbinds[reqIndex].push (pdu.varbinds[respIndex]); - } else if (! oidFollowsOid ( - req.message.pdu.varbinds[reqIndex].oid, - pdu.varbinds[respIndex].oid)) { - req.responseCb (new ResponseInvalidError ("OID '" - + req.message.pdu.varbinds[reqIndex].oid - + "' in request at position '" + (reqIndex) - + "' does not precede OID '" - + pdu.varbinds[respIndex].oid - + "' in response at position '" + (respIndex) + "'")); - return; - } else { - if (! varbinds[reqIndex]) - varbinds[reqIndex] = []; - varbinds[reqIndex].push (pdu.varbinds[respIndex]); - } - } - } - } - - req.responseCb (null, varbinds); - } - - var pduVarbinds = []; - - for (var i = 0; i < oids.length; i++) { - var varbind = { - oid: oids[i] - }; - pduVarbinds.push (varbind); - } - - var options = { - nonRepeaters: nonRepeaters, - maxRepetitions: maxRepetitions - }; - - this.simpleGet (GetBulkRequestPdu, feedCb, pduVarbinds, responseCb, - options); - - return this; -}; - -Session.prototype.getNext = function (oids, responseCb) { - function feedCb (req, message) { - var pdu = message.pdu; - var varbinds = []; - - if (req.message.pdu.varbinds.length != pdu.varbinds.length) { - req.responseCb (new ResponseInvalidError ("Requested OIDs do not " - + "match response OIDs")); - } else { - for (var i = 0; i < req.message.pdu.varbinds.length; i++) { - if (isVarbindError (pdu.varbinds[i])) { - varbinds.push (pdu.varbinds[i]); - } else if (! 
oidFollowsOid (req.message.pdu.varbinds[i].oid, - pdu.varbinds[i].oid)) { - req.responseCb (new ResponseInvalidError ("OID '" - + req.message.pdu.varbinds[i].oid + "' in request at " - + "position '" + i + "' does not precede " - + "OID '" + pdu.varbinds[i].oid + "' in response " - + "at position '" + i + "'")); - return; - } else { - varbinds.push (pdu.varbinds[i]); - } - } - - req.responseCb (null, varbinds); - } - } - - var pduVarbinds = []; - - for (var i = 0; i < oids.length; i++) { - var varbind = { - oid: oids[i] - }; - pduVarbinds.push (varbind); - } - - this.simpleGet (GetNextRequestPdu, feedCb, pduVarbinds, responseCb); - - return this; -}; - -Session.prototype.inform = function () { - var typeOrOid = arguments[0]; - var varbinds, options = {}, responseCb; - - /** - ** Support the following signatures: - ** - ** typeOrOid, varbinds, options, callback - ** typeOrOid, varbinds, callback - ** typeOrOid, options, callback - ** typeOrOid, callback - **/ - if (arguments.length >= 4) { - varbinds = arguments[1]; - options = arguments[2]; - responseCb = arguments[3]; - } else if (arguments.length >= 3) { - if (arguments[1].constructor != Array) { - varbinds = []; - options = arguments[1]; - responseCb = arguments[2]; - } else { - varbinds = arguments[1]; - responseCb = arguments[2]; - } - } else { - varbinds = []; - responseCb = arguments[1]; - } - - function feedCb (req, message) { - var pdu = message.pdu; - var varbinds = []; - - if (req.message.pdu.varbinds.length != pdu.varbinds.length) { - req.responseCb (new ResponseInvalidError ("Inform OIDs do not " - + "match response OIDs")); - } else { - for (var i = 0; i < req.message.pdu.varbinds.length; i++) { - if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) { - req.responseCb (new ResponseInvalidError ("OID '" - + req.message.pdu.varbinds[i].oid - + "' in inform at position '" + i + "' does not " - + "match OID '" + pdu.varbinds[i].oid + "' in response " - + "at position '" + i + "'")); - return; - } else { - varbinds.push (pdu.varbinds[i]); - } - } - - req.responseCb (null, varbinds); - } - } - - if (typeof typeOrOid != "string") - typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1); - - var pduVarbinds = [ - { - oid: "1.3.6.1.2.1.1.3.0", - type: ObjectType.TimeTicks, - value: options.upTime || Math.floor (process.uptime () * 100) - }, - { - oid: "1.3.6.1.6.3.1.1.4.1.0", - type: ObjectType.OID, - value: typeOrOid - } - ]; - - for (var i = 0; i < varbinds.length; i++) { - var varbind = { - oid: varbinds[i].oid, - type: varbinds[i].type, - value: varbinds[i].value - }; - pduVarbinds.push (varbind); - } - - options.port = this.trapPort; - - this.simpleGet (InformRequestPdu, feedCb, pduVarbinds, responseCb, options); - - return this; -}; - -Session.prototype.onClose = function () { - this.cancelRequests (new Error ("Socket forcibly closed")); - this.emit ("close"); -}; - -Session.prototype.onError = function (error) { - this.emit ("error", error); -}; - -Session.prototype.onMsg = function (buffer, remote) { - try { - var message = new ResponseMessage (buffer); - - var req = this.unregisterRequest (message.pdu.id); - if (! 
req) - return; - - try { - if (message.version != req.message.version) { - req.responseCb (new ResponseInvalidError ("Version in request '" - + req.message.version + "' does not match version in " - + "response '" + message.version + "'")); - } else if (message.community != req.message.community) { - req.responseCb (new ResponseInvalidError ("Community '" - + req.message.community + "' in request does not match " - + "community '" + message.community + "' in response")); - } else if (message.pdu.type == PduType.GetResponse) { - req.onResponse (req, message); - } else { - req.responseCb (new ResponseInvalidError ("Unknown PDU type '" - + message.pdu.type + "' in response")); - } - } catch (error) { - req.responseCb (error); - } - } catch (error) { - this.emit("error", error); - } -}; - -Session.prototype.onSimpleGetResponse = function (req, message) { - var pdu = message.pdu; - - if (pdu.errorStatus > 0) { - var statusString = ErrorStatus[pdu.errorStatus] - || ErrorStatus.GeneralError; - var statusCode = ErrorStatus[statusString] - || ErrorStatus[ErrorStatus.GeneralError]; - - if (pdu.errorIndex <= 0 || pdu.errorIndex > pdu.varbinds.length) { - req.responseCb (new RequestFailedError (statusString, statusCode)); - } else { - var oid = pdu.varbinds[pdu.errorIndex - 1].oid; - var error = new RequestFailedError (statusString + ": " + oid, - statusCode); - req.responseCb (error); - } - } else { - req.feedCb (req, message); - } -}; - -Session.prototype.registerRequest = function (req) { - if (! this.reqs[req.id]) { - this.reqs[req.id] = req; - if (this.reqCount <= 0) - this.dgram.ref(); - this.reqCount++; - } - var me = this; - req.timer = setTimeout (function () { - if (req.retries-- > 0) { - me.send (req); - } else { - me.unregisterRequest (req.id); - req.responseCb (new RequestTimedOutError ( - "Request timed out")); - } - }, req.timeout); -}; - -Session.prototype.send = function (req, noWait) { - try { - var me = this; - - var buffer = req.message.toBuffer (); - - this.dgram.send (buffer, 0, buffer.length, req.port, this.target, - function (error, bytes) { - if (error) { - req.responseCb (error); - } else { - if (noWait) { - req.responseCb (null); - } else { - me.registerRequest (req); - } - } - }); - } catch (error) { - req.responseCb (error); - } - - return this; -}; - -Session.prototype.set = function (varbinds, responseCb) { - function feedCb (req, message) { - var pdu = message.pdu; - var varbinds = []; - - if (req.message.pdu.varbinds.length != pdu.varbinds.length) { - req.responseCb (new ResponseInvalidError ("Requested OIDs do not " - + "match response OIDs")); - } else { - for (var i = 0; i < req.message.pdu.varbinds.length; i++) { - if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) { - req.responseCb (new ResponseInvalidError ("OID '" - + req.message.pdu.varbinds[i].oid - + "' in request at position '" + i + "' does not " - + "match OID '" + pdu.varbinds[i].oid + "' in response " - + "at position '" + i + "'")); - return; - } else { - varbinds.push (pdu.varbinds[i]); - } - } - - req.responseCb (null, varbinds); - } - } - - var pduVarbinds = []; - - for (var i = 0; i < varbinds.length; i++) { - var varbind = { - oid: varbinds[i].oid, - type: varbinds[i].type, - value: varbinds[i].value - }; - pduVarbinds.push (varbind); - } - - this.simpleGet (SetRequestPdu, feedCb, pduVarbinds, responseCb); - - return this; -}; - -Session.prototype.simpleGet = function (pduClass, feedCb, varbinds, - responseCb, options) { - var req = {}; - - try { - var id = _generateId (); - var pdu = new 
pduClass (id, varbinds, options); - var message = new RequestMessage (this.version, this.community, pdu); - - req = { - id: id, - message: message, - responseCb: responseCb, - retries: this.retries, - timeout: this.timeout, - onResponse: this.onSimpleGetResponse, - feedCb: feedCb, - port: (options && options.port) ? options.port : this.port - }; - - this.send (req); - } catch (error) { - if (req.responseCb) - req.responseCb (error); - } -}; - -function subtreeCb (req, varbinds) { - var done = 0; - - for (var i = varbinds.length; i > 0; i--) { - if (! oidInSubtree (req.baseOid, varbinds[i - 1].oid)) { - done = 1; - varbinds.pop (); - } - } - - if (varbinds.length > 0) - req.feedCb (varbinds); - - if (done) - return true; -} - -Session.prototype.subtree = function () { - var me = this; - var oid = arguments[0]; - var maxRepetitions, feedCb, doneCb; - - if (arguments.length < 4) { - maxRepetitions = 20; - feedCb = arguments[1]; - doneCb = arguments[2]; - } else { - maxRepetitions = arguments[1]; - feedCb = arguments[2]; - doneCb = arguments[3]; - } - - var req = { - feedCb: feedCb, - doneCb: doneCb, - maxRepetitions: maxRepetitions, - baseOid: oid - }; - - this.walk (oid, maxRepetitions, subtreeCb.bind (me, req), doneCb); - - return this; -}; - -function tableColumnsResponseCb (req, error) { - if (error) { - req.responseCb (error); - } else if (req.error) { - req.responseCb (req.error); - } else { - if (req.columns.length > 0) { - var column = req.columns.pop (); - var me = this; - this.subtree (req.rowOid + column, req.maxRepetitions, - tableColumnsFeedCb.bind (me, req), - tableColumnsResponseCb.bind (me, req)); - } else { - req.responseCb (null, req.table); - } - } -} - -function tableColumnsFeedCb (req, varbinds) { - for (var i = 0; i < varbinds.length; i++) { - if (isVarbindError (varbinds[i])) { - req.error = new RequestFailedError (varbindError (varbinds[i])); - return true; - } - - var oid = varbinds[i].oid.replace (req.rowOid, ""); - if (oid && oid != varbinds[i].oid) { - var match = oid.match (/^(\d+)\.(.+)$/); - if (match && match[1] > 0) { - if (! req.table[match[2]]) - req.table[match[2]] = {}; - req.table[match[2]][match[1]] = varbinds[i].value; - } - } - } -} - -Session.prototype.tableColumns = function () { - var me = this; - - var oid = arguments[0]; - var columns = arguments[1]; - var maxRepetitions, responseCb; - - if (arguments.length < 4) { - responseCb = arguments[2]; - maxRepetitions = 20; - } else { - maxRepetitions = arguments[2]; - responseCb = arguments[3]; - } - - var req = { - responseCb: responseCb, - maxRepetitions: maxRepetitions, - baseOid: oid, - rowOid: oid + ".1.", - columns: columns.slice(0), - table: {} - }; - - if (req.columns.length > 0) { - var column = req.columns.pop (); - this.subtree (req.rowOid + column, maxRepetitions, - tableColumnsFeedCb.bind (me, req), - tableColumnsResponseCb.bind (me, req)); - } - - return this; -}; - -function tableResponseCb (req, error) { - if (error) - req.responseCb (error); - else if (req.error) - req.responseCb (req.error); - else - req.responseCb (null, req.table); -} - -function tableFeedCb (req, varbinds) { - for (var i = 0; i < varbinds.length; i++) { - if (isVarbindError (varbinds[i])) { - req.error = new RequestFailedError (varbindError (varbinds[i])); - return true; - } - - var oid = varbinds[i].oid.replace (req.rowOid, ""); - if (oid && oid != varbinds[i].oid) { - var match = oid.match (/^(\d+)\.(.+)$/); - if (match && match[1] > 0) { - if (!
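/*
 * A minimal sketch of subtree () and table (), assuming the module is
 * required as "snmp"; the ifTable OIDs (1.3.6.1.2.1.2.2) and the
 * maxRepetitions of 20 are illustrative:
 *
 *     session.subtree ("1.3.6.1.2.1.2.2.1.2", 20, function (varbinds) {
 *         for (var i = 0; i < varbinds.length; i++) {
 *             if (snmp.isVarbindError (varbinds[i]))
 *                 console.error (snmp.varbindError (varbinds[i]));
 *             else
 *                 console.log (varbinds[i].oid + " = " + varbinds[i].value);
 *         }
 *     }, function (error) {
 *         if (error)
 *             console.error (error.toString ());
 *     });
 *
 *     session.table ("1.3.6.1.2.1.2.2", 20, function (error, table) {
 *         if (error)
 *             console.error (error.toString ());
 *         else
 *             console.log (table);
 *     });
 */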
req.table[match[2]]) - req.table[match[2]] = {}; - req.table[match[2]][match[1]] = varbinds[i].value; - } - } - } -} - -Session.prototype.table = function () { - var me = this; - - var oid = arguments[0]; - var maxRepetitions, responseCb; - - if (arguments.length < 3) { - responseCb = arguments[1]; - maxRepetitions = 20; - } else { - maxRepetitions = arguments[1]; - responseCb = arguments[2]; - } - - var req = { - responseCb: responseCb, - maxRepetitions: maxRepetitions, - baseOid: oid, - rowOid: oid + ".1.", - table: {} - }; - - this.subtree (oid, maxRepetitions, tableFeedCb.bind (me, req), - tableResponseCb.bind (me, req)); - - return this; -}; - -Session.prototype.trap = function () { - var req = {}; - - try { - var typeOrOid = arguments[0]; - var varbinds, options = {}, responseCb; - - /** - ** Support the following signatures: - ** - ** typeOrOid, varbinds, options, callback - ** typeOrOid, varbinds, agentAddr, callback - ** typeOrOid, varbinds, callback - ** typeOrOid, agentAddr, callback - ** typeOrOid, options, callback - ** typeOrOid, callback - **/ - if (arguments.length >= 4) { - varbinds = arguments[1]; - if (typeof arguments[2] == "string") { - options.agentAddr = arguments[2]; - } else if (arguments[2].constructor != Array) { - options = arguments[2]; - } - responseCb = arguments[3]; - } else if (arguments.length >= 3) { - if (typeof arguments[1] == "string") { - varbinds = []; - options.agentAddr = arguments[1]; - } else if (arguments[1].constructor != Array) { - varbinds = []; - options = arguments[1]; - } else { - varbinds = arguments[1]; - options.agentAddr = null; - } - responseCb = arguments[2]; - } else { - varbinds = []; - responseCb = arguments[1]; - } - - var pdu, pduVarbinds = []; - - for (var i = 0; i < varbinds.length; i++) { - var varbind = { - oid: varbinds[i].oid, - type: varbinds[i].type, - value: varbinds[i].value - }; - pduVarbinds.push (varbind); - } - - var id = _generateId (); - - if (this.version == Version2c) { - if (typeof typeOrOid != "string") - typeOrOid = "1.3.6.1.6.3.1.1.5."
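/*
 * A minimal sketch of the trap () signatures documented above, assuming
 * the module is required as "snmp"; the enterprise OID and agent address
 * are illustrative:
 *
 *     session.trap (snmp.TrapType.LinkDown, function (error) {
 *         if (error)
 *             console.error (error.toString ());
 *     });
 *
 *     session.trap ("1.3.6.1.4.1.2000.1", [], { agentAddr: "10.0.0.1" },
 *             function (error) {
 *                 if (error)
 *                     console.error (error.toString ());
 *             });
 */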
+ (typeOrOid + 1); - - pduVarbinds.unshift ( - { - oid: "1.3.6.1.2.1.1.3.0", - type: ObjectType.TimeTicks, - value: options.upTime || Math.floor (process.uptime () * 100) - }, - { - oid: "1.3.6.1.6.3.1.1.4.1.0", - type: ObjectType.OID, - value: typeOrOid - } - ); - - pdu = new TrapV2Pdu (id, pduVarbinds, options); - } else { - pdu = new TrapPdu (typeOrOid, pduVarbinds, options); - } - - var message = new RequestMessage (this.version, this.community, pdu); - - req = { - id: id, - message: message, - responseCb: responseCb, - port: this.trapPort - }; - - this.send (req, true); - } catch (error) { - if (req.responseCb) - req.responseCb (error); - } - - return this; -}; - -Session.prototype.unregisterRequest = function (id) { - var req = this.reqs[id]; - if (req) { - delete this.reqs[id]; - clearTimeout (req.timer); - delete req.timer; - this.reqCount--; - if (this.reqCount <= 0) - this.dgram.unref(); - return req; - } else { - return null; - } -}; - -function walkCb (req, error, varbinds) { - var done = 0; - var oid; - - if (error) { - if (error instanceof RequestFailedError) { - if (error.status != ErrorStatus.NoSuchName) { - req.doneCb (error); - return; - } else { - // signal the version 1 walk code below that it should stop - done = 1; - } - } else { - req.doneCb (error); - return; - } - } - - if (this.version == Version2c) { - for (var i = varbinds[0].length; i > 0; i--) { - if (varbinds[0][i - 1].type == ObjectType.EndOfMibView) { - varbinds[0].pop (); - done = 1; - } - } - if (req.feedCb (varbinds[0])) - done = 1; - if (! done) - oid = varbinds[0][varbinds[0].length - 1].oid; - } else { - if (! done) { - if (req.feedCb (varbinds)) { - done = 1; - } else { - oid = varbinds[0].oid; - } - } - } - - if (done) - req.doneCb (null); - else - this.walk (oid, req.maxRepetitions, req.feedCb, req.doneCb, - req.baseOid); -} - -Session.prototype.walk = function () { - var me = this; - var oid = arguments[0]; - var maxRepetitions, feedCb, doneCb, baseOid; - - if (arguments.length < 4) { - maxRepetitions = 20; - feedCb = arguments[1]; - doneCb = arguments[2]; - } else { - maxRepetitions = arguments[1]; - feedCb = arguments[2]; - doneCb = arguments[3]; - } - - var req = { - maxRepetitions: maxRepetitions, - feedCb: feedCb, - doneCb: doneCb - }; - - if (this.version == Version2c) - this.getBulk ([oid], 0, maxRepetitions, - walkCb.bind (me, req)); - else - this.getNext ([oid], walkCb.bind (me, req)); - - return this; -}; - -/***************************************************************************** - ** Exports - **/ - -exports.Session = Session; - -exports.createSession = function (target, community, options) { - return new Session (target, community, options); -}; - -exports.isVarbindError = isVarbindError; -exports.varbindError = varbindError; - -exports.Version1 = Version1; -exports.Version2c = Version2c; - -exports.ErrorStatus = ErrorStatus; -exports.TrapType = TrapType; -exports.ObjectType = ObjectType; - -exports.ResponseInvalidError = ResponseInvalidError; -exports.RequestInvalidError = RequestInvalidError; -exports.RequestFailedError = RequestFailedError; -exports.RequestTimedOutError = RequestTimedOutError; - -/** - ** We've added this for testing. 
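 **
 ** A minimal end-to-end sketch of the exported API above, assuming the
 ** module is required as "snmp"; the target, community and base OID
 ** (mib-2) are illustrative:
 **
 **     var session = snmp.createSession ("127.0.0.1", "public");
 **
 **     session.walk ("1.3.6.1.2.1", 20, function (varbinds) {
 **         for (var i = 0; i < varbinds.length; i++)
 **             console.log (varbinds[i].oid + " = " + varbinds[i].value);
 **     }, function (error) {
 **         if (error)
 **             console.error (error.toString ());
 **     });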
- **/ -exports.ObjectParser = { - readInt: readInt, - readUint: readUint -}; diff --git a/node.d/node_modules/netdata.js b/node.d/node_modules/netdata.js deleted file mode 100644 index 4ab8308c1..000000000 --- a/node.d/node_modules/netdata.js +++ /dev/null @@ -1,654 +0,0 @@ -'use strict'; - -// netdata -// real-time performance and health monitoring, done right! -// (C) 2016 Costa Tsaousis <costa@tsaousis.gr> -// GPL v3+ - -var url = require('url'); -var http = require('http'); -var util = require('util'); - -/* -var netdata = require('netdata'); - -var example_chart = { - id: 'id', // the unique id of the chart - name: 'name', // the name of the chart - title: 'title', // the title of the chart - units: 'units', // the units of the chart dimensions - family: 'family', // the family of the chart - context: 'context', // the context of the chart - type: netdata.chartTypes.line, // the type of the chart - priority: 0, // the priority relative to others in the same family - update_every: 1, // the expected update frequency of the chart - dimensions: { - 'dim1': { - id: 'dim1', // the unique id of the dimension - name: 'name', // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false, // is hidden (boolean) - }, - 'dim2': { - id: 'dim2', // the unique id of the dimension - name: 'name', // the name of the dimension - algorithm: 'absolute', // the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false, // is hidden (boolean) - } - // add as many dimensions as needed - } -}; -*/ - -var netdata = { - options: { - filename: __filename, - DEBUG: false, - update_every: 1 - }, - - chartAlgorithms: { - incremental: 'incremental', - absolute: 'absolute', - percentage_of_absolute_row: 'percentage-of-absolute-row', - percentage_of_incremental_row: 'percentage-of-incremental-row' - }, - - chartTypes: { - line: 'line', - area: 'area', - stacked: 'stacked' - }, - - services: new Array(), - modules_configuring: 0, - charts: {}, - - processors: { - http: { - name: 'http', - - process: function(service, callback) { - var __DEBUG = netdata.options.DEBUG; - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': making ' + this.name + ' request: ' + netdata.stringify(service.request)); - - var req = http.request(service.request, function(response) { - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': got server response...'); - - var end = false; - var data = ''; - response.setEncoding('utf8'); - - if(response.statusCode !== 200) { - if(end === false) { - service.error('Got HTTP code ' + response.statusCode + ', failed to get data.'); - end = true; - return callback(null); - } - } - - response.on('data', function(chunk) { - if(end === false) data += chunk; - }); - - response.on('error', function() { - if(end === false) { - service.error(': Read error, failed to get data.'); - end = true; - return callback(null); - } - }); - - response.on('end', function() { - if(end === false) { - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': read completed.'); - end = true; - return callback(data); - } - }); - }); - - req.on('error', function(e) { - if(__DEBUG === true) netdata.debug('Failed to make request: ' + netdata.stringify(service.request) + ', message: ' + e.message); - service.error('Failed to make request, message: ' + e.message); - return 
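/*
 * A minimal sketch of a plugin module built on this framework; the module
 * name and URL are illustrative (compare sma_webbox.node.js and
 * snmp.node.js further below for real modules):
 *
 *     var netdata = require('netdata');
 *
 *     var mymodule = {
 *         name: 'mymodule',
 *         enable_autodetect: false,
 *         update_every: 1,
 *
 *         processResponse: function(service, data) {
 *             if(data === null) return;
 *             // parse data, create charts with service.chart(),
 *             // then service.begin()/set()/end() per chart
 *         },
 *
 *         configure: function(config) {
 *             var service = netdata.service({
 *                 name: 'myserver',
 *                 request: netdata.requestFromURL('http://127.0.0.1:8080/status'),
 *                 update_every: this.update_every,
 *                 module: this
 *             });
 *             service.execute(this.processResponse);
 *             return 1; // number of eligible services found
 *         },
 *
 *         update: function(service, callback) {
 *             service.execute(function(serv, data) {
 *                 service.module.processResponse(serv, data);
 *                 callback();
 *             });
 *         }
 *     };
 *
 *     module.exports = mymodule;
 */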
callback(null); - }); - - // write data to request body - if(typeof service.postData !== 'undefined' && service.request.method === 'POST') { - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': posting data: ' + service.postData); - req.write(service.postData); - } - - req.end(); - } - } - }, - - stringify: function(obj) { - return util.inspect(obj, {depth: 10}); - }, - - zeropad2: function(s) { - return ("00" + s).slice(-2); - }, - - logdate: function(d) { - if(typeof d === 'undefined') d = new Date(); - return d.getFullYear().toString() + '-' + this.zeropad2(d.getMonth() + 1) + '-' + this.zeropad2(d.getDate()) - + ' ' + this.zeropad2(d.getHours()) + ':' + this.zeropad2(d.getMinutes()) + ':' + this.zeropad2(d.getSeconds()); - }, - - // show debug info, if debug is enabled - debug: function(msg) { - if(this.options.DEBUG === true) { - console.error(this.logdate() + ': ' + netdata.options.filename + ': DEBUG: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString()); - } - }, - - // log an error - error: function(msg) { - console.error(this.logdate() + ': ' + netdata.options.filename + ': ERROR: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString()); - }, - - // send data to netdata - send: function(msg) { - console.log(msg.toString()); - }, - - service: function(service) { - if(typeof service === 'undefined') - service = {}; - - var now = Date.now(); - - service._current_chart = null; // the current chart we work on - service._queue = ''; // data to be sent to netdata - - service.error_reported = false; // error log flood control - - service.added = false; // added to netdata.services - service.enabled = true; - service.updates = 0; - service.running = false; - service.started = 0; - service.ended = 0; - - if(typeof service.module === 'undefined') { - service.module = { name: 'not-defined-module' }; - service.error('Attempted to create service without a module.'); - service.enabled = false; - } - - if(typeof service.name === 'undefined') { - service.name = 'unnamed@' + service.module.name + '/' + now; - } - - if(typeof service.processor === 'undefined') - service.processor = netdata.processors.http; - - if(typeof service.update_every === 'undefined') - service.update_every = service.module.update_every; - - if(typeof service.update_every === 'undefined') - service.update_every = netdata.options.update_every; - - if(service.update_every < netdata.options.update_every) - service.update_every = netdata.options.update_every; - - // align the runs - service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000); - - service.commit = function() { - if(this.added !== true) { - this.added = true; - - var now = Date.now(); - this.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000); - - netdata.services.push(this); - if(netdata.options.DEBUG === true) netdata.debug(this.module.name + ': ' + this.name + ': service committed.'); - } - }; - - service.execute = function(responseProcessor) { - var __DEBUG = netdata.options.DEBUG; - - if(service.enabled === false) - return responseProcessor(null); - - this.module.active++; - this.running = true; - this.started = Date.now(); - this.updates++; - - if(__DEBUG === true) - netdata.debug(this.module.name + ': ' + this.name + ': making ' + this.processor.name + ' request: ' + netdata.stringify(this)); - - this.processor.process(this, function(response) { - service.ended = Date.now(); - service.duration = service.ended - 
service.started; - - if (typeof response === 'undefined') - response = null; - - if (response !== null) - service.errorClear(); - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': processing ' + service.processor.name + ' response (received in ' + (service.ended - service.started).toString() + ' ms)'); - - try { - responseProcessor(service, response); - } - catch(e) { - netdata.error(e); - service.error("responseProcessor failed to process response data."); - } - - service.running = false; - service.module.active--; - if(service.module.active < 0) { - service.module.active = 0; - if(__DEBUG === true) - netdata.debug(service.module.name + ': active module counter below zero.'); - } - - if(service.module.active === 0) { - // check if we run under configure - if(service.module.configure_callback !== null) { - if(__DEBUG === true) - netdata.debug(service.module.name + ': configuration finish callback called from processResponse().'); - - var configure_callback = service.module.configure_callback; - service.module.configure_callback = null; - configure_callback(); - } - } - }); - }; - - service.update = function() { - if(netdata.options.DEBUG === true) - netdata.debug(this.module.name + ': ' + this.name + ': starting data collection...'); - - this.module.update(this, function() { - if(netdata.options.DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': data collection ended in ' + service.duration.toString() + ' ms.'); - }); - }; - - service.error = function(message) { - if(this.error_reported === false) { - netdata.error(this.module.name + ': ' + this.name + ': ' + message); - this.error_reported = true; - } - else if(netdata.options.DEBUG === true) - netdata.debug(this.module.name + ': ' + this.name + ': ' + message); - }; - - service.errorClear = function() { - this.error_reported = false; - }; - - service.queue = function(txt) { - this._queue += txt + '\n'; - }; - - service._send_chart_to_netdata = function(chart) { - // internal function to send a chart to netdata - this.queue('CHART "' + chart.id + '" "' + chart.name + '" "' + chart.title + '" "' + chart.units + '" "' + chart.family + '" "' + chart.context + '" "' + chart.type + '" ' + chart.priority.toString() + ' ' + chart.update_every.toString()); - - if(typeof(chart.dimensions) !== 'undefined') { - var dims = Object.keys(chart.dimensions); - var len = dims.length; - while(len--) { - var d = chart.dimensions[dims[len]]; - - this.queue('DIMENSION "' + d.id + '" "' + d.name + '" "' + d.algorithm + '" ' + d.multiplier.toString() + ' ' + d.divisor.toString() + ' ' + ((d.hidden === true) ? 'hidden' : '').toString()); - d._created = true; - d._updated = false; - } - } - - chart._created = true; - chart._updated = false; - }; - - // begin data collection for a chart - service.begin = function(chart) { - if(this._current_chart !== null && this._current_chart !== chart) { - this.error('Called begin() for chart ' + chart.id + ' while chart ' + this._current_chart.id + ' is still open. Closing it.'); - this.end(); - } - - if(typeof(chart.id) === 'undefined' || netdata.charts[chart.id] !== chart) { - this.error('Called begin() for chart ' + chart.id + ' that is not mine. Where did you find it?
Ignoring it.'); - return false; - } - - if(netdata.options.DEBUG === true) netdata.debug('setting current chart to ' + chart.id); - this._current_chart = chart; - this._current_chart._began = true; - - if(this._current_chart._dimensions_count !== 0) { - if(this._current_chart._created === false || this._current_chart._updated === true) - this._send_chart_to_netdata(this._current_chart); - - var now = this.ended; - this.queue('BEGIN ' + this._current_chart.id + ' ' + ((this._current_chart._last_updated > 0)?((now - this._current_chart._last_updated) * 1000):'').toString()); - } - // else this.error('Called begin() for chart ' + chart.id + ' which is empty.'); - - this._current_chart._last_updated = now; - this._current_chart._began = true; - this._current_chart._counter++; - - return true; - }; - - // set a collected value for a chart - // we do most things on the first value we attempt to set - service.set = function(dimension, value) { - if(this._current_chart === null) { - this.error('Called set(' + dimension + ', ' + value + ') without an open chart.'); - return false; - } - - if(typeof(this._current_chart.dimensions[dimension]) === 'undefined') { - this.error('Called set(' + dimension + ', ' + value + ') but dimension "' + dimension + '" does not exist in chart "' + this._current_chart.id + '".'); - return false; - } - - if(typeof value === 'undefined' || value === null) - return false; - - if(this._current_chart._dimensions_count !== 0) - this.queue('SET ' + dimension + ' = ' + value.toString()); - - return true; - }; - - // end data collection for the current chart - after calling begin() - service.end = function() { - if(this._current_chart !== null && this._current_chart._began === false) { - this.error('Called end() without an open chart.'); - return false; - } - - if(this._current_chart._dimensions_count !== 0) { - this.queue('END'); - netdata.send(this._queue); - } - - this._queue = ''; - this._current_chart._began = false; - if(netdata.options.DEBUG === true) netdata.debug('sent chart ' + this._current_chart.id); - this._current_chart = null; - return true; - }; - - // discard the collected values for the current chart - after calling begin() - service.flush = function() { - if(this._current_chart === null || this._current_chart._began === false) { - this.error('Called flush() without an open chart.'); - return false; - } - - this._queue = ''; - this._current_chart._began = false; - this._current_chart = null; - return true; - }; - - // create a netdata chart - service.chart = function(id, chart) { - var __DEBUG = netdata.options.DEBUG; - - if(typeof(netdata.charts[id]) === 'undefined') { - netdata.charts[id] = { - _created: false, - _updated: true, - _began: false, - _counter: 0, - _last_updated: 0, - _dimensions_count: 0, - id: id, - name: id, - title: 'untitled chart', - units: 'a unit', - family: '', - context: '', - type: netdata.chartTypes.line, - priority: 50000, - update_every: netdata.options.update_every, - dimensions: {} - }; - } - - var c = netdata.charts[id]; - - if(typeof(chart.name) !== 'undefined' && chart.name !== c.name) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its name'); - c.name = chart.name; - c._updated = true; - } - - if(typeof(chart.title) !== 'undefined' && chart.title !== c.title) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its title'); - c.title = chart.title; - c._updated = true; - } - - if(typeof(chart.units) !== 'undefined' && chart.units !== c.units) { - if(__DEBUG === true) netdata.debug('chart ' + id 
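/*
 * A minimal sketch of one collection cycle against the API above; the
 * chart id, units and the sample value are illustrative:
 *
 *     var chart = service.chart('mymodule_myserver.requests', {
 *         title: 'My Server Requests',
 *         units: 'requests/s',
 *         family: 'requests',
 *         context: 'mymodule.requests',
 *         type: netdata.chartTypes.line,
 *         priority: 50000,
 *         update_every: service.update_every,
 *         dimensions: {
 *             'requests': {
 *                 algorithm: netdata.chartAlgorithms.incremental
 *             }
 *         }
 *     });
 *
 *     service.begin(chart);
 *     service.set('requests', 1234);
 *     service.end();
 */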
+ ' updated its units'); - c.units = chart.units; - c._updated = true; - } - - if(typeof(chart.family) !== 'undefined' && chart.family !== c.family) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its family'); - c.family = chart.family; - c._updated = true; - } - - if(typeof(chart.context) !== 'undefined' && chart.context !== c.context) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its context'); - c.context = chart.context; - c._updated = true; - } - - if(typeof(chart.type) !== 'undefined' && chart.type !== c.type) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its type'); - c.type = chart.type; - c._updated = true; - } - - if(typeof(chart.priority) !== 'undefined' && chart.priority !== c.priority) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its priority'); - c.priority = chart.priority; - c._updated = true; - } - - if(typeof(chart.update_every) !== 'undefined' && chart.update_every !== c.update_every) { - if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its update_every from ' + c.update_every + ' to ' + chart.update_every); - c.update_every = chart.update_every; - c._updated = true; - } - - if(typeof(chart.dimensions) !== 'undefined') { - var dims = Object.keys(chart.dimensions); - var len = dims.length; - while(len--) { - var x = dims[len]; - - if(typeof(c.dimensions[x]) === 'undefined') { - c._dimensions_count++; - - c.dimensions[x] = { - _created: false, - _updated: false, - id: x, // the unique id of the dimension - name: x, // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false // is hidden (boolean) - }; - - if(__DEBUG === true) netdata.debug('chart ' + id + ' created dimension ' + x); - c._updated = true; - } - - var dim = chart.dimensions[x]; - var d = c.dimensions[x]; - - if(typeof(dim.name) !== 'undefined' && d.name !== dim.name) { - if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its name'); - d.name = dim.name; - d._updated = true; - } - - if(typeof(dim.algorithm) !== 'undefined' && d.algorithm !== dim.algorithm) { - if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its algorithm from ' + d.algorithm + ' to ' + dim.algorithm); - d.algorithm = dim.algorithm; - d._updated = true; - } - - if(typeof(dim.multiplier) !== 'undefined' && d.multiplier !== dim.multiplier) { - if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its multiplier'); - d.multiplier = dim.multiplier; - d._updated = true; - } - - if(typeof(dim.divisor) !== 'undefined' && d.divisor !== dim.divisor) { - if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its divisor'); - d.divisor = dim.divisor; - d._updated = true; - } - - if(typeof(dim.hidden) !== 'undefined' && d.hidden !== dim.hidden) { - if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its hidden status'); - d.hidden = dim.hidden; - d._updated = true; - } - - if(d._updated) c._updated = true; - } - } - - //if(netdata.options.DEBUG === true) netdata.debug(netdata.charts); - return netdata.charts[id]; - }; - - return service; - }, - - runAllServices: function() { - if(netdata.options.DEBUG === true) netdata.debug('runAllServices()'); - - var now = Date.now(); - var len = netdata.services.length; - while(len--) { - var service = netdata.services[len]; - - if(service.enabled === false 
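/*
 * Illustration of the next_run alignment used below (times illustrative):
 * with update_every = 5 and now = 1700000003000 ms,
 * next_run = 1700000003000 - (1700000003000 % 5000) + 5000
 *          = 1700000005000, i.e. the next 5-second boundary.
 */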
|| service.running === true) continue; - if(now <= service.next_run) continue; - - service.update(); - - now = Date.now(); - service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000); - } - - // 1/10th of update_every in pause - setTimeout(netdata.runAllServices, netdata.options.update_every * 100); - }, - - start: function() { - if(netdata.options.DEBUG === true) this.debug('started, services: ' + netdata.stringify(this.services)); - - if(this.services.length === 0) { - this.disableNodePlugin(); - - // eslint suggested way to exit - var exit = process.exit; - exit(1); - } - else this.runAllServices(); - }, - - // disable the whole node.js plugin - disableNodePlugin: function() { - this.send('DISABLE'); - - // eslint suggested way to exit - var exit = process.exit; - exit(1); - }, - - requestFromParams: function(protocol, hostname, port, path, method) { - return { - protocol: protocol, - hostname: hostname, - port: port, - path: path, - //family: 4, - method: method, - headers: { - 'Content-Type': 'application/x-www-form-urlencoded', - 'Connection': 'keep-alive' - }, - agent: new http.Agent({ - keepAlive: true, - keepAliveMsecs: netdata.options.update_every * 1000, - maxSockets: 2, // it must be 2 to work - maxFreeSockets: 1 - }) - }; - }, - - requestFromURL: function(a_url) { - var u = url.parse(a_url); - return netdata.requestFromParams(u.protocol, u.hostname, u.port, u.path, 'GET'); - }, - - configure: function(module, config, callback) { - if(netdata.options.DEBUG === true) this.debug(module.name + ': configuring (update_every: ' + this.options.update_every + ')...'); - - module.active = 0; - module.update_every = this.options.update_every; - - if(typeof config.update_every !== 'undefined') - module.update_every = config.update_every; - - module.enable_autodetect = (config.enable_autodetect)?true:false; - - if(typeof(callback) === 'function') - module.configure_callback = callback; - else - module.configure_callback = null; - - var added = module.configure(config); - - if(netdata.options.DEBUG === true) this.debug(module.name + ': configured, reporting ' + added + ' eligible services.'); - - if(module.configure_callback !== null && added === 0) { - if(netdata.options.DEBUG === true) this.debug(module.name + ': configuration finish callback called from configure().'); - var configure_callback = module.configure_callback; - module.configure_callback = null; - configure_callback(); - } - - return added; - } -}; - -if(netdata.options.DEBUG === true) netdata.debug('loaded netdata from:', __filename); -module.exports = netdata; diff --git a/node.d/node_modules/pixl-xml.js b/node.d/node_modules/pixl-xml.js deleted file mode 100644 index 481acbaeb..000000000 --- a/node.d/node_modules/pixl-xml.js +++ /dev/null @@ -1,606 +0,0 @@ -/* - JavaScript XML Library - Plus a bunch of object utility functions - - Usage: - var XML = require('pixl-xml'); - var myxmlstring = '<?xml version="1.0"?><Document>' + - '<Simple>Hello</Simple>' + - '<Node Key="Value">Content</Node>' + - '</Document>'; - - var tree = XML.parse( myxmlstring, { preserveAttributes: true }); - console.log( tree ); - - tree.Simple = "Hello2"; - tree.Node._Attribs.Key = "Value2"; - tree.Node._Data = "Content2"; - tree.New = "I added this"; - - console.log( XML.stringify( tree, 'Document' ) ); - - Copyright (c) 2004 - 2015 Joseph Huckaby - Released under the MIT License - This version is for Node.JS, converted in 2012. 
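
	Parse errors are thrown as exceptions (see throwParseError below), so
	parsing untrusted input is best wrapped in try/catch; the XML string
	here is illustrative:

		var xml = '<?xml version="1.0"?><Doc><Item>1</Item></Doc>';
		try {
			var tree = XML.parse( xml );
			console.log( tree.Item ); // "1"
		}
		catch(err) {
			console.error( err.toString() );
		}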
-*/ - -var fs = require('fs'); - -var indent_string = "\t"; -var xml_header = '<?xml version="1.0"?>'; -var sort_args = null; -var re_valid_tag_name = /^\w[\w\-\:]*$/; - -var XML = exports.XML = function XML(args) { - // class constructor for XML parser class - // pass in args hash or text to parse - if (!args) args = ''; - if (isa_hash(args)) { - for (var key in args) this[key] = args[key]; - } - else this.text = args || ''; - - // stringify buffers - if (this.text instanceof Buffer) { - this.text = this.text.toString(); - } - - if (!this.text.match(/^\s*</)) { - // try as file path - var file = this.text; - this.text = fs.readFileSync(file, { encoding: 'utf8' }); - if (!this.text) throw new Error("File not found: " + file); - } - - this.tree = {}; - this.errors = []; - this.piNodeList = []; - this.dtdNodeList = []; - this.documentNodeName = ''; - - if (this.lowerCase) { - this.attribsKey = this.attribsKey.toLowerCase(); - this.dataKey = this.dataKey.toLowerCase(); - } - - this.patTag.lastIndex = 0; - if (this.text) this.parse(); -} - -XML.prototype.preserveAttributes = false; -XML.prototype.lowerCase = false; - -XML.prototype.patTag = /([^<]*?)<([^>]+)>/g; -XML.prototype.patSpecialTag = /^\s*([\!\?])/; -XML.prototype.patPITag = /^\s*\?/; -XML.prototype.patCommentTag = /^\s*\!--/; -XML.prototype.patDTDTag = /^\s*\!DOCTYPE/; -XML.prototype.patCDATATag = /^\s*\!\s*\[\s*CDATA/; -XML.prototype.patStandardTag = /^\s*(\/?)([\w\-\:\.]+)\s*(.*)$/; -XML.prototype.patSelfClosing = /\/\s*$/; -XML.prototype.patAttrib = new RegExp("([\\w\\-\\:\\.]+)\\s*=\\s*([\\\"\\'])([^\\2]*?)\\2", "g"); -XML.prototype.patPINode = /^\s*\?\s*([\w\-\:]+)\s*(.*)$/; -XML.prototype.patEndComment = /--$/; -XML.prototype.patNextClose = /([^>]*?)>/g; -XML.prototype.patExternalDTDNode = new RegExp("^\\s*\\!DOCTYPE\\s+([\\w\\-\\:]+)\\s+(SYSTEM|PUBLIC)\\s+\\\"([^\\\"]+)\\\""); -XML.prototype.patInlineDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[/; -XML.prototype.patEndDTD = /\]$/; -XML.prototype.patDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[(.*)\]/; -XML.prototype.patEndCDATA = /\]\]$/; -XML.prototype.patCDATANode = /^\s*\!\s*\[\s*CDATA\s*\[([^]*)\]\]/; - -XML.prototype.attribsKey = '_Attribs'; -XML.prototype.dataKey = '_Data'; - -XML.prototype.parse = function(branch, name) { - // parse text into XML tree, recurse for nested nodes - if (!branch) branch = this.tree; - if (!name) name = null; - var foundClosing = false; - var matches = null; - - // match each tag, plus preceding text - while ( matches = this.patTag.exec(this.text) ) { - var before = matches[1]; - var tag = matches[2]; - - // text leading up to tag = content of parent node - if (before.match(/\S/)) { - if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = ''; - branch[this.dataKey] += trim(decode_entities(before)); - } - - // parse based on tag type - if (tag.match(this.patSpecialTag)) { - // special tag - if (tag.match(this.patPITag)) tag = this.parsePINode(tag); - else if (tag.match(this.patCommentTag)) tag = this.parseCommentNode(tag); - else if (tag.match(this.patDTDTag)) tag = this.parseDTDNode(tag); - else if (tag.match(this.patCDATATag)) { - tag = this.parseCDATANode(tag); - if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = ''; - branch[this.dataKey] += trim(decode_entities(tag)); - } // cdata - else { - this.throwParseError( "Malformed special tag", tag ); - break; - } // error - - if (tag == null) break; - continue; - } // special tag - else { - // 
Tag is standard, so parse name and attributes (if any) - var matches = tag.match(this.patStandardTag); - if (!matches) { - this.throwParseError( "Malformed tag", tag ); - break; - } - - var closing = matches[1]; - var nodeName = this.lowerCase ? matches[2].toLowerCase() : matches[2]; - var attribsRaw = matches[3]; - - // If this is a closing tag, make sure it matches its opening tag - if (closing) { - if (nodeName == (name || '')) { - foundClosing = 1; - break; - } - else { - this.throwParseError( "Mismatched closing tag (expected </" + name + ">)", tag ); - break; - } - } // closing tag - else { - // Not a closing tag, so parse attributes into hash. If tag - // is self-closing, no recursive parsing is needed. - var selfClosing = !!attribsRaw.match(this.patSelfClosing); - var leaf = {}; - var attribs = leaf; - - // preserve attributes means they go into a sub-hash named "_Attribs" - // the XML composer honors this for restoring the tree back into XML - if (this.preserveAttributes) { - leaf[this.attribsKey] = {}; - attribs = leaf[this.attribsKey]; - } - - // parse attributes - this.patAttrib.lastIndex = 0; - while ( matches = this.patAttrib.exec(attribsRaw) ) { - var key = this.lowerCase ? matches[1].toLowerCase() : matches[1]; - attribs[ key ] = decode_entities( matches[3] ); - } // foreach attrib - - // if no attribs found, but we created the _Attribs subhash, clean it up now - if (this.preserveAttributes && !num_keys(attribs)) { - delete leaf[this.attribsKey]; - } - - // Recurse for nested nodes - if (!selfClosing) { - this.parse( leaf, nodeName ); - if (this.error()) break; - } - - // Compress into simple node if text only - var num_leaf_keys = num_keys(leaf); - if ((typeof(leaf[this.dataKey]) != 'undefined') && (num_leaf_keys == 1)) { - leaf = leaf[this.dataKey]; - } - else if (!num_leaf_keys) { - leaf = ''; - } - - // Add leaf to parent branch - if (typeof(branch[nodeName]) != 'undefined') { - if (isa_array(branch[nodeName])) { - branch[nodeName].push( leaf ); - } - else { - var temp = branch[nodeName]; - branch[nodeName] = [ temp, leaf ]; - } - } - else { - branch[nodeName] = leaf; - } - - if (this.error() || (branch == this.tree)) break; - } // not closing - } // standard tag - } // main reg exp - - // Make sure we found the closing tag - if (name && !foundClosing) { - this.throwParseError( "Missing closing tag (expected </" + name + ">)", name ); - } - - // If we are the master node, finish parsing and setup our doc node - if (branch == this.tree) { - if (typeof(this.tree[this.dataKey]) != 'undefined') delete this.tree[this.dataKey]; - - if (num_keys(this.tree) > 1) { - this.throwParseError( 'Only one top-level node is allowed in document', first_key(this.tree) ); - return; - } - - this.documentNodeName = first_key(this.tree); - if (this.documentNodeName) { - this.tree = this.tree[this.documentNodeName]; - } - } -}; - -XML.prototype.throwParseError = function(key, tag) { - // log error and locate current line number in source XML document - var parsedSource = this.text.substring(0, this.patTag.lastIndex); - var eolMatch = parsedSource.match(/\n/g); - var lineNum = (eolMatch ? eolMatch.length : 0) + 1; - lineNum -= tag.match(/\n/) ? 
tag.match(/\n/g).length : 0; - - this.errors.push({ - type: 'Parse', - key: key, - text: '<' + tag + '>', - line: lineNum - }); - - // Throw actual error (must wrap parse in try/catch) - throw new Error( this.getLastError() ); -}; - -XML.prototype.error = function() { - // return number of errors - return this.errors.length; -}; - -XML.prototype.getError = function(error) { - // get formatted error - var text = ''; - if (!error) return ''; - - text = (error.type || 'General') + ' Error'; - if (error.code) text += ' ' + error.code; - text += ': ' + error.key; - - if (error.line) text += ' on line ' + error.line; - if (error.text) text += ': ' + error.text; - - return text; -}; - -XML.prototype.getLastError = function() { - // Get most recently thrown error in plain text format - if (!this.error()) return ''; - return this.getError( this.errors[this.errors.length - 1] ); -}; - -XML.prototype.parsePINode = function(tag) { - // Parse Processor Instruction Node, e.g. <?xml version="1.0"?> - if (!tag.match(this.patPINode)) { - this.throwParseError( "Malformed processor instruction", tag ); - return null; - } - - this.piNodeList.push( tag ); - return tag; -}; - -XML.prototype.parseCommentNode = function(tag) { - // Parse Comment Node, e.g. <!-- hello --> - var matches = null; - this.patNextClose.lastIndex = this.patTag.lastIndex; - - while (!tag.match(this.patEndComment)) { - if (matches = this.patNextClose.exec(this.text)) { - tag += '>' + matches[1]; - } - else { - this.throwParseError( "Unclosed comment tag", tag ); - return null; - } - } - - this.patTag.lastIndex = this.patNextClose.lastIndex; - return tag; -}; - -XML.prototype.parseDTDNode = function(tag) { - // Parse Document Type Descriptor Node, e.g. <!DOCTYPE ... > - var matches = null; - - if (tag.match(this.patExternalDTDNode)) { - // tag is external, and thus self-closing - this.dtdNodeList.push( tag ); - } - else if (tag.match(this.patInlineDTDNode)) { - // Tag is inline, so check for nested nodes. - this.patNextClose.lastIndex = this.patTag.lastIndex; - - while (!tag.match(this.patEndDTD)) { - if (matches = this.patNextClose.exec(this.text)) { - tag += '>' + matches[1]; - } - else { - this.throwParseError( "Unclosed DTD tag", tag ); - return null; - } - } - - this.patTag.lastIndex = this.patNextClose.lastIndex; - - // Make sure complete tag is well-formed, and push onto DTD stack. - if (tag.match(this.patDTDNode)) { - this.dtdNodeList.push( tag ); - } - else { - this.throwParseError( "Malformed DTD tag", tag ); - return null; - } - } - else { - this.throwParseError( "Malformed DTD tag", tag ); - return null; - } - - return tag; -}; - -XML.prototype.parseCDATANode = function(tag) { - // Parse CDATA Node, e.g. 
<![CDATA[Brooks & Shields]]> - var matches = null; - this.patNextClose.lastIndex = this.patTag.lastIndex; - - while (!tag.match(this.patEndCDATA)) { - if (matches = this.patNextClose.exec(this.text)) { - tag += '>' + matches[1]; - } - else { - this.throwParseError( "Unclosed CDATA tag", tag ); - return null; - } - } - - this.patTag.lastIndex = this.patNextClose.lastIndex; - - if (matches = tag.match(this.patCDATANode)) { - return matches[1]; - } - else { - this.throwParseError( "Malformed CDATA tag", tag ); - return null; - } -}; - -XML.prototype.getTree = function() { - // get reference to parsed XML tree - return this.tree; -}; - -XML.prototype.compose = function() { - // compose tree back into XML - var raw = compose_xml( this.tree, this.documentNodeName ); - var body = raw.substring( raw.indexOf("\n") + 1, raw.length ); - var xml = ''; - - if (this.piNodeList.length) { - for (var idx = 0, len = this.piNodeList.length; idx < len; idx++) { - xml += '<' + this.piNodeList[idx] + '>' + "\n"; - } - } - else { - xml += xml_header + "\n"; - } - - if (this.dtdNodeList.length) { - for (var idx = 0, len = this.dtdNodeList.length; idx < len; idx++) { - xml += '<' + this.dtdNodeList[idx] + '>' + "\n"; - } - } - - xml += body; - return xml; -}; - -// -// Static Utility Functions: -// - -var parse_xml = exports.parse = function parse_xml(text, opts) { - // turn text into XML tree quickly - if (!opts) opts = {}; - opts.text = text; - var parser = new XML(opts); - return parser.error() ? parser.getLastError() : parser.getTree(); -}; - -var trim = exports.trim = function trim(text) { - // strip whitespace from beginning and end of string - if (text == null) return ''; - - if (text && text.replace) { - text = text.replace(/^\s+/, ""); - text = text.replace(/\s+$/, ""); - } - - return text; -}; - -var encode_entities = exports.encodeEntities = function encode_entities(text) { - // Simple entitize function for composing XML - if (text == null) return ''; - - if (text && text.replace) { - text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST - text = text.replace(/</g, "&lt;"); - text = text.replace(/>/g, "&gt;"); - } - - return text; -}; - -var encode_attrib_entities = exports.encodeAttribEntities = function encode_attrib_entities(text) { - // Simple entitize function for composing XML attributes - if (text == null) return ''; - - if (text && text.replace) { - text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST - text = text.replace(/</g, "&lt;"); - text = text.replace(/>/g, "&gt;"); - text = text.replace(/\"/g, "&quot;"); - text = text.replace(/\'/g, "&apos;"); - } - - return text; -}; - -var decode_entities = exports.decodeEntities = function decode_entities(text) { - // Decode XML entities into raw ASCII - if (text == null) return ''; - - if (text && text.replace && text.match(/\&/)) { - text = text.replace(/\&lt\;/g, "<"); - text = text.replace(/\&gt\;/g, ">"); - text = text.replace(/\&quot\;/g, '"'); - text = text.replace(/\&apos\;/g, "'"); - text = text.replace(/\&amp\;/g, "&"); // MUST BE LAST - } - - return text; -}; - -var compose_xml = exports.stringify = function compose_xml(node, name, indent) { - // Compose node into XML including attributes - // Recurse for child nodes - var xml = ""; - - // If this is the root node, set the indent to 0 - // and setup the XML header (PI node) - if (!indent) { - indent = 0; - xml = xml_header + "\n"; - - if (!name) { - // no name provided, assume content is wrapped in it - name = first_key(node); - node = node[name]; - } - } - - // Setup the indent text - var indent_text
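/*
 * A quick round-trip illustration of the entity helpers above (the
 * strings are illustrative):
 *
 *     encode_entities('a < b & c');        // 'a &lt; b &amp; c'
 *     encode_attrib_entities('say "hi"');  // 'say &quot;hi&quot;'
 *     decode_entities('a &lt; b &amp; c'); // 'a < b & c'
 */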
= ""; - for (var k = 0; k < indent; k++) indent_text += indent_string; - - if ((typeof(node) == 'object') && (node != null)) { - // node is object -- now see if it is an array or hash - if (!node.length) { // what about zero-length array? - // node is hash - xml += indent_text + "<" + name; - - var num_keys = 0; - var has_attribs = 0; - for (var key in node) num_keys++; // there must be a better way... - - if (node["_Attribs"]) { - has_attribs = 1; - var sorted_keys = hash_keys_to_array(node["_Attribs"]).sort(); - for (var idx = 0, len = sorted_keys.length; idx < len; idx++) { - var key = sorted_keys[idx]; - xml += " " + key + "=\"" + encode_attrib_entities(node["_Attribs"][key]) + "\""; - } - } // has attribs - - if (num_keys > has_attribs) { - // has child elements - xml += ">"; - - if (node["_Data"]) { - // simple text child node - xml += encode_entities(node["_Data"]) + "</" + name + ">\n"; - } // just text - else { - xml += "\n"; - - var sorted_keys = hash_keys_to_array(node).sort(); - for (var idx = 0, len = sorted_keys.length; idx < len; idx++) { - var key = sorted_keys[idx]; - if ((key != "_Attribs") && key.match(re_valid_tag_name)) { - // recurse for node, with incremented indent value - xml += compose_xml( node[key], key, indent + 1 ); - } // not _Attribs key - } // foreach key - - xml += indent_text + "</" + name + ">\n"; - } // real children - } - else { - // no child elements, so self-close - xml += "/>\n"; - } - } // standard node - else { - // node is array - for (var idx = 0; idx < node.length; idx++) { - // recurse for node in array with same indent - xml += compose_xml( node[idx], name, indent ); - } - } // array of nodes - } // complex node - else { - // node is simple string - xml += indent_text + "<" + name + ">" + encode_entities(node) + "</" + name + ">\n"; - } // simple text node - - return xml; -}; - -var always_array = exports.alwaysArray = function always_array(obj, key) { - // if object is not array, return array containing object - // if key is passed, work like XMLalwaysarray() instead - if (key) { - if ((typeof(obj[key]) != 'object') || (typeof(obj[key].length) == 'undefined')) { - var temp = obj[key]; - delete obj[key]; - obj[key] = new Array(); - obj[key][0] = temp; - } - return null; - } - else { - if ((typeof(obj) != 'object') || (typeof(obj.length) == 'undefined')) { return [ obj ]; } - else return obj; - } -}; - -var hash_keys_to_array = exports.hashKeysToArray = function hash_keys_to_array(hash) { - // convert hash keys to array (discard values) - var array = []; - for (var key in hash) array.push(key); - return array; -}; - -var isa_hash = exports.isaHash = function isa_hash(arg) { - // determine if arg is a hash - return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) == 'undefined') ); -}; - -var isa_array = exports.isaArray = function isa_array(arg) { - // determine if arg is an array or is array-like - if (typeof(arg) == 'array') return true; - return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) != 'undefined') ); -}; - -var first_key = exports.firstKey = function first_key(hash) { - // return first key from hash (unordered) - for (var key in hash) return key; - return null; // no keys in hash -}; - -var num_keys = exports.numKeys = function num_keys(hash) { - // count the number of keys in a hash - var count = 0; - for (var a in hash) count++; - return count; -}; diff --git a/node.d/sma_webbox.node.js b/node.d/sma_webbox.node.js deleted file mode 100644 index 3d99943d4..000000000 --- a/node.d/sma_webbox.node.js +++ 
/dev/null @@ -1,237 +0,0 @@ -'use strict'; - -// This program will connect to one or more SMA Sunny Webboxes -// to get the Solar Power Generated (current, today, total). - -// example configuration in /etc/netdata/node.d/sma_webbox.conf -/* -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "name": "plant1", - "hostname": "10.0.1.1", - "update_every": 10 - }, - { - "name": "plant2", - "hostname": "10.0.2.1", - "update_every": 15 - } - ] -} -*/ - -var url = require('url'); -var http = require('http'); -var netdata = require('netdata'); - -if(netdata.options.DEBUG === true) netdata.debug('loaded ' + __filename + ' plugin'); - -var webbox = { - name: __filename, - enable_autodetect: true, - update_every: 1, - base_priority: 60000, - charts: {}, - - processResponse: function(service, data) { - if(data !== null) { - var r = JSON.parse(data); - - var d = { - 'GriPwr': { - unit: null, - value: null - }, - 'GriEgyTdy': { - unit: null, - value: null - }, - 'GriEgyTot': { - unit: null, - value: null - } - }; - - // parse the webbox response - // and put it in our d object - var found = 0; - var len = r.result.overview.length; - while(len--) { - var e = r.result.overview[len]; - if(typeof(d[e.meta]) !== 'undefined') { - found++; - d[e.meta].value = e.value; - d[e.meta].unit = e.unit; - } - } - - // add the service - if(found > 0 && service.added !== true) - service.commit(); - - // Grid Current Power Chart - if(d['GriPwr'].value !== null) { - var id = 'smawebbox_' + service.name + '.current'; - var chart = webbox.charts[id]; - - if(typeof chart === 'undefined') { - chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' Current Grid Power', // the title of the chart - units: d['GriPwr'].unit, // the units of the chart dimensions - family: 'now', // the family of the chart - context: 'smawebbox.grid_power', // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: webbox.base_priority + 1, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: { - 'GriPwr': { - id: 'GriPwr', // the unique id of the dimension - name: 'power', // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1, // the divisor - hidden: false // is hidden (boolean) - } - } - }; - - chart = service.chart(id, chart); - webbox.charts[id] = chart; - } - - service.begin(chart); - service.set('GriPwr', Math.round(d['GriPwr'].value)); - service.end(); - } - - if(d['GriEgyTdy'].value !== null) { - var id = 'smawebbox_' + service.name + '.today'; - var chart = webbox.charts[id]; - - if(typeof chart === 'undefined') { - chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' Today Grid Power', // the title of the chart - units: d['GriEgyTdy'].unit, // the units of the chart dimensions - family: 'today', // the family of the chart - context: 'smawebbox.grid_power_today', // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: webbox.base_priority + 2, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: { - 'GriEgyTdy': { - id: 'GriEgyTdy', // the unique id of the dimension - name: 'power', // the name of the dimension - 
algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1000, // the divisor - hidden: false // is hidden (boolean) - } - } - }; - - chart = service.chart(id, chart); - webbox.charts[id] = chart; - } - - service.begin(chart); - service.set('GriEgyTdy', Math.round(d['GriEgyTdy'].value * 1000)); - service.end(); - } - - if(d['GriEgyTot'].value !== null) { - var id = 'smawebbox_' + service.name + '.total'; - var chart = webbox.charts[id]; - - if(typeof chart === 'undefined') { - chart = { - id: id, // the unique id of the chart - name: '', // the unique name of the chart - title: service.name + ' Total Grid Power', // the title of the chart - units: d['GriEgyTot'].unit, // the units of the chart dimensions - family: 'total', // the family of the chart - context: 'smawebbox.grid_power_total', // the context of the chart - type: netdata.chartTypes.area, // the type of the chart - priority: webbox.base_priority + 3, // the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: { - 'GriEgyTot': { - id: 'GriEgyTot', // the unique id of the dimension - name: 'power', // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: 1, // the multiplier - divisor: 1000, // the divisor - hidden: false // is hidden (boolean) - } - } - }; - - chart = service.chart(id, chart); - webbox.charts[id] = chart; - } - - service.begin(chart); - service.set('GriEgyTot', Math.round(d['GriEgyTot'].value * 1000)); - service.end(); - } - } - }, - - // module.serviceExecute() - // this function is called only from this module - // its purpose is to prepare the request and call - // service.execute() - serviceExecute: function(name, hostname, update_every) { - if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': hostname: ' + hostname + ', update_every: ' + update_every); - - var service = netdata.service({ - name: name, - request: netdata.requestFromURL('http://' + hostname + '/rpc'), - update_every: update_every, - module: this - }); - service.postData = 'RPC={"proc":"GetPlantOverview","format":"JSON","version":"1.0","id":"1"}'; - service.request.method = 'POST'; - service.request.headers['Content-Length'] = service.postData.length; - - service.execute(this.processResponse); - }, - - configure: function(config) { - var added = 0; - - if(typeof(config.servers) !== 'undefined') { - var len = config.servers.length; - while(len--) { - if(typeof config.servers[len].update_every === 'undefined') - config.servers[len].update_every = this.update_every; - - if(config.servers[len].update_every < 5) - config.servers[len].update_every = 5; - - this.serviceExecute(config.servers[len].name, config.servers[len].hostname, config.servers[len].update_every); - added++; - } - } - - return added; - }, - - // module.update() - // this is called repeatedly to collect data, by calling - // service.execute() - update: function(service, callback) { - service.execute(function(serv, data) { - service.module.processResponse(serv, data); - callback(); - }); - }, -}; - -module.exports = webbox; diff --git a/node.d/snmp.node.js b/node.d/snmp.node.js deleted file mode 100644 index 3e7027958..000000000 --- a/node.d/snmp.node.js +++ /dev/null @@ -1,513 +0,0 @@ -'use strict'; -// netdata snmp module -// This program will connect to one or more SNMP Agents -// - -// example configuration in
/etc/netdata/node.d/snmp.conf -/* -{ - "enable_autodetect": false, - "update_every": 5, - "max_request_size": 50, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 10, - "max_request_size": 50, - "options": { "timeout": 10000 }, - "charts": { - "snmp_switch.bandwidth_port1": { - "title": "Switch Bandwidth for port 1", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "dimensions": { - "in": { - "oid": ".1.3.6.1.2.1.2.2.1.10.1", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": ".1.3.6.1.2.1.2.2.1.16.1", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - }, - "snmp_switch.bandwidth_port2": { - "title": "Switch Bandwidth for port 2", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "dimensions": { - "in": { - "oid": ".1.3.6.1.2.1.2.2.1.10.2", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": ".1.3.6.1.2.1.2.2.1.16.2", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -*/ - -// You can also give ranges of charts like the following. -// This will append 1-24 to id, title, oid (on each dimension) -// so that 24 charts will be created. -/* -{ - "enable_autodetect": false, - "update_every": 10, - "max_request_size": 50, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 10, - "max_request_size": 50, - "options": { "timeout": 20000 }, - "charts": { - "snmp_switch.bandwidth_port": { - "title": "Switch Bandwidth for port ", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "multiply_range": [ 1, 24 ], - "dimensions": { - "in": { - "oid": ".1.3.6.1.2.1.2.2.1.10.", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": ".1.3.6.1.2.1.2.2.1.16.", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -*/ - -var net_snmp = require('net-snmp'); -var extend = require('extend'); -var netdata = require('netdata'); - -if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, ' plugin'); - -netdata.processors.snmp = { - name: 'snmp', - - fixoid: function(oid) { - if(typeof oid !== 'string') - return oid; - - if(oid.charAt(0) === '.') - return oid.substring(1, oid.length); - - return oid; - }, - - prepare: function(service) { - var __DEBUG = netdata.options.DEBUG; - - if(typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) { - // this is the first time we see this service - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': preparing ' + this.name + ' OIDs'); - - // build an index of all OIDs - service.snmp_oids_index = {}; - var chart_keys = Object.keys(service.request.charts); - var chart_keys_len = chart_keys.length; - while(chart_keys_len--) { - var c = chart_keys[chart_keys_len]; - var chart = service.request.charts[c]; - - // for each chart - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c); - - if(typeof chart.titleoid !== 'undefined') { - service.snmp_oids_index[this.fixoid(chart.titleoid)] = { - type: 'title', - link: chart - }; - } - - var dim_keys = Object.keys(chart.dimensions); - var dim_keys_len = dim_keys.length; - while(dim_keys_len--) { - var d = dim_keys[dim_keys_len]; - var dim = 
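/*
 * fixoid() above strips a leading dot so that config OIDs match the form
 * net-snmp returns in responses, e.g. (illustrative):
 *
 *     netdata.processors.snmp.fixoid('.1.3.6.1.2.1.2.2.1.10.1');
 *     // -> '1.3.6.1.2.1.2.2.1.10.1'
 */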
chart.dimensions[d]; - - // for each dimension in the chart - - var oid = this.fixoid(dim.oid); - var oidname = this.fixoid(dim.oidname); - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c + ', dimension: ' + d + ', OID: ' + oid + ", OID name: " + oidname); - - // link it to the point we need to set the value to - service.snmp_oids_index[oid] = { - type: 'value', - link: dim - }; - - if(typeof oidname !== 'undefined') - service.snmp_oids_index[oidname] = { - type: 'name', - link: dim - }; - - // and set the value to null - dim.value = null; - } - } - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': indexed ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids_index)); - - // now create the array of OIDs needed by net-snmp - service.snmp_oids = Object.keys(service.snmp_oids_index); - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': final list of ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids)); - - service.snmp_oids_cleaned = 0; - } - else if(service.snmp_oids_cleaned === 0) { - service.snmp_oids_cleaned = 1; - - // the second time, keep only values - - service.snmp_oids = new Array(); - var oid_keys = Object.keys(service.snmp_oids_index); - var oid_keys_len = oid_keys.length; - while(oid_keys_len--) { - if (service.snmp_oids_index[oid_keys[oid_keys_len]].type === 'value') - service.snmp_oids.push(oid_keys[oid_keys_len]); - } - } - }, - - getdata: function(service, index, ok, failed, callback) { - var __DEBUG = netdata.options.DEBUG; - var that = this; - - if(index >= service.snmp_oids.length) { - callback((ok > 0)?{ ok: ok, failed: failed }:null); - return; - } - - var slice; - if(service.snmp_oids.length <= service.request.max_request_size) { - slice = service.snmp_oids; - index = service.snmp_oids.length; - } - else if(service.snmp_oids.length - index <= service.request.max_request_size) { - slice = service.snmp_oids.slice(index, service.snmp_oids.length); - index = service.snmp_oids.length; - } - else { - slice = service.snmp_oids.slice(index, index + service.request.max_request_size); - index += service.request.max_request_size; - } - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': making ' + slice.length + ' entries request, max is: ' + service.request.max_request_size); - - service.snmp_session.get(slice, function(error, varbinds) { - if(error) { - service.error('Received error = ' + netdata.stringify(error) + ' varbinds = ' + netdata.stringify(varbinds)); - - // make all values null - var len = slice.length; - while(len--) - service.snmp_oids_index[slice[len]].value = null; - } - else { - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': got valid ' + service.module.name + ' response: ' + netdata.stringify(varbinds)); - - var varbinds_len = varbinds.length; - for(var i = 0; i < varbinds_len ; i++) { - var value = null; - - if(net_snmp.isVarbindError(varbinds[i])) { - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid); - - service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i])); - value = null; - failed++; - } - else { - // test for Counter64 - // varbinds[i].type = net_snmp.ObjectType.Counter64; - // varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]); - - switch(varbinds[i].type) { - case 
net_snmp.ObjectType.OctetString: - if(service.snmp_oids_index[varbinds[i].oid].type !== 'title') { - // parse floating point values, exposed as strings - value = parseFloat(varbinds[i].value) * 1000; - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)"); - } - else { - // just use the string - value = varbinds[i].value; - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)"); - } - break; - - case net_snmp.ObjectType.Counter64: - // render the 64-bit counter buffer as a hex string - value = '0x' + varbinds[i].value.toString('hex'); - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)"); - break; - - case net_snmp.ObjectType.Integer: - case net_snmp.ObjectType.Counter: - case net_snmp.ObjectType.Gauge: - default: - value = varbinds[i].value; - if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)"); - break; - } - - ok++; - } - - if(value !== null) { - switch(service.snmp_oids_index[varbinds[i].oid].type) { - case 'title': service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value; break; - case 'name' : service.snmp_oids_index[varbinds[i].oid].link.name = value; break; - case 'value': service.snmp_oids_index[varbinds[i].oid].link.value = value; break; - } - } - } - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': finished ' + service.module.name + ' with ' + ok + ' successful and ' + failed + ' failed values'); - } - that.getdata(service, index, ok, failed, callback); - }); - }, - - process: function(service, callback) { - var __DEBUG = netdata.options.DEBUG; - - this.prepare(service); - - if(service.snmp_oids.length === 0) { - // no OIDs found for this service - - if(__DEBUG === true) - service.error('no OIDs to process.'); - - callback(null); - return; - } - - if(typeof service.snmp_session === 'undefined' || service.snmp_session === null) { - // no SNMP session has been created for this service - // the SNMP session is just the initialization of NET-SNMP - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + 
netdata.stringify(service.request.options)); - - // create the SNMP session - service.snmp_session = net_snmp.createSession (service.request.hostname, service.request.community, service.request.options); - - if(__DEBUG === true) - netdata.debug(service.module.name + ': ' + service.name + ': got ' + this.name + ' session: ' + netdata.stringify(service.snmp_session)); - - // if we later need traps, this is how to do it: - //service.snmp_session.trap(net_snmp.TrapType.LinkDown, function(error) { - // if(error) console.error('trap error: ' + netdata.stringify(error)); - //}); - } - - // do it, get the SNMP values for the sessions we need - this.getdata(service, 0, 0, 0, callback); - } -}; - -var snmp = { - name: __filename, - enable_autodetect: true, - update_every: 1, - base_priority: 50000, - - charts: {}, - - processResponse: function(service, data) { - if(data !== null) { - if(service.added !== true) - service.commit(); - - var chart_keys = Object.keys(service.request.charts); - var chart_keys_len = chart_keys.length; - for(var i = 0; i < chart_keys_len; i++) { - var c = chart_keys[i]; - - var chart = snmp.charts[c]; - if(typeof chart === 'undefined') { - chart = service.chart(c, service.request.charts[c]); - snmp.charts[c] = chart; - } - - service.begin(chart); - - var dimensions = service.request.charts[c].dimensions; - var dim_keys = Object.keys(dimensions); - var dim_keys_len = dim_keys.length; - for(var j = 0; j < dim_keys_len ; j++) { - var d = dim_keys[j]; - - if (dimensions[d].value !== null) { - if(typeof dimensions[d].offset === 'number') - service.set(d, dimensions[d].value + dimensions[d].offset); - else - service.set(d, dimensions[d].value); - } - } - - service.end(); - } - } - }, - - // module.serviceExecute() - // this function is called only from this module - // its purpose is to prepare the request and call - // netdata.serviceExecute() - serviceExecute: function(conf) { - var __DEBUG = netdata.options.DEBUG; - - if(__DEBUG === true) - netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', update_every: ' + conf.update_every); - - var service = netdata.service({ - name: conf.hostname, - request: conf, - update_every: conf.update_every, - module: this, - processor: netdata.processors.snmp - }); - - // multiply the charts, if required - var chart_keys = Object.keys(service.request.charts); - var chart_keys_len = chart_keys.length; - for( var i = 0; i < chart_keys_len ; i++ ) { - var c = chart_keys[i]; - var service_request_chart = service.request.charts[c]; - - if(__DEBUG === true) - netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', examining chart: ' + c); - - if(typeof service_request_chart.update_every === 'undefined') - service_request_chart.update_every = service.update_every; - - if(typeof service_request_chart.multiply_range !== 'undefined') { - var from = service_request_chart.multiply_range[0]; - var to = service_request_chart.multiply_range[1]; - var prio = service_request_chart.priority || 1; - - if(prio < snmp.base_priority) prio += snmp.base_priority; - - while(from <= to) { - var id = c + from.toString(); - var chart = extend(true, {}, service_request_chart); - chart.title += from.toString(); - - if(typeof chart.titleoid !== 'undefined') - chart.titleoid += from.toString(); - - chart.priority = prio++; - - var dim_keys = Object.keys(chart.dimensions); - var dim_keys_len = dim_keys.length; - for(var j = 0; j < dim_keys_len ; j++) { - var d = dim_keys[j]; - - chart.dimensions[d].oid += from.toString(); - - if(typeof 
chart.dimensions[d].oidname !== 'undefined') - chart.dimensions[d].oidname += from.toString(); - } - service.request.charts[id] = chart; - from++; - } - - delete service.request.charts[c]; - } - else { - if(service.request.charts[c].priority < snmp.base_priority) - service.request.charts[c].priority += snmp.base_priority; - } - } - - service.execute(this.processResponse); - }, - - configure: function(config) { - var added = 0; - - if(typeof config.max_request_size === 'undefined') - config.max_request_size = 50; - - if(typeof(config.servers) !== 'undefined') { - var len = config.servers.length; - while(len--) { - if(typeof config.servers[len].update_every === 'undefined') - config.servers[len].update_every = this.update_every; - - if(typeof config.servers[len].max_request_size === 'undefined') - config.servers[len].max_request_size = config.max_request_size; - - this.serviceExecute(config.servers[len]); - added++; - } - } - - return added; - }, - - // module.update() - // this is called repeatedly to collect data, by calling - // service.execute() - update: function(service, callback) { - service.execute(function(serv, data) { - service.module.processResponse(serv, data); - callback(); - }); - } -}; - -module.exports = snmp; diff --git a/node.d/stiebeleltron.node.js b/node.d/stiebeleltron.node.js deleted file mode 100644 index 77317f2fb..000000000 --- a/node.d/stiebeleltron.node.js +++ /dev/null @@ -1,196 +0,0 @@ -'use strict'; - -// This program will connect to one Stiebel Eltron ISG for heat pump heating -// to get the heat pump metrics. - -// example configuration in netdata/conf.d/node.d/stiebeleltron.conf.md - -var url = require("url"); -var http = require("http"); -var netdata = require("netdata"); - -netdata.debug("loaded " + __filename + " plugin"); - -var stiebeleltron = { - name: "Stiebel Eltron", - enable_autodetect: false, - update_every: 10, - base_priority: 60000, - charts: {}, - pages: {}, - - createBasicDimension: function (id, name, multiplier, divisor) { - return { - id: id, // the unique id of the dimension - name: name, // the name of the dimension - algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm - multiplier: multiplier, // the multiplier - divisor: divisor, // the divisor - hidden: false // is hidden (boolean) - }; - }, - - processResponse: function (service, html) { - if (html === null) return; - - // add the service - service.commit(); - - var page = stiebeleltron.pages[service.name]; - var categories = page.categories; - var categoriesCount = categories.length; - while (categoriesCount--) { - var context = { - html: html, - service: service, - category: categories[categoriesCount], - page: page, - chartDefinition: null, - dimension: null - }; - stiebeleltron.processCategory(context); - - } - }, - - processCategory: function (context) { - var charts = context.category.charts; - var chartCount = charts.length; - while (chartCount--) { - context.chartDefinition = charts[chartCount]; - stiebeleltron.processChart(context); - } - }, - - processChart: function (context) { - var dimensions = context.chartDefinition.dimensions; - var dimensionCount = dimensions.length; - context.service.begin(stiebeleltron.getChartFromContext(context)); - - while (dimensionCount--) { - context.dimension = dimensions[dimensionCount]; - stiebeleltron.processDimension(context); - } - context.service.end(); - }, - - processDimension: function (context) { - var dimension = context.dimension; - var match = new RegExp(dimension.regex).exec(context.html); - if (match 
=== null) return; - var value = match[1].replace(",", "."); - // most values have a single digit by default, which requires the values to be multiplied. This can be overridden via `digits`. - if (stiebeleltron.isDefined(dimension.digits)) { - value *= Math.pow(10, dimension.digits); - } else { - value *= 10; - } - context.service.set(stiebeleltron.getDimensionId(context), value); - }, - - getChartFromContext: function (context) { - var chartId = this.getChartId(context); - var chart = stiebeleltron.charts[chartId]; - if (stiebeleltron.isDefined(chart)) return chart; - - var chartDefinition = context.chartDefinition; - var service = context.service; - var dimensions = {}; - - var dimCount = chartDefinition.dimensions.length; - while (dimCount--) { - var dim = chartDefinition.dimensions[dimCount]; - var multiplier = 1; - var divisor = 10; - if (stiebeleltron.isDefined(dim.digits)) divisor = Math.pow(10, Math.max(0, dim.digits)); - if (stiebeleltron.isDefined(dim.multiplier)) multiplier = dim.multiplier; - if (stiebeleltron.isDefined(dim.divisor)) divisor = dim.divisor; - context.dimension = dim; - var dimId = this.getDimensionId(context); - dimensions[dimId] = this.createBasicDimension(dimId, dim.name, multiplier, divisor); - } - - chart = { - id: chartId, - name: '', - title: chartDefinition.title, - units: chartDefinition.unit, - family: context.category.name, - context: 'stiebeleltron.' + context.category.id + '.' + chartDefinition.id, - type: chartDefinition.type, - priority: stiebeleltron.base_priority + chartDefinition.prio,// the priority relative to others in the same family - update_every: service.update_every, // the expected update frequency of the chart - dimensions: dimensions - }; - chart = service.chart(chartId, chart); - stiebeleltron.charts[chartId] = chart; - - return chart; - }, - - // module.serviceExecute() - // this function is called only from this module - // its purpose is to prepare the request and call - // netdata.serviceExecute() - serviceExecute: function (name, uri, update_every) { - netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every); - - var service = netdata.service({ - name: name, - request: netdata.requestFromURL(uri), - update_every: update_every, - module: this - }); - service.execute(this.processResponse); - }, - - - configure: function (config) { - if (stiebeleltron.isUndefined(config.pages)) return 0; - var added = 0; - var pageCount = config.pages.length; - while (pageCount--) { - var page = config.pages[pageCount]; - // some validation - if (stiebeleltron.isUndefined(page.categories) || page.categories.length < 1) { - netdata.error("Your Stiebel Eltron config is invalid. Disabling plugin."); - return 0; - } - if (stiebeleltron.isUndefined(page.update_every)) page.update_every = this.update_every; - this.pages[page.name] = page; - this.serviceExecute(page.name, page.url, page.update_every); - added++; - } - return added; - }, - - // module.update() - // this is called repeatedly to collect data, by calling - // netdata.serviceExecute() - update: function (service, callback) { - service.execute(function (serv, data) { - service.module.processResponse(serv, data); - callback(); - }); - }, - - getChartId: function (context) { - return "stiebeleltron_" + context.page.id + - "." + context.category.id + - "." 
+ context.chartDefinition.id; - }, - - getDimensionId: function (context) { - return context.dimension.id; - }, - - isUndefined: function (value) { - return typeof value === 'undefined'; - }, - - isDefined: function (value) { - return typeof value !== 'undefined'; - } -}; - -module.exports = stiebeleltron; diff --git a/plugins.d/Makefile.am b/plugins.d/Makefile.am deleted file mode 100644 index 41e6d5366..000000000 --- a/plugins.d/Makefile.am +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_plugins_DATA = \ - README.md \ - $(NULL) - -dist_plugins_SCRIPTS = \ - alarm-email.sh \ - alarm-notify.sh \ - alarm-test.sh \ - cgroup-name.sh \ - cgroup-network-helper.sh \ - charts.d.dryrun-helper.sh \ - charts.d.plugin \ - fping.plugin \ - node.d.plugin \ - python.d.plugin \ - tc-qos-helper.sh \ - loopsleepms.sh.inc \ - $(NULL) diff --git a/plugins.d/Makefile.in b/plugins.d/Makefile.in deleted file mode 100644 index 059d68f6a..000000000 --- a/plugins.d/Makefile.in +++ /dev/null @@ -1,567 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = plugins.d -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_plugins_SCRIPTS) $(dist_plugins_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
-am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_plugins_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ 
-abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_plugins_DATA = \ - README.md \ - $(NULL) - -dist_plugins_SCRIPTS = \ - alarm-email.sh \ - alarm-notify.sh \ - alarm-test.sh \ - cgroup-name.sh \ - cgroup-network-helper.sh \ - charts.d.dryrun-helper.sh \ - charts.d.plugin \ - fping.plugin \ - node.d.plugin \ - python.d.plugin \ - tc-qos-helper.sh \ - loopsleepms.sh.inc \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugins.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu plugins.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -install-dist_pluginsDATA: $(dist_plugins_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pluginsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pluginsdir)" || exit $$?; \ - done - -uninstall-dist_pluginsDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in 
$$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_pluginsDATA install-dist_pluginsSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_pluginsDATA \ - install-dist_pluginsSCRIPTS install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/plugins.d/README.md b/plugins.d/README.md deleted file mode 100644 index 35b9a2d99..000000000 --- a/plugins.d/README.md +++ /dev/null @@ -1,236 +0,0 @@ -netdata plugins -=============== - -Any program that can print a few values to its standard output can become -a netdata plugin. - -There are 5 lines netdata parses; lines starting with: - -- `CHART` - create a new chart -- `DIMENSION` - add a dimension to the chart just created -- `BEGIN` - initialize data collection for a chart -- `SET` - set the value of a dimension for the initialized chart -- `END` - complete data collection for the initialized chart - -A single program can produce any number of charts with any number of dimensions -each. - -Charts can also be added at any time (not just at the beginning). - -### command line parameters - -The plugin should accept just **one** parameter: **the number of seconds it is -expected to update the values for its charts**. The value passed by netdata -to the plugin is controlled via its configuration file (so there is no need -for the plugin to handle this configuration option). - -The script can overwrite the update frequency. For example, the server may -request per second updates, but the script may overwrite this to one update -every 5 seconds. - -### environment variables - -There are a few environment variables that are set by `netdata` and are -available for the plugin to use. - -variable|description -:------:|:---------- -`NETDATA_CONFIG_DIR`|The directory where all netdata related configuration should be stored. If the plugin requires custom configuration, this is the place to save it. 
-`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored. -`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved. -`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. -`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata. -`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. -`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex, starting with `0x`) that enables certain netdata debugging features. -`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds. - - -# the output of the plugin - -The plugin should output instructions for netdata to its output (`stdout`). - -## CHART - -`CHART` defines a new chart. - -the template is: - -> CHART type.id name title units [family [category [charttype [priority [update_every]]]]] - - where: - - `type.id` - - uniquely identifies the chart, - this is what will be needed to add values to the chart - - - `name` - - is the name that will be presented to the user for this chart - - - `title` - - the text above the chart - - - `units` - - the label of the vertical axis of the chart, - all dimensions added to a chart should have the same units - of measurement - - - `family` - - is used to group charts together - (for example all eth0 charts should say: eth0), - if empty or missing, the `id` part of `type.id` will be used - - - `category` - - the section under which the chart will appear - (for example mem.ram should appear in the 'system' section), - the special word 'none' means: do not show this chart on the home page, - if empty or missing, the `type` part of `type.id` will be used - - - `charttype` - - one of `line`, `area` or `stacked`, - if empty or missing, `line` will be used - - - `priority` - - is the relative priority of the charts as rendered on the web page, - lower numbers make the charts appear before the ones with higher numbers, - if empty or missing, `1000` will be used - - - `update_every` - - overwrite the update frequency set by the server, - if empty or missing, the user configured value will be used - - -## DIMENSION - -`DIMENSION` defines a new dimension for the chart - -the template is: - -> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]] - - where: - - - `id` - - the `id` of this dimension (it is a text value, not numeric), - this will be needed later to add values to the dimension - - - `name` - - the name of the dimension as it will appear at the legend of the chart, - if empty or missing the `id` will be used - - - `algorithm` - - one of: - - * `absolute` - - the value is drawn as-is (interpolated to the second boundary), - if `algorithm` is empty, invalid or missing, `absolute` is used - - * `incremental` - - the value increases over time, - the difference from the last value is presented in the chart, - the server interpolates the value and calculates a per second figure - - * `percentage-of-absolute-row` - - the % of this value compared to the total of all dimensions - - * 
`percentage-of-incremental-row` - - the % of this value compared to the incremental total of - all dimensions - - - `multiplier` - - an integer value to multiply the collected value, - if empty or missing, `1` is used - - - `divisor` - - an integer value to divide the collected value, - if empty or missing, `1` is used - - - `hidden` - - giving the keyword `hidden` will make this dimension hidden, - it will take part in the calculations but will not be presented in the chart - - -## data collection - -Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines - -> BEGIN type.id [microseconds] - - - `type.id` - - is the unique identification of the chart (as given in `CHART`) - - - `microseconds` - - is the number of microseconds since the last update of the chart, - it is optional. - - Under heavy system load, the system may have some latency transferring - data from the plugins to netdata via the pipe. This number improves - accuracy significantly, since the plugin is able to calculate the - duration between its iterations better than netdata. - - The first time the plugin is started, no microseconds should be given - to netdata. - -> SET id = value - - - `id` - - is the unique identification of the dimension (of the chart just begun) - - - `value` - - is the collected value - -> END - - END does not take any parameters, it commits the collected values to the chart. - -More `SET` lines may appear to update all the dimensions of the chart, -all of them in one `BEGIN` -> `END` block. - -All `SET` lines within a single `BEGIN` -> `END` block have to refer to the -same chart. - -If more charts need to be updated, each chart should have its own -`BEGIN` -> `SET` -> `END` block. - -If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it, -it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore -the last `BEGIN` command. - -If a plugin does not behave properly (outputs invalid lines, or does not -follow these guidelines), it will be disabled by netdata. - - -### collected values - -netdata will collect any **signed** value in the 64bit range: -`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807` - -Internally, all calculations are made using 128 bit double precision and are -stored in 30 bits as floating point. - -If a value is not collected, leave it empty, like this: - -`SET id = ` - -or do not output the line at all. diff --git a/plugins.d/alarm-email.sh b/plugins.d/alarm-email.sh deleted file mode 100755 index df083c655..000000000 --- a/plugins.d/alarm-email.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -# OBSOLETE - REPLACED WITH -# alarm-notify.sh - -${0/alarm-email.sh/alarm-notify.sh} "${@}" diff --git a/plugins.d/alarm-notify.sh b/plugins.d/alarm-notify.sh deleted file mode 100755 index 3e23a164f..000000000 --- a/plugins.d/alarm-notify.sh +++ /dev/null @@ -1,1919 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! 
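-# -# As a quick illustration of the plugin protocol documented in -# plugins.d/README.md above, an external plugin can be as small as the -# following sketch (the chart and dimension names are illustrative only): -# -# #!/usr/bin/env bash -# update_every="${1:-1}" # netdata passes the update frequency in seconds -# echo "CHART example.random '' 'A random number' 'value'" -# echo "DIMENSION random '' absolute 1 1" -# while true; do -# echo "BEGIN example.random" -# echo "SET random = ${RANDOM}" -# echo "END" -# sleep "${update_every}" -# done -# 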
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# -# Script to send alarm notifications for netdata -# -# Features: -# - multiple notification methods -# - multiple roles per alarm -# - multiple recipients per role -# - severity filtering per recipient -# -# Supported notification methods: -# - emails by @ktsaou -# - slack.com notifications by @ktsaou -# - alerta.io notifications by @kattunga -# - discordapp.com notifications by @lowfive -# - pushover.net notifications by @ktsaou -# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070 -# - telegram.org notifications by @hashworks #1002 -# - twilio.com notifications by Levi Blaney @shadycuz #1211 -# - kafka notifications by @ktsaou #1342 -# - pagerduty.com notifications by Jim Cooley @jimcooley #1373 -# - messagebird.com notifications by @tech_no_logical #1453 -# - hipchat notifications by @ktsaou #1561 -# - custom notifications by @ktsaou - -# ----------------------------------------------------------------------------- -# testing notifications - -if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ] -then - if [ "${2}" = "test" ] - then - recipient="${1}" - else - recipient="${2}" - fi - - [ -z "${recipient}" ] && recipient="sysadmin" - - id=1 - last="CLEAR" - for x in "WARNING" "CRITICAL" "CLEAR" - do - echo >&2 - echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}" - - "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" - if [ $? -ne 0 ] - then - echo >&2 "# FAILED" - else - echo >&2 "# OK" - fi - - last="${x}" - id=$((id + 1)) - done - - exit 1 -fi - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - -# ----------------------------------------------------------------------------- - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=${NETDATA_ALARM_NOTIFY_DEBUG-0} -debug() { - [ "${debug}" = "1" ] && log DEBUG "${@}" -} - -docurl() { - if [ -z "${curl}" ] - then - error "\${curl} is unset." - return 1 - fi - - if [ "${debug}" = "1" ] - then - echo >&2 "--- BEGIN curl command ---" - printf >&2 "%q " ${curl} "${@}" - echo >&2 - echo >&2 "--- END curl command ---" - - local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX) - local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}") - local ret=$? - echo >&2 "--- BEGIN received response ---" - cat >&2 "${out}" - echo >&2 - echo >&2 "--- END received response ---" - echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}" - rm "${out}" - echo "${code}" - return ${ret} - fi - - ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}" - return $? 
-} - -# ----------------------------------------------------------------------------- -# this is to be overwritten by the config file - -custom_sender() { - info "not sending custom notification for ${status} of '${host}.${chart}.${name}'" -} - - -# ----------------------------------------------------------------------------- - -# check for BASH v4+ (required for associative arrays) -[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \ - fatal "BASH version 4 or later is required (this is ${BASH_VERSION})." - -# ----------------------------------------------------------------------------- -# defaults to allow running this script by hand - -[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata" -[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata" -[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io" - -# ----------------------------------------------------------------------------- -# parse command line parameters - -roles="${1}" # the roles that should be notified for this event -host="${2}" # the host that generated this event -unique_id="${3}" # the unique id of this event -alarm_id="${4}" # the unique id of the alarm that generated this event -event_id="${5}" # the incremental id of the event, for this alarm id -when="${6}" # the timestamp at which this event occurred -name="${7}" # the name of the alarm, as given in netdata health.d entries -chart="${8}" # the name of the chart (type.id) -family="${9}" # the family of the chart -status="${10}" # the current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL -old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL -value="${12}" # the current value of the alarm -old_value="${13}" # the previous value of the alarm -src="${14}" # the line number and file where the alarm has been configured -duration="${15}" # the duration in seconds of the previous alarm state -non_clear_duration="${16}" # the total duration in seconds this is/was non-clear -units="${17}" # the units of the value -info="${18}" # a short description of the alarm -value_string="${19}" # friendly value (with units) -old_value_string="${20}" # friendly old value (with units) - -# ----------------------------------------------------------------------------- -# find a suitable hostname to use, if netdata did not supply a hostname - -this_host=$(hostname -s 2>/dev/null) -[ -z "${host}" ] && host="${this_host}" - -# ----------------------------------------------------------------------------- -# screen out statuses for which we don't need to send a notification - -# don't do anything if this is not WARNING, CRITICAL or CLEAR -if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ] -then - info "not sending notification for ${status} of '${host}.${chart}.${name}'" - exit 1 -fi - -# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL -if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ] -then - info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})" - exit 1 -fi - -# ----------------------------------------------------------------------------- -# load configuration - -# By default fetch images from the global public registry. -# This is required by default, since all notification methods need to download -# images via the Internet, and private registries might not be reachable. 
-# This can be overwritten in the configuration file. -images_base_url="https://registry.my-netdata.io" - -# curl options to use -curl_options= - -# needed commands -# if empty they will be searched in the system path -curl= -sendmail= - -# enable / disable features -SEND_SLACK="YES" -SEND_ALERTA="YES" -SEND_FLOCK="YES" -SEND_DISCORD="YES" -SEND_PUSHOVER="YES" -SEND_TWILIO="YES" -SEND_HIPCHAT="YES" -SEND_MESSAGEBIRD="YES" -SEND_KAVENEGAR="YES" -SEND_TELEGRAM="YES" -SEND_EMAIL="YES" -SEND_PUSHBULLET="YES" -SEND_KAFKA="YES" -SEND_PD="YES" -SEND_IRC="YES" -SEND_CUSTOM="YES" - -# slack configs -SLACK_WEBHOOK_URL= -DEFAULT_RECIPIENT_SLACK= -declare -A role_recipients_slack=() - -# alerta configs -ALERTA_WEBHOOK_URL= -ALERTA_API_KEY= -DEFAULT_RECIPIENT_ALERTA= -declare -A role_recipients_alerta=() - -# flock configs -FLOCK_WEBHOOK_URL= -DEFAULT_RECIPIENT_FLOCK= -declare -A role_recipients_flock=() - -# discord configs -DISCORD_WEBHOOK_URL= -DEFAULT_RECIPIENT_DISCORD= -declare -A role_recipients_discord=() - -# pushover configs -PUSHOVER_APP_TOKEN= -DEFAULT_RECIPIENT_PUSHOVER= -declare -A role_recipients_pushover=() - -# pushbullet configs -PUSHBULLET_ACCESS_TOKEN= -PUSHBULLET_SOURCE_DEVICE= -DEFAULT_RECIPIENT_PUSHBULLET= -declare -A role_recipients_pushbullet=() - -# twilio configs -TWILIO_ACCOUNT_SID= -TWILIO_ACCOUNT_TOKEN= -TWILIO_NUMBER= -DEFAULT_RECIPIENT_TWILIO= -declare -A role_recipients_twilio=() - -# hipchat configs -HIPCHAT_SERVER= -HIPCHAT_AUTH_TOKEN= -DEFAULT_RECIPIENT_HIPCHAT= -declare -A role_recipients_hipchat=() - -# messagebird configs -MESSAGEBIRD_ACCESS_KEY= -MESSAGEBIRD_NUMBER= -DEFAULT_RECIPIENT_MESSAGEBIRD= -declare -A role_recipients_messagebird=() - -# kavenegar configs -KAVENEGAR_API_KEY="" -KAVENEGAR_SENDER="" -DEFAULT_RECIPIENT_KAVENEGAR="" -declare -A role_recipients_kavenegar=() - -# telegram configs -TELEGRAM_BOT_TOKEN= -DEFAULT_RECIPIENT_TELEGRAM= -declare -A role_recipients_telegram=() - -# kafka configs -KAFKA_URL= -KAFKA_SENDER_IP= - -# pagerduty.com configs -PD_SERVICE_KEY= -DEFAULT_RECIPIENT_PD= -declare -A role_recipients_pd=() - -# custom configs -DEFAULT_RECIPIENT_CUSTOM= -declare -A role_recipients_custom=() - -# email configs -EMAIL_SENDER= -DEFAULT_RECIPIENT_EMAIL="root" -EMAIL_CHARSET=$(locale charmap 2>/dev/null) -declare -A role_recipients_email=() - -# irc configs -IRC_NICKNAME= -IRC_REALNAME= -DEFAULT_RECIPIENT_IRC= -IRC_NETWORK= -declare -A role_recipients_irc=() - -# load the user configuration -# this will overwrite the variables above -if [ -f "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" ] - then - source "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" -else - error "Cannot find file ${NETDATA_CONFIG_DIR}/health_alarm_notify.conf. Using internal defaults." -fi - -# If we didn't autodetect the character set for e-mail and it wasn't -# set by the user, we need to set it to a reasonable default. UTF-8 -# should be correct for almost all modern UNIX systems. 
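-# For example, on a machine with a UTF-8 locale the detection above yields: -# -# $ locale charmap -# UTF-8 -# -# while a stripped-down system may report a legacy charmap (e.g. the C locale -# typically reports ANSI_X3.4-1968), or print nothing at all when the command -# fails; only in the latter case does the fallback below apply. 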
-if [ -z "${EMAIL_CHARSET}" ] - then - EMAIL_CHARSET="UTF-8" -fi - -# ----------------------------------------------------------------------------- -# filter a recipient based on alarm event severity - -filter_recipient_by_criticality() { - local method="${1}" x="${2}" r s - shift - - r="${x/|*/}" # the recipient - s="${x/*|/}" # the severity required for notifying this recipient - - # no severity filtering for this person - [ "${r}" = "${s}" ] && return 0 - - # the severity is invalid - s="${s^^}" - if [ "${s}" != "CRITICAL" ] - then - error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported." - return 0 - fi - - # create the status tracking directory for this user - [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \ - mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" - - case "${status}" in - CRITICAL) - # make sure he will get future notifications for this alarm too - touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" - debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)" - return 0 - ;; - - WARNING) - if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] - then - # we do not remove the file, so that he will get future notifications of this alarm - debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)" - return 0 - fi - ;; - - *) - if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] - then - # remove the file, so that he will only receive notifications for CRITICAL states for this alarm - rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" - debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)" - return 0 - fi - ;; - esac - - debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification" - return 1 -} - -# ----------------------------------------------------------------------------- -# find the recipients' addresses per method - -declare -A arr_slack=() -declare -A arr_alerta=() -declare -A arr_flock=() -declare -A arr_discord=() -declare -A arr_pushover=() -declare -A arr_pushbullet=() -declare -A arr_twilio=() -declare -A arr_hipchat=() -declare -A arr_telegram=() -declare -A arr_pd=() -declare -A arr_email=() -declare -A arr_custom=() -declare -A arr_messagebird=() -declare -A arr_kavenegar=() -declare -A arr_irc=() - -# netdata may call us with multiple roles, and roles may have multiple but -# overlapping recipients - so, here we find the unique recipients. 
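-# -# For example (the role and recipient names here are hypothetical), a line like -# the following in health_alarm_notify.conf: -# -# role_recipients_email[webmaster]="bob alice|critical" -# -# makes 'bob' receive all notifications for the 'webmaster' role, while 'alice' -# is notified only when an alarm turns CRITICAL (and for the status changes -# that follow it, as implemented in filter_recipient_by_criticality() above). -# 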
-for x in ${roles//,/ } -do - # the roles 'silent' and 'disabled' mean: - # don't send a notification for this role - [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue - - # email - a="${role_recipients_email[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_EMAIL}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality email "${r}" && arr_email[${r/|*/}]="1" - done - - # pushover - a="${role_recipients_pushover[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHOVER}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushover "${r}" && arr_pushover[${r/|*/}]="1" - done - - # pushbullet - a="${role_recipients_pushbullet[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHBULLET}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushbullet "${r}" && arr_pushbullet[${r/|*/}]="1" - done - - # twilio - a="${role_recipients_twilio[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TWILIO}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality twilio "${r}" && arr_twilio[${r/|*/}]="1" - done - - # hipchat - a="${role_recipients_hipchat[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_HIPCHAT}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality hipchat "${r}" && arr_hipchat[${r/|*/}]="1" - done - - # messagebird - a="${role_recipients_messagebird[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality messagebird "${r}" && arr_messagebird[${r/|*/}]="1" - done - - # kavenegar - a="${role_recipients_kavenegar[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_KAVENEGAR}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality kavenegar "${r}" && arr_kavenegar[${r/|*/}]="1" - done - - # telegram - a="${role_recipients_telegram[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TELEGRAM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality telegram "${r}" && arr_telegram[${r/|*/}]="1" - done - - # slack - a="${role_recipients_slack[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SLACK}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1" - done - - # alerta - a="${role_recipients_alerta[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality alerta "${r}" && arr_alerta[${r/|*/}]="1" - done - - # flock - a="${role_recipients_flock[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLOCK}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality flock "${r}" && arr_flock[${r/|*/}]="1" - done - - # discord - a="${role_recipients_discord[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_DISCORD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality discord "${r}" && arr_discord[${r/|*/}]="1" - done - - # pagerduty.com - a="${role_recipients_pd[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1" - done - - # irc - a="${role_recipients_irc[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && 
arr_irc[${r/|*/}]="1" - done - - # custom - a="${role_recipients_custom[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1" - done - -done - -# build the list of slack recipients (channels) -to_slack="${!arr_slack[*]}" -[ -z "${to_slack}" ] && SEND_SLACK="NO" - -# build the list of alerta recipients (channels) -to_alerta="${!arr_alerta[*]}" -[ -z "${to_alerta}" ] && SEND_ALERTA="NO" - -# build the list of flock recipients (channels) -to_flock="${!arr_flock[*]}" -[ -z "${to_flock}" ] && SEND_FLOCK="NO" - -# build the list of discord recipients (channels) -to_discord="${!arr_discord[*]}" -[ -z "${to_discord}" ] && SEND_DISCORD="NO" - -# build the list of pushover recipients (user tokens) -to_pushover="${!arr_pushover[*]}" -[ -z "${to_pushover}" ] && SEND_PUSHOVER="NO" - -# build the list of pushbullet recipients (user tokens) -to_pushbullet="${!arr_pushbullet[*]}" -[ -z "${to_pushbullet}" ] && SEND_PUSHBULLET="NO" - -# build the list of twilio recipients (phone numbers) -to_twilio="${!arr_twilio[*]}" -[ -z "${to_twilio}" ] && SEND_TWILIO="NO" - -# build the list of hipchat recipients (rooms) -to_hipchat="${!arr_hipchat[*]}" -[ -z "${to_hipchat}" ] && SEND_HIPCHAT="NO" - -# build the list of messagebird recipients (phone numbers) -to_messagebird="${!arr_messagebird[*]}" -[ -z "${to_messagebird}" ] && SEND_MESSAGEBIRD="NO" - -# build the list of kavenegar recipients (phone numbers) -to_kavenegar="${!arr_kavenegar[*]}" -[ -z "${to_kavenegar}" ] && SEND_KAVENEGAR="NO" - -# build the list of telegram recipients (chat ids) -to_telegram="${!arr_telegram[*]}" -[ -z "${to_telegram}" ] && SEND_TELEGRAM="NO" - -# build the list of pagerduty recipients (service keys) -to_pd="${!arr_pd[*]}" -[ -z "${to_pd}" ] && SEND_PD="NO" - -# build the list of custom recipients -to_custom="${!arr_custom[*]}" -[ -z "${to_custom}" ] && SEND_CUSTOM="NO" - -# build the list of email recipients (email addresses) -to_email= -for x in "${!arr_email[@]}" -do - [ ! 
-z "${to_email}" ] && to_email="${to_email}, " - to_email="${to_email}${x}" -done -[ -z "${to_email}" ] && SEND_EMAIL="NO" - -# build the list of irc recipients (channels) -to_irc="${!arr_irc[*]}" -[ -z "${to_irc}" ] && SEND_IRC="NO" - -# ----------------------------------------------------------------------------- -# verify the delivery methods supported - -# check slack -[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO" - -# check alerta -[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO" - -# check flock -[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO" - -# check discord -[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO" - -# check pushover -[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO" - -# check pushbullet -[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO" - -# check twilio -[ -z "${TWILIO_ACCOUNT_TOKEN}" -o -z "${TWILIO_ACCOUNT_SID}" -o -z "${TWILIO_NUMBER}" ] && SEND_TWILIO="NO" - -# check hipchat -[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO" - -# check messagebird -[ -z "${MESSAGEBIRD_ACCESS_KEY}" -o -z "${MESSAGEBIRD_NUMBER}" ] && SEND_MESSAGEBIRD="NO" - -# check kavenegar -[ -z "${KAVENEGAR_API_KEY}" -o -z "${KAVENEGAR_SENDER}" ] && SEND_KAVENEGAR="NO" - -# check telegram -[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO" - -# check kafka -[ -z "${KAFKA_URL}" -o -z "${KAFKA_SENDER_IP}" ] && SEND_KAFKA="NO" - -# check irc -[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO" - -# check pagerduty.com -# if we need pd-send, check for the pd-send command -# https://www.pagerduty.com/docs/guides/agent-install-guide/ -if [ "${SEND_PD}" = "YES" ] - then - pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)" - if [ -z "${pd_send}" ] - then - error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications." - SEND_PD="NO" - fi -fi - -# if we need curl, check for the curl command -if [ \( \ - "${SEND_PUSHOVER}" = "YES" \ - -o "${SEND_SLACK}" = "YES" \ - -o "${SEND_ALERTA}" = "YES" \ - -o "${SEND_FLOCK}" = "YES" \ - -o "${SEND_DISCORD}" = "YES" \ - -o "${SEND_HIPCHAT}" = "YES" \ - -o "${SEND_TWILIO}" = "YES" \ - -o "${SEND_MESSAGEBIRD}" = "YES" \ - -o "${SEND_KAVENEGAR}" = "YES" \ - -o "${SEND_TELEGRAM}" = "YES" \ - -o "${SEND_PUSHBULLET}" = "YES" \ - -o "${SEND_KAFKA}" = "YES" \ - -o "${SEND_CUSTOM}" = "YES" \ - \) -a -z "${curl}" ] - then - curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" - if [ -z "${curl}" ] - then - error "Cannot find curl command in the system path. Disabling all curl based notifications." - SEND_PUSHOVER="NO" - SEND_PUSHBULLET="NO" - SEND_TELEGRAM="NO" - SEND_SLACK="NO" - SEND_ALERTA="NO" - SEND_FLOCK="NO" - SEND_DISCORD="NO" - SEND_TWILIO="NO" - SEND_HIPCHAT="NO" - SEND_MESSAGEBIRD="NO" - SEND_KAVENEGAR="NO" - SEND_KAFKA="NO" - SEND_CUSTOM="NO" - fi -fi - -# if we need sendmail, check for the sendmail command -if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ] - then - sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)" - if [ -z "${sendmail}" ] - then - debug "Cannot find sendmail command in the system path. Disabling email notifications." 
- SEND_EMAIL="NO" - fi -fi - -# check that we have at least a method enabled -if [ "${SEND_EMAIL}" != "YES" \ - -a "${SEND_PUSHOVER}" != "YES" \ - -a "${SEND_TELEGRAM}" != "YES" \ - -a "${SEND_SLACK}" != "YES" \ - -a "${SEND_ALERTA}" != "YES" \ - -a "${SEND_FLOCK}" != "YES" \ - -a "${SEND_DISCORD}" != "YES" \ - -a "${SEND_TWILIO}" != "YES" \ - -a "${SEND_HIPCHAT}" != "YES" \ - -a "${SEND_MESSAGEBIRD}" != "YES" \ - -a "${SEND_KAVENEGAR}" != "YES" \ - -a "${SEND_PUSHBULLET}" != "YES" \ - -a "${SEND_KAFKA}" != "YES" \ - -a "${SEND_PD}" != "YES" \ - -a "${SEND_CUSTOM}" != "YES" \ - -a "${SEND_IRC}" != "YES" \ - ] - then - fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'." -fi - -# ----------------------------------------------------------------------------- -# get the date the alarm happened - -date="$(date --date=@${when} 2>/dev/null)" -[ -z "${date}" ] && date="$(date 2>/dev/null)" - -# ----------------------------------------------------------------------------- -# function to URL encode a string - -urlencode() { - local string="${1}" strlen encoded pos c o - - strlen=${#string} - for (( pos=0 ; pos<strlen ; pos++ )) - do - c=${string:${pos}:1} - case "${c}" in - [-_.~a-zA-Z0-9]) - o="${c}" - ;; - - *) - printf -v o '%%%02x' "'${c}" - ;; - esac - encoded+="${o}" - done - - REPLY="${encoded}" - echo "${REPLY}" -} - -# ----------------------------------------------------------------------------- -# function to convert a duration in seconds to a human-readable duration -# using DAYS, HOURS, MINUTES, SECONDS - -duration4human() { - local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret - d=$(( s / 86400 )) - s=$(( s - (d * 86400) )) - h=$(( s / 3600 )) - s=$(( s - (h * 3600) )) - m=$(( s / 60 )) - s=$(( s - (m * 60) )) - - if [ ${d} -gt 0 ] - then - [ ${m} -ge 30 ] && h=$(( h + 1 )) - [ ${d} -gt 1 ] && ds="days" - [ ${h} -gt 1 ] && hs="hours" - if [ ${h} -gt 0 ] - then - ret="${d} ${ds} and ${h} ${hs}" - else - ret="${d} ${ds}" - fi - elif [ ${h} -gt 0 ] - then - [ ${s} -ge 30 ] && m=$(( m + 1 )) - [ ${h} -gt 1 ] && hs="hours" - [ ${m} -gt 1 ] && ms="minutes" - if [ ${m} -gt 0 ] - then - ret="${h} ${hs} and ${m} ${ms}" - else - ret="${h} ${hs}" - fi - elif [ ${m} -gt 0 ] - then - [ ${m} -gt 1 ] && ms="minutes" - [ ${s} -gt 1 ] && ss="seconds" - if [ ${s} -gt 0 ] - then - ret="${m} ${ms} and ${s} ${ss}" - else - ret="${m} ${ms}" - fi - else - [ ${s} -gt 1 ] && ss="seconds" - ret="${s} ${ss}" - fi - - REPLY="${ret}" - echo "${REPLY}" -} - -# ----------------------------------------------------------------------------- -# email sender - -send_email() { - local ret= opts= - if [ "${SEND_EMAIL}" = "YES" ] - then - - if [ ! 
-z "${EMAIL_SENDER}" ] - then - if [[ "${EMAIL_SENDER}" =~ \".*\"\ \<.*\> ]] - then - # the name includes single quotes - opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)" - elif [[ "${EMAIL_SENDER}" =~ \'.*\'\ \<.*\> ]] - then - # the name includes double quotes - opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)" - elif [[ "${EMAIL_SENDER}" =~ .*\ \<.*\> ]] - then - # the name does not have any quotes - opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F '$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)'" - else - # no name at all - opts=" -f ${EMAIL_SENDER}" - fi - fi - - if [ "${debug}" = "1" ] - then - echo >&2 "--- BEGIN sendmail command ---" - printf >&2 "%q " "${sendmail}" -t ${opts} - echo >&2 - echo >&2 "--- END sendmail command ---" - fi - - "${sendmail}" -t ${opts} - ret=$? - - if [ ${ret} -eq 0 ] - then - info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'" - return 0 - else - error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}." - return 1 - fi - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# pushover sender - -send_pushover() { - local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority - - if [ "${SEND_PUSHOVER}" = "YES" -a ! -z "${apptoken}" -a ! -z "${usertokens}" -a ! -z "${title}" -a ! -z "${message}" ] - then - - # https://pushover.net/api - priority=-2 - case "${status}" in - CLEAR) priority=-1;; # low priority: no sound or vibration - WARNING) priority=0;; # normal priority: respect quiet hours - CRITICAL) priority=1;; # high priority: bypass quiet hours - *) priority=-2;; # lowest priority: no notification at all - esac - - for user in ${usertokens} - do - httpcode=$(docurl \ - --form-string "token=${apptoken}" \ - --form-string "user=${user}" \ - --form-string "html=1" \ - --form-string "title=${title}" \ - --form-string "message=${message}" \ - --form-string "timestamp=${when}" \ - --form-string "url=${url}" \ - --form-string "url_title=Open netdata dashboard to view the alarm" \ - --form-string "priority=${priority}" \ - https://api.pushover.net/1/messages.json) - - if [ "${httpcode}" = "200" ] - then - info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# pushbullet sender - -send_pushbullet() { - local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user - if [ "${SEND_PUSHBULLET}" = "YES" -a ! -z "${userapikey}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! 
-z "${title}" ] - then - #https://docs.pushbullet.com/#create-push - for user in ${recipients} - do - httpcode=$(docurl \ - --header 'Access-Token: '${userapikey}'' \ - --header 'Content-Type: application/json' \ - --data-binary @<(cat <<EOF - {"title": "${title}", - "type": "link", - "email": "${user}", - "body": "$( echo -n ${message})", - "url": "${url}", - "source_device_iden": "${source_device}"} -EOF - ) "https://api.pushbullet.com/v2/pushes" -X POST) - - if [ "${httpcode}" = "200" ] - then - info "sent pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# kafka sender - -send_kafka() { - local httpcode sent=0 - if [ "${SEND_KAFKA}" = "YES" ] - then - httpcode=$(docurl -X POST \ - --data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \ - "${KAFKA_URL}") - - if [ "${httpcode}" = "204" ] - then - info "sent kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}'" - sent=$((sent + 1)) - else - error "failed to send kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}' with HTTP error code ${httpcode}." - fi - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# pagerduty.com sender - -send_pd() { - local recipients="${1}" sent=0 - unset t - case ${status} in - CLEAR) t='resolve';; - WARNING) t='trigger';; - CRITICAL) t='trigger';; - esac - - if [ ${SEND_PD} = "YES" -a ! -z "${t}" ] - then - for PD_SERVICE_KEY in ${recipients} - do - d="${status} ${name} = ${value_string} - ${host}, ${family}" - ${pd_send} -k ${PD_SERVICE_KEY} \ - -t ${t} \ - -d "${d}" \ - -i ${host}:${chart}:${name} \ - -f 'info'="${info}" \ - -f 'value_w_units'="${value_string}" \ - -f 'when'="${when}" \ - -f 'duration'="${duration}" \ - -f 'roles'="${roles}" \ - -f 'host'="${host}" \ - -f 'unique_id'="${unique_id}" \ - -f 'alarm_id'="${alarm_id}" \ - -f 'event_id'="${event_id}" \ - -f 'name'="${name}" \ - -f 'chart'="${chart}" \ - -f 'family'="${family}" \ - -f 'status'="${status}" \ - -f 'old_status'="${old_status}" \ - -f 'value'="${value}" \ - -f 'old_value'="${old_value}" \ - -f 'src'="${src}" \ - -f 'non_clear_duration'="${non_clear_duration}" \ - -f 'units'="${units}" - retval=$? - if [ ${retval} -eq 0 ] - then - info "sent pagerduty.com notification for host ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}....: ${d}" - sent=$((sent + 1)) - else - error "failed to send pagerduty.com notification for ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}" - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# twilio sender - -send_twilio() { - local accountsid="${1}" accounttoken="${2}" twilionumber="${3}" recipients="${4}" title="${5}" message="${6}" httpcode sent=0 user - if [ "${SEND_TWILIO}" = "YES" -a ! -z "${accountsid}" -a ! 
-z "${accounttoken}" -a ! -z "${twilionumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] - then - #https://www.twilio.com/packages/labs/code/bash/twilio-sms - for user in ${recipients} - do - httpcode=$(docurl -X POST \ - --data-urlencode "From=${twilionumber}" \ - --data-urlencode "To=${user}" \ - --data-urlencode "Body=${title} ${message}" \ - -u "${accountsid}:${accounttoken}" \ - "https://api.twilio.com/2010-04-01/Accounts/${accountsid}/Messages.json") - - if [ "${httpcode}" = "201" ] - then - info "sent Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - - -# ----------------------------------------------------------------------------- -# hipchat sender - -send_hipchat() { - local authtoken="${1}" recipients="${2}" message="${3}" httpcode sent=0 room color sender msg_format notify - - # remove <small></small> from the message - message="${message//<small>/}" - message="${message//<\/small>/}" - - if [ "${SEND_HIPCHAT}" = "YES" -a ! -z "${HIPCHAT_SERVER}" -a ! -z "${authtoken}" -a ! -z "${recipients}" -a ! -z "${message}" ] - then - # A label to be shown in addition to the sender's name - # Valid length range: 0 - 64. - sender="netdata" - - # Valid values: html, text. - # Defaults to 'html'. - msg_format="html" - - # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'. - case "${status}" in - WARNING) color="yellow" ;; - CRITICAL) color="red" ;; - CLEAR) color="green" ;; - *) color="gray" ;; - esac - - # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc). - # Each recipient's notification preferences are taken into account. - # Defaults to false. - notify="true" - - for room in ${recipients} - do - httpcode=$(docurl -X POST \ - -H "Content-type: application/json" \ - -H "Authorization: Bearer ${authtoken}" \ - -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \ - "https://${HIPCHAT_SERVER}/v2/room/${room}/notification") - - if [ "${httpcode}" = "204" ] - then - info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'" - sent=$((sent + 1)) - else - error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - - -# ----------------------------------------------------------------------------- -# messagebird sender - -send_messagebird() { - local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user - if [ "${SEND_MESSAGEBIRD}" = "YES" -a ! -z "${accesskey}" -a ! -z "${messagebirdnumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! 
-z "${title}" ] - then - #https://developers.messagebird.com/docs/messaging - for user in ${recipients} - do - httpcode=$(docurl -X POST \ - --data-urlencode "originator=${messagebirdnumber}" \ - --data-urlencode "recipients=${user}" \ - --data-urlencode "body=${title} ${message}" \ - --data-urlencode "datacoding=auto" \ - -H "Authorization: AccessKey ${accesskey}" \ - "https://rest.messagebird.com/messages") - - if [ "${httpcode}" = "201" ] - then - info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# kavenegar sender - -send_kavenegar() { - local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user - if [ "${SEND_KAVENEGAR}" = "YES" -a ! -z "${API_KEY}" -a ! -z "${kavenegarsender}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] - then - # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json - for user in ${recipients} - do - httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \ - --data-urlencode "sender=${kavenegarsender}" \ - --data-urlencode "receptor=${user}" \ - --data-urlencode "message=${title} ${message}") - - if [ "${httpcode}" = "201" ] - then - info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# telegram sender - -send_telegram() { - local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification="" - - if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi - - case "${status}" in - WARNING) emoji="⚠️" ;; - CRITICAL) emoji="🔴" ;; - CLEAR) emoji="✅" ;; - *) emoji="⚪️" ;; - esac - - if [ "${SEND_TELEGRAM}" = "YES" -a ! -z "${bottoken}" -a ! -z "${chatids}" -a ! -z "${message}" ]; - then - for chatid in ${chatids} - do - # https://core.telegram.org/bots/api#sendmessage - httpcode=$(docurl ${disableNotification} \ - --data-urlencode "parse_mode=HTML" \ - --data-urlencode "disable_web_page_preview=true" \ - --data-urlencode "text=${emoji} ${message}" \ - "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}") - - if [ "${httpcode}" = "200" ] - then - info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'" - sent=$((sent + 1)) - elif [ "${httpcode}" = "401" ] - then - error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token." - else - error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP error code ${httpcode}." 
- fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# slack sender - -send_slack() { - local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload - - [ "${SEND_SLACK}" != "YES" ] && return 1 - - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for channel in ${channels} - do - payload="$(cat <<EOF - { - "channel": "#${channel}", - "username": "netdata on ${host}", - "icon_url": "${images_base_url}/images/seo-performance-128.png", - "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*", - "attachments": [ - { - "fallback": "${alarm} - ${chart} (${family}) - ${info}", - "color": "${color}", - "title": "${alarm}", - "title_link": "${goto_url}", - "text": "${info}", - "fields": [ - { - "title": "${chart}", - "short": true - }, - { - "title": "${family}", - "short": true - } - ], - "thumb_url": "${image}", - "footer": "by <${goto_url}|${this_host}>", - "ts": ${when} - } - ] - } -EOF - )" - - httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") - if [ "${httpcode}" = "200" ] - then - info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - else - error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# alerta sender - -send_alerta() { - local webhook="${1}" channels="${2}" httpcode sent=0 channel severity content - - [ "${SEND_ALERTA}" != "YES" ] && return 1 - - case "${status}" in - WARNING) severity="warning" ;; - CRITICAL) severity="critical" ;; - CLEAR) severity="cleared" ;; - *) severity="unknown" ;; - esac - - info=$( echo -n ${info}) - - # the "event" property must be unique and repeatable across states, to let alerta do automatic correlation using the severity value - for channel in ${channels} - do - content="{" - content="$content \"environment\": \"${channel}\"," - content="$content \"service\": [\"${host}\"]," - content="$content \"resource\": \"${host}\"," - content="$content \"event\": \"${name}.${chart} (${family})\"," - content="$content \"severity\": \"${severity}\"," - content="$content \"value\": \"${alarm}\"," - content="$content \"text\": \"${info}\"" - content="$content }" - - - httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: Key $ALERTA_API_KEY" -d "$content" ) - - if [[ "${httpcode}" = "200" || "${httpcode}" = "201" ]] - then - info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - else - error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." 
- fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# flock sender - -send_flock() { - local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload - - [ "${SEND_FLOCK}" != "YES" ] && return 1 - - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for channel in ${channels} - do - httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{ - \"sendAs\": { - \"name\" : \"netdata on ${host}\", - \"profileImage\" : \"${images_base_url}/images/seo-performance-128.png\" - }, - \"text\": \"${host} *${status_message}*\", - \"timestamp\": \"${when}\", - \"attachments\": [ - { - \"description\": \"${chart} (${family}) - ${info}\", - \"color\": \"${color}\", - \"title\": \"${alarm}\", - \"url\": \"${goto_url}\", - \"text\": \"${info}\", - \"views\": { - \"image\": { - \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 }, - \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 }, - \"filename\": \"${image}\" - } - } - } - ] - }" ) - if [ "${httpcode}" = "200" ] - then - info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - else - error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# discord sender - -send_discord() { - local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username - - [ "${SEND_DISCORD}" != "YES" ] && return 1 - - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for channel in ${channels} - do - username="netdata on ${host}" - [ ${#username} -gt 32 ] && username="${username:0:29}..." - - payload="$(cat <<EOF - { - "channel": "#${channel}", - "username": "${username}", - "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*", - "icon_url": "${images_base_url}/images/seo-performance-128.png", - "attachments": [ - { - "color": "${color}", - "title": "${alarm}", - "title_link": "${goto_url}", - "text": "${info}", - "fields": [ - { - "title": "${chart}", - "value": "${family}" - } - ], - "thumb_url": "${image}", - "footer_icon": "${images_base_url}/images/seo-performance-128.png", - "footer": "${this_host}", - "ts": ${when} - } - ] - } -EOF - )" - - httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") - if [ "${httpcode}" = "200" ] - then - info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - else - error "failed to send discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# irc sender - -send_irc() { - local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error - - if [ "${SEND_IRC}" = "YES" -a ! -z "${NICKNAME}" -a ! -z "${REALNAME}" -a ! -z "${CHANNELS}" -a ! -z "${NETWORK}" -a ! 
-z "${SERVERNAME}" ] - then - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for CHANNEL in ${CHANNELS} - do - error=0 - send_alarm=$(echo -e "USER ${NICKNAME} guest ${REALNAME} ${SERVERNAME}\nNICK ${NICKNAME}\nJOIN ${CHANNEL}\nPRIVMSG ${CHANNEL} :${MESSAGE}\nQUIT\n" \ | nc ${NETWORK} 6667) - reply_codes=$(echo ${send_alarm} | cut -d ' ' -f 2 | grep -o '[0-9]*') - for code in ${reply_codes} - do - [ "${code}" -ge 400 -a "${code}" -le 599 ] && error=1 && break - done - - if [ "${error}" -eq 0 ] - then - info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'" - sent=$((sent + 1)) - else - error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}." - fi - done - fi - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - - -# ----------------------------------------------------------------------------- -# prepare the content of the notification - -# the url to send the user on click -urlencode "${host}" >/dev/null; url_host="${REPLY}" -urlencode "${chart}" >/dev/null; url_chart="${REPLY}" -urlencode "${family}" >/dev/null; url_family="${REPLY}" -urlencode "${name}" >/dev/null; url_name="${REPLY}" -goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}" - -# the severity of the alarm -severity="${status}" - -# the time the alarm was raised -duration4human ${duration} >/dev/null; duration_txt="${REPLY}" -duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}" -raised_for="(was ${old_status,,} for ${duration_txt})" - -# the key status message -status_message="status unknown" - -# the color of the alarm -color="grey" - -# the alarm value -alarm="${name//_/ } = ${value_string}" - -# the image of the alarm -image="${images_base_url}/images/seo-performance-128.png" - -# prepare the title based on status -case "${status}" in - CRITICAL) - image="${images_base_url}/images/alert-128-red.png" - status_message="is critical" - color="#ca414b" - ;; - - WARNING) - image="${images_base_url}/images/alert-128-orange.png" - status_message="needs attention" - color="#ffc107" - ;; - - CLEAR) - image="${images_base_url}/images/check-mark-2-128-green.png" - status_message="recovered" - color="#77ca6d" - ;; -esac - -if [ "${status}" = "CLEAR" ] -then - severity="Recovered from ${old_status}" - if [ ${non_clear_duration} -gt ${duration} ] - then - raised_for="(alarm was raised for ${non_clear_duration_txt})" - fi - - # don't show the value when the status is CLEAR - # for certain alarms, this value might not have any meaning - alarm="${name//_/ } ${raised_for}" - -elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ] -then - severity="Escalated to ${status}" - if [ ${non_clear_duration} -gt ${duration} ] - then - raised_for="(alarm is raised for ${non_clear_duration_txt})" - fi - -elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ] -then - severity="Demoted to ${status}" - if [ ${non_clear_duration} -gt ${duration} ] - then - raised_for="(alarm is raised for ${non_clear_duration_txt})" - fi - -else - raised_for= -fi - -# prepare HTML versions of elements -info_html= -[ ! -z "${info}" ] && info_html=" <small><br/>${info}</small>" - -raised_for_html= -[ ! 
-z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>" - -# ----------------------------------------------------------------------------- -# send the slack notification - -# slack aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}" -SENT_SLACK=$? - -# ----------------------------------------------------------------------------- -# send the alerta notification - -# alerta aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}" -SENT_ALERTA=$? - -# ----------------------------------------------------------------------------- -# send the flock notification - -# flock aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}" -SENT_FLOCK=$? - -# ----------------------------------------------------------------------------- -# send the discord notification - -# discord aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}" -SENT_DISCORD=$? - -# ----------------------------------------------------------------------------- -# send the pushover notification - -send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" " -<font color=\"${color}\"><b>${alarm}</b></font>${info_html}<br/>  -<small><b>${chart}</b><br/>Chart<br/> </small> -<small><b>${family}</b><br/>Family<br/> </small> -<small><b>${severity}</b><br/>Severity<br/> </small> -<small><b>${date}${raised_for_html}</b><br/>Time<br/> </small> -<a href=\"${goto_url}\">View Netdata</a><br/>  -<small><small>The source of this alarm is line ${src}</small></small> -" - -SENT_PUSHOVER=$? - -# ----------------------------------------------------------------------------- -# send the pushbullet notification - -send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\n -Severity: ${severity}\n -Chart: ${chart}\n -Family: ${family}\n -$(date -d @${when})\n -The source of this alarm is line ${src}" - -SENT_PUSHBULLET=$? - -# ----------------------------------------------------------------------------- -# send the twilio SMS - -send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_TWILIO=$? - -# ----------------------------------------------------------------------------- -# send the messagebird SMS - -send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_MESSAGEBIRD=$? - - -# ----------------------------------------------------------------------------- -# send the kavenegar SMS - -send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_KAVENEGAR=$? 
- - -# ----------------------------------------------------------------------------- -# send the telegram.org message - -# https://core.telegram.org/bots/api#formatting-options -send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - <b>${name//_/ }</b> -${chart} (${family}) -<a href=\"${goto_url}\">${alarm}</a> -<i>${info}</i>" - -SENT_TELEGRAM=$? - - -# ----------------------------------------------------------------------------- -# send the kafka message - -send_kafka -SENT_KAFKA=$? - - -# ----------------------------------------------------------------------------- -# send the pagerduty.com message - -send_pd "${to_pd}" -SENT_PD=$? - -# ----------------------------------------------------------------------------- -# send the irc message - -send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_IRC=$? - -# ----------------------------------------------------------------------------- -# send the custom message - -send_custom() { - # is it enabled? - [ "${SEND_CUSTOM}" != "YES" ] && return 1 - - # do we have any sender? - [ -z "${1}" ] && return 1 - - # call the custom_sender function - custom_sender "${@}" -} - -send_custom "${to_custom}" -SENT_CUSTOM=$? - - -# ----------------------------------------------------------------------------- -# send hipchat message - -send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \ -${host} ${status_message}<br/> \ -<b>${alarm}</b> ${info_html}<br/> \ -<b>${chart}</b> (family <b>${family}</b>)<br/> \ -<b>${date}${raised_for_html}</b><br/> \ -<a href=\\\"${goto_url}\\\">View netdata dashboard</a> \ -(source of alarm ${src}) \ -" - -SENT_HIPCHAT=$? 
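# -----------------------------------------------------------------------------
# a sketch of the custom_sender() hook

# send_custom() above invokes a user-defined custom_sender() from
# health_alarm_notify.conf. A minimal sketch, assuming a hypothetical webhook
# endpoint; docurl is the curl wrapper the other senders above use, and it
# prints the HTTP response code:

custom_sender() {
    local recipient httpcode
    for recipient in "${@}"
    do
        httpcode=$(docurl -X POST \
            --data-urlencode "payload=${host} ${status_message}: ${alarm} (${chart}/${family})" \
            "https://example.com/notify/${recipient}")  # hypothetical endpoint

        if [ "${httpcode}" = "200" ]
        then
            info "sent custom notification for: ${host} ${chart}.${name} is ${status} to '${recipient}'"
        else
            error "failed to send custom notification for: ${host} ${chart}.${name} is ${status} to '${recipient}' with HTTP error code ${httpcode}."
        fi
    done
}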
- - -# ----------------------------------------------------------------------------- -# send the email - -send_email <<EOF -To: ${to_email} -Subject: ${host} ${status_message} - ${name//_/ } - ${chart} -MIME-Version: 1.0 -Content-Type: multipart/alternative; boundary="multipart-boundary" - -This is a MIME-encoded multipart message - ---multipart-boundary -Content-Type: text/plain; charset=${EMAIL_CHARSET} -Content-Disposition: inline -Content-Transfer-Encoding: 8bit - -${host} ${status_message} - -${alarm} ${info} -${raised_for} - -Chart : ${chart} -Family : ${family} -Severity: ${severity} -URL : ${goto_url} -Source : ${src} -Date : ${date} -Notification generated on ${this_host} - ---multipart-boundary -Content-Type: text/html; charset=${EMAIL_CHARSET} -Content-Disposition: inline -Content-Transfer-Encoding: 8bit - -<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> -<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;"> -<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;"> -<table> - <tbody> - <tr> - <td style="vertical-align: top;" valign="top"></td> - <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top"> - <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;"> - <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;"> - <tbody> - <tr> - <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;"> - <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div> - </td> - </tr> - <tr> - <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top"> - <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1> - </td> - </tr> - <tr> - <td style="vertical-align: top;" valign="top"> - <div style="margin: 0; padding: 20px; max-width: 700px;"> - <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px"> - <tbody> - <tr> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top"> - <span>${chart}</span> - <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span> - </td> - </tr> - <tr style="margin: 0; padding: 0;"> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> - <span><b>${alarm}</b>${info_html}</span> - <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span> - </td> - </tr> - <tr> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 
0; padding: 0 0 20px;" align="left" valign="top"> - <span>${family}</span> - <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span> - </td> - </tr> - <tr style="margin: 0; padding: 0;"> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"> - <span>${severity}</span> - <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span> - </td> - </tr> - <tr style="margin: 0; padding: 0;"> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span> - <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span> - </td> - </tr> - <tr style="margin: 0; padding: 0;"> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;"> - <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a> - </td> - </tr> - <tr style="text-align: center; margin: 0; padding: 0;"> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs) - </td> - </tr> - <tr style="text-align: center; margin: 0; padding: 0;"> - <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by - <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${this_host}</code>. - </td> - </tr> - </tbody> - </table> - </div> - </td> - </tr> - </tbody> - </table> - </div> - </td> - </tr> - </tbody> -</table> -</body> -</html> ---multipart-boundary-- -EOF - -SENT_EMAIL=$? - -# ----------------------------------------------------------------------------- -# let netdata know - -if [ ${SENT_EMAIL} -eq 0 \ - -o ${SENT_PUSHOVER} -eq 0 \ - -o ${SENT_TELEGRAM} -eq 0 \ - -o ${SENT_SLACK} -eq 0 \ - -o ${SENT_ALERTA} -eq 0 \ - -o ${SENT_FLOCK} -eq 0 \ - -o ${SENT_DISCORD} -eq 0 \ - -o ${SENT_TWILIO} -eq 0 \ - -o ${SENT_HIPCHAT} -eq 0 \ - -o ${SENT_MESSAGEBIRD} -eq 0 \ - -o ${SENT_KAVENEGAR} -eq 0 \ - -o ${SENT_PUSHBULLET} -eq 0 \ - -o ${SENT_KAFKA} -eq 0 \ - -o ${SENT_PD} -eq 0 \ - -o ${SENT_IRC} -eq 0 \ - -o ${SENT_CUSTOM} -eq 0 \ - ] - then - # we did send something - exit 0 -fi - -# we did not send anything -exit 1 diff --git a/plugins.d/alarm-test.sh b/plugins.d/alarm-test.sh deleted file mode 100755 index 9df5361a9..000000000 --- a/plugins.d/alarm-test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# -# Script to test alarm notifications for netdata - -dir="$(dirname "${0}")" -${dir}/alarm-notify.sh test "${1}" -exit $? diff --git a/plugins.d/cgroup-name.sh b/plugins.d/cgroup-name.sh deleted file mode 100755 index 3c8ad7205..000000000 --- a/plugins.d/cgroup-name.sh +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2016 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# -# Script to find a better name for cgroups -# - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - -# ----------------------------------------------------------------------------- - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- - -[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata" -CONFIG="${NETDATA_CONFIG_DIR}/cgroups-names.conf" -CGROUP="${1}" -NAME= - -# ----------------------------------------------------------------------------- - -if [ -z "${CGROUP}" ] - then - fatal "called without a cgroup name. Nothing to do." -fi - -if [ -f "${CONFIG}" ] - then - NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)" - if [ -z "${NAME}" ] - then - info "cannot find cgroup '${CGROUP}' in '${CONFIG}'." - fi -#else -# info "configuration file '${CONFIG}' is not available." -fi - -function docker_get_name_classic { - local id="${1}" - info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\"" - NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )" - return 0 -} - -function docker_get_name_api { - local id="${1}" - if [ ! -S "/var/run/docker.sock" ] - then - warning "Can't find /var/run/docker.sock" - return 1 - fi - info "Running API command: /containers/${id}/json" - JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U /var/run/docker.sock | grep '^{.*') - NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') - return 0 -} - -function docker_get_name { - local id="${1}" - if hash docker 2>/dev/null - then - docker_get_name_classic "${id}" - else - docker_get_name_api "${id}" || docker_get_name_classic "${id}" - fi - if [ -z "${NAME}" ] - then - warning "cannot find the name of docker container '${id}'" - NAME="${id:0:12}" - else - info "docker container '${id}' is named '${NAME}'" - fi -} - -if [ -z "${NAME}" ] - then - if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]] - then - # docker containers - - DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )" - # echo "DOCKERID=${DOCKERID}" - - if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] - then - docker_get_name "${DOCKERID}" - else - error "a docker id cannot be extracted from docker cgroup '${CGROUP}'." 
- fi - elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]] - then - # kubernetes - - DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )" - # echo "DOCKERID=${DOCKERID}" - - if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] - then - docker_get_name "${DOCKERID}" - else - error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'." - fi - elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]] - then - # systemd-nspawn - - NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')" - - elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]] - then - # libvirtd / qemu virtual machines - - # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')" - NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')" - - elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]] - then - # libvirtd / qemu virtual machines - NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')" - - elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]] - then - # Proxmox VMs - - FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" - if [[ -f $FILENAME && -r $FILENAME ]] - then - NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')" - else - error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of the www-data group." - fi - elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]] - then - # Proxmox Containers (LXC) - - FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf" - if [[ -f ${FILENAME} && -r ${FILENAME} ]] - then - NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p') - else - error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of the www-data group." - fi - fi - - [ -z "${NAME}" ] && NAME="${CGROUP}" - [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}" -fi - -info "cgroup '${CGROUP}' is called '${NAME}'" -echo "${NAME}" diff --git a/plugins.d/cgroup-network-helper.sh b/plugins.d/cgroup-network-helper.sh deleted file mode 100755 index f07059986..000000000 --- a/plugins.d/cgroup-network-helper.sh +++ /dev/null @@ -1,251 +0,0 @@ -#!/usr/bin/env bash - -# cgroup-network-helper.sh -# detect container and virtual machine interfaces -# -# (C) 2017 Costa Tsaousis -# GPL v3+ -# -# This script is called as root (by cgroup-network), with either a pid, or a cgroup path. -# It tries to find all the network interfaces that belong to the same cgroup. -# -# It supports several methods for this detection: -# -# 1. cgroup-network (the binary father of this script) detects veth network interfaces, -# by examining iflink and ifindex IDs and switching namespaces -# (it also detects the interface name as it is used by the container). -# -# 2. this script uses /proc/PID/fdinfo to find tun/tap network interfaces. -# -# 3. this script calls virsh to find libvirt network interfaces. 
-# - -# ----------------------------------------------------------------------------- - -# the system path is cleared by cgroup-network -[ -f /etc/profile ] && source /etc/profile - -export LC_ALL=C - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=0 -debug() { - [ "${debug}" = "1" ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- -# check for BASH v4+ (required for associative arrays) - -[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \ - fatal "BASH version 4 or later is required (this is ${BASH_VERSION})." - -# ----------------------------------------------------------------------------- -# parse the arguments - -pid= -cgroup= -while [ ! -z "${1}" ] -do - case "${1}" in - --cgroup) cgroup="${2}"; shift 1;; - --pid|-p) pid="${2}"; shift 1;; - --debug|debug) debug=1;; - *) fatal "Cannot understand argument '${1}'";; - esac - - shift -done - -if [ -z "${pid}" -a -z "${cgroup}" ] -then - fatal "Either --pid or --cgroup is required" -fi - -# ----------------------------------------------------------------------------- - -set_source() { - [ ${debug} -eq 1 ] && echo "SRC ${*}" -} - - -# ----------------------------------------------------------------------------- -# veth interfaces via cgroup - -# cgroup-network can detect veth interfaces by itself (written in C). -# If you are looking for a shell version of what it does, see: -# https://github.com/firehol/netdata/issues/474#issuecomment-317866709 - - -# ----------------------------------------------------------------------------- -# tun/tap interfaces via /proc/PID/fdinfo - -# find any tun/tap devices linked to a pid -proc_pid_fdinfo_iff() { - local p="${1}" # the pid - - debug "Searching for tun/tap interfaces for pid ${p}..." - set_source "fdinfo" - grep ^iff:.* "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2 -} - -find_tun_tap_interfaces_for_cgroup() { - local c="${1}" # the cgroup path - - # for each pid of the cgroup - # find any tun/tap devices linked to the pid - if [ -f "${c}/emulator/cgroup.procs" ] - then - local p - for p in $(< "${c}/emulator/cgroup.procs" ) - do - proc_pid_fdinfo_iff ${p} - done - fi -} - - -# ----------------------------------------------------------------------------- -# virsh domain network interfaces - -virsh_cgroup_to_domain_name() { - local c="${1}" # the cgroup path - - debug "extracting a possible virsh domain from cgroup ${c}..." - - # extract from the cgroup path - sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \ - -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \ - <<EOF -${c} -EOF -} - -virsh_find_all_interfaces_for_cgroup() { - local c="${1}" # the cgroup path - - # the virsh command - local virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)" - - if [ ! -z "${virsh}" ] - then - local d="$(virsh_cgroup_to_domain_name "${c}")" - - if [ ! 
-z "${d}" ] - then - debug "running: virsh domiflist ${d}; to find the network interfaces" - - # match only 'network' interfaces from virsh output - - set_source "virsh" - "${virsh}" -r domiflist ${d} |\ - sed -n \ - -e "s|^\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \ - -e "s|^\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" - else - debug "no virsh domain extracted from cgroup ${c}" - fi - else - debug "virsh command is not available" - fi -} - -# ----------------------------------------------------------------------------- - -find_all_interfaces_of_pid_or_cgroup() { - local p="${1}" c="${2}" # the pid and the cgroup path - - if [ ! -z "${pid}" ] - then - # we have been called with a pid - - proc_pid_fdinfo_iff ${p} - - elif [ ! -z "${c}" ] - then - # we have been called with a cgroup - - info "searching for network interfaces of cgroup '${c}'" - - find_tun_tap_interfaces_for_cgroup "${c}" - virsh_find_all_interfaces_for_cgroup "${c}" - - else - - error "Either a pid or a cgroup path is needed" - return 1 - - fi - - return 0 -} - -# ----------------------------------------------------------------------------- - -# an associative array to store the interfaces -# the index is the interface name as seen by the host -# the value is the interface name as seen by the guest / container -declare -A devs=() - -# store all interfaces found in the associative array -# this will also give the unique devices, as seen by the host -last_src= -while read host_device guest_device -do - [ -z "${host_device}" ] && continue - - [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue - - # the default guest_device is the host_device - [ -z "${guest_device}" ] && guest_device="${host_device}" - - # when we run in debug, show the source - debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'" - - [ -z "${devs[${host_device}]}" -o "${devs[${host_device}]}" = "${host_device}" ] && \ - devs[${host_device}]="${guest_device}" - -done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" ) - -# print the interfaces found, in the format netdata expects them -found=0 -for x in "${!devs[@]}" -do - found=$((found + 1)) - echo "${x} ${devs[${x}]}" -done - -debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}" - -# let netdata know if we found any -[ ${found} -eq 0 ] && exit 1 -exit 0 diff --git a/plugins.d/charts.d.dryrun-helper.sh b/plugins.d/charts.d.dryrun-helper.sh deleted file mode 100755 index 8142f9882..000000000 --- a/plugins.d/charts.d.dryrun-helper.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash - -# will stop the script for any error -set -e - -me="$0" -name="$1" -chart="$2" -conf="$3" - -can_diff=1 - -tmp1="`mktemp`" -tmp2="`mktemp`" - -myset() { - set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO=" -} - -# save 2 'set' -myset >"$tmp1" -myset >"$tmp2" - -# make sure they don't differ -diff "$tmp1" "$tmp2" >/dev/null 2>&1 -if [ $? -ne 0 ] -then - # they differ, we cannot do the check - echo >&2 "$me: cannot check with diff." - can_diff=0 -fi - -# do it again, now including the script -myset >"$tmp1" - -# include the plugin and its config -if [ -f "$conf" ] -then - . "$conf" - if [ $? -ne 0 ] - then - echo >&2 "$me: cannot load config file $conf" - rm "$tmp1" "$tmp2" - exit 1 - fi -fi - -. 
"$chart" -if [ $? -ne 0 ] -then - echo >&2 "$me: cannot load chart file $chart" - rm "$tmp1" "$tmp2" - exit 1 -fi - -# remove all variables starting with the plugin name -myset | grep -v "^$name" >"$tmp2" - -if [ $can_diff -eq 1 ] -then - # check if they are different - # make sure they don't differ - diff "$tmp1" "$tmp2" >&2 - if [ $? -ne 0 ] - then - # they differ - rm "$tmp1" "$tmp2" - exit 1 - fi -fi - -rm "$tmp1" "$tmp2" -exit 0 diff --git a/plugins.d/charts.d.plugin b/plugins.d/charts.d.plugin deleted file mode 100755 index 9bd03fd47..000000000 --- a/plugins.d/charts.d.plugin +++ /dev/null @@ -1,713 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# -# charts.d.plugin allows easy development of BASH plugins -# -# if you need to run parallel charts.d processes, link this file to a different name -# in the same directory, with a .plugin suffix and netdata will start both of them, -# each will have a different config file and modules configuration directory. -# - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" - -PROGRAM_FILE="$0" -PROGRAM_NAME="$(basename $0)" -PROGRAM_NAME="${PROGRAM_NAME/.plugin}" -MODULE_NAME="main" - -# ----------------------------------------------------------------------------- -# create temp dir - -debug=0 -TMP_DIR= -chartsd_cleanup() { - trap '' EXIT QUIT HUP INT TERM - - if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ] - then - [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..." - rm -rf "$TMP_DIR" - fi - exit 0 -} -trap chartsd_cleanup EXIT QUIT HUP INT TERM - -if [ $UID = "0" ] -then - TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )" -else - TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )" -fi - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - echo "DISABLE" - exit 1 -} - -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- -# check a few commands - -require_cmd() { - local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null) - if [ -z "${x}" -o ! -x "${x}" ] - then - warning "command '${1}' is not found in ${PATH}." - eval "${1^^}_CMD=\"\"" - return 1 - fi - - eval "${1^^}_CMD=\"${x}\"" - return 0 -} - -require_cmd date || exit 1 -require_cmd sed || exit 1 -require_cmd basename || exit 1 -require_cmd dirname || exit 1 -require_cmd cat || exit 1 -require_cmd grep || exit 1 -require_cmd egrep || exit 1 -require_cmd mktemp || exit 1 -require_cmd awk || exit 1 -require_cmd timeout || exit 1 -require_cmd curl || exit 1 - -# ----------------------------------------------------------------------------- - -[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade." 
- -info "started from '$PROGRAM_FILE' with options: $*" - -# ----------------------------------------------------------------------------- -# internal defaults -# netdata exposes a few environment variables for us - -[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" -[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata" - -pluginsd="${NETDATA_PLUGINS_DIR}" -confd="${NETDATA_CONFIG_DIR}" -chartsd="$pluginsd/../charts.d" - -myconfig="$confd/$PROGRAM_NAME.conf" - -minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}" -update_every=${minimum_update_frequency} # this will be overwritten by the command line - -# work around for non BASH shells -charts_create="_create" -charts_update="_update" -charts_check="_check" -charts_undescore="_" - -# when making iterations, charts.d can loop more frequently -# to prevent plugins missing iterations. -# this is a percentage relative to update_every to align its -# iterations. -# The minimum is 10%, the maximum 100%. -# So, if update_every is 1 second and time_divisor is 50, -# charts.d will iterate every 500ms. -# Charts will be called to collect data only if the time -# passed since the last time the collected data is equal or -# above their update_every. -time_divisor=50 - -# number of seconds to run without restart -# after this time, charts.d.plugin will exit -# netdata will restart it -restart_timeout=$((3600 * 4)) - -# check if the charts.d plugins are using global variables -# they should not. -# It does not currently support BASH v4 arrays, so it is -# disabled -dryrunner=0 - -# check for timeout command -check_for_timeout=1 - -# the default enable/disable value for all charts -enable_all_charts="yes" - -# ----------------------------------------------------------------------------- -# parse parameters - -check=0 -chart_only= -while [ ! -z "$1" ] -do - if [ "$1" = "check" ] - then - check=1 - shift - continue - fi - - if [ "$1" = "debug" -o "$1" = "all" ] - then - debug=1 - shift - continue - fi - - if [ -f "$chartsd/$1.chart.sh" ] - then - debug=1 - chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )" - shift - continue - fi - - if [ -f "$chartsd/$1" ] - then - debug=1 - chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )" - shift - continue - fi - - # number check - n="$1" - x=$(( n )) - if [ "$x" = "$n" ] - then - shift - update_every=$x - [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency - continue - fi - - fatal "Cannot understand parameter $1. Aborting." -done - - -# ----------------------------------------------------------------------------- -# loop control - -# default sleep function -LOOPSLEEPMS_HIGHRES=0 -now_ms= -current_time_ms_default() { - now_ms="$(date +'%s')000" -} -current_time_ms="current_time_ms_default" -current_time_ms_accuracy=1 -mysleep="sleep" - -# if found and included, this file overwrites loopsleepms() -# and current_time_ms() with a high resolution timer function -# for precise looping. -. "$pluginsd/loopsleepms.sh.inc" - -# ----------------------------------------------------------------------------- -# load my configuration - -if [ -f "$myconfig" ] - then - . "$myconfig" - [ $? -ne 0 ] && fatal "cannot load $myconfig" - - time_divisor=$((time_divisor)) - [ $time_divisor -lt 10 ] && time_divisor=10 - [ $time_divisor -gt 100 ] && time_divisor=100 -else - info "configuration file '$myconfig' not found. Using defaults." 
-fi - -# we check for the timeout command, after we load our -# configuration, so that the user may overwrite the -# timeout command we use, providing a function that -# can emulate the timeout command we need: -# > timeout SECONDS command ... -if [ $check_for_timeout -eq 1 ] - then - require_cmd timeout || exit 1 -fi - -# ----------------------------------------------------------------------------- -# internal checks - -# netdata passes the requested update frequency as the first argument -update_every=$(( update_every + 1 - 1)) # makes sure it is a number -test $update_every -eq 0 && update_every=1 # if it is zero, make it 1 - -# check the charts.d directory -[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'" - -# ----------------------------------------------------------------------------- -# library functions - -fixid() { - echo "$*" |\ - tr -c "[A-Z][a-z][0-9]" "_" |\ - sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\ - tr "[A-Z]" "[a-z]" -} - -run() { - local ret pid="${BASHPID}" t - - if [ "z${1}" = "z-t" -a "${2}" != "0" ] - then - t="${2}" - shift 2 - timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}" - ret=$? - else - "${@}" 2>"${TMP_DIR}/run.${pid}" - ret=$? - fi - - if [ ${ret} -ne 0 ] - then - { - printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '" - printf "%q " "${@}" - printf "' failed:\n --- BEGIN TRACE ---\n" - cat "${TMP_DIR}/run.${pid}" - printf " --- END TRACE ---\n" - } >&2 - fi - rm "${TMP_DIR}/run.${pid}" - - return ${ret} -} - -# convert any floating point number -# to integer, give a multiplier -# the result is stored in ${FLOAT2INT_RESULT} -# so that no fork is necessary -# the multiplier must be a power of 10 -float2int() { - local f m="$2" a b l v=($1) - f=${v[0]} - - # the length of the multiplier - 1 - l=$(( ${#m} - 1 )) - - # check if the number is in scientific notation - if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]] - then - # convert it to decimal - # unfortunately, this fork cannot be avoided - # if you know of a way to avoid it, please let me know - f=$(printf "%0.${l}f" ${f}) - fi - - # split the floating point number - # in integer (a) and decimal (b) - a=${f/.*/} - b=${f/*./} - - # if the integer part is missing - # set it to zero - [ -z "${a}" ] && a="0" - - # strip leading zeros from the integer part - # base 10 convertion - a=$((10#$a)) - - # check the length of the decimal part - # against the length of the multiplier - if [ ${#b} -gt ${l} ] - then - # too many digits - take the most significant - b=${b:0:${l}} - - elif [ ${#b} -lt ${l} ] - then - # too few digits - pad with zero on the right - local z="00000000000000000000000" r=$((l - ${#b})) - b="${b}${z:0:${r}}" - fi - - # strip leading zeros from the decimal part - # base 10 convertion - b=$((10#$b)) - - # store the result - FLOAT2INT_RESULT=$(( (a * m) + b )) -} - - -# ----------------------------------------------------------------------------- -# charts check functions - -all_charts() { - cd "$chartsd" - [ $? 
-ne 0 ] && error "cannot cd to $chartsd" && return 1 - - ls *.chart.sh | sed "s/\.chart\.sh$//g" -} - -declare -A charts_enable_keyword=( - ['apache']="force" - ['cpu_apps']="force" - ['cpufreq']="force" - ['example']="force" - ['exim']="force" - ['hddtemp']="force" - ['load_average']="force" - ['mem_apps']="force" - ['mysql']="force" - ['nginx']="force" - ['phpfpm']="force" - ['postfix']="force" - ['sensors']="force" - ['squid']="force" - ['tomcat']="force" - ) - -all_enabled_charts() { - local charts= enabled= required= - - # find all enabled charts - - for chart in $( all_charts ) - do - MODULE_NAME="${chart}" - - eval "enabled=\$$chart" - if [ -z "${enabled}" ] - then - enabled="${enable_all_charts}" - fi - - required="${charts_enable_keyword[${chart}]}" - [ -z "${required}" ] && required="yes" - - if [ ! "${enabled}" = "${required}" ] - then - info "is disabled. Add a line with $chart=$required in $myconfig to enable it (or remove the line that disables it)." - else - debug "is enabled for auto-detection." - local charts="$charts $chart" - fi - done - MODULE_NAME="main" - - local charts2= - for chart in $charts - do - MODULE_NAME="${chart}" - - # check the enabled charts - local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )" - if [ -z "$check" ] - then - error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it." - continue - fi - - local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )" - if [ -z "$create" ] - then - error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it." - continue - fi - - local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )" - if [ -z "$update" ] - then - error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it." - continue - fi - - # check its config - #if [ -f "$confd/$chart.conf" ] - #then - # if [ ! -z "$( cat "$confd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ] - # then - # error "module's $chart config $confd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it." - # continue - # fi - #fi - - #if [ $dryrunner -eq 1 ] - # then - # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$confd/$chart.conf" >/dev/null - # if [ $? -ne 0 ] - # then - # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it." - # continue - # fi - #fi - - local charts2="$charts2 $chart" - done - MODULE_NAME="main" - - echo $charts2 - debug "enabled charts: $charts2" -} - -# ----------------------------------------------------------------------------- -# load the charts - -suffix_retries="_retries" -suffix_update_every="_update_every" -active_charts= -for chart in $( all_enabled_charts ) -do - MODULE_NAME="${chart}" - - debug "loading module: '$chartsd/$chart.chart.sh'" - - . "$chartsd/$chart.chart.sh" - - if [ -f "$confd/$PROGRAM_NAME/$chart.conf" ] - then - debug "loading module configuration: '$confd/$PROGRAM_NAME/$chart.conf'" - . "$confd/$PROGRAM_NAME/$chart.conf" - elif [ -f "$confd/$chart.conf" ] - then - debug "loading module configuration: '$confd/$chart.conf'" - . "$confd/$chart.conf" - else - warning "configuration file '$confd/$PROGRAM_NAME/$chart.conf' not found. Using defaults." 
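- # (per the lookup above, "$confd/$PROGRAM_NAME/$chart.conf" takes
- # precedence over "$confd/$chart.conf")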
- fi - - eval "dt=\$$chart$suffix_update_every" - dt=$(( dt + 1 - 1 )) # make sure it is a number - if [ $dt -lt $update_every ] - then - eval "$chart$suffix_update_every=$update_every" - fi - - $chart$charts_check - if [ $? -eq 0 ] - then - debug "module '$chart' activated" - active_charts="$active_charts $chart" - else - error "module's '$chart' check() function reports failure." - fi -done -MODULE_NAME="main" -debug "activated modules: $active_charts" - - -# ----------------------------------------------------------------------------- -# check overwrites - -# enable work time reporting -debug_time= -test $debug -eq 1 && debug_time=tellwork - -# if we only need a specific chart, remove all the others -if [ ! -z "${chart_only}" ] -then - debug "requested to run only for: '${chart_only}'" - check_charts= - for chart in $active_charts - do - if [ "$chart" = "$chart_only" ] - then - check_charts="$chart" - break - fi - done - active_charts="$check_charts" -fi -debug "activated charts: $active_charts" - -# stop if we just need a pre-check -if [ $check -eq 1 ] -then - info "CHECK RESULT" - info "Will run the charts: $active_charts" - exit 0 -fi - -# ----------------------------------------------------------------------------- - -cd "${TMP_DIR}" || exit 1 - -# ----------------------------------------------------------------------------- -# create charts - -run_charts= -for chart in $active_charts -do - MODULE_NAME="${chart}" - - debug "calling '$chart$charts_create()'..." - $chart$charts_create - if [ $? -eq 0 ] - then - run_charts="$run_charts $chart" - debug "'$chart' initialized." - else - error "module's '$chart' function '$chart$charts_create()' reports failure." - fi -done -MODULE_NAME="main" -debug "run_charts='$run_charts'" - - -# ----------------------------------------------------------------------------- -# update dimensions - -[ -z "$run_charts" ] && fatal "No charts to collect data from." 
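-# (global_update below speaks the netdata plugin protocol on stdout;
-# for a hypothetical chart named 'example', one iteration would emit:
-#    BEGIN netdata.plugin_chartsd_example 1000000
-#    SET run_time = 3
-#    END
-# where the BEGIN value is the microseconds since the previous run)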
- -declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=() -global_update() { - local exit_at \ - c=0 dt ret last_ms exec_start_ms exec_end_ms \ - chart now_charts=() next_charts=($run_charts) \ - next_ms x seconds millis - - # return the current time in ms in $now_ms - ${current_time_ms} - - exit_at=$(( now_ms + (restart_timeout * 1000) )) - - for chart in $run_charts - do - eval "charts_update_every[$chart]=\$$chart$suffix_update_every" - test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every - - eval "charts_retries[$chart]=\$$chart$suffix_retries" - test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10 - - charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) )) - charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) )) - charts_run_counter[$chart]=0 - charts_serial_failures[$chart]=0 - - echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" - echo "DIMENSION run_time 'run time' absolute 1 1" - done - - # the main loop - while [ "${#next_charts[@]}" -gt 0 ] - do - c=$((c + 1)) - now_charts=("${next_charts[@]}") - next_charts=() - - # return the current time in ms in $now_ms - ${current_time_ms} - - for chart in "${now_charts[@]}" - do - MODULE_NAME="${chart}" - - if [ ${now_ms} -ge ${charts_next_update[$chart]} ] - then - last_ms=${charts_last_update[$chart]} - dt=$(( (now_ms - last_ms) )) - - charts_last_update[$chart]=${now_ms} - - while [ ${charts_next_update[$chart]} -lt ${now_ms} ] - do - charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) )) - done - - # the first call should not give a duration - # so that netdata calibrates to current time - dt=$(( dt * 1000 )) - charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 )) - if [ ${charts_run_counter[$chart]} -eq 1 ] - then - dt= - fi - - exec_start_ms=$now_ms - $chart$charts_update $dt - ret=$? - - # return the current time in ms in $now_ms - ${current_time_ms}; exec_end_ms=$now_ms - - echo "BEGIN netdata.plugin_chartsd_$chart $dt" - echo "SET run_time = $(( exec_end_ms - exec_start_ms ))" - echo "END" - - if [ $ret -eq 0 ] - then - charts_serial_failures[$chart]=0 - next_charts+=($chart) - else - charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 )) - - if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ] - then - error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it." - else - error "module's '$chart' update() function reports failure. Will keep trying for a while." - next_charts+=($chart) - fi - fi - else - next_charts+=($chart) - fi - done - MODULE_NAME="${chart}" - - # wait the time you are required to - next_ms=$((now_ms + (update_every * 1000 * 100) )) - for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done - next_ms=$((next_ms - now_ms)) - - if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ] - then - next_ms=$(( next_ms + current_time_ms_accuracy )) - seconds=$(( next_ms / 1000 )) - millis=$(( next_ms % 1000 )) - if [ ${millis} -lt 10 ] - then - millis="00${millis}" - elif [ ${millis} -lt 100 ] - then - millis="0${millis}" - fi - - debug "sleeping for ${seconds}.${millis} seconds." 
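- # fractional sleep: seconds/millis were split from next_ms above,
- # e.g. next_ms=1234 sleeps for "1.234"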
- ${mysleep} ${seconds}.${millis} - else - debug "sleeping for ${update_every} seconds." - ${mysleep} $update_every - fi - - test ${now_ms} -ge ${exit_at} && exit 0 - done - - fatal "nothing left to do, exiting..." -} - -global_update diff --git a/plugins.d/fping.plugin b/plugins.d/fping.plugin deleted file mode 100755 index f38a8dde0..000000000 --- a/plugins.d/fping.plugin +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# -# This plugin requires a latest version of fping. -# You can compile it from source, by running me with option: install - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - -if [ "${1}" = "install" ] - then - [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1 - - run() { - printf >&2 " > " - printf >&2 "%q " "${@}" - printf >&2 "\n" - "${@}" || exit 1 - } - - download() { - local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" - [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0 - - local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)" - [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0 - - echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1 - } - - [ ! -d /usr/src ] && run mkdir -p /usr/src - [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin - - run cd /usr/src - - if [ -d fping-4.0 ] - then - run rm -rf fping-4.0 || exit 1 - fi - - download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf - - [ $? -ne 0 ] && exit 1 - run cd fping-4.0 || exit 1 - - run ./configure --prefix=/usr/local - run make clean - run make - if [ -f /usr/local/bin/fping ] - then - run mv -f /usr/local/bin/fping /usr/local/bin/fping.old - fi - run mv src/fping /usr/local/bin/fping - run chown root:root /usr/local/bin/fping - run chmod 4755 /usr/local/bin/fping - echo >&2 - echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping." - echo >&2 - - fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)" - if [ "${fping}" != "/usr/local/bin/fping" ] - then - echo >&2 "You have another fping installed at: ${fping}." 
- echo >&2 "Please set:" - echo >&2 - echo >&2 " fping=\"/usr/local/bin/fping\"" - echo >&2 - echo >&2 "at /etc/netdata/fping.conf" - echo >&2 - fi - exit 0 -fi - -# ----------------------------------------------------------------------------- - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - echo "DISABLE" - exit 1 -} - -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- - -# store in ${plugin} the name we run under -# this allows us to copy/link fping.plugin under a different name -# to have multiple fping plugins running with different settings -plugin="${PROGRAM_NAME/.plugin/}" - - -# ----------------------------------------------------------------------------- - -# the frequency to send info to netdata -# passed by netdata as the first parameter -update_every="${1-1}" - -# the netdata configuration directory -# passed by netdata as an environment variable -[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata" - -# ----------------------------------------------------------------------------- -# configuration options -# can be overwritten at /etc/netdata/fping.conf - -# the fping binary to use -# we need one that can output netdata friendly info (supporting: -N) -# if you have multiple versions, put here the full filename of the right one -fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )" - -# a space separated list of hosts to fping -# we suggest to put names here and the IPs of these names in /etc/hosts -hosts="" - -# the time in milliseconds (1 sec = 1000 ms) -# to ping the hosts - by default 5 pings per host per iteration -ping_every="$((update_every * 1000 / 5))" - -# fping options -fping_opts="-R -b 56 -i 1 -r 0 -t 5000" - -# ----------------------------------------------------------------------------- -# load the configuration file - -if [ ! -f "${NETDATA_CONFIG_DIR}/${plugin}.conf" ] -then - fatal "configuration file '${NETDATA_CONFIG_DIR}/${plugin}.conf' not found - nothing to do." -fi - -source "${NETDATA_CONFIG_DIR}/${plugin}.conf" - -if [ -z "${hosts}" ] -then - fatal "no hosts configured in '${NETDATA_CONFIG_DIR}/${plugin}.conf' - nothing to do." -fi - -if [ -z "${fping}" -o ! -x "${fping}" ] -then - fatal "command '${fping}' is not found or is not executable - cannot proceed." -fi - -if [ ${ping_every} -lt 20 ] - then - warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms." - ping_every=20 -fi - -# the fping options we will use -options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} ) - -# execute fping -info "starting fping: ${fping} ${options[*]}" -exec "${fping}" "${options[@]}" - -# if we cannot execute fping, stop -fatal "command '${fping} ${options[@]}' failed to be executed." 
diff --git a/plugins.d/loopsleepms.sh.inc b/plugins.d/loopsleepms.sh.inc deleted file mode 100644 index ef3db192d..000000000 --- a/plugins.d/loopsleepms.sh.inc +++ /dev/null @@ -1,189 +0,0 @@ -# no need for shebang - this file is included from other scripts - -LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)" -if [ -z "$LOOPSLEEP_DATE" ] - then - echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path." - exit 1 -fi - -# ----------------------------------------------------------------------------- -# use the date command as a high resolution timer - -now_ms= -LOOPSLEEPMS_HIGHRES=1 -test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0 -test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0 -current_time_ms_from_date() { - if [ $LOOPSLEEPMS_HIGHRES -eq 0 ] - then - now_ms="$($LOOPSLEEP_DATE +'%s')000" - else - now_ms="$(( $( $LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000' ) ))" - fi -} - -# ----------------------------------------------------------------------------- -# use /proc/uptime as a high resolution timer - -current_time_ms_from_date -current_time_ms_from_uptime_started="${now_ms}" -current_time_ms_from_uptime_last="${now_ms}" -current_time_ms_from_uptime_first=0 -current_time_ms_from_uptime() { - local up rest arr=() n - - read up rest </proc/uptime - if [ $? -ne 0 ] - then - echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()." - current_time_ms="current_time_ms_from_date" - current_time_ms_from_date - current_time_ms_accuracy=1 - return - fi - - arr=(${up//./ }) - - if [ ${#arr[1]} -lt 1 ] - then - n="${arr[0]}000" - elif [ ${#arr[1]} -lt 2 ] - then - n="${arr[0]}${arr[1]}00" - elif [ ${#arr[1]} -lt 3 ] - then - n="${arr[0]}${arr[1]}0" - else - n="${arr[0]}${arr[1]}" - fi - - now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n)) - - if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ] - then - echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()." - current_time_ms="current_time_ms_from_date" - current_time_ms_from_date - current_time_ms_accuracy=1 - fi - - current_time_ms_from_uptime_last="${now_ms}" -} -current_time_ms_from_uptime -current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))" -current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}" -current_time_ms="current_time_ms_from_uptime" -current_time_ms_accuracy=10 -if [ "${current_time_ms_from_uptime_first}" -eq 0 ] - then - echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()." - current_time_ms="current_time_ms_from_date" - current_time_ms_accuracy=1 -fi - -# ----------------------------------------------------------------------------- -# use read with timeout for sleep - -mysleep="mysleep_read" - -mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo" -[ ! -e "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}" -[ ! -e "${mysleep_fifo}" ] && mysleep="sleep" - -mysleep_read() { - read -t "${1}" <>"${mysleep_fifo}" - ret=$? - if [ $ret -le 128 ] - then - echo >&2 "$0: Cannot use read for sleeping (return code ${ret})." 
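- # 'read -t' returns a status greater than 128 on timeout, so anything
- # at or below 128 is a real failure; fall back to the external 'sleep'
- # command from now on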
- mysleep="sleep" - ${mysleep} "${1}" - fi -} - - -# ----------------------------------------------------------------------------- -# this function is used to sleep a fraction of a second -# it calculates the difference between every time is called -# and tries to align the sleep time to give you exactly the -# loop you need. - -LOOPSLEEPMS_LASTRUN=0 -LOOPSLEEPMS_NEXTRUN=0 -LOOPSLEEPMS_LASTSLEEP=0 -LOOPSLEEPMS_LASTWORK=0 - -loopsleepms() { - local tellwork=0 t="${1}" div s m now mstosleep - - if [ "${t}" = "tellwork" ] - then - tellwork=1 - shift - t="${1}" - fi - - # $t = the time in seconds to wait - - # if high resolution is not supported - # just sleep the time requested, in seconds - if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ] - then - sleep ${t} - return - fi - - # get the current time, in ms in ${now_ms} - ${current_time_ms} - - # calculate ms since last run - [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && \ - LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy)) - # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms" - - # remember this run - LOOPSLEEPMS_LASTRUN=${now_ms} - - # calculate the next run - LOOPSLEEPMS_NEXTRUN=$(( ( now_ms - ( now_ms % ( t * 1000 ) ) ) + ( t * 1000 ) )) - - # calculate ms to sleep - mstosleep=$(( LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy )) - # echo "# mstosleep is $mstosleep ms" - - # if we are too slow, sleep some time - test ${mstosleep} -lt 200 && mstosleep=200 - - s=$(( mstosleep / 1000 )) - m=$(( mstosleep - (s * 1000) )) - [ "${m}" -lt 100 ] && m="0${m}" - [ "${m}" -lt 10 ] && m="0${m}" - - test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms" - - # echo "# sleeping ${s}.${m}" - # echo - ${mysleep} ${s}.${m} - - # keep the values we need - # for our next run - LOOPSLEEPMS_LASTSLEEP=$mstosleep -} - -# test it -#while [ 1 ] -#do -# r=$(( (RANDOM * 2000 / 32767) )) -# s=$((r / 1000)) -# m=$((r - (s * 1000))) -# [ "${m}" -lt 100 ] && m="0${m}" -# [ "${m}" -lt 10 ] && m="0${m}" -# echo "${r} = ${s}.${m}" -# -# # the work -# ${mysleep} ${s}.${m} -# -# # the alignment loop -# loopsleepms tellwork 1 -#done diff --git a/plugins.d/node.d.plugin b/plugins.d/node.d.plugin deleted file mode 100755 index b16203912..000000000 --- a/plugins.d/node.d.plugin +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env bash -':' //; exec "$(command -v nodejs || command -v node || command -v js || echo "ERROR node.js IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" - -// shebang hack from: -// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang - -// Initially this is run as a shell script. -// Then, the second line, finds nodejs or node or js in the system path -// and executes it with the shell parameters. - -// netdata -// real-time performance and health monitoring, done right! 
-// (C) 2017 Costa Tsaousis <costa@tsaousis.gr> -// GPL v3+ - -// -------------------------------------------------------------------------------------------------------------------- - -'use strict'; - -// -------------------------------------------------------------------------------------------------------------------- -// get NETDATA environment variables - -var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname; -var NETDATA_CONFIG_DIR = process.env.NETDATA_CONFIG_DIR || __dirname + '/../../../../etc/netdata'; -var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1; -var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d'; - -// make sure the modules are found -process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules'); -process.mainModule.paths.unshift(NODE_D_DIR); - - -// -------------------------------------------------------------------------------------------------------------------- -// load required modules - -var fs = require('fs'); -var url = require('url'); -var util = require('util'); -var http = require('http'); -var path = require('path'); -var extend = require('extend'); -var netdata = require('netdata'); - - -// -------------------------------------------------------------------------------------------------------------------- -// configuration - -function pluginConfig(filename) { - var f = path.basename(filename); - - // node.d.plugin configuration - var m = f.match('.plugin' + '$'); - if(m !== null) - return netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf'; - - // node.d modules configuration - m = f.match('.node.js' + '$'); - if(m !== null) - return netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf'; - - return netdata.options.paths.config + '/node.d/' + f + '.conf'; -} - -// internal defaults -extend(true, netdata.options, { - filename: path.basename(__filename), - - update_every: NETDATA_UPDATE_EVERY, - - paths: { - plugins: NETDATA_PLUGINS_DIR, - config: NETDATA_CONFIG_DIR, - modules: [], - }, - - modules_enable_autodetect: true, - modules_enable_all: true, - modules: {}, -}); -netdata.options.config_filename = pluginConfig(__filename); - -// load configuration file -try { - netdata.options_loaded = JSON.parse(fs.readFileSync(netdata.options.config_filename, 'utf8')); - extend(true, netdata.options, netdata.options_loaded); - - if(!netdata.options.paths.plugins) - netdata.options.paths.plugins = NETDATA_PLUGINS_DIR; - - if(!netdata.options.paths.config) - netdata.options.paths.config = NETDATA_CONFIG_DIR; - - // console.error('merged netdata object:'); - // console.error(util.inspect(netdata, {depth: 10})); -} -catch(e) { - netdata.error('Cannot read configuration file ' + netdata.options.config_filename + ': ' + e.message + ', using internal defaults.'); - netdata.options_loaded = undefined; - dumpError(e); -} - - -// apply module paths to node.js process -function applyModulePaths() { - var len = netdata.options.paths.modules.length; - while(len--) - process.mainModule.paths.unshift(netdata.options.paths.modules[len]); -} -applyModulePaths(); - - -// -------------------------------------------------------------------------------------------------------------------- -// tracing - -function dumpError(err) { - if (typeof err === 'object') { - if (err.stack) { - netdata.debug(err.stack); - } - } -} - -// -------------------------------------------------------------------------------------------------------------------- -// get command line arguments -{ - var found_myself = false; - var 
found_number = false; - var found_modules = false; - process.argv.forEach(function (val, index, array) { - netdata.debug('PARAM: ' + val); - - if(!found_myself) { - if(val === __filename) - found_myself = true; - } - else { - switch(val) { - case 'debug': - netdata.options.DEBUG = true; - netdata.debug('DEBUG enabled'); - break; - - default: - if(found_number === true) { - if(found_modules === false) { - for(var i in netdata.options.modules) - netdata.options.modules[i].enabled = false; - } - - if(typeof netdata.options.modules[val] === 'undefined') - netdata.options.modules[val] = {}; - - netdata.options.modules[val].enabled = true; - netdata.options.modules_enable_all = false; - netdata.debug('enabled module ' + val); - } - else { - try { - var x = parseInt(val); - if(x > 0) { - netdata.options.update_every = x; - if(netdata.options.update_every < NETDATA_UPDATE_EVERY) { - netdata.options.update_every = NETDATA_UPDATE_EVERY; - netdata.debug('Update frequency ' + x + 's is too low'); - } - - found_number = true; - netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds'); - } - else netdata.error('Ignoring parameter: ' + val); - } - catch(e) { - netdata.error('Cannot get value of parameter: ' + val); - dumpError(e); - } - } - break; - } - } - }); -} - -if(netdata.options.update_every < 1) { - netdata.debug('Adjusting update frequency to 1 second'); - netdata.options.update_every = 1; -} - -// -------------------------------------------------------------------------------------------------------------------- -// find modules - -function findModules() { - var found = 0; - - var files = fs.readdirSync(NODE_D_DIR); - var len = files.length; - while(len--) { - var m = files[len].match('.node.js' + '$'); - if(m !== null) { - var n = files[len].substring(0, m.index); - - if(typeof(netdata.options.modules[n]) === 'undefined') - netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all }; - - if(netdata.options.modules[n].enabled === true) { - netdata.options.modules[n].name = n; - netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len]; - netdata.options.modules[n].loaded = false; - - if(typeof(netdata.options.modules[n].config_filename) !== 'string') - netdata.options.modules[n].config_filename = pluginConfig(files[len]); - - // load the module - try { - netdata.debug('loading module ' + netdata.options.modules[n].filename); - netdata.options.modules[n].module = require(netdata.options.modules[n].filename); - netdata.options.modules[n].module.name = n; - netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename); - } - catch(e) { - netdata.options.modules[n].enabled = false; - netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e); - dumpError(e); - continue; - } - - // load its configuration - var c = { - enable_autodetect: netdata.options.modules_enable_autodetect, - update_every: netdata.options.update_every - }; - try { - netdata.debug('loading module\'s ' + netdata.options.modules[n].name + ' config ' + netdata.options.modules[n].config_filename); - var c2 = JSON.parse(fs.readFileSync(netdata.options.modules[n].config_filename, 'utf8')); - extend(true, c, c2); - netdata.debug('loaded module\'s ' + netdata.options.modules[n].name + ' config ' + netdata.options.modules[n].config_filename); - } - catch(e) { - netdata.error('Cannot load module\'s ' + netdata.options.modules[n].name + ' config from ' + 
netdata.options.modules[n].config_filename + ' exception: ' + e + ', using internal defaults.'); - dumpError(e); - } - - // call module auto-detection / configuration - try { - netdata.modules_configuring++; - netdata.debug('Configuring module ' + netdata.options.modules[n].name); - var serv = netdata.configure(netdata.options.modules[n].module, c, function() { - netdata.debug('Configured module ' + netdata.options.modules[n].name); - netdata.modules_configuring--; - }); - - netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.'); - } - catch(e) { - netdata.modules_configuring--; - netdata.options.modules[n].enabled = false; - netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.'); - dumpError(e); - continue; - } - - netdata.options.modules[n].loaded = true; - found++; - } - } - } - - // netdata.debug(netdata.options.modules); - return found; -} - -if(findModules() === 0) { - netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR); - netdata.disableNodePlugin(); - process.exit(1); -} - - -// -------------------------------------------------------------------------------------------------------------------- -// start - -function start_when_configuring_ends() { - if(netdata.modules_configuring > 0) { - netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring); - setTimeout(start_when_configuring_ends, 500); - return; - } - - netdata.modules_configuring = 0; - netdata.start(); -} -start_when_configuring_ends(); - -//netdata.debug('netdata object:') -//netdata.debug(netdata); diff --git a/plugins.d/python.d.plugin b/plugins.d/python.d.plugin deleted file mode 100755 index c9b260164..000000000 --- a/plugins.d/python.d.plugin +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env bash -'''':; exec "$(command -v python || command -v python3 || command -v python2 || -echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # ''' - -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) -# Author: Ilya Mashchenko (l2isbad) - -import os -import sys -import threading - -from re import sub -from sys import version_info, argv -from time import sleep - -try: - from time import monotonic as time -except ImportError: - from time import time - -PY_VERSION = version_info[:2] -PLUGIN_CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', os.path.dirname(__file__) + '/../../../../etc/netdata') + '/' -CHARTS_PY_DIR = os.path.abspath(os.getenv('NETDATA_PLUGINS_DIR', os.path.dirname(__file__)) + '/../python.d') + '/' -CHARTS_PY_CONFIG_DIR = PLUGIN_CONFIG_DIR + 'python.d/' -PYTHON_MODULES_DIR = CHARTS_PY_DIR + 'python_modules' - -sys.path.append(PYTHON_MODULES_DIR) - -from bases.loaders import ModuleAndConfigLoader -from bases.loggers import PythonDLogger -from bases.collection import setdefault_values, run_and_exit - -try: - from collections import OrderedDict -except ImportError: - from third_party.ordereddict import OrderedDict - -BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1), - 'retries': 60, - 'priority': 60000, - 'autodetection_retry': 0, - 'chart_cleanup': 10, - 'name': str()} - - -MODULE_EXTENSION = '.chart.py' -OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log'] - - -def module_ok(m): - return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES - - -ALL_MODULES = [m for m in sorted(os.listdir(CHARTS_PY_DIR)) if module_ok(m)] - - -def parse_cmd(): - 
debug = 'debug' in argv[1:] - trace = 'trace' in argv[1:] - override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False) - modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES] - return debug, trace, override_update_every, modules or ALL_MODULES - - -def multi_job_check(config): - return next((True for key in config if isinstance(config[key], dict)), False) - - -class Job(object): - def __init__(self, initialized_job, job_id): - """ - :param initialized_job: instance of <Class Service> - :param job_id: <str> - """ - self.job = initialized_job - self.id = job_id # key in Modules.jobs() - self.module_name = self.job.__module__ # used in Plugin.delete_job() - self.recheck_every = self.job.configuration.pop('autodetection_retry') - self.checked = False # used in Plugin.check_job() - self.created = False # used in Plugin.create_job_charts() - if OVERRIDE_UPDATE_EVERY: - self.job.update_every = int(OVERRIDE_UPDATE_EVERY) - - def __getattr__(self, item): - return getattr(self.job, item) - - def __repr__(self): - return self.job.__repr__() - - def is_dead(self): - return bool(self.ident) and not self.is_alive() - - def not_launched(self): - return not bool(self.ident) - - def is_autodetect(self): - return self.recheck_every - - -class Module(object): - def __init__(self, service, config): - """ - :param service: <Module> - :param config: <dict> - """ - self.service = service - self.name = service.__name__ - self.config = self.jobs_configurations_builder(config) - self.jobs = OrderedDict() - self.counter = 1 - - self.initialize_jobs() - - def __repr__(self): - return "<Class Module '{name}'>".format(name=self.name) - - def __iter__(self): - return iter(OrderedDict(self.jobs).values()) - - def __getitem__(self, item): - return self.jobs[item] - - def __delitem__(self, key): - del self.jobs[key] - - def __len__(self): - return len(self.jobs) - - def __bool__(self): - return bool(self.jobs) - - def __nonzero__(self): - return self.__bool__() - - def jobs_configurations_builder(self, config): - """ - :param config: <dict> - :return: - """ - counter = 0 - job_base_config = dict() - - for attr in BASE_CONFIG: - job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr])) - - if not config: - config = {str(): dict()} - elif not multi_job_check(config): - config = {str(): config} - - for job_name in config: - if not isinstance(config[job_name], dict): - continue - - job_config = setdefault_values(config[job_name], base_dict=job_base_config) - job_name = sub(r'\s+', '_', job_name) - config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name']) - counter += 1 - job_id = 'job' + str(counter).zfill(3) - - yield job_id, job_name, job_config - - def initialize_jobs(self): - """ - :return: - """ - for job_id, job_name, job_config in self.config: - job_config['job_name'] = job_name - job_config['override_name'] = job_config.pop('name') - - try: - initialized_job = self.service.Service(configuration=job_config) - except Exception as error: - Logger.error("job initialization: '{module_name} {job_name}' " - "=> ['FAILED'] ({error})".format(module_name=self.name, - job_name=job_name, - error=error)) - continue - else: - Logger.debug("job initialization: '{module_name} {job_name}' " - "=> ['OK']".format(module_name=self.name, - job_name=job_name or self.name)) - self.jobs[job_id] = Job(initialized_job=initialized_job, - job_id=job_id) - del self.config - del self.service - - -class 
Plugin(object): - def __init__(self): - self.loader = ModuleAndConfigLoader() - self.modules = OrderedDict() - self.sleep_time = 1 - self.runs_counter = 0 - self.config, error = self.loader.load_config_from_file(PLUGIN_CONFIG_DIR + 'python.d.conf') - if error: - Logger.error('"python.d.conf" configuration file not found. Using defaults.') - - if not self.config.get('enabled', True): - run_and_exit(Logger.info)('DISABLED in configuration file.') - - self.load_and_initialize_modules() - if not self.modules: - run_and_exit(Logger.info)('No modules to run. Exit...') - - def __iter__(self): - return iter(OrderedDict(self.modules).values()) - - @property - def jobs(self): - return (job for mod in self for job in mod) - - @property - def dead_jobs(self): - return (job for job in self.jobs if job.is_dead()) - - @property - def autodetect_jobs(self): - return [job for job in self.jobs if job.not_launched()] - - def enabled_modules(self): - for mod in MODULES_TO_RUN: - mod_name = mod[:-len(MODULE_EXTENSION)] - mod_path = CHARTS_PY_DIR + mod - conf_path = ''.join([CHARTS_PY_CONFIG_DIR, mod_name, '.conf']) - - if DEBUG: - yield mod, mod_name, mod_path, conf_path - else: - if all([self.config.get('default_run', True), - self.config.get(mod_name, True)]): - yield mod, mod_name, mod_path, conf_path - - elif all([not self.config.get('default_run'), - self.config.get(mod_name)]): - yield mod, mod_name, mod_path, conf_path - - def load_and_initialize_modules(self): - for mod, mod_name, mod_path, conf_path in self.enabled_modules(): - - # Load module from file ------------------------------------------------------------ - loaded_module, error = self.loader.load_module_from_file(mod_name, mod_path) - log = Logger.error if error else Logger.debug - log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK', - module_name=mod_name)) - if error: - Logger.error("load source error : {0}".format(error)) - continue - - # Load module config from file ------------------------------------------------------ - loaded_config, error = self.loader.load_config_from_file(conf_path) - log = Logger.error if error else Logger.debug - log("module load config: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK', - module_name=mod_name)) - if error: - Logger.error('load config error : {0}'.format(error)) - - # Service instance initialization --------------------------------------------------- - initialized_module = Module(service=loaded_module, config=loaded_config) - Logger.debug("module status: '{module_name}' => [{status}] " - "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED', - module_name=initialized_module.name, - jobs_number=len(initialized_module))) - - if initialized_module: - self.modules[initialized_module.name] = initialized_module - - @staticmethod - def check_job(job): - """ - :param job: <Job> - :return: - """ - try: - check_ok = bool(job.check()) - except Exception as error: - job.error('check() unhandled exception: {error}'.format(error=error)) - return None - else: - return check_ok - - @staticmethod - def create_job_charts(job): - """ - :param job: <Job> - :return: - """ - try: - create_ok = job.create() - except Exception as error: - job.error('create() unhandled exception: {error}'.format(error=error)) - return False - else: - return create_ok - - def delete_job(self, job): - """ - :param job: <Job> - :return: - """ - del self.modules[job.module_name][job.id] - - def run_check(self): - checked = list() - for job in 
self.jobs: - if job.name in checked: - job.info('check() => [DROPPED] (already served by another job)') - self.delete_job(job) - continue - ok = self.check_job(job) - if ok: - job.info('check() => [OK]') - checked.append(job.name) - job.checked = True - continue - if not job.is_autodetect() or ok is None: - job.info('check() => [FAILED]') - self.delete_job(job) - else: - job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every)) - - def run_create(self): - for job in self.jobs: - if not job.checked: - # skip autodetection_retry jobs - continue - ok = self.create_job_charts(job) - if ok: - job.debug('create() => [OK] (charts: {0})'.format(len(job.charts))) - job.created = True - continue - job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts))) - self.delete_job(job) - - def start(self): - self.run_check() - self.run_create() - for job in self.jobs: - if job.created: - job.start() - - while True: - if threading.active_count() <= 1 and not self.autodetect_jobs: - run_and_exit(Logger.info)('FINISHED') - - sleep(self.sleep_time) - self.cleanup() - self.autodetect_retry() - - def cleanup(self): - for job in self.dead_jobs: - self.delete_job(job) - for mod in self: - if not mod: - del self.modules[mod.name] - - def autodetect_retry(self): - self.runs_counter += self.sleep_time - for job in self.autodetect_jobs: - if self.runs_counter % job.recheck_every == 0: - checked = self.check_job(job) - if checked: - created = self.create_job_charts(job) - if not created: - self.delete_job(job) - continue - job.start() - - -if __name__ == '__main__': - DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd() - Logger = PythonDLogger() - if DEBUG: - Logger.logger.severity = 'DEBUG' - if TRACE: - Logger.log_traceback = True - Logger.info('Using python {version}'.format(version=PY_VERSION[0])) - - plugin = Plugin() - plugin.start() diff --git a/plugins.d/tc-qos-helper.sh b/plugins.d/tc-qos-helper.sh deleted file mode 100755 index 9153f22e2..000000000 --- a/plugins.d/tc-qos-helper.sh +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# -# This script is a helper to allow netdata collect tc data. -# tc output parsing has been implemented in C, inside netdata -# This script allows setting names to dimensions. - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - - -# ----------------------------------------------------------------------------- -# find /var/run/fireqos - -# the default -fireqos_run_dir="/var/run/fireqos" - -function realdir { - local r="$1" - local t=$(readlink "$r") - - while [ "$t" ] - do - r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t") - t=$(readlink "$r") - done - - dirname "$r" -} - -if [ ! -d "${fireqos_run_dir}" ] - then - - # the fireqos executable - we will use it to find its config - fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)" - - if [ ! -z "${fireqos}" ] - then - - fireqos_exec_dir="$(realdir ${fireqos})" - - if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." 
-a -f "${fireqos_exec_dir}/install.config" ] - then - - LOCALSTATEDIR= - source "${fireqos_exec_dir}/install.config" - - if [ -d "${LOCALSTATEDIR}/run/fireqos" ] - then - fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos" - fi - fi - fi -fi - -# ----------------------------------------------------------------------------- -# logging functions - -PROGRAM_FILE="$0" -PROGRAM_NAME="$(basename $0)" -PROGRAM_NAME="${PROGRAM_NAME/.plugin}" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - - -# ----------------------------------------------------------------------------- - -[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" -[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata" - -plugins_dir="${NETDATA_PLUGINS_DIR}" -config_dir="${NETDATA_CONFIG_DIR}" -tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)" - - -# ----------------------------------------------------------------------------- -# user configuration - -# time in seconds to refresh QoS class/qdisc names -qos_get_class_names_every=120 - -# time in seconds to exit - netdata will restart the script -qos_exit_every=3600 - -# what to use? classes or qdiscs? -tc_show="qdisc" # can also be "class" - - -# ----------------------------------------------------------------------------- -# check if we have a valid number for interval - -t=${1} -update_every=$((t)) -[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY} -[ $((update_every)) -lt 1 ] && update_every=1 - - -# ----------------------------------------------------------------------------- -# allow the user to override our defaults - -if [ -f "${config_dir}/tc-qos-helper.conf" ] - then - source "${config_dir}/tc-qos-helper.conf" -fi - -case "${tc_show}" in - qdisc|class) - ;; - - *) - error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'." - tc_show="qdisc" - ;; -esac - - -# ----------------------------------------------------------------------------- -# default sleep function - -LOOPSLEEPMS_LASTWORK=0 -loopsleepms() { - sleep $1 -} - -# if found and included, this file overwrites loopsleepms() -# with a high resolution timer function for precise looping. -. "${plugins_dir}/loopsleepms.sh.inc" - - -# ----------------------------------------------------------------------------- -# final checks we can run - -if [ -z "${tc}" -o ! -x "${tc}" ] - then - fatal "cannot find command 'tc' in this system." 
-fi - -tc_devices= -fix_names= - -# ----------------------------------------------------------------------------- - -setclassname() { - if [ "${tc_show}" = "qdisc" ] - then - echo "SETCLASSNAME $4 $2" - else - echo "SETCLASSNAME $3 $2" - fi -} - -show_tc_cls() { - [ "${tc_show}" = "qdisc" ] && return 1 - - local x="${1}" - - if [ -f /etc/iproute2/tc_cls ] - then - local classid name rest - while read classid name rest - do - [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue - setclassname "" "${name}" "${classid}" - done </etc/iproute2/tc_cls - return 0 - fi - return 1 -} - -show_fireqos_names() { - local x="${1}" name n interface_dev interface_classes interface_classes_monitor - - if [ -f "${fireqos_run_dir}/ifaces/${x}" ] - then - name="$(<"${fireqos_run_dir}/ifaces/${x}")" - echo "SETDEVICENAME ${name}" - - interface_dev= - interface_classes= - interface_classes_monitor= - source "${fireqos_run_dir}/${name}.conf" - for n in ${interface_classes_monitor} - do - setclassname ${n//|/ } - done - [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}" - - return 0 - fi - - return 1 -} - -show_tc() { - local x="${1}" - - echo "BEGIN ${x}" - - # netdata can parse the output of tc - ${tc} -s ${tc_show} show dev ${x} - - # check FireQOS names for classes - if [ ! -z "${fix_names}" ] - then - show_fireqos_names "${x}" || show_tc_cls "${x}" - fi - - echo "END ${x}" -} - -find_tc_devices() { - local count=0 devs= dev rest l - - # find all the devices in the system - # without forking - while IFS=":| " read dev rest - do - count=$((count + 1)) - [ ${count} -le 2 ] && continue - devs="${devs} ${dev}" - done </proc/net/dev - - # from all the devices find the ones - # that have QoS defined - # unfortunately, one fork per device cannot be avoided - tc_devices= - for dev in ${devs} - do - l="$(${tc} class show dev ${dev} 2>/dev/null)" - [ ! 
-z "${l}" ] && tc_devices="${tc_devices} ${dev}" - done -} - -# update devices and class names -# once every 2 minutes -names_every=$((qos_get_class_names_every / update_every)) - -# exit this script every hour -# it will be restarted automatically -exit_after=$((qos_exit_every / update_every)) - -c=0 -gc=0 -while [ 1 ] -do - fix_names= - c=$((c + 1)) - gc=$((gc + 1)) - - if [ ${c} -le 1 -o ${c} -ge ${names_every} ] - then - c=1 - fix_names="YES" - find_tc_devices - fi - - for d in ${tc_devices} - do - show_tc ${d} - done - - echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" - - loopsleepms ${update_every} - - [ ${gc} -gt ${exit_after} ] && exit 0 -done diff --git a/python.d/Makefile.am b/python.d/Makefile.am deleted file mode 100644 index a5fcc7394..000000000 --- a/python.d/Makefile.am +++ /dev/null @@ -1,204 +0,0 @@ -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in -CLEANFILES = \ - python-modules-installer.sh \ - $(NULL) - -include $(top_srcdir)/build/subst.inc - -SUFFIXES = .in - -dist_python_SCRIPTS = \ - python-modules-installer.sh \ - $(NULL) - -dist_python_DATA = \ - README.md \ - apache.chart.py \ - beanstalk.chart.py \ - bind_rndc.chart.py \ - ceph.chart.py \ - chrony.chart.py \ - couchdb.chart.py \ - cpufreq.chart.py \ - cpuidle.chart.py \ - dns_query_time.chart.py \ - dnsdist.chart.py \ - dovecot.chart.py \ - elasticsearch.chart.py \ - example.chart.py \ - exim.chart.py \ - fail2ban.chart.py \ - freeradius.chart.py \ - go_expvar.chart.py \ - haproxy.chart.py \ - hddtemp.chart.py \ - httpcheck.chart.py \ - icecast.chart.py \ - ipfs.chart.py \ - isc_dhcpd.chart.py \ - mdstat.chart.py \ - memcached.chart.py \ - mongodb.chart.py \ - mysql.chart.py \ - nginx.chart.py \ - nginx_plus.chart.py \ - nsd.chart.py \ - ntpd.chart.py \ - ovpn_status_log.chart.py \ - phpfpm.chart.py \ - portcheck.chart.py \ - postfix.chart.py \ - postgres.chart.py \ - powerdns.chart.py \ - rabbitmq.chart.py \ - redis.chart.py \ - retroshare.chart.py \ - samba.chart.py \ - sensors.chart.py \ - springboot.chart.py \ - squid.chart.py \ - smartd_log.chart.py \ - tomcat.chart.py \ - traefik.chart.py \ - varnish.chart.py \ - web_log.chart.py \ - $(NULL) - -pythonmodulesdir=$(pythondir)/python_modules -dist_pythonmodules_DATA = \ - python_modules/__init__.py \ - python_modules/base.py \ - $(NULL) - -basesdir=$(pythonmodulesdir)/bases -dist_bases_DATA = \ - python_modules/bases/__init__.py \ - python_modules/bases/charts.py \ - python_modules/bases/collection.py \ - python_modules/bases/loaders.py \ - python_modules/bases/loggers.py \ - $(NULL) - -bases_framework_servicesdir=$(basesdir)/FrameworkServices -dist_bases_framework_services_DATA = \ - python_modules/bases/FrameworkServices/__init__.py \ - python_modules/bases/FrameworkServices/ExecutableService.py \ - python_modules/bases/FrameworkServices/LogService.py \ - python_modules/bases/FrameworkServices/MySQLService.py \ - python_modules/bases/FrameworkServices/SimpleService.py \ - python_modules/bases/FrameworkServices/SocketService.py \ - python_modules/bases/FrameworkServices/UrlService.py \ - $(NULL) - -third_partydir=$(pythonmodulesdir)/third_party -dist_third_party_DATA = \ - python_modules/third_party/__init__.py \ - python_modules/third_party/ordereddict.py \ - python_modules/third_party/lm_sensors.py \ - $(NULL) - -pythonyaml2dir=$(pythonmodulesdir)/pyyaml2 -dist_pythonyaml2_DATA = \ - python_modules/pyyaml2/__init__.py \ - python_modules/pyyaml2/composer.py \ - python_modules/pyyaml2/constructor.py \ - python_modules/pyyaml2/cyaml.py \ - 
python_modules/pyyaml2/dumper.py \ - python_modules/pyyaml2/emitter.py \ - python_modules/pyyaml2/error.py \ - python_modules/pyyaml2/events.py \ - python_modules/pyyaml2/loader.py \ - python_modules/pyyaml2/nodes.py \ - python_modules/pyyaml2/parser.py \ - python_modules/pyyaml2/reader.py \ - python_modules/pyyaml2/representer.py \ - python_modules/pyyaml2/resolver.py \ - python_modules/pyyaml2/scanner.py \ - python_modules/pyyaml2/serializer.py \ - python_modules/pyyaml2/tokens.py \ - $(NULL) - -pythonyaml3dir=$(pythonmodulesdir)/pyyaml3 -dist_pythonyaml3_DATA = \ - python_modules/pyyaml3/__init__.py \ - python_modules/pyyaml3/composer.py \ - python_modules/pyyaml3/constructor.py \ - python_modules/pyyaml3/cyaml.py \ - python_modules/pyyaml3/dumper.py \ - python_modules/pyyaml3/emitter.py \ - python_modules/pyyaml3/error.py \ - python_modules/pyyaml3/events.py \ - python_modules/pyyaml3/loader.py \ - python_modules/pyyaml3/nodes.py \ - python_modules/pyyaml3/parser.py \ - python_modules/pyyaml3/reader.py \ - python_modules/pyyaml3/representer.py \ - python_modules/pyyaml3/resolver.py \ - python_modules/pyyaml3/scanner.py \ - python_modules/pyyaml3/serializer.py \ - python_modules/pyyaml3/tokens.py \ - $(NULL) - -python_urllib3dir=$(pythonmodulesdir)/urllib3 -dist_python_urllib3_DATA = \ - python_modules/urllib3/__init__.py \ - python_modules/urllib3/_collections.py \ - python_modules/urllib3/connection.py \ - python_modules/urllib3/connectionpool.py \ - python_modules/urllib3/exceptions.py \ - python_modules/urllib3/fields.py \ - python_modules/urllib3/filepost.py \ - python_modules/urllib3/response.py \ - python_modules/urllib3/poolmanager.py \ - python_modules/urllib3/request.py \ - $(NULL) - -python_urllib3_utildir=$(python_urllib3dir)/util -dist_python_urllib3_util_DATA = \ - python_modules/urllib3/util/__init__.py \ - python_modules/urllib3/util/connection.py \ - python_modules/urllib3/util/request.py \ - python_modules/urllib3/util/response.py \ - python_modules/urllib3/util/retry.py \ - python_modules/urllib3/util/selectors.py \ - python_modules/urllib3/util/ssl_.py \ - python_modules/urllib3/util/timeout.py \ - python_modules/urllib3/util/url.py \ - python_modules/urllib3/util/wait.py \ - $(NULL) - -python_urllib3_packagesdir=$(python_urllib3dir)/packages -dist_python_urllib3_packages_DATA = \ - python_modules/urllib3/packages/__init__.py \ - python_modules/urllib3/packages/ordered_dict.py \ - python_modules/urllib3/packages/six.py \ - $(NULL) - -python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports -dist_python_urllib3_backports_DATA = \ - python_modules/urllib3/packages/backports/__init__.py \ - python_modules/urllib3/packages/backports/makefile.py \ - $(NULL) - -python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname -dist_python_urllib3_ssl_match_hostname_DATA = \ - python_modules/urllib3/packages/ssl_match_hostname/__init__.py \ - python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \ - $(NULL) - -python_urllib3_contribdir=$(python_urllib3dir)/contrib -dist_python_urllib3_contrib_DATA = \ - python_modules/urllib3/contrib/__init__.py \ - python_modules/urllib3/contrib/appengine.py \ - python_modules/urllib3/contrib/ntlmpool.py \ - python_modules/urllib3/contrib/pyopenssl.py \ - python_modules/urllib3/contrib/securetransport.py \ - python_modules/urllib3/contrib/socks.py \ - $(NULL) - -python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport -dist_python_urllib3_securetransport_DATA = \ - 
python_modules/urllib3/contrib/_securetransport/__init__.py \ - python_modules/urllib3/contrib/_securetransport/bindings.py \ - python_modules/urllib3/contrib/_securetransport/low_level.py \ - $(NULL) diff --git a/python.d/Makefile.in b/python.d/Makefile.in deleted file mode 100644 index d6e11d0cf..000000000 --- a/python.d/Makefile.in +++ /dev/null @@ -1,1107 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \ - $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \ - $(dist_python_DATA) $(dist_python_urllib3_DATA) \ - $(dist_python_urllib3_backports_DATA) \ - $(dist_python_urllib3_contrib_DATA) \ - $(dist_python_urllib3_packages_DATA) \ - $(dist_python_urllib3_securetransport_DATA) \ - $(dist_python_urllib3_ssl_match_hostname_DATA) \ - $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \ - $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \ - $(dist_third_party_DATA) -subdir = python.d -ACLOCAL_M4 = 
$(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" \ - "$(DESTDIR)$(bases_framework_servicesdir)" \ - "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" \ - "$(DESTDIR)$(python_urllib3_backportsdir)" \ - "$(DESTDIR)$(python_urllib3_contribdir)" \ - "$(DESTDIR)$(python_urllib3_packagesdir)" \ - "$(DESTDIR)$(python_urllib3_securetransportdir)" \ - "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \ - "$(DESTDIR)$(python_urllib3_utildir)" \ - "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \ - "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" -SCRIPTS = $(dist_python_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \ - $(dist_python_DATA) $(dist_python_urllib3_DATA) \ - $(dist_python_urllib3_backports_DATA) \ - $(dist_python_urllib3_contrib_DATA) \ - $(dist_python_urllib3_packages_DATA) \ - $(dist_python_urllib3_securetransport_DATA) \ - $(dist_python_urllib3_ssl_match_hostname_DATA) \ - $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \ - $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \ - $(dist_third_party_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) 
$(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = 
@localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - python-modules-installer.sh \ - $(NULL) - -SUFFIXES = .in -dist_python_SCRIPTS = \ - python-modules-installer.sh \ - $(NULL) - -dist_python_DATA = \ - README.md \ - apache.chart.py \ - beanstalk.chart.py \ - bind_rndc.chart.py \ - ceph.chart.py \ - chrony.chart.py \ - couchdb.chart.py \ - cpufreq.chart.py \ - cpuidle.chart.py \ - dns_query_time.chart.py \ - dnsdist.chart.py \ - dovecot.chart.py \ - elasticsearch.chart.py \ - example.chart.py \ - exim.chart.py \ - fail2ban.chart.py \ - freeradius.chart.py \ - go_expvar.chart.py \ - haproxy.chart.py \ - hddtemp.chart.py \ - httpcheck.chart.py \ - icecast.chart.py \ - ipfs.chart.py \ - isc_dhcpd.chart.py \ - mdstat.chart.py \ - memcached.chart.py \ - mongodb.chart.py \ - mysql.chart.py \ - nginx.chart.py \ - nginx_plus.chart.py \ - nsd.chart.py \ - ntpd.chart.py \ - ovpn_status_log.chart.py \ - phpfpm.chart.py \ - portcheck.chart.py \ - postfix.chart.py \ - postgres.chart.py \ - powerdns.chart.py \ - rabbitmq.chart.py \ - redis.chart.py \ - retroshare.chart.py \ - samba.chart.py \ - sensors.chart.py \ - springboot.chart.py \ - squid.chart.py \ - smartd_log.chart.py \ - tomcat.chart.py \ - traefik.chart.py \ - varnish.chart.py \ - web_log.chart.py \ - $(NULL) - -pythonmodulesdir = $(pythondir)/python_modules -dist_pythonmodules_DATA = \ - python_modules/__init__.py \ - python_modules/base.py \ - $(NULL) - -basesdir = $(pythonmodulesdir)/bases -dist_bases_DATA = \ - python_modules/bases/__init__.py \ - python_modules/bases/charts.py \ - python_modules/bases/collection.py \ - python_modules/bases/loaders.py \ - python_modules/bases/loggers.py \ - $(NULL) - -bases_framework_servicesdir = $(basesdir)/FrameworkServices -dist_bases_framework_services_DATA = \ - python_modules/bases/FrameworkServices/__init__.py \ - python_modules/bases/FrameworkServices/ExecutableService.py \ - python_modules/bases/FrameworkServices/LogService.py \ - python_modules/bases/FrameworkServices/MySQLService.py \ - python_modules/bases/FrameworkServices/SimpleService.py \ - python_modules/bases/FrameworkServices/SocketService.py \ - python_modules/bases/FrameworkServices/UrlService.py \ - $(NULL) - -third_partydir = $(pythonmodulesdir)/third_party -dist_third_party_DATA = \ - python_modules/third_party/__init__.py \ - python_modules/third_party/ordereddict.py \ - python_modules/third_party/lm_sensors.py \ - $(NULL) - -pythonyaml2dir = $(pythonmodulesdir)/pyyaml2 -dist_pythonyaml2_DATA = \ - python_modules/pyyaml2/__init__.py \ - python_modules/pyyaml2/composer.py \ - python_modules/pyyaml2/constructor.py \ - python_modules/pyyaml2/cyaml.py \ - python_modules/pyyaml2/dumper.py \ - python_modules/pyyaml2/emitter.py \ - python_modules/pyyaml2/error.py \ - python_modules/pyyaml2/events.py \ - python_modules/pyyaml2/loader.py \ - python_modules/pyyaml2/nodes.py \ - python_modules/pyyaml2/parser.py \ - python_modules/pyyaml2/reader.py \ 
- python_modules/pyyaml2/representer.py \ - python_modules/pyyaml2/resolver.py \ - python_modules/pyyaml2/scanner.py \ - python_modules/pyyaml2/serializer.py \ - python_modules/pyyaml2/tokens.py \ - $(NULL) - -pythonyaml3dir = $(pythonmodulesdir)/pyyaml3 -dist_pythonyaml3_DATA = \ - python_modules/pyyaml3/__init__.py \ - python_modules/pyyaml3/composer.py \ - python_modules/pyyaml3/constructor.py \ - python_modules/pyyaml3/cyaml.py \ - python_modules/pyyaml3/dumper.py \ - python_modules/pyyaml3/emitter.py \ - python_modules/pyyaml3/error.py \ - python_modules/pyyaml3/events.py \ - python_modules/pyyaml3/loader.py \ - python_modules/pyyaml3/nodes.py \ - python_modules/pyyaml3/parser.py \ - python_modules/pyyaml3/reader.py \ - python_modules/pyyaml3/representer.py \ - python_modules/pyyaml3/resolver.py \ - python_modules/pyyaml3/scanner.py \ - python_modules/pyyaml3/serializer.py \ - python_modules/pyyaml3/tokens.py \ - $(NULL) - -python_urllib3dir = $(pythonmodulesdir)/urllib3 -dist_python_urllib3_DATA = \ - python_modules/urllib3/__init__.py \ - python_modules/urllib3/_collections.py \ - python_modules/urllib3/connection.py \ - python_modules/urllib3/connectionpool.py \ - python_modules/urllib3/exceptions.py \ - python_modules/urllib3/fields.py \ - python_modules/urllib3/filepost.py \ - python_modules/urllib3/response.py \ - python_modules/urllib3/poolmanager.py \ - python_modules/urllib3/request.py \ - $(NULL) - -python_urllib3_utildir = $(python_urllib3dir)/util -dist_python_urllib3_util_DATA = \ - python_modules/urllib3/util/__init__.py \ - python_modules/urllib3/util/connection.py \ - python_modules/urllib3/util/request.py \ - python_modules/urllib3/util/response.py \ - python_modules/urllib3/util/retry.py \ - python_modules/urllib3/util/selectors.py \ - python_modules/urllib3/util/ssl_.py \ - python_modules/urllib3/util/timeout.py \ - python_modules/urllib3/util/url.py \ - python_modules/urllib3/util/wait.py \ - $(NULL) - -python_urllib3_packagesdir = $(python_urllib3dir)/packages -dist_python_urllib3_packages_DATA = \ - python_modules/urllib3/packages/__init__.py \ - python_modules/urllib3/packages/ordered_dict.py \ - python_modules/urllib3/packages/six.py \ - $(NULL) - -python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports -dist_python_urllib3_backports_DATA = \ - python_modules/urllib3/packages/backports/__init__.py \ - python_modules/urllib3/packages/backports/makefile.py \ - $(NULL) - -python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname -dist_python_urllib3_ssl_match_hostname_DATA = \ - python_modules/urllib3/packages/ssl_match_hostname/__init__.py \ - python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \ - $(NULL) - -python_urllib3_contribdir = $(python_urllib3dir)/contrib -dist_python_urllib3_contrib_DATA = \ - python_modules/urllib3/contrib/__init__.py \ - python_modules/urllib3/contrib/appengine.py \ - python_modules/urllib3/contrib/ntlmpool.py \ - python_modules/urllib3/contrib/pyopenssl.py \ - python_modules/urllib3/contrib/securetransport.py \ - python_modules/urllib3/contrib/socks.py \ - $(NULL) - -python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport -dist_python_urllib3_securetransport_DATA = \ - python_modules/urllib3/contrib/_securetransport/__init__.py \ - python_modules/urllib3/contrib/_securetransport/bindings.py \ - python_modules/urllib3/contrib/_securetransport/low_level.py \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: 
@MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu python.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu python.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pythonSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir) -install-dist_basesDATA: $(dist_bases_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \ - done - -uninstall-dist_basesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(basesdir)'; 
$(am__uninstall_files_from_dir) -install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \ - done - -uninstall-dist_bases_framework_servicesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonDATA: $(dist_python_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \ - done - -uninstall-dist_pythonDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3DATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_backportsDATA: - @$(NORMAL_UNINSTALL) - 
@list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_contribDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_packagesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_securetransportDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n 
"$(python_urllib3_ssl_match_hostnamedir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_ssl_match_hostnameDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_utilDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir) -install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \ - done - -uninstall-dist_pythonmodulesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \ - done - -uninstall-dist_pythonyaml2DATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \ - 
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir) -install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \ - done - -uninstall-dist_pythonyaml3DATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir) -install-dist_third_partyDATA: $(dist_third_party_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \ - done - -uninstall-dist_third_partyDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_basesDATA \ - install-dist_bases_framework_servicesDATA \ - install-dist_pythonDATA install-dist_pythonSCRIPTS \ - install-dist_python_urllib3DATA \ - install-dist_python_urllib3_backportsDATA \ - install-dist_python_urllib3_contribDATA \ - install-dist_python_urllib3_packagesDATA \ - install-dist_python_urllib3_securetransportDATA \ - install-dist_python_urllib3_ssl_match_hostnameDATA \ - install-dist_python_urllib3_utilDATA \ - install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \ - install-dist_pythonyaml3DATA install-dist_third_partyDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_basesDATA \ - uninstall-dist_bases_framework_servicesDATA \ - uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \ - uninstall-dist_python_urllib3DATA \ - uninstall-dist_python_urllib3_backportsDATA \ - uninstall-dist_python_urllib3_contribDATA \ - uninstall-dist_python_urllib3_packagesDATA \ - uninstall-dist_python_urllib3_securetransportDATA \ - uninstall-dist_python_urllib3_ssl_match_hostnameDATA \ - uninstall-dist_python_urllib3_utilDATA \ - uninstall-dist_pythonmodulesDATA \ - uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \ - uninstall-dist_third_partyDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_basesDATA \ - install-dist_bases_framework_servicesDATA \ - install-dist_pythonDATA install-dist_pythonSCRIPTS \ - install-dist_python_urllib3DATA \ - install-dist_python_urllib3_backportsDATA \ - install-dist_python_urllib3_contribDATA \ - install-dist_python_urllib3_packagesDATA \ - install-dist_python_urllib3_securetransportDATA \ - install-dist_python_urllib3_ssl_match_hostnameDATA \ - install-dist_python_urllib3_utilDATA \ - install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \ - install-dist_pythonyaml3DATA install-dist_third_partyDATA \ - install-dvi install-dvi-am install-exec install-exec-am \ - install-html install-html-am install-info install-info-am \ - install-man install-pdf install-pdf-am install-ps \ - install-ps-am install-strip installcheck installcheck-am \ - installdirs maintainer-clean maintainer-clean-generic \ - mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \ - uninstall uninstall-am uninstall-dist_basesDATA \ - uninstall-dist_bases_framework_servicesDATA \ - uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \ - uninstall-dist_python_urllib3DATA \ - uninstall-dist_python_urllib3_backportsDATA \ - uninstall-dist_python_urllib3_contribDATA \ - 
uninstall-dist_python_urllib3_packagesDATA \ - uninstall-dist_python_urllib3_securetransportDATA \ - uninstall-dist_python_urllib3_ssl_match_hostnameDATA \ - uninstall-dist_python_urllib3_utilDATA \ - uninstall-dist_pythonmodulesDATA \ - uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \ - uninstall-dist_third_partyDATA - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/python.d/README.md b/python.d/README.md deleted file mode 100644 index faabba2c7..000000000 --- a/python.d/README.md +++ /dev/null @@ -1,2363 +0,0 @@ -# Disclaimer - -Every module should be compatible with python2 and python3. -All third party libraries should be installed system-wide or in `python_modules` directory. -Module configurations are written in YAML and **pyYAML is required**. - -Every configuration file must have one of two formats: - -- Configuration for only one job: - -```yaml -update_every : 2 # update frequency -retries : 1 # how many failures in update() is tolerated -priority : 20000 # where it is shown on dashboard - -other_var1 : bla # variables passed to module -other_var2 : alb -``` - -- Configuration for many jobs (ex. mysql): - -```yaml -# module defaults: -update_every : 2 -retries : 1 -priority : 20000 - -local: # job name - update_every : 5 # job update frequency - other_var1 : some_val # module specific variable - -other_job: - priority : 5 # job position on dashboard - retries : 20 # job retries - other_var2 : val # module specific variable -``` - -`update_every`, `retries`, and `priority` are always optional. - ---- - -The following python.d modules are supported: - -# apache - -This module will monitor one or more apache servers depending on configuration. - -**Requirements:** - * apache with enabled `mod_status` - -It produces the following charts: - -1. **Requests** in requests/s - * requests - -2. **Connections** - * connections - -3. **Async Connections** - * keepalive - * closing - * writing - -4. **Bandwidth** in kilobytes/s - * sent - -5. **Workers** - * idle - * busy - -6. **Lifetime Avg. Requests/s** in requests/s - * requests_sec - -7. **Lifetime Avg. Bandwidth/s** in kilobytes/s - * size_sec - -8. **Lifetime Avg. 
Response Size** in bytes/request
- * size_req
-
-### configuration
-
-Needs only the `url` to the server's `server-status?auto` page.
-
-Here is an example for 2 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
-  url : 'http://localhost/server-status?auto'
-  retries : 20
-
-remote:
-  url : 'http://www.apache.org/server-status?auto'
-  update_every : 5
-  retries : 4
-```
-
-Without configuration, the module attempts to connect to `http://localhost/server-status?auto`
-
----
-
-# apache_cache
-
-This module monitors the apache mod_cache log and produces only one chart:
-
-**cached responses** in percent cached
- * hit
- * miss
- * other
-
-### configuration
-
-Sample:
-
-```yaml
-update_every : 10
-priority : 120000
-retries : 5
-log_path : '/var/log/apache2/cache.log'
-```
-
-If no configuration is given, the module will attempt to read the log file at `/var/log/apache2/cache.log`
-
----
-
-# beanstalk
-
-This module provides server and tube-level statistics:
-
-**Requirements:**
- * `python-beanstalkc`
- * `python-yaml`
-
-**Server statistics:**
-
-1. **CPU usage** in CPU time
- * user
- * system
-
-2. **Jobs rate** in jobs/s
- * total
- * timeouts
-
-3. **Connections rate** in connections/s
- * connections
-
-4. **Commands rate** in commands/s
- * put
- * peek
- * peek-ready
- * peek-delayed
- * peek-buried
- * reserve
- * use
- * watch
- * ignore
- * delete
- * release
- * bury
- * kick
- * stats
- * stats-job
- * stats-tube
- * list-tubes
- * list-tube-used
- * list-tubes-watched
- * pause-tube
-
-5. **Current tubes** in tubes
- * tubes
-
-6. **Current jobs** in jobs
- * urgent
- * ready
- * reserved
- * delayed
- * buried
-
-7. **Current connections** in connections
- * written
- * producers
- * workers
- * waiting
-
-8. **Binlog** in records/s
- * written
- * migrated
-
-9. **Uptime** in seconds
- * uptime
-
-**Per tube statistics:**
-
-1. **Jobs rate** in jobs/s
- * jobs
-
-2. **Jobs** in jobs
- * using
- * ready
- * reserved
- * delayed
- * buried
-
-3. **Connections** in connections
- * using
- * waiting
- * watching
-
-4. **Commands** in commands/s
- * deletes
- * pauses
-
-5. **Pause** in seconds
- * since
- * left
-
-
-### configuration
-
-Sample:
-
-```yaml
-host : '127.0.0.1'
-port : 11300
-```
-
-If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`
-
----
-
-# bind_rndc
-
-This module parses the bind dump file to collect real-time performance metrics.
-
-**Requirements:**
- * Version of bind must be 9.6+
- * Netdata must have permissions to run `rndc stats`
-
-It produces:
-
-1. **Name server statistics**
- * requests
- * responses
- * success
- * auth_answer
- * nonauth_answer
- * nxrrset
- * failure
- * nxdomain
- * recursion
- * duplicate
- * rejections
-
-2. **Incoming queries**
- * RESERVED0
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * MX
- * TXT
- * X25
- * AAAA
- * SRV
- * NAPTR
- * A6
- * DS
- * RRSIG
- * DNSKEY
- * SPF
- * ANY
- * DLV
-
-3. **Outgoing queries**
- * Same as Incoming queries
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
-  named_stats_path : '/var/log/bind/named.stats'
-```
-
-If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`
-
----
-
-# chrony
-
-This module monitors the precision and statistics of a local chronyd server.
-
-It produces:
-
-* frequency
-* last offset
-* RMS offset
-* residual freq
-* root delay
-* root dispersion
-* skew
-* system time
-
-**Requirements:**
-Verify that user netdata can execute `chronyc tracking`.
-If necessary, update `/etc/chrony.conf`, `cmdallow`.
-
-### Configuration
-
-Sample:
-```yaml
-# data collection frequency:
-update_every: 1
-
-# chrony query command:
-local:
-  command: 'chronyc -n tracking'
-```
-
----
-
-# ceph
-
-This module monitors the ceph cluster usage and consumption data of a server.
-
-It produces:
-
-* Cluster statistics (usage, available, latency, objects, read/write rate)
-* OSD usage
-* OSD latency
-* Pool usage
-* Pool read/write operations
-* Pool read/write rate
-* Number of objects per pool
-
-**Requirements:**
-
-- `rados` python module
-- Read permission for the ceph group on the keyring file:
-```shell
-# chmod 640 /etc/ceph/ceph.client.admin.keyring
-```
-
-### Configuration
-
-Sample:
-```yaml
-local:
-  config_file: '/etc/ceph/ceph.conf'
-  keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-```
-
----
-
-# couchdb
-
-This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
-
-* Overall server reads/writes
-* HTTP traffic breakdown
-  * Request methods (`GET`, `PUT`, `POST`, etc.)
-  * Response status codes (`200`, `201`, `4xx`, etc.)
-* Active server tasks
-* Replication status (CouchDB 2.1 and up only)
-* Erlang VM stats
-* Optional per-database statistics: sizes, # of docs, # of deleted docs
-
-### Configuration
-
-Sample for a local server running on port 5984:
-```yaml
-local:
-  user: 'admin'
-  pass: 'password'
-  node: 'couchdb@127.0.0.1'
-```
-
-Be sure to specify a correct admin-level username and password.
-
-You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
-
-If you want per-database statistics, these need to be added to the configuration, separated by spaces:
-```yaml
-local:
-  ...
-  databases: 'db1 db2 db3 ...'
-```
-
----
-
-# cpufreq
-
-This module shows the current CPU frequency as set by the cpufreq kernel
-module.
-
-**Requirement:**
-You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
-enabled in your kernel.
-
-This module tries to read from one of two possible locations. On
-initialization, it tries to read the `time_in_state` files provided by
-cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
-falls back to using the more inaccurate `scaling_cur_freq` file (which only
-represents the **current** CPU frequency, and doesn't account for any state
-changes which happen between updates).
-
-It produces one chart with multiple lines (one line per core).
-
-### configuration
-
-Sample:
-
-```yaml
-sys_dir: "/sys/devices"
-```
-
-If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
-The directory is also prefixed with `NETDATA_HOST_PREFIX`, if specified.
-
----
-
-# cpuidle
-
-This module monitors the usage of CPU idle states.
-
-**Requirement:**
-Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
-
-It produces one stacked chart per CPU, showing the percentage of time spent in
-each state.
-
----
-# dns_query_time
-
-This module provides DNS query time statistics.
-
-**Requirement:**
-* `python-dnspython` package
-
-It produces one aggregate chart or one chart per DNS server, showing the query time.
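-
-No configuration sample is shown here for this module; below is a minimal, hypothetical sketch following the generic job format described at the top of this file. The `dns_servers`, `domains`, and `aggregate` keys are assumptions; check the `dns_query_time.conf` shipped with netdata for the authoritative option names.
-
-```yaml
-dns_update:
-  update_every: 5                     # generic option, documented above
-  aggregate: yes                      # assumed key: one aggregate chart vs. per-server charts
-  dns_servers: '8.8.8.8 8.8.4.4'      # assumed key: space-separated resolvers to query
-  domains: 'example.com example.org'  # assumed key: domains used for the test lookups
-```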
----
-
-# dnsdist
-
-This module monitors dnsdist performance and health metrics.
-
-The following charts are drawn:
-
-1. **Response latency**
- * latency-slow
- * latency100-1000
- * latency50-100
- * latency10-50
- * latency1-10
- * latency0-1
-
-2. **Cache performance**
- * cache-hits
- * cache-misses
-
-3. **ACL events**
- * acl-drops
- * rule-drop
- * rule-nxdomain
- * rule-refused
-
-4. **Noncompliant data**
- * empty-queries
- * no-policy
- * noncompliant-queries
- * noncompliant-responses
-
-5. **Queries**
- * queries
- * rdqueries
-
-6. **Health**
- * downstream-send-errors
- * downstream-timeouts
- * servfail-responses
- * trunc-failures
-
-### configuration
-
-```yaml
-localhost:
-  name : 'local'
-  url : 'http://127.0.0.1:5053/jsonstat?command=stats'
-  user : 'username'
-  pass : 'password'
-  header:
-    X-API-Key: 'dnsdist-api-key'
-```
-
----
-
-# dovecot
-
-This module provides statistics information from the dovecot server.
-Statistics are taken from the dovecot socket by executing the `EXPORT global` command.
-More information about dovecot stats can be found on the [project wiki page.](http://wiki2.dovecot.org/Statistics)
-
-**Requirement:**
-Dovecot unix socket with R/W permissions for user netdata, or dovecot with a configured TCP/IP socket.
-
-The module provides information through the following charts:
-
-1. **sessions**
- * active sessions
-
-2. **logins**
- * logins
-
-3. **commands** - number of IMAP commands
- * commands
-
-4. **Faults**
- * minor
- * major
-
-5. **Context Switches**
- * voluntary
- * involuntary
-
-6. **disk** in bytes/s
- * read
- * write
-
-7. **bytes** in bytes/s
- * read
- * write
-
-8. **number of syscalls** in syscalls/s
- * read
- * write
-
-9. **lookups** - number of lookups per second
- * path
- * attr
-
-10. **hits** - number of cache hits
- * hits
-
-11. **attempts** - authorization attempts
- * success
- * failure
-
-12. **cache** - cached authorization hits
- * hit
- * miss
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
-  name : 'local'
-  host : '127.0.0.1'
-  port : 24242
-
-localsocket:
-  name : 'local'
-  socket : '/var/run/dovecot/stats'
-```
-
-If no configuration is given, the module will attempt to connect to dovecot using the unix socket located at `/var/run/dovecot/stats`
-
----
-
-# elasticsearch
-
-This module monitors elasticsearch performance and health metrics.
-
-It produces:
-
-1. **Search performance** charts:
- * Number of queries, fetches
- * Time spent on queries, fetches
- * Query and fetch latency
-
-2. **Indexing performance** charts:
- * Number of documents indexed, index refreshes, flushes
- * Time spent on indexing, refreshing, flushing
- * Indexing and flushing latency
-
-3. **Memory usage and garbage collection** charts:
- * JVM heap currently in use, committed
- * Count of garbage collections
- * Time spent on garbage collections
-
-4. **Host metrics** charts:
- * Available file descriptors in percent
- * Opened HTTP connections
- * Cluster communication transport metrics
-
-5. **Queues and rejections** charts:
- * Number of queued/rejected threads in thread pool
-
-6. **Fielddata cache** charts:
- * Fielddata cache size
- * Fielddata evictions and circuit breaker tripped count
-
-7. **Cluster health API** charts:
- * Cluster status
- * Nodes and tasks statistics
- * Shards statistics
-
-8. **Cluster stats API** charts:
- * Nodes statistics
- * Query cache statistics
- * Docs statistics
- * Store statistics
- * Indices and shards statistics
-
-### configuration
-
-Sample:
-
-```yaml
-local:
-  host : 'ipaddress'          # Server ip address or hostname
-  port : 9200                 # Port on which elasticsearch listens
-  cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
-  cluster_stats : True/False  # Calls to cluster stats elasticsearch API. Enabled by default.
-```
-
-If no configuration is given, the module will fail to run.
-
----
-
-# exim
-
-A simple module that executes `exim -bpc` to grab the exim queue length.
-This command can take a long time to finish, so it is not recommended to run it every second.
-
-It produces only one chart:
-
-1. **Exim Queue Emails**
- * emails
-
-Configuration is not needed.
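-
-Because the collector forks `exim` on every pass, it may be worth lowering its polling frequency. A minimal sketch using only the generic `update_every` option documented at the top of this file:
-
-```yaml
-update_every : 30  # run `exim -bpc` every 30 seconds instead of every second
-```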
- ---- - -# go_expvar - -The `go_expvar` module can monitor any Go application that exposes its metrics using the `expvar` package from the Go standard library. - -`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications) for more info. - -For the memory statistics, it produces the following charts: - -1. **Heap allocations** in kB - * alloc: size of objects allocated on the heap - * inuse: size of allocated heap spans - -2. **Stack allocations** in kB - * inuse: size of allocated stack spans - -3. **MSpan allocations** in kB - * inuse: size of allocated mspan structures - -4. **MCache allocations** in kB - * inuse: size of allocated mcache structures - -5. **Virtual memory** in kB - * sys: size of reserved virtual address space - -6. **Live objects** - * live: number of live objects in memory - -7. **GC pauses average** in ns - * avg: average duration of all GC stop-the-world pauses - -### configuration - -Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration. - ---- - -# haproxy - -This module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and sessions currently in queue, -as well as health metrics such as backend server status (server check should be used). - -The plugin can obtain data from a URL **or** a unix socket. - -**Requirement:** -The socket MUST be readable AND writable by the netdata user. - -It produces: - -1. **Frontend** family charts - * Kilobytes in/s - * Kilobytes out/s - * Sessions current - * Sessions in queue current - -2. **Backend** family charts - * Kilobytes in/s - * Kilobytes out/s - * Sessions current - * Sessions in queue current - -3. **Health** chart - * number of failed servers for every backend (in DOWN state) - - -### configuration - -Sample: - -```yaml -via_url: - user : 'username' # ONLY IF stats auth is used - pass : 'password' # ONLY IF stats auth is used - url : 'http://ip.address:port/url;csv;norefresh' -``` - -OR - -```yaml -via_socket: - socket : 'path/to/haproxy/sock' -``` - -If no configuration is given, the module will fail to run. - ---- - -# hddtemp - -This module monitors disk temperatures from one or more hddtemp daemons. - -**Requirement:** -`hddtemp` running in daemonized mode, listening on an accessible TCP port. - -It produces one chart **Temperature** with a dynamic number of dimensions (one per disk). - -### configuration - -Sample: - -```yaml -update_every: 3 -host: "127.0.0.1" -port: 7634 -``` - -If no configuration is given, the module will attempt to connect to the hddtemp daemon at `127.0.0.1:7634`. - ---- - -# httpcheck - -This module monitors a remote HTTP server for availability and response time. - -Following charts are drawn per job: - -1. **Response time** ms - * Time in 0.1 ms resolution in which the server responds. - If the connection failed, the value is missing. - -2. **Status** boolean - * Connection successful - * Unexpected content: No Regex match found in the response - * Unexpected status code: Do we get 500 errors? - * Connection failed: port not listening or blocked - * Connection timed out: host or port unreachable - -### configuration - -Sample configuration and its default values. - -```yaml -server: - url: 'http://host:port/path' # required - status_accepted: # optional - - 200 - timeout: 1 # optional, supports decimals (e.g.
0.2) - update_every: 3 # optional - regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html - redirect: yes # optional -``` - -### notes - - * The status chart is primarily intended for alarms, badges or for access via API. - * A system/service/firewall might block netdata's access if a portscan or - similar is detected. - * This plugin is meant for simple use cases. Currently, the accuracy of the - response time is low and should be used as reference only. - ---- - -# icecast - -This module will monitor the number of listeners for active sources. - -**Requirements:** - * icecast version >= 2.4.0 - -It produces the following charts: - -1. **Listeners** in listeners - * source number - -### configuration - -Needs only the `url` to the server's `/status-json.xsl` - -Here is an example for a remote server: - -```yaml -remote: - url : 'http://1.2.3.4:8443/status-json.xsl' -``` - -Without configuration, the module attempts to connect to `http://localhost:8443/status-json.xsl` - ---- - -# IPFS - -This module monitors basic [IPFS](https://ipfs.io) information. - -1. **Bandwidth** in kbits/s - * in - * out - -2. **Peers** - * peers - -### configuration - -Only the URL of the IPFS server is needed. - -Sample: - -```yaml -localhost: - name : 'local' - url : 'http://localhost:5001' -``` - ---- - -# isc_dhcpd - -This module monitors the leases database to show all active leases for the given pools. - -**Requirements:** - * dhcpd leases file MUST BE readable by netdata - * pools MUST BE in CIDR format - -It produces: - -1. **Pools utilization** Aggregate chart for all pools. - * utilization in percent - -2. **Total leases** - * leases (overall number of leases for all pools) - -3. **Active leases** for every pool - * leases (number of active leases in pool) - - -### configuration - -Sample: - -```yaml -local: - leases_path : '/var/lib/dhcp/dhcpd.leases' - pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24' -``` - -With Python 2 you need to install `py2-ipaddress` for the plugin to work. -The module will not work if no configuration is given. - ---- - - -# mdstat - -This module monitors `/proc/mdstat`. - -It produces: - -1. **Health** Number of failed disks in every array (aggregate chart). - -2. **Disks stats** - * total (number of devices the array would ideally have) - * inuse (number of devices currently in use) - -3. **Current status** - * resync in percent - * recovery in percent - * reshape in percent - * check in percent - -4. **Operation status** (if resync/recovery/reshape/check is active) - * finish in minutes - * speed in megabytes/s - -### configuration -No configuration is needed. - ---- - -# memcached - -Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats). - -1. **Network** in kilobytes/s - * read - * written - -2. **Connections** per second - * current - * rejected - * total - -3. **Items** in cluster - * current - * total - -4. **Evicted and Reclaimed** items - * evicted - * reclaimed - -5. **GET** requests/s - * hits - * misses - -6. **GET rate** rate in requests/s - * rate - -7. **SET rate** rate in requests/s - * rate - -8. **DELETE** requests/s - * hits - * misses - -9. **CAS** requests/s - * hits - * misses - * bad value - -10. **Increment** requests/s - * hits - * misses - -11. **Decrement** requests/s - * hits - * misses - -12. **Touch** requests/s - * hits - * misses - -13.
**Touch rate** rate in requests/s - * rate - -### configuration - -Sample: - -```yaml -localtcpip: - name : 'local' - host : '127.0.0.1' - port : 24242 -``` - -If no configuration is given, the module will attempt to connect to a memcached instance at `127.0.0.1:11211`. - ---- - -# mongodb - -This module monitors MongoDB performance and health metrics. - -**Requirements:** - * `python-pymongo` package. - -You need to install it manually. - - -The number of charts depends on the MongoDB version, storage engine and other features (replication): - -1. **Read requests**: - * query - * getmore (operation the cursor executes to get additional data from query) - -2. **Write requests**: - * insert - * delete - * update - -3. **Active clients**: - * readers (number of clients with read operations in progress or queued) - * writers (number of clients with write operations in progress or queued) - -4. **Journal transactions**: - * commits (count of transactions that have been written to the journal) - -5. **Data written to the journal**: - * volume (volume of data) - -6. **Background flush** (MMAPv1): - * average ms (average time taken by flushes to execute) - * last ms (time taken by the last flush) - -7. **Read tickets** (WiredTiger): - * in use (number of read tickets in use) - * available (number of available read tickets remaining) - -8. **Write tickets** (WiredTiger): - * in use (number of write tickets in use) - * available (number of available write tickets remaining) - -9. **Cursors**: - * opened (number of cursors currently opened by MongoDB for clients) - * timedOut (number of cursors that have timed out) - * noTimeout (number of open cursors with timeout disabled) - -10. **Connections**: - * connected (number of clients currently connected to the database server) - * unused (number of unused connections available for new clients) - -11. **Memory usage metrics**: - * virtual - * resident (amount of memory used by the database process) - * mapped - * non mapped - -12. **Page faults**: - * page faults (number of times MongoDB had to fetch data from disk) - -13. **Cache metrics** (WiredTiger): - * percentage of bytes currently in the cache (amount of space taken by cached data) - * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data) - -14. **Pages evicted from cache** (WiredTiger): - * modified - * unmodified - -15. **Queued requests**: - * readers (number of read requests currently queued) - * writers (number of write requests currently queued) - -16. **Errors**: - * msg (number of message assertions raised) - * warning (number of warning assertions raised) - * regular (number of regular assertions raised) - * user (number of assertions corresponding to errors generated by users) - -17. **Storage metrics** (one chart for every database) - * dataSize (size of all documents + padding in the database) - * indexSize (size of all indexes in the database) - * storageSize (size of all extents in the database) - -18. **Documents in the database** (one chart for all databases) - * documents (number of objects in the database among all the collections) - -19. **tcmalloc metrics** - * central cache free - * current total thread cache - * pageheap free - * pageheap unmapped - * thread cache free - * transfer cache free - * heap size - -20. **Commands total/failed rate** - * count - * createIndex - * delete - * eval - * findAndModify - * insert - -21.
**Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode) - * Global lock - * Database lock - * Collection lock - * Metadata lock - * oplog lock - -22. **Replica set members state** - * state - -23. **Oplog window** - * window (interval of time between the oldest and the latest entries in the oplog) - -24. **Replication lag** - * member (time when last entry from the oplog was applied for every member) - -25. **Replication set member heartbeat latency** - * member (time when last heartbeat was received from replica set member) - - -### configuration - -Sample: - -```yaml -local: - name : 'local' - host : '127.0.0.1' - port : 27017 - user : 'netdata' - pass : 'netdata' - -``` - -If no configuration is given, the module will attempt to connect to the MongoDB daemon at `127.0.0.1:27017`. - ---- - - -# mysql - -This module monitors one or more MySQL servers. - -**Requirements:** - * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower) - -It will produce the following charts (if data is available): - -1. **Bandwidth** in kbps - * in - * out - -2. **Queries** in queries/sec - * queries - * questions - * slow queries - -3. **Operations** in operations/sec - * opened tables - * flush - * commit - * delete - * prepare - * read first - * read key - * read next - * read prev - * read random - * read random next - * rollback - * save point - * update - * write - -4. **Table Locks** in locks/sec - * immediate - * waited - -5. **Select Issues** in issues/sec - * full join - * full range join - * range - * range check - * scan - -6. **Sort Issues** in issues/sec - * merge passes - * range - * scan - -### configuration - -You can provide, per server, the following: - -1. username which has access to the database (defaults to 'root') -2. password (defaults to none) -3. mysql my.cnf configuration file -4. mysql socket (optional) -5. mysql host (ip or hostname) -6. mysql port (defaults to 3306) - -Here is an example for 3 servers: - -```yaml -update_every : 10 -priority : 90100 -retries : 5 - -local: - 'my.cnf' : '/etc/mysql/my.cnf' - priority : 90000 - -local_2: - user : 'root' - pass : 'blablablabla' - socket : '/var/run/mysqld/mysqld.sock' - update_every : 1 - -remote: - user : 'admin' - pass : 'bla' - host : 'example.org' - port : 9000 - retries : 20 -``` - -If no configuration is given, the module will attempt to connect to the MySQL server via the unix socket at `/var/run/mysqld/mysqld.sock` without a password, using the username `root`. - ---- - -# nginx - -This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote. - -**Requirements:** - * nginx with configured 'ngx_http_stub_status_module' - * 'location /stub_status' - -Example nginx configuration can be found in 'python.d/nginx.conf' - -It produces the following charts: - -1. **Active Connections** - * active - -2. **Requests** in requests/s - * requests - -3. **Active Connections by Status** - * reading - * writing - * waiting - -4.
**Connections Rate** in connections/s - * accepts - * handled - -### configuration - -Needs only the `url` to the server's `stub_status` page. - -Here is an example for a local server: - -```yaml -update_every : 10 -priority : 90100 - -local: - url : 'http://localhost/stub_status' - retries : 10 -``` - -Without configuration, the module attempts to connect to `http://localhost/stub_status` - ---- - -# nginx_plus - -This module will monitor one or more nginx_plus servers depending on configuration. -Servers can be either local or remote. - -Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf' - -It produces the following charts: - -1. **Requests total** in requests/s - * total - -2. **Requests current** in requests - * current - -3. **Connection Statistics** in connections/s - * accepted - * dropped - -4. **Workers Statistics** in workers - * idle - * active - -5. **SSL Handshakes** in handshakes/s - * successful - * failed - -6. **SSL Session Reuses** in sessions/s - * reused - -7. **SSL Memory Usage** in percent - * usage - -8. **Processes** in processes - * respawned - -For every server zone: - -1. **Processing** in requests - * processing - -2. **Requests** in requests/s - * requests - -3. **Responses** in requests/s - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - -4. **Traffic** in kilobits/s - * received - * sent - -For every upstream: - -1. **Peers Requests** in requests/s - * peer name (dimension per peer) - -2. **All Peers Responses** in responses/s - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - -3. **Peer Responses** in requests/s (for every peer) - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - -4. **Peers Connections** in active - * peer name (dimension per peer) - -5. **Peers Connections Usage** in percent - * peer name (dimension per peer) - -6. **All Peers Traffic** in KB - * received - * sent - -7. **Peer Traffic** in KB/s (for every peer) - * received - * sent - -8. **Peer Timings** in ms (for every peer) - * header - * response - -9. **Memory Usage** in percent - * usage - -10. **Peers Status** in state - * peer name (dimension per peer) - -11. **Peers Total Downtime** in seconds - * peer name (dimension per peer) - -For every cache: - -1. **Traffic** in KB - * served - * written - * bypass - -2. **Memory Usage** in percent - * usage - -### configuration - -Needs only the `url` to the server's `status` page. - -Here is an example for a local server: - -```yaml -local: - url : 'http://localhost/status' -``` - -Without configuration, the module fails to start. - ---- - -# nsd - -This module uses the `nsd-control stats_noreset` command to provide `nsd` statistics. - -**Requirements:** - * Version of `nsd` must be 4.0+ - * Netdata must have permissions to run `nsd-control stats_noreset` - -It produces: - -1. **Queries** - * queries - -2. **Zones** - * master - * slave - -3. **Protocol** - * udp - * udp6 - * tcp - * tcp6 - -4. **Query Type** - * A - * NS - * CNAME - * SOA - * PTR - * HINFO - * MX - * NAPTR - * TXT - * AAAA - * SRV - * ANY - -5. **Transfer** - * NOTIFY - * AXFR - -6. **Return Code** - * NOERROR - * FORMERR - * SERVFAIL - * NXDOMAIN - * NOTIMP - * REFUSED - * YXDOMAIN - - -Configuration is not needed. - ---- - -# ntpd - -This module monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
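The exchange is compact enough to sketch directly: the snippet below sends a minimal mode 6 (control) READVAR request for the system variables and parses the `name=value` payload of the reply. It is a simplified illustration of the protocol (a single-datagram response is assumed), not the module's code:

```python
# Minimal sketch of an NTP control-message (mode 6) READVAR query,
# the same kind of exchange `ntpq -c rv` and this module perform.
import socket
import struct

def ntp_system_variables(host='127.0.0.1', port=123, timeout=1.0):
    # 12-byte control header: 0x16 = version 2 + mode 6, opcode 2 is
    # READVAR, sequence number 1; status, association id (0 = the
    # system itself), offset and count stay zero in a request.
    request = struct.pack('!BBHHHHH', 0x16, 0x02, 1, 0, 0, 0, 0)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        sock.sendto(request, (host, port))
        reply, _ = sock.recvfrom(1024)
    finally:
        sock.close()
    # After the 12-byte header the payload is ASCII "name=value, ..." text
    # (assuming the whole response fits in one datagram).
    payload = reply[12:].decode('ascii', errors='replace')
    return dict(item.strip().split('=', 1)
                for item in payload.split(',') if '=' in item)

# e.g. ntp_system_variables().get('offset') -> the system time offset
```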
- -**Requirements:** - * Version: `NTPv4` - * Local interrogation allowed in `/etc/ntp.conf` (default): - -``` -# Local users may interrogate the ntp server more closely. -restrict 127.0.0.1 -restrict ::1 -``` - -It produces: - -1. system - * offset - * jitter - * frequency - * delay - * dispersion - * stratum - * tc - * precision - -2. peers - * offset - * delay - * dispersion - * jitter - * rootdelay - * rootdispersion - * stratum - * hmode - * pmode - * hpoll - * ppoll - * precision - -### configuration - -Sample: - -```yaml -update_every: 10 - -host: 'localhost' -port: '123' -show_peers: yes -# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16 -peer_filter: '(127\..*)|(192\.168\..*)' -# check for new/changed peers every 60 updates -peer_rescan: 60 -``` - -Sample (multiple jobs): - -Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`. - -```yaml -local: - host: 'localhost' - -otherhost: - host: 'otherhost' -``` - -If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default, use `peer_filter: ''` to show all peers. - ---- - -# ovpn_status_log - -This module monitors the openvpn-status log file. - -**Requirements:** - - * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files - so that multiple instances do not overwrite each other's output files. - - * Make sure NETDATA USER CAN READ openvpn-status.log - - * The update_every interval MUST MATCH the interval on which OpenVPN writes its operational status to the log file. - -It produces: - -1. **Users** OpenVPN active users - * users - -2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s - * in - * out - -### configuration - -Sample: - -```yaml -default: - log_path : '/var/log/openvpn-status.log' -``` - ---- - -# phpfpm - -This module will monitor one or more php-fpm instances depending on configuration. - -**Requirements:** - * php-fpm with enabled `status` page - * access to `status` page via web server - -It produces the following charts: - -1. **Active Connections** - * active - * maxActive - * idle - -2. **Requests** in requests/s - * requests - -3. **Performance** - * reached - * slow - -### configuration - -Needs only the `url` to the server's `status` page. - -Here is an example for a local instance: - -```yaml -update_every : 3 -priority : 90100 - -local: - url : 'http://localhost/status' - retries : 10 -``` - -Without configuration, the module attempts to connect to `http://localhost/status` - ---- - -# portcheck - -This module monitors a remote TCP service. - -Following charts are drawn per host: - -1. **Latency** ms - * Time required to connect to a TCP port. - Displays latency in 0.1 ms resolution. If the connection failed, the value is missing. - -2. **Status** boolean - * Connection successful - * Could not create socket: possible DNS problems - * Connection refused: port not listening or blocked - * Connection timed out: host or port unreachable - - -### configuration - -```yaml -server: - host: 'dns or ip' # required - port: 22 # required - timeout: 1 # optional - update_every: 1 # optional -``` - -### notes - - * The error chart is intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or - similar is detected. - * Currently, the accuracy of the latency is low and should be used as reference only. - ---- - -# postfix - -Simple module executing `postqueue -p` to grab the postfix queue. - -It produces only two charts: - -1. **Postfix Queue Emails** - * emails - -2. **Postfix Queue Emails Size** in KB - * size - -Configuration is not needed. - ---- - -# postgres - -This module monitors one or more postgres servers. - -**Requirements:** - - * `python-psycopg2` package. You have to install it manually. - -Following charts are drawn: - -1. **Database size** MB - * size - -2. **Current Backend Processes** processes - * active - -3. **Write-Ahead Logging Statistics** files/s - * total - * ready - * done - -4. **Checkpoints** writes/s - * scheduled - * requested - -5. **Current connections to db** count - * connections - -6. **Tuples returned from db** tuples/s - * sequential - * bitmap - -7. **Tuple reads from db** reads/s - * disk - * cache - -8. **Transactions on db** transactions/s - * committed - * rolled back - -9. **Tuples written to db** writes/s - * inserted - * updated - * deleted - * conflicts - -10. **Locks on db** count per type - * locks - -### configuration - -```yaml -socket: - name : 'socket' - user : 'postgres' - database : 'postgres' - -tcp: - name : 'tcp' - user : 'postgres' - database : 'postgres' - host : 'localhost' - port : 5432 -``` - -When no configuration file is found, the module tries to connect to the TCP/IP socket: `localhost:5432`. - ---- - -# powerdns - -This module monitors PowerDNS performance and health metrics. - -Following charts are drawn: - -1. **Queries and Answers** - * udp-queries - * udp-answers - * tcp-queries - * tcp-answers - -2. **Cache Usage** - * query-cache-hit - * query-cache-miss - * packetcache-hit - * packetcache-miss - -3. **Cache Size** - * query-cache-size - * packetcache-size - * key-cache-size - * meta-cache-size - -4. **Latency** - * latency - -### configuration - -```yaml -local: - name : 'local' - url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics' - header : - X-API-Key: 'change_me' -``` - ---- - -# rabbitmq - -This module monitors RabbitMQ performance and health metrics. - -Following charts are drawn: - -1. **Queued Messages** - * ready - * unacknowledged - -2. **Message Rates** - * ack - * redelivered - * deliver - * publish - -3. **Global Counts** - * channels - * consumers - * connections - * queues - * exchanges - -4. **File Descriptors** - * used descriptors - -5. **Socket Descriptors** - * used descriptors - -6. **Erlang processes** - * used processes - -7. **Erlang run queue** - * Erlang run queue - -8. **Memory** - * free memory in megabytes - -9. **Disk Space** - * free disk space in gigabytes - -### configuration - -```yaml -socket: - name : 'local' - host : '127.0.0.1' - port : 15672 - user : 'guest' - pass : 'guest' - -``` - -When no configuration file is found, the module tries to connect to: `localhost:15672`. - ---- - -# redis - -Gets INFO data from a Redis instance. - -Following charts are drawn: - -1. **Operations** per second - * operations - -2. **Hit rate** in percent - * rate - -3. **Memory utilization** in kilobytes - * total - * lua - -4. **Database keys** - * lines are created dynamically based on how many databases there are - -5. **Clients** - * connected - * blocked - -6.
**Slaves** - * connected - -### configuration - -```yaml -socket: - name : 'local' - socket : '/var/lib/redis/redis.sock' - -localhost: - name : 'local' - host : 'localhost' - port : 6379 -``` - -When no configuration file is found, the module tries to connect to the TCP/IP socket: `localhost:6379`. - ---- - -# samba - -Performance metrics of Samba file sharing. - -It produces the following charts: - -1. **Syscall R/Ws** in kilobytes/s - * sendfile - * recvfile - -2. **Smb2 R/Ws** in kilobytes/s - * readout - * writein - * readin - * writeout - -3. **Smb2 Create/Close** in operations/s - * create - * close - -4. **Smb2 Info** in operations/s - * getinfo - * setinfo - -5. **Smb2 Find** in operations/s - * find - -6. **Smb2 Notify** in operations/s - * notify - -7. **Smb2 Lesser Ops** as counters - * tcon - * negprot - * tdis - * cancel - * logoff - * flush - * lock - * keepalive - * break - * sessetup - -### configuration - -Requires that smbd has been compiled with profiling enabled. Also requires -that `smbd` was started either with the `-P 1` option or inside `smb.conf` -using `smbd profiling level`. - -This plugin uses `smbstatus -P` which can only be executed by root. It uses -sudo and assumes that it is configured such that the `netdata` user can -execute smbstatus as root without a password. - -For example: - - netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P - -```yaml -update_every : 5 # update frequency -``` - ---- - -# sensors - -System sensors information. - -Charts are created dynamically. - -### configuration - -For detailed configuration information please read the [`sensors.conf`](https://github.com/firehol/netdata/blob/master/conf.d/python.d/sensors.conf) file. - -### possible issues - -There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed. -We are tracking such cases in issue [#827](https://github.com/firehol/netdata/issues/827). -Please join this discussion for help. - ---- - -# springboot - -This module will monitor one or more Java Spring Boot applications depending on configuration. - -It produces the following charts: - -1. **Response Codes** in requests/s - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - * others - -2. **Threads** - * daemon - * total - -3. **GC Time** in milliseconds and **GC Operations** in operations/s - * Copy - * MarkSweep - * ... - -4. **Heap Memory Usage** in KB - * used - * committed - -### configuration - -Please see the [Monitoring Java Spring Boot Applications](https://github.com/firehol/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration. - ---- - -# squid - -This module will monitor one or more squid instances depending on configuration. - -It produces the following charts: - -1. **Client Bandwidth** in kilobits/s - * in - * out - * hits - -2. **Client Requests** in requests/s - * requests - * hits - * errors - -3. **Server Bandwidth** in kilobits/s - * in - * out - -4. **Server Requests** in requests/s - * requests - * errors - -### configuration - -```yaml -priority : 50000 - -local: - request : 'cache_object://localhost:3128/counters' - host : 'localhost' - port : 3128 -``` - -Without any configuration, the module will try to autodetect where squid presents its `counters` data. - ---- - -# smartd_log - -This module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes. - -It produces the following charts (you can add additional attributes in the module configuration file): - -1.
**Read Error Rate** attribute 1 - -2. **Start/Stop Count** attribute 4 - -3. **Reallocated Sectors Count** attribute 5 - -4. **Seek Error Rate** attribute 7 - -5. **Power-On Hours Count** attribute 9 - -6. **Power Cycle Count** attribute 12 - -7. **Load/Unload Cycles** attribute 193 - -8. **Temperature** attribute 194 - -9. **Current Pending Sectors** attribute 197 - -10. **Off-Line Uncorrectable** attribute 198 - -11. **Write Error Rate** attribute 200 - -### configuration - -```yaml -local: - log_path : '/var/log/smartd/' -``` - -If no configuration is given, the module will attempt to read log files in the `/var/log/smartd/` directory. - ---- - -# tomcat - -Presents Tomcat containers' memory utilization. - -Charts: - -1. **Requests** per second - * accesses - -2. **Volume** in KB/s - * volume - -3. **Threads** - * current - * busy - -4. **JVM Free Memory** in MB - * jvm - -### configuration - -```yaml -localhost: - name : 'local' - url : 'http://127.0.0.1:8080/manager/status?XML=true' - user : 'tomcat_username' - pass : 'secret_tomcat_password' -``` - -Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any credentials, -so it will probably fail. - ---- - -# Traefik - -This module uses the `health` API to provide statistics. - -It produces: - -1. **Responses** by statuses - * success (1xx, 2xx, 304) - * error (5xx) - * redirect (3xx except 304) - * bad (4xx) - * other (all other responses) - -2. **Responses** by codes - * 2xx (successful) - * 5xx (internal server errors) - * 3xx (redirect) - * 4xx (bad) - * 1xx (informational) - * other (non-standard responses) - -3. **Detailed Response Codes** requests/s (number of responses for each response code family individually) - -4. **Requests**/s - * request statistics - -5. **Total response time** - * sum of all response times - -6. **Average response time** - -7. **Average response time per iteration** - -8. **Uptime** - * Traefik server uptime - -### configuration - -Needs only the `url` to the server's `health` endpoint. - -Here is an example for a local server: - -```yaml -update_every : 1 -priority : 60000 - -local: - url : 'http://localhost:8080/health' - retries : 10 -``` - -Without configuration, the module attempts to connect to `http://localhost:8080/health`. - ---- - -# varnish cache - -This module uses the `varnishstat` command to provide varnish cache statistics. - -It produces: - -1. **Connections Statistics** in connections/s - * accepted - * dropped - -2. **Client Requests** in requests/s - * received - -3. **All History Hit Rate Ratio** in percent - * hit - * miss - * hitpass - -4. **Current Poll Hit Rate Ratio** in percent - * hit - * miss - * hitpass - -5. **Expired Objects** in expired/s - * objects - -6. **Least Recently Used Nuked Objects** in nuked/s - * objects - - -7. **Number Of Threads In All Pools** in threads - * threads - -8. **Threads Statistics** in threads/s - * created - * failed - * limited - -9. **Current Queue Length** in requests - * in queue - -10. **Backend Connections Statistics** in connections/s - * successful - * unhealthy - * reused - * closed - * recycled - * failed - -11. **Requests To The Backend** in requests/s - * received - -12. **ESI Statistics** in problems/s - * errors - * warnings - -13. **Memory Usage** in MB - * free - * allocated - -14. **Uptime** in seconds - * uptime - - -### configuration - -No configuration is needed. - ---- - -# web_log - -Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
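As a rough sketch of the approach (not the module's actual parser), tailing a combined-format access log and bucketing responses by code family might look like the following; the path and regular expression are illustrative assumptions:

```python
# Illustrative sketch: follow an access log like `tail -f` and count
# responses per code family, the core idea behind web_log.
import re
import time

# Very small subset of the combined log format: client address,
# request line, status code and response size.
LINE_RE = re.compile(r'(?P<address>\S+) \S+ \S+ \[[^]]+\] "[^"]*" '
                     r'(?P<code>\d{3}) (?P<bytes_sent>\d+|-)')

def follow_access_log(path='/var/log/nginx/access.log'):
    counts = {}
    with open(path) as log:
        log.seek(0, 2)                      # start at the end of the file
        while True:
            line = log.readline()
            if not line:
                time.sleep(0.1)             # wait for new log lines
                continue
            match = LINE_RE.match(line)
            family = (match.group('code')[0] + 'xx') if match else 'unmatched'
            counts[family] = counts.get(family, 0) + 1
            yield counts                    # one snapshot per parsed line
```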
- -It produces the following charts: - -1. **Response by type** requests/s - * success (1xx, 2xx, 304) - * error (5xx) - * redirect (3xx except 304) - * bad (4xx) - * other (all other responses) - -2. **Response by code family** requests/s - * 1xx (informational) - * 2xx (successful) - * 3xx (redirect) - * 4xx (bad) - * 5xx (internal server errors) - * other (non-standard responses) - * unmatched (the lines in the log file that are not matched) - -3. **Detailed Response Codes** requests/s (number of responses for each response code family individually) - -4. **Bandwidth** KB/s - * received (bandwidth of requests) - * sent (bandwidth of responses) - -5. **Timings** ms (request processing time) - * min (minimum processing time) - * max (maximum processing time) - * average (average processing time) - -6. **Request per url** requests/s (configured by user) - -7. **Http Methods** requests/s (requests per http method) - -8. **Http Versions** requests/s (requests per http version) - -9. **IP protocols** requests/s (requests per ip protocol version) - -10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration) - -11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata) - - -### configuration - -```yaml -nginx_log: - name : 'nginx_log' - path : '/var/log/nginx/access.log' - -apache_log: - name : 'apache_log' - path : '/var/log/apache/other_vhosts_access.log' - categories: - cacti : 'cacti.*' - observium : 'observium' -``` - -The module has preconfigured jobs for nginx, apache and gunicorn on various distros. - ---- diff --git a/python.d/apache.chart.py b/python.d/apache.chart.py deleted file mode 100644 index 789b3c099..000000000 --- a/python.d/apache.chart.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: apache netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://www.apache.org/server-status?auto' -# }} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq'] - -CHARTS = { - 'bytesperreq': { - 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request', - 'statistics', 'apache.bytesperreq', 'area'], - 'lines': [ - ["size_req"] - ]}, - 'workers': { - 'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'], - 'lines': [ - ["idle"], - ["busy"], - ]}, - 'reqpersec': { - 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics', - 'apache.reqpersec', 'area'], - 'lines': [ - ["requests_sec"] - ]}, - 'bytespersec': { - 'options': [None, 'apache Lifetime Avg.
Bandwidth/s', 'kilobits/s', 'statistics', - 'apache.bytespersec', 'area'], - 'lines': [ - ["size_sec", None, 'absolute', 8, 1000] - ]}, - 'requests': { - 'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'], - 'lines': [ - ["requests", None, 'incremental'] - ]}, - 'net': { - 'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'], - 'lines': [ - ["sent", None, 'incremental', 8, 1] - ]}, - 'connections': { - 'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'], - 'lines': [ - ["connections"] - ]}, - 'conns_async': { - 'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'], - 'lines': [ - ["keepalive"], - ["closing"], - ["writing"] - ]} -} - -ASSIGNMENT = {"BytesPerReq": 'size_req', - "IdleWorkers": 'idle', - "IdleServers": 'idle_servers', - "BusyWorkers": 'busy', - "BusyServers": 'busy_servers', - "ReqPerSec": 'requests_sec', - "BytesPerSec": 'size_sec', - "Total Accesses": 'requests', - "Total kBytes": 'sent', - "ConnsTotal": 'connections', - "ConnsAsyncKeepAlive": 'keepalive', - "ConnsAsyncClosing": 'closing', - "ConnsAsyncWriting": 'writing'} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.url = self.configuration.get('url', 'http://localhost/server-status?auto') - - def check(self): - self._manager = self._build_manager() - data = self._get_data() - if not data: - return None - - if 'idle_servers' in data: - self.module_name = 'lighttpd' - for chart in self.definitions: - if chart == 'workers': - lines = self.definitions[chart]['lines'] - lines[0] = ["idle_servers", 'idle'] - lines[1] = ["busy_servers", 'busy'] - opts = self.definitions[chart]['options'] - opts[1] = opts[1].replace('apache', 'lighttpd') - opts[4] = opts[4].replace('apache', 'lighttpd') - return True - - def _get_data(self): - """ - Format data received from http request - :return: dict - """ - raw_data = self._get_raw_data() - if not raw_data: - return None - data = dict() - - for row in raw_data.split('\n'): - tmp = row.split(":") - if tmp[0] in ASSIGNMENT: - try: - data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1])) - except (IndexError, ValueError): - continue - return data or None diff --git a/python.d/beanstalk.chart.py b/python.d/beanstalk.chart.py deleted file mode 100644 index 8880afdd9..000000000 --- a/python.d/beanstalk.chart.py +++ /dev/null @@ -1,250 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: beanstalk netdata python.d module -# Author: l2isbad - -try: - import beanstalkc - BEANSTALKC = True -except ImportError: - BEANSTALKC = False - -try: - import yaml - YAML = True -except ImportError: - YAML = False - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs', - 'current_connections', 'binlog', 'uptime'] - -CHARTS = { - 'cpu_usage': { - 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'], - 'lines': [ - ['rusage-utime', 'user', 'incremental'], - ['rusage-stime', 'system', 'incremental'] - ] - }, - 'jobs_rate': { - 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
'lines': [ - ['total-jobs', 'total', 'incremental'], - ['job-timeouts', 'timeouts', 'incremental'] - ] - }, - 'connections_rate': { - 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate', - 'area'], - 'lines': [ - ['total-connections', 'connections', 'incremental'] - ] - }, - 'commands_rate': { - 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'], - 'lines': [ - ['cmd-put', 'put', 'incremental'], - ['cmd-peek', 'peek', 'incremental'], - ['cmd-peek-ready', 'peek-ready', 'incremental'], - ['cmd-peek-delayed', 'peek-delayed', 'incremental'], - ['cmd-peek-buried', 'peek-buried', 'incremental'], - ['cmd-reserve', 'reserve', 'incremental'], - ['cmd-use', 'use', 'incremental'], - ['cmd-watch', 'watch', 'incremental'], - ['cmd-ignore', 'ignore', 'incremental'], - ['cmd-delete', 'delete', 'incremental'], - ['cmd-release', 'release', 'incremental'], - ['cmd-bury', 'bury', 'incremental'], - ['cmd-kick', 'kick', 'incremental'], - ['cmd-stats', 'stats', 'incremental'], - ['cmd-stats-job', 'stats-job', 'incremental'], - ['cmd-stats-tube', 'stats-tube', 'incremental'], - ['cmd-list-tubes', 'list-tubes', 'incremental'], - ['cmd-list-tube-used', 'list-tube-used', 'incremental'], - ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'], - ['cmd-pause-tube', 'pause-tube', 'incremental'] - ] - }, - 'current_tubes': { - 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'], - 'lines': [ - ['current-tubes', 'tubes'] - ] - }, - 'current_jobs': { - 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'], - 'lines': [ - ['current-jobs-urgent', 'urgent'], - ['current-jobs-ready', 'ready'], - ['current-jobs-reserved', 'reserved'], - ['current-jobs-delayed', 'delayed'], - ['current-jobs-buried', 'buried'] - ] - }, - 'current_connections': { - 'options': [None, 'Current Connections', 'connections', 'server statistics', - 'beanstalk.current_connections', 'line'], - 'lines': [ - ['current-connections', 'written'], - ['current-producers', 'producers'], - ['current-workers', 'workers'], - ['current-waiting', 'waiting'] - ] - }, - 'binlog': { - 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'], - 'lines': [ - ['binlog-records-written', 'written', 'incremental'], - ['binlog-records-migrated', 'migrated', 'incremental'] - ] - }, - 'uptime': { - 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'], - 'lines': [ - ['uptime'], - ] - } -} - - -def tube_chart_template(name): - order = ['{0}_jobs_rate'.format(name), - '{0}_jobs'.format(name), - '{0}_connections'.format(name), - '{0}_commands'.format(name), - '{0}_pause'.format(name) - ] - family = 'tube {0}'.format(name) - - charts = { - order[0]: { - 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'], - 'lines': [ - ['_'.join([name, 'total-jobs']), 'jobs', 'incremental'] - ]}, - order[1]: { - 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'], - 'lines': [ - ['_'.join([name, 'current-jobs-urgent']), 'urgent'], - ['_'.join([name, 'current-jobs-ready']), 'ready'], - ['_'.join([name, 'current-jobs-reserved']), 'reserved'], - ['_'.join([name, 'current-jobs-delayed']), 'delayed'], - ['_'.join([name, 'current-jobs-buried']), 'buried'] - ]}, - order[2]: { - 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'], - 'lines': [ 
- ['_'.join([name, 'current-using']), 'using'], - ['_'.join([name, 'current-waiting']), 'waiting'], - ['_'.join([name, 'current-watching']), 'watching'] - ]}, - order[3]: { - 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'], - 'lines': [ - ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'], - ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental'] - ]}, - order[4]: { - 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'], - 'lines': [ - ['_'.join([name, 'pause']), 'since'], - ['_'.join([name, 'pause-time-left']), 'left'] - ]} - - } - - return order, charts - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.configuration = configuration - self.order = list(ORDER) - self.definitions = dict(CHARTS) - self.conn = None - self.alive = True - - def check(self): - if not BEANSTALKC: - self.error("'beanstalkc' module is needed to use beanstalk.chart.py") - return False - - if not YAML: - self.error("'yaml' module is needed to use beanstalk.chart.py") - return False - - self.conn = self.connect() - - return True if self.conn else False - - def get_data(self): - """ - :return: dict - """ - if not self.is_alive(): - return None - - active_charts = self.charts.active_charts() - data = dict() - - try: - data.update(self.conn.stats()) - - for tube in self.conn.tubes(): - stats = self.conn.stats_tube(tube) - - if tube + '_jobs_rate' not in active_charts: - self.create_new_tube_charts(tube) - - for stat in stats: - data['_'.join([tube, stat])] = stats[stat] - - except beanstalkc.SocketError: - self.alive = False - return None - - return data or None - - def create_new_tube_charts(self, tube): - order, charts = tube_chart_template(tube) - - for chart_name in order: - params = [chart_name] + charts[chart_name]['options'] - dimensions = charts[chart_name]['lines'] - - new_chart = self.charts.add_chart(params) - for dimension in dimensions: - new_chart.add_dimension(dimension) - - def connect(self): - host = self.configuration.get('host', '127.0.0.1') - port = self.configuration.get('port', 11300) - timeout = self.configuration.get('timeout', 1) - try: - return beanstalkc.Connection(host=host, - port=port, - connect_timeout=timeout, - parse_yaml=yaml.load) - except beanstalkc.SocketError as error: - self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error)) - return None - - def reconnect(self): - try: - self.conn.reconnect() - self.alive = True - return True - except beanstalkc.SocketError: - return False - - def is_alive(self): - if not self.alive: - return self.reconnect() - return True diff --git a/python.d/bind_rndc.chart.py b/python.d/bind_rndc.chart.py deleted file mode 100644 index cc96659b2..000000000 --- a/python.d/bind_rndc.chart.py +++ /dev/null @@ -1,243 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: bind rndc netdata python.d module -# Author: l2isbad - -import os - -from collections import defaultdict -from subprocess import Popen - -from bases.collection import find_binary -from bases.FrameworkServices.SimpleService import SimpleService - -priority = 60000 -retries = 60 -update_every = 30 - -ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size'] - -CHARTS = { - 'name_server_statistics': { - 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics', - 'bind_rndc.name_server_statistics', 'line'], - 'lines': [ - ['nms_requests', 
'requests', 'incremental'], - ['nms_rejected_queries', 'rejected_queries', 'incremental'], - ['nms_success', 'success', 'incremental'], - ['nms_failure', 'failure', 'incremental'], - ['nms_responses', 'responses', 'incremental'], - ['nms_duplicate', 'duplicate', 'incremental'], - ['nms_recursion', 'recursion', 'incremental'], - ['nms_nxrrset', 'nxrrset', 'incremental'], - ['nms_nxdomain', 'nxdomain', 'incremental'], - ['nms_non_auth_answer', 'non_auth_answer', 'incremental'], - ['nms_auth_answer', 'auth_answer', 'incremental'], - ['nms_dropped_queries', 'dropped_queries', 'incremental'], - ]}, - 'incoming_queries': { - 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', - 'bind_rndc.incoming_queries', 'line'], - 'lines': [ - ]}, - 'outgoing_queries': { - 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', - 'bind_rndc.outgoing_queries', 'line'], - 'lines': [ - ]}, - 'named_stats_size': { - 'options': [None, 'Named Stats File Size', 'MB', 'file size', - 'bind_rndc.stats_size', 'line'], - 'lines': [ - ['stats_size', None, 'absolute', 1, 1 << 20] - ]} -} - -NMS = { - 'nms_requests': - ['IPv4 requests received', - 'IPv6 requests received', - 'TCP requests received', - 'requests with EDNS(0) receive'], - 'nms_responses': - ['responses sent', - 'truncated responses sent', - 'responses with EDNS(0) sent', - 'requests with unsupported EDNS version received'], - 'nms_failure': - ['other query failures', - 'queries resulted in SERVFAIL'], - 'nms_auth_answer': - ['queries resulted in authoritative answer'], - 'nms_non_auth_answer': - ['queries resulted in non authoritative answer'], - 'nms_nxrrset': - ['queries resulted in nxrrset'], - 'nms_success': - ['queries resulted in successful answer'], - 'nms_nxdomain': - ['queries resulted in NXDOMAIN'], - 'nms_recursion': - ['queries caused recursion'], - 'nms_duplicate': - ['duplicate queries received'], - 'nms_rejected_queries': - ['auth queries rejected', - 'recursive queries rejected'], - 'nms_dropped_queries': - ['queries dropped'] -} - -STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries'] - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats') - self.rndc = find_binary('rndc') - self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0, - nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0, - nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0, - nms_dropped_queries=0) - - def check(self): - if not self.rndc: - self.error('Can\'t locate "rndc" binary or binary is not executable by netdata') - return False - - if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)): - self.error('Cannot access file %s' % self.named_stats_path) - return False - - run_rndc = Popen([self.rndc, 'stats'], shell=False) - run_rndc.wait() - - if not run_rndc.returncode: - return True - self.error('Not enough permissions to run "%s stats"' % self.rndc) - return False - - def _get_raw_data(self): - """ - Run 'rndc stats' and read last dump from named.stats - :return: dict - """ - result = dict() - try: - current_size = os.path.getsize(self.named_stats_path) - run_rndc = Popen([self.rndc, 'stats'], shell=False) - run_rndc.wait() - - if run_rndc.returncode: - return None - with open(self.named_stats_path) as 
named_stats: - named_stats.seek(current_size) - result['stats'] = named_stats.readlines() - result['size'] = current_size - return result - except (OSError, IOError): - return None - - def _get_data(self): - """ - Parse data from _get_raw_data() - :return: dict - """ - - raw_data = self._get_raw_data() - - if raw_data is None: - return None - parsed = dict() - for stat in STATS: - parsed[stat] = parse_stats(field=stat, - named_stats=raw_data['stats']) - - self.data.update(nms_mapper(data=parsed['Name Server Statistics'])) - - for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']): - parsed_key, chart_name = elem[0], elem[1] - for dimension_id, value in queries_mapper(data=parsed[parsed_key], - add=chart_name[:9]).items(): - - if dimension_id not in self.data: - dimension = dimension_id.replace(chart_name[:9], '') - if dimension_id not in self.charts[chart_name]: - self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental']) - - self.data[dimension_id] = value - - self.data['stats_size'] = raw_data['size'] - return self.data - - -def parse_stats(field, named_stats): - """ - :param field: str: - :param named_stats: list: - :return: dict - - Example: - field: 'Incoming Queries' - named_stats (list of lines): - ++ Incoming Requests ++ - 1405660 QUERY - 3 NOTIFY - ++ Incoming Queries ++ - 1214961 A - 75 NS - 2 CNAME - 2897 SOA - 35544 PTR - 14 MX - 5822 TXT - 145974 AAAA - 371 SRV - ++ Outgoing Queries ++ - ... - - result: - {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...} - """ - data = dict() - ns = iter(named_stats) - for line in ns: - if field not in line: - continue - while True: - try: - line = next(ns) - except StopIteration: - break - if '++' not in line: - if '[' in line: - continue - v, k = line.strip().split(' ', 1) - data[k] = int(v) - continue - break - break - return data - - -def nms_mapper(data): - """ - :param data: dict - :return: dict(defaultdict) - """ - result = defaultdict(int) - for k, v in NMS.items(): - for elem in v: - result[k] += data.get(elem, 0) - return result - - -def queries_mapper(data, add): - """ - :param data: dict - :param add: str - :return: dict - """ - return dict([(add + k, v) for k, v in data.items()]) diff --git a/python.d/ceph.chart.py b/python.d/ceph.chart.py deleted file mode 100644 index fb78397d0..000000000 --- a/python.d/ceph.chart.py +++ /dev/null @@ -1,313 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: ceph netdata python.d module -# Author: Luis Eduardo (lets00) - -try: - import rados - CEPH = True -except ImportError: - CEPH = False - -import json -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -update_every = 10 -priority = 60000 -retries = 60 - -ORDER = ['general_usage', 'general_objects', 'general_bytes', 'general_operations', - 'general_latency', 'pool_usage', 'pool_objects', 'pool_read_bytes', - 'pool_write_bytes', 'pool_read_operations', 'pool_write_operations', 'osd_usage', - 'osd_apply_latency', 'osd_commit_latency'] - -CHARTS = { - 'general_usage': { - 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'], - 'lines': [ - ['general_available', 'avail', 'absolute', 1, 1024], - ['general_usage', 'used', 'absolute', 1, 1024] - ] - }, - 'general_objects': { - 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'], - 'lines': [ - ['general_objects', 'cluster', 'absolute'] - ] - }, - 'general_bytes': {
'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes', - 'area'], - 'lines': [ - ['general_read_bytes', 'read', 'absolute', 1, 1024], - ['general_write_bytes', 'write', 'absolute', -1, 1024] - ] - }, - 'general_operations': { - 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations', - 'area'], - 'lines': [ - ['general_read_operations', 'read', 'absolute', 1], - ['general_write_operations', 'write', 'absolute', -1] - ] - }, - 'general_latency': { - 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency', - 'area'], - 'lines': [ - ['general_apply_latency', 'apply', 'absolute'], - ['general_commit_latency', 'commit', 'absolute'] - ] - }, - 'pool_usage': { - 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'], - 'lines': [] - }, - 'pool_objects': { - 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'], - 'lines': [] - }, - 'pool_read_bytes': { - 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'], - 'lines': [] - }, - 'pool_write_bytes': { - 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'], - 'lines': [] - }, - 'pool_read_operations': { - 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'], - 'lines': [] - }, - 'pool_write_operations': { - 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'], - 'lines': [] - }, - 'osd_usage': { - 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'], - 'lines': [] - }, - 'osd_apply_latency': { - 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'], - 'lines': [] - }, - 'osd_commit_latency': { - 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'], - 'lines': [] - } - -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.config_file = self.configuration.get('config_file') - self.keyring_file = self.configuration.get('keyring_file') - - def check(self): - """ - Checks module - :return: - """ - if not CEPH: - self.error('rados module is needed to use ceph.chart.py') - return False - if not (self.config_file and self.keyring_file): - self.error('config_file and/or keyring_file is not defined') - return False - try: - self.cluster = rados.Rados(conffile=self.config_file, - conf=dict(keyring=self.keyring_file)) - self.cluster.connect() - except rados.Error as error: - self.error(error) - return False - self.create_definitions() - return True - - def create_definitions(self): - """ - Create dynamically charts options - :return: None - """ - # Pool lines - for pool in sorted(self._get_df()['pools']): - self.definitions['pool_usage']['lines'].append([pool['name'], - pool['name'], - 'absolute']) - self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']), - pool['name'], - 'absolute']) - self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']), - pool['name'], - 'absolute', 1, 1024]) - self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']), - pool['name'], - 'absolute', 1, 1024]) - 
self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']), - pool['name'], - 'absolute']) - self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']), - pool['name'], - 'absolute']) - - # OSD lines - for osd in sorted(self._get_osd_df()['nodes']): - self.definitions['osd_usage']['lines'].append([osd['name'], - osd['name'], - 'absolute']) - self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']), - osd['name'], - 'absolute']) - self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']), - osd['name'], - 'absolute']) - - def get_data(self): - """ - Catch all ceph data - :return: dict - """ - try: - data = {} - df = self._get_df() - osd_df = self._get_osd_df() - osd_perf = self._get_osd_perf() - pool_stats = self._get_osd_pool_stats() - data.update(self._get_general(osd_perf, pool_stats)) - for pool in df['pools']: - data.update(self._get_pool_usage(pool)) - data.update(self._get_pool_objects(pool)) - for pool_io in pool_stats: - data.update(self._get_pool_rw(pool_io)) - for osd in osd_df['nodes']: - data.update(self._get_osd_usage(osd)) - for osd_apply_commit in osd_perf['osd_perf_infos']: - data.update(self._get_osd_latency(osd_apply_commit)) - return data - except (ValueError, AttributeError) as error: - self.error(error) - return None - - def _get_general(self, osd_perf, pool_stats): - """ - Get ceph's general usage - :return: dict - """ - status = self.cluster.get_cluster_stats() - read_bytes_sec = 0 - write_bytes_sec = 0 - read_op_per_sec = 0 - write_op_per_sec = 0 - apply_latency = 0 - commit_latency = 0 - - for pool_rw_io_b in pool_stats: - read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0) - write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0) - read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0) - write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0) - for perf in osd_perf['osd_perf_infos']: - apply_latency += perf['perf_stats']['apply_latency_ms'] - commit_latency += perf['perf_stats']['commit_latency_ms'] - - return {'general_usage': int(status['kb_used']), - 'general_available': int(status['kb_avail']), - 'general_objects': int(status['num_objects']), - 'general_read_bytes': read_bytes_sec, - 'general_write_bytes': write_bytes_sec, - 'general_read_operations': read_op_per_sec, - 'general_write_operations': write_op_per_sec, - 'general_apply_latency': apply_latency, - 'general_commit_latency': commit_latency - } - - @staticmethod - def _get_pool_usage(pool): - """ - Process raw data into pool usage dict information - :return: A pool dict with pool name's key and usage bytes' value - """ - return {pool['name']: pool['stats']['kb_used']} - - @staticmethod - def _get_pool_objects(pool): - """ - Process raw data into pool usage dict information - :return: A pool dict with pool name's key and object numbers - """ - return {'obj_{0}'.format(pool['name']): pool['stats']['objects']} - - @staticmethod - def _get_pool_rw(pool): - """ - Get read/write kb and operations in a pool - :return: A pool dict with both read/write bytes and operations. 
- """ - return {'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)), - 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)), - 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)), - 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0)) - } - - @staticmethod - def _get_osd_usage(osd): - """ - Process raw data into osd dict information to get osd usage - :return: A osd dict with osd name's key and usage bytes' value - """ - return {osd['name']: float(osd['kb_used'])} - - @staticmethod - def _get_osd_latency(osd): - """ - Get ceph osd apply and commit latency - :return: A osd dict with osd name's key with both apply and commit latency values - """ - return {'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'], - 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']} - - def _get_df(self): - """ - Get ceph df output - :return: ceph df --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'df', - 'format': 'json' - }), '')[1]) - - def _get_osd_df(self): - """ - Get ceph osd df output - :return: ceph osd df --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'osd df', - 'format': 'json' - }), '')[1]) - - def _get_osd_perf(self): - """ - Get ceph osd performance - :return: ceph osd perf --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'osd perf', - 'format': 'json' - }), '')[1]) - - def _get_osd_pool_stats(self): - """ - Get ceph osd pool status. - This command is used to get information about both - read/write operation and bytes per second on each pool - :return: ceph osd pool stats --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'osd pool stats', - 'format': 'json' - }), '')[1]) diff --git a/python.d/chrony.chart.py b/python.d/chrony.chart.py deleted file mode 100644 index 8f331fa50..000000000 --- a/python.d/chrony.chart.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: chrony netdata python.d module -# Author: Dominik Schloesser (domschl) - -from bases.FrameworkServices.ExecutableService import ExecutableService - -# default module values (can be overridden per job in `config`) -update_every = 5 -priority = 60000 -retries = 10 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew'] - -CHARTS = { - # id: { - # 'options': [name, title, units, family, context, charttype], - # 'lines': [ - # [unique_dimension_name, name, algorithm, multiplier, divisor] - # ]} - 'system': { - 'options': [None, "Chrony System Time Deltas", "microseconds", 'system', 'chrony.system', 'area'], - 'lines': [ - ['timediff', 'system time', 'absolute', 1, 1000] - ]}, - 'offsets': { - 'options': [None, "Chrony System Time Offsets", "microseconds", 'system', 'chrony.offsets', 'area'], - 'lines': [ - ['lastoffset', 'last offset', 'absolute', 1, 1000], - ['rmsoffset', 'RMS offset', 'absolute', 1, 1000] - ]}, - 'stratum': { - 'options': [None, "Chrony Stratum", "stratum", 'root', 'chrony.stratum', 'line'], - 'lines': [ - ['stratum', None, 'absolute', 1, 1] - ]}, - 'root': { - 'options': [None, "Chrony Root Delays", "milliseconds", 'root', 'chrony.root', 'line'], - 'lines': [ - ['rootdelay', 'delay', 
'absolute', 1, 1000000], - ['rootdispersion', 'dispersion', 'absolute', 1, 1000000] - ]}, - 'frequency': { - 'options': [None, "Chrony Frequency", "ppm", 'frequencies', 'chrony.frequency', 'area'], - 'lines': [ - ['frequency', None, 'absolute', 1, 1000] - ]}, - 'residualfreq': { - 'options': [None, "Chrony Residual frequency", "ppm", 'frequencies', 'chrony.residualfreq', 'area'], - 'lines': [ - ['residualfreq', 'residual frequency', 'absolute', 1, 1000] - ]}, - 'skew': { - 'options': [None, "Chrony Skew, error bound on frequency", "ppm", 'frequencies', 'chrony.skew', 'area'], - 'lines': [ - ['skew', None, 'absolute', 1, 1000] - ]} -} - -CHRONY = [('Frequency', 'frequency', 1e3), - ('Last offset', 'lastoffset', 1e9), - ('RMS offset', 'rmsoffset', 1e9), - ('Residual freq', 'residualfreq', 1e3), - ('Root delay', 'rootdelay', 1e9), - ('Root dispersion', 'rootdispersion', 1e9), - ('Skew', 'skew', 1e3), - ('Stratum', 'stratum', 1), - ('System time', 'timediff', 1e9)] - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__( - self, configuration=configuration, name=name) - self.command = "chronyc -n tracking" - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Format data received from shell command - :return: dict - """ - raw_data = self._get_raw_data() - if not raw_data: - return None - - raw_data = (line.split(':', 1) for line in raw_data) - parsed, data = dict(), dict() - - for line in raw_data: - try: - key, value = (l.strip() for l in line) - except ValueError: - continue - if value: - parsed[key] = value.split()[0] - - for key, dim_id, multiplier in CHRONY: - try: - data[dim_id] = int(float(parsed[key]) * multiplier) - except (KeyError, ValueError): - continue - - return data or None diff --git a/python.d/couchdb.chart.py b/python.d/couchdb.chart.py deleted file mode 100644 index 558bac587..000000000 --- a/python.d/couchdb.chart.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: couchdb netdata python.d module -# Author: wohali <wohali@apache.org> -# Thanks to l2isbad for good examples :) - -from collections import namedtuple, defaultdict -from json import loads -from threading import Thread -from socket import gethostbyname, gaierror -try: - from queue import Queue -except ImportError: - from Queue import Queue - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -update_every = 1 -priority = 60000 -retries = 60 - -METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) - -OVERVIEW_STATS = [ - 'couchdb.database_reads.value', - 'couchdb.database_writes.value', - 'couchdb.httpd.view_reads.value' - 'couchdb.httpd_request_methods.COPY.value', - 'couchdb.httpd_request_methods.DELETE.value', - 'couchdb.httpd_request_methods.GET.value', - 'couchdb.httpd_request_methods.HEAD.value', - 'couchdb.httpd_request_methods.OPTIONS.value', - 'couchdb.httpd_request_methods.POST.value', - 'couchdb.httpd_request_methods.PUT.value', - 'couchdb.httpd_status_codes.200.value', - 'couchdb.httpd_status_codes.201.value', - 'couchdb.httpd_status_codes.202.value', - 'couchdb.httpd_status_codes.204.value', - 'couchdb.httpd_status_codes.206.value', - 'couchdb.httpd_status_codes.301.value', - 'couchdb.httpd_status_codes.302.value', - 'couchdb.httpd_status_codes.304.value', - 'couchdb.httpd_status_codes.400.value', - 'couchdb.httpd_status_codes.401.value', - 'couchdb.httpd_status_codes.403.value', - 
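[Editor's note] Each entry in the stats lists above is a dotted path into the nested JSON that CouchDB returns; `_fetch_data()` further down walks those paths key by key. Note, too, that Python concatenates adjacent string literals, so the missing comma after `'couchdb.httpd.view_reads.value'` above silently fuses it with the `COPY` entry into one bogus path. A hedged sketch of the traversal itself:

```python
from functools import reduce

def pluck(tree, dotted_path):
    """Resolve a dotted path such as 'couchdb.database_reads.value'."""
    return reduce(lambda node, key: node[key], dotted_path.split('.'), tree)

stats = {'couchdb': {'database_reads': {'value': 42}}}
print(pluck(stats, 'couchdb.database_reads.value'))  # -> 42
```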
'couchdb.httpd_status_codes.404.value', - 'couchdb.httpd_status_codes.405.value', - 'couchdb.httpd_status_codes.406.value', - 'couchdb.httpd_status_codes.409.value', - 'couchdb.httpd_status_codes.412.value', - 'couchdb.httpd_status_codes.413.value', - 'couchdb.httpd_status_codes.414.value', - 'couchdb.httpd_status_codes.415.value', - 'couchdb.httpd_status_codes.416.value', - 'couchdb.httpd_status_codes.417.value', - 'couchdb.httpd_status_codes.500.value', - 'couchdb.httpd_status_codes.501.value', - 'couchdb.open_os_files.value', - 'couch_replicator.jobs.running.value', - 'couch_replicator.jobs.pending.value', - 'couch_replicator.jobs.crashed.value', -] - -SYSTEM_STATS = [ - 'context_switches', - 'run_queue', - 'ets_table_count', - 'reductions', - 'memory.atom', - 'memory.atom_used', - 'memory.binary', - 'memory.code', - 'memory.ets', - 'memory.other', - 'memory.processes', - 'io_input', - 'io_output', - 'os_proc_count', - 'process_count', - 'internal_replication_jobs' -] - -DB_STATS = [ - 'doc_count', - 'doc_del_count', - 'sizes.file', - 'sizes.external', - 'sizes.active' -] - -ORDER = [ - 'activity', - 'request_methods', - 'response_codes', - 'active_tasks', - 'replicator_jobs', - 'open_files', - 'db_sizes_file', - 'db_sizes_external', - 'db_sizes_active', - 'db_doc_counts', - 'db_doc_del_counts', - 'erlang_memory', - 'erlang_proc_counts', - 'erlang_peak_msg_queue', - 'erlang_reductions' -] - -CHARTS = { - 'activity': { - 'options': [None, 'Overall Activity', 'req/s', - 'dbactivity', 'couchdb.activity', 'stacked'], - 'lines': [ - ['couchdb_database_reads', 'DB reads', 'incremental'], - ['couchdb_database_writes', 'DB writes', 'incremental'], - ['couchdb_httpd_view_reads', 'View reads', 'incremental'] - ] - }, - 'request_methods': { - 'options': [None, 'HTTP request methods', 'req/s', - 'httptraffic', 'couchdb.request_methods', - 'stacked'], - 'lines': [ - ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'], - ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'], - ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'], - ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'], - ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS', - 'incremental'], - ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'], - ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental'] - ] - }, - 'response_codes': { - 'options': [None, 'HTTP response status codes', 'resp/s', - 'httptraffic', 'couchdb.response_codes', - 'stacked'], - 'lines': [ - ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'], - ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'], - ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'], - ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success', - 'incremental'], - ['couchdb_httpd_status_codes_3xx', '3xx Redirection', - 'incremental'], - ['couchdb_httpd_status_codes_4xx', '4xx Client error', - 'incremental'], - ['couchdb_httpd_status_codes_5xx', '5xx Server error', - 'incremental'] - ] - }, - 'open_files': { - 'options': [None, 'Open files', 'files', - 'ops', 'couchdb.open_files', 'line'], - 'lines': [ - ['couchdb_open_os_files', '# files', 'absolute'] - ] - }, - 'active_tasks': { - 'options': [None, 'Active task breakdown', 'tasks', - 'ops', 'couchdb.active_tasks', 'stacked'], - 'lines': [ - ['activetasks_indexer', 'Indexer', 'absolute'], - ['activetasks_database_compaction', 'DB Compaction', 'absolute'], - ['activetasks_replication', 'Replication', 'absolute'], - ['activetasks_view_compaction', 'View Compaction', 
'absolute'] - ] - }, - 'replicator_jobs': { - 'options': [None, 'Replicator job breakdown', 'jobs', - 'ops', 'couchdb.replicator_jobs', 'stacked'], - 'lines': [ - ['couch_replicator_jobs_running', 'Running', 'absolute'], - ['couch_replicator_jobs_pending', 'Pending', 'absolute'], - ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'], - ['internal_replication_jobs', 'Internal replication jobs', - 'absolute'] - ] - }, - 'erlang_memory': { - 'options': [None, 'Erlang VM memory usage', 'bytes', - 'erlang', 'couchdb.erlang_vm_memory', 'stacked'], - 'lines': [ - ['memory_atom', 'atom', 'absolute'], - ['memory_binary', 'binaries', 'absolute'], - ['memory_code', 'code', 'absolute'], - ['memory_ets', 'ets', 'absolute'], - ['memory_processes', 'procs', 'absolute'], - ['memory_other', 'other', 'absolute'] - ] - }, - 'erlang_reductions': { - 'options': [None, 'Erlang reductions', 'count', - 'erlang', 'couchdb.reductions', 'line'], - 'lines': [ - ['reductions', 'reductions', 'incremental'] - ] - }, - 'erlang_proc_counts': { - 'options': [None, 'Process counts', 'count', - 'erlang', 'couchdb.proccounts', 'line'], - 'lines': [ - ['os_proc_count', 'OS procs', 'absolute'], - ['process_count', 'erl procs', 'absolute'] - ] - }, - 'erlang_peak_msg_queue': { - 'options': [None, 'Peak message queue size', 'count', - 'erlang', 'couchdb.peakmsgqueue', - 'line'], - 'lines': [ - ['peak_msg_queue', 'peak size', 'absolute'] - ] - }, - # Lines for the following are added as part of check() - 'db_sizes_file': { - 'options': [None, 'Database sizes (file)', 'KB', - 'perdbstats', 'couchdb.db_sizes_file', 'line'], - 'lines': [] - }, - 'db_sizes_external': { - 'options': [None, 'Database sizes (external)', 'KB', - 'perdbstats', 'couchdb.db_sizes_external', 'line'], - 'lines': [] - }, - 'db_sizes_active': { - 'options': [None, 'Database sizes (active)', 'KB', - 'perdbstats', 'couchdb.db_sizes_active', 'line'], - 'lines': [] - }, - 'db_doc_counts': { - 'options': [None, 'Database # of docs', 'docs', - 'perdbstats', 'couchdb_db_doc_count', 'line'], - 'lines': [] - }, - 'db_doc_del_counts': { - 'options': [None, 'Database # of deleted docs', 'docs', - 'perdbstats', 'couchdb_db_doc_del_count', 'line'], - 'lines': [] - } -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.host = self.configuration.get('host', '127.0.0.1') - self.port = self.configuration.get('port', 5984) - self.node = self.configuration.get('node', 'couchdb@127.0.0.1') - self.scheme = self.configuration.get('scheme', 'http') - self.user = self.configuration.get('user') - self.password = self.configuration.get('pass') - try: - self.dbs = self.configuration.get('databases').split(' ') - except (KeyError, AttributeError): - self.dbs = [] - - def check(self): - if not (self.host and self.port): - self.error('Host is not defined in the module configuration file') - return False - try: - self.host = gethostbyname(self.host) - except gaierror as error: - self.error(str(error)) - return False - self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme, - host=self.host, - port=self.port) - stats = self.url + '/_node/{node}/_stats'.format(node=self.node) - active_tasks = self.url + '/_active_tasks' - system = self.url + '/_node/{node}/_system'.format(node=self.node) - self.methods = [METHODS(get_data=self._get_overview_stats, - url=stats, - stats=OVERVIEW_STATS), - 
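[Editor's note] The URLs assembled in `check()` map onto CouchDB 2.x's monitoring endpoints: `/_node/{node}/_stats`, `/_node/{node}/_system` and `/_active_tasks`. A hedged sketch of polling one of them without the `UrlService` plumbing (host, port and node name mirror the module's defaults; authentication is omitted):

```python
import json
from urllib.request import urlopen

base = 'http://127.0.0.1:5984'      # assumed local CouchDB
node = 'couchdb@127.0.0.1'          # default node name used above

with urlopen('{0}/_node/{1}/_stats'.format(base, node)) as resp:
    stats = json.load(resp)

# The dotted names in OVERVIEW_STATS index into this structure:
print(stats['couchdb']['database_reads']['value'])
```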
METHODS(get_data=self._get_active_tasks_stats, - url=active_tasks, - stats=None), - METHODS(get_data=self._get_overview_stats, - url=system, - stats=SYSTEM_STATS), - METHODS(get_data=self._get_dbs_stats, - url=self.url, - stats=DB_STATS)] - # must initialise manager before using _get_raw_data - self._manager = self._build_manager() - self.dbs = [db for db in self.dbs - if self._get_raw_data(self.url + '/' + db)] - for db in self.dbs: - self.definitions['db_sizes_file']['lines'].append( - ['db_'+db+'_sizes_file', db, 'absolute', 1, 1000] - ) - self.definitions['db_sizes_external']['lines'].append( - ['db_'+db+'_sizes_external', db, 'absolute', 1, 1000] - ) - self.definitions['db_sizes_active']['lines'].append( - ['db_'+db+'_sizes_active', db, 'absolute', 1, 1000] - ) - self.definitions['db_doc_counts']['lines'].append( - ['db_'+db+'_doc_count', db, 'absolute'] - ) - self.definitions['db_doc_del_counts']['lines'].append( - ['db_'+db+'_doc_del_count', db, 'absolute'] - ) - return UrlService.check(self) - - def _get_data(self): - threads = list() - queue = Queue() - result = dict() - - for method in self.methods: - th = Thread(target=method.get_data, - args=(queue, method.url, method.stats)) - th.start() - threads.append(th) - - for thread in threads: - thread.join() - result.update(queue.get()) - - # self.info('couchdb result = ' + str(result)) - return result or None - - def _get_overview_stats(self, queue, url, stats): - raw_data = self._get_raw_data(url) - if not raw_data: - return queue.put(dict()) - data = loads(raw_data) - to_netdata = self._fetch_data(raw_data=data, metrics=stats) - if 'message_queues' in data: - to_netdata['peak_msg_queue'] = get_peak_msg_queue(data) - return queue.put(to_netdata) - - def _get_active_tasks_stats(self, queue, url, _): - taskdict = defaultdict(int) - taskdict["activetasks_indexer"] = 0 - taskdict["activetasks_database_compaction"] = 0 - taskdict["activetasks_replication"] = 0 - taskdict["activetasks_view_compaction"] = 0 - raw_data = self._get_raw_data(url) - if not raw_data: - return queue.put(dict()) - data = loads(raw_data) - for task in data: - taskdict["activetasks_" + task["type"]] += 1 - return queue.put(dict(taskdict)) - - def _get_dbs_stats(self, queue, url, stats): - to_netdata = {} - for db in self.dbs: - raw_data = self._get_raw_data(url + '/' + db) - if not raw_data: - continue - data = loads(raw_data) - for metric in stats: - value = data - metrics_list = metric.split('.') - try: - for m in metrics_list: - value = value[m] - except KeyError as e: - self.debug('cannot process ' + metric + ' for ' + db - + ": " + str(e)) - continue - metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list)) - to_netdata[metric_name] = value - return queue.put(to_netdata) - - def _fetch_data(self, raw_data, metrics): - data = dict() - for metric in metrics: - value = raw_data - metrics_list = metric.split('.') - try: - for m in metrics_list: - value = value[m] - except KeyError as e: - self.debug('cannot process ' + metric + ': ' + str(e)) - continue - # strip off .value from end of stat - if metrics_list[-1] == 'value': - metrics_list = metrics_list[:-1] - # sum up 3xx/4xx/5xx - if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \ - int(metrics_list[2]) > 202: - metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100) - if '_'.join(metrics_list) in data: - data['_'.join(metrics_list)] += value - else: - data['_'.join(metrics_list)] = value - else: - data['_'.join(metrics_list)] = value - return data - - -def get_peak_msg_queue(data): - 
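[Editor's note] `_get_data()` above fans out one worker thread per endpoint and drains exactly one result per worker from a shared queue — which is why each collector puts an *empty* dict on failure rather than nothing, as otherwise `queue.get()` would block forever. The pattern in isolation (Python 3 spelling of the imports):

```python
from queue import Queue
from threading import Thread

def fetch(queue, url):
    # Stand-in for an HTTP call; always put exactly one item.
    queue.put({url: len(url)})

urls = ['/_stats', '/_system', '/_active_tasks']
queue, threads, result = Queue(), [], {}
for url in urls:
    th = Thread(target=fetch, args=(queue, url))
    th.start()
    threads.append(th)
for th in threads:
    th.join()
    result.update(queue.get())  # one item per worker, as in the module
print(result)
```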
maxsize = 0 - queues = data['message_queues'] - for queue in iter(queues.values()): - if isinstance(queue, dict) and 'count' in queue: - value = queue['count'] - elif isinstance(queue, int): - value = queue - else: - continue - maxsize = max(maxsize, value) - return maxsize diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py deleted file mode 100644 index 3abde736c..000000000 --- a/python.d/cpufreq.chart.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: cpufreq netdata python.d module -# Author: Pawel Krupa (paulfantom) and Steven Noonan (tycho) - -import glob -import os - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -# update_every = 2 - -ORDER = ['cpufreq'] - -CHARTS = { - 'cpufreq': { - 'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'], - 'lines': [ - # lines are created dynamically in `check()` method - ]} -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - prefix = os.getenv('NETDATA_HOST_PREFIX', "") - if prefix.endswith('/'): - prefix = prefix[:-1] - self.sys_dir = prefix + "/sys/devices" - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.fake_name = 'cpu' - self.assignment = {} - self.accurate_exists = True - self.accurate_last = {} - - def _get_data(self): - data = {} - - if self.accurate_exists: - accurate_ok = True - - for name, paths in self.assignment.items(): - last = self.accurate_last[name] - - current = {} - deltas = {} - ticks_since_last = 0 - - for line in open(paths['accurate'], 'r'): - line = list(map(int, line.split())) - current[line[0]] = line[1] - ticks = line[1] - last.get(line[0], 0) - ticks_since_last += ticks - deltas[line[0]] = line[1] - last.get(line[0], 0) - - avg_freq = 0 - if ticks_since_last != 0: - for frequency, ticks in deltas.items(): - avg_freq += frequency * ticks - avg_freq /= ticks_since_last - - data[name] = avg_freq - self.accurate_last[name] = current - if avg_freq == 0 or ticks_since_last == 0: - # Delta is either too large or nonexistent, fall back to - # less accurate reading. This can happen if we switch - # to/from the 'schedutil' governor, which doesn't report - # stats. - accurate_ok = False - - if accurate_ok: - return data - - for name, paths in self.assignment.items(): - data[name] = open(paths['inaccurate'], 'r').read() - - return data - - def check(self): - try: - self.sys_dir = str(self.configuration['sys_dir']) - except (KeyError, TypeError): - self.error("No path specified. 
Using: '" + self.sys_dir + "'") - - for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/stats/time_in_state'): - path_elem = path.split('/') - cpu = path_elem[-4] - if cpu not in self.assignment: - self.assignment[cpu] = {} - self.assignment[cpu]['accurate'] = path - self.accurate_last[cpu] = {} - - if len(self.assignment) == 0: - self.accurate_exists = False - - for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'): - path_elem = path.split('/') - cpu = path_elem[-3] - if cpu not in self.assignment: - self.assignment[cpu] = {} - self.assignment[cpu]['inaccurate'] = path - - if len(self.assignment) == 0: - self.error("couldn't find a method to read cpufreq statistics") - return False - - for name in sorted(self.assignment, key=lambda v: int(v[3:])): - self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000]) - - return True - diff --git a/python.d/cpuidle.chart.py b/python.d/cpuidle.chart.py deleted file mode 100644 index d14c6aaf3..000000000 --- a/python.d/cpuidle.chart.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: cpuidle netdata python.d module -# Author: Steven Noonan (tycho) - -import glob -import os -import platform - -from bases.FrameworkServices.SimpleService import SimpleService - -import ctypes -syscall = ctypes.CDLL('libc.so.6').syscall - -# default module values (can be overridden per job in `config`) -# update_every = 2 - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - prefix = os.getenv('NETDATA_HOST_PREFIX', "") - if prefix.endswith('/'): - prefix = prefix[:-1] - self.sys_dir = prefix + "/sys/devices/system/cpu" - self.schedstat_path = prefix + "/proc/schedstat" - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = [] - self.definitions = {} - self.fake_name = 'cpu' - self.assignment = {} - self.last_schedstat = None - - @staticmethod - def __gettid(): - # This is horrendous. We need the *thread id* (not the *process id*), - # but there's no Python standard library way of doing that. If you need - # to enable this module on a non-x86 machine type, you'll have to find - # the Linux syscall number for gettid() and add it to the dictionary - # below. - syscalls = { - 'i386': 224, - 'x86_64': 186, - } - if platform.machine() not in syscalls: - return None - tid = syscall(syscalls[platform.machine()]) - return tid - - def __wake_cpus(self, cpus): - # Requires Python 3.3+. This will "tickle" each CPU to force it to - # update its idle counters. - if hasattr(os, 'sched_setaffinity'): - pid = self.__gettid() - save_affinity = os.sched_getaffinity(pid) - for idx in cpus: - os.sched_setaffinity(pid, [idx]) - os.sched_getaffinity(pid) - os.sched_setaffinity(pid, save_affinity) - - def __read_schedstat(self): - cpus = {} - for line in open(self.schedstat_path, 'r'): - if not line.startswith('cpu'): - continue - line = line.rstrip().split() - cpu = line[0] - active_time = line[7] - cpus[cpu] = int(active_time) // 1000 - return cpus - - def _get_data(self): - results = {} - - # Use the kernel scheduler stats to determine how much time was spent - # in C0 (active). - schedstat = self.__read_schedstat() - - # Determine if any of the CPUs are idle. If they are, then we need to - # tickle them in order to update their C-state residency statistics. 
- if self.last_schedstat is None: - needs_tickle = list(self.assignment.keys()) - else: - needs_tickle = [] - for cpu, active_time in self.last_schedstat.items(): - delta = schedstat[cpu] - active_time - if delta < 1: - needs_tickle.append(cpu) - - if needs_tickle: - # This line is critical for the stats to update. If we don't "tickle" - # idle CPUs, then the counters for those CPUs stop counting. - self.__wake_cpus([int(cpu[3:]) for cpu in needs_tickle]) - - # Re-read schedstat now that we've tickled any idlers. - schedstat = self.__read_schedstat() - - self.last_schedstat = schedstat - - for cpu, metrics in self.assignment.items(): - update_time = schedstat[cpu] - results[cpu + '_active_time'] = update_time - - for metric, path in metrics.items(): - residency = int(open(path, 'r').read()) - results[metric] = residency - - return results - - def check(self): - if self.__gettid() is None: - self.error("Cannot get thread ID. Stats would be completely broken.") - return False - - for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')): - # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name'] - path_elem = path.split('/') - cpu = path_elem[-4] - state = path_elem[-2] - statename = open(path, 'rt').read().rstrip() - - orderid = '%s_cpuidle' % (cpu,) - if orderid not in self.definitions: - self.order.append(orderid) - active_name = '%s_active_time' % (cpu,) - self.definitions[orderid] = { - 'options': [None, 'C-state residency', 'time%', 'cpuidle', 'cpuidle.cpuidle', 'stacked'], - 'lines': [ - [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1], - ], - } - self.assignment[cpu] = {} - - defid = '%s_%s_time' % (orderid, state) - - self.definitions[orderid]['lines'].append( - [defid, statename, 'percentage-of-incremental-row', 1, 1] - ) - - self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time']) - - # Sort order by kernel-specified CPU index - self.order.sort(key=lambda x: int(x.split('_')[0][3:])) - - if len(self.definitions) == 0: - self.error("couldn't find cstate stats") - return False - - return True - diff --git a/python.d/dns_query_time.chart.py b/python.d/dns_query_time.chart.py deleted file mode 100644 index 9a794a9c9..000000000 --- a/python.d/dns_query_time.chart.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: dns_query_time netdata python.d module -# Author: l2isbad - -from random import choice -from threading import Thread -from socket import getaddrinfo, gaierror - -try: - from time import monotonic as time -except ImportError: - from time import time -try: - import dns.message, dns.query, dns.name - DNS_PYTHON = True -except ImportError: - DNS_PYTHON = False -try: - from queue import Queue -except ImportError: - from Queue import Queue - -from bases.FrameworkServices.SimpleService import SimpleService - - -# default module values (can be overridden per job in `config`) -update_every = 5 -priority = 60000 -retries = 60 - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = list() - self.definitions = dict() - self.timeout = self.configuration.get('response_timeout', 4) - self.aggregate = self.configuration.get('aggregate', True) - self.domains = self.configuration.get('domains') - self.server_list = self.configuration.get('dns_servers') - - def check(self): - if not DNS_PYTHON: - self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py') - return 
False - - self.timeout = self.timeout if isinstance(self.timeout, int) else 4 - - if not all([self.domains, self.server_list, - isinstance(self.server_list, str), isinstance(self.domains, str)]): - self.error('server_list and domain_list can\'t be empty') - return False - else: - self.domains, self.server_list = self.domains.split(), self.server_list.split() - - for ns in self.server_list: - if not check_ns(ns): - self.info('Bad NS: %s' % ns) - self.server_list.remove(ns) - if not self.server_list: - return False - - data = self._get_data(timeout=1) - - down_servers = [s for s in data if data[s] == -100] - for down in down_servers: - down = down[3:].replace('_', '.') - self.info('Removed due to non response %s' % down) - self.server_list.remove(down) - if not self.server_list: - return False - - self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list) - return True - - def _get_data(self, timeout=None): - return dns_request(self.server_list, timeout or self.timeout, self.domains) - - -def dns_request(server_list, timeout, domains): - threads = list() - que = Queue() - result = dict() - - def dns_req(ns, t, q): - domain = dns.name.from_text(choice(domains)) - request = dns.message.make_query(domain, dns.rdatatype.A) - - try: - dns_start = time() - dns.query.udp(request, ns, timeout=t) - dns_end = time() - query_time = round((dns_end - dns_start) * 1000) - q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time}) - except dns.exception.Timeout: - q.put({'_'.join(['ns', ns.replace('.', '_')]): -100}) - - for server in server_list: - th = Thread(target=dns_req, args=(server, timeout, que)) - th.start() - threads.append(th) - - for th in threads: - th.join() - result.update(que.get()) - - return result - - -def check_ns(ns): - try: - return getaddrinfo(ns, 'domain')[0][4][0] - except gaierror: - return False - - -def create_charts(aggregate, server_list): - if aggregate: - order = ['dns_group'] - definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers', - 'dns_query_time.response_time', 'line'], 'lines': []}} - for ns in server_list: - definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']) - - return order, definitions - else: - order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list] - definitions = dict() - for ns in server_list: - definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns, - 'dns_query_time.response_time', 'area'], - 'lines': [['_'.join(['ns', ns.replace('.', '_')]), - ns, 'absolute']]} - return order, definitions diff --git a/python.d/dnsdist.chart.py b/python.d/dnsdist.chart.py deleted file mode 100644 index b40112cbc..000000000 --- a/python.d/dnsdist.chart.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -from json import loads -from bases.FrameworkServices.UrlService import UrlService - -ORDER = ['queries', 'queries_dropped', 'packets_dropped', 'answers', 'backend_responses', 'backend_commerrors', 'backend_errors', 'cache', 'servercpu', 'servermem', 'query_latency', 'query_latency_avg'] -CHARTS = { - 'queries': { - 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'], - 'lines': [ - ['queries', 'all', 'incremental'], - ['rdqueries', 'recursive', 'incremental'], - ['empty-queries', 'empty', 'incremental'] - ]}, - 'queries_dropped': { - 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'], - 
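[Editor's note] Stripped of the threading, the measurement `dns_request()` performs above is a single timed UDP query via dnspython; `-100` is the sentinel the module uses to mark a server for removal. A hedged, Python 3 sketch (the nameserver and domain are placeholders):

```python
import time

import dns.exception
import dns.message
import dns.query
import dns.rdatatype

def query_time_ms(nameserver, domain, timeout=4):
    request = dns.message.make_query(domain, dns.rdatatype.A)
    started = time.monotonic()
    try:
        dns.query.udp(request, nameserver, timeout=timeout)
    except dns.exception.Timeout:
        return -100  # sentinel: treat this server as unresponsive
    return round((time.monotonic() - started) * 1000)

print(query_time_ms('8.8.8.8', 'example.com'))
```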
'lines': [ - ['rule-drop', 'rule drop', 'incremental'], - ['dyn-blocked', 'dynamic block', 'incremental'], - ['no-policy', 'no policy', 'incremental'], - ['noncompliant-queries', 'non compliant', 'incremental'] - ]}, - 'packets_dropped': { - 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'], - 'lines': [ - ['acl-drops', 'acl', 'incremental'] - ]}, - 'answers': { - 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'], - 'lines': [ - ['self-answered', 'self answered', 'incremental'], - ['rule-nxdomain', 'nxdomain', 'incremental', -1], - ['rule-refused', 'refused', 'incremental', -1], - ['trunc-failures', 'trunc failures', 'incremental', -1] - ]}, - 'backend_responses': { - 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'], - 'lines': [ - ['responses', 'responses', 'incremental'] - ]}, - 'backend_commerrors': { - 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'], - 'lines': [ - ['downstream-send-errors', 'send errors', 'incremental'] - ]}, - 'backend_errors': { - 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'], - 'lines': [ - ['downstream-timeouts', 'timeout', 'incremental'], - ['servfail-responses', 'servfail', 'incremental'], - ['noncompliant-responses', 'non compliant', 'incremental'] - ]}, - 'cache': { - 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'], - 'lines': [ - ['cache-hits', 'hits', 'incremental'], - ['cache-misses', 'misses', 'incremental', -1] - ]}, - 'servercpu': { - 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'], - 'lines': [ - ['cpu-sys-msec', 'system state', 'incremental'], - ['cpu-user-msec', 'user state', 'incremental'] - ]}, - 'servermem': { - 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'], - 'lines': [ - ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576] - ]}, - 'query_latency': { - 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'], - 'lines': [ - ['latency0-1', '1ms', 'incremental'], - ['latency1-10', '10ms', 'incremental'], - ['latency10-50', '50ms', 'incremental'], - ['latency50-100', '100ms', 'incremental'], - ['latency100-1000', '1sec', 'incremental'], - ['latency-slow', 'slow', 'incremental'] - ]}, - 'query_latency_avg': { - 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency', 'dnsdist.query_latency_avg', 'line'], - 'lines': [ - ['latency-avg100', '100', 'absolute'], - ['latency-avg1000', '1k', 'absolute'], - ['latency-avg10000', '10k', 'absolute'], - ['latency-avg1000000', '1000k', 'absolute'] - ]} -} - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - data = self._get_raw_data() - if not data: - return None - - return loads(data) - diff --git a/python.d/dovecot.chart.py b/python.d/dovecot.chart.py deleted file mode 100644 index 5689f2ec9..000000000 --- a/python.d/dovecot.chart.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: dovecot netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.SocketService import SocketService - -# default module values 
(can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['sessions', 'logins', 'commands', - 'faults', - 'context_switches', - 'io', 'net', 'syscalls', - 'lookup', 'cache', - 'auth', 'auth_cache'] - -CHARTS = { - 'sessions': { - 'options': [None, "Dovecot Active Sessions", 'number', 'sessions', 'dovecot.sessions', 'line'], - 'lines': [ - ['num_connected_sessions', 'active sessions', 'absolute'] - ]}, - 'logins': { - 'options': [None, "Dovecot Logins", 'number', 'logins', 'dovecot.logins', 'line'], - 'lines': [ - ['num_logins', 'logins', 'absolute'] - ]}, - 'commands': { - 'options': [None, "Dovecot Commands", "commands", 'commands', 'dovecot.commands', 'line'], - 'lines': [ - ['num_cmds', 'commands', 'absolute'] - ]}, - 'faults': { - 'options': [None, "Dovecot Page Faults", "faults", 'page faults', 'dovecot.faults', 'line'], - 'lines': [ - ['min_faults', 'minor', 'absolute'], - ['maj_faults', 'major', 'absolute'] - ]}, - 'context_switches': { - 'options': [None, "Dovecot Context Switches", '', 'context switches', 'dovecot.context_switches', 'line'], - 'lines': [ - ['vol_cs', 'voluntary', 'absolute'], - ['invol_cs', 'involuntary', 'absolute'] - ]}, - 'io': { - 'options': [None, "Dovecot Disk I/O", 'kilobytes/s', 'disk', 'dovecot.io', 'area'], - 'lines': [ - ['disk_input', 'read', 'incremental', 1, 1024], - ['disk_output', 'write', 'incremental', -1, 1024] - ]}, - 'net': { - 'options': [None, "Dovecot Network Bandwidth", 'kilobits/s', 'network', 'dovecot.net', 'area'], - 'lines': [ - ['read_bytes', 'read', 'incremental', 8, 1024], - ['write_bytes', 'write', 'incremental', -8, 1024] - ]}, - 'syscalls': { - 'options': [None, "Dovecot Number of SysCalls", 'syscalls/s', 'system', 'dovecot.syscalls', 'line'], - 'lines': [ - ['read_count', 'read', 'incremental'], - ['write_count', 'write', 'incremental'] - ]}, - 'lookup': { - 'options': [None, "Dovecot Lookups", 'number/s', 'lookups', 'dovecot.lookup', 'stacked'], - 'lines': [ - ['mail_lookup_path', 'path', 'incremental'], - ['mail_lookup_attr', 'attr', 'incremental'] - ]}, - 'cache': { - 'options': [None, "Dovecot Cache Hits", 'hits/s', 'cache', 'dovecot.cache', 'line'], - 'lines': [ - ['mail_cache_hits', 'hits', 'incremental'] - ]}, - 'auth': { - 'options': [None, "Dovecot Authentications", 'attempts', 'logins', 'dovecot.auth', 'stacked'], - 'lines': [ - ['auth_successes', 'ok', 'absolute'], - ['auth_failures', 'failed', 'absolute'] - ]}, - 'auth_cache': { - 'options': [None, "Dovecot Authentication Cache", 'number', 'cache', 'dovecot.auth_cache', 'stacked'], - 'lines': [ - ['auth_cache_hits', 'hit', 'absolute'], - ['auth_cache_misses', 'miss', 'absolute'] - ]} -} - - -class Service(SocketService): - def __init__(self, configuration=None, name=None): - SocketService.__init__(self, configuration=configuration, name=name) - self.request = "EXPORT\tglobal\r\n" - self.host = None # localhost - self.port = None # 24242 - # self._keep_alive = True - self.unix_socket = "/var/run/dovecot/stats" - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Format data received from socket - :return: dict - """ - try: - raw = self._get_raw_data() - except (ValueError, AttributeError): - return None - - if raw is None: - self.debug("dovecot returned no data") - return None - - data = raw.split('\n')[:2] - desc = data[0].split('\t') - vals = data[1].split('\t') - ret = dict() - for i, _ in enumerate(desc): - try: 
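[Editor's note] The stats protocol consumed here is a single `EXPORT\tglobal\r\n` request over Dovecot's UNIX socket, answered by one tab-separated header row and one row of values — exactly what the parsing below unpacks. A hedged sketch without the `SocketService` machinery (the socket path is the module's default):

```python
import socket

sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect('/var/run/dovecot/stats')   # default path used above
sock.sendall(b'EXPORT\tglobal\r\n')
raw = sock.recv(1 << 16).decode()
sock.close()

header, values = raw.split('\n')[:2]
fields = dict(zip(header.split('\t'), values.split('\t')))
print(fields.get('num_logins'))
```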
- ret[str(desc[i])] = int(vals[i]) - except ValueError: - continue - return ret or None diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py deleted file mode 100644 index 9c2c58944..000000000 --- a/python.d/elasticsearch.chart.py +++ /dev/null @@ -1,554 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: elastic search node stats netdata python.d module -# Author: l2isbad - -from collections import namedtuple -from json import loads -from socket import gethostbyname, gaierror -from threading import Thread -try: - from queue import Queue -except ImportError: - from Queue import Queue - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -update_every = 5 -priority = 60000 -retries = 60 - -METHODS = namedtuple('METHODS', ['get_data', 'url', 'run']) - -NODE_STATS = [ - 'indices.search.fetch_current', - 'indices.search.fetch_total', - 'indices.search.query_current', - 'indices.search.query_total', - 'indices.search.query_time_in_millis', - 'indices.search.fetch_time_in_millis', - 'indices.indexing.index_total', - 'indices.indexing.index_current', - 'indices.indexing.index_time_in_millis', - 'indices.refresh.total', - 'indices.refresh.total_time_in_millis', - 'indices.flush.total', - 'indices.flush.total_time_in_millis', - 'indices.translog.operations', - 'indices.translog.size_in_bytes', - 'indices.translog.uncommitted_operations', - 'indices.translog.uncommitted_size_in_bytes', - 'indices.segments.count', - 'indices.segments.terms_memory_in_bytes', - 'indices.segments.stored_fields_memory_in_bytes', - 'indices.segments.term_vectors_memory_in_bytes', - 'indices.segments.norms_memory_in_bytes', - 'indices.segments.points_memory_in_bytes', - 'indices.segments.doc_values_memory_in_bytes', - 'indices.segments.index_writer_memory_in_bytes', - 'indices.segments.version_map_memory_in_bytes', - 'indices.segments.fixed_bit_set_memory_in_bytes', - 'jvm.gc.collectors.young.collection_count', - 'jvm.gc.collectors.old.collection_count', - 'jvm.gc.collectors.young.collection_time_in_millis', - 'jvm.gc.collectors.old.collection_time_in_millis', - 'jvm.mem.heap_used_percent', - 'jvm.mem.heap_used_in_bytes', - 'jvm.mem.heap_committed_in_bytes', - 'jvm.buffer_pools.direct.count', - 'jvm.buffer_pools.direct.used_in_bytes', - 'jvm.buffer_pools.direct.total_capacity_in_bytes', - 'jvm.buffer_pools.mapped.count', - 'jvm.buffer_pools.mapped.used_in_bytes', - 'jvm.buffer_pools.mapped.total_capacity_in_bytes', - 'thread_pool.bulk.queue', - 'thread_pool.bulk.rejected', - 'thread_pool.index.queue', - 'thread_pool.index.rejected', - 'thread_pool.search.queue', - 'thread_pool.search.rejected', - 'thread_pool.merge.queue', - 'thread_pool.merge.rejected', - 'indices.fielddata.memory_size_in_bytes', - 'indices.fielddata.evictions', - 'breakers.fielddata.tripped', - 'http.current_open', - 'transport.rx_size_in_bytes', - 'transport.tx_size_in_bytes', - 'process.max_file_descriptors', - 'process.open_file_descriptors' -] - -CLUSTER_STATS = [ - 'nodes.count.data_only', - 'nodes.count.master_data', - 'nodes.count.total', - 'nodes.count.master_only', - 'nodes.count.client', - 'indices.docs.count', - 'indices.query_cache.hit_count', - 'indices.query_cache.miss_count', - 'indices.store.size_in_bytes', - 'indices.count', - 'indices.shards.total' -] - -HEALTH_STATS = [ - 'number_of_nodes', - 'number_of_data_nodes', - 'number_of_pending_tasks', - 'number_of_in_flight_fetch', - 'active_shards', - 'relocating_shards', - 'unassigned_shards', - 
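[Editor's note] Several charts below report latency, which Elasticsearch does not expose directly; the module derives it from pairs of cumulative counters (see `find_avg()` near the end of the file): per-interval latency is the delta in time spent divided by the delta in operations. The arithmetic in isolation:

```python
def avg_latency_ms(prev, curr):
    """Per-interval latency from cumulative (count, time-in-millis) pairs."""
    ops = curr['total'] - prev['total']
    if ops <= 0:
        return 0.0  # no new operations this interval
    return float(curr['spent_time'] - prev['spent_time']) / ops

prev = {'total': 100, 'spent_time': 2500}
curr = {'total': 160, 'spent_time': 2830}
print(avg_latency_ms(prev, curr))  # -> 5.5 ms per operation
```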
'delayed_unassigned_shards', - 'initializing_shards', - 'active_shards_percent_as_number' -] - -LATENCY = { - 'query_latency': - {'total': 'indices_search_query_total', - 'spent_time': 'indices_search_query_time_in_millis'}, - 'fetch_latency': - {'total': 'indices_search_fetch_total', - 'spent_time': 'indices_search_fetch_time_in_millis'}, - 'indexing_latency': - {'total': 'indices_indexing_index_total', - 'spent_time': 'indices_indexing_index_time_in_millis'}, - 'flushing_latency': - {'total': 'indices_flush_total', - 'spent_time': 'indices_flush_total_time_in_millis'} -} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['search_performance_total', 'search_performance_current', 'search_performance_time', - 'search_latency', 'index_performance_total', 'index_performance_current', 'index_performance_time', - 'index_latency', 'index_translog_operations', 'index_translog_size', 'index_segments_count', 'index_segments_memory_writer', - 'index_segments_memory', 'jvm_mem_heap', 'jvm_mem_heap_bytes', 'jvm_buffer_pool_count', - 'jvm_direct_buffers_memory', 'jvm_mapped_buffers_memory', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors', - 'host_metrics_http', 'host_metrics_transport', 'thread_pool_queued', 'thread_pool_rejected', - 'fielddata_cache', 'fielddata_evictions_tripped', 'cluster_health_status', 'cluster_health_nodes', - 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache', 'cluster_stats_docs', - 'cluster_stats_store', 'cluster_stats_indices_shards'] - -CHARTS = { - 'search_performance_total': { - 'options': [None, 'Queries And Fetches', 'number of', 'search performance', - 'elastic.search_performance_total', 'stacked'], - 'lines': [ - ['indices_search_query_total', 'queries', 'incremental'], - ['indices_search_fetch_total', 'fetches', 'incremental'] - ]}, - 'search_performance_current': { - 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance', - 'elastic.search_performance_current', 'stacked'], - 'lines': [ - ['indices_search_query_current', 'queries', 'absolute'], - ['indices_search_fetch_current', 'fetches', 'absolute'] - ]}, - 'search_performance_time': { - 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance', - 'elastic.search_performance_time', 'stacked'], - 'lines': [ - ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000], - ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000] - ]}, - 'search_latency': { - 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'], - 'lines': [ - ['query_latency', 'query', 'absolute', 1, 1000], - ['fetch_latency', 'fetch', 'absolute', 1, 1000] - ]}, - 'index_performance_total': { - 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of', - 'indexing performance', 'elastic.index_performance_total', 'stacked'], - 'lines': [ - ['indices_indexing_index_total', 'indexed', 'incremental'], - ['indices_refresh_total', 'refreshes', 'incremental'], - ['indices_flush_total', 'flushes', 'incremental'] - ]}, - 'index_performance_current': { - 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed', - 'indexing performance', 'elastic.index_performance_current', 'stacked'], - 'lines': [ - ['indices_indexing_index_current', 'documents', 'absolute'] - ]}, - 'index_performance_time': { - 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 
'indexing performance', - 'elastic.index_performance_time', 'stacked'], - 'lines': [ - ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000], - ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000], - ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000] - ]}, - 'index_latency': { - 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance', - 'elastic.index_latency', 'stacked'], - 'lines': [ - ['indexing_latency', 'indexing', 'absolute', 1, 1000], - ['flushing_latency', 'flushing', 'absolute', 1, 1000] - ]}, - 'index_translog_operations': { - 'options': [None, 'Translog Operations', 'count', 'translog', - 'elastic.index_translog_operations', 'area'], - 'lines': [ - ['indices_translog_operations', 'total', 'absolute'], - ['indices_translog_uncommitted_operations', 'uncommited', 'absolute'] - ]}, - 'index_translog_size': { - 'options': [None, 'Translog Size', 'MB', 'translog', - 'elastic.index_translog_size', 'area'], - 'lines': [ - ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567], - ['indices_translog_uncommitted_size_in_bytes', 'uncommited', 'absolute', 1, 1048567] - ]}, - 'index_segments_count': { - 'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments', - 'elastic.index_segments_count', 'line'], - 'lines': [ - ['indices_segments_count', 'segments', 'absolute'] - ]}, - 'index_segments_memory_writer': { - 'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments', - 'elastic.index_segments_memory_writer', 'area'], - 'lines': [ - ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048567] - ]}, - 'index_segments_memory': { - 'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments', - 'elastic.index_segments_memory', 'stacked'], - 'lines': [ - ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048567], - ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048567], - ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048567], - ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048567], - ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048567], - ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048567], - ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048567], - ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048567] - ]}, - 'jvm_mem_heap': { - 'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc', - 'elastic.jvm_heap', 'area'], - 'lines': [ - ['jvm_mem_heap_used_percent', 'inuse', 'absolute'] - ]}, - 'jvm_mem_heap_bytes': { - 'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc', - 'elastic.jvm_heap_bytes', 'area'], - 'lines': [ - ['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576], - ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576] - ]}, - 'jvm_buffer_pool_count': { - 'options': [None, 'JVM Buffers', 'count', 'memory usage and gc', - 'elastic.jvm_buffer_pool_count', 'line'], - 'lines': [ - ['jvm_buffer_pools_direct_count', 'direct', 'absolute'], - ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute'] - ]}, - 'jvm_direct_buffers_memory': { - 'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc', - 'elastic.jvm_direct_buffers_memory', 'area'], - 'lines': [ - 
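[Editor's note] All of these definitions follow the same python.d schema: `'options'` is `[name, title, units, family, context, chart type]` and each line is `[dimension id, label, algorithm, multiplier, divisor]`. A minimal, hedged example follows; note that rendering bytes as megabytes calls for a divisor of 1048576 (`1 << 20`), so the `1048567` repeated through this file reads like a long-standing typo:

```python
MIB = 1 << 20  # 1048576 bytes

CHARTS = {
    'heap': {
        # [name, title, units, family, context, chart type]
        'options': [None, 'JVM Heap Used', 'MB', 'memory', 'example.jvm_heap', 'area'],
        'lines': [
            # [dimension id, label, algorithm, multiplier, divisor]
            ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, MIB],
        ],
    },
}
```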
['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048567], - ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048567] - ]}, - 'jvm_mapped_buffers_memory': { - 'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc', - 'elastic.jvm_mapped_buffers_memory', 'area'], - 'lines': [ - ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048567], - ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048567] - ]}, - 'jvm_gc_count': { - 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'], - 'lines': [ - ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'], - ['jvm_gc_collectors_old_collection_count', 'old', 'incremental'] - ]}, - 'jvm_gc_time': { - 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc', - 'elastic.gc_time', 'stacked'], - 'lines': [ - ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'], - ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental'] - ]}, - 'thread_pool_queued': { - 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections', - 'elastic.thread_pool_queued', 'stacked'], - 'lines': [ - ['thread_pool_bulk_queue', 'bulk', 'absolute'], - ['thread_pool_index_queue', 'index', 'absolute'], - ['thread_pool_search_queue', 'search', 'absolute'], - ['thread_pool_merge_queue', 'merge', 'absolute'] - ]}, - 'thread_pool_rejected': { - 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections', - 'elastic.thread_pool_rejected', 'stacked'], - 'lines': [ - ['thread_pool_bulk_rejected', 'bulk', 'absolute'], - ['thread_pool_index_rejected', 'index', 'absolute'], - ['thread_pool_search_rejected', 'search', 'absolute'], - ['thread_pool_merge_rejected', 'merge', 'absolute'] - ]}, - 'fielddata_cache': { - 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'], - 'lines': [ - ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576] - ]}, - 'fielddata_evictions_tripped': { - 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events', - 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'], - 'lines': [ - ['indices_fielddata_evictions', 'evictions', 'incremental'], - ['indices_fielddata_tripped', 'tripped', 'incremental'] - ]}, - 'cluster_health_nodes': { - 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API', - 'elastic.cluster_health_nodes', 'stacked'], - 'lines': [ - ['number_of_nodes', 'nodes', 'absolute'], - ['number_of_data_nodes', 'data_nodes', 'absolute'], - ['number_of_pending_tasks', 'pending_tasks', 'absolute'], - ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute'] - ]}, - 'cluster_health_status': { - 'options': [None, 'Cluster Status', 'status', 'cluster health API', - 'elastic.cluster_health_status', 'area'], - 'lines': [ - ['status_green', 'green', 'absolute'], - ['status_red', 'red', 'absolute'], - ['status_foo1', None, 'absolute'], - ['status_foo2', None, 'absolute'], - ['status_foo3', None, 'absolute'], - ['status_yellow', 'yellow', 'absolute'] - ]}, - 'cluster_health_shards': { - 'options': [None, 'Shards Statistics', 'shards', 'cluster health API', - 'elastic.cluster_health_shards', 'stacked'], - 'lines': [ - ['active_shards', 'active_shards', 'absolute'], - ['relocating_shards', 'relocating_shards', 
'absolute'], - ['unassigned_shards', 'unassigned', 'absolute'], - ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'], - ['initializing_shards', 'initializing', 'absolute'], - ['active_shards_percent_as_number', 'active_percent', 'absolute'] - ]}, - 'cluster_stats_nodes': { - 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API', - 'elastic.cluster_nodes', 'stacked'], - 'lines': [ - ['nodes_count_data_only', 'data_only', 'absolute'], - ['nodes_count_master_data', 'master_data', 'absolute'], - ['nodes_count_total', 'total', 'absolute'], - ['nodes_count_master_only', 'master_only', 'absolute'], - ['nodes_count_client', 'client', 'absolute'] - ]}, - 'cluster_stats_query_cache': { - 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API', - 'elastic.cluster_query_cache', 'stacked'], - 'lines': [ - ['indices_query_cache_hit_count', 'hit', 'incremental'], - ['indices_query_cache_miss_count', 'miss', 'incremental'] - ]}, - 'cluster_stats_docs': { - 'options': [None, 'Docs Statistics', 'count', 'cluster stats API', - 'elastic.cluster_docs', 'line'], - 'lines': [ - ['indices_docs_count', 'docs', 'absolute'] - ]}, - 'cluster_stats_store': { - 'options': [None, 'Store Statistics', 'MB', 'cluster stats API', - 'elastic.cluster_store', 'line'], - 'lines': [ - ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048567] - ]}, - 'cluster_stats_indices_shards': { - 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API', - 'elastic.cluster_indices_shards', 'stacked'], - 'lines': [ - ['indices_count', 'indices', 'absolute'], - ['indices_shards_total', 'shards', 'absolute'] - ]}, - 'host_metrics_transport': { - 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics', - 'elastic.host_transport', 'area'], - 'lines': [ - ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000], - ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000] - ]}, - 'host_metrics_file_descriptors': { - 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics', - 'elastic.host_descriptors', 'area'], - 'lines': [ - ['file_descriptors_used', 'used', 'absolute', 1, 10] - ]}, - 'host_metrics_http': { - 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics', - 'elastic.host_http_connections', 'line'], - 'lines': [ - ['http_current_open', 'opened', 'absolute', 1, 1] - ]} -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.host = self.configuration.get('host') - self.port = self.configuration.get('port', 9200) - self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'), - host=self.host, - port=self.port) - self.latency = dict() - self.methods = list() - - def check(self): - if not all([self.host, - self.port, - isinstance(self.host, str), - isinstance(self.port, (str, int))]): - self.error('Host is not defined in the module configuration file') - return False - - # Hostname -> ip address - try: - self.host = gethostbyname(self.host) - except gaierror as error: - self.error(str(error)) - return False - - # Create URL for every Elasticsearch API - self.methods = [METHODS(get_data=self._get_node_stats, - url=self.url + '/_nodes/_local/stats', - run=self.configuration.get('node_stats', True)), - METHODS(get_data=self._get_cluster_health, - url=self.url + '/_cluster/health', - 
run=self.configuration.get('cluster_health', True)), - METHODS(get_data=self._get_cluster_stats, - url=self.url + '/_cluster/stats', - run=self.configuration.get('cluster_stats', True))] - - # Remove disabled API calls from 'avail methods' - return UrlService.check(self) - - def _get_data(self): - threads = list() - queue = Queue() - result = dict() - - for method in self.methods: - if not method.run: - continue - th = Thread(target=method.get_data, - args=(queue, method.url)) - th.start() - threads.append(th) - - for thread in threads: - thread.join() - result.update(queue.get()) - - return result or None - - def _get_cluster_health(self, queue, url): - """ - Format data received from http request - :return: dict - """ - - raw_data = self._get_raw_data(url) - - if not raw_data: - return queue.put(dict()) - - data = loads(raw_data) - to_netdata = fetch_data_(raw_data=data, - metrics=HEALTH_STATS) - - to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0, - 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0}) - current_status = 'status_' + data['status'] - to_netdata[current_status] = 1 - - return queue.put(to_netdata) - - def _get_cluster_stats(self, queue, url): - """ - Format data received from http request - :return: dict - """ - - raw_data = self._get_raw_data(url) - - if not raw_data: - return queue.put(dict()) - - data = loads(raw_data) - to_netdata = fetch_data_(raw_data=data, - metrics=CLUSTER_STATS) - - return queue.put(to_netdata) - - def _get_node_stats(self, queue, url): - """ - Format data received from http request - :return: dict - """ - - raw_data = self._get_raw_data(url) - - if not raw_data: - return queue.put(dict()) - - data = loads(raw_data) - - node = list(data['nodes'].keys())[0] - to_netdata = fetch_data_(raw_data=data['nodes'][node], - metrics=NODE_STATS) - - # Search, index, flush, fetch performance latency - for key in LATENCY: - try: - to_netdata[key] = self.find_avg(total=to_netdata[LATENCY[key]['total']], - spent_time=to_netdata[LATENCY[key]['spent_time']], - key=key) - except KeyError: - continue - if 'process_open_file_descriptors' in to_netdata and 'process_max_file_descriptors' in to_netdata: - to_netdata['file_descriptors_used'] = round(float(to_netdata['process_open_file_descriptors']) - / to_netdata['process_max_file_descriptors'] * 1000) - - return queue.put(to_netdata) - - def find_avg(self, total, spent_time, key): - if key not in self.latency: - self.latency[key] = dict(total=total, - spent_time=spent_time) - return 0 - if self.latency[key]['total'] != total: - latency = float(spent_time - self.latency[key]['spent_time'])\ - / float(total - self.latency[key]['total']) * 1000 - self.latency[key]['total'] = total - self.latency[key]['spent_time'] = spent_time - return latency - self.latency[key]['spent_time'] = spent_time - return 0 - - -def fetch_data_(raw_data, metrics): - data = dict() - for metric in metrics: - value = raw_data - metrics_list = metric.split('.') - try: - for m in metrics_list: - value = value[m] - except KeyError: - continue - data['_'.join(metrics_list)] = value - return data diff --git a/python.d/example.chart.py b/python.d/example.chart.py deleted file mode 100644 index ee7ff62fc..000000000 --- a/python.d/example.chart.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: example netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from random import SystemRandom - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values -# update_every = 4 
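[Editor's note] `_get_cluster_health()` above turns the cluster's status string into a one-hot set of dimensions so the status chart can be stacked, with the `status_foo*` entries acting as spacers. The encoding in isolation, as a hedged sketch:

```python
def one_hot_status(status):
    """Map 'green'/'yellow'/'red' onto mutually exclusive 0/1 dimensions."""
    flags = {'status_green': 0, 'status_yellow': 0, 'status_red': 0}
    key = 'status_' + status
    if key in flags:
        flags[key] = 1  # unknown statuses leave every flag at zero
    return flags

print(one_hot_status('yellow'))
# -> {'status_green': 0, 'status_yellow': 1, 'status_red': 0}
```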
-priority = 90000 -retries = 60 - -ORDER = ['random'] -CHARTS = { - 'random': { - 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'], - 'lines': [ - ['random1'] - ] - } -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.random = SystemRandom() - - @staticmethod - def check(): - return True - - def get_data(self): - data = dict() - - for i in range(1, 4): - dimension_id = ''.join(['random', str(i)]) - - if dimension_id not in self.charts['random']: - self.charts['random'].add_dimension([dimension_id]) - - data[dimension_id] = self.random.randint(0, 100) - - return data diff --git a/python.d/exim.chart.py b/python.d/exim.chart.py deleted file mode 100644 index 2e5b924ba..000000000 --- a/python.d/exim.chart.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: exim netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.ExecutableService import ExecutableService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['qemails'] - -CHARTS = { - 'qemails': { - 'options': [None, "Exim Queue Emails", "emails", 'queue', 'exim.qemails', 'line'], - 'lines': [ - ['emails', None, 'absolute'] - ]} -} - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__(self, configuration=configuration, name=name) - self.command = "exim -bpc" - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Format data received from shell command - :return: dict - """ - try: - return {'emails': int(self._get_raw_data()[0])} - except (ValueError, AttributeError): - return None diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py deleted file mode 100644 index 895833f87..000000000 --- a/python.d/fail2ban.chart.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: fail2ban log netdata python.d module -# Author: l2isbad - -import bisect - -from glob import glob -from re import compile as r_compile -from os import access as is_accessible, R_OK -from os.path import isdir, getsize - - -from bases.FrameworkServices.LogService import LogService - -priority = 60000 -retries = 60 -REGEX_JAILS = r_compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)') -REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>U|B)[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})') -ORDER = ['jails_bans', 'jails_in_jail'] - - -class Service(LogService): - """ - fail2ban log class - Reads logs line by line - Jail auto detection included - It produces following charts: - * Bans per second for every jail - * Banned IPs for every jail (since the last restart of netdata) - """ - def __init__(self, configuration=None, name=None): - LogService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = dict() - self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log') - self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local') - self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/') - self.exclude = self.configuration.get('exclude') - - def _get_data(self): - """ - Parse new log lines - :return: dict - """ - raw = 
self._get_raw_data()
-        if raw is None:
-            return None
-        elif not raw:
-            return self.to_netdata
-
-        # Fail2ban log lines look like this:
-        # 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
-        for row in raw:
-            match = REGEX_DATA.search(row)
-            if match:
-                match_dict = match.groupdict()
-                jail, action, ipaddr = match_dict['jail'], match_dict['action'], match_dict['ipaddr']
-                if jail in self.jails_list:
-                    if action == 'B':
-                        self.to_netdata[jail] += 1
-                        if address_not_in_jail(self.banned_ips[jail], ipaddr, self.to_netdata[jail + '_in_jail']):
-                            self.to_netdata[jail + '_in_jail'] += 1
-                    else:
-                        if ipaddr in self.banned_ips[jail]:
-                            self.banned_ips[jail].remove(ipaddr)
-                            self.to_netdata[jail + '_in_jail'] -= 1
-
-        return self.to_netdata
-
-    def check(self):
-        """
-        :return: bool
-
-        Check that "log_path" exists, is readable and is not empty
-        """
-        if not (is_accessible(self.log_path, R_OK) and getsize(self.log_path) != 0):
-            self.error('%s is not readable or empty' % self.log_path)
-            return False
-        self.jails_list, self.to_netdata, self.banned_ips = self.jails_auto_detection_()
-        self.definitions = create_definitions_(self.jails_list)
-        self.info('Jails: %s' % self.jails_list)
-        return True
-
-    def jails_auto_detection_(self):
-        """
-        :return: <tuple>
-
-        * jails_list - list of enabled jails (['ssh', 'apache', ...])
-        * to_netdata - dict ({'ssh': 0, 'ssh_in_jail': 0, ...})
-        * banned_ips - stores all currently banned IPs ({'ssh': ['1.2.3.4', '5.6.7.8', ...], ...})
-        """
-        raw_jails_list = list()
-        jails_list = list()
-
-        for raw_jail in parse_configuration_files_(self.conf_path, self.conf_dir, self.error):
-            raw_jails_list.extend(raw_jail)
-
-        for jail, status in raw_jails_list:
-            if status == 'true' and jail not in jails_list:
-                jails_list.append(jail)
-            elif status == 'false' and jail in jails_list:
-                jails_list.remove(jail)
-        # Even if parsing failed for some reason, we can still start with the default jails_list.
-        jails_list = list(set(jails_list) - set(self.exclude.split()
-                                                if isinstance(self.exclude, str) else list())) or ['ssh']
-
-        to_netdata = dict([(jail, 0) for jail in jails_list])
-        to_netdata.update(dict([(jail + '_in_jail', 0) for jail in jails_list]))
-        banned_ips = dict([(jail, list()) for jail in jails_list])
-
-        return jails_list, to_netdata, banned_ips
-
-
-def create_definitions_(jails_list):
-    """
-    Create chart definitions for the detected jails
-    """
-    definitions = {
-        'jails_bans': {'options': [None, 'Jails Ban Statistics', 'bans/s', 'bans', 'jail.bans', 'line'],
-                       'lines': []},
-        'jails_in_jail': {'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs',
-                                      'in jail', 'jail.in_jail', 'line'],
-                          'lines': []}}
-    for jail in jails_list:
-        definitions['jails_bans']['lines'].append([jail, jail, 'incremental'])
-        definitions['jails_in_jail']['lines'].append([jail + '_in_jail', jail, 'absolute'])
-
-    return definitions
-
-
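As a quick illustration of the chart bootstrap above: for each detected jail, create_definitions_() appends one dimension per jail to each of the two charts. A minimal sketch, with 'ssh' standing in for whatever jail was auto-detected:

# Minimal sketch: what create_definitions_() returns for one detected jail.
definitions = create_definitions_(['ssh'])

assert definitions['jails_bans']['lines'] == [['ssh', 'ssh', 'incremental']]
assert definitions['jails_in_jail']['lines'] == [['ssh_in_jail', 'ssh', 'absolute']]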
-def parse_configuration_files_(jails_conf_path, jails_conf_dir, print_error):
-    """
-    :param jails_conf_path: <str>
-    :param jails_conf_dir: <str>
-    :param print_error: <function>
-    :return: <tuple>
-
-    Uses the "find_jails_in_files" function to find all jails in the "jails_conf_dir" directory
-    and in the "jails_conf_path" file.
-
-    All files must end with ".local" or ".conf".
-    The return order is important. According to man jail.conf it should be:
-    * jail.conf
-    * jail.d/*.conf (in alphabetical order)
-    * jail.local
-    * jail.d/*.local (in alphabetical order)
-    """
-    path_conf, path_local, dir_conf, dir_local = list(), list(), list(), list()
-
-    # Parse files in the directory
-    if not (isinstance(jails_conf_dir, str) and isdir(jails_conf_dir)):
-        print_error('%s is not a directory' % jails_conf_dir)
-    else:
-        dir_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(jails_conf_dir + '/*.conf')))
-        dir_local = list(filter(lambda local: is_accessible(local, R_OK), glob(jails_conf_dir + '/*.local')))
-        if not (dir_conf or dir_local):
-            print_error('%s is empty or not readable' % jails_conf_dir)
-        else:
-            dir_conf, dir_local = (find_jails_in_files(dir_conf, print_error),
-                                   find_jails_in_files(dir_local, print_error))
-
-    # Parse .conf and .local files
-    if isinstance(jails_conf_path, str) and jails_conf_path.endswith(('.local', '.conf')):
-        path_conf, path_local = (find_jails_in_files([jails_conf_path.split('.')[0] + '.conf'], print_error),
-                                 find_jails_in_files([jails_conf_path.split('.')[0] + '.local'], print_error))
-
-    return path_conf, dir_conf, path_local, dir_local
-
-
-def find_jails_in_files(list_of_files, print_error):
-    """
-    :param list_of_files: <list>
-    :param print_error: <function>
-    :return: <list>
-
-    Open each file and parse it to find all (enabled and disabled) jails.
-    The output is a list of tuples:
-    [('ssh', 'true'), ('apache', 'false'), ...]
-    """
-    jails_list = list()
-    for conf in list_of_files:
-        if is_accessible(conf, R_OK):
-            with open(conf, 'rt') as f:
-                raw_data = f.readlines()
-            data = ' '.join(line for line in raw_data if line.startswith(('[', 'enabled')))
-            jails_list.extend(REGEX_JAILS.findall(data))
-        else:
-            print_error('%s is not readable or does not exist' % conf)
-    return jails_list
-
-
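find_jails_in_files() collapses each config file to its section headers and "enabled" lines before applying REGEX_JAILS, so the regex only ever sees text like the fabricated sample below. A minimal sketch of that extraction step:

# Minimal sketch (sample config text, not a real file on disk).
sample = '[ssh] enabled = true [apache] enabled = false'

# REGEX_JAILS pairs every "[jail]" header with the enabled value that follows it.
assert REGEX_JAILS.findall(sample) == [('ssh', 'true'), ('apache', 'false')]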
-def address_not_in_jail(pool, address, pool_size):
-    """
-    :param pool: <list>
-    :param address: <str>
-    :param pool_size: <int>
-    :return: bool
-
-    Checks if the address is already in the pool.
-    If it is not, the address is added.
-    """
-    index = bisect.bisect_left(pool, address)
-    if index < pool_size and pool[index] == address:
-        return False
-    bisect.insort_left(pool, address)
-    return True
diff --git a/python.d/freeradius.chart.py b/python.d/freeradius.chart.py
deleted file mode 100644
index 3acc58d1a..000000000
--- a/python.d/freeradius.chart.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: freeradius netdata python.d module
-# Author: l2isbad
-
-from re import findall
-from subprocess import Popen, PIPE
-
-from bases.collection import find_binary
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
-update_every = 15
-
-RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
-
-CHARTS = {
-    'authentication': {
-        'options': [None, "Authentication", "packets/s", 'Authentication', 'freerad.auth', 'line'],
-        'lines': [
-            ['access-accepts', None, 'incremental'],
-            ['access-rejects', None, 'incremental'],
-            ['auth-dropped-requests', 'dropped-requests', 'incremental'],
-            ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
-            ['auth-invalid-requests', 'invalid-requests', 'incremental'],
-            ['auth-malformed-requests', 'malformed-requests', 'incremental'],
-            ['auth-unknown-types', 'unknown-types', 'incremental']
-        ]},
-    'accounting': {
-        'options': [None, "Accounting", "packets/s", 'Accounting', 'freerad.acct', 'line'],
-        'lines': [
-            ['accounting-requests', 'requests', 'incremental'],
-            ['accounting-responses', 'responses', 'incremental'],
-            ['acct-dropped-requests', 'dropped-requests', 'incremental'],
-            ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
-            ['acct-invalid-requests', 'invalid-requests', 'incremental'],
-            ['acct-malformed-requests', 'malformed-requests', 'incremental'],
-            ['acct-unknown-types', 'unknown-types', 'incremental']
-        ]},
-    'proxy-auth': {
-        'options': [None, "Proxy Authentication", "packets/s", 'Authentication', 'freerad.proxy.auth', 'line'],
-        'lines': [
-            ['proxy-access-accepts', 'access-accepts', 'incremental'],
-            ['proxy-access-rejects', 'access-rejects', 'incremental'],
-            ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
-            ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
-            ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
-            ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
-            ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
-        ]},
-    'proxy-acct': {
-        'options': [None, "Proxy Accounting", "packets/s", 'Accounting', 'freerad.proxy.acct', 'line'],
-        'lines': [
-            ['proxy-accounting-requests', 'requests', 'incremental'],
-            ['proxy-accounting-responses', 'responses', 'incremental'],
-            ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
-            ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
-            ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
-            ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
-            ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
-        ]}
-}
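For readers unfamiliar with the python.d framework: every CHARTS entry in these modules follows the same schema that go_expvar's _parse_extra_charts_config() builds programmatically later in this patch, namely options as [name, title, units, family, context, chart_type] and each line as [id, name, algorithm, multiplier, divisor]. A minimal hypothetical definition (all identifiers here are made up):

# Minimal sketch of the python.d chart-definition schema used above.
EXAMPLE_CHART = {
    'example_chart': {
        # [name, title, units, family, context, chart_type]
        'options': [None, 'Example Title', 'units/s', 'example family', 'example.context', 'line'],
        'lines': [
            # [id, name, algorithm, multiplier, divisor]
            ['example_dimension', 'shown name', 'incremental', 1, 1]
        ]
    }
}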
-
-
-class Service(SimpleService):
-    def __init__(self, configuration=None, name=None):
-        SimpleService.__init__(self, configuration=configuration, name=name)
-        self.definitions = CHARTS
-        self.host = self.configuration.get('host', 'localhost')
-        self.port = self.configuration.get('port', '18121')
-        self.secret = self.configuration.get('secret')
-        self.acct = self.configuration.get('acct', False)
-        self.proxy_auth = self.configuration.get('proxy_auth', False)
-        self.proxy_acct = self.configuration.get('proxy_acct', False)
-        chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
-        self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
-        self.echo = find_binary('echo')
-        self.radclient = find_binary('radclient')
-        self.sub_echo = [self.echo, RADIUS_MSG]
-        self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
-                              ':'.join([self.host, self.port]), 'status', self.secret]
-
-    def check(self):
-        if not all([self.echo, self.radclient]):
-            self.error('Can\'t locate "echo" or "radclient" binary or binary is not executable by netdata')
-            return False
-        if not self.secret:
-            self.error('"secret" not set')
-            return False
-
-        if self._get_raw_data():
-            return True
-        self.error('Request returned no data. Is server alive?')
-        return False
-
-    def _get_data(self):
-        """
-        Format data received from shell command
-        :return: dict
-        """
-        result = self._get_raw_data()
-        return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
-
-    def _get_raw_data(self):
-        """
-        The following code is equivalent to
-        'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
-         | radclient -t 1 -r 1 host:port status secret'
-        :return: str
-        """
-        try:
-            process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
-            process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
-            process_echo.stdout.close()
-            raw_result = process_rad.communicate()[0]
-        except OSError:
-            return None
-        if process_rad.returncode == 0:
-            return raw_result.decode()
-        return None
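The regex in _get_data() keys on the attribute name after the last dash of each FreeRADIUS statistics attribute and lowercases it to match the chart line ids above. A minimal sketch of that parsing step on a fabricated radclient response (the attribute values are made up):

from re import findall

# Fabricated radclient output, for illustration only.
response = 'FreeRADIUS-Total-Access-Accepts = 5\nFreeRADIUS-Total-Access-Rejects = 2\n'

parsed = dict((key.lower(), int(val))
              for key, val in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', response))

assert parsed == {'access-accepts': 5, 'access-rejects': 2}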
diff --git a/python.d/go_expvar.chart.py b/python.d/go_expvar.chart.py
deleted file mode 100644
index cbd462570..000000000
--- a/python.d/go_expvar.chart.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: go_expvar netdata python.d module
-# Author: Jan Kral (kralewitz)
-
-from __future__ import division
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-
-MEMSTATS_CHARTS = {
-    'memstats_heap': {
-        'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
-                    'expvar.memstats.heap', 'line'],
-        'lines': [
-            ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
-            ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
-        ]},
-    'memstats_stack': {
-        'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
-                    'expvar.memstats.stack', 'line'],
-        'lines': [
-            ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
-        ]},
-    'memstats_mspan': {
-        'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
-                    'expvar.memstats.mspan', 'line'],
-        'lines': [
-            ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
-        ]},
-    'memstats_mcache': {
-        'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
-                    'expvar.memstats.mcache', 'line'],
-        'lines': [
-            ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
-        ]},
-    'memstats_live_objects': {
-        'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
-                    'expvar.memstats.live_objects', 'line'],
-        'lines': [
-            ['memstats_live_objects', 'live']
-        ]},
-    'memstats_sys': {
-        'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
-                    'expvar.memstats.sys', 'line'],
-        'lines': [
-            ['memstats_sys', 'sys', 'absolute', 1, 1024]
-        ]},
-    'memstats_gc_pauses': {
-        'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
-                    'expvar.memstats.gc_pauses', 'line'],
-        'lines': [
-            ['memstats_gc_pauses', 'avg']
-        ]},
-}
-
-MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
-                  'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
-
-
-def flatten(d, top='', sep='.'):
-    """Flatten a nested dict into a single level, joining keys with `sep`."""
-    items = []
-    for key, val in d.items():
-        nkey = top + sep + key if top else key
-        if isinstance(val, dict):
-            items.extend(flatten(val, nkey, sep=sep).items())
-        else:
-            items.append((nkey, val))
-    return dict(items)
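Because expvar JSON can nest arbitrarily, flatten() is what makes the dotted expvar_key lookups in the extra-chart config work. For instance, on a fabricated input:

# flatten() turns nested JSON keys into the dotted form referenced by expvar_key.
nested = {'runtime': {'goroutines': 12, 'mem': {'heap': 1024}}}

assert flatten(nested) == {'runtime.goroutines': 12, 'runtime.mem.heap': 1024}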
Must be "int" or "float"'.format(ev_type)) - continue - - if ev_key in self.expvars: - self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key)) - continue - - self.expvars[ev_key] = (ev_type, line_id) - - chart_dict['lines'].append( - [ - line.get('id', ''), - line.get('name', ''), - line.get('algorithm', ''), - line.get('multiplier', 1), - line.get('divisor', 100 if ev_type == 'float' else 1), - line.get('hidden', False) - ] - ) - - self.order.append(chart_id) - self.definitions[chart_id] = chart_dict - - def _get_data(self): - """ - Format data received from http request - :return: dict - """ - - raw_data = self._get_raw_data() - if not raw_data: - return None - - data = json.loads(raw_data) - - expvars = dict() - if self.configuration.get('collect_memstats'): - expvars.update(self._parse_memstats(data)) - - if self.configuration.get('extra_charts'): - # the memstats part of the data has been already parsed, so we remove it before flattening and checking - # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict. - del (data['memstats']) - flattened = flatten(data) - for k, v in flattened.items(): - ev = self.expvars.get(k) - if not ev: - # expvar is not defined in config, skip it - continue - try: - key_type, line_id = ev - if key_type == 'int': - expvars[line_id] = int(v) - elif key_type == 'float': - # if the value type is float, multiply it by 1000 and set line divisor to 1000 - expvars[line_id] = float(v) * 100 - except ValueError: - self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type)) - del self.expvars[k] - - return expvars - - @staticmethod - def _parse_memstats(data): - - memstats = data['memstats'] - - # calculate the number of live objects in memory - live_objs = int(memstats['Mallocs']) - int(memstats['Frees']) - - # calculate GC pause times average - # the Go runtime keeps the last 256 GC pause durations in a circular buffer, - # so we need to filter out the 0 values before the buffer is filled - gc_pauses = memstats['PauseNs'] - try: - gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0]) - # no GC cycles have occured yet - except ZeroDivisionError: - gc_pause_avg = 0 - - return { - 'memstats_heap_alloc': memstats['HeapAlloc'], - 'memstats_heap_inuse': memstats['HeapInuse'], - 'memstats_stack_inuse': memstats['StackInuse'], - 'memstats_mspan_inuse': memstats['MSpanInuse'], - 'memstats_mcache_inuse': memstats['MCacheInuse'], - 'memstats_sys': memstats['Sys'], - 'memstats_live_objects': live_objs, - 'memstats_gc_pauses': gc_pause_avg, - } diff --git a/python.d/haproxy.chart.py b/python.d/haproxy.chart.py deleted file mode 100644 index 3061f5ef2..000000000 --- a/python.d/haproxy.chart.py +++ /dev/null @@ -1,339 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: haproxy netdata python.d module -# Author: l2isbad, ktarasz - -from collections import defaultdict -from re import compile as re_compile - -try: - from urlparse import urlparse -except ImportError: - from urllib.parse import urlparse - -from bases.FrameworkServices.SocketService import SocketService -from bases.FrameworkServices.UrlService import UrlService - - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', - 'fhrsp_1xx', 'fhrsp_2xx', 'fhrsp_3xx', 'fhrsp_4xx', 'fhrsp_5xx', 'fhrsp_other', 'fhrsp_total', - 'bbin', 'bbout', 
'bscur', 'bqcur', - 'bhrsp_1xx', 'bhrsp_2xx', 'bhrsp_3xx', 'bhrsp_4xx', 'bhrsp_5xx', 'bhrsp_other', 'bhrsp_total', - 'bqtime', 'bttime', 'brtime', 'bctime', - 'health_sup', 'health_sdown', 'health_bdown', 'health_idle'] - -CHARTS = { - 'fbin': { - 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'], - 'lines': [ - ]}, - 'fbout': { - 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'], - 'lines': [ - ]}, - 'fscur': { - 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'], - 'lines': [ - ]}, - 'fqcur': { - 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'], - 'lines': [ - ]}, - 'fhrsp_1xx': { - 'options': [None, "HTTP responses with 1xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_1xx', 'line'], - 'lines': [ - ]}, - 'fhrsp_2xx': { - 'options': [None, "HTTP responses with 2xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_2xx', 'line'], - 'lines': [ - ]}, - 'fhrsp_3xx': { - 'options': [None, "HTTP responses with 3xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_3xx', 'line'], - 'lines': [ - ]}, - 'fhrsp_4xx': { - 'options': [None, "HTTP responses with 4xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_4xx', 'line'], - 'lines': [ - ]}, - 'fhrsp_5xx': { - 'options': [None, "HTTP responses with 5xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_5xx', 'line'], - 'lines': [ - ]}, - 'fhrsp_other': { - 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'frontend', 'haproxy_f.hrsp_other', 'line'], - 'lines': [ - ]}, - 'fhrsp_total': { - 'options': [None, "HTTP responses", "responses", 'frontend', 'haproxy_f.hrsp_total', 'line'], - 'lines': [ - ]}, - 'bbin': { - 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'], - 'lines': [ - ]}, - 'bbout': { - 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'], - 'lines': [ - ]}, - 'bscur': { - 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'], - 'lines': [ - ]}, - 'bqcur': { - 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'], - 'lines': [ - ]}, - 'bhrsp_1xx': { - 'options': [None, "HTTP responses with 1xx code", "responses/s", 'backend', 'haproxy_b.hrsp_1xx', 'line'], - 'lines': [ - ]}, - 'bhrsp_2xx': { - 'options': [None, "HTTP responses with 2xx code", "responses/s", 'backend', 'haproxy_b.hrsp_2xx', 'line'], - 'lines': [ - ]}, - 'bhrsp_3xx': { - 'options': [None, "HTTP responses with 3xx code", "responses/s", 'backend', 'haproxy_b.hrsp_3xx', 'line'], - 'lines': [ - ]}, - 'bhrsp_4xx': { - 'options': [None, "HTTP responses with 4xx code", "responses/s", 'backend', 'haproxy_b.hrsp_4xx', 'line'], - 'lines': [ - ]}, - 'bhrsp_5xx': { - 'options': [None, "HTTP responses with 5xx code", "responses/s", 'backend', 'haproxy_b.hrsp_5xx', 'line'], - 'lines': [ - ]}, - 'bhrsp_other': { - 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'backend', - 'haproxy_b.hrsp_other', 'line'], - 'lines': [ - ]}, - 'bhrsp_total': { - 'options': [None, "HTTP responses (total)", "responses/s", 'backend', 'haproxy_b.hrsp_total', 'line'], - 'lines': [ - ]}, - 'bqtime': { - 'options': [None, "The average queue time over the 1024 last requests", "ms", 'backend', 'haproxy_b.qtime', 'line'], - 'lines': [ - ]}, - 'bctime': { - 'options': [None, "The average connect time over the 1024 last requests", "ms", 'backend', - 'haproxy_b.ctime', 'line'], - 'lines': [ 
- ]}, - 'brtime': { - 'options': [None, "The average response time over the 1024 last requests", "ms", 'backend', - 'haproxy_b.rtime', 'line'], - 'lines': [ - ]}, - 'bttime': { - 'options': [None, "The average total session time over the 1024 last requests", "ms", 'backend', - 'haproxy_b.ttime', 'line'], - 'lines': [ - ]}, - 'health_sdown': { - 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health', - 'haproxy_hs.down', 'line'], - 'lines': [ - ]}, - 'health_sup': { - 'options': [None, "Backend Servers In UP State", "health servers", 'health', - 'haproxy_hs.up', 'line'], - 'lines': [ - ]}, - 'health_bdown': { - 'options': [None, "Is Backend Alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'], - 'lines': [ - ]}, - 'health_idle': { - 'options': [None, "The Ratio Of Polling Time Vs Total Time", "percent", 'health', 'haproxy.idle', 'line'], - 'lines': [ - ['idle', None, 'absolute'] - ]} -} - - -METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024}, - 'bout': {'algorithm': 'incremental', 'divisor': 1024}, - 'scur': {'algorithm': 'absolute', 'divisor': 1}, - 'qcur': {'algorithm': 'absolute', 'divisor': 1}, - 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1}, - 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1}, - 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1}, - 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1}, - 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1}, - 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}, - } - - -BACKEND_METRICS = { - 'qtime': {'algorithm': 'absolute', 'divisor': 1}, - 'ctime': {'algorithm': 'absolute', 'divisor': 1}, - 'rtime': {'algorithm': 'absolute', 'divisor': 1}, - 'ttime': {'algorithm': 'absolute', 'divisor': 1} -} - - -REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'), - socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)')) - - -class Service(UrlService, SocketService): - def __init__(self, configuration=None, name=None): - if 'socket' in configuration: - SocketService.__init__(self, configuration=configuration, name=name) - self.poll = SocketService - self.options_ = dict(regex=REGEX['socket'], - stat='show stat\n'.encode(), - info='show info\n'.encode()) - else: - UrlService.__init__(self, configuration=configuration, name=name) - self.poll = UrlService - self.options_ = dict(regex=REGEX['url'], - stat=self.url, - info=url_remove_params(self.url)) - self.order = ORDER - self.definitions = CHARTS - - def check(self): - if self.poll.check(self): - self.create_charts() - self.info('We are using %s.' 
% self.poll.__name__) - return True - return False - - def _get_data(self): - to_netdata = dict() - self.request, self.url = self.options_['stat'], self.options_['stat'] - stat_data = self._get_stat_data() - self.request, self.url = self.options_['info'], self.options_['info'] - info_data = self._get_info_data(regex=self.options_['regex']) - - to_netdata.update(stat_data) - to_netdata.update(info_data) - return to_netdata or None - - def _get_stat_data(self): - """ - :return: dict - """ - raw_data = self.poll._get_raw_data(self) - - if not raw_data: - return dict() - - raw_data = raw_data.splitlines() - self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(','))) - for _ in range(1, len(raw_data))]) - if not self.data: - return dict() - - stat_data = dict() - - for frontend in self.data['frontend']: - for metric in METRICS: - idx = frontend['# pxname'].replace('.', '_') - stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0 - - for backend in self.data['backend']: - name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_') - stat_data['hsup_' + idx] = len([server for server in self.data['servers'] - if server_status(server, name, 'UP')]) - stat_data['hsdown_' + idx] = len([server for server in self.data['servers'] - if server_status(server, name, 'DOWN')]) - stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0 - for metric in BACKEND_METRICS: - stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0 - hrsp_total = 0 - for metric in METRICS: - stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0 - if metric.startswith('hrsp_'): - hrsp_total += int(backend.get(metric) or 0) - stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total - return stat_data - - def _get_info_data(self, regex): - """ - :return: dict - """ - raw_data = self.poll._get_raw_data(self) - if not raw_data: - return dict() - - match = regex.search(raw_data) - return match.groupdict() if match else dict() - - @staticmethod - def _check_raw_data(data): - """ - Check if all data has been gathered from socket - :param data: str - :return: boolean - """ - return not bool(data) - - def create_charts(self): - for front in self.data['frontend']: - name, idx = front['# pxname'], front['# pxname'].replace('.', '_') - for metric in METRICS: - self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]), - name, METRICS[metric]['algorithm'], 1, - METRICS[metric]['divisor']]) - self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]), - name, 'incremental', 1, 1]) - for back in self.data['backend']: - name, idx = back['# pxname'], back['# pxname'].replace('.', '_') - for metric in METRICS: - self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]), - name, METRICS[metric]['algorithm'], 1, - METRICS[metric]['divisor']]) - self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]), - name, 'incremental', 1, 1]) - for metric in BACKEND_METRICS: - self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]), - name, BACKEND_METRICS[metric]['algorithm'], 1, - BACKEND_METRICS[metric]['divisor']]) - self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute']) - self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute']) - self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute']) - - -def parse_data_(data): - def 
is_backend(backend): - return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats' - - def is_frontend(frontend): - return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats' - - def is_server(server): - return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND')) - - if not data: - return None - - result = defaultdict(list) - for elem in data: - if is_backend(elem): - result['backend'].append(elem) - continue - elif is_frontend(elem): - result['frontend'].append(elem) - continue - elif is_server(elem): - result['servers'].append(elem) - - return result or None - - -def server_status(server, backend_name, status='DOWN'): - return server.get('# pxname') == backend_name and server.get('status') == status - - -def url_remove_params(url): - parsed = urlparse(url or str()) - return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path) diff --git a/python.d/hddtemp.chart.py b/python.d/hddtemp.chart.py deleted file mode 100644 index 577cab09f..000000000 --- a/python.d/hddtemp.chart.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: hddtemp netdata python.d module -# Author: Pawel Krupa (paulfantom) - - -import os -from copy import deepcopy - -from bases.FrameworkServices.SocketService import SocketService - -# default module values (can be overridden per job in `config`) -#update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'host': 'localhost', -# 'port': 7634 -# }} - -ORDER = ['temperatures'] - -CHARTS = { - 'temperatures': { - 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'], - 'lines': [ - # lines are created dynamically in `check()` method - ]}} - - -class Service(SocketService): - def __init__(self, configuration=None, name=None): - SocketService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = deepcopy(CHARTS) - self._keep_alive = False - self.request = "" - self.host = "127.0.0.1" - self.port = 7634 - self.disks = list() - - def get_disks(self): - try: - disks = self.configuration['devices'] - self.info("Using configured disks {0}".format(disks)) - except (KeyError, TypeError): - self.info("Autodetecting disks") - return ["/dev/" + f for f in os.listdir("/dev") if len(f) == 3 and f.startswith("sd")] - - ret = list() - for disk in disks: - if not disk.startswith('/dev/'): - disk = "/dev/" + disk - ret.append(disk) - if not ret: - self.error("Provided disks cannot be found in /dev directory.") - return ret - - def _check_raw_data(self, data): - if not data.endswith('|'): - return False - - if all(disk in data for disk in self.disks): - return True - return False - - def get_data(self): - """ - Get data from TCP/IP socket - :return: dict - """ - try: - raw = self._get_raw_data().split("|")[:-1] - except AttributeError: - self.error("no data received") - return None - data = dict() - for i in range(len(raw) // 5): - if not raw[i*5+1] in self.disks: - continue - try: - val = int(raw[i*5+3]) - except ValueError: - val = 0 - data[raw[i*5+1].replace("/dev/", "")] = val - - if not data: - self.error("received data doesn't have needed records") - return None - return data - - def check(self): - """ - Parse configuration, check if hddtemp is available, and dynamically create chart lines data - :return: 
boolean - """ - self._parse_config() - self.disks = self.get_disks() - - data = self.get_data() - if data is None: - return False - - for name in data: - self.definitions['temperatures']['lines'].append([name]) - return True diff --git a/python.d/httpcheck.chart.py b/python.d/httpcheck.chart.py deleted file mode 100644 index b0177ff90..000000000 --- a/python.d/httpcheck.chart.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: http check netdata python.d module -# Original Author: ccremer (github.com/ccremer) - -import urllib3 -import re - -try: - from time import monotonic as time -except ImportError: - from time import time - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -update_every = 3 -priority = 60000 -retries = 60 - -# Response -HTTP_RESPONSE_TIME = 'time' -HTTP_RESPONSE_LENGTH = 'length' - -# Status dimensions -HTTP_SUCCESS = 'success' -HTTP_BAD_CONTENT = 'bad_content' -HTTP_BAD_STATUS = 'bad_status' -HTTP_TIMEOUT = 'timeout' -HTTP_NO_CONNECTION = 'no_connection' - -ORDER = ['response_time', 'response_length', 'status'] - -CHARTS = { - 'response_time': { - 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'], - 'lines': [ - [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000] - ]}, - 'response_length': { - 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'], - 'lines': [ - [HTTP_RESPONSE_LENGTH, 'length', 'absolute'] - ]}, - 'status': { - 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'], - 'lines': [ - [HTTP_SUCCESS, 'success', 'absolute'], - [HTTP_BAD_CONTENT, 'bad content', 'absolute'], - [HTTP_BAD_STATUS, 'bad status', 'absolute'], - [HTTP_TIMEOUT, 'timeout', 'absolute'], - [HTTP_NO_CONNECTION, 'no connection', 'absolute'] - ]} -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - pattern = self.configuration.get('regex') - self.regex = re.compile(pattern) if pattern else None - self.status_codes_accepted = self.configuration.get('status_accepted', [200]) - self.follow_redirect = self.configuration.get('redirect', True) - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Format data received from http request - :return: dict - """ - data = dict() - data[HTTP_SUCCESS] = 0 - data[HTTP_BAD_CONTENT] = 0 - data[HTTP_BAD_STATUS] = 0 - data[HTTP_TIMEOUT] = 0 - data[HTTP_NO_CONNECTION] = 0 - url = self.url - try: - start = time() - status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False, - redirect=self.follow_redirect) - diff = time() - start - data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0) - self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format( - url=url, code=status, diff=diff - )) - self.process_response(content, data, status) - - except urllib3.exceptions.NewConnectionError as error: - self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error)) - data[HTTP_NO_CONNECTION] = 1 - - except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error: - self.debug("Connection timed out: {url}. Error: {error}".format(url=url, error=error)) - data[HTTP_TIMEOUT] = 1 - - except urllib3.exceptions.HTTPError as error: - self.debug("Connection failed: {url}. 
Error: {error}".format(url=url, error=error)) - data[HTTP_NO_CONNECTION] = 1 - - except (TypeError, AttributeError) as error: - self.error('Url: {url}. Error: {error}'.format(url=url, error=error)) - return None - - return data - - def process_response(self, content, data, status): - data[HTTP_RESPONSE_LENGTH] = len(content) - self.debug('Content: \n\n{content}\n'.format(content=content)) - if status in self.status_codes_accepted: - if self.regex and self.regex.search(content) is None: - self.debug("No match for regex '{regex}' found".format(regex=self.regex.pattern)) - data[HTTP_BAD_CONTENT] = 1 - else: - data[HTTP_SUCCESS] = 1 - else: - data[HTTP_BAD_STATUS] = 1 diff --git a/python.d/icecast.chart.py b/python.d/icecast.chart.py deleted file mode 100644 index 792b99f3f..000000000 --- a/python.d/icecast.chart.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: icecast netdata python.d module -# Author: Ilya Mashchenko (l2isbad) - -import json - -from bases.FrameworkServices.UrlService import UrlService - - -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['listeners'] - -CHARTS = { - 'listeners': { - 'options': [None, 'Number Of Listeners', 'listeners', - 'listeners', 'icecast.listeners', 'line'], - 'lines': [ - ]} -} - - -class Source: - def __init__(self, idx, data): - self.name = 'source_{0}'.format(idx) - self.is_active = data.get('stream_start') and data.get('server_name') - self.listeners = data['listeners'] - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.url = self.configuration.get('url') - self._manager = self._build_manager() - - def check(self): - """ - Add active sources to the "listeners" chart - :return: bool - """ - sources = self.get_sources() - if not sources: - return None - - active_sources = 0 - for idx, raw_source in enumerate(sources): - if Source(idx, raw_source).is_active: - active_sources += 1 - dim_id = 'source_{0}'.format(idx) - dim = 'source {0}'.format(idx) - self.definitions['listeners']['lines'].append([dim_id, dim]) - - return bool(active_sources) - - def _get_data(self): - """ - Get number of listeners for every source - :return: dict - """ - sources = self.get_sources() - if not sources: - return None - - data = dict() - - for idx, raw_source in enumerate(sources): - source = Source(idx, raw_source) - data[source.name] = source.listeners - - return data - - def get_sources(self): - """ - Format data received from http request and return list of sources - :return: list - """ - - raw_data = self._get_raw_data() - if not raw_data: - return None - - try: - data = json.loads(raw_data) - except ValueError as error: - self.error("JSON decode error:", error) - return None - - return data['icestats'].get('source') diff --git a/python.d/ipfs.chart.py b/python.d/ipfs.chart.py deleted file mode 100644 index 43500dfb5..000000000 --- a/python.d/ipfs.chart.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: IPFS netdata python.d module -# Authors: Pawel Krupa (paulfantom), davidak - -import json - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 
'retries': retries, -# 'priority': priority, -# 'url': 'http://localhost:5001' -# }} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects'] - -CHARTS = { - 'bandwidth': { - 'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'], - 'lines': [ - ["in", None, "absolute", 8, 1000], - ["out", None, "absolute", -8, 1000] - ]}, - 'peers': { - 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'], - 'lines': [ - ["peers", None, 'absolute'] - ]}, - 'repo_size': { - 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'], - 'lines': [ - ["avail", None, "absolute", 1, 1e9], - ["size", None, "absolute", 1, 1e9], - ]}, - 'repo_objects': { - 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'], - 'lines': [ - ["objects", None, "absolute", 1, 1], - ["pinned", None, "absolute", 1, 1], - ["recursive_pins", None, "absolute", 1, 1] - ]}, -} - -SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12, - 'p': 15, 'e': 18, 'z': 21, 'y': 24} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.baseurl = self.configuration.get('url', 'http://localhost:5001') - self.order = ORDER - self.definitions = CHARTS - self.__storage_max = None - - def _get_json(self, sub_url): - """ - :return: json decoding of the specified url - """ - self.url = self.baseurl + sub_url - try: - return json.loads(self._get_raw_data()) - except (TypeError, ValueError): - return dict() - - @staticmethod - def _recursive_pins(keys): - return len([k for k in keys if keys[k]["Type"] == b"recursive"]) - - @staticmethod - def _dehumanize(store_max): - # convert from '10Gb' to 10000000000 - if not isinstance(store_max, int): - store_max = store_max.lower() - if store_max.endswith('b'): - val, units = store_max[:-2], store_max[-2] - if units in SI_zeroes: - val += '0'*SI_zeroes[units] - store_max = val - try: - store_max = int(store_max) - except (TypeError, ValueError): - store_max = None - return store_max - - def _storagemax(self, store_cfg): - if self.__storage_max is None: - self.__storage_max = self._dehumanize(store_cfg['StorageMax']) - return self.__storage_max - - def _get_data(self): - """ - Get data from API - :return: dict - """ - # suburl : List of (result-key, original-key, transform-func) - cfg = { - '/api/v0/stats/bw': - [('in', 'RateIn', int), ('out', 'RateOut', int)], - '/api/v0/swarm/peers': - [('peers', 'Strings', len)], - '/api/v0/stats/repo': - [('size', 'RepoSize', int), ('objects', 'NumObjects', int)], - '/api/v0/pin/ls': - [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)], - '/api/v0/config/show': [('avail', 'Datastore', self._storagemax)] - } - r = dict() - for suburl in cfg: - in_json = self._get_json(suburl) - for new_key, orig_key, xmute in cfg[suburl]: - try: - r[new_key] = xmute(in_json[orig_key]) - except Exception: - continue - return r or None diff --git a/python.d/isc_dhcpd.chart.py b/python.d/isc_dhcpd.chart.py deleted file mode 100644 index eb6338452..000000000 --- a/python.d/isc_dhcpd.chart.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: isc dhcpd lease netdata python.d module -# Author: l2isbad - -import os -import re -import time - - -try: - import ipaddress - HAVE_IP_ADDRESS = True -except ImportError: - HAVE_IP_ADDRESS = False - -from collections import defaultdict -from 
copy import deepcopy - -from bases.FrameworkServices.SimpleService import SimpleService - -priority = 60000 -retries = 60 - -ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total'] - -CHARTS = { - 'pools_utilization': { - 'options': [None, 'Pools Utilization', '%', 'utilization', - 'isc_dhcpd.utilization', 'line'], - 'lines': []}, - 'pools_active_leases': { - 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', - 'isc_dhcpd.active_leases', 'line'], - 'lines': []}, - 'leases_total': { - 'options': [None, 'All Active Leases', 'leases', 'active leases', - 'isc_dhcpd.leases_total', 'line'], - 'lines': [['leases_total', 'leases', 'absolute']], - 'variables': [ - ['leases_size'] - ] - } -} - - -class DhcpdLeasesFile: - def __init__(self, path): - self.path = path - self.mod_time = 0 - self.size = 0 - - def is_valid(self): - return os.path.isfile(self.path) and os.access(self.path, os.R_OK) - - def is_changed(self): - mod_time = os.path.getmtime(self.path) - if mod_time != self.mod_time: - self.mod_time = mod_time - self.size = int(os.path.getsize(self.path) / 1024) - return True - return False - - def get_data(self): - try: - with open(self.path) as leases: - result = defaultdict(dict) - for row in leases: - row = row.strip() - if row.startswith('lease'): - address = row[6:-2] - elif row.startswith('iaaddr'): - address = row[7:-2] - elif row.startswith('ends'): - result[address]['ends'] = row[5:-1] - elif row.startswith('binding state'): - result[address]['state'] = row[14:-1] - return dict((k, v) for k, v in result.items() if len(v) == 2) - except (OSError, IOError): - return None - - -class Pool: - def __init__(self, name, network): - self.id = re.sub(r'[:/.-]+', '_', name) - self.name = name - self.network = ipaddress.ip_network(address=u'%s' % network) - - def num_hosts(self): - return self.network.num_addresses - 2 - - def __contains__(self, item): - return item.address in self.network - - -class Lease: - def __init__(self, address, ends, state): - self.address = ipaddress.ip_address(address=u'%s' % address) - self.ends = ends - self.state = state - - def is_active(self, current_time): - # lease_end_time might be epoch - if self.ends.startswith('epoch'): - epoch = int(self.ends.split()[1].replace(';', '')) - return epoch - current_time > 0 - # max. int for lease-time causes lease to expire in year 2038. 
-        # dhcpd puts 'never' in the ends section of an active lease
-        elif self.ends == 'never':
-            return True
-        return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
-
-    def is_valid(self):
-        return self.state == 'active'
-
-
-class Service(SimpleService):
-    def __init__(self, configuration=None, name=None):
-        SimpleService.__init__(self, configuration=configuration, name=name)
-        self.order = ORDER
-        self.definitions = deepcopy(CHARTS)
-
-        lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
-        self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
-        self.pools = list()
-        self.data = dict()
-
-        # Works only with the 'default' db-time-format (weekday year/month/day hour:minute:second)
-        # TODO: update the algorithm to correctly parse the 'local' db-time-format
-
-    def check(self):
-        if not HAVE_IP_ADDRESS:
-            self.error("'python-ipaddress' module is needed")
-            return False
-
-        if not self.dhcpd_leases.is_valid():
-            self.error("Make sure '{path}' exists and is readable by netdata".format(path=self.dhcpd_leases.path))
-            return False
-
-        pools = self.configuration.get('pools')
-        if not pools:
-            self.error('Pools are not defined')
-            return False
-
-        for pool in pools:
-            try:
-                new_pool = Pool(name=pool, network=pools[pool])
-            except ValueError as error:
-                self.error("Pool network '{network}' is invalid, skipping it: {error}".format(network=pools[pool],
-                                                                                              error=error))
-            else:
-                self.pools.append(new_pool)
-
-        self.create_charts()
-        return bool(self.pools)
-
-    def get_data(self):
-        """
-        :return: dict
-        """
-        if not self.dhcpd_leases.is_changed():
-            return self.data
-
-        raw_leases = self.dhcpd_leases.get_data()
-        if not raw_leases:
-            self.data = dict()
-            return None
-
-        active_leases = list()
-        current_time = time.mktime(time.gmtime())
-
-        for address in raw_leases:
-            try:
-                new_lease = Lease(address, **raw_leases[address])
-            except ValueError:
-                continue
-            else:
-                if new_lease.is_active(current_time) and new_lease.is_valid():
-                    active_leases.append(new_lease)
-
-        for pool in self.pools:
-            count = len([ip for ip in active_leases if ip in pool])
-            self.data[pool.id + '_active_leases'] = count
-            self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
-
-        self.data['leases_size'] = self.dhcpd_leases.size
-        self.data['leases_total'] = len(active_leases)
-
-        return self.data
-
-    def create_charts(self):
-        for pool in self.pools:
-            self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
-                                                                   'absolute', 1, 100])
-            self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
deleted file mode 100644
index 35ba9058f..000000000
--- a/python.d/mdstat.chart.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: mdstat netdata python.d module
-# Author: l2isbad
-
-import re
-
-from collections import defaultdict
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 60000
-retries = 60
-update_every = 1
-
-ORDER = ['mdstat_health']
-CHARTS = {
-    'mdstat_health': {
-        'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'],
-        'lines': list()
-    }
-}
-
-OPERATIONS = ('check', 'resync', 'reshape', 'recovery', 'finish', 'speed')
-
-RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
-                      r'(?P<total_disks>[0-9]+)/'
-                      r'(?P<inuse_disks>[0-9]+)\]')
-
-RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
-                       r'(?P<operation>[a-z]+) =[ ]{1,2}'
r'(?P<operation_status>[0-9.]+).+finish=' - r'(?P<finish>([0-9.]+))min speed=' - r'(?P<speed>[0-9]+)') - - -def md_charts(md): - order = ['{0}_disks'.format(md.name), - '{0}_operation'.format(md.name), - '{0}_finish'.format(md.name), - '{0}_speed'.format(md.name) - ] - - charts = dict() - charts[order[0]] = { - 'options': [None, 'Disks Stats', 'disks', md.name, 'md.disks', 'stacked'], - 'lines': [ - ['{0}_total_disks'.format(md.name), 'total', 'absolute'], - ['{0}_inuse_disks'.format(md.name), 'inuse', 'absolute'] - ] - } - - charts['_'.join([md.name, 'operation'])] = { - 'options': [None, 'Current Status', 'percent', md.name, 'md.status', 'line'], - 'lines': [ - ['{0}_resync'.format(md.name), 'resync', 'absolute', 1, 100], - ['{0}_recovery'.format(md.name), 'recovery', 'absolute', 1, 100], - ['{0}_reshape'.format(md.name), 'reshape', 'absolute', 1, 100], - ['{0}_check'.format(md.name), 'check', 'absolute', 1, 100] - ] - } - - charts['_'.join([md.name, 'finish'])] = { - 'options': [None, 'Approximate Time Until Finish', 'seconds', md.name, 'md.rate', 'line'], - 'lines': [ - ['{0}_finish'.format(md.name), 'finish in', 'absolute', 1, 1000] - ] - } - - charts['_'.join([md.name, 'speed'])] = { - 'options': [None, 'Operation Speed', 'KB/s', md.name, 'md.rate', 'line'], - 'lines': [ - ['{0}_speed'.format(md.name), 'speed', 'absolute', 1, 1000] - ] - } - - return order, charts - - -class MD: - def __init__(self, name, stats): - self.name = name - self.stats = stats - - def update_stats(self, stats): - self.stats = stats - - def data(self): - stats = dict(('_'.join([self.name, k]), v) for k, v in self.stats.items()) - stats['{0}_health'.format(self.name)] = int(self.stats['total_disks']) - int(self.stats['inuse_disks']) - return stats - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.mds = dict() - - def check(self): - arrays = find_arrays(self._get_raw_data()) - if not arrays: - self.error('Failed to read data from /proc/mdstat or there is no active arrays') - return None - return True - - @staticmethod - def _get_raw_data(): - """ - Read data from /proc/mdstat - :return: str - """ - try: - with open('/proc/mdstat', 'rt') as proc_mdstat: - return proc_mdstat.readlines() or None - except (OSError, IOError): - return None - - def get_data(self): - """ - Parse data from _get_raw_data() - :return: dict - """ - arrays = find_arrays(self._get_raw_data()) - if not arrays: - return None - - data = dict() - for array, values in arrays.items(): - - if array not in self.mds: - md = MD(array, values) - self.mds[md.name] = md - self.create_new_array_charts(md) - else: - md = self.mds[array] - md.update_stats(values) - - data.update(md.data()) - - return data - - def create_new_array_charts(self, md): - order, charts = md_charts(md) - - self.charts['mdstat_health'].add_dimension(['{0}_health'.format(md.name), md.name]) - for chart_name in order: - params = [chart_name] + charts[chart_name]['options'] - dimensions = charts[chart_name]['lines'] - - new_chart = self.charts.add_chart(params) - for dimension in dimensions: - new_chart.add_dimension(dimension) - - -def find_arrays(raw_data): - if raw_data is None: - return None - data = defaultdict(str) - counter = 1 - - for row in (elem.strip() for elem in raw_data): - if not row: - counter += 1 - continue - data[counter] = ' '.join([data[counter], row]) - - arrays = dict() - for value in data.values(): 
- match = RE_DISKS.search(value) - if not match: - continue - - match = match.groupdict() - array = match.pop('array') - arrays[array] = match - for operation in OPERATIONS: - arrays[array][operation] = 0 - - match = RE_STATUS.search(value) - if match: - match = match.groupdict() - if match['operation'] in OPERATIONS: - arrays[array]['operation'] = match['operation'] - arrays[array][match['operation']] = float(match['operation_status']) * 100 - arrays[array]['finish'] = float(match['finish']) * 1000 * 60 - arrays[array]['speed'] = float(match['speed']) * 1000 - - return arrays or None diff --git a/python.d/memcached.chart.py b/python.d/memcached.chart.py deleted file mode 100644 index 4f7adfa23..000000000 --- a/python.d/memcached.chart.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: memcached netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.SocketService import SocketService - -# default module values (can be overridden per job in `config`) -#update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'host': 'localhost', -# 'port': 11211, -# 'unix_socket': None -# }} - -ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed', - 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate'] - -CHARTS = { - 'cache': { - 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'], - 'lines': [ - ['avail', 'available', 'absolute', 1, 1048576], - ['used', 'used', 'absolute', 1, 1048576] - ]}, - 'net': { - 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'], - 'lines': [ - ['bytes_read', 'in', 'incremental', 8, 1024], - ['bytes_written', 'out', 'incremental', -8, 1024] - ]}, - 'connections': { - 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'], - 'lines': [ - ['curr_connections', 'current', 'incremental'], - ['rejected_connections', 'rejected', 'incremental'], - ['total_connections', 'total', 'incremental'] - ]}, - 'items': { - 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'], - 'lines': [ - ['curr_items', 'current', 'absolute'], - ['total_items', 'total', 'absolute'] - ]}, - 'evicted_reclaimed': { - 'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'], - 'lines': [ - ['reclaimed', 'reclaimed', 'absolute'], - ['evictions', 'evicted', 'absolute'] - ]}, - 'get': { - 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'], - 'lines': [ - ['get_hits', 'hits', 'percent-of-absolute-row'], - ['get_misses', 'misses', 'percent-of-absolute-row'] - ]}, - 'get_rate': { - 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'], - 'lines': [ - ['cmd_get', 'rate', 'incremental'] - ]}, - 'set_rate': { - 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'], - 'lines': [ - ['cmd_set', 'rate', 'incremental'] - ]}, - 'delete': { - 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'], - 'lines': [ - ['delete_hits', 'hits', 'percent-of-absolute-row'], - ['delete_misses', 'misses', 'percent-of-absolute-row'], - ]}, - 'cas': { - 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'], - 'lines': [ - ['cas_hits', 'hits', 'percent-of-absolute-row'], - 
['cas_misses', 'misses', 'percent-of-absolute-row'], - ['cas_badval', 'bad value', 'percent-of-absolute-row'] - ]}, - 'increment': { - 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'], - 'lines': [ - ['incr_hits', 'hits', 'percent-of-absolute-row'], - ['incr_misses', 'misses', 'percent-of-absolute-row'] - ]}, - 'decrement': { - 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'], - 'lines': [ - ['decr_hits', 'hits', 'percent-of-absolute-row'], - ['decr_misses', 'misses', 'percent-of-absolute-row'] - ]}, - 'touch': { - 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'], - 'lines': [ - ['touch_hits', 'hits', 'percent-of-absolute-row'], - ['touch_misses', 'misses', 'percent-of-absolute-row'] - ]}, - 'touch_rate': { - 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'], - 'lines': [ - ['cmd_touch', 'rate', 'incremental'] - ]} -} - - -class Service(SocketService): - def __init__(self, configuration=None, name=None): - SocketService.__init__(self, configuration=configuration, name=name) - self.request = "stats\r\n" - self.host = "localhost" - self.port = 11211 - self._keep_alive = True - self.unix_socket = None - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Get data from socket - :return: dict - """ - response = self._get_raw_data() - if response is None: - # error has already been logged - return None - - if response.startswith('ERROR'): - self.error("received ERROR") - return None - - try: - parsed = response.split("\n") - except AttributeError: - self.error("response is invalid/empty") - return None - - # split the response - data = {} - for line in parsed: - if line.startswith('STAT'): - try: - t = line[5:].split(' ') - data[t[0]] = t[1] - except (IndexError, ValueError): - self.debug("invalid line received: " + str(line)) - - if not data: - self.error("received data doesn't have any records") - return None - - # custom calculations - try: - data['avail'] = int(data['limit_maxbytes']) - int(data['bytes']) - data['used'] = int(data['bytes']) - except (KeyError, ValueError, TypeError): - pass - - return data - - def _check_raw_data(self, data): - if data.endswith('END\r\n'): - self.debug("received full response from memcached") - return True - - self.debug("waiting more data from memcached") - return False - - def check(self): - """ - Parse configuration, check if memcached is available - :return: boolean - """ - self._parse_config() - data = self._get_data() - if data is None: - return False - return True diff --git a/python.d/mongodb.chart.py b/python.d/mongodb.chart.py deleted file mode 100644 index 909a419da..000000000 --- a/python.d/mongodb.chart.py +++ /dev/null @@ -1,673 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: mongodb netdata python.d module -# Author: l2isbad - -from copy import deepcopy -from datetime import datetime -from sys import exc_info - -try: - from pymongo import MongoClient, ASCENDING, DESCENDING - from pymongo.errors import PyMongoError - PYMONGO = True -except ImportError: - PYMONGO = False - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -REPL_SET_STATES = [ - ('1', 'primary'), - ('8', 'down'), - ('2', 'secondary'), - ('3', 'recovering'), - ('5', 'startup2'), - ('4', 'fatal'), - ('7', 'arbiter'), - ('6', 'unknown'), - ('9', 'rollback'), - 
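The deleted memcached Service above speaks the text protocol directly: it sends "stats\r\n", treats "END\r\n" as the end-of-response marker (_check_raw_data), and turns each "STAT <name> <value>" line into one dict entry. A self-contained sketch of that parsing loop, assuming only the response format shown:

# Sketch of the parsing loop in _get_data above: every
# 'STAT <name> <value>' line becomes one dict entry; anything else
# (including the terminating 'END') is ignored.
def parse_stats(response):
    data = {}
    for line in response.splitlines():
        if line.startswith('STAT'):
            try:
                key, value = line[5:].split(' ')[:2]
                data[key] = value
            except ValueError:
                continue  # malformed line, skipped like the module's debug path
    return data

sample = 'STAT curr_connections 10\r\nSTAT bytes 2048\r\nEND\r\n'
assert parse_stats(sample) == {'curr_connections': '10', 'bytes': '2048'}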
('10', 'removed'), - ('0', 'startup')] - - -def multiply_by_100(value): - return value * 100 - - -DEFAULT_METRICS = [ - ('opcounters.delete', None, None), - ('opcounters.update', None, None), - ('opcounters.insert', None, None), - ('opcounters.query', None, None), - ('opcounters.getmore', None, None), - ('globalLock.activeClients.readers', 'activeClients_readers', None), - ('globalLock.activeClients.writers', 'activeClients_writers', None), - ('connections.available', 'connections_available', None), - ('connections.current', 'connections_current', None), - ('mem.mapped', None, None), - ('mem.resident', None, None), - ('mem.virtual', None, None), - ('globalLock.currentQueue.readers', 'currentQueue_readers', None), - ('globalLock.currentQueue.writers', 'currentQueue_writers', None), - ('asserts.msg', None, None), - ('asserts.regular', None, None), - ('asserts.user', None, None), - ('asserts.warning', None, None), - ('extra_info.page_faults', None, None), - ('metrics.record.moves', None, None), - ('backgroundFlushing.average_ms', None, multiply_by_100), - ('backgroundFlushing.last_ms', None, multiply_by_100), - ('backgroundFlushing.flushes', None, multiply_by_100), - ('metrics.cursor.timedOut', None, None), - ('metrics.cursor.open.total', 'cursor_total', None), - ('metrics.cursor.open.noTimeout', None, None), - ('cursors.timedOut', None, None), - ('cursors.totalOpen', 'cursor_total', None) -] - -DUR = [ - ('dur.commits', None, None), - ('dur.journaledMB', None, multiply_by_100) -] - -WIREDTIGER = [ - ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None), - ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None), - ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None), - ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None), - ('wiredTiger.cache.bytes currently in the cache', None, None), - ('wiredTiger.cache.tracked dirty bytes in the cache', None, None), - ('wiredTiger.cache.maximum bytes configured', None, None), - ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None), - ('wiredTiger.cache.modified pages evicted', 'modified', None) -] - -TCMALLOC = [ - ('tcmalloc.generic.current_allocated_bytes', None, None), - ('tcmalloc.generic.heap_size', None, None), - ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None), - ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None), - ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None), - ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None), - ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None), - ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None) -] - -COMMANDS = [ - ('metrics.commands.count.total', 'count_total', None), - ('metrics.commands.createIndexes.total', 'createIndexes_total', None), - ('metrics.commands.delete.total', 'delete_total', None), - ('metrics.commands.eval.total', 'eval_total', None), - ('metrics.commands.findAndModify.total', 'findAndModify_total', None), - ('metrics.commands.insert.total', 'insert_total', None), - ('metrics.commands.delete.total', 'delete_total', None), - ('metrics.commands.count.failed', 'count_failed', None), - ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None), - ('metrics.commands.delete.failed', 'delete_failed', None), - ('metrics.commands.eval.failed', 'eval_failed', None), - ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None), - ('metrics.commands.insert.failed', 'insert_failed', None), - 
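The metric tables above all follow one (dotted_path, rename, transform) convention: the dotted path is walked through the nested serverStatus document, the value is optionally renamed, and the optional transform (multiply_by_100 here) is applied before reporting. A sketch of that lookup, mirroring the loop in the module's _get_data further below:

# Sketch of the (dotted_path, rename, transform) convention used by
# the metric tables above.
def collect(server_status, metric, rename=None, func=None):
    value = server_status
    for key in metric.split('.'):
        try:
            value = value[key]
        except KeyError:
            return None  # metric absent on this server build; skipped
    name = rename or key
    return name, func(value) if func else value

status = {'backgroundFlushing': {'average_ms': 1.25}}
print(collect(status, 'backgroundFlushing.average_ms', None, lambda v: v * 100))
# -> ('average_ms', 125.0)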
('metrics.commands.delete.failed', 'delete_failed', None) -] - -LOCKS = [ - ('locks.Collection.acquireCount.R', 'Collection_R', None), - ('locks.Collection.acquireCount.r', 'Collection_r', None), - ('locks.Collection.acquireCount.W', 'Collection_W', None), - ('locks.Collection.acquireCount.w', 'Collection_w', None), - ('locks.Database.acquireCount.R', 'Database_R', None), - ('locks.Database.acquireCount.r', 'Database_r', None), - ('locks.Database.acquireCount.W', 'Database_W', None), - ('locks.Database.acquireCount.w', 'Database_w', None), - ('locks.Global.acquireCount.R', 'Global_R', None), - ('locks.Global.acquireCount.r', 'Global_r', None), - ('locks.Global.acquireCount.W', 'Global_W', None), - ('locks.Global.acquireCount.w', 'Global_w', None), - ('locks.Metadata.acquireCount.R', 'Metadata_R', None), - ('locks.Metadata.acquireCount.w', 'Metadata_w', None), - ('locks.oplog.acquireCount.r', 'oplog_r', None), - ('locks.oplog.acquireCount.w', 'oplog_w', None) -] - -DBSTATS = [ - 'dataSize', - 'indexSize', - 'storageSize', - 'objects' -] - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['read_operations', 'write_operations', 'active_clients', 'journaling_transactions', - 'journaling_volume', 'background_flush_average', 'background_flush_last', 'background_flush_rate', - 'wiredtiger_read', 'wiredtiger_write', 'cursors', 'connections', 'memory', 'page_faults', - 'queued_requests', 'record_moves', 'wiredtiger_cache', 'wiredtiger_pages_evicted', 'asserts', - 'locks_collection', 'locks_database', 'locks_global', 'locks_metadata', 'locks_oplog', - 'dbstats_objects', 'tcmalloc_generic', 'tcmalloc_metrics', 'command_total_rate', 'command_failed_rate'] - -CHARTS = { - 'read_operations': { - 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics', - 'mongodb.read_operations', 'line'], - 'lines': [ - ['query', None, 'incremental'], - ['getmore', None, 'incremental'] - ]}, - 'write_operations': { - 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics', - 'mongodb.write_operations', 'line'], - 'lines': [ - ['insert', None, 'incremental'], - ['update', None, 'incremental'], - ['delete', None, 'incremental'] - ]}, - 'active_clients': { - 'options': [None, 'Clients with read or write operations in progress or queued', 'clients', - 'throughput metrics', 'mongodb.active_clients', 'line'], - 'lines': [ - ['activeClients_readers', 'readers', 'absolute'], - ['activeClients_writers', 'writers', 'absolute'] - ]}, - 'journaling_transactions': { - 'options': [None, 'Transactions that have been written to the journal', 'commits', - 'database performance', 'mongodb.journaling_transactions', 'line'], - 'lines': [ - ['commits', None, 'absolute'] - ]}, - 'journaling_volume': { - 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance', - 'mongodb.journaling_volume', 'line'], - 'lines': [ - ['journaledMB', 'volume', 'absolute', 1, 100] - ]}, - 'background_flush_average': { - 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance', - 'mongodb.background_flush_average', 'line'], - 'lines': [ - ['average_ms', 'time', 'absolute', 1, 100] - ]}, - 'background_flush_last': { - 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance', - 'mongodb.background_flush_last', 'line'], - 'lines': [ - ['last_ms', 'time', 'absolute', 1, 100] - ]}, - 'background_flush_rate': { - 'options': [None, 'Flushes rate', 'flushes', 'database 
performance', 'mongodb.background_flush_rate', 'line'], - 'lines': [ - ['flushes', 'flushes', 'incremental', 1, 1] - ]}, - 'wiredtiger_read': { - 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance', - 'mongodb.wiredtiger_read', 'stacked'], - 'lines': [ - ['wiredTigerRead_available', 'available', 'absolute', 1, 1], - ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1] - ]}, - 'wiredtiger_write': { - 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance', - 'mongodb.wiredtiger_write', 'stacked'], - 'lines': [ - ['wiredTigerWrite_available', 'available', 'absolute', 1, 1], - ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1] - ]}, - 'cursors': { - 'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors', - 'cursors', 'database performance', 'mongodb.cursors', 'stacked'], - 'lines': [ - ['cursor_total', 'openned', 'absolute', 1, 1], - ['noTimeout', None, 'absolute', 1, 1], - ['timedOut', None, 'incremental', 1, 1] - ]}, - 'connections': { - 'options': [None, 'Currently connected clients and unused connections', 'connections', - 'resource utilization', 'mongodb.connections', 'stacked'], - 'lines': [ - ['connections_available', 'unused', 'absolute', 1, 1], - ['connections_current', 'connected', 'absolute', 1, 1] - ]}, - 'memory': { - 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'], - 'lines': [ - ['virtual', None, 'absolute', 1, 1], - ['resident', None, 'absolute', 1, 1], - ['nonmapped', None, 'absolute', 1, 1], - ['mapped', None, 'absolute', 1, 1] - ]}, - 'page_faults': { - 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s', - 'resource utilization', 'mongodb.page_faults', 'line'], - 'lines': [ - ['page_faults', None, 'incremental', 1, 1] - ]}, - 'queued_requests': { - 'options': [None, 'Currently queued read and wrire requests', 'requests', 'resource saturation', - 'mongodb.queued_requests', 'line'], - 'lines': [ - ['currentQueue_readers', 'readers', 'absolute', 1, 1], - ['currentQueue_writers', 'writers', 'absolute', 1, 1] - ]}, - 'record_moves': { - 'options': [None, 'Number of times documents had to be moved on-disk', 'number', - 'resource saturation', 'mongodb.record_moves', 'line'], - 'lines': [ - ['moves', None, 'incremental', 1, 1] - ]}, - 'asserts': { - 'options': [None, 'Number of message, warning, regular, corresponding to errors generated' - ' by users assertions raised', 'number', 'errors (asserts)', 'mongodb.asserts', 'line'], - 'lines': [ - ['msg', None, 'incremental', 1, 1], - ['warning', None, 'incremental', 1, 1], - ['regular', None, 'incremental', 1, 1], - ['user', None, 'incremental', 1, 1] - ]}, - 'wiredtiger_cache': { - 'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes', - 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'], - 'lines': [ - ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000], - ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000] - ]}, - 'wiredtiger_pages_evicted': { - 'options': [None, 'Pages evicted from the cache', - 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'], - 'lines': [ - ['unmodified', None, 'absolute', 1, 1], - ['modified', None, 'absolute', 1, 1] - ]}, - 'dbstats_objects': { - 'options': [None, 'Number of documents in the database among all the collections', 'documents', - 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'], - 
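Several dimensions above (journaledMB, average_ms, last_ms) carry a trailing divisor of 100 because multiply_by_100 stores those float metrics as scaled integers; the chart divides them back down, giving two decimals of precision without sending floats. A tiny illustration of that fixed-point round trip:

# Illustration of the fixed-point convention: the collector reports
# value * 100 as an integer (multiply_by_100 above) and the dimension's
# trailing divisor of 100 restores the original value on the dashboard.
def to_fixed(value, scale=100):
    return int(value * scale)      # what the collector sends

def render(stored, scale=100):
    return stored / scale          # what the chart displays

stored = to_fixed(3.14159)
print(stored, render(stored))      # -> 314 3.14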
'lines': [ - ]}, - 'tcmalloc_generic': { - 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'], - 'lines': [ - ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576], - ['heap_size', 'heap_size', 'absolute', 1, 1048576] - ]}, - 'tcmalloc_metrics': { - 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'], - 'lines': [ - ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024], - ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024], - ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024], - ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024], - ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024], - ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024] - ]}, - 'command_total_rate': { - 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'], - 'lines': [ - ['count_total', 'count', 'incremental', 1, 1], - ['createIndexes_total', 'createIndexes', 'incremental', 1, 1], - ['delete_total', 'delete', 'incremental', 1, 1], - ['eval_total', 'eval', 'incremental', 1, 1], - ['findAndModify_total', 'findAndModify', 'incremental', 1, 1], - ['insert_total', 'insert', 'incremental', 1, 1], - ['update_total', 'update', 'incremental', 1, 1] - ]}, - 'command_failed_rate': { - 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'], - 'lines': [ - ['count_failed', 'count', 'incremental', 1, 1], - ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1], - ['delete_failed', 'delete', 'incremental', 1, 1], - ['eval_failed', 'eval', 'incremental', 1, 1], - ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1], - ['insert_failed', 'insert', 'incremental', 1, 1], - ['update_failed', 'update', 'incremental', 1, 1] - ]}, - 'locks_collection': { - 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode', - 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'], - 'lines': [ - ['Collection_R', 'shared', 'incremental'], - ['Collection_W', 'exclusive', 'incremental'], - ['Collection_r', 'intent_shared', 'incremental'], - ['Collection_w', 'intent_exclusive', 'incremental'] - ]}, - 'locks_database': { - 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode', - 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'], - 'lines': [ - ['Database_R', 'shared', 'incremental'], - ['Database_W', 'exclusive', 'incremental'], - ['Database_r', 'intent_shared', 'incremental'], - ['Database_w', 'intent_exclusive', 'incremental'] - ]}, - 'locks_global': { - 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode', - 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'], - 'lines': [ - ['Global_R', 'shared', 'incremental'], - ['Global_W', 'exclusive', 'incremental'], - ['Global_r', 'intent_shared', 'incremental'], - ['Global_w', 'intent_exclusive', 'incremental'] - ]}, - 'locks_metadata': { - 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode', - 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'], - 'lines': [ - ['Metadata_R', 'shared', 'incremental'], - ['Metadata_w', 'intent_exclusive', 'incremental'] - ]}, - 'locks_oplog': { - 'options': [None, 'Lock on the oplog. 
Number of times the lock was acquired in the specified mode', - 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'], - 'lines': [ - ['oplog_r', 'intent_shared', 'incremental'], - ['oplog_w', 'intent_exclusive', 'incremental'] - ]} -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER[:] - self.definitions = deepcopy(CHARTS) - self.user = self.configuration.get('user') - self.password = self.configuration.get('pass') - self.host = self.configuration.get('host', '127.0.0.1') - self.port = self.configuration.get('port', 27017) - self.timeout = self.configuration.get('timeout', 100) - self.metrics_to_collect = deepcopy(DEFAULT_METRICS) - self.connection = None - self.do_replica = None - self.databases = list() - - def check(self): - if not PYMONGO: - self.error('Pymongo module is needed to use mongodb.chart.py') - return False - self.connection, server_status, error = self._create_connection() - if error: - self.error(error) - return False - - self.build_metrics_to_collect_(server_status) - - try: - data = self._get_data() - except (LookupError, SyntaxError, AttributeError): - self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1]))) - return False - if isinstance(data, dict) and data: - self._data_from_check = data - self.create_charts_(server_status) - return True - self.error('_get_data() returned no data or type is not <dict>') - return False - - def build_metrics_to_collect_(self, server_status): - - self.do_replica = 'repl' in server_status - if 'dur' in server_status: - self.metrics_to_collect.extend(DUR) - if 'tcmalloc' in server_status: - self.metrics_to_collect.extend(TCMALLOC) - if 'commands' in server_status['metrics']: - self.metrics_to_collect.extend(COMMANDS) - if 'wiredTiger' in server_status: - self.metrics_to_collect.extend(WIREDTIGER) - if 'Collection' in server_status['locks']: - self.metrics_to_collect.extend(LOCKS) - - def create_charts_(self, server_status): - - if 'dur' not in server_status: - self.order.remove('journaling_transactions') - self.order.remove('journaling_volume') - - if 'backgroundFlushing' not in server_status: - self.order.remove('background_flush_average') - self.order.remove('background_flush_last') - self.order.remove('background_flush_rate') - - if 'wiredTiger' not in server_status: - self.order.remove('wiredtiger_write') - self.order.remove('wiredtiger_read') - self.order.remove('wiredtiger_cache') - - if 'tcmalloc' not in server_status: - self.order.remove('tcmalloc_generic') - self.order.remove('tcmalloc_metrics') - - if 'commands' not in server_status['metrics']: - self.order.remove('command_total_rate') - self.order.remove('command_failed_rate') - - if 'Collection' not in server_status['locks']: - self.order.remove('locks_collection') - self.order.remove('locks_database') - self.order.remove('locks_global') - self.order.remove('locks_metadata') - - if 'oplog' not in server_status['locks']: - self.order.remove('locks_oplog') - - for dbase in self.databases: - self.order.append('_'.join([dbase, 'dbstats'])) - self.definitions['_'.join([dbase, 'dbstats'])] = { - 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB', - 'storage size metrics', 'mongodb.dbstats', 'line'], - 'lines': [ - ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024], - ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024], - ['_'.join([dbase, 'storageSize']), 'extents', 
'absolute', 1, 1024] - ]} - self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute']) - - if self.do_replica: - def create_lines(hosts, string): - lines = list() - for host in hosts: - dim_id = '_'.join([host, string]) - lines.append([dim_id, host, 'absolute', 1, 1000]) - return lines - - def create_state_lines(states): - lines = list() - for state, description in states: - dim_id = '_'.join([host, 'state', state]) - lines.append([dim_id, description, 'absolute', 1, 1]) - return lines - - all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list()) - this_host = server_status['repl']['me'] - other_hosts = [host for host in all_hosts if host != this_host] - - if 'local' in self.databases: - self.order.append('oplog_window') - self.definitions['oplog_window'] = { - 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog', - 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'], - 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]} - # Create "heartbeat delay" chart - self.order.append('heartbeat_delay') - self.definitions['heartbeat_delay'] = { - 'options': [None, 'Time when last heartbeat was received' - ' from the replica set member (lastHeartbeatRecv)', - 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'], - 'lines': create_lines(other_hosts, 'heartbeat_lag')} - # Create "optimedate delay" chart - self.order.append('optimedate_delay') - self.definitions['optimedate_delay'] = { - 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)', - 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'], - 'lines': create_lines(all_hosts, 'optimedate')} - # Create "replica set members state" chart - for host in all_hosts: - chart_name = '_'.join([host, 'state']) - self.order.append(chart_name) - self.definitions[chart_name] = { - 'options': [None, 'Replica set member (%s) current state' % host, 'state', - 'replication and oplog', 'mongodb.replication_state', 'line'], - 'lines': create_state_lines(REPL_SET_STATES)} - - def _get_raw_data(self): - raw_data = dict() - - raw_data.update(self.get_server_status() or dict()) - raw_data.update(self.get_db_stats() or dict()) - raw_data.update(self.get_repl_set_get_status() or dict()) - raw_data.update(self.get_get_replication_info() or dict()) - - return raw_data or None - - def get_server_status(self): - raw_data = dict() - try: - raw_data['serverStatus'] = self.connection.admin.command('serverStatus') - except PyMongoError: - return None - else: - return raw_data - - def get_db_stats(self): - if not self.databases: - return None - - raw_data = dict() - raw_data['dbStats'] = dict() - try: - for dbase in self.databases: - raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats') - return raw_data - except PyMongoError: - return None - - def get_repl_set_get_status(self): - if not self.do_replica: - return None - - raw_data = dict() - try: - raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus') - return raw_data - except PyMongoError: - return None - - def get_get_replication_info(self): - if not (self.do_replica and 'local' in self.databases): - return None - - raw_data = dict() - raw_data['getReplicationInfo'] = dict() - try: - raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort( - "$natural", ASCENDING).limit(1)[0] - 
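_get_raw_data above merges four independently fetched command results, and each get_* method returns None when a PyMongoError is caught, so one failing command only drops its own metrics rather than the whole update. A minimal sketch of that merge-what-succeeded pattern; 'ok' and 'broken' are hypothetical stand-ins for the real getters:

# Minimal sketch of the merge-what-succeeded pattern above.
def fetch_all(fetchers):
    raw = {}
    for fetch in fetchers:
        raw.update(fetch() or {})  # a failed fetcher contributes nothing
    return raw or None

def ok():
    return {'serverStatus': {'ok': 1}}

def broken():
    return None  # stands in for a swallowed PyMongoError

print(fetch_all([ok, broken]))     # -> {'serverStatus': {'ok': 1}}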
raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort( - "$natural", DESCENDING).limit(1)[0] - return raw_data - except PyMongoError: - return None - - def _get_data(self): - """ - :return: dict - """ - raw_data = self._get_raw_data() - - if not raw_data: - return None - - to_netdata = dict() - serverStatus = raw_data['serverStatus'] - dbStats = raw_data.get('dbStats') - replSetGetStatus = raw_data.get('replSetGetStatus') - getReplicationInfo = raw_data.get('getReplicationInfo') - utc_now = datetime.utcnow() - - # serverStatus - for metric, new_name, func in self.metrics_to_collect: - value = serverStatus - for key in metric.split('.'): - try: - value = value[key] - except KeyError: - break - - if not isinstance(value, dict) and key: - to_netdata[new_name or key] = value if not func else func(value) - - to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal', - to_netdata['mapped']) - if to_netdata.get('maximum bytes configured'): - maximum = to_netdata['maximum bytes configured'] - to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache'] - * 100 / maximum * 1000) - to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache'] - * 100 / maximum * 1000) - - # dbStats - if dbStats: - for dbase in dbStats: - for metric in DBSTATS: - key = '_'.join([dbase, metric]) - to_netdata[key] = dbStats[dbase][metric] - - # replSetGetStatus - if replSetGetStatus: - other_hosts = list() - members = replSetGetStatus['members'] - unix_epoch = datetime(1970, 1, 1, 0, 0) - - for member in members: - if not member.get('self'): - other_hosts.append(member) - # Replica set time diff between current time and time when last entry from the oplog was applied - if member.get('optimeDate', unix_epoch) != unix_epoch: - member_optimedate = member['name'] + '_optimedate' - to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'], - multiplier=1000))}) - # Replica set members state - member_state = member['name'] + '_state' - for elem in REPL_SET_STATES: - state = elem[0] - to_netdata.update({'_'.join([member_state, state]): 0}) - to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']}) - # Heartbeat lag calculation - for other in other_hosts: - if other['lastHeartbeatRecv'] != unix_epoch: - node = other['name'] + '_heartbeat_lag' - to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'], - multiplier=1000)) - - if getReplicationInfo: - first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime() - last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime() - to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000)) - - return to_netdata - - def _create_connection(self): - conn_vars = {'host': self.host, 'port': self.port} - if hasattr(MongoClient, 'server_selection_timeout'): - conn_vars.update({'serverselectiontimeoutms': self.timeout}) - try: - connection = MongoClient(**conn_vars) - if self.user and self.password: - connection.admin.authenticate(name=self.user, password=self.password) - # elif self.user: - # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509') - server_status = connection.admin.command('serverStatus') - except PyMongoError as error: - return None, None, str(error) - else: - try: - self.databases = connection.database_names() - except PyMongoError as error: - self.info('Can\'t collect databases: %s' % 
str(error)) - return connection, server_status, None - - -def delta_calculation(delta, multiplier=1): - if hasattr(delta, 'total_seconds'): - return delta.total_seconds() * multiplier - return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py deleted file mode 100644 index 4c7058b26..000000000 --- a/python.d/mysql.chart.py +++ /dev/null @@ -1,491 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: MySQL netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.MySQLService import MySQLService - -# default module values (can be overridden per job in `config`) -# update_every = 3 -priority = 60000 -retries = 60 - -# query executed on MySQL server -QUERY_GLOBAL = 'SHOW GLOBAL STATUS;' -QUERY_SLAVE = 'SHOW SLAVE STATUS;' - -GLOBAL_STATS = [ - 'Bytes_received', - 'Bytes_sent', - 'Queries', - 'Questions', - 'Slow_queries', - 'Handler_commit', - 'Handler_delete', - 'Handler_prepare', - 'Handler_read_first', - 'Handler_read_key', - 'Handler_read_next', - 'Handler_read_prev', - 'Handler_read_rnd', - 'Handler_read_rnd_next', - 'Handler_rollback', - 'Handler_savepoint', - 'Handler_savepoint_rollback', - 'Handler_update', - 'Handler_write', - 'Table_locks_immediate', - 'Table_locks_waited', - 'Select_full_join', - 'Select_full_range_join', - 'Select_range', - 'Select_range_check', - 'Select_scan', - 'Sort_merge_passes', - 'Sort_range', - 'Sort_scan', - 'Created_tmp_disk_tables', - 'Created_tmp_files', - 'Created_tmp_tables', - 'Connections', - 'Aborted_connects', - 'Binlog_cache_disk_use', - 'Binlog_cache_use', - 'Threads_connected', - 'Threads_created', - 'Threads_cached', - 'Threads_running', - 'Thread_cache_misses', - 'Innodb_data_read', - 'Innodb_data_written', - 'Innodb_data_reads', - 'Innodb_data_writes', - 'Innodb_data_fsyncs', - 'Innodb_data_pending_reads', - 'Innodb_data_pending_writes', - 'Innodb_data_pending_fsyncs', - 'Innodb_log_waits', - 'Innodb_log_write_requests', - 'Innodb_log_writes', - 'Innodb_os_log_fsyncs', - 'Innodb_os_log_pending_fsyncs', - 'Innodb_os_log_pending_writes', - 'Innodb_os_log_written', - 'Innodb_row_lock_current_waits', - 'Innodb_rows_inserted', - 'Innodb_rows_read', - 'Innodb_rows_updated', - 'Innodb_rows_deleted', - 'Innodb_buffer_pool_pages_data', - 'Innodb_buffer_pool_pages_dirty', - 'Innodb_buffer_pool_pages_free', - 'Innodb_buffer_pool_pages_flushed', - 'Innodb_buffer_pool_pages_misc', - 'Innodb_buffer_pool_pages_total', - 'Innodb_buffer_pool_bytes_data', - 'Innodb_buffer_pool_bytes_dirty', - 'Innodb_buffer_pool_read_ahead', - 'Innodb_buffer_pool_read_ahead_evicted', - 'Innodb_buffer_pool_read_ahead_rnd', - 'Innodb_buffer_pool_read_requests', - 'Innodb_buffer_pool_write_requests', - 'Innodb_buffer_pool_reads', - 'Innodb_buffer_pool_wait_free', - 'Qcache_hits', - 'Qcache_lowmem_prunes', - 'Qcache_inserts', - 'Qcache_not_cached', - 'Qcache_queries_in_cache', - 'Qcache_free_memory', - 'Qcache_free_blocks', - 'Qcache_total_blocks', - 'Key_blocks_unused', - 'Key_blocks_used', - 'Key_blocks_not_flushed', - 'Key_read_requests', - 'Key_write_requests', - 'Key_reads', - 'Key_writes', - 'Open_files', - 'Opened_files', - 'Binlog_stmt_cache_disk_use', - 'Binlog_stmt_cache_use', - 'Connection_errors_accept', - 'Connection_errors_internal', - 'Connection_errors_max_connections', - 'Connection_errors_peer_address', - 'Connection_errors_select', - 'Connection_errors_tcpwrap', - 'wsrep_local_recv_queue', - 'wsrep_local_send_queue', - 
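delta_calculation at the end of the mongodb module exists because timedelta.total_seconds() only appeared in Python 2.7; the fallback branch rebuilds it from days, seconds and microseconds. A quick standalone check that the two paths agree:

# Standalone check that the pre-2.7 fallback branch of
# delta_calculation agrees with timedelta.total_seconds().
from datetime import timedelta

def fallback(delta, multiplier=1):
    return (delta.microseconds
            + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier

d = timedelta(days=1, seconds=30, microseconds=500000)
assert fallback(d, 1000) == d.total_seconds() * 1000 == 86430500.0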
'wsrep_received', - 'wsrep_replicated', - 'wsrep_received_bytes', - 'wsrep_replicated_bytes', - 'wsrep_local_bf_aborts', - 'wsrep_local_cert_failures', - 'wsrep_flow_control_paused_ns'] - -def slave_seconds(value): - try: - return int(value) - except (TypeError, ValueError): - return -1 - - -def slave_running(value): - return 1 if value == 'Yes' else -1 - - -SLAVE_STATS = [ - ('Seconds_Behind_Master', slave_seconds), - ('Slave_SQL_Running', slave_running), - ('Slave_IO_Running', slave_running) -] - -ORDER = ['net', - 'queries', - 'handlers', - 'table_locks', - 'join_issues', 'sort_issues', - 'tmp', - 'connections', 'connection_errors', - 'binlog_cache', 'binlog_stmt_cache', - 'threads', 'thread_cache_misses', - 'innodb_io', 'innodb_io_ops', 'innodb_io_pending_ops', 'innodb_log', 'innodb_os_log', 'innodb_os_log_io', - 'innodb_cur_row_lock', 'innodb_rows', 'innodb_buffer_pool_pages', 'innodb_buffer_pool_bytes', - 'innodb_buffer_pool_read_ahead', 'innodb_buffer_pool_reqs', 'innodb_buffer_pool_ops', - 'qcache_ops', 'qcache', 'qcache_freemem', 'qcache_memblocks', - 'key_blocks', 'key_requests', 'key_disk_ops', - 'files', 'files_rate', 'slave_behind', 'slave_status', - 'galera_writesets', 'galera_bytes', 'galera_queue', 'galera_conflicts', 'galera_flow_control'] - -CHARTS = { - 'net': { - 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'], - 'lines': [ - ['Bytes_received', 'in', 'incremental', 8, 1024], - ['Bytes_sent', 'out', 'incremental', -8, 1024] - ]}, - 'queries': { - 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'], - 'lines': [ - ['Queries', 'queries', 'incremental'], - ['Questions', 'questions', 'incremental'], - ['Slow_queries', 'slow_queries', 'incremental'] - ]}, - 'handlers': { - 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'], - 'lines': [ - ['Handler_commit', 'commit', 'incremental'], - ['Handler_delete', 'delete', 'incremental'], - ['Handler_prepare', 'prepare', 'incremental'], - ['Handler_read_first', 'read_first', 'incremental'], - ['Handler_read_key', 'read_key', 'incremental'], - ['Handler_read_next', 'read_next', 'incremental'], - ['Handler_read_prev', 'read_prev', 'incremental'], - ['Handler_read_rnd', 'read_rnd', 'incremental'], - ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'], - ['Handler_rollback', 'rollback', 'incremental'], - ['Handler_savepoint', 'savepoint', 'incremental'], - ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'], - ['Handler_update', 'update', 'incremental'], - ['Handler_write', 'write', 'incremental'] - ]}, - 'table_locks': { - 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'], - 'lines': [ - ['Table_locks_immediate', 'immediate', 'incremental'], - ['Table_locks_waited', 'waited', 'incremental', -1, 1] - ]}, - 'join_issues': { - 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'], - 'lines': [ - ['Select_full_join', 'full_join', 'incremental'], - ['Select_full_range_join', 'full_range_join', 'incremental'], - ['Select_range', 'range', 'incremental'], - ['Select_range_check', 'range_check', 'incremental'], - ['Select_scan', 'scan', 'incremental'] - ]}, - 'sort_issues': { - 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'], - 'lines': [ - ['Sort_merge_passes', 'merge_passes', 'incremental'], - ['Sort_range', 'range', 'incremental'], - ['Sort_scan', 'scan', 'incremental'] - ]}, - 'tmp': { - 
'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'], - 'lines': [ - ['Created_tmp_disk_tables', 'disk_tables', 'incremental'], - ['Created_tmp_files', 'files', 'incremental'], - ['Created_tmp_tables', 'tables', 'incremental'] - ]}, - 'connections': { - 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'], - 'lines': [ - ['Connections', 'all', 'incremental'], - ['Aborted_connects', 'aborted', 'incremental'] - ]}, - 'binlog_cache': { - 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'], - 'lines': [ - ['Binlog_cache_disk_use', 'disk', 'incremental'], - ['Binlog_cache_use', 'all', 'incremental'] - ]}, - 'threads': { - 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'], - 'lines': [ - ['Threads_connected', 'connected', 'absolute'], - ['Threads_created', 'created', 'incremental'], - ['Threads_cached', 'cached', 'absolute', -1, 1], - ['Threads_running', 'running', 'absolute'], - ]}, - 'thread_cache_misses': { - 'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'], - 'lines': [ - ['Thread_cache_misses', 'misses', 'absolute', 1, 100] - ]}, - 'innodb_io': { - 'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'], - 'lines': [ - ['Innodb_data_read', 'read', 'incremental', 1, 1024], - ['Innodb_data_written', 'write', 'incremental', -1, 1024] - ]}, - 'innodb_io_ops': { - 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'], - 'lines': [ - ['Innodb_data_reads', 'reads', 'incremental'], - ['Innodb_data_writes', 'writes', 'incremental', -1, 1], - ['Innodb_data_fsyncs', 'fsyncs', 'incremental'] - ]}, - 'innodb_io_pending_ops': { - 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb', - 'mysql.innodb_io_pending_ops', 'line'], - 'lines': [ - ['Innodb_data_pending_reads', 'reads', 'absolute'], - ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1], - ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute'] - ]}, - 'innodb_log': { - 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'], - 'lines': [ - ['Innodb_log_waits', 'waits', 'incremental'], - ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1], - ['Innodb_log_writes', 'writes', 'incremental', -1, 1], - ]}, - 'innodb_os_log': { - 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'], - 'lines': [ - ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'], - ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'], - ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1], - ]}, - 'innodb_os_log_io': { - 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'], - 'lines': [ - ['Innodb_os_log_written', 'write', 'incremental', -1, 1024], - ]}, - 'innodb_cur_row_lock': { - 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb', - 'mysql.innodb_cur_row_lock', 'area'], - 'lines': [ - ['Innodb_row_lock_current_waits', 'current_waits', 'absolute'] - ]}, - 'innodb_rows': { - 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'], - 'lines': [ - ['Innodb_rows_inserted', 'inserted', 'incremental'], - ['Innodb_rows_read', 'read', 'incremental', 1, 1], - ['Innodb_rows_updated', 
'updated', 'incremental', 1, 1], - ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1], - ]}, - 'innodb_buffer_pool_pages': { - 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', - 'mysql.innodb_buffer_pool_pages', 'line'], - 'lines': [ - ['Innodb_buffer_pool_pages_data', 'data', 'absolute'], - ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1], - ['Innodb_buffer_pool_pages_free', 'free', 'absolute'], - ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1], - ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1], - ['Innodb_buffer_pool_pages_total', 'total', 'absolute'] - ]}, - 'innodb_buffer_pool_bytes': { - 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'], - 'lines': [ - ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024], - ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024] - ]}, - 'innodb_buffer_pool_read_ahead': { - 'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb', - 'mysql.innodb_buffer_pool_read_ahead', 'area'], - 'lines': [ - ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'], - ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1], - ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental'] - ]}, - 'innodb_buffer_pool_reqs': { - 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb', - 'mysql.innodb_buffer_pool_reqs', 'area'], - 'lines': [ - ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'], - ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1] - ]}, - 'innodb_buffer_pool_ops': { - 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb', - 'mysql.innodb_buffer_pool_ops', 'area'], - 'lines': [ - ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'], - ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1] - ]}, - 'qcache_ops': { - 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'], - 'lines': [ - ['Qcache_hits', 'hits', 'incremental'], - ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1], - ['Qcache_inserts', 'inserts', 'incremental'], - ['Qcache_not_cached', 'not cached', 'incremental', -1, 1] - ]}, - 'qcache': { - 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'], - 'lines': [ - ['Qcache_queries_in_cache', 'queries', 'absolute'] - ]}, - 'qcache_freemem': { - 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'], - 'lines': [ - ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024] - ]}, - 'qcache_memblocks': { - 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'], - 'lines': [ - ['Qcache_free_blocks', 'free', 'absolute'], - ['Qcache_total_blocks', 'total', 'absolute'] - ]}, - 'key_blocks': { - 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'], - 'lines': [ - ['Key_blocks_unused', 'unused', 'absolute'], - ['Key_blocks_used', 'used', 'absolute', -1, 1], - ['Key_blocks_not_flushed', 'not flushed', 'absolute'] - ]}, - 'key_requests': { - 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'], - 'lines': [ - ['Key_read_requests', 'reads', 'incremental'], - ['Key_write_requests', 'writes', 'incremental', -1, 1] - ]}, - 'key_disk_ops': { - 'options': [None, 'mysql MyISAM Key 
Cache Disk Operations', 'operations/s', - 'myisam', 'mysql.key_disk_ops', 'area'], - 'lines': [ - ['Key_reads', 'reads', 'incremental'], - ['Key_writes', 'writes', 'incremental', -1, 1] - ]}, - 'files': { - 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'], - 'lines': [ - ['Open_files', 'files', 'absolute'] - ]}, - 'files_rate': { - 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'], - 'lines': [ - ['Opened_files', 'files', 'incremental'] - ]}, - 'binlog_stmt_cache': { - 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog', - 'mysql.binlog_stmt_cache', 'line'], - 'lines': [ - ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'], - ['Binlog_stmt_cache_use', 'all', 'incremental'] - ]}, - 'connection_errors': { - 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections', - 'mysql.connection_errors', 'line'], - 'lines': [ - ['Connection_errors_accept', 'accept', 'incremental'], - ['Connection_errors_internal', 'internal', 'incremental'], - ['Connection_errors_max_connections', 'max', 'incremental'], - ['Connection_errors_peer_address', 'peer_addr', 'incremental'], - ['Connection_errors_select', 'select', 'incremental'], - ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental'] - ]}, - 'slave_behind': { - 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'], - 'lines': [ - ['Seconds_Behind_Master', 'seconds', 'absolute'] - ]}, - 'slave_status': { - 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'], - 'lines': [ - ['Slave_SQL_Running', 'sql_running', 'absolute'], - ['Slave_IO_Running', 'io_running', 'absolute'] - ]}, - 'galera_writesets': { - 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'], - 'lines': [ - ['wsrep_received', 'rx', 'incremental'], - ['wsrep_replicated', 'tx', 'incremental', -1, 1], - ]}, - 'galera_bytes': { - 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'], - 'lines': [ - ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024], - ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024], - ]}, - 'galera_queue': { - 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'], - 'lines': [ - ['wsrep_local_recv_queue', 'rx', 'absolute'], - ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1], - ]}, - 'galera_conflicts': { - 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'], - 'lines': [ - ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'], - ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1], - ]}, - 'galera_flow_control': { - 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'], - 'lines': [ - ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000], - ]} -} - - -class Service(MySQLService): - def __init__(self, configuration=None, name=None): - MySQLService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE) - - def _get_data(self): - - raw_data = self._get_raw_data(description=True) - - if not raw_data: - return None - - to_netdata = dict() - - if 'global_status' in raw_data: - global_status = dict(raw_data['global_status'][0]) - for key in GLOBAL_STATS: - if key in global_status: - to_netdata[key] = global_status[key] - if 
'Threads_created' in to_netdata and 'Connections' in to_netdata: - to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created']) - / float(to_netdata['Connections']) * 10000) - - if 'slave_status' in raw_data: - if raw_data['slave_status'][0]: - slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0])) - for key, func in SLAVE_STATS: - if key in slave_raw_data: - to_netdata[key] = func(slave_raw_data[key]) - else: - self.queries.pop('slave_status') - - return to_netdata or None - diff --git a/python.d/nginx.chart.py b/python.d/nginx.chart.py deleted file mode 100644 index 2e4f0d1b5..000000000 --- a/python.d/nginx.chart.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: nginx netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://localhost/stub_status' -# }} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['connections', 'requests', 'connection_status', 'connect_rate'] - -CHARTS = { - 'connections': { - 'options': [None, 'nginx Active Connections', 'connections', 'active connections', - 'nginx.connections', 'line'], - 'lines': [ - ["active"] - ]}, - 'requests': { - 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'], - 'lines': [ - ["requests", None, 'incremental'] - ]}, - 'connection_status': { - 'options': [None, 'nginx Active Connections by Status', 'connections', 'status', - 'nginx.connection_status', 'line'], - 'lines': [ - ["reading"], - ["writing"], - ["waiting", "idle"] - ]}, - 'connect_rate': { - 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate', - 'nginx.connect_rate', 'line'], - 'lines': [ - ["accepts", "accepted", "incremental"], - ["handled", None, "incremental"] - ]} -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url', 'http://localhost/stub_status') - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Format data received from http request - :return: dict - """ - try: - raw = self._get_raw_data().split(" ") - return {'active': int(raw[2]), - 'requests': int(raw[9]), - 'reading': int(raw[11]), - 'writing': int(raw[13]), - 'waiting': int(raw[15]), - 'accepts': int(raw[7]), - 'handled': int(raw[8])} - except (ValueError, AttributeError): - return None diff --git a/python.d/nginx_plus.chart.py b/python.d/nginx_plus.chart.py deleted file mode 100644 index 509ddd380..000000000 --- a/python.d/nginx_plus.chart.py +++ /dev/null @@ -1,491 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: nginx_plus netdata python.d module -# Author: Ilya Mashchenko (l2isbad) - -import re - -from collections import defaultdict -from copy import deepcopy -from json import loads - -try: - from collections import OrderedDict -except ImportError: - from third_party.ordereddict import OrderedDict - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -update_every = 1 -priority = 60000 -retries = 60 - -# charts 
order (can be overridden if you want less charts, or different order) -ORDER = ['requests_total', 'requests_current', - 'connections_statistics', 'connections_workers', - 'ssl_handshakes', 'ssl_session_reuses', 'ssl_memory_usage', - 'processes'] - -CHARTS = { - 'requests_total': { - 'options': [None, 'Requests Total', 'requests/s', - 'requests', 'nginx_plus.requests_total', 'line'], - 'lines': [ - ['requests_total', 'total', 'incremental'] - ]}, - 'requests_current': { - 'options': [None, 'Requests Current', 'requests', - 'requests', 'nginx_plus.requests_current', 'line'], - 'lines': [ - ['requests_current', 'current'] - ]}, - 'connections_statistics': { - 'options': [None, 'Connections Statistics', 'connections/s', - 'connections', 'nginx_plus.connections_statistics', 'stacked'], - 'lines': [ - ['connections_accepted', 'accepted', 'incremental'], - ['connections_dropped', 'dropped', 'incremental'] - ]}, - 'connections_workers': { - 'options': [None, 'Workers Statistics', 'workers', - 'connections', 'nginx_plus.connections_workers', 'stacked'], - 'lines': [ - ['connections_idle', 'idle'], - ['connections_active', 'active'] - ]}, - 'ssl_handshakes': { - 'options': [None, 'SSL Handshakes', 'handshakes/s', - 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'], - 'lines': [ - ['ssl_handshakes', 'successful', 'incremental'], - ['ssl_handshakes_failed', 'failed', 'incremental'] - ]}, - 'ssl_session_reuses': { - 'options': [None, 'Session Reuses', 'sessions/s', - 'ssl', 'nginx_plus.ssl_session_reuses', 'line'], - 'lines': [ - ['ssl_session_reuses', 'reused', 'incremental'] - ]}, - 'ssl_memory_usage': { - 'options': [None, 'Memory Usage', '%', - 'ssl', 'nginx_plus.ssl_memory_usage', 'area'], - 'lines': [ - ['ssl_memory_usage', 'usage', 'absolute', 1, 100] - ]}, - 'processes': { - 'options': [None, 'Processes', 'processes', - 'processes', 'nginx_plus.processes', 'line'], - 'lines': [ - ['processes_respawned', 'respawned'] - ]} -} - - -def cache_charts(cache): - family = 'cache {0}'.format(cache.real_name) - charts = OrderedDict() - - charts['{0}_traffic'.format(cache.name)] = { - 'options': [None, 'Traffic', 'KB', family, - 'nginx_plus.cache_traffic', 'stacked'], - 'lines': [ - ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024], - ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024], - ['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024] - ] - } - charts['{0}_memory_usage'.format(cache.name)] = { - 'options': [None, 'Memory Usage', '%', family, - 'nginx_plus.cache_memory_usage', 'area'], - 'lines': [ - ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100], - ] - } - return charts - - -def web_zone_charts(wz): - charts = OrderedDict() - family = 'web zone {name}'.format(name=wz.real_name) - - # Processing - charts['zone_{name}_processing'.format(name=wz.name)] = { - 'options': [None, 'Zone "{name}" Processing'.format(name=wz.name), 'requests', family, - 'nginx_plus.web_zone_processing', 'line'], - 'lines': [ - ['_'.join([wz.name, 'processing']), 'processing'] - ] - } - # Requests - charts['zone_{name}_requests'.format(name=wz.name)] = { - 'options': [None, 'Zone "{name}" Requests'.format(name=wz.name), 'requests/s', family, - 'nginx_plus.web_zone_requests', 'line'], - 'lines': [ - ['_'.join([wz.name, 'requests']), 'requests', 'incremental'] - ] - } - # Response Codes - charts['zone_{name}_responses'.format(name=wz.name)] = { - 'options': [None, 'Zone "{name}" Responses'.format(name=wz.name), 'requests/s', family, - 
'nginx_plus.web_zone_responses', 'stacked'], - 'lines': [ - ['_'.join([wz.name, 'responses_2xx']), '2xx', 'incremental'], - ['_'.join([wz.name, 'responses_5xx']), '5xx', 'incremental'], - ['_'.join([wz.name, 'responses_3xx']), '3xx', 'incremental'], - ['_'.join([wz.name, 'responses_4xx']), '4xx', 'incremental'], - ['_'.join([wz.name, 'responses_1xx']), '1xx', 'incremental'] - ] - } - # Traffic - charts['zone_{name}_net'.format(name=wz.name)] = { - 'options': [None, 'Zone "{name}" Traffic'.format(name=wz.name), 'kilobits/s', family, - 'nginx_plus.zone_net', 'area'], - 'lines': [ - ['_'.join([wz.name, 'received']), 'received', 'incremental', 1, 1000], - ['_'.join([wz.name, 'sent']), 'sent', 'incremental', -1, 1000] - ] - } - return charts - - -def web_upstream_charts(wu): - def dimensions(value, a='absolute', m=1, d=1): - dims = list() - for p in wu: - dims.append(['_'.join([wu.name, p.server, value]), p.real_server, a, m, d]) - return dims - - charts = OrderedDict() - family = 'web upstream {name}'.format(name=wu.real_name) - - # Requests - charts['web_upstream_{name}_requests'.format(name=wu.name)] = { - 'options': [None, 'Peers Requests', 'requests/s', family, - 'nginx_plus.web_upstream_requests', 'line'], - 'lines': dimensions('requests', 'incremental') - } - # Responses Codes - charts['web_upstream_{name}_all_responses'.format(name=wu.name)] = { - 'options': [None, 'All Peers Responses', 'responses/s', family, - 'nginx_plus.web_upstream_all_responses', 'stacked'], - 'lines': [ - ['_'.join([wu.name, 'responses_2xx']), '2xx', 'incremental'], - ['_'.join([wu.name, 'responses_5xx']), '5xx', 'incremental'], - ['_'.join([wu.name, 'responses_3xx']), '3xx', 'incremental'], - ['_'.join([wu.name, 'responses_4xx']), '4xx', 'incremental'], - ['_'.join([wu.name, 'responses_1xx']), '1xx', 'incremental'], - ] - } - for peer in wu: - charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.id)] = { - 'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family, - 'nginx_plus.web_upstream_peer_responses', 'stacked'], - 'lines': [ - ['_'.join([wu.name, peer.server, 'responses_2xx']), '2xx', 'incremental'], - ['_'.join([wu.name, peer.server, 'responses_5xx']), '5xx', 'incremental'], - ['_'.join([wu.name, peer.server, 'responses_3xx']), '3xx', 'incremental'], - ['_'.join([wu.name, peer.server, 'responses_4xx']), '4xx', 'incremental'], - ['_'.join([wu.name, peer.server, 'responses_1xx']), '1xx', 'incremental'] - ] - } - # Connections - charts['web_upstream_{name}_connections'.format(name=wu.name)] = { - 'options': [None, 'Peers Connections', 'active', family, - 'nginx_plus.web_upstream_connections', 'line'], - 'lines': dimensions('active') - } - charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = { - 'options': [None, 'Peers Connections Usage', '%', family, - 'nginx_plus.web_upstream_connections_usage', 'line'], - 'lines': dimensions('connections_usage', d=100) - } - # Traffic - charts['web_upstream_{0}_all_net'.format(wu.name)] = { - 'options': [None, 'All Peers Traffic', 'kilobits/s', family, - 'nginx_plus.web_upstream_all_net', 'area'], - 'lines': [ - ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000], - ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000] - ] - } - for peer in wu: - charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.id)] = { - 'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family, - 'nginx_plus.web_upstream_peer_traffic', 'area'], - 'lines': [ - 
['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000], - ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000] - ] - } - # Response Time - for peer in wu: - charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.id)] = { - 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family, - 'nginx_plus.web_upstream_peer_timings', 'line'], - 'lines': [ - ['_'.join([wu.name, peer.server, 'header_time']), 'header'], - ['_'.join([wu.name, peer.server, 'response_time']), 'response'] - ] - } - # Memory Usage - charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = { - 'options': [None, 'Memory Usage', '%', family, - 'nginx_plus.web_upstream_memory_usage', 'area'], - 'lines': [ - ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100] - ] - } - # State - charts['web_upstream_{name}_status'.format(name=wu.name)] = { - 'options': [None, 'Peers Status', 'state', family, - 'nginx_plus.web_upstream_status', 'line'], - 'lines': dimensions('state') - } - # Downtime - charts['web_upstream_{name}_downtime'.format(name=wu.name)] = { - 'options': [None, 'Peers Downtime', 'seconds', family, - 'nginx_plus.web_upstream_peer_downtime', 'line'], - 'lines': dimensions('downtime', d=1000) - } - - return charts - - -METRICS = dict( - SERVER=[ - 'processes.respawned', - 'connections.accepted', - 'connections.dropped', - 'connections.active', - 'connections.idle', - 'ssl.handshakes', - 'ssl.handshakes_failed', - 'ssl.session_reuses', - 'requests.total', - 'requests.current', - 'slabs.SSL.pages.free', - 'slabs.SSL.pages.used' - ], - WEB_ZONE=[ - 'processing', - 'requests', - 'responses.1xx', - 'responses.2xx', - 'responses.3xx', - 'responses.4xx', - 'responses.5xx', - 'discarded', - 'received', - 'sent' - ], - WEB_UPSTREAM_PEER=[ - 'id', - 'server', - 'name', - 'state', - 'active', - 'max_conns', - 'requests', - 'header_time', # alive only - 'response_time', # alive only - 'responses.1xx', - 'responses.2xx', - 'responses.3xx', - 'responses.4xx', - 'responses.5xx', - 'sent', - 'received', - 'downtime' - ], - WEB_UPSTREAM_SUMMARY=[ - 'responses.1xx', - 'responses.2xx', - 'responses.3xx', - 'responses.4xx', - 'responses.5xx', - 'sent', - 'received' - ], - CACHE=[ - 'hit.bytes', # served - 'miss.bytes_written', # written - 'miss.bytes' # bypass - - ] -) - -BAD_SYMBOLS = re.compile(r'[:/.-]+') - - -class Cache: - key = 'caches' - charts = cache_charts - - def __init__(self, **kw): - self.real_name = kw['name'] - self.name = BAD_SYMBOLS.sub('_', self.real_name) - - def memory_usage(self, data): - used = data['slabs'][self.real_name]['pages']['used'] - free = data['slabs'][self.real_name]['pages']['free'] - return used / float(free + used) * 1e4 - - def get_data(self, raw_data): - zone_data = raw_data['caches'][self.real_name] - data = parse_json(zone_data, METRICS['CACHE']) - data['memory_usage'] = self.memory_usage(raw_data) - return dict(('_'.join([self.name, k]), v) for k, v in data.items()) - - -class WebZone: - key = 'server_zones' - charts = web_zone_charts - - def __init__(self, **kw): - self.real_name = kw['name'] - self.name = BAD_SYMBOLS.sub('_', self.real_name) - - def get_data(self, raw_data): - zone_data = raw_data['server_zones'][self.real_name] - data = parse_json(zone_data, METRICS['WEB_ZONE']) - return dict(('_'.join([self.name, k]), v) for k, v in data.items()) - - -class WebUpstream: - key = 'upstreams' - charts = web_upstream_charts - - def __init__(self, **kw): - self.real_name = kw['name'] - self.name = 
BAD_SYMBOLS.sub('_', self.real_name) - self.peers = OrderedDict() - - peers = kw['response']['upstreams'][self.real_name]['peers'] - for peer in peers: - self.add_peer(peer['id'], peer['server']) - - def __iter__(self): - return iter(self.peers.values()) - - def add_peer(self, idx, server): - peer = WebUpstreamPeer(idx, server) - self.peers[peer.real_server] = peer - return peer - - def peers_stats(self, peers): - data = dict() - for peer in self.peers.values(): - if not peer.active: - continue - try: - data.update(peer.get_data(peers[peer.id])) - except KeyError: - peer.active = False - return data - - def memory_usage(self, data): - used = data['slabs'][self.real_name]['pages']['used'] - free = data['slabs'][self.real_name]['pages']['free'] - return used / float(free + used) * 1e4 - - def summary_stats(self, data): - rv = defaultdict(int) - for metric in METRICS['WEB_UPSTREAM_SUMMARY']: - for peer in self.peers.values(): - if peer.active: - metric = '_'.join(metric.split('.')) - rv[metric] += data['_'.join([peer.server, metric])] - return rv - - def get_data(self, raw_data): - data = dict() - peers = raw_data['upstreams'][self.real_name]['peers'] - data.update(self.peers_stats(peers)) - data.update(self.summary_stats(data)) - data['memory_usage'] = self.memory_usage(raw_data) - return dict(('_'.join([self.name, k]), v) for k, v in data.items()) - - -class WebUpstreamPeer: - def __init__(self, idx, server): - self.id = idx - self.real_server = server - self.server = BAD_SYMBOLS.sub('_', self.real_server) - self.active = True - - def get_data(self, raw): - data = dict(header_time=0, response_time=0, max_conns=0) - data.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER'])) - data['connections_usage'] = 0 if not data['max_conns'] else data['active'] / float(data['max_conns']) * 1e4 - data['state'] = int(data['state'] == 'up') - return dict(('_'.join([self.server, k]), v) for k, v in data.items()) - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = list(ORDER) - self.definitions = deepcopy(CHARTS) - self.objects = dict() - - def check(self): - if not self.url: - self.error('URL is not defined') - return None - - self._manager = self._build_manager() - if not self._manager: - return None - - raw_data = self._get_raw_data() - if not raw_data: - return None - - try: - response = loads(raw_data) - except ValueError: - return None - - for obj_cls in [WebZone, WebUpstream, Cache]: - for obj_name in response.get(obj_cls.key, list()): - obj = obj_cls(name=obj_name, response=response) - self.objects[obj.real_name] = obj - charts = obj_cls.charts(obj) - for chart in charts: - self.order.append(chart) - self.definitions[chart] = charts[chart] - - return bool(self.objects) - - def _get_data(self): - """ - Format data received from http request - :return: dict - """ - raw_data = self._get_raw_data() - if not raw_data: - return None - response = loads(raw_data) - - data = parse_json(response, METRICS['SERVER']) - data['ssl_memory_usage'] = data['slabs_SSL_pages_used'] / float(data['slabs_SSL_pages_free']) * 1e4 - - for obj in self.objects.values(): - if obj.real_name in response[obj.key]: - data.update(obj.get_data(response)) - - return data - - -def parse_json(raw_data, metrics): - data = dict() - for metric in metrics: - value = raw_data - metrics_list = metric.split('.') - try: - for m in metrics_list: - value = value[m] - except KeyError: - continue - data['_'.join(metrics_list)] = value 
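# Illustrative note (reviewer commentary, not part of the original module):
# given raw_data = {'requests': {'total': 90, 'current': 2}} and
# metrics = ['requests.total', 'requests.current'], the loop above walks
# each dotted path and produces {'requests_total': 90, 'requests_current': 2};
# paths absent from the payload are silently skipped by the KeyError guard
# rather than reported as errors.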
- return data diff --git a/python.d/nsd.chart.py b/python.d/nsd.chart.py deleted file mode 100644 index 499dfda2e..000000000 --- a/python.d/nsd.chart.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: NSD `nsd-control stats_noreset` netdata python.d module -# Author: <383c57 at gmail.com> - -import re - -from bases.FrameworkServices.ExecutableService import ExecutableService - -# default module values (can be overridden per job in `config`) -priority = 60000 -retries = 5 -update_every = 30 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode'] - -CHARTS = { - 'queries': { - 'options': [ - None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'], - 'lines': [ - ['num_queries', 'queries', 'incremental'],]}, - 'zones': { - 'options': [ - None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'], - 'lines': [ - ['zone_master', 'master', 'absolute'], - ['zone_slave', 'slave', 'absolute'],]}, - 'protocol': { - 'options': [ - None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'], - 'lines': [ - ['num_udp', 'udp', 'incremental'], - ['num_udp6', 'udp6', 'incremental'], - ['num_tcp', 'tcp', 'incremental'], - ['num_tcp6', 'tcp6', 'incremental'],]}, - 'type': { - 'options': [ - None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'], - 'lines': [ - ['num_type_A', 'A', 'incremental'], - ['num_type_NS', 'NS', 'incremental'], - ['num_type_CNAME', 'CNAME', 'incremental'], - ['num_type_SOA', 'SOA', 'incremental'], - ['num_type_PTR', 'PTR', 'incremental'], - ['num_type_HINFO', 'HINFO', 'incremental'], - ['num_type_MX', 'MX', 'incremental'], - ['num_type_NAPTR', 'NAPTR', 'incremental'], - ['num_type_TXT', 'TXT', 'incremental'], - ['num_type_AAAA', 'AAAA', 'incremental'], - ['num_type_SRV', 'SRV', 'incremental'], - ['num_type_TYPE255', 'ANY', 'incremental'],]}, - 'transfer': { - 'options': [ - None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'], - 'lines': [ - ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'], - ['num_type_TYPE252', 'AXFR', 'incremental'],]}, - 'rcode': { - 'options': [ - None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'], - 'lines': [ - ['num_rcode_NOERROR', 'NOERROR', 'incremental'], - ['num_rcode_FORMERR', 'FORMERR', 'incremental'], - ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'], - ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'], - ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'], - ['num_rcode_REFUSED', 'REFUSED', 'incremental'], - ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]} -} - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__( - self, configuration=configuration, name=name) - self.command = "nsd-control stats_noreset" - self.order = ORDER - self.definitions = CHARTS - self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)') - - def _get_data(self): - lines = self._get_raw_data() - if not lines: - return None - - r = self.regex - stats = dict((k.replace('.', '_'), int(v)) - for k, v in r.findall(''.join(lines))) - stats.setdefault('num_opcode_NOTIFY', 0) - stats.setdefault('num_type_TYPE252', 0) - stats.setdefault('num_type_TYPE255', 0) - return stats diff --git a/python.d/ntpd.chart.py b/python.d/ntpd.chart.py deleted file mode 100644 index 05209da87..000000000 --- a/python.d/ntpd.chart.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: ntpd netdata python.d module -# Author: Sven Mäder (rda0) 
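# Illustrative note on the nsd module above (reviewer commentary, not part
# of the original patch): `nsd-control stats_noreset` prints lines such as
#   num.queries=123
#   num.type.A=45
# and the regex r'([A-Za-z0-9.]+)=(\d+)' plus the '.' -> '_' rename turn
# them into {'num_queries': 123, 'num_type_A': 45}, matching the dimension
# ids declared in CHARTS.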
-# Author: Ilya Mashchenko (l2isbad) - -import struct -import re - -from bases.FrameworkServices.SocketService import SocketService - -# default module values -update_every = 1 -priority = 60000 -retries = 60 - -# NTP Control Message Protocol constants -MODE = 6 -HEADER_FORMAT = '!BBHHHHH' -HEADER_LEN = 12 -OPCODES = { - 'readstat': 1, - 'readvar': 2 -} - -# Maximal dimension precision -PRECISION = 1000000 - -# Static charts -ORDER = [ - 'sys_offset', - 'sys_jitter', - 'sys_frequency', - 'sys_wander', - 'sys_rootdelay', - 'sys_rootdisp', - 'sys_stratum', - 'sys_tc', - 'sys_precision', - 'peer_offset', - 'peer_delay', - 'peer_dispersion', - 'peer_jitter', - 'peer_xleave', - 'peer_rootdelay', - 'peer_rootdisp', - 'peer_stratum', - 'peer_hmode', - 'peer_pmode', - 'peer_hpoll', - 'peer_ppoll', - 'peer_precision' -] - -CHARTS = { - 'sys_offset': { - 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'], - 'lines': [ - ['offset', 'offset', 'absolute', 1, PRECISION] - ]}, - 'sys_jitter': { - 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'], - 'lines': [ - ['sys_jitter', 'system', 'absolute', 1, PRECISION], - ['clk_jitter', 'clock', 'absolute', 1, PRECISION] - ]}, - 'sys_frequency': { - 'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'], - 'lines': [ - ['frequency', 'frequency', 'absolute', 1, PRECISION] - ]}, - 'sys_wander': { - 'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'], - 'lines': [ - ['clk_wander', 'clock', 'absolute', 1, PRECISION] - ]}, - 'sys_rootdelay': { - 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system', - 'ntpd.sys_rootdelay', 'area'], - 'lines': [ - ['rootdelay', 'delay', 'absolute', 1, PRECISION] - ]}, - 'sys_rootdisp': { - 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system', - 'ntpd.sys_rootdisp', 'area'], - 'lines': [ - ['rootdisp', 'dispersion', 'absolute', 1, PRECISION] - ]}, - 'sys_stratum': { - 'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'], - 'lines': [ - ['stratum', 'stratum', 'absolute', 1, PRECISION] - ]}, - 'sys_tc': { - 'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'], - 'lines': [ - ['tc', 'current', 'absolute', 1, PRECISION], - ['mintc', 'minimum', 'absolute', 1, PRECISION] - ]}, - 'sys_precision': { - 'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'], - 'lines': [ - ['precision', 'precision', 'absolute', 1, PRECISION] - ]} -} - -PEER_CHARTS = { - 'peer_offset': { - 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'], - 'lines': [ - ]}, - 'peer_delay': { - 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'], - 'lines': [ - ]}, - 'peer_dispersion': { - 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'], - 'lines': [ - ]}, - 'peer_jitter': { - 'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'], - 'lines': [ - ]}, - 'peer_xleave': { - 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'], - 'lines': [ - ]}, - 'peer_rootdelay': { - 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers', - 'ntpd.peer_rootdelay', 'line'], - 'lines': [ - ]}, - 'peer_rootdisp': { - 'options': [None, 'Total root 
dispersion to the primary reference clock', 'ms', 'peers', - 'ntpd.peer_rootdisp', 'line'], - 'lines': [ - ]}, - 'peer_stratum': { - 'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'], - 'lines': [ - ]}, - 'peer_hmode': { - 'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'], - 'lines': [ - ]}, - 'peer_pmode': { - 'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'], - 'lines': [ - ]}, - 'peer_hpoll': { - 'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'], - 'lines': [ - ]}, - 'peer_ppoll': { - 'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'], - 'lines': [ - ]}, - 'peer_precision': { - 'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'], - 'lines': [ - ]} -} - - -class Base: - regex = re.compile(r'([a-z_]+)=((?:-)?[0-9]+(?:\.[0-9]+)?)') - - @staticmethod - def get_header(associd=0, operation='readvar'): - """ - Constructs the NTP Control Message header: - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |LI | VN |Mode |R|E|M| OpCode | Sequence Number | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Status | Association ID | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Offset | Count | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - """ - version = 2 - sequence = 1 - status = 0 - offset = 0 - count = 0 - header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), OPCODES[operation], - sequence, status, associd, offset, count) - return header - - -class System(Base): - def __init__(self): - self.request = self.get_header() - - def get_data(self, raw): - """ - Extracts key=value pairs with float/integer from ntp response packet data. - """ - data = dict() - for key, value in self.regex.findall(raw): - data[key] = float(value) * PRECISION - return data - - -class Peer(Base): - def __init__(self, idx, name): - self.id = idx - self.real_name = name - self.name = name.replace('.', '_') - self.request = self.get_header(self.id) - - def get_data(self, raw): - """ - Extracts key=value pairs with float/integer from ntp response packet data. - """ - data = dict() - for key, value in self.regex.findall(raw): - dimension = '_'.join([self.name, key]) - data[dimension] = float(value) * PRECISION - return data - - -class Service(SocketService): - def __init__(self, configuration=None, name=None): - SocketService.__init__(self, configuration=configuration, name=name) - self.order = list(ORDER) - self.definitions = dict(CHARTS) - - self.port = 'ntp' - self.dgram_socket = True - self.system = System() - self.peers = dict() - self.request = str() - self.retries = 0 - self.show_peers = self.configuration.get('show_peers', False) - self.peer_rescan = self.configuration.get('peer_rescan', 60) - - if self.show_peers: - self.definitions.update(PEER_CHARTS) - - def check(self): - """ - Checks if we can get valid systemvars. - If not, returns None to disable module. 
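        Illustrative note (reviewer commentary, not part of the original
        docstring): the probe sends a single 'readvar' request for
        association id 0 (the system variables) and keeps the module
        enabled only if the reply contains at least one parsable
        key=value pair, e.g. 'offset=-0.211,sys_jitter=0.183'.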
- """ - self._parse_config() - - peer_filter = self.configuration.get('peer_filter', r'127\..*') - try: - self.peer_filter = re.compile(r'^((0\.0\.0\.0)|({0}))$'.format(peer_filter)) - except re.error as error: - self.error('Compile pattern error (peer_filter) : {0}'.format(error)) - return None - - self.request = self.system.request - raw_systemvars = self._get_raw_data() - - if not self.system.get_data(raw_systemvars): - return None - - return True - - def get_data(self): - """ - Gets systemvars data on each update. - Gets peervars data for all peers on each update. - """ - data = dict() - - self.request = self.system.request - raw = self._get_raw_data() - if not raw: - return None - - data.update(self.system.get_data(raw)) - - if not self.show_peers: - return data - - if not self.peers or self.runs_counter % self.peer_rescan == 0 or self.retries > 8: - self.find_new_peers() - - for peer in self.peers.values(): - self.request = peer.request - peer_data = peer.get_data(self._get_raw_data()) - if peer_data: - data.update(peer_data) - else: - self.retries += 1 - - return data - - def find_new_peers(self): - new_peers = dict((p.real_name, p) for p in self.get_peers()) - if new_peers: - - peers_to_remove = set(self.peers) - set(new_peers) - peers_to_add = set(new_peers) - set(self.peers) - - for peer_name in peers_to_remove: - self.hide_old_peer_from_charts(self.peers[peer_name]) - del self.peers[peer_name] - - for peer_name in peers_to_add: - self.add_new_peer_to_charts(new_peers[peer_name]) - - self.peers.update(new_peers) - self.retries = 0 - - def add_new_peer_to_charts(self, peer): - for chart_id in set(self.charts.charts) & set(PEER_CHARTS): - dim_id = peer.name + chart_id[4:] - if dim_id not in self.charts[chart_id]: - self.charts[chart_id].add_dimension([dim_id, peer.real_name, 'absolute', 1, PRECISION]) - else: - self.charts[chart_id].hide_dimension(dim_id, reverse=True) - - def hide_old_peer_from_charts(self, peer): - for chart_id in set(self.charts.charts) & set(PEER_CHARTS): - dim_id = peer.name + chart_id[4:] - self.charts[chart_id].hide_dimension(dim_id) - - def get_peers(self): - self.request = Base.get_header(operation='readstat') - - raw_data = self._get_raw_data(raw=True) - if not raw_data: - return list() - - peer_ids = self.get_peer_ids(raw_data) - if not peer_ids: - return list() - - new_peers = list() - for peer_id in peer_ids: - self.request = Base.get_header(peer_id) - raw_peer_data = self._get_raw_data() - if not raw_peer_data: - continue - srcadr = re.search(r'(srcadr)=([^,]+)', raw_peer_data) - if not srcadr: - continue - srcadr = srcadr.group(2) - if self.peer_filter.search(srcadr): - continue - stratum = re.search(r'(stratum)=([^,]+)', raw_peer_data) - if not stratum: - continue - if int(stratum.group(2)) > 15: - continue - - new_peer = Peer(idx=peer_id, name=srcadr) - new_peers.append(new_peer) - return new_peers - - def get_peer_ids(self, res): - """ - Unpack the NTP Control Message header - Get data length from header - Get list of association ids returned in the readstat response - """ - - try: - count = struct.unpack(HEADER_FORMAT, res[:HEADER_LEN])[6] - except struct.error as error: - self.error('error unpacking header: {0}'.format(error)) - return None - if not count: - self.error('empty data field in NTP control packet') - return None - - data_end = HEADER_LEN + count - data = res[HEADER_LEN:data_end] - data_format = ''.join(['!', 'H' * int(count / 2)]) - try: - peer_ids = list(struct.unpack(data_format, data))[::2] - except struct.error as error: - 
self.error('error unpacking data: {0}'.format(error)) - return None - return peer_ids diff --git a/python.d/ovpn_status_log.chart.py b/python.d/ovpn_status_log.chart.py deleted file mode 100644 index 519c77fa3..000000000 --- a/python.d/ovpn_status_log.chart.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: openvpn status log netdata python.d module -# Author: l2isbad - -from re import compile as r_compile - -from bases.FrameworkServices.SimpleService import SimpleService - -priority = 60000 -retries = 60 -update_every = 10 - -ORDER = ['users', 'traffic'] -CHARTS = { - 'users': { - 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'], - 'lines': [ - ['users', None, 'absolute'], - ]}, - 'traffic': { - 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'], - 'lines': [ - ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10] - ]}, - -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.log_path = self.configuration.get('log_path') - self.regex = dict(tls=r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)'), - static_key=r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')) - - def check(self): - if not (self.log_path and isinstance(self.log_path, str)): - self.error("'log_path' is not defined") - return False - - data = self._get_raw_data() - if not data: - self.error('Make sure that the openvpn status log file exists and netdata has permission to read it') - return None - - found = None - for row in data: - if 'ROUTING' in row: - self.get_data = self.get_data_tls - found = True - break - elif 'STATISTICS' in row: - self.get_data = self.get_data_static_key - found = True - break - if found: - return True - self.error("Failed to parse openvpn log file") - return False - - def _get_raw_data(self): - """ - Open log file - :return: str - """ - - try: - with open(self.log_path) as log: - raw_data = log.readlines() or None - except OSError: - return None - else: - return raw_data - - def get_data_static_key(self): - """ - Parse openvpn-status log file. - """ - - raw_data = self._get_raw_data() - if not raw_data: - return None - - data = dict(bytes_in=0, bytes_out=0) - - for row in raw_data: - match = self.regex['static_key'].search(row) - if match: - match = match.groupdict() - if match['direction'] == 'read': - data['bytes_in'] += int(match['bytes']) - else: - data['bytes_out'] += int(match['bytes']) - - return data or None - - def get_data_tls(self): - """ - Parse openvpn-status log file.
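        Illustrative note (reviewer commentary, not part of the original
        docstring): after the comma-to-space rewrite below, a client line
        such as '10.0.0.2:51234 1520 2750' matches the 'tls' pattern, so
        each matching row counts one user and accumulates its
        bytes_in/bytes_out totals.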
- """ - - raw_data = self._get_raw_data() - if not raw_data: - return None - - data = dict(users=0, bytes_in=0, bytes_out=0) - for row in raw_data: - row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split()) - match = self.regex['tls'].search(row) - if match: - match = match.groupdict() - data['users'] += 1 - data['bytes_in'] += int(match['bytes_in']) - data['bytes_out'] += int(match['bytes_out']) - - return data or None diff --git a/python.d/phpfpm.chart.py b/python.d/phpfpm.chart.py deleted file mode 100644 index ea7a9a7e6..000000000 --- a/python.d/phpfpm.chart.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: PHP-FPM netdata python.d module -# Author: Pawel Krupa (paulfantom) - -import json -import re - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://localhost/status?full&json' -# }} - -# charts order (can be overridden if you want less charts, or different order) - -POOL_INFO = [ - ('active processes', 'active'), - ('max active processes', 'maxActive'), - ('idle processes', 'idle'), - ('accepted conn', 'requests'), - ('max children reached', 'reached'), - ('slow requests', 'slow') -] - -PER_PROCESS_INFO = [ - ('request duration', 'ReqDur'), - ('last request cpu', 'ReqCpu'), - ('last request memory', 'ReqMem') -] - - -def average(collection): - return sum(collection, 0.0) / max(len(collection), 1) - - -CALC = [ - ('min', min), - ('max', max), - ('avg', average) -] - -ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem'] - -CHARTS = { - 'connections': { - 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections', - 'line'], - 'lines': [ - ['active'], - ['maxActive', 'max active'], - ['idle'] - ]}, - 'requests': { - 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'], - 'lines': [ - ['requests', None, 'incremental'] - ]}, - 'performance': { - 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'], - 'lines': [ - ['reached', 'max children reached'], - ['slow', 'slow requests'] - ]}, - 'request_duration': { - 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration', - 'line'], - 'lines': [ - ['minReqDur', 'min', 'absolute', 1, 1000], - ['maxReqDur', 'max', 'absolute', 1, 1000], - ['avgReqDur', 'avg', 'absolute', 1, 1000] - ]}, - 'request_cpu': { - 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'], - 'lines': [ - ['minReqCpu', 'min'], - ['maxReqCpu', 'max'], - ['avgReqCpu', 'avg'] - ]}, - 'request_mem': { - 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'], - 'lines': [ - ['minReqMem', 'min', 'absolute', 1, 1024], - ['maxReqMem', 'max', 'absolute', 1, 1024], - ['avgReqMem', 'avg', 'absolute', 1, 1024] - ]} -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url', 'http://localhost/status?full&json') - self.order = ORDER - self.definitions = CHARTS - self.regex = 
re.compile(r'([a-z][a-z ]+): ([\d.]+)') - self.json = '&json' in self.url or '?json' in self.url - self.json_full = self.url.endswith(('?full&json', '?json&full')) - self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC - for metric, p_name in PER_PROCESS_INFO]) - - def _get_data(self): - """ - Format data received from http request - :return: dict - """ - raw = self._get_raw_data() - if not raw: - return None - - raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw) - - # Per Pool info: active connections, requests and performance charts - to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO) - - # Per Process Info: duration, cpu and memory charts (min, max, avg) - if self.json_full: - p_info = dict() - to_netdata.update(self.if_all_processes_running) # If all processes are in running state - # Metrics are always 0 if the process is not in Idle state because calculation is done - # when the request processing has terminated - for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']: - p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid']))) - - if p_info: - for new_name in PER_PROCESS_INFO: - for name, func in CALC: - to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k]) - - return to_netdata or None - - -def fetch_data_(raw_data, metrics_list, pid=''): - """ - :param raw_data: dict - :param metrics_list: list - :param pid: str - :return: dict - """ - result = dict() - for metric, new_name in metrics_list: - if metric in raw_data: - result[new_name + pid] = float(raw_data[metric]) - return result - - -def parse_raw_data_(is_json, regex, raw_data): - """ - :param is_json: bool - :param regex: compiled regular expr - :param raw_data: dict - :return: dict - """ - if is_json: - try: - return json.loads(raw_data) - except ValueError: - return dict() - else: - raw_data = ' '.join(raw_data.split()) - return dict(regex.findall(raw_data)) diff --git a/python.d/portcheck.chart.py b/python.d/portcheck.chart.py deleted file mode 100644 index 0a312210d..000000000 --- a/python.d/portcheck.chart.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: simple port check netdata python.d module -# Original Author: ccremer (github.com/ccremer) - -import socket - -try: - from time import monotonic as time -except ImportError: - from time import time - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -priority = 60000 -retries = 60 - -PORT_LATENCY = 'connect' - -PORT_SUCCESS = 'success' -PORT_TIMEOUT = 'timeout' -PORT_FAILED = 'no_connection' - -ORDER = ['latency', 'status'] - -CHARTS = { - 'latency': { - 'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'], - 'lines': [ - [PORT_LATENCY, 'connect', 'absolute', 100, 1000] - ] - }, - 'status': { - 'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'], - 'lines': [ - [PORT_SUCCESS, 'success', 'absolute'], - [PORT_TIMEOUT, 'timeout', 'absolute'], - [PORT_FAILED, 'no connection', 'absolute'] - ]} -} - - -# Not deriving from SocketService, too much is different -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.host = self.configuration.get('host') - self.port = 
self.configuration.get('port') - self.timeout = self.configuration.get('timeout', 1) - - def check(self): - """ - Parse configuration, check if configuration is available, and dynamically create chart lines data - :return: boolean - """ - if self.host is None or self.port is None: - self.error("Host or port missing") - return False - if not isinstance(self.port, int): - self.error('"port" is not an integer. Specify a numerical value, not service name.') - return False - - self.debug("Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s".format( - host=self.host, port=self.port, update=self.update_every, timeout=self.timeout - )) - # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from - # the beginning) - return True - - def _get_data(self): - """ - Get data from socket - :return: dict - """ - data = dict() - data[PORT_SUCCESS] = 0 - data[PORT_TIMEOUT] = 0 - data[PORT_FAILED] = 0 - - success = False - try: - for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM): - # use first working socket - sock = self._create_socket(socket_config) - if sock is not None: - self._connect2socket(data, socket_config, sock) - self._disconnect(sock) - success = True - break - except socket.gaierror as error: - self.debug('Failed to connect to "{host}:{port}", error: {error}'.format( - host=self.host, port=self.port, error=error - )) - - # We could not connect - if not success: - data[PORT_FAILED] = 1 - - return data - - def _create_socket(self, socket_config): - af, sock_type, proto, canon_name, sa = socket_config - try: - self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1])) - sock = socket.socket(af, sock_type, proto) - sock.settimeout(self.timeout) - return sock - except socket.error as error: - self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format( - address=sa[0], port=sa[1], error=error - )) - return None - - def _connect2socket(self, data, socket_config, sock): - """ - Connect to a socket, passing the result of getaddrinfo() - :return: dict - """ - - af, sock_type, proto, canon_name, sa = socket_config - port = str(sa[1]) - try: - self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port)) - start = time() - sock.connect(sa) - diff = time() - start - self.debug('Connected to "{address}", port {port}, latency {latency}'.format( - address=sa[0], port=port, latency=diff - )) - # we will set it at least 0.1 ms. 
0.0 would mean failed connection (handy for 3rd-party-APIs) - data[PORT_LATENCY] = max(round(diff * 10000), 0) - data[PORT_SUCCESS] = 1 - - except socket.timeout as error: - self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format( - address=sa[0], port=port, error=error - )) - data[PORT_TIMEOUT] = 1 - - except socket.error as error: - self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format( - address=sa[0], port=port, error=error - )) - data[PORT_FAILED] = 1 - - def _disconnect(self, sock): - """ - Close socket connection - :return: - """ - if sock is not None: - try: - self.debug('Closing socket') - sock.shutdown(2) # 0 - read, 1 - write, 2 - all - sock.close() - except socket.error: - pass diff --git a/python.d/postfix.chart.py b/python.d/postfix.chart.py deleted file mode 100644 index a2129e4be..000000000 --- a/python.d/postfix.chart.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: postfix netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.ExecutableService import ExecutableService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['qemails', 'qsize'] - -CHARTS = { - 'qemails': { - 'options': [None, "Postfix Queue Emails", "emails", 'queue', 'postfix.qemails', 'line'], - 'lines': [ - ['emails', None, 'absolute'] - ]}, - 'qsize': { - 'options': [None, "Postfix Queue Emails Size", "emails size in KB", 'queue', 'postfix.qsize', 'area'], - 'lines': [ - ["size", None, 'absolute'] - ]} -} - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__(self, configuration=configuration, name=name) - self.command = "postqueue -p" - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Format data received from shell command - :return: dict - """ - try: - raw = self._get_raw_data()[-1].split(' ') - if raw[0] == 'Mail' and raw[1] == 'queue': - return {'emails': 0, - 'size': 0} - - return {'emails': raw[4], - 'size': raw[1]} - except (ValueError, AttributeError): - return None diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py deleted file mode 100644 index 0522b1938..000000000 --- a/python.d/postgres.chart.py +++ /dev/null @@ -1,666 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: example netdata python.d module -# Authors: facetoe, dangtranhoang - -from copy import deepcopy - -try: - import psycopg2 - from psycopg2 import extensions - from psycopg2.extras import DictCursor - from psycopg2 import OperationalError - PSYCOPG2 = True -except ImportError: - PSYCOPG2 = False - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values -update_every = 1 -priority = 60000 -retries = 60 - -METRICS = dict( - DATABASE=['connections', - 'xact_commit', - 'xact_rollback', - 'blks_read', - 'blks_hit', - 'tup_returned', - 'tup_fetched', - 'tup_inserted', - 'tup_updated', - 'tup_deleted', - 'conflicts', - 'temp_files', - 'temp_bytes', - 'size'], - BACKENDS=['backends_active', - 'backends_idle'], - INDEX_STATS=['index_count', - 'index_size'], - TABLE_STATS=['table_size', - 'table_count'], - WAL=['written_wal', - 'recycled_wal', - 'total_wal'], - WAL_WRITES=['wal_writes'], - ARCHIVE=['ready_count', - 'done_count', - 'file_count'], - BGWRITER=['checkpoint_scheduled', - 'checkpoint_requested', - 
'buffers_checkpoint', - 'buffers_clean', - 'maxwritten_clean', - 'buffers_backend', - 'buffers_alloc', - 'buffers_backend_fsync'], - LOCKS=['ExclusiveLock', - 'RowShareLock', - 'SIReadLock', - 'ShareUpdateExclusiveLock', - 'AccessExclusiveLock', - 'AccessShareLock', - 'ShareRowExclusiveLock', - 'ShareLock', - 'RowExclusiveLock'], - AUTOVACUUM=['analyze', - 'vacuum_analyze', - 'vacuum', - 'vacuum_freeze', - 'brin_summarize'], - STANDBY_DELTA=['sent_delta', - 'write_delta', - 'flush_delta', - 'replay_delta'], - REPSLOT_FILES=['replslot_wal_keep', - 'replslot_files'] - -) - -QUERIES = dict( - WAL=""" -SELECT - count(*) as total_wal, - count(*) FILTER (WHERE type = 'recycled') AS recycled_wal, - count(*) FILTER (WHERE type = 'written') AS written_wal -FROM - (SELECT wal.name, - pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ), - CASE WHEN wal.name > pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ) THEN 'recycled' - ELSE 'written' - END AS type - FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name) - WHERE name ~ '^[0-9A-F]{{24}}$' - ORDER BY (pg_stat_file('pg_{0}/'||name)).modification, wal.name DESC) sub; -""", - ARCHIVE=""" -SELECT - CAST(COUNT(*) AS INT) AS file_count, - CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count, - CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count -FROM - pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file); -""", - BACKENDS=""" -SELECT - count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active, - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle -FROM pg_stat_activity; -""", - TABLE_STATS=""" -SELECT - ((sum(relpages) * 8) * 1024) AS table_size, - count(1) AS table_count -FROM pg_class -WHERE relkind IN ('r', 't'); -""", - INDEX_STATS=""" -SELECT - ((sum(relpages) * 8) * 1024) AS index_size, - count(1) AS index_count -FROM pg_class -WHERE relkind = 'i';""", - DATABASE=""" -SELECT - datname AS database_name, - numbackends AS connections, - xact_commit AS xact_commit, - xact_rollback AS xact_rollback, - blks_read AS blks_read, - blks_hit AS blks_hit, - tup_returned AS tup_returned, - tup_fetched AS tup_fetched, - tup_inserted AS tup_inserted, - tup_updated AS tup_updated, - tup_deleted AS tup_deleted, - conflicts AS conflicts, - pg_database_size(datname) AS size, - temp_files AS temp_files, - temp_bytes AS temp_bytes -FROM pg_stat_database -WHERE datname IN %(databases)s -; -""", - BGWRITER=""" -SELECT - checkpoints_timed AS checkpoint_scheduled, - checkpoints_req AS checkpoint_requested, - buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint, - buffers_clean * current_setting('block_size')::numeric buffers_clean, - maxwritten_clean, - buffers_backend * current_setting('block_size')::numeric buffers_backend, - buffers_alloc * current_setting('block_size')::numeric buffers_alloc, - buffers_backend_fsync -FROM pg_stat_bgwriter; -""", - LOCKS=""" -SELECT - pg_database.datname as database_name, - mode, - count(mode) AS locks_count -FROM pg_locks - INNER JOIN pg_database ON pg_database.oid = pg_locks.database -GROUP BY datname, mode -ORDER BY datname, mode; -""", - FIND_DATABASES=""" -SELECT datname -FROM pg_stat_database -WHERE has_database_privilege((SELECT current_user), datname, 'connect') -AND NOT datname ~* '^template\d+'; -""", - FIND_STANDBY=""" -SELECT application_name -FROM pg_stat_replication 
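-- Illustrative note (reviewer commentary, not part of the original query):
-- this discovery query returns one application_name per connected standby
-- (e.g. 'standby1'); each discovered name is later given its own
-- replication-delta chart, fed by the STANDBY_DELTA query below.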
-WHERE application_name IS NOT NULL -GROUP BY application_name; -""", - FIND_REPLICATION_SLOT=""" -SELECT slot_name -FROM pg_replication_slots; -""", - STANDBY_DELTA=""" -SELECT application_name, - pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , sent_{1}) AS sent_delta, - pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , write_{1}) AS write_delta, - pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , flush_{1}) AS flush_delta, - pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , replay_{1}) AS replay_delta -FROM pg_stat_replication -WHERE application_name IS NOT NULL; -""", - REPSLOT_FILES=""" -WITH wal_size AS ( - SELECT current_setting('wal_block_size')::INT * setting::INT AS val - FROM pg_settings - WHERE name = 'wal_segment_size' -) -SELECT slot_name, slot_type, replslot_wal_keep, count(slot_file) AS replslot_files -FROM ( - SELECT slot.slot_name, CASE WHEN slot_file <> 'state' THEN 1 END AS slot_file , slot_type, - COALESCE (floor((pg_wal_lsn_diff (pg_current_wal_lsn (), - slot.restart_lsn) - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)), - 0) AS replslot_wal_keep - FROM pg_replication_slots slot - LEFT JOIN ( - SELECT slot2.slot_name, - pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file - FROM pg_replication_slots slot2 - ) files (slot_name, slot_file) - ON slot.slot_name = files.slot_name - CROSS JOIN wal_size s) AS d -GROUP BY slot_name, slot_type, replslot_wal_keep; -""", - IF_SUPERUSER=""" -SELECT current_setting('is_superuser') = 'on' AS is_superuser; -""", - DETECT_SERVER_VERSION=""" -SHOW server_version_num; -""", - AUTOVACUUM=""" -SELECT - count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze, - count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze, - count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%' - AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%' - AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum, - count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze, - count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize -FROM pg_stat_activity -WHERE query NOT LIKE '%%pg_stat_activity%%'; -""", - DIFF_LSN=""" -SELECT pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END, '0/0') as wal_writes ; -""" - -) - - -QUERY_STATS = { - QUERIES['DATABASE']: METRICS['DATABASE'], - QUERIES['BACKENDS']: METRICS['BACKENDS'], - QUERIES['LOCKS']: METRICS['LOCKS'] -} - -ORDER = ['db_stat_temp_files', 'db_stat_temp_bytes', 'db_stat_blks', 'db_stat_tuple_returned', 'db_stat_tuple_write', - 'db_stat_transactions','db_stat_connections', 'database_size', 'backend_process', 'index_count', 'index_size', - 'table_count', 'table_size', 'wal', 'wal_writes', 'archive_wal', 'checkpointer', 'stat_bgwriter_alloc', 'stat_bgwriter_checkpoint', - 'stat_bgwriter_backend', 'stat_bgwriter_backend_fsync' , 'stat_bgwriter_bgwriter', 'stat_bgwriter_maxwritten', - 'replication_slot', 'standby_delta', 'autovacuum'] - -CHARTS = { - 'db_stat_transactions': { - 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions', - 'line'], - 'lines': [ - ['xact_commit', 'committed', 'incremental'], - ['xact_rollback', 'rolled back', 
'incremental'] - ]}, - 'db_stat_connections': { - 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections', - 'line'], - 'lines': [ - ['connections', 'connections', 'absolute'] - ]}, - 'db_stat_blks': { - 'options': [None, 'Disk blocks reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'], - 'lines': [ - ['blks_read', 'disk', 'incremental'], - ['blks_hit', 'cache', 'incremental'] - ]}, - 'db_stat_tuple_returned': { - 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned', - 'line'], - 'lines': [ - ['tup_returned', 'sequential', 'incremental'], - ['tup_fetched', 'bitmap', 'incremental'] - ]}, - 'db_stat_tuple_write': { - 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'], - 'lines': [ - ['tup_inserted', 'inserted', 'incremental'], - ['tup_updated', 'updated', 'incremental'], - ['tup_deleted', 'deleted', 'incremental'], - ['conflicts', 'conflicts', 'incremental'] - ]}, - 'db_stat_temp_bytes': { - 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'line'], - 'lines': [ - ['temp_bytes', 'size', 'incremental', 1, 1024] - ]}, - 'db_stat_temp_files': { - 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files', 'line'], - 'lines': [ - ['temp_files', 'files', 'incremental'] - ]}, - 'database_size': { - 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'], - 'lines': [ - ]}, - 'backend_process': { - 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process', - 'line'], - 'lines': [ - ['backends_active', 'active', 'absolute'], - ['backends_idle', 'idle', 'absolute'] - ]}, - 'index_count': { - 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'], - 'lines': [ - ['index_count', 'total', 'absolute'] - ]}, - 'index_size': { - 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'], - 'lines': [ - ['index_size', 'size', 'absolute', 1, 1024 * 1024] - ]}, - 'table_count': { - 'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'], - 'lines': [ - ['table_count', 'total', 'absolute'] - ]}, - 'table_size': { - 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'], - 'lines': [ - ['table_size', 'size', 'absolute', 1, 1024 * 1024] - ]}, - 'wal': { - 'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'], - 'lines': [ - ['written_wal', 'written', 'absolute'], - ['recycled_wal', 'recycled', 'absolute'], - ['total_wal', 'total', 'absolute'] - ]}, - 'wal_writes': { - 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'], - 'lines': [ - ['wal_writes', 'writes', 'incremental', 1, 1024] - ]}, - 'archive_wal': { - 'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'], - 'lines': [ - ['file_count', 'total', 'incremental'], - ['ready_count', 'ready', 'incremental'], - ['done_count', 'done', 'incremental'] - ]}, - 'checkpointer': { - 'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'], - 'lines': [ - ['checkpoint_scheduled', 'scheduled', 'incremental'], - ['checkpoint_requested', 'requested', 'incremental'] - ]}, - 'stat_bgwriter_alloc': { - 'options': [None, 'Buffers allocated', 
'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'], - 'lines': [ - ['buffers_alloc', 'alloc', 'incremental', 1, 1024] - ]}, - 'stat_bgwriter_checkpoint': { - 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_checkpoint', 'line'], - 'lines': [ - ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024] - ]}, - 'stat_bgwriter_backend': { - 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_backend', 'line'], - 'lines': [ - ['buffers_backend', 'backend', 'incremental', 1, 1024] - ]}, - 'stat_bgwriter_backend_fsync': { - 'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'], - 'lines': [ - ['buffers_backend_fsync', 'backend fsync', 'incremental'] - ]}, - 'stat_bgwriter_bgwriter': { - 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', 'postgres.bgwriter_bgwriter', 'line'], - 'lines': [ - ['buffers_clean', 'clean', 'incremental', 1, 1024] - ]}, - 'stat_bgwriter_maxwritten': { - 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten', 'line'], - 'lines': [ - ['maxwritten_clean', 'maxwritten', 'incremental'] - ]}, - 'autovacuum': { - 'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'], - 'lines': [ - ['analyze', 'analyze', 'absolute'], - ['vacuum', 'vacuum', 'absolute'], - ['vacuum_analyze', 'vacuum analyze', 'absolute'], - ['vacuum_freeze', 'vacuum freeze', 'absolute'], - ['brin_summarize', 'brin summarize', 'absolute'] - ]}, - 'standby_delta': { - 'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'], - 'lines': [ - ['sent_delta', 'sent delta', 'absolute', 1, 1024], - ['write_delta', 'write delta', 'absolute', 1, 1024], - ['flush_delta', 'flush delta', 'absolute', 1, 1024], - ['replay_delta', 'replay delta', 'absolute', 1, 1024] - ]}, - 'replication_slot': { - 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'], - 'lines': [ - ['replslot_wal_keep', 'wal kept', 'absolute'], - ['replslot_files', 'pg_replslot files', 'absolute'] - ]} -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER[:] - self.definitions = deepcopy(CHARTS) - self.table_stats = configuration.pop('table_stats', False) - self.index_stats = configuration.pop('index_stats', False) - self.database_poll = configuration.pop('database_poll', None) - self.configuration = configuration - self.connection = False - self.server_version = None - self.data = dict() - self.locks_zeroed = dict() - self.databases = list() - self.secondaries = list() - self.replication_slots = list() - self.queries = QUERY_STATS.copy() - - def _connect(self): - params = dict(user='postgres', - database=None, - password=None, - host=None, - port=5432) - params.update(self.configuration) - - if not self.connection: - try: - self.connection = psycopg2.connect(**params) - self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT) - self.connection.set_session(readonly=True) - except OperationalError as error: - return False, str(error) - return True, True - - def check(self): - if not PSYCOPG2: - self.error('\'python-psycopg2\' module is needed to use postgres.chart.py') - return False - result, error =
self._connect() - if not result: - conf = dict((k, (lambda k, v: v if k != 'password' else '*****')(k, v)) - for k, v in self.configuration.items()) - self.error('Failed to connect to %s. Error: %s' % (str(conf), error)) - return False - try: - cursor = self.connection.cursor() - self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES']) - is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER']) - self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY']) - self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION']) - if self.server_version >= 94000: - self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT']) - cursor.close() - - if self.database_poll and isinstance(self.database_poll, str): - self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()]\ - or self.databases - - self.locks_zeroed = populate_lock_types(self.databases) - self.add_additional_queries_(is_superuser) - self.create_dynamic_charts_() - return True - except Exception as error: - self.error(str(error)) - return False - - def add_additional_queries_(self, is_superuser): - - if self.server_version >= 100000: - wal = 'wal' - lsn = 'lsn' - else: - wal = 'xlog' - lsn = 'location' - self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER'] - self.queries[QUERIES['DIFF_LSN'].format(wal,lsn)] = METRICS['WAL_WRITES'] - self.queries[QUERIES['STANDBY_DELTA'].format(wal,lsn)] = METRICS['STANDBY_DELTA'] - - if self.index_stats: - self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS'] - if self.table_stats: - self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS'] - if is_superuser: - self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE'] - if self.server_version >= 90400: - self.queries[QUERIES['WAL'].format(wal,lsn)] = METRICS['WAL'] - if self.server_version >= 100000: - self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES'] - if self.server_version >= 90400: - self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM'] - - def create_dynamic_charts_(self): - - for database_name in self.databases[::-1]: - self.definitions['database_size']['lines'].append([database_name + '_size', - database_name, 'absolute', 1, 1024 * 1024]) - for chart_name in [name for name in self.order if name.startswith('db_stat')]: - add_database_stat_chart_(order=self.order, definitions=self.definitions, - name=chart_name, database_name=database_name) - - add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name) - - for application_name in self.secondaries[::-1]: - add_replication_delta_chart_(order=self.order, definitions=self.definitions, - name='standby_delta', application_name=application_name) - - for slot_name in self.replication_slots[::-1]: - add_replication_slot_chart_(order=self.order, definitions=self.definitions, - name='replication_slot', slot_name=slot_name) - - - - def _get_data(self): - result, error = self._connect() - if result: - cursor = self.connection.cursor(cursor_factory=DictCursor) - try: - self.data.update(self.locks_zeroed) - for query, metrics in self.queries.items(): - self.query_stats_(cursor, query, metrics) - - except OperationalError: - self.connection = False - cursor.close() - return None - else: - cursor.close() - return self.data - else: - return None - - def query_stats_(self, cursor, query, metrics): - cursor.execute(query, dict(databases=tuple(self.databases))) - for row in cursor: - for metric in metrics: - 
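# Illustrative note (reviewer commentary, not part of the original loop):
# dimension ids are namespaced by the row's discriminating column, so a
# pg_stat_database row for 'mydb' with xact_commit=42 is stored as
# self.data['mydb_xact_commit'] = 42, while rows without such a column
# keep the bare metric name.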
if 'database_name' in row: - dimension_id = '_'.join([row['database_name'], metric]) - elif 'application_name' in row: - dimension_id = '_'.join([row['application_name'], metric]) - elif 'slot_name' in row: - dimension_id = '_'.join([row['slot_name'], metric]) - else: - dimension_id = metric - if metric in row: - self.data[dimension_id] = int(row[metric]) - elif 'locks_count' in row: - self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0 - - -def discover_databases_(cursor, query): - cursor.execute(query) - result = list() - for db in [database[0] for database in cursor]: - if db not in result: - result.append(db) - return result - -def discover_secondaries_(cursor, query): - cursor.execute(query) - result = list() - for sc in [standby[0] for standby in cursor]: - if sc not in result: - result.append(sc) - return result - -def discover_replication_slots_(cursor, query): - cursor.execute(query) - result = list() - for slot in [replication_slot[0] for replication_slot in cursor]: - if slot not in result: - result.append(slot) - return result - -def check_if_superuser_(cursor, query): - cursor.execute(query) - return cursor.fetchone()[0] - -def detect_server_version(cursor, query): - cursor.execute(query) - return int(cursor.fetchone()[0]) - -def populate_lock_types(databases): - result = dict() - for database in databases: - for lock_type in METRICS['LOCKS']: - key = '_'.join([database, lock_type]) - result[key] = 0 - - return result - - -def add_database_lock_chart_(order, definitions, database_name): - def create_lines(database): - result = list() - for lock_type in METRICS['LOCKS']: - dimension_id = '_'.join([database, lock_type]) - result.append([dimension_id, lock_type, 'absolute']) - return result - - chart_name = database_name + '_locks' - order.insert(-1, chart_name) - definitions[chart_name] = { - 'options': - [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'], - 'lines': create_lines(database_name) - } - - -def add_database_stat_chart_(order, definitions, name, database_name): - def create_lines(database, lines): - result = list() - for line in lines: - new_line = ['_'.join([database, line[0]])] + line[1:] - result.append(new_line) - return result - - chart_template = CHARTS[name] - chart_name = '_'.join([database_name, name]) - order.insert(0, chart_name) - name, title, units, family, context, chart_type = chart_template['options'] - definitions[chart_name] = { - 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type], - 'lines': create_lines(database_name, chart_template['lines'])} - -def add_replication_delta_chart_(order, definitions, name, application_name): - def create_lines(standby, lines): - result = list() - for line in lines: - new_line = ['_'.join([standby, line[0]])] + line[1:] - result.append(new_line) - return result - - chart_template = CHARTS[name] - chart_name = '_'.join([application_name, name]) - position = order.index('database_size') - order.insert(position, chart_name) - name, title, units, family, context, chart_type = chart_template['options'] - definitions[chart_name] = { - 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type], - 'lines': create_lines(application_name, chart_template['lines'])} - -def add_replication_slot_chart_(order, definitions, name, slot_name): - def create_lines(slot, lines): - result = list() - for line in lines: - new_line = ['_'.join([slot, line[0]])] + line[1:] - 
result.append(new_line) - return result - - chart_template = CHARTS[name] - chart_name = '_'.join([slot_name, name]) - position = order.index('database_size') - order.insert(position, chart_name) - name, title, units, family, context, chart_type = chart_template['options'] - definitions[chart_name] = { - 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type], - 'lines': create_lines(slot_name, chart_template['lines'])} diff --git a/python.d/powerdns.chart.py b/python.d/powerdns.chart.py deleted file mode 100644 index a8d2f399c..000000000 --- a/python.d/powerdns.chart.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: powerdns netdata python.d module -# Author: l2isbad - -from json import loads - -from bases.FrameworkServices.UrlService import UrlService - -priority = 60000 -retries = 60 -# update_every = 3 - -ORDER = ['questions', 'cache_usage', 'cache_size', 'latency'] -CHARTS = { - 'questions': { - 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'], - 'lines': [ - ['udp-queries', None, 'incremental'], - ['udp-answers', None, 'incremental'], - ['tcp-queries', None, 'incremental'], - ['tcp-answers', None, 'incremental'] - ]}, - 'cache_usage': { - 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'], - 'lines': [ - ['query-cache-hit', None, 'incremental'], - ['query-cache-miss', None, 'incremental'], - ['packetcache-hit', 'packet-cache-hit', 'incremental'], - ['packetcache-miss', 'packet-cache-miss', 'incremental'] - ]}, - 'cache_size': { - 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'], - 'lines': [ - ['query-cache-size', None, 'absolute'], - ['packetcache-size', 'packet-cache-size', 'absolute'], - ['key-cache-size', None, 'absolute'], - ['meta-cache-size', None, 'absolute'] - ]}, - 'latency': { - 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'], - 'lines': [ - ['latency', None, 'absolute'] - ]} - -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - data = self._get_raw_data() - if not data: - return None - return dict((d['name'], d['value']) for d in loads(data)) diff --git a/python.d/python-modules-installer.sh b/python.d/python-modules-installer.sh deleted file mode 100644 index cda3c6662..000000000 --- a/python.d/python-modules-installer.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env bash - -umask 022 - -dir="/usr/local/libexec/netdata/python.d" -target="${dir}/python_modules" -pv="$(python -V 2>&1)" - -# parse parameters -while [ ! -z "${1}" ] -do - case "${1}" in - -p|--python) - pv="Python ${2}" - shift 2 - ;; - - -d|--dir) - dir="${2}" - target="${dir}/python_modules" - echo >&2 "Will install python modules in: '${target}'" - shift 2 - ;; - - -s|--system) - target= - echo >&2 "Will install python modules system-wide" - shift - ;; - - -h|--help) - echo "${0} [--dir netdata-python.d-path] [--system]" - echo "Please make sure you have installed packages: python-pip (or python3-pip) python-dev libyaml-dev libmysqlclient-dev" - exit 0 - ;; - - *) - echo >&2 "Cannot understand parameter: ${1}" - exit 1 - ;; - esac -done - - -if [ ! -z "${target}" -a ! 
-d "${target}" ] -then - echo >&2 "Cannot find directory: '${target}'" - exit 1 -fi - -if [[ "${pv}" =~ ^Python\ 2.* ]] -then - pv=2 - pip="$(which pip2 2>/dev/null)" -elif [[ "${pv}" =~ ^Python\ 3.* ]] -then - pv=3 - pip="$(which pip3 2>/dev/null)" -else - echo >&2 "Cannot detect python version. Is python installed?" - exit 1 -fi - -[ -z "${pip}" ] && pip="$(which pip 2>/dev/null)" -if [ -z "${pip}" ] -then - echo >&2 "pip command is required to install python v${pv} modules." - [ "${pv}" = "2" ] && echo >&2 "Please install python-pip." - [ "${pv}" = "3" ] && echo >&2 "Please install python3-pip." - exit 1 -fi - -echo >&2 "Working for python version ${pv} (pip command: '${pip}')" -echo >&2 "Installing netdata python modules in: '${target}'" - -run() { - printf "Running command:\n# " - printf "%q " "${@}" - printf "\n" - "${@}" -} - -# try to install all the python modules given as parameters -# until the first that succeeds -failed="" -installed="" -errors=0 -pip_install() { - local ret x msg="${1}" - shift - - echo >&2 - echo >&2 - echo >&2 "Installing one of: ${*}" - - for x in "${@}" - do - echo >&2 - echo >&2 "attempting to install: ${x}" - if [ ! -z "${target}" ] - then - run "${pip}" install --target "${target}" "${x}" - ret=$? - else - run "${pip}" install "${x}" - ret=$? - fi - [ ${ret} -eq 0 ] && break - echo >&2 "failed to install: ${x}. ${msg}" - done - - if [ ${ret} -ne 0 ] - then - echo >&2 - echo >&2 - echo >&2 "FAILED: could not install any of: ${*}. ${msg}" - echo >&2 - echo >&2 - errors=$(( errors + 1 )) - failed="${failed}|${*}" - else - echo >&2 - echo >&2 - echo >&2 "SUCCESS: we have: ${x}" - echo >&2 - echo >&2 - installed="${installed} ${x}" - fi - return ${ret} -} - -if [ "${pv}" = "2" ] -then - pip_install "is libyaml-dev and python-dev installed?" pyyaml - pip_install "is libmysqlclient-dev and python-dev installed?" mysqlclient mysql-python pymysql -else - pip_install "is libyaml-dev and python-dev installed?" pyyaml - pip_install "is libmysqlclient-dev and python-dev installed?" mysql-python mysqlclient pymysql -fi - -echo >&2 -echo >&2 -if [ ${errors} -ne 0 ] -then - echo >&2 "Failed to install ${errors} modules: ${failed}" - if [ ! -z "${target}" ] - then - echo >&2 - echo >&2 "If you are getting errors during cleanup from pip, there is a known bug" - echo >&2 "in certain versions of pip that prevents installing packages local to an" - echo >&2 "application. To install them system-wide please run:" - echo >&2 "$0 --system" - fi - exit 1 -else - echo >&2 "All done. 
We have: ${installed}" - exit 0 -fi diff --git a/python.d/python_modules/__init__.py b/python.d/python_modules/__init__.py deleted file mode 100644 index 8d1c8b69c..000000000 --- a/python.d/python_modules/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py deleted file mode 100644 index 7c6e1d2f2..000000000 --- a/python.d/python_modules/base.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: backward compatibility with old version - -from bases.FrameworkServices.SimpleService import SimpleService -from bases.FrameworkServices.UrlService import UrlService -from bases.FrameworkServices.SocketService import SocketService -from bases.FrameworkServices.LogService import LogService -from bases.FrameworkServices.ExecutableService import ExecutableService -from bases.FrameworkServices.MySQLService import MySQLService diff --git a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py b/python.d/python_modules/bases/FrameworkServices/ExecutableService.py deleted file mode 100644 index a71f2bfd2..000000000 --- a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) -# Author: Ilya Mashchenko (l2isbad) - -import os - -from subprocess import Popen, PIPE - -from bases.FrameworkServices.SimpleService import SimpleService -from bases.collection import find_binary - - -class ExecutableService(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.command = None - - def _get_raw_data(self, stderr=False): - """ - Get raw data from executed command - :return: <list> - """ - try: - p = Popen(self.command, stdout=PIPE, stderr=PIPE) - except Exception as error: - self.error('Executing command {command} resulted in error: {error}'.format(command=self.command, - error=error)) - return None - data = list() - std = p.stderr if stderr else p.stdout - for line in std: - try: - data.append(line.decode('utf-8')) - except TypeError: - continue - - return data or None - - def check(self): - """ - Parse basic configuration, check if command is whitelisted and is returning values - :return: <boolean> - """ - # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified) - if 'command' in self.configuration: - self.command = self.configuration['command'] - - # "command" must be: 1.not None 2. type <str> - if not (self.command and isinstance(self.command, str)): - self.error('Command is not defined or command type is not <str>') - return False - - # Split "command" into: 1. command <str> 2. options <list> - command, opts = self.command.split()[0], self.command.split()[1:] - - # Check for "bad" symbols in options. No pipes, redirects etc. 
- opts_list = ['&', '|', ';', '>', '<'] - bad_opts = set(''.join(opts)) & set(opts_list) - if bad_opts: - self.error("Bad command argument(s): {opts}".format(opts=bad_opts)) - return False - - # Find absolute path ('echo' => '/bin/echo') - if '/' not in command: - command = find_binary(command) - if not command: - self.error('Can\'t locate "{command}" binary'.format(command=self.command)) - return False - # Check if binary exist and executable - else: - if not os.access(command, os.X_OK): - self.error('"{binary}" is not executable'.format(binary=command)) - return False - - self.command = [command] + opts if opts else [command] - - try: - data = self._get_data() - except Exception as error: - self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command, - error=error)) - return False - - if isinstance(data, dict) and data: - return True - self.error('Command "{command}" returned no data'.format(command=self.command)) - return False diff --git a/python.d/python_modules/bases/FrameworkServices/LogService.py b/python.d/python_modules/bases/FrameworkServices/LogService.py deleted file mode 100644 index 45daa2446..000000000 --- a/python.d/python_modules/bases/FrameworkServices/LogService.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) - -from glob import glob -import os - -from bases.FrameworkServices.SimpleService import SimpleService - - -class LogService(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.log_path = self.configuration.get('path') - self.__glob_path = self.log_path - self._last_position = 0 - self.__re_find = dict(current=0, run=0, maximum=60) - - def _get_raw_data(self): - """ - Get log lines since last poll - :return: list - """ - lines = list() - try: - if self.__re_find['current'] == self.__re_find['run']: - self._find_recent_log_file() - size = os.path.getsize(self.log_path) - if size == self._last_position: - self.__re_find['current'] += 1 - return list() # return empty list if nothing has changed - elif size < self._last_position: - self._last_position = 0 # read from beginning if file has shrunk - - with open(self.log_path) as fp: - fp.seek(self._last_position) - for line in fp: - lines.append(line) - self._last_position = fp.tell() - self.__re_find['current'] = 0 - except (OSError, IOError) as error: - self.__re_find['current'] += 1 - self.error(str(error)) - - return lines or None - - def _find_recent_log_file(self): - """ - :return: - """ - self.__re_find['run'] = self.__re_find['maximum'] - self.__re_find['current'] = 0 - self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files - path_list = glob(self.__glob_path) - if path_list: - self.log_path = max(path_list) - return True - return False - - def check(self): - """ - Parse basic configuration and check if log file exists - :return: boolean - """ - if not self.log_path: - self.error('No path to log specified') - return None - - if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path): - return True - self.error('Cannot access {0}'.format(self.log_path)) - return False - - def create(self): - # set cursor at last byte of log file - self._last_position = os.path.getsize(self.log_path) - status = SimpleService.create(self) - return status diff --git a/python.d/python_modules/bases/FrameworkServices/MySQLService.py 
b/python.d/python_modules/bases/FrameworkServices/MySQLService.py deleted file mode 100644 index 3acc5b109..000000000 --- a/python.d/python_modules/bases/FrameworkServices/MySQLService.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Ilya Mashchenko (l2isbad) - -from sys import exc_info - -try: - import MySQLdb - - PY_MYSQL = True -except ImportError: - try: - import pymysql as MySQLdb - - PY_MYSQL = True - except ImportError: - PY_MYSQL = False - -from bases.FrameworkServices.SimpleService import SimpleService - - -class MySQLService(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.__connection = None - self.__conn_properties = dict() - self.extra_conn_properties = dict() - self.__queries = self.configuration.get('queries', dict()) - self.queries = dict() - - def __connect(self): - try: - connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties) - except (MySQLdb.MySQLError, TypeError, AttributeError) as error: - return None, str(error) - else: - return connection, None - - def check(self): - def get_connection_properties(conf, extra_conf): - properties = dict() - if conf.get('user'): - properties['user'] = conf['user'] - if conf.get('pass'): - properties['passwd'] = conf['pass'] - if conf.get('socket'): - properties['unix_socket'] = conf['socket'] - elif conf.get('host'): - properties['host'] = conf['host'] - properties['port'] = int(conf.get('port', 3306)) - elif conf.get('my.cnf'): - if MySQLdb.__name__ == 'pymysql': - self.error('"my.cnf" parsing is not working for pymysql') - else: - properties['read_default_file'] = conf['my.cnf'] - if isinstance(extra_conf, dict) and extra_conf: - properties.update(extra_conf) - - return properties or None - - def is_valid_queries_dict(raw_queries, log_error): - """ - :param raw_queries: dict: - :param log_error: function: - :return: dict or None - - raw_queries is valid when: type <dict> and not empty after is_valid_query(for all queries) - """ - - def is_valid_query(query): - return all([isinstance(query, str), - query.startswith(('SELECT', 'select', 'SHOW', 'show'))]) - - if hasattr(raw_queries, 'keys') and raw_queries: - valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)]) - bad_queries = set(raw_queries) - set(valid_queries) - - if bad_queries: - log_error('Removed query(s): {queries}'.format(queries=bad_queries)) - return valid_queries - else: - log_error('Unsupported "queries" format. Must be not empty <dict>') - return None - - if not PY_MYSQL: - self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin') - return False - - # Preference: 1. "queries" from the configuration file 2. 
"queries" from the module - self.queries = self.__queries or self.queries - # Check if "self.queries" exist, not empty and all queries are in valid format - self.queries = is_valid_queries_dict(self.queries, self.error) - if not self.queries: - return None - - # Get connection properties - self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties) - if not self.__conn_properties: - self.error('Connection properties are missing') - return False - - # Create connection to the database - self.__connection, error = self.__connect() - if error: - self.error('Can\'t establish connection to MySQL: {error}'.format(error=error)) - return False - - try: - data = self._get_data() - except Exception as error: - self.error('_get_data() failed. Error: {error}'.format(error=error)) - return False - - if isinstance(data, dict) and data: - return True - self.error("_get_data() returned no data or type is not <dict>") - return False - - def _get_raw_data(self, description=None): - """ - Get raw data from MySQL server - :return: dict: fetchall() or (fetchall(), description) - """ - - if not self.__connection: - self.__connection, error = self.__connect() - if error: - return None - - raw_data = dict() - queries = dict(self.queries) - try: - with self.__connection as cursor: - for name, query in queries.items(): - try: - cursor.execute(query) - except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error: - if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)): - raise RuntimeError - self.error('Removed query: {name}[{query}]. Error: error'.format(name=name, - query=query, - error=error)) - self.queries.pop(name) - continue - else: - raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall() - self.__connection.commit() - except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError): - self.__connection.close() - self.__connection = None - return None - else: - return raw_data or None - - @staticmethod - def __is_error_critical(err_class, err_text): - return err_class == MySQLdb.OperationalError and all(['denied' not in err_text, - 'Unknown column' not in err_text]) diff --git a/python.d/python_modules/bases/FrameworkServices/SimpleService.py b/python.d/python_modules/bases/FrameworkServices/SimpleService.py deleted file mode 100644 index 177332c1f..000000000 --- a/python.d/python_modules/bases/FrameworkServices/SimpleService.py +++ /dev/null @@ -1,262 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) -# Author: Ilya Mashchenko (l2isbad) - -from threading import Thread - -try: - from time import sleep, monotonic as time -except ImportError: - from time import sleep, time - -from bases.charts import Charts, ChartError, create_runtime_chart -from bases.collection import OldVersionCompatibility, safe_print -from bases.loggers import PythonDLimitedLogger - -RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \ - 'SET run_time = {elapsed}\n' \ - 'END\n' - - -class RuntimeCounters: - def __init__(self, configuration): - """ - :param configuration: <dict> - """ - self.FREQ = int(configuration.pop('update_every')) - self.START_RUN = 0 - self.NEXT_RUN = 0 - self.PREV_UPDATE = 0 - self.SINCE_UPDATE = 0 - self.ELAPSED = 0 - self.RETRIES = 0 - self.RETRIES_MAX = configuration.pop('retries') - self.PENALTY = 0 - self.RUNS = 1 - - def is_sleep_time(self): - return self.START_RUN < self.NEXT_RUN - - -class SimpleService(Thread, PythonDLimitedLogger, 
OldVersionCompatibility, object): - """ - Prototype of Service class. - Implemented basic functionality to run jobs by `python.d.plugin` - """ - def __init__(self, configuration, name=''): - """ - :param configuration: <dict> - :param name: <str> - """ - Thread.__init__(self) - self.daemon = True - PythonDLimitedLogger.__init__(self) - OldVersionCompatibility.__init__(self) - self.configuration = configuration - self.order = list() - self.definitions = dict() - - self.module_name = self.__module__ - self.job_name = configuration.pop('job_name') - self.override_name = configuration.pop('override_name') - self.fake_name = None - - self._runtime_counters = RuntimeCounters(configuration=configuration) - self.charts = Charts(job_name=self.actual_name, - priority=configuration.pop('priority'), - cleanup=configuration.pop('chart_cleanup'), - get_update_every=self.get_update_every, - module_name=self.module_name) - - def __repr__(self): - return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__), - name=self.name) - - @property - def name(self): - if self.job_name: - return '_'.join([self.module_name, self.override_name or self.job_name]) - return self.module_name - - def actual_name(self): - return self.fake_name or self.name - - @property - def runs_counter(self): - return self._runtime_counters.RUNS - - @property - def update_every(self): - return self._runtime_counters.FREQ - - @update_every.setter - def update_every(self, value): - """ - :param value: <int> - :return: - """ - self._runtime_counters.FREQ = value - - def get_update_every(self): - return self.update_every - - def check(self): - """ - check() prototype - :return: boolean - """ - self.debug("job doesn't implement check() method. Using default which simply invokes get_data().") - data = self.get_data() - if data and isinstance(data, dict): - return True - self.debug('returned value is wrong: {0}'.format(data)) - return False - - @create_runtime_chart - def create(self): - for chart_name in self.order: - chart_config = self.definitions.get(chart_name) - - if not chart_config: - self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. " - "Skipping it.".format(chart_name=chart_name)) - continue - - # create chart - chart_params = [chart_name] + chart_config['options'] - try: - self.charts.add_chart(params=chart_params) - except ChartError as error: - self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name, - error=error)) - continue - - # add dimensions to chart - for dimension in chart_config['lines']: - try: - self.charts[chart_name].add_dimension(dimension) - except ChartError as error: - self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension, - error=error)) - continue - - # add variables to chart - if 'variables' in chart_config: - for variable in chart_config['variables']: - try: - self.charts[chart_name].add_variable(variable) - except ChartError as error: - self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable, - error=error)) - continue - - del self.order - del self.definitions - - # True if job has at least 1 chart else False - return bool(self.charts) - - def run(self): - """ - Runs job in thread. Handles retries. - Exits when job failed or timed out. 
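# [editor's sketch] The scheduling arithmetic used by the loop below, isolated
# so it can be checked by hand: runs are aligned to multiples of the update
# frequency, shifted forward by any penalty accrued from consecutive failures.
def next_run(start_run, freq, penalty=0):
    return start_run - (start_run % freq) + freq + penalty

assert next_run(101, 3) == 102             # next multiple of 3 after 101
assert next_run(101, 3, penalty=6) == 108  # same slot, pushed back by penalty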
- :return: None - """ - job = self._runtime_counters - self.debug('started, update frequency: {freq}, ' - 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES)) - - while True: - job.START_RUN = time() - - job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY - - self.sleep_until_next_run() - - if job.PREV_UPDATE: - job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6) - - try: - updated = self.update(interval=job.SINCE_UPDATE) - except Exception as error: - self.error('update() unhandled exception: {error}'.format(error=error)) - updated = False - - job.RUNS += 1 - - if not updated: - if not self.manage_retries(): - return - else: - job.ELAPSED = int((time() - job.START_RUN) * 1e3) - job.PREV_UPDATE = job.START_RUN - job.RETRIES, job.PENALTY = 0, 0 - safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name, - since_last=job.SINCE_UPDATE, - elapsed=job.ELAPSED)) - self.debug('update => [{status}] (elapsed time: {elapsed}, ' - 'retries left: {retries})'.format(status='OK' if updated else 'FAILED', - elapsed=job.ELAPSED if updated else '-', - retries=job.RETRIES_MAX - job.RETRIES)) - - def update(self, interval): - """ - :return: - """ - data = self.get_data() - if not data: - self.debug('get_data() returned no data') - return False - elif not isinstance(data, dict): - self.debug('get_data() returned incorrect type data') - return False - - updated = False - - for chart in self.charts: - if chart.flags.obsoleted: - if chart.can_be_updated(data): - chart.refresh() - else: - continue - elif self.charts.cleanup and chart.penalty >= self.charts.cleanup: - chart.obsolete() - self.error("chart '{0}' was suppressed due to non updating".format(chart.name)) - continue - - ok = chart.update(data, interval) - if ok: - updated = True - - if not updated: - self.debug('none of the charts has been updated') - - return updated - - def manage_retries(self): - rc = self._runtime_counters - rc.RETRIES += 1 - if rc.RETRIES % 5 == 0: - rc.PENALTY = int(rc.RETRIES * self.update_every / 2) - if rc.RETRIES >= rc.RETRIES_MAX: - self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX)) - return False - return True - - def sleep_until_next_run(self): - job = self._runtime_counters - - # sleep() is interruptable - while job.is_sleep_time(): - sleep_time = job.NEXT_RUN - job.START_RUN - self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time, - freq=job.FREQ + job.PENALTY)) - sleep(sleep_time) - job.START_RUN = time() - - def get_data(self): - return self._get_data() - - def _get_data(self): - raise NotImplementedError diff --git a/python.d/python_modules/bases/FrameworkServices/SocketService.py b/python.d/python_modules/bases/FrameworkServices/SocketService.py deleted file mode 100644 index 8d27ae660..000000000 --- a/python.d/python_modules/bases/FrameworkServices/SocketService.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) - -import socket - -from bases.FrameworkServices.SimpleService import SimpleService - - -class SocketService(SimpleService): - def __init__(self, configuration=None, name=None): - self._sock = None - self._keep_alive = False - self.host = 'localhost' - self.port = None - self.unix_socket = None - self.dgram_socket = False - self.request = '' - self.__socket_config = None - self.__empty_request = "".encode() - SimpleService.__init__(self, configuration=configuration, name=name) - - def 
_socket_error(self, message=None): - if self.unix_socket is not None: - self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket, - message=message)) - else: - if self.__socket_config is not None: - af, sock_type, proto, canon_name, sa = self.__socket_config - self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0], - port=sa[1], - message=message)) - else: - self.error('unknown socket: {0}'.format(message)) - - def _connect2socket(self, res=None): - """ - Connect to a socket, passing the result of getaddrinfo() - :return: boolean - """ - if res is None: - res = self.__socket_config - if res is None: - self.error("Cannot create socket to 'None':") - return False - - af, sock_type, proto, canon_name, sa = res - try: - self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1])) - self._sock = socket.socket(af, sock_type, proto) - except socket.error as error: - self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0], - port=sa[1], - error=error)) - self._sock = None - self.__socket_config = None - return False - - try: - self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1])) - self._sock.connect(sa) - except socket.error as error: - self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0], - port=sa[1], - error=error)) - self._disconnect() - self.__socket_config = None - return False - - self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1])) - self.__socket_config = res - return True - - def _connect2unixsocket(self): - """ - Connect to a unix socket, given its filename - :return: boolean - """ - if self.unix_socket is None: - self.error("cannot connect to unix socket 'None'") - return False - - try: - self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket)) - self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - self._sock.connect(self.unix_socket) - self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket)) - return True - except socket.error as error: - self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket, - error=error)) - - try: - self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket)) - self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self._sock.connect(self.unix_socket) - self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket)) - return True - except socket.error as error: - self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket, - error=error)) - self._sock = None - return False - - def _connect(self): - """ - Recreate socket and connect to it since sockets cannot be reused after closing - Available configurations are IPv6, IPv4 or UNIX socket - :return: - """ - try: - if self.unix_socket is not None: - self._connect2unixsocket() - - else: - if self.__socket_config is not None: - self._connect2socket() - else: - if self.dgram_socket: - sock_type = socket.SOCK_DGRAM - else: - sock_type = socket.SOCK_STREAM - for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type): - if self._connect2socket(res): - break - - except Exception: - self._sock = None - self.__socket_config = None - - if self._sock is not None: - self._sock.setblocking(0) - self._sock.settimeout(5) - self.debug('set socket timeout to: {0}'.format(self._sock.gettimeout())) - - def 
_disconnect(self): - """ - Close socket connection - :return: - """ - if self._sock is not None: - try: - self.debug('closing socket') - self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all - self._sock.close() - except Exception: - pass - self._sock = None - - def _send(self): - """ - Send request. - :return: boolean - """ - # Send request if it is needed - if self.request != self.__empty_request: - try: - self.debug('sending request: {0}'.format(self.request)) - self._sock.send(self.request) - except Exception as error: - self._socket_error('error sending request: {0}'.format(error)) - self._disconnect() - return False - return True - - def _receive(self, raw=False): - """ - Receive data from socket - :param raw: set `True` to return bytes - :type raw: bool - :return: decoded str or raw bytes - :rtype: str/bytes - """ - data = "" if not raw else b"" - while True: - self.debug('receiving response') - try: - buf = self._sock.recv(4096) - except Exception as error: - self._socket_error('failed to receive response: {0}'.format(error)) - self._disconnect() - break - - if buf is None or len(buf) == 0: # handle server disconnect - if data == "" or data == b"": - self._socket_error('unexpectedly disconnected') - else: - self.debug('server closed the connection') - self._disconnect() - break - - self.debug('received data') - data += buf.decode('utf-8', 'ignore') if not raw else buf - if self._check_raw_data(data): - break - - self.debug('final response: {0}'.format(data)) - return data - - def _get_raw_data(self, raw=False): - """ - Get raw data with low-level "socket" module. - :param raw: set `True` to return bytes - :type raw: bool - :return: decoded data (str) or raw data (bytes) - :rtype: str/bytes - """ - if self._sock is None: - self._connect() - if self._sock is None: - return None - - # Send request if it is needed - if not self._send(): - return None - - data = self._receive(raw) - - if not self._keep_alive: - self._disconnect() - - return data - - @staticmethod - def _check_raw_data(data): - """ - Check if all data has been gathered from socket - :param data: str - :return: boolean - """ - return bool(data) - - def _parse_config(self): - """ - Parse configuration data - :return: boolean - """ - try: - self.unix_socket = str(self.configuration['socket']) - except (KeyError, TypeError): - self.debug('No unix socket specified. Trying TCP/IP socket.') - self.unix_socket = None - try: - self.host = str(self.configuration['host']) - except (KeyError, TypeError): - self.debug('No host specified. Using: "{0}"'.format(self.host)) - try: - self.port = int(self.configuration['port']) - except (KeyError, TypeError): - self.debug('No port specified. Using: "{0}"'.format(self.port)) - - try: - self.request = str(self.configuration['request']) - except (KeyError, TypeError): - self.debug('No request specified. 
Using: "{0}"'.format(self.request)) - - self.request = self.request.encode() - - def check(self): - self._parse_config() - return SimpleService.check(self) diff --git a/python.d/python_modules/bases/FrameworkServices/UrlService.py b/python.d/python_modules/bases/FrameworkServices/UrlService.py deleted file mode 100644 index bb340ba3b..000000000 --- a/python.d/python_modules/bases/FrameworkServices/UrlService.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) -# Author: Ilya Mashchenko (l2isbad) - -import urllib3 - -from bases.FrameworkServices.SimpleService import SimpleService - -try: - urllib3.disable_warnings() -except AttributeError: - pass - - -class UrlService(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url') - self.user = self.configuration.get('user') - self.password = self.configuration.get('pass') - self.proxy_user = self.configuration.get('proxy_user') - self.proxy_password = self.configuration.get('proxy_pass') - self.proxy_url = self.configuration.get('proxy_url') - self.header = self.configuration.get('header') - self.request_timeout = self.configuration.get('timeout', 1) - self._manager = None - - def __make_headers(self, **header_kw): - user = header_kw.get('user') or self.user - password = header_kw.get('pass') or self.password - proxy_user = header_kw.get('proxy_user') or self.proxy_user - proxy_password = header_kw.get('proxy_pass') or self.proxy_password - custom_header = header_kw.get('header') or self.header - header_params = dict(keep_alive=True) - proxy_header_params = dict() - if user and password: - header_params['basic_auth'] = '{user}:{password}'.format(user=user, - password=password) - if proxy_user and proxy_password: - proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user, - password=proxy_password) - try: - header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params) - except TypeError as error: - self.error('build_header() error: {error}'.format(error=error)) - return None, None - else: - header.update(custom_header or dict()) - return header, proxy_header - - def _build_manager(self, **header_kw): - header, proxy_header = self.__make_headers(**header_kw) - if header is None or proxy_header is None: - return None - proxy_url = header_kw.get('proxy_url') or self.proxy_url - if proxy_url: - manager = urllib3.ProxyManager - params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header) - else: - manager = urllib3.PoolManager - params = dict(headers=header) - try: - url = header_kw.get('url') or self.url - if url.startswith('https'): - return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params) - return manager(**params) - except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error: - self.error('build_manager() error:', str(error)) - return None - - def _get_raw_data(self, url=None, manager=None): - """ - Get raw data from http request - :return: str - """ - try: - status, data = self._get_raw_data_with_status(url, manager) - except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error: - self.error('Url: {url}. Error: {error}'.format(url=url, error=error)) - return None - - if status == 200: - return data.decode() - else: - self.debug('Url: {url}. 
Http response status code: {code}'.format(url=url, code=status)) - return None - - def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True): - """ - Get status and response body content from http request. Does not catch exceptions - :return: int, str - """ - url = url or self.url - manager = manager or self._manager - response = manager.request(method='GET', - url=url, - timeout=self.request_timeout, - retries=retries, - headers=manager.headers, - redirect=redirect) - return response.status, response.data - - def check(self): - """ - Format configuration data and try to connect to server - :return: boolean - """ - if not (self.url and isinstance(self.url, str)): - self.error('URL is not defined or type is not <str>') - return False - - self._manager = self._build_manager() - if not self._manager: - return False - - try: - data = self._get_data() - except Exception as error: - self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error)) - return False - - if isinstance(data, dict) and data: - return True - self.error('_get_data() returned no data or type is not <dict>') - return False diff --git a/python.d/python_modules/bases/FrameworkServices/__init__.py b/python.d/python_modules/bases/FrameworkServices/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python.d/python_modules/bases/__init__.py b/python.d/python_modules/bases/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python.d/python_modules/bases/charts.py b/python.d/python_modules/bases/charts.py deleted file mode 100644 index 5394fbf64..000000000 --- a/python.d/python_modules/bases/charts.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Ilya Mashchenko (l2isbad) - -from bases.collection import safe_print - -CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type'] -DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden'] -VARIABLE_PARAMS = ['id', 'value'] - -CHART_TYPES = ['line', 'area', 'stacked'] -DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row'] - -CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n' -CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \ - "{chart_type} {priority} {update_every} '' 'python.d.plugin' '{module_name}'\n" -CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \ - "{chart_type} {priority} {update_every} 'obsolete'\n" - - -DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n" -DIMENSION_SET = "SET '{id}' = {value}\n" -CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n" - -RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \ - "netdata.pythond_runtime line 145000 {update_every}\n" \ - "DIMENSION run_time 'run time' absolute 1 1\n" - - -def create_runtime_chart(func): - """ - Calls the wrapped function, then prints the runtime chart to stdout. - - Used as a decorator for the SimpleService.create() method. - The point of implementing the 'create runtime chart' functionality as a decorator is - to help users who re-implement create() in their own classes. - - :param func: class method - :return: - """
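# [editor's note] For reference, the templates defined above expand to the
# plain-text protocol python.d.plugin writes to stdout. For a hypothetical
# chart 'example.random' with one dimension (all values illustrative):
#
#   CHART example.random '' 'A random number' 'value' 'random' 'random.random' line 100000 1 '' 'python.d.plugin' 'example'
#   DIMENSION 'random1' 'random1' absolute 1 1 ''
#
# and then, on every collection cycle:
#
#   BEGIN example.random 1000000
#   SET 'random1' = 42
#   END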
- def wrapper(*args, **kwargs): - self = args[0] - ok = func(*args, **kwargs) - if ok: - safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name, - update_every=self._runtime_counters.FREQ)) - return ok - return wrapper - - -class ChartError(Exception): - """Base class for all exceptions raised by this module""" - - -class DuplicateItemError(ChartError): - """Occurs when a user re-adds a chart or a dimension that has already been added""" - - -class ItemTypeError(ChartError): - """Occurs when a user passes a value of the wrong type to the Chart, Dimension or ChartVariable class""" - - -class ItemValueError(ChartError): - """Occurs when a user passes an inappropriate value to the Chart, Dimension or ChartVariable class""" - - -class Charts: - """Represent a collection of charts - - All charts are stored in a dict. - Each chart is an instance of the Chart class. - Charts must be added via the Charts.add_chart() method only""" - def __init__(self, job_name, priority, cleanup, get_update_every, module_name): - """ - :param job_name: <bound method> - :param priority: <int> - :param cleanup: <int> - :param get_update_every: <bound method> - :param module_name: <str> - """ - self.job_name = job_name - self.priority = priority - self.cleanup = cleanup - self.get_update_every = get_update_every - self.module_name = module_name - self.charts = dict() - - def __len__(self): - return len(self.charts) - - def __iter__(self): - return iter(self.charts.values()) - - def __repr__(self): - return 'Charts({0})'.format(self) - - def __str__(self): - return str([chart for chart in self.charts]) - - def __contains__(self, item): - return item in self.charts - - def __getitem__(self, item): - return self.charts[item] - - def __delitem__(self, key): - del self.charts[key] - - def __bool__(self): - return bool(self.charts) - - def __nonzero__(self): - return self.__bool__() - - def add_chart(self, params): - """ - Create a Chart instance and add it to the dict - - Manually adds the job name, priority and update_every to params.
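# [editor's sketch] How a module's chart definition feeds add_chart(). The
# options list is the powerdns 'questions' entry from earlier in this patch;
# add_chart() prepends the job name as the chart 'type' and fills in priority,
# update_every and module_name itself.
chart_config = {
    'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions',
                'powerdns.questions', 'line'],
    'lines': [['udp-queries', None, 'incremental']],
}
params = ['questions'] + chart_config['options']
# Charts.add_chart(params) zips [job_name] + params against CHART_PARAMS:
# ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type']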
- :param params: <list> - :return: - """ - params = [self.job_name()] + params - new_chart = Chart(params) - - new_chart.params['update_every'] = self.get_update_every() - new_chart.params['priority'] = self.priority - new_chart.params['module_name'] = self.module_name - - self.priority += 1 - self.charts[new_chart.id] = new_chart - - return new_chart - - def active_charts(self): - return [chart.id for chart in self if not chart.flags.obsoleted] - - -class Chart: - """Represent a chart""" - def __init__(self, params): - """ - :param params: <list> - """ - if not isinstance(params, list): - raise ItemTypeError("'chart' must be a list type") - if not len(params) >= 8: - raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS)) - - self.params = dict(zip(CHART_PARAMS, (p or str() for p in params))) - self.name = '{type}.{id}'.format(type=self.params['type'], - id=self.params['id']) - if self.params.get('chart_type') not in CHART_TYPES: - self.params['chart_type'] = 'absolute' - - self.dimensions = list() - self.variables = set() - self.flags = ChartFlags() - self.penalty = 0 - - def __getattr__(self, item): - try: - return self.params[item] - except KeyError: - raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self), - attr=item)) - - def __repr__(self): - return 'Chart({0})'.format(self.id) - - def __str__(self): - return self.id - - def __iter__(self): - return iter(self.dimensions) - - def __contains__(self, item): - return item in [dimension.id for dimension in self.dimensions] - - def add_variable(self, variable): - """ - :param variable: <list> - :return: - """ - self.variables.add(ChartVariable(variable)) - - def add_dimension(self, dimension): - """ - :param dimension: <list> - :return: - """ - dim = Dimension(dimension) - - if dim.id in self: - raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id, - chart=self.name)) - self.refresh() - self.dimensions.append(dim) - return dim - - def hide_dimension(self, dimension_id, reverse=False): - if dimension_id in self: - idx = self.dimensions.index(dimension_id) - dimension = self.dimensions[idx] - dimension.params['hidden'] = 'hidden' if not reverse else str() - self.refresh() - - def create(self): - """ - :return: - """ - chart = CHART_CREATE.format(**self.params) - dimensions = ''.join([dimension.create() for dimension in self.dimensions]) - variables = ''.join([var.set(var.value) for var in self.variables if var]) - - self.flags.push = False - self.flags.created = True - - safe_print(chart + dimensions + variables) - - def can_be_updated(self, data): - for dim in self.dimensions: - if dim.get_value(data) is not None: - return True - return False - - def update(self, data, interval): - updated_dimensions, updated_variables = str(), str() - - for dim in self.dimensions: - value = dim.get_value(data) - if value is not None: - updated_dimensions += dim.set(value) - - for var in self.variables: - value = var.get_value(data) - if value is not None: - updated_variables += var.set(value) - - if updated_dimensions: - since_last = interval if self.flags.updated else 0 - - if self.flags.push: - self.create() - - chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last) - safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n') - - self.flags.updated = True - self.penalty = 0 - else: - self.penalty += 1 - self.flags.updated = False - - return bool(updated_dimensions) - - def obsolete(self): - 
self.flags.obsoleted = True - if self.flags.created: - safe_print(CHART_OBSOLETE.format(**self.params)) - - def refresh(self): - self.penalty = 0 - self.flags.push = True - self.flags.obsoleted = False - - -class Dimension: - """Represent a dimension""" - def __init__(self, params): - """ - :param params: <list> - """ - if not isinstance(params, list): - raise ItemTypeError("'dimension' must be a list type") - if not params: - raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS)) - - self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params))) - self.params['name'] = self.params.get('name') or self.params['id'] - - if self.params.get('algorithm') not in DIMENSION_ALGORITHMS: - self.params['algorithm'] = 'absolute' - if not isinstance(self.params.get('multiplier'), int): - self.params['multiplier'] = 1 - if not isinstance(self.params.get('divisor'), int): - self.params['divisor'] = 1 - self.params.setdefault('hidden', '') - - def __getattr__(self, item): - try: - return self.params[item] - except KeyError: - raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self), - attr=item)) - - def __repr__(self): - return 'Dimension({0})'.format(self.id) - - def __str__(self): - return self.id - - def __eq__(self, other): - if not isinstance(other, Dimension): - return self.id == other - return self.id == other.id - - def create(self): - return DIMENSION_CREATE.format(**self.params) - - def set(self, value): - """ - :param value: <str>: must be a digit - :return: - """ - return DIMENSION_SET.format(id=self.id, - value=value) - - def get_value(self, data): - try: - return int(data[self.id]) - except (KeyError, TypeError): - return None - - -class ChartVariable: - """Represent a chart variable""" - def __init__(self, params): - """ - :param params: <list> - """ - if not isinstance(params, list): - raise ItemTypeError("'variable' must be a list type") - if not params: - raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS)) - - self.params = dict(zip(VARIABLE_PARAMS, params)) - self.params.setdefault('value', None) - - def __getattr__(self, item): - try: - return self.params[item] - except KeyError: - raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self), - attr=item)) - - def __bool__(self): - return self.value is not None - - def __nonzero__(self): - return self.__bool__() - - def __repr__(self): - return 'ChartVariable({0})'.format(self.id) - - def __str__(self): - return self.id - - def __eq__(self, other): - if isinstance(other, ChartVariable): - return self.id == other.id - return False - - def __hash__(self): - return hash(repr(self)) - - def set(self, value): - return CHART_VARIABLE_SET.format(id=self.id, - value=value) - - def get_value(self, data): - try: - return int(data[self.id]) - except (KeyError, TypeError): - return None - - -class ChartFlags: - def __init__(self): - self.push = True - self.created = False - self.updated = False - self.obsoleted = False diff --git a/python.d/python_modules/bases/collection.py b/python.d/python_modules/bases/collection.py deleted file mode 100644 index e03b4f58e..000000000 --- a/python.d/python_modules/bases/collection.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Ilya Mashchenko (l2isbad) - -import os - -PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':') - -CHART_BEGIN = 'BEGIN {0} {1}\n' -CHART_CREATE = "CHART {0} 
'{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n" -DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n" -DIMENSION_SET = "SET '{0}' = {1}\n" - - -def setdefault_values(config, base_dict): - for key, value in base_dict.items(): - config.setdefault(key, value) - return config - - -def run_and_exit(func): - def wrapper(*args, **kwargs): - func(*args, **kwargs) - exit(1) - return wrapper - - -def on_try_except_finally(on_except=(None, ), on_finally=(None, )): - except_func = on_except[0] - finally_func = on_finally[0] - - def decorator(func): - def wrapper(*args, **kwargs): - try: - func(*args, **kwargs) - except Exception: - if except_func: - except_func(*on_except[1:]) - finally: - if finally_func: - finally_func(*on_finally[1:]) - return wrapper - return decorator - - -def static_vars(**kwargs): - def decorate(func): - for k in kwargs: - setattr(func, k, kwargs[k]) - return func - return decorate - - -@on_try_except_finally(on_except=(exit, 1)) -def safe_print(*msg): - """ - :param msg: - :return: - """ - print(''.join(msg)) - - -def find_binary(binary): - """ - :param binary: <str> - :return: - """ - for directory in PATH: - binary_name = '/'.join([directory, binary]) - if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK): - return binary_name - return None - - -def read_last_line(f): - with open(f, 'rb') as opened: - opened.seek(-2, 2) - while opened.read(1) != b'\n': - opened.seek(-2, 1) - if opened.tell() == 0: - break - result = opened.readline() - return result.decode() - - -class OldVersionCompatibility: - - def __init__(self): - self._data_stream = str() - - def begin(self, type_id, microseconds=0): - """ - :param type_id: <str> - :param microseconds: <str> or <int>: must be a digit - :return: - """ - self._data_stream += CHART_BEGIN.format(type_id, microseconds) - - def set(self, dim_id, value): - """ - :param dim_id: <str> - :param value: <int> or <str>: must be a digit - :return: - """ - self._data_stream += DIMENSION_SET.format(dim_id, value) - - def end(self): - self._data_stream += 'END\n' - - def chart(self, type_id, name='', title='', units='', family='', category='', chart_type='line', - priority='', update_every=''): - """ - :param type_id: <str> - :param name: <str> - :param title: <str> - :param units: <str> - :param family: <str> - :param category: <str> - :param chart_type: <str> - :param priority: <str> or <int> - :param update_every: <str> or <int> - :return: - """ - self._data_stream += CHART_CREATE.format(type_id, name, title, units, - family, category, chart_type, - priority, update_every) - - def dimension(self, dim_id, name=None, algorithm="absolute", multiplier=1, divisor=1, hidden=False): - """ - :param dim_id: <str> - :param name: <str> or None - :param algorithm: <str> - :param multiplier: <str> or <int>: must be a digit - :param divisor: <str> or <int>: must be a digit - :param hidden: <str>: literally "hidden" or "" - :return: - """ - self._data_stream += DIMENSION_CREATE.format(dim_id, name or dim_id, algorithm, - multiplier, divisor, hidden or str()) - - @on_try_except_finally(on_except=(exit, 1)) - def commit(self): - print(self._data_stream) - self._data_stream = str() diff --git a/python.d/python_modules/bases/loaders.py b/python.d/python_modules/bases/loaders.py deleted file mode 100644 index d18b9dcd0..000000000 --- a/python.d/python_modules/bases/loaders.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Ilya Mashchenko (l2isbad) - -import types -from sys import version_info - -PY_VERSION = 
version_info[:2] - -if PY_VERSION > (3, 1): - from pyyaml3 import SafeLoader as YamlSafeLoader - from importlib.machinery import SourceFileLoader - DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' -else: - from pyyaml2 import SafeLoader as YamlSafeLoader - from imp import load_source as SourceFileLoader - DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' - -try: - from collections import OrderedDict -except ImportError: - from third_party.ordereddict import OrderedDict - - -def dict_constructor(loader, node): - return OrderedDict(loader.construct_pairs(node)) - - -YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor) - - -class YamlOrderedLoader: - @staticmethod - def load_config_from_file(file_name): - opened, loaded = False, False - try: - stream = open(file_name, 'r') - opened = True - loader = YamlSafeLoader(stream) - loaded = True - parsed = loader.get_single_data() or dict() - except Exception as error: - return dict(), error - else: - return parsed, None - finally: - if opened: - stream.close() - if loaded: - loader.dispose() - - -class SourceLoader: - @staticmethod - def load_module_from_file(name, path): - try: - loaded = SourceFileLoader(name, path) - if isinstance(loaded, types.ModuleType): - return loaded, None - return loaded.load_module(), None - except Exception as error: - return None, error - - -class ModuleAndConfigLoader(YamlOrderedLoader, SourceLoader): - pass diff --git a/python.d/python_modules/bases/loggers.py b/python.d/python_modules/bases/loggers.py deleted file mode 100644 index fc40b83d3..000000000 --- a/python.d/python_modules/bases/loggers.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: -# Author: Ilya Mashchenko (l2isbad) - -import logging -import traceback - -from sys import exc_info - -try: - from time import monotonic as time -except ImportError: - from time import time - -from bases.collection import on_try_except_finally - - -LOGGING_LEVELS = {'CRITICAL': 50, - 'ERROR': 40, - 'WARNING': 30, - 'INFO': 20, - 'DEBUG': 10, - 'NOTSET': 0} - -DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s' -DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' - -PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s: %(job_name)s: %(message)s' -PYTHON_D_LOG_NAME = 'python.d' - - -def limiter(log_max_count=30, allowed_in_seconds=60): - def on_decorator(func): - - def on_call(*args): - current_time = args[0]._runtime_counters.START_RUN - lc = args[0]._logger_counters - - if lc.logged and lc.logged % log_max_count == 0: - if current_time - lc.time_to_compare <= allowed_in_seconds: - lc.dropped += 1 - return - lc.time_to_compare = current_time - - lc.logged += 1 - func(*args) - - return on_call - return on_decorator - - -def add_traceback(func): - def on_call(*args): - self = args[0] - - if not self.log_traceback: - func(*args) - else: - if exc_info()[0]: - func(*args) - func(self, traceback.format_exc()) - else: - func(*args) - - return on_call - - -class LoggerCounters: - def __init__(self): - self.logged = 0 - self.dropped = 0 - self.time_to_compare = time() - - def __repr__(self): - return 'LoggerCounter(logged: {logged}, dropped: {dropped})'.format(logged=self.logged, - dropped=self.dropped) - - -class BaseLogger(object): - def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT, - handler=logging.StreamHandler): - """ - :param logger_name: <str> - :param log_fmt: <str> - :param date_fmt: <str> - :param handler: <logging handler> - """ - self.logger 
= logging.getLogger(logger_name) - if not self.has_handlers(): - self.severity = 'INFO' - self.logger.addHandler(handler()) - self.set_formatter(fmt=log_fmt, date_fmt=date_fmt) - - def __repr__(self): - return '<Logger: {name})>'.format(name=self.logger.name) - - def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT): - """ - :param fmt: <str> - :param date_fmt: <str> - :return: - """ - if self.has_handlers(): - self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt)) - - def has_handlers(self): - return self.logger.handlers - - @property - def severity(self): - return self.logger.getEffectiveLevel() - - @severity.setter - def severity(self, level): - """ - :param level: <str> or <int> - :return: - """ - if level in LOGGING_LEVELS: - self.logger.setLevel(LOGGING_LEVELS[level]) - - def debug(self, *msg, **kwargs): - self.logger.debug(' '.join(map(str, msg)), **kwargs) - - def info(self, *msg, **kwargs): - self.logger.info(' '.join(map(str, msg)), **kwargs) - - def warning(self, *msg, **kwargs): - self.logger.warning(' '.join(map(str, msg)), **kwargs) - - def error(self, *msg, **kwargs): - self.logger.error(' '.join(map(str, msg)), **kwargs) - - def alert(self, *msg, **kwargs): - self.logger.critical(' '.join(map(str, msg)), **kwargs) - - @on_try_except_finally(on_finally=(exit, 1)) - def fatal(self, *msg, **kwargs): - self.logger.critical(' '.join(map(str, msg)), **kwargs) - - -class PythonDLogger(object): - def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT): - """ - :param logger_name: <str> - :param log_fmt: <str> - """ - self.logger = BaseLogger(logger_name, log_fmt=log_fmt) - self.module_name = 'plugin' - self.job_name = 'main' - self._logger_counters = LoggerCounters() - - _LOG_TRACEBACK = False - - @property - def log_traceback(self): - return PythonDLogger._LOG_TRACEBACK - - @log_traceback.setter - def log_traceback(self, value): - PythonDLogger._LOG_TRACEBACK = value - - def debug(self, *msg): - self.logger.debug(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - def info(self, *msg): - self.logger.info(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - def warning(self, *msg): - self.logger.warning(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - @add_traceback - def error(self, *msg): - self.logger.error(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - @add_traceback - def alert(self, *msg): - self.logger.alert(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - def fatal(self, *msg): - self.logger.fatal(*msg, extra={'module_name': self.module_name, - 'job_name': self.job_name or self.module_name}) - - -class PythonDLimitedLogger(PythonDLogger): - @limiter() - def info(self, *msg): - PythonDLogger.info(self, *msg) - - @limiter() - def warning(self, *msg): - PythonDLogger.warning(self, *msg) - - @limiter() - def error(self, *msg): - PythonDLogger.error(self, *msg) - - @limiter() - def alert(self, *msg): - PythonDLogger.alert(self, *msg) diff --git a/python.d/python_modules/pyyaml2/__init__.py b/python.d/python_modules/pyyaml2/__init__.py deleted file mode 100644 index 76e19e13f..000000000 --- a/python.d/python_modules/pyyaml2/__init__.py +++ /dev/null @@ -1,315 +0,0 @@ - -from error import * - -from tokens import * -from events import * -from nodes import * - -from loader 
import * -from dumper import * - -__version__ = '3.11' - -try: - from cyaml import * - __with_libyaml__ = True -except ImportError: - __with_libyaml__ = False - -def scan(stream, Loader=Loader): - """ - Scan a YAML stream and produce scanning tokens. - """ - loader = Loader(stream) - try: - while loader.check_token(): - yield loader.get_token() - finally: - loader.dispose() - -def parse(stream, Loader=Loader): - """ - Parse a YAML stream and produce parsing events. - """ - loader = Loader(stream) - try: - while loader.check_event(): - yield loader.get_event() - finally: - loader.dispose() - -def compose(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding representation tree. - """ - loader = Loader(stream) - try: - return loader.get_single_node() - finally: - loader.dispose() - -def compose_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding representation trees. - """ - loader = Loader(stream) - try: - while loader.check_node(): - yield loader.get_node() - finally: - loader.dispose() - -def load(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - """ - loader = Loader(stream) - try: - return loader.get_single_data() - finally: - loader.dispose() - -def load_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. - """ - loader = Loader(stream) - try: - while loader.check_data(): - yield loader.get_data() - finally: - loader.dispose() - -def safe_load(stream): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - Resolve only basic YAML tags. - """ - return load(stream, SafeLoader) - -def safe_load_all(stream): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. - Resolve only basic YAML tags. - """ - return load_all(stream, SafeLoader) - -def emit(events, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - """ - Emit YAML parsing events into a stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - from StringIO import StringIO - stream = StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - try: - for event in events: - dumper.emit(event) - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize_all(nodes, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding='utf-8', explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of representation trees into a YAML stream. - If stream is None, return the produced string instead. 
- """ - getvalue = None - if stream is None: - if encoding is None: - from StringIO import StringIO - else: - from cStringIO import StringIO - stream = StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for node in nodes: - dumper.serialize(node) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize(node, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a representation tree into a YAML stream. - If stream is None, return the produced string instead. - """ - return serialize_all([node], stream, Dumper=Dumper, **kwds) - -def dump_all(documents, stream=None, Dumper=Dumper, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding='utf-8', explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of Python objects into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - from StringIO import StringIO - else: - from cStringIO import StringIO - stream = StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, default_style=default_style, - default_flow_style=default_flow_style, - canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for data in documents: - dumper.represent(data) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def dump(data, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a Python object into a YAML stream. - If stream is None, return the produced string instead. - """ - return dump_all([data], stream, Dumper=Dumper, **kwds) - -def safe_dump_all(documents, stream=None, **kwds): - """ - Serialize a sequence of Python objects into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all(documents, stream, Dumper=SafeDumper, **kwds) - -def safe_dump(data, stream=None, **kwds): - """ - Serialize a Python object into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all([data], stream, Dumper=SafeDumper, **kwds) - -def add_implicit_resolver(tag, regexp, first=None, - Loader=Loader, Dumper=Dumper): - """ - Add an implicit scalar detector. - If an implicit scalar value matches the given regexp, - the corresponding tag is assigned to the scalar. - first is a sequence of possible initial characters or None. - """ - Loader.add_implicit_resolver(tag, regexp, first) - Dumper.add_implicit_resolver(tag, regexp, first) - -def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): - """ - Add a path based resolver for the given tag. - A path is a list of keys that forms a path - to a node in the representation tree. - Keys can be string values, integers, or None. - """ - Loader.add_path_resolver(tag, path, kind) - Dumper.add_path_resolver(tag, path, kind) - -def add_constructor(tag, constructor, Loader=Loader): - """ - Add a constructor for the given tag. 
- Constructor is a function that accepts a Loader instance - and a node object and produces the corresponding Python object. - """ - Loader.add_constructor(tag, constructor) - -def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): - """ - Add a multi-constructor for the given tag prefix. - Multi-constructor is called for a node if its tag starts with tag_prefix. - Multi-constructor accepts a Loader instance, a tag suffix, - and a node object and produces the corresponding Python object. - """ - Loader.add_multi_constructor(tag_prefix, multi_constructor) - -def add_representer(data_type, representer, Dumper=Dumper): - """ - Add a representer for the given type. - Representer is a function accepting a Dumper instance - and an instance of the given data type - and producing the corresponding representation node. - """ - Dumper.add_representer(data_type, representer) - -def add_multi_representer(data_type, multi_representer, Dumper=Dumper): - """ - Add a representer for the given type. - Multi-representer is a function accepting a Dumper instance - and an instance of the given data type or subtype - and producing the corresponding representation node. - """ - Dumper.add_multi_representer(data_type, multi_representer) - -class YAMLObjectMetaclass(type): - """ - The metaclass for YAMLObject. - """ - def __init__(cls, name, bases, kwds): - super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) - if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: - cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) - cls.yaml_dumper.add_representer(cls, cls.to_yaml) - -class YAMLObject(object): - """ - An object that can dump itself to a YAML stream - and load itself from a YAML stream. - """ - - __metaclass__ = YAMLObjectMetaclass - __slots__ = () # no direct instantiation, so allow immutable subclasses - - yaml_loader = Loader - yaml_dumper = Dumper - - yaml_tag = None - yaml_flow_style = None - - def from_yaml(cls, loader, node): - """ - Convert a representation node to a Python object. - """ - return loader.construct_yaml_object(node, cls) - from_yaml = classmethod(from_yaml) - - def to_yaml(cls, dumper, data): - """ - Convert a Python object to a representation node. - """ - return dumper.represent_yaml_object(cls.yaml_tag, data, cls, - flow_style=cls.yaml_flow_style) - to_yaml = classmethod(to_yaml) - diff --git a/python.d/python_modules/pyyaml2/composer.py b/python.d/python_modules/pyyaml2/composer.py deleted file mode 100644 index 06e5ac782..000000000 --- a/python.d/python_modules/pyyaml2/composer.py +++ /dev/null @@ -1,139 +0,0 @@ - -__all__ = ['Composer', 'ComposerError'] - -from error import MarkedYAMLError -from events import * -from nodes import * - -class ComposerError(MarkedYAMLError): - pass - -class Composer(object): - - def __init__(self): - self.anchors = {} - - def check_node(self): - # Drop the STREAM-START event. - if self.check_event(StreamStartEvent): - self.get_event() - - # If there are more documents available? - return not self.check_event(StreamEndEvent) - - def get_node(self): - # Get the root node of the next document. - if not self.check_event(StreamEndEvent): - return self.compose_document() - - def get_single_node(self): - # Drop the STREAM-START event. - self.get_event() - - # Compose a document if the stream is not empty. - document = None - if not self.check_event(StreamEndEvent): - document = self.compose_document() - - # Ensure that the stream contains no more documents. 
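YAMLObject ties the two registration calls together: setting yaml_tag on a subclass makes the metaclass register from_yaml() as its constructor and to_yaml() as its representer, so instances round-trip automatically. A sketch with a hypothetical class:

    import yaml

    class Monitor(yaml.YAMLObject):
        yaml_tag = u'!Monitor'       # registration happens in the metaclass

    m = yaml.load("!Monitor {host: localhost, port: 19999}")
    print(m.host)                    # localhost
    print(yaml.dump(m))              # !Monitor {host: localhost, port: 19999}

Note that loading builds the instance via __new__ and fills __dict__ from the mapping, so no __init__ is required.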
- if not self.check_event(StreamEndEvent): - event = self.get_event() - raise ComposerError("expected a single document in the stream", - document.start_mark, "but found another document", - event.start_mark) - - # Drop the STREAM-END event. - self.get_event() - - return document - - def compose_document(self): - # Drop the DOCUMENT-START event. - self.get_event() - - # Compose the root node. - node = self.compose_node(None, None) - - # Drop the DOCUMENT-END event. - self.get_event() - - self.anchors = {} - return node - - def compose_node(self, parent, index): - if self.check_event(AliasEvent): - event = self.get_event() - anchor = event.anchor - if anchor not in self.anchors: - raise ComposerError(None, None, "found undefined alias %r" - % anchor.encode('utf-8'), event.start_mark) - return self.anchors[anchor] - event = self.peek_event() - anchor = event.anchor - if anchor is not None: - if anchor in self.anchors: - raise ComposerError("found duplicate anchor %r; first occurence" - % anchor.encode('utf-8'), self.anchors[anchor].start_mark, - "second occurence", event.start_mark) - self.descend_resolver(parent, index) - if self.check_event(ScalarEvent): - node = self.compose_scalar_node(anchor) - elif self.check_event(SequenceStartEvent): - node = self.compose_sequence_node(anchor) - elif self.check_event(MappingStartEvent): - node = self.compose_mapping_node(anchor) - self.ascend_resolver() - return node - - def compose_scalar_node(self, anchor): - event = self.get_event() - tag = event.tag - if tag is None or tag == u'!': - tag = self.resolve(ScalarNode, event.value, event.implicit) - node = ScalarNode(tag, event.value, - event.start_mark, event.end_mark, style=event.style) - if anchor is not None: - self.anchors[anchor] = node - return node - - def compose_sequence_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == u'!': - tag = self.resolve(SequenceNode, None, start_event.implicit) - node = SequenceNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - index = 0 - while not self.check_event(SequenceEndEvent): - node.value.append(self.compose_node(node, index)) - index += 1 - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - - def compose_mapping_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == u'!': - tag = self.resolve(MappingNode, None, start_event.implicit) - node = MappingNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - while not self.check_event(MappingEndEvent): - #key_event = self.peek_event() - item_key = self.compose_node(node, None) - #if item_key in node.value: - # raise ComposerError("while composing a mapping", start_event.start_mark, - # "found duplicate key", key_event.start_mark) - item_value = self.compose_node(node, item_key) - #node.value[item_key] = item_value - node.value.append((item_key, item_value)) - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - diff --git a/python.d/python_modules/pyyaml2/constructor.py b/python.d/python_modules/pyyaml2/constructor.py deleted file mode 100644 index 635faac3e..000000000 --- a/python.d/python_modules/pyyaml2/constructor.py +++ /dev/null @@ -1,675 +0,0 @@ - -__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', - 'ConstructorError'] - -from error import * -from nodes import 
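A consequence of the anchor table above: an alias resolves to the already-composed node, so aliased data is shared rather than copied:

    import yaml

    doc = yaml.safe_load("defaults: &d {retries: 3}\njob: *d\n")
    print(doc['job'] is doc['defaults'])   # True: one object, two names

Referencing an anchor that was never defined raises the "found undefined alias" ComposerError shown above.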
* - -import datetime - -import binascii, re, sys, types - -class ConstructorError(MarkedYAMLError): - pass - -class BaseConstructor(object): - - yaml_constructors = {} - yaml_multi_constructors = {} - - def __init__(self): - self.constructed_objects = {} - self.recursive_objects = {} - self.state_generators = [] - self.deep_construct = False - - def check_data(self): - # If there are more documents available? - return self.check_node() - - def get_data(self): - # Construct and return the next document. - if self.check_node(): - return self.construct_document(self.get_node()) - - def get_single_data(self): - # Ensure that the stream contains a single document and construct it. - node = self.get_single_node() - if node is not None: - return self.construct_document(node) - return None - - def construct_document(self, node): - data = self.construct_object(node) - while self.state_generators: - state_generators = self.state_generators - self.state_generators = [] - for generator in state_generators: - for dummy in generator: - pass - self.constructed_objects = {} - self.recursive_objects = {} - self.deep_construct = False - return data - - def construct_object(self, node, deep=False): - if node in self.constructed_objects: - return self.constructed_objects[node] - if deep: - old_deep = self.deep_construct - self.deep_construct = True - if node in self.recursive_objects: - raise ConstructorError(None, None, - "found unconstructable recursive node", node.start_mark) - self.recursive_objects[node] = None - constructor = None - tag_suffix = None - if node.tag in self.yaml_constructors: - constructor = self.yaml_constructors[node.tag] - else: - for tag_prefix in self.yaml_multi_constructors: - if node.tag.startswith(tag_prefix): - tag_suffix = node.tag[len(tag_prefix):] - constructor = self.yaml_multi_constructors[tag_prefix] - break - else: - if None in self.yaml_multi_constructors: - tag_suffix = node.tag - constructor = self.yaml_multi_constructors[None] - elif None in self.yaml_constructors: - constructor = self.yaml_constructors[None] - elif isinstance(node, ScalarNode): - constructor = self.__class__.construct_scalar - elif isinstance(node, SequenceNode): - constructor = self.__class__.construct_sequence - elif isinstance(node, MappingNode): - constructor = self.__class__.construct_mapping - if tag_suffix is None: - data = constructor(self, node) - else: - data = constructor(self, tag_suffix, node) - if isinstance(data, types.GeneratorType): - generator = data - data = generator.next() - if self.deep_construct: - for dummy in generator: - pass - else: - self.state_generators.append(generator) - self.constructed_objects[node] = data - del self.recursive_objects[node] - if deep: - self.deep_construct = old_deep - return data - - def construct_scalar(self, node): - if not isinstance(node, ScalarNode): - raise ConstructorError(None, None, - "expected a scalar node, but found %s" % node.id, - node.start_mark) - return node.value - - def construct_sequence(self, node, deep=False): - if not isinstance(node, SequenceNode): - raise ConstructorError(None, None, - "expected a sequence node, but found %s" % node.id, - node.start_mark) - return [self.construct_object(child, deep=deep) - for child in node.value] - - def construct_mapping(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - mapping = {} - for key_node, value_node in node.value: - key = self.construct_object(key_node, 
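The hash() probe above is observable from the outside: a sequence used as a mapping key is rejected at construction time.

    import yaml

    try:
        yaml.safe_load("? [a, b]\n: value")    # explicit complex key
    except yaml.YAMLError as exc:
        print(exc)   # ... found unacceptable key (unhashable type: 'list')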
deep=deep) - try: - hash(key) - except TypeError, exc: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found unacceptable key (%s)" % exc, key_node.start_mark) - value = self.construct_object(value_node, deep=deep) - mapping[key] = value - return mapping - - def construct_pairs(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - pairs = [] - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - value = self.construct_object(value_node, deep=deep) - pairs.append((key, value)) - return pairs - - def add_constructor(cls, tag, constructor): - if not 'yaml_constructors' in cls.__dict__: - cls.yaml_constructors = cls.yaml_constructors.copy() - cls.yaml_constructors[tag] = constructor - add_constructor = classmethod(add_constructor) - - def add_multi_constructor(cls, tag_prefix, multi_constructor): - if not 'yaml_multi_constructors' in cls.__dict__: - cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() - cls.yaml_multi_constructors[tag_prefix] = multi_constructor - add_multi_constructor = classmethod(add_multi_constructor) - -class SafeConstructor(BaseConstructor): - - def construct_scalar(self, node): - if isinstance(node, MappingNode): - for key_node, value_node in node.value: - if key_node.tag == u'tag:yaml.org,2002:value': - return self.construct_scalar(value_node) - return BaseConstructor.construct_scalar(self, node) - - def flatten_mapping(self, node): - merge = [] - index = 0 - while index < len(node.value): - key_node, value_node = node.value[index] - if key_node.tag == u'tag:yaml.org,2002:merge': - del node.value[index] - if isinstance(value_node, MappingNode): - self.flatten_mapping(value_node) - merge.extend(value_node.value) - elif isinstance(value_node, SequenceNode): - submerge = [] - for subnode in value_node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing a mapping", - node.start_mark, - "expected a mapping for merging, but found %s" - % subnode.id, subnode.start_mark) - self.flatten_mapping(subnode) - submerge.append(subnode.value) - submerge.reverse() - for value in submerge: - merge.extend(value) - else: - raise ConstructorError("while constructing a mapping", node.start_mark, - "expected a mapping or list of mappings for merging, but found %s" - % value_node.id, value_node.start_mark) - elif key_node.tag == u'tag:yaml.org,2002:value': - key_node.tag = u'tag:yaml.org,2002:str' - index += 1 - else: - index += 1 - if merge: - node.value = merge + node.value - - def construct_mapping(self, node, deep=False): - if isinstance(node, MappingNode): - self.flatten_mapping(node) - return BaseConstructor.construct_mapping(self, node, deep=deep) - - def construct_yaml_null(self, node): - self.construct_scalar(node) - return None - - bool_values = { - u'yes': True, - u'no': False, - u'true': True, - u'false': False, - u'on': True, - u'off': False, - } - - def construct_yaml_bool(self, node): - value = self.construct_scalar(node) - return self.bool_values[value.lower()] - - def construct_yaml_int(self, node): - value = str(self.construct_scalar(node)) - value = value.replace('_', '') - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '0': - return 0 - elif value.startswith('0b'): - return sign*int(value[2:], 2) - elif value.startswith('0x'): - return sign*int(value[2:], 16) - elif value[0] == 
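flatten_mapping() is what implements the YAML 1.1 merge key: '<<' splices another mapping (or list of mappings) in, with the mapping's own entries taking precedence:

    import yaml

    doc = yaml.safe_load("base: &base {a: 1, b: 2}\n"
                         "child:\n  <<: *base\n  b: 3\n")
    print(doc['child'])    # {'a': 1, 'b': 3}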
'0': - return sign*int(value, 8) - elif ':' in value: - digits = [int(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*int(value) - - inf_value = 1e300 - while inf_value != inf_value*inf_value: - inf_value *= inf_value - nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). - - def construct_yaml_float(self, node): - value = str(self.construct_scalar(node)) - value = value.replace('_', '').lower() - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '.inf': - return sign*self.inf_value - elif value == '.nan': - return self.nan_value - elif ':' in value: - digits = [float(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0.0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*float(value) - - def construct_yaml_binary(self, node): - value = self.construct_scalar(node) - try: - return str(value).decode('base64') - except (binascii.Error, UnicodeEncodeError), exc: - raise ConstructorError(None, None, - "failed to decode base64 data: %s" % exc, node.start_mark) - - timestamp_regexp = re.compile( - ur'''^(?P<year>[0-9][0-9][0-9][0-9]) - -(?P<month>[0-9][0-9]?) - -(?P<day>[0-9][0-9]?) - (?:(?:[Tt]|[ \t]+) - (?P<hour>[0-9][0-9]?) - :(?P<minute>[0-9][0-9]) - :(?P<second>[0-9][0-9]) - (?:\.(?P<fraction>[0-9]*))? - (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?) - (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X) - - def construct_yaml_timestamp(self, node): - value = self.construct_scalar(node) - match = self.timestamp_regexp.match(node.value) - values = match.groupdict() - year = int(values['year']) - month = int(values['month']) - day = int(values['day']) - if not values['hour']: - return datetime.date(year, month, day) - hour = int(values['hour']) - minute = int(values['minute']) - second = int(values['second']) - fraction = 0 - if values['fraction']: - fraction = values['fraction'][:6] - while len(fraction) < 6: - fraction += '0' - fraction = int(fraction) - delta = None - if values['tz_sign']: - tz_hour = int(values['tz_hour']) - tz_minute = int(values['tz_minute'] or 0) - delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) - if values['tz_sign'] == '-': - delta = -delta - data = datetime.datetime(year, month, day, hour, minute, second, fraction) - if delta: - data -= delta - return data - - def construct_yaml_omap(self, node): - # Note: we do not check for duplicate keys, because it's too - # CPU-expensive. - omap = [] - yield omap - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - omap.append((key, value)) - - def construct_yaml_pairs(self, node): - # Note: the same code as `construct_yaml_omap`. 
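Those branches give construct_yaml_int its full YAML 1.1 surface: underscores, binary/hex/octal prefixes, and base-60 values all resolve to plain ints.

    import yaml

    for text in ['1_000', '0b1010', '0x1F', '0755', '1:30:00']:
        print('%-8s -> %r' % (text, yaml.safe_load(text)))
    # 1_000    -> 1000
    # 0b1010   -> 10
    # 0x1F     -> 31
    # 0755     -> 493
    # 1:30:00  -> 5400  (1h 30min in seconds: 1*3600 + 30*60)

    print(yaml.safe_load('.inf'))   # inf, built by squaring 1e300 above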
- pairs = [] - yield pairs - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - pairs.append((key, value)) - - def construct_yaml_set(self, node): - data = set() - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_str(self, node): - value = self.construct_scalar(node) - try: - return value.encode('ascii') - except UnicodeEncodeError: - return value - - def construct_yaml_seq(self, node): - data = [] - yield data - data.extend(self.construct_sequence(node)) - - def construct_yaml_map(self, node): - data = {} - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_object(self, node, cls): - data = cls.__new__(cls) - yield data - if hasattr(data, '__setstate__'): - state = self.construct_mapping(node, deep=True) - data.__setstate__(state) - else: - state = self.construct_mapping(node) - data.__dict__.update(state) - - def construct_undefined(self, node): - raise ConstructorError(None, None, - "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), - node.start_mark) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:null', - SafeConstructor.construct_yaml_null) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:bool', - SafeConstructor.construct_yaml_bool) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:int', - SafeConstructor.construct_yaml_int) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:float', - SafeConstructor.construct_yaml_float) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:binary', - SafeConstructor.construct_yaml_binary) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:timestamp', - SafeConstructor.construct_yaml_timestamp) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:omap', - SafeConstructor.construct_yaml_omap) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:pairs', - SafeConstructor.construct_yaml_pairs) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:set', - SafeConstructor.construct_yaml_set) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:str', - SafeConstructor.construct_yaml_str) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:seq', - SafeConstructor.construct_yaml_seq) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:map', - SafeConstructor.construct_yaml_map) - -SafeConstructor.add_constructor(None, - SafeConstructor.construct_undefined) - -class Constructor(SafeConstructor): - - def construct_python_str(self, node): - return self.construct_scalar(node).encode('utf-8') - - def construct_python_unicode(self, node): - return self.construct_scalar(node) - - def construct_python_long(self, node): - return long(self.construct_yaml_int(node)) - - def construct_python_complex(self, node): - return complex(self.construct_scalar(node)) - - def construct_python_tuple(self, node): - return 
tuple(self.construct_sequence(node)) - - def find_python_module(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python module", mark, - "expected non-empty name appended to the tag", mark) - try: - __import__(name) - except ImportError, exc: - raise ConstructorError("while constructing a Python module", mark, - "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) - return sys.modules[name] - - def find_python_name(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python object", mark, - "expected non-empty name appended to the tag", mark) - if u'.' in name: - module_name, object_name = name.rsplit('.', 1) - else: - module_name = '__builtin__' - object_name = name - try: - __import__(module_name) - except ImportError, exc: - raise ConstructorError("while constructing a Python object", mark, - "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) - module = sys.modules[module_name] - if not hasattr(module, object_name): - raise ConstructorError("while constructing a Python object", mark, - "cannot find %r in the module %r" % (object_name.encode('utf-8'), - module.__name__), mark) - return getattr(module, object_name) - - def construct_python_name(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python name", node.start_mark, - "expected the empty value, but found %r" % value.encode('utf-8'), - node.start_mark) - return self.find_python_name(suffix, node.start_mark) - - def construct_python_module(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python module", node.start_mark, - "expected the empty value, but found %r" % value.encode('utf-8'), - node.start_mark) - return self.find_python_module(suffix, node.start_mark) - - class classobj: pass - - def make_python_instance(self, suffix, node, - args=None, kwds=None, newobj=False): - if not args: - args = [] - if not kwds: - kwds = {} - cls = self.find_python_name(suffix, node.start_mark) - if newobj and isinstance(cls, type(self.classobj)) \ - and not args and not kwds: - instance = self.classobj() - instance.__class__ = cls - return instance - elif newobj and isinstance(cls, type): - return cls.__new__(cls, *args, **kwds) - else: - return cls(*args, **kwds) - - def set_python_instance_state(self, instance, state): - if hasattr(instance, '__setstate__'): - instance.__setstate__(state) - else: - slotstate = {} - if isinstance(state, tuple) and len(state) == 2: - state, slotstate = state - if hasattr(instance, '__dict__'): - instance.__dict__.update(state) - elif state: - slotstate.update(state) - for key, value in slotstate.items(): - setattr(object, key, value) - - def construct_python_object(self, suffix, node): - # Format: - # !!python/object:module.name { ... state ... } - instance = self.make_python_instance(suffix, node, newobj=True) - yield instance - deep = hasattr(instance, '__setstate__') - state = self.construct_mapping(node, deep=deep) - self.set_python_instance_state(instance, state) - - def construct_python_object_apply(self, suffix, node, newobj=False): - # Format: - # !!python/object/apply # (or !!python/object/new) - # args: [ ... arguments ... ] - # kwds: { ... keywords ... } - # state: ... state ... - # listitems: [ ... listitems ... ] - # dictitems: { ... dictitems ... } - # or short format: - # !!python/object/apply [ ... arguments ... 
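These multi-constructors are also why load() with the full Loader must never see untrusted input: object/apply hands arbitrary arguments to an arbitrary callable. Two benign illustrations:

    import yaml

    print(yaml.load("!!python/tuple [1, 2]"))                 # (1, 2)
    print(yaml.load("!!python/object/apply:complex [1, 2]"))  # (1+2j)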
] - # The difference between !!python/object/apply and !!python/object/new - # is how an object is created, check make_python_instance for details. - if isinstance(node, SequenceNode): - args = self.construct_sequence(node, deep=True) - kwds = {} - state = {} - listitems = [] - dictitems = {} - else: - value = self.construct_mapping(node, deep=True) - args = value.get('args', []) - kwds = value.get('kwds', {}) - state = value.get('state', {}) - listitems = value.get('listitems', []) - dictitems = value.get('dictitems', {}) - instance = self.make_python_instance(suffix, node, args, kwds, newobj) - if state: - self.set_python_instance_state(instance, state) - if listitems: - instance.extend(listitems) - if dictitems: - for key in dictitems: - instance[key] = dictitems[key] - return instance - - def construct_python_object_new(self, suffix, node): - return self.construct_python_object_apply(suffix, node, newobj=True) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/none', - Constructor.construct_yaml_null) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/bool', - Constructor.construct_yaml_bool) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/str', - Constructor.construct_python_str) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/unicode', - Constructor.construct_python_unicode) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/int', - Constructor.construct_yaml_int) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/long', - Constructor.construct_python_long) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/float', - Constructor.construct_yaml_float) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/complex', - Constructor.construct_python_complex) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/list', - Constructor.construct_yaml_seq) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/tuple', - Constructor.construct_python_tuple) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/dict', - Constructor.construct_yaml_map) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/name:', - Constructor.construct_python_name) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/module:', - Constructor.construct_python_module) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/object:', - Constructor.construct_python_object) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/object/apply:', - Constructor.construct_python_object_apply) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/object/new:', - Constructor.construct_python_object_new) - diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/python.d/python_modules/pyyaml2/cyaml.py deleted file mode 100644 index 68dcd7519..000000000 --- a/python.d/python_modules/pyyaml2/cyaml.py +++ /dev/null @@ -1,85 +0,0 @@ - -__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', - 'CBaseDumper', 'CSafeDumper', 'CDumper'] - -from _yaml import CParser, CEmitter - -from constructor import * - -from serializer import * -from representer import * - -from resolver import * - -class CBaseLoader(CParser, BaseConstructor, BaseResolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class CSafeLoader(CParser, SafeConstructor, Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class CLoader(CParser, 
Constructor, Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - Constructor.__init__(self) - Resolver.__init__(self) - -class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CSafeDumper(CEmitter, SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CDumper(CEmitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/python.d/python_modules/pyyaml2/dumper.py b/python.d/python_modules/pyyaml2/dumper.py deleted file mode 100644 index f811d2c91..000000000 --- a/python.d/python_modules/pyyaml2/dumper.py +++ /dev/null @@ -1,62 +0,0 @@ - -__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] - -from emitter import * -from serializer import * -from representer import * -from resolver import * - -class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, 
explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class Dumper(Emitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/python.d/python_modules/pyyaml2/emitter.py b/python.d/python_modules/pyyaml2/emitter.py deleted file mode 100644 index e5bcdcccb..000000000 --- a/python.d/python_modules/pyyaml2/emitter.py +++ /dev/null @@ -1,1140 +0,0 @@ - -# Emitter expects events obeying the following grammar: -# stream ::= STREAM-START document* STREAM-END -# document ::= DOCUMENT-START node DOCUMENT-END -# node ::= SCALAR | sequence | mapping -# sequence ::= SEQUENCE-START node* SEQUENCE-END -# mapping ::= MAPPING-START (node node)* MAPPING-END - -__all__ = ['Emitter', 'EmitterError'] - -from error import YAMLError -from events import * - -class EmitterError(YAMLError): - pass - -class ScalarAnalysis(object): - def __init__(self, scalar, empty, multiline, - allow_flow_plain, allow_block_plain, - allow_single_quoted, allow_double_quoted, - allow_block): - self.scalar = scalar - self.empty = empty - self.multiline = multiline - self.allow_flow_plain = allow_flow_plain - self.allow_block_plain = allow_block_plain - self.allow_single_quoted = allow_single_quoted - self.allow_double_quoted = allow_double_quoted - self.allow_block = allow_block - -class Emitter(object): - - DEFAULT_TAG_PREFIXES = { - u'!' : u'!', - u'tag:yaml.org,2002:' : u'!!', - } - - def __init__(self, stream, canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - - # The stream should have the methods `write` and possibly `flush`. - self.stream = stream - - # Encoding can be overriden by STREAM-START. - self.encoding = None - - # Emitter is a state machine with a stack of states to handle nested - # structures. - self.states = [] - self.state = self.expect_stream_start - - # Current event and the event queue. - self.events = [] - self.event = None - - # The current indentation level and the stack of previous indents. - self.indents = [] - self.indent = None - - # Flow level. - self.flow_level = 0 - - # Contexts. - self.root_context = False - self.sequence_context = False - self.mapping_context = False - self.simple_key_context = False - - # Characteristics of the last emitted character: - # - current position. - # - is it a whitespace? - # - is it an indention character - # (indentation space, '-', '?', or ':')? 
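The grammar above can be exercised directly through emit(); a sketch using stock PyYAML import paths:

    import yaml
    from yaml.events import (StreamStartEvent, StreamEndEvent,
                             DocumentStartEvent, DocumentEndEvent,
                             ScalarEvent)

    events = [
        StreamStartEvent(),
        DocumentStartEvent(),
        ScalarEvent(anchor=None, tag=None,
                    implicit=(True, True), value=u'hello'),
        DocumentEndEvent(),
        StreamEndEvent(),
    ]
    print(yaml.emit(events))    # roughly: hello\n...\n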
- self.line = 0 - self.column = 0 - self.whitespace = True - self.indention = True - - # Whether the document requires an explicit document indicator - self.open_ended = False - - # Formatting details. - self.canonical = canonical - self.allow_unicode = allow_unicode - self.best_indent = 2 - if indent and 1 < indent < 10: - self.best_indent = indent - self.best_width = 80 - if width and width > self.best_indent*2: - self.best_width = width - self.best_line_break = u'\n' - if line_break in [u'\r', u'\n', u'\r\n']: - self.best_line_break = line_break - - # Tag prefixes. - self.tag_prefixes = None - - # Prepared anchor and tag. - self.prepared_anchor = None - self.prepared_tag = None - - # Scalar analysis and style. - self.analysis = None - self.style = None - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def emit(self, event): - self.events.append(event) - while not self.need_more_events(): - self.event = self.events.pop(0) - self.state() - self.event = None - - # In some cases, we wait for a few next events before emitting. - - def need_more_events(self): - if not self.events: - return True - event = self.events[0] - if isinstance(event, DocumentStartEvent): - return self.need_events(1) - elif isinstance(event, SequenceStartEvent): - return self.need_events(2) - elif isinstance(event, MappingStartEvent): - return self.need_events(3) - else: - return False - - def need_events(self, count): - level = 0 - for event in self.events[1:]: - if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): - level += 1 - elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): - level -= 1 - elif isinstance(event, StreamEndEvent): - level = -1 - if level < 0: - return False - return (len(self.events) < count+1) - - def increase_indent(self, flow=False, indentless=False): - self.indents.append(self.indent) - if self.indent is None: - if flow: - self.indent = self.best_indent - else: - self.indent = 0 - elif not indentless: - self.indent += self.best_indent - - # States. - - # Stream handlers. - - def expect_stream_start(self): - if isinstance(self.event, StreamStartEvent): - if self.event.encoding and not getattr(self.stream, 'encoding', None): - self.encoding = self.event.encoding - self.write_stream_start() - self.state = self.expect_first_document_start - else: - raise EmitterError("expected StreamStartEvent, but got %s" - % self.event) - - def expect_nothing(self): - raise EmitterError("expected nothing, but got %s" % self.event) - - # Document handlers. 
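So the emitter is a push-down automaton: self.state is the current handler, and self.states holds the continuations to resume once a nested node is done. A stripped-down sketch of the same pattern, with hypothetical names:

    class Machine(object):
        def __init__(self):
            self.states = []              # continuation stack
            self.state = self.expect_open

        def feed(self, token):
            self.token = token
            self.state()                  # handler picks the next state

        def expect_open(self):
            self.states.append(self.expect_close)  # resume here later
            self.state = self.expect_item

        def expect_item(self):
            self.state = self.states.pop()         # ascend

        def expect_close(self):
            print('closed after %r' % self.token)

    m = Machine()
    for t in ['[', 'x', ']']:
        m.feed(t)             # prints: closed after ']'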
- - def expect_first_document_start(self): - return self.expect_document_start(first=True) - - def expect_document_start(self, first=False): - if isinstance(self.event, DocumentStartEvent): - if (self.event.version or self.event.tags) and self.open_ended: - self.write_indicator(u'...', True) - self.write_indent() - if self.event.version: - version_text = self.prepare_version(self.event.version) - self.write_version_directive(version_text) - self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() - if self.event.tags: - handles = self.event.tags.keys() - handles.sort() - for handle in handles: - prefix = self.event.tags[handle] - self.tag_prefixes[prefix] = handle - handle_text = self.prepare_tag_handle(handle) - prefix_text = self.prepare_tag_prefix(prefix) - self.write_tag_directive(handle_text, prefix_text) - implicit = (first and not self.event.explicit and not self.canonical - and not self.event.version and not self.event.tags - and not self.check_empty_document()) - if not implicit: - self.write_indent() - self.write_indicator(u'---', True) - if self.canonical: - self.write_indent() - self.state = self.expect_document_root - elif isinstance(self.event, StreamEndEvent): - if self.open_ended: - self.write_indicator(u'...', True) - self.write_indent() - self.write_stream_end() - self.state = self.expect_nothing - else: - raise EmitterError("expected DocumentStartEvent, but got %s" - % self.event) - - def expect_document_end(self): - if isinstance(self.event, DocumentEndEvent): - self.write_indent() - if self.event.explicit: - self.write_indicator(u'...', True) - self.write_indent() - self.flush_stream() - self.state = self.expect_document_start - else: - raise EmitterError("expected DocumentEndEvent, but got %s" - % self.event) - - def expect_document_root(self): - self.states.append(self.expect_document_end) - self.expect_node(root=True) - - # Node handlers. - - def expect_node(self, root=False, sequence=False, mapping=False, - simple_key=False): - self.root_context = root - self.sequence_context = sequence - self.mapping_context = mapping - self.simple_key_context = simple_key - if isinstance(self.event, AliasEvent): - self.expect_alias() - elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): - self.process_anchor(u'&') - self.process_tag() - if isinstance(self.event, ScalarEvent): - self.expect_scalar() - elif isinstance(self.event, SequenceStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_sequence(): - self.expect_flow_sequence() - else: - self.expect_block_sequence() - elif isinstance(self.event, MappingStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_mapping(): - self.expect_flow_mapping() - else: - self.expect_block_mapping() - else: - raise EmitterError("expected NodeEvent, but got %s" % self.event) - - def expect_alias(self): - if self.event.anchor is None: - raise EmitterError("anchor is not specified for alias") - self.process_anchor(u'*') - self.state = self.states.pop() - - def expect_scalar(self): - self.increase_indent(flow=True) - self.process_scalar() - self.indent = self.indents.pop() - self.state = self.states.pop() - - # Flow sequence handlers. 
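expect_node()'s flow-versus-block dispatch is what default_flow_style ultimately selects between:

    import yaml

    data = {'hosts': ['a', 'b'], 'port': 19999}
    print(yaml.dump(data, default_flow_style=True))
    # {hosts: [a, b], port: 19999}
    print(yaml.dump(data, default_flow_style=False))
    # hosts:
    # - a
    # - b
    # port: 19999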
- - def expect_flow_sequence(self): - self.write_indicator(u'[', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_sequence_item - - def expect_first_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator(u']', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - def expect_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(u',', False) - self.write_indent() - self.write_indicator(u']', False) - self.state = self.states.pop() - else: - self.write_indicator(u',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - # Flow mapping handlers. - - def expect_flow_mapping(self): - self.write_indicator(u'{', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_mapping_key - - def expect_first_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator(u'}', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator(u'?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(u',', False) - self.write_indent() - self.write_indicator(u'}', False) - self.state = self.states.pop() - else: - self.write_indicator(u',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator(u'?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_simple_value(self): - self.write_indicator(u':', False) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - def expect_flow_mapping_value(self): - if self.canonical or self.column > self.best_width: - self.write_indent() - self.write_indicator(u':', True) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - # Block sequence handlers. 
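canonical=True routes everything through these flow handlers, with explicit tags and '?' keys; the output shape is roughly:

    import yaml

    print(yaml.dump({'a': [1, 2]}, canonical=True))
    # ---
    # !!map {
    #   ? !!str "a"
    #   : !!seq [
    #     !!int "1",
    #     !!int "2",
    #   ],
    # }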
- - def expect_block_sequence(self): - indentless = (self.mapping_context and not self.indention) - self.increase_indent(flow=False, indentless=indentless) - self.state = self.expect_first_block_sequence_item - - def expect_first_block_sequence_item(self): - return self.expect_block_sequence_item(first=True) - - def expect_block_sequence_item(self, first=False): - if not first and isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - self.write_indicator(u'-', True, indention=True) - self.states.append(self.expect_block_sequence_item) - self.expect_node(sequence=True) - - # Block mapping handlers. - - def expect_block_mapping(self): - self.increase_indent(flow=False) - self.state = self.expect_first_block_mapping_key - - def expect_first_block_mapping_key(self): - return self.expect_block_mapping_key(first=True) - - def expect_block_mapping_key(self, first=False): - if not first and isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - if self.check_simple_key(): - self.states.append(self.expect_block_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator(u'?', True, indention=True) - self.states.append(self.expect_block_mapping_value) - self.expect_node(mapping=True) - - def expect_block_mapping_simple_value(self): - self.write_indicator(u':', False) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - def expect_block_mapping_value(self): - self.write_indent() - self.write_indicator(u':', True, indention=True) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - # Checkers. - - def check_empty_sequence(self): - return (isinstance(self.event, SequenceStartEvent) and self.events - and isinstance(self.events[0], SequenceEndEvent)) - - def check_empty_mapping(self): - return (isinstance(self.event, MappingStartEvent) and self.events - and isinstance(self.events[0], MappingEndEvent)) - - def check_empty_document(self): - if not isinstance(self.event, DocumentStartEvent) or not self.events: - return False - event = self.events[0] - return (isinstance(event, ScalarEvent) and event.anchor is None - and event.tag is None and event.implicit and event.value == u'') - - def check_simple_key(self): - length = 0 - if isinstance(self.event, NodeEvent) and self.event.anchor is not None: - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - length += len(self.prepared_anchor) - if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ - and self.event.tag is not None: - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(self.event.tag) - length += len(self.prepared_tag) - if isinstance(self.event, ScalarEvent): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - length += len(self.analysis.scalar) - return (length < 128 and (isinstance(self.event, AliasEvent) - or (isinstance(self.event, ScalarEvent) - and not self.analysis.empty and not self.analysis.multiline) - or self.check_empty_sequence() or self.check_empty_mapping())) - - # Anchor, Tag, and Scalar processors. 
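The 128-character cap in check_simple_key() is observable: an over-long (or multiline) key falls back to the explicit '?' form handled above.

    import yaml

    print(yaml.dump({'k' * 130: 1}, default_flow_style=False))
    # ? kkkk...kkkk        (130 chars, too long for a simple key)
    # : 1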
- - def process_anchor(self, indicator): - if self.event.anchor is None: - self.prepared_anchor = None - return - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - if self.prepared_anchor: - self.write_indicator(indicator+self.prepared_anchor, True) - self.prepared_anchor = None - - def process_tag(self): - tag = self.event.tag - if isinstance(self.event, ScalarEvent): - if self.style is None: - self.style = self.choose_scalar_style() - if ((not self.canonical or tag is None) and - ((self.style == '' and self.event.implicit[0]) - or (self.style != '' and self.event.implicit[1]))): - self.prepared_tag = None - return - if self.event.implicit[0] and tag is None: - tag = u'!' - self.prepared_tag = None - else: - if (not self.canonical or tag is None) and self.event.implicit: - self.prepared_tag = None - return - if tag is None: - raise EmitterError("tag is not specified") - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(tag) - if self.prepared_tag: - self.write_indicator(self.prepared_tag, True) - self.prepared_tag = None - - def choose_scalar_style(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.event.style == '"' or self.canonical: - return '"' - if not self.event.style and self.event.implicit[0]: - if (not (self.simple_key_context and - (self.analysis.empty or self.analysis.multiline)) - and (self.flow_level and self.analysis.allow_flow_plain - or (not self.flow_level and self.analysis.allow_block_plain))): - return '' - if self.event.style and self.event.style in '|>': - if (not self.flow_level and not self.simple_key_context - and self.analysis.allow_block): - return self.event.style - if not self.event.style or self.event.style == '\'': - if (self.analysis.allow_single_quoted and - not (self.simple_key_context and self.analysis.multiline)): - return '\'' - return '"' - - def process_scalar(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.style is None: - self.style = self.choose_scalar_style() - split = (not self.simple_key_context) - #if self.analysis.multiline and split \ - # and (not self.style or self.style in '\'\"'): - # self.write_indent() - if self.style == '"': - self.write_double_quoted(self.analysis.scalar, split) - elif self.style == '\'': - self.write_single_quoted(self.analysis.scalar, split) - elif self.style == '>': - self.write_folded(self.analysis.scalar) - elif self.style == '|': - self.write_literal(self.analysis.scalar) - else: - self.write_plain(self.analysis.scalar, split) - self.analysis = None - self.style = None - - # Analyzers. - - def prepare_version(self, version): - major, minor = version - if major != 1: - raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) - return u'%d.%d' % (major, minor) - - def prepare_tag_handle(self, handle): - if not handle: - raise EmitterError("tag handle must not be empty") - if handle[0] != u'!' 
or handle[-1] != u'!': - raise EmitterError("tag handle must start and end with '!': %r" - % (handle.encode('utf-8'))) - for ch in handle[1:-1]: - if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_'): - raise EmitterError("invalid character %r in the tag handle: %r" - % (ch.encode('utf-8'), handle.encode('utf-8'))) - return handle - - def prepare_tag_prefix(self, prefix): - if not prefix: - raise EmitterError("tag prefix must not be empty") - chunks = [] - start = end = 0 - if prefix[0] == u'!': - end = 1 - while end < len(prefix): - ch = prefix[end] - if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-;/?!:@&=+$,_.~*\'()[]': - end += 1 - else: - if start < end: - chunks.append(prefix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append(u'%%%02X' % ord(ch)) - if start < end: - chunks.append(prefix[start:end]) - return u''.join(chunks) - - def prepare_tag(self, tag): - if not tag: - raise EmitterError("tag must not be empty") - if tag == u'!': - return tag - handle = None - suffix = tag - prefixes = self.tag_prefixes.keys() - prefixes.sort() - for prefix in prefixes: - if tag.startswith(prefix) \ - and (prefix == u'!' or len(prefix) < len(tag)): - handle = self.tag_prefixes[prefix] - suffix = tag[len(prefix):] - chunks = [] - start = end = 0 - while end < len(suffix): - ch = suffix[end] - if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-;/?:@&=+$,_.~*\'()[]' \ - or (ch == u'!' and handle != u'!'): - end += 1 - else: - if start < end: - chunks.append(suffix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append(u'%%%02X' % ord(ch)) - if start < end: - chunks.append(suffix[start:end]) - suffix_text = u''.join(chunks) - if handle: - return u'%s%s' % (handle, suffix_text) - else: - return u'!<%s>' % suffix_text - - def prepare_anchor(self, anchor): - if not anchor: - raise EmitterError("anchor must not be empty") - for ch in anchor: - if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_'): - raise EmitterError("invalid character %r in the anchor: %r" - % (ch.encode('utf-8'), anchor.encode('utf-8'))) - return anchor - - def analyze_scalar(self, scalar): - - # Empty scalar is a special case. - if not scalar: - return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, - allow_flow_plain=False, allow_block_plain=True, - allow_single_quoted=True, allow_double_quoted=True, - allow_block=False) - - # Indicators and special characters. - block_indicators = False - flow_indicators = False - line_breaks = False - special_characters = False - - # Important whitespace combinations. - leading_space = False - leading_break = False - trailing_space = False - trailing_break = False - break_space = False - space_break = False - - # Check document indicators. - if scalar.startswith(u'---') or scalar.startswith(u'...'): - block_indicators = True - flow_indicators = True - - # First character or preceded by a whitespace. - preceeded_by_whitespace = True - - # Last character or followed by a whitespace. - followed_by_whitespace = (len(scalar) == 1 or - scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') - - # The previous character is a space. - previous_space = False - - # The previous character is a break. - previous_break = False - - index = 0 - while index < len(scalar): - ch = scalar[index] - - # Check for indicators. - if index == 0: - # Leading indicators are special characters. 
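This indicator analysis decides which strings may stay plain; anything that could be mistaken for YAML syntax gets quoted:

    import yaml

    for s in ['plain', '- item', 'a: b', '%literal']:
        print(repr(yaml.dump(s)))
    # 'plain\n...\n'     plain style allowed
    # "'- item'\n"       leading '- ' is a block indicator
    # "'a: b'\n"         ': ' would start a mapping
    # "'%literal'\n"     leading '%' marks a directive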
- if ch in u'#,[]{}&*!|>\'\"%@`': - flow_indicators = True - block_indicators = True - if ch in u'?:': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == u'-' and followed_by_whitespace: - flow_indicators = True - block_indicators = True - else: - # Some indicators cannot appear within a scalar as well. - if ch in u',?[]{}': - flow_indicators = True - if ch == u':': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == u'#' and preceeded_by_whitespace: - flow_indicators = True - block_indicators = True - - # Check for line breaks, special, and unicode characters. - if ch in u'\n\x85\u2028\u2029': - line_breaks = True - if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): - if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' - or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': - unicode_characters = True - if not self.allow_unicode: - special_characters = True - else: - special_characters = True - - # Detect important whitespace combinations. - if ch == u' ': - if index == 0: - leading_space = True - if index == len(scalar)-1: - trailing_space = True - if previous_break: - break_space = True - previous_space = True - previous_break = False - elif ch in u'\n\x85\u2028\u2029': - if index == 0: - leading_break = True - if index == len(scalar)-1: - trailing_break = True - if previous_space: - space_break = True - previous_space = False - previous_break = True - else: - previous_space = False - previous_break = False - - # Prepare for the next character. - index += 1 - preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') - followed_by_whitespace = (index+1 >= len(scalar) or - scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') - - # Let's decide what styles are allowed. - allow_flow_plain = True - allow_block_plain = True - allow_single_quoted = True - allow_double_quoted = True - allow_block = True - - # Leading and trailing whitespaces are bad for plain scalars. - if (leading_space or leading_break - or trailing_space or trailing_break): - allow_flow_plain = allow_block_plain = False - - # We do not permit trailing spaces for block scalars. - if trailing_space: - allow_block = False - - # Spaces at the beginning of a new line are only acceptable for block - # scalars. - if break_space: - allow_flow_plain = allow_block_plain = allow_single_quoted = False - - # Spaces followed by breaks, as well as special character are only - # allowed for double quoted scalars. - if space_break or special_characters: - allow_flow_plain = allow_block_plain = \ - allow_single_quoted = allow_block = False - - # Although the plain scalar writer supports breaks, we never emit - # multiline plain scalars. - if line_breaks: - allow_flow_plain = allow_block_plain = False - - # Flow indicators are forbidden for flow plain scalars. - if flow_indicators: - allow_flow_plain = False - - # Block indicators are forbidden for block plain scalars. - if block_indicators: - allow_block_plain = False - - return ScalarAnalysis(scalar=scalar, - empty=False, multiline=line_breaks, - allow_flow_plain=allow_flow_plain, - allow_block_plain=allow_block_plain, - allow_single_quoted=allow_single_quoted, - allow_double_quoted=allow_double_quoted, - allow_block=allow_block) - - # Writers. - - def flush_stream(self): - if hasattr(self.stream, 'flush'): - self.stream.flush() - - def write_stream_start(self): - # Write BOM if needed. 
- if self.encoding and self.encoding.startswith('utf-16'): - self.stream.write(u'\uFEFF'.encode(self.encoding)) - - def write_stream_end(self): - self.flush_stream() - - def write_indicator(self, indicator, need_whitespace, - whitespace=False, indention=False): - if self.whitespace or not need_whitespace: - data = indicator - else: - data = u' '+indicator - self.whitespace = whitespace - self.indention = self.indention and indention - self.column += len(data) - self.open_ended = False - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_indent(self): - indent = self.indent or 0 - if not self.indention or self.column > indent \ - or (self.column == indent and not self.whitespace): - self.write_line_break() - if self.column < indent: - self.whitespace = True - data = u' '*(indent-self.column) - self.column = indent - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_line_break(self, data=None): - if data is None: - data = self.best_line_break - self.whitespace = True - self.indention = True - self.line += 1 - self.column = 0 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_version_directive(self, version_text): - data = u'%%YAML %s' % version_text - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - def write_tag_directive(self, handle_text, prefix_text): - data = u'%%TAG %s %s' % (handle_text, prefix_text) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - # Scalar streams. - - def write_single_quoted(self, text, split=True): - self.write_indicator(u'\'', True) - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch is None or ch != u' ': - if start+1 == end and self.column > self.best_width and split \ - and start != 0 and end != len(text): - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch is None or ch not in u'\n\x85\u2028\u2029': - if text[start] == u'\n': - self.write_line_break() - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - start = end - else: - if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch == u'\'': - data = u'\'\'' - self.column += 2 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end + 1 - if ch is not None: - spaces = (ch == u' ') - breaks = (ch in u'\n\x85\u2028\u2029') - end += 1 - self.write_indicator(u'\'', False) - - ESCAPE_REPLACEMENTS = { - u'\0': u'0', - u'\x07': u'a', - u'\x08': u'b', - u'\x09': u't', - u'\x0A': u'n', - u'\x0B': u'v', - u'\x0C': u'f', - u'\x0D': u'r', - u'\x1B': u'e', - u'\"': u'\"', - u'\\': u'\\', - u'\x85': u'N', - u'\xA0': u'_', - u'\u2028': u'L', - u'\u2029': u'P', - } - - def write_double_quoted(self, text, split=True): - self.write_indicator(u'"', True) - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ - or not (u'\x20' <= ch <= u'\x7E' - or 
(self.allow_unicode - and (u'\xA0' <= ch <= u'\uD7FF' - or u'\uE000' <= ch <= u'\uFFFD'))): - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - if ch in self.ESCAPE_REPLACEMENTS: - data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] - elif ch <= u'\xFF': - data = u'\\x%02X' % ord(ch) - elif ch <= u'\uFFFF': - data = u'\\u%04X' % ord(ch) - else: - data = u'\\U%08X' % ord(ch) - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end+1 - if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ - and self.column+(end-start) > self.best_width and split: - data = text[start:end]+u'\\' - if start < end: - start = end - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_indent() - self.whitespace = False - self.indention = False - if text[start] == u' ': - data = u'\\' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - end += 1 - self.write_indicator(u'"', False) - - def determine_block_hints(self, text): - hints = u'' - if text: - if text[0] in u' \n\x85\u2028\u2029': - hints += unicode(self.best_indent) - if text[-1] not in u'\n\x85\u2028\u2029': - hints += u'-' - elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': - hints += u'+' - return hints - - def write_folded(self, text): - hints = self.determine_block_hints(text) - self.write_indicator(u'>'+hints, True) - if hints[-1:] == u'+': - self.open_ended = True - self.write_line_break() - leading_space = True - spaces = False - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in u'\n\x85\u2028\u2029': - if not leading_space and ch is not None and ch != u' ' \ - and text[start] == u'\n': - self.write_line_break() - leading_space = (ch == u' ') - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - elif spaces: - if ch != u' ': - if start+1 == end and self.column > self.best_width: - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - else: - if ch is None or ch in u' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - self.write_line_break() - start = end - if ch is not None: - breaks = (ch in u'\n\x85\u2028\u2029') - spaces = (ch == u' ') - end += 1 - - def write_literal(self, text): - hints = self.determine_block_hints(text) - self.write_indicator(u'|'+hints, True) - if hints[-1:] == u'+': - self.open_ended = True - self.write_line_break() - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in u'\n\x85\u2028\u2029': - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - else: - if ch is None or ch in u'\n\x85\u2028\u2029': - data = text[start:end] - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - 
self.write_line_break() - start = end - if ch is not None: - breaks = (ch in u'\n\x85\u2028\u2029') - end += 1 - - def write_plain(self, text, split=True): - if self.root_context: - self.open_ended = True - if not text: - return - if not self.whitespace: - data = u' ' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.whitespace = False - self.indention = False - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch != u' ': - if start+1 == end and self.column > self.best_width and split: - self.write_indent() - self.whitespace = False - self.indention = False - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch not in u'\n\x85\u2028\u2029': - if text[start] == u'\n': - self.write_line_break() - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - self.whitespace = False - self.indention = False - start = end - else: - if ch is None or ch in u' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - spaces = (ch == u' ') - breaks = (ch in u'\n\x85\u2028\u2029') - end += 1 - diff --git a/python.d/python_modules/pyyaml2/error.py b/python.d/python_modules/pyyaml2/error.py deleted file mode 100644 index 577686db5..000000000 --- a/python.d/python_modules/pyyaml2/error.py +++ /dev/null @@ -1,75 +0,0 @@ - -__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] - -class Mark(object): - - def __init__(self, name, index, line, column, buffer, pointer): - self.name = name - self.index = index - self.line = line - self.column = column - self.buffer = buffer - self.pointer = pointer - - def get_snippet(self, indent=4, max_length=75): - if self.buffer is None: - return None - head = '' - start = self.pointer - while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': - start -= 1 - if self.pointer-start > max_length/2-1: - head = ' ... ' - start += 5 - break - tail = '' - end = self.pointer - while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': - end += 1 - if end-self.pointer > max_length/2-1: - tail = ' ... 
' - end -= 5 - break - snippet = self.buffer[start:end].encode('utf-8') - return ' '*indent + head + snippet + tail + '\n' \ - + ' '*(indent+self.pointer-start+len(head)) + '^' - - def __str__(self): - snippet = self.get_snippet() - where = " in \"%s\", line %d, column %d" \ - % (self.name, self.line+1, self.column+1) - if snippet is not None: - where += ":\n"+snippet - return where - -class YAMLError(Exception): - pass - -class MarkedYAMLError(YAMLError): - - def __init__(self, context=None, context_mark=None, - problem=None, problem_mark=None, note=None): - self.context = context - self.context_mark = context_mark - self.problem = problem - self.problem_mark = problem_mark - self.note = note - - def __str__(self): - lines = [] - if self.context is not None: - lines.append(self.context) - if self.context_mark is not None \ - and (self.problem is None or self.problem_mark is None - or self.context_mark.name != self.problem_mark.name - or self.context_mark.line != self.problem_mark.line - or self.context_mark.column != self.problem_mark.column): - lines.append(str(self.context_mark)) - if self.problem is not None: - lines.append(self.problem) - if self.problem_mark is not None: - lines.append(str(self.problem_mark)) - if self.note is not None: - lines.append(self.note) - return '\n'.join(lines) - diff --git a/python.d/python_modules/pyyaml2/events.py b/python.d/python_modules/pyyaml2/events.py deleted file mode 100644 index f79ad389c..000000000 --- a/python.d/python_modules/pyyaml2/events.py +++ /dev/null @@ -1,86 +0,0 @@ - -# Abstract classes. - -class Event(object): - def __init__(self, start_mark=None, end_mark=None): - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] - if hasattr(self, key)] - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - -class NodeEvent(Event): - def __init__(self, anchor, start_mark=None, end_mark=None): - self.anchor = anchor - self.start_mark = start_mark - self.end_mark = end_mark - -class CollectionStartEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, - flow_style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class CollectionEndEvent(Event): - pass - -# Implementations. 
- -class StreamStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndEvent(Event): - pass - -class DocumentStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None, version=None, tags=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - self.version = version - self.tags = tags - -class DocumentEndEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - -class AliasEvent(NodeEvent): - pass - -class ScalarEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, value, - start_mark=None, end_mark=None, style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class SequenceStartEvent(CollectionStartEvent): - pass - -class SequenceEndEvent(CollectionEndEvent): - pass - -class MappingStartEvent(CollectionStartEvent): - pass - -class MappingEndEvent(CollectionEndEvent): - pass - diff --git a/python.d/python_modules/pyyaml2/loader.py b/python.d/python_modules/pyyaml2/loader.py deleted file mode 100644 index 293ff467b..000000000 --- a/python.d/python_modules/pyyaml2/loader.py +++ /dev/null @@ -1,40 +0,0 @@ - -__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] - -from reader import * -from scanner import * -from parser import * -from composer import * -from constructor import * -from resolver import * - -class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) - diff --git a/python.d/python_modules/pyyaml2/nodes.py b/python.d/python_modules/pyyaml2/nodes.py deleted file mode 100644 index c4f070c41..000000000 --- a/python.d/python_modules/pyyaml2/nodes.py +++ /dev/null @@ -1,49 +0,0 @@ - -class Node(object): - def __init__(self, tag, value, start_mark, end_mark): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - value = self.value - #if isinstance(value, list): - # if len(value) == 0: - # value = '<empty>' - # elif len(value) == 1: - # value = '<1 item>' - # else: - # value = '<%d items>' % len(value) - #else: - # if len(value) > 75: - # value = repr(value[:70]+u' ... 
') - # else: - # value = repr(value) - value = repr(value) - return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) - -class ScalarNode(Node): - id = 'scalar' - def __init__(self, tag, value, - start_mark=None, end_mark=None, style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class CollectionNode(Node): - def __init__(self, tag, value, - start_mark=None, end_mark=None, flow_style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class SequenceNode(CollectionNode): - id = 'sequence' - -class MappingNode(CollectionNode): - id = 'mapping' - diff --git a/python.d/python_modules/pyyaml2/parser.py b/python.d/python_modules/pyyaml2/parser.py deleted file mode 100644 index f9e3057f3..000000000 --- a/python.d/python_modules/pyyaml2/parser.py +++ /dev/null @@ -1,589 +0,0 @@ - -# The following YAML grammar is LL(1) and is parsed by a recursive descent -# parser. -# -# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -# implicit_document ::= block_node DOCUMENT-END* -# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -# block_node_or_indentless_sequence ::= -# ALIAS -# | properties (block_content | indentless_block_sequence)? -# | block_content -# | indentless_block_sequence -# block_node ::= ALIAS -# | properties block_content? -# | block_content -# flow_node ::= ALIAS -# | properties flow_content? -# | flow_content -# properties ::= TAG ANCHOR? | ANCHOR TAG? -# block_content ::= block_collection | flow_collection | SCALAR -# flow_content ::= flow_collection | SCALAR -# block_collection ::= block_sequence | block_mapping -# flow_collection ::= flow_sequence | flow_mapping -# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -# block_mapping ::= BLOCK-MAPPING_START -# ((KEY block_node_or_indentless_sequence?)? -# (VALUE block_node_or_indentless_sequence?)?)* -# BLOCK-END -# flow_sequence ::= FLOW-SEQUENCE-START -# (flow_sequence_entry FLOW-ENTRY)* -# flow_sequence_entry? -# FLOW-SEQUENCE-END -# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -# flow_mapping ::= FLOW-MAPPING-START -# (flow_mapping_entry FLOW-ENTRY)* -# flow_mapping_entry? -# FLOW-MAPPING-END -# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
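The grammar above maps directly onto the event stream the parser emits: entering or leaving a collection production yields a *StartEvent/*EndEvent, and terminals become ScalarEvent or AliasEvent. A minimal way to observe that stream, assuming a PyYAML-compatible `yaml` package is importable (input document chosen for illustration):

    import yaml

    # Each printed event corresponds to one step of the recursive-descent
    # derivation described by the grammar above.
    for event in yaml.parse(u"--- {a: [1, 2]}"):
        print(event)
    # Expected sequence: StreamStartEvent, DocumentStartEvent,
    # MappingStartEvent, ScalarEvent(a), SequenceStartEvent, ScalarEvent(1),
    # ScalarEvent(2), SequenceEndEvent, MappingEndEvent, DocumentEndEvent,
    # StreamEndEvent.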
-# -# FIRST sets: -# -# stream: { STREAM-START } -# explicit_document: { DIRECTIVE DOCUMENT-START } -# implicit_document: FIRST(block_node) -# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_sequence: { BLOCK-SEQUENCE-START } -# block_mapping: { BLOCK-MAPPING-START } -# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } -# indentless_sequence: { ENTRY } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_sequence: { FLOW-SEQUENCE-START } -# flow_mapping: { FLOW-MAPPING-START } -# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } -# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } - -__all__ = ['Parser', 'ParserError'] - -from error import MarkedYAMLError -from tokens import * -from events import * -from scanner import * - -class ParserError(MarkedYAMLError): - pass - -class Parser(object): - # Since writing a recursive-descendant parser is a straightforward task, we - # do not give many comments here. - - DEFAULT_TAGS = { - u'!': u'!', - u'!!': u'tag:yaml.org,2002:', - } - - def __init__(self): - self.current_event = None - self.yaml_version = None - self.tag_handles = {} - self.states = [] - self.marks = [] - self.state = self.parse_stream_start - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def check_event(self, *choices): - # Check the type of the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - if self.current_event is not None: - if not choices: - return True - for choice in choices: - if isinstance(self.current_event, choice): - return True - return False - - def peek_event(self): - # Get the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - return self.current_event - - def get_event(self): - # Get the next event and proceed further. - if self.current_event is None: - if self.state: - self.current_event = self.state() - value = self.current_event - self.current_event = None - return value - - # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - # implicit_document ::= block_node DOCUMENT-END* - # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - - def parse_stream_start(self): - - # Parse the stream start. - token = self.get_token() - event = StreamStartEvent(token.start_mark, token.end_mark, - encoding=token.encoding) - - # Prepare the next state. - self.state = self.parse_implicit_document_start - - return event - - def parse_implicit_document_start(self): - - # Parse an implicit document. - if not self.check_token(DirectiveToken, DocumentStartToken, - StreamEndToken): - self.tag_handles = self.DEFAULT_TAGS - token = self.peek_token() - start_mark = end_mark = token.start_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=False) - - # Prepare the next state. 
- self.states.append(self.parse_document_end) - self.state = self.parse_block_node - - return event - - else: - return self.parse_document_start() - - def parse_document_start(self): - - # Parse any extra document end indicators. - while self.check_token(DocumentEndToken): - self.get_token() - - # Parse an explicit document. - if not self.check_token(StreamEndToken): - token = self.peek_token() - start_mark = token.start_mark - version, tags = self.process_directives() - if not self.check_token(DocumentStartToken): - raise ParserError(None, None, - "expected '<document start>', but found %r" - % self.peek_token().id, - self.peek_token().start_mark) - token = self.get_token() - end_mark = token.end_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=True, version=version, tags=tags) - self.states.append(self.parse_document_end) - self.state = self.parse_document_content - else: - # Parse the end of the stream. - token = self.get_token() - event = StreamEndEvent(token.start_mark, token.end_mark) - assert not self.states - assert not self.marks - self.state = None - return event - - def parse_document_end(self): - - # Parse the document end. - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - if self.check_token(DocumentEndToken): - token = self.get_token() - end_mark = token.end_mark - explicit = True - event = DocumentEndEvent(start_mark, end_mark, - explicit=explicit) - - # Prepare the next state. - self.state = self.parse_document_start - - return event - - def parse_document_content(self): - if self.check_token(DirectiveToken, - DocumentStartToken, DocumentEndToken, StreamEndToken): - event = self.process_empty_scalar(self.peek_token().start_mark) - self.state = self.states.pop() - return event - else: - return self.parse_block_node() - - def process_directives(self): - self.yaml_version = None - self.tag_handles = {} - while self.check_token(DirectiveToken): - token = self.get_token() - if token.name == u'YAML': - if self.yaml_version is not None: - raise ParserError(None, None, - "found duplicate YAML directive", token.start_mark) - major, minor = token.value - if major != 1: - raise ParserError(None, None, - "found incompatible YAML document (version 1.* is required)", - token.start_mark) - self.yaml_version = token.value - elif token.name == u'TAG': - handle, prefix = token.value - if handle in self.tag_handles: - raise ParserError(None, None, - "duplicate tag handle %r" % handle.encode('utf-8'), - token.start_mark) - self.tag_handles[handle] = prefix - if self.tag_handles: - value = self.yaml_version, self.tag_handles.copy() - else: - value = self.yaml_version, None - for key in self.DEFAULT_TAGS: - if key not in self.tag_handles: - self.tag_handles[key] = self.DEFAULT_TAGS[key] - return value - - # block_node_or_indentless_sequence ::= ALIAS - # | properties (block_content | indentless_block_sequence)? - # | block_content - # | indentless_block_sequence - # block_node ::= ALIAS - # | properties block_content? - # | block_content - # flow_node ::= ALIAS - # | properties flow_content? - # | flow_content - # properties ::= TAG ANCHOR? | ANCHOR TAG? 
- # block_content ::= block_collection | flow_collection | SCALAR - # flow_content ::= flow_collection | SCALAR - # block_collection ::= block_sequence | block_mapping - # flow_collection ::= flow_sequence | flow_mapping - - def parse_block_node(self): - return self.parse_node(block=True) - - def parse_flow_node(self): - return self.parse_node() - - def parse_block_node_or_indentless_sequence(self): - return self.parse_node(block=True, indentless_sequence=True) - - def parse_node(self, block=False, indentless_sequence=False): - if self.check_token(AliasToken): - token = self.get_token() - event = AliasEvent(token.value, token.start_mark, token.end_mark) - self.state = self.states.pop() - else: - anchor = None - tag = None - start_mark = end_mark = tag_mark = None - if self.check_token(AnchorToken): - token = self.get_token() - start_mark = token.start_mark - end_mark = token.end_mark - anchor = token.value - if self.check_token(TagToken): - token = self.get_token() - tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - elif self.check_token(TagToken): - token = self.get_token() - start_mark = tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - if self.check_token(AnchorToken): - token = self.get_token() - end_mark = token.end_mark - anchor = token.value - if tag is not None: - handle, suffix = tag - if handle is not None: - if handle not in self.tag_handles: - raise ParserError("while parsing a node", start_mark, - "found undefined tag handle %r" % handle.encode('utf-8'), - tag_mark) - tag = self.tag_handles[handle]+suffix - else: - tag = suffix - #if tag == u'!': - # raise ParserError("while parsing a node", start_mark, - # "found non-specific tag '!'", tag_mark, - # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") - if start_mark is None: - start_mark = end_mark = self.peek_token().start_mark - event = None - implicit = (tag is None or tag == u'!') - if indentless_sequence and self.check_token(BlockEntryToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark) - self.state = self.parse_indentless_sequence_entry - else: - if self.check_token(ScalarToken): - token = self.get_token() - end_mark = token.end_mark - if (token.plain and tag is None) or tag == u'!': - implicit = (True, False) - elif tag is None: - implicit = (False, True) - else: - implicit = (False, False) - event = ScalarEvent(anchor, tag, implicit, token.value, - start_mark, end_mark, style=token.style) - self.state = self.states.pop() - elif self.check_token(FlowSequenceStartToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_sequence_first_entry - elif self.check_token(FlowMappingStartToken): - end_mark = self.peek_token().end_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_mapping_first_key - elif block and self.check_token(BlockSequenceStartToken): - end_mark = self.peek_token().start_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - self.state = self.parse_block_sequence_first_entry - elif block and self.check_token(BlockMappingStartToken): - end_mark = self.peek_token().start_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - self.state = self.parse_block_mapping_first_key - 
elif anchor is not None or tag is not None: - # Empty scalars are allowed even if a tag or an anchor is - # specified. - event = ScalarEvent(anchor, tag, (implicit, False), u'', - start_mark, end_mark) - self.state = self.states.pop() - else: - if block: - node = 'block' - else: - node = 'flow' - token = self.peek_token() - raise ParserError("while parsing a %s node" % node, start_mark, - "expected the node content, but found %r" % token.id, - token.start_mark) - return event - - # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - - def parse_block_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_block_sequence_entry() - - def parse_block_sequence_entry(self): - if self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, BlockEndToken): - self.states.append(self.parse_block_sequence_entry) - return self.parse_block_node() - else: - self.state = self.parse_block_sequence_entry - return self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while parsing a block collection", self.marks[-1], - "expected <block end>, but found %r" % token.id, token.start_mark) - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - - def parse_indentless_sequence_entry(self): - if self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, - KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_indentless_sequence_entry) - return self.parse_block_node() - else: - self.state = self.parse_indentless_sequence_entry - return self.process_empty_scalar(token.end_mark) - token = self.peek_token() - event = SequenceEndEvent(token.start_mark, token.start_mark) - self.state = self.states.pop() - return event - - # block_mapping ::= BLOCK-MAPPING_START - # ((KEY block_node_or_indentless_sequence?)? 
- # (VALUE block_node_or_indentless_sequence?)?)* - # BLOCK-END - - def parse_block_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_block_mapping_key() - - def parse_block_mapping_key(self): - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_block_mapping_value) - return self.parse_block_node_or_indentless_sequence() - else: - self.state = self.parse_block_mapping_value - return self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while parsing a block mapping", self.marks[-1], - "expected <block end>, but found %r" % token.id, token.start_mark) - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_block_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_block_mapping_key) - return self.parse_block_node_or_indentless_sequence() - else: - self.state = self.parse_block_mapping_key - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_block_mapping_key - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - # flow_sequence ::= FLOW-SEQUENCE-START - # (flow_sequence_entry FLOW-ENTRY)* - # flow_sequence_entry? - # FLOW-SEQUENCE-END - # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - # - # Note that while production rules for both flow_sequence_entry and - # flow_mapping_entry are equal, their interpretations are different. - # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` - # generate an inline mapping (set syntax). 
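The distinction noted above is observable from the loader: a KEY/VALUE pair inside a flow sequence is parsed as a one-entry inline mapping, while plain entries stay scalars. A hedged check, assuming a PyYAML-compatible loader:

    import yaml

    # The 'one: 1' entry becomes a single-pair mapping inside the sequence,
    # while 'two' remains a plain scalar entry.
    print(yaml.safe_load(u"[one: 1, two]"))
    # Expected: [{'one': 1}, 'two']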
- - def parse_flow_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_sequence_entry(first=True) - - def parse_flow_sequence_entry(self, first=False): - if not self.check_token(FlowSequenceEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow sequence", self.marks[-1], - "expected ',' or ']', but got %r" % token.id, token.start_mark) - - if self.check_token(KeyToken): - token = self.peek_token() - event = MappingStartEvent(None, None, True, - token.start_mark, token.end_mark, - flow_style=True) - self.state = self.parse_flow_sequence_entry_mapping_key - return event - elif not self.check_token(FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry) - return self.parse_flow_node() - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_sequence_entry_mapping_key(self): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_value - return self.process_empty_scalar(token.end_mark) - - def parse_flow_sequence_entry_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_end) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_end - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_sequence_entry_mapping_end - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_sequence_entry_mapping_end(self): - self.state = self.parse_flow_sequence_entry - token = self.peek_token() - return MappingEndEvent(token.start_mark, token.start_mark) - - # flow_mapping ::= FLOW-MAPPING-START - # (flow_mapping_entry FLOW-ENTRY)* - # flow_mapping_entry? - # FLOW-MAPPING-END - # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- - def parse_flow_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_mapping_key(first=True) - - def parse_flow_mapping_key(self, first=False): - if not self.check_token(FlowMappingEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected ',' or '}', but got %r" % token.id, token.start_mark) - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_mapping_value - return self.process_empty_scalar(token.end_mark) - elif not self.check_token(FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_empty_value) - return self.parse_flow_node() - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_key) - return self.parse_flow_node() - else: - self.state = self.parse_flow_mapping_key - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_mapping_key - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_mapping_empty_value(self): - self.state = self.parse_flow_mapping_key - return self.process_empty_scalar(self.peek_token().start_mark) - - def process_empty_scalar(self, mark): - return ScalarEvent(None, None, (True, False), u'', mark, mark) - diff --git a/python.d/python_modules/pyyaml2/reader.py b/python.d/python_modules/pyyaml2/reader.py deleted file mode 100644 index 3249e6b9f..000000000 --- a/python.d/python_modules/pyyaml2/reader.py +++ /dev/null @@ -1,190 +0,0 @@ -# This module contains abstractions for the input stream. You don't have to -# looks further, there are no pretty code. -# -# We define two classes here. -# -# Mark(source, line, column) -# It's just a record and its only use is producing nice error messages. -# Parser does not use it for any other purposes. -# -# Reader(source, data) -# Reader determines the encoding of `data` and converts it to unicode. -# Reader provides the following methods and attributes: -# reader.peek(length=1) - return the next `length` characters -# reader.forward(length=1) - move the current position to `length` characters. -# reader.index - the number of the current character. -# reader.line, stream.column - the line and the column of the current character. 
- -__all__ = ['Reader', 'ReaderError'] - -from error import YAMLError, Mark - -import codecs, re - -class ReaderError(YAMLError): - - def __init__(self, name, position, character, encoding, reason): - self.name = name - self.character = character - self.position = position - self.encoding = encoding - self.reason = reason - - def __str__(self): - if isinstance(self.character, str): - return "'%s' codec can't decode byte #x%02x: %s\n" \ - " in \"%s\", position %d" \ - % (self.encoding, ord(self.character), self.reason, - self.name, self.position) - else: - return "unacceptable character #x%04x: %s\n" \ - " in \"%s\", position %d" \ - % (self.character, self.reason, - self.name, self.position) - -class Reader(object): - # Reader: - # - determines the data encoding and converts it to unicode, - # - checks if characters are in allowed range, - # - adds '\0' to the end. - - # Reader accepts - # - a `str` object, - # - a `unicode` object, - # - a file-like object with its `read` method returning `str`, - # - a file-like object with its `read` method returning `unicode`. - - # Yeah, it's ugly and slow. - - def __init__(self, stream): - self.name = None - self.stream = None - self.stream_pointer = 0 - self.eof = True - self.buffer = u'' - self.pointer = 0 - self.raw_buffer = None - self.raw_decode = None - self.encoding = None - self.index = 0 - self.line = 0 - self.column = 0 - if isinstance(stream, unicode): - self.name = "<unicode string>" - self.check_printable(stream) - self.buffer = stream+u'\0' - elif isinstance(stream, str): - self.name = "<string>" - self.raw_buffer = stream - self.determine_encoding() - else: - self.stream = stream - self.name = getattr(stream, 'name', "<file>") - self.eof = False - self.raw_buffer = '' - self.determine_encoding() - - def peek(self, index=0): - try: - return self.buffer[self.pointer+index] - except IndexError: - self.update(index+1) - return self.buffer[self.pointer+index] - - def prefix(self, length=1): - if self.pointer+length >= len(self.buffer): - self.update(length) - return self.buffer[self.pointer:self.pointer+length] - - def forward(self, length=1): - if self.pointer+length+1 >= len(self.buffer): - self.update(length+1) - while length: - ch = self.buffer[self.pointer] - self.pointer += 1 - self.index += 1 - if ch in u'\n\x85\u2028\u2029' \ - or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): - self.line += 1 - self.column = 0 - elif ch != u'\uFEFF': - self.column += 1 - length -= 1 - - def get_mark(self): - if self.stream is None: - return Mark(self.name, self.index, self.line, self.column, - self.buffer, self.pointer) - else: - return Mark(self.name, self.index, self.line, self.column, - None, None) - - def determine_encoding(self): - while not self.eof and len(self.raw_buffer) < 2: - self.update_raw() - if not isinstance(self.raw_buffer, unicode): - if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): - self.raw_decode = codecs.utf_16_le_decode - self.encoding = 'utf-16-le' - elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): - self.raw_decode = codecs.utf_16_be_decode - self.encoding = 'utf-16-be' - else: - self.raw_decode = codecs.utf_8_decode - self.encoding = 'utf-8' - self.update(1) - - NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') - def check_printable(self, data): - match = self.NON_PRINTABLE.search(data) - if match: - character = match.group() - position = self.index+(len(self.buffer)-self.pointer)+match.start() - raise ReaderError(self.name, position, ord(character), - 'unicode', "special 
characters are not allowed") - - def update(self, length): - if self.raw_buffer is None: - return - self.buffer = self.buffer[self.pointer:] - self.pointer = 0 - while len(self.buffer) < length: - if not self.eof: - self.update_raw() - if self.raw_decode is not None: - try: - data, converted = self.raw_decode(self.raw_buffer, - 'strict', self.eof) - except UnicodeDecodeError, exc: - character = exc.object[exc.start] - if self.stream is not None: - position = self.stream_pointer-len(self.raw_buffer)+exc.start - else: - position = exc.start - raise ReaderError(self.name, position, character, - exc.encoding, exc.reason) - else: - data = self.raw_buffer - converted = len(data) - self.check_printable(data) - self.buffer += data - self.raw_buffer = self.raw_buffer[converted:] - if self.eof: - self.buffer += u'\0' - self.raw_buffer = None - break - - def update_raw(self, size=1024): - data = self.stream.read(size) - if data: - self.raw_buffer += data - self.stream_pointer += len(data) - else: - self.eof = True - -#try: -# import psyco -# psyco.bind(Reader) -#except ImportError: -# pass - diff --git a/python.d/python_modules/pyyaml2/representer.py b/python.d/python_modules/pyyaml2/representer.py deleted file mode 100644 index 5f4fc70db..000000000 --- a/python.d/python_modules/pyyaml2/representer.py +++ /dev/null @@ -1,484 +0,0 @@ - -__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', - 'RepresenterError'] - -from error import * -from nodes import * - -import datetime - -import sys, copy_reg, types - -class RepresenterError(YAMLError): - pass - -class BaseRepresenter(object): - - yaml_representers = {} - yaml_multi_representers = {} - - def __init__(self, default_style=None, default_flow_style=None): - self.default_style = default_style - self.default_flow_style = default_flow_style - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def represent(self, data): - node = self.represent_data(data) - self.serialize(node) - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def get_classobj_bases(self, cls): - bases = [cls] - for base in cls.__bases__: - bases.extend(self.get_classobj_bases(base)) - return bases - - def represent_data(self, data): - if self.ignore_aliases(data): - self.alias_key = None - else: - self.alias_key = id(data) - if self.alias_key is not None: - if self.alias_key in self.represented_objects: - node = self.represented_objects[self.alias_key] - #if node is None: - # raise RepresenterError("recursive objects are not allowed: %r" % data) - return node - #self.represented_objects[alias_key] = None - self.object_keeper.append(data) - data_types = type(data).__mro__ - if type(data) is types.InstanceType: - data_types = self.get_classobj_bases(data.__class__)+list(data_types) - if data_types[0] in self.yaml_representers: - node = self.yaml_representers[data_types[0]](self, data) - else: - for data_type in data_types: - if data_type in self.yaml_multi_representers: - node = self.yaml_multi_representers[data_type](self, data) - break - else: - if None in self.yaml_multi_representers: - node = self.yaml_multi_representers[None](self, data) - elif None in self.yaml_representers: - node = self.yaml_representers[None](self, data) - else: - node = ScalarNode(None, unicode(data)) - #if alias_key is not None: - # self.represented_objects[alias_key] = node - return node - - def add_representer(cls, data_type, representer): - if not 'yaml_representers' in cls.__dict__: - cls.yaml_representers = 
cls.yaml_representers.copy() - cls.yaml_representers[data_type] = representer - add_representer = classmethod(add_representer) - - def add_multi_representer(cls, data_type, representer): - if not 'yaml_multi_representers' in cls.__dict__: - cls.yaml_multi_representers = cls.yaml_multi_representers.copy() - cls.yaml_multi_representers[data_type] = representer - add_multi_representer = classmethod(add_multi_representer) - - def represent_scalar(self, tag, value, style=None): - if style is None: - style = self.default_style - node = ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - def represent_sequence(self, tag, sequence, flow_style=None): - value = [] - node = SequenceNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - for item in sequence: - node_item = self.represent_data(item) - if not (isinstance(node_item, ScalarNode) and not node_item.style): - best_style = False - value.append(node_item) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def represent_mapping(self, tag, mapping, flow_style=None): - value = [] - node = MappingNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - if hasattr(mapping, 'items'): - mapping = mapping.items() - mapping.sort() - for item_key, item_value in mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, ScalarNode) and not node_key.style): - best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def ignore_aliases(self, data): - return False - -class SafeRepresenter(BaseRepresenter): - - def ignore_aliases(self, data): - if data in [None, ()]: - return True - if isinstance(data, (str, unicode, bool, int, float)): - return True - - def represent_none(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:null', - u'null') - - def represent_str(self, data): - tag = None - style = None - try: - data = unicode(data, 'ascii') - tag = u'tag:yaml.org,2002:str' - except UnicodeDecodeError: - try: - data = unicode(data, 'utf-8') - tag = u'tag:yaml.org,2002:str' - except UnicodeDecodeError: - data = data.encode('base64') - tag = u'tag:yaml.org,2002:binary' - style = '|' - return self.represent_scalar(tag, data, style=style) - - def represent_unicode(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:str', data) - - def represent_bool(self, data): - if data: - value = u'true' - else: - value = u'false' - return self.represent_scalar(u'tag:yaml.org,2002:bool', value) - - def represent_int(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) - - def represent_long(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) - - inf_value = 1e300 - while repr(inf_value) != repr(inf_value*inf_value): - inf_value *= inf_value - - def represent_float(self, data): - if data != data or (data == 0.0 and data == 1.0): - value = u'.nan' - elif data == self.inf_value: - value = 
u'.inf' - elif data == -self.inf_value: - value = u'-.inf' - else: - value = unicode(repr(data)).lower() - # Note that in some cases `repr(data)` represents a float number - # without the decimal parts. For instance: - # >>> repr(1e17) - # '1e17' - # Unfortunately, this is not a valid float representation according - # to the definition of the `!!float` tag. We fix this by adding - # '.0' before the 'e' symbol. - if u'.' not in value and u'e' in value: - value = value.replace(u'e', u'.0e', 1) - return self.represent_scalar(u'tag:yaml.org,2002:float', value) - - def represent_list(self, data): - #pairs = (len(data) > 0 and isinstance(data, list)) - #if pairs: - # for item in data: - # if not isinstance(item, tuple) or len(item) != 2: - # pairs = False - # break - #if not pairs: - return self.represent_sequence(u'tag:yaml.org,2002:seq', data) - #value = [] - #for item_key, item_value in data: - # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', - # [(item_key, item_value)])) - #return SequenceNode(u'tag:yaml.org,2002:pairs', value) - - def represent_dict(self, data): - return self.represent_mapping(u'tag:yaml.org,2002:map', data) - - def represent_set(self, data): - value = {} - for key in data: - value[key] = None - return self.represent_mapping(u'tag:yaml.org,2002:set', value) - - def represent_date(self, data): - value = unicode(data.isoformat()) - return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) - - def represent_datetime(self, data): - value = unicode(data.isoformat(' ')) - return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) - - def represent_yaml_object(self, tag, data, cls, flow_style=None): - if hasattr(data, '__getstate__'): - state = data.__getstate__() - else: - state = data.__dict__.copy() - return self.represent_mapping(tag, state, flow_style=flow_style) - - def represent_undefined(self, data): - raise RepresenterError("cannot represent an object: %s" % data) - -SafeRepresenter.add_representer(type(None), - SafeRepresenter.represent_none) - -SafeRepresenter.add_representer(str, - SafeRepresenter.represent_str) - -SafeRepresenter.add_representer(unicode, - SafeRepresenter.represent_unicode) - -SafeRepresenter.add_representer(bool, - SafeRepresenter.represent_bool) - -SafeRepresenter.add_representer(int, - SafeRepresenter.represent_int) - -SafeRepresenter.add_representer(long, - SafeRepresenter.represent_long) - -SafeRepresenter.add_representer(float, - SafeRepresenter.represent_float) - -SafeRepresenter.add_representer(list, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(tuple, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(dict, - SafeRepresenter.represent_dict) - -SafeRepresenter.add_representer(set, - SafeRepresenter.represent_set) - -SafeRepresenter.add_representer(datetime.date, - SafeRepresenter.represent_date) - -SafeRepresenter.add_representer(datetime.datetime, - SafeRepresenter.represent_datetime) - -SafeRepresenter.add_representer(None, - SafeRepresenter.represent_undefined) - -class Representer(SafeRepresenter): - - def represent_str(self, data): - tag = None - style = None - try: - data = unicode(data, 'ascii') - tag = u'tag:yaml.org,2002:str' - except UnicodeDecodeError: - try: - data = unicode(data, 'utf-8') - tag = u'tag:yaml.org,2002:python/str' - except UnicodeDecodeError: - data = data.encode('base64') - tag = u'tag:yaml.org,2002:binary' - style = '|' - return self.represent_scalar(tag, data, style=style) - - def represent_unicode(self, data): - tag = None - try: - 
data.encode('ascii') - tag = u'tag:yaml.org,2002:python/unicode' - except UnicodeEncodeError: - tag = u'tag:yaml.org,2002:str' - return self.represent_scalar(tag, data) - - def represent_long(self, data): - tag = u'tag:yaml.org,2002:int' - if int(data) is not data: - tag = u'tag:yaml.org,2002:python/long' - return self.represent_scalar(tag, unicode(data)) - - def represent_complex(self, data): - if data.imag == 0.0: - data = u'%r' % data.real - elif data.real == 0.0: - data = u'%rj' % data.imag - elif data.imag > 0: - data = u'%r+%rj' % (data.real, data.imag) - else: - data = u'%r%rj' % (data.real, data.imag) - return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) - - def represent_tuple(self, data): - return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) - - def represent_name(self, data): - name = u'%s.%s' % (data.__module__, data.__name__) - return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') - - def represent_module(self, data): - return self.represent_scalar( - u'tag:yaml.org,2002:python/module:'+data.__name__, u'') - - def represent_instance(self, data): - # For instances of classic classes, we use __getinitargs__ and - # __getstate__ to serialize the data. - - # If data.__getinitargs__ exists, the object must be reconstructed by - # calling cls(**args), where args is a tuple returned by - # __getinitargs__. Otherwise, the cls.__init__ method should never be - # called and the class instance is created by instantiating a trivial - # class and assigning to the instance's __class__ variable. - - # If data.__getstate__ exists, it returns the state of the object. - # Otherwise, the state of the object is data.__dict__. - - # We produce either a !!python/object or !!python/object/new node. - # If data.__getinitargs__ does not exist and state is a dictionary, we - # produce a !!python/object node . Otherwise we produce a - # !!python/object/new node. - - cls = data.__class__ - class_name = u'%s.%s' % (cls.__module__, cls.__name__) - args = None - state = None - if hasattr(data, '__getinitargs__'): - args = list(data.__getinitargs__()) - if hasattr(data, '__getstate__'): - state = data.__getstate__() - else: - state = data.__dict__ - if args is None and isinstance(state, dict): - return self.represent_mapping( - u'tag:yaml.org,2002:python/object:'+class_name, state) - if isinstance(state, dict) and not state: - return self.represent_sequence( - u'tag:yaml.org,2002:python/object/new:'+class_name, args) - value = {} - if args: - value['args'] = args - value['state'] = state - return self.represent_mapping( - u'tag:yaml.org,2002:python/object/new:'+class_name, value) - - def represent_object(self, data): - # We use __reduce__ API to save the data. data.__reduce__ returns - # a tuple of length 2-5: - # (function, args, state, listitems, dictitems) - - # For reconstructing, we calls function(*args), then set its state, - # listitems, and dictitems if they are not None. - - # A special case is when function.__name__ == '__newobj__'. In this - # case we create the object with args[0].__new__(*args). - - # Another special case is when __reduce__ returns a string - we don't - # support it. - - # We produce a !!python/object, !!python/object/new or - # !!python/object/apply node. 
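The shape of the reduce tuple this code unpacks can be inspected directly in the interpreter. A hedged illustration, with output as observed on CPython 2 (where this module runs); the class name is illustrative:

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    # Protocol 2 reduces new-style instances via copy_reg.__newobj__,
    # which is the '__newobj__' branch handled below.
    function, args, state = Point(1, 2).__reduce_ex__(2)[:3]
    print(function.__name__)          # '__newobj__' -> emitted as python/object/new
    print(args)                       # (<class '__main__.Point'>,)
    print(state == {'x': 1, 'y': 2})  # True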
- - cls = type(data) - if cls in copy_reg.dispatch_table: - reduce = copy_reg.dispatch_table[cls](data) - elif hasattr(data, '__reduce_ex__'): - reduce = data.__reduce_ex__(2) - elif hasattr(data, '__reduce__'): - reduce = data.__reduce__() - else: - raise RepresenterError("cannot represent object: %r" % data) - reduce = (list(reduce)+[None]*5)[:5] - function, args, state, listitems, dictitems = reduce - args = list(args) - if state is None: - state = {} - if listitems is not None: - listitems = list(listitems) - if dictitems is not None: - dictitems = dict(dictitems) - if function.__name__ == '__newobj__': - function = args[0] - args = args[1:] - tag = u'tag:yaml.org,2002:python/object/new:' - newobj = True - else: - tag = u'tag:yaml.org,2002:python/object/apply:' - newobj = False - function_name = u'%s.%s' % (function.__module__, function.__name__) - if not args and not listitems and not dictitems \ - and isinstance(state, dict) and newobj: - return self.represent_mapping( - u'tag:yaml.org,2002:python/object:'+function_name, state) - if not listitems and not dictitems \ - and isinstance(state, dict) and not state: - return self.represent_sequence(tag+function_name, args) - value = {} - if args: - value['args'] = args - if state or not isinstance(state, dict): - value['state'] = state - if listitems: - value['listitems'] = listitems - if dictitems: - value['dictitems'] = dictitems - return self.represent_mapping(tag+function_name, value) - -Representer.add_representer(str, - Representer.represent_str) - -Representer.add_representer(unicode, - Representer.represent_unicode) - -Representer.add_representer(long, - Representer.represent_long) - -Representer.add_representer(complex, - Representer.represent_complex) - -Representer.add_representer(tuple, - Representer.represent_tuple) - -Representer.add_representer(type, - Representer.represent_name) - -Representer.add_representer(types.ClassType, - Representer.represent_name) - -Representer.add_representer(types.FunctionType, - Representer.represent_name) - -Representer.add_representer(types.BuiltinFunctionType, - Representer.represent_name) - -Representer.add_representer(types.ModuleType, - Representer.represent_module) - -Representer.add_multi_representer(types.InstanceType, - Representer.represent_instance) - -Representer.add_multi_representer(object, - Representer.represent_object) - diff --git a/python.d/python_modules/pyyaml2/resolver.py b/python.d/python_modules/pyyaml2/resolver.py deleted file mode 100644 index 6b5ab8759..000000000 --- a/python.d/python_modules/pyyaml2/resolver.py +++ /dev/null @@ -1,224 +0,0 @@ - -__all__ = ['BaseResolver', 'Resolver'] - -from error import * -from nodes import * - -import re - -class ResolverError(YAMLError): - pass - -class BaseResolver(object): - - DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' - DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' - DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' - - yaml_implicit_resolvers = {} - yaml_path_resolvers = {} - - def __init__(self): - self.resolver_exact_paths = [] - self.resolver_prefix_paths = [] - - def add_implicit_resolver(cls, tag, regexp, first): - if not 'yaml_implicit_resolvers' in cls.__dict__: - cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() - if first is None: - first = [None] - for ch in first: - cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) - add_implicit_resolver = classmethod(add_implicit_resolver) - - def add_path_resolver(cls, tag, path, kind=None): - # Note: `add_path_resolver` is experimental. 
The API could be changed. - # `new_path` is a pattern that is matched against the path from the - # root to the node that is being considered. `node_path` elements are - # tuples `(node_check, index_check)`. `node_check` is a node class: - # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` - # matches any kind of a node. `index_check` could be `None`, a boolean - # value, a string value, or a number. `None` and `False` match against - # any _value_ of sequence and mapping nodes. `True` matches against - # any _key_ of a mapping node. A string `index_check` matches against - # a mapping value that corresponds to a scalar key which content is - # equal to the `index_check` value. An integer `index_check` matches - # against a sequence value with the index equal to `index_check`. - if not 'yaml_path_resolvers' in cls.__dict__: - cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() - new_path = [] - for element in path: - if isinstance(element, (list, tuple)): - if len(element) == 2: - node_check, index_check = element - elif len(element) == 1: - node_check = element[0] - index_check = True - else: - raise ResolverError("Invalid path element: %s" % element) - else: - node_check = None - index_check = element - if node_check is str: - node_check = ScalarNode - elif node_check is list: - node_check = SequenceNode - elif node_check is dict: - node_check = MappingNode - elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ - and not isinstance(node_check, basestring) \ - and node_check is not None: - raise ResolverError("Invalid node checker: %s" % node_check) - if not isinstance(index_check, (basestring, int)) \ - and index_check is not None: - raise ResolverError("Invalid index checker: %s" % index_check) - new_path.append((node_check, index_check)) - if kind is str: - kind = ScalarNode - elif kind is list: - kind = SequenceNode - elif kind is dict: - kind = MappingNode - elif kind not in [ScalarNode, SequenceNode, MappingNode] \ - and kind is not None: - raise ResolverError("Invalid node kind: %s" % kind) - cls.yaml_path_resolvers[tuple(new_path), kind] = tag - add_path_resolver = classmethod(add_path_resolver) - - def descend_resolver(self, current_node, current_index): - if not self.yaml_path_resolvers: - return - exact_paths = {} - prefix_paths = [] - if current_node: - depth = len(self.resolver_prefix_paths) - for path, kind in self.resolver_prefix_paths[-1]: - if self.check_resolver_prefix(depth, path, kind, - current_node, current_index): - if len(path) > depth: - prefix_paths.append((path, kind)) - else: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - for path, kind in self.yaml_path_resolvers: - if not path: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - prefix_paths.append((path, kind)) - self.resolver_exact_paths.append(exact_paths) - self.resolver_prefix_paths.append(prefix_paths) - - def ascend_resolver(self): - if not self.yaml_path_resolvers: - return - self.resolver_exact_paths.pop() - self.resolver_prefix_paths.pop() - - def check_resolver_prefix(self, depth, path, kind, - current_node, current_index): - node_check, index_check = path[depth-1] - if isinstance(node_check, basestring): - if current_node.tag != node_check: - return - elif node_check is not None: - if not isinstance(current_node, node_check): - return - if index_check is True and current_index is not None: - return - if (index_check is False or index_check is None) \ - and current_index is None: - return - if isinstance(index_check, basestring): 
- if not (isinstance(current_index, ScalarNode) - and index_check == current_index.value): - return - elif isinstance(index_check, int) and not isinstance(index_check, bool): - if index_check != current_index: - return - return True - - def resolve(self, kind, value, implicit): - if kind is ScalarNode and implicit[0]: - if value == u'': - resolvers = self.yaml_implicit_resolvers.get(u'', []) - else: - resolvers = self.yaml_implicit_resolvers.get(value[0], []) - resolvers += self.yaml_implicit_resolvers.get(None, []) - for tag, regexp in resolvers: - if regexp.match(value): - return tag - implicit = implicit[1] - if self.yaml_path_resolvers: - exact_paths = self.resolver_exact_paths[-1] - if kind in exact_paths: - return exact_paths[kind] - if None in exact_paths: - return exact_paths[None] - if kind is ScalarNode: - return self.DEFAULT_SCALAR_TAG - elif kind is SequenceNode: - return self.DEFAULT_SEQUENCE_TAG - elif kind is MappingNode: - return self.DEFAULT_MAPPING_TAG - -class Resolver(BaseResolver): - pass - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:bool', - re.compile(ur'''^(?:yes|Yes|YES|no|No|NO - |true|True|TRUE|false|False|FALSE - |on|On|ON|off|Off|OFF)$''', re.X), - list(u'yYnNtTfFoO')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:float', - re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? - |\.[0-9_]+(?:[eE][-+][0-9]+)? - |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* - |[-+]?\.(?:inf|Inf|INF) - |\.(?:nan|NaN|NAN))$''', re.X), - list(u'-+0123456789.')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:int', - re.compile(ur'''^(?:[-+]?0b[0-1_]+ - |[-+]?0[0-7_]+ - |[-+]?(?:0|[1-9][0-9_]*) - |[-+]?0x[0-9a-fA-F_]+ - |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), - list(u'-+0123456789')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:merge', - re.compile(ur'^(?:<<)$'), - [u'<']) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:null', - re.compile(ur'''^(?: ~ - |null|Null|NULL - | )$''', re.X), - [u'~', u'n', u'N', u'']) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:timestamp', - re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] - |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? - (?:[Tt]|[ \t]+)[0-9][0-9]? - :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? - (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), - list(u'0123456789')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:value', - re.compile(ur'^(?:=)$'), - [u'=']) - -# The following resolver is only for documentation purposes. It cannot work -# because plain scalars cannot start with '!', '&', or '*'. -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:yaml', - re.compile(ur'^(?:!|&|\*)$'), - list(u'!&*')) - diff --git a/python.d/python_modules/pyyaml2/scanner.py b/python.d/python_modules/pyyaml2/scanner.py deleted file mode 100644 index 5228fad65..000000000 --- a/python.d/python_modules/pyyaml2/scanner.py +++ /dev/null @@ -1,1457 +0,0 @@ - -# Scanner produces tokens of the following types: -# STREAM-START -# STREAM-END -# DIRECTIVE(name, value) -# DOCUMENT-START -# DOCUMENT-END -# BLOCK-SEQUENCE-START -# BLOCK-MAPPING-START -# BLOCK-END -# FLOW-SEQUENCE-START -# FLOW-MAPPING-START -# FLOW-SEQUENCE-END -# FLOW-MAPPING-END -# BLOCK-ENTRY -# FLOW-ENTRY -# KEY -# VALUE -# ALIAS(value) -# ANCHOR(value) -# TAG(value) -# SCALAR(value, plain, style) -# -# Read comments in the Scanner code for more details. 
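Before the Scanner implementation, a minimal sketch of the token stream it produces, using the public yaml package built from these vendored modules (yaml.scan is defined in the package __init__ removed later in this patch); the ordering shown follows directly from the fetchers below:

    import yaml

    # "key: [1, 2]" scans to: STREAM-START, BLOCK-MAPPING-START, KEY,
    # SCALAR('key'), VALUE, FLOW-SEQUENCE-START, SCALAR('1'), FLOW-ENTRY,
    # SCALAR('2'), FLOW-SEQUENCE-END, BLOCK-END, STREAM-END.
    for token in yaml.scan("key: [1, 2]"):
        print(token)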
-# - -__all__ = ['Scanner', 'ScannerError'] - -from error import MarkedYAMLError -from tokens import * - -class ScannerError(MarkedYAMLError): - pass - -class SimpleKey(object): - # See below simple keys treatment. - - def __init__(self, token_number, required, index, line, column, mark): - self.token_number = token_number - self.required = required - self.index = index - self.line = line - self.column = column - self.mark = mark - -class Scanner(object): - - def __init__(self): - """Initialize the scanner.""" - # It is assumed that Scanner and Reader will have a common descendant. - # Reader does the dirty work of checking for BOM and converting the - # input data to Unicode. It also adds NUL to the end. - # - # Reader supports the following methods - # self.peek(i=0) # peek the next i-th character - # self.prefix(l=1) # peek the next l characters - # self.forward(l=1) # read the next l characters and move the pointer. - - # Have we reached the end of the stream? - self.done = False - - # The number of unclosed '{' and '['. `flow_level == 0` means block - # context. - self.flow_level = 0 - - # List of processed tokens that are not yet emitted. - self.tokens = [] - - # Add the STREAM-START token. - self.fetch_stream_start() - - # Number of tokens that were emitted through the `get_token` method. - self.tokens_taken = 0 - - # The current indentation level. - self.indent = -1 - - # Past indentation levels. - self.indents = [] - - # Variables related to simple keys treatment. - - # A simple key is a key that is not denoted by the '?' indicator. - # Example of simple keys: - # --- - # block simple key: value - # ? not a simple key: - # : { flow simple key: value } - # We emit the KEY token before all keys, so when we find a potential - # simple key, we try to locate the corresponding ':' indicator. - # Simple keys should be limited to a single line and 1024 characters. - - # Can a simple key start at the current position? A simple key may - # start: - # - at the beginning of the line, not counting indentation spaces - # (in block context), - # - after '{', '[', ',' (in the flow context), - # - after '?', ':', '-' (in the block context). - # In the block context, this flag also signifies if a block collection - # may start at the current position. - self.allow_simple_key = True - - # Keep track of possible simple keys. This is a dictionary. The key - # is `flow_level`; there can be no more than one possible simple key - # for each level. The value is a SimpleKey record: - # (token_number, required, index, line, column, mark) - # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), - # '[', or '{' tokens. - self.possible_simple_keys = {} - - # Public methods. - - def check_token(self, *choices): - # Check if the next token is one of the given types. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - if not choices: - return True - for choice in choices: - if isinstance(self.tokens[0], choice): - return True - return False - - def peek_token(self): - # Return the next token, but do not remove it from the queue. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - return self.tokens[0] - - def get_token(self): - # Return the next token. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - self.tokens_taken += 1 - return self.tokens.pop(0) - - # Private methods. 
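These three public methods are the whole interface the parser consumes. A brief sketch of driving them by hand through the composite yaml.Loader (which mixes this Scanner in with Reader, Parser and friends); the token class names come from the tokens module deleted later in this patch:

    import yaml
    from yaml.tokens import KeyToken, ValueToken

    loader = yaml.Loader("name: netdata")
    while loader.check_token():
        # check_token() also accepts token classes to filter on.
        if loader.check_token(KeyToken, ValueToken):
            print("indicator:", type(loader.peek_token()).__name__)
        loader.get_token()
    loader.dispose()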
- - def need_more_tokens(self): - if self.done: - return False - if not self.tokens: - return True - # The current token may be a potential simple key, so we - # need to look further. - self.stale_possible_simple_keys() - if self.next_possible_simple_key() == self.tokens_taken: - return True - - def fetch_more_tokens(self): - - # Eat whitespaces and comments until we reach the next token. - self.scan_to_next_token() - - # Remove obsolete possible simple keys. - self.stale_possible_simple_keys() - - # Compare the current indentation and column. It may add some tokens - # and decrease the current indentation level. - self.unwind_indent(self.column) - - # Peek the next character. - ch = self.peek() - - # Is it the end of stream? - if ch == u'\0': - return self.fetch_stream_end() - - # Is it a directive? - if ch == u'%' and self.check_directive(): - return self.fetch_directive() - - # Is it the document start? - if ch == u'-' and self.check_document_start(): - return self.fetch_document_start() - - # Is it the document end? - if ch == u'.' and self.check_document_end(): - return self.fetch_document_end() - - # TODO: support for BOM within a stream. - #if ch == u'\uFEFF': - # return self.fetch_bom() <-- issue BOMToken - - # Note: the order of the following checks is NOT significant. - - # Is it the flow sequence start indicator? - if ch == u'[': - return self.fetch_flow_sequence_start() - - # Is it the flow mapping start indicator? - if ch == u'{': - return self.fetch_flow_mapping_start() - - # Is it the flow sequence end indicator? - if ch == u']': - return self.fetch_flow_sequence_end() - - # Is it the flow mapping end indicator? - if ch == u'}': - return self.fetch_flow_mapping_end() - - # Is it the flow entry indicator? - if ch == u',': - return self.fetch_flow_entry() - - # Is it the block entry indicator? - if ch == u'-' and self.check_block_entry(): - return self.fetch_block_entry() - - # Is it the key indicator? - if ch == u'?' and self.check_key(): - return self.fetch_key() - - # Is it the value indicator? - if ch == u':' and self.check_value(): - return self.fetch_value() - - # Is it an alias? - if ch == u'*': - return self.fetch_alias() - - # Is it an anchor? - if ch == u'&': - return self.fetch_anchor() - - # Is it a tag? - if ch == u'!': - return self.fetch_tag() - - # Is it a literal scalar? - if ch == u'|' and not self.flow_level: - return self.fetch_literal() - - # Is it a folded scalar? - if ch == u'>' and not self.flow_level: - return self.fetch_folded() - - # Is it a single quoted scalar? - if ch == u'\'': - return self.fetch_single() - - # Is it a double quoted scalar? - if ch == u'\"': - return self.fetch_double() - - # It must be a plain scalar then. - if self.check_plain(): - return self.fetch_plain() - - # No? It's an error. Let's produce a nice error message. - raise ScannerError("while scanning for the next token", None, - "found character %r that cannot start any token" - % ch.encode('utf-8'), self.get_mark()) - - # Simple keys treatment. - - def next_possible_simple_key(self): - # Return the number of the nearest possible simple key. Actually we - # don't need to loop through the whole dictionary. 
We may replace it - # with the following code: - # if not self.possible_simple_keys: - # return None - # return self.possible_simple_keys[ - # min(self.possible_simple_keys.keys())].token_number - min_token_number = None - for level in self.possible_simple_keys: - key = self.possible_simple_keys[level] - if min_token_number is None or key.token_number < min_token_number: - min_token_number = key.token_number - return min_token_number - - def stale_possible_simple_keys(self): - # Remove entries that are no longer possible simple keys. According to - # the YAML specification, simple keys - # - should be limited to a single line, - # - should be no longer than 1024 characters. - # Disabling this procedure will allow simple keys of any length and - # height (may cause problems if indentation is broken though). - for level in self.possible_simple_keys.keys(): - key = self.possible_simple_keys[level] - if key.line != self.line \ - or self.index-key.index > 1024: - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not find expected ':'", self.get_mark()) - del self.possible_simple_keys[level] - - def save_possible_simple_key(self): - # The next token may start a simple key. We check if it's possible - # and save its position. This function is called for - # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. - - # Check if a simple key is required at the current position. - required = not self.flow_level and self.indent == self.column - - # A simple key is required only if it is the first token in the current - # line. Therefore it is always allowed. - assert self.allow_simple_key or not required - - # The next token might be a simple key. Let's save its number and - # position. - if self.allow_simple_key: - self.remove_possible_simple_key() - token_number = self.tokens_taken+len(self.tokens) - key = SimpleKey(token_number, required, - self.index, self.line, self.column, self.get_mark()) - self.possible_simple_keys[self.flow_level] = key - - def remove_possible_simple_key(self): - # Remove the saved possible key position at the current flow level. - if self.flow_level in self.possible_simple_keys: - key = self.possible_simple_keys[self.flow_level] - - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not find expected ':'", self.get_mark()) - - del self.possible_simple_keys[self.flow_level] - - # Indentation functions. - - def unwind_indent(self, column): - - ## In flow context, tokens should respect indentation. - ## Actually the condition should be `self.indent >= column` according to - ## the spec. But this condition will prohibit intuitively correct - ## constructions such as - ## key : { - ## } - #if self.flow_level and self.indent > column: - # raise ScannerError(None, None, - # "invalid indentation or unclosed '[' or '{'", - # self.get_mark()) - - # In the flow context, indentation is ignored. We make the scanner less - # restrictive than the specification requires. - if self.flow_level: - return - - # In block context, we may need to issue the BLOCK-END tokens. - while self.indent > column: - mark = self.get_mark() - self.indent = self.indents.pop() - self.tokens.append(BlockEndToken(mark, mark)) - - def add_indent(self, column): - # Check if we need to increase indentation. - if self.indent < column: - self.indents.append(self.indent) - self.indent = column - return True - return False - - # Fetchers. 
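A short illustration of the indent stack at work, again via the public API: the dedent from 'inner' back to column 0 is what makes unwind_indent() emit a BLOCK-END before the next key, and the final unwind at end of stream closes the outer mapping as well.

    import yaml

    names = [type(t).__name__ for t in yaml.scan("outer:\n  inner: 1\nnext: 2\n")]
    assert "BlockEndToken" in names                      # dedent after 'inner: 1'
    assert names[-2:] == ["BlockEndToken", "StreamEndToken"]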
- - def fetch_stream_start(self): - # We always add STREAM-START as the first token and STREAM-END as the - # last token. - - # Read the token. - mark = self.get_mark() - - # Add STREAM-START. - self.tokens.append(StreamStartToken(mark, mark, - encoding=self.encoding)) - - - def fetch_stream_end(self): - - # Set the current indentation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - self.possible_simple_keys = {} - - # Read the token. - mark = self.get_mark() - - # Add STREAM-END. - self.tokens.append(StreamEndToken(mark, mark)) - - # The stream is finished. - self.done = True - - def fetch_directive(self): - - # Set the current indentation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Scan and add DIRECTIVE. - self.tokens.append(self.scan_directive()) - - def fetch_document_start(self): - self.fetch_document_indicator(DocumentStartToken) - - def fetch_document_end(self): - self.fetch_document_indicator(DocumentEndToken) - - def fetch_document_indicator(self, TokenClass): - - # Set the current indentation to -1. - self.unwind_indent(-1) - - # Reset simple keys. Note that there cannot be a block collection - # after '---'. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Add DOCUMENT-START or DOCUMENT-END. - start_mark = self.get_mark() - self.forward(3) - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_start(self): - self.fetch_flow_collection_start(FlowSequenceStartToken) - - def fetch_flow_mapping_start(self): - self.fetch_flow_collection_start(FlowMappingStartToken) - - def fetch_flow_collection_start(self, TokenClass): - - # '[' and '{' may start a simple key. - self.save_possible_simple_key() - - # Increase the flow level. - self.flow_level += 1 - - # Simple keys are allowed after '[' and '{'. - self.allow_simple_key = True - - # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_end(self): - self.fetch_flow_collection_end(FlowSequenceEndToken) - - def fetch_flow_mapping_end(self): - self.fetch_flow_collection_end(FlowMappingEndToken) - - def fetch_flow_collection_end(self, TokenClass): - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Decrease the flow level. - self.flow_level -= 1 - - # No simple keys after ']' or '}'. - self.allow_simple_key = False - - # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_entry(self): - - # Simple keys are allowed after ','. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add FLOW-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(FlowEntryToken(start_mark, end_mark)) - - def fetch_block_entry(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a new entry? - if not self.allow_simple_key: - raise ScannerError(None, None, - "sequence entries are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-SEQUENCE-START. 
- if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockSequenceStartToken(mark, mark)) - - # It's an error for the block entry to occur in the flow context, - # but we let the parser detect this. - else: - pass - - # Simple keys are allowed after '-'. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add BLOCK-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(BlockEntryToken(start_mark, end_mark)) - - def fetch_key(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a key (not necessarily a simple one)? - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping keys are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-MAPPING-START. - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after '?' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add KEY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(KeyToken(start_mark, end_mark)) - - def fetch_value(self): - - # Have we found a simple key? - if self.flow_level in self.possible_simple_keys: - - # Add KEY. - key = self.possible_simple_keys[self.flow_level] - del self.possible_simple_keys[self.flow_level] - self.tokens.insert(key.token_number-self.tokens_taken, - KeyToken(key.mark, key.mark)) - - # If this key starts a new block mapping, we need to add - # BLOCK-MAPPING-START. - if not self.flow_level: - if self.add_indent(key.column): - self.tokens.insert(key.token_number-self.tokens_taken, - BlockMappingStartToken(key.mark, key.mark)) - - # There cannot be two simple keys one after another. - self.allow_simple_key = False - - # It must be a part of a complex key. - else: - - # Block context needs additional checks. - # (Do we really need them? They will be caught by the parser - # anyway.) - if not self.flow_level: - - # We are allowed to start a complex value if and only if - # we can start a simple key. - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping values are not allowed here", - self.get_mark()) - - # If this value starts a new block mapping, we need to add - # BLOCK-MAPPING-START. It will be detected as an error later by - # the parser. - if not self.flow_level: - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after ':' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add VALUE. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(ValueToken(start_mark, end_mark)) - - def fetch_alias(self): - - # ALIAS could be a simple key. - self.save_possible_simple_key() - - # No simple keys after ALIAS. - self.allow_simple_key = False - - # Scan and add ALIAS. - self.tokens.append(self.scan_anchor(AliasToken)) - - def fetch_anchor(self): - - # ANCHOR could start a simple key. - self.save_possible_simple_key() - - # No simple keys after ANCHOR. - self.allow_simple_key = False - - # Scan and add ANCHOR. 
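        # (Editorial aside, using the public API: the anchors and aliases
        # scanned here are what let a document share one object, e.g.
        #     data = yaml.safe_load("a: &x [1, 2]\nb: *x")
        #     assert data["a"] is data["b"]
        # holds because '*x' resolves to the very object anchored by '&x'.)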
- self.tokens.append(self.scan_anchor(AnchorToken)) - - def fetch_tag(self): - - # TAG could start a simple key. - self.save_possible_simple_key() - - # No simple keys after TAG. - self.allow_simple_key = False - - # Scan and add TAG. - self.tokens.append(self.scan_tag()) - - def fetch_literal(self): - self.fetch_block_scalar(style='|') - - def fetch_folded(self): - self.fetch_block_scalar(style='>') - - def fetch_block_scalar(self, style): - - # A simple key may follow a block scalar. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Scan and add SCALAR. - self.tokens.append(self.scan_block_scalar(style)) - - def fetch_single(self): - self.fetch_flow_scalar(style='\'') - - def fetch_double(self): - self.fetch_flow_scalar(style='"') - - def fetch_flow_scalar(self, style): - - # A flow scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after flow scalars. - self.allow_simple_key = False - - # Scan and add SCALAR. - self.tokens.append(self.scan_flow_scalar(style)) - - def fetch_plain(self): - - # A plain scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after plain scalars. But note that `scan_plain` will - # change this flag if the scan is finished at the beginning of the - # line. - self.allow_simple_key = False - - # Scan and add SCALAR. May change `allow_simple_key`. - self.tokens.append(self.scan_plain()) - - # Checkers. - - def check_directive(self): - - # DIRECTIVE: ^ '%' ... - # The '%' indicator is already checked. - if self.column == 0: - return True - - def check_document_start(self): - - # DOCUMENT-START: ^ '---' (' '|'\n') - if self.column == 0: - if self.prefix(3) == u'---' \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return True - - def check_document_end(self): - - # DOCUMENT-END: ^ '...' (' '|'\n') - if self.column == 0: - if self.prefix(3) == u'...' \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return True - - def check_block_entry(self): - - # BLOCK-ENTRY: '-' (' '|'\n') - return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' - - def check_key(self): - - # KEY(flow context): '?' - if self.flow_level: - return True - - # KEY(block context): '?' (' '|'\n') - else: - return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' - - def check_value(self): - - # VALUE(flow context): ':' - if self.flow_level: - return True - - # VALUE(block context): ':' (' '|'\n') - else: - return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' - - def check_plain(self): - - # A plain scalar may start with any non-space character except: - # '-', '?', ':', ',', '[', ']', '{', '}', - # '#', '&', '*', '!', '|', '>', '\'', '\"', - # '%', '@', '`'. - # - # It may also start with - # '-', '?', ':' - # if it is followed by a non-space character. - # - # Note that we limit the last rule to the block context (except the - # '-' character) because we want the flow context to be space - # independent. - ch = self.peek() - return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ - or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' - and (ch == u'-' or (not self.flow_level and ch in u'?:'))) - - # Scanners. - - def scan_to_next_token(self): - # We ignore spaces, line breaks and comments. - # If we find a line break in the block context, we set the flag - # `allow_simple_key` on. - # The byte order mark is stripped if it's the first character in the - # stream. We do not yet support BOM inside the stream as the - # specification requires. 
Any such mark will be considered as a part - # of the document. - # - # TODO: We need to make tab handling rules more sane. A good rule is - # Tabs cannot precede tokens - # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, - # KEY(block), VALUE(block), BLOCK-ENTRY - # So the checking code is - # if <TAB>: - # self.allow_simple_keys = False - # We also need to add the check for `allow_simple_keys == True` to - # `unwind_indent` before issuing BLOCK-END. - # Scanners for block, flow, and plain scalars need to be modified. - - if self.index == 0 and self.peek() == u'\uFEFF': - self.forward() - found = False - while not found: - while self.peek() == u' ': - self.forward() - if self.peek() == u'#': - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - if self.scan_line_break(): - if not self.flow_level: - self.allow_simple_key = True - else: - found = True - - def scan_directive(self): - # See the specification for details. - start_mark = self.get_mark() - self.forward() - name = self.scan_directive_name(start_mark) - value = None - if name == u'YAML': - value = self.scan_yaml_directive_value(start_mark) - end_mark = self.get_mark() - elif name == u'TAG': - value = self.scan_tag_directive_value(start_mark) - end_mark = self.get_mark() - else: - end_mark = self.get_mark() - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - self.scan_directive_ignored_line(start_mark) - return DirectiveToken(name, value, start_mark, end_mark) - - def scan_directive_name(self, start_mark): - # See the specification for details. - length = 0 - ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - return value - - def scan_yaml_directive_value(self, start_mark): - # See the specification for details. - while self.peek() == u' ': - self.forward() - major = self.scan_yaml_directive_number(start_mark) - if self.peek() != '.': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or '.', but found %r" - % self.peek().encode('utf-8'), - self.get_mark()) - self.forward() - minor = self.scan_yaml_directive_number(start_mark) - if self.peek() not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or ' ', but found %r" - % self.peek().encode('utf-8'), - self.get_mark()) - return (major, minor) - - def scan_yaml_directive_number(self, start_mark): - # See the specification for details. - ch = self.peek() - if not (u'0' <= ch <= u'9'): - raise ScannerError("while scanning a directive", start_mark, - "expected a digit, but found %r" % ch.encode('utf-8'), - self.get_mark()) - length = 0 - while u'0' <= self.peek(length) <= u'9': - length += 1 - value = int(self.prefix(length)) - self.forward(length) - return value - - def scan_tag_directive_value(self, start_mark): - # See the specification for details. 
- while self.peek() == u' ': - self.forward() - handle = self.scan_tag_directive_handle(start_mark) - while self.peek() == u' ': - self.forward() - prefix = self.scan_tag_directive_prefix(start_mark) - return (handle, prefix) - - def scan_tag_directive_handle(self, start_mark): - # See the specification for details. - value = self.scan_tag_handle('directive', start_mark) - ch = self.peek() - if ch != u' ': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch.encode('utf-8'), - self.get_mark()) - return value - - def scan_tag_directive_prefix(self, start_mark): - # See the specification for details. - value = self.scan_tag_uri('directive', start_mark) - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch.encode('utf-8'), - self.get_mark()) - return value - - def scan_directive_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == u' ': - self.forward() - if self.peek() == u'#': - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in u'\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a comment or a line break, but found %r" - % ch.encode('utf-8'), self.get_mark()) - self.scan_line_break() - - def scan_anchor(self, TokenClass): - # The specification does not restrict characters for anchors and - # aliases. This may lead to problems, for instance, the document: - # [ *alias, value ] - # can be interpteted in two ways, as - # [ "value" ] - # and - # [ *alias , "value" ] - # Therefore we restrict aliases to numbers and ASCII letters. - start_mark = self.get_mark() - indicator = self.peek() - if indicator == u'*': - name = 'alias' - else: - name = 'anchor' - self.forward() - length = 0 - ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - end_mark = self.get_mark() - return TokenClass(value, start_mark, end_mark) - - def scan_tag(self): - # See the specification for details. - start_mark = self.get_mark() - ch = self.peek(1) - if ch == u'<': - handle = None - self.forward(2) - suffix = self.scan_tag_uri('tag', start_mark) - if self.peek() != u'>': - raise ScannerError("while parsing a tag", start_mark, - "expected '>', but found %r" % self.peek().encode('utf-8'), - self.get_mark()) - self.forward() - elif ch in u'\0 \t\r\n\x85\u2028\u2029': - handle = None - suffix = u'!' - self.forward() - else: - length = 1 - use_handle = False - while ch not in u'\0 \r\n\x85\u2028\u2029': - if ch == u'!': - use_handle = True - break - length += 1 - ch = self.peek(length) - handle = u'!' - if use_handle: - handle = self.scan_tag_handle('tag', start_mark) - else: - handle = u'!' 
- self.forward() - suffix = self.scan_tag_uri('tag', start_mark) - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a tag", start_mark, - "expected ' ', but found %r" % ch.encode('utf-8'), - self.get_mark()) - value = (handle, suffix) - end_mark = self.get_mark() - return TagToken(value, start_mark, end_mark) - - def scan_block_scalar(self, style): - # See the specification for details. - - if style == '>': - folded = True - else: - folded = False - - chunks = [] - start_mark = self.get_mark() - - # Scan the header. - self.forward() - chomping, increment = self.scan_block_scalar_indicators(start_mark) - self.scan_block_scalar_ignored_line(start_mark) - - # Determine the indentation level and go to the first non-empty line. - min_indent = self.indent+1 - if min_indent < 1: - min_indent = 1 - if increment is None: - breaks, max_indent, end_mark = self.scan_block_scalar_indentation() - indent = max(min_indent, max_indent) - else: - indent = min_indent+increment-1 - breaks, end_mark = self.scan_block_scalar_breaks(indent) - line_break = u'' - - # Scan the inner part of the block scalar. - while self.column == indent and self.peek() != u'\0': - chunks.extend(breaks) - leading_non_space = self.peek() not in u' \t' - length = 0 - while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': - length += 1 - chunks.append(self.prefix(length)) - self.forward(length) - line_break = self.scan_line_break() - breaks, end_mark = self.scan_block_scalar_breaks(indent) - if self.column == indent and self.peek() != u'\0': - - # Unfortunately, folding rules are ambiguous. - # - # This is the folding according to the specification: - - if folded and line_break == u'\n' \ - and leading_non_space and self.peek() not in u' \t': - if not breaks: - chunks.append(u' ') - else: - chunks.append(line_break) - - # This is Clark Evans's interpretation (also in the spec - # examples): - # - #if folded and line_break == u'\n': - # if not breaks: - # if self.peek() not in ' \t': - # chunks.append(u' ') - # else: - # chunks.append(line_break) - #else: - # chunks.append(line_break) - else: - break - - # Chomp the tail. - if chomping is not False: - chunks.append(line_break) - if chomping is True: - chunks.extend(breaks) - - # We are done. - return ScalarToken(u''.join(chunks), False, start_mark, end_mark, - style) - - def scan_block_scalar_indicators(self, start_mark): - # See the specification for details. 
- chomping = None - increment = None - ch = self.peek() - if ch in u'+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch in u'0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - elif ch in u'0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - ch = self.peek() - if ch in u'+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected chomping or indentation indicators, but found %r" - % ch.encode('utf-8'), self.get_mark()) - return chomping, increment - - def scan_block_scalar_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == u' ': - self.forward() - if self.peek() == u'#': - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in u'\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected a comment or a line break, but found %r" - % ch.encode('utf-8'), self.get_mark()) - self.scan_line_break() - - def scan_block_scalar_indentation(self): - # See the specification for details. - chunks = [] - max_indent = 0 - end_mark = self.get_mark() - while self.peek() in u' \r\n\x85\u2028\u2029': - if self.peek() != u' ': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - else: - self.forward() - if self.column > max_indent: - max_indent = self.column - return chunks, max_indent, end_mark - - def scan_block_scalar_breaks(self, indent): - # See the specification for details. - chunks = [] - end_mark = self.get_mark() - while self.column < indent and self.peek() == u' ': - self.forward() - while self.peek() in u'\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - while self.column < indent and self.peek() == u' ': - self.forward() - return chunks, end_mark - - def scan_flow_scalar(self, style): - # See the specification for details. - # Note that we loose indentation rules for quoted scalars. Quoted - # scalars don't need to adhere indentation because " and ' clearly - # mark the beginning and the end of them. Therefore we are less - # restrictive then the specification requires. We only need to check - # that document separators are not included in scalars. 
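        # (Editorial illustration: this looseness is why a double-quoted
        # scalar may continue on an arbitrarily indented line, e.g.
        #     yaml.safe_load('key: "a\n  b"') == {'key': 'a b'}
        # line folding inside the quotes joins the lines with a single
        # space; only the '---'/'...' document separators are rejected.)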
- if style == '"': - double = True - else: - double = False - chunks = [] - start_mark = self.get_mark() - quote = self.peek() - self.forward() - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - while self.peek() != quote: - chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - self.forward() - end_mark = self.get_mark() - return ScalarToken(u''.join(chunks), False, start_mark, end_mark, - style) - - ESCAPE_REPLACEMENTS = { - u'0': u'\0', - u'a': u'\x07', - u'b': u'\x08', - u't': u'\x09', - u'\t': u'\x09', - u'n': u'\x0A', - u'v': u'\x0B', - u'f': u'\x0C', - u'r': u'\x0D', - u'e': u'\x1B', - u' ': u'\x20', - u'\"': u'\"', - u'\\': u'\\', - u'N': u'\x85', - u'_': u'\xA0', - u'L': u'\u2028', - u'P': u'\u2029', - } - - ESCAPE_CODES = { - u'x': 2, - u'u': 4, - u'U': 8, - } - - def scan_flow_scalar_non_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - length = 0 - while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': - length += 1 - if length: - chunks.append(self.prefix(length)) - self.forward(length) - ch = self.peek() - if not double and ch == u'\'' and self.peek(1) == u'\'': - chunks.append(u'\'') - self.forward(2) - elif (double and ch == u'\'') or (not double and ch in u'\"\\'): - chunks.append(ch) - self.forward() - elif double and ch == u'\\': - self.forward() - ch = self.peek() - if ch in self.ESCAPE_REPLACEMENTS: - chunks.append(self.ESCAPE_REPLACEMENTS[ch]) - self.forward() - elif ch in self.ESCAPE_CODES: - length = self.ESCAPE_CODES[ch] - self.forward() - for k in range(length): - if self.peek(k) not in u'0123456789ABCDEFabcdef': - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % - (length, self.peek(k).encode('utf-8')), self.get_mark()) - code = int(self.prefix(length), 16) - chunks.append(unichr(code)) - self.forward(length) - elif ch in u'\r\n\x85\u2028\u2029': - self.scan_line_break() - chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) - else: - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) - else: - return chunks - - def scan_flow_scalar_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - length = 0 - while self.peek(length) in u' \t': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch == u'\0': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected end of stream", self.get_mark()) - elif ch in u'\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - breaks = self.scan_flow_scalar_breaks(double, start_mark) - if line_break != u'\n': - chunks.append(line_break) - elif not breaks: - chunks.append(u' ') - chunks.extend(breaks) - else: - chunks.append(whitespaces) - return chunks - - def scan_flow_scalar_breaks(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - # Instead of checking indentation, we check for document - # separators. 
- prefix = self.prefix(3) - if (prefix == u'---' or prefix == u'...') \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected document separator", self.get_mark()) - while self.peek() in u' \t': - self.forward() - if self.peek() in u'\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - else: - return chunks - - def scan_plain(self): - # See the specification for details. - # We add an additional restriction for the flow context: - # plain scalars in the flow context cannot contain ',', ':' and '?'. - # We also keep track of the `allow_simple_key` flag here. - # Indentation rules are loosed for the flow context. - chunks = [] - start_mark = self.get_mark() - end_mark = start_mark - indent = self.indent+1 - # We allow zero indentation for scalars, but then we need to check for - # document separators at the beginning of the line. - #if indent == 0: - # indent = 1 - spaces = [] - while True: - length = 0 - if self.peek() == u'#': - break - while True: - ch = self.peek(length) - if ch in u'\0 \t\r\n\x85\u2028\u2029' \ - or (not self.flow_level and ch == u':' and - self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ - or (self.flow_level and ch in u',:?[]{}'): - break - length += 1 - # It's not clear what we should do with ':' in the flow context. - if (self.flow_level and ch == u':' - and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): - self.forward(length) - raise ScannerError("while scanning a plain scalar", start_mark, - "found unexpected ':'", self.get_mark(), - "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") - if length == 0: - break - self.allow_simple_key = False - chunks.extend(spaces) - chunks.append(self.prefix(length)) - self.forward(length) - end_mark = self.get_mark() - spaces = self.scan_plain_spaces(indent, start_mark) - if not spaces or self.peek() == u'#' \ - or (not self.flow_level and self.column < indent): - break - return ScalarToken(u''.join(chunks), True, start_mark, end_mark) - - def scan_plain_spaces(self, indent, start_mark): - # See the specification for details. - # The specification is really confusing about tabs in plain scalars. - # We just forbid them completely. Do not use tabs in YAML! - chunks = [] - length = 0 - while self.peek(length) in u' ': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch in u'\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - self.allow_simple_key = True - prefix = self.prefix(3) - if (prefix == u'---' or prefix == u'...') \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return - breaks = [] - while self.peek() in u' \r\n\x85\u2028\u2029': - if self.peek() == ' ': - self.forward() - else: - breaks.append(self.scan_line_break()) - prefix = self.prefix(3) - if (prefix == u'---' or prefix == u'...') \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return - if line_break != u'\n': - chunks.append(line_break) - elif not breaks: - chunks.append(u' ') - chunks.extend(breaks) - elif whitespaces: - chunks.append(whitespaces) - return chunks - - def scan_tag_handle(self, name, start_mark): - # See the specification for details. - # For some strange reasons, the specification does not allow '_' in - # tag handles. I have allowed it anyway. 
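        # (Editorial note: a "tag handle" is the '!'-delimited piece of a
        # %TAG directive, e.g. the hypothetical
        #     %TAG !my_prefix! tag:example.com,2018:
        # so accepting '_' here admits handles such as '!my_prefix!'.)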
- ch = self.peek() - if ch != u'!': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch.encode('utf-8'), - self.get_mark()) - length = 1 - ch = self.peek(length) - if ch != u' ': - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_': - length += 1 - ch = self.peek(length) - if ch != u'!': - self.forward(length) - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch.encode('utf-8'), - self.get_mark()) - length += 1 - value = self.prefix(length) - self.forward(length) - return value - - def scan_tag_uri(self, name, start_mark): - # See the specification for details. - # Note: we do not check if URI is well-formed. - chunks = [] - length = 0 - ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-;/?:@&=+$,_.!~*\'()[]%': - if ch == u'%': - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - chunks.append(self.scan_uri_escapes(name, start_mark)) - else: - length += 1 - ch = self.peek(length) - if length: - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - if not chunks: - raise ScannerError("while parsing a %s" % name, start_mark, - "expected URI, but found %r" % ch.encode('utf-8'), - self.get_mark()) - return u''.join(chunks) - - def scan_uri_escapes(self, name, start_mark): - # See the specification for details. - bytes = [] - mark = self.get_mark() - while self.peek() == u'%': - self.forward() - for k in range(2): - if self.peek(k) not in u'0123456789ABCDEFabcdef': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % - (self.peek(k).encode('utf-8')), self.get_mark()) - bytes.append(chr(int(self.prefix(2), 16))) - self.forward(2) - try: - value = unicode(''.join(bytes), 'utf-8') - except UnicodeDecodeError, exc: - raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) - return value - - def scan_line_break(self): - # Transforms: - # '\r\n' : '\n' - # '\r' : '\n' - # '\n' : '\n' - # '\x85' : '\n' - # '\u2028' : '\u2028' - # '\u2029 : '\u2029' - # default : '' - ch = self.peek() - if ch in u'\r\n\x85': - if self.prefix(2) == u'\r\n': - self.forward(2) - else: - self.forward() - return u'\n' - elif ch in u'\u2028\u2029': - self.forward() - return ch - return u'' - -#try: -# import psyco -# psyco.bind(Scanner) -#except ImportError: -# pass - diff --git a/python.d/python_modules/pyyaml2/serializer.py b/python.d/python_modules/pyyaml2/serializer.py deleted file mode 100644 index 0bf1e96dc..000000000 --- a/python.d/python_modules/pyyaml2/serializer.py +++ /dev/null @@ -1,111 +0,0 @@ - -__all__ = ['Serializer', 'SerializerError'] - -from error import YAMLError -from events import * -from nodes import * - -class SerializerError(YAMLError): - pass - -class Serializer(object): - - ANCHOR_TEMPLATE = u'id%03d' - - def __init__(self, encoding=None, - explicit_start=None, explicit_end=None, version=None, tags=None): - self.use_encoding = encoding - self.use_explicit_start = explicit_start - self.use_explicit_end = explicit_end - self.use_version = version - self.use_tags = tags - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - self.closed = None - - def open(self): - if self.closed is None: - self.emit(StreamStartEvent(encoding=self.use_encoding)) - self.closed = False - elif self.closed: - raise SerializerError("serializer is closed") - else: - raise 
SerializerError("serializer is already opened") - - def close(self): - if self.closed is None: - raise SerializerError("serializer is not opened") - elif not self.closed: - self.emit(StreamEndEvent()) - self.closed = True - - #def __del__(self): - # self.close() - - def serialize(self, node): - if self.closed is None: - raise SerializerError("serializer is not opened") - elif self.closed: - raise SerializerError("serializer is closed") - self.emit(DocumentStartEvent(explicit=self.use_explicit_start, - version=self.use_version, tags=self.use_tags)) - self.anchor_node(node) - self.serialize_node(node, None, None) - self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - - def anchor_node(self, node): - if node in self.anchors: - if self.anchors[node] is None: - self.anchors[node] = self.generate_anchor(node) - else: - self.anchors[node] = None - if isinstance(node, SequenceNode): - for item in node.value: - self.anchor_node(item) - elif isinstance(node, MappingNode): - for key, value in node.value: - self.anchor_node(key) - self.anchor_node(value) - - def generate_anchor(self, node): - self.last_anchor_id += 1 - return self.ANCHOR_TEMPLATE % self.last_anchor_id - - def serialize_node(self, node, parent, index): - alias = self.anchors[node] - if node in self.serialized_nodes: - self.emit(AliasEvent(alias)) - else: - self.serialized_nodes[node] = True - self.descend_resolver(parent, index) - if isinstance(node, ScalarNode): - detected_tag = self.resolve(ScalarNode, node.value, (True, False)) - default_tag = self.resolve(ScalarNode, node.value, (False, True)) - implicit = (node.tag == detected_tag), (node.tag == default_tag) - self.emit(ScalarEvent(alias, node.tag, implicit, node.value, - style=node.style)) - elif isinstance(node, SequenceNode): - implicit = (node.tag - == self.resolve(SequenceNode, node.value, True)) - self.emit(SequenceStartEvent(alias, node.tag, implicit, - flow_style=node.flow_style)) - index = 0 - for item in node.value: - self.serialize_node(item, node, index) - index += 1 - self.emit(SequenceEndEvent()) - elif isinstance(node, MappingNode): - implicit = (node.tag - == self.resolve(MappingNode, node.value, True)) - self.emit(MappingStartEvent(alias, node.tag, implicit, - flow_style=node.flow_style)) - for key, value in node.value: - self.serialize_node(key, node, None) - self.serialize_node(value, node, key) - self.emit(MappingEndEvent()) - self.ascend_resolver() - diff --git a/python.d/python_modules/pyyaml2/tokens.py b/python.d/python_modules/pyyaml2/tokens.py deleted file mode 100644 index 4d0b48a39..000000000 --- a/python.d/python_modules/pyyaml2/tokens.py +++ /dev/null @@ -1,104 +0,0 @@ - -class Token(object): - def __init__(self, start_mark, end_mark): - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - attributes = [key for key in self.__dict__ - if not key.endswith('_mark')] - attributes.sort() - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - -#class BOMToken(Token): -# id = '<byte order mark>' - -class DirectiveToken(Token): - id = '<directive>' - def __init__(self, name, value, start_mark, end_mark): - self.name = name - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class DocumentStartToken(Token): - id = '<document start>' - -class DocumentEndToken(Token): - id = '<document end>' - -class StreamStartToken(Token): - id = 
'<stream start>' - def __init__(self, start_mark=None, end_mark=None, - encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndToken(Token): - id = '<stream end>' - -class BlockSequenceStartToken(Token): - id = '<block sequence start>' - -class BlockMappingStartToken(Token): - id = '<block mapping start>' - -class BlockEndToken(Token): - id = '<block end>' - -class FlowSequenceStartToken(Token): - id = '[' - -class FlowMappingStartToken(Token): - id = '{' - -class FlowSequenceEndToken(Token): - id = ']' - -class FlowMappingEndToken(Token): - id = '}' - -class KeyToken(Token): - id = '?' - -class ValueToken(Token): - id = ':' - -class BlockEntryToken(Token): - id = '-' - -class FlowEntryToken(Token): - id = ',' - -class AliasToken(Token): - id = '<alias>' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class AnchorToken(Token): - id = '<anchor>' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class TagToken(Token): - id = '<tag>' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class ScalarToken(Token): - id = '<scalar>' - def __init__(self, value, plain, start_mark, end_mark, style=None): - self.value = value - self.plain = plain - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - diff --git a/python.d/python_modules/pyyaml3/__init__.py b/python.d/python_modules/pyyaml3/__init__.py deleted file mode 100644 index a5e20f94d..000000000 --- a/python.d/python_modules/pyyaml3/__init__.py +++ /dev/null @@ -1,312 +0,0 @@ - -from .error import * - -from .tokens import * -from .events import * -from .nodes import * - -from .loader import * -from .dumper import * - -__version__ = '3.11' -try: - from .cyaml import * - __with_libyaml__ = True -except ImportError: - __with_libyaml__ = False - -import io - -def scan(stream, Loader=Loader): - """ - Scan a YAML stream and produce scanning tokens. - """ - loader = Loader(stream) - try: - while loader.check_token(): - yield loader.get_token() - finally: - loader.dispose() - -def parse(stream, Loader=Loader): - """ - Parse a YAML stream and produce parsing events. - """ - loader = Loader(stream) - try: - while loader.check_event(): - yield loader.get_event() - finally: - loader.dispose() - -def compose(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding representation tree. - """ - loader = Loader(stream) - try: - return loader.get_single_node() - finally: - loader.dispose() - -def compose_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding representation trees. - """ - loader = Loader(stream) - try: - while loader.check_node(): - yield loader.get_node() - finally: - loader.dispose() - -def load(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - """ - loader = Loader(stream) - try: - return loader.get_single_data() - finally: - loader.dispose() - -def load_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. 
- """ - loader = Loader(stream) - try: - while loader.check_data(): - yield loader.get_data() - finally: - loader.dispose() - -def safe_load(stream): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - Resolve only basic YAML tags. - """ - return load(stream, SafeLoader) - -def safe_load_all(stream): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. - Resolve only basic YAML tags. - """ - return load_all(stream, SafeLoader) - -def emit(events, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - """ - Emit YAML parsing events into a stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - stream = io.StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - try: - for event in events: - dumper.emit(event) - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize_all(nodes, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of representation trees into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - stream = io.StringIO() - else: - stream = io.BytesIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for node in nodes: - dumper.serialize(node) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize(node, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a representation tree into a YAML stream. - If stream is None, return the produced string instead. - """ - return serialize_all([node], stream, Dumper=Dumper, **kwds) - -def dump_all(documents, stream=None, Dumper=Dumper, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of Python objects into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - stream = io.StringIO() - else: - stream = io.BytesIO() - getvalue = stream.getvalue - dumper = Dumper(stream, default_style=default_style, - default_flow_style=default_flow_style, - canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for data in documents: - dumper.represent(data) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def dump(data, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a Python object into a YAML stream. - If stream is None, return the produced string instead. 
- """ - return dump_all([data], stream, Dumper=Dumper, **kwds) - -def safe_dump_all(documents, stream=None, **kwds): - """ - Serialize a sequence of Python objects into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all(documents, stream, Dumper=SafeDumper, **kwds) - -def safe_dump(data, stream=None, **kwds): - """ - Serialize a Python object into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all([data], stream, Dumper=SafeDumper, **kwds) - -def add_implicit_resolver(tag, regexp, first=None, - Loader=Loader, Dumper=Dumper): - """ - Add an implicit scalar detector. - If an implicit scalar value matches the given regexp, - the corresponding tag is assigned to the scalar. - first is a sequence of possible initial characters or None. - """ - Loader.add_implicit_resolver(tag, regexp, first) - Dumper.add_implicit_resolver(tag, regexp, first) - -def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): - """ - Add a path based resolver for the given tag. - A path is a list of keys that forms a path - to a node in the representation tree. - Keys can be string values, integers, or None. - """ - Loader.add_path_resolver(tag, path, kind) - Dumper.add_path_resolver(tag, path, kind) - -def add_constructor(tag, constructor, Loader=Loader): - """ - Add a constructor for the given tag. - Constructor is a function that accepts a Loader instance - and a node object and produces the corresponding Python object. - """ - Loader.add_constructor(tag, constructor) - -def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): - """ - Add a multi-constructor for the given tag prefix. - Multi-constructor is called for a node if its tag starts with tag_prefix. - Multi-constructor accepts a Loader instance, a tag suffix, - and a node object and produces the corresponding Python object. - """ - Loader.add_multi_constructor(tag_prefix, multi_constructor) - -def add_representer(data_type, representer, Dumper=Dumper): - """ - Add a representer for the given type. - Representer is a function accepting a Dumper instance - and an instance of the given data type - and producing the corresponding representation node. - """ - Dumper.add_representer(data_type, representer) - -def add_multi_representer(data_type, multi_representer, Dumper=Dumper): - """ - Add a representer for the given type. - Multi-representer is a function accepting a Dumper instance - and an instance of the given data type or subtype - and producing the corresponding representation node. - """ - Dumper.add_multi_representer(data_type, multi_representer) - -class YAMLObjectMetaclass(type): - """ - The metaclass for YAMLObject. - """ - def __init__(cls, name, bases, kwds): - super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) - if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: - cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) - cls.yaml_dumper.add_representer(cls, cls.to_yaml) - -class YAMLObject(metaclass=YAMLObjectMetaclass): - """ - An object that can dump itself to a YAML stream - and load itself from a YAML stream. - """ - - __slots__ = () # no direct instantiation, so allow immutable subclasses - - yaml_loader = Loader - yaml_dumper = Dumper - - yaml_tag = None - yaml_flow_style = None - - @classmethod - def from_yaml(cls, loader, node): - """ - Convert a representation node to a Python object. 
- """ - return loader.construct_yaml_object(node, cls) - - @classmethod - def to_yaml(cls, dumper, data): - """ - Convert a Python object to a representation node. - """ - return dumper.represent_yaml_object(cls.yaml_tag, data, cls, - flow_style=cls.yaml_flow_style) - diff --git a/python.d/python_modules/pyyaml3/composer.py b/python.d/python_modules/pyyaml3/composer.py deleted file mode 100644 index d5c6a7acd..000000000 --- a/python.d/python_modules/pyyaml3/composer.py +++ /dev/null @@ -1,139 +0,0 @@ - -__all__ = ['Composer', 'ComposerError'] - -from .error import MarkedYAMLError -from .events import * -from .nodes import * - -class ComposerError(MarkedYAMLError): - pass - -class Composer: - - def __init__(self): - self.anchors = {} - - def check_node(self): - # Drop the STREAM-START event. - if self.check_event(StreamStartEvent): - self.get_event() - - # If there are more documents available? - return not self.check_event(StreamEndEvent) - - def get_node(self): - # Get the root node of the next document. - if not self.check_event(StreamEndEvent): - return self.compose_document() - - def get_single_node(self): - # Drop the STREAM-START event. - self.get_event() - - # Compose a document if the stream is not empty. - document = None - if not self.check_event(StreamEndEvent): - document = self.compose_document() - - # Ensure that the stream contains no more documents. - if not self.check_event(StreamEndEvent): - event = self.get_event() - raise ComposerError("expected a single document in the stream", - document.start_mark, "but found another document", - event.start_mark) - - # Drop the STREAM-END event. - self.get_event() - - return document - - def compose_document(self): - # Drop the DOCUMENT-START event. - self.get_event() - - # Compose the root node. - node = self.compose_node(None, None) - - # Drop the DOCUMENT-END event. 
- self.get_event() - - self.anchors = {} - return node - - def compose_node(self, parent, index): - if self.check_event(AliasEvent): - event = self.get_event() - anchor = event.anchor - if anchor not in self.anchors: - raise ComposerError(None, None, "found undefined alias %r" - % anchor, event.start_mark) - return self.anchors[anchor] - event = self.peek_event() - anchor = event.anchor - if anchor is not None: - if anchor in self.anchors: - raise ComposerError("found duplicate anchor %r; first occurence" - % anchor, self.anchors[anchor].start_mark, - "second occurence", event.start_mark) - self.descend_resolver(parent, index) - if self.check_event(ScalarEvent): - node = self.compose_scalar_node(anchor) - elif self.check_event(SequenceStartEvent): - node = self.compose_sequence_node(anchor) - elif self.check_event(MappingStartEvent): - node = self.compose_mapping_node(anchor) - self.ascend_resolver() - return node - - def compose_scalar_node(self, anchor): - event = self.get_event() - tag = event.tag - if tag is None or tag == '!': - tag = self.resolve(ScalarNode, event.value, event.implicit) - node = ScalarNode(tag, event.value, - event.start_mark, event.end_mark, style=event.style) - if anchor is not None: - self.anchors[anchor] = node - return node - - def compose_sequence_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == '!': - tag = self.resolve(SequenceNode, None, start_event.implicit) - node = SequenceNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - index = 0 - while not self.check_event(SequenceEndEvent): - node.value.append(self.compose_node(node, index)) - index += 1 - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - - def compose_mapping_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == '!': - tag = self.resolve(MappingNode, None, start_event.implicit) - node = MappingNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - while not self.check_event(MappingEndEvent): - #key_event = self.peek_event() - item_key = self.compose_node(node, None) - #if item_key in node.value: - # raise ComposerError("while composing a mapping", start_event.start_mark, - # "found duplicate key", key_event.start_mark) - item_value = self.compose_node(node, item_key) - #node.value[item_key] = item_value - node.value.append((item_key, item_value)) - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - diff --git a/python.d/python_modules/pyyaml3/constructor.py b/python.d/python_modules/pyyaml3/constructor.py deleted file mode 100644 index 981543aeb..000000000 --- a/python.d/python_modules/pyyaml3/constructor.py +++ /dev/null @@ -1,686 +0,0 @@ - -__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', - 'ConstructorError'] - -from .error import * -from .nodes import * - -import collections, datetime, base64, binascii, re, sys, types - -class ConstructorError(MarkedYAMLError): - pass - -class BaseConstructor: - - yaml_constructors = {} - yaml_multi_constructors = {} - - def __init__(self): - self.constructed_objects = {} - self.recursive_objects = {} - self.state_generators = [] - self.deep_construct = False - - def check_data(self): - # If there are more documents available? 
- return self.check_node() - - def get_data(self): - # Construct and return the next document. - if self.check_node(): - return self.construct_document(self.get_node()) - - def get_single_data(self): - # Ensure that the stream contains a single document and construct it. - node = self.get_single_node() - if node is not None: - return self.construct_document(node) - return None - - def construct_document(self, node): - data = self.construct_object(node) - while self.state_generators: - state_generators = self.state_generators - self.state_generators = [] - for generator in state_generators: - for dummy in generator: - pass - self.constructed_objects = {} - self.recursive_objects = {} - self.deep_construct = False - return data - - def construct_object(self, node, deep=False): - if node in self.constructed_objects: - return self.constructed_objects[node] - if deep: - old_deep = self.deep_construct - self.deep_construct = True - if node in self.recursive_objects: - raise ConstructorError(None, None, - "found unconstructable recursive node", node.start_mark) - self.recursive_objects[node] = None - constructor = None - tag_suffix = None - if node.tag in self.yaml_constructors: - constructor = self.yaml_constructors[node.tag] - else: - for tag_prefix in self.yaml_multi_constructors: - if node.tag.startswith(tag_prefix): - tag_suffix = node.tag[len(tag_prefix):] - constructor = self.yaml_multi_constructors[tag_prefix] - break - else: - if None in self.yaml_multi_constructors: - tag_suffix = node.tag - constructor = self.yaml_multi_constructors[None] - elif None in self.yaml_constructors: - constructor = self.yaml_constructors[None] - elif isinstance(node, ScalarNode): - constructor = self.__class__.construct_scalar - elif isinstance(node, SequenceNode): - constructor = self.__class__.construct_sequence - elif isinstance(node, MappingNode): - constructor = self.__class__.construct_mapping - if tag_suffix is None: - data = constructor(self, node) - else: - data = constructor(self, tag_suffix, node) - if isinstance(data, types.GeneratorType): - generator = data - data = next(generator) - if self.deep_construct: - for dummy in generator: - pass - else: - self.state_generators.append(generator) - self.constructed_objects[node] = data - del self.recursive_objects[node] - if deep: - self.deep_construct = old_deep - return data - - def construct_scalar(self, node): - if not isinstance(node, ScalarNode): - raise ConstructorError(None, None, - "expected a scalar node, but found %s" % node.id, - node.start_mark) - return node.value - - def construct_sequence(self, node, deep=False): - if not isinstance(node, SequenceNode): - raise ConstructorError(None, None, - "expected a sequence node, but found %s" % node.id, - node.start_mark) - return [self.construct_object(child, deep=deep) - for child in node.value] - - def construct_mapping(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - mapping = {} - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - if not isinstance(key, collections.Hashable): - raise ConstructorError("while constructing a mapping", node.start_mark, - "found unhashable key", key_node.start_mark) - value = self.construct_object(value_node, deep=deep) - mapping[key] = value - return mapping - - def construct_pairs(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a 
mapping node, but found %s" % node.id, - node.start_mark) - pairs = [] - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - value = self.construct_object(value_node, deep=deep) - pairs.append((key, value)) - return pairs - - @classmethod - def add_constructor(cls, tag, constructor): - if not 'yaml_constructors' in cls.__dict__: - cls.yaml_constructors = cls.yaml_constructors.copy() - cls.yaml_constructors[tag] = constructor - - @classmethod - def add_multi_constructor(cls, tag_prefix, multi_constructor): - if not 'yaml_multi_constructors' in cls.__dict__: - cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() - cls.yaml_multi_constructors[tag_prefix] = multi_constructor - -class SafeConstructor(BaseConstructor): - - def construct_scalar(self, node): - if isinstance(node, MappingNode): - for key_node, value_node in node.value: - if key_node.tag == 'tag:yaml.org,2002:value': - return self.construct_scalar(value_node) - return super().construct_scalar(node) - - def flatten_mapping(self, node): - merge = [] - index = 0 - while index < len(node.value): - key_node, value_node = node.value[index] - if key_node.tag == 'tag:yaml.org,2002:merge': - del node.value[index] - if isinstance(value_node, MappingNode): - self.flatten_mapping(value_node) - merge.extend(value_node.value) - elif isinstance(value_node, SequenceNode): - submerge = [] - for subnode in value_node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing a mapping", - node.start_mark, - "expected a mapping for merging, but found %s" - % subnode.id, subnode.start_mark) - self.flatten_mapping(subnode) - submerge.append(subnode.value) - submerge.reverse() - for value in submerge: - merge.extend(value) - else: - raise ConstructorError("while constructing a mapping", node.start_mark, - "expected a mapping or list of mappings for merging, but found %s" - % value_node.id, value_node.start_mark) - elif key_node.tag == 'tag:yaml.org,2002:value': - key_node.tag = 'tag:yaml.org,2002:str' - index += 1 - else: - index += 1 - if merge: - node.value = merge + node.value - - def construct_mapping(self, node, deep=False): - if isinstance(node, MappingNode): - self.flatten_mapping(node) - return super().construct_mapping(node, deep=deep) - - def construct_yaml_null(self, node): - self.construct_scalar(node) - return None - - bool_values = { - 'yes': True, - 'no': False, - 'true': True, - 'false': False, - 'on': True, - 'off': False, - } - - def construct_yaml_bool(self, node): - value = self.construct_scalar(node) - return self.bool_values[value.lower()] - - def construct_yaml_int(self, node): - value = self.construct_scalar(node) - value = value.replace('_', '') - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '0': - return 0 - elif value.startswith('0b'): - return sign*int(value[2:], 2) - elif value.startswith('0x'): - return sign*int(value[2:], 16) - elif value[0] == '0': - return sign*int(value, 8) - elif ':' in value: - digits = [int(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*int(value) - - inf_value = 1e300 - while inf_value != inf_value*inf_value: - inf_value *= inf_value - nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
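Editorial aside: the integer constructor above implements every YAML 1.1 integer form, including the base-60 "sexagesimal" notation. A minimal sketch of what it accepts, assuming the public `yaml` package (whose API this vendored copy mirrors); this is illustration only, not part of the deleted file:

    import yaml

    # Binary, hex, octal (leading zero) and base-60 values all resolve
    # through construct_yaml_int:
    assert yaml.safe_load('0b1010') == 10
    assert yaml.safe_load('0x1A') == 26
    assert yaml.safe_load('0755') == 493      # leading 0 means octal in YAML 1.1
    assert yaml.safe_load('1:30:00') == 5400  # sexagesimal: 1*3600 + 30*60 + 0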
- - def construct_yaml_float(self, node): - value = self.construct_scalar(node) - value = value.replace('_', '').lower() - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '.inf': - return sign*self.inf_value - elif value == '.nan': - return self.nan_value - elif ':' in value: - digits = [float(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0.0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*float(value) - - def construct_yaml_binary(self, node): - try: - value = self.construct_scalar(node).encode('ascii') - except UnicodeEncodeError as exc: - raise ConstructorError(None, None, - "failed to convert base64 data into ascii: %s" % exc, - node.start_mark) - try: - if hasattr(base64, 'decodebytes'): - return base64.decodebytes(value) - else: - return base64.decodestring(value) - except binascii.Error as exc: - raise ConstructorError(None, None, - "failed to decode base64 data: %s" % exc, node.start_mark) - - timestamp_regexp = re.compile( - r'''^(?P<year>[0-9][0-9][0-9][0-9]) - -(?P<month>[0-9][0-9]?) - -(?P<day>[0-9][0-9]?) - (?:(?:[Tt]|[ \t]+) - (?P<hour>[0-9][0-9]?) - :(?P<minute>[0-9][0-9]) - :(?P<second>[0-9][0-9]) - (?:\.(?P<fraction>[0-9]*))? - (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?) - (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X) - - def construct_yaml_timestamp(self, node): - value = self.construct_scalar(node) - match = self.timestamp_regexp.match(node.value) - values = match.groupdict() - year = int(values['year']) - month = int(values['month']) - day = int(values['day']) - if not values['hour']: - return datetime.date(year, month, day) - hour = int(values['hour']) - minute = int(values['minute']) - second = int(values['second']) - fraction = 0 - if values['fraction']: - fraction = values['fraction'][:6] - while len(fraction) < 6: - fraction += '0' - fraction = int(fraction) - delta = None - if values['tz_sign']: - tz_hour = int(values['tz_hour']) - tz_minute = int(values['tz_minute'] or 0) - delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) - if values['tz_sign'] == '-': - delta = -delta - data = datetime.datetime(year, month, day, hour, minute, second, fraction) - if delta: - data -= delta - return data - - def construct_yaml_omap(self, node): - # Note: we do not check for duplicate keys, because it's too - # CPU-expensive. - omap = [] - yield omap - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - omap.append((key, value)) - - def construct_yaml_pairs(self, node): - # Note: the same code as `construct_yaml_omap`. 
- pairs = [] - yield pairs - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - pairs.append((key, value)) - - def construct_yaml_set(self, node): - data = set() - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_str(self, node): - return self.construct_scalar(node) - - def construct_yaml_seq(self, node): - data = [] - yield data - data.extend(self.construct_sequence(node)) - - def construct_yaml_map(self, node): - data = {} - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_object(self, node, cls): - data = cls.__new__(cls) - yield data - if hasattr(data, '__setstate__'): - state = self.construct_mapping(node, deep=True) - data.__setstate__(state) - else: - state = self.construct_mapping(node) - data.__dict__.update(state) - - def construct_undefined(self, node): - raise ConstructorError(None, None, - "could not determine a constructor for the tag %r" % node.tag, - node.start_mark) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:null', - SafeConstructor.construct_yaml_null) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:bool', - SafeConstructor.construct_yaml_bool) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:int', - SafeConstructor.construct_yaml_int) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:float', - SafeConstructor.construct_yaml_float) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:binary', - SafeConstructor.construct_yaml_binary) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:timestamp', - SafeConstructor.construct_yaml_timestamp) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:omap', - SafeConstructor.construct_yaml_omap) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:pairs', - SafeConstructor.construct_yaml_pairs) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:set', - SafeConstructor.construct_yaml_set) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:str', - SafeConstructor.construct_yaml_str) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:seq', - SafeConstructor.construct_yaml_seq) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:map', - SafeConstructor.construct_yaml_map) - -SafeConstructor.add_constructor(None, - SafeConstructor.construct_undefined) - -class Constructor(SafeConstructor): - - def construct_python_str(self, node): - return self.construct_scalar(node) - - def construct_python_unicode(self, node): - return self.construct_scalar(node) - - def construct_python_bytes(self, node): - try: - value = self.construct_scalar(node).encode('ascii') - except UnicodeEncodeError as exc: - raise ConstructorError(None, None, - "failed to convert base64 data into ascii: %s" % exc, - node.start_mark) - try: - if hasattr(base64, 'decodebytes'): - return base64.decodebytes(value) - else: - return 
base64.decodestring(value) - except binascii.Error as exc: - raise ConstructorError(None, None, - "failed to decode base64 data: %s" % exc, node.start_mark) - - def construct_python_long(self, node): - return self.construct_yaml_int(node) - - def construct_python_complex(self, node): - return complex(self.construct_scalar(node)) - - def construct_python_tuple(self, node): - return tuple(self.construct_sequence(node)) - - def find_python_module(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python module", mark, - "expected non-empty name appended to the tag", mark) - try: - __import__(name) - except ImportError as exc: - raise ConstructorError("while constructing a Python module", mark, - "cannot find module %r (%s)" % (name, exc), mark) - return sys.modules[name] - - def find_python_name(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python object", mark, - "expected non-empty name appended to the tag", mark) - if '.' in name: - module_name, object_name = name.rsplit('.', 1) - else: - module_name = 'builtins' - object_name = name - try: - __import__(module_name) - except ImportError as exc: - raise ConstructorError("while constructing a Python object", mark, - "cannot find module %r (%s)" % (module_name, exc), mark) - module = sys.modules[module_name] - if not hasattr(module, object_name): - raise ConstructorError("while constructing a Python object", mark, - "cannot find %r in the module %r" - % (object_name, module.__name__), mark) - return getattr(module, object_name) - - def construct_python_name(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python name", node.start_mark, - "expected the empty value, but found %r" % value, node.start_mark) - return self.find_python_name(suffix, node.start_mark) - - def construct_python_module(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python module", node.start_mark, - "expected the empty value, but found %r" % value, node.start_mark) - return self.find_python_module(suffix, node.start_mark) - - def make_python_instance(self, suffix, node, - args=None, kwds=None, newobj=False): - if not args: - args = [] - if not kwds: - kwds = {} - cls = self.find_python_name(suffix, node.start_mark) - if newobj and isinstance(cls, type): - return cls.__new__(cls, *args, **kwds) - else: - return cls(*args, **kwds) - - def set_python_instance_state(self, instance, state): - if hasattr(instance, '__setstate__'): - instance.__setstate__(state) - else: - slotstate = {} - if isinstance(state, tuple) and len(state) == 2: - state, slotstate = state - if hasattr(instance, '__dict__'): - instance.__dict__.update(state) - elif state: - slotstate.update(state) - for key, value in slotstate.items(): - setattr(object, key, value) - - def construct_python_object(self, suffix, node): - # Format: - # !!python/object:module.name { ... state ... } - instance = self.make_python_instance(suffix, node, newobj=True) - yield instance - deep = hasattr(instance, '__setstate__') - state = self.construct_mapping(node, deep=deep) - self.set_python_instance_state(instance, state) - - def construct_python_object_apply(self, suffix, node, newobj=False): - # Format: - # !!python/object/apply # (or !!python/object/new) - # args: [ ... arguments ... ] - # kwds: { ... keywords ... } - # state: ... state ... - # listitems: [ ... listitems ... ] - # dictitems: { ... dictitems ... 
} - # or short format: - # !!python/object/apply [ ... arguments ... ] - # The difference between !!python/object/apply and !!python/object/new - # is how an object is created, check make_python_instance for details. - if isinstance(node, SequenceNode): - args = self.construct_sequence(node, deep=True) - kwds = {} - state = {} - listitems = [] - dictitems = {} - else: - value = self.construct_mapping(node, deep=True) - args = value.get('args', []) - kwds = value.get('kwds', {}) - state = value.get('state', {}) - listitems = value.get('listitems', []) - dictitems = value.get('dictitems', {}) - instance = self.make_python_instance(suffix, node, args, kwds, newobj) - if state: - self.set_python_instance_state(instance, state) - if listitems: - instance.extend(listitems) - if dictitems: - for key in dictitems: - instance[key] = dictitems[key] - return instance - - def construct_python_object_new(self, suffix, node): - return self.construct_python_object_apply(suffix, node, newobj=True) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/none', - Constructor.construct_yaml_null) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/bool', - Constructor.construct_yaml_bool) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/str', - Constructor.construct_python_str) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/unicode', - Constructor.construct_python_unicode) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/bytes', - Constructor.construct_python_bytes) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/int', - Constructor.construct_yaml_int) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/long', - Constructor.construct_python_long) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/float', - Constructor.construct_yaml_float) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/complex', - Constructor.construct_python_complex) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/list', - Constructor.construct_yaml_seq) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/tuple', - Constructor.construct_python_tuple) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/dict', - Constructor.construct_yaml_map) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/name:', - Constructor.construct_python_name) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/module:', - Constructor.construct_python_module) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/object:', - Constructor.construct_python_object) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/object/apply:', - Constructor.construct_python_object_apply) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/object/new:', - Constructor.construct_python_object_new) - diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/python.d/python_modules/pyyaml3/cyaml.py deleted file mode 100644 index d5cb87e99..000000000 --- a/python.d/python_modules/pyyaml3/cyaml.py +++ /dev/null @@ -1,85 +0,0 @@ - -__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', - 'CBaseDumper', 'CSafeDumper', 'CDumper'] - -from _yaml import CParser, CEmitter - -from .constructor import * - -from .serializer import * -from .representer import * - -from .resolver import * - -class CBaseLoader(CParser, BaseConstructor, BaseResolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class CSafeLoader(CParser, SafeConstructor, 
Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class CLoader(CParser, Constructor, Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - Constructor.__init__(self) - Resolver.__init__(self) - -class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CSafeDumper(CEmitter, SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CDumper(CEmitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/python.d/python_modules/pyyaml3/dumper.py b/python.d/python_modules/pyyaml3/dumper.py deleted file mode 100644 index 0b6912877..000000000 --- a/python.d/python_modules/pyyaml3/dumper.py +++ /dev/null @@ -1,62 +0,0 @@ - -__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] - -from .emitter import * -from .serializer import * -from .representer import * -from .resolver import * - -class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): - - def __init__(self, 
stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class Dumper(Emitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/python.d/python_modules/pyyaml3/emitter.py b/python.d/python_modules/pyyaml3/emitter.py deleted file mode 100644 index 34cb145a5..000000000 --- a/python.d/python_modules/pyyaml3/emitter.py +++ /dev/null @@ -1,1137 +0,0 @@ - -# Emitter expects events obeying the following grammar: -# stream ::= STREAM-START document* STREAM-END -# document ::= DOCUMENT-START node DOCUMENT-END -# node ::= SCALAR | sequence | mapping -# sequence ::= SEQUENCE-START node* SEQUENCE-END -# mapping ::= MAPPING-START (node node)* MAPPING-END - -__all__ = ['Emitter', 'EmitterError'] - -from .error import YAMLError -from .events import * - -class EmitterError(YAMLError): - pass - -class ScalarAnalysis: - def __init__(self, scalar, empty, multiline, - allow_flow_plain, allow_block_plain, - allow_single_quoted, allow_double_quoted, - allow_block): - self.scalar = scalar - self.empty = empty - self.multiline = multiline - self.allow_flow_plain = allow_flow_plain - self.allow_block_plain = allow_block_plain - self.allow_single_quoted = allow_single_quoted - self.allow_double_quoted = allow_double_quoted - self.allow_block = allow_block - -class Emitter: - - DEFAULT_TAG_PREFIXES = { - '!' : '!', - 'tag:yaml.org,2002:' : '!!', - } - - def __init__(self, stream, canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - - # The stream should have the methods `write` and possibly `flush`. - self.stream = stream - - # Encoding can be overriden by STREAM-START. - self.encoding = None - - # Emitter is a state machine with a stack of states to handle nested - # structures. - self.states = [] - self.state = self.expect_stream_start - - # Current event and the event queue. - self.events = [] - self.event = None - - # The current indentation level and the stack of previous indents. - self.indents = [] - self.indent = None - - # Flow level. - self.flow_level = 0 - - # Contexts. - self.root_context = False - self.sequence_context = False - self.mapping_context = False - self.simple_key_context = False - - # Characteristics of the last emitted character: - # - current position. - # - is it a whitespace? - # - is it an indention character - # (indentation space, '-', '?', or ':')? 
- self.line = 0 - self.column = 0 - self.whitespace = True - self.indention = True - - # Whether the document requires an explicit document indicator - self.open_ended = False - - # Formatting details. - self.canonical = canonical - self.allow_unicode = allow_unicode - self.best_indent = 2 - if indent and 1 < indent < 10: - self.best_indent = indent - self.best_width = 80 - if width and width > self.best_indent*2: - self.best_width = width - self.best_line_break = '\n' - if line_break in ['\r', '\n', '\r\n']: - self.best_line_break = line_break - - # Tag prefixes. - self.tag_prefixes = None - - # Prepared anchor and tag. - self.prepared_anchor = None - self.prepared_tag = None - - # Scalar analysis and style. - self.analysis = None - self.style = None - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def emit(self, event): - self.events.append(event) - while not self.need_more_events(): - self.event = self.events.pop(0) - self.state() - self.event = None - - # In some cases, we wait for a few next events before emitting. - - def need_more_events(self): - if not self.events: - return True - event = self.events[0] - if isinstance(event, DocumentStartEvent): - return self.need_events(1) - elif isinstance(event, SequenceStartEvent): - return self.need_events(2) - elif isinstance(event, MappingStartEvent): - return self.need_events(3) - else: - return False - - def need_events(self, count): - level = 0 - for event in self.events[1:]: - if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): - level += 1 - elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): - level -= 1 - elif isinstance(event, StreamEndEvent): - level = -1 - if level < 0: - return False - return (len(self.events) < count+1) - - def increase_indent(self, flow=False, indentless=False): - self.indents.append(self.indent) - if self.indent is None: - if flow: - self.indent = self.best_indent - else: - self.indent = 0 - elif not indentless: - self.indent += self.best_indent - - # States. - - # Stream handlers. - - def expect_stream_start(self): - if isinstance(self.event, StreamStartEvent): - if self.event.encoding and not hasattr(self.stream, 'encoding'): - self.encoding = self.event.encoding - self.write_stream_start() - self.state = self.expect_first_document_start - else: - raise EmitterError("expected StreamStartEvent, but got %s" - % self.event) - - def expect_nothing(self): - raise EmitterError("expected nothing, but got %s" % self.event) - - # Document handlers. 
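Editorial aside: the emitter is driven by exactly the event grammar quoted at the top of this file. A hedged sketch of feeding it such a stream through the top-level emit() helper (public PyYAML API assumed; output shown approximately):

    import yaml
    from yaml.events import (StreamStartEvent, DocumentStartEvent,
                             ScalarEvent, DocumentEndEvent, StreamEndEvent)

    events = [
        StreamStartEvent(),
        DocumentStartEvent(),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='hello'),
        DocumentEndEvent(),
        StreamEndEvent(),
    ]
    print(yaml.emit(events))  # roughly: "hello\n...\n" (a plain-scalar document)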
- - def expect_first_document_start(self): - return self.expect_document_start(first=True) - - def expect_document_start(self, first=False): - if isinstance(self.event, DocumentStartEvent): - if (self.event.version or self.event.tags) and self.open_ended: - self.write_indicator('...', True) - self.write_indent() - if self.event.version: - version_text = self.prepare_version(self.event.version) - self.write_version_directive(version_text) - self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() - if self.event.tags: - handles = sorted(self.event.tags.keys()) - for handle in handles: - prefix = self.event.tags[handle] - self.tag_prefixes[prefix] = handle - handle_text = self.prepare_tag_handle(handle) - prefix_text = self.prepare_tag_prefix(prefix) - self.write_tag_directive(handle_text, prefix_text) - implicit = (first and not self.event.explicit and not self.canonical - and not self.event.version and not self.event.tags - and not self.check_empty_document()) - if not implicit: - self.write_indent() - self.write_indicator('---', True) - if self.canonical: - self.write_indent() - self.state = self.expect_document_root - elif isinstance(self.event, StreamEndEvent): - if self.open_ended: - self.write_indicator('...', True) - self.write_indent() - self.write_stream_end() - self.state = self.expect_nothing - else: - raise EmitterError("expected DocumentStartEvent, but got %s" - % self.event) - - def expect_document_end(self): - if isinstance(self.event, DocumentEndEvent): - self.write_indent() - if self.event.explicit: - self.write_indicator('...', True) - self.write_indent() - self.flush_stream() - self.state = self.expect_document_start - else: - raise EmitterError("expected DocumentEndEvent, but got %s" - % self.event) - - def expect_document_root(self): - self.states.append(self.expect_document_end) - self.expect_node(root=True) - - # Node handlers. - - def expect_node(self, root=False, sequence=False, mapping=False, - simple_key=False): - self.root_context = root - self.sequence_context = sequence - self.mapping_context = mapping - self.simple_key_context = simple_key - if isinstance(self.event, AliasEvent): - self.expect_alias() - elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): - self.process_anchor('&') - self.process_tag() - if isinstance(self.event, ScalarEvent): - self.expect_scalar() - elif isinstance(self.event, SequenceStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_sequence(): - self.expect_flow_sequence() - else: - self.expect_block_sequence() - elif isinstance(self.event, MappingStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_mapping(): - self.expect_flow_mapping() - else: - self.expect_block_mapping() - else: - raise EmitterError("expected NodeEvent, but got %s" % self.event) - - def expect_alias(self): - if self.event.anchor is None: - raise EmitterError("anchor is not specified for alias") - self.process_anchor('*') - self.state = self.states.pop() - - def expect_scalar(self): - self.increase_indent(flow=True) - self.process_scalar() - self.indent = self.indents.pop() - self.state = self.states.pop() - - # Flow sequence handlers. 
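Editorial aside: expect_node above is where the emitter chooses between the flow and block collection states; through the dumper that choice surfaces as the default_flow_style option. Illustrative sketch (public `yaml` package assumed):

    import yaml

    data = {'a': [1, 2]}
    print(yaml.dump(data, default_flow_style=True))   # {a: [1, 2]}
    print(yaml.dump(data, default_flow_style=False))  # a:\n- 1\n- 2  (block style)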
- - def expect_flow_sequence(self): - self.write_indicator('[', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_sequence_item - - def expect_first_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator(']', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - def expect_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(',', False) - self.write_indent() - self.write_indicator(']', False) - self.state = self.states.pop() - else: - self.write_indicator(',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - # Flow mapping handlers. - - def expect_flow_mapping(self): - self.write_indicator('{', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_mapping_key - - def expect_first_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator('}', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator('?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(',', False) - self.write_indent() - self.write_indicator('}', False) - self.state = self.states.pop() - else: - self.write_indicator(',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator('?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_simple_value(self): - self.write_indicator(':', False) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - def expect_flow_mapping_value(self): - if self.canonical or self.column > self.best_width: - self.write_indent() - self.write_indicator(':', True) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - # Block sequence handlers. 
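Editorial aside: the self.canonical branches threaded through the flow handlers above are what produce canonical output: every collection in flow style, one item per line, with explicit tags. Sketch (public API assumed; output approximate):

    import yaml

    print(yaml.dump([1, 'two'], canonical=True))
    # ---
    # !!seq [
    #   !!int "1",
    #   !!str "two",
    # ]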
- - def expect_block_sequence(self): - indentless = (self.mapping_context and not self.indention) - self.increase_indent(flow=False, indentless=indentless) - self.state = self.expect_first_block_sequence_item - - def expect_first_block_sequence_item(self): - return self.expect_block_sequence_item(first=True) - - def expect_block_sequence_item(self, first=False): - if not first and isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - self.write_indicator('-', True, indention=True) - self.states.append(self.expect_block_sequence_item) - self.expect_node(sequence=True) - - # Block mapping handlers. - - def expect_block_mapping(self): - self.increase_indent(flow=False) - self.state = self.expect_first_block_mapping_key - - def expect_first_block_mapping_key(self): - return self.expect_block_mapping_key(first=True) - - def expect_block_mapping_key(self, first=False): - if not first and isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - if self.check_simple_key(): - self.states.append(self.expect_block_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator('?', True, indention=True) - self.states.append(self.expect_block_mapping_value) - self.expect_node(mapping=True) - - def expect_block_mapping_simple_value(self): - self.write_indicator(':', False) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - def expect_block_mapping_value(self): - self.write_indent() - self.write_indicator(':', True, indention=True) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - # Checkers. - - def check_empty_sequence(self): - return (isinstance(self.event, SequenceStartEvent) and self.events - and isinstance(self.events[0], SequenceEndEvent)) - - def check_empty_mapping(self): - return (isinstance(self.event, MappingStartEvent) and self.events - and isinstance(self.events[0], MappingEndEvent)) - - def check_empty_document(self): - if not isinstance(self.event, DocumentStartEvent) or not self.events: - return False - event = self.events[0] - return (isinstance(event, ScalarEvent) and event.anchor is None - and event.tag is None and event.implicit and event.value == '') - - def check_simple_key(self): - length = 0 - if isinstance(self.event, NodeEvent) and self.event.anchor is not None: - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - length += len(self.prepared_anchor) - if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ - and self.event.tag is not None: - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(self.event.tag) - length += len(self.prepared_tag) - if isinstance(self.event, ScalarEvent): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - length += len(self.analysis.scalar) - return (length < 128 and (isinstance(self.event, AliasEvent) - or (isinstance(self.event, ScalarEvent) - and not self.analysis.empty and not self.analysis.multiline) - or self.check_empty_sequence() or self.check_empty_mapping())) - - # Anchor, Tag, and Scalar processors. 
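Editorial aside: process_anchor below writes the '&'/'*' indicators prepared by the serializer shown earlier in this patch, so an object referenced twice round-trips as an anchor/alias pair. Sketch (public `yaml` package assumed):

    import yaml

    shared = ['x']
    print(yaml.dump({'a': shared, 'b': shared}))
    # a: &id001
    # - x
    # b: *id001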
- - def process_anchor(self, indicator): - if self.event.anchor is None: - self.prepared_anchor = None - return - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - if self.prepared_anchor: - self.write_indicator(indicator+self.prepared_anchor, True) - self.prepared_anchor = None - - def process_tag(self): - tag = self.event.tag - if isinstance(self.event, ScalarEvent): - if self.style is None: - self.style = self.choose_scalar_style() - if ((not self.canonical or tag is None) and - ((self.style == '' and self.event.implicit[0]) - or (self.style != '' and self.event.implicit[1]))): - self.prepared_tag = None - return - if self.event.implicit[0] and tag is None: - tag = '!' - self.prepared_tag = None - else: - if (not self.canonical or tag is None) and self.event.implicit: - self.prepared_tag = None - return - if tag is None: - raise EmitterError("tag is not specified") - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(tag) - if self.prepared_tag: - self.write_indicator(self.prepared_tag, True) - self.prepared_tag = None - - def choose_scalar_style(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.event.style == '"' or self.canonical: - return '"' - if not self.event.style and self.event.implicit[0]: - if (not (self.simple_key_context and - (self.analysis.empty or self.analysis.multiline)) - and (self.flow_level and self.analysis.allow_flow_plain - or (not self.flow_level and self.analysis.allow_block_plain))): - return '' - if self.event.style and self.event.style in '|>': - if (not self.flow_level and not self.simple_key_context - and self.analysis.allow_block): - return self.event.style - if not self.event.style or self.event.style == '\'': - if (self.analysis.allow_single_quoted and - not (self.simple_key_context and self.analysis.multiline)): - return '\'' - return '"' - - def process_scalar(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.style is None: - self.style = self.choose_scalar_style() - split = (not self.simple_key_context) - #if self.analysis.multiline and split \ - # and (not self.style or self.style in '\'\"'): - # self.write_indent() - if self.style == '"': - self.write_double_quoted(self.analysis.scalar, split) - elif self.style == '\'': - self.write_single_quoted(self.analysis.scalar, split) - elif self.style == '>': - self.write_folded(self.analysis.scalar) - elif self.style == '|': - self.write_literal(self.analysis.scalar) - else: - self.write_plain(self.analysis.scalar, split) - self.analysis = None - self.style = None - - # Analyzers. - - def prepare_version(self, version): - major, minor = version - if major != 1: - raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) - return '%d.%d' % (major, minor) - - def prepare_tag_handle(self, handle): - if not handle: - raise EmitterError("tag handle must not be empty") - if handle[0] != '!' 
or handle[-1] != '!': - raise EmitterError("tag handle must start and end with '!': %r" % handle) - for ch in handle[1:-1]: - if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_'): - raise EmitterError("invalid character %r in the tag handle: %r" - % (ch, handle)) - return handle - - def prepare_tag_prefix(self, prefix): - if not prefix: - raise EmitterError("tag prefix must not be empty") - chunks = [] - start = end = 0 - if prefix[0] == '!': - end = 1 - while end < len(prefix): - ch = prefix[end] - if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-;/?!:@&=+$,_.~*\'()[]': - end += 1 - else: - if start < end: - chunks.append(prefix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append('%%%02X' % ord(ch)) - if start < end: - chunks.append(prefix[start:end]) - return ''.join(chunks) - - def prepare_tag(self, tag): - if not tag: - raise EmitterError("tag must not be empty") - if tag == '!': - return tag - handle = None - suffix = tag - prefixes = sorted(self.tag_prefixes.keys()) - for prefix in prefixes: - if tag.startswith(prefix) \ - and (prefix == '!' or len(prefix) < len(tag)): - handle = self.tag_prefixes[prefix] - suffix = tag[len(prefix):] - chunks = [] - start = end = 0 - while end < len(suffix): - ch = suffix[end] - if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-;/?:@&=+$,_.~*\'()[]' \ - or (ch == '!' and handle != '!'): - end += 1 - else: - if start < end: - chunks.append(suffix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append('%%%02X' % ord(ch)) - if start < end: - chunks.append(suffix[start:end]) - suffix_text = ''.join(chunks) - if handle: - return '%s%s' % (handle, suffix_text) - else: - return '!<%s>' % suffix_text - - def prepare_anchor(self, anchor): - if not anchor: - raise EmitterError("anchor must not be empty") - for ch in anchor: - if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_'): - raise EmitterError("invalid character %r in the anchor: %r" - % (ch, anchor)) - return anchor - - def analyze_scalar(self, scalar): - - # Empty scalar is a special case. - if not scalar: - return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, - allow_flow_plain=False, allow_block_plain=True, - allow_single_quoted=True, allow_double_quoted=True, - allow_block=False) - - # Indicators and special characters. - block_indicators = False - flow_indicators = False - line_breaks = False - special_characters = False - - # Important whitespace combinations. - leading_space = False - leading_break = False - trailing_space = False - trailing_break = False - break_space = False - space_break = False - - # Check document indicators. - if scalar.startswith('---') or scalar.startswith('...'): - block_indicators = True - flow_indicators = True - - # First character or preceded by a whitespace. - preceeded_by_whitespace = True - - # Last character or followed by a whitespace. - followed_by_whitespace = (len(scalar) == 1 or - scalar[1] in '\0 \t\r\n\x85\u2028\u2029') - - # The previous character is a space. - previous_space = False - - # The previous character is a break. - previous_break = False - - index = 0 - while index < len(scalar): - ch = scalar[index] - - # Check for indicators. - if index == 0: - # Leading indicators are special characters. 
- if ch in '#,[]{}&*!|>\'\"%@`': - flow_indicators = True - block_indicators = True - if ch in '?:': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == '-' and followed_by_whitespace: - flow_indicators = True - block_indicators = True - else: - # Some indicators cannot appear within a scalar as well. - if ch in ',?[]{}': - flow_indicators = True - if ch == ':': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == '#' and preceeded_by_whitespace: - flow_indicators = True - block_indicators = True - - # Check for line breaks, special, and unicode characters. - if ch in '\n\x85\u2028\u2029': - line_breaks = True - if not (ch == '\n' or '\x20' <= ch <= '\x7E'): - if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' - or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF': - unicode_characters = True - if not self.allow_unicode: - special_characters = True - else: - special_characters = True - - # Detect important whitespace combinations. - if ch == ' ': - if index == 0: - leading_space = True - if index == len(scalar)-1: - trailing_space = True - if previous_break: - break_space = True - previous_space = True - previous_break = False - elif ch in '\n\x85\u2028\u2029': - if index == 0: - leading_break = True - if index == len(scalar)-1: - trailing_break = True - if previous_space: - space_break = True - previous_space = False - previous_break = True - else: - previous_space = False - previous_break = False - - # Prepare for the next character. - index += 1 - preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') - followed_by_whitespace = (index+1 >= len(scalar) or - scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') - - # Let's decide what styles are allowed. - allow_flow_plain = True - allow_block_plain = True - allow_single_quoted = True - allow_double_quoted = True - allow_block = True - - # Leading and trailing whitespaces are bad for plain scalars. - if (leading_space or leading_break - or trailing_space or trailing_break): - allow_flow_plain = allow_block_plain = False - - # We do not permit trailing spaces for block scalars. - if trailing_space: - allow_block = False - - # Spaces at the beginning of a new line are only acceptable for block - # scalars. - if break_space: - allow_flow_plain = allow_block_plain = allow_single_quoted = False - - # Spaces followed by breaks, as well as special character are only - # allowed for double quoted scalars. - if space_break or special_characters: - allow_flow_plain = allow_block_plain = \ - allow_single_quoted = allow_block = False - - # Although the plain scalar writer supports breaks, we never emit - # multiline plain scalars. - if line_breaks: - allow_flow_plain = allow_block_plain = False - - # Flow indicators are forbidden for flow plain scalars. - if flow_indicators: - allow_flow_plain = False - - # Block indicators are forbidden for block plain scalars. - if block_indicators: - allow_block_plain = False - - return ScalarAnalysis(scalar=scalar, - empty=False, multiline=line_breaks, - allow_flow_plain=allow_flow_plain, - allow_block_plain=allow_block_plain, - allow_single_quoted=allow_single_quoted, - allow_double_quoted=allow_double_quoted, - allow_block=allow_block) - - # Writers. - - def flush_stream(self): - if hasattr(self.stream, 'flush'): - self.stream.flush() - - def write_stream_start(self): - # Write BOM if needed. 
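analyze_scalar() above is the gatekeeper for scalar styles: special characters rule out everything except double quotes, and allow_unicode decides whether non-ASCII counts as special. A small demonstration, again assuming the standard `yaml` package:

    import yaml

    print(yaml.dump("bell\x07"))        # control character forces "bell\a"
    print(yaml.dump("caf\xe9"))         # non-ASCII is escaped: "caf\xE9"
    print(yaml.dump("caf\xe9", allow_unicode=True))  # passes through as plain text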
- if self.encoding and self.encoding.startswith('utf-16'): - self.stream.write('\uFEFF'.encode(self.encoding)) - - def write_stream_end(self): - self.flush_stream() - - def write_indicator(self, indicator, need_whitespace, - whitespace=False, indention=False): - if self.whitespace or not need_whitespace: - data = indicator - else: - data = ' '+indicator - self.whitespace = whitespace - self.indention = self.indention and indention - self.column += len(data) - self.open_ended = False - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_indent(self): - indent = self.indent or 0 - if not self.indention or self.column > indent \ - or (self.column == indent and not self.whitespace): - self.write_line_break() - if self.column < indent: - self.whitespace = True - data = ' '*(indent-self.column) - self.column = indent - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_line_break(self, data=None): - if data is None: - data = self.best_line_break - self.whitespace = True - self.indention = True - self.line += 1 - self.column = 0 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_version_directive(self, version_text): - data = '%%YAML %s' % version_text - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - def write_tag_directive(self, handle_text, prefix_text): - data = '%%TAG %s %s' % (handle_text, prefix_text) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - # Scalar streams. - - def write_single_quoted(self, text, split=True): - self.write_indicator('\'', True) - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch is None or ch != ' ': - if start+1 == end and self.column > self.best_width and split \ - and start != 0 and end != len(text): - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch is None or ch not in '\n\x85\u2028\u2029': - if text[start] == '\n': - self.write_line_break() - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - start = end - else: - if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch == '\'': - data = '\'\'' - self.column += 2 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end + 1 - if ch is not None: - spaces = (ch == ' ') - breaks = (ch in '\n\x85\u2028\u2029') - end += 1 - self.write_indicator('\'', False) - - ESCAPE_REPLACEMENTS = { - '\0': '0', - '\x07': 'a', - '\x08': 'b', - '\x09': 't', - '\x0A': 'n', - '\x0B': 'v', - '\x0C': 'f', - '\x0D': 'r', - '\x1B': 'e', - '\"': '\"', - '\\': '\\', - '\x85': 'N', - '\xA0': '_', - '\u2028': 'L', - '\u2029': 'P', - } - - def write_double_quoted(self, text, split=True): - self.write_indicator('"', True) - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ - or not ('\x20' <= ch <= '\x7E' - or (self.allow_unicode - and ('\xA0' <= ch <= '\uD7FF' - 
or '\uE000' <= ch <= '\uFFFD'))): - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - if ch in self.ESCAPE_REPLACEMENTS: - data = '\\'+self.ESCAPE_REPLACEMENTS[ch] - elif ch <= '\xFF': - data = '\\x%02X' % ord(ch) - elif ch <= '\uFFFF': - data = '\\u%04X' % ord(ch) - else: - data = '\\U%08X' % ord(ch) - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end+1 - if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ - and self.column+(end-start) > self.best_width and split: - data = text[start:end]+'\\' - if start < end: - start = end - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_indent() - self.whitespace = False - self.indention = False - if text[start] == ' ': - data = '\\' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - end += 1 - self.write_indicator('"', False) - - def determine_block_hints(self, text): - hints = '' - if text: - if text[0] in ' \n\x85\u2028\u2029': - hints += str(self.best_indent) - if text[-1] not in '\n\x85\u2028\u2029': - hints += '-' - elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': - hints += '+' - return hints - - def write_folded(self, text): - hints = self.determine_block_hints(text) - self.write_indicator('>'+hints, True) - if hints[-1:] == '+': - self.open_ended = True - self.write_line_break() - leading_space = True - spaces = False - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in '\n\x85\u2028\u2029': - if not leading_space and ch is not None and ch != ' ' \ - and text[start] == '\n': - self.write_line_break() - leading_space = (ch == ' ') - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - elif spaces: - if ch != ' ': - if start+1 == end and self.column > self.best_width: - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - else: - if ch is None or ch in ' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - self.write_line_break() - start = end - if ch is not None: - breaks = (ch in '\n\x85\u2028\u2029') - spaces = (ch == ' ') - end += 1 - - def write_literal(self, text): - hints = self.determine_block_hints(text) - self.write_indicator('|'+hints, True) - if hints[-1:] == '+': - self.open_ended = True - self.write_line_break() - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in '\n\x85\u2028\u2029': - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - else: - if ch is None or ch in '\n\x85\u2028\u2029': - data = text[start:end] - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - self.write_line_break() - start = end - if ch is not None: - breaks = (ch in '\n\x85\u2028\u2029') - end += 1 - 
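determine_block_hints() above explains the suffixes seen after '|' and '>': an explicit indentation hint when the text starts with whitespace, and '-'/'+' chomping hints describing the trailing break. A sketch with the standard `yaml` package:

    import yaml

    doc = {'msg': "first\nsecond\n", 'tail': "no final newline"}
    print(yaml.dump(doc, default_style='|'))
    # "msg": |        literal block, line breaks kept verbatim
    #   first
    #   second
    # "tail": |-      the '-' chomping hint records the missing final break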
-
-    def write_plain(self, text, split=True):
-        if self.root_context:
-            self.open_ended = True
-        if not text:
-            return
-        if not self.whitespace:
-            data = ' '
-            self.column += len(data)
-            if self.encoding:
-                data = data.encode(self.encoding)
-            self.stream.write(data)
-        self.whitespace = False
-        self.indention = False
-        spaces = False
-        breaks = False
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if spaces:
-                if ch != ' ':
-                    if start+1 == end and self.column > self.best_width and split:
-                        self.write_indent()
-                        self.whitespace = False
-                        self.indention = False
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            elif breaks:
-                if ch not in '\n\x85\u2028\u2029':
-                    if text[start] == '\n':
-                        self.write_line_break()
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    self.write_indent()
-                    self.whitespace = False
-                    self.indention = False
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029':
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end
-            if ch is not None:
-                spaces = (ch == ' ')
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-
diff --git a/python.d/python_modules/pyyaml3/error.py b/python.d/python_modules/pyyaml3/error.py
deleted file mode 100644
index b796b4dc5..000000000
--- a/python.d/python_modules/pyyaml3/error.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
-    def __init__(self, name, index, line, column, buffer, pointer):
-        self.name = name
-        self.index = index
-        self.line = line
-        self.column = column
-        self.buffer = buffer
-        self.pointer = pointer
-
-    def get_snippet(self, indent=4, max_length=75):
-        if self.buffer is None:
-            return None
-        head = ''
-        start = self.pointer
-        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
-            start -= 1
-            if self.pointer-start > max_length/2-1:
-                head = ' ... '
-                start += 5
-                break
-        tail = ''
-        end = self.pointer
-        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
-            end += 1
-            if end-self.pointer > max_length/2-1:
-                tail = ' ... '
-                end -= 5
-                break
-        snippet = self.buffer[start:end]
-        return ' '*indent + head + snippet + tail + '\n' \
-            + ' '*(indent+self.pointer-start+len(head)) + '^'
-
-    def __str__(self):
-        snippet = self.get_snippet()
-        where = " in \"%s\", line %d, column %d" \
-                % (self.name, self.line+1, self.column+1)
-        if snippet is not None:
-            where += ":\n"+snippet
-        return where
-
-class YAMLError(Exception):
-    pass
-
-class MarkedYAMLError(YAMLError):
-
-    def __init__(self, context=None, context_mark=None,
-            problem=None, problem_mark=None, note=None):
-        self.context = context
-        self.context_mark = context_mark
-        self.problem = problem
-        self.problem_mark = problem_mark
-        self.note = note
-
-    def __str__(self):
-        lines = []
-        if self.context is not None:
-            lines.append(self.context)
-        if self.context_mark is not None \
-            and (self.problem is None or self.problem_mark is None
-                or self.context_mark.name != self.problem_mark.name
-                or self.context_mark.line != self.problem_mark.line
-                or self.context_mark.column != self.problem_mark.column):
-            lines.append(str(self.context_mark))
-        if self.problem is not None:
-            lines.append(self.problem)
-        if self.problem_mark is not None:
-            lines.append(str(self.problem_mark))
-        if self.note is not None:
-            lines.append(self.note)
-        return '\n'.join(lines)
-
diff --git a/python.d/python_modules/pyyaml3/events.py b/python.d/python_modules/pyyaml3/events.py
deleted file mode 100644
index f79ad389c..000000000
--- a/python.d/python_modules/pyyaml3/events.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Abstract classes.
-
-class Event(object):
-    def __init__(self, start_mark=None, end_mark=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
-                if hasattr(self, key)]
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
-    def __init__(self, anchor, start_mark=None, end_mark=None):
-        self.anchor = anchor
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
-    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
-            flow_style=None):
-        self.anchor = anchor
-        self.tag = tag
-        self.implicit = implicit
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
-    pass
-
-# Implementations.
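The Mark class deleted above is what gives PyYAML errors their "line X, column Y" location and the caret excerpt rendered by get_snippet(). For instance, with the standard `yaml` package:

    import yaml

    try:
        yaml.safe_load("key: [unclosed\n")
    except yaml.YAMLError as exc:
        print(exc)  # the message ends with a Mark: name, line, column, '^' snippet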
- -class StreamStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndEvent(Event): - pass - -class DocumentStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None, version=None, tags=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - self.version = version - self.tags = tags - -class DocumentEndEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - -class AliasEvent(NodeEvent): - pass - -class ScalarEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, value, - start_mark=None, end_mark=None, style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class SequenceStartEvent(CollectionStartEvent): - pass - -class SequenceEndEvent(CollectionEndEvent): - pass - -class MappingStartEvent(CollectionStartEvent): - pass - -class MappingEndEvent(CollectionEndEvent): - pass - diff --git a/python.d/python_modules/pyyaml3/loader.py b/python.d/python_modules/pyyaml3/loader.py deleted file mode 100644 index 08c8f01b3..000000000 --- a/python.d/python_modules/pyyaml3/loader.py +++ /dev/null @@ -1,40 +0,0 @@ - -__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] - -from .reader import * -from .scanner import * -from .parser import * -from .composer import * -from .constructor import * -from .resolver import * - -class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) - diff --git a/python.d/python_modules/pyyaml3/nodes.py b/python.d/python_modules/pyyaml3/nodes.py deleted file mode 100644 index c4f070c41..000000000 --- a/python.d/python_modules/pyyaml3/nodes.py +++ /dev/null @@ -1,49 +0,0 @@ - -class Node(object): - def __init__(self, tag, value, start_mark, end_mark): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - value = self.value - #if isinstance(value, list): - # if len(value) == 0: - # value = '<empty>' - # elif len(value) == 1: - # value = '<1 item>' - # else: - # value = '<%d items>' % len(value) - #else: - # if len(value) > 75: - # value = repr(value[:70]+u' ... 
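loader.py above wires the same Reader/Scanner/Parser/Composer stack to three different constructors; in practice the difference is which tags each loader is willing to construct. A sketch, assuming the standard `yaml` package:

    import yaml

    doc = "!!python/tuple [1, 2]\n"
    print(yaml.load(doc, Loader=yaml.Loader))   # (1, 2): full Constructor handles python/ tags
    try:
        yaml.load(doc, Loader=yaml.SafeLoader)  # SafeConstructor refuses them
    except yaml.YAMLError as exc:
        print("rejected:", type(exc).__name__)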
') - # else: - # value = repr(value) - value = repr(value) - return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) - -class ScalarNode(Node): - id = 'scalar' - def __init__(self, tag, value, - start_mark=None, end_mark=None, style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class CollectionNode(Node): - def __init__(self, tag, value, - start_mark=None, end_mark=None, flow_style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class SequenceNode(CollectionNode): - id = 'sequence' - -class MappingNode(CollectionNode): - id = 'mapping' - diff --git a/python.d/python_modules/pyyaml3/parser.py b/python.d/python_modules/pyyaml3/parser.py deleted file mode 100644 index 13a5995d2..000000000 --- a/python.d/python_modules/pyyaml3/parser.py +++ /dev/null @@ -1,589 +0,0 @@ - -# The following YAML grammar is LL(1) and is parsed by a recursive descent -# parser. -# -# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -# implicit_document ::= block_node DOCUMENT-END* -# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -# block_node_or_indentless_sequence ::= -# ALIAS -# | properties (block_content | indentless_block_sequence)? -# | block_content -# | indentless_block_sequence -# block_node ::= ALIAS -# | properties block_content? -# | block_content -# flow_node ::= ALIAS -# | properties flow_content? -# | flow_content -# properties ::= TAG ANCHOR? | ANCHOR TAG? -# block_content ::= block_collection | flow_collection | SCALAR -# flow_content ::= flow_collection | SCALAR -# block_collection ::= block_sequence | block_mapping -# flow_collection ::= flow_sequence | flow_mapping -# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -# block_mapping ::= BLOCK-MAPPING_START -# ((KEY block_node_or_indentless_sequence?)? -# (VALUE block_node_or_indentless_sequence?)?)* -# BLOCK-END -# flow_sequence ::= FLOW-SEQUENCE-START -# (flow_sequence_entry FLOW-ENTRY)* -# flow_sequence_entry? -# FLOW-SEQUENCE-END -# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -# flow_mapping ::= FLOW-MAPPING-START -# (flow_mapping_entry FLOW-ENTRY)* -# flow_mapping_entry? -# FLOW-MAPPING-END -# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
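nodes.py above defines the composer's output: a tree of ScalarNode/SequenceNode/MappingNode objects, each carrying its resolved tag. yaml.compose() in the standard package exposes that tree directly:

    import yaml

    node = yaml.compose("a: [1, 2]\n")
    print(type(node).__name__, node.tag)             # MappingNode tag:yaml.org,2002:map
    key, value = node.value[0]
    print(type(key).__name__, type(value).__name__)  # ScalarNode SequenceNode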
-# -# FIRST sets: -# -# stream: { STREAM-START } -# explicit_document: { DIRECTIVE DOCUMENT-START } -# implicit_document: FIRST(block_node) -# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_sequence: { BLOCK-SEQUENCE-START } -# block_mapping: { BLOCK-MAPPING-START } -# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } -# indentless_sequence: { ENTRY } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_sequence: { FLOW-SEQUENCE-START } -# flow_mapping: { FLOW-MAPPING-START } -# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } -# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } - -__all__ = ['Parser', 'ParserError'] - -from .error import MarkedYAMLError -from .tokens import * -from .events import * -from .scanner import * - -class ParserError(MarkedYAMLError): - pass - -class Parser: - # Since writing a recursive-descendant parser is a straightforward task, we - # do not give many comments here. - - DEFAULT_TAGS = { - '!': '!', - '!!': 'tag:yaml.org,2002:', - } - - def __init__(self): - self.current_event = None - self.yaml_version = None - self.tag_handles = {} - self.states = [] - self.marks = [] - self.state = self.parse_stream_start - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def check_event(self, *choices): - # Check the type of the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - if self.current_event is not None: - if not choices: - return True - for choice in choices: - if isinstance(self.current_event, choice): - return True - return False - - def peek_event(self): - # Get the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - return self.current_event - - def get_event(self): - # Get the next event and proceed further. - if self.current_event is None: - if self.state: - self.current_event = self.state() - value = self.current_event - self.current_event = None - return value - - # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - # implicit_document ::= block_node DOCUMENT-END* - # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - - def parse_stream_start(self): - - # Parse the stream start. - token = self.get_token() - event = StreamStartEvent(token.start_mark, token.end_mark, - encoding=token.encoding) - - # Prepare the next state. - self.state = self.parse_implicit_document_start - - return event - - def parse_implicit_document_start(self): - - # Parse an implicit document. - if not self.check_token(DirectiveToken, DocumentStartToken, - StreamEndToken): - self.tag_handles = self.DEFAULT_TAGS - token = self.peek_token() - start_mark = end_mark = token.start_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=False) - - # Prepare the next state. 
- self.states.append(self.parse_document_end) - self.state = self.parse_block_node - - return event - - else: - return self.parse_document_start() - - def parse_document_start(self): - - # Parse any extra document end indicators. - while self.check_token(DocumentEndToken): - self.get_token() - - # Parse an explicit document. - if not self.check_token(StreamEndToken): - token = self.peek_token() - start_mark = token.start_mark - version, tags = self.process_directives() - if not self.check_token(DocumentStartToken): - raise ParserError(None, None, - "expected '<document start>', but found %r" - % self.peek_token().id, - self.peek_token().start_mark) - token = self.get_token() - end_mark = token.end_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=True, version=version, tags=tags) - self.states.append(self.parse_document_end) - self.state = self.parse_document_content - else: - # Parse the end of the stream. - token = self.get_token() - event = StreamEndEvent(token.start_mark, token.end_mark) - assert not self.states - assert not self.marks - self.state = None - return event - - def parse_document_end(self): - - # Parse the document end. - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - if self.check_token(DocumentEndToken): - token = self.get_token() - end_mark = token.end_mark - explicit = True - event = DocumentEndEvent(start_mark, end_mark, - explicit=explicit) - - # Prepare the next state. - self.state = self.parse_document_start - - return event - - def parse_document_content(self): - if self.check_token(DirectiveToken, - DocumentStartToken, DocumentEndToken, StreamEndToken): - event = self.process_empty_scalar(self.peek_token().start_mark) - self.state = self.states.pop() - return event - else: - return self.parse_block_node() - - def process_directives(self): - self.yaml_version = None - self.tag_handles = {} - while self.check_token(DirectiveToken): - token = self.get_token() - if token.name == 'YAML': - if self.yaml_version is not None: - raise ParserError(None, None, - "found duplicate YAML directive", token.start_mark) - major, minor = token.value - if major != 1: - raise ParserError(None, None, - "found incompatible YAML document (version 1.* is required)", - token.start_mark) - self.yaml_version = token.value - elif token.name == 'TAG': - handle, prefix = token.value - if handle in self.tag_handles: - raise ParserError(None, None, - "duplicate tag handle %r" % handle, - token.start_mark) - self.tag_handles[handle] = prefix - if self.tag_handles: - value = self.yaml_version, self.tag_handles.copy() - else: - value = self.yaml_version, None - for key in self.DEFAULT_TAGS: - if key not in self.tag_handles: - self.tag_handles[key] = self.DEFAULT_TAGS[key] - return value - - # block_node_or_indentless_sequence ::= ALIAS - # | properties (block_content | indentless_block_sequence)? - # | block_content - # | indentless_block_sequence - # block_node ::= ALIAS - # | properties block_content? - # | block_content - # flow_node ::= ALIAS - # | properties flow_content? - # | flow_content - # properties ::= TAG ANCHOR? | ANCHOR TAG? 
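The document states above (parse_stream_start through parse_document_end) produce the outer frame of the event stream that the grammar comments describe. The standard package's yaml.parse() makes the whole stream visible:

    import yaml

    for event in yaml.parse("- a\n- b: 1\n"):
        print(type(event).__name__)
    # StreamStartEvent, DocumentStartEvent, SequenceStartEvent, ScalarEvent,
    # MappingStartEvent, ScalarEvent, ScalarEvent, MappingEndEvent,
    # SequenceEndEvent, DocumentEndEvent, StreamEndEvent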
-    # block_content ::= block_collection | flow_collection | SCALAR
-    # flow_content ::= flow_collection | SCALAR
-    # block_collection ::= block_sequence | block_mapping
-    # flow_collection ::= flow_sequence | flow_mapping
-
-    def parse_block_node(self):
-        return self.parse_node(block=True)
-
-    def parse_flow_node(self):
-        return self.parse_node()
-
-    def parse_block_node_or_indentless_sequence(self):
-        return self.parse_node(block=True, indentless_sequence=True)
-
-    def parse_node(self, block=False, indentless_sequence=False):
-        if self.check_token(AliasToken):
-            token = self.get_token()
-            event = AliasEvent(token.value, token.start_mark, token.end_mark)
-            self.state = self.states.pop()
-        else:
-            anchor = None
-            tag = None
-            start_mark = end_mark = tag_mark = None
-            if self.check_token(AnchorToken):
-                token = self.get_token()
-                start_mark = token.start_mark
-                end_mark = token.end_mark
-                anchor = token.value
-                if self.check_token(TagToken):
-                    token = self.get_token()
-                    tag_mark = token.start_mark
-                    end_mark = token.end_mark
-                    tag = token.value
-            elif self.check_token(TagToken):
-                token = self.get_token()
-                start_mark = tag_mark = token.start_mark
-                end_mark = token.end_mark
-                tag = token.value
-                if self.check_token(AnchorToken):
-                    token = self.get_token()
-                    end_mark = token.end_mark
-                    anchor = token.value
-            if tag is not None:
-                handle, suffix = tag
-                if handle is not None:
-                    if handle not in self.tag_handles:
-                        raise ParserError("while parsing a node", start_mark,
-                                "found undefined tag handle %r" % handle,
-                                tag_mark)
-                    tag = self.tag_handles[handle]+suffix
-                else:
-                    tag = suffix
-            #if tag == '!':
-            #    raise ParserError("while parsing a node", start_mark,
-            #            "found non-specific tag '!'", tag_mark,
-            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
-            if start_mark is None:
-                start_mark = end_mark = self.peek_token().start_mark
-            event = None
-            implicit = (tag is None or tag == '!')
-            if indentless_sequence and self.check_token(BlockEntryToken):
-                end_mark = self.peek_token().end_mark
-                event = SequenceStartEvent(anchor, tag, implicit,
-                        start_mark, end_mark)
-                self.state = self.parse_indentless_sequence_entry
-            else:
-                if self.check_token(ScalarToken):
-                    token = self.get_token()
-                    end_mark = token.end_mark
-                    if (token.plain and tag is None) or tag == '!':
-                        implicit = (True, False)
-                    elif tag is None:
-                        implicit = (False, True)
-                    else:
-                        implicit = (False, False)
-                    event = ScalarEvent(anchor, tag, implicit, token.value,
-                            start_mark, end_mark, style=token.style)
-                    self.state = self.states.pop()
-                elif self.check_token(FlowSequenceStartToken):
-                    end_mark = self.peek_token().end_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
-                    self.state = self.parse_flow_sequence_first_entry
-                elif self.check_token(FlowMappingStartToken):
-                    end_mark = self.peek_token().end_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
-                    self.state = self.parse_flow_mapping_first_key
-                elif block and self.check_token(BlockSequenceStartToken):
-                    end_mark = self.peek_token().start_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
-                    self.state = self.parse_block_sequence_first_entry
-                elif block and self.check_token(BlockMappingStartToken):
-                    end_mark = self.peek_token().start_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
-                    self.state = self.parse_block_mapping_first_key
-                elif anchor is not None or tag is not None:
-                    # Empty scalars are allowed even if a tag or an anchor is
-                    # specified.
-                    event = ScalarEvent(anchor, tag, (implicit, False), '',
-                            start_mark, end_mark)
-                    self.state = self.states.pop()
-                else:
-                    if block:
-                        node = 'block'
-                    else:
-                        node = 'flow'
-                    token = self.peek_token()
-                    raise ParserError("while parsing a %s node" % node, start_mark,
-                            "expected the node content, but found %r" % token.id,
-                            token.start_mark)
-        return event
-
-    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
-    def parse_block_sequence_first_entry(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_block_sequence_entry()
-
-    def parse_block_sequence_entry(self):
-        if self.check_token(BlockEntryToken):
-            token = self.get_token()
-            if not self.check_token(BlockEntryToken, BlockEndToken):
-                self.states.append(self.parse_block_sequence_entry)
-                return self.parse_block_node()
-            else:
-                self.state = self.parse_block_sequence_entry
-                return self.process_empty_scalar(token.end_mark)
-        if not self.check_token(BlockEndToken):
-            token = self.peek_token()
-            raise ParserError("while parsing a block collection", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
-    def parse_indentless_sequence_entry(self):
-        if self.check_token(BlockEntryToken):
-            token = self.get_token()
-            if not self.check_token(BlockEntryToken,
-                    KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_indentless_sequence_entry)
-                return self.parse_block_node()
-            else:
-                self.state = self.parse_indentless_sequence_entry
-                return self.process_empty_scalar(token.end_mark)
-        token = self.peek_token()
-        event = SequenceEndEvent(token.start_mark, token.start_mark)
-        self.state = self.states.pop()
-        return event
-
-    # block_mapping ::= BLOCK-MAPPING_START
-    #                   ((KEY block_node_or_indentless_sequence?)?
-    #                    (VALUE block_node_or_indentless_sequence?)?)*
-    #                   BLOCK-END
-
-    def parse_block_mapping_first_key(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_block_mapping_key()
-
-    def parse_block_mapping_key(self):
-        if self.check_token(KeyToken):
-            token = self.get_token()
-            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_block_mapping_value)
-                return self.parse_block_node_or_indentless_sequence()
-            else:
-                self.state = self.parse_block_mapping_value
-                return self.process_empty_scalar(token.end_mark)
-        if not self.check_token(BlockEndToken):
-            token = self.peek_token()
-            raise ParserError("while parsing a block mapping", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_block_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_block_mapping_key)
-                return self.parse_block_node_or_indentless_sequence()
-            else:
-                self.state = self.parse_block_mapping_key
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_block_mapping_key
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    # flow_sequence ::= FLOW-SEQUENCE-START
-    #                   (flow_sequence_entry FLOW-ENTRY)*
-    #                   flow_sequence_entry?
-    #                   FLOW-SEQUENCE-END
-    # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-    #
-    # Note that while production rules for both flow_sequence_entry and
-    # flow_mapping_entry are equal, their interpretations are different.
-    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
-    # generates an inline mapping (set syntax).
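The "(set syntax)" remark above is easy to miss: a KEY inside a flow sequence silently opens a one-pair mapping. With the standard `yaml` package:

    import yaml

    print(yaml.safe_load("[a: 1, plain]"))   # [{'a': 1}, 'plain']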
- - def parse_flow_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_sequence_entry(first=True) - - def parse_flow_sequence_entry(self, first=False): - if not self.check_token(FlowSequenceEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow sequence", self.marks[-1], - "expected ',' or ']', but got %r" % token.id, token.start_mark) - - if self.check_token(KeyToken): - token = self.peek_token() - event = MappingStartEvent(None, None, True, - token.start_mark, token.end_mark, - flow_style=True) - self.state = self.parse_flow_sequence_entry_mapping_key - return event - elif not self.check_token(FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry) - return self.parse_flow_node() - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_sequence_entry_mapping_key(self): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_value - return self.process_empty_scalar(token.end_mark) - - def parse_flow_sequence_entry_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_end) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_end - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_sequence_entry_mapping_end - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_sequence_entry_mapping_end(self): - self.state = self.parse_flow_sequence_entry - token = self.peek_token() - return MappingEndEvent(token.start_mark, token.start_mark) - - # flow_mapping ::= FLOW-MAPPING-START - # (flow_mapping_entry FLOW-ENTRY)* - # flow_mapping_entry? - # FLOW-MAPPING-END - # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
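process_empty_scalar(), defined just below, is how the parser fills the holes this grammar allows: a key with no value yields an empty implicit scalar, which the resolver then turns into null. For example:

    import yaml

    print(yaml.safe_load("{a: , b: 2}"))   # {'a': None, 'b': 2}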
-
-    def parse_flow_mapping_first_key(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_flow_mapping_key(first=True)
-
-    def parse_flow_mapping_key(self, first=False):
-        if not self.check_token(FlowMappingEndToken):
-            if not first:
-                if self.check_token(FlowEntryToken):
-                    self.get_token()
-                else:
-                    token = self.peek_token()
-                    raise ParserError("while parsing a flow mapping", self.marks[-1],
-                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
-            if self.check_token(KeyToken):
-                token = self.get_token()
-                if not self.check_token(ValueToken,
-                        FlowEntryToken, FlowMappingEndToken):
-                    self.states.append(self.parse_flow_mapping_value)
-                    return self.parse_flow_node()
-                else:
-                    self.state = self.parse_flow_mapping_value
-                    return self.process_empty_scalar(token.end_mark)
-            elif not self.check_token(FlowMappingEndToken):
-                self.states.append(self.parse_flow_mapping_empty_value)
-                return self.parse_flow_node()
-        token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_flow_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
-                self.states.append(self.parse_flow_mapping_key)
-                return self.parse_flow_node()
-            else:
-                self.state = self.parse_flow_mapping_key
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_flow_mapping_key
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    def parse_flow_mapping_empty_value(self):
-        self.state = self.parse_flow_mapping_key
-        return self.process_empty_scalar(self.peek_token().start_mark)
-
-    def process_empty_scalar(self, mark):
-        return ScalarEvent(None, None, (True, False), '', mark, mark)
-
diff --git a/python.d/python_modules/pyyaml3/reader.py b/python.d/python_modules/pyyaml3/reader.py
deleted file mode 100644
index f70e920f4..000000000
--- a/python.d/python_modules/pyyaml3/reader.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further, there is no pretty code here.
-#
-# We define two classes here.
-#
-#   Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# The parser does not use it for any other purpose.
-#
-#   Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-#   reader.peek(length=1) - return the next `length` characters
-#   reader.forward(length=1) - move the current position `length` characters ahead
-#   reader.index - the number of the current character
-#   reader.line, reader.column - the line and the column of the current character
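The Reader interface sketched in the comment above is small enough to poke at directly; byte input additionally goes through determine_encoding(), which picks a codec from the BOM. Assuming the standard `yaml` package:

    import yaml
    from yaml.reader import Reader

    r = Reader("ab\ncd")
    print(r.peek(), r.peek(1))          # a b
    r.forward(3)                        # consume 'a', 'b' and the line break
    print(r.line, r.column, r.peek())   # 1 0 c
    # Bytes are decoded via the BOM before scanning:
    print(yaml.safe_load("key: value".encode("utf-16")))   # {'key': 'value'}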
- -__all__ = ['Reader', 'ReaderError'] - -from .error import YAMLError, Mark - -import codecs, re - -class ReaderError(YAMLError): - - def __init__(self, name, position, character, encoding, reason): - self.name = name - self.character = character - self.position = position - self.encoding = encoding - self.reason = reason - - def __str__(self): - if isinstance(self.character, bytes): - return "'%s' codec can't decode byte #x%02x: %s\n" \ - " in \"%s\", position %d" \ - % (self.encoding, ord(self.character), self.reason, - self.name, self.position) - else: - return "unacceptable character #x%04x: %s\n" \ - " in \"%s\", position %d" \ - % (self.character, self.reason, - self.name, self.position) - -class Reader(object): - # Reader: - # - determines the data encoding and converts it to a unicode string, - # - checks if characters are in allowed range, - # - adds '\0' to the end. - - # Reader accepts - # - a `bytes` object, - # - a `str` object, - # - a file-like object with its `read` method returning `str`, - # - a file-like object with its `read` method returning `unicode`. - - # Yeah, it's ugly and slow. - - def __init__(self, stream): - self.name = None - self.stream = None - self.stream_pointer = 0 - self.eof = True - self.buffer = '' - self.pointer = 0 - self.raw_buffer = None - self.raw_decode = None - self.encoding = None - self.index = 0 - self.line = 0 - self.column = 0 - if isinstance(stream, str): - self.name = "<unicode string>" - self.check_printable(stream) - self.buffer = stream+'\0' - elif isinstance(stream, bytes): - self.name = "<byte string>" - self.raw_buffer = stream - self.determine_encoding() - else: - self.stream = stream - self.name = getattr(stream, 'name', "<file>") - self.eof = False - self.raw_buffer = None - self.determine_encoding() - - def peek(self, index=0): - try: - return self.buffer[self.pointer+index] - except IndexError: - self.update(index+1) - return self.buffer[self.pointer+index] - - def prefix(self, length=1): - if self.pointer+length >= len(self.buffer): - self.update(length) - return self.buffer[self.pointer:self.pointer+length] - - def forward(self, length=1): - if self.pointer+length+1 >= len(self.buffer): - self.update(length+1) - while length: - ch = self.buffer[self.pointer] - self.pointer += 1 - self.index += 1 - if ch in '\n\x85\u2028\u2029' \ - or (ch == '\r' and self.buffer[self.pointer] != '\n'): - self.line += 1 - self.column = 0 - elif ch != '\uFEFF': - self.column += 1 - length -= 1 - - def get_mark(self): - if self.stream is None: - return Mark(self.name, self.index, self.line, self.column, - self.buffer, self.pointer) - else: - return Mark(self.name, self.index, self.line, self.column, - None, None) - - def determine_encoding(self): - while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): - self.update_raw() - if isinstance(self.raw_buffer, bytes): - if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): - self.raw_decode = codecs.utf_16_le_decode - self.encoding = 'utf-16-le' - elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): - self.raw_decode = codecs.utf_16_be_decode - self.encoding = 'utf-16-be' - else: - self.raw_decode = codecs.utf_8_decode - self.encoding = 'utf-8' - self.update(1) - - NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') - def check_printable(self, data): - match = self.NON_PRINTABLE.search(data) - if match: - character = match.group() - position = self.index+(len(self.buffer)-self.pointer)+match.start() - raise ReaderError(self.name, position, 
ord(character), - 'unicode', "special characters are not allowed") - - def update(self, length): - if self.raw_buffer is None: - return - self.buffer = self.buffer[self.pointer:] - self.pointer = 0 - while len(self.buffer) < length: - if not self.eof: - self.update_raw() - if self.raw_decode is not None: - try: - data, converted = self.raw_decode(self.raw_buffer, - 'strict', self.eof) - except UnicodeDecodeError as exc: - character = self.raw_buffer[exc.start] - if self.stream is not None: - position = self.stream_pointer-len(self.raw_buffer)+exc.start - else: - position = exc.start - raise ReaderError(self.name, position, character, - exc.encoding, exc.reason) - else: - data = self.raw_buffer - converted = len(data) - self.check_printable(data) - self.buffer += data - self.raw_buffer = self.raw_buffer[converted:] - if self.eof: - self.buffer += '\0' - self.raw_buffer = None - break - - def update_raw(self, size=4096): - data = self.stream.read(size) - if self.raw_buffer is None: - self.raw_buffer = data - else: - self.raw_buffer += data - self.stream_pointer += len(data) - if not data: - self.eof = True - -#try: -# import psyco -# psyco.bind(Reader) -#except ImportError: -# pass - diff --git a/python.d/python_modules/pyyaml3/representer.py b/python.d/python_modules/pyyaml3/representer.py deleted file mode 100644 index 67cd6fd25..000000000 --- a/python.d/python_modules/pyyaml3/representer.py +++ /dev/null @@ -1,374 +0,0 @@ - -__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', - 'RepresenterError'] - -from .error import * -from .nodes import * - -import datetime, sys, copyreg, types, base64 - -class RepresenterError(YAMLError): - pass - -class BaseRepresenter: - - yaml_representers = {} - yaml_multi_representers = {} - - def __init__(self, default_style=None, default_flow_style=None): - self.default_style = default_style - self.default_flow_style = default_flow_style - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def represent(self, data): - node = self.represent_data(data) - self.serialize(node) - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def represent_data(self, data): - if self.ignore_aliases(data): - self.alias_key = None - else: - self.alias_key = id(data) - if self.alias_key is not None: - if self.alias_key in self.represented_objects: - node = self.represented_objects[self.alias_key] - #if node is None: - # raise RepresenterError("recursive objects are not allowed: %r" % data) - return node - #self.represented_objects[alias_key] = None - self.object_keeper.append(data) - data_types = type(data).__mro__ - if data_types[0] in self.yaml_representers: - node = self.yaml_representers[data_types[0]](self, data) - else: - for data_type in data_types: - if data_type in self.yaml_multi_representers: - node = self.yaml_multi_representers[data_type](self, data) - break - else: - if None in self.yaml_multi_representers: - node = self.yaml_multi_representers[None](self, data) - elif None in self.yaml_representers: - node = self.yaml_representers[None](self, data) - else: - node = ScalarNode(None, str(data)) - #if alias_key is not None: - # self.represented_objects[alias_key] = node - return node - - @classmethod - def add_representer(cls, data_type, representer): - if not 'yaml_representers' in cls.__dict__: - cls.yaml_representers = cls.yaml_representers.copy() - cls.yaml_representers[data_type] = representer - - @classmethod - def add_multi_representer(cls, data_type, representer): - if not 
'yaml_multi_representers' in cls.__dict__: - cls.yaml_multi_representers = cls.yaml_multi_representers.copy() - cls.yaml_multi_representers[data_type] = representer - - def represent_scalar(self, tag, value, style=None): - if style is None: - style = self.default_style - node = ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - def represent_sequence(self, tag, sequence, flow_style=None): - value = [] - node = SequenceNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - for item in sequence: - node_item = self.represent_data(item) - if not (isinstance(node_item, ScalarNode) and not node_item.style): - best_style = False - value.append(node_item) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def represent_mapping(self, tag, mapping, flow_style=None): - value = [] - node = MappingNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - if hasattr(mapping, 'items'): - mapping = list(mapping.items()) - try: - mapping = sorted(mapping) - except TypeError: - pass - for item_key, item_value in mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, ScalarNode) and not node_key.style): - best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def ignore_aliases(self, data): - return False - -class SafeRepresenter(BaseRepresenter): - - def ignore_aliases(self, data): - if data in [None, ()]: - return True - if isinstance(data, (str, bytes, bool, int, float)): - return True - - def represent_none(self, data): - return self.represent_scalar('tag:yaml.org,2002:null', 'null') - - def represent_str(self, data): - return self.represent_scalar('tag:yaml.org,2002:str', data) - - def represent_binary(self, data): - if hasattr(base64, 'encodebytes'): - data = base64.encodebytes(data).decode('ascii') - else: - data = base64.encodestring(data).decode('ascii') - return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') - - def represent_bool(self, data): - if data: - value = 'true' - else: - value = 'false' - return self.represent_scalar('tag:yaml.org,2002:bool', value) - - def represent_int(self, data): - return self.represent_scalar('tag:yaml.org,2002:int', str(data)) - - inf_value = 1e300 - while repr(inf_value) != repr(inf_value*inf_value): - inf_value *= inf_value - - def represent_float(self, data): - if data != data or (data == 0.0 and data == 1.0): - value = '.nan' - elif data == self.inf_value: - value = '.inf' - elif data == -self.inf_value: - value = '-.inf' - else: - value = repr(data).lower() - # Note that in some cases `repr(data)` represents a float number - # without the decimal parts. For instance: - # >>> repr(1e17) - # '1e17' - # Unfortunately, this is not a valid float representation according - # to the definition of the `!!float` tag. We fix this by adding - # '.0' before the 'e' symbol. - if '.' 
not in value and 'e' in value: - value = value.replace('e', '.0e', 1) - return self.represent_scalar('tag:yaml.org,2002:float', value) - - def represent_list(self, data): - #pairs = (len(data) > 0 and isinstance(data, list)) - #if pairs: - # for item in data: - # if not isinstance(item, tuple) or len(item) != 2: - # pairs = False - # break - #if not pairs: - return self.represent_sequence('tag:yaml.org,2002:seq', data) - #value = [] - #for item_key, item_value in data: - # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', - # [(item_key, item_value)])) - #return SequenceNode(u'tag:yaml.org,2002:pairs', value) - - def represent_dict(self, data): - return self.represent_mapping('tag:yaml.org,2002:map', data) - - def represent_set(self, data): - value = {} - for key in data: - value[key] = None - return self.represent_mapping('tag:yaml.org,2002:set', value) - - def represent_date(self, data): - value = data.isoformat() - return self.represent_scalar('tag:yaml.org,2002:timestamp', value) - - def represent_datetime(self, data): - value = data.isoformat(' ') - return self.represent_scalar('tag:yaml.org,2002:timestamp', value) - - def represent_yaml_object(self, tag, data, cls, flow_style=None): - if hasattr(data, '__getstate__'): - state = data.__getstate__() - else: - state = data.__dict__.copy() - return self.represent_mapping(tag, state, flow_style=flow_style) - - def represent_undefined(self, data): - raise RepresenterError("cannot represent an object: %s" % data) - -SafeRepresenter.add_representer(type(None), - SafeRepresenter.represent_none) - -SafeRepresenter.add_representer(str, - SafeRepresenter.represent_str) - -SafeRepresenter.add_representer(bytes, - SafeRepresenter.represent_binary) - -SafeRepresenter.add_representer(bool, - SafeRepresenter.represent_bool) - -SafeRepresenter.add_representer(int, - SafeRepresenter.represent_int) - -SafeRepresenter.add_representer(float, - SafeRepresenter.represent_float) - -SafeRepresenter.add_representer(list, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(tuple, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(dict, - SafeRepresenter.represent_dict) - -SafeRepresenter.add_representer(set, - SafeRepresenter.represent_set) - -SafeRepresenter.add_representer(datetime.date, - SafeRepresenter.represent_date) - -SafeRepresenter.add_representer(datetime.datetime, - SafeRepresenter.represent_datetime) - -SafeRepresenter.add_representer(None, - SafeRepresenter.represent_undefined) - -class Representer(SafeRepresenter): - - def represent_complex(self, data): - if data.imag == 0.0: - data = '%r' % data.real - elif data.real == 0.0: - data = '%rj' % data.imag - elif data.imag > 0: - data = '%r+%rj' % (data.real, data.imag) - else: - data = '%r%rj' % (data.real, data.imag) - return self.represent_scalar('tag:yaml.org,2002:python/complex', data) - - def represent_tuple(self, data): - return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) - - def represent_name(self, data): - name = '%s.%s' % (data.__module__, data.__name__) - return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') - - def represent_module(self, data): - return self.represent_scalar( - 'tag:yaml.org,2002:python/module:'+data.__name__, '') - - def represent_object(self, data): - # We use __reduce__ API to save the data. 
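The '.0e' fix-up that the represent_float() comment above describes is observable from the outside; with the standard `yaml` package:

    import yaml

    print(yaml.dump(1e17))   # 1.0e+17 (repr() alone gives '1e+17', not a valid !!float)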
data.__reduce__ returns - # a tuple of length 2-5: - # (function, args, state, listitems, dictitems) - - # For reconstructing, we calls function(*args), then set its state, - # listitems, and dictitems if they are not None. - - # A special case is when function.__name__ == '__newobj__'. In this - # case we create the object with args[0].__new__(*args). - - # Another special case is when __reduce__ returns a string - we don't - # support it. - - # We produce a !!python/object, !!python/object/new or - # !!python/object/apply node. - - cls = type(data) - if cls in copyreg.dispatch_table: - reduce = copyreg.dispatch_table[cls](data) - elif hasattr(data, '__reduce_ex__'): - reduce = data.__reduce_ex__(2) - elif hasattr(data, '__reduce__'): - reduce = data.__reduce__() - else: - raise RepresenterError("cannot represent object: %r" % data) - reduce = (list(reduce)+[None]*5)[:5] - function, args, state, listitems, dictitems = reduce - args = list(args) - if state is None: - state = {} - if listitems is not None: - listitems = list(listitems) - if dictitems is not None: - dictitems = dict(dictitems) - if function.__name__ == '__newobj__': - function = args[0] - args = args[1:] - tag = 'tag:yaml.org,2002:python/object/new:' - newobj = True - else: - tag = 'tag:yaml.org,2002:python/object/apply:' - newobj = False - function_name = '%s.%s' % (function.__module__, function.__name__) - if not args and not listitems and not dictitems \ - and isinstance(state, dict) and newobj: - return self.represent_mapping( - 'tag:yaml.org,2002:python/object:'+function_name, state) - if not listitems and not dictitems \ - and isinstance(state, dict) and not state: - return self.represent_sequence(tag+function_name, args) - value = {} - if args: - value['args'] = args - if state or not isinstance(state, dict): - value['state'] = state - if listitems: - value['listitems'] = listitems - if dictitems: - value['dictitems'] = dictitems - return self.represent_mapping(tag+function_name, value) - -Representer.add_representer(complex, - Representer.represent_complex) - -Representer.add_representer(tuple, - Representer.represent_tuple) - -Representer.add_representer(type, - Representer.represent_name) - -Representer.add_representer(types.FunctionType, - Representer.represent_name) - -Representer.add_representer(types.BuiltinFunctionType, - Representer.represent_name) - -Representer.add_representer(types.ModuleType, - Representer.represent_module) - -Representer.add_multi_representer(object, - Representer.represent_object) - diff --git a/python.d/python_modules/pyyaml3/resolver.py b/python.d/python_modules/pyyaml3/resolver.py deleted file mode 100644 index 0eece2582..000000000 --- a/python.d/python_modules/pyyaml3/resolver.py +++ /dev/null @@ -1,224 +0,0 @@ - -__all__ = ['BaseResolver', 'Resolver'] - -from .error import * -from .nodes import * - -import re - -class ResolverError(YAMLError): - pass - -class BaseResolver: - - DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' - DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' - DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' - - yaml_implicit_resolvers = {} - yaml_path_resolvers = {} - - def __init__(self): - self.resolver_exact_paths = [] - self.resolver_prefix_paths = [] - - @classmethod - def add_implicit_resolver(cls, tag, regexp, first): - if not 'yaml_implicit_resolvers' in cls.__dict__: - cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() - if first is None: - first = [None] - for ch in first: - cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) - - 
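add_representer(), used throughout the registrations above, is also the public extension point for application types. A sketch with the standard `yaml` package; the Point class and the '!point' tag are hypothetical, made up for this example:

    import yaml

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(dumper, data):
        # '!point' is a made-up application tag, not part of PyYAML.
        return dumper.represent_mapping('!point', {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, represent_point)
    print(yaml.dump(Point(1, 2)))   # !point {x: 1, y: 2}  (layout varies by version)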
@classmethod - def add_path_resolver(cls, tag, path, kind=None): - # Note: `add_path_resolver` is experimental. The API could be changed. - # `new_path` is a pattern that is matched against the path from the - # root to the node that is being considered. `node_path` elements are - # tuples `(node_check, index_check)`. `node_check` is a node class: - # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` - # matches any kind of a node. `index_check` could be `None`, a boolean - # value, a string value, or a number. `None` and `False` match against - # any _value_ of sequence and mapping nodes. `True` matches against - # any _key_ of a mapping node. A string `index_check` matches against - # a mapping value that corresponds to a scalar key which content is - # equal to the `index_check` value. An integer `index_check` matches - # against a sequence value with the index equal to `index_check`. - if not 'yaml_path_resolvers' in cls.__dict__: - cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() - new_path = [] - for element in path: - if isinstance(element, (list, tuple)): - if len(element) == 2: - node_check, index_check = element - elif len(element) == 1: - node_check = element[0] - index_check = True - else: - raise ResolverError("Invalid path element: %s" % element) - else: - node_check = None - index_check = element - if node_check is str: - node_check = ScalarNode - elif node_check is list: - node_check = SequenceNode - elif node_check is dict: - node_check = MappingNode - elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ - and not isinstance(node_check, str) \ - and node_check is not None: - raise ResolverError("Invalid node checker: %s" % node_check) - if not isinstance(index_check, (str, int)) \ - and index_check is not None: - raise ResolverError("Invalid index checker: %s" % index_check) - new_path.append((node_check, index_check)) - if kind is str: - kind = ScalarNode - elif kind is list: - kind = SequenceNode - elif kind is dict: - kind = MappingNode - elif kind not in [ScalarNode, SequenceNode, MappingNode] \ - and kind is not None: - raise ResolverError("Invalid node kind: %s" % kind) - cls.yaml_path_resolvers[tuple(new_path), kind] = tag - - def descend_resolver(self, current_node, current_index): - if not self.yaml_path_resolvers: - return - exact_paths = {} - prefix_paths = [] - if current_node: - depth = len(self.resolver_prefix_paths) - for path, kind in self.resolver_prefix_paths[-1]: - if self.check_resolver_prefix(depth, path, kind, - current_node, current_index): - if len(path) > depth: - prefix_paths.append((path, kind)) - else: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - for path, kind in self.yaml_path_resolvers: - if not path: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - prefix_paths.append((path, kind)) - self.resolver_exact_paths.append(exact_paths) - self.resolver_prefix_paths.append(prefix_paths) - - def ascend_resolver(self): - if not self.yaml_path_resolvers: - return - self.resolver_exact_paths.pop() - self.resolver_prefix_paths.pop() - - def check_resolver_prefix(self, depth, path, kind, - current_node, current_index): - node_check, index_check = path[depth-1] - if isinstance(node_check, str): - if current_node.tag != node_check: - return - elif node_check is not None: - if not isinstance(current_node, node_check): - return - if index_check is True and current_index is not None: - return - if (index_check is False or index_check is None) \ - and current_index is None: - return - if 
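Since `add_path_resolver` is explicitly experimental, the following is only a sketch of driving the path machinery above through the public API; the `!server` tag and `Server` class are invented for the example:

```python
import yaml

class Server:
    def __init__(self, host, port):
        self.host, self.port = host, port
    def __repr__(self):
        return 'Server(%r, %r)' % (self.host, self.port)

# Every mapping that is an item of the top-level "servers" sequence gets
# the !server tag implicitly, without the document spelling it out.
yaml.add_path_resolver('!server', ['servers', None], kind=dict)
yaml.add_constructor('!server',
                     lambda loader, node: Server(**loader.construct_mapping(node)))

doc = """
servers:
  - {host: a.example, port: 80}
  - {host: b.example, port: 443}
"""
print(yaml.load(doc, Loader=yaml.Loader))
# {'servers': [Server('a.example', 80), Server('b.example', 443)]}
```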
isinstance(index_check, str): - if not (isinstance(current_index, ScalarNode) - and index_check == current_index.value): - return - elif isinstance(index_check, int) and not isinstance(index_check, bool): - if index_check != current_index: - return - return True - - def resolve(self, kind, value, implicit): - if kind is ScalarNode and implicit[0]: - if value == '': - resolvers = self.yaml_implicit_resolvers.get('', []) - else: - resolvers = self.yaml_implicit_resolvers.get(value[0], []) - resolvers += self.yaml_implicit_resolvers.get(None, []) - for tag, regexp in resolvers: - if regexp.match(value): - return tag - implicit = implicit[1] - if self.yaml_path_resolvers: - exact_paths = self.resolver_exact_paths[-1] - if kind in exact_paths: - return exact_paths[kind] - if None in exact_paths: - return exact_paths[None] - if kind is ScalarNode: - return self.DEFAULT_SCALAR_TAG - elif kind is SequenceNode: - return self.DEFAULT_SEQUENCE_TAG - elif kind is MappingNode: - return self.DEFAULT_MAPPING_TAG - -class Resolver(BaseResolver): - pass - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:bool', - re.compile(r'''^(?:yes|Yes|YES|no|No|NO - |true|True|TRUE|false|False|FALSE - |on|On|ON|off|Off|OFF)$''', re.X), - list('yYnNtTfFoO')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:float', - re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? - |\.[0-9_]+(?:[eE][-+][0-9]+)? - |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* - |[-+]?\.(?:inf|Inf|INF) - |\.(?:nan|NaN|NAN))$''', re.X), - list('-+0123456789.')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:int', - re.compile(r'''^(?:[-+]?0b[0-1_]+ - |[-+]?0[0-7_]+ - |[-+]?(?:0|[1-9][0-9_]*) - |[-+]?0x[0-9a-fA-F_]+ - |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), - list('-+0123456789')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:merge', - re.compile(r'^(?:<<)$'), - ['<']) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:null', - re.compile(r'''^(?: ~ - |null|Null|NULL - | )$''', re.X), - ['~', 'n', 'N', '']) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:timestamp', - re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] - |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? - (?:[Tt]|[ \t]+)[0-9][0-9]? - :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? - (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), - list('0123456789')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:value', - re.compile(r'^(?:=)$'), - ['=']) - -# The following resolver is only for documentation purposes. It cannot work -# because plain scalars cannot start with '!', '&', or '*'. -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:yaml', - re.compile(r'^(?:!|&|\*)$'), - list('!&*')) - diff --git a/python.d/python_modules/pyyaml3/scanner.py b/python.d/python_modules/pyyaml3/scanner.py deleted file mode 100644 index 494d975ba..000000000 --- a/python.d/python_modules/pyyaml3/scanner.py +++ /dev/null @@ -1,1448 +0,0 @@ - -# Scanner produces tokens of the following types: -# STREAM-START -# STREAM-END -# DIRECTIVE(name, value) -# DOCUMENT-START -# DOCUMENT-END -# BLOCK-SEQUENCE-START -# BLOCK-MAPPING-START -# BLOCK-END -# FLOW-SEQUENCE-START -# FLOW-MAPPING-START -# FLOW-SEQUENCE-END -# FLOW-MAPPING-END -# BLOCK-ENTRY -# FLOW-ENTRY -# KEY -# VALUE -# ALIAS(value) -# ANCHOR(value) -# TAG(value) -# SCALAR(value, plain, style) -# -# Read comments in the Scanner code for more details. 
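The registrations above are what give untagged plain scalars their types; a short demonstration via `yaml.compose()`, which exposes the resolved tag on each node:

```python
import yaml

# Each plain scalar is matched against the resolvers registered for its
# first character; the first regexp that matches supplies the tag, and
# anything unmatched falls back to the default !!str tag.
for text in ('yes', '3.14', '0x1F', '2018-11-07', '~', 'hello'):
    print('%-10s -> %s' % (text, yaml.compose(text).tag))
# yes        -> tag:yaml.org,2002:bool
# 3.14       -> tag:yaml.org,2002:float
# 0x1F       -> tag:yaml.org,2002:int
# 2018-11-07 -> tag:yaml.org,2002:timestamp
# ~          -> tag:yaml.org,2002:null
# hello      -> tag:yaml.org,2002:str
```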
-# - -__all__ = ['Scanner', 'ScannerError'] - -from .error import MarkedYAMLError -from .tokens import * - -class ScannerError(MarkedYAMLError): - pass - -class SimpleKey: - # See below simple keys treatment. - - def __init__(self, token_number, required, index, line, column, mark): - self.token_number = token_number - self.required = required - self.index = index - self.line = line - self.column = column - self.mark = mark - -class Scanner: - - def __init__(self): - """Initialize the scanner.""" - # It is assumed that Scanner and Reader will have a common descendant. - # Reader do the dirty work of checking for BOM and converting the - # input data to Unicode. It also adds NUL to the end. - # - # Reader supports the following methods - # self.peek(i=0) # peek the next i-th character - # self.prefix(l=1) # peek the next l characters - # self.forward(l=1) # read the next l characters and move the pointer. - - # Had we reached the end of the stream? - self.done = False - - # The number of unclosed '{' and '['. `flow_level == 0` means block - # context. - self.flow_level = 0 - - # List of processed tokens that are not yet emitted. - self.tokens = [] - - # Add the STREAM-START token. - self.fetch_stream_start() - - # Number of tokens that were emitted through the `get_token` method. - self.tokens_taken = 0 - - # The current indentation level. - self.indent = -1 - - # Past indentation levels. - self.indents = [] - - # Variables related to simple keys treatment. - - # A simple key is a key that is not denoted by the '?' indicator. - # Example of simple keys: - # --- - # block simple key: value - # ? not a simple key: - # : { flow simple key: value } - # We emit the KEY token before all keys, so when we find a potential - # simple key, we try to locate the corresponding ':' indicator. - # Simple keys should be limited to a single line and 1024 characters. - - # Can a simple key start at the current position? A simple key may - # start: - # - at the beginning of the line, not counting indentation spaces - # (in block context), - # - after '{', '[', ',' (in the flow context), - # - after '?', ':', '-' (in the block context). - # In the block context, this flag also signifies if a block collection - # may start at the current position. - self.allow_simple_key = True - - # Keep track of possible simple keys. This is a dictionary. The key - # is `flow_level`; there can be no more that one possible simple key - # for each level. The value is a SimpleKey record: - # (token_number, required, index, line, column, mark) - # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), - # '[', or '{' tokens. - self.possible_simple_keys = {} - - # Public methods. - - def check_token(self, *choices): - # Check if the next token is one of the given types. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - if not choices: - return True - for choice in choices: - if isinstance(self.tokens[0], choice): - return True - return False - - def peek_token(self): - # Return the next token, but do not delete if from the queue. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - return self.tokens[0] - - def get_token(self): - # Return the next token. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - self.tokens_taken += 1 - return self.tokens.pop(0) - - # Private methods. 
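For reference, the public `yaml.scan()` helper drives exactly this check/peek/get loop; a minimal sketch of the token stream it yields:

```python
import yaml

# yaml.scan() drives the check/peek/get methods above and yields raw tokens.
for token in yaml.scan('- a\n- b: 1\n'):
    print(token.__class__.__name__)
# StreamStartToken, BlockSequenceStartToken, BlockEntryToken, ScalarToken,
# BlockEntryToken, BlockMappingStartToken, KeyToken, ScalarToken,
# ValueToken, ScalarToken, BlockEndToken, BlockEndToken, StreamEndToken
```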
- - def need_more_tokens(self): - if self.done: - return False - if not self.tokens: - return True - # The current token may be a potential simple key, so we - # need to look further. - self.stale_possible_simple_keys() - if self.next_possible_simple_key() == self.tokens_taken: - return True - - def fetch_more_tokens(self): - - # Eat whitespaces and comments until we reach the next token. - self.scan_to_next_token() - - # Remove obsolete possible simple keys. - self.stale_possible_simple_keys() - - # Compare the current indentation and column. It may add some tokens - # and decrease the current indentation level. - self.unwind_indent(self.column) - - # Peek the next character. - ch = self.peek() - - # Is it the end of stream? - if ch == '\0': - return self.fetch_stream_end() - - # Is it a directive? - if ch == '%' and self.check_directive(): - return self.fetch_directive() - - # Is it the document start? - if ch == '-' and self.check_document_start(): - return self.fetch_document_start() - - # Is it the document end? - if ch == '.' and self.check_document_end(): - return self.fetch_document_end() - - # TODO: support for BOM within a stream. - #if ch == '\uFEFF': - # return self.fetch_bom() <-- issue BOMToken - - # Note: the order of the following checks is NOT significant. - - # Is it the flow sequence start indicator? - if ch == '[': - return self.fetch_flow_sequence_start() - - # Is it the flow mapping start indicator? - if ch == '{': - return self.fetch_flow_mapping_start() - - # Is it the flow sequence end indicator? - if ch == ']': - return self.fetch_flow_sequence_end() - - # Is it the flow mapping end indicator? - if ch == '}': - return self.fetch_flow_mapping_end() - - # Is it the flow entry indicator? - if ch == ',': - return self.fetch_flow_entry() - - # Is it the block entry indicator? - if ch == '-' and self.check_block_entry(): - return self.fetch_block_entry() - - # Is it the key indicator? - if ch == '?' and self.check_key(): - return self.fetch_key() - - # Is it the value indicator? - if ch == ':' and self.check_value(): - return self.fetch_value() - - # Is it an alias? - if ch == '*': - return self.fetch_alias() - - # Is it an anchor? - if ch == '&': - return self.fetch_anchor() - - # Is it a tag? - if ch == '!': - return self.fetch_tag() - - # Is it a literal scalar? - if ch == '|' and not self.flow_level: - return self.fetch_literal() - - # Is it a folded scalar? - if ch == '>' and not self.flow_level: - return self.fetch_folded() - - # Is it a single quoted scalar? - if ch == '\'': - return self.fetch_single() - - # Is it a double quoted scalar? - if ch == '\"': - return self.fetch_double() - - # It must be a plain scalar then. - if self.check_plain(): - return self.fetch_plain() - - # No? It's an error. Let's produce a nice error message. - raise ScannerError("while scanning for the next token", None, - "found character %r that cannot start any token" % ch, - self.get_mark()) - - # Simple keys treatment. - - def next_possible_simple_key(self): - # Return the number of the nearest possible simple key. Actually we - # don't need to loop through the whole dictionary. 
We may replace it - # with the following code: - # if not self.possible_simple_keys: - # return None - # return self.possible_simple_keys[ - # min(self.possible_simple_keys.keys())].token_number - min_token_number = None - for level in self.possible_simple_keys: - key = self.possible_simple_keys[level] - if min_token_number is None or key.token_number < min_token_number: - min_token_number = key.token_number - return min_token_number - - def stale_possible_simple_keys(self): - # Remove entries that are no longer possible simple keys. According to - # the YAML specification, simple keys - # - should be limited to a single line, - # - should be no longer than 1024 characters. - # Disabling this procedure will allow simple keys of any length and - # height (may cause problems if indentation is broken though). - for level in list(self.possible_simple_keys): - key = self.possible_simple_keys[level] - if key.line != self.line \ - or self.index-key.index > 1024: - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not found expected ':'", self.get_mark()) - del self.possible_simple_keys[level] - - def save_possible_simple_key(self): - # The next token may start a simple key. We check if it's possible - # and save its position. This function is called for - # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. - - # Check if a simple key is required at the current position. - required = not self.flow_level and self.indent == self.column - - # A simple key is required only if it is the first token in the current - # line. Therefore it is always allowed. - assert self.allow_simple_key or not required - - # The next token might be a simple key. Let's save it's number and - # position. - if self.allow_simple_key: - self.remove_possible_simple_key() - token_number = self.tokens_taken+len(self.tokens) - key = SimpleKey(token_number, required, - self.index, self.line, self.column, self.get_mark()) - self.possible_simple_keys[self.flow_level] = key - - def remove_possible_simple_key(self): - # Remove the saved possible key position at the current flow level. - if self.flow_level in self.possible_simple_keys: - key = self.possible_simple_keys[self.flow_level] - - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not found expected ':'", self.get_mark()) - - del self.possible_simple_keys[self.flow_level] - - # Indentation functions. - - def unwind_indent(self, column): - - ## In flow context, tokens should respect indentation. - ## Actually the condition should be `self.indent >= column` according to - ## the spec. But this condition will prohibit intuitively correct - ## constructions such as - ## key : { - ## } - #if self.flow_level and self.indent > column: - # raise ScannerError(None, None, - # "invalid intendation or unclosed '[' or '{'", - # self.get_mark()) - - # In the flow context, indentation is ignored. We make the scanner less - # restrictive then specification requires. - if self.flow_level: - return - - # In block context, we may need to issue the BLOCK-END tokens. - while self.indent > column: - mark = self.get_mark() - self.indent = self.indents.pop() - self.tokens.append(BlockEndToken(mark, mark)) - - def add_indent(self, column): - # Check if we need to increase indentation. - if self.indent < column: - self.indents.append(self.indent) - self.indent = column - return True - return False - - # Fetchers. 
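As a sketch of the distinction the simple-key bookkeeping above enforces: a simple key must fit on one line (and within 1024 characters), while a key introduced with the `?` indicator is exempt from both limits:

```python
import yaml

# A '?' (complex) key may span lines; here a folded block scalar serves
# as the mapping key, something a simple key could never do.
doc = (
    "? >-\n"
    "    a folded, potentially very long\n"
    "    multi-line key\n"
    ": its value\n"
)
print(yaml.safe_load(doc))
# {'a folded, potentially very long multi-line key': 'its value'}
```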
- - def fetch_stream_start(self): - # We always add STREAM-START as the first token and STREAM-END as the - # last token. - - # Read the token. - mark = self.get_mark() - - # Add STREAM-START. - self.tokens.append(StreamStartToken(mark, mark, - encoding=self.encoding)) - - - def fetch_stream_end(self): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - self.possible_simple_keys = {} - - # Read the token. - mark = self.get_mark() - - # Add STREAM-END. - self.tokens.append(StreamEndToken(mark, mark)) - - # The steam is finished. - self.done = True - - def fetch_directive(self): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Scan and add DIRECTIVE. - self.tokens.append(self.scan_directive()) - - def fetch_document_start(self): - self.fetch_document_indicator(DocumentStartToken) - - def fetch_document_end(self): - self.fetch_document_indicator(DocumentEndToken) - - def fetch_document_indicator(self, TokenClass): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. Note that there could not be a block collection - # after '---'. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Add DOCUMENT-START or DOCUMENT-END. - start_mark = self.get_mark() - self.forward(3) - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_start(self): - self.fetch_flow_collection_start(FlowSequenceStartToken) - - def fetch_flow_mapping_start(self): - self.fetch_flow_collection_start(FlowMappingStartToken) - - def fetch_flow_collection_start(self, TokenClass): - - # '[' and '{' may start a simple key. - self.save_possible_simple_key() - - # Increase the flow level. - self.flow_level += 1 - - # Simple keys are allowed after '[' and '{'. - self.allow_simple_key = True - - # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_end(self): - self.fetch_flow_collection_end(FlowSequenceEndToken) - - def fetch_flow_mapping_end(self): - self.fetch_flow_collection_end(FlowMappingEndToken) - - def fetch_flow_collection_end(self, TokenClass): - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Decrease the flow level. - self.flow_level -= 1 - - # No simple keys after ']' or '}'. - self.allow_simple_key = False - - # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_entry(self): - - # Simple keys are allowed after ','. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add FLOW-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(FlowEntryToken(start_mark, end_mark)) - - def fetch_block_entry(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a new entry? - if not self.allow_simple_key: - raise ScannerError(None, None, - "sequence entries are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-SEQUENCE-START. 
- if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockSequenceStartToken(mark, mark)) - - # It's an error for the block entry to occur in the flow context, - # but we let the parser detect this. - else: - pass - - # Simple keys are allowed after '-'. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add BLOCK-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(BlockEntryToken(start_mark, end_mark)) - - def fetch_key(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a key (not nessesary a simple)? - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping keys are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-MAPPING-START. - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after '?' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add KEY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(KeyToken(start_mark, end_mark)) - - def fetch_value(self): - - # Do we determine a simple key? - if self.flow_level in self.possible_simple_keys: - - # Add KEY. - key = self.possible_simple_keys[self.flow_level] - del self.possible_simple_keys[self.flow_level] - self.tokens.insert(key.token_number-self.tokens_taken, - KeyToken(key.mark, key.mark)) - - # If this key starts a new block mapping, we need to add - # BLOCK-MAPPING-START. - if not self.flow_level: - if self.add_indent(key.column): - self.tokens.insert(key.token_number-self.tokens_taken, - BlockMappingStartToken(key.mark, key.mark)) - - # There cannot be two simple keys one after another. - self.allow_simple_key = False - - # It must be a part of a complex key. - else: - - # Block context needs additional checks. - # (Do we really need them? They will be catched by the parser - # anyway.) - if not self.flow_level: - - # We are allowed to start a complex value if and only if - # we can start a simple key. - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping values are not allowed here", - self.get_mark()) - - # If this value starts a new block mapping, we need to add - # BLOCK-MAPPING-START. It will be detected as an error later by - # the parser. - if not self.flow_level: - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after ':' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add VALUE. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(ValueToken(start_mark, end_mark)) - - def fetch_alias(self): - - # ALIAS could be a simple key. - self.save_possible_simple_key() - - # No simple keys after ALIAS. - self.allow_simple_key = False - - # Scan and add ALIAS. - self.tokens.append(self.scan_anchor(AliasToken)) - - def fetch_anchor(self): - - # ANCHOR could start a simple key. - self.save_possible_simple_key() - - # No simple keys after ANCHOR. - self.allow_simple_key = False - - # Scan and add ANCHOR. 
- self.tokens.append(self.scan_anchor(AnchorToken)) - - def fetch_tag(self): - - # TAG could start a simple key. - self.save_possible_simple_key() - - # No simple keys after TAG. - self.allow_simple_key = False - - # Scan and add TAG. - self.tokens.append(self.scan_tag()) - - def fetch_literal(self): - self.fetch_block_scalar(style='|') - - def fetch_folded(self): - self.fetch_block_scalar(style='>') - - def fetch_block_scalar(self, style): - - # A simple key may follow a block scalar. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Scan and add SCALAR. - self.tokens.append(self.scan_block_scalar(style)) - - def fetch_single(self): - self.fetch_flow_scalar(style='\'') - - def fetch_double(self): - self.fetch_flow_scalar(style='"') - - def fetch_flow_scalar(self, style): - - # A flow scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after flow scalars. - self.allow_simple_key = False - - # Scan and add SCALAR. - self.tokens.append(self.scan_flow_scalar(style)) - - def fetch_plain(self): - - # A plain scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after plain scalars. But note that `scan_plain` will - # change this flag if the scan is finished at the beginning of the - # line. - self.allow_simple_key = False - - # Scan and add SCALAR. May change `allow_simple_key`. - self.tokens.append(self.scan_plain()) - - # Checkers. - - def check_directive(self): - - # DIRECTIVE: ^ '%' ... - # The '%' indicator is already checked. - if self.column == 0: - return True - - def check_document_start(self): - - # DOCUMENT-START: ^ '---' (' '|'\n') - if self.column == 0: - if self.prefix(3) == '---' \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return True - - def check_document_end(self): - - # DOCUMENT-END: ^ '...' (' '|'\n') - if self.column == 0: - if self.prefix(3) == '...' \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return True - - def check_block_entry(self): - - # BLOCK-ENTRY: '-' (' '|'\n') - return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' - - def check_key(self): - - # KEY(flow context): '?' - if self.flow_level: - return True - - # KEY(block context): '?' (' '|'\n') - else: - return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' - - def check_value(self): - - # VALUE(flow context): ':' - if self.flow_level: - return True - - # VALUE(block context): ':' (' '|'\n') - else: - return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' - - def check_plain(self): - - # A plain scalar may start with any non-space character except: - # '-', '?', ':', ',', '[', ']', '{', '}', - # '#', '&', '*', '!', '|', '>', '\'', '\"', - # '%', '@', '`'. - # - # It may also start with - # '-', '?', ':' - # if it is followed by a non-space character. - # - # Note that we limit the last rule to the block context (except the - # '-' character) because we want the flow context to be space - # independent. - ch = self.peek() - return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ - or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029' - and (ch == '-' or (not self.flow_level and ch in '?:'))) - - # Scanners. - - def scan_to_next_token(self): - # We ignore spaces, line breaks and comments. - # If we find a line break in the block context, we set the flag - # `allow_simple_key` on. - # The byte order mark is stripped if it's the first character in the - # stream. We do not yet support BOM inside the stream as the - # specification requires. 
Any such mark will be considered as a part - # of the document. - # - # TODO: We need to make tab handling rules more sane. A good rule is - # Tabs cannot precede tokens - # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, - # KEY(block), VALUE(block), BLOCK-ENTRY - # So the checking code is - # if <TAB>: - # self.allow_simple_keys = False - # We also need to add the check for `allow_simple_keys == True` to - # `unwind_indent` before issuing BLOCK-END. - # Scanners for block, flow, and plain scalars need to be modified. - - if self.index == 0 and self.peek() == '\uFEFF': - self.forward() - found = False - while not found: - while self.peek() == ' ': - self.forward() - if self.peek() == '#': - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - if self.scan_line_break(): - if not self.flow_level: - self.allow_simple_key = True - else: - found = True - - def scan_directive(self): - # See the specification for details. - start_mark = self.get_mark() - self.forward() - name = self.scan_directive_name(start_mark) - value = None - if name == 'YAML': - value = self.scan_yaml_directive_value(start_mark) - end_mark = self.get_mark() - elif name == 'TAG': - value = self.scan_tag_directive_value(start_mark) - end_mark = self.get_mark() - else: - end_mark = self.get_mark() - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - self.scan_directive_ignored_line(start_mark) - return DirectiveToken(name, value, start_mark, end_mark) - - def scan_directive_name(self, start_mark): - # See the specification for details. - length = 0 - ch = self.peek(length) - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - return value - - def scan_yaml_directive_value(self, start_mark): - # See the specification for details. - while self.peek() == ' ': - self.forward() - major = self.scan_yaml_directive_number(start_mark) - if self.peek() != '.': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or '.', but found %r" % self.peek(), - self.get_mark()) - self.forward() - minor = self.scan_yaml_directive_number(start_mark) - if self.peek() not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or ' ', but found %r" % self.peek(), - self.get_mark()) - return (major, minor) - - def scan_yaml_directive_number(self, start_mark): - # See the specification for details. - ch = self.peek() - if not ('0' <= ch <= '9'): - raise ScannerError("while scanning a directive", start_mark, - "expected a digit, but found %r" % ch, self.get_mark()) - length = 0 - while '0' <= self.peek(length) <= '9': - length += 1 - value = int(self.prefix(length)) - self.forward(length) - return value - - def scan_tag_directive_value(self, start_mark): - # See the specification for details. 
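A short sketch of what the directive scanners above produce for a `%YAML` directive:

```python
import yaml

doc = "%YAML 1.1\n--- !!str 2018\n"

# The second token in the stream is the scanned directive.
tok = list(yaml.scan(doc))[1]
print(tok.name, tok.value)        # YAML (1, 1)

# The explicit !!str tag overrides implicit int resolution for the scalar.
print(repr(yaml.safe_load(doc)))  # '2018'
```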
- while self.peek() == ' ': - self.forward() - handle = self.scan_tag_directive_handle(start_mark) - while self.peek() == ' ': - self.forward() - prefix = self.scan_tag_directive_prefix(start_mark) - return (handle, prefix) - - def scan_tag_directive_handle(self, start_mark): - # See the specification for details. - value = self.scan_tag_handle('directive', start_mark) - ch = self.peek() - if ch != ' ': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch, self.get_mark()) - return value - - def scan_tag_directive_prefix(self, start_mark): - # See the specification for details. - value = self.scan_tag_uri('directive', start_mark) - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch, self.get_mark()) - return value - - def scan_directive_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == ' ': - self.forward() - if self.peek() == '#': - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in '\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a comment or a line break, but found %r" - % ch, self.get_mark()) - self.scan_line_break() - - def scan_anchor(self, TokenClass): - # The specification does not restrict characters for anchors and - # aliases. This may lead to problems, for instance, the document: - # [ *alias, value ] - # can be interpteted in two ways, as - # [ "value" ] - # and - # [ *alias , "value" ] - # Therefore we restrict aliases to numbers and ASCII letters. - start_mark = self.get_mark() - indicator = self.peek() - if indicator == '*': - name = 'alias' - else: - name = 'anchor' - self.forward() - length = 0 - ch = self.peek(length) - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - end_mark = self.get_mark() - return TokenClass(value, start_mark, end_mark) - - def scan_tag(self): - # See the specification for details. - start_mark = self.get_mark() - ch = self.peek(1) - if ch == '<': - handle = None - self.forward(2) - suffix = self.scan_tag_uri('tag', start_mark) - if self.peek() != '>': - raise ScannerError("while parsing a tag", start_mark, - "expected '>', but found %r" % self.peek(), - self.get_mark()) - self.forward() - elif ch in '\0 \t\r\n\x85\u2028\u2029': - handle = None - suffix = '!' - self.forward() - else: - length = 1 - use_handle = False - while ch not in '\0 \r\n\x85\u2028\u2029': - if ch == '!': - use_handle = True - break - length += 1 - ch = self.peek(length) - handle = '!' - if use_handle: - handle = self.scan_tag_handle('tag', start_mark) - else: - handle = '!' 
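A minimal sketch of the anchor/alias syntax that `scan_anchor` above tokenizes:

```python
import yaml

# '&name' anchors a node, '*name' refers back to it; per scan_anchor,
# both names are restricted to ASCII letters, digits, '-' and '_'.
doc = """
defaults: &base {retries: 3, timeout: 10}
service_a: *base
"""
data = yaml.safe_load(doc)
print(data['service_a'])                      # {'retries': 3, 'timeout': 10}
print(data['service_a'] is data['defaults'])  # True: the alias shares one object
```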
- self.forward() - suffix = self.scan_tag_uri('tag', start_mark) - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a tag", start_mark, - "expected ' ', but found %r" % ch, self.get_mark()) - value = (handle, suffix) - end_mark = self.get_mark() - return TagToken(value, start_mark, end_mark) - - def scan_block_scalar(self, style): - # See the specification for details. - - if style == '>': - folded = True - else: - folded = False - - chunks = [] - start_mark = self.get_mark() - - # Scan the header. - self.forward() - chomping, increment = self.scan_block_scalar_indicators(start_mark) - self.scan_block_scalar_ignored_line(start_mark) - - # Determine the indentation level and go to the first non-empty line. - min_indent = self.indent+1 - if min_indent < 1: - min_indent = 1 - if increment is None: - breaks, max_indent, end_mark = self.scan_block_scalar_indentation() - indent = max(min_indent, max_indent) - else: - indent = min_indent+increment-1 - breaks, end_mark = self.scan_block_scalar_breaks(indent) - line_break = '' - - # Scan the inner part of the block scalar. - while self.column == indent and self.peek() != '\0': - chunks.extend(breaks) - leading_non_space = self.peek() not in ' \t' - length = 0 - while self.peek(length) not in '\0\r\n\x85\u2028\u2029': - length += 1 - chunks.append(self.prefix(length)) - self.forward(length) - line_break = self.scan_line_break() - breaks, end_mark = self.scan_block_scalar_breaks(indent) - if self.column == indent and self.peek() != '\0': - - # Unfortunately, folding rules are ambiguous. - # - # This is the folding according to the specification: - - if folded and line_break == '\n' \ - and leading_non_space and self.peek() not in ' \t': - if not breaks: - chunks.append(' ') - else: - chunks.append(line_break) - - # This is Clark Evans's interpretation (also in the spec - # examples): - # - #if folded and line_break == '\n': - # if not breaks: - # if self.peek() not in ' \t': - # chunks.append(' ') - # else: - # chunks.append(line_break) - #else: - # chunks.append(line_break) - else: - break - - # Chomp the tail. - if chomping is not False: - chunks.append(line_break) - if chomping is True: - chunks.extend(breaks) - - # We are done. - return ScalarToken(''.join(chunks), False, start_mark, end_mark, - style) - - def scan_block_scalar_indicators(self, start_mark): - # See the specification for details. - chomping = None - increment = None - ch = self.peek() - if ch in '+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch in '0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - elif ch in '0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - ch = self.peek() - if ch in '+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected chomping or indentation indicators, but found %r" - % ch, self.get_mark()) - return chomping, increment - - def scan_block_scalar_ignored_line(self, start_mark): - # See the specification for details. 
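A small sketch of the block-scalar styles and chomping indicators scanned above:

```python
import yaml

# '|' keeps line breaks and '>' folds them into spaces; a '-' indicator
# chomps the final break, '+' would keep all trailing breaks.
doc = """
literal: |
  line one
  line two
folded: >-
  line one
  line two
"""
data = yaml.safe_load(doc)
print(repr(data['literal']))  # 'line one\nline two\n'
print(repr(data['folded']))   # 'line one line two'
```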
- while self.peek() == ' ': - self.forward() - if self.peek() == '#': - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in '\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected a comment or a line break, but found %r" % ch, - self.get_mark()) - self.scan_line_break() - - def scan_block_scalar_indentation(self): - # See the specification for details. - chunks = [] - max_indent = 0 - end_mark = self.get_mark() - while self.peek() in ' \r\n\x85\u2028\u2029': - if self.peek() != ' ': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - else: - self.forward() - if self.column > max_indent: - max_indent = self.column - return chunks, max_indent, end_mark - - def scan_block_scalar_breaks(self, indent): - # See the specification for details. - chunks = [] - end_mark = self.get_mark() - while self.column < indent and self.peek() == ' ': - self.forward() - while self.peek() in '\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - while self.column < indent and self.peek() == ' ': - self.forward() - return chunks, end_mark - - def scan_flow_scalar(self, style): - # See the specification for details. - # Note that we loose indentation rules for quoted scalars. Quoted - # scalars don't need to adhere indentation because " and ' clearly - # mark the beginning and the end of them. Therefore we are less - # restrictive then the specification requires. We only need to check - # that document separators are not included in scalars. - if style == '"': - double = True - else: - double = False - chunks = [] - start_mark = self.get_mark() - quote = self.peek() - self.forward() - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - while self.peek() != quote: - chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - self.forward() - end_mark = self.get_mark() - return ScalarToken(''.join(chunks), False, start_mark, end_mark, - style) - - ESCAPE_REPLACEMENTS = { - '0': '\0', - 'a': '\x07', - 'b': '\x08', - 't': '\x09', - '\t': '\x09', - 'n': '\x0A', - 'v': '\x0B', - 'f': '\x0C', - 'r': '\x0D', - 'e': '\x1B', - ' ': '\x20', - '\"': '\"', - '\\': '\\', - 'N': '\x85', - '_': '\xA0', - 'L': '\u2028', - 'P': '\u2029', - } - - ESCAPE_CODES = { - 'x': 2, - 'u': 4, - 'U': 8, - } - - def scan_flow_scalar_non_spaces(self, double, start_mark): - # See the specification for details. 
- chunks = [] - while True: - length = 0 - while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029': - length += 1 - if length: - chunks.append(self.prefix(length)) - self.forward(length) - ch = self.peek() - if not double and ch == '\'' and self.peek(1) == '\'': - chunks.append('\'') - self.forward(2) - elif (double and ch == '\'') or (not double and ch in '\"\\'): - chunks.append(ch) - self.forward() - elif double and ch == '\\': - self.forward() - ch = self.peek() - if ch in self.ESCAPE_REPLACEMENTS: - chunks.append(self.ESCAPE_REPLACEMENTS[ch]) - self.forward() - elif ch in self.ESCAPE_CODES: - length = self.ESCAPE_CODES[ch] - self.forward() - for k in range(length): - if self.peek(k) not in '0123456789ABCDEFabcdef': - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % - (length, self.peek(k)), self.get_mark()) - code = int(self.prefix(length), 16) - chunks.append(chr(code)) - self.forward(length) - elif ch in '\r\n\x85\u2028\u2029': - self.scan_line_break() - chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) - else: - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "found unknown escape character %r" % ch, self.get_mark()) - else: - return chunks - - def scan_flow_scalar_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - length = 0 - while self.peek(length) in ' \t': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch == '\0': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected end of stream", self.get_mark()) - elif ch in '\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - breaks = self.scan_flow_scalar_breaks(double, start_mark) - if line_break != '\n': - chunks.append(line_break) - elif not breaks: - chunks.append(' ') - chunks.extend(breaks) - else: - chunks.append(whitespaces) - return chunks - - def scan_flow_scalar_breaks(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - # Instead of checking indentation, we check for document - # separators. - prefix = self.prefix(3) - if (prefix == '---' or prefix == '...') \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected document separator", self.get_mark()) - while self.peek() in ' \t': - self.forward() - if self.peek() in '\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - else: - return chunks - - def scan_plain(self): - # See the specification for details. - # We add an additional restriction for the flow context: - # plain scalars in the flow context cannot contain ',', ':' and '?'. - # We also keep track of the `allow_simple_key` flag here. - # Indentation rules are loosed for the flow context. - chunks = [] - start_mark = self.get_mark() - end_mark = start_mark - indent = self.indent+1 - # We allow zero indentation for scalars, but then we need to check for - # document separators at the beginning of the line. 
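A quick sketch of the escape handling above, as seen from the loader:

```python
import yaml

# Double-quoted scalars understand the escape table above plus \xXX,
# \uXXXX and \UXXXXXXXX hex escapes; single-quoted scalars only know ''
# as an escaped quote.
print(repr(yaml.safe_load(r'"\tindented \x41 \u2713"')))
# '\tindented A ✓'
print(yaml.safe_load("'it''s a single-quoted scalar'"))
# it's a single-quoted scalar
```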
- #if indent == 0: - # indent = 1 - spaces = [] - while True: - length = 0 - if self.peek() == '#': - break - while True: - ch = self.peek(length) - if ch in '\0 \t\r\n\x85\u2028\u2029' \ - or (not self.flow_level and ch == ':' and - self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \ - or (self.flow_level and ch in ',:?[]{}'): - break - length += 1 - # It's not clear what we should do with ':' in the flow context. - if (self.flow_level and ch == ':' - and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'): - self.forward(length) - raise ScannerError("while scanning a plain scalar", start_mark, - "found unexpected ':'", self.get_mark(), - "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") - if length == 0: - break - self.allow_simple_key = False - chunks.extend(spaces) - chunks.append(self.prefix(length)) - self.forward(length) - end_mark = self.get_mark() - spaces = self.scan_plain_spaces(indent, start_mark) - if not spaces or self.peek() == '#' \ - or (not self.flow_level and self.column < indent): - break - return ScalarToken(''.join(chunks), True, start_mark, end_mark) - - def scan_plain_spaces(self, indent, start_mark): - # See the specification for details. - # The specification is really confusing about tabs in plain scalars. - # We just forbid them completely. Do not use tabs in YAML! - chunks = [] - length = 0 - while self.peek(length) in ' ': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch in '\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - self.allow_simple_key = True - prefix = self.prefix(3) - if (prefix == '---' or prefix == '...') \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return - breaks = [] - while self.peek() in ' \r\n\x85\u2028\u2029': - if self.peek() == ' ': - self.forward() - else: - breaks.append(self.scan_line_break()) - prefix = self.prefix(3) - if (prefix == '---' or prefix == '...') \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return - if line_break != '\n': - chunks.append(line_break) - elif not breaks: - chunks.append(' ') - chunks.extend(breaks) - elif whitespaces: - chunks.append(whitespaces) - return chunks - - def scan_tag_handle(self, name, start_mark): - # See the specification for details. - # For some strange reasons, the specification does not allow '_' in - # tag handles. I have allowed it anyway. - ch = self.peek() - if ch != '!': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch, self.get_mark()) - length = 1 - ch = self.peek(length) - if ch != ' ': - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_': - length += 1 - ch = self.peek(length) - if ch != '!': - self.forward(length) - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch, self.get_mark()) - length += 1 - value = self.prefix(length) - self.forward(length) - return value - - def scan_tag_uri(self, name, start_mark): - # See the specification for details. - # Note: we do not check if URI is well-formed. 
- chunks = [] - length = 0 - ch = self.peek(length) - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-;/?:@&=+$,_.!~*\'()[]%': - if ch == '%': - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - chunks.append(self.scan_uri_escapes(name, start_mark)) - else: - length += 1 - ch = self.peek(length) - if length: - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - if not chunks: - raise ScannerError("while parsing a %s" % name, start_mark, - "expected URI, but found %r" % ch, self.get_mark()) - return ''.join(chunks) - - def scan_uri_escapes(self, name, start_mark): - # See the specification for details. - codes = [] - mark = self.get_mark() - while self.peek() == '%': - self.forward() - for k in range(2): - if self.peek(k) not in '0123456789ABCDEFabcdef': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected URI escape sequence of 2 hexdecimal numbers, but found %r" - % self.peek(k), self.get_mark()) - codes.append(int(self.prefix(2), 16)) - self.forward(2) - try: - value = bytes(codes).decode('utf-8') - except UnicodeDecodeError as exc: - raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) - return value - - def scan_line_break(self): - # Transforms: - # '\r\n' : '\n' - # '\r' : '\n' - # '\n' : '\n' - # '\x85' : '\n' - # '\u2028' : '\u2028' - # '\u2029 : '\u2029' - # default : '' - ch = self.peek() - if ch in '\r\n\x85': - if self.prefix(2) == '\r\n': - self.forward(2) - else: - self.forward() - return '\n' - elif ch in '\u2028\u2029': - self.forward() - return ch - return '' - -#try: -# import psyco -# psyco.bind(Scanner) -#except ImportError: -# pass - diff --git a/python.d/python_modules/pyyaml3/serializer.py b/python.d/python_modules/pyyaml3/serializer.py deleted file mode 100644 index fe911e67a..000000000 --- a/python.d/python_modules/pyyaml3/serializer.py +++ /dev/null @@ -1,111 +0,0 @@ - -__all__ = ['Serializer', 'SerializerError'] - -from .error import YAMLError -from .events import * -from .nodes import * - -class SerializerError(YAMLError): - pass - -class Serializer: - - ANCHOR_TEMPLATE = 'id%03d' - - def __init__(self, encoding=None, - explicit_start=None, explicit_end=None, version=None, tags=None): - self.use_encoding = encoding - self.use_explicit_start = explicit_start - self.use_explicit_end = explicit_end - self.use_version = version - self.use_tags = tags - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - self.closed = None - - def open(self): - if self.closed is None: - self.emit(StreamStartEvent(encoding=self.use_encoding)) - self.closed = False - elif self.closed: - raise SerializerError("serializer is closed") - else: - raise SerializerError("serializer is already opened") - - def close(self): - if self.closed is None: - raise SerializerError("serializer is not opened") - elif not self.closed: - self.emit(StreamEndEvent()) - self.closed = True - - #def __del__(self): - # self.close() - - def serialize(self, node): - if self.closed is None: - raise SerializerError("serializer is not opened") - elif self.closed: - raise SerializerError("serializer is closed") - self.emit(DocumentStartEvent(explicit=self.use_explicit_start, - version=self.use_version, tags=self.use_tags)) - self.anchor_node(node) - self.serialize_node(node, None, None) - self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - - def anchor_node(self, node): - if node in 
self.anchors: - if self.anchors[node] is None: - self.anchors[node] = self.generate_anchor(node) - else: - self.anchors[node] = None - if isinstance(node, SequenceNode): - for item in node.value: - self.anchor_node(item) - elif isinstance(node, MappingNode): - for key, value in node.value: - self.anchor_node(key) - self.anchor_node(value) - - def generate_anchor(self, node): - self.last_anchor_id += 1 - return self.ANCHOR_TEMPLATE % self.last_anchor_id - - def serialize_node(self, node, parent, index): - alias = self.anchors[node] - if node in self.serialized_nodes: - self.emit(AliasEvent(alias)) - else: - self.serialized_nodes[node] = True - self.descend_resolver(parent, index) - if isinstance(node, ScalarNode): - detected_tag = self.resolve(ScalarNode, node.value, (True, False)) - default_tag = self.resolve(ScalarNode, node.value, (False, True)) - implicit = (node.tag == detected_tag), (node.tag == default_tag) - self.emit(ScalarEvent(alias, node.tag, implicit, node.value, - style=node.style)) - elif isinstance(node, SequenceNode): - implicit = (node.tag - == self.resolve(SequenceNode, node.value, True)) - self.emit(SequenceStartEvent(alias, node.tag, implicit, - flow_style=node.flow_style)) - index = 0 - for item in node.value: - self.serialize_node(item, node, index) - index += 1 - self.emit(SequenceEndEvent()) - elif isinstance(node, MappingNode): - implicit = (node.tag - == self.resolve(MappingNode, node.value, True)) - self.emit(MappingStartEvent(alias, node.tag, implicit, - flow_style=node.flow_style)) - for key, value in node.value: - self.serialize_node(key, node, None) - self.serialize_node(value, node, key) - self.emit(MappingEndEvent()) - self.ascend_resolver() - diff --git a/python.d/python_modules/pyyaml3/tokens.py b/python.d/python_modules/pyyaml3/tokens.py deleted file mode 100644 index 4d0b48a39..000000000 --- a/python.d/python_modules/pyyaml3/tokens.py +++ /dev/null @@ -1,104 +0,0 @@ - -class Token(object): - def __init__(self, start_mark, end_mark): - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - attributes = [key for key in self.__dict__ - if not key.endswith('_mark')] - attributes.sort() - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - -#class BOMToken(Token): -# id = '<byte order mark>' - -class DirectiveToken(Token): - id = '<directive>' - def __init__(self, name, value, start_mark, end_mark): - self.name = name - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class DocumentStartToken(Token): - id = '<document start>' - -class DocumentEndToken(Token): - id = '<document end>' - -class StreamStartToken(Token): - id = '<stream start>' - def __init__(self, start_mark=None, end_mark=None, - encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndToken(Token): - id = '<stream end>' - -class BlockSequenceStartToken(Token): - id = '<block sequence start>' - -class BlockMappingStartToken(Token): - id = '<block mapping start>' - -class BlockEndToken(Token): - id = '<block end>' - -class FlowSequenceStartToken(Token): - id = '[' - -class FlowMappingStartToken(Token): - id = '{' - -class FlowSequenceEndToken(Token): - id = ']' - -class FlowMappingEndToken(Token): - id = '}' - -class KeyToken(Token): - id = '?' 
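For illustration, a minimal round trip through the serializer above via the public helpers (exact emitted formatting may vary slightly by PyYAML version):

```python
import yaml

# yaml.compose() builds a node graph; yaml.serialize() pushes it back
# through Serializer.serialize(), regenerating &id001-style anchors
# (per ANCHOR_TEMPLATE) for any node that occurs more than once.
node = yaml.compose('shared: &x [1, 2]\nother: *x\n')
print(yaml.serialize(node))
# shared: &id001 [1, 2]
# other: *id001
```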
- -class ValueToken(Token): - id = ':' - -class BlockEntryToken(Token): - id = '-' - -class FlowEntryToken(Token): - id = ',' - -class AliasToken(Token): - id = '<alias>' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class AnchorToken(Token): - id = '<anchor>' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class TagToken(Token): - id = '<tag>' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class ScalarToken(Token): - id = '<scalar>' - def __init__(self, value, plain, start_mark, end_mark, style=None): - self.value = value - self.plain = plain - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - diff --git a/python.d/python_modules/third_party/__init__.py b/python.d/python_modules/third_party/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python.d/python_modules/third_party/lm_sensors.py b/python.d/python_modules/third_party/lm_sensors.py deleted file mode 100644 index 1d868f0e2..000000000 --- a/python.d/python_modules/third_party/lm_sensors.py +++ /dev/null @@ -1,257 +0,0 @@ -""" -@package sensors.py -Python Bindings for libsensors3 - -use the documentation of libsensors for the low level API. -see example.py for high level API usage. - -@author: Pavel Rojtberg (http://www.rojtberg.net) -@see: https://github.com/paroj/sensors.py -@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1> -""" - -from ctypes import * -import ctypes.util - -_libc = cdll.LoadLibrary(ctypes.util.find_library("c")) -# see https://github.com/paroj/sensors.py/issues/1 -_libc.free.argtypes = [c_void_p] -_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors")) - -version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii") - - -class bus_id(Structure): - _fields_ = [("type", c_short), - ("nr", c_short)] - - -class chip_name(Structure): - _fields_ = [("prefix", c_char_p), - ("bus", bus_id), - ("addr", c_int), - ("path", c_char_p)] - - -class feature(Structure): - _fields_ = [("name", c_char_p), - ("number", c_int), - ("type", c_int)] - - # sensors_feature_type - IN = 0x00 - FAN = 0x01 - TEMP = 0x02 - POWER = 0x03 - ENERGY = 0x04 - CURR = 0x05 - HUMIDITY = 0x06 - MAX_MAIN = 0x7 - VID = 0x10 - INTRUSION = 0x11 - MAX_OTHER = 0x12 - BEEP_ENABLE = 0x18 - - -class subfeature(Structure): - _fields_ = [("name", c_char_p), - ("number", c_int), - ("type", c_int), - ("mapping", c_int), - ("flags", c_uint)] - - -_hdl.sensors_get_detected_chips.restype = POINTER(chip_name) -_hdl.sensors_get_features.restype = POINTER(feature) -_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature) -_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it -_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not -_hdl.sensors_strerror.restype = c_char_p - -### RAW API ### -MODE_R = 1 -MODE_W = 2 -COMPUTE_MAPPING = 4 - - -def init(cfg_file=None): - file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None - - if _hdl.sensors_init(file) != 0: - raise Exception("sensors_init failed") - - if file is not None: - _libc.fclose(file) - - -def cleanup(): - _hdl.sensors_cleanup() - - -def parse_chip_name(orig_name): - ret = chip_name() - err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), 
byref(ret)) - - if err < 0: - raise Exception(strerror(err)) - - return ret - - -def strerror(errnum): - return _hdl.sensors_strerror(errnum).decode("utf-8") - - -def free_chip_name(chip): - _hdl.sensors_free_chip_name(byref(chip)) - - -def get_detected_chips(match, nr): - """ - @return: (chip, next nr to query) - """ - _nr = c_int(nr) - - if match is not None: - match = byref(match) - - chip = _hdl.sensors_get_detected_chips(match, byref(_nr)) - chip = chip.contents if bool(chip) else None - return chip, _nr.value - - -def chip_snprintf_name(chip, buffer_size=200): - """ - @param buffer_size defaults to the size used in the sensors utility - """ - ret = create_string_buffer(buffer_size) - err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip)) - - if err < 0: - raise Exception(strerror(err)) - - return ret.value.decode("utf-8") - - -def do_chip_sets(chip): - """ - @attention this function was not tested - """ - err = _hdl.sensors_do_chip_sets(byref(chip)) - if err < 0: - raise Exception(strerror(err)) - - -def get_adapter_name(bus): - return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8") - - -def get_features(chip, nr): - """ - @return: (feature, next nr to query) - """ - _nr = c_int(nr) - feature = _hdl.sensors_get_features(byref(chip), byref(_nr)) - feature = feature.contents if bool(feature) else None - return feature, _nr.value - - -def get_label(chip, feature): - ptr = _hdl.sensors_get_label(byref(chip), byref(feature)) - val = cast(ptr, c_char_p).value.decode("utf-8") - _libc.free(ptr) - return val - - -def get_all_subfeatures(chip, feature, nr): - """ - @return: (subfeature, next nr to query) - """ - _nr = c_int(nr) - subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr)) - subfeature = subfeature.contents if bool(subfeature) else None - return subfeature, _nr.value - - -def get_value(chip, subfeature_nr): - val = c_double() - err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val)) - if err < 0: - raise Exception(strerror(err)) - return val.value - - -def set_value(chip, subfeature_nr, value): - """ - @attention this function was not tested - """ - val = c_double(value) - err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val)) - if err < 0: - raise Exception(strerror(err)) - - -### Convenience API ### -class ChipIterator: - def __init__(self, match=None): - self.match = parse_chip_name(match) if match is not None else None - self.nr = 0 - - def __iter__(self): - return self - - def __next__(self): - chip, self.nr = get_detected_chips(self.match, self.nr) - - if chip is None: - raise StopIteration - - return chip - - def __del__(self): - if self.match is not None: - free_chip_name(self.match) - - def next(self): # python2 compability - return self.__next__() - - -class FeatureIterator: - def __init__(self, chip): - self.chip = chip - self.nr = 0 - - def __iter__(self): - return self - - def __next__(self): - feature, self.nr = get_features(self.chip, self.nr) - - if feature is None: - raise StopIteration - - return feature - - def next(self): # python2 compability - return self.__next__() - - -class SubFeatureIterator: - def __init__(self, chip, feature): - self.chip = chip - self.feature = feature - self.nr = 0 - - def __iter__(self): - return self - - def __next__(self): - subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr) - - if subfeature is None: - raise StopIteration - - return subfeature - - def next(self): # python2 compability - return self.__next__() \ No newline at 
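A sketch of the intended use of the iterators above, assuming libsensors is installed and at least one detectable chip is present (output varies by machine):

```python
import sensors  # this module, i.e. lm_sensors.py

sensors.init()  # optionally sensors.init("/etc/sensors3.conf")
try:
    for chip in sensors.ChipIterator():
        print(sensors.chip_snprintf_name(chip))
        for feature in sensors.FeatureIterator(chip):
            label = sensors.get_label(chip, feature)
            for sub in sensors.SubFeatureIterator(chip, feature):
                value = sensors.get_value(chip, sub.number)
                print("  %s (%s) = %.2f" % (label, sub.name.decode("utf-8"), value))
finally:
    sensors.cleanup()
```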
end of file diff --git a/python.d/python_modules/third_party/ordereddict.py b/python.d/python_modules/third_party/ordereddict.py deleted file mode 100644 index d0b97d47c..000000000 --- a/python.d/python_modules/third_party/ordereddict.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2009 Raymond Hettinger -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, merge, -# publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, -# subject to the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. - -from UserDict import DictMixin - - -class OrderedDict(dict, DictMixin): - - def __init__(self, *args, **kwds): - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = reversed(self).next() - else: - key = iter(self).next() - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return self.__class__, (items,), inst_dict - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if 
isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other diff --git a/python.d/python_modules/urllib3/__init__.py b/python.d/python_modules/urllib3/__init__.py deleted file mode 100644 index 26493ecb9..000000000 --- a/python.d/python_modules/urllib3/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -urllib3 - Thread-safe connection pooling and re-using. -""" - -from __future__ import absolute_import -import warnings - -from .connectionpool import ( - HTTPConnectionPool, - HTTPSConnectionPool, - connection_from_url -) - -from . import exceptions -from .filepost import encode_multipart_formdata -from .poolmanager import PoolManager, ProxyManager, proxy_from_url -from .response import HTTPResponse -from .util.request import make_headers -from .util.url import get_host -from .util.timeout import Timeout -from .util.retry import Retry - - -# Set default logging handler to avoid "No handler found" warnings. -import logging -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - class NullHandler(logging.Handler): - def emit(self, record): - pass - -__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' -__license__ = 'MIT' -__version__ = '1.21.1' - -__all__ = ( - 'HTTPConnectionPool', - 'HTTPSConnectionPool', - 'PoolManager', - 'ProxyManager', - 'HTTPResponse', - 'Retry', - 'Timeout', - 'add_stderr_logger', - 'connection_from_url', - 'disable_warnings', - 'encode_multipart_formdata', - 'get_host', - 'make_headers', - 'proxy_from_url', -) - -logging.getLogger(__name__).addHandler(NullHandler()) - - -def add_stderr_logger(level=logging.DEBUG): - """ - Helper for quickly adding a StreamHandler to the logger. Useful for - debugging. - - Returns the handler after adding it. - """ - # This method needs to be in this __init__.py to get the __name__ correct - # even if urllib3 is vendored within another package. - logger = logging.getLogger(__name__) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) - logger.addHandler(handler) - logger.setLevel(level) - logger.debug('Added a stderr logging handler to logger: %s', __name__) - return handler - - -# ... Clean up. -del NullHandler - - -# All warning filters *must* be appended unless you're really certain that they -# shouldn't be: otherwise, it's very hard for users to use most Python -# mechanisms to silence them. -# SecurityWarning's always go off by default. -warnings.simplefilter('always', exceptions.SecurityWarning, append=True) -# SubjectAltNameWarning's should go off once per host -warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) -# InsecurePlatformWarning's don't vary between requests, so we keep it default. -warnings.simplefilter('default', exceptions.InsecurePlatformWarning, - append=True) -# SNIMissingWarnings should go off only once. -warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) - - -def disable_warnings(category=exceptions.HTTPWarning): - """ - Helper for quickly disabling all urllib3 warnings. 
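A quick usage sketch of the two helpers above (the import path is an assumption; the vendored copy lived under python.d/python_modules)::

    import logging
    from python_modules import urllib3  # hypothetical path to the vendored package

    urllib3.add_stderr_logger(logging.INFO)  # attach a stderr StreamHandler for debugging
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # mute one category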
- """ - warnings.simplefilter('ignore', category) diff --git a/python.d/python_modules/urllib3/_collections.py b/python.d/python_modules/urllib3/_collections.py deleted file mode 100644 index 4849ddecd..000000000 --- a/python.d/python_modules/urllib3/_collections.py +++ /dev/null @@ -1,314 +0,0 @@ -from __future__ import absolute_import -from collections import Mapping, MutableMapping -try: - from threading import RLock -except ImportError: # Platform-specific: No threads available - class RLock: - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_value, traceback): - pass - - -try: # Python 2.7+ - from collections import OrderedDict -except ImportError: - from .packages.ordered_dict import OrderedDict -from .packages.six import iterkeys, itervalues, PY3 - - -__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] - - -_Null = object() - - -class RecentlyUsedContainer(MutableMapping): - """ - Provides a thread-safe dict-like container which maintains up to - ``maxsize`` keys while throwing away the least-recently-used keys beyond - ``maxsize``. - - :param maxsize: - Maximum number of recent elements to retain. - - :param dispose_func: - Every time an item is evicted from the container, - ``dispose_func(value)`` is called. Callback which will get called - """ - - ContainerCls = OrderedDict - - def __init__(self, maxsize=10, dispose_func=None): - self._maxsize = maxsize - self.dispose_func = dispose_func - - self._container = self.ContainerCls() - self.lock = RLock() - - def __getitem__(self, key): - # Re-insert the item, moving it to the end of the eviction line. - with self.lock: - item = self._container.pop(key) - self._container[key] = item - return item - - def __setitem__(self, key, value): - evicted_value = _Null - with self.lock: - # Possibly evict the existing value of 'key' - evicted_value = self._container.get(key, _Null) - self._container[key] = value - - # If we didn't evict an existing value, we might have to evict the - # least recently used item from the beginning of the container. - if len(self._container) > self._maxsize: - _key, evicted_value = self._container.popitem(last=False) - - if self.dispose_func and evicted_value is not _Null: - self.dispose_func(evicted_value) - - def __delitem__(self, key): - with self.lock: - value = self._container.pop(key) - - if self.dispose_func: - self.dispose_func(value) - - def __len__(self): - with self.lock: - return len(self._container) - - def __iter__(self): - raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') - - def clear(self): - with self.lock: - # Copy pointers to all values, then wipe the mapping - values = list(itervalues(self._container)) - self._container.clear() - - if self.dispose_func: - for value in values: - self.dispose_func(value) - - def keys(self): - with self.lock: - return list(iterkeys(self._container)) - - -class HTTPHeaderDict(MutableMapping): - """ - :param headers: - An iterable of field-value pairs. Must not contain multiple field names - when compared case-insensitively. - - :param kwargs: - Additional field-value pairs to pass in to ``dict.update``. - - A ``dict`` like container for storing HTTP Headers. - - Field names are stored and compared case-insensitively in compliance with - RFC 7230. Iteration provides the first case-sensitive key seen for each - case-insensitive pair. - - Using ``__setitem__`` syntax overwrites fields that compare equal - case-insensitively in order to maintain ``dict``'s api. 
For fields that - compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` - in a loop. - - If multiple fields that are equal case-insensitively are passed to the - constructor or ``.update``, the behavior is undefined and some will be - lost. - - >>> headers = HTTPHeaderDict() - >>> headers.add('Set-Cookie', 'foo=bar') - >>> headers.add('set-cookie', 'baz=quxx') - >>> headers['content-length'] = '7' - >>> headers['SET-cookie'] - 'foo=bar, baz=quxx' - >>> headers['Content-Length'] - '7' - """ - - def __init__(self, headers=None, **kwargs): - super(HTTPHeaderDict, self).__init__() - self._container = OrderedDict() - if headers is not None: - if isinstance(headers, HTTPHeaderDict): - self._copy_from(headers) - else: - self.extend(headers) - if kwargs: - self.extend(kwargs) - - def __setitem__(self, key, val): - self._container[key.lower()] = [key, val] - return self._container[key.lower()] - - def __getitem__(self, key): - val = self._container[key.lower()] - return ', '.join(val[1:]) - - def __delitem__(self, key): - del self._container[key.lower()] - - def __contains__(self, key): - return key.lower() in self._container - - def __eq__(self, other): - if not isinstance(other, Mapping) and not hasattr(other, 'keys'): - return False - if not isinstance(other, type(self)): - other = type(self)(other) - return (dict((k.lower(), v) for k, v in self.itermerged()) == - dict((k.lower(), v) for k, v in other.itermerged())) - - def __ne__(self, other): - return not self.__eq__(other) - - if not PY3: # Python 2 - iterkeys = MutableMapping.iterkeys - itervalues = MutableMapping.itervalues - - __marker = object() - - def __len__(self): - return len(self._container) - - def __iter__(self): - # Only provide the originally cased names - for vals in self._container.values(): - yield vals[0] - - def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. - ''' - # Using the MutableMapping function directly fails due to the private marker. - # Using ordinary dict.pop would expose the internal structures. - # So let's reinvent the wheel. - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def discard(self, key): - try: - del self[key] - except KeyError: - pass - - def add(self, key, val): - """Adds a (name, value) pair, doesn't overwrite the value if it already - exists. - - >>> headers = HTTPHeaderDict(foo='bar') - >>> headers.add('Foo', 'baz') - >>> headers['foo'] - 'bar, baz' - """ - key_lower = key.lower() - new_vals = [key, val] - # Keep the common case aka no item present as fast as possible - vals = self._container.setdefault(key_lower, new_vals) - if new_vals is not vals: - vals.append(val) - - def extend(self, *args, **kwargs): - """Generic import function for any type of header-like object. 
- Adapted version of MutableMapping.update in order to insert items - with self.add instead of self.__setitem__ - """ - if len(args) > 1: - raise TypeError("extend() takes at most 1 positional " - "arguments ({0} given)".format(len(args))) - other = args[0] if len(args) >= 1 else () - - if isinstance(other, HTTPHeaderDict): - for key, val in other.iteritems(): - self.add(key, val) - elif isinstance(other, Mapping): - for key in other: - self.add(key, other[key]) - elif hasattr(other, "keys"): - for key in other.keys(): - self.add(key, other[key]) - else: - for key, value in other: - self.add(key, value) - - for key, value in kwargs.items(): - self.add(key, value) - - def getlist(self, key): - """Returns a list of all the values for the named field. Returns an - empty list if the key doesn't exist.""" - try: - vals = self._container[key.lower()] - except KeyError: - return [] - else: - return vals[1:] - - # Backwards compatibility for httplib - getheaders = getlist - getallmatchingheaders = getlist - iget = getlist - - def __repr__(self): - return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) - - def _copy_from(self, other): - for key in other: - val = other.getlist(key) - if isinstance(val, list): - # Don't need to convert tuples - val = list(val) - self._container[key.lower()] = [key] + val - - def copy(self): - clone = type(self)() - clone._copy_from(self) - return clone - - def iteritems(self): - """Iterate over all header lines, including duplicate ones.""" - for key in self: - vals = self._container[key.lower()] - for val in vals[1:]: - yield vals[0], val - - def itermerged(self): - """Iterate over all headers, merging duplicate ones together.""" - for key in self: - val = self._container[key.lower()] - yield val[0], ', '.join(val[1:]) - - def items(self): - return list(self.iteritems()) - - @classmethod - def from_httplib(cls, message): # Python 2 - """Read headers from a Python 2 httplib message object.""" - # python2.7 does not expose a proper API for exporting multiheaders - # efficiently. This function re-reads raw lines from the message - # object and extracts the multiheaders properly. - headers = [] - - for line in message.headers: - if line.startswith((' ', '\t')): - key, value = headers[-1] - headers[-1] = (key, value + '\r\n' + line.rstrip()) - continue - - key, value = line.split(':', 1) - headers.append((key, value.strip())) - - return cls(headers) diff --git a/python.d/python_modules/urllib3/connection.py b/python.d/python_modules/urllib3/connection.py deleted file mode 100644 index c0d832998..000000000 --- a/python.d/python_modules/urllib3/connection.py +++ /dev/null @@ -1,373 +0,0 @@ -from __future__ import absolute_import -import datetime -import logging -import os -import sys -import socket -from socket import error as SocketError, timeout as SocketTimeout -import warnings -from .packages import six -from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection -from .packages.six.moves.http_client import HTTPException # noqa: F401 - -try: # Compiled with SSL? - import ssl - BaseSSLError = ssl.SSLError -except (ImportError, AttributeError): # Platform-specific: No SSL. - ssl = None - - class BaseSSLError(BaseException): - pass - - -try: # Python 3: - # Not a no-op, we're adding this to the namespace so it can be imported. 
- ConnectionError = ConnectionError -except NameError: # Python 2: - class ConnectionError(Exception): - pass - - -from .exceptions import ( - NewConnectionError, - ConnectTimeoutError, - SubjectAltNameWarning, - SystemTimeWarning, -) -from .packages.ssl_match_hostname import match_hostname, CertificateError - -from .util.ssl_ import ( - resolve_cert_reqs, - resolve_ssl_version, - assert_fingerprint, - create_urllib3_context, - ssl_wrap_socket -) - - -from .util import connection - -from ._collections import HTTPHeaderDict - -log = logging.getLogger(__name__) - -port_by_scheme = { - 'http': 80, - 'https': 443, -} - -# When updating RECENT_DATE, move it to -# within two years of the current date, and no -# earlier than 6 months ago. -RECENT_DATE = datetime.date(2016, 1, 1) - - -class DummyConnection(object): - """Used to detect a failed ConnectionCls import.""" - pass - - -class HTTPConnection(_HTTPConnection, object): - """ - Based on httplib.HTTPConnection but provides an extra constructor - backwards-compatibility layer between older and newer Pythons. - - Additional keyword parameters are used to configure attributes of the connection. - Accepted parameters include: - - - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - - ``source_address``: Set the source address for the current connection. - - .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x - - - ``socket_options``: Set specific options on the underlying socket. If not specified, then - defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling - Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. - - For example, if you wish to enable TCP Keep Alive in addition to the defaults, - you might pass:: - - HTTPConnection.default_socket_options + [ - (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), - ] - - Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). - """ - - default_port = port_by_scheme['http'] - - #: Disable Nagle's algorithm by default. - #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` - default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] - - #: Whether this connection verifies the host's certificate. - is_verified = False - - def __init__(self, *args, **kw): - if six.PY3: # Python 3 - kw.pop('strict', None) - - # Pre-set source_address in case we have an older Python like 2.6. - self.source_address = kw.get('source_address') - - if sys.version_info < (2, 7): # Python 2.6 - # _HTTPConnection on Python 2.6 will balk at this keyword arg, but - # not newer versions. We can still use it when creating a - # connection though, so we pop it *after* we have saved it as - # self.source_address. - kw.pop('source_address', None) - - #: The socket options provided by the user. If no options are - #: provided, we use the default options. - self.socket_options = kw.pop('socket_options', self.default_socket_options) - - # Superclass also sets self.source_address in Python 2.7+. - _HTTPConnection.__init__(self, *args, **kw) - - def _new_conn(self): - """ Establish a socket connection and set nodelay settings on it. - - :return: New socket connection. 
- """ - extra_kw = {} - if self.source_address: - extra_kw['source_address'] = self.source_address - - if self.socket_options: - extra_kw['socket_options'] = self.socket_options - - try: - conn = connection.create_connection( - (self.host, self.port), self.timeout, **extra_kw) - - except SocketTimeout as e: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) - - except SocketError as e: - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) - - return conn - - def _prepare_conn(self, conn): - self.sock = conn - # the _tunnel_host attribute was added in python 2.6.3 (via - # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do - # not have them. - if getattr(self, '_tunnel_host', None): - # TODO: Fix tunnel so it doesn't depend on self.sock state. - self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - def request_chunked(self, method, url, body=None, headers=None): - """ - Alternative to the common request method, which sends the - body with chunked encoding and not as one block - """ - headers = HTTPHeaderDict(headers if headers is not None else {}) - skip_accept_encoding = 'accept-encoding' in headers - skip_host = 'host' in headers - self.putrequest( - method, - url, - skip_accept_encoding=skip_accept_encoding, - skip_host=skip_host - ) - for header, value in headers.items(): - self.putheader(header, value) - if 'transfer-encoding' not in headers: - self.putheader('Transfer-Encoding', 'chunked') - self.endheaders() - - if body is not None: - stringish_types = six.string_types + (six.binary_type,) - if isinstance(body, stringish_types): - body = (body,) - for chunk in body: - if not chunk: - continue - if not isinstance(chunk, six.binary_type): - chunk = chunk.encode('utf8') - len_str = hex(len(chunk))[2:] - self.send(len_str.encode('utf-8')) - self.send(b'\r\n') - self.send(chunk) - self.send(b'\r\n') - - # After the if clause, to always have a closed body - self.send(b'0\r\n\r\n') - - -class HTTPSConnection(HTTPConnection): - default_port = port_by_scheme['https'] - - ssl_version = None - - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - ssl_context=None, **kw): - - HTTPConnection.__init__(self, host, port, strict=strict, - timeout=timeout, **kw) - - self.key_file = key_file - self.cert_file = cert_file - self.ssl_context = ssl_context - - # Required property for Google AppEngine 1.9.0 which otherwise causes - # HTTPS requests to go out as HTTP. (See Issue #356) - self._protocol = 'https' - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - if self.ssl_context is None: - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(None), - cert_reqs=resolve_cert_reqs(None), - ) - - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - ssl_context=self.ssl_context, - ) - - -class VerifiedHTTPSConnection(HTTPSConnection): - """ - Based on httplib.HTTPSConnection but wraps the socket with - SSL certification. 
- """ - cert_reqs = None - ca_certs = None - ca_cert_dir = None - ssl_version = None - assert_fingerprint = None - - def set_cert(self, key_file=None, cert_file=None, - cert_reqs=None, ca_certs=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None): - """ - This method should only be called once, before the connection is used. - """ - # If cert_reqs is not provided, we can try to guess. If the user gave - # us a cert database, we assume they want to use it: otherwise, if - # they gave us an SSL Context object we should use whatever is set for - # it. - if cert_reqs is None: - if ca_certs or ca_cert_dir: - cert_reqs = 'CERT_REQUIRED' - elif self.ssl_context is not None: - cert_reqs = self.ssl_context.verify_mode - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - self.ca_certs = ca_certs and os.path.expanduser(ca_certs) - self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) - - def connect(self): - # Add certificate verification - conn = self._new_conn() - - hostname = self.host - if getattr(self, '_tunnel_host', None): - # _tunnel_host was added in Python 2.6.3 - # (See: http://hg.python.org/cpython/rev/0f57b30a152f) - - self.sock = conn - # Calls self._set_hostport(), so self.host is - # self._tunnel_host below. - self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - # Override the host with the one we're requesting data from. - hostname = self._tunnel_host - - is_time_off = datetime.date.today() < RECENT_DATE - if is_time_off: - warnings.warn(( - 'System time is way off (before {0}). This will probably ' - 'lead to SSL verification errors').format(RECENT_DATE), - SystemTimeWarning - ) - - # Wrap socket using verification with the root certs in - # trusted_root_certs - if self.ssl_context is None: - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(self.ssl_version), - cert_reqs=resolve_cert_reqs(self.cert_reqs), - ) - - context = self.ssl_context - context.verify_mode = resolve_cert_reqs(self.cert_reqs) - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - server_hostname=hostname, - ssl_context=context) - - if self.assert_fingerprint: - assert_fingerprint(self.sock.getpeercert(binary_form=True), - self.assert_fingerprint) - elif context.verify_mode != ssl.CERT_NONE \ - and not getattr(context, 'check_hostname', False) \ - and self.assert_hostname is not False: - # While urllib3 attempts to always turn off hostname matching from - # the TLS library, this cannot always be done. So we check whether - # the TLS Library still thinks it's matching hostnames. - cert = self.sock.getpeercert() - if not cert.get('subjectAltName', ()): - warnings.warn(( - 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' - '`commonName` for now. This feature is being removed by major browsers and ' - 'deprecated by RFC 2818. 
(See https://github.com/shazow/urllib3/issues/497 ' - 'for details.)'.format(hostname)), - SubjectAltNameWarning - ) - _match_hostname(cert, self.assert_hostname or hostname) - - self.is_verified = ( - context.verify_mode == ssl.CERT_REQUIRED or - self.assert_fingerprint is not None - ) - - -def _match_hostname(cert, asserted_hostname): - try: - match_hostname(cert, asserted_hostname) - except CertificateError as e: - log.error( - 'Certificate did not match expected hostname: %s. ' - 'Certificate: %s', asserted_hostname, cert - ) - # Add cert to exception and reraise so client code can inspect - # the cert when catching the exception, if they want to - e._peer_cert = cert - raise - - -if ssl: - # Make a copy for testing. - UnverifiedHTTPSConnection = HTTPSConnection - HTTPSConnection = VerifiedHTTPSConnection -else: - HTTPSConnection = DummyConnection diff --git a/python.d/python_modules/urllib3/connectionpool.py b/python.d/python_modules/urllib3/connectionpool.py deleted file mode 100644 index b4f1166a6..000000000 --- a/python.d/python_modules/urllib3/connectionpool.py +++ /dev/null @@ -1,899 +0,0 @@ -from __future__ import absolute_import -import errno -import logging -import sys -import warnings - -from socket import error as SocketError, timeout as SocketTimeout -import socket - - -from .exceptions import ( - ClosedPoolError, - ProtocolError, - EmptyPoolError, - HeaderParsingError, - HostChangedError, - LocationValueError, - MaxRetryError, - ProxyError, - ReadTimeoutError, - SSLError, - TimeoutError, - InsecureRequestWarning, - NewConnectionError, -) -from .packages.ssl_match_hostname import CertificateError -from .packages import six -from .packages.six.moves import queue -from .connection import ( - port_by_scheme, - DummyConnection, - HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, - HTTPException, BaseSSLError, -) -from .request import RequestMethods -from .response import HTTPResponse - -from .util.connection import is_connection_dropped -from .util.request import set_file_position -from .util.response import assert_header_parsing -from .util.retry import Retry -from .util.timeout import Timeout -from .util.url import get_host, Url - - -if six.PY2: - # Queue is imported for side effects on MS Windows - import Queue as _unused_module_Queue # noqa: F401 - -xrange = six.moves.xrange - -log = logging.getLogger(__name__) - -_Default = object() - - -# Pool objects -class ConnectionPool(object): - """ - Base class for all connection pools, such as - :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. - """ - - scheme = None - QueueCls = queue.LifoQueue - - def __init__(self, host, port=None): - if not host: - raise LocationValueError("No host specified.") - - self.host = _ipv6_host(host).lower() - self.port = port - - def __str__(self): - return '%s(host=%r, port=%r)' % (type(self).__name__, - self.host, self.port) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - # Return False to re-raise any potential exceptions - return False - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - pass - - -# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 -_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) - - -class HTTPConnectionPool(ConnectionPool, RequestMethods): - """ - Thread-safe connection pool for one host. - - :param host: - Host used for this HTTP Connection (e.g. "localhost"), passed into - :class:`httplib.HTTPConnection`. 
- - :param port: - Port used for this HTTP Connection (None is equivalent to 80), passed - into :class:`httplib.HTTPConnection`. - - :param strict: - Causes BadStatusLine to be raised if the status line can't be parsed - as a valid HTTP/1.0 or 1.1 status line, passed into - :class:`httplib.HTTPConnection`. - - .. note:: - Only works in Python 2. This parameter is ignored in Python 3. - - :param timeout: - Socket timeout in seconds for each individual connection. This can - be a float or integer, which sets the timeout for the HTTP request, - or an instance of :class:`urllib3.util.Timeout` which gives you more - fine-grained control over request timeouts. After the constructor has - been parsed, this is always a `urllib3.util.Timeout` object. - - :param maxsize: - Number of connections to save that can be reused. More than 1 is useful - in multithreaded situations. If ``block`` is set to False, more - connections will be created but they will not be saved once they've - been used. - - :param block: - If set to True, no more than ``maxsize`` connections will be used at - a time. When no free connections are available, the call will block - until a connection has been released. This is a useful side effect for - particular multithreaded situations where one does not want to use more - than maxsize connections per host to prevent flooding. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param retries: - Retry configuration to use by default with requests in this pool. - - :param _proxy: - Parsed proxy URL, should not be used directly, instead, see - :class:`urllib3.connectionpool.ProxyManager`" - - :param _proxy_headers: - A dictionary with proxy headers, should not be used directly, - instead, see :class:`urllib3.connectionpool.ProxyManager`" - - :param \\**conn_kw: - Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, - :class:`urllib3.connection.HTTPSConnection` instances. - """ - - scheme = 'http' - ConnectionCls = HTTPConnection - ResponseCls = HTTPResponse - - def __init__(self, host, port=None, strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, - headers=None, retries=None, - _proxy=None, _proxy_headers=None, - **conn_kw): - ConnectionPool.__init__(self, host, port) - RequestMethods.__init__(self, headers) - - self.strict = strict - - if not isinstance(timeout, Timeout): - timeout = Timeout.from_float(timeout) - - if retries is None: - retries = Retry.DEFAULT - - self.timeout = timeout - self.retries = retries - - self.pool = self.QueueCls(maxsize) - self.block = block - - self.proxy = _proxy - self.proxy_headers = _proxy_headers or {} - - # Fill the queue up so that doing get() on it will block properly - for _ in xrange(maxsize): - self.pool.put(None) - - # These are mostly for testing and debugging purposes. - self.num_connections = 0 - self.num_requests = 0 - self.conn_kw = conn_kw - - if self.proxy: - # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. - # We cannot know if the user has added default socket options, so we cannot replace the - # list. - self.conn_kw.setdefault('socket_options', []) - - def _new_conn(self): - """ - Return a fresh :class:`HTTPConnection`. 
- """ - self.num_connections += 1 - log.debug("Starting new HTTP connection (%d): %s", - self.num_connections, self.host) - - conn = self.ConnectionCls(host=self.host, port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) - return conn - - def _get_conn(self, timeout=None): - """ - Get a connection. Will return a pooled connection if one is available. - - If no connections are available and :prop:`.block` is ``False``, then a - fresh connection is returned. - - :param timeout: - Seconds to wait before giving up and raising - :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and - :prop:`.block` is ``True``. - """ - conn = None - try: - conn = self.pool.get(block=self.block, timeout=timeout) - - except AttributeError: # self.pool is None - raise ClosedPoolError(self, "Pool is closed.") - - except queue.Empty: - if self.block: - raise EmptyPoolError(self, - "Pool reached maximum size and no more " - "connections are allowed.") - pass # Oh well, we'll create a new connection then - - # If this is a persistent connection, check if it got disconnected - if conn and is_connection_dropped(conn): - log.debug("Resetting dropped connection: %s", self.host) - conn.close() - if getattr(conn, 'auto_open', 1) == 0: - # This is a proxied connection that has been mutated by - # httplib._tunnel() and cannot be reused (since it would - # attempt to bypass the proxy) - conn = None - - return conn or self._new_conn() - - def _put_conn(self, conn): - """ - Put a connection back into the pool. - - :param conn: - Connection object for the current host and port as returned by - :meth:`._new_conn` or :meth:`._get_conn`. - - If the pool is already full, the connection is closed and discarded - because we exceeded maxsize. If connections are discarded frequently, - then maxsize should be increased. - - If the pool is closed, then the connection will be closed and discarded. - """ - try: - self.pool.put(conn, block=False) - return # Everything is dandy, done. - except AttributeError: - # self.pool is None. - pass - except queue.Full: - # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s", - self.host) - - # Connection never got put back into the pool, close it. - if conn: - conn.close() - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - pass - - def _prepare_proxy(self, conn): - # Nothing to do for HTTP connections. - pass - - def _get_timeout(self, timeout): - """ Helper that always returns a :class:`urllib3.util.Timeout` """ - if timeout is _Default: - return self.timeout.clone() - - if isinstance(timeout, Timeout): - return timeout.clone() - else: - # User passed us an int/float. This is for backwards compatibility, - # can be removed later - return Timeout.from_float(timeout) - - def _raise_timeout(self, err, url, timeout_value): - """Is the error actually a timeout? Will raise a ReadTimeout or pass""" - - if isinstance(err, SocketTimeout): - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - # See the above comment about EAGAIN in Python 3. In Python 2 we have - # to specifically catch it and throw the timeout error - if hasattr(err, 'errno') and err.errno in _blocking_errnos: - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - # Catch possible read timeouts thrown as SSL errors. If not the - # case, rethrow the original. 
We need to do this because of: - # http://bugs.python.org/issue10272 - if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - def _make_request(self, conn, method, url, timeout=_Default, chunked=False, - **httplib_request_kw): - """ - Perform a request on a given urllib connection object taken from our - pool. - - :param conn: - a connection from one of our connection pools - - :param timeout: - Socket timeout in seconds for the request. This can be a - float or integer, which will set the same timeout value for - the socket connect and the socket read, or an instance of - :class:`urllib3.util.Timeout`, which gives you more fine-grained - control over your timeouts. - """ - self.num_requests += 1 - - timeout_obj = self._get_timeout(timeout) - timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout - - # Trigger any extra validation we need to do. - try: - self._validate_conn(conn) - except (SocketTimeout, BaseSSLError) as e: - # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. - self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) - raise - - # conn.request() calls httplib.*.request, not the method in - # urllib3.request. It also calls makefile (recv) on the socket. - if chunked: - conn.request_chunked(method, url, **httplib_request_kw) - else: - conn.request(method, url, **httplib_request_kw) - - # Reset the timeout for the recv() on the socket - read_timeout = timeout_obj.read_timeout - - # App Engine doesn't have a sock attr - if getattr(conn, 'sock', None): - # In Python 3 socket.py will catch EAGAIN and return None when you - # try and read into the file pointer created by http.client, which - # instead raises a BadStatusLine exception. Instead of catching - # the exception and assuming all BadStatusLine exceptions are read - # timeouts, check for a zero timeout before making the request. - if read_timeout == 0: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) - if read_timeout is Timeout.DEFAULT_TIMEOUT: - conn.sock.settimeout(socket.getdefaulttimeout()) - else: # None or a value - conn.sock.settimeout(read_timeout) - - # Receive the response from the server - try: - try: # Python 2.7, use buffering of HTTP responses - httplib_response = conn.getresponse(buffering=True) - except TypeError: # Python 2.6 and older, Python 3 - try: - httplib_response = conn.getresponse() - except Exception as e: - # Remove the TypeError from the exception chain in Python 3; - # otherwise it looks like a programming error was the cause. - six.raise_from(e, None) - except (SocketTimeout, BaseSSLError, SocketError) as e: - self._raise_timeout(err=e, url=url, timeout_value=read_timeout) - raise - - # AppEngine doesn't have a version attr. - http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') - log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, - method, url, http_version, httplib_response.status, - httplib_response.length) - - try: - assert_header_parsing(httplib_response.msg) - except HeaderParsingError as hpe: # Platform-specific: Python 3 - log.warning( - 'Failed to parse headers (url=%s): %s', - self._absolute_url(url), hpe, exc_info=True) - - return httplib_response - - def _absolute_url(self, path): - return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url - - def close(self): - """ - Close all pooled connections and disable the pool. 
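To make the ``maxsize``/``block`` semantics documented above concrete, a minimal sketch (assumes something is listening on localhost:8080)::

    pool = HTTPConnectionPool('localhost', port=8080, maxsize=4, block=True)
    r = pool.urlopen('GET', '/')  # checks a connection out of the pool and back in
    print(r.status)
    pool.close()                  # drains the queue, closing every pooled connection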
- """ - # Disable access to the pool - old_pool, self.pool = self.pool, None - - try: - while True: - conn = old_pool.get(block=False) - if conn: - conn.close() - - except queue.Empty: - pass # Done. - - def is_same_host(self, url): - """ - Check if the given ``url`` is a member of the same host as this - connection pool. - """ - if url.startswith('/'): - return True - - # TODO: Add optional support for socket.gethostbyname checking. - scheme, host, port = get_host(url) - - host = _ipv6_host(host).lower() - - # Use explicit default port for comparison when none is given - if self.port and not port: - port = port_by_scheme.get(scheme) - elif not self.port and port == port_by_scheme.get(scheme): - port = None - - return (scheme, host, port) == (self.scheme, self.host, self.port) - - def urlopen(self, method, url, body=None, headers=None, retries=None, - redirect=True, assert_same_host=True, timeout=_Default, - pool_timeout=None, release_conn=None, chunked=False, - body_pos=None, **response_kw): - """ - Get a connection from the pool and perform an HTTP request. This is the - lowest level call for making a request, so you'll need to specify all - the raw details. - - .. note:: - - More commonly, it's appropriate to use a convenience method provided - by :class:`.RequestMethods`, such as :meth:`request`. - - .. note:: - - `release_conn` will only behave as expected if - `preload_content=False` because we want to make - `preload_content=False` the default behaviour someday soon without - breaking backwards compatibility. - - :param method: - HTTP request method (such as GET, POST, PUT, etc.) - - :param body: - Data to send in the request body (useful for creating - POST requests, see HTTPConnectionPool.post_url for - more convenience). - - :param headers: - Dictionary of custom headers to send, such as User-Agent, - If-None-Match, etc. If None, pool headers are used. If provided, - these headers completely replace any pool-specific headers. - - :param retries: - Configure the number of retries to allow before raising a - :class:`~urllib3.exceptions.MaxRetryError` exception. - - Pass ``None`` to retry until you receive a response. Pass a - :class:`~urllib3.util.retry.Retry` object for fine-grained control - over different types of retries. - Pass an integer number to retry connection errors that many times, - but no other types of errors. Pass zero to never retry. - - If ``False``, then retries are disabled and any exception is raised - immediately. Also, instead of raising a MaxRetryError on redirects, - the redirect response will be returned. - - :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. - - :param redirect: - If True, automatically handle redirects (status codes 301, 302, - 303, 307, 308). Each redirect counts as a retry. Disabling retries - will disable redirect, too. - - :param assert_same_host: - If ``True``, will make sure that the host of the pool requests is - consistent else will raise HostChangedError. When False, you can - use the pool on an HTTP proxy and request foreign hosts. - - :param timeout: - If specified, overrides the default timeout for this one - request. It may be a float (in seconds) or an instance of - :class:`urllib3.util.Timeout`. - - :param pool_timeout: - If set and the pool is set to block=True, then this method will - block for ``pool_timeout`` seconds and raise EmptyPoolError if no - connection is available within the time period. 
- - :param release_conn: - If False, then the urlopen call will not release the connection - back into the pool once a response is received (but will release if - you read the entire contents of the response such as when - `preload_content=True`). This is useful if you're not preloading - the response's content immediately. You will need to call - ``r.release_conn()`` on the response ``r`` to return the connection - back into the pool. If None, it takes the value of - ``response_kw.get('preload_content', True)``. - - :param chunked: - If True, urllib3 will send the body using chunked transfer - encoding. Otherwise, urllib3 will send the body using the standard - content-length form. Defaults to False. - - :param int body_pos: - Position to seek to in file-like body in the event of a retry or - redirect. Typically this won't need to be set because urllib3 will - auto-populate the value when needed. - - :param \\**response_kw: - Additional parameters are passed to - :meth:`urllib3.response.HTTPResponse.from_httplib` - """ - if headers is None: - headers = self.headers - - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect, default=self.retries) - - if release_conn is None: - release_conn = response_kw.get('preload_content', True) - - # Check host - if assert_same_host and not self.is_same_host(url): - raise HostChangedError(self, url, retries) - - conn = None - - # Track whether `conn` needs to be released before - # returning/raising/recursing. Update this variable if necessary, and - # leave `release_conn` constant throughout the function. That way, if - # the function recurses, the original value of `release_conn` will be - # passed down into the recursive call, and its value will be respected. - # - # See issue #651 [1] for details. - # - # [1] <https://github.com/shazow/urllib3/issues/651> - release_this_conn = release_conn - - # Merge the proxy headers. Only do this in HTTP. We have to copy the - # headers dict so we can safely change it without those changes being - # reflected in anyone else's copy. - if self.scheme == 'http': - headers = headers.copy() - headers.update(self.proxy_headers) - - # Must keep the exception bound to a separate variable or else Python 3 - # complains about UnboundLocalError. - err = None - - # Keep track of whether we cleanly exited the except block. This - # ensures we do proper cleanup in finally. - clean_exit = False - - # Rewind body position, if needed. Record current position - # for future rewinds in the event of a redirect/retry. - body_pos = set_file_position(body, body_pos) - - try: - # Request a connection from the queue. - timeout_obj = self._get_timeout(timeout) - conn = self._get_conn(timeout=pool_timeout) - - conn.timeout = timeout_obj.connect_timeout - - is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) - if is_new_proxy_conn: - self._prepare_proxy(conn) - - # Make the request on the httplib connection object. - httplib_response = self._make_request(conn, method, url, - timeout=timeout_obj, - body=body, headers=headers, - chunked=chunked) - - # If we're going to release the connection in ``finally:``, then - # the response doesn't need to know about the connection. Otherwise - # it will also try to release it and we'll have a double-release - # mess. 
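    # As the release_conn notes above describe, a caller that passes
    # preload_content=False takes over releasing the connection; a sketch:
    #
    #     r = pool.urlopen('GET', '/', preload_content=False)
    #     for chunk in r.stream(1024):
    #         handle(chunk)        # hypothetical consumer
    #     r.release_conn()         # hand the connection back to the pool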
- response_conn = conn if not release_conn else None - - # Pass method to Response for length checking - response_kw['request_method'] = method - - # Import httplib's response into our own wrapper object - response = self.ResponseCls.from_httplib(httplib_response, - pool=self, - connection=response_conn, - retries=retries, - **response_kw) - - # Everything went great! - clean_exit = True - - except queue.Empty: - # Timed out by queue. - raise EmptyPoolError(self, "No pool connections are available.") - - except (BaseSSLError, CertificateError) as e: - # Close the connection. If a connection is reused on which there - # was a Certificate error, the next request will certainly raise - # another Certificate error. - clean_exit = False - raise SSLError(e) - - except SSLError: - # Treat SSLError separately from BaseSSLError to preserve - # traceback. - clean_exit = False - raise - - except (TimeoutError, HTTPException, SocketError, ProtocolError) as e: - # Discard the connection for these exceptions. It will be - # be replaced during the next _get_conn() call. - clean_exit = False - - if isinstance(e, (SocketError, NewConnectionError)) and self.proxy: - e = ProxyError('Cannot connect to proxy.', e) - elif isinstance(e, (SocketError, HTTPException)): - e = ProtocolError('Connection aborted.', e) - - retries = retries.increment(method, url, error=e, _pool=self, - _stacktrace=sys.exc_info()[2]) - retries.sleep() - - # Keep track of the error for the retry warning. - err = e - - finally: - if not clean_exit: - # We hit some kind of exception, handled or otherwise. We need - # to throw the connection away unless explicitly told not to. - # Close the connection, set the variable to None, and make sure - # we put the None back in the pool to avoid leaking it. - conn = conn and conn.close() - release_this_conn = True - - if release_this_conn: - # Put the connection back to be reused. If the connection is - # expired then it will be None, which will get replaced with a - # fresh connection during _get_conn. - self._put_conn(conn) - - if not conn: - # Try again - log.warning("Retrying (%r) after connection " - "broken by '%r': %s", retries, err, url) - return self.urlopen(method, url, body, headers, retries, - redirect, assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) - - # Handle redirect? - redirect_location = redirect and response.get_redirect_location() - if redirect_location: - if response.status == 303: - method = 'GET' - - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - # Release the connection for this response, since we're not - # returning it to be released manually. - response.release_conn() - raise - return response - - retries.sleep_for_retry(response) - log.debug("Redirecting %s -> %s", url, redirect_location) - return self.urlopen( - method, redirect_location, body, headers, - retries=retries, redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) - - # Check if we should retry the HTTP response. 
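    # For instance, a caller can opt into status-based retries with a policy
    # built from the Retry class imported at the top of this module (sketch):
    #
    #     retries = Retry(total=3, status_forcelist=[500, 503], backoff_factor=0.5)
    #     pool.urlopen('GET', '/flaky', retries=retries)
    #
    # is_retry() below consults such a policy, taking any Retry-After header
    # the server sent into account.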
- has_retry_after = bool(response.getheader('Retry-After')) - if retries.is_retry(method, response.status, has_retry_after): - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_status: - # Release the connection for this response, since we're not - # returning it to be released manually. - response.release_conn() - raise - return response - retries.sleep(response) - log.debug("Retry: %s", url) - return self.urlopen( - method, url, body, headers, - retries=retries, redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, - body_pos=body_pos, **response_kw) - - return response - - -class HTTPSConnectionPool(HTTPConnectionPool): - """ - Same as :class:`.HTTPConnectionPool`, but HTTPS. - - When Python is compiled with the :mod:`ssl` module, then - :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, - instead of :class:`.HTTPSConnection`. - - :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, - ``assert_hostname`` and ``host`` in this order to verify connections. - If ``assert_hostname`` is False, no verification is done. - - The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, - ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is - available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade - the connection socket into an SSL socket. - """ - - scheme = 'https' - ConnectionCls = HTTPSConnection - - def __init__(self, host, port=None, - strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, - block=False, headers=None, retries=None, - _proxy=None, _proxy_headers=None, - key_file=None, cert_file=None, cert_reqs=None, - ca_certs=None, ssl_version=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None, **conn_kw): - - HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, - block, headers, retries, _proxy, _proxy_headers, - **conn_kw) - - if ca_certs and cert_reqs is None: - cert_reqs = 'CERT_REQUIRED' - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.ca_certs = ca_certs - self.ca_cert_dir = ca_cert_dir - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - - def _prepare_conn(self, conn): - """ - Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` - and establish the tunnel if proxy is used. - """ - - if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert(key_file=self.key_file, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint) - conn.ssl_version = self.ssl_version - return conn - - def _prepare_proxy(self, conn): - """ - Establish tunnel connection early, because otherwise httplib - would improperly set Host: header to proxy's IP:port. - """ - # Python 2.7+ - try: - set_tunnel = conn.set_tunnel - except AttributeError: # Platform-specific: Python 2.6 - set_tunnel = conn._set_tunnel - - if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older - set_tunnel(self.host, self.port) - else: - set_tunnel(self.host, self.port, self.proxy_headers) - - conn.connect() - - def _new_conn(self): - """ - Return a fresh :class:`httplib.HTTPSConnection`. 
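For reference, a verified HTTPS pool is typically constructed along these lines (a sketch; the CA bundle path is a Debian-style assumption)::

    pool = HTTPSConnectionPool(
        'example.org', port=443,
        cert_reqs='CERT_REQUIRED',
        ca_certs='/etc/ssl/certs/ca-certificates.crt',  # system-specific CA bundle
    )
    r = pool.urlopen('GET', '/')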
- """ - self.num_connections += 1 - log.debug("Starting new HTTPS connection (%d): %s", - self.num_connections, self.host) - - if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError("Can't connect to HTTPS URL because the SSL " - "module is not available.") - - actual_host = self.host - actual_port = self.port - if self.proxy is not None: - actual_host = self.proxy.host - actual_port = self.proxy.port - - conn = self.ConnectionCls(host=actual_host, port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) - - return self._prepare_conn(conn) - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - super(HTTPSConnectionPool, self)._validate_conn(conn) - - # Force connect early to allow us to validate the connection. - if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` - conn.connect() - - if not conn.is_verified: - warnings.warn(( - 'Unverified HTTPS request is being made. ' - 'Adding certificate verification is strongly advised. See: ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings'), - InsecureRequestWarning) - - -def connection_from_url(url, **kw): - """ - Given a url, return an :class:`.ConnectionPool` instance of its host. - - This is a shortcut for not having to parse out the scheme, host, and port - of the url before creating an :class:`.ConnectionPool` instance. - - :param url: - Absolute URL string that must include the scheme. Port is optional. - - :param \\**kw: - Passes additional parameters to the constructor of the appropriate - :class:`.ConnectionPool`. Useful for specifying things like - timeout, maxsize, headers, etc. - - Example:: - - >>> conn = connection_from_url('http://google.com/') - >>> r = conn.request('GET', '/') - """ - scheme, host, port = get_host(url) - port = port or port_by_scheme.get(scheme, 80) - if scheme == 'https': - return HTTPSConnectionPool(host, port=port, **kw) - else: - return HTTPConnectionPool(host, port=port, **kw) - - -def _ipv6_host(host): - """ - Process IPv6 address literals - """ - - # httplib doesn't like it when we include brackets in IPv6 addresses - # Specifically, if we include brackets but also pass the port then - # httplib crazily doubles up the square brackets on the Host header. - # Instead, we need to make sure we never pass ``None`` as the port. - # However, for backward compatibility reasons we can't actually - # *assert* that. 
See http://bugs.python.org/issue28539 - # - # Also if an IPv6 address literal has a zone identifier, the - # percent sign might be URIencoded, convert it back into ASCII - if host.startswith('[') and host.endswith(']'): - host = host.replace('%25', '%').strip('[]') - return host diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/python.d/python_modules/urllib3/contrib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py deleted file mode 100644 index e26b84086..000000000 --- a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py +++ /dev/null @@ -1,590 +0,0 @@ -""" -This module uses ctypes to bind a whole bunch of functions and constants from -SecureTransport. The goal here is to provide the low-level API to -SecureTransport. These are essentially the C-level functions and constants, and -they're pretty gross to work with. - -This code is a bastardised version of the code found in Will Bond's oscrypto -library. An enormous debt is owed to him for blazing this trail for us. For -that reason, this code should be considered to be covered both by urllib3's -license and by oscrypto's: - - Copyright (c) 2015-2016 Will Bond <will@wbond.net> - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. 
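The binding pattern used throughout the rest of this file is the standard ctypes one: load the library, then declare each function's ``argtypes`` and ``restype`` so calls are marshalled and type-checked. A self-contained sketch against libm instead of Security (assumes a Unix-like system where ``find_library('m')`` resolves)::

    from ctypes import CDLL, c_double
    from ctypes.util import find_library

    libm = CDLL(find_library('m'))
    libm.fabs.argtypes = [c_double]  # declare the C parameter types...
    libm.fabs.restype = c_double     # ...and the return type
    assert libm.fabs(-3.0) == 3.0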
-""" -from __future__ import absolute_import - -import platform -from ctypes.util import find_library -from ctypes import ( - c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, - c_bool -) -from ctypes import CDLL, POINTER, CFUNCTYPE - - -security_path = find_library('Security') -if not security_path: - raise ImportError('The library Security could not be found') - - -core_foundation_path = find_library('CoreFoundation') -if not core_foundation_path: - raise ImportError('The library CoreFoundation could not be found') - - -version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split('.'))) -if version_info < (10, 8): - raise OSError( - 'Only OS X 10.8 and newer are supported, not %s.%s' % ( - version_info[0], version_info[1] - ) - ) - -Security = CDLL(security_path, use_errno=True) -CoreFoundation = CDLL(core_foundation_path, use_errno=True) - -Boolean = c_bool -CFIndex = c_long -CFStringEncoding = c_uint32 -CFData = c_void_p -CFString = c_void_p -CFArray = c_void_p -CFMutableArray = c_void_p -CFDictionary = c_void_p -CFError = c_void_p -CFType = c_void_p -CFTypeID = c_ulong - -CFTypeRef = POINTER(CFType) -CFAllocatorRef = c_void_p - -OSStatus = c_int32 - -CFDataRef = POINTER(CFData) -CFStringRef = POINTER(CFString) -CFArrayRef = POINTER(CFArray) -CFMutableArrayRef = POINTER(CFMutableArray) -CFDictionaryRef = POINTER(CFDictionary) -CFArrayCallBacks = c_void_p -CFDictionaryKeyCallBacks = c_void_p -CFDictionaryValueCallBacks = c_void_p - -SecCertificateRef = POINTER(c_void_p) -SecExternalFormat = c_uint32 -SecExternalItemType = c_uint32 -SecIdentityRef = POINTER(c_void_p) -SecItemImportExportFlags = c_uint32 -SecItemImportExportKeyParameters = c_void_p -SecKeychainRef = POINTER(c_void_p) -SSLProtocol = c_uint32 -SSLCipherSuite = c_uint32 -SSLContextRef = POINTER(c_void_p) -SecTrustRef = POINTER(c_void_p) -SSLConnectionRef = c_uint32 -SecTrustResultType = c_uint32 -SecTrustOptionFlags = c_uint32 -SSLProtocolSide = c_uint32 -SSLConnectionType = c_uint32 -SSLSessionOption = c_uint32 - - -try: - Security.SecItemImport.argtypes = [ - CFDataRef, - CFStringRef, - POINTER(SecExternalFormat), - POINTER(SecExternalItemType), - SecItemImportExportFlags, - POINTER(SecItemImportExportKeyParameters), - SecKeychainRef, - POINTER(CFArrayRef), - ] - Security.SecItemImport.restype = OSStatus - - Security.SecCertificateGetTypeID.argtypes = [] - Security.SecCertificateGetTypeID.restype = CFTypeID - - Security.SecIdentityGetTypeID.argtypes = [] - Security.SecIdentityGetTypeID.restype = CFTypeID - - Security.SecKeyGetTypeID.argtypes = [] - Security.SecKeyGetTypeID.restype = CFTypeID - - Security.SecCertificateCreateWithData.argtypes = [ - CFAllocatorRef, - CFDataRef - ] - Security.SecCertificateCreateWithData.restype = SecCertificateRef - - Security.SecCertificateCopyData.argtypes = [ - SecCertificateRef - ] - Security.SecCertificateCopyData.restype = CFDataRef - - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SecIdentityCreateWithCertificate.argtypes = [ - CFTypeRef, - SecCertificateRef, - POINTER(SecIdentityRef) - ] - Security.SecIdentityCreateWithCertificate.restype = OSStatus - - Security.SecKeychainCreate.argtypes = [ - c_char_p, - c_uint32, - c_void_p, - Boolean, - c_void_p, - POINTER(SecKeychainRef) - ] - Security.SecKeychainCreate.restype = OSStatus - - Security.SecKeychainDelete.argtypes = [ - SecKeychainRef - ] - Security.SecKeychainDelete.restype = OSStatus - - 
Security.SecPKCS12Import.argtypes = [ - CFDataRef, - CFDictionaryRef, - POINTER(CFArrayRef) - ] - Security.SecPKCS12Import.restype = OSStatus - - SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) - - Security.SSLSetIOFuncs.argtypes = [ - SSLContextRef, - SSLReadFunc, - SSLWriteFunc - ] - Security.SSLSetIOFuncs.restype = OSStatus - - Security.SSLSetPeerID.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] - Security.SSLSetPeerID.restype = OSStatus - - Security.SSLSetCertificate.argtypes = [ - SSLContextRef, - CFArrayRef - ] - Security.SSLSetCertificate.restype = OSStatus - - Security.SSLSetCertificateAuthorities.argtypes = [ - SSLContextRef, - CFTypeRef, - Boolean - ] - Security.SSLSetCertificateAuthorities.restype = OSStatus - - Security.SSLSetConnection.argtypes = [ - SSLContextRef, - SSLConnectionRef - ] - Security.SSLSetConnection.restype = OSStatus - - Security.SSLSetPeerDomainName.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] - Security.SSLSetPeerDomainName.restype = OSStatus - - Security.SSLHandshake.argtypes = [ - SSLContextRef - ] - Security.SSLHandshake.restype = OSStatus - - Security.SSLRead.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] - Security.SSLRead.restype = OSStatus - - Security.SSLWrite.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] - Security.SSLWrite.restype = OSStatus - - Security.SSLClose.argtypes = [ - SSLContextRef - ] - Security.SSLClose.restype = OSStatus - - Security.SSLGetNumberSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(c_size_t) - ] - Security.SSLGetNumberSupportedCiphers.restype = OSStatus - - Security.SSLGetSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t) - ] - Security.SSLGetSupportedCiphers.restype = OSStatus - - Security.SSLSetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - c_size_t - ] - Security.SSLSetEnabledCiphers.restype = OSStatus - - Security.SSLGetNumberEnabledCiphers.argtype = [ - SSLContextRef, - POINTER(c_size_t) - ] - Security.SSLGetNumberEnabledCiphers.restype = OSStatus - - Security.SSLGetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t) - ] - Security.SSLGetEnabledCiphers.restype = OSStatus - - Security.SSLGetNegotiatedCipher.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite) - ] - Security.SSLGetNegotiatedCipher.restype = OSStatus - - Security.SSLGetNegotiatedProtocolVersion.argtypes = [ - SSLContextRef, - POINTER(SSLProtocol) - ] - Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - - Security.SSLCopyPeerTrust.argtypes = [ - SSLContextRef, - POINTER(SecTrustRef) - ] - Security.SSLCopyPeerTrust.restype = OSStatus - - Security.SecTrustSetAnchorCertificates.argtypes = [ - SecTrustRef, - CFArrayRef - ] - Security.SecTrustSetAnchorCertificates.restype = OSStatus - - Security.SecTrustSetAnchorCertificatesOnly.argstypes = [ - SecTrustRef, - Boolean - ] - Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - - Security.SecTrustEvaluate.argtypes = [ - SecTrustRef, - POINTER(SecTrustResultType) - ] - Security.SecTrustEvaluate.restype = OSStatus - - Security.SecTrustGetCertificateCount.argtypes = [ - SecTrustRef - ] - Security.SecTrustGetCertificateCount.restype = CFIndex - - Security.SecTrustGetCertificateAtIndex.argtypes = [ - SecTrustRef, - CFIndex - ] - 
Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef - - Security.SSLCreateContext.argtypes = [ - CFAllocatorRef, - SSLProtocolSide, - SSLConnectionType - ] - Security.SSLCreateContext.restype = SSLContextRef - - Security.SSLSetSessionOption.argtypes = [ - SSLContextRef, - SSLSessionOption, - Boolean - ] - Security.SSLSetSessionOption.restype = OSStatus - - Security.SSLSetProtocolVersionMin.argtypes = [ - SSLContextRef, - SSLProtocol - ] - Security.SSLSetProtocolVersionMin.restype = OSStatus - - Security.SSLSetProtocolVersionMax.argtypes = [ - SSLContextRef, - SSLProtocol - ] - Security.SSLSetProtocolVersionMax.restype = OSStatus - - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SSLReadFunc = SSLReadFunc - Security.SSLWriteFunc = SSLWriteFunc - Security.SSLContextRef = SSLContextRef - Security.SSLProtocol = SSLProtocol - Security.SSLCipherSuite = SSLCipherSuite - Security.SecIdentityRef = SecIdentityRef - Security.SecKeychainRef = SecKeychainRef - Security.SecTrustRef = SecTrustRef - Security.SecTrustResultType = SecTrustResultType - Security.SecExternalFormat = SecExternalFormat - Security.OSStatus = OSStatus - - Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, 'kSecImportExportPassphrase' - ) - Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, 'kSecImportItemIdentity' - ) - - # CoreFoundation time! - CoreFoundation.CFRetain.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFRetain.restype = CFTypeRef - - CoreFoundation.CFRelease.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFRelease.restype = None - - CoreFoundation.CFGetTypeID.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFGetTypeID.restype = CFTypeID - - CoreFoundation.CFStringCreateWithCString.argtypes = [ - CFAllocatorRef, - c_char_p, - CFStringEncoding - ] - CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - - CoreFoundation.CFStringGetCStringPtr.argtypes = [ - CFStringRef, - CFStringEncoding - ] - CoreFoundation.CFStringGetCStringPtr.restype = c_char_p - - CoreFoundation.CFStringGetCString.argtypes = [ - CFStringRef, - c_char_p, - CFIndex, - CFStringEncoding - ] - CoreFoundation.CFStringGetCString.restype = c_bool - - CoreFoundation.CFDataCreate.argtypes = [ - CFAllocatorRef, - c_char_p, - CFIndex - ] - CoreFoundation.CFDataCreate.restype = CFDataRef - - CoreFoundation.CFDataGetLength.argtypes = [ - CFDataRef - ] - CoreFoundation.CFDataGetLength.restype = CFIndex - - CoreFoundation.CFDataGetBytePtr.argtypes = [ - CFDataRef - ] - CoreFoundation.CFDataGetBytePtr.restype = c_void_p - - CoreFoundation.CFDictionaryCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - POINTER(CFTypeRef), - CFIndex, - CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks - ] - CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - - CoreFoundation.CFDictionaryGetValue.argtypes = [ - CFDictionaryRef, - CFTypeRef - ] - CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef - - CoreFoundation.CFArrayCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreate.restype = CFArrayRef - - CoreFoundation.CFArrayCreateMutable.argtypes = [ - CFAllocatorRef, - CFIndex, - CFArrayCallBacks - ] - CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - - CoreFoundation.CFArrayAppendValue.argtypes = [ - CFMutableArrayRef, - c_void_p - ] - CoreFoundation.CFArrayAppendValue.restype = None - - 
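
The ``low_level.py`` helpers further down this patch pair every CoreFoundation ``Create``/``Copy`` call with a ``CFRelease``, per CoreFoundation's ownership rule. A macOS-only sketch of that discipline (the function names are real CoreFoundation APIs; the encoding constant mirrors ``CFConst`` below)::

    from ctypes import CDLL, c_char_p, c_long, c_uint32, c_void_p
    from ctypes.util import find_library

    CF = CDLL(find_library('CoreFoundation'))
    CF.CFStringCreateWithCString.argtypes = [c_void_p, c_char_p, c_uint32]
    CF.CFStringCreateWithCString.restype = c_void_p
    CF.CFStringGetLength.argtypes = [c_void_p]
    CF.CFStringGetLength.restype = c_long  # CFIndex
    CF.CFRelease.argtypes = [c_void_p]
    CF.CFRelease.restype = None

    kCFStringEncodingUTF8 = 0x08000100
    s = CF.CFStringCreateWithCString(None, b'hello', kCFStringEncodingUTF8)
    try:
        assert CF.CFStringGetLength(s) == 5
    finally:
        CF.CFRelease(s)  # every Create/Copy is balanced by a release
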
CoreFoundation.CFArrayGetCount.argtypes = [ - CFArrayRef - ] - CoreFoundation.CFArrayGetCount.restype = CFIndex - - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ - CFArrayRef, - CFIndex - ] - CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p - - CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, 'kCFAllocatorDefault' - ) - CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') - CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' - ) - CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryValueCallBacks' - ) - - CoreFoundation.CFTypeRef = CFTypeRef - CoreFoundation.CFArrayRef = CFArrayRef - CoreFoundation.CFStringRef = CFStringRef - CoreFoundation.CFDictionaryRef = CFDictionaryRef - -except (AttributeError): - raise ImportError('Error initializing ctypes') - - -class CFConst(object): - """ - A class object that acts as essentially a namespace for CoreFoundation - constants. - """ - kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) - - -class SecurityConst(object): - """ - A class object that acts as essentially a namespace for Security constants. - """ - kSSLSessionOptionBreakOnServerAuth = 0 - - kSSLProtocol2 = 1 - kSSLProtocol3 = 2 - kTLSProtocol1 = 4 - kTLSProtocol11 = 7 - kTLSProtocol12 = 8 - - kSSLClientSide = 1 - kSSLStreamType = 0 - - kSecFormatPEMSequence = 10 - - kSecTrustResultInvalid = 0 - kSecTrustResultProceed = 1 - # This gap is present on purpose: this was kSecTrustResultConfirm, which - # is deprecated. - kSecTrustResultDeny = 3 - kSecTrustResultUnspecified = 4 - kSecTrustResultRecoverableTrustFailure = 5 - kSecTrustResultFatalTrustFailure = 6 - kSecTrustResultOtherError = 7 - - errSSLProtocol = -9800 - errSSLWouldBlock = -9803 - errSSLClosedGraceful = -9805 - errSSLClosedNoNotify = -9816 - errSSLClosedAbort = -9806 - - errSSLXCertChainInvalid = -9807 - errSSLCrypto = -9809 - errSSLInternal = -9810 - errSSLCertExpired = -9814 - errSSLCertNotYetValid = -9815 - errSSLUnknownRootCert = -9812 - errSSLNoRootCert = -9813 - errSSLHostNameMismatch = -9843 - errSSLPeerHandshakeFail = -9824 - errSSLPeerUserCancelled = -9839 - errSSLWeakPeerEphemeralDHKey = -9850 - errSSLServerAuthCompleted = -9841 - errSSLRecordOverflow = -9847 - - errSecVerifyFailed = -67808 - errSecNoTrustSettings = -25263 - errSecItemNotFound = -25300 - errSecInvalidTrustSettings = -25262 - - # Cipher suites. We only pick the ones our default cipher string allows. 
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 - TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 - TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A - TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 - TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D - TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C - TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D - TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C - TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 - TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py deleted file mode 100644 index 5e3494bce..000000000 --- a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py +++ /dev/null @@ -1,343 +0,0 @@ -""" -Low-level helpers for the SecureTransport bindings. - -These are Python functions that are not directly related to the high-level APIs -but are necessary to get them to work. They include a whole bunch of low-level -CoreFoundation messing about and memory management. The concerns in this module -are almost entirely about trying to avoid memory leaks and providing -appropriate and useful assistance to the higher-level code. -""" -import base64 -import ctypes -import itertools -import re -import os -import ssl -import tempfile - -from .bindings import Security, CoreFoundation, CFConst - - -# This regular expression is used to grab PEM data out of a PEM bundle. -_PEM_CERTS_RE = re.compile( - b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL -) - - -def _cf_data_from_bytes(bytestring): - """ - Given a bytestring, create a CFData object from it. This CFData object must - be CFReleased by the caller. - """ - return CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) - ) - - -def _cf_dictionary_from_tuples(tuples): - """ - Given a list of Python tuples, create an associated CFDictionary. - """ - dictionary_size = len(tuples) - - # We need to get the dictionary keys and values out in the same order. - keys = (t[0] for t in tuples) - values = (t[1] for t in tuples) - cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) - cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) - - return CoreFoundation.CFDictionaryCreate( - CoreFoundation.kCFAllocatorDefault, - cf_keys, - cf_values, - dictionary_size, - CoreFoundation.kCFTypeDictionaryKeyCallBacks, - CoreFoundation.kCFTypeDictionaryValueCallBacks, - ) - - -def _cf_string_to_unicode(value): - """ - Creates a Unicode string from a CFString object. Used entirely for error - reporting. 
-
-    Yes, it annoys me quite a lot that this function is this complex.
-    """
-    value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
-
-    string = CoreFoundation.CFStringGetCStringPtr(
-        value_as_void_p,
-        CFConst.kCFStringEncodingUTF8
-    )
-    if string is None:
-        buffer = ctypes.create_string_buffer(1024)
-        result = CoreFoundation.CFStringGetCString(
-            value_as_void_p,
-            buffer,
-            1024,
-            CFConst.kCFStringEncodingUTF8
-        )
-        if not result:
-            raise OSError('Error copying C string from CFStringRef')
-        string = buffer.value
-    if string is not None:
-        string = string.decode('utf-8')
-    return string
-
-
-def _assert_no_error(error, exception_class=None):
-    """
-    Checks the return code and throws an exception if there is an error to
-    report.
-    """
-    if error == 0:
-        return
-
-    cf_error_string = Security.SecCopyErrorMessageString(error, None)
-    output = _cf_string_to_unicode(cf_error_string)
-    CoreFoundation.CFRelease(cf_error_string)
-
-    if output is None or output == u'':
-        output = u'OSStatus %s' % error
-
-    if exception_class is None:
-        exception_class = ssl.SSLError
-
-    raise exception_class(output)
-
-
-def _cert_array_from_pem(pem_bundle):
-    """
-    Given a bundle of certs in PEM format, turns them into a CFArray of certs
-    that can be used to validate a cert chain.
-    """
-    der_certs = [
-        base64.b64decode(match.group(1))
-        for match in _PEM_CERTS_RE.finditer(pem_bundle)
-    ]
-    if not der_certs:
-        raise ssl.SSLError("No root certificates specified")
-
-    cert_array = CoreFoundation.CFArrayCreateMutable(
-        CoreFoundation.kCFAllocatorDefault,
-        0,
-        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
-    )
-    if not cert_array:
-        raise ssl.SSLError("Unable to allocate memory!")
-
-    try:
-        for der_bytes in der_certs:
-            certdata = _cf_data_from_bytes(der_bytes)
-            if not certdata:
-                raise ssl.SSLError("Unable to allocate memory!")
-            cert = Security.SecCertificateCreateWithData(
-                CoreFoundation.kCFAllocatorDefault, certdata
-            )
-            CoreFoundation.CFRelease(certdata)
-            if not cert:
-                raise ssl.SSLError("Unable to build cert object!")
-
-            CoreFoundation.CFArrayAppendValue(cert_array, cert)
-            CoreFoundation.CFRelease(cert)
-    except Exception:
-        # We need to free the array before the exception bubbles further.
-        # We only want to do that if an error occurs: otherwise, the caller
-        # should free.
-        CoreFoundation.CFRelease(cert_array)
-        raise
-
-    return cert_array
-
-
-def _is_cert(item):
-    """
-    Returns True if a given CFTypeRef is a certificate.
-    """
-    expected = Security.SecCertificateGetTypeID()
-    return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _is_identity(item):
-    """
-    Returns True if a given CFTypeRef is an identity.
-    """
-    expected = Security.SecIdentityGetTypeID()
-    return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _temporary_keychain():
-    """
-    This function creates a temporary Mac keychain that we can use to work with
-    credentials. This keychain uses a one-time password and a temporary file to
-    store the data. We expect to have one keychain per socket. The returned
-    SecKeychainRef must be freed by the caller, including calling
-    SecKeychainDelete.
-
-    Returns a tuple of the SecKeychainRef and the path to the temporary
-    directory that contains it.
-    """
-    # Unfortunately, SecKeychainCreate requires a path to a keychain. This
-    # means we cannot use mkstemp to use a generic temporary file. Instead,
-    # we're going to create a temporary directory and a filename to use there.
-    # This filename will be 8 random bytes expanded into base64.
We also need - # some random bytes to password-protect the keychain we're creating, so we - # ask for 40 random bytes. - random_bytes = os.urandom(40) - filename = base64.b64encode(random_bytes[:8]).decode('utf-8') - password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8 - tempdirectory = tempfile.mkdtemp() - - keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') - - # We now want to create the keychain itself. - keychain = Security.SecKeychainRef() - status = Security.SecKeychainCreate( - keychain_path, - len(password), - password, - False, - None, - ctypes.byref(keychain) - ) - _assert_no_error(status) - - # Having created the keychain, we want to pass it off to the caller. - return keychain, tempdirectory - - -def _load_items_from_file(keychain, path): - """ - Given a single file, loads all the trust objects from it into arrays and - the keychain. - Returns a tuple of lists: the first list is a list of identities, the - second a list of certs. - """ - certificates = [] - identities = [] - result_array = None - - with open(path, 'rb') as f: - raw_filedata = f.read() - - try: - filedata = CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, - raw_filedata, - len(raw_filedata) - ) - result_array = CoreFoundation.CFArrayRef() - result = Security.SecItemImport( - filedata, # cert data - None, # Filename, leaving it out for now - None, # What the type of the file is, we don't care - None, # what's in the file, we don't care - 0, # import flags - None, # key params, can include passphrase in the future - keychain, # The keychain to insert into - ctypes.byref(result_array) # Results - ) - _assert_no_error(result) - - # A CFArray is not very useful to us as an intermediary - # representation, so we are going to extract the objects we want - # and then free the array. We don't need to keep hold of keys: the - # keychain already has them! - result_count = CoreFoundation.CFArrayGetCount(result_array) - for index in range(result_count): - item = CoreFoundation.CFArrayGetValueAtIndex( - result_array, index - ) - item = ctypes.cast(item, CoreFoundation.CFTypeRef) - - if _is_cert(item): - CoreFoundation.CFRetain(item) - certificates.append(item) - elif _is_identity(item): - CoreFoundation.CFRetain(item) - identities.append(item) - finally: - if result_array: - CoreFoundation.CFRelease(result_array) - - CoreFoundation.CFRelease(filedata) - - return (identities, certificates) - - -def _load_client_cert_chain(keychain, *paths): - """ - Load certificates and maybe keys from a number of files. Has the end goal - of returning a CFArray containing one SecIdentityRef, and then zero or more - SecCertificateRef objects, suitable for use as a client certificate trust - chain. - """ - # Ok, the strategy. - # - # This relies on knowing that macOS will not give you a SecIdentityRef - # unless you have imported a key into a keychain. This is a somewhat - # artificial limitation of macOS (for example, it doesn't necessarily - # affect iOS), but there is nothing inside Security.framework that lets you - # get a SecIdentityRef without having a key in a keychain. - # - # So the policy here is we take all the files and iterate them in order. - # Each one will use SecItemImport to have one or more objects loaded from - # it. We will also point at a keychain that macOS can use to work with the - # private key. - # - # Once we have all the objects, we'll check what we actually have. If we - # already have a SecIdentityRef in hand, fab: we'll use that. 
Otherwise, - # we'll take the first certificate (which we assume to be our leaf) and - # ask the keychain to give us a SecIdentityRef with that cert's associated - # key. - # - # We'll then return a CFArray containing the trust chain: one - # SecIdentityRef and then zero-or-more SecCertificateRef objects. The - # responsibility for freeing this CFArray will be with the caller. This - # CFArray must remain alive for the entire connection, so in practice it - # will be stored with a single SSLSocket, along with the reference to the - # keychain. - certificates = [] - identities = [] - - # Filter out bad paths. - paths = (path for path in paths if path) - - try: - for file_path in paths: - new_identities, new_certs = _load_items_from_file( - keychain, file_path - ) - identities.extend(new_identities) - certificates.extend(new_certs) - - # Ok, we have everything. The question is: do we have an identity? If - # not, we want to grab one from the first cert we have. - if not identities: - new_identity = Security.SecIdentityRef() - status = Security.SecIdentityCreateWithCertificate( - keychain, - certificates[0], - ctypes.byref(new_identity) - ) - _assert_no_error(status) - identities.append(new_identity) - - # We now want to release the original certificate, as we no longer - # need it. - CoreFoundation.CFRelease(certificates.pop(0)) - - # We now need to build a new CFArray that holds the trust chain. - trust_chain = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), - ) - for item in itertools.chain(identities, certificates): - # ArrayAppendValue does a CFRetain on the item. That's fine, - # because the finally block will release our other refs to them. - CoreFoundation.CFArrayAppendValue(trust_chain, item) - - return trust_chain - finally: - for obj in itertools.chain(identities, certificates): - CoreFoundation.CFRelease(obj) diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/python.d/python_modules/urllib3/contrib/appengine.py deleted file mode 100644 index 814b0222d..000000000 --- a/python.d/python_modules/urllib3/contrib/appengine.py +++ /dev/null @@ -1,296 +0,0 @@ -""" -This module provides a pool manager that uses Google App Engine's -`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. - -Example usage:: - - from urllib3 import PoolManager - from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox - - if is_appengine_sandbox(): - # AppEngineManager uses AppEngine's URLFetch API behind the scenes - http = AppEngineManager() - else: - # PoolManager uses a socket-level API behind the scenes - http = PoolManager() - - r = http.request('GET', 'https://google.com/') - -There are `limitations <https://cloud.google.com/appengine/docs/python/\ -urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be -the best choice for your application. There are three options for using -urllib3 on Google App Engine: - -1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is - cost-effective in many circumstances as long as your usage is within the - limitations. -2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. - Sockets also have `limitations and restrictions - <https://cloud.google.com/appengine/docs/python/sockets/\ - #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. 
-   To use sockets, be sure to specify the following in your ``app.yaml``::
-
-       env_variables:
-           GAE_USE_SOCKETS_HTTPLIB : 'true'
-
-3. If you are using `App Engine Flexible
-<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
-:class:`PoolManager` without any configuration or special environment variables.
-"""
-
-from __future__ import absolute_import
-import logging
-import os
-import warnings
-from ..packages.six.moves.urllib.parse import urljoin
-
-from ..exceptions import (
-    HTTPError,
-    HTTPWarning,
-    MaxRetryError,
-    ProtocolError,
-    TimeoutError,
-    SSLError
-)
-
-from ..packages.six import BytesIO
-from ..request import RequestMethods
-from ..response import HTTPResponse
-from ..util.timeout import Timeout
-from ..util.retry import Retry
-
-try:
-    from google.appengine.api import urlfetch
-except ImportError:
-    urlfetch = None
-
-
-log = logging.getLogger(__name__)
-
-
-class AppEnginePlatformWarning(HTTPWarning):
-    pass
-
-
-class AppEnginePlatformError(HTTPError):
-    pass
-
-
-class AppEngineManager(RequestMethods):
-    """
-    Connection manager for Google App Engine sandbox applications.
-
-    This manager uses the URLFetch service directly instead of using the
-    emulated httplib, and is subject to URLFetch limitations as described in
-    the App Engine documentation `here
-    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-    Notably it will raise an :class:`AppEnginePlatformError` if:
-        * URLFetch is not available.
-        * If you attempt to use this on App Engine Flexible, as full socket
-          support is available.
-        * If a request size is more than 10 megabytes.
-        * If a response size is more than 32 megabytes.
-        * If you use an unsupported request method such as OPTIONS.
-
-    Beyond those cases, it will raise normal urllib3 errors.
-    """
-
-    def __init__(self, headers=None, retries=None, validate_certificate=True,
-                 urlfetch_retries=True):
-        if not urlfetch:
-            raise AppEnginePlatformError(
-                "URLFetch is not available in this environment.")
-
-        if is_prod_appengine_mvms():
-            raise AppEnginePlatformError(
-                "Use normal urllib3.PoolManager instead of AppEngineManager "
-                "on Managed VMs, as using URLFetch is not necessary in "
-                "this environment.")
-
-        warnings.warn(
-            "urllib3 is using URLFetch on Google App Engine sandbox instead "
-            "of sockets. 
To use sockets directly instead of URLFetch see " - "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", - AppEnginePlatformWarning) - - RequestMethods.__init__(self, headers) - self.validate_certificate = validate_certificate - self.urlfetch_retries = urlfetch_retries - - self.retries = retries or Retry.DEFAULT - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - # Return False to re-raise any potential exceptions - return False - - def urlopen(self, method, url, body=None, headers=None, - retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, - **response_kw): - - retries = self._get_retries(retries, redirect) - - try: - follow_redirects = ( - redirect and - retries.redirect != 0 and - retries.total) - response = urlfetch.fetch( - url, - payload=body, - method=method, - headers=headers or {}, - allow_truncated=False, - follow_redirects=self.urlfetch_retries and follow_redirects, - deadline=self._get_absolute_timeout(timeout), - validate_certificate=self.validate_certificate, - ) - except urlfetch.DeadlineExceededError as e: - raise TimeoutError(self, e) - - except urlfetch.InvalidURLError as e: - if 'too large' in str(e): - raise AppEnginePlatformError( - "URLFetch request too large, URLFetch only " - "supports requests up to 10mb in size.", e) - raise ProtocolError(e) - - except urlfetch.DownloadError as e: - if 'Too many redirects' in str(e): - raise MaxRetryError(self, url, reason=e) - raise ProtocolError(e) - - except urlfetch.ResponseTooLargeError as e: - raise AppEnginePlatformError( - "URLFetch response too large, URLFetch only supports" - "responses up to 32mb in size.", e) - - except urlfetch.SSLCertificateError as e: - raise SSLError(e) - - except urlfetch.InvalidMethodError as e: - raise AppEnginePlatformError( - "URLFetch does not support method: %s" % method, e) - - http_response = self._urlfetch_response_to_http_response( - response, retries=retries, **response_kw) - - # Handle redirect? - redirect_location = redirect and http_response.get_redirect_location() - if redirect_location: - # Check for redirect response - if (self.urlfetch_retries and retries.raise_on_redirect): - raise MaxRetryError(self, url, "too many redirects") - else: - if http_response.status == 303: - method = 'GET' - - try: - retries = retries.increment(method, url, response=http_response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - raise MaxRetryError(self, url, "too many redirects") - return http_response - - retries.sleep_for_retry(http_response) - log.debug("Redirecting %s -> %s", url, redirect_location) - redirect_url = urljoin(url, redirect_location) - return self.urlopen( - method, redirect_url, body, headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) - - # Check if we should retry the HTTP response. - has_retry_after = bool(http_response.getheader('Retry-After')) - if retries.is_retry(method, http_response.status, has_retry_after): - retries = retries.increment( - method, url, response=http_response, _pool=self) - log.debug("Retry: %s", url) - retries.sleep(http_response) - return self.urlopen( - method, url, - body=body, headers=headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) - - return http_response - - def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): - - if is_prod_appengine(): - # Production GAE handles deflate encoding automatically, but does - # not remove the encoding header. 
- content_encoding = urlfetch_resp.headers.get('content-encoding') - - if content_encoding == 'deflate': - del urlfetch_resp.headers['content-encoding'] - - transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') - # We have a full response's content, - # so let's make sure we don't report ourselves as chunked data. - if transfer_encoding == 'chunked': - encodings = transfer_encoding.split(",") - encodings.remove('chunked') - urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) - - return HTTPResponse( - # In order for decoding to work, we must present the content as - # a file-like object. - body=BytesIO(urlfetch_resp.content), - headers=urlfetch_resp.headers, - status=urlfetch_resp.status_code, - **response_kw - ) - - def _get_absolute_timeout(self, timeout): - if timeout is Timeout.DEFAULT_TIMEOUT: - return None # Defer to URLFetch's default. - if isinstance(timeout, Timeout): - if timeout._read is not None or timeout._connect is not None: - warnings.warn( - "URLFetch does not support granular timeout settings, " - "reverting to total or default URLFetch timeout.", - AppEnginePlatformWarning) - return timeout.total - return timeout - - def _get_retries(self, retries, redirect): - if not isinstance(retries, Retry): - retries = Retry.from_int( - retries, redirect=redirect, default=self.retries) - - if retries.connect or retries.read or retries.redirect: - warnings.warn( - "URLFetch only supports total retries and does not " - "recognize connect, read, or redirect retry parameters.", - AppEnginePlatformWarning) - - return retries - - -def is_appengine(): - return (is_local_appengine() or - is_prod_appengine() or - is_prod_appengine_mvms()) - - -def is_appengine_sandbox(): - return is_appengine() and not is_prod_appengine_mvms() - - -def is_local_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) - - -def is_prod_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and - not is_prod_appengine_mvms()) - - -def is_prod_appengine_mvms(): - return os.environ.get('GAE_VM', False) == 'true' diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/python.d/python_modules/urllib3/contrib/ntlmpool.py deleted file mode 100644 index 642e99ed2..000000000 --- a/python.d/python_modules/urllib3/contrib/ntlmpool.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -NTLM authenticating pool, contributed by erikcederstran - -Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 -""" -from __future__ import absolute_import - -from logging import getLogger -from ntlm import ntlm - -from .. import HTTPSConnectionPool -from ..packages.six.moves.http_client import HTTPSConnection - - -log = getLogger(__name__) - - -class NTLMConnectionPool(HTTPSConnectionPool): - """ - Implements an NTLM authentication version of an urllib3 connection pool - """ - - scheme = 'https' - - def __init__(self, user, pw, authurl, *args, **kwargs): - """ - authurl is a random URL on the server that is protected by NTLM. - user is the Windows user, probably in the DOMAIN\\username format. - pw is the password for the user. - """ - super(NTLMConnectionPool, self).__init__(*args, **kwargs) - self.authurl = authurl - self.rawuser = user - user_parts = user.split('\\', 1) - self.domain = user_parts[0].upper() - self.user = user_parts[1] - self.pw = pw - - def _new_conn(self): - # Performs the NTLM handshake that secures the connection. 
The socket - # must be kept open while requests are performed. - self.num_connections += 1 - log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', - self.num_connections, self.host, self.authurl) - - headers = {} - headers['Connection'] = 'Keep-Alive' - req_header = 'Authorization' - resp_header = 'www-authenticate' - - conn = HTTPSConnection(host=self.host, port=self.port) - - # Send negotiation message - headers[req_header] = ( - 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) - res = conn.getresponse() - reshdr = dict(res.getheaders()) - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', reshdr) - log.debug('Response data: %s [...]', res.read(100)) - - # Remove the reference to the socket, so that it can not be closed by - # the response object (we want to keep the socket open) - res.fp = None - - # Server should respond with a challenge message - auth_header_values = reshdr[resp_header].split(', ') - auth_header_value = None - for s in auth_header_values: - if s[:5] == 'NTLM ': - auth_header_value = s[5:] - if auth_header_value is None: - raise Exception('Unexpected %s response header: %s' % - (resp_header, reshdr[resp_header])) - - # Send authentication message - ServerChallenge, NegotiateFlags = \ - ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) - auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, - self.user, - self.domain, - self.pw, - NegotiateFlags) - headers[req_header] = 'NTLM %s' % auth_msg - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) - res = conn.getresponse() - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', dict(res.getheaders())) - log.debug('Response data: %s [...]', res.read()[:100]) - if res.status != 200: - if res.status == 401: - raise Exception('Server rejected request: wrong ' - 'username or password') - raise Exception('Wrong server response: %s %s' % - (res.status, res.reason)) - - res.fp = None - log.debug('Connection established') - return conn - - def urlopen(self, method, url, body=None, headers=None, retries=3, - redirect=True, assert_same_host=True): - if headers is None: - headers = {} - headers['Connection'] = 'Keep-Alive' - return super(NTLMConnectionPool, self).urlopen(method, url, body, - headers, retries, - redirect, - assert_same_host) diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/python.d/python_modules/urllib3/contrib/pyopenssl.py deleted file mode 100644 index 6645dbaa9..000000000 --- a/python.d/python_modules/urllib3/contrib/pyopenssl.py +++ /dev/null @@ -1,457 +0,0 @@ -""" -SSL with SNI_-support for Python 2. Follow these instructions if you would -like to verify SSL certificates in Python 2. Note, the default libraries do -*not* do certificate checking; you need to do additional work to validate -certificates yourself. - -This needs the following packages installed: - -* pyOpenSSL (tested with 16.0.0) -* cryptography (minimum 1.3.4, from pyopenssl) -* idna (minimum 2.0, from cryptography) - -However, pyopenssl depends on cryptography, which depends on idna, so while we -use all three directly here we end up having relatively few packages required. 
- -You can install them with the following command: - - pip install pyopenssl cryptography idna - -To activate certificate checking, call -:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code -before you begin making HTTP requests. This can be done in a ``sitecustomize`` -module, or at any other time before your application begins using ``urllib3``, -like this:: - - try: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() - except ImportError: - pass - -Now you can use :mod:`urllib3` as you normally would, and it will support SNI -when the required modules are installed. - -Activating this module also has the positive side effect of disabling SSL/TLS -compression in Python 2 (see `CRIME attack`_). - -If you want to configure the default list of supported cipher suites, you can -set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. - -.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication -.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) -""" -from __future__ import absolute_import - -import OpenSSL.SSL -from cryptography import x509 -from cryptography.hazmat.backends.openssl import backend as openssl_backend -from cryptography.hazmat.backends.openssl.x509 import _Certificate - -from socket import timeout, error as SocketError -from io import BytesIO - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -import logging -import ssl - -try: - import six -except ImportError: - from ..packages import six - -import sys - -from .. import util - -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] - -# SNI always works. -HAS_SNI = True - -# Map from urllib3 to PyOpenSSL compatible parameter-values. -_openssl_versions = { - ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, - ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, -} - -if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): - _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD - -if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): - _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD - -try: - _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) -except AttributeError: - pass - -_stdlib_to_openssl_verify = { - ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, - ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, - ssl.CERT_REQUIRED: - OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, -} -_openssl_to_stdlib_verify = dict( - (v, k) for k, v in _stdlib_to_openssl_verify.items() -) - -# OpenSSL will only write 16K at a time -SSL_WRITE_BLOCKSIZE = 16384 - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - - -log = logging.getLogger(__name__) - - -def inject_into_urllib3(): - 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' - - _validate_dependencies_met() - - util.ssl_.SSLContext = PyOpenSSLContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_PYOPENSSL = True - util.ssl_.IS_PYOPENSSL = True - - -def extract_from_urllib3(): - 'Undo monkey-patching by :func:`inject_into_urllib3`.' 
- - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_PYOPENSSL = False - util.ssl_.IS_PYOPENSSL = False - - -def _validate_dependencies_met(): - """ - Verifies that PyOpenSSL's package-level dependencies have been met. - Throws `ImportError` if they are not met. - """ - # Method added in `cryptography==1.1`; not available in older versions - from cryptography.x509.extensions import Extensions - if getattr(Extensions, "get_extension_for_class", None) is None: - raise ImportError("'cryptography' module missing required functionality. " - "Try upgrading to v1.3.4 or newer.") - - # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 - # attribute is only present on those versions. - from OpenSSL.crypto import X509 - x509 = X509() - if getattr(x509, "_x509", None) is None: - raise ImportError("'pyOpenSSL' module missing required functionality. " - "Try upgrading to v0.14 or newer.") - - -def _dnsname_to_stdlib(name): - """ - Converts a dNSName SubjectAlternativeName field to the form used by the - standard library on the given Python version. - - Cryptography produces a dNSName as a unicode string that was idna-decoded - from ASCII bytes. We need to idna-encode that string to get it back, and - then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib - uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). - """ - def idna_encode(name): - """ - Borrowed wholesale from the Python Cryptography Project. It turns out - that we can't just safely call `idna.encode`: it can explode for - wildcard names. This avoids that problem. - """ - import idna - - for prefix in [u'*.', u'.']: - if name.startswith(prefix): - name = name[len(prefix):] - return prefix.encode('ascii') + idna.encode(name) - return idna.encode(name) - - name = idna_encode(name) - if sys.version_info >= (3, 0): - name = name.decode('utf-8') - return name - - -def get_subj_alt_name(peer_cert): - """ - Given an PyOpenSSL certificate, provides all the subject alternative names. - """ - # Pass the cert to cryptography, which has much better APIs for this. - # This is technically using private APIs, but should work across all - # relevant versions until PyOpenSSL gets something proper for this. - cert = _Certificate(openssl_backend, peer_cert._x509) - - # We want to find the SAN extension. Ask Cryptography to locate it (it's - # faster than looping in Python) - try: - ext = cert.extensions.get_extension_for_class( - x509.SubjectAlternativeName - ).value - except x509.ExtensionNotFound: - # No such extension, return the empty list. - return [] - except (x509.DuplicateExtension, x509.UnsupportedExtension, - x509.UnsupportedGeneralNameType, UnicodeError) as e: - # A problem has been found with the quality of the certificate. Assume - # no SAN field is present. - log.warning( - "A problem was encountered with the certificate that prevented " - "urllib3 from finding the SubjectAlternativeName field. This can " - "affect certificate validation. The error was %s", - e, - ) - return [] - - # We want to return dNSName and iPAddress fields. We need to cast the IPs - # back to strings because the match_hostname function wants them as - # strings. - # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 - # decoded. This is pretty frustrating, but that's what the standard library - # does with certificates, and so we need to attempt to do the same. 
- names = [ - ('DNS', _dnsname_to_stdlib(name)) - for name in ext.get_values_for_type(x509.DNSName) - ] - names.extend( - ('IP Address', str(name)) - for name in ext.get_values_for_type(x509.IPAddress) - ) - - return names - - -class WrappedSocket(object): - '''API-compatibility wrapper for Python OpenSSL's Connection-class. - - Note: _makefile_refs, _drop() and _reuse() are needed for the garbage - collector of pypy. - ''' - - def __init__(self, connection, socket, suppress_ragged_eofs=True): - self.connection = connection - self.socket = socket - self.suppress_ragged_eofs = suppress_ragged_eofs - self._makefile_refs = 0 - self._closed = False - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, *args, **kwargs): - try: - data = self.connection.recv(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return b'' - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return b'' - else: - raise - except OpenSSL.SSL.WantReadError: - rd = util.wait_for_read(self.socket, self.socket.gettimeout()) - if not rd: - raise timeout('The read operation timed out') - else: - return self.recv(*args, **kwargs) - else: - return data - - def recv_into(self, *args, **kwargs): - try: - return self.connection.recv_into(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return 0 - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return 0 - else: - raise - except OpenSSL.SSL.WantReadError: - rd = util.wait_for_read(self.socket, self.socket.gettimeout()) - if not rd: - raise timeout('The read operation timed out') - else: - return self.recv_into(*args, **kwargs) - - def settimeout(self, timeout): - return self.socket.settimeout(timeout) - - def _send_until_done(self, data): - while True: - try: - return self.connection.send(data) - except OpenSSL.SSL.WantWriteError: - wr = util.wait_for_write(self.socket, self.socket.gettimeout()) - if not wr: - raise timeout() - continue - except OpenSSL.SSL.SysCallError as e: - raise SocketError(str(e)) - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - # FIXME rethrow compatible exceptions should we ever use this - self.connection.shutdown() - - def close(self): - if self._makefile_refs < 1: - try: - self._closed = True - return self.connection.close() - except OpenSSL.SSL.Error: - return - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - x509 = self.connection.get_peer_certificate() - - if not x509: - return x509 - - if binary_form: - return OpenSSL.crypto.dump_certificate( - OpenSSL.crypto.FILETYPE_ASN1, - x509) - - return { - 'subject': ( - (('commonName', x509.get_subject().CN),), - ), - 'subjectAltName': get_subj_alt_name(x509) - } - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - def makefile(self, mode, 
bufsize=-1):
-        self._makefile_refs += 1
-        return _fileobject(self, mode, bufsize, close=True)
-else:  # Platform-specific: Python 3
-    makefile = backport_makefile
-
-WrappedSocket.makefile = makefile
-
-
-class PyOpenSSLContext(object):
-    """
-    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
-    for translating the interface of the standard library ``SSLContext`` object
-    to calls into PyOpenSSL.
-    """
-    def __init__(self, protocol):
-        self.protocol = _openssl_versions[protocol]
-        self._ctx = OpenSSL.SSL.Context(self.protocol)
-        self._options = 0
-        self.check_hostname = False
-
-    @property
-    def options(self):
-        return self._options
-
-    @options.setter
-    def options(self, value):
-        self._options = value
-        self._ctx.set_options(value)
-
-    @property
-    def verify_mode(self):
-        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
-
-    @verify_mode.setter
-    def verify_mode(self, value):
-        self._ctx.set_verify(
-            _stdlib_to_openssl_verify[value],
-            _verify_callback
-        )
-
-    def set_default_verify_paths(self):
-        self._ctx.set_default_verify_paths()
-
-    def set_ciphers(self, ciphers):
-        if isinstance(ciphers, six.text_type):
-            ciphers = ciphers.encode('utf-8')
-        self._ctx.set_cipher_list(ciphers)
-
-    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
-        if cafile is not None:
-            cafile = cafile.encode('utf-8')
-        if capath is not None:
-            capath = capath.encode('utf-8')
-        self._ctx.load_verify_locations(cafile, capath)
-        if cadata is not None:
-            self._ctx.load_verify_locations(BytesIO(cadata))
-
-    def load_cert_chain(self, certfile, keyfile=None, password=None):
-        self._ctx.use_certificate_file(certfile)
-        if password is not None:
-            self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
-        self._ctx.use_privatekey_file(keyfile or certfile)
-
-    def wrap_socket(self, sock, server_side=False,
-                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
-                    server_hostname=None):
-        cnx = OpenSSL.SSL.Connection(self._ctx, sock)
-
-        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
-            server_hostname = server_hostname.encode('utf-8')
-
-        if server_hostname is not None:
-            cnx.set_tlsext_host_name(server_hostname)
-
-        cnx.set_connect_state()
-
-        while True:
-            try:
-                cnx.do_handshake()
-            except OpenSSL.SSL.WantReadError:
-                rd = util.wait_for_read(sock, sock.gettimeout())
-                if not rd:
-                    raise timeout('select timed out')
-                continue
-            except OpenSSL.SSL.Error as e:
-                raise ssl.SSLError('bad handshake: %r' % e)
-            break
-
-        return WrappedSocket(cnx, sock)
-
-
-def _verify_callback(cnx, x509, err_no, err_depth, return_code):
-    return err_no == 0
diff --git a/python.d/python_modules/urllib3/contrib/securetransport.py b/python.d/python_modules/urllib3/contrib/securetransport.py
deleted file mode 100644
index 72b23ab1c..000000000
--- a/python.d/python_modules/urllib3/contrib/securetransport.py
+++ /dev/null
@@ -1,807 +0,0 @@
-"""
-SecureTransport support for urllib3 via ctypes.
-
-This makes platform-native TLS available to urllib3 users on macOS without the
-use of a compiler. This is an important feature because the Python Package
-Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
-that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
-this is to give macOS users an alternative solution to the problem, and that
-solution is to use SecureTransport.
-
-We use ctypes here because this solution must not require a compiler.
-That's because pip is not allowed to require a compiler either.
-
-This is not intended to be a seriously long-term solution to this problem.
-The hope is that PEP 543 will eventually solve this issue for us, at which
-point we can retire this contrib module. But in the short term, we need to
-solve the impending tire fire that is Python on Mac without this kind of
-contrib module. So...here we are.
-
-To use this module, simply import and inject it::
-
-    import urllib3.contrib.securetransport
-    urllib3.contrib.securetransport.inject_into_urllib3()
-
-Happy TLSing!
-"""
-from __future__ import absolute_import
-
-import contextlib
-import ctypes
-import errno
-import os.path
-import shutil
-import socket
-import ssl
-import threading
-import weakref
-
-from .. import util
-from ._securetransport.bindings import (
-    Security, SecurityConst, CoreFoundation
-)
-from ._securetransport.low_level import (
-    _assert_no_error, _cert_array_from_pem, _temporary_keychain,
-    _load_client_cert_chain
-)
-
-try:  # Platform-specific: Python 2
-    from socket import _fileobject
-except ImportError:  # Platform-specific: Python 3
-    _fileobject = None
-    from ..packages.backports.makefile import backport_makefile
-
-try:
-    memoryview(b'')
-except NameError:
-    raise ImportError("SecureTransport only works on Pythons with memoryview")
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works
-HAS_SNI = True
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-# This dictionary is used by the read callback to obtain a handle to the
-# calling wrapped socket. This is a pretty silly approach, but for now it'll
-# do. I feel like I should be able to smuggle a handle to the wrapped socket
-# directly in the SSLConnectionRef, but for now this approach will work I
-# guess.
-#
-# We need to lock around this structure for inserts, but we don't do it for
-# reads/writes in the callbacks. The reasoning here goes as follows:
-#
-#    1. It is not possible to call into the callbacks before the dictionary is
-#       populated, so once in the callback the id must be in the dictionary.
-#    2. The callbacks don't mutate the dictionary, they only read from it, and
-#       so cannot conflict with any of the insertions.
-#
-# This is good: if we had to lock in the callbacks we'd drastically slow down
-# the performance of this code.
-_connection_refs = weakref.WeakValueDictionary()
-_connection_ref_lock = threading.Lock()
-
-# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
-# for no better reason than we need *a* limit, and this one is right there.
-SSL_WRITE_BLOCKSIZE = 16384
-
-# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
-# individual cipher suites. We need to do this because this is how
-# SecureTransport wants them.
-CIPHER_SUITES = [ - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, -] - -# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. -_protocol_to_min_max = { - ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), -} - -if hasattr(ssl, "PROTOCOL_SSLv2"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( - SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 - ) -if hasattr(ssl, "PROTOCOL_SSLv3"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( - SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 - ) -if hasattr(ssl, "PROTOCOL_TLSv1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( - SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 - ) -if hasattr(ssl, "PROTOCOL_TLSv1_1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( - SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 - ) -if hasattr(ssl, "PROTOCOL_TLSv1_2"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( - SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 - ) -if hasattr(ssl, "PROTOCOL_TLS"): - _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] - - -def inject_into_urllib3(): - """ - Monkey-patch urllib3 with SecureTransport-backed SSL-support. - """ - util.ssl_.SSLContext = SecureTransportContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_SECURETRANSPORT = True - util.ssl_.IS_SECURETRANSPORT = True - - -def extract_from_urllib3(): - """ - Undo monkey-patching by :func:`inject_into_urllib3`. - """ - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_SECURETRANSPORT = False - util.ssl_.IS_SECURETRANSPORT = False - - -def _read_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport read callback. This is called by ST to request that data - be returned from the socket. 
-    """
-    wrapped_socket = None
-    try:
-        wrapped_socket = _connection_refs.get(connection_id)
-        if wrapped_socket is None:
-            return SecurityConst.errSSLInternal
-        base_socket = wrapped_socket.socket
-
-        requested_length = data_length_pointer[0]
-
-        timeout = wrapped_socket.gettimeout()
-        error = None
-        read_count = 0
-        buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
-        buffer_view = memoryview(buffer)
-
-        try:
-            while read_count < requested_length:
-                if timeout is None or timeout >= 0:
-                    readables = util.wait_for_read([base_socket], timeout)
-                    if not readables:
-                        raise socket.error(errno.EAGAIN, 'timed out')
-
-                # We need to tell ctypes that we have a buffer that can be
-                # written to. Upsettingly, we do that like this:
-                chunk_size = base_socket.recv_into(
-                    buffer_view[read_count:requested_length]
-                )
-                read_count += chunk_size
-                if not chunk_size:
-                    if not read_count:
-                        return SecurityConst.errSSLClosedGraceful
-                    break
-        except (socket.error) as e:
-            error = e.errno
-
-        if error is not None and error != errno.EAGAIN:
-            if error == errno.ECONNRESET:
-                return SecurityConst.errSSLClosedAbort
-            raise
-
-        data_length_pointer[0] = read_count
-
-        if read_count != requested_length:
-            return SecurityConst.errSSLWouldBlock
-
-        return 0
-    except Exception as e:
-        if wrapped_socket is not None:
-            wrapped_socket._exception = e
-        return SecurityConst.errSSLInternal
-
-
-def _write_callback(connection_id, data_buffer, data_length_pointer):
-    """
-    SecureTransport write callback. This is called by ST to request that data
-    actually be sent on the network.
-    """
-    wrapped_socket = None
-    try:
-        wrapped_socket = _connection_refs.get(connection_id)
-        if wrapped_socket is None:
-            return SecurityConst.errSSLInternal
-        base_socket = wrapped_socket.socket
-
-        bytes_to_write = data_length_pointer[0]
-        data = ctypes.string_at(data_buffer, bytes_to_write)
-
-        timeout = wrapped_socket.gettimeout()
-        error = None
-        sent = 0
-
-        try:
-            while sent < bytes_to_write:
-                if timeout is None or timeout >= 0:
-                    writables = util.wait_for_write([base_socket], timeout)
-                    if not writables:
-                        raise socket.error(errno.EAGAIN, 'timed out')
-                chunk_sent = base_socket.send(data)
-                sent += chunk_sent
-
-                # This has some needless copying here, but I'm not sure there's
-                # much value in optimising this data path.
-                data = data[chunk_sent:]
-        except (socket.error) as e:
-            error = e.errno
-
-        if error is not None and error != errno.EAGAIN:
-            if error == errno.ECONNRESET:
-                return SecurityConst.errSSLClosedAbort
-            raise
-
-        data_length_pointer[0] = sent
-        if sent != bytes_to_write:
-            return SecurityConst.errSSLWouldBlock
-
-        return 0
-    except Exception as e:
-        if wrapped_socket is not None:
-            wrapped_socket._exception = e
-        return SecurityConst.errSSLInternal
-
-
-# We need to keep these two object references alive: if they get GC'd while
-# in use then SecureTransport could attempt to call a function that is in freed
-# memory. That would be...uh...bad. Yeah, that's the word. Bad.
-_read_callback_pointer = Security.SSLReadFunc(_read_callback)
-_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
-
-
-class WrappedSocket(object):
-    """
-    API-compatibility wrapper for Python's OpenSSL wrapped socket object.
-
-    Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
-    collector of PyPy.
-    """
-    def __init__(self, socket):
-        self.socket = socket
-        self.context = None
-        self._makefile_refs = 0
-        self._closed = False
-        self._exception = None
-        self._keychain = None
-        self._keychain_dir = None
-        self._client_cert_chain = None
-
-        # We save off the previously-configured timeout and then set it to
-        # zero. This is done because we use select and friends to handle the
-        # timeouts, but if we leave the timeout set on the lower socket then
-        # Python will "kindly" call select on that socket again for us. Avoid
-        # that by forcing the timeout to zero.
-        self._timeout = self.socket.gettimeout()
-        self.socket.settimeout(0)
-
-    @contextlib.contextmanager
-    def _raise_on_error(self):
-        """
-        A context manager that can be used to wrap calls that do I/O from
-        SecureTransport. If any of the I/O callbacks hit an exception, this
-        context manager will correctly propagate the exception after the fact.
-        This avoids silently swallowing those exceptions.
-
-        It also correctly forces the socket closed.
-        """
-        self._exception = None
-
-        # We explicitly don't catch around this yield because in the unlikely
-        # event that an exception was hit in the block we don't want to swallow
-        # it.
-        yield
-        if self._exception is not None:
-            exception, self._exception = self._exception, None
-            self.close()
-            raise exception
-
-    def _set_ciphers(self):
-        """
-        Sets up the allowed ciphers. By default this matches the set in
-        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is
-        hard-coded and cannot be changed at this time, mostly because parsing
-        OpenSSL cipher strings is going to be a freaking nightmare.
-        """
-        ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
-        result = Security.SSLSetEnabledCiphers(
-            self.context, ciphers, len(CIPHER_SUITES)
-        )
-        _assert_no_error(result)
-
-    def _custom_validate(self, verify, trust_bundle):
-        """
-        Called when we have set custom validation. We do this in two cases:
-        first, when cert validation is entirely disabled; and second, when
-        using a custom trust DB.
-        """
-        # If we disabled cert validation, just say: cool.
-        if not verify:
-            return
-
-        # We want data in memory, so load it up.
-        if os.path.isfile(trust_bundle):
-            with open(trust_bundle, 'rb') as f:
-                trust_bundle = f.read()
-
-        cert_array = None
-        trust = Security.SecTrustRef()
-
-        try:
-            # Get a CFArray that contains the certs we want.
-            cert_array = _cert_array_from_pem(trust_bundle)
-
-            # Ok, now the hard part. We want to get the SecTrustRef that ST has
-            # created for this connection, shove our CAs into it, tell ST to
-            # ignore everything else it knows, and then ask if it can build a
-            # chain. This is a buuuunch of code.
-            result = Security.SSLCopyPeerTrust(
-                self.context, ctypes.byref(trust)
-            )
-            _assert_no_error(result)
-            if not trust:
-                raise ssl.SSLError("Failed to copy trust reference")
-
-            result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
-            _assert_no_error(result)
-
-            result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
-            _assert_no_error(result)
-
-            trust_result = Security.SecTrustResultType()
-            result = Security.SecTrustEvaluate(
-                trust, ctypes.byref(trust_result)
-            )
-            _assert_no_error(result)
-        finally:
-            if trust:
-                CoreFoundation.CFRelease(trust)
-
-            if cert_array is not None:
-                CoreFoundation.CFRelease(cert_array)
-
-        # Ok, now we can look at what the result was.
-        successes = (
-            SecurityConst.kSecTrustResultUnspecified,
-            SecurityConst.kSecTrustResultProceed
-        )
-        if trust_result.value not in successes:
-            raise ssl.SSLError(
-                "certificate verify failed, error code: %d" %
-                trust_result.value
-            )
-
-    def handshake(self,
-                  server_hostname,
-                  verify,
-                  trust_bundle,
-                  min_version,
-                  max_version,
-                  client_cert,
-                  client_key,
-                  client_key_passphrase):
-        """
-        Actually performs the TLS handshake. This is run automatically by the
-        wrapped socket, and shouldn't be needed in user code.
-        """
-        # First, we do the initial bits of connection setup. We need to create
-        # a context, set its I/O funcs, and set the connection reference.
-        self.context = Security.SSLCreateContext(
-            None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
-        )
-        result = Security.SSLSetIOFuncs(
-            self.context, _read_callback_pointer, _write_callback_pointer
-        )
-        _assert_no_error(result)
-
-        # Here we need to compute the handle to use. We do this by taking the
-        # id of self modulo 2**31 - 1. If this is already in the dictionary, we
-        # just keep incrementing by one until we find a free space.
-        with _connection_ref_lock:
-            handle = id(self) % 2147483647
-            while handle in _connection_refs:
-                handle = (handle + 1) % 2147483647
-            _connection_refs[handle] = self
-
-        result = Security.SSLSetConnection(self.context, handle)
-        _assert_no_error(result)
-
-        # If we have a server hostname, we should set that too.
-        if server_hostname:
-            if not isinstance(server_hostname, bytes):
-                server_hostname = server_hostname.encode('utf-8')
-
-            result = Security.SSLSetPeerDomainName(
-                self.context, server_hostname, len(server_hostname)
-            )
-            _assert_no_error(result)
-
-        # Set up the ciphers.
-        self._set_ciphers()
-
-        # Set the minimum and maximum TLS versions.
-        result = Security.SSLSetProtocolVersionMin(self.context, min_version)
-        _assert_no_error(result)
-        result = Security.SSLSetProtocolVersionMax(self.context, max_version)
-        _assert_no_error(result)
-
-        # If there's a trust DB, we need to use it. We do that by telling
-        # SecureTransport to break on server auth. We also do that if we don't
-        # want to validate the certs at all: we just won't actually do any
-        # authing in that case.
-        if not verify or trust_bundle is not None:
-            result = Security.SSLSetSessionOption(
-                self.context,
-                SecurityConst.kSSLSessionOptionBreakOnServerAuth,
-                True
-            )
-            _assert_no_error(result)
-
-        # If there's a client cert, we need to use it.
-        if client_cert:
-            self._keychain, self._keychain_dir = _temporary_keychain()
-            self._client_cert_chain = _load_client_cert_chain(
-                self._keychain, client_cert, client_key
-            )
-            result = Security.SSLSetCertificate(
-                self.context, self._client_cert_chain
-            )
-            _assert_no_error(result)
-
-        while True:
-            with self._raise_on_error():
-                result = Security.SSLHandshake(self.context)
-
-                if result == SecurityConst.errSSLWouldBlock:
-                    raise socket.timeout("handshake timed out")
-                elif result == SecurityConst.errSSLServerAuthCompleted:
-                    self._custom_validate(verify, trust_bundle)
-                    continue
-                else:
-                    _assert_no_error(result)
-                    break
-
-    def fileno(self):
-        return self.socket.fileno()
-
-    # Copy-pasted from Python 3.5 source code
-    def _decref_socketios(self):
-        if self._makefile_refs > 0:
-            self._makefile_refs -= 1
-        if self._closed:
-            self.close()
-
-    def recv(self, bufsiz):
-        buffer = ctypes.create_string_buffer(bufsiz)
-        bytes_read = self.recv_into(buffer, bufsiz)
-        data = buffer[:bytes_read]
-        return data
-
-    def recv_into(self, buffer, nbytes=None):
-        # Read short on EOF.
- if self._closed: - return 0 - - if nbytes is None: - nbytes = len(buffer) - - buffer = (ctypes.c_char * nbytes).from_buffer(buffer) - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLRead( - self.context, buffer, nbytes, ctypes.byref(processed_bytes) - ) - - # There are some result codes that we want to treat as "not always - # errors". Specifically, those are errSSLWouldBlock, - # errSSLClosedGraceful, and errSSLClosedNoNotify. - if (result == SecurityConst.errSSLWouldBlock): - # If we didn't process any bytes, then this was just a time out. - # However, we can get errSSLWouldBlock in situations when we *did* - # read some data, and in those cases we should just read "short" - # and return. - if processed_bytes.value == 0: - # Timed out, no data read. - raise socket.timeout("recv timed out") - elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): - # The remote peer has closed this connection. We should do so as - # well. Note that we don't actually return here because in - # principle this could actually be fired along with return data. - # It's unlikely though. - self.close() - else: - _assert_no_error(result) - - # Ok, we read and probably succeeded. We should return whatever data - # was actually read. - return processed_bytes.value - - def settimeout(self, timeout): - self._timeout = timeout - - def gettimeout(self): - return self._timeout - - def send(self, data): - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLWrite( - self.context, data, len(data), ctypes.byref(processed_bytes) - ) - - if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: - # Timed out - raise socket.timeout("send timed out") - else: - _assert_no_error(result) - - # We sent, and probably succeeded. Tell them how much we sent. - return processed_bytes.value - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - with self._raise_on_error(): - Security.SSLClose(self.context) - - def close(self): - # TODO: should I do clean shutdown here? Do I have to? - if self._makefile_refs < 1: - self._closed = True - if self.context: - CoreFoundation.CFRelease(self.context) - self.context = None - if self._client_cert_chain: - CoreFoundation.CFRelease(self._client_cert_chain) - self._client_cert_chain = None - if self._keychain: - Security.SecKeychainDelete(self._keychain) - CoreFoundation.CFRelease(self._keychain) - shutil.rmtree(self._keychain_dir) - self._keychain = self._keychain_dir = None - return self.socket.close() - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - # Urgh, annoying. - # - # Here's how we do this: - # - # 1. Call SSLCopyPeerTrust to get hold of the trust object for this - # connection. - # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. - # 3. To get the CN, call SecCertificateCopyCommonName and process that - # string so that it's of the appropriate type. - # 4. To get the SAN, we need to do something a bit more complex: - # a. Call SecCertificateCopyValues to get the data, requesting - # kSecOIDSubjectAltName. - # b. Mess about with this dictionary to try to get the SANs out. - # - # This is gross. Really gross. It's going to be a few hundred LoC extra - # just to repeat something that SecureTransport can *already do*. 
So my - # operating assumption at this time is that what we want to do is - # instead to just flag to urllib3 that it shouldn't do its own hostname - # validation when using SecureTransport. - if not binary_form: - raise ValueError( - "SecureTransport only supports dumping binary certs" - ) - trust = Security.SecTrustRef() - certdata = None - der_bytes = None - - try: - # Grab the trust store. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) - _assert_no_error(result) - if not trust: - # Probably we haven't done the handshake yet. No biggie. - return None - - cert_count = Security.SecTrustGetCertificateCount(trust) - if not cert_count: - # Also a case that might happen if we haven't handshaked. - # Handshook? Handshaken? - return None - - leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) - assert leaf - - # Ok, now we want the DER bytes. - certdata = Security.SecCertificateCopyData(leaf) - assert certdata - - data_length = CoreFoundation.CFDataGetLength(certdata) - data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) - der_bytes = ctypes.string_at(data_buffer, data_length) - finally: - if certdata: - CoreFoundation.CFRelease(certdata) - if trust: - CoreFoundation.CFRelease(trust) - - return der_bytes - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) -else: # Platform-specific: Python 3 - def makefile(self, mode="r", buffering=None, *args, **kwargs): - # We disable buffering with SecureTransport because it conflicts with - # the buffering that ST does internally (see issue #1153 for more). - buffering = 0 - return backport_makefile(self, mode, buffering, *args, **kwargs) - -WrappedSocket.makefile = makefile - - -class SecureTransportContext(object): - """ - I am a wrapper class for the SecureTransport library, to translate the - interface of the standard library ``SSLContext`` object to calls into - SecureTransport. - """ - def __init__(self, protocol): - self._min_version, self._max_version = _protocol_to_min_max[protocol] - self._options = 0 - self._verify = False - self._trust_bundle = None - self._client_cert = None - self._client_key = None - self._client_key_passphrase = None - - @property - def check_hostname(self): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - return True - - @check_hostname.setter - def check_hostname(self, value): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - pass - - @property - def options(self): - # TODO: Well, crap. - # - # So this is the bit of the code that is the most likely to cause us - # trouble. Essentially we need to enumerate all of the SSL options that - # users might want to use and try to see if we can sensibly translate - # them, or whether we should just ignore them. - return self._options - - @options.setter - def options(self, value): - # TODO: Update in line with above. 
-        self._options = value
-
-    @property
-    def verify_mode(self):
-        return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
-
-    @verify_mode.setter
-    def verify_mode(self, value):
-        self._verify = True if value == ssl.CERT_REQUIRED else False
-
-    def set_default_verify_paths(self):
-        # So, this has to do something a bit weird. Specifically, what it does
-        # is nothing.
-        #
-        # This means that, if we had previously had load_verify_locations
-        # called, this does not undo that. We need to do that because it turns
-        # out that the rest of the urllib3 code will attempt to load the
-        # default verify paths if it hasn't been told about any paths, even if
-        # the context itself was configured sometime earlier. We resolve that
-        # by just ignoring it.
-        pass
-
-    def load_default_certs(self):
-        return self.set_default_verify_paths()
-
-    def set_ciphers(self, ciphers):
-        # For now, we just require the default cipher string.
-        if ciphers != util.ssl_.DEFAULT_CIPHERS:
-            raise ValueError(
-                "SecureTransport doesn't support custom cipher strings"
-            )
-
-    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
-        # OK, we only really support cadata and cafile.
-        if capath is not None:
-            raise ValueError(
-                "SecureTransport does not support cert directories"
-            )
-
-        self._trust_bundle = cafile or cadata
-
-    def load_cert_chain(self, certfile, keyfile=None, password=None):
-        self._client_cert = certfile
-        self._client_key = keyfile
-        self._client_key_passphrase = password
-
-    def wrap_socket(self, sock, server_side=False,
-                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
-                    server_hostname=None):
-        # So, what do we do here? Firstly, we assert some properties. This is a
-        # stripped down shim, so there is some functionality we don't support.
-        # See PEP 543 for the real deal.
-        assert not server_side
-        assert do_handshake_on_connect
-        assert suppress_ragged_eofs
-
-        # Ok, we're good to go. Now we want to create the wrapped socket object
-        # and store it in the appropriate place.
-        wrapped_socket = WrappedSocket(sock)
-
-        # Now we can handshake
-        wrapped_socket.handshake(
-            server_hostname, self._verify, self._trust_bundle,
-            self._min_version, self._max_version, self._client_cert,
-            self._client_key, self._client_key_passphrase
-        )
-        return wrapped_socket
diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/python.d/python_modules/urllib3/contrib/socks.py
deleted file mode 100644
index 39e92fde1..000000000
--- a/python.d/python_modules/urllib3/contrib/socks.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
-SOCKS5. To enable its functionality, either install PySocks or install this
-module with the ``socks`` extra.
-
-The SOCKS implementation supports the full range of urllib3 features. It also
-supports the following SOCKS features:
-
-- SOCKS4
-- SOCKS4a
-- SOCKS5
-- Usernames and passwords for the SOCKS proxy
-
-Known Limitations:
-
-- Currently PySocks does not support contacting remote websites via literal
-  IPv6 addresses. Any such connection attempt will fail. You must use a domain
-  name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
-  such connection attempt will fail.
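A minimal usage sketch (the proxy address is illustrative; ``SOCKSProxyManager``
is defined later in this module)::

    from urllib3.contrib.socks import SOCKSProxyManager

    # socks5h:// resolves hostnames on the proxy; socks5:// resolves locally.
    proxy = SOCKSProxyManager('socks5h://localhost:1080/')
    response = proxy.request('GET', 'https://example.com/')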
-""" -from __future__ import absolute_import - -try: - import socks -except ImportError: - import warnings - from ..exceptions import DependencyWarning - - warnings.warn(( - 'SOCKS support in urllib3 requires the installation of optional ' - 'dependencies: specifically, PySocks. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' - ), - DependencyWarning - ) - raise - -from socket import error as SocketError, timeout as SocketTimeout - -from ..connection import ( - HTTPConnection, HTTPSConnection -) -from ..connectionpool import ( - HTTPConnectionPool, HTTPSConnectionPool -) -from ..exceptions import ConnectTimeoutError, NewConnectionError -from ..poolmanager import PoolManager -from ..util.url import parse_url - -try: - import ssl -except ImportError: - ssl = None - - -class SOCKSConnection(HTTPConnection): - """ - A plain-text HTTP connection that connects via a SOCKS proxy. - """ - def __init__(self, *args, **kwargs): - self._socks_options = kwargs.pop('_socks_options') - super(SOCKSConnection, self).__init__(*args, **kwargs) - - def _new_conn(self): - """ - Establish a new connection via the SOCKS proxy. - """ - extra_kw = {} - if self.source_address: - extra_kw['source_address'] = self.source_address - - if self.socket_options: - extra_kw['socket_options'] = self.socket_options - - try: - conn = socks.create_connection( - (self.host, self.port), - proxy_type=self._socks_options['socks_version'], - proxy_addr=self._socks_options['proxy_host'], - proxy_port=self._socks_options['proxy_port'], - proxy_username=self._socks_options['username'], - proxy_password=self._socks_options['password'], - proxy_rdns=self._socks_options['rdns'], - timeout=self.timeout, - **extra_kw - ) - - except SocketTimeout as e: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) - - except socks.ProxyError as e: - # This is fragile as hell, but it seems to be the only way to raise - # useful errors here. - if e.socket_err: - error = e.socket_err - if isinstance(error, SocketTimeout): - raise ConnectTimeoutError( - self, - "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout) - ) - else: - raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % error - ) - else: - raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % e - ) - - except SocketError as e: # Defensive: PySocks should catch all these. - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) - - return conn - - -# We don't need to duplicate the Verified/Unverified distinction from -# urllib3/connection.py here because the HTTPSConnection will already have been -# correctly set to either the Verified or Unverified form by that module. This -# means the SOCKSHTTPSConnection will automatically be the correct type. -class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): - pass - - -class SOCKSHTTPConnectionPool(HTTPConnectionPool): - ConnectionCls = SOCKSConnection - - -class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): - ConnectionCls = SOCKSHTTPSConnection - - -class SOCKSProxyManager(PoolManager): - """ - A version of the urllib3 ProxyManager that routes connections via the - defined SOCKS proxy. 
- """ - pool_classes_by_scheme = { - 'http': SOCKSHTTPConnectionPool, - 'https': SOCKSHTTPSConnectionPool, - } - - def __init__(self, proxy_url, username=None, password=None, - num_pools=10, headers=None, **connection_pool_kw): - parsed = parse_url(proxy_url) - - if parsed.scheme == 'socks5': - socks_version = socks.PROXY_TYPE_SOCKS5 - rdns = False - elif parsed.scheme == 'socks5h': - socks_version = socks.PROXY_TYPE_SOCKS5 - rdns = True - elif parsed.scheme == 'socks4': - socks_version = socks.PROXY_TYPE_SOCKS4 - rdns = False - elif parsed.scheme == 'socks4a': - socks_version = socks.PROXY_TYPE_SOCKS4 - rdns = True - else: - raise ValueError( - "Unable to determine SOCKS version from %s" % proxy_url - ) - - self.proxy_url = proxy_url - - socks_options = { - 'socks_version': socks_version, - 'proxy_host': parsed.host, - 'proxy_port': parsed.port, - 'username': username, - 'password': password, - 'rdns': rdns - } - connection_pool_kw['_socks_options'] = socks_options - - super(SOCKSProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw - ) - - self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme diff --git a/python.d/python_modules/urllib3/exceptions.py b/python.d/python_modules/urllib3/exceptions.py deleted file mode 100644 index 6c4be5810..000000000 --- a/python.d/python_modules/urllib3/exceptions.py +++ /dev/null @@ -1,246 +0,0 @@ -from __future__ import absolute_import -from .packages.six.moves.http_client import ( - IncompleteRead as httplib_IncompleteRead -) -# Base Exceptions - - -class HTTPError(Exception): - "Base exception used by this module." - pass - - -class HTTPWarning(Warning): - "Base warning used by this module." - pass - - -class PoolError(HTTPError): - "Base exception for errors caused within a pool." - def __init__(self, pool, message): - self.pool = pool - HTTPError.__init__(self, "%s: %s" % (pool, message)) - - def __reduce__(self): - # For pickling purposes. - return self.__class__, (None, None) - - -class RequestError(PoolError): - "Base exception for PoolErrors that have associated URLs." - def __init__(self, pool, url, message): - self.url = url - PoolError.__init__(self, pool, message) - - def __reduce__(self): - # For pickling purposes. - return self.__class__, (None, self.url, None) - - -class SSLError(HTTPError): - "Raised when SSL certificate fails in an HTTPS connection." - pass - - -class ProxyError(HTTPError): - "Raised when the connection to a proxy fails." - pass - - -class DecodeError(HTTPError): - "Raised when automatic decoding based on Content-Type fails." - pass - - -class ProtocolError(HTTPError): - "Raised when something unexpected happens mid-request/response." - pass - - -#: Renamed to ProtocolError but aliased for backwards compatibility. -ConnectionError = ProtocolError - - -# Leaf Exceptions - -class MaxRetryError(RequestError): - """Raised when the maximum number of retries is exceeded. - - :param pool: The connection pool - :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` - :param string url: The requested Url - :param exceptions.Exception reason: The underlying error - - """ - - def __init__(self, pool, url, reason=None): - self.reason = reason - - message = "Max retries exceeded with url: %s (Caused by %r)" % ( - url, reason) - - RequestError.__init__(self, pool, url, message) - - -class HostChangedError(RequestError): - "Raised when an existing pool gets a request for a foreign host." 
-
-    def __init__(self, pool, url, retries=3):
-        message = "Tried to open a foreign host with url: %s" % url
-        RequestError.__init__(self, pool, url, message)
-        self.retries = retries
-
-
-class TimeoutStateError(HTTPError):
-    """ Raised when passing an invalid state to a timeout """
-    pass
-
-
-class TimeoutError(HTTPError):
-    """ Raised when a socket timeout error occurs.
-
-    Catching this error will catch both :exc:`ReadTimeoutErrors
-    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
-    """
-    pass
-
-
-class ReadTimeoutError(TimeoutError, RequestError):
-    "Raised when a socket timeout occurs while receiving data from a server"
-    pass
-
-
-# This timeout error does not have a URL attached and needs to inherit from the
-# base HTTPError
-class ConnectTimeoutError(TimeoutError):
-    "Raised when a socket timeout occurs while connecting to a server"
-    pass
-
-
-class NewConnectionError(ConnectTimeoutError, PoolError):
-    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
-    pass
-
-
-class EmptyPoolError(PoolError):
-    "Raised when a pool runs out of connections and no more are allowed."
-    pass
-
-
-class ClosedPoolError(PoolError):
-    "Raised when a request enters a pool after the pool has been closed."
-    pass
-
-
-class LocationValueError(ValueError, HTTPError):
-    "Raised when there is something wrong with a given URL input."
-    pass
-
-
-class LocationParseError(LocationValueError):
-    "Raised when get_host or similar fails to parse the URL input."
-
-    def __init__(self, location):
-        message = "Failed to parse: %s" % location
-        HTTPError.__init__(self, message)
-
-        self.location = location
-
-
-class ResponseError(HTTPError):
-    "Used as a container for an error reason supplied in a MaxRetryError."
-    GENERIC_ERROR = 'too many error responses'
-    SPECIFIC_ERROR = 'too many {status_code} error responses'
-
-
-class SecurityWarning(HTTPWarning):
-    "Warned when performing security-reducing actions"
-    pass
-
-
-class SubjectAltNameWarning(SecurityWarning):
-    "Warned when connecting to a host with a certificate missing a SAN."
-    pass
-
-
-class InsecureRequestWarning(SecurityWarning):
-    "Warned when making an unverified HTTPS request."
-    pass
-
-
-class SystemTimeWarning(SecurityWarning):
-    "Warned when system time is suspected to be wrong"
-    pass
-
-
-class InsecurePlatformWarning(SecurityWarning):
-    "Warned when certain SSL configuration is not available on a platform."
-    pass
-
-
-class SNIMissingWarning(HTTPWarning):
-    "Warned when making an HTTPS request without SNI available."
-    pass
-
-
-class DependencyWarning(HTTPWarning):
-    """
-    Warned when an attempt is made to import a module with missing optional
-    dependencies.
-    """
-    pass
-
-
-class ResponseNotChunked(ProtocolError, ValueError):
-    "Response needs to be chunked in order to read it as chunks."
-    pass
-
-
-class BodyNotHttplibCompatible(HTTPError):
-    """
-    Body should be httplib.HTTPResponse like (have an fp attribute which
-    returns raw chunks) for read_chunked().
-    """
-    pass
-
-
-class IncompleteRead(HTTPError, httplib_IncompleteRead):
-    """
-    Response length doesn't match expected Content-Length
-
-    Subclass of http_client.IncompleteRead to allow int value
-    for `partial` to avoid creating large objects on streamed
-    reads.
- """ - def __init__(self, partial, expected): - super(IncompleteRead, self).__init__(partial, expected) - - def __repr__(self): - return ('IncompleteRead(%i bytes read, ' - '%i more expected)' % (self.partial, self.expected)) - - -class InvalidHeader(HTTPError): - "The header provided was somehow invalid." - pass - - -class ProxySchemeUnknown(AssertionError, ValueError): - "ProxyManager does not support the supplied scheme" - # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. - - def __init__(self, scheme): - message = "Not supported proxy scheme %s" % scheme - super(ProxySchemeUnknown, self).__init__(message) - - -class HeaderParsingError(HTTPError): - "Raised by assert_header_parsing, but we convert it to a log.warning statement." - def __init__(self, defects, unparsed_data): - message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) - super(HeaderParsingError, self).__init__(message) - - -class UnrewindableBodyError(HTTPError): - "urllib3 encountered an error when trying to rewind a body" - pass diff --git a/python.d/python_modules/urllib3/fields.py b/python.d/python_modules/urllib3/fields.py deleted file mode 100644 index 19b0ae0c8..000000000 --- a/python.d/python_modules/urllib3/fields.py +++ /dev/null @@ -1,178 +0,0 @@ -from __future__ import absolute_import -import email.utils -import mimetypes - -from .packages import six - - -def guess_content_type(filename, default='application/octet-stream'): - """ - Guess the "Content-Type" of a file. - - :param filename: - The filename to guess the "Content-Type" of using :mod:`mimetypes`. - :param default: - If no "Content-Type" can be guessed, default to `default`. - """ - if filename: - return mimetypes.guess_type(filename)[0] or default - return default - - -def format_header_param(name, value): - """ - Helper function to format and quote a single header parameter. - - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows RFC 2231, as - suggested by RFC 2388 Section 4.4. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as a unicode string. - """ - if not any(ch in value for ch in '"\\\r\n'): - result = '%s="%s"' % (name, value) - try: - result.encode('ascii') - except (UnicodeEncodeError, UnicodeDecodeError): - pass - else: - return result - if not six.PY3 and isinstance(value, six.text_type): # Python 2: - value = value.encode('utf-8') - value = email.utils.encode_rfc2231(value, 'utf-8') - value = '%s*=%s' % (name, value) - return value - - -class RequestField(object): - """ - A data container for request body parameters. - - :param name: - The name of this request field. - :param data: - The data/value body. - :param filename: - An optional filename of the request field. - :param headers: - An optional dict-like object of headers to initially use for the field. - """ - def __init__(self, name, data, filename=None, headers=None): - self._name = name - self._filename = filename - self.data = data - self.headers = {} - if headers: - self.headers = dict(headers) - - @classmethod - def from_tuples(cls, fieldname, value): - """ - A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. - - Supports constructing :class:`~urllib3.fields.RequestField` from - parameter of key/value strings AND key/filetuple. A filetuple is a - (filename, data, MIME type) tuple where the MIME type is optional. 
-        For example::
-
-            'foo': 'bar',
-            'fakefile': ('foofile.txt', 'contents of foofile'),
-            'realfile': ('barfile.txt', open('realfile').read()),
-            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
-            'nonamefile': 'contents of nonamefile field',
-
-        Field names and filenames must be unicode.
-        """
-        if isinstance(value, tuple):
-            if len(value) == 3:
-                filename, data, content_type = value
-            else:
-                filename, data = value
-                content_type = guess_content_type(filename)
-        else:
-            filename = None
-            content_type = None
-            data = value
-
-        request_param = cls(fieldname, data, filename=filename)
-        request_param.make_multipart(content_type=content_type)
-
-        return request_param
-
-    def _render_part(self, name, value):
-        """
-        Overridable helper function to format a single header parameter.
-
-        :param name:
-            The name of the parameter, a string expected to be ASCII only.
-        :param value:
-            The value of the parameter, provided as a unicode string.
-        """
-        return format_header_param(name, value)
-
-    def _render_parts(self, header_parts):
-        """
-        Helper function to format and quote a single header.
-
-        Useful for single headers that are composed of multiple items. E.g.,
-        'Content-Disposition' fields.
-
-        :param header_parts:
-            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
-            as `k1="v1"; k2="v2"; ...`.
-        """
-        parts = []
-        iterable = header_parts
-        if isinstance(header_parts, dict):
-            iterable = header_parts.items()
-
-        for name, value in iterable:
-            if value is not None:
-                parts.append(self._render_part(name, value))
-
-        return '; '.join(parts)
-
-    def render_headers(self):
-        """
-        Renders the headers for this request field.
-        """
-        lines = []
-
-        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
-        for sort_key in sort_keys:
-            if self.headers.get(sort_key, False):
-                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
-
-        for header_name, header_value in self.headers.items():
-            if header_name not in sort_keys:
-                if header_value:
-                    lines.append('%s: %s' % (header_name, header_value))
-
-        lines.append('\r\n')
-        return '\r\n'.join(lines)
-
-    def make_multipart(self, content_disposition=None, content_type=None,
-                       content_location=None):
-        """
-        Makes this request field into a multipart request field.
-
-        This method sets the "Content-Disposition", "Content-Type" and
-        "Content-Location" headers on the request parameter.
-
-        :param content_type:
-            The 'Content-Type' of the request body.
-        :param content_location:
-            The 'Content-Location' of the request body.
-
-        """
-        self.headers['Content-Disposition'] = content_disposition or 'form-data'
-        self.headers['Content-Disposition'] += '; '.join([
-            '', self._render_parts(
-                (('name', self._name), ('filename', self._filename))
-            )
-        ])
-        self.headers['Content-Type'] = content_type
-        self.headers['Content-Location'] = content_location
diff --git a/python.d/python_modules/urllib3/filepost.py b/python.d/python_modules/urllib3/filepost.py
deleted file mode 100644
index cd11cee46..000000000
--- a/python.d/python_modules/urllib3/filepost.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import absolute_import
-import codecs
-
-from uuid import uuid4
-from io import BytesIO
-
-from .packages import six
-from .packages.six import b
-from .fields import RequestField
-
-writer = codecs.lookup('utf-8')[3]
-
-
-def choose_boundary():
-    """
-    Our embarrassingly-simple replacement for mimetools.choose_boundary.
-    """
-    return uuid4().hex
-
-
-def iter_field_objects(fields):
-    """
-    Iterate over fields.
- - Supports list of (k, v) tuples and dicts, and lists of - :class:`~urllib3.fields.RequestField`. - - """ - if isinstance(fields, dict): - i = six.iteritems(fields) - else: - i = iter(fields) - - for field in i: - if isinstance(field, RequestField): - yield field - else: - yield RequestField.from_tuples(*field) - - -def iter_fields(fields): - """ - .. deprecated:: 1.6 - - Iterate over fields. - - The addition of :class:`~urllib3.fields.RequestField` makes this function - obsolete. Instead, use :func:`iter_field_objects`, which returns - :class:`~urllib3.fields.RequestField` objects. - - Supports list of (k, v) tuples and dicts. - """ - if isinstance(fields, dict): - return ((k, v) for k, v in six.iteritems(fields)) - - return ((k, v) for k, v in fields) - - -def encode_multipart_formdata(fields, boundary=None): - """ - Encode a dictionary of ``fields`` using the multipart/form-data MIME format. - - :param fields: - Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). - - :param boundary: - If not specified, then a random boundary will be generated using - :func:`mimetools.choose_boundary`. - """ - body = BytesIO() - if boundary is None: - boundary = choose_boundary() - - for field in iter_field_objects(fields): - body.write(b('--%s\r\n' % (boundary))) - - writer(body).write(field.render_headers()) - data = field.data - - if isinstance(data, int): - data = str(data) # Backwards compatibility - - if isinstance(data, six.text_type): - writer(body).write(data) - else: - body.write(data) - - body.write(b'\r\n') - - body.write(b('--%s--\r\n' % (boundary))) - - content_type = str('multipart/form-data; boundary=%s' % boundary) - - return body.getvalue(), content_type diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/python.d/python_modules/urllib3/packages/__init__.py deleted file mode 100644 index 170e974c1..000000000 --- a/python.d/python_modules/urllib3/packages/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from . import ssl_match_hostname - -__all__ = ('ssl_match_hostname', ) diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/python.d/python_modules/urllib3/packages/backports/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python.d/python_modules/urllib3/packages/backports/makefile.py b/python.d/python_modules/urllib3/packages/backports/makefile.py deleted file mode 100644 index 75b80dcf8..000000000 --- a/python.d/python_modules/urllib3/packages/backports/makefile.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -""" -backports.makefile -~~~~~~~~~~~~~~~~~~ - -Backports the Python 3 ``socket.makefile`` method for use with anything that -wants to create a "fake" socket object. -""" -import io - -from socket import SocketIO - - -def backport_makefile(self, mode="r", buffering=None, encoding=None, - errors=None, newline=None): - """ - Backport of ``socket.makefile`` from Python 3.5. 
- """ - if not set(mode) <= set(["r", "w", "b"]): - raise ValueError( - "invalid mode %r (only r, w, b allowed)" % (mode,) - ) - writing = "w" in mode - reading = "r" in mode or not writing - assert reading or writing - binary = "b" in mode - rawmode = "" - if reading: - rawmode += "r" - if writing: - rawmode += "w" - raw = SocketIO(self, rawmode) - self._makefile_refs += 1 - if buffering is None: - buffering = -1 - if buffering < 0: - buffering = io.DEFAULT_BUFFER_SIZE - if buffering == 0: - if not binary: - raise ValueError("unbuffered streams must be binary") - return raw - if reading and writing: - buffer = io.BufferedRWPair(raw, raw, buffering) - elif reading: - buffer = io.BufferedReader(raw, buffering) - else: - assert writing - buffer = io.BufferedWriter(raw, buffering) - if binary: - return buffer - text = io.TextIOWrapper(buffer, encoding, errors, newline) - text.mode = mode - return text diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/python.d/python_modules/urllib3/packages/ordered_dict.py deleted file mode 100644 index 4479363cc..000000000 --- a/python.d/python_modules/urllib3/packages/ordered_dict.py +++ /dev/null @@ -1,259 +0,0 @@ -# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. -# Passes Python2.7's test suite and incorporates all the latest updates. -# Copyright 2009 Raymond Hettinger, released under the MIT License. -# http://code.activestate.com/recipes/576693/ -try: - from thread import get_ident as _get_ident -except ImportError: - from dummy_thread import get_ident as _get_ident - -try: - from _abcoll import KeysView, ValuesView, ItemsView -except ImportError: - pass - - -class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as for regular dictionaries. - - # The internal self.__map dictionary maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. - - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. Signature is the same as for - regular dictionaries, but keyword arguments are not recommended - because their insertion order is arbitrary. - - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link which goes at the end of the linked - # list, and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which is - # then removed by updating the links in the predecessor and successor nodes. 
- dict_delitem(self, key) - link_prev, link_next, key = self.__map.pop(key) - link_prev[1] = link_next - link_next[0] = link_prev - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - root = self.__root - curr = root[1] - while curr is not root: - yield curr[2] - curr = curr[1] - - def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - root = self.__root - curr = root[0] - while curr is not root: - yield curr[2] - curr = curr[0] - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - try: - for node in self.__map.itervalues(): - del node[:] - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - except AttributeError: - pass - dict.clear(self) - - def popitem(self, last=True): - '''od.popitem() -> (k, v), return and remove a (key, value) pair. - Pairs are returned in LIFO order if last is true or FIFO order if false. - - ''' - if not self: - raise KeyError('dictionary is empty') - root = self.__root - if last: - link = root[0] - link_prev = link[0] - link_prev[1] = root - root[0] = link_prev - else: - link = root[1] - link_next = link[1] - root[1] = link_next - link_next[0] = root - key = link[2] - del self.__map[key] - value = dict.pop(self, key) - return key, value - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) items in od' - for k in self: - yield (k, self[k]) - - def update(*args, **kwds): - '''od.update(E, **F) -> None. Update od from dict/iterable E and F. - - If E is a dict instance, does: for k in E: od[k] = E[k] - If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] - Or if E is an iterable of items, does: for k, v in E: od[k] = v - In either case, this is followed by: for k, v in F.items(): od[k] = v - - ''' - if len(args) > 2: - raise TypeError('update() takes at most 2 positional ' - 'arguments (%d given)' % (len(args),)) - elif not args: - raise TypeError('update() takes at least 1 argument (0 given)') - self = args[0] - # Make progressively weaker assumptions about "other" - other = () - if len(args) == 2: - other = args[1] - if isinstance(other, dict): - for key in other: - self[key] = other[key] - elif hasattr(other, 'keys'): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. 
- - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default - - def __repr__(self, _repr_running={}): - 'od.__repr__() <==> repr(od)' - call_key = id(self), _get_ident() - if call_key in _repr_running: - return '...' - _repr_running[call_key] = 1 - try: - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - finally: - del _repr_running[call_key] - - def __reduce__(self): - 'Return state information for pickling' - items = [[k, self[k]] for k in self] - inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def copy(self): - 'od.copy() -> a shallow copy of od' - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S - and values equal to v (which defaults to None). - - ''' - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive - while comparison to a regular mapping is order-insensitive. - - ''' - if isinstance(other, OrderedDict): - return len(self)==len(other) and self.items() == other.items() - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other - - # -- the following methods are only used in Python 2.7 -- - - def viewkeys(self): - "od.viewkeys() -> a set-like object providing a view on od's keys" - return KeysView(self) - - def viewvalues(self): - "od.viewvalues() -> an object providing a view on od's values" - return ValuesView(self) - - def viewitems(self): - "od.viewitems() -> a set-like object providing a view on od's items" - return ItemsView(self) diff --git a/python.d/python_modules/urllib3/packages/six.py b/python.d/python_modules/urllib3/packages/six.py deleted file mode 100644 index 190c0239c..000000000 --- a/python.d/python_modules/urllib3/packages/six.py +++ /dev/null @@ -1,868 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. - delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." 
+ fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", 
"http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." 
+ attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = 
_importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack 
- del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from from_value -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. 
-__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py deleted file mode 100644 index d6594eb26..000000000 --- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys - -try: - # Our match_hostname function is the same as 3.5's, so we only want to - # import the match_hostname function if it's at least that good. - if sys.version_info < (3, 5): - raise ImportError("Fallback to vendored code") - - from ssl import CertificateError, match_hostname -except ImportError: - try: - # Backport of the function from a pypi module - from backports.ssl_match_hostname import CertificateError, match_hostname - except ImportError: - # Our vendored copy - from ._implementation import CertificateError, match_hostname - -# Not needed, but documenting what we provide. -__all__ = ('CertificateError', 'match_hostname') diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py deleted file mode 100644 index 1fd42f38a..000000000 --- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py +++ /dev/null @@ -1,157 +0,0 @@ -"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" - -# Note: This file is under the PSF license as the code comes from the python -# stdlib. http://docs.python.org/3/license.html - -import re -import sys - -# ipaddress has been backported to 2.6+ in pypi. If it is installed on the -# system, use it to handle IPAddress ServerAltnames (this was added in -# python-3.5) otherwise only do DNS matching. This allows -# backports.ssl_match_hostname to continue to be used all the way back to -# python-2.4. -try: - import ipaddress -except ImportError: - ipaddress = None - -__version__ = '3.5.0.1' - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - # Ported from python3-syntax: - # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r'.') - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. 
A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def _to_unicode(obj): - if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding='ascii', errors='strict') - return obj - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - # Divergence from upstream: ipaddress can't handle byte str - ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - # Divergence from upstream: ipaddress can't handle byte str - host_ip = ipaddress.ip_address(_to_unicode(hostname)) - except ValueError: - # Not an IP address (common case) - host_ip = None - except UnicodeError: - # Divergence from upstream: Have to deal with ipaddress not taking - # byte strings. addresses should be all ascii, so we consider it not - # an ipaddress in this case - host_ip = None - except AttributeError: - # Divergence from upstream: Make ipaddress library optional - if ipaddress is None: - host_ip = None - else: - raise - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
- if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") diff --git a/python.d/python_modules/urllib3/poolmanager.py b/python.d/python_modules/urllib3/poolmanager.py deleted file mode 100644 index 4ae91744d..000000000 --- a/python.d/python_modules/urllib3/poolmanager.py +++ /dev/null @@ -1,440 +0,0 @@ -from __future__ import absolute_import -import collections -import functools -import logging - -from ._collections import RecentlyUsedContainer -from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool -from .connectionpool import port_by_scheme -from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown -from .packages.six.moves.urllib.parse import urljoin -from .request import RequestMethods -from .util.url import parse_url -from .util.retry import Retry - - -__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] - - -log = logging.getLogger(__name__) - -SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version', 'ca_cert_dir', 'ssl_context') - -# All known keyword arguments that could be provided to the pool manager, its -# pools, or the underlying connections. This is used to construct a pool key. -_key_fields = ( - 'key_scheme', # str - 'key_host', # str - 'key_port', # int - 'key_timeout', # int or float or Timeout - 'key_retries', # int or Retry - 'key_strict', # bool - 'key_block', # bool - 'key_source_address', # str - 'key_key_file', # str - 'key_cert_file', # str - 'key_cert_reqs', # str - 'key_ca_certs', # str - 'key_ssl_version', # str - 'key_ca_cert_dir', # str - 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - 'key_maxsize', # int - 'key_headers', # dict - 'key__proxy', # parsed proxy url - 'key__proxy_headers', # dict - 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples - 'key__socks_options', # dict - 'key_assert_hostname', # bool or string - 'key_assert_fingerprint', # str -) - -#: The namedtuple class used to construct keys for the connection pool. -#: All custom key schemes should include the fields in this key at a minimum. -PoolKey = collections.namedtuple('PoolKey', _key_fields) - - -def _default_key_normalizer(key_class, request_context): - """ - Create a pool key out of a request context dictionary. - - According to RFC 3986, both the scheme and host are case-insensitive. - Therefore, this function normalizes both before constructing the pool - key for an HTTPS request. If you wish to change this behaviour, provide - alternate callables to ``key_fn_by_scheme``. - - :param key_class: - The class to use when constructing the key. This should be a namedtuple - with the ``scheme`` and ``host`` keys at a minimum. - :type key_class: namedtuple - :param request_context: - A dictionary-like object that contain the context for a request. - :type request_context: dict - - :return: A namedtuple that can be used as a connection pool key. 
- :rtype: PoolKey - """ - # Since we mutate the dictionary, make a copy first - context = request_context.copy() - context['scheme'] = context['scheme'].lower() - context['host'] = context['host'].lower() - - # These are both dictionaries and need to be transformed into frozensets - for key in ('headers', '_proxy_headers', '_socks_options'): - if key in context and context[key] is not None: - context[key] = frozenset(context[key].items()) - - # The socket_options key may be a list and needs to be transformed into a - # tuple. - socket_opts = context.get('socket_options') - if socket_opts is not None: - context['socket_options'] = tuple(socket_opts) - - # Map the kwargs to the names in the namedtuple - this is necessary since - # namedtuples can't have fields starting with '_'. - for key in list(context.keys()): - context['key_' + key] = context.pop(key) - - # Default to ``None`` for keys missing from the context - for field in key_class._fields: - if field not in context: - context[field] = None - - return key_class(**context) - - -#: A dictionary that maps a scheme to a callable that creates a pool key. -#: This can be used to alter the way pool keys are constructed, if desired. -#: Each PoolManager makes a copy of this dictionary so they can be configured -#: globally here, or individually on the instance. -key_fn_by_scheme = { - 'http': functools.partial(_default_key_normalizer, PoolKey), - 'https': functools.partial(_default_key_normalizer, PoolKey), -} - -pool_classes_by_scheme = { - 'http': HTTPConnectionPool, - 'https': HTTPSConnectionPool, -} - - -class PoolManager(RequestMethods): - """ - Allows for arbitrary requests while transparently keeping track of - necessary connection pools for you. - - :param num_pools: - Number of connection pools to cache before discarding the least - recently used pool. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param \\**connection_pool_kw: - Additional parameters are used to create fresh - :class:`urllib3.connectionpool.ConnectionPool` instances. - - Example:: - - >>> manager = PoolManager(num_pools=2) - >>> r = manager.request('GET', 'http://google.com/') - >>> r = manager.request('GET', 'http://google.com/mail') - >>> r = manager.request('GET', 'http://yahoo.com/') - >>> len(manager.pools) - 2 - - """ - - proxy = None - - def __init__(self, num_pools=10, headers=None, **connection_pool_kw): - RequestMethods.__init__(self, headers) - self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, - dispose_func=lambda p: p.close()) - - # Locally set the pool classes and keys so other PoolManagers can - # override them. - self.pool_classes_by_scheme = pool_classes_by_scheme - self.key_fn_by_scheme = key_fn_by_scheme.copy() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.clear() - # Return False to re-raise any potential exceptions - return False - - def _new_pool(self, scheme, host, port, request_context=None): - """ - Create a new :class:`ConnectionPool` based on host, port, scheme, and - any additional pool keyword arguments. - - If ``request_context`` is provided, it is provided as keyword arguments - to the pool class used. This method is used to actually create the - connection pools handed out by :meth:`connection_from_url` and - companion methods. It is intended to be overridden for customization. 
- """ - pool_cls = self.pool_classes_by_scheme[scheme] - if request_context is None: - request_context = self.connection_pool_kw.copy() - - # Although the context has everything necessary to create the pool, - # this function has historically only used the scheme, host, and port - # in the positional args. When an API change is acceptable these can - # be removed. - for key in ('scheme', 'host', 'port'): - request_context.pop(key, None) - - if scheme == 'http': - for kw in SSL_KEYWORDS: - request_context.pop(kw, None) - - return pool_cls(host, port, **request_context) - - def clear(self): - """ - Empty our store of pools and direct them all to close. - - This will not affect in-flight connections, but they will not be - re-used after completion. - """ - self.pools.clear() - - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): - """ - Get a :class:`ConnectionPool` based on the host, port, and scheme. - - If ``port`` isn't given, it will be derived from the ``scheme`` using - ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is - provided, it is merged with the instance's ``connection_pool_kw`` - variable and used to create the new connection pool, if one is - needed. - """ - - if not host: - raise LocationValueError("No host specified.") - - request_context = self._merge_pool_kwargs(pool_kwargs) - request_context['scheme'] = scheme or 'http' - if not port: - port = port_by_scheme.get(request_context['scheme'].lower(), 80) - request_context['port'] = port - request_context['host'] = host - - return self.connection_from_context(request_context) - - def connection_from_context(self, request_context): - """ - Get a :class:`ConnectionPool` based on the request context. - - ``request_context`` must at least contain the ``scheme`` key and its - value must be a key in ``key_fn_by_scheme`` instance variable. - """ - scheme = request_context['scheme'].lower() - pool_key_constructor = self.key_fn_by_scheme[scheme] - pool_key = pool_key_constructor(request_context) - - return self.connection_from_pool_key(pool_key, request_context=request_context) - - def connection_from_pool_key(self, pool_key, request_context=None): - """ - Get a :class:`ConnectionPool` based on the provided pool key. - - ``pool_key`` should be a namedtuple that only contains immutable - objects. At a minimum it must have the ``scheme``, ``host``, and - ``port`` fields. - """ - with self.pools.lock: - # If the scheme, host, or port doesn't match existing open - # connections, open a new ConnectionPool. - pool = self.pools.get(pool_key) - if pool: - return pool - - # Make a fresh ConnectionPool of the desired type - scheme = request_context['scheme'] - host = request_context['host'] - port = request_context['port'] - pool = self._new_pool(scheme, host, port, request_context=request_context) - self.pools[pool_key] = pool - - return pool - - def connection_from_url(self, url, pool_kwargs=None): - """ - Similar to :func:`urllib3.connectionpool.connection_from_url`. - - If ``pool_kwargs`` is not provided and a new pool needs to be - constructed, ``self.connection_pool_kw`` is used to initialize - the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` - is provided, it is used instead. Note that if a new pool does not - need to be created for the request, the provided ``pool_kwargs`` are - not used. 
- """ - u = parse_url(url) - return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, - pool_kwargs=pool_kwargs) - - def _merge_pool_kwargs(self, override): - """ - Merge a dictionary of override values for self.connection_pool_kw. - - This does not modify self.connection_pool_kw and returns a new dict. - Any keys in the override dictionary with a value of ``None`` are - removed from the merged dictionary. - """ - base_pool_kwargs = self.connection_pool_kw.copy() - if override: - for key, value in override.items(): - if value is None: - try: - del base_pool_kwargs[key] - except KeyError: - pass - else: - base_pool_kwargs[key] = value - return base_pool_kwargs - - def urlopen(self, method, url, redirect=True, **kw): - """ - Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` - with custom cross-host redirect logic and only sends the request-uri - portion of the ``url``. - - The given ``url`` parameter must be absolute, such that an appropriate - :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. - """ - u = parse_url(url) - conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - - kw['assert_same_host'] = False - kw['redirect'] = False - if 'headers' not in kw: - kw['headers'] = self.headers - - if self.proxy is not None and u.scheme == "http": - response = conn.urlopen(method, url, **kw) - else: - response = conn.urlopen(method, u.request_uri, **kw) - - redirect_location = redirect and response.get_redirect_location() - if not redirect_location: - return response - - # Support relative URLs for redirecting. - redirect_location = urljoin(url, redirect_location) - - # RFC 7231, Section 6.4.4 - if response.status == 303: - method = 'GET' - - retries = kw.get('retries') - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect) - - try: - retries = retries.increment(method, url, response=response, _pool=conn) - except MaxRetryError: - if retries.raise_on_redirect: - raise - return response - - kw['retries'] = retries - kw['redirect'] = redirect - - log.info("Redirecting %s -> %s", url, redirect_location) - return self.urlopen(method, redirect_location, **kw) - - -class ProxyManager(PoolManager): - """ - Behaves just like :class:`PoolManager`, but sends all requests through - the defined proxy, using the CONNECT method for HTTPS URLs. - - :param proxy_url: - The URL of the proxy to be used. - - :param proxy_headers: - A dictionary contaning headers that will be sent to the proxy. In case - of HTTP they are being sent with each request, while in the - HTTPS/CONNECT case they are sent only once. Could be used for proxy - authentication. 
-
-    Example:
-        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
-        >>> r1 = proxy.request('GET', 'http://google.com/')
-        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
-        >>> len(proxy.pools)
-        1
-        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
-        >>> r4 = proxy.request('GET', 'https://twitter.com/')
-        >>> len(proxy.pools)
-        3
-
-    """
-
-    def __init__(self, proxy_url, num_pools=10, headers=None,
-                 proxy_headers=None, **connection_pool_kw):
-
-        if isinstance(proxy_url, HTTPConnectionPool):
-            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
-                                        proxy_url.port)
-        proxy = parse_url(proxy_url)
-        if not proxy.port:
-            port = port_by_scheme.get(proxy.scheme, 80)
-            proxy = proxy._replace(port=port)
-
-        if proxy.scheme not in ("http", "https"):
-            raise ProxySchemeUnknown(proxy.scheme)
-
-        self.proxy = proxy
-        self.proxy_headers = proxy_headers or {}
-
-        connection_pool_kw['_proxy'] = self.proxy
-        connection_pool_kw['_proxy_headers'] = self.proxy_headers
-
-        super(ProxyManager, self).__init__(
-            num_pools, headers, **connection_pool_kw)
-
-    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
-        if scheme == "https":
-            return super(ProxyManager, self).connection_from_host(
-                host, port, scheme, pool_kwargs=pool_kwargs)
-
-        return super(ProxyManager, self).connection_from_host(
-            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
-
-    def _set_proxy_headers(self, url, headers=None):
-        """
-        Sets headers needed by proxies: specifically, the Accept and Host
-        headers. Only sets headers not provided by the user.
-        """
-        headers_ = {'Accept': '*/*'}
-
-        netloc = parse_url(url).netloc
-        if netloc:
-            headers_['Host'] = netloc
-
-        if headers:
-            headers_.update(headers)
-        return headers_
-
-    def urlopen(self, method, url, redirect=True, **kw):
-        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
-        u = parse_url(url)
-
-        if u.scheme == "http":
-            # For proxied HTTPS requests, httplib sets the necessary headers
-            # on the CONNECT to the proxy. For HTTP, we'll definitely
-            # need to set 'Host' at the very least.
-            headers = kw.get('headers', self.headers)
-            kw['headers'] = self._set_proxy_headers(url, headers)
-
-        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
-
-
-def proxy_from_url(url, **kw):
-    return ProxyManager(proxy_url=url, **kw)
diff --git a/python.d/python_modules/urllib3/request.py b/python.d/python_modules/urllib3/request.py
deleted file mode 100644
index c0fddff04..000000000
--- a/python.d/python_modules/urllib3/request.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from __future__ import absolute_import
-
-from .filepost import encode_multipart_formdata
-from .packages.six.moves.urllib.parse import urlencode
-
-
-__all__ = ['RequestMethods']
-
-
-class RequestMethods(object):
-    """
-    Convenience mixin for classes that implement a :meth:`urlopen` method, such
-    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
-    :class:`~urllib3.poolmanager.PoolManager`.
-
-    Provides behavior for making common types of HTTP request methods and
-    decides which type of request field encoding to use.
-
-    Specifically,
-
-    :meth:`.request_encode_url` is for sending requests whose fields are
-    encoded in the URL (such as GET, HEAD, DELETE).
-
-    :meth:`.request_encode_body` is for sending requests whose fields are
-    encoded in the *body* of the request using multipart or www-form-urlencoded
-    (such as for POST, PUT, PATCH).
-
-    :meth:`.request` is for making any kind of request; it will look up the
-    appropriate encoding format and use one of the above two methods to make
-    the request.
-
-    Initializer parameters:
-
-    :param headers:
-        Headers to include with all requests, unless other headers are given
-        explicitly.
-    """
-
-    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
-
-    def __init__(self, headers=None):
-        self.headers = headers or {}
-
-    def urlopen(self, method, url, body=None, headers=None,
-                encode_multipart=True, multipart_boundary=None,
-                **kw):  # Abstract
-        raise NotImplementedError("Classes extending RequestMethods must implement "
-                                  "their own ``urlopen`` method.")
-
-    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
-        """
-        Make a request using :meth:`urlopen` with the appropriate encoding of
-        ``fields`` based on the ``method`` used.
-
-        This is a convenience method that requires the least amount of manual
-        effort. It can be used in most situations, while still having the
-        option to drop down to more specific methods when necessary, such as
-        :meth:`request_encode_url`, :meth:`request_encode_body`,
-        or even the lowest level :meth:`urlopen`.
-        """
-        method = method.upper()
-
-        if method in self._encode_url_methods:
-            return self.request_encode_url(method, url, fields=fields,
-                                           headers=headers,
-                                           **urlopen_kw)
-        else:
-            return self.request_encode_body(method, url, fields=fields,
-                                            headers=headers,
-                                            **urlopen_kw)
-
-    def request_encode_url(self, method, url, fields=None, headers=None,
-                           **urlopen_kw):
-        """
-        Make a request using :meth:`urlopen` with the ``fields`` encoded in
-        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
-        """
-        if headers is None:
-            headers = self.headers
-
-        extra_kw = {'headers': headers}
-        extra_kw.update(urlopen_kw)
-
-        if fields:
-            url += '?' + urlencode(fields)
-
-        return self.urlopen(method, url, **extra_kw)
-
-    def request_encode_body(self, method, url, fields=None, headers=None,
-                            encode_multipart=True, multipart_boundary=None,
-                            **urlopen_kw):
-        """
-        Make a request using :meth:`urlopen` with the ``fields`` encoded in
-        the body. This is useful for request methods like POST, PUT, PATCH, etc.
-
-        When ``encode_multipart=True`` (default), then
-        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
-        the payload with the appropriate content type. Otherwise
-        :meth:`urllib.urlencode` is used with the
-        'application/x-www-form-urlencoded' content type.
-
-        Multipart encoding must be used when posting files, and it's reasonably
-        safe to use it at other times too. However, it may break request
-        signing, such as with OAuth.
-
-        Supports an optional ``fields`` parameter of key/value strings AND
-        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
-        the MIME type is optional. For example::
-
-            fields = {
-                'foo': 'bar',
-                'fakefile': ('foofile.txt', 'contents of foofile'),
-                'realfile': ('barfile.txt', open('realfile').read()),
-                'typedfile': ('bazfile.bin', open('bazfile').read(),
-                              'image/jpeg'),
-                'nonamefile': 'contents of nonamefile field',
-            }
-
-        When uploading a file, providing a filename (the first parameter of the
-        tuple) is optional but recommended to best mimic the behavior of
-        browsers.
-
-        Note that if ``headers`` are supplied, the 'Content-Type' header will
-        be overwritten because it depends on the dynamic random boundary string
-        which is used to compose the body of the request. The random boundary
-        string can be explicitly set with the ``multipart_boundary`` parameter.
-        """
-        if headers is None:
-            headers = self.headers
-
-        extra_kw = {'headers': {}}
-
-        if fields:
-            if 'body' in urlopen_kw:
-                raise TypeError(
-                    "request got values for both 'fields' and 'body', can only specify one.")
-
-            if encode_multipart:
-                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
-            else:
-                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
-
-            extra_kw['body'] = body
-            extra_kw['headers'] = {'Content-Type': content_type}
-
-        extra_kw['headers'].update(headers)
-        extra_kw.update(urlopen_kw)
-
-        return self.urlopen(method, url, **extra_kw)
diff --git a/python.d/python_modules/urllib3/response.py b/python.d/python_modules/urllib3/response.py
deleted file mode 100644
index 408d9996a..000000000
--- a/python.d/python_modules/urllib3/response.py
+++ /dev/null
@@ -1,622 +0,0 @@
-from __future__ import absolute_import
-from contextlib import contextmanager
-import zlib
-import io
-import logging
-from socket import timeout as SocketTimeout
-from socket import error as SocketError
-
-from ._collections import HTTPHeaderDict
-from .exceptions import (
-    BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
-    ResponseNotChunked, IncompleteRead, InvalidHeader
-)
-from .packages.six import string_types as basestring, binary_type, PY3
-from .packages.six.moves import http_client as httplib
-from .connection import HTTPException, BaseSSLError
-from .util.response import is_fp_closed, is_response_to_head
-
-log = logging.getLogger(__name__)
-
-
-class DeflateDecoder(object):
-
-    def __init__(self):
-        self._first_try = True
-        self._data = binary_type()
-        self._obj = zlib.decompressobj()
-
-    def __getattr__(self, name):
-        return getattr(self._obj, name)
-
-    def decompress(self, data):
-        if not data:
-            return data
-
-        if not self._first_try:
-            return self._obj.decompress(data)
-
-        self._data += data
-        try:
-            decompressed = self._obj.decompress(data)
-            if decompressed:
-                self._first_try = False
-                self._data = None
-            return decompressed
-        except zlib.error:
-            self._first_try = False
-            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
-            try:
-                return self.decompress(self._data)
-            finally:
-                self._data = None
-
-
-class GzipDecoder(object):
-
-    def __init__(self):
-        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
-
-    def __getattr__(self, name):
-        return getattr(self._obj, name)
-
-    def decompress(self, data):
-        if not data:
-            return data
-        return self._obj.decompress(data)
-
-
-def _get_decoder(mode):
-    if mode == 'gzip':
-        return GzipDecoder()
-
-    return DeflateDecoder()
-
-
-class HTTPResponse(io.IOBase):
-    """
-    HTTP Response container.
-
-    Backwards-compatible with httplib's HTTPResponse but the response ``body``
-    is loaded and decoded on-demand when the ``data`` property is accessed.
-    This class is also compatible with the Python standard library's :mod:`io`
-    module, and can hence be treated as a readable object in the context of
-    that framework.
-
-    Extra parameters for behaviour not present in httplib.HTTPResponse:
-
-    :param preload_content:
-        If True, the response's body will be preloaded during construction.
-
-    :param decode_content:
-        If True, the body is decoded according to its content-encoding headers
-        (like 'gzip' and 'deflate'); if False, decoding is skipped and the raw
-        data is used instead.
-
-    :param original_response:
-        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
-        object, it's convenient to include the original for debug purposes. It's
-        otherwise unused.
-
-    :param retries:
-        The last :class:`~urllib3.util.retry.Retry` that was used during
-        the request.
-
-    :param enforce_content_length:
-        Enforce content length checking. Body returned by server must match
-        value of Content-Length header, if present. Otherwise, raise error.
-    """
-
-    CONTENT_DECODERS = ['gzip', 'deflate']
-    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
-
-    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
-                 strict=0, preload_content=True, decode_content=True,
-                 original_response=None, pool=None, connection=None,
-                 retries=None, enforce_content_length=False, request_method=None):
-
-        if isinstance(headers, HTTPHeaderDict):
-            self.headers = headers
-        else:
-            self.headers = HTTPHeaderDict(headers)
-        self.status = status
-        self.version = version
-        self.reason = reason
-        self.strict = strict
-        self.decode_content = decode_content
-        self.retries = retries
-        self.enforce_content_length = enforce_content_length
-
-        self._decoder = None
-        self._body = None
-        self._fp = None
-        self._original_response = original_response
-        self._fp_bytes_read = 0
-
-        if body and isinstance(body, (basestring, binary_type)):
-            self._body = body
-
-        self._pool = pool
-        self._connection = connection
-
-        if hasattr(body, 'read'):
-            self._fp = body
-
-        # Are we using the chunked-style of transfer encoding?
-        self.chunked = False
-        self.chunk_left = None
-        tr_enc = self.headers.get('transfer-encoding', '').lower()
-        # Don't incur the penalty of creating a list and then discarding it
-        encodings = (enc.strip() for enc in tr_enc.split(","))
-        if "chunked" in encodings:
-            self.chunked = True
-
-        # Determine length of response
-        self.length_remaining = self._init_length(request_method)
-
-        # If requested, preload the body.
-        if preload_content and not self._body:
-            self._body = self.read(decode_content=decode_content)
-
-    def get_redirect_location(self):
-        """
-        Should we redirect and where to?
-
-        :returns: Truthy redirect location string if we got a redirect status
-            code and valid location. ``None`` if redirect status and no
-            location. ``False`` if not a redirect status code.
-        """
-        if self.status in self.REDIRECT_STATUSES:
-            return self.headers.get('location')
-
-        return False
-
-    def release_conn(self):
-        if not self._pool or not self._connection:
-            return
-
-        self._pool._put_conn(self._connection)
-        self._connection = None
-
-    @property
-    def data(self):
-        # For backwards compatibility with urllib3 0.4 and earlier.
-        if self._body:
-            return self._body
-
-        if self._fp:
-            return self.read(cache_content=True)
-
-    @property
-    def connection(self):
-        return self._connection
-
-    def tell(self):
-        """
-        Obtain the number of bytes pulled over the wire so far. May differ from
-        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
-        are encoded on the wire (e.g, compressed).
-        """
-        return self._fp_bytes_read
-
-    def _init_length(self, request_method):
-        """
-        Set initial length value for Response content if available.
-        """
-        length = self.headers.get('content-length')
-
-        if length is not None and self.chunked:
-            # This Response will fail with an IncompleteRead if it can't be
-            # received as chunked. This method falls back to attempt reading
-            # the response before raising an exception.
- log.warning("Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked.") - return None - - elif length is not None: - try: - # RFC 7230 section 3.3.2 specifies multiple content lengths can - # be sent in a single Content-Length header - # (e.g. Content-Length: 42, 42). This line ensures the values - # are all valid ints and that as long as the `set` length is 1, - # all values are the same. Otherwise, the header is invalid. - lengths = set([int(val) for val in length.split(',')]) - if len(lengths) > 1: - raise InvalidHeader("Content-Length contained multiple " - "unmatching values (%s)" % length) - length = lengths.pop() - except ValueError: - length = None - else: - if length < 0: - length = None - - # Convert status to int for comparison - # In some cases, httplib returns a status of "_UNKNOWN" - try: - status = int(self.status) - except ValueError: - status = 0 - - # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': - length = 0 - - return length - - def _init_decoder(self): - """ - Set-up the _decoder attribute if necessary. - """ - # Note: content-encoding value should be case-insensitive, per RFC 7230 - # Section 3.2 - content_encoding = self.headers.get('content-encoding', '').lower() - if self._decoder is None and content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) - - def _decode(self, data, decode_content, flush_decoder): - """ - Decode the data passed in and potentially flush the decoder. - """ - try: - if decode_content and self._decoder: - data = self._decoder.decompress(data) - except (IOError, zlib.error) as e: - content_encoding = self.headers.get('content-encoding', '').lower() - raise DecodeError( - "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, e) - - if flush_decoder and decode_content: - data += self._flush_decoder() - - return data - - def _flush_decoder(self): - """ - Flushes the decoder. Should only be called if the decoder is actually - being used. - """ - if self._decoder: - buf = self._decoder.decompress(b'') - return buf + self._decoder.flush() - - return b'' - - @contextmanager - def _error_catcher(self): - """ - Catch low-level python exceptions, instead re-raising urllib3 - variants, so that low-level exceptions are not leaked in the - high-level api. - - On exit, release the connection back to the pool. - """ - clean_exit = False - - try: - try: - yield - - except SocketTimeout: - # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but - # there is yet no clean way to get at it from this context. - raise ReadTimeoutError(self._pool, None, 'Read timed out.') - - except BaseSSLError as e: - # FIXME: Is there a better way to differentiate between SSLErrors? - if 'read operation timed out' not in str(e): # Defensive: - # This shouldn't happen but just in case we're missing an edge - # case, let's avoid swallowing SSL errors. - raise - - raise ReadTimeoutError(self._pool, None, 'Read timed out.') - - except (HTTPException, SocketError) as e: - # This includes IncompleteRead. - raise ProtocolError('Connection broken: %r' % e, e) - - # If no exception is thrown, we should avoid cleaning up - # unnecessarily. 
-            clean_exit = True
-        finally:
-            # If we didn't terminate cleanly, we need to throw away our
-            # connection.
-            if not clean_exit:
-                # The response may not be closed but we're not going to use it
-                # anymore so close it now to ensure that the connection is
-                # released back to the pool.
-                if self._original_response:
-                    self._original_response.close()
-
-                # Closing the response may not actually be sufficient to close
-                # everything, so if we have a hold of the connection close that
-                # too.
-                if self._connection:
-                    self._connection.close()
-
-            # If we hold the original response but it's closed now, we should
-            # return the connection back to the pool.
-            if self._original_response and self._original_response.isclosed():
-                self.release_conn()
-
-    def read(self, amt=None, decode_content=None, cache_content=False):
-        """
-        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
-        parameters: ``decode_content`` and ``cache_content``.
-
-        :param amt:
-            How much of the content to read. If specified, caching is skipped
-            because it doesn't make sense to cache partial content as the full
-            response.
-
-        :param decode_content:
-            If True, will attempt to decode the body based on the
-            'content-encoding' header.
-
-        :param cache_content:
-            If True, will save the returned data such that the same result is
-            returned regardless of the state of the underlying file object.
-            This is useful if you want the ``.data`` property to continue
-            working after having ``.read()`` the file object. (Overridden if
-            ``amt`` is set.)
-        """
-        self._init_decoder()
-        if decode_content is None:
-            decode_content = self.decode_content
-
-        if self._fp is None:
-            return
-
-        flush_decoder = False
-        data = None
-
-        with self._error_catcher():
-            if amt is None:
-                # cStringIO doesn't like amt=None
-                data = self._fp.read()
-                flush_decoder = True
-            else:
-                cache_content = False
-                data = self._fp.read(amt)
-                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
-                    # Close the connection when no data is returned
-                    #
-                    # This is redundant to what httplib/http.client _should_
-                    # already do. However, versions of python released before
-                    # December 15, 2012 (http://bugs.python.org/issue16298) do
-                    # not properly close the connection in all cases. There is
-                    # no harm in redundantly calling close.
-                    self._fp.close()
-                    flush_decoder = True
-                    if self.enforce_content_length and self.length_remaining not in (0, None):
-                        # This is an edge case that httplib failed to cover due
-                        # to concerns of backward compatibility. We're
-                        # addressing it here to make sure IncompleteRead is
-                        # raised during streaming, so all calls with incorrect
-                        # Content-Length are caught.
-                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
-
-        if data:
-            self._fp_bytes_read += len(data)
-            if self.length_remaining is not None:
-                self.length_remaining -= len(data)
-
-            data = self._decode(data, decode_content, flush_decoder)
-
-            if cache_content:
-                self._body = data
-
-        return data
-
-    def stream(self, amt=2**16, decode_content=None):
-        """
-        A generator wrapper for the read() method. A call will block until
-        ``amt`` bytes have been read from the connection or until the
-        connection is closed.
-
-        :param amt:
-            How much of the content to read. The generator will return up to
-            this much data per iteration, but may return less. This is
-            particularly likely when using compressed data. However, the
-            empty string will never be returned.
- - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - if self.chunked and self.supports_chunked_reads(): - for line in self.read_chunked(amt, decode_content=decode_content): - yield line - else: - while not is_fp_closed(self._fp): - data = self.read(amt=amt, decode_content=decode_content) - - if data: - yield data - - @classmethod - def from_httplib(ResponseCls, r, **response_kw): - """ - Given an :class:`httplib.HTTPResponse` instance ``r``, return a - corresponding :class:`urllib3.response.HTTPResponse` object. - - Remaining parameters are passed to the HTTPResponse constructor, along - with ``original_response=r``. - """ - headers = r.msg - - if not isinstance(headers, HTTPHeaderDict): - if PY3: # Python 3 - headers = HTTPHeaderDict(headers.items()) - else: # Python 2 - headers = HTTPHeaderDict.from_httplib(headers) - - # HTTPResponse objects in Python 3 don't have a .strict attribute - strict = getattr(r, 'strict', 0) - resp = ResponseCls(body=r, - headers=headers, - status=r.status, - version=r.version, - reason=r.reason, - strict=strict, - original_response=r, - **response_kw) - return resp - - # Backwards-compatibility methods for httplib.HTTPResponse - def getheaders(self): - return self.headers - - def getheader(self, name, default=None): - return self.headers.get(name, default) - - # Overrides from io.IOBase - def close(self): - if not self.closed: - self._fp.close() - - if self._connection: - self._connection.close() - - @property - def closed(self): - if self._fp is None: - return True - elif hasattr(self._fp, 'isclosed'): - return self._fp.isclosed() - elif hasattr(self._fp, 'closed'): - return self._fp.closed - else: - return True - - def fileno(self): - if self._fp is None: - raise IOError("HTTPResponse has no file to get a fileno from") - elif hasattr(self._fp, "fileno"): - return self._fp.fileno() - else: - raise IOError("The file-like object this HTTPResponse is wrapped " - "around has no file descriptor") - - def flush(self): - if self._fp is not None and hasattr(self._fp, 'flush'): - return self._fp.flush() - - def readable(self): - # This method is required for `io` module compatibility. - return True - - def readinto(self, b): - # This method is required for `io` module compatibility. - temp = self.read(len(b)) - if len(temp) == 0: - return 0 - else: - b[:len(temp)] = temp - return len(temp) - - def supports_chunked_reads(self): - """ - Checks if the underlying file-like object looks like a - httplib.HTTPResponse object. We do this by testing for the fp - attribute. If it is present we assume it returns raw chunks as - processed by read_chunked(). - """ - return hasattr(self._fp, 'fp') - - def _update_chunk_length(self): - # First, we'll figure out length of a chunk and then - # we'll try to read it from socket. - if self.chunk_left is not None: - return - line = self._fp.fp.readline() - line = line.split(b';', 1)[0] - try: - self.chunk_left = int(line, 16) - except ValueError: - # Invalid chunked protocol response, abort. - self.close() - raise httplib.IncompleteRead(line) - - def _handle_chunk(self, amt): - returned_chunk = None - if amt is None: - chunk = self._fp._safe_read(self.chunk_left) - returned_chunk = chunk - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. 
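For context, ``stream()`` above is the generator most callers use against this class; a hypothetical usage sketch (assuming a standard ``urllib3.PoolManager``; ``handle_chunk`` is a made-up consumer, not part of urllib3)::

    import urllib3

    http = urllib3.PoolManager()
    r = http.request('GET', 'http://example.com/', preload_content=False)
    try:
        for chunk in r.stream(1024, decode_content=True):
            handle_chunk(chunk)  # hypothetical consumer
    finally:
        r.release_conn()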
- self.chunk_left = None - elif amt < self.chunk_left: - value = self._fp._safe_read(amt) - self.chunk_left = self.chunk_left - amt - returned_chunk = value - elif amt == self.chunk_left: - value = self._fp._safe_read(amt) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - returned_chunk = value - else: # amt > self.chunk_left - returned_chunk = self._fp._safe_read(self.chunk_left) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - return returned_chunk - - def read_chunked(self, amt=None, decode_content=None): - """ - Similar to :meth:`HTTPResponse.read`, but with an additional - parameter: ``decode_content``. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - self._init_decoder() - # FIXME: Rewrite this method and make it a class with better structured logic. - if not self.chunked: - raise ResponseNotChunked( - "Response is not chunked. " - "Header 'transfer-encoding: chunked' is missing.") - if not self.supports_chunked_reads(): - raise BodyNotHttplibCompatible( - "Body should be httplib.HTTPResponse like. " - "It should have an fp attribute which returns raw chunks.") - - # Don't bother reading the body of a HEAD request. - if self._original_response and is_response_to_head(self._original_response): - self._original_response.close() - return - - with self._error_catcher(): - while True: - self._update_chunk_length() - if self.chunk_left == 0: - break - chunk = self._handle_chunk(amt) - decoded = self._decode(chunk, decode_content=decode_content, - flush_decoder=False) - if decoded: - yield decoded - - if decode_content: - # On CPython and PyPy, we should never need to flush the - # decoder. However, on Jython we *might* need to, so - # let's defensively do it anyway. - decoded = self._flush_decoder() - if decoded: # Platform-specific: Jython. - yield decoded - - # Chunk content ends with \r\n: discard it. - while True: - line = self._fp.fp.readline() - if not line: - # Some sites may not end with '\r\n'. - break - if line == b'\r\n': - break - - # We read everything; close the "file". - if self._original_response: - self._original_response.close() diff --git a/python.d/python_modules/urllib3/util/__init__.py b/python.d/python_modules/urllib3/util/__init__.py deleted file mode 100644 index 2f2770b62..000000000 --- a/python.d/python_modules/urllib3/util/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import -# For backwards compatibility, provide imports that used to be here.
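As ``_update_chunk_length()`` above shows, each chunk header is a hexadecimal size optionally followed by ';'-separated extensions; a quick illustration of the same parsing in plain Python, independent of the module::

    >>> line = b'1a2b;name=value\r\n'   # raw chunk header line
    >>> int(line.split(b';', 1)[0], 16)  # size is hex; extensions are dropped
    6699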
-from .connection import is_connection_dropped -from .request import make_headers -from .response import is_fp_closed -from .ssl_ import ( - SSLContext, - HAS_SNI, - IS_PYOPENSSL, - IS_SECURETRANSPORT, - assert_fingerprint, - resolve_cert_reqs, - resolve_ssl_version, - ssl_wrap_socket, -) -from .timeout import ( - current_time, - Timeout, -) - -from .retry import Retry -from .url import ( - get_host, - parse_url, - split_first, - Url, -) -from .wait import ( - wait_for_read, - wait_for_write -) - -__all__ = ( - 'HAS_SNI', - 'IS_PYOPENSSL', - 'IS_SECURETRANSPORT', - 'SSLContext', - 'Retry', - 'Timeout', - 'Url', - 'assert_fingerprint', - 'current_time', - 'is_connection_dropped', - 'is_fp_closed', - 'get_host', - 'parse_url', - 'make_headers', - 'resolve_cert_reqs', - 'resolve_ssl_version', - 'split_first', - 'ssl_wrap_socket', - 'wait_for_read', - 'wait_for_write' -) diff --git a/python.d/python_modules/urllib3/util/connection.py b/python.d/python_modules/urllib3/util/connection.py deleted file mode 100644 index bf699cfd0..000000000 --- a/python.d/python_modules/urllib3/util/connection.py +++ /dev/null @@ -1,130 +0,0 @@ -from __future__ import absolute_import -import socket -from .wait import wait_for_read -from .selectors import HAS_SELECT, SelectorError - - -def is_connection_dropped(conn): # Platform-specific - """ - Returns True if the connection is dropped and should be closed. - - :param conn: - :class:`httplib.HTTPConnection` object. - - Note: For platforms like AppEngine, this will always return ``False`` to - let the platform handle connection recycling transparently for us. - """ - sock = getattr(conn, 'sock', False) - if sock is False: # Platform-specific: AppEngine - return False - if sock is None: # Connection already closed (such as by httplib). - return True - - if not HAS_SELECT: - return False - - try: - return bool(wait_for_read(sock, timeout=0.0)) - except SelectorError: - return True - - -# This function is copied from socket.py in the Python 2.7 standard -# library test suite. Added to its signature is only `socket_options`. -# One additional modification is that we avoid binding to IPv6 servers -# discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, socket_options=None): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - A host of '' or port 0 tells the OS to use the default. - """ - - host, port = address - if host.startswith('['): - host = host.strip('[]') - err = None - - # Using the value from allowed_gai_family() in the context of getaddrinfo lets - # us select whether to work with IPv4 DNS records, IPv6 records, or both. - # The original create_connection function always returns all records. - family = allowed_gai_family() - - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket.socket(af, socktype, proto) - - # If provided, set socket level options before connecting.
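For reference, the ``socket_options`` hook used here takes ``setsockopt`` argument tuples; a hypothetical call that disables Nagle's algorithm on the resulting connection (everything other than ``create_connection`` itself is standard library)::

    import socket
    sock = create_connection(('example.com', 80), timeout=5.0,
                             socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])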
- _set_socket_options(sock, socket_options) - - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except socket.error as e: - err = e - if sock is not None: - sock.close() - sock = None - - if err is not None: - raise err - - raise socket.error("getaddrinfo returns an empty list") - - -def _set_socket_options(sock, options): - if options is None: - return - - for opt in options: - sock.setsockopt(*opt) - - -def allowed_gai_family(): - """This function is designed to work in the context of - getaddrinfo, where family=socket.AF_UNSPEC is the default and - will perform a DNS search for both IPv6 and IPv4 records.""" - - family = socket.AF_INET - if HAS_IPV6: - family = socket.AF_UNSPEC - return family - - -def _has_ipv6(host): - """ Returns True if the system can bind an IPv6 address. """ - sock = None - has_ipv6 = False - - if socket.has_ipv6: - # has_ipv6 returns true if cPython was compiled with IPv6 support. - # It does not tell us if the system has IPv6 support enabled. To - # determine that we must bind to an IPv6 address. - # https://github.com/shazow/urllib3/pull/611 - # https://bugs.python.org/issue658327 - try: - sock = socket.socket(socket.AF_INET6) - sock.bind((host, 0)) - has_ipv6 = True - except Exception: - pass - - if sock: - sock.close() - return has_ipv6 - - -HAS_IPV6 = _has_ipv6('::1') diff --git a/python.d/python_modules/urllib3/util/request.py b/python.d/python_modules/urllib3/util/request.py deleted file mode 100644 index 3ddfcd559..000000000 --- a/python.d/python_modules/urllib3/util/request.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import absolute_import -from base64 import b64encode - -from ..packages.six import b, integer_types -from ..exceptions import UnrewindableBodyError - -ACCEPT_ENCODING = 'gzip,deflate' -_FAILEDTELL = object() - - -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None, proxy_basic_auth=None, disable_cache=None): - """ - Shortcuts for generating request headers. - - :param keep_alive: - If ``True``, adds 'connection: keep-alive' header. - - :param accept_encoding: - Can be a boolean, list, or string. - ``True`` translates to 'gzip,deflate'. - List will get joined by comma. - String will be used as provided. - - :param user_agent: - String representing the user-agent you want, such as - "python-urllib3/0.6" - - :param basic_auth: - Colon-separated username:password string for 'authorization: basic ...' - auth header. - - :param proxy_basic_auth: - Colon-separated username:password string for 'proxy-authorization: basic ...' - auth header. - - :param disable_cache: - If ``True``, adds 'cache-control: no-cache' header. 
- - Example:: - - >>> make_headers(keep_alive=True, user_agent="Batman/1.0") - {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} - >>> make_headers(accept_encoding=True) - {'accept-encoding': 'gzip,deflate'} - """ - headers = {} - if accept_encoding: - if isinstance(accept_encoding, str): - pass - elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) - else: - accept_encoding = ACCEPT_ENCODING - headers['accept-encoding'] = accept_encoding - - if user_agent: - headers['user-agent'] = user_agent - - if keep_alive: - headers['connection'] = 'keep-alive' - - if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(b(basic_auth)).decode('utf-8') - - if proxy_basic_auth: - headers['proxy-authorization'] = 'Basic ' + \ - b64encode(b(proxy_basic_auth)).decode('utf-8') - - if disable_cache: - headers['cache-control'] = 'no-cache' - - return headers - - -def set_file_position(body, pos): - """ - If a position is provided, move file to that point. - Otherwise, we'll attempt to record a position for future use. - """ - if pos is not None: - rewind_body(body, pos) - elif getattr(body, 'tell', None) is not None: - try: - pos = body.tell() - except (IOError, OSError): - # This differentiates from None, allowing us to catch - # a failed `tell()` later when trying to rewind the body. - pos = _FAILEDTELL - - return pos - - -def rewind_body(body, body_pos): - """ - Attempt to rewind body to a certain position. - Primarily used for request redirects and retries. - - :param body: - File-like object that supports seek. - - :param int body_pos: - Position to seek to in file. - """ - body_seek = getattr(body, 'seek', None) - if body_seek is not None and isinstance(body_pos, integer_types): - try: - body_seek(body_pos) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect/retry.") - elif body_pos is _FAILEDTELL: - raise UnrewindableBodyError("Unable to record file position for rewinding " - "request body during a redirect/retry.") - else: - raise ValueError("body_pos must be of type integer, " - "instead it was %s." % type(body_pos)) diff --git a/python.d/python_modules/urllib3/util/response.py b/python.d/python_modules/urllib3/util/response.py deleted file mode 100644 index 67cf730ab..000000000 --- a/python.d/python_modules/urllib3/util/response.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import absolute_import -from ..packages.six.moves import http_client as httplib - -from ..exceptions import HeaderParsingError - - -def is_fp_closed(obj): - """ - Checks whether a given file-like object is closed. - - :param obj: - The file-like object to check. - """ - - try: - # Check `isclosed()` first, in case Python3 doesn't set `closed`. - # GH Issue #928 - return obj.isclosed() - except AttributeError: - pass - - try: - # Check via the official file-like-object way. - return obj.closed - except AttributeError: - pass - - try: - # Check if the object is a container for another file-like object that - # gets released on exhaustion (e.g. HTTPResponse). - return obj.fp is None - except AttributeError: - pass - - raise ValueError("Unable to determine whether fp is closed.") - - -def assert_header_parsing(headers): - """ - Asserts whether all headers have been successfully parsed. - Extracts encountered errors from the result of parsing headers. - - Only works on Python 3. - - :param headers: Headers to verify. - :type headers: `httplib.HTTPMessage`.
- - :raises urllib3.exceptions.HeaderParsingError: - If parsing errors are found. - """ - - # This will fail silently if we pass in the wrong kind of parameter. - # To make debugging easier add an explicit check. - if not isinstance(headers, httplib.HTTPMessage): - raise TypeError('expected httplib.HTTPMessage, got {0}.'.format( - type(headers))) - - defects = getattr(headers, 'defects', None) - get_payload = getattr(headers, 'get_payload', None) - - unparsed_data = None - if get_payload: # Platform-specific: Python 3. - unparsed_data = get_payload() - - if defects or unparsed_data: - raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) - - -def is_response_to_head(response): - """ - Checks whether the request of a response was a HEAD request. - Handles the quirks of AppEngine. - - :param response: - :type response: :class:`httplib.HTTPResponse` - """ - # FIXME: Can we do this somehow without accessing private httplib _method? - method = response._method - if isinstance(method, int): # Platform-specific: Appengine - return method == 3 - return method.upper() == 'HEAD' diff --git a/python.d/python_modules/urllib3/util/retry.py b/python.d/python_modules/urllib3/util/retry.py deleted file mode 100644 index c603cb490..000000000 --- a/python.d/python_modules/urllib3/util/retry.py +++ /dev/null @@ -1,401 +0,0 @@ -from __future__ import absolute_import -import time -import logging -from collections import namedtuple -from itertools import takewhile -import email -import re - -from ..exceptions import ( - ConnectTimeoutError, - MaxRetryError, - ProtocolError, - ReadTimeoutError, - ResponseError, - InvalidHeader, -) -from ..packages import six - - -log = logging.getLogger(__name__) - -# Data structure for representing the metadata of requests that result in a retry. -RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", - "status", "redirect_location"]) - - -class Retry(object): - """ Retry configuration. - - Each retry attempt will create a new Retry object with updated values, so - they can be safely reused. - - Retries can be defined as a default for a pool:: - - retries = Retry(connect=5, read=2, redirect=5) - http = PoolManager(retries=retries) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool):: - - response = http.request('GET', 'http://example.com/', retries=Retry(10)) - - Retries can be disabled by passing ``False``:: - - response = http.request('GET', 'http://example.com/', retries=False) - - Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless - retries are disabled, in which case the causing exception will be raised. - - :param int total: - Total number of retries to allow. Takes precedence over other counts. - - Set to ``None`` to remove this constraint and fall back on other - counts. It's a good idea to set this to some sensibly-high value to - account for unexpected edge cases and avoid infinite retry loops. - - Set to ``0`` to fail on the first retry. - - Set to ``False`` to disable and imply ``raise_on_redirect=False``. - - :param int connect: - How many connection-related errors to retry on. - - These are errors raised before the request is sent to the remote server, - which we assume has not triggered the server to process the request. - - Set to ``0`` to fail on the first retry of this type. - - :param int read: - How many times to retry on read errors.
- - These errors are raised after the request was sent to the server, so the - request may have side-effects. - - Set to ``0`` to fail on the first retry of this type. - - :param int redirect: - How many redirects to perform. Limit this to avoid infinite redirect - loops. - - A redirect is an HTTP response with a status code 301, 302, 303, 307 or - 308. - - Set to ``0`` to fail on the first retry of this type. - - Set to ``False`` to disable and imply ``raise_on_redirect=False``. - - :param int status: - How many times to retry on bad status codes. - - These are retries made on responses, where the status code matches - ``status_forcelist``. - - Set to ``0`` to fail on the first retry of this type. - - :param iterable method_whitelist: - Set of uppercased HTTP method verbs that we should retry on. - - By default, we only retry on methods which are considered to be - idempotent (multiple requests with the same parameters end with the - same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. - - Set to a ``False`` value to retry on any verb. - - :param iterable status_forcelist: - A set of integer HTTP status codes that we should force a retry on. - A retry is initiated if the request method is in ``method_whitelist`` - and the response status code is in ``status_forcelist``. - - By default, this is disabled with ``None``. - - :param float backoff_factor: - A backoff factor to apply between attempts after the second try - (most errors are resolved immediately by a second try without a - delay). urllib3 will sleep for:: - - {backoff factor} * (2 ^ ({number of total retries} - 1)) - - seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep - for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer - than :attr:`Retry.BACKOFF_MAX`. - - By default, backoff is disabled (set to 0). - - :param bool raise_on_redirect: Whether, if the number of redirects is - exhausted, to raise a MaxRetryError, or to return a response with a - response code in the 3xx range. - - :param bool raise_on_status: Similar meaning to ``raise_on_redirect``: - whether we should raise an exception, or return a response, - if status falls in ``status_forcelist`` range and retries have - been exhausted. - - :param tuple history: The history of the request encountered during - each call to :meth:`~Retry.increment`. The list is in the order - the requests occurred. Each list item is of class :class:`RequestHistory`. - - :param bool respect_retry_after_header: - Whether to respect the Retry-After header on status codes defined as - :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. - - """ - - DEFAULT_METHOD_WHITELIST = frozenset([ - 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) - - RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - - #: Maximum backoff time.
- BACKOFF_MAX = 120 - - def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, - method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, - backoff_factor=0, raise_on_redirect=True, raise_on_status=True, - history=None, respect_retry_after_header=True): - - self.total = total - self.connect = connect - self.read = read - self.status = status - - if redirect is False or total is False: - redirect = 0 - raise_on_redirect = False - - self.redirect = redirect - self.status_forcelist = status_forcelist or set() - self.method_whitelist = method_whitelist - self.backoff_factor = backoff_factor - self.raise_on_redirect = raise_on_redirect - self.raise_on_status = raise_on_status - self.history = history or tuple() - self.respect_retry_after_header = respect_retry_after_header - - def new(self, **kw): - params = dict( - total=self.total, - connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, - method_whitelist=self.method_whitelist, - status_forcelist=self.status_forcelist, - backoff_factor=self.backoff_factor, - raise_on_redirect=self.raise_on_redirect, - raise_on_status=self.raise_on_status, - history=self.history, - ) - params.update(kw) - return type(self)(**params) - - @classmethod - def from_int(cls, retries, redirect=True, default=None): - """ Backwards-compatibility for the old retries format.""" - if retries is None: - retries = default if default is not None else cls.DEFAULT - - if isinstance(retries, Retry): - return retries - - redirect = bool(redirect) and None - new_retries = cls(retries, redirect=redirect) - log.debug("Converted retries value: %r -> %r", retries, new_retries) - return new_retries - - def get_backoff_time(self): - """ Formula for computing the current backoff - - :rtype: float - """ - # We want to consider only the last consecutive errors sequence (Ignore redirects). - consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, - reversed(self.history)))) - if consecutive_errors_len <= 1: - return 0 - - backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) - return min(self.BACKOFF_MAX, backoff_value) - - def parse_retry_after(self, retry_after): - # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 - if re.match(r"^\s*[0-9]+\s*$", retry_after): - seconds = int(retry_after) - else: - retry_date_tuple = email.utils.parsedate(retry_after) - if retry_date_tuple is None: - raise InvalidHeader("Invalid Retry-After header: %s" % retry_after) - retry_date = time.mktime(retry_date_tuple) - seconds = retry_date - time.time() - - if seconds < 0: - seconds = 0 - - return seconds - - def get_retry_after(self, response): - """ Get the value of Retry-After in seconds. """ - - retry_after = response.getheader("Retry-After") - - if retry_after is None: - return None - - return self.parse_retry_after(retry_after) - - def sleep_for_retry(self, response=None): - retry_after = self.get_retry_after(response) - if retry_after: - time.sleep(retry_after) - return True - - return False - - def _sleep_backoff(self): - backoff = self.get_backoff_time() - if backoff <= 0: - return - time.sleep(backoff) - - def sleep(self, response=None): - """ Sleep between retry attempts. - - This method will respect a server's ``Retry-After`` response header - and sleep the duration of the time requested. If that is not present, it - will use an exponential backoff. By default, the backoff factor is 0 and - this method will return immediately. 
- """ - - if response: - slept = self.sleep_for_retry(response) - if slept: - return - - self._sleep_backoff() - - def _is_connection_error(self, err): - """ Errors when we're fairly sure that the server did not receive the - request, so it should be safe to retry. - """ - return isinstance(err, ConnectTimeoutError) - - def _is_read_error(self, err): - """ Errors that occur after the request has been started, so we should - assume that the server began processing it. - """ - return isinstance(err, (ReadTimeoutError, ProtocolError)) - - def _is_method_retryable(self, method): - """ Checks if a given HTTP method should be retried upon, depending if - it is included on the method whitelist. - """ - if self.method_whitelist and method.upper() not in self.method_whitelist: - return False - - return True - - def is_retry(self, method, status_code, has_retry_after=False): - """ Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - if not self._is_method_retryable(method): - return False - - if self.status_forcelist and status_code in self.status_forcelist: - return True - - return (self.total and self.respect_retry_after_header and - has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) - - def is_exhausted(self): - """ Are we out of retries? """ - retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - - return min(retry_counts) < 0 - - def increment(self, method=None, url=None, response=None, error=None, - _pool=None, _stacktrace=None): - """ Return a new Retry object with incremented retry counters. - - :param response: A response object, or None, if the server did not - return a response. - :type response: :class:`~urllib3.response.HTTPResponse` - :param Exception error: An error encountered during the request, or - None if the response was received successfully. - - :return: A new ``Retry`` object. - """ - if self.total is False and error: - # Disabled, indicate to re-raise the error. - raise six.reraise(type(error), error, _stacktrace) - - total = self.total - if total is not None: - total -= 1 - - connect = self.connect - read = self.read - redirect = self.redirect - status_count = self.status - cause = 'unknown' - status = None - redirect_location = None - - if error and self._is_connection_error(error): - # Connect retry? - if connect is False: - raise six.reraise(type(error), error, _stacktrace) - elif connect is not None: - connect -= 1 - - elif error and self._is_read_error(error): - # Read retry? - if read is False or not self._is_method_retryable(method): - raise six.reraise(type(error), error, _stacktrace) - elif read is not None: - read -= 1 - - elif response and response.get_redirect_location(): - # Redirect retry? 
- if redirect is not None: - redirect -= 1 - cause = 'too many redirects' - redirect_location = response.get_redirect_location() - status = response.status - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and the given method is in the whitelist - cause = ResponseError.GENERIC_ERROR - if response and response.status: - if status_count is not None: - status_count -= 1 - cause = ResponseError.SPECIFIC_ERROR.format( - status_code=response.status) - status = response.status - - history = self.history + (RequestHistory(method, url, error, status, redirect_location),) - - new_retry = self.new( - total=total, - connect=connect, read=read, redirect=redirect, status=status_count, - history=history) - - if new_retry.is_exhausted(): - raise MaxRetryError(_pool, url, error or ResponseError(cause)) - - log.debug("Incremented Retry for (url='%s'): %r", url, new_retry) - - return new_retry - - def __repr__(self): - return ('{cls.__name__}(total={self.total}, connect={self.connect}, ' - 'read={self.read}, redirect={self.redirect}, status={self.status})').format( - cls=type(self), self=self) - - -# For backwards compatibility (equivalent to pre-v1.9): -Retry.DEFAULT = Retry(3) diff --git a/python.d/python_modules/urllib3/util/selectors.py b/python.d/python_modules/urllib3/util/selectors.py deleted file mode 100644 index d75cb266b..000000000 --- a/python.d/python_modules/urllib3/util/selectors.py +++ /dev/null @@ -1,581 +0,0 @@ -# Backport of selectors.py from Python 3.5+ to support Python < 3.4 -# Also has the behavior specified in PEP 475 which is to retry syscalls -# in the case of an EINTR error. This module is required because selectors34 -# does not follow this behavior and instead returns that no file descriptor -# events have occurred rather than retry the syscall. The decision to drop -# support for select.devpoll is made to maintain 100% test coverage. - -import errno -import math -import select -import socket -import sys -import time -from collections import namedtuple, Mapping - -try: - monotonic = time.monotonic -except (AttributeError, ImportError): # Python < 3.3 - monotonic = time.time - -EVENT_READ = (1 << 0) -EVENT_WRITE = (1 << 1) - -HAS_SELECT = True # Variable that shows whether the platform has a selector. -_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None. -_DEFAULT_SELECTOR = None - - -class SelectorError(Exception): - def __init__(self, errcode): - super(SelectorError, self).__init__() - self.errno = errcode - - def __repr__(self): - return "<SelectorError errno={0}>".format(self.errno) - - def __str__(self): - return self.__repr__() - - -def _fileobj_to_fd(fileobj): - """ Return a file descriptor from a file object. If - given an integer will simply return that integer back. """ - if isinstance(fileobj, int): - fd = fileobj - else: - try: - fd = int(fileobj.fileno()) - except (AttributeError, TypeError, ValueError): - raise ValueError("Invalid file object: {0!r}".format(fileobj)) - if fd < 0: - raise ValueError("Invalid file descriptor: {0}".format(fd)) - return fd - - -# Determine which function to use to wrap system calls because Python 3.5+ -# already handles the case when system calls are interrupted. -if sys.version_info >= (3, 5): - def _syscall_wrapper(func, _, *args, **kwargs): - """ This is the short-circuit version of the below logic - because in Python 3.5+ all system calls automatically restart - and recalculate their timeouts.
""" - try: - return func(*args, **kwargs) - except (OSError, IOError, select.error) as e: - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - raise SelectorError(errcode) -else: - def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): - """ Wrapper function for syscalls that could fail due to EINTR. - All functions should be retried if there is time left in the timeout - in accordance with PEP 475. """ - timeout = kwargs.get("timeout", None) - if timeout is None: - expires = None - recalc_timeout = False - else: - timeout = float(timeout) - if timeout < 0.0: # Timeout less than 0 treated as no timeout. - expires = None - else: - expires = monotonic() + timeout - - args = list(args) - if recalc_timeout and "timeout" not in kwargs: - raise ValueError( - "Timeout must be in args or kwargs to be recalculated") - - result = _SYSCALL_SENTINEL - while result is _SYSCALL_SENTINEL: - try: - result = func(*args, **kwargs) - # OSError is thrown by select.select - # IOError is thrown by select.epoll.poll - # select.error is thrown by select.poll.poll - # Aren't we thankful for Python 3.x rework for exceptions? - except (OSError, IOError, select.error) as e: - # select.error wasn't a subclass of OSError in the past. - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - elif hasattr(e, "args"): - errcode = e.args[0] - - # Also test for the Windows equivalent of EINTR. - is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and - errcode == errno.WSAEINTR)) - - if is_interrupt: - if expires is not None: - current_time = monotonic() - if current_time > expires: - raise OSError(errno=errno.ETIMEDOUT) - if recalc_timeout: - if "timeout" in kwargs: - kwargs["timeout"] = expires - current_time - continue - if errcode: - raise SelectorError(errcode) - else: - raise - return result - - -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) - - -class _SelectorMapping(Mapping): - """ Mapping of file objects to selector keys """ - - def __init__(self, selector): - self._selector = selector - - def __len__(self): - return len(self._selector._fd_to_key) - - def __getitem__(self, fileobj): - try: - fd = self._selector._fileobj_lookup(fileobj) - return self._selector._fd_to_key[fd] - except KeyError: - raise KeyError("{0!r} is not registered.".format(fileobj)) - - def __iter__(self): - return iter(self._selector._fd_to_key) - - -class BaseSelector(object): - """ Abstract Selector class - - A selector supports registering file objects to be monitored - for specific I/O events. - - A file object is a file descriptor or any object with a - `fileno()` method. An arbitrary object can be attached to the - file object which can be used for example to store context info, - a callback, etc. - - A selector can use various implementations (select(), poll(), epoll(), - and kqueue()) depending on the platform. The 'DefaultSelector' class uses - the most efficient implementation for the current platform. - """ - def __init__(self): - # Maps file descriptors to keys. - self._fd_to_key = {} - - # Read-only mapping returned by get_map() - self._map = _SelectorMapping(self) - - def _fileobj_lookup(self, fileobj): - """ Return a file descriptor from a file object. - This wraps _fileobj_to_fd() to do an exhaustive - search in case the object is invalid but we still - have it in our map. Used by unregister() so we can - unregister an object that was previously registered - even if it is closed. 
It is also used by _SelectorMapping - """ - try: - return _fileobj_to_fd(fileobj) - except ValueError: - - # Search through all our mapped keys. - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - return key.fd - - # Raise ValueError after all. - raise - - def register(self, fileobj, events, data=None): - """ Register a file object for a set of events to monitor. """ - if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): - raise ValueError("Invalid events: {0!r}".format(events)) - - key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) - - if key.fd in self._fd_to_key: - raise KeyError("{0!r} (FD {1}) is already registered" - .format(fileobj, key.fd)) - - self._fd_to_key[key.fd] = key - return key - - def unregister(self, fileobj): - """ Unregister a file object from being monitored. """ - try: - key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - # Getting the fileno of a closed socket on Windows errors with EBADF. - except socket.error as e: # Platform-specific: Windows. - if e.errno != errno.EBADF: - raise - else: - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - self._fd_to_key.pop(key.fd) - break - else: - raise KeyError("{0!r} is not registered".format(fileobj)) - return key - - def modify(self, fileobj, events, data=None): - """ Change a registered file object monitored events and data. """ - # NOTE: Some subclasses optimize this operation even further. - try: - key = self._fd_to_key[self._fileobj_lookup(fileobj)] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - if events != key.events: - self.unregister(fileobj) - key = self.register(fileobj, events, data) - - elif data != key.data: - # Use a shortcut to update the data. - key = key._replace(data=data) - self._fd_to_key[key.fd] = key - - return key - - def select(self, timeout=None): - """ Perform the actual selection until some monitored file objects - are ready or the timeout expires. """ - raise NotImplementedError() - - def close(self): - """ Close the selector. This must be called to ensure that all - underlying resources are freed. """ - self._fd_to_key.clear() - self._map = None - - def get_key(self, fileobj): - """ Return the key associated with a registered file object. """ - mapping = self.get_map() - if mapping is None: - raise RuntimeError("Selector is closed") - try: - return mapping[fileobj] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - def get_map(self): - """ Return a mapping of file objects to selector keys """ - return self._map - - def _key_from_fd(self, fd): - """ Return the key associated to a given file descriptor - Return None if it is not found. """ - try: - return self._fd_to_key[fd] - except KeyError: - return None - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - -# Almost all platforms have select.select() -if hasattr(select, "select"): - class SelectSelector(BaseSelector): - """ Select-based selector. 
""" - def __init__(self): - super(SelectSelector, self).__init__() - self._readers = set() - self._writers = set() - - def register(self, fileobj, events, data=None): - key = super(SelectSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - self._readers.add(key.fd) - if events & EVENT_WRITE: - self._writers.add(key.fd) - return key - - def unregister(self, fileobj): - key = super(SelectSelector, self).unregister(fileobj) - self._readers.discard(key.fd) - self._writers.discard(key.fd) - return key - - def _select(self, r, w, timeout=None): - """ Wrapper for select.select because timeout is a positional arg """ - return select.select(r, w, [], timeout) - - def select(self, timeout=None): - # Selecting on empty lists on Windows errors out. - if not len(self._readers) and not len(self._writers): - return [] - - timeout = None if timeout is None else max(timeout, 0.0) - ready = [] - r, w, _ = _syscall_wrapper(self._select, True, self._readers, - self._writers, timeout) - r = set(r) - w = set(w) - for fd in r | w: - events = 0 - if fd in r: - events |= EVENT_READ - if fd in w: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - -if hasattr(select, "poll"): - class PollSelector(BaseSelector): - """ Poll-based selector """ - def __init__(self): - super(PollSelector, self).__init__() - self._poll = select.poll() - - def register(self, fileobj, events, data=None): - key = super(PollSelector, self).register(fileobj, events, data) - event_mask = 0 - if events & EVENT_READ: - event_mask |= select.POLLIN - if events & EVENT_WRITE: - event_mask |= select.POLLOUT - self._poll.register(key.fd, event_mask) - return key - - def unregister(self, fileobj): - key = super(PollSelector, self).unregister(fileobj) - self._poll.unregister(key.fd) - return key - - def _wrap_poll(self, timeout=None): - """ Wrapper function for select.poll.poll() so that - _syscall_wrapper can work with only seconds. """ - if timeout is not None: - if timeout <= 0: - timeout = 0 - else: - # select.poll.poll() has a resolution of 1 millisecond, - # round away from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1e3) - - result = self._poll.poll(timeout) - return result - - def select(self, timeout=None): - ready = [] - fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.POLLIN: - events |= EVENT_WRITE - if event_mask & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - - return ready - - -if hasattr(select, "epoll"): - class EpollSelector(BaseSelector): - """ Epoll-based selector """ - def __init__(self): - super(EpollSelector, self).__init__() - self._epoll = select.epoll() - - def fileno(self): - return self._epoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(EpollSelector, self).register(fileobj, events, data) - events_mask = 0 - if events & EVENT_READ: - events_mask |= select.EPOLLIN - if events & EVENT_WRITE: - events_mask |= select.EPOLLOUT - _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) - return key - - def unregister(self, fileobj): - key = super(EpollSelector, self).unregister(fileobj) - try: - _syscall_wrapper(self._epoll.unregister, False, key.fd) - except SelectorError: - # This can occur when the fd was closed since registry. 
- pass - return key - - def select(self, timeout=None): - if timeout is not None: - if timeout <= 0: - timeout = 0.0 - else: - # select.epoll.poll() has a resolution of 1 millisecond - # but luckily takes seconds so we don't need a wrapper - # like PollSelector. Just for better rounding. - timeout = math.ceil(timeout * 1e3) * 1e-3 - timeout = float(timeout) - else: - timeout = -1.0 # epoll.poll() must have a float. - - # We always want at least 1 to ensure that select can be called - # with no file descriptors registered. Otherwise will fail. - max_events = max(len(self._fd_to_key), 1) - - ready = [] - fd_events = _syscall_wrapper(self._epoll.poll, True, - timeout=timeout, - maxevents=max_events) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.EPOLLIN: - events |= EVENT_WRITE - if event_mask & ~select.EPOLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._epoll.close() - super(EpollSelector, self).close() - - -if hasattr(select, "kqueue"): - class KqueueSelector(BaseSelector): - """ Kqueue / Kevent-based selector """ - def __init__(self): - super(KqueueSelector, self).__init__() - self._kqueue = select.kqueue() - - def fileno(self): - return self._kqueue.fileno() - - def register(self, fileobj, events, data=None): - key = super(KqueueSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_ADD) - - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - - if events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_ADD) - - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - - return key - - def unregister(self, fileobj): - key = super(KqueueSelector, self).unregister(fileobj) - if key.events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - except SelectorError: - pass - if key.events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - except SelectorError: - pass - - return key - - def select(self, timeout=None): - if timeout is not None: - timeout = max(timeout, 0) - - max_events = len(self._fd_to_key) * 2 - ready_fds = {} - - kevent_list = _syscall_wrapper(self._kqueue.control, True, - None, max_events, timeout) - - for kevent in kevent_list: - fd = kevent.ident - event_mask = kevent.filter - events = 0 - if event_mask == select.KQ_FILTER_READ: - events |= EVENT_READ - if event_mask == select.KQ_FILTER_WRITE: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - if key.fd not in ready_fds: - ready_fds[key.fd] = (key, events & key.events) - else: - old_events = ready_fds[key.fd][1] - ready_fds[key.fd] = (key, (events | old_events) & key.events) - - return list(ready_fds.values()) - - def close(self): - self._kqueue.close() - super(KqueueSelector, self).close() - - -if not hasattr(select, 'select'): # Platform-specific: AppEngine - HAS_SELECT = False - - -def _can_allocate(struct): - """ Checks that select structs can be allocated by the underlying - operating system, not just advertised by the select module. We don't - check select() because we'll be hopeful that most platforms that - don't have it available will not advertise it. 
(i.e. GAE) """ - try: - # select.poll() objects won't fail until used. - if struct == 'poll': - p = select.poll() - p.poll(0) - - # All others will fail on allocation. - else: - getattr(select, struct)().close() - return True - except (OSError, AttributeError): - return False - - -# Choose the best implementation, roughly: -# kqueue == epoll > poll > select. Devpoll not supported. (See above) -# select() also can't accept a FD > FD_SETSIZE (usually around 1024) -def DefaultSelector(): - """ This function serves as a first call for DefaultSelector to - detect whether the select module is being monkey-patched incorrectly - by eventlet or greenlet, and to preserve proper behavior. """ - global _DEFAULT_SELECTOR - if _DEFAULT_SELECTOR is None: - if _can_allocate('kqueue'): - _DEFAULT_SELECTOR = KqueueSelector - elif _can_allocate('epoll'): - _DEFAULT_SELECTOR = EpollSelector - elif _can_allocate('poll'): - _DEFAULT_SELECTOR = PollSelector - elif hasattr(select, 'select'): - _DEFAULT_SELECTOR = SelectSelector - else: # Platform-specific: AppEngine - raise ValueError('Platform does not have a selector') - return _DEFAULT_SELECTOR() diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/python.d/python_modules/urllib3/util/ssl_.py deleted file mode 100644 index 33d428ed8..000000000 --- a/python.d/python_modules/urllib3/util/ssl_.py +++ /dev/null @@ -1,337 +0,0 @@ -from __future__ import absolute_import -import errno -import warnings -import hmac - -from binascii import hexlify, unhexlify -from hashlib import md5, sha1, sha256 - -from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning - - -SSLContext = None -HAS_SNI = False -IS_PYOPENSSL = False -IS_SECURETRANSPORT = False - -# Maps the length of a digest to a possible hash function producing this digest -HASHFUNC_MAP = { - 32: md5, - 40: sha1, - 64: sha256, -} - - -def _const_compare_digest_backport(a, b): - """ - Compare two digests of equal length in constant time. - - The digests must be of type str/bytes. - Returns True if the digests match, and False otherwise. - """ - result = abs(len(a) - len(b)) - for l, r in zip(bytearray(a), bytearray(b)): - result |= l ^ r - return result == 0 - - -_const_compare_digest = getattr(hmac, 'compare_digest', - _const_compare_digest_backport) - - -try: # Test for SSL features - import ssl - from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 - from ssl import HAS_SNI # Has SNI? -except ImportError: - pass - - -try: - from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION -except ImportError: - OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 - OP_NO_COMPRESSION = 0x20000 - -# A secure default. -# Sources for more information on TLS ciphers: -# -# - https://wiki.mozilla.org/Security/Server_Side_TLS -# - https://www.ssllabs.com/projects/best-practices/index.html -# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ -# -# The general intent is: -# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), -# - prefer ECDHE over DHE for better performance, -# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and -# security, -# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, -# - disable NULL authentication, MD5 MACs and DSS for security reasons.
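The suite list that follows is collapsed into a single colon-separated OpenSSL cipher string; a quick illustration in plain Python::

    >>> ':'.join(['ECDH+AESGCM', 'ECDH+CHACHA20', '!aNULL', '!MD5'])
    'ECDH+AESGCM:ECDH+CHACHA20:!aNULL:!MD5'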
-DEFAULT_CIPHERS = ':'.join([ - 'ECDH+AESGCM', - 'ECDH+CHACHA20', - 'DH+AESGCM', - 'DH+CHACHA20', - 'ECDH+AES256', - 'DH+AES256', - 'ECDH+AES128', - 'DH+AES', - 'RSA+AESGCM', - 'RSA+AES', - '!aNULL', - '!eNULL', - '!MD5', -]) - -try: - from ssl import SSLContext # Modern SSL? -except ImportError: - import sys - - class SSLContext(object): # Platform-specific: Python 2 & 3.1 - supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or - (3, 2) <= sys.version_info) - - def __init__(self, protocol_version): - self.protocol = protocol_version - # Use default values from a real SSLContext - self.check_hostname = False - self.verify_mode = ssl.CERT_NONE - self.ca_certs = None - self.options = 0 - self.certfile = None - self.keyfile = None - self.ciphers = None - - def load_cert_chain(self, certfile, keyfile): - self.certfile = certfile - self.keyfile = keyfile - - def load_verify_locations(self, cafile=None, capath=None): - self.ca_certs = cafile - - if capath is not None: - raise SSLError("CA directories not supported in older Pythons") - - def set_ciphers(self, cipher_suite): - if not self.supports_set_ciphers: - raise TypeError( - 'Your version of Python does not support setting ' - 'a custom cipher suite. Please upgrade to Python ' - '2.7, 3.2, or later if you need this functionality.' - ) - self.ciphers = cipher_suite - - def wrap_socket(self, socket, server_hostname=None, server_side=False): - warnings.warn( - 'A true SSLContext object is not available. This prevents ' - 'urllib3 from configuring SSL appropriately and may cause ' - 'certain SSL connections to fail. You can upgrade to a newer ' - 'version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - InsecurePlatformWarning - ) - kwargs = { - 'keyfile': self.keyfile, - 'certfile': self.certfile, - 'ca_certs': self.ca_certs, - 'cert_reqs': self.verify_mode, - 'ssl_version': self.protocol, - 'server_side': server_side, - } - if self.supports_set_ciphers: # Platform-specific: Python 2.7+ - return wrap_socket(socket, ciphers=self.ciphers, **kwargs) - else: # Platform-specific: Python 2.6 - return wrap_socket(socket, **kwargs) - - -def assert_fingerprint(cert, fingerprint): - """ - Checks if given fingerprint matches the supplied certificate. - - :param cert: - Certificate as bytes object. - :param fingerprint: - Fingerprint as string of hexdigits, can be interspersed with colons. - """ - - fingerprint = fingerprint.replace(':', '').lower() - digest_length = len(fingerprint) - hashfunc = HASHFUNC_MAP.get(digest_length) - if not hashfunc: - raise SSLError( - 'Fingerprint of invalid length: {0}'.format(fingerprint)) - - # We need encode() here for py32; works on py2 and py33. - fingerprint_bytes = unhexlify(fingerprint.encode()) - - cert_digest = hashfunc(cert).digest() - - if not _const_compare_digest(cert_digest, fingerprint_bytes): - raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' - .format(fingerprint, hexlify(cert_digest))) - - -def resolve_cert_reqs(candidate): - """ - Resolves the argument to a numeric constant, which can be passed to - the wrap_socket function/method from the ssl module. - Defaults to :data:`ssl.CERT_NONE`. - If given a string it is assumed to be the name of the constant in the - :mod:`ssl` module or its abbreviation. - (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.) - If it's neither `None` nor a string we assume it is already the numeric - constant which can directly be passed to wrap_socket.
- """ - if candidate is None: - return CERT_NONE - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'CERT_' + candidate) - return res - - return candidate - - -def resolve_ssl_version(candidate): - """ - like resolve_cert_reqs - """ - if candidate is None: - return PROTOCOL_SSLv23 - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'PROTOCOL_' + candidate) - return res - - return candidate - - -def create_urllib3_context(ssl_version=None, cert_reqs=None, - options=None, ciphers=None): - """All arguments have the same meaning as ``ssl_wrap_socket``. - - By default, this function does a lot of the same work that - ``ssl.create_default_context`` does on Python 3.4+. It: - - - Disables SSLv2, SSLv3, and compression - - Sets a restricted set of server ciphers - - If you wish to enable SSLv3, you can do:: - - from urllib3.util import ssl_ - context = ssl_.create_urllib3_context() - context.options &= ~ssl_.OP_NO_SSLv3 - - You can do the same to enable compression (substituting ``COMPRESSION`` - for ``SSLv3`` in the last line above). - - :param ssl_version: - The desired protocol version to use. This will default to - PROTOCOL_SSLv23 which will negotiate the highest protocol that both - the server and your installation of OpenSSL support. - :param cert_reqs: - Whether to require the certificate verification. This defaults to - ``ssl.CERT_REQUIRED``. - :param options: - Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, - ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. - :param ciphers: - Which cipher suites to allow the server to select. - :returns: - Constructed SSLContext object with specified options - :rtype: SSLContext - """ - context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) - - # Setting the default here, as we may have no ssl module on import - cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs - - if options is None: - options = 0 - # SSLv2 is easily broken and is considered harmful and dangerous - options |= OP_NO_SSLv2 - # SSLv3 has several problems and is now dangerous - options |= OP_NO_SSLv3 - # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ - # (issue #309) - options |= OP_NO_COMPRESSION - - context.options |= options - - if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 - context.set_ciphers(ciphers or DEFAULT_CIPHERS) - - context.verify_mode = cert_reqs - if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 - # We do our own verification, including fingerprints and alternative - # hostnames. So disable it here - context.check_hostname = False - return context - - -def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None, ciphers=None, ssl_context=None, - ca_cert_dir=None): - """ - All arguments except for server_hostname, ssl_context, and ca_cert_dir have - the same meaning as they do when using :func:`ssl.wrap_socket`. - - :param server_hostname: - When SNI is supported, the expected hostname of the certificate - :param ssl_context: - A pre-made :class:`SSLContext` object. If none is provided, one will - be created using :func:`create_urllib3_context`. - :param ciphers: - A string of ciphers we wish the client to support. This is not - supported on Python 2.6 as the ssl module does not support it. 
- :param ca_cert_dir: - A directory containing CA certificates in multiple separate files, as - supported by OpenSSL's -CApath flag or the capath argument to - SSLContext.load_verify_locations(). - """ - context = ssl_context - if context is None: - # Note: This branch of code and all the variables in it are no longer - # used by urllib3 itself. We should consider deprecating and removing - # this code. - context = create_urllib3_context(ssl_version, cert_reqs, - ciphers=ciphers) - - if ca_certs or ca_cert_dir: - try: - context.load_verify_locations(ca_certs, ca_cert_dir) - except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2 - raise SSLError(e) - # Py33 raises FileNotFoundError which subclasses OSError - # These are not equivalent unless we check the errno attribute - except OSError as e: # Platform-specific: Python 3.3 and beyond - if e.errno == errno.ENOENT: - raise SSLError(e) - raise - elif getattr(context, 'load_default_certs', None) is not None: - # try to load OS default certs; works well on Windows (requires Python 3.4+) - context.load_default_certs() - - if certfile: - context.load_cert_chain(certfile, keyfile) - if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI - return context.wrap_socket(sock, server_hostname=server_hostname) - - warnings.warn( - 'An HTTPS request has been made, but the SNI (Subject Name ' - 'Indication) extension to TLS is not available on this platform. ' - 'This may cause the server to present an incorrect TLS ' - 'certificate, which can cause validation failures. You can upgrade to ' - 'a newer version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - SNIMissingWarning - ) - return context.wrap_socket(sock) diff --git a/python.d/python_modules/urllib3/util/timeout.py b/python.d/python_modules/urllib3/util/timeout.py deleted file mode 100644 index cec817e6e..000000000 --- a/python.d/python_modules/urllib3/util/timeout.py +++ /dev/null @@ -1,242 +0,0 @@ -from __future__ import absolute_import -# The default socket timeout, used by httplib to indicate that no timeout was -# specified by the user -from socket import _GLOBAL_DEFAULT_TIMEOUT -import time - -from ..exceptions import TimeoutStateError - -# A sentinel value to indicate that no timeout was specified by the user in -# urllib3 -_Default = object() - - -# Use time.monotonic if available. -current_time = getattr(time, "monotonic", time.time) - - -class Timeout(object): - """ Timeout configuration. - - Timeouts can be defined as a default for a pool:: - - timeout = Timeout(connect=2.0, read=7.0) - http = PoolManager(timeout=timeout) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool):: - - response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) - - Timeouts can be disabled by setting all the parameters to ``None``:: - - no_timeout = Timeout(connect=None, read=None) - response = http.request('GET', 'http://example.com/', timeout=no_timeout) - - - :param total: - This combines the connect and read timeouts into one; the read timeout - will be set to the time left over from the connect attempt. In the - event that both a connect timeout and a total are specified, or a read - timeout and a total are specified, the shorter timeout will be applied. - - Defaults to None. - - :type total: integer, float, or None - - :param connect: - The maximum amount of time to wait for a connection attempt to a server - to succeed.
Omitting the parameter will default the connect timeout to - the system default, probably `the global default timeout in socket.py - <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. - None will set an infinite timeout for connection attempts. - - :type connect: integer, float, or None - - :param read: - The maximum amount of time to wait between consecutive - read operations for a response from the server. Omitting - the parameter will default the read timeout to the system - default, probably `the global default timeout in socket.py - <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. - None will set an infinite timeout. - - :type read: integer, float, or None - - .. note:: - - Many factors can affect the total amount of time for urllib3 to return - an HTTP response. - - For example, Python's DNS resolver does not obey the timeout specified - on the socket. Other factors that can affect total request time include - high CPU load, high swap, the program running at a low priority level, - or other behaviors. - - In addition, the read and total timeouts only measure the time between - read operations on the socket connecting the client and the server, - not the total amount of time for the request to return a complete - response. For most requests, the timeout is raised because the server - has not sent the first byte in the specified time. This is not always - the case; if a server streams one byte every fifteen seconds, a timeout - of 20 seconds will not trigger, even though the request will take - several minutes to complete. - - If your goal is to cut off any request after a set amount of wall clock - time, consider having a second "watcher" thread to cut off a slow - request. - """ - - #: A sentinel object representing the default timeout value - DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT - - def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, 'connect') - self._read = self._validate_timeout(read, 'read') - self.total = self._validate_timeout(total, 'total') - self._start_connect = None - - def __str__(self): - return '%s(connect=%r, read=%r, total=%r)' % ( - type(self).__name__, self._connect, self._read, self.total) - - @classmethod - def _validate_timeout(cls, value, name): - """ Check that a timeout attribute is valid. - - :param value: The timeout value to validate - :param name: The name of the timeout attribute to validate. This is - used to specify in error messages. - :return: The validated and casted version of the given value. - :raises ValueError: If it is a numeric value less than or equal to - zero, or the type is not an integer, float, or None. - """ - if value is _Default: - return cls.DEFAULT_TIMEOUT - - if value is None or value is cls.DEFAULT_TIMEOUT: - return value - - if isinstance(value, bool): - raise ValueError("Timeout cannot be a boolean value. It must " - "be an int, float or None.") - try: - float(value) - except (TypeError, ValueError): - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) - - try: - if value <= 0: - raise ValueError("Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value)) - except TypeError: # Python 3 - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." 
% (name, value)) - - return value - - @classmethod - def from_float(cls, timeout): - """ Create a new Timeout from a legacy timeout value. - - The timeout value used by httplib.py sets the same timeout on the - connect(), and recv() socket requests. This creates a :class:`Timeout` - object that sets the individual timeouts to the ``timeout`` value - passed to this function. - - :param timeout: The legacy timeout value. - :type timeout: integer, float, sentinel default object, or None - :return: Timeout object - :rtype: :class:`Timeout` - """ - return Timeout(read=timeout, connect=timeout) - - def clone(self): - """ Create a copy of the timeout object - - Timeout properties are stored per-pool but each request needs a fresh - Timeout object to ensure each one has its own start/stop configured. - - :return: a copy of the timeout object - :rtype: :class:`Timeout` - """ - # We can't use copy.deepcopy because that will also create a new object - # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to - # detect the user default. - return Timeout(connect=self._connect, read=self._read, - total=self.total) - - def start_connect(self): - """ Start the timeout clock, used during a connect() attempt - - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to start a timer that has been started already. - """ - if self._start_connect is not None: - raise TimeoutStateError("Timeout timer has already been started.") - self._start_connect = current_time() - return self._start_connect - - def get_connect_duration(self): - """ Gets the time elapsed since the call to :meth:`start_connect`. - - :return: Elapsed time. - :rtype: float - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to get duration for a timer that hasn't been started. - """ - if self._start_connect is None: - raise TimeoutStateError("Can't get connect duration for timer " - "that has not started.") - return current_time() - self._start_connect - - @property - def connect_timeout(self): - """ Get the value to use when setting a connection timeout. - - This will be a positive float or integer, the value None - (never timeout), or the default system timeout. - - :return: Connect timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - """ - if self.total is None: - return self._connect - - if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: - return self.total - - return min(self._connect, self.total) - - @property - def read_timeout(self): - """ Get the value for the read timeout. - - This assumes some time has elapsed in the connection timeout and - computes the read timeout appropriately. - - If self.total is set, the read timeout is dependent on the amount of - time taken by the connect timeout. If the connection time has not been - established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be - raised. - - :return: Value to use for the read timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` - has not yet been called on this object. - """ - if (self.total is not None and - self.total is not self.DEFAULT_TIMEOUT and - self._read is not None and - self._read is not self.DEFAULT_TIMEOUT): - # In case the connect timeout has not yet been established. 
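            # (Worked example, values illustrative: with total=5.0, read=7.0
            # and 1.5 seconds already spent connecting, the branch below
            # returns max(0, min(5.0 - 1.5, 7.0)) == 3.5; before
            # start_connect() has been called it falls back to the raw read
            # value.)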
- if self._start_connect is None: - return self._read - return max(0, min(self.total - self.get_connect_duration(), - self._read)) - elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: - return max(0, self.total - self.get_connect_duration()) - else: - return self._read diff --git a/python.d/python_modules/urllib3/util/url.py b/python.d/python_modules/urllib3/util/url.py deleted file mode 100644 index 6b6f9968d..000000000 --- a/python.d/python_modules/urllib3/util/url.py +++ /dev/null @@ -1,230 +0,0 @@ -from __future__ import absolute_import -from collections import namedtuple - -from ..exceptions import LocationParseError - - -url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] - -# We only want to normalize urls with an HTTP(S) scheme. -# urllib3 infers URLs without a scheme (None) to be http. -NORMALIZABLE_SCHEMES = ('http', 'https', None) - - -class Url(namedtuple('Url', url_attrs)): - """ - Datastructure for representing an HTTP URL. Used as a return value for - :func:`parse_url`. Both the scheme and host are normalized as they are - both case-insensitive according to RFC 3986. - """ - __slots__ = () - - def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, - query=None, fragment=None): - if path and not path.startswith('/'): - path = '/' + path - if scheme: - scheme = scheme.lower() - if host and scheme in NORMALIZABLE_SCHEMES: - host = host.lower() - return super(Url, cls).__new__(cls, scheme, auth, host, port, path, - query, fragment) - - @property - def hostname(self): - """For backwards-compatibility with urlparse. We're nice like that.""" - return self.host - - @property - def request_uri(self): - """Absolute path including the query string.""" - uri = self.path or '/' - - if self.query is not None: - uri += '?' + self.query - - return uri - - @property - def netloc(self): - """Network location including host and port""" - if self.port: - return '%s:%d' % (self.host, self.port) - return self.host - - @property - def url(self): - """ - Convert self into a url - - This function should more or less round-trip with :func:`.parse_url`. The - returned url may not be exactly the same as the url inputted to - :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls - with a blank port will have : removed). - - Example: :: - - >>> U = parse_url('http://google.com/mail/') - >>> U.url - 'http://google.com/mail/' - >>> Url('http', 'username:password', 'host.com', 80, - ... '/path', 'query', 'fragment').url - 'http://username:password@host.com:80/path?query#fragment' - """ - scheme, auth, host, port, path, query, fragment = self - url = '' - - # We use "is not None" we want things to happen with empty strings (or 0 port) - if scheme is not None: - url += scheme + '://' - if auth is not None: - url += auth + '@' - if host is not None: - url += host - if port is not None: - url += ':' + str(port) - if path is not None: - url += path - if query is not None: - url += '?' + query - if fragment is not None: - url += '#' + fragment - - return url - - def __str__(self): - return self.url - - -def split_first(s, delims): - """ - Given a string and an iterable of delimiters, split on the first found - delimiter. Return two split parts and the matched delimiter. - - If not found, then the first part is the full input string. - - Example:: - - >>> split_first('foo/bar?baz', '?/=') - ('foo', 'bar?baz', '/') - >>> split_first('foo/bar?baz', '123') - ('foo/bar?baz', '', None) - - Scales linearly with number of delims. 
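A short sketch of the Url round-trip and derived properties described above, reusing the docstring's own example values:

    u = Url(scheme='http', auth='username:password', host='host.com',
            port=80, path='/path', query='query', fragment='fragment')
    assert u.url == 'http://username:password@host.com:80/path?query#fragment'
    assert u.netloc == 'host.com:80'
    assert u.request_uri == '/path?query'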
Not ideal for large number of delims. - """ - min_idx = None - min_delim = None - for d in delims: - idx = s.find(d) - if idx < 0: - continue - - if min_idx is None or idx < min_idx: - min_idx = idx - min_delim = d - - if min_idx is None or min_idx < 0: - return s, '', None - - return s[:min_idx], s[min_idx + 1:], min_delim - - -def parse_url(url): - """ - Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is - performed to parse incomplete urls. Fields not provided will be None. - - Partly backwards-compatible with :mod:`urlparse`. - - Example:: - - >>> parse_url('http://google.com/mail/') - Url(scheme='http', host='google.com', port=None, path='/mail/', ...) - >>> parse_url('google.com:80') - Url(scheme=None, host='google.com', port=80, path=None, ...) - >>> parse_url('/foo?bar') - Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) - """ - - # While this code has overlap with stdlib's urlparse, it is much - # simplified for our needs and less annoying. - # Additionally, this implementations does silly things to be optimal - # on CPython. - - if not url: - # Empty - return Url() - - scheme = None - auth = None - host = None - port = None - path = None - fragment = None - query = None - - # Scheme - if '://' in url: - scheme, url = url.split('://', 1) - - # Find the earliest Authority Terminator - # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, path_, delim = split_first(url, ['/', '?', '#']) - - if delim: - # Reassemble the path - path = delim + path_ - - # Auth - if '@' in url: - # Last '@' denotes end of auth part - auth, url = url.rsplit('@', 1) - - # IPv6 - if url and url[0] == '[': - host, url = url.split(']', 1) - host += ']' - - # Port - if ':' in url: - _host, port = url.split(':', 1) - - if not host: - host = _host - - if port: - # If given, ports must be integers. No whitespace, no plus or - # minus prefixes, no non-integer digits such as ^2 (superscript). - if not port.isdigit(): - raise LocationParseError(url) - try: - port = int(port) - except ValueError: - raise LocationParseError(url) - else: - # Blank ports are cool, too. (rfc3986#section-3.2.3) - port = None - - elif not host and url: - host = url - - if not path: - return Url(scheme, auth, host, port, path, query, fragment) - - # Fragment - if '#' in path: - path, fragment = path.split('#', 1) - - # Query - if '?' in path: - path, query = path.split('?', 1) - - return Url(scheme, auth, host, port, path, query, fragment) - - -def get_host(url): - """ - Deprecated. Use :func:`parse_url` instead. - """ - p = parse_url(url) - return p.scheme or 'http', p.hostname, p.port diff --git a/python.d/python_modules/urllib3/util/wait.py b/python.d/python_modules/urllib3/util/wait.py deleted file mode 100644 index cb396e508..000000000 --- a/python.d/python_modules/urllib3/util/wait.py +++ /dev/null @@ -1,40 +0,0 @@ -from .selectors import ( - HAS_SELECT, - DefaultSelector, - EVENT_READ, - EVENT_WRITE -) - - -def _wait_for_io_events(socks, events, timeout=None): - """ Waits for IO events to be available from a list of sockets - or optionally a single socket if passed in. Returns a list of - sockets that can be interacted with immediately. """ - if not HAS_SELECT: - raise ValueError('Platform does not have a selector') - if not isinstance(socks, list): - # Probably just a single socket. - if hasattr(socks, "fileno"): - socks = [socks] - # Otherwise it might be a non-list iterable. 
-    else:
-        socks = list(socks)
-    with DefaultSelector() as selector:
-        for sock in socks:
-            selector.register(sock, events)
-        return [key[0].fileobj for key in
-                selector.select(timeout) if key[1] & events]
-
-
-def wait_for_read(socks, timeout=None):
-    """ Waits for reading to be available from a list of sockets
-    or optionally a single socket if passed in. Returns a list of
-    sockets that can be read from immediately. """
-    return _wait_for_io_events(socks, EVENT_READ, timeout)
-
-
-def wait_for_write(socks, timeout=None):
-    """ Waits for writing to be available from a list of sockets
-    or optionally a single socket if passed in. Returns a list of
-    sockets that can be written to immediately. """
-    return _wait_for_io_events(socks, EVENT_WRITE, timeout)
diff --git a/python.d/rabbitmq.chart.py b/python.d/rabbitmq.chart.py
deleted file mode 100644
index b8847e9f8..000000000
--- a/python.d/rabbitmq.chart.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: rabbitmq netdata python.d module
-# Author: l2isbad
-
-from collections import namedtuple
-from json import loads
-from socket import gethostbyname, gaierror
-from threading import Thread
-try:
-    from queue import Queue
-except ImportError:
-    from Queue import Queue
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-
-NODE_STATS = ['fd_used',
-              'mem_used',
-              'sockets_used',
-              'proc_used',
-              'disk_free',
-              'run_queue'
-              ]
-OVERVIEW_STATS = ['object_totals.channels',
-                  'object_totals.consumers',
-                  'object_totals.connections',
-                  'object_totals.queues',
-                  'object_totals.exchanges',
-                  'queue_totals.messages_ready',
-                  'queue_totals.messages_unacknowledged',
-                  'message_stats.ack',
-                  'message_stats.redeliver',
-                  'message_stats.deliver',
-                  'message_stats.publish'
-                  ]
-ORDER = ['queued_messages', 'message_rates', 'global_counts',
-         'file_descriptors', 'socket_descriptors', 'erlang_processes', 'erlang_run_queue', 'memory', 'disk_space']
-
-CHARTS = {
-    'file_descriptors': {
-        'options': [None, 'File Descriptors', 'descriptors', 'overview',
-                    'rabbitmq.file_descriptors', 'line'],
-        'lines': [
-            ['fd_used', 'used', 'absolute']
-        ]},
-    'memory': {
-        'options': [None, 'Memory', 'MB', 'overview',
-                    'rabbitmq.memory', 'line'],
-        'lines': [
-            ['mem_used', 'used', 'absolute', 1, 1024 << 10]
-        ]},
-    'disk_space': {
-        'options': [None, 'Disk Space', 'GB', 'overview',
-                    'rabbitmq.disk_space', 'line'],
-        'lines': [
-            ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
-        ]},
-    'socket_descriptors': {
-        'options': [None, 'Socket Descriptors', 'descriptors', 'overview',
-                    'rabbitmq.sockets', 'line'],
-        'lines': [
-            ['sockets_used', 'used', 'absolute']
-        ]},
-    'erlang_processes': {
-        'options': [None, 'Erlang Processes', 'processes', 'overview',
-                    'rabbitmq.processes', 'line'],
-        'lines': [
-            ['proc_used', 'used', 'absolute']
-        ]},
-    'erlang_run_queue': {
-        'options': [None, 'Erlang Run Queue', 'processes', 'overview',
-                    'rabbitmq.erlang_run_queue', 'line'],
-        'lines': [
-            ['run_queue', 'length', 'absolute']
-        ]},
-    'global_counts': {
-        'options': [None, 'Global Counts', 'counts', 'overview',
-                    'rabbitmq.global_counts', 'line'],
-        'lines': [
-            ['object_totals_channels', 'channels', 'absolute'],
-            ['object_totals_consumers', 'consumers', 'absolute'],
-            ['object_totals_connections', 'connections', 'absolute'],
-            ['object_totals_queues', 'queues', 'absolute'],
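            # (The dimension ids above and below are the dotted paths from
            # OVERVIEW_STATS with '.' replaced by '_'; fetch_data() at the
            # bottom of this module performs that flattening.)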
- ['object_totals_exchanges', 'exchanges', 'absolute'] - ]}, - 'queued_messages': { - 'options': [None, 'Queued Messages', 'messages', 'overview', - 'rabbitmq.queued_messages', 'stacked'], - 'lines': [ - ['queue_totals_messages_ready', 'ready', 'absolute'], - ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute'] - ]}, - 'message_rates': { - 'options': [None, 'Message Rates', 'messages/s', 'overview', - 'rabbitmq.message_rates', 'stacked'], - 'lines': [ - ['message_stats_ack', 'ack', 'incremental'], - ['message_stats_redeliver', 'redeliver', 'incremental'], - ['message_stats_deliver', 'deliver', 'incremental'], - ['message_stats_publish', 'publish', 'incremental'] - ]} -} - - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.host = self.configuration.get('host', '127.0.0.1') - self.port = self.configuration.get('port', 15672) - self.scheme = self.configuration.get('scheme', 'http') - - def check(self): - # We can't start if <host> AND <port> not specified - if not (self.host and self.port): - self.error('Host is not defined in the module configuration file') - return False - - # Hostname -> ip address - try: - self.host = gethostbyname(self.host) - except gaierror as error: - self.error(str(error)) - return False - - # Add handlers (auth, self signed cert accept) - self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme, - host=self.host, - port=self.port) - # Add methods - api_node = self.url + '/nodes' - api_overview = self.url + '/overview' - self.methods = [METHODS(get_data=self._get_overview_stats, - url=api_node, - stats=NODE_STATS), - METHODS(get_data=self._get_overview_stats, - url=api_overview, - stats=OVERVIEW_STATS)] - return UrlService.check(self) - - def _get_data(self): - threads = list() - queue = Queue() - result = dict() - - for method in self.methods: - th = Thread(target=method.get_data, - args=(queue, method.url, method.stats)) - th.start() - threads.append(th) - - for thread in threads: - thread.join() - result.update(queue.get()) - - return result or None - - def _get_overview_stats(self, queue, url, stats): - """ - Format data received from http request - :return: dict - """ - - raw_data = self._get_raw_data(url) - - if not raw_data: - return queue.put(dict()) - data = loads(raw_data) - data = data[0] if isinstance(data, list) else data - - to_netdata = fetch_data(raw_data=data, metrics=stats) - return queue.put(to_netdata) - - -def fetch_data(raw_data, metrics): - data = dict() - for metric in metrics: - value = raw_data - metrics_list = metric.split('.') - try: - for m in metrics_list: - value = value[m] - except KeyError: - continue - data['_'.join(metrics_list)] = value - return data diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py deleted file mode 100644 index bcfcf16a6..000000000 --- a/python.d/redis.chart.py +++ /dev/null @@ -1,200 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: redis netdata python.d module -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.SocketService import SocketService - -# default module values (can be overridden per job in `config`) -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'host': 'localhost', -# 'port': 6379, -# 'unix_socket': None -# }} - -ORDER = ['operations', 
'hit_rate', 'memory', 'keys', 'net', 'connections', 'clients', 'slaves', 'persistence', - 'bgsave_now', 'bgsave_health'] - -CHARTS = { - 'operations': { - 'options': [None, 'Redis Operations', 'operations/s', 'operations', 'redis.operations', 'line'], - 'lines': [ - ['total_commands_processed', 'commands', 'incremental'], - ['instantaneous_ops_per_sec', 'operations', 'absolute'] - ]}, - 'hit_rate': { - 'options': [None, 'Redis Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'], - 'lines': [ - ['hit_rate', 'rate', 'absolute'] - ]}, - 'memory': { - 'options': [None, 'Redis Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'], - 'lines': [ - ['used_memory', 'total', 'absolute', 1, 1024], - ['used_memory_lua', 'lua', 'absolute', 1, 1024] - ]}, - 'net': { - 'options': [None, 'Redis Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'], - 'lines': [ - ['total_net_input_bytes', 'in', 'incremental', 8, 1024], - ['total_net_output_bytes', 'out', 'incremental', -8, 1024] - ]}, - 'keys': { - 'options': [None, 'Redis Keys per Database', 'keys', 'keys', 'redis.keys', 'line'], - 'lines': [ - # lines are created dynamically in `check()` method - ]}, - 'connections': { - 'options': [None, 'Redis Connections', 'connections/s', 'connections', 'redis.connections', 'line'], - 'lines': [ - ['total_connections_received', 'received', 'incremental', 1], - ['rejected_connections', 'rejected', 'incremental', -1] - ]}, - 'clients': { - 'options': [None, 'Redis Clients', 'clients', 'connections', 'redis.clients', 'line'], - 'lines': [ - ['connected_clients', 'connected', 'absolute', 1], - ['blocked_clients', 'blocked', 'absolute', -1] - ]}, - 'slaves': { - 'options': [None, 'Redis Slaves', 'slaves', 'replication', 'redis.slaves', 'line'], - 'lines': [ - ['connected_slaves', 'connected', 'absolute'] - ]}, - 'persistence': { - 'options': [None, 'Redis Persistence Changes Since Last Save', 'changes', 'persistence', - 'redis.rdb_changes', 'line'], - 'lines': [ - ['rdb_changes_since_last_save', 'changes', 'absolute'] - ]}, - 'bgsave_now': { - 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence', - 'redis.bgsave_now', 'absolute'], - 'lines': [ - ['rdb_bgsave_in_progress', 'rdb save', 'absolute'] - ]}, - 'bgsave_health': { - 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence', - 'redis.bgsave_health', 'line'], - 'lines': [ - ['rdb_last_bgsave_status', 'rdb save', 'absolute'] - ]} -} - - -class Service(SocketService): - def __init__(self, configuration=None, name=None): - SocketService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self._keep_alive = True - self.chart_name = "" - self.host = self.configuration.get('host', 'localhost') - self.port = self.configuration.get('port', 6379) - self.unix_socket = self.configuration.get('socket') - password = self.configuration.get('pass', str()) - self.bgsave_time = 0 - self.requests = dict(request='INFO\r\n'.encode(), - password=' '.join(['AUTH', password, '\r\n']).encode() if password else None) - self.request = self.requests['request'] - - def _get_data(self): - """ - Get data from socket - :return: dict - """ - if self.requests['password']: - self.request = self.requests['password'] - raw = self._get_raw_data().strip() - if raw != "+OK": - self.error("invalid password") - return None - self.request = self.requests['request'] - response = self._get_raw_data() - if response is None: - # error has already been logged - return None - - try: - 
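            # (Redis INFO replies are '\r\n'-terminated 'key:value' lines; the
            # parsing below splits on '\n', so values keep a trailing '\r',
            # which the rdb_* comparisons further down account for.)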
parsed = response.split("\n")
-        except AttributeError:
-            self.error("response is invalid/empty")
-            return None
-
-        data = dict()
-        for line in parsed:
-            if len(line) < 5 or line[0] == '$' or line[0] == '#':
-                continue
-
-            if line.startswith('db'):
-                tmp = line.split(',')[0].replace('keys=', '')
-                record = tmp.split(':')
-                data[record[0]] = record[1]
-                continue
-
-            try:
-                t = line.split(':')
-                data[t[0]] = t[1]
-            except (IndexError, ValueError):
-                self.debug("invalid line received: " + str(line))
-
-        if not data:
-            self.error("received data doesn't have any records")
-            return None
-
-        try:
-            data['hit_rate'] = (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits'])
-                                                                     + int(data['keyspace_misses']))
-        except (KeyError, ZeroDivisionError, TypeError):
-            data['hit_rate'] = 0
-
-        if data['rdb_bgsave_in_progress'] != '0\r':
-            self.bgsave_time += self.update_every
-        else:
-            self.bgsave_time = 0
-
-        data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok\r' else 1
-        data['rdb_bgsave_in_progress'] = self.bgsave_time
-
-        return data
-
-    def _check_raw_data(self, data):
-        """
-        Check if all data has been gathered from the socket.
-        Parse the first line, which carries the message length, and compare it against the received message.
-        :param data: str
-        :return: boolean
-        """
-        length = len(data)
-        supposed = data.split('\n')[0][1:-1]
-        offset = len(supposed) + 4  # 1 dollar sign + 1 newline + the 2-byte ending sequence '\r\n'
-        if not supposed.isdigit():
-            return True
-        supposed = int(supposed)
-
-        if length - offset >= supposed:
-            self.debug("received full response from redis")
-            return True
-
-        self.debug("waiting for more data from redis")
-        return False
-
-    def check(self):
-        """
-        Parse the configuration, check if redis is available, and dynamically create chart lines data.
-        :return: boolean
-        """
-        data = self._get_data()
-        if data is None:
-            return False
-
-        for name in data:
-            if name.startswith('db'):
-                self.definitions['keys']['lines'].append([name, None, 'absolute'])
-        return True
diff --git a/python.d/retroshare.chart.py b/python.d/retroshare.chart.py
deleted file mode 100644
index 8c0330ec6..000000000
--- a/python.d/retroshare.chart.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: RetroShare netdata python.d module
-# Authors: sehraf
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'dht']
-
-CHARTS = {
-    'bandwidth': {
-        'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
-        'lines': [
-            ['bandwidth_up_kb', 'Upload'],
-            ['bandwidth_down_kb', 'Download']
-        ]},
-    'peers': {
-        'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
-        'lines': [
-            ['peers_all', 'All friends'],
-            ['peers_connected', 'Connected friends']
-        ]},
-    'dht': {
-        'options': [None, 'RetroShare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
-        'lines': [
-            ['dht_size_all', 'DHT nodes estimated'],
-            ['dht_size_rs', 'RS nodes estimated']
-        ]}
-}
-
-
-class Service(UrlService):
-    def __init__(self, configuration=None, name=None):
-        UrlService.__init__(self, configuration=configuration, name=name)
-        self.baseurl = self.configuration.get('url', 'http://localhost:9090')
-        self.order = ORDER
-        self.definitions = CHARTS
-
-    def _get_stats(self):
-        """
-        Format data received from http request
-        :return: dict
-        """
-        try:
-            raw = self._get_raw_data()
-            parsed = json.loads(raw)
-            if str(parsed['returncode']) != 'ok':
-                return None
-        except (TypeError, ValueError):
-            return None
-
-        return parsed['data'][0]
-
-    def _get_data(self):
-        """
-        Get data from API
-        :return: dict
-        """
-        self.url = self.baseurl + '/api/v2/stats'
-        data = self._get_stats()
-        if data is None:
-            return None
-
-        data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
-        if data['dht_active'] is False:
-            data['dht_size_all'] = None
-            data['dht_size_rs'] = None
-
-        return data
diff --git a/python.d/samba.chart.py b/python.d/samba.chart.py
deleted file mode 100644
index 3f4fd5a12..000000000
--- a/python.d/samba.chart.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: samba netdata python.d module
-# Author: Christopher Cox <chris_cox@endlessnow.com>
-#
-# The netdata user needs to be able to run the smbstatus program via sudo
-# without a password:
-# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-#
-# This makes calls to smbstatus -P
-#
-# This just looks at a couple of values out of syscall, and some from smb2.
-#
-# The Lesser Ops chart is merely a display of current counter values. They
-# didn't seem to change much to me. However, if you notice something changing
-# a lot there, bring one or more out into its own chart and make it incremental
-# (like find and notify... good examples).
-
-import re
-
-from bases.collection import find_binary
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-priority = 60000
-retries = 60
-
-ORDER = ['syscall_rw', 'smb2_rw', 'smb2_create_close', 'smb2_info', 'smb2_find', 'smb2_notify', 'smb2_sm_count']
-
-CHARTS = {
-    'syscall_rw': {
-        'lines': [
-            ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
-            ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
-        ],
-        'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area']
-    },
-    'smb2_rw': {
-        'lines': [
-            ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
-            ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
-            ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
-            ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
-        ],
-        'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area']
-    },
-    'smb2_create_close': {
-        'lines': [
-            ['smb2_create_count', 'create', 'incremental', 1, 1],
-            ['smb2_close_count', 'close', 'incremental', -1, 1]
-        ],
-        'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line']
-    },
-    'smb2_info': {
-        'lines': [
-            ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
-            ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
-        ],
-        'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line']
-    },
-    'smb2_find': {
-        'lines': [
-            ['smb2_find_count', 'find', 'incremental', 1, 1]
-        ],
-        'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line']
-    },
-    'smb2_notify': {
-        'lines': [
-            ['smb2_notify_count', 'notify', 'incremental', 1, 1]
-        ],
-        'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line']
-    },
-    'smb2_sm_count': {
-        'lines': [
-            ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
-            ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
-            ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
-            ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
-            ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
-
['smb2_flush_count', 'flush', 'absolute', 1, 1], - ['smb2_lock_count', 'lock', 'absolute', 1, 1], - ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1], - ['smb2_break_count', 'break', 'absolute', 1, 1], - ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1] - ], - 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'] - } - } - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)') - - def check(self): - sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus') - - if not (sudo_binary and smbstatus_binary): - self.error("Can\'t locate 'sudo' or 'smbstatus' binary") - return False - - self.command = [sudo_binary, '-v'] - err = self._get_raw_data(stderr=True) - if err: - self.error(''.join(err)) - return False - - self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P']) - - return ExecutableService.check(self) - - def _get_data(self): - """ - Format data received from shell command - :return: dict - """ - raw_data = self._get_raw_data() - if not raw_data: - return None - - parsed = self.rgx_smb2.findall(' '.join(raw_data)) - - return dict(parsed) or None diff --git a/python.d/sensors.chart.py b/python.d/sensors.chart.py deleted file mode 100644 index 06e420b68..000000000 --- a/python.d/sensors.chart.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: sensors netdata python.d plugin -# Author: Pawel Krupa (paulfantom) - -from bases.FrameworkServices.SimpleService import SimpleService -from third_party import lm_sensors as sensors - -# default module values (can be overridden per job in `config`) -# update_every = 2 - -ORDER = ['temperature', 'fan', 'voltage', 'current', 'power', 'energy', 'humidity'] - -# This is a prototype of chart definition which is used to dynamically create self.definitions -CHARTS = { - 'temperature': { - 'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'], - 'lines': [ - [None, None, 'absolute', 1, 1000] - ]}, - 'voltage': { - 'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'], - 'lines': [ - [None, None, 'absolute', 1, 1000] - ]}, - 'current': { - 'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'], - 'lines': [ - [None, None, 'absolute', 1, 1000] - ]}, - 'power': { - 'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'], - 'lines': [ - [None, None, 'absolute', 1, 1000000] - ]}, - 'fan': { - 'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'], - 'lines': [ - [None, None, 'absolute', 1, 1000] - ]}, - 'energy': { - 'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'], - 'lines': [ - [None, None, 'incremental', 1, 1000000] - ]}, - 'humidity': { - 'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'], - 'lines': [ - [None, None, 'absolute', 1, 1000] - ]} -} - -LIMITS = { - 'temperature': [-127, 1000], - 'voltage': [-127, 127], - 'current': [-127, 127], - 'fan': [0, 65535] -} - -TYPE_MAP = { - 0: 'voltage', - 1: 'fan', - 2: 'temperature', - 3: 'power', - 4: 'energy', - 5: 'current', - 6: 'humidity', - 7: 'max_main', - 16: 'vid', - 17: 'intrusion', - 18: 'max_other', - 24: 'beep_enable' -} - - -class Service(SimpleService): - def __init__(self, configuration=None, 
name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = list() - self.definitions = dict() - self.chips = list() - - def get_data(self): - data = dict() - try: - for chip in sensors.ChipIterator(): - prefix = sensors.chip_snprintf_name(chip) - for feature in sensors.FeatureIterator(chip): - sfi = sensors.SubFeatureIterator(chip, feature) - for sf in sfi: - val = sensors.get_value(chip, sf.number) - break - type_name = TYPE_MAP[feature.type] - if type_name in LIMITS: - limit = LIMITS[type_name] - if val < limit[0] or val > limit[1]: - continue - data[prefix + "_" + str(feature.name.decode())] = int(val * 1000) - except Exception as error: - self.error(error) - return None - - return data or None - - def create_definitions(self): - for sensor in ORDER: - for chip in sensors.ChipIterator(): - chip_name = sensors.chip_snprintf_name(chip) - if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]): - continue - for feature in sensors.FeatureIterator(chip): - sfi = sensors.SubFeatureIterator(chip, feature) - vals = [sensors.get_value(chip, sf.number) for sf in sfi] - if vals[0] == 0: - continue - if TYPE_MAP[feature.type] == sensor: - # create chart - name = chip_name + "_" + TYPE_MAP[feature.type] - if name not in self.order: - self.order.append(name) - chart_def = list(CHARTS[sensor]['options']) - chart_def[1] = chip_name + chart_def[1] - self.definitions[name] = {'options': chart_def} - self.definitions[name]['lines'] = [] - line = list(CHARTS[sensor]['lines'][0]) - line[0] = chip_name + "_" + str(feature.name.decode()) - line[1] = sensors.get_label(chip, feature) - self.definitions[name]['lines'].append(line) - - def check(self): - try: - sensors.init() - except Exception as error: - self.error(error) - return False - - self.create_definitions() - - return True - diff --git a/python.d/smartd_log.chart.py b/python.d/smartd_log.chart.py deleted file mode 100644 index 07ad88cd4..000000000 --- a/python.d/smartd_log.chart.py +++ /dev/null @@ -1,346 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: smart netdata python.d module -# Author: l2isbad, vorph1 - -import os -import re - -from collections import namedtuple -from time import time - -from bases.collection import read_last_line -from bases.FrameworkServices.SimpleService import SimpleService - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'] - -SMART_ATTR = { - '1': 'Read Error Rate', - '2': 'Throughput Performance', - '3': 'Spin-Up Time', - '4': 'Start/Stop Count', - '5': 'Reallocated Sectors Count', - '6': 'Read Channel Margin', - '7': 'Seek Error Rate', - '8': 'Seek Time Performance', - '9': 'Power-On Hours Count', - '10': 'Spin-up Retries', - '11': 'Calibration Retries', - '12': 'Power Cycle Count', - '13': 'Soft Read Error Rate', - '100': 'Erase/Program Cycles', - '103': 'Translation Table Rebuild', - '108': 'Unknown (108)', - '170': 'Reserved Block Count', - '171': 'Program Fail Count', - '172': 'Erase Fail Count', - '173': 'Wear Leveller Worst Case Erase Count', - '174': 'Unexpected Power Loss', - '175': 'Program Fail Count', - '176': 'Erase Fail Count', - '177': 'Wear Leveling Count', - '178': 'Used Reserved Block Count', - '179': 'Used Reserved Block Count', - '180': 'Unused Reserved Block Count', - '181': 'Program Fail Count', - '182': 'Erase Fail Count', - '183': 'SATA Downshifts', - '184': 'End-to-End error', - '185': 'Head Stability', - '186': 'Induced Op-Vibration 
Detection', - '187': 'Reported Uncorrectable Errors', - '188': 'Command Timeout', - '189': 'High Fly Writes', - '190': 'Temperature', - '191': 'G-Sense Errors', - '192': 'Power-Off Retract Cycles', - '193': 'Load/Unload Cycles', - '194': 'Temperature', - '195': 'Hardware ECC Recovered', - '196': 'Reallocation Events', - '197': 'Current Pending Sectors', - '198': 'Off-line Uncorrectable', - '199': 'UDMA CRC Error Rate', - '200': 'Write Error Rate', - '201': 'Soft Read Errors', - '202': 'Data Address Mark Errors', - '203': 'Run Out Cancel', - '204': 'Soft ECC Corrections', - '205': 'Thermal Asperity Rate', - '206': 'Flying Height', - '207': 'Spin High Current', - '209': 'Offline Seek Performance', - '220': 'Disk Shift', - '221': 'G-Sense Error Rate', - '222': 'Loaded Hours', - '223': 'Load/Unload Retries', - '224': 'Load Friction', - '225': 'Load/Unload Cycles', - '226': 'Load-in Time', - '227': 'Torque Amplification Count', - '228': 'Power-Off Retracts', - '230': 'GMR Head Amplitude', - '231': 'Temperature', - '232': 'Available Reserved Space', - '233': 'Media Wearout Indicator', - '240': 'Head Flying Hours', - '241': 'Total LBAs Written', - '242': 'Total LBAs Read', - '250': 'Read Error Retry Rate' -} - -LIMIT = namedtuple('LIMIT', ['min', 'max']) - -LIMITS = { - '194': LIMIT(0, 200) -} - -RESCAN_INTERVAL = 60 - -REGEX = re.compile( - '(\d+);' # attribute - '(\d+);' # normalized value - '(\d+)', # raw value - re.X -) - - -def chart_template(chart_name): - units, attr_id = chart_name.split('_')[-2:] - title = '{value_type} {description}'.format(value_type=units.capitalize(), - description=SMART_ATTR[attr_id]) - family = SMART_ATTR[attr_id].lower() - - return { - chart_name: { - 'options': [None, title, units, family, 'smartd_log.' + chart_name, 'line'], - 'lines': [] - } - } - - -def handle_os_error(method): - def on_call(*args): - try: - return method(*args) - except OSError: - return None - return on_call - - -class SmartAttribute(object): - def __init__(self, idx, normalized, raw): - self.id = idx - self.normalized = normalized - self._raw = raw - - @property - def raw(self): - if self.id in LIMITS: - limit = LIMITS[self.id] - if limit.min <= int(self._raw) <= limit.max: - return self._raw - return None - return self._raw - - @raw.setter - def raw(self, value): - self._raw = value - - -class DiskLogFile: - def __init__(self, path): - self.path = path - self.size = os.path.getsize(path) - - @handle_os_error - def is_changed(self): - new_size = os.path.getsize(self.path) - old_size, self.size = self.size, new_size - - return new_size != old_size and new_size - - @staticmethod - @handle_os_error - def is_valid(log_file, exclude): - return all([log_file.endswith('.csv'), - not [p for p in exclude if p in log_file], - os.access(log_file, os.R_OK), - os.path.getsize(log_file)]) - - -class Disk: - def __init__(self, full_path, age): - self.log_file = DiskLogFile(full_path) - self.name = os.path.basename(full_path).split('.')[-3] - self.age = int(age) - self.status = True - self.attributes = dict() - - self.get_attributes() - - def __eq__(self, other): - if isinstance(other, Disk): - return self.name == other.name - return self.name == other - - @handle_os_error - def is_active(self): - return (time() - os.path.getmtime(self.log_file.path)) / 60 < self.age - - @handle_os_error - def get_attributes(self): - last_line = read_last_line(self.log_file.path) - self.attributes = dict((attr, SmartAttribute(attr, normalized, raw)) for attr, normalized, raw - in REGEX.findall(last_line)) - return True - - 
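    # (A hedged sketch of what REGEX extracts from one smartd CSV line; the
    # sample is illustrative, not a captured log. Each (id, normalized, raw)
    # triple becomes a SmartAttribute:
    #
    #     sample = '2018-11-07 13:22:44;\t1;100;0;\t194;35;35;'
    #     REGEX.findall(sample)  # -> [('1', '100', '0'), ('194', '35', '35')]
    # )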
def data(self): - data = dict() - for attr in self.attributes.values(): - data['_'.join([self.name, 'normalized', attr.id])] = attr.normalized - if attr.raw is not None: - data['_'.join([self.name, 'raw', attr.id])] = attr.raw - return data - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.log_path = self.configuration.get('log_path', '/var/log/smartd') - self.raw = self.configuration.get('raw_values', True) - self.exclude = self.configuration.get('exclude_disks', str()).split() - self.age = self.configuration.get('age', 30) - - self.runs = 0 - self.disks = list() - self.order = list() - self.definitions = dict() - - def check(self): - self.disks = self.scan() - - if not self.disks: - return None - - user_defined_sa = self.configuration.get('smart_attributes') - - if user_defined_sa: - order = user_defined_sa.split() or ORDER - else: - order = ORDER - - self.create_charts(order) - - return True - - def get_data(self): - self.runs += 1 - - if self.runs % RESCAN_INTERVAL == 0: - self.cleanup_and_rescan() - - data = dict() - - for disk in self.disks: - - if not disk.status: - continue - - changed = disk.log_file.is_changed() - - # True = changed, False = unchanged, None = Exception - if changed is None: - disk.status = False - continue - - if changed: - success = disk.get_attributes() - if not success: - disk.status = False - continue - - data.update(disk.data()) - - return data or None - - def create_charts(self, order): - for attr in order: - raw_name, normalized_name = 'attr_id_raw_' + attr, 'attr_id_normalized_' + attr - raw, normalized = chart_template(raw_name), chart_template(normalized_name) - self.order.extend([normalized_name, raw_name]) - self.definitions.update(raw) - self.definitions.update(normalized) - - for disk in self.disks: - if attr not in disk.attributes: - self.debug("'{disk}' has no attribute '{attr_id}'".format(disk=disk.name, - attr_id=attr)) - continue - normalized[normalized_name]['lines'].append(['_'.join([disk.name, 'normalized', attr]), disk.name]) - - if not self.raw: - continue - - if disk.attributes[attr].raw is not None: - raw[raw_name]['lines'].append(['_'.join([disk.name, 'raw', attr]), disk.name]) - continue - self.debug("'{disk}' attribute '{attr_id}' value not in {limits}".format(disk=disk.name, - attr_id=attr, - limits=LIMITS[attr])) - - def cleanup_and_rescan(self): - self.cleanup() - new_disks = self.scan(only_new=True) - - for disk in new_disks: - valid = False - - for chart in self.charts: - value_type, idx = chart.id.split('_')[2:] - - if idx in disk.attributes: - valid = True - dimension_id = '_'.join([disk.name, value_type, idx]) - - if dimension_id in chart: - chart.hide_dimension(dimension_id=dimension_id, reverse=True) - else: - chart.add_dimension([dimension_id, disk.name]) - if valid: - self.disks.append(disk) - - def cleanup(self): - for disk in self.disks: - - if not disk.is_active(): - disk.status = False - if not disk.status: - for chart in self.charts: - dimension_id = '_'.join([disk.name, chart.id[8:]]) - chart.hide_dimension(dimension_id=dimension_id) - - self.disks = [disk for disk in self.disks if disk.status] - - def scan(self, only_new=None): - new_disks = list() - for f in os.listdir(self.log_path): - full_path = os.path.join(self.log_path, f) - - if DiskLogFile.is_valid(full_path, self.exclude): - disk = Disk(full_path, self.age) - - active = disk.is_active() - if active is None: - continue - if active: - if not 
only_new:
-                        new_disks.append(disk)
-                    else:
-                        if disk not in self.disks:
-                            new_disks.append(disk)
-                else:
-                    if not only_new:
-                        self.debug("'{disk}' not updated in the last {age} minutes, "
-                                   "skipping it.".format(disk=disk.name, age=self.age))
-        return new_disks
diff --git a/python.d/springboot.chart.py b/python.d/springboot.chart.py
deleted file mode 100644
index 60ad0cccb..000000000
--- a/python.d/springboot.chart.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: springboot netdata python.d module
-# Author: Wing924
-
-import json
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-
-DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
-
-DEFAULT_CHARTS = {
-    'response_code': {
-        'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
-        'lines': [
-            ["resp_other", 'Other', 'incremental'],
-            ["resp_1xx", '1xx', 'incremental'],
-            ["resp_2xx", '2xx', 'incremental'],
-            ["resp_3xx", '3xx', 'incremental'],
-            ["resp_4xx", '4xx', 'incremental'],
-            ["resp_5xx", '5xx', 'incremental'],
-        ]},
-    'threads': {
-        'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
-        'lines': [
-            ["threads_daemon", 'daemon', 'absolute'],
-            ["threads", 'total', 'absolute'],
-        ]},
-    'gc_time': {
-        'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
-        'lines': [
-            ["gc_copy_time", 'Copy', 'incremental'],
-            ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
-            ["gc_parnew_time", 'ParNew', 'incremental'],
-            ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
-            ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
-            ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
-            ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
-            ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
-        ]},
-    'gc_ope': {
-        'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
-        'lines': [
-            ["gc_copy_count", 'Copy', 'incremental'],
-            ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
-            ["gc_parnew_count", 'ParNew', 'incremental'],
-            ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
-            ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
-            ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
-            ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
-            ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
-        ]},
-    'heap': {
-        'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
-        'lines': [
-            ["heap_committed", 'committed', "absolute"],
-            ["heap_used", 'used', "absolute"],
-        ]},
-}
-
-
-class ExtraChartError(ValueError):
-    pass
-
-
-class Service(UrlService):
-    def __init__(self, configuration=None, name=None):
-        UrlService.__init__(self, configuration=configuration, name=name)
-        self.url = self.configuration.get('url', "http://localhost:8080/metrics")
-        self._setup_charts()
-
-    def _get_data(self):
-        """
-        Format data received from http request
-        :return: dict
-        """
-        raw_data = self._get_raw_data()
-        if not raw_data:
-            return None
-
-        try:
-            data = json.loads(raw_data)
-        except ValueError:
-            self.debug('%s is not a valid JSON page' % self.url)
-            return None
-
-        result = {
-            'resp_1xx': 0,
-            'resp_2xx': 0,
-            'resp_3xx': 0,
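            # (Keys such as 'counter.status.200.root' from the metrics
            # endpoint are folded into these resp_Nxx buckets by the loop
            # below; codes whose first digit is not 1-5 land in resp_other.)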
-            'resp_4xx': 0,
-            'resp_5xx': 0,
-            'resp_other': 0,
-        }
-
-        for key, value in data.items():  # items() works on both Python 2 and 3
-            if 'counter.status.' in key:
-                status_type = key[15:16] + 'xx'  # first digit of the status code
-                if status_type[0] not in '12345':
-                    status_type = 'other'
-                result['resp_' + status_type] += value
-            else:
-                result[key.replace('.', '_')] = value
-
-        return result or None
-
-    def _setup_charts(self):
-        self.order = []
-        self.definitions = {}
-        defaults = self.configuration.get('defaults', {})
-
-        for chart in DEFAULT_ORDER:
-            if defaults.get(chart, True):
-                self.order.append(chart)
-                self.definitions[chart] = DEFAULT_CHARTS[chart]
-
-        for extra in self.configuration.get('extras', []):
-            self._add_extra_chart(extra)
-            self.order.append(extra['id'])
-
-    def _add_extra_chart(self, chart):
-        chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
-        options = chart.get('options', None) or self.die('options is not defined in extra chart: %s' % chart_id)
-        lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
-
-        title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
-        units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
-        family = options.get('family', title)
-        context = options.get('context', 'springboot.' + title)
-        charttype = options.get('charttype', 'line')
-
-        result = {
-            'options': [None, title, units, family, context, charttype],
-            'lines': [],
-        }
-
-        for line in lines:
-            dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
-            name = line.get('name', dimension)
-            algorithm = line.get('algorithm', 'absolute')
-            multiplier = line.get('multiplier', 1)
-            divisor = line.get('divisor', 1)
-            result['lines'].append([dimension, name, algorithm, multiplier, divisor])
-
-        self.definitions[chart_id] = result
-
-    @staticmethod
-    def die(error_message):
-        raise ExtraChartError(error_message)
diff --git a/python.d/squid.chart.py b/python.d/squid.chart.py
deleted file mode 100644
index ba8f982ff..000000000
--- a/python.d/squid.chart.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: squid netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SocketService import SocketService
-
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
-
-CHARTS = {
-    'clients_net': {
-        'options': [None, "Squid Client Bandwidth", "kilobits/s", "clients", "squid.clients_net", "area"],
-        'lines': [
-            ["client_http_kbytes_in", "in", "incremental", 8, 1],
-            ["client_http_kbytes_out", "out", "incremental", -8, 1],
-            ["client_http_hit_kbytes_out", "hits", "incremental", -8, 1]
-        ]},
-    'clients_requests': {
-        'options': [None, "Squid Client Requests", "requests/s", "clients", "squid.clients_requests", 'line'],
-        'lines': [
-            ["client_http_requests", "requests", "incremental"],
-            ["client_http_hits", "hits", "incremental"],
-            ["client_http_errors", "errors", "incremental", -1, 1]
-        ]},
-    'servers_net': {
-        'options': [None, "Squid Server Bandwidth", "kilobits/s", "servers", "squid.servers_net", "area"],
-        'lines': [
-            ["server_all_kbytes_in", "in", "incremental", 8, 1],
-            ["server_all_kbytes_out", "out", "incremental", -8, 1]
-        ]},
-    'servers_requests': {
-        'options': [None, "Squid Server Requests", "requests/s", "servers", "squid.servers_requests",
'line'], - 'lines': [ - ["server_all_requests", "requests", "incremental"], - ["server_all_errors", "errors", "incremental", -1, 1] - ]} -} - - -class Service(SocketService): - def __init__(self, configuration=None, name=None): - SocketService.__init__(self, configuration=configuration, name=name) - self._keep_alive = True - self.request = "" - self.host = "localhost" - self.port = 3128 - self.order = ORDER - self.definitions = CHARTS - - def _get_data(self): - """ - Get data via http request - :return: dict - """ - response = self._get_raw_data() - - data = dict() - try: - raw = "" - for tmp in response.split('\r\n'): - if tmp.startswith("sample_time"): - raw = tmp - break - - if raw.startswith('<'): - self.error("invalid data received") - return None - - for row in raw.split('\n'): - if row.startswith(("client", "server.all")): - tmp = row.split("=") - data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1]) - - except (ValueError, AttributeError, TypeError): - self.error("invalid data received") - return None - - if not data: - self.error("no data received") - return None - return data - - def _check_raw_data(self, data): - header = data[:1024].lower() - - if "connection: keep-alive" in header: - self._keep_alive = True - else: - self._keep_alive = False - - if data[-7:] == "\r\n0\r\n\r\n" and "transfer-encoding: chunked" in header: # HTTP/1.1 response - self.debug("received full response from squid") - return True - - self.debug("waiting more data from squid") - return False - - def check(self): - """ - Parse essential configuration, autodetect squid configuration (if needed), and check if data is available - :return: boolean - """ - self._parse_config() - # format request - req = self.request.decode() - if not req.startswith("GET"): - req = "GET " + req - if not req.endswith(" HTTP/1.1\r\n\r\n"): - req += " HTTP/1.1\r\n\r\n" - self.request = req.encode() - if self._get_data() is not None: - return True - else: - return False diff --git a/python.d/tomcat.chart.py b/python.d/tomcat.chart.py deleted file mode 100644 index a570d5643..000000000 --- a/python.d/tomcat.chart.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: tomcat netdata python.d module -# Author: Pawel Krupa (paulfantom) - -import xml.etree.ElementTree as ET - -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured'] - -CHARTS = { - 'accesses': { - 'options': [None, "Requests", "requests/s", "statistics", "tomcat.accesses", "area"], - 'lines': [ - ["requestCount", 'accesses', 'incremental'], - ["errorCount", 'errors', 'incremental'], - ]}, - 'bandwidth': { - 'options': [None, "Bandwidth", "KB/s", "statistics", "tomcat.bandwidth", "area"], - 'lines': [ - ["bytesSent", 'sent', 'incremental', 1, 1024], - ["bytesReceived", 'received', 'incremental', 1, 1024], - ]}, - 'processing_time': { - 'options': [None, "processing time", "seconds", "statistics", "tomcat.processing_time", "area"], - 'lines': [ - ["processingTime", 'processing time', 'incremental', 1, 1000] - ]}, - 'threads': { - 'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "area"], - 'lines': [ - ["currentThreadCount", 'current', "absolute"], - ["currentThreadsBusy", 'busy', "absolute"] - ]}, - 'jvm': { 
-        'options': [None, "JVM Memory Pool Usage", "MB", "memory", "tomcat.jvm", "stacked"],
-        'lines': [
-            ["free", 'free', "absolute", 1, 1048576],
-            ["eden_used", 'eden', "absolute", 1, 1048576],
-            ["survivor_used", 'survivor', "absolute", 1, 1048576],
-            ["tenured_used", 'tenured', "absolute", 1, 1048576],
-            ["code_cache_used", 'code cache', "absolute", 1, 1048576],
-            ["compressed_used", 'compressed', "absolute", 1, 1048576],
-            ["metaspace_used", 'metaspace', "absolute", 1, 1048576],
-        ]},
-    'jvm_eden': {
-        'options': [None, "Eden Memory Usage", "MB", "memory", "tomcat.jvm_eden", "area"],
-        'lines': [
-            ["eden_used", 'used', "absolute", 1, 1048576],
-            ["eden_commited", 'committed', "absolute", 1, 1048576],
-            ["eden_max", 'max', "absolute", 1, 1048576]
-        ]},
-    'jvm_survivor': {
-        'options': [None, "Survivor Memory Usage", "MB", "memory", "tomcat.jvm_survivor", "area"],
-        'lines': [
-            ["survivor_used", 'used', "absolute", 1, 1048576],
-            ["survivor_commited", 'committed', "absolute", 1, 1048576],
-            ["survivor_max", 'max', "absolute", 1, 1048576]
-        ]},
-    'jvm_tenured': {
-        'options': [None, "Tenured Memory Usage", "MB", "memory", "tomcat.jvm_tenured", "area"],
-        'lines': [
-            ["tenured_used", 'used', "absolute", 1, 1048576],
-            ["tenured_commited", 'committed', "absolute", 1, 1048576],
-            ["tenured_max", 'max', "absolute", 1, 1048576]
-        ]},
-}
-
-
-class Service(UrlService):
-    def __init__(self, configuration=None, name=None):
-        UrlService.__init__(self, configuration=configuration, name=name)
-        self.url = self.configuration.get('url', "http://127.0.0.1:8080/manager/status?XML=true")
-        self.connector_name = self.configuration.get('connector_name', None)
-        self.order = ORDER
-        self.definitions = CHARTS
-
-    def _get_data(self):
-        """
-        Format data received from http request
-        :return: dict
-        """
-        data = None
-        raw_data = self._get_raw_data()
-        if raw_data:
-            try:
-                xml = ET.fromstring(raw_data)
-            except ET.ParseError:
-                self.debug('%s is not a valid XML page. Please add "?XML=true" to the tomcat status page URL.'
% self.url) - return None - data = {} - - jvm = xml.find('jvm') - - connector = None - if self.connector_name: - for conn in xml.findall('connector'): - if self.connector_name in conn.get('name'): - connector = conn - break - else: - connector = xml.find('connector') - - memory = jvm.find('memory') - data['free'] = memory.get('free') - data['total'] = memory.get('total') - - for pool in jvm.findall('memorypool'): - name = pool.get('name') - if 'Eden Space' in name: - data['eden_used'] = pool.get('usageUsed') - data['eden_commited'] = pool.get('usageCommitted') - data['eden_max'] = pool.get('usageMax') - elif 'Survivor Space' in name: - data['survivor_used'] = pool.get('usageUsed') - data['survivor_commited'] = pool.get('usageCommitted') - data['survivor_max'] = pool.get('usageMax') - elif 'Tenured Gen' in name or 'Old Gen' in name: - data['tenured_used'] = pool.get('usageUsed') - data['tenured_commited'] = pool.get('usageCommitted') - data['tenured_max'] = pool.get('usageMax') - elif name == 'Code Cache': - data['code_cache_used'] = pool.get('usageUsed') - data['code_cache_commited'] = pool.get('usageCommitted') - data['code_cache_max'] = pool.get('usageMax') - elif name == 'Compressed': - data['compressed_used'] = pool.get('usageUsed') - data['compressed_commited'] = pool.get('usageCommitted') - data['compressed_max'] = pool.get('usageMax') - elif name == 'Metaspace': - data['metaspace_used'] = pool.get('usageUsed') - data['metaspace_commited'] = pool.get('usageCommitted') - data['metaspace_max'] = pool.get('usageMax') - - if connector: - thread_info = connector.find('threadInfo') - data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy') - data['currentThreadCount'] = thread_info.get('currentThreadCount') - - request_info = connector.find('requestInfo') - data['processingTime'] = request_info.get('processingTime') - data['requestCount'] = request_info.get('requestCount') - data['errorCount'] = request_info.get('errorCount') - data['bytesReceived'] = request_info.get('bytesReceived') - data['bytesSent'] = request_info.get('bytesSent') - - return data or None diff --git a/python.d/traefik.chart.py b/python.d/traefik.chart.py deleted file mode 100644 index f7c3e223b..000000000 --- a/python.d/traefik.chart.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: traefik netdata python.d module -# Author: Alexandre Menezes (@ale_menezes) - -from json import loads -from collections import defaultdict -from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -update_every = 1 -priority = 60000 -retries = 10 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = [ - 'response_statuses', - 'response_codes', - 'detailed_response_codes', - 'requests', - 'total_response_time', - 'average_response_time', - 'average_response_time_per_iteration', - 'uptime' -] - -CHARTS = { - 'response_statuses': { - 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'], - 'lines': [ - ['successful_requests', 'success', 'incremental'], - ['server_errors', 'error', 'incremental'], - ['redirects', 'redirect', 'incremental'], - ['bad_requests', 'bad', 'incremental'], - ['other_requests', 'other', 'incremental'] - ]}, - 'response_codes': { - 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'], - 'lines': [ - ['2xx', None, 'incremental'], - ['5xx', None, 'incremental'], - ['3xx', None, 
'incremental'], - ['4xx', None, 'incremental'], - ['1xx', None, 'incremental'], - ['other', None, 'incremental'] - ]}, - 'detailed_response_codes': { - 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes', 'stacked'], - 'lines': [ - ]}, - 'requests': { - 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'], - 'lines': [ - ['total_count', 'requests', 'incremental'] - ]}, - 'total_response_time': { - 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'], - 'lines': [ - ['total_response_time_sec', 'response', 'absolute', 1, 10000] - ]}, - 'average_response_time': { - 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'], - 'lines': [ - ['average_response_time_sec', 'response', 'absolute', 1, 1000] - ]}, - 'average_response_time_per_iteration': { - 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings', 'traefik.average_response_time_per_iteration', 'line'], - 'lines': [ - ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000] - ]}, - 'uptime': { - 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'], - 'lines': [ - ['uptime_sec', 'uptime', 'absolute'] - ]} - } - -HEALTH_STATS = [ - 'uptime_sec', - 'average_response_time_sec', - 'total_response_time_sec', - 'total_count', - 'total_status_code_count' -] - -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url', 'http://localhost:8080/health') - self.order = ORDER - self.definitions = CHARTS - self.data = { - 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0, - 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0, - '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0, - 'average_response_time_per_iteration_sec': 0 - } - self.last_total_response_time = 0 - self.last_total_count = 0 - - def _get_data(self): - data = self._get_raw_data() - - if not data: - return None - - data = loads(data) - - self.get_data_per_code_status(raw_data=data) - - self.get_data_per_code_family(raw_data=data) - - self.get_data_per_code(raw_data=data) - - self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS)) - - self.data['average_response_time_sec'] *= 1000000 - self.data['total_response_time_sec'] *= 10000 - if data['total_count'] != self.last_total_count: - self.data['average_response_time_per_iteration_sec'] = (data['total_response_time_sec'] - self.last_total_response_time) * 1000000 / (data['total_count'] - self.last_total_count) - else: - self.data['average_response_time_per_iteration_sec'] = 0 - self.last_total_response_time = data['total_response_time_sec'] - self.last_total_count = data['total_count'] - - return self.data or None - - def get_data_per_code_status(self, raw_data): - data = defaultdict(int) - for code, value in raw_data['total_status_code_count'].items(): - code_prefix = code[0] - if code_prefix == '1' or code_prefix == '2' or code == '304': - data['successful_requests'] += value - elif code_prefix == '3': - data['redirects'] += value - elif code_prefix == '4': - data['bad_requests'] += value - elif code_prefix == '5': - data['server_errors'] += value - else: - data['other_requests'] += value - self.data.update(data) - - def get_data_per_code_family(self, raw_data): - data = defaultdict(int) - for code, value in 
raw_data['total_status_code_count'].items(): - code_prefix = code[0] - if code_prefix == '1': - data['1xx'] += value - elif code_prefix == '2': - data['2xx'] += value - elif code_prefix == '3': - data['3xx'] += value - elif code_prefix == '4': - data['4xx'] += value - elif code_prefix == '5': - data['5xx'] += value - else: - data['other'] += value - self.data.update(data) - - def get_data_per_code(self, raw_data): - for code, value in raw_data['total_status_code_count'].items(): - if self.charts: - if code not in self.data: - self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental']) - self.data[code] = value - -def fetch_data_(raw_data, metrics): - data = dict() - - for metric in metrics: - value = raw_data - metrics_list = metric.split('.') - try: - for m in metrics_list: - value = value[m] - except KeyError: - continue - data['_'.join(metrics_list)] = value - - return data diff --git a/python.d/varnish.chart.py b/python.d/varnish.chart.py deleted file mode 100644 index d8145c0b6..000000000 --- a/python.d/varnish.chart.py +++ /dev/null @@ -1,241 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: varnish netdata python.d module -# Author: l2isbad - -import re - -from bases.collection import find_binary -from bases.FrameworkServices.ExecutableService import ExecutableService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -ORDER = ['session_connections', 'client_requests', - 'all_time_hit_rate', 'current_poll_hit_rate', 'cached_objects_expired', 'cached_objects_nuked', - 'threads_total', 'threads_statistics', 'threads_queue_len', - 'backend_connections', 'backend_requests', - 'esi_statistics', - 'memory_usage', - 'uptime'] - -CHARTS = { - 'session_connections': { - 'options': [None, 'Connections Statistics', 'connections/s', - 'client metrics', 'varnish.session_connection', 'line'], - 'lines': [ - ['sess_conn', 'accepted', 'incremental'], - ['sess_dropped', 'dropped', 'incremental'] - ] - }, - 'client_requests': { - 'options': [None, 'Client Requests', 'requests/s', - 'client metrics', 'varnish.client_requests', 'line'], - 'lines': [ - ['client_req', 'received', 'incremental'] - ] - }, - 'all_time_hit_rate': { - 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance', - 'varnish.all_time_hit_rate', 'stacked'], - 'lines': [ - ['cache_hit', 'hit', 'percentage-of-absolute-row'], - ['cache_miss', 'miss', 'percentage-of-absolute-row'], - ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']] - }, - 'current_poll_hit_rate': { - 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance', - 'varnish.current_poll_hit_rate', 'stacked'], - 'lines': [ - ['cache_hit', 'hit', 'percentage-of-incremental-row'], - ['cache_miss', 'miss', 'percentage-of-incremental-row'], - ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row'] - ] - }, - 'cached_objects_expired': { - 'options': [None, 'Expired Objects', 'expired/s', 'cache performance', - 'varnish.cached_objects_expired', 'line'], - 'lines': [ - ['n_expired', 'objects', 'incremental'] - ] - }, - 'cached_objects_nuked': { - 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance', - 'varnish.cached_objects_nuked', 'line'], - 'lines': [ - ['n_lru_nuked', 'objects', 'incremental'] - ] - }, - 'threads_total': { - 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics', - 'varnish.threads_total', 'line'], - 'lines': [ - ['threads', None, 'absolute'] - ] - 
}, - 'threads_statistics': { - 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics', - 'varnish.threads_statistics', 'line'], - 'lines': [ - ['threads_created', 'created', 'incremental'], - ['threads_failed', 'failed', 'incremental'], - ['threads_limited', 'limited', 'incremental'] - ] - }, - 'threads_queue_len': { - 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics', - 'varnish.threads_queue_len', 'line'], - 'lines': [ - ['thread_queue_len', 'in queue'] - ] - }, - 'backend_connections': { - 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics', - 'varnish.backend_connections', 'line'], - 'lines': [ - ['backend_conn', 'successful', 'incremental'], - ['backend_unhealthy', 'unhealthy', 'incremental'], - ['backend_reuse', 'reused', 'incremental'], - ['backend_toolate', 'closed', 'incremental'], - ['backend_recycle', 'recycled', 'incremental'], - ['backend_fail', 'failed', 'incremental'] - ] - }, - 'backend_requests': { - 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics', - 'varnish.backend_requests', 'line'], - 'lines': [ - ['backend_req', 'sent', 'incremental'] - ] - }, - 'esi_statistics': { - 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'], - 'lines': [ - ['esi_errors', 'errors', 'incremental'], - ['esi_warnings', 'warnings', 'incremental'] - ] - }, - 'memory_usage': { - 'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'], - 'lines': [ - ['memory_free', 'free', 'absolute', 1, 1 << 20], - ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]] - }, - 'uptime': { - 'lines': [ - ['uptime', None, 'absolute'] - ], - 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line'] - } -} - - -class Parser: - _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)') - _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)') - _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)') - - def __init__(self): - self.re_default = None - self.re_backend = None - - def init(self, data): - data = ''.join(data) - parsed_main = Parser._default.findall(data) - if parsed_main: - self.re_default = Parser._default - - parsed_backend = Parser._backend_new.findall(data) - if parsed_backend: - self.re_backend = Parser._backend_new - else: - parsed_backend = Parser._backend_old.findall(data) - if parsed_backend: - self.re_backend = Parser._backend_old - - def server_stats(self, data): - return self.re_default.findall(''.join(data)) - - def backend_stats(self, data): - return self.re_backend.findall(''.join(data)) - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - varnishstat = find_binary('varnishstat') - self.command = [varnishstat, '-1'] if varnishstat else None - self.parser = Parser() - - def check(self): - if not self.command: - self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata") - return False - - # STDOUT is not empty - reply = self._get_raw_data() - if not reply: - self.error("No output from 'varnishstat'.
Not enough privileges?") - return False - - self.parser.init(reply) - - # Output is parsable - if not self.parser.re_default: - self.error("Can't parse the output...") - return False - - if self.parser.re_backend: - backends = [b[0] for b in self.parser.backend_stats(reply)[::2]] - self.create_backends_charts(backends) - return True - - def get_data(self): - """ - Format data received from shell command - :return: dict - """ - raw = self._get_raw_data() - if not raw: - return None - - data = dict() - server_stats = self.parser.server_stats(raw) - if not server_stats: - return None - - if self.parser.re_backend: - backend_stats = self.parser.backend_stats(raw) - data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats)) - - data.update(dict((param, value) for _, param, value in server_stats)) - - data['memory_allocated'] = data['s0.g_bytes'] - data['memory_free'] = data['s0.g_space'] - - return data - - def create_backends_charts(self, backends): - for backend in backends: - chart_name = ''.join([backend, '_response_statistics']) - title = 'Backend "{0}"'.format(backend.capitalize()) - hdr_bytes = ''.join([backend, '_beresp_hdrbytes']) - body_bytes = ''.join([backend, '_beresp_bodybytes']) - - chart = { - chart_name: - { - 'options': [None, title, 'kilobits/s', 'backend response statistics', - 'varnish.backend', 'area'], - 'lines': [ - [hdr_bytes, 'header', 'incremental', 8, 1000], - [body_bytes, 'body', 'incremental', -8, 1000] - ] - } - } - - self.order.insert(0, chart_name) - self.definitions.update(chart) diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py deleted file mode 100644 index be9baba92..000000000 --- a/python.d/web_log.chart.py +++ /dev/null @@ -1,1047 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: web log netdata python.d module -# Author: l2isbad - -import bisect -import re -import os -import sys - -from collections import namedtuple, defaultdict -from copy import deepcopy - -try: - from itertools import filterfalse -except ImportError: - from itertools import ifilter as filter - from itertools import ifilterfalse as filterfalse - -from bases.collection import read_last_line -from bases.FrameworkServices.LogService import LogService - - -ORDER_APACHE_CACHE = ['apache_cache'] - -ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth', - 'response_time', 'response_time_hist', 'response_time_upstream', 'response_time_upstream_hist', - 'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version', - 'requests_per_ipproto', 'clients', 'clients_all'] - -ORDER_SQUID = ['squid_response_statuses', 'squid_response_codes', 'squid_detailed_response_codes', - 'squid_method', 'squid_mime_type', 'squid_hier_code', 'squid_transport_methods', - 'squid_transport_errors', 'squid_code', 'squid_handling_opts', 'squid_object_types', - 'squid_cache_events', 'squid_bytes', 'squid_duration', 'squid_clients', 'squid_clients_all'] - -CHARTS_WEB = { - 'response_codes': { - 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'], - 'lines': [ - ['2xx', None, 'incremental'], - ['5xx', None, 'incremental'], - ['3xx', None, 'incremental'], - ['4xx', None, 'incremental'], - ['1xx', None, 'incremental'], - ['0xx', 'other', 'incremental'], - ['unmatched', None, 'incremental'] - ]}, - 'bandwidth': { - 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'], - 'lines': [ - ['resp_length', 'received', 'incremental', 8, 1000], - ['bytes_sent', 'sent', 'incremental',
-8, 1000] - ]}, - 'response_time': { - 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'], - 'lines': [ - ['resp_time_min', 'min', 'incremental', 1, 1000], - ['resp_time_max', 'max', 'incremental', 1, 1000], - ['resp_time_avg', 'avg', 'incremental', 1, 1000] - ]}, - 'response_time_hist': { - 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'], - 'lines': [ - ]}, - 'response_time_upstream': { - 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings', - 'web_log.response_time_upstream', 'area'], - 'lines': [ - ['resp_time_upstream_min', 'min', 'incremental', 1, 1000], - ['resp_time_upstream_max', 'max', 'incremental', 1, 1000], - ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000] - ]}, - 'response_time_upstream_hist': { - 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', - 'web_log.response_time_upstream_hist', 'line'], - 'lines': [ - ]}, - 'clients': { - 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'], - 'lines': [ - ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1], - ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1] - ]}, - 'clients_all': { - 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'], - 'lines': [ - ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1], - ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1] - ]}, - 'http_method': { - 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'], - 'lines': [ - ['GET', 'GET', 'incremental', 1, 1] - ]}, - 'http_version': { - 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions', - 'web_log.http_version', 'stacked'], - 'lines': []}, - 'requests_per_ipproto': { - 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto', - 'stacked'], - 'lines': [ - ['req_ipv4', 'ipv4', 'incremental', 1, 1], - ['req_ipv6', 'ipv6', 'incremental', 1, 1] - ]}, - 'response_statuses': { - 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', - 'stacked'], - 'lines': [ - ['successful_requests', 'success', 'incremental', 1, 1], - ['server_errors', 'error', 'incremental', 1, 1], - ['redirects', 'redirect', 'incremental', 1, 1], - ['bad_requests', 'bad', 'incremental', 1, 1], - ['other_requests', 'other', 'incremental', 1, 1] - ]}, - 'requests_per_url': { - 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', - 'stacked'], - 'lines': [ - ['url_pattern_other', 'other', 'incremental', 1, 1] - ]}, - 'requests_per_user_defined': { - 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined', - 'web_log.requests_per_user_defined', 'stacked'], - 'lines': [ - ['user_pattern_other', 'other', 'incremental', 1, 1] - ]} -} - -CHARTS_APACHE_CACHE = { - 'apache_cache': { - 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache', - 'stacked'], - 'lines': [ - ["hit", 'cache', "percentage-of-absolute-row"], - ["miss", None, "percentage-of-absolute-row"], - ["other", None, "percentage-of-absolute-row"] - ]} -} - -CHARTS_SQUID = { - 'squid_duration': { - 'options': [None, 'Elapsed Time The Transaction Busied The Cache', - 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'], - 'lines': [ - ['duration_min', 'min', 'incremental', 1, 1000], - ['duration_max', 
'max', 'incremental', 1, 1000], - ['duration_avg', 'avg', 'incremental', 1, 1000] - ]}, - 'squid_bytes': { - 'options': [None, 'Amount Of Data Delivered To The Clients', - 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'], - 'lines': [ - ['bytes', 'sent', 'incremental', 8, 1000] - ]}, - 'squid_response_statuses': { - 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses', - 'stacked'], - 'lines': [ - ['successful_requests', 'success', 'incremental', 1, 1], - ['server_errors', 'error', 'incremental', 1, 1], - ['redirects', 'redirect', 'incremental', 1, 1], - ['bad_requests', 'bad', 'incremental', 1, 1], - ['other_requests', 'other', 'incremental', 1, 1] - ]}, - 'squid_response_codes': { - 'options': [None, 'Response Codes', 'responses/s', 'squid_responses', - 'web_log.squid_response_codes', 'stacked'], - 'lines': [ - ['2xx', None, 'incremental'], - ['5xx', None, 'incremental'], - ['3xx', None, 'incremental'], - ['4xx', None, 'incremental'], - ['1xx', None, 'incremental'], - ['0xx', None, 'incremental'], - ['other', None, 'incremental'], - ['unmatched', None, 'incremental'] - ]}, - 'squid_code': { - 'options': [None, 'Responses Per Cache Result Of The Request', - 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'], - 'lines': [ - ]}, - 'squid_detailed_response_codes': { - 'options': [None, 'Detailed Response Codes', - 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'], - 'lines': [ - ]}, - 'squid_hier_code': { - 'options': [None, 'Responses Per Hierarchy Code', - 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'], - 'lines': [ - ]}, - 'squid_method': { - 'options': [None, 'Requests Per Method', - 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'], - 'lines': [ - ]}, - 'squid_mime_type': { - 'options': [None, 'Requests Per MIME Type', - 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'], - 'lines': [ - ]}, - 'squid_clients': { - 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients', - 'web_log.squid_clients', 'stacked'], - 'lines': [ - ['unique_ipv4', 'ipv4', 'incremental'], - ['unique_ipv6', 'ipv6', 'incremental'] - ]}, - 'squid_clients_all': { - 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients', - 'web_log.squid_clients_all', 'stacked'], - 'lines': [ - ['unique_tot_ipv4', 'ipv4', 'absolute'], - ['unique_tot_ipv6', 'ipv6', 'absolute'] - ]}, - 'squid_transport_methods': { - 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport', - 'web_log.squid_transport_methods', 'stacked'], - 'lines': [ - ]}, - 'squid_transport_errors': { - 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport', - 'web_log.squid_transport_errors', 'stacked'], - 'lines': [ - ]}, - 'squid_handling_opts': { - 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache', - 'web_log.squid_handling_opts', 'stacked'], - 'lines': [ - ]}, - 'squid_object_types': { - 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache', - 'web_log.squid_object_types', 'stacked'], - 'lines': [ - ]}, - 'squid_cache_events': { - 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache', - 'web_log.squid_cache_events', 'stacked'], - 'lines': [ - ]} -} - -NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func']) - -DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other'] - -SQUID_CODES = dict(TCP='squid_transport_methods', 
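                   # Each squid result code is split on '_' and every tag found in this
                   # mapping is routed to its chart (see Squid.get_data_per_squid_code
                   # below); e.g. a 'TCP_MISS_ABORTED' result feeds TCP into
                   # squid_transport_methods, MISS into squid_cache_events and
                   # ABORTED into squid_transport_errors. Unknown tags are skipped.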
UDP='squid_transport_methods', NONE='squid_transport_methods', - CLIENT='squid_handling_opts', IMS='squid_handling_opts', ASYNC='squid_handling_opts', - SWAPFAIL='squid_handling_opts', REFRESH='squid_handling_opts', SHARED='squid_handling_opts', - REPLY='squid_handling_opts', NEGATIVE='squid_object_types', STALE='squid_object_types', - OFFLINE='squid_object_types', INVALID='squid_object_types', FAIL='squid_object_types', - MODIFIED='squid_object_types', UNMODIFIED='squid_object_types', REDIRECT='squid_object_types', - HIT='squid_cache_events', MEM='squid_cache_events', MISS='squid_cache_events', - DENIED='squid_cache_events', NOFETCH='squid_cache_events', TUNNEL='squid_cache_events', - ABORTED='squid_transport_errors', TIMEOUT='squid_transport_errors') - -REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:\.\d)?)') - - -class Service(LogService): - def __init__(self, configuration=None, name=None): - """ - :param configuration: - :param name: - """ - LogService.__init__(self, configuration=configuration, name=name) - self.configuration = configuration - self.log_path = self.configuration.get('path') - self.job = None - - def check(self): - """ - :return: bool - - 1. "log_path" is specified in the module configuration file - 2. "log_path" must be readable by the netdata user and must exist - 3. "log_path" must not be empty. We need at least 1 line to find an appropriate pattern to parse - 4. other checks depend on the log "type" - """ - - log_type = self.configuration.get('type', 'web') - log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid) - - if log_type not in log_types: - self.error("bad log type {log_type}. Supported types: {types}".format(log_type=log_type, - types=log_types.keys())) - return False - - if not self.log_path: - self.error('log path is not specified') - return False - - if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)): - self.error('{log_file} is not readable or does not exist'.format(log_file=self.log_path)) - return False - - if not os.path.getsize(self.log_path): - self.error('{log_file} is empty'.format(log_file=self.log_path)) - return False - - self.job = log_types[log_type](self) - if self.job.check(): - self.order = self.job.order - self.definitions = self.job.definitions - return True - return False - - def _get_data(self): - return self.job.get_data(self._get_raw_data()) - - -class Web: - def __init__(self, service): - self.service = service - self.order = ORDER_WEB[:] - self.definitions = deepcopy(CHARTS_WEB) - self.pre_filter = check_patterns('filter', self.configuration.get('filter')) - self.storage = dict() - self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0, - 'resp_time_avg': 0, 'resp_time_upstream_min': 0, 'resp_time_upstream_max': 0, - 'resp_time_upstream_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0, - '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0, - 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0, - 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0} - - def __getattr__(self, item): - return getattr(self.service, item) - - def check(self): - last_line = read_last_line(self.log_path) - if not last_line: - return False - # Custom_log_format or predefined log format.
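        # A hypothetical job entry using "custom_log_format" (path, pattern and
        # multiplier are illustrative only; the named groups must include at
        # least 'address', 'code' and 'bytes_sent' -- see find_regex_custom()
        # below):
        #
        #   my_log:
        #     path: '/var/log/nginx/access.log'
        #     custom_log_format:
        #       pattern: '(?P<address>[\da-f.:]+) .+? "(?P<request>[^"]*)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+|-)'
        #       time_multiplier: 1000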
- if self.configuration.get('custom_log_format'): - match_dict, error = self.find_regex_custom(last_line) - else: - match_dict, error = self.find_regex(last_line) - - # "match_dict" is None if there are any problems - if match_dict is None: - self.error(error) - return False - - self.storage['unique_all_time'] = list() - self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories')) - self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined')) - - self.create_web_charts(match_dict) # Create charts - self.info('Collected data: %s' % list(match_dict.keys())) - return True - - def create_web_charts(self, match_dict): - """ - :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'} - :return: - Create/remove additional charts depending on the 'match_dict' keys and configuration file options - """ - if 'resp_time' not in match_dict: - self.order.remove('response_time') - self.order.remove('response_time_hist') - if 'resp_time_upstream' not in match_dict: - self.order.remove('response_time_upstream') - self.order.remove('response_time_upstream_hist') - - # Add 'response_time_hist' and 'response_time_upstream_hist' charts if specified in the configuration - histogram = self.configuration.get('histogram', None) - if isinstance(histogram, list): - self.storage['bucket_index'] = histogram[:] - self.storage['bucket_index'].append(sys.maxsize) - self.storage['buckets'] = [0] * (len(histogram) + 1) - self.storage['upstream_buckets'] = [0] * (len(histogram) + 1) - hist_lines = self.definitions['response_time_hist']['lines'] - upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines'] - for i, le in enumerate(histogram): - hist_key = "response_time_hist_%d" % i - upstream_hist_key = "response_time_upstream_hist_%d" % i - hist_lines.append([hist_key, str(le), 'incremental', 1, 1]) - upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1]) - - hist_lines.append(["response_time_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1]) - upstream_hist_lines.append(["response_time_upstream_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1]) - elif histogram is not None: - self.error("expected a histogram list, but got {0}".format(type(histogram))) - - if not self.configuration.get('all_time', True): - self.order.remove('clients_all') - - # Add 'detailed_response_codes' chart if specified in the configuration - if self.configuration.get('detailed_response_codes', True): - if self.configuration.get('detailed_response_aggregate', True): - codes = DET_RESP_AGGR[:1] - else: - codes = DET_RESP_AGGR[1:] - - for code in codes: - self.order.append('detailed_response_codes%s' % code) - self.definitions['detailed_response_codes%s' % code] \ - = {'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses', - 'web_log.detailed_response_codes%s' % code, 'stacked'], - 'lines': []} - - # Add 'requests_per_url' chart if specified in the configuration - if self.storage['url_pattern']: - for elem in self.storage['url_pattern']: - dim = [elem.description, elem.description[12:], 'incremental'] - self.definitions['requests_per_url']['lines'].append(dim) - self.data[elem.description] = 0 - self.data['url_pattern_other'] = 0 - else: - self.order.remove('requests_per_url') - - # Add 'requests_per_user_defined' chart if specified in the configuration - if self.storage['user_pattern'] and 'user_defined' in match_dict: - for elem in
self.storage['user_pattern']: - dim = [elem.description, elem.description[13:], 'incremental'] - self.definitions['requests_per_user_defined']['lines'].append(dim) - self.data[elem.description] = 0 - self.data['user_pattern_other'] = 0 - else: - self.order.remove('requests_per_user_defined') - - def get_data(self, raw_data=None): - """ - Parses new log lines - :return: dict OR None - None if _get_raw_data method fails. - In all other cases - dict. - """ - if not raw_data: - return None if raw_data is None else self.data - - filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter) - - unique_current = set() - timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0)) - - for line in filtered_data: - match = self.storage['regex'].search(line) - if match: - match_dict = match.groupdict() - try: - code = match_dict['code'][0] + 'xx' - self.data[code] += 1 - except KeyError: - self.data['0xx'] += 1 - # detailed response code - if self.configuration.get('detailed_response_codes', True): - self.get_data_per_response_codes_detailed(code=match_dict['code']) - # response statuses - self.get_data_per_statuses(code=match_dict['code']) - # requests per user defined pattern - if self.storage['user_pattern'] and 'user_defined' in match_dict: - self.get_data_per_pattern(row=match_dict['user_defined'], - other='user_pattern_other', - pattern=self.storage['user_pattern']) - # method, url, http version - self.get_data_from_request_field(match_dict=match_dict) - # bandwidth sent - bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0 - self.data['bytes_sent'] += int(bytes_sent) - # request processing time and bandwidth received - if 'resp_length' in match_dict: - resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0 - self.data['resp_length'] += int(resp_length) - if 'resp_time' in match_dict: - resp_time = self.storage['func_resp_time'](float(match_dict['resp_time'])) - get_timings(timings=timings['resp_time'], time=resp_time) - if 'bucket_index' in self.storage: - get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000) - if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-': - resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream'])) - get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream) - if 'bucket_index' in self.storage: - get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000) - # requests per ip proto - proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4' - self.data['req_' + proto] += 1 - # unique clients ips - if self.configuration.get('all_time', True): - if address_not_in_pool(pool=self.storage['unique_all_time'], - address=match_dict['address'], - pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']): - self.data['unique_tot_' + proto] += 1 - if match_dict['address'] not in unique_current: - self.data['unique_cur_' + proto] += 1 - unique_current.add(match_dict['address']) - else: - self.data['unmatched'] += 1 - - # timings - for elem in timings: - self.data[elem + '_min'] += timings[elem]['minimum'] - self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count'] - self.data[elem + '_max'] += timings[elem]['maximum'] - - # histogram - if 'bucket_index' in self.storage: - buckets = self.storage['buckets'] - upstream_buckets = self.storage['upstream_buckets'] - for i in range(0, len(self.storage['bucket_index'])): -
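                # get_hist() (defined below) fills the buckets cumulatively, so
                # buckets[i] counts every request whose response time is
                # <= bucket_index[i]. E.g. with histogram = [10, 50, 100]
                # (times are normalized to microseconds and divided by 1000
                # above, so boundaries are effectively milliseconds), a 42 ms
                # request increments the 50, 100 and +Inf buckets, but not 10.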
hist_key = "response_time_hist_%d" % i - upstream_hist_key = "response_time_upstream_hist_%d" % i - self.data[hist_key] = buckets[i] - self.data[upstream_hist_key] = upstream_buckets[i] - - return self.data - - def find_regex(self, last_line): - """ - :param last_line: str: literally last line from log file - :return: tuple where: - [0]: dict or None: match_dict or None - [1]: str: error description - We need to find appropriate pattern for current log file - All logic is do a regex search through the string for all predefined patterns - until we find something or fail. - """ - # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code - # 5. Bytes sent 6. Response length 7. Response process time - default = re.compile(r'(?P<address>[\da-f.:]+|localhost)' - r' -.*?"(?P<request>[^"]*)"' - r' (?P<code>[1-9]\d{2})' - r' (?P<bytes_sent>\d+|-)') - - apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)' - r' -.*?"(?P<request>[^"]*)"' - r' (?P<code>[1-9]\d{2})' - r' (?P<bytes_sent>\d+|-)' - r' (?P<resp_length>\d+|-)' - r' (?P<resp_time>\d+) ') - - apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)' - r' -.*?"(?P<request>[^"]*)"' - r' (?P<code>[1-9]\d{2})' - r' (?P<bytes_sent>\d+|-)' - r' .*?' - r' (?P<resp_length>\d+|-)' - r' (?P<resp_time>\d+)' - r'(?: |$)') - - nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)' - r' -.*?"(?P<request>[^"]*)"' - r' (?P<code>[1-9]\d{2})' - r' (?P<bytes_sent>\d+)' - r' (?P<resp_length>\d+)' - r' (?P<resp_time>\d+\.\d+) ') - - nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)' - r' -.*?"(?P<request>[^"]*)"' - r' (?P<code>[1-9]\d{2})' - r' (?P<bytes_sent>\d+)' - r' (?P<resp_length>\d+)' - r' (?P<resp_time>\d+\.\d+)' - r' (?P<resp_time_upstream>[\d.-]+) ') - - nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)' - r' -.*?"(?P<request>[^"]*)"' - r' (?P<code>[1-9]\d{2})' - r' (?P<bytes_sent>\d+)' - r' .*?' - r' (?P<resp_length>\d+)' - r' (?P<resp_time>\d+\.\d+)') - - def func_usec(time): - return time - - def func_sec(time): - return time * 1000000 - - r_regex = [apache_ext_insert, apache_ext_append, - nginx_ext2_insert, nginx_ext_insert, nginx_ext_append, - default] - r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec] - regex_function = zip(r_regex, r_function) - - match_dict = dict() - for regex, func in regex_function: - match = regex.search(last_line) - if match: - self.storage['regex'] = regex - self.storage['func_resp_time'] = func - match_dict = match.groupdict() - break - - return find_regex_return(match_dict=match_dict or None, - msg='Unknown log format. You need to use "custom_log_format" feature.') - - def find_regex_custom(self, last_line): - """ - :param last_line: str: literally last line from log file - :return: tuple where: - [0]: dict or None: match_dict or None - [1]: str: error description - - We are here only if "custom_log_format" is in logs. We need to make sure: - 1. "custom_log_format" is a dict - 2. "pattern" in "custom_log_format" and pattern is <str> instance - 3. if "time_multiplier" is in "custom_log_format" it must be <int> instance - - If all parameters is ok we need to make sure: - 1. Pattern search is success - 2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict") - - If pattern search is success we need to make sure: - 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict" - - If this is True we need to make sure: - 1. 
All mandatory key values from "match_dict" have the correct format - ("code" is an integer, "method" is an uppercase word, etc.) - - If non-mandatory keys are in "match_dict" we need to make sure: - 1. All non-mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format - ("resp_length" is integer or "-", "resp_time" is integer or float) - - """ - if not hasattr(self.configuration.get('custom_log_format'), 'keys'): - return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>') - - pattern = self.configuration.get('custom_log_format', dict()).get('pattern') - if not (pattern and isinstance(pattern, str)): - return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>') - - resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0 - - if not isinstance(resp_time_func, int): - return find_regex_return(msg='Custom log: "time_multiplier" is not an integer') - - try: - regex = re.compile(pattern) - except re.error as error: - return find_regex_return(msg='Pattern compile error: %s' % str(error)) - - match = regex.search(last_line) - if not match: - return find_regex_return(msg='Custom log: pattern search FAILED') - - match_dict = match.groupdict() or None - if match_dict is None: - return find_regex_return(msg='Custom log: search OK but contains no named subgroups' - ' (you need to use ?P<subgroup_name>)') - mandatory_dict = {'address': r'[\w.:-]+', - 'code': r'[1-9]\d{2}', - 'bytes_sent': r'\d+|-'} - optional_dict = {'resp_length': r'\d+|-', - 'resp_time': r'[\d.]+', - 'resp_time_upstream': r'[\d.-]+', - 'method': r'[A-Z]+', - 'http_version': r'\d(?:\.\d)?'} - - mandatory_values = set(mandatory_dict) - set(match_dict) - if mandatory_values: - return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing' - % list(mandatory_values)) - for key in mandatory_dict: - if not re.search(mandatory_dict[key], match_dict[key]): - return find_regex_return(msg='Custom log: can\'t parse "%s": %s' - % (key, match_dict[key])) - - optional_values = set(optional_dict) & set(match_dict) - for key in optional_values: - if not re.search(optional_dict[key], match_dict[key]): - return find_regex_return(msg='Custom log: can\'t parse "%s": %s' - % (key, match_dict[key])) - - dot_in_time = '.'
in match_dict.get('resp_time', '') - if dot_in_time: - self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000) - else: - self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1) - - self.storage['regex'] = regex - return find_regex_return(match_dict=match_dict) - - def get_data_from_request_field(self, match_dict): - if match_dict.get('request'): - match_dict = REQUEST_REGEX.search(match_dict['request']) - if match_dict: - match_dict = match_dict.groupdict() - else: - return - # requests per url - if match_dict.get('url') and self.storage['url_pattern']: - self.get_data_per_pattern(row=match_dict['url'], - other='url_pattern_other', - pattern=self.storage['url_pattern']) - # requests per http method - if match_dict.get('method'): - if match_dict['method'] not in self.data: - self.charts['http_method'].add_dimension([match_dict['method'], - match_dict['method'], - 'incremental']) - self.data[match_dict['method']] = 0 - self.data[match_dict['method']] += 1 - # requests per http version - if match_dict.get('http_version'): - dim_id = match_dict['http_version'].replace('.', '_') - if dim_id not in self.data: - self.charts['http_version'].add_dimension([dim_id, - match_dict['http_version'], - 'incremental']) - self.data[dim_id] = 0 - self.data[dim_id] += 1 - - def get_data_per_response_codes_detailed(self, code): - """ - :param code: str: CODE from parsed line. Ex.: '202, '499' - :return: - Calls add_new_dimension method If the value is found for the first time - """ - if code not in self.data: - if self.configuration.get('detailed_response_aggregate', True): - self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental']) - self.data[code] = 0 - else: - code_index = int(code[0]) if int(code[0]) < 6 else 6 - chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index] - self.charts[chart_key].add_dimension([code, code, 'incremental']) - self.data[code] = 0 - self.data[code] += 1 - - def get_data_per_pattern(self, row, other, pattern): - """ - :param row: str: - :param other: str: - :param pattern: named tuple: (['pattern_description', 'regular expression']) - :return: - Scan through string looking for the first location where patterns produce a match for all user - defined patterns - """ - match = None - for elem in pattern: - if elem.func(row): - self.data[elem.description] += 1 - match = True - break - if not match: - self.data[other] += 1 - - def get_data_per_statuses(self, code): - """ - :param code: str: response status code. 
Ex.: '202', '499' - :return: - """ - code_class = code[0] - if code_class == '2' or code == '304' or code_class == '1': - self.data['successful_requests'] += 1 - elif code_class == '3': - self.data['redirects'] += 1 - elif code_class == '4': - self.data['bad_requests'] += 1 - elif code_class == '5': - self.data['server_errors'] += 1 - else: - self.data['other_requests'] += 1 - - -class ApacheCache: - def __init__(self, service): - self.service = service - self.order = ORDER_APACHE_CACHE - self.definitions = CHARTS_APACHE_CACHE - - @staticmethod - def check(): - return True - - @staticmethod - def get_data(raw_data=None): - data = dict(hit=0, miss=0, other=0) - if not raw_data: - return None if raw_data is None else data - - for line in raw_data: - if 'cache hit' in line: - data['hit'] += 1 - elif 'cache miss' in line: - data['miss'] += 1 - else: - data['other'] += 1 - return data - - -class Squid: - def __init__(self, service): - self.service = service - self.order = ORDER_SQUID - self.definitions = CHARTS_SQUID - self.pre_filter = check_patterns('filter', self.configuration.get('filter')) - self.storage = dict() - self.data = {'duration_max': 0, 'duration_avg': 0, 'duration_min': 0, 'bytes': 0, - '0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0, - 'other': 0, 'unmatched': 0, 'unique_ipv4': 0, 'unique_ipv6': 0, - 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0, - 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0 - } - - def __getattr__(self, item): - return getattr(self.service, item) - - def check(self): - last_line = read_last_line(self.log_path) - if not last_line: - return False - self.storage['unique_all_time'] = list() - self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)' - r' (?P<client_address>[\da-f.:]+)' - r' (?P<squid_code>[A-Z_]+)/' - r'(?P<http_code>[0-9]+)' - r' (?P<bytes>[0-9]+)' - r' (?P<method>[A-Z_]+)' - r' (?P<url>[^ ]+)' - r' (?P<user>[^ ]+)' - r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+' - r' (?P<mime_type>[^\n]+)') - - match = self.storage['regex'].search(last_line) - if not match: - self.error('Regex does not match (%s)' % self.storage['regex'].pattern) - return False - self.storage['dynamic'] = { - 'http_code': - {'chart': 'squid_detailed_response_codes', - 'func_dim_id': None, - 'func_dim': None}, - 'hier_code': { - 'chart': 'squid_hier_code', - 'func_dim_id': None, - 'func_dim': lambda v: v.replace('HIER_', '')}, - 'method': { - 'chart': 'squid_method', - 'func_dim_id': None, - 'func_dim': None}, - 'mime_type': { - 'chart': 'squid_mime_type', - 'func_dim_id': lambda v: v.split('/')[0], - 'func_dim': None}} - if not self.configuration.get('all_time', True): - self.order.remove('squid_clients_all') - return True - - def get_data(self, raw_data=None): - if not raw_data: - return None if raw_data is None else self.data - - filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter) - - unique_ip = set() - timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0)) - - for row in filtered_data: - match = self.storage['regex'].search(row) - if match: - match = match.groupdict() - if match['duration'] != '0': - get_timings(timings=timings['duration'], time=float(match['duration']) * 1000) - try: - self.data[match['http_code'][0] + 'xx'] += 1 - except KeyError: - self.data['other'] += 1 - - self.get_data_per_statuses(match['http_code']) - - self.get_data_per_squid_code(match['squid_code']) - - self.data['bytes'] += int(match['bytes']) - - proto = 'ipv4' if '.'
in match['client_address'] else 'ipv6' - # unique clients ips - if self.configuration.get('all_time', True): - if address_not_in_pool(pool=self.storage['unique_all_time'], - address=match['client_address'], - pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']): - self.data['unique_tot_' + proto] += 1 - - if match['client_address'] not in unique_ip: - self.data['unique_' + proto] += 1 - unique_ip.add(match['client_address']) - - for key, values in self.storage['dynamic'].items(): - if match[key] == '-': - continue - dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key] - if dimension_id not in self.data: - dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id - self.charts[values['chart']].add_dimension([dimension_id, - dimension, - 'incremental']) - self.data[dimension_id] = 0 - self.data[dimension_id] += 1 - else: - self.data['unmatched'] += 1 - - for elem in timings: - self.data[elem + '_min'] += timings[elem]['minimum'] - self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count'] - self.data[elem + '_max'] += timings[elem]['maximum'] - return self.data - - def get_data_per_statuses(self, code): - """ - :param code: str: response status code. Ex.: '202', '499' - :return: - """ - code_class = code[0] - if code_class == '2' or code == '304' or code_class == '1' or code == '000': - self.data['successful_requests'] += 1 - elif code_class == '3': - self.data['redirects'] += 1 - elif code_class == '4': - self.data['bad_requests'] += 1 - elif code_class == '5' or code_class == '6': - self.data['server_errors'] += 1 - else: - self.data['other_requests'] += 1 - - def get_data_per_squid_code(self, code): - """ - :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED' - :return: - """ - if code not in self.data: - self.charts['squid_code'].add_dimension([code, code, 'incremental']) - self.data[code] = 0 - self.data[code] += 1 - - for tag in code.split('_'): - try: - chart_key = SQUID_CODES[tag] - except KeyError: - continue - dimension_id = '_'.join(['code_detailed', tag]) - if dimension_id not in self.data: - self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental']) - self.data[dimension_id] = 0 - self.data[dimension_id] += 1 - - -def get_timings(timings, time): - """ - :param timings: - :param time: - :return: - """ - if timings['minimum'] is None: - timings['minimum'] = time - if time > timings['maximum']: - timings['maximum'] = time - elif time < timings['minimum']: - timings['minimum'] = time - timings['summary'] += time - timings['count'] += 1 - -def get_hist(index, buckets, time): - """ - :param index: histogram index (Ex. [10, 50, 100, 150, ...]) - :param buckets: histogram buckets - :param time: time - :return: None - """ - for i in range(len(index)-1, -1, -1): - if time <= index[i]: - buckets[i] += 1 - else: - break - -def address_not_in_pool(pool, address, pool_size): - """ - :param pool: list of ip addresses - :param address: ip address - :param pool_size: current pool size - :return: True if address not in pool. False otherwise. 
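    Example (illustrative): with pool = ['10.0.0.1', '10.0.0.3'] and pool_size = 2,
    '10.0.0.1' is found by the binary search and False is returned, while
    '10.0.0.2' is inserted in sorted order (bisect keeps the pool sorted, so
    lookups stay O(log n)) and True is returned.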
- """ - index = bisect.bisect_left(pool, address) - if index < pool_size: - if pool[index] == address: - return False - bisect.insort_left(pool, address) - return True - bisect.insort_left(pool, address) - return True - - -def find_regex_return(match_dict=None, msg='Generic error message'): - """ - :param match_dict: dict: re.search.groupdict() or None - :param msg: str: error description - :return: tuple: - """ - return match_dict, msg - - -def check_patterns(string, dimension_regex_dict): - """ - :param string: str: - :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'} - :return: list of named tuples or None: - We need to make sure all patterns are valid regular expressions - """ - if not hasattr(dimension_regex_dict, 'keys'): - return None - - result = list() - - def valid_pattern(pattern): - """ - :param pattern: str - :return: re.compile(pattern) or None - """ - if not isinstance(pattern, str): - return False - try: - return re.compile(pattern) - except re.error: - return False - - def func_search(pattern): - def closure(v): - return pattern.search(v) - - return closure - - for dimension, regex in dimension_regex_dict.items(): - valid = valid_pattern(regex) - if isinstance(dimension, str) and valid_pattern: - func = func_search(valid) - result.append(NAMED_PATTERN(description='_'.join([string, dimension]), - func=func)) - return result or None - - -def filter_data(raw_data, pre_filter): - """ - :param raw_data: - :param pre_filter: - :return: - """ - - if not pre_filter: - return raw_data - filtered = raw_data - for elem in pre_filter: - if elem.description == 'filter_include': - filtered = filter(elem.func, filtered) - elif elem.description == 'filter_exclude': - filtered = filterfalse(elem.func, filtered) - return filtered diff --git a/registry/Makefile.am b/registry/Makefile.am new file mode 100644 index 000000000..1cb69ed99 --- /dev/null +++ b/registry/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/registry/Makefile.in b/registry/Makefile.in new file mode 100644 index 000000000..fded2d81f --- /dev/null +++ b/registry/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = registry +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = 
@CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ 
+psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu registry/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu registry/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/registry/README.md b/registry/README.md new file mode 100644 index 000000000..f1d125b77 --- /dev/null +++ b/registry/README.md @@ -0,0 +1,152 @@ +# netdata registry + +Netdata registry implements the `my-netdata` menu on netdata dashboards. +The `my-netdata` menu lists the netdata servers you have visited. + +## Why? + +netdata provides distributed monitoring. + +Traditional monitoring solutions centralize all the data to provide unified dashboards across all servers. Before netdata, this was the standard practice. However it has a few issues: + +1. 
due to the resources required, the number of metrics collected is limited.
+2. for the same reason, the data collection frequency is not that high; at best it is once every 10 or 15 seconds, at worst once every 5 or 10 minutes.
+3. the central monitoring solution needs dedicated resources, thus becoming "another bottleneck" in the whole ecosystem. It also requires maintenance, administration, etc.
+4. most centralized monitoring solutions are usually only good for presenting *statistics of past performance* (i.e. they cannot be used for real-time performance troubleshooting).
+
+Netdata has a different approach:
+
+1. data collection happens per second
+2. thousands of metrics per server are collected
+3. data do not leave the server where they are collected
+4. netdata servers do not talk to each other
+5. your web browser connects to all the netdata servers
+
+Using netdata, your monitoring infrastructure is embedded on each server, significantly limiting the need for additional resources. netdata is blazingly fast and very resource efficient, and it utilizes the spare resources that already exist on each server. This allows **scaling out** the monitoring infrastructure.
+
+However, the netdata approach introduces a few new issues that need to be addressed, one being **the list of netdata servers we have installed**, i.e. the URLs at which our netdata servers are listening.
+
+To solve this, netdata utilizes a **central registry**. This registry, together with certain browser features, allows netdata to provide unified cross-server dashboards. For example, when you jump from server to server using the `my-netdata` menu, several session settings (like the currently viewed charts, the current zoom and pan operations on the charts, etc.) are propagated to the new server, so that the new dashboard will come up with exactly the same view.
+
+## What is the registry?
+
+The registry keeps track of 3 entities:
+
+1. **machines**: i.e. the netdata installations (a random GUID generated by each netdata the first time it starts; we call this **machine_guid**)
+
+   For each netdata installation (each `machine_guid`) the registry keeps track of the different URLs through which it is accessed.
+
+2. **persons**: i.e. the web browsers accessing the netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
+
+   For each person, the registry keeps track of the netdata installations it has accessed and their URLs.
+
+3. **URLs** of netdata installations (as seen by the web browsers)
+
+   For each URL, the registry keeps the URL and nothing more. Each URL is linked to *persons* and *machines*. The only way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to.
+
+## Who talks to the registry?
+
+Your web browser **only**! Check here if this is against your policies: [how to not send any information to a third-party server](https://github.com/netdata/netdata/wiki/netdata-security#registry-or-how-to-not-send-any-information-to-a-thirdparty-server)
+
+Your netdata servers do not talk to the registry. This is a UML diagram of its operation:
+
+![registry](https://cloud.githubusercontent.com/assets/2662304/19448565/11a70632-94ab-11e6-9d80-f410b4acb797.png)
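+
+For illustration, this is roughly the kind of request your browser makes on your behalf (a hypothetical `curl` session; the `hello` and `access` actions correspond to the request handlers in `registry/registry.c`, but the exact `/api/v1/registry` query-string wiring is part of the web API and is assumed here):
+
+```
+# ask the netdata server you are viewing which registry it announces
+curl 'http://my.server:19999/api/v1/registry?action=hello'
+
+# register the visit with that registry (the browser also sends its person cookie)
+curl 'https://registry.my-netdata.io/api/v1/registry?action=access&machine=MACHINE_GUID&name=my.server&url=http%3A%2F%2Fmy.server%3A19999%2F'
+```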
+
+## What data does the registry maintain?
+
+Its database contains:
+
+- **random person GUIDs** (generated by the registry as a browser cookie)
+- **random machine GUIDs** (generated by each netdata server on its first run), including the hostname of the server netdata is running on (without the domain)
+- **URLs** (the base URL for accessing a netdata server, as seen by the web browser)
+
+For *persons* and *machines*, the registry keeps links to *URLs*, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen).
+
+## What is the default registry?
+
+`https://registry.my-netdata.io`, which is currently served by `https://london.my-netdata.io`. This registry listens for both HTTP and HTTPS requests, but the default is HTTPS.
+
+#### Can this registry handle the global load of netdata installations?
+
+Yes! The registry can handle 50,000 - 100,000 requests **per second per core** (depending on the type of CPU, the computer's memory bandwidth, etc). The 50,000 figure was measured on a J1900 (Celeron, 2GHz).
+
+We believe it can do it...
+
+## Every netdata can be a registry
+
+Yes, you read that correctly: **every netdata can be a registry**. Just pick one and configure it.
+
+**To turn any netdata into a registry**, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[registry]
+    enabled = yes
+    registry to announce = http://your.registry:19999
+```
+
+Restart your netdata to activate it.
+
+Then, you need to tell **all your other netdata servers to advertise your registry**, instead of the default. To do this, on each of your netdata servers, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[registry]
+    enabled = no
+    registry to announce = http://your.registry:19999
+```
+
+Note that we have not enabled the registry on the other servers. Only one netdata (the registry) needs `[registry].enabled = yes`.
+
+That's it. You now have your own registry.
+
+You may also want to give your servers different names under the **my-netdata** menu (i.e. to have them sorted / grouped). You can change the registry name of a server by setting, on that netdata server:
+
+```
+[registry]
+    registry hostname = Group1 - Master DB
+```
+
+So this server will appear in **my-netdata** as `Group1 - Master DB`. The max name length is 50 characters.
+
+#### Limiting access to the registry
+
+netdata v1.9+ supports limiting access to the registry from given IPs, like this:
+
+```
+[registry]
+    allow from = *
+```
+
+`allow from` settings are [netdata simple patterns](https://github.com/netdata/netdata/wiki/Configuration#netdata-simple-patterns): string matches that use `*` as a wildcard (any number of times) and a `!` prefix for a negative match. So: `allow from = !10.1.2.3 10.*` will allow all IPs in `10.*` except `10.1.2.3`. The order is important: left to right, the first positive or negative match is used.
+
+Keep in mind that connections to the netdata API ports are filtered by `[web].allow connections from`. So, IPs allowed by `[registry].allow from` should also be allowed by `[web].allow connections from`.
+
+#### Where is the registry database stored?
+
+`/var/lib/netdata/registry/*.db`
+
+There can be up to 2 files:
+
+- `registry-log.db`, the transaction log
+
+  all incoming requests that affect the registry are saved in this file in real-time.
+
+- `registry.db`, the database
+
+  every time `registry-log.db` accumulates `[registry].registry save db every new entries` entries, netdata saves its database to `registry.db` and empties `registry-log.db`.
+
+Both files are machine-readable text files.
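+
+For reference, every record in these files is a single tab-separated line, in the format written by `registry_db_save()` in `registry/registry_db.c` (see below). A hypothetical `registry.db` fragment (all GUIDs, timestamps and counters here are invented for illustration) looks like this:
+
+```
+M	5be2b4d0	5be2c8a1	0000002a	11111111-2222-3333-4444-555555555555
+V	5be2b4d0	5be2c8a1	0000002a	00	http://10.0.0.1:19999/
+P	5be2b4d0	5be2c8a1	0000002a	aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
+U	5be2b4d0	5be2c8a1	0000002a	00	11111111-2222-3333-4444-555555555555	myhost	http://10.0.0.1:19999/
+T	0000000000000001	0000000000000001	000000000000002a	0000000000000001	0000000000000001	0000000000000001
+```
+
+`M` and `P` lines are machines and persons (first seen, last seen, usages, GUID), `V` and `U` lines link them to URLs, and the final `T` line stores the totals.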
+
+## The future
+
+The registry opens a whole world of new possibilities for netdata. Check here what we think: https://github.com/netdata/netdata/issues/416
+
+## Troubleshooting the registry
+
+The registry URL should be set to the URL of a netdata dashboard. This server has to have `[registry].enabled = yes`. So, accessing the registry URL directly with your web browser should present the dashboard of the netdata server operating the registry.
+
+To use the registry, your web browser needs to support **third party cookies**, since the cookies are set by the registry while you are browsing the dashboard of another netdata server. The first time the registry sees a new web browser, it tries to figure out whether the browser has cookies enabled or not. It does this by setting a cookie and redirecting the browser back to itself, hoping to receive the cookie back. If it does not receive the cookie, the registry will keep redirecting your web browser back to itself, which after a few redirects will fail with an error like this:
+
+```
+ERROR 409: Cannot ACCESS netdata registry: https://registry.my-netdata.io responded with: {"status":"redirect","registry":"https://registry.my-netdata.io"}
+```
+
+This error is printed on your web browser console (press F12 on your browser to show it).
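+
+If you run your own registry, a quick way to verify it is responding (a hypothetical probe; the `/api/v1/registry?action=hello` wiring is assumed here, and the request is handled by `registry_request_hello_json()` in the code below) is:
+
+```
+curl -s 'http://your.registry:19999/api/v1/registry?action=hello'
+# expected (abridged): { "action": "hello", "status": "ok", ..., "registry": "http://your.registry:19999" }
+```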
diff --git a/registry/registry.c b/registry/registry.c
new file mode 100644
index 000000000..4f97eb58f
--- /dev/null
+++ b/registry/registry.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+#define REGISTRY_STATUS_OK "ok"
+#define REGISTRY_STATUS_FAILED "failed"
+#define REGISTRY_STATUS_DISABLED "disabled"
+
+// ----------------------------------------------------------------------------
+// REGISTRY concurrency locking
+
+static inline void registry_lock(void) {
+    netdata_mutex_lock(&registry.lock);
+}
+
+static inline void registry_unlock(void) {
+    netdata_mutex_unlock(&registry.lock);
+}
+
+
+// ----------------------------------------------------------------------------
+// COOKIES
+
+static void registry_set_cookie(struct web_client *w, const char *guid) {
+    char edate[100];
+    time_t et = now_realtime_sec() + registry.persons_expiration;
+    struct tm etmbuf, *etm = gmtime_r(&et, &etmbuf);
+    strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", etm);
+
+    snprintfz(w->cookie1, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Expires=%s", guid, edate);
+
+    if(registry.registry_domain && registry.registry_domain[0])
+        snprintfz(w->cookie2, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Domain=%s; Expires=%s", guid, registry.registry_domain, edate);
+}
+
+static inline void registry_set_person_cookie(struct web_client *w, REGISTRY_PERSON *p) {
+    registry_set_cookie(w, p->guid);
+}
+
+
+// ----------------------------------------------------------------------------
+// JSON GENERATION
+
+static inline void registry_json_header(RRDHOST *host, struct web_client *w, const char *action, const char *status) {
+    buffer_flush(w->response.data);
+    w->response.data->contenttype = CT_APPLICATION_JSON;
+    buffer_sprintf(w->response.data, "{\n\t\"action\": \"%s\",\n\t\"status\": \"%s\",\n\t\"hostname\": \"%s\",\n\t\"machine_guid\": \"%s\"",
+                   action, status, host->registry_hostname, host->machine_guid);
+}
+
+static inline void registry_json_footer(struct web_client *w) {
+    buffer_strcat(w->response.data, "\n}\n");
+}
+
+static inline int registry_json_disabled(RRDHOST *host, struct web_client *w, const char *action) {
+    registry_json_header(host, w, action, REGISTRY_STATUS_DISABLED);
+
+    buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
+                   registry.registry_to_announce);
+
+    registry_json_footer(w);
+    return 200;
+}
+
+
+// ----------------------------------------------------------------------------
+// CALLBACKS FOR WALKING THROUGH REGISTRY OBJECTS
+
+// structure used by the callbacks below
+struct registry_json_walk_person_urls_callback {
+    REGISTRY_PERSON *p;
+    REGISTRY_MACHINE *m;
+    struct web_client *w;
+    int count;
+};
+
+// callback for rendering PERSON_URLs
+static int registry_json_person_url_callback(void *entry, void *data) {
+    REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
+    struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
+    struct web_client *w = c->w;
+
+    if(unlikely(c->count++))
+        buffer_strcat(w->response.data, ",");
+
+    buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u, \"%s\" ]",
+                   pu->machine->guid, pu->url->url, pu->last_t, pu->usages, pu->machine_name);
+
+    return 0;
+}
+
+// callback for rendering MACHINE_URLs
+static int registry_json_machine_url_callback(void *entry, void *data) {
+    REGISTRY_MACHINE_URL *mu = (REGISTRY_MACHINE_URL *)entry;
+    struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
+    struct web_client *w = c->w;
+    REGISTRY_MACHINE *m = c->m;
+
+    if(unlikely(c->count++))
+        buffer_strcat(w->response.data, ",");
+
+    buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u ]",
+                   m->guid, mu->url->url, mu->last_t, mu->usages);
+
+    return 1;
+}
+
+// ----------------------------------------------------------------------------
+
+// structure used by the callbacks below
+struct registry_person_url_callback_verify_machine_exists_data {
+    REGISTRY_MACHINE *m;
+    int count;
+};
+
+static inline int registry_person_url_callback_verify_machine_exists(void *entry, void *data) {
+    struct registry_person_url_callback_verify_machine_exists_data *d = (struct registry_person_url_callback_verify_machine_exists_data *)data;
+    REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
+    REGISTRY_MACHINE *m = d->m;
+
+    if(pu->machine == m)
+        d->count++;
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+// public HELLO request
+
+int registry_request_hello_json(RRDHOST *host, struct web_client *w) {
+    registry_json_header(host, w, "hello", REGISTRY_STATUS_OK);
+
+    buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
+                   registry.registry_to_announce);
+
+    registry_json_footer(w);
+    return 200;
+}
+
+// ----------------------------------------------------------------------------
+// public ACCESS request
+
+#define REGISTRY_VERIFY_COOKIES_GUID "give-me-back-this-cookie-now--please"
+
+// the main method for registering an access
+int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
+    if(unlikely(!registry.enabled))
+        return registry_json_disabled(host, w, "access");
+
+    // ------------------------------------------------------------------------
+    // verify the browser supports cookies
+
+    if(registry.verify_cookies_redirects > 0 && !person_guid[0]) {
+        buffer_flush(w->response.data);
+        registry_set_cookie(w, REGISTRY_VERIFY_COOKIES_GUID);
+        w->response.data->contenttype = CT_APPLICATION_JSON;
+        buffer_sprintf(w->response.data, "{ \"status\": \"redirect\", \"registry\": 
\"%s\" }", registry.registry_to_announce); + return 200; + } + + if(unlikely(person_guid[0] && !strcmp(person_guid, REGISTRY_VERIFY_COOKIES_GUID))) + person_guid[0] = '\0'; + + // ------------------------------------------------------------------------ + + registry_lock(); + + REGISTRY_PERSON *p = registry_request_access(person_guid, machine_guid, url, name, when); + if(!p) { + registry_json_header(host, w, "access", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 412; + } + + // set the cookie + registry_set_person_cookie(w, p); + + // generate the response + registry_json_header(host, w, "access", REGISTRY_STATUS_OK); + + buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\",\n\t\"urls\": [", p->guid); + struct registry_json_walk_person_urls_callback c = { p, NULL, w, 0 }; + avl_traverse(&p->person_urls, registry_json_person_url_callback, &c); + buffer_strcat(w->response.data, "\n\t]\n"); + + registry_json_footer(w); + registry_unlock(); + return 200; +} + +// ---------------------------------------------------------------------------- +// public DELETE request + +// the main method for deleting a URL from a person +int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) { + if(!registry.enabled) + return registry_json_disabled(host, w, "delete"); + + registry_lock(); + + REGISTRY_PERSON *p = registry_request_delete(person_guid, machine_guid, url, delete_url, when); + if(!p) { + registry_json_header(host, w, "delete", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 412; + } + + // generate the response + registry_json_header(host, w, "delete", REGISTRY_STATUS_OK); + registry_json_footer(w); + registry_unlock(); + return 200; +} + +// ---------------------------------------------------------------------------- +// public SEARCH request + +// the main method for searching the URLs of a netdata +int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) { + if(!registry.enabled) + return registry_json_disabled(host, w, "search"); + + registry_lock(); + + REGISTRY_MACHINE *m = registry_request_machine(person_guid, machine_guid, url, request_machine, when); + if(!m) { + registry_json_header(host, w, "search", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 404; + } + + registry_json_header(host, w, "search", REGISTRY_STATUS_OK); + + buffer_strcat(w->response.data, ",\n\t\"urls\": ["); + struct registry_json_walk_person_urls_callback c = { NULL, m, w, 0 }; + dictionary_get_all(m->machine_urls, registry_json_machine_url_callback, &c); + buffer_strcat(w->response.data, "\n\t]\n"); + + registry_json_footer(w); + registry_unlock(); + return 200; +} + +// ---------------------------------------------------------------------------- +// SWITCH REQUEST + +// the main method for switching user identity +int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when) { + if(!registry.enabled) + return registry_json_disabled(host, w, "switch"); + + (void)url; + (void)when; + + registry_lock(); + + REGISTRY_PERSON *op = registry_person_find(person_guid); + if(!op) { + registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 430; + } + + REGISTRY_PERSON 
*np = registry_person_find(new_person_guid); + if(!np) { + registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 431; + } + + REGISTRY_MACHINE *m = registry_machine_find(machine_guid); + if(!m) { + registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 432; + } + + struct registry_person_url_callback_verify_machine_exists_data data = { m, 0 }; + + // verify the old person has access to this machine + avl_traverse(&op->person_urls, registry_person_url_callback_verify_machine_exists, &data); + if(!data.count) { + registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 433; + } + + // verify the new person has access to this machine + data.count = 0; + avl_traverse(&np->person_urls, registry_person_url_callback_verify_machine_exists, &data); + if(!data.count) { + registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED); + registry_json_footer(w); + registry_unlock(); + return 434; + } + + // set the cookie of the new person + // the user just switched identity + registry_set_person_cookie(w, np); + + // generate the response + registry_json_header(host, w, "switch", REGISTRY_STATUS_OK); + buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\"", np->guid); + registry_json_footer(w); + + registry_unlock(); + return 200; +} + +// ---------------------------------------------------------------------------- +// STATISTICS + +void registry_statistics(void) { + if(!registry.enabled) return; + + static RRDSET *sts = NULL, *stc = NULL, *stm = NULL; + + if(unlikely(!sts)) { + sts = rrdset_create_localhost( + "netdata" + , "registry_sessions" + , NULL + , "registry" + , NULL + , "NetData Registry Sessions" + , "session" + , "registry" + , "stats" + , 131000 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(sts, "sessions", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(sts); + + rrddim_set(sts, "sessions", registry.usages_count); + rrdset_done(sts); + + // ------------------------------------------------------------------------ + + if(unlikely(!stc)) { + stc = rrdset_create_localhost( + "netdata" + , "registry_entries" + , NULL + , "registry" + , NULL + , "NetData Registry Entries" + , "entries" + , "registry" + , "stats" + , 131100 + , localhost->rrd_update_every + , RRDSET_TYPE_LINE + ); + + rrddim_add(stc, "persons", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stc, "machines", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stc, "urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stc, "persons_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stc, "machines_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(stc); + + rrddim_set(stc, "persons", registry.persons_count); + rrddim_set(stc, "machines", registry.machines_count); + rrddim_set(stc, "urls", registry.urls_count); + rrddim_set(stc, "persons_urls", registry.persons_urls_count); + rrddim_set(stc, "machines_urls", registry.machines_urls_count); + rrdset_done(stc); + + // ------------------------------------------------------------------------ + + if(unlikely(!stm)) { + stm = rrdset_create_localhost( + "netdata" + , "registry_mem" + , NULL + , "registry" + , NULL + , "NetData Registry Memory" + , "KB" + , "registry" + , "stats" + , 131300 + , localhost->rrd_update_every + , RRDSET_TYPE_STACKED + ); + + rrddim_add(stm, "persons", NULL, 1, 1024, 
RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stm, "machines", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stm, "urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stm, "persons_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(stm, "machines_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + else rrdset_next(stm); + + rrddim_set(stm, "persons", registry.persons_memory + registry.persons_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY)); + rrddim_set(stm, "machines", registry.machines_memory + registry.machines_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY)); + rrddim_set(stm, "urls", registry.urls_memory); + rrddim_set(stm, "persons_urls", registry.persons_urls_memory); + rrddim_set(stm, "machines_urls", registry.machines_urls_memory + registry.machines_count * sizeof(DICTIONARY) + registry.machines_urls_count * sizeof(NAME_VALUE)); + rrdset_done(stm); +} diff --git a/registry/registry.h b/registry/registry.h new file mode 100644 index 000000000..ab36de014 --- /dev/null +++ b/registry/registry.h @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +/* + * netdata registry + * + * this header file describes the public interface + * to the netdata registry + * + * only these high level functions are exposed + * + */ + +// ---------------------------------------------------------------------------- +// TODO +// +// 1. the default tracking cookie expires in 1 year, but the persons are not +// removed from the db - this means the database only grows - ideally the +// database should be cleaned in registry_db_save() for both on-disk and +// on-memory entries. +// +// Cleanup: +// i. Find all the PERSONs that have expired cookie +// ii. For each of their PERSON_URLs: +// - decrement the linked MACHINE links +// - if the linked MACHINE has no other links, remove the linked MACHINE too +// - remove the PERSON_URL +// +// 2. add protection to prevent abusing the registry by flooding it with +// requests to fill the memory and crash it. +// +// Possible protections: +// - limit the number of URLs per person +// - limit the number of URLs per machine +// - limit the number of persons +// - limit the number of machines +// - [DONE] limit the size of URLs +// - [DONE] limit the size of PERSON_URL names +// - limit the number of requests that add data to the registry, +// per client IP per hour +// +// 3. lower memory requirements +// +// - embed avl structures directly into registry objects, instead of DICTIONARY +// [DONE for PERSON_URLs, PENDING for MACHINE_URLs] +// - store GUIDs in memory as UUID instead of char * +// - do not track persons using the demo machines only +// (i.e. 
start tracking them only when they access a non-demo machine) +// - [DONE] do not track custom dashboards by default + +#ifndef NETDATA_REGISTRY_H +#define NETDATA_REGISTRY_H 1 + +#include "../daemon/common.h" + +#define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id" + +// initialize the registry +// should only happen when netdata starts +extern int registry_init(void); + +// free all data held by the registry +// should only happen when netdata exits +extern void registry_free(void); + +// HTTP requests handled by the registry +extern int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when); +extern int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when); +extern int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when); +extern int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when); +extern int registry_request_hello_json(RRDHOST *host, struct web_client *w); + +// update the registry monitoring charts +extern void registry_statistics(void); + +extern char *registry_get_this_machine_guid(void); +extern char *registry_get_this_machine_hostname(void); + +extern int regenerate_guid(const char *guid, char *result); + +#endif /* NETDATA_REGISTRY_H */ diff --git a/registry/registry_db.c b/registry/registry_db.c new file mode 100644 index 000000000..d8e2bbd8d --- /dev/null +++ b/registry/registry_db.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../daemon/common.h" +#include "registry_internals.h" + +int registry_db_should_be_saved(void) { + debug(D_REGISTRY, "log entries %llu, max %llu", registry.log_count, registry.save_registry_every_entries); + return registry.log_count > registry.save_registry_every_entries; +} + +// ---------------------------------------------------------------------------- +// INTERNAL FUNCTIONS FOR SAVING REGISTRY OBJECTS + +static int registry_machine_save_url(void *entry, void *file) { + REGISTRY_MACHINE_URL *mu = entry; + FILE *fp = file; + + debug(D_REGISTRY, "Registry: registry_machine_save_url('%s')", mu->url->url); + + int ret = fprintf(fp, "V\t%08x\t%08x\t%08x\t%02x\t%s\n", + mu->first_t, + mu->last_t, + mu->usages, + mu->flags, + mu->url->url + ); + + // error handling is done at registry_db_save() + + return ret; +} + +static int registry_machine_save(void *entry, void *file) { + REGISTRY_MACHINE *m = entry; + FILE *fp = file; + + debug(D_REGISTRY, "Registry: registry_machine_save('%s')", m->guid); + + int ret = fprintf(fp, "M\t%08x\t%08x\t%08x\t%s\n", + m->first_t, + m->last_t, + m->usages, + m->guid + ); + + if(ret >= 0) { + int ret2 = dictionary_get_all(m->machine_urls, registry_machine_save_url, fp); + if(ret2 < 0) return ret2; + ret += ret2; + } + + // error handling is done at registry_db_save() + + return ret; +} + +static inline int registry_person_save_url(void *entry, void *file) { + REGISTRY_PERSON_URL *pu = entry; + FILE *fp = file; + + debug(D_REGISTRY, "Registry: registry_person_save_url('%s')", pu->url->url); + + int ret = fprintf(fp, "U\t%08x\t%08x\t%08x\t%02x\t%s\t%s\t%s\n", + pu->first_t, + pu->last_t, + pu->usages, + pu->flags, + pu->machine->guid, + pu->machine_name, + pu->url->url + ); + + // error handling is done at 
registry_db_save() + + return ret; +} + +static inline int registry_person_save(void *entry, void *file) { + REGISTRY_PERSON *p = entry; + FILE *fp = file; + + debug(D_REGISTRY, "Registry: registry_person_save('%s')", p->guid); + + int ret = fprintf(fp, "P\t%08x\t%08x\t%08x\t%s\n", + p->first_t, + p->last_t, + p->usages, + p->guid + ); + + if(ret >= 0) { + //int ret2 = dictionary_get_all(p->person_urls, registry_person_save_url, fp); + int ret2 = avl_traverse(&p->person_urls, registry_person_save_url, fp); + if (ret2 < 0) return ret2; + ret += ret2; + } + + // error handling is done at registry_db_save() + + return ret; +} + +// ---------------------------------------------------------------------------- +// SAVE THE REGISTRY DATABASE + +int registry_db_save(void) { + if(unlikely(!registry.enabled)) + return -1; + + if(unlikely(!registry_db_should_be_saved())) + return -2; + + error_log_limit_unlimited(); + + char tmp_filename[FILENAME_MAX + 1]; + char old_filename[FILENAME_MAX + 1]; + + snprintfz(old_filename, FILENAME_MAX, "%s.old", registry.db_filename); + snprintfz(tmp_filename, FILENAME_MAX, "%s.tmp", registry.db_filename); + + debug(D_REGISTRY, "Registry: Creating file '%s'", tmp_filename); + FILE *fp = fopen(tmp_filename, "w"); + if(!fp) { + error("Registry: Cannot create file: %s", tmp_filename); + error_log_limit_reset(); + return -1; + } + + // dictionary_get_all() has its own locking, so this is safe to do + + debug(D_REGISTRY, "Saving all machines"); + int bytes1 = dictionary_get_all(registry.machines, registry_machine_save, fp); + if(bytes1 < 0) { + error("Registry: Cannot save registry machines - return value %d", bytes1); + fclose(fp); + error_log_limit_reset(); + return bytes1; + } + debug(D_REGISTRY, "Registry: saving machines took %d bytes", bytes1); + + debug(D_REGISTRY, "Saving all persons"); + int bytes2 = dictionary_get_all(registry.persons, registry_person_save, fp); + if(bytes2 < 0) { + error("Registry: Cannot save registry persons - return value %d", bytes2); + fclose(fp); + error_log_limit_reset(); + return bytes2; + } + debug(D_REGISTRY, "Registry: saving persons took %d bytes", bytes2); + + // save the totals + fprintf(fp, "T\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\n", + registry.persons_count, + registry.machines_count, + registry.usages_count + 1, // this is required - it is lost on db rotation + registry.urls_count, + registry.persons_urls_count, + registry.machines_urls_count + ); + + fclose(fp); + + errno = 0; + + // remove the .old db + debug(D_REGISTRY, "Registry: Removing old db '%s'", old_filename); + if(unlink(old_filename) == -1 && errno != ENOENT) + error("Registry: cannot remove old registry file '%s'", old_filename); + + // rename the db to .old + debug(D_REGISTRY, "Registry: Link current db '%s' to .old: '%s'", registry.db_filename, old_filename); + if(link(registry.db_filename, old_filename) == -1 && errno != ENOENT) + error("Registry: cannot move file '%s' to '%s'. 
Saving registry DB failed!", registry.db_filename, old_filename);
+
+    else {
+        // remove the database (it is saved in .old)
+        debug(D_REGISTRY, "Registry: removing db '%s'", registry.db_filename);
+        if (unlink(registry.db_filename) == -1 && errno != ENOENT)
+            error("Registry: cannot remove old registry file '%s'", registry.db_filename);
+
+        // move the .tmp to make it active
+        debug(D_REGISTRY, "Registry: linking tmp db '%s' to active db '%s'", tmp_filename, registry.db_filename);
+        if (link(tmp_filename, registry.db_filename) == -1) {
+            error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename,
+                  registry.db_filename);
+
+            // move the .old back
+            debug(D_REGISTRY, "Registry: linking old db '%s' to active db '%s'", old_filename, registry.db_filename);
+            if(link(old_filename, registry.db_filename) == -1)
+                error("Registry: cannot move file '%s' to '%s'. Recovering the old registry DB failed!", old_filename, registry.db_filename);
+        }
+        else {
+            debug(D_REGISTRY, "Registry: removing tmp db '%s'", tmp_filename);
+            if(unlink(tmp_filename) == -1)
+                error("Registry: cannot remove tmp registry file '%s'", tmp_filename);
+
+            // it has been moved successfully
+            // discard the current registry log
+            registry_log_recreate();
+            registry.log_count = 0;
+        }
+    }
+
+    // continue operations
+    error_log_limit_reset();
+
+    return -1;
+}
+
+// ----------------------------------------------------------------------------
+// LOAD THE REGISTRY DATABASE
+
+size_t registry_db_load(void) {
+    char *s, buf[4096 + 1];
+    REGISTRY_PERSON *p = NULL;
+    REGISTRY_MACHINE *m = NULL;
+    REGISTRY_URL *u = NULL;
+    size_t line = 0;
+
+    debug(D_REGISTRY, "Registry: loading active db from: '%s'", registry.db_filename);
+    FILE *fp = fopen(registry.db_filename, "r");
+    if(!fp) {
+        error("Registry: cannot open registry file: '%s'", registry.db_filename);
+        return 0;
+    }
+
+    size_t len = 0;
+    buf[4096] = '\0';
+    while((s = fgets_trim_len(buf, 4096, fp, &len))) {
+        line++;
+
+        debug(D_REGISTRY, "Registry: read line %zu to length %zu: %s", line, len, s);
+        switch(*s) {
+            case 'T': // totals
+                if(unlikely(len != 103 || s[1] != '\t' || s[18] != '\t' || s[35] != '\t' || s[52] != '\t' || s[69] != '\t' || s[86] != '\t' || s[103] != '\0')) {
+                    error("Registry totals line %zu is wrong (len = %zu).", line, len);
+                    continue;
+                }
+                registry.persons_count = strtoull(&s[2], NULL, 16);
+                registry.machines_count = strtoull(&s[19], NULL, 16);
+                registry.usages_count = strtoull(&s[36], NULL, 16);
+                registry.urls_count = strtoull(&s[53], NULL, 16);
+                registry.persons_urls_count = strtoull(&s[70], NULL, 16);
+                registry.machines_urls_count = strtoull(&s[87], NULL, 16);
+                break;
+
+            case 'P': // person
+                m = NULL;
+                // verify it is valid
+                if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
+                    error("Registry person line %zu is wrong (len = %zu).", line, len);
+                    continue;
+                }
+
+                s[1] = s[10] = s[19] = s[28] = '\0';
+                p = registry_person_allocate(&s[29], strtoul(&s[2], NULL, 16));
+                p->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+                p->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+                debug(D_REGISTRY, "Registry loaded person '%s', first: %u, last: %u, usages: %u", p->guid, p->first_t, p->last_t, p->usages);
+                break;
+
+            case 'M': // machine
+                p = NULL;
+                // verify it is valid
+                if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
+                    error("Registry machine line %zu is wrong (len = %zu).", line, len);
+                    continue;
+                }
+
+                s[1] = s[10] = s[19] = s[28] = '\0';
+                m = registry_machine_allocate(&s[29], strtoul(&s[2], NULL, 16));
+                m->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+                m->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+                debug(D_REGISTRY, "Registry loaded machine '%s', first: %u, last: %u, usages: %u", m->guid, m->first_t, m->last_t, m->usages);
+                break;
+
+            case 'U': // person URL
+                if(unlikely(!p)) {
+                    error("Registry: ignoring line %zu, no person loaded: %s", line, s);
+                    continue;
+                }
+
+                // verify it is valid
+                if(len < 69 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t' || s[68] != '\t') {
+                    error("Registry person URL line %zu is wrong (len = %zu).", line, len);
+                    continue;
+                }
+
+                s[1] = s[10] = s[19] = s[28] = s[31] = s[68] = '\0';
+
+                // skip the name to find the url
+                char *url = &s[69];
+                while(*url && *url != '\t') url++;
+                if(!*url) {
+                    error("Registry person URL line %zu does not have a url.", line);
+                    continue;
+                }
+                *url++ = '\0';
+
+                // u = registry_url_allocate_nolock(url, strlen(url));
+                u = registry_url_get(url, strlen(url));
+
+                time_t first_t = strtoul(&s[2], NULL, 16);
+
+                m = registry_machine_find(&s[32]);
+                if(!m) m = registry_machine_allocate(&s[32], first_t);
+
+                REGISTRY_PERSON_URL *pu = registry_person_url_allocate(p, m, u, &s[69], strlen(&s[69]), first_t);
+                pu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+                pu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+                pu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
+                debug(D_REGISTRY, "Registry loaded person URL '%s' with name '%s' of machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, pu->machine_name, m->guid, pu->first_t, pu->last_t, pu->usages, pu->flags);
+                break;
+
+            case 'V': // machine URL
+                if(unlikely(!m)) {
+                    error("Registry: ignoring line %zu, no machine loaded: %s", line, s);
+                    continue;
+                }
+
+                // verify it is valid
+                if(len < 32 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t') {
+                    error("Registry machine URL line %zu is wrong (len = %zu).", line, len);
+                    continue;
+                }
+
+                s[1] = s[10] = s[19] = s[28] = s[31] = '\0';
+                // u = registry_url_allocate_nolock(&s[32], strlen(&s[32]));
+                u = registry_url_get(&s[32], strlen(&s[32]));
+
+                REGISTRY_MACHINE_URL *mu = registry_machine_url_allocate(m, u, strtoul(&s[2], NULL, 16));
+                mu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+                mu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+                mu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
+                debug(D_REGISTRY, "Registry loaded machine URL '%s', machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, m->guid, mu->first_t, mu->last_t, mu->usages, mu->flags);
+                break;
+
+            default:
+                error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s);
+                break;
+        }
+    }
+    fclose(fp);
+
+    return line;
+}
diff --git a/registry/registry_init.c b/registry/registry_init.c
new file mode 100644
index 000000000..d3e0420d2
--- /dev/null
+++ b/registry/registry_init.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+int registry_init(void) {
+    char filename[FILENAME_MAX + 1];
+
+    // registry enabled?
+ if(web_server_mode != WEB_SERVER_MODE_NONE) { + registry.enabled = config_get_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0); + } + else { + info("Registry is disabled - use the central netdata"); + config_set_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0); + registry.enabled = 0; + } + + // pathnames + snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir); + registry.pathname = config_get(CONFIG_SECTION_REGISTRY, "registry db directory", filename); + if(mkdir(registry.pathname, 0770) == -1 && errno != EEXIST) + fatal("Cannot create directory '%s'.", registry.pathname); + + // filenames + snprintfz(filename, FILENAME_MAX, "%s/netdata.public.unique.id", registry.pathname); + registry.machine_guid_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata unique id file", filename); + + snprintfz(filename, FILENAME_MAX, "%s/registry.db", registry.pathname); + registry.db_filename = config_get(CONFIG_SECTION_REGISTRY, "registry db file", filename); + + snprintfz(filename, FILENAME_MAX, "%s/registry-log.db", registry.pathname); + registry.log_filename = config_get(CONFIG_SECTION_REGISTRY, "registry log file", filename); + + // configuration options + registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000); + registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400; + registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", ""); + registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io"); + registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname); + registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1); + + setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); + setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); + + registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024); + if(registry.max_url_length < 10) { + registry.max_url_length = 10; + config_set_number(CONFIG_SECTION_REGISTRY, "max URL length", (long long)registry.max_url_length); + } + + registry.max_name_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL name length", 50); + if(registry.max_name_length < 10) { + registry.max_name_length = 10; + config_set_number(CONFIG_SECTION_REGISTRY, "max URL name length", (long long)registry.max_name_length); + } + + // initialize entries counters + registry.persons_count = 0; + registry.machines_count = 0; + registry.usages_count = 0; + registry.urls_count = 0; + registry.persons_urls_count = 0; + registry.machines_urls_count = 0; + + // initialize memory counters + registry.persons_memory = 0; + registry.machines_memory = 0; + registry.urls_memory = 0; + registry.persons_urls_memory = 0; + registry.machines_urls_memory = 0; + + // initialize locks + netdata_mutex_init(®istry.lock); + + // create dictionaries + registry.persons = dictionary_create(DICTIONARY_FLAGS); + registry.machines = dictionary_create(DICTIONARY_FLAGS); + avl_init(®istry.registry_urls_root_index, registry_url_compare); + + // load the registry database + if(registry.enabled) { + registry_log_open(); + registry_db_load(); + registry_log_load(); + + if(unlikely(registry_db_should_be_saved())) + registry_db_save(); + } + + return 0; +} + +void registry_free(void) { + 
if(!registry.enabled) return;
+
+    // we need to destroy the dictionaries ourselves
+    // since the dictionaries use memory we allocated
+
+    while(registry.persons->values_index.root) {
+        REGISTRY_PERSON *p = ((NAME_VALUE *)registry.persons->values_index.root)->value;
+        registry_person_del(p);
+    }
+
+    while(registry.machines->values_index.root) {
+        REGISTRY_MACHINE *m = ((NAME_VALUE *)registry.machines->values_index.root)->value;
+
+        // fprintf(stderr, "\nMACHINE: '%s', first: %u, last: %u, usages: %u\n", m->guid, m->first_t, m->last_t, m->usages);
+
+        while(m->machine_urls->values_index.root) {
+            REGISTRY_MACHINE_URL *mu = ((NAME_VALUE *)m->machine_urls->values_index.root)->value;
+
+            // fprintf(stderr, "\tURL: '%s', first: %u, last: %u, usages: %u, flags: 0x%02x\n", mu->url->url, mu->first_t, mu->last_t, mu->usages, mu->flags);
+
+            //debug(D_REGISTRY, "Registry: destroying persons dictionary from url '%s'", mu->url->url);
+            //dictionary_destroy(mu->persons);
+
+            debug(D_REGISTRY, "Registry: deleting url '%s' from person '%s'", mu->url->url, m->guid);
+            dictionary_del(m->machine_urls, mu->url->url);
+
+            debug(D_REGISTRY, "Registry: unlinking url '%s' from machine", mu->url->url);
+            registry_url_unlink(mu->url);
+
+            debug(D_REGISTRY, "Registry: freeing machine url");
+            freez(mu);
+        }
+
+        debug(D_REGISTRY, "Registry: deleting machine '%s' from machines registry", m->guid);
+        dictionary_del(registry.machines, m->guid);
+
+        debug(D_REGISTRY, "Registry: destroying URL dictionary of machine '%s'", m->guid);
+        dictionary_destroy(m->machine_urls);
+
+        debug(D_REGISTRY, "Registry: freeing machine '%s'", m->guid);
+        freez(m);
+    }
+
+    // and free the memory of remaining dictionary structures
+
+    debug(D_REGISTRY, "Registry: destroying persons dictionary");
+    dictionary_destroy(registry.persons);
+
+    debug(D_REGISTRY, "Registry: destroying machines dictionary");
+    dictionary_destroy(registry.machines);
+}
+
diff --git a/registry/registry_internals.c b/registry/registry_internals.c
new file mode 100644
index 000000000..b54b90142
--- /dev/null
+++ b/registry/registry_internals.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+struct registry registry;
+
+// ----------------------------------------------------------------------------
+// common functions
+
+// parse a GUID and re-generate it, so that it is always lower case
+// this is used as a protection against the variations of GUIDs
+int regenerate_guid(const char *guid, char *result) {
+    uuid_t uuid;
+    if(unlikely(uuid_parse(guid, uuid) == -1)) {
+        info("Registry: GUID '%s' is not a valid GUID.", guid);
+        return -1;
+    }
+    else {
+        uuid_unparse_lower(uuid, result);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+        if(strcmp(guid, result) != 0)
+            info("GUID '%s' and re-generated GUID '%s' differ!", guid, result);
+#endif /* NETDATA_INTERNAL_CHECKS */
+    }
+
+    return 0;
+}
+
+// make sure the names of the machines / URLs do not contain any tabs
+// (which are used as our separator in the database files)
+// and are properly trimmed (before and after)
+static inline char *registry_fix_machine_name(char *name, size_t *len) {
+    char *s = name?name:"";
+
+    // skip leading spaces
+    while(*s && isspace(*s)) s++;
+
+    // make sure all spaces are a SPACE
+    char *t = s;
+    while(*t) {
+        if(unlikely(isspace(*t)))
+            *t = ' ';
+
+        t++;
+    }
+
+    // remove trailing spaces
+    while(--t >= s) {
+        if(*t == ' ')
+            *t = '\0';
+        else
+            break;
+    }
+    t++;
+
+    if(likely(len))
+        *len = (t - s);
+
+    return s;
+}
+
+static inline char *registry_fix_url(char *url, size_t *len) {
+    size_t l = 0;
+    char *s = registry_fix_machine_name(url, &l);
+
+    // protection from too long URLs
+    if(l > registry.max_url_length) {
+        l = registry.max_url_length;
+        s[l] = '\0';
+    }
+
+    if(len) *len = l;
+    return s;
+}
+
+
+// ----------------------------------------------------------------------------
+// HELPERS
+
+// verify the person, the machine and the URL exist in our DB
+REGISTRY_PERSON_URL *registry_verify_request(char *person_guid, char *machine_guid, char *url, REGISTRY_PERSON **pp, REGISTRY_MACHINE **mm) {
+    char pbuf[GUID_LEN + 1], mbuf[GUID_LEN + 1];
+
+    if(!person_guid || !*person_guid || !machine_guid || !*machine_guid || !url || !*url) {
+        info("Registry Request Verification: invalid request! person: '%s', machine '%s', url '%s'", person_guid?person_guid:"UNSET", machine_guid?machine_guid:"UNSET", url?url:"UNSET");
+        return NULL;
+    }
+
+    // normalize the url
+    url = registry_fix_url(url, NULL);
+
+    // make sure the person GUID is valid
+    if(regenerate_guid(person_guid, pbuf) == -1) {
+        info("Registry Request Verification: invalid person GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+        return NULL;
+    }
+    person_guid = pbuf;
+
+    // make sure the machine GUID is valid
+    if(regenerate_guid(machine_guid, mbuf) == -1) {
+        info("Registry Request Verification: invalid machine GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+        return NULL;
+    }
+    machine_guid = mbuf;
+
+    // make sure the machine exists
+    REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
+    if(!m) {
+        info("Registry Request Verification: machine not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+        return NULL;
+    }
+    if(mm) *mm = m;
+
+    // make sure the person exists
+    REGISTRY_PERSON *p = registry_person_find(person_guid);
+    if(!p) {
+        info("Registry Request Verification: person not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+        return NULL;
+    }
+    if(pp) *pp = p;
+
+    REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, url);
+    if(!pu) {
+        info("Registry Request Verification: URL not found for person, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+        return NULL;
+    }
+    return pu;
+}
+
+
+// ----------------------------------------------------------------------------
+// REGISTRY REQUESTS
+
+REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
+    debug(D_REGISTRY, "registry_request_access('%s', '%s', '%s'): NEW REQUEST", (person_guid)?person_guid:"", machine_guid, url);
+
+    REGISTRY_MACHINE *m = registry_machine_get(machine_guid, when);
+    if(!m) return NULL;
+
+    // make sure the name is valid
+    size_t namelen;
+    name = registry_fix_machine_name(name, &namelen);
+
+    size_t urllen;
+    url = registry_fix_url(url, &urllen);
+
+    REGISTRY_PERSON *p = registry_person_get(person_guid, when);
+
+    REGISTRY_URL *u = registry_url_get(url, urllen);
+    registry_person_link_to_url(p, m, u, name, namelen, when);
+    registry_machine_link_to_url(m, u, when);
+
+    registry_log('A', p, m, u, name);
+
+    registry.usages_count++;
+
+    return p;
+}
+
+REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
+    (void) when;
+
+    REGISTRY_PERSON *p = NULL;
+    REGISTRY_MACHINE *m = NULL;
+    REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
+    if(!pu || !p || !m) return NULL;
+
+    // normalize the url
+    delete_url = registry_fix_url(delete_url, NULL);
+
+    // make sure the user is not deleting the url it uses
+    if(!strcmp(delete_url, pu->url->url)) {
+        info("Registry Delete Request: delete URL is the one currently accessed, person: '%s', machine '%s', url '%s', delete url '%s'"
+             , p->guid, m->guid, pu->url->url, delete_url);
+        return NULL;
+    }
+
+    REGISTRY_PERSON_URL *dpu = registry_person_url_index_find(p, delete_url);
+    if(!dpu) {
+        info("Registry Delete Request: URL not found for person: '%s', machine '%s', url '%s', delete url '%s'", p->guid
+             , m->guid, pu->url->url, delete_url);
+        return NULL;
+    }
+
+    registry_log('D', p, m, pu->url, dpu->url->url);
+    registry_person_unlink_from_url(p, dpu);
+
+    return p;
+}
+
+
+// a structure to pass to the avl_traverse() callback handler below
+struct machine_request_callback_data {
+    REGISTRY_MACHINE *find_this_machine;
+    REGISTRY_PERSON_URL *result;
+};
+
+// the callback function
+// this will be run for every PERSON_URL of this PERSON
+static int machine_request_callback(void *entry, void *data) {
+    REGISTRY_PERSON_URL *mypu = (REGISTRY_PERSON_URL *)entry;
+    struct machine_request_callback_data *myrdata = (struct machine_request_callback_data *)data;
+
+    if(mypu->machine == myrdata->find_this_machine) {
+        myrdata->result = mypu;
+        return -1; // this will also stop the walk through
+    }
+
+    return 0; // continue
+}
+
+REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
+    (void)when;
+
+    char mbuf[GUID_LEN + 1];
+
+    REGISTRY_PERSON *p = NULL;
+    REGISTRY_MACHINE *m = NULL;
+    REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
+    if(!pu || !p || !m) return NULL;
+
+    // make sure the machine GUID is valid
+    if(regenerate_guid(request_machine, mbuf) == -1) {
+        info("Registry Machine URLs request: invalid machine GUID, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, m->guid, pu->url->url, request_machine);
+        return NULL;
+    }
+    request_machine = mbuf;
+
+    // make sure the machine exists
+    m = registry_machine_find(request_machine);
+    if(!m) {
+        info("Registry Machine URLs request: machine not found, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, machine_guid, pu->url->url, request_machine);
+        return NULL;
+    }
+
+    // Verify the user has in the past accessed this machine.
+    // We will walk through the person's PERSON_URLs, looking for one
+    // linked to this machine.
+
+    // a structure to pass to the avl_traverse() callback handler
+    struct machine_request_callback_data rdata = { m, NULL };
+
+    // request a walk through on the index
+    avl_traverse(&p->person_urls, machine_request_callback, &rdata);
+
+    if(rdata.result)
+        return m;
+
+    return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// REGISTRY THIS MACHINE UNIQUE ID
+
+static inline int is_machine_guid_blacklisted(const char *guid) {
+    // these are machine GUIDs that have been included in distribution packages.
+    // we blacklist them here, so that the next version of netdata will generate
+    // new ones.
+
+    if(!strcmp(guid, "8a795b0c-2311-11e6-8563-000c295076a6")
+       || !strcmp(guid, "4aed1458-1c3e-11e6-a53f-000c290fc8f5")
+    ) {
+        error("Blacklisted machine GUID '%s' found.", guid);
+        return 1;
+    }
+
+    return 0;
+}
+
+char *registry_get_this_machine_hostname(void) {
+    return registry.hostname;
+}
+
+char *registry_get_this_machine_guid(void) {
+    static char guid[GUID_LEN + 1] = "";
+
+    if(likely(guid[0]))
+        return guid;
+
+    // read it from disk
+    int fd = open(registry.machine_guid_filename, O_RDONLY);
+    if(fd != -1) {
+        char buf[GUID_LEN + 1];
+        if(read(fd, buf, GUID_LEN) != GUID_LEN)
+            error("Failed to read machine GUID from '%s'", registry.machine_guid_filename);
+        else {
+            buf[GUID_LEN] = '\0';
+            if(regenerate_guid(buf, guid) == -1) {
+                error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.",
+                      buf, registry.machine_guid_filename);
+
+                guid[0] = '\0';
+            }
+            else if(is_machine_guid_blacklisted(guid))
+                guid[0] = '\0';
+        }
+        close(fd);
+    }
+
+    // generate a new one?
+    if(!guid[0]) {
+        uuid_t uuid;
+
+        uuid_generate_time(uuid);
+        uuid_unparse_lower(uuid, guid);
+        guid[GUID_LEN] = '\0';
+
+        // save it (0444: the octal mode, read-only for everyone)
+        fd = open(registry.machine_guid_filename, O_WRONLY|O_CREAT|O_TRUNC, 0444);
+        if(fd == -1)
+            fatal("Cannot create unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
+
+        if(write(fd, guid, GUID_LEN) != GUID_LEN)
+            fatal("Cannot write the unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
+
+        close(fd);
+    }
+
+    setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1);
+
+    return guid;
+}
diff --git a/registry/registry_internals.h b/registry/registry_internals.h
new file mode 100644
index 000000000..baa2dc09d
--- /dev/null
+++ b/registry/registry_internals.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_REGISTRY_INTERNALS_H_H
+#define NETDATA_REGISTRY_INTERNALS_H_H 1
+
+#include "registry.h"
+
+#define REGISTRY_URL_FLAGS_DEFAULT 0x00
+#define REGISTRY_URL_FLAGS_EXPIRED 0x01
+
+#define DICTIONARY_FLAGS (DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE | DICTIONARY_FLAG_NAME_LINK_DONT_CLONE | DICTIONARY_FLAG_SINGLE_THREADED)
+
+// ----------------------------------------------------------------------------
+// COMMON structures
+
+struct registry {
+    int enabled;
+
+    // entries counters / statistics
+    unsigned long long persons_count;
+    unsigned long long machines_count;
+    unsigned long long usages_count;
+    unsigned long long urls_count;
+    unsigned long long persons_urls_count;
+    unsigned long long machines_urls_count;
+    unsigned long long log_count;
+
+    // memory counters / statistics
+    unsigned long long persons_memory;
+    unsigned long long machines_memory;
+    unsigned long long urls_memory;
+    unsigned long long persons_urls_memory;
+    unsigned long long machines_urls_memory;
+
+    // configuration
+    unsigned long long save_registry_every_entries;
+    char *registry_domain;
+    char *hostname;
+    char *registry_to_announce;
+    time_t persons_expiration; // seconds to expire idle persons
+    int verify_cookies_redirects;
+
+    size_t max_url_length;
+    size_t max_name_length;
+
+    // file/path names
+    char *pathname;
+    char *db_filename;
+    char *log_filename;
+    char *machine_guid_filename;
+
+    // open files
+    FILE *log_fp;
+
+    // the database
+    DICTIONARY *persons;    // dictionary of REGISTRY_PERSON *, with key the REGISTRY_PERSON.guid
+    DICTIONARY *machines;   // dictionary of REGISTRY_MACHINE *, with key the REGISTRY_MACHINE.guid
+
+    avl_tree registry_urls_root_index;
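+    // (the URLs index holds one node per de-duplicated REGISTRY_URL,
+    //  ordered by hash first and by string second - see registry_url_compare())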
+
+    netdata_mutex_t lock;
+};
+
+#include "registry_url.h"
+#include "registry_machine.h"
+#include "registry_person.h"
+#include "registry.h"
+
+extern struct registry registry;
+
+// REGISTRY LOW-LEVEL REQUESTS (in registry_internals.c)
+extern REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when);
+extern REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when);
+extern REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when);
+
+// REGISTRY LOG (in registry_log.c)
+extern void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name);
+extern int registry_log_open(void);
+extern void registry_log_close(void);
+extern void registry_log_recreate(void);
+extern ssize_t registry_log_load(void);
+
+// REGISTRY DB (in registry_db.c)
+extern int registry_db_save(void);
+extern size_t registry_db_load(void);
+extern int registry_db_should_be_saved(void);
+
+#endif //NETDATA_REGISTRY_INTERNALS_H_H
diff --git a/registry/registry_log.c b/registry/registry_log.c
new file mode 100644
index 000000000..e0e58ede3
--- /dev/null
+++ b/registry/registry_log.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) {
+    if(likely(registry.log_fp)) {
+        if(unlikely(fprintf(registry.log_fp, "%c\t%08x\t%s\t%s\t%s\t%s\n",
+                            action,
+                            p->last_t,
+                            p->guid,
+                            m->guid,
+                            name,
+                            u->url) < 0))
+            error("Registry: failed to save log. Registry data may be lost in case of abnormal restart.");
+
+        // we increase the counter even on failures
+        // so that the registry will be saved periodically
+        registry.log_count++;
+
+        // this must be outside the log_lock(), or a deadlock will happen.
+        // registry_db_save() performs the same check inside the log_lock,
+        // so only one thread will save the db
+        if(unlikely(registry_db_should_be_saved()))
+            registry_db_save();
+    }
+}
+
+int registry_log_open(void) {
+    if(registry.log_fp)
+        fclose(registry.log_fp);
+
+    registry.log_fp = fopen(registry.log_filename, "a");
+    if(registry.log_fp) {
+        if (setvbuf(registry.log_fp, NULL, _IOLBF, 0) != 0)
+            error("Cannot set line buffering on registry log file.");
+        return 0;
+    }
+
+    error("Cannot open registry log file '%s'. Registry data will be lost in case of netdata or server crash.", registry.log_filename);
+    return -1;
+}
+
+void registry_log_close(void) {
+    if(registry.log_fp) {
+        fclose(registry.log_fp);
+        registry.log_fp = NULL;
+    }
+}
+
+void registry_log_recreate(void) {
+    if(registry.log_fp != NULL) {
+        registry_log_close();
+
+        // open it with truncate
+        registry.log_fp = fopen(registry.log_filename, "w");
+        if(registry.log_fp) fclose(registry.log_fp);
+        else error("Cannot truncate registry log '%s'", registry.log_filename);
+
+        registry.log_fp = NULL;
+        registry_log_open();
+    }
+}
+
+ssize_t registry_log_load(void) {
+    ssize_t line = -1;
+
+    // closing the log is required here:
+    // the requests we replay below would otherwise be appended to it again
+    registry_log_close();
+
+    debug(D_REGISTRY, "Registry: loading active db from: %s", registry.log_filename);
+    FILE *fp = fopen(registry.log_filename, "r");
+    if(!fp)
+        error("Registry: cannot open registry log file: %s", registry.log_filename);
+    else {
+        char *s, buf[4096 + 1];
+        line = 0;
+        size_t len = 0;
+
+        while ((s = fgets_trim_len(buf, 4096, fp, &len))) {
+            line++;
+
+            switch (s[0]) {
+                case 'A': // accesses
+                case 'D': // deletes
+
+                    // verify it is valid: log lines have fixed-offset fields
+                    // ACTION(1) \t TIME(8 hex) \t PERSON_GUID(36) \t MACHINE_GUID(36) \t NAME \t URL
+                    // so the tab separators must be at offsets 1, 10, 47 and 84
+                    if (unlikely(len < 85 || s[1] != '\t' || s[10] != '\t' || s[47] != '\t' || s[84] != '\t')) {
+                        error("Registry: log line %zd is wrong (len = %zu).", line, len);
+                        continue;
+                    }
+                    s[1] = s[10] = s[47] = s[84] = '\0';
+
+                    // get the variables
+                    time_t when = strtoul(&s[2], NULL, 16);
+                    char *person_guid = &s[11];
+                    char *machine_guid = &s[48];
+                    char *name = &s[85];
+
+                    // skip the name to find the url
+                    char *url = name;
+                    while(*url && *url != '\t') url++;
+                    if(!*url) {
+                        error("Registry: log line %zd does not have a url.", line);
+                        continue;
+                    }
+                    *url++ = '\0';
+
+                    // make sure the person exists
+                    // without this, a new person guid will be created
+                    REGISTRY_PERSON *p = registry_person_find(person_guid);
+                    if(!p) p = registry_person_allocate(person_guid, when);
+
+                    if(s[0] == 'A')
+                        registry_request_access(p->guid, machine_guid, url, name, when);
+                    else
+                        registry_request_delete(p->guid, machine_guid, url, name, when);
+
+                    registry.log_count++;
+                    break;
+
+                default:
+                    error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s);
+                    break;
+            }
+        }
+
+        fclose(fp);
+    }
+
+    // open the log again
+    registry_log_open();
+
+    return line;
+}
diff --git a/registry/registry_machine.c b/registry/registry_machine.c
new file mode 100644
index 000000000..8dbeb8ea6
--- /dev/null
+++ b/registry/registry_machine.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// MACHINE
+
+REGISTRY_MACHINE *registry_machine_find(const char *machine_guid) {
+    debug(D_REGISTRY, "Registry: registry_machine_find('%s')", machine_guid);
+    return dictionary_get(registry.machines, machine_guid);
+}
+
+REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
+    debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): allocating %zu bytes", m->guid, u->url, sizeof(REGISTRY_MACHINE_URL));
+
+    REGISTRY_MACHINE_URL *mu = mallocz(sizeof(REGISTRY_MACHINE_URL));
+
+    mu->first_t = mu->last_t = (uint32_t)when;
+    mu->usages = 1;
+    mu->url = u;
+    mu->flags = REGISTRY_URL_FLAGS_DEFAULT;
+
+    registry.machines_urls_memory += sizeof(REGISTRY_MACHINE_URL);
+
+    debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'):
indexing URL in machine", m->guid, u->url); + dictionary_set(m->machine_urls, u->url, mu, sizeof(REGISTRY_MACHINE_URL)); + + registry_url_link(u); + + return mu; +} + +REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when) { + debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating new machine, sizeof(MACHINE)=%zu", machine_guid, sizeof(REGISTRY_MACHINE)); + + REGISTRY_MACHINE *m = mallocz(sizeof(REGISTRY_MACHINE)); + + strncpyz(m->guid, machine_guid, GUID_LEN); + + debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating dictionary of urls", machine_guid); + m->machine_urls = dictionary_create(DICTIONARY_FLAGS); + + m->first_t = m->last_t = (uint32_t)when; + m->usages = 0; + + registry.machines_memory += sizeof(REGISTRY_MACHINE); + + registry.machines_count++; + dictionary_set(registry.machines, m->guid, m, sizeof(REGISTRY_MACHINE)); + + return m; +} + +// 1. validate machine GUID +// 2. if it is valid, find it or create it and return it +// 3. if it is not valid, return NULL +REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when) { + REGISTRY_MACHINE *m = NULL; + + if(likely(machine_guid && *machine_guid)) { + // validate it is a GUID + char buf[GUID_LEN + 1]; + if(unlikely(regenerate_guid(machine_guid, buf) == -1)) + info("Registry: machine guid '%s' is not a valid guid. Ignoring it.", machine_guid); + else { + machine_guid = buf; + m = registry_machine_find(machine_guid); + if(!m) m = registry_machine_allocate(machine_guid, when); + } + } + + return m; +} + + +// ---------------------------------------------------------------------------- +// LINKING OF OBJECTS + +REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) { + debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): searching for URL in machine", m->guid, u->url); + + REGISTRY_MACHINE_URL *mu = dictionary_get(m->machine_urls, u->url); + if(!mu) { + debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): not found", m->guid, u->url); + mu = registry_machine_url_allocate(m, u, when); + registry.machines_urls_count++; + } + else { + debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): found", m->guid, u->url); + mu->usages++; + if(likely(mu->last_t < (uint32_t)when)) mu->last_t = (uint32_t)when; + } + + m->usages++; + if(likely(m->last_t < (uint32_t)when)) m->last_t = (uint32_t)when; + + if(mu->flags & REGISTRY_URL_FLAGS_EXPIRED) { + debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): accessing an expired URL.", m->guid, u->url); + mu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED; + } + + return mu; +} diff --git a/registry/registry_machine.h b/registry/registry_machine.h new file mode 100644 index 000000000..77ab5aaf5 --- /dev/null +++ b/registry/registry_machine.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_REGISTRY_MACHINE_H +#define NETDATA_REGISTRY_MACHINE_H 1 + +#include "registry_internals.h" + +// ---------------------------------------------------------------------------- +// MACHINE structures + +// For each MACHINE-URL pair we keep this +struct registry_machine_url { + REGISTRY_URL *url; // de-duplicated URL + + uint8_t flags; + + uint32_t first_t; // the first time we saw this + uint32_t last_t; // the last time we saw this + uint32_t usages; // how many times this has been accessed +}; +typedef struct registry_machine_url REGISTRY_MACHINE_URL; + +// A machine +struct registry_machine { + char guid[GUID_LEN + 1]; // the GUID + + 
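+    // note: `links` below is maintained by the PERSON_URL code:
+    // registry_person_url_allocate() increments it and
+    // registry_person_url_free() decrements it, so a machine always
+    // knows how many PERSON_URLs still refer to it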
uint32_t links; // the number of REGISTRY_PERSON_URL linked to this machine + + DICTIONARY *machine_urls; // MACHINE_URL * + + uint32_t first_t; // the first time we saw this + uint32_t last_t; // the last time we saw this + uint32_t usages; // how many times this has been accessed +}; +typedef struct registry_machine REGISTRY_MACHINE; + +extern REGISTRY_MACHINE *registry_machine_find(const char *machine_guid); +extern REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when); +extern REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when); +extern REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when); +extern REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when); + +#endif //NETDATA_REGISTRY_MACHINE_H diff --git a/registry/registry_person.c b/registry/registry_person.c new file mode 100644 index 000000000..53e3f47f4 --- /dev/null +++ b/registry/registry_person.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../daemon/common.h" +#include "registry_internals.h" + +// ---------------------------------------------------------------------------- +// PERSON_URL INDEX + +int person_url_compare(void *a, void *b) { + register uint32_t hash1 = ((REGISTRY_PERSON_URL *)a)->url->hash; + register uint32_t hash2 = ((REGISTRY_PERSON_URL *)b)->url->hash; + + if(hash1 < hash2) return -1; + else if(hash1 > hash2) return 1; + else return strcmp(((REGISTRY_PERSON_URL *)a)->url->url, ((REGISTRY_PERSON_URL *)b)->url->url); +} + +inline REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url) { + debug(D_REGISTRY, "Registry: registry_person_url_index_find('%s', '%s')", p->guid, url); + + char buf[sizeof(REGISTRY_URL) + strlen(url)]; + + REGISTRY_URL *u = (REGISTRY_URL *)&buf; + strcpy(u->url, url); + u->hash = simple_hash(u->url); + + REGISTRY_PERSON_URL tpu = { .url = u }; + + REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)avl_search(&p->person_urls, (void *)&tpu); + return pu; +} + +inline REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { + debug(D_REGISTRY, "Registry: registry_person_url_index_add('%s', '%s')", p->guid, pu->url->url); + REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_insert(&(p->person_urls), (avl *)(pu)); + if(tpu != pu) + error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url); + + return tpu; +} + +inline REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { + debug(D_REGISTRY, "Registry: registry_person_url_index_del('%s', '%s')", p->guid, pu->url->url); + REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_remove(&(p->person_urls), (avl *)(pu)); + if(!tpu) + error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url); + else if(tpu != pu) + error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url); + + return tpu; +} + +// ---------------------------------------------------------------------------- +// PERSON_URL + +REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) { + debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + 
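+            // no +1 needed for the terminating '\0' here: the trailing
+            // machine_name[1] member of REGISTRY_PERSON_URL already provides that byte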
namelen);
+
+    // protection from too big names
+    if(namelen > registry.max_name_length)
+        namelen = registry.max_name_length;
+
+    REGISTRY_PERSON_URL *pu = mallocz(sizeof(REGISTRY_PERSON_URL) + namelen);
+
+    // a simple strcpy() should do the job
+    // but I prefer to be safe, since the caller specified namelen
+    strncpyz(pu->machine_name, name, namelen);
+
+    pu->machine = m;
+    pu->first_t = pu->last_t = (uint32_t)when;
+    pu->usages = 1;
+    pu->url = u;
+    pu->flags = REGISTRY_URL_FLAGS_DEFAULT;
+    m->links++;
+
+    registry.persons_urls_memory += sizeof(REGISTRY_PERSON_URL) + namelen;
+
+    debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): indexing URL in person", p->guid, m->guid, u->url);
+    REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu);
+    if(tpu != pu) {
+        error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid);
+        free(pu);
+        pu = tpu;
+    }
+    else
+        registry_url_link(u);
+
+    return pu;
+}
+
+void registry_person_url_free(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+    debug(D_REGISTRY, "registry_person_url_free('%s', '%s')", p->guid, pu->url->url);
+
+    REGISTRY_PERSON_URL *tpu = registry_person_url_index_del(p, pu);
+    if(tpu) {
+        registry_url_unlink(tpu->url);
+        tpu->machine->links--;
+        registry.persons_urls_memory -= sizeof(REGISTRY_PERSON_URL) + strlen(tpu->machine_name);
+        freez(tpu);
+    }
+}
+
+// this function is needed to change the name of a PERSON_URL
+REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu) {
+    debug(D_REGISTRY, "registry_person_url_reallocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
+
+    // keep a backup
+    REGISTRY_PERSON_URL pu2 = {
+        .first_t = pu->first_t,
+        .last_t = pu->last_t,
+        .usages = pu->usages,
+        .flags = pu->flags,
+        .machine = pu->machine,
+        .machine_name = ""
+    };
+
+    // remove the existing one from the index and free it
+    registry_person_url_free(p, pu);
+    pu = &pu2;
+
+    // allocate a new one
+    REGISTRY_PERSON_URL *tpu = registry_person_url_allocate(p, m, u, name, namelen, when);
+    tpu->first_t = pu->first_t;
+    tpu->last_t = pu->last_t;
+    tpu->usages = pu->usages;
+    tpu->flags = pu->flags;
+
+    return tpu;
+}
+
+
+// ----------------------------------------------------------------------------
+// PERSON
+
+REGISTRY_PERSON *registry_person_find(const char *person_guid) {
+    debug(D_REGISTRY, "Registry: registry_person_find('%s')", person_guid);
+    return dictionary_get(registry.persons, person_guid);
+}
+
+REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when) {
+    debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): allocating new person, sizeof(PERSON)=%zu", (person_guid)?person_guid:"", sizeof(REGISTRY_PERSON));
+
+    REGISTRY_PERSON *p = mallocz(sizeof(REGISTRY_PERSON));
+    if(!person_guid) {
+        for(;;) {
+            uuid_t uuid;
+            uuid_generate(uuid);
+            uuid_unparse_lower(uuid, p->guid);
+
+            debug(D_REGISTRY, "Registry: Checking if the generated person guid '%s' is unique", p->guid);
+            if (!dictionary_get(registry.persons, p->guid)) {
+                debug(D_REGISTRY, "Registry: generated person guid '%s' is unique", p->guid);
+                break;
+            }
+            else
+                info("Registry: generated person guid '%s' found in the registry. Retrying...", p->guid);
Retrying...", p->guid); + } + } + else + strncpyz(p->guid, person_guid, GUID_LEN); + + debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): creating dictionary of urls", p->guid); + avl_init(&p->person_urls, person_url_compare); + + p->first_t = p->last_t = (uint32_t)when; + p->usages = 0; + + registry.persons_memory += sizeof(REGISTRY_PERSON); + + registry.persons_count++; + dictionary_set(registry.persons, p->guid, p, sizeof(REGISTRY_PERSON)); + + return p; +} + + +// 1. validate person GUID +// 2. if it is valid, find it +// 3. if it is not valid, create a new one +// 4. return it +REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when) { + debug(D_REGISTRY, "Registry: registry_person_get('%s'): creating dictionary of urls", person_guid); + + REGISTRY_PERSON *p = NULL; + + if(person_guid && *person_guid) { + char buf[GUID_LEN + 1]; + // validate it is a GUID + if(unlikely(regenerate_guid(person_guid, buf) == -1)) + info("Registry: person guid '%s' is not a valid guid. Ignoring it.", person_guid); + else { + person_guid = buf; + p = registry_person_find(person_guid); + } + } + + if(!p) p = registry_person_allocate(NULL, when); + + return p; +} + +void registry_person_del(REGISTRY_PERSON *p) { + debug(D_REGISTRY, "Registry: registry_person_del('%s'): creating dictionary of urls", p->guid); + + while(p->person_urls.root) + registry_person_unlink_from_url(p, (REGISTRY_PERSON_URL *)p->person_urls.root); + + debug(D_REGISTRY, "Registry: deleting person '%s' from persons registry", p->guid); + dictionary_del(registry.persons, p->guid); + + debug(D_REGISTRY, "Registry: freeing person '%s'", p->guid); + freez(p); +} + +// ---------------------------------------------------------------------------- +// LINKING OF OBJECTS + +REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) { + debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): searching for URL in person", p->guid, m->guid, u->url); + + REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, u->url); + if(!pu) { + debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): not found", p->guid, m->guid, u->url); + pu = registry_person_url_allocate(p, m, u, name, namelen, when); + registry.persons_urls_count++; + } + else { + debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): found", p->guid, m->guid, u->url); + pu->usages++; + if(likely(pu->last_t < (uint32_t)when)) pu->last_t = (uint32_t)when; + + if(pu->machine != m) { + REGISTRY_MACHINE_URL *mu = dictionary_get(pu->machine->machine_urls, u->url); + if(mu) { + debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - expiring it from previous machine.", + p->guid, m->guid, u->url, pu->machine->guid); + mu->flags |= REGISTRY_URL_FLAGS_EXPIRED; + } + else { + debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - but the URL is not linked to the old machine.", + p->guid, m->guid, u->url, pu->machine->guid); + } + + pu->machine->links--; + pu->machine = m; + } + + if(strcmp(pu->machine_name, name) != 0) { + // the name of the PERSON_URL has changed ! 
+ pu = registry_person_url_reallocate(p, m, u, name, namelen, when, pu); + } + } + + p->usages++; + if(likely(p->last_t < (uint32_t)when)) p->last_t = (uint32_t)when; + + if(pu->flags & REGISTRY_URL_FLAGS_EXPIRED) { + debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): accessing an expired URL. Re-enabling URL.", p->guid, m->guid, u->url); + pu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED; + } + + return pu; +} + +void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { + registry_person_url_free(p, pu); +} diff --git a/registry/registry_person.h b/registry/registry_person.h new file mode 100644 index 000000000..30e9cb513 --- /dev/null +++ b/registry/registry_person.h @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_REGISTRY_PERSON_H +#define NETDATA_REGISTRY_PERSON_H 1 + +#include "registry_internals.h" + +// ---------------------------------------------------------------------------- +// PERSON structures + +// for each PERSON-URL pair we keep this +struct registry_person_url { + avl avl; // binary tree node + + REGISTRY_URL *url; // de-duplicated URL + REGISTRY_MACHINE *machine; // link the MACHINE of this URL + + uint8_t flags; + + uint32_t first_t; // the first time we saw this + uint32_t last_t; // the last time we saw this + uint32_t usages; // how many times this has been accessed + + char machine_name[1]; // the name of the machine, as known by the user + // dynamically allocated to fit properly +}; +typedef struct registry_person_url REGISTRY_PERSON_URL; + +// A person +struct registry_person { + char guid[GUID_LEN + 1]; // the person GUID + + avl_tree person_urls; // dictionary of PERSON_URLs + + uint32_t first_t; // the first time we saw this + uint32_t last_t; // the last time we saw this + uint32_t usages; // how many times this has been accessed + + //uint32_t flags; + //char *email; +}; +typedef struct registry_person REGISTRY_PERSON; + +// PERSON_URL +extern REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url); +extern REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) NEVERNULL WARNUNUSED; +extern REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) WARNUNUSED; + +extern REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when); +extern REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu); + +// PERSON +extern REGISTRY_PERSON *registry_person_find(const char *person_guid); +extern REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when); +extern REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when); +extern void registry_person_del(REGISTRY_PERSON *p); + +// LINKING PERSON -> PERSON_URL +extern REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when); +extern void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu); + +#endif //NETDATA_REGISTRY_PERSON_H diff --git a/registry/registry_url.c b/registry/registry_url.c new file mode 100644 index 000000000..6a7106458 --- /dev/null +++ b/registry/registry_url.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../daemon/common.h" +#include 
"registry_internals.h" + +// ---------------------------------------------------------------------------- +// REGISTRY_URL + +int registry_url_compare(void *a, void *b) { + if(((REGISTRY_URL *)a)->hash < ((REGISTRY_URL *)b)->hash) return -1; + else if(((REGISTRY_URL *)a)->hash > ((REGISTRY_URL *)b)->hash) return 1; + else return strcmp(((REGISTRY_URL *)a)->url, ((REGISTRY_URL *)b)->url); +} + +inline REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) { + return (REGISTRY_URL *)avl_insert(&(registry.registry_urls_root_index), (avl *)(u)); +} + +inline REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) { + return (REGISTRY_URL *)avl_remove(&(registry.registry_urls_root_index), (avl *)(u)); +} + +REGISTRY_URL *registry_url_get(const char *url, size_t urllen) { + // protection from too big URLs + if(urllen > registry.max_url_length) + urllen = registry.max_url_length; + + debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu)", url, urllen); + + char buf[sizeof(REGISTRY_URL) + urllen]; // no need for +1, 1 is already in REGISTRY_URL + REGISTRY_URL *n = (REGISTRY_URL *)&buf[0]; + n->len = (uint16_t)urllen; + strncpyz(n->url, url, n->len); + n->hash = simple_hash(n->url); + + REGISTRY_URL *u = (REGISTRY_URL *)avl_search(&(registry.registry_urls_root_index), (avl *)n); + if(!u) { + debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu): allocating %zu bytes", url, urllen, sizeof(REGISTRY_URL) + urllen); + u = callocz(1, sizeof(REGISTRY_URL) + urllen); // no need for +1, 1 is already in REGISTRY_URL + + // a simple strcpy() should do the job + // but I prefer to be safe, since the caller specified urllen + u->len = (uint16_t)urllen; + strncpyz(u->url, url, u->len); + u->links = 0; + u->hash = simple_hash(u->url); + + registry.urls_memory += sizeof(REGISTRY_URL) + urllen; // no need for +1, 1 is already in REGISTRY_URL + + debug(D_REGISTRY, "Registry: registry_url_get('%s'): indexing it", url); + n = registry_url_index_add(u); + if(n != u) { + error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url); + free(u); + u = n; + } + else + registry.urls_count++; + } + + return u; +} + +void registry_url_link(REGISTRY_URL *u) { + u->links++; + debug(D_REGISTRY, "Registry: registry_url_link('%s'): URL has now %u links", u->url, u->links); +} + +void registry_url_unlink(REGISTRY_URL *u) { + u->links--; + if(!u->links) { + debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): No more links for this URL", u->url); + REGISTRY_URL *n = registry_url_index_del(u); + if(!n) { + error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url); + } + else { + if(n != u) { + error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url); + } + + registry.urls_memory -= sizeof(REGISTRY_URL) + n->len; // no need for +1, 1 is already in REGISTRY_URL + freez(n); + } + } + else + debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): URL has %u links left", u->url, u->links); +} diff --git a/registry/registry_url.h b/registry/registry_url.h new file mode 100644 index 000000000..c684f1c35 --- /dev/null +++ b/registry/registry_url.h @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_REGISTRY_URL_H +#define NETDATA_REGISTRY_URL_H 1 + +#include "registry_internals.h" + +// ---------------------------------------------------------------------------- +// URL structures +// Save memory by de-duplicating URLs +// so instead of storing URLs all over the place +// we store them here and 
we keep pointers elsewhere + +struct registry_url { + avl avl; + uint32_t hash; // the index hash + + uint32_t links; // the number of links to this URL - when none is left, we free it + + uint16_t len; // the length of the URL in bytes + char url[1]; // the URL - dynamically allocated to fit the full URL +}; +typedef struct registry_url REGISTRY_URL; + +// REGISTRY_URL INDEX +extern int registry_url_compare(void *a, void *b); +extern REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) WARNUNUSED; +extern REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) NEVERNULL WARNUNUSED; + +// REGISTRY_URL MANAGEMENT +extern REGISTRY_URL *registry_url_get(const char *url, size_t urllen) NEVERNULL; +extern void registry_url_link(REGISTRY_URL *u); +extern void registry_url_unlink(REGISTRY_URL *u); + +#endif //NETDATA_REGISTRY_URL_H diff --git a/src/.keep b/src/.keep deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/Makefile.am b/src/Makefile.am deleted file mode 100644 index df174cbd1..000000000 --- a/src/Makefile.am +++ /dev/null @@ -1,307 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -AM_CPPFLAGS = \ - -DVARLIB_DIR="\"$(varlibdir)\"" \ - -DCACHE_DIR="\"$(cachedir)\"" \ - -DCONFIG_DIR="\"$(configdir)\"" \ - -DLOG_DIR="\"$(logdir)\"" \ - -DPLUGINS_DIR="\"$(pluginsdir)\"" \ - -DRUN_DIR="\"$(localstatedir)/run/netdata\"" \ - -DWEB_DIR="\"$(webdir)\"" \ - $(NULL) - -AM_CFLAGS = \ - $(OPTIONAL_MATH_CFLAGS) \ - $(OPTIONAL_NFACCT_CLFAGS) \ - $(OPTIONAL_ZLIB_CFLAGS) \ - $(OPTIONAL_UUID_CFLAGS) \ - $(OPTIONAL_LIBCAP_CFLAGS) \ - $(OPTIONAL_IPMIMONITORING_CFLAGS)\ - $(NULL) - -sbin_PROGRAMS = netdata -dist_cache_DATA = .keep -dist_varlib_DATA = .keep -dist_registry_DATA = .keep -dist_log_DATA = .keep -plugins_PROGRAMS = - -if ENABLE_PLUGIN_APPS -plugins_PROGRAMS += apps.plugin -endif - -if ENABLE_PLUGIN_FREEIPMI -plugins_PROGRAMS += freeipmi.plugin -endif - -if ENABLE_PLUGIN_CGROUP_NETWORK -plugins_PROGRAMS += cgroup-network -endif - -netdata_SOURCES = \ - adaptive_resortable_list.c \ - adaptive_resortable_list.h \ - appconfig.c \ - appconfig.h \ - avl.c \ - avl.h \ - backend_prometheus.c \ - backend_prometheus.h \ - backends.c \ - backends.h \ - clocks.c \ - clocks.h \ - common.c \ - common.h \ - daemon.c \ - daemon.h \ - dictionary.c \ - dictionary.h \ - eval.c \ - eval.h \ - global_statistics.c \ - global_statistics.h \ - health.c \ - health.h \ - health_config.c \ - health_json.c \ - health_log.c \ - inlined.h \ - locks.c \ - locks.h \ - log.c \ - log.h \ - main.c \ - main.h \ - plugin_checks.c \ - plugin_checks.h \ - plugin_idlejitter.c \ - plugin_idlejitter.h \ - plugins_d.c \ - plugins_d.h \ - popen.c \ - popen.h \ - procfile.c \ - procfile.h \ - registry.c \ - registry.h \ - registry_db.c \ - registry_init.c \ - registry_internals.c \ - registry_internals.h \ - registry_log.c \ - registry_machine.c \ - registry_machine.h \ - registry_person.c \ - registry_person.h \ - registry_url.c \ - registry_url.h \ - rrd.c \ - rrd.h \ - rrd2json.c \ - rrd2json.h \ - rrd2json_api_old.c \ - rrd2json_api_old.h \ - rrdcalc.c \ - rrdcalctemplate.c \ - rrddim.c \ - rrddimvar.c \ - rrdfamily.c \ - rrdhost.c \ - rrdpush.c \ - rrdpush.h \ - rrdset.c \ - rrdsetvar.c \ - rrdvar.c \ - signals.c \ - signals.h \ - simple_pattern.c \ - simple_pattern.h \ - socket.c \ - socket.h \ - statistical.c \ - statistical.h \ - statsd.c \ - statsd.h \ - storage_number.c \ - storage_number.h \ - threads.c \ - threads.h \ - unit_test.c \ - unit_test.h \
- url.c \ - url.h \ - web_api_old.c \ - web_api_old.h \ - web_api_v1.c \ - web_api_v1.h \ - web_buffer.c \ - web_buffer.h \ - web_buffer_svg.c \ - web_buffer_svg.h \ - web_client.c \ - web_client.h \ - web_server.c \ - web_server.h \ - $(NULL) - -if FREEBSD -netdata_SOURCES += \ - plugin_freebsd.c \ - plugin_freebsd.h \ - freebsd_sysctl.c \ - freebsd_getmntinfo.c \ - freebsd_getifaddrs.c \ - freebsd_devstat.c \ - zfs_common.c \ - zfs_common.h \ - freebsd_kstat_zfs.c \ - freebsd_ipfw.c \ - $(NULL) -else -if MACOS -netdata_SOURCES += \ - plugin_macos.c \ - plugin_macos.h \ - macos_sysctl.c \ - macos_mach_smi.c \ - macos_fw.c \ - $(NULL) -else -netdata_SOURCES += \ - ipc.c \ - ipc.h \ - plugin_nfacct.c \ - plugin_nfacct.h \ - plugin_proc.c \ - plugin_proc.h \ - plugin_proc_diskspace.c \ - plugin_proc_diskspace.h \ - plugin_tc.c \ - plugin_tc.h \ - proc_diskstats.c \ - proc_interrupts.c \ - proc_softirqs.c \ - proc_loadavg.c \ - proc_meminfo.c \ - proc_net_dev.c \ - proc_net_ip_vs_stats.c \ - proc_net_netstat.c \ - proc_net_rpc_nfs.c \ - proc_net_rpc_nfsd.c \ - proc_net_snmp.c \ - proc_net_snmp6.c \ - proc_net_sockstat.c \ - proc_net_sockstat6.c \ - proc_net_softnet_stat.c \ - proc_net_stat_conntrack.c \ - proc_net_stat_synproxy.c \ - proc_self_mountinfo.c \ - proc_self_mountinfo.h \ - zfs_common.c \ - zfs_common.h \ - proc_spl_kstat_zfs.c \ - proc_stat.c \ - proc_sys_kernel_random_entropy_avail.c \ - proc_vmstat.c \ - proc_uptime.c \ - sys_kernel_mm_ksm.c \ - sys_devices_system_edac_mc.c \ - sys_devices_system_node.c \ - sys_fs_cgroup.c \ - sys_fs_btrfs.c \ - $(NULL) -endif -endif - -netdata_LDADD = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_NFACCT_LIBS) \ - $(OPTIONAL_ZLIB_LIBS) \ - $(OPTIONAL_UUID_LIBS) \ - $(NULL) - -apps_plugin_SOURCES = \ - apps_plugin.c \ - avl.c \ - avl.h \ - clocks.c \ - clocks.h \ - common.c \ - common.h \ - inlined.h \ - locks.c \ - locks.h \ - log.c log.h \ - procfile.c \ - procfile.h \ - threads.c \ - threads.h \ - web_buffer.c \ - web_buffer.h \ - $(NULL) - -if FREEBSD -apps_plugin_SOURCES += \ - plugin_freebsd.h \ - $(NULL) -else -apps_plugin_SOURCES += \ - adaptive_resortable_list.c \ - adaptive_resortable_list.h \ - $(NULL) -endif - -apps_plugin_LDADD = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(NULL) - -freeipmi_plugin_SOURCES = \ - freeipmi_plugin.c \ - clocks.c \ - clocks.h \ - common.c \ - common.h \ - inlined.h \ - locks.c \ - locks.h \ - log.c log.h \ - procfile.c \ - procfile.h \ - threads.c \ - threads.h \ - $(NULL) - -freeipmi_plugin_LDADD = \ - $(OPTIONAL_IPMIMONITORING_LIBS) \ - $(NULL) - -cgroup_network_SOURCES = \ - cgroup-network.c \ - clocks.c \ - clocks.h \ - common.c \ - common.h \ - inlined.h \ - locks.c \ - locks.h \ - log.c \ - log.h \ - procfile.c \ - procfile.h \ - popen.c \ - popen.h \ - signals.c \ - signals.h \ - threads.c \ - threads.h \ - $(NULL) - -cgroup_network_LDADD = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(NULL) diff --git a/src/Makefile.in b/src/Makefile.in deleted file mode 100644 index 75f85632e..000000000 --- a/src/Makefile.in +++ /dev/null @@ -1,1238 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -sbin_PROGRAMS = netdata$(EXEEXT) -plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3) -@ENABLE_PLUGIN_APPS_TRUE@am__append_1 = apps.plugin -@ENABLE_PLUGIN_FREEIPMI_TRUE@am__append_2 = freeipmi.plugin -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__append_3 = cgroup-network -@FREEBSD_TRUE@am__append_4 = \ -@FREEBSD_TRUE@ plugin_freebsd.c \ -@FREEBSD_TRUE@ plugin_freebsd.h \ -@FREEBSD_TRUE@ freebsd_sysctl.c \ -@FREEBSD_TRUE@ freebsd_getmntinfo.c \ -@FREEBSD_TRUE@ freebsd_getifaddrs.c \ -@FREEBSD_TRUE@ freebsd_devstat.c \ -@FREEBSD_TRUE@ zfs_common.c \ -@FREEBSD_TRUE@ zfs_common.h \ -@FREEBSD_TRUE@ freebsd_kstat_zfs.c \ -@FREEBSD_TRUE@ freebsd_ipfw.c \ -@FREEBSD_TRUE@ $(NULL) - -@FREEBSD_FALSE@@MACOS_TRUE@am__append_5 = \ -@FREEBSD_FALSE@@MACOS_TRUE@ plugin_macos.c \ -@FREEBSD_FALSE@@MACOS_TRUE@ plugin_macos.h \ -@FREEBSD_FALSE@@MACOS_TRUE@ macos_sysctl.c \ -@FREEBSD_FALSE@@MACOS_TRUE@ macos_mach_smi.c \ -@FREEBSD_FALSE@@MACOS_TRUE@ macos_fw.c \ -@FREEBSD_FALSE@@MACOS_TRUE@ $(NULL) - -@FREEBSD_FALSE@@MACOS_FALSE@am__append_6 = \ -@FREEBSD_FALSE@@MACOS_FALSE@ ipc.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ ipc.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_nfacct.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_nfacct.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ 
plugin_proc_diskspace.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc_diskspace.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_tc.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_tc.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_diskstats.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_interrupts.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_softirqs.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_loadavg.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_meminfo.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_dev.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_ip_vs_stats.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_netstat.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_rpc_nfs.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_rpc_nfsd.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_snmp.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_snmp6.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_sockstat.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_sockstat6.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_softnet_stat.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_conntrack.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_synproxy.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_self_mountinfo.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_self_mountinfo.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ zfs_common.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ zfs_common.h \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_spl_kstat_zfs.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_stat.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_sys_kernel_random_entropy_avail.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_vmstat.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_uptime.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_kernel_mm_ksm.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_devices_system_edac_mc.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_devices_system_node.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_fs_cgroup.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_fs_btrfs.c \ -@FREEBSD_FALSE@@MACOS_FALSE@ $(NULL) - -@FREEBSD_TRUE@am__append_7 = \ -@FREEBSD_TRUE@ plugin_freebsd.h \ -@FREEBSD_TRUE@ $(NULL) - -@FREEBSD_FALSE@am__append_8 = \ -@FREEBSD_FALSE@ adaptive_resortable_list.c \ -@FREEBSD_FALSE@ adaptive_resortable_list.h \ -@FREEBSD_FALSE@ $(NULL) - -subdir = src -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(top_srcdir)/depcomp $(dist_cache_DATA) $(dist_log_DATA) \ - $(dist_registry_DATA) $(dist_varlib_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -@ENABLE_PLUGIN_APPS_TRUE@am__EXEEXT_1 = apps.plugin$(EXEEXT) -@ENABLE_PLUGIN_FREEIPMI_TRUE@am__EXEEXT_2 = freeipmi.plugin$(EXEEXT) -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__EXEEXT_3 = \ -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ cgroup-network$(EXEEXT) -am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" \ - "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" \ - "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)" -PROGRAMS = $(plugins_PROGRAMS) $(sbin_PROGRAMS) -am__apps_plugin_SOURCES_DIST = apps_plugin.c avl.c avl.h clocks.c \ - clocks.h common.c common.h inlined.h locks.c locks.h log.c \ - log.h procfile.c procfile.h threads.c threads.h 
web_buffer.c \ - web_buffer.h plugin_freebsd.h adaptive_resortable_list.c \ - adaptive_resortable_list.h -am__objects_1 = -@FREEBSD_FALSE@am__objects_2 = adaptive_resortable_list.$(OBJEXT) -am_apps_plugin_OBJECTS = apps_plugin.$(OBJEXT) avl.$(OBJEXT) \ - clocks.$(OBJEXT) common.$(OBJEXT) locks.$(OBJEXT) \ - log.$(OBJEXT) procfile.$(OBJEXT) threads.$(OBJEXT) \ - web_buffer.$(OBJEXT) $(am__objects_1) $(am__objects_2) -apps_plugin_OBJECTS = $(am_apps_plugin_OBJECTS) -am__DEPENDENCIES_1 = -apps_plugin_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) -am_cgroup_network_OBJECTS = cgroup-network.$(OBJEXT) clocks.$(OBJEXT) \ - common.$(OBJEXT) locks.$(OBJEXT) log.$(OBJEXT) \ - procfile.$(OBJEXT) popen.$(OBJEXT) signals.$(OBJEXT) \ - threads.$(OBJEXT) -cgroup_network_OBJECTS = $(am_cgroup_network_OBJECTS) -cgroup_network_DEPENDENCIES = $(am__DEPENDENCIES_1) \ - $(am__DEPENDENCIES_1) -am_freeipmi_plugin_OBJECTS = freeipmi_plugin.$(OBJEXT) \ - clocks.$(OBJEXT) common.$(OBJEXT) locks.$(OBJEXT) \ - log.$(OBJEXT) procfile.$(OBJEXT) threads.$(OBJEXT) -freeipmi_plugin_OBJECTS = $(am_freeipmi_plugin_OBJECTS) -freeipmi_plugin_DEPENDENCIES = $(am__DEPENDENCIES_1) -am__netdata_SOURCES_DIST = adaptive_resortable_list.c \ - adaptive_resortable_list.h appconfig.c appconfig.h avl.c avl.h \ - backend_prometheus.c backend_prometheus.h backends.c \ - backends.h clocks.c clocks.h common.c common.h daemon.c \ - daemon.h dictionary.c dictionary.h eval.c eval.h \ - global_statistics.c global_statistics.h health.c health.h \ - health_config.c health_json.c health_log.c inlined.h locks.c \ - locks.h log.c log.h main.c main.h plugin_checks.c \ - plugin_checks.h plugin_idlejitter.c plugin_idlejitter.h \ - plugins_d.c plugins_d.h popen.c popen.h procfile.c procfile.h \ - registry.c registry.h registry_db.c registry_init.c \ - registry_internals.c registry_internals.h registry_log.c \ - registry_machine.c registry_machine.h registry_person.c \ - registry_person.h registry_url.c registry_url.h rrd.c rrd.h \ - rrd2json.c rrd2json.h rrd2json_api_old.c rrd2json_api_old.h \ - rrdcalc.c rrdcalctemplate.c rrddim.c rrddimvar.c rrdfamily.c \ - rrdhost.c rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c \ - signals.c signals.h simple_pattern.c simple_pattern.h socket.c \ - socket.h statistical.c statistical.h statsd.c statsd.h \ - storage_number.c storage_number.h threads.c threads.h \ - unit_test.c unit_test.h url.c url.h web_api_old.c \ - web_api_old.h web_api_v1.c web_api_v1.h web_buffer.c \ - web_buffer.h web_buffer_svg.c web_buffer_svg.h web_client.c \ - web_client.h web_server.c web_server.h plugin_freebsd.c \ - plugin_freebsd.h freebsd_sysctl.c freebsd_getmntinfo.c \ - freebsd_getifaddrs.c freebsd_devstat.c zfs_common.c \ - zfs_common.h freebsd_kstat_zfs.c freebsd_ipfw.c plugin_macos.c \ - plugin_macos.h macos_sysctl.c macos_mach_smi.c macos_fw.c \ - ipc.c ipc.h plugin_nfacct.c plugin_nfacct.h plugin_proc.c \ - plugin_proc.h plugin_proc_diskspace.c plugin_proc_diskspace.h \ - plugin_tc.c plugin_tc.h proc_diskstats.c proc_interrupts.c \ - proc_softirqs.c proc_loadavg.c proc_meminfo.c proc_net_dev.c \ - proc_net_ip_vs_stats.c proc_net_netstat.c proc_net_rpc_nfs.c \ - proc_net_rpc_nfsd.c proc_net_snmp.c proc_net_snmp6.c \ - proc_net_sockstat.c proc_net_sockstat6.c \ - proc_net_softnet_stat.c proc_net_stat_conntrack.c \ - proc_net_stat_synproxy.c proc_self_mountinfo.c \ - proc_self_mountinfo.h proc_spl_kstat_zfs.c proc_stat.c \ - proc_sys_kernel_random_entropy_avail.c proc_vmstat.c \ - proc_uptime.c sys_kernel_mm_ksm.c 
sys_devices_system_edac_mc.c \ - sys_devices_system_node.c sys_fs_cgroup.c sys_fs_btrfs.c -@FREEBSD_TRUE@am__objects_3 = plugin_freebsd.$(OBJEXT) \ -@FREEBSD_TRUE@ freebsd_sysctl.$(OBJEXT) \ -@FREEBSD_TRUE@ freebsd_getmntinfo.$(OBJEXT) \ -@FREEBSD_TRUE@ freebsd_getifaddrs.$(OBJEXT) \ -@FREEBSD_TRUE@ freebsd_devstat.$(OBJEXT) zfs_common.$(OBJEXT) \ -@FREEBSD_TRUE@ freebsd_kstat_zfs.$(OBJEXT) \ -@FREEBSD_TRUE@ freebsd_ipfw.$(OBJEXT) -@FREEBSD_FALSE@@MACOS_TRUE@am__objects_4 = plugin_macos.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_TRUE@ macos_sysctl.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_TRUE@ macos_mach_smi.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_TRUE@ macos_fw.$(OBJEXT) -@FREEBSD_FALSE@@MACOS_FALSE@am__objects_5 = ipc.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_nfacct.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc_diskspace.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ plugin_tc.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_diskstats.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_interrupts.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_softirqs.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_loadavg.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_meminfo.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_dev.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_ip_vs_stats.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_netstat.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_rpc_nfs.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_rpc_nfsd.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_snmp.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_snmp6.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_sockstat.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_sockstat6.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_softnet_stat.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_conntrack.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_synproxy.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_self_mountinfo.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ zfs_common.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_spl_kstat_zfs.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_stat.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_sys_kernel_random_entropy_avail.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_vmstat.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ proc_uptime.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_kernel_mm_ksm.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_devices_system_edac_mc.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_devices_system_node.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_fs_cgroup.$(OBJEXT) \ -@FREEBSD_FALSE@@MACOS_FALSE@ sys_fs_btrfs.$(OBJEXT) -am_netdata_OBJECTS = adaptive_resortable_list.$(OBJEXT) \ - appconfig.$(OBJEXT) avl.$(OBJEXT) backend_prometheus.$(OBJEXT) \ - backends.$(OBJEXT) clocks.$(OBJEXT) common.$(OBJEXT) \ - daemon.$(OBJEXT) dictionary.$(OBJEXT) eval.$(OBJEXT) \ - global_statistics.$(OBJEXT) health.$(OBJEXT) \ - health_config.$(OBJEXT) health_json.$(OBJEXT) \ - health_log.$(OBJEXT) locks.$(OBJEXT) log.$(OBJEXT) \ - main.$(OBJEXT) plugin_checks.$(OBJEXT) \ - plugin_idlejitter.$(OBJEXT) plugins_d.$(OBJEXT) \ - popen.$(OBJEXT) procfile.$(OBJEXT) registry.$(OBJEXT) \ - registry_db.$(OBJEXT) registry_init.$(OBJEXT) \ - registry_internals.$(OBJEXT) registry_log.$(OBJEXT) \ - registry_machine.$(OBJEXT) registry_person.$(OBJEXT) \ - registry_url.$(OBJEXT) rrd.$(OBJEXT) rrd2json.$(OBJEXT) \ - rrd2json_api_old.$(OBJEXT) rrdcalc.$(OBJEXT) \ - rrdcalctemplate.$(OBJEXT) rrddim.$(OBJEXT) 
rrddimvar.$(OBJEXT) \ - rrdfamily.$(OBJEXT) rrdhost.$(OBJEXT) rrdpush.$(OBJEXT) \ - rrdset.$(OBJEXT) rrdsetvar.$(OBJEXT) rrdvar.$(OBJEXT) \ - signals.$(OBJEXT) simple_pattern.$(OBJEXT) socket.$(OBJEXT) \ - statistical.$(OBJEXT) statsd.$(OBJEXT) \ - storage_number.$(OBJEXT) threads.$(OBJEXT) unit_test.$(OBJEXT) \ - url.$(OBJEXT) web_api_old.$(OBJEXT) web_api_v1.$(OBJEXT) \ - web_buffer.$(OBJEXT) web_buffer_svg.$(OBJEXT) \ - web_client.$(OBJEXT) web_server.$(OBJEXT) $(am__objects_3) \ - $(am__objects_4) $(am__objects_5) -netdata_OBJECTS = $(am_netdata_OBJECTS) -netdata_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ - $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles -am__mv = mv -f -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -AM_V_CC = $(am__v_CC_@AM_V@) -am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) -am__v_CC_0 = @echo " CC " $@; -am__v_CC_1 = -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -AM_V_CCLD = $(am__v_CCLD_@AM_V@) -am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) -am__v_CCLD_0 = @echo " CCLD " $@; -am__v_CCLD_1 = -SOURCES = $(apps_plugin_SOURCES) $(cgroup_network_SOURCES) \ - $(freeipmi_plugin_SOURCES) $(netdata_SOURCES) -DIST_SOURCES = $(am__apps_plugin_SOURCES_DIST) \ - $(cgroup_network_SOURCES) $(freeipmi_plugin_SOURCES) \ - $(am__netdata_SOURCES_DIST) -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -DATA = $(dist_cache_DATA) $(dist_log_DATA) $(dist_registry_DATA) \ - $(dist_varlib_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. 
-am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ 
-build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -AM_CPPFLAGS = \ - -DVARLIB_DIR="\"$(varlibdir)\"" \ - -DCACHE_DIR="\"$(cachedir)\"" \ - -DCONFIG_DIR="\"$(configdir)\"" \ - -DLOG_DIR="\"$(logdir)\"" \ - -DPLUGINS_DIR="\"$(pluginsdir)\"" \ - -DRUN_DIR="\"$(localstatedir)/run/netdata\"" \ - -DWEB_DIR="\"$(webdir)\"" \ - $(NULL) - -AM_CFLAGS = \ - $(OPTIONAL_MATH_CFLAGS) \ - $(OPTIONAL_NFACCT_CLFAGS) \ - $(OPTIONAL_ZLIB_CFLAGS) \ - $(OPTIONAL_UUID_CFLAGS) \ - $(OPTIONAL_LIBCAP_CFLAGS) \ - $(OPTIONAL_IPMIMONITORING_CFLAGS)\ - $(NULL) - -dist_cache_DATA = .keep -dist_varlib_DATA = .keep -dist_registry_DATA = .keep -dist_log_DATA = .keep -netdata_SOURCES = adaptive_resortable_list.c \ - adaptive_resortable_list.h appconfig.c appconfig.h avl.c avl.h \ - backend_prometheus.c backend_prometheus.h backends.c \ - backends.h clocks.c clocks.h common.c common.h daemon.c \ - daemon.h dictionary.c dictionary.h eval.c eval.h \ - global_statistics.c global_statistics.h health.c health.h \ - health_config.c health_json.c health_log.c inlined.h locks.c \ - locks.h log.c log.h main.c main.h plugin_checks.c \ - plugin_checks.h plugin_idlejitter.c plugin_idlejitter.h \ - plugins_d.c plugins_d.h popen.c popen.h procfile.c procfile.h \ - registry.c registry.h registry_db.c registry_init.c \ - registry_internals.c registry_internals.h registry_log.c \ - registry_machine.c registry_machine.h registry_person.c \ - registry_person.h registry_url.c registry_url.h rrd.c rrd.h \ - rrd2json.c rrd2json.h rrd2json_api_old.c rrd2json_api_old.h \ - rrdcalc.c rrdcalctemplate.c rrddim.c rrddimvar.c rrdfamily.c \ - rrdhost.c rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c \ - signals.c signals.h simple_pattern.c simple_pattern.h socket.c \ - socket.h statistical.c statistical.h statsd.c statsd.h \ - storage_number.c storage_number.h threads.c threads.h \ - unit_test.c unit_test.h url.c url.h web_api_old.c \ - web_api_old.h web_api_v1.c web_api_v1.h web_buffer.c \ - web_buffer.h web_buffer_svg.c web_buffer_svg.h web_client.c \ - web_client.h web_server.c web_server.h $(NULL) $(am__append_4) \ - $(am__append_5) $(am__append_6) -netdata_LDADD = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_NFACCT_LIBS) \ - $(OPTIONAL_ZLIB_LIBS) \ - $(OPTIONAL_UUID_LIBS) \ - $(NULL) - -apps_plugin_SOURCES = 
apps_plugin.c avl.c avl.h clocks.c clocks.h \ - common.c common.h inlined.h locks.c locks.h log.c log.h \ - procfile.c procfile.h threads.c threads.h web_buffer.c \ - web_buffer.h $(NULL) $(am__append_7) $(am__append_8) -apps_plugin_LDADD = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(NULL) - -freeipmi_plugin_SOURCES = \ - freeipmi_plugin.c \ - clocks.c \ - clocks.h \ - common.c \ - common.h \ - inlined.h \ - locks.c \ - locks.h \ - log.c log.h \ - procfile.c \ - procfile.h \ - threads.c \ - threads.h \ - $(NULL) - -freeipmi_plugin_LDADD = \ - $(OPTIONAL_IPMIMONITORING_LIBS) \ - $(NULL) - -cgroup_network_SOURCES = \ - cgroup-network.c \ - clocks.c \ - clocks.h \ - common.c \ - common.h \ - inlined.h \ - locks.c \ - locks.h \ - log.c \ - log.h \ - procfile.c \ - procfile.h \ - popen.c \ - popen.h \ - signals.c \ - signals.h \ - threads.c \ - threads.h \ - $(NULL) - -cgroup_network_LDADD = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .c .o .obj -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu src/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-pluginsPROGRAMS: $(plugins_PROGRAMS) - @$(NORMAL_INSTALL) - @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do echo "$$p $$p"; done | \ - sed 's/$(EXEEXT)$$//' | \ - while read p p1; do if test -f $$p \ - ; then echo "$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n;h' \ - -e 's|.*|.|' \ - -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ - sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) files[d] = files[d] " " $$1; \ - else { print "f", $$3 "/" $$4, $$1; } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-pluginsPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(plugins_PROGRAMS)'; test 
-n "$(pluginsdir)" || list=; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ - -e 's/$$/$(EXEEXT)/' \ - `; \ - test -n "$$list" || exit 0; \ - echo " ( cd '$(DESTDIR)$(pluginsdir)' && rm -f" $$files ")"; \ - cd "$(DESTDIR)$(pluginsdir)" && rm -f $$files - -clean-pluginsPROGRAMS: - -test -z "$(plugins_PROGRAMS)" || rm -f $(plugins_PROGRAMS) -install-sbinPROGRAMS: $(sbin_PROGRAMS) - @$(NORMAL_INSTALL) - @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \ - fi; \ - for p in $$list; do echo "$$p $$p"; done | \ - sed 's/$(EXEEXT)$$//' | \ - while read p p1; do if test -f $$p \ - ; then echo "$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n;h' \ - -e 's|.*|.|' \ - -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ - sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) files[d] = files[d] " " $$1; \ - else { print "f", $$3 "/" $$4, $$1; } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ - $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-sbinPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ - -e 's/$$/$(EXEEXT)/' \ - `; \ - test -n "$$list" || exit 0; \ - echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ - cd "$(DESTDIR)$(sbindir)" && rm -f $$files - -clean-sbinPROGRAMS: - -test -z "$(sbin_PROGRAMS)" || rm -f $(sbin_PROGRAMS) - -apps.plugin$(EXEEXT): $(apps_plugin_OBJECTS) $(apps_plugin_DEPENDENCIES) $(EXTRA_apps_plugin_DEPENDENCIES) - @rm -f apps.plugin$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS) - -cgroup-network$(EXEEXT): $(cgroup_network_OBJECTS) $(cgroup_network_DEPENDENCIES) $(EXTRA_cgroup_network_DEPENDENCIES) - @rm -f cgroup-network$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(cgroup_network_OBJECTS) $(cgroup_network_LDADD) $(LIBS) - -freeipmi.plugin$(EXEEXT): $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_DEPENDENCIES) $(EXTRA_freeipmi_plugin_DEPENDENCIES) - @rm -f freeipmi.plugin$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS) - -netdata$(EXEEXT): $(netdata_OBJECTS) $(netdata_DEPENDENCIES) $(EXTRA_netdata_DEPENDENCIES) - @rm -f netdata$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(netdata_OBJECTS) $(netdata_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/adaptive_resortable_list.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appconfig.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/apps_plugin.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/avl.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backend_prometheus.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backends.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cgroup-network.Po@am__quote@ -@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/clocks.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/daemon.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dictionary.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eval.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_devstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_getifaddrs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_getmntinfo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_ipfw.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_kstat_zfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_sysctl.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freeipmi_plugin.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/global_statistics.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/health.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/health_config.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/health_json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/health_log.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ipc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/locks.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/log.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/macos_fw.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/macos_mach_smi.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/macos_sysctl.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/main.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_checks.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_freebsd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_idlejitter.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_macos.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_nfacct.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_proc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_proc_diskspace.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin_tc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugins_d.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/popen.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_diskstats.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_interrupts.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_loadavg.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_meminfo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_dev.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_ip_vs_stats.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_netstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_rpc_nfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_rpc_nfsd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_snmp.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_snmp6.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_sockstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/proc_net_sockstat6.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_softnet_stat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_stat_conntrack.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_stat_synproxy.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_self_mountinfo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_softirqs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_spl_kstat_zfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_stat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_sys_kernel_random_entropy_avail.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_uptime.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_vmstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/procfile.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_db.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_init.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_internals.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_log.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_machine.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_person.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/registry_url.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrd2json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrd2json_api_old.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdcalc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdcalctemplate.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrddim.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrddimvar.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdfamily.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdhost.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdpush.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdset.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdsetvar.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdvar.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signals.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple_pattern.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/socket.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/statistical.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/statsd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/storage_number.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_devices_system_edac_mc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_devices_system_node.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_fs_btrfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_fs_cgroup.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_kernel_mm_ksm.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/threads.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unit_test.Po@am__quote@ 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/url.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_api_old.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_api_v1.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_buffer.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_buffer_svg.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_client.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_server.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/zfs_common.Po@am__quote@ - -.c.o: -@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< -@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< - -.c.obj: -@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` -@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` -install-dist_cacheDATA: $(dist_cache_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(cachedir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(cachedir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(cachedir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(cachedir)" || exit $$?; \ - done - -uninstall-dist_cacheDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(cachedir)'; $(am__uninstall_files_from_dir) -install-dist_logDATA: $(dist_log_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(logdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(logdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(logdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(logdir)" || exit $$?; \ - done - -uninstall-dist_logDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(logdir)'; $(am__uninstall_files_from_dir) -install-dist_registryDATA: $(dist_registry_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(registrydir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(registrydir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files 
'$(DESTDIR)$(registrydir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(registrydir)" || exit $$?; \ - done - -uninstall-dist_registryDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(registrydir)'; $(am__uninstall_files_from_dir) -install-dist_varlibDATA: $(dist_varlib_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(varlibdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(varlibdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(varlibdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(varlibdir)" || exit $$?; \ - done - -uninstall-dist_varlibDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(varlibdir)'; $(am__uninstall_files_from_dir) - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-am -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-am - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-am - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(PROGRAMS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic clean-pluginsPROGRAMS clean-sbinPROGRAMS \ - mostlyclean-am - -distclean: distclean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-tags - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_cacheDATA install-dist_logDATA \ - install-dist_registryDATA install-dist_varlibDATA \ - install-pluginsPROGRAMS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: install-sbinPROGRAMS - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_cacheDATA uninstall-dist_logDATA \ - uninstall-dist_registryDATA uninstall-dist_varlibDATA \ - uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS - -.MAKE: install-am install-strip - -.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ - clean-pluginsPROGRAMS clean-sbinPROGRAMS cscopelist-am ctags \ - ctags-am distclean distclean-compile distclean-generic \ - distclean-tags distdir dvi dvi-am html html-am info info-am \ - install install-am install-data install-data-am \ - install-dist_cacheDATA install-dist_logDATA \ - install-dist_registryDATA install-dist_varlibDATA install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-pluginsPROGRAMS install-ps \ - install-ps-am 
install-sbinPROGRAMS install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \ - uninstall-am uninstall-dist_cacheDATA uninstall-dist_logDATA \ - uninstall-dist_registryDATA uninstall-dist_varlibDATA \ - uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/src/adaptive_resortable_list.c b/src/adaptive_resortable_list.c deleted file mode 100644 index c564efff3..000000000 --- a/src/adaptive_resortable_list.c +++ /dev/null @@ -1,269 +0,0 @@ -#include "adaptive_resortable_list.h" - -// the default processor() of the ARL -// can be overwritten at arl_create() -inline void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; - (void)hash; - - register unsigned long long *d = dst; - *d = str2ull(value); - // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d); -} - -inline void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; - (void)hash; - - register kernel_uint_t *d = dst; - *d = str2kernel_uint_t(value); - // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, (unsigned long long)*d); -} - -// create a new ARL -ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks) { - ARL_BASE *base = callocz(1, sizeof(ARL_BASE)); - - base->name = strdupz(name); - - if(!processor) - base->processor = arl_callback_str2ull; - else - base->processor = processor; - - base->rechecks = rechecks; - - return base; -} - -void arl_free(ARL_BASE *arl_base) { - if(unlikely(!arl_base)) - return; - - while(arl_base->head) { - ARL_ENTRY *e = arl_base->head; - arl_base->head = e->next; - - freez(e->name); -#ifdef NETDATA_INTERNAL_CHECKS - memset(e, 0, sizeof(ARL_ENTRY)); -#endif - freez(e); - } - - freez(arl_base->name); - -#ifdef NETDATA_INTERNAL_CHECKS - memset(arl_base, 0, sizeof(ARL_BASE)); -#endif - - freez(arl_base); -} - -void arl_begin(ARL_BASE *base) { - -#ifdef NETDATA_INTERNAL_CHECKS - if(likely(base->iteration > 10)) { - // do these checks after the ARL has been sorted - - if(unlikely(base->relinkings > (base->expected + base->allocated))) - info("ARL '%s' has %zu relinkings with %zu expected and %zu allocated entries. Is the source changing so fast?" - , base->name, base->relinkings, base->expected, base->allocated); - - if(unlikely(base->slow > base->fast)) - info("ARL '%s' has %zu fast searches and %zu slow searches. Is the source really changing so fast?" 
- , base->name, base->fast, base->slow); - - /* - if(unlikely(base->iteration % 60 == 0)) { - info("ARL '%s' statistics: iteration %zu, expected %zu, wanted %zu, allocated %zu, fred %zu, relinkings %zu, found %zu, added %zu, fast %zu, slow %zu" - , base->name - , base->iteration - , base->expected - , base->wanted - , base->allocated - , base->fred - , base->relinkings - , base->found - , base->added - , base->fast - , base->slow - ); - // for(e = base->head; e; e = e->next) fprintf(stderr, "%s ", e->name); - // fprintf(stderr, "\n"); - } - */ - } -#endif - - if(unlikely(base->iteration > 0 && (base->added || (base->iteration % base->rechecks) == 0))) { - int wanted_equals_expected = ((base->iteration % base->rechecks) == 0); - - // fprintf(stderr, "\n\narl_begin() rechecking, added %zu, iteration %zu, rechecks %zu, wanted_equals_expected %d\n\n\n", base->added, base->iteration, base->rechecks, wanted_equals_expected); - - base->added = 0; - base->wanted = (wanted_equals_expected)?base->expected:0; - - ARL_ENTRY *e = base->head; - while(e) { - if(e->flags & ARL_ENTRY_FLAG_FOUND) { - - // remove the found flag - e->flags &= ~ARL_ENTRY_FLAG_FOUND; - - // count it in wanted - if(!wanted_equals_expected && e->flags & ARL_ENTRY_FLAG_EXPECTED) - base->wanted++; - - } - else if(e->flags & ARL_ENTRY_FLAG_DYNAMIC && !(base->head == e && !e->next)) { // not last entry - // we can remove this entry - // it is not found, and it was created because - // it was found in the source file - - // remember the next one - ARL_ENTRY *t = e->next; - - // remove it from the list - if(e->next) e->next->prev = e->prev; - if(e->prev) e->prev->next = e->next; - if(base->head == e) base->head = e->next; - - // free it - freez(e->name); - freez(e); - - // count it - base->fred++; - - // continue - e = t; - continue; - } - - e = e->next; - } - } - - if(unlikely(!base->head)) { - // hm... no nodes at all in the list #1700 - // add a fake one to prevent a crash - // this is better than checking for the existence of nodes all the time - arl_expect(base, "a-really-not-existing-source-keyword", NULL); - } - - base->iteration++; - base->next_keyword = base->head; - base->found = 0; - -} - -// register an expected keyword to the ARL -// together with its destination ( i.e. 
the output of the processor() ) -ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst) { - ARL_ENTRY *e = callocz(1, sizeof(ARL_ENTRY)); - e->name = strdupz(keyword); - e->hash = simple_hash(e->name); - e->processor = (processor)?processor:base->processor; - e->dst = dst; - e->flags = ARL_ENTRY_FLAG_EXPECTED; - e->prev = NULL; - e->next = base->head; - - if(base->head) base->head->prev = e; - else base->next_keyword = e; - - base->head = e; - base->expected++; - base->allocated++; - - base->wanted = base->expected; - - return e; -} - -int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value) { - ARL_ENTRY *e; - - uint32_t hash = simple_hash(s); - - // find if it already exists in the data - for(e = base->head; e ; e = e->next) - if(e->hash == hash && !strcmp(e->name, s)) - break; - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(base->next_keyword && e == base->next_keyword)) - fatal("Internal Error: e == base->last"); -#endif - - if(e) { - // found it in the keywords - - base->relinkings++; - - // run the processor for it - if(unlikely(e->dst)) { - e->processor(e->name, hash, value, e->dst); - base->found++; - } - - // unlink it - we will relink it below - if(e->next) e->next->prev = e->prev; - if(e->prev) e->prev->next = e->next; - - // make sure the head is properly linked - if(base->head == e) - base->head = e->next; - } - else { - // not found - - // create it - e = callocz(1, sizeof(ARL_ENTRY)); - e->name = strdupz(s); - e->hash = hash; - e->flags = ARL_ENTRY_FLAG_DYNAMIC; - - base->allocated++; - base->added++; - } - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(base->iteration % 60 == 0 && e->flags & ARL_ENTRY_FLAG_FOUND)) - info("ARL '%s': entry '%s' is already found. Did you forget to call arl_begin()?", base->name, s); -#endif - - e->flags |= ARL_ENTRY_FLAG_FOUND; - - // link it here - e->next = base->next_keyword; - if(base->next_keyword) { - e->prev = base->next_keyword->prev; - base->next_keyword->prev = e; - - if(e->prev) - e->prev->next = e; - - if(base->head == base->next_keyword) - base->head = e; - } - else { - e->prev = NULL; - - if(!base->head) - base->head = e; - } - - // prepare the next iteration - base->next_keyword = e->next; - if(unlikely(!base->next_keyword)) - base->next_keyword = base->head; - - if(unlikely(base->found == base->wanted)) { - // fprintf(stderr, "FOUND ALL WANTED 1: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected); - return 1; - } - - return 0; -} diff --git a/src/adaptive_resortable_list.h b/src/adaptive_resortable_list.h deleted file mode 100644 index d05a8ede7..000000000 --- a/src/adaptive_resortable_list.h +++ /dev/null @@ -1,171 +0,0 @@ -#ifndef NETDATA_ADAPTIVE_RESORTABLE_LIST_H -#define NETDATA_ADAPTIVE_RESORTABLE_LIST_H - -/* - * ADAPTIVE RE-SORTABLE LIST - * This structure allows netdata to read a file of NAME VALUE lines - * in the fastest possible way. - * - * It maintains a linked list of all NAME (keywords), sorted in the - * same order as found in the source data file. - * The linked list is kept sorted at all times - the source file - * may change at any time, the list will adapt. - * - * The caller: - * - * 1. calls arl_create() to create a list - * - * 2. calls arl_expect() to register the expected keyword - * - * Then: - * - * 3. calls arl_begin() to initiate a data collection iteration. - * This is to be called just ONCE every time the source is re-scanned. 
- * - * 4. calls arl_check() for each line read from the file. - * - * Finally: - * - * 5. calls arl_free() to destroy this and free all memory. - * - * The program will call the processor() function, given to - * arl_create(), for each expected keyword found. - * The default processor() expects dst to be an unsigned long long *. - * - * LIMITATIONS - * DO NOT USE THIS IF A NAME/KEYWORD MAY APPEAR MORE THAN - * ONCE IN THE SOURCE DATA SET. - */ - -#include "common.h" - -#define ARL_ENTRY_FLAG_FOUND 0x01 // the entry has been found in the source data -#define ARL_ENTRY_FLAG_EXPECTED 0x02 // the entry is expected by the program -#define ARL_ENTRY_FLAG_DYNAMIC 0x04 // the entry was dynamically allocated, from source data - -typedef struct arl_entry { - char *name; // the keywords - uint32_t hash; // the hash of the keyword - - void *dst; // the dst to pass to the processor - - uint8_t flags; // ARL_ENTRY_FLAG_* - - // the processor to do the job - void (*processor)(const char *name, uint32_t hash, const char *value, void *dst); - - // double linked list for fast re-linkings - struct arl_entry *prev, *next; -} ARL_ENTRY; - -typedef struct arl_base { - char *name; - - size_t iteration; // incremented on each iteration (arl_begin()) - size_t found; // the number of expected keywords found in this iteration - size_t expected; // the number of expected keywords - size_t wanted; // the number of wanted keywords - // i.e. the number of keywords found and expected - - size_t relinkings; // the number of relinkings we have made so far - - size_t allocated; // the number of keywords allocated - size_t fred; // the number of keywords cleaned up - - size_t rechecks; // the number of iterations between re-checks of the - // wanted number of keywords - // this is only needed in cases where the source - // has fewer lines over time. - - size_t added; // it is non-zero if new keywords have been added - // this is only needed to detect new lines have - // been added to the file, over time. - -#ifdef NETDATA_INTERNAL_CHECKS - size_t fast; // the number of times we have taken the fast path - size_t slow; // the number of times we have taken the slow path -#endif - - // the processor to do the job - void (*processor)(const char *name, uint32_t hash, const char *value, void *dst); - - // the linked list of the keywords - ARL_ENTRY *head; - - // since we keep the list of keywords sorted (as found in the source data) - // this is the next keyword that we expect to find in the source data. - ARL_ENTRY *next_keyword; -} ARL_BASE; - -// create a new ARL -extern ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks); - -// free an ARL -extern void arl_free(ARL_BASE *arl_base); - -// register an expected keyword to the ARL -// together with its destination ( i.e. 
the output of the processor() ) -extern ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst); -#define arl_expect(base, keyword, dst) arl_expect_custom(base, keyword, NULL, dst) - -// an internal call to complete the check() call -extern int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value); - -// begin an ARL iteration -extern void arl_begin(ARL_BASE *base); - -extern void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst); -extern void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst); - -// check a keyword against the ARL -// this is to be called for each keyword read from source data -// s = the keyword, as collected -// src = the src data to be passed to the processor -// it is defined in the header file in order to be inlined -static inline int arl_check(ARL_BASE *base, const char *keyword, const char *value) { - ARL_ENTRY *e = base->next_keyword; - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely((base->fast + base->slow) % (base->expected + base->allocated) == 0 && (base->fast + base->slow) > (base->expected + base->allocated) * base->iteration)) - info("ARL '%s': Did you forget to call arl_begin()?", base->name); -#endif - - // it should be the first entry (pointed by base->next_keyword) - if(likely(!strcmp(keyword, e->name))) { - // it is - -#ifdef NETDATA_INTERNAL_CHECKS - base->fast++; -#endif - - e->flags |= ARL_ENTRY_FLAG_FOUND; - - // execute the processor - if(unlikely(e->dst)) { - e->processor(e->name, e->hash, value, e->dst); - base->found++; - } - - // be prepared for the next iteration - base->next_keyword = e->next; - if(unlikely(!base->next_keyword)) - base->next_keyword = base->head; - - // stop if we collected all the values for this iteration - if(unlikely(base->found == base->wanted)) { - // fprintf(stderr, "FOUND ALL WANTED 2: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected); - return 1; - } - - return 0; - } - -#ifdef NETDATA_INTERNAL_CHECKS - base->slow++; -#endif - - // we read from source, a not-expected keyword - return arl_find_or_create_and_relink(base, keyword, value); -} - -#endif //NETDATA_ADAPTIVE_RESORTABLE_LIST_H diff --git a/src/appconfig.c b/src/appconfig.c deleted file mode 100644 index 2424864b5..000000000 --- a/src/appconfig.c +++ /dev/null @@ -1,606 +0,0 @@ -#include "common.h" - -#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2) - -// ---------------------------------------------------------------------------- -// definitions - -#define CONFIG_VALUE_LOADED 0x01 // has been loaded from the config -#define CONFIG_VALUE_USED 0x02 // has been accessed from the program -#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value -#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default - -struct config_option { - avl avl; // the index - this has to be first! 
- - uint8_t flags; - uint32_t hash; // a simple hash to speed up searching - // we first compare hashes, and only if the hashes are equal we do string comparisons - - char *name; - char *value; - - struct config_option *next; // config->mutex protects just this -}; - -struct section { - avl avl; - - uint32_t hash; // a simple hash to speed up searching - // we first compare hashes, and only if the hashes are equal we do string comparisons - - char *name; - - struct section *next; // global config_mutex protects just this - - struct config_option *values; - avl_tree_lock values_index; - - netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates - // readers are protected using the rwlock in avl_tree_lock -}; - -static int appconfig_section_compare(void *a, void *b); - -struct config netdata_config = { - .sections = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - { NULL, appconfig_section_compare }, - AVL_LOCK_INITIALIZER - } -}; - -struct config stream_config = { - .sections = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - { NULL, appconfig_section_compare }, - AVL_LOCK_INITIALIZER - } -}; - -// ---------------------------------------------------------------------------- -// locking - -static inline void appconfig_wrlock(struct config *root) { - netdata_mutex_lock(&root->mutex); -} - -static inline void appconfig_unlock(struct config *root) { - netdata_mutex_unlock(&root->mutex); -} - -static inline void config_section_wrlock(struct section *co) { - netdata_mutex_lock(&co->mutex); -} - -static inline void config_section_unlock(struct section *co) { - netdata_mutex_unlock(&co->mutex); -} - - -// ---------------------------------------------------------------------------- -// config name-value index - -static int appconfig_option_compare(void *a, void *b) { - if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1; - else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1; - else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name); -} - -#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl *)(cv)) -#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl *)(cv)) - -static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) { - struct config_option tmp; - tmp.hash = (hash)?hash:simple_hash(name); - tmp.name = (char *)name; - - return (struct config_option *)avl_search_lock(&(co->values_index), (avl *) &tmp); -} - - -// ---------------------------------------------------------------------------- -// config sections index - -static int appconfig_section_compare(void *a, void *b) { - if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1; - else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1; - else return strcmp(((struct section *)a)->name, ((struct section *)b)->name); -} - -#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl *)(cfg)) -#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl *)(cfg)) - -static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) { - struct section tmp; - tmp.hash = (hash)?hash:simple_hash(name); - tmp.name = (char *)name; - - return (struct section *)avl_search_lock(&root->index, (avl *) &tmp); -} - - -// 
---------------------------------------------------------------------------- -// config section methods - -static inline struct section *appconfig_section_find(struct config *root, const char *section) { - return appconfig_index_find(root, section, 0); -} - -static inline struct section *appconfig_section_create(struct config *root, const char *section) { - debug(D_CONFIG, "Creating section '%s'.", section); - - struct section *co = callocz(1, sizeof(struct section)); - co->name = strdupz(section); - co->hash = simple_hash(co->name); - netdata_mutex_init(&co->mutex); - - avl_init_lock(&co->values_index, appconfig_option_compare); - - if(unlikely(appconfig_index_add(root, co) != co)) - error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name); - - appconfig_wrlock(root); - struct section *co2 = root->sections; - if(co2) { - while (co2->next) co2 = co2->next; - co2->next = co; - } - else root->sections = co; - appconfig_unlock(root); - - return co; -} - - -// ---------------------------------------------------------------------------- -// config name-value methods - -static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) { - debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name); - - struct config_option *cv = callocz(1, sizeof(struct config_option)); - cv->name = strdupz(name); - cv->hash = simple_hash(cv->name); - cv->value = strdupz(value); - - struct config_option *found = appconfig_option_index_add(co, cv); - if(found != cv) { - error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name); - freez(cv->value); - freez(cv->name); - freez(cv); - return found; - } - - config_section_wrlock(co); - struct config_option *cv2 = co->values; - if(cv2) { - while (cv2->next) cv2 = cv2->next; - cv2->next = cv; - } - else co->values = cv; - config_section_unlock(co); - - return cv; -} - -int appconfig_exists(struct config *root, const char *section, const char *name) { - struct config_option *cv; - - debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name); - - struct section *co = appconfig_section_find(root, section); - if(!co) return 0; - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) return 0; - - return 1; -} - -int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) { - struct config_option *cv_old, *cv_new; - int ret = -1; - - debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new); - - struct section *co_old = appconfig_section_find(root, section_old); - if(!co_old) return ret; - - struct section *co_new = appconfig_section_find(root, section_new); - if(!co_new) co_new = appconfig_section_create(root, section_new); - - config_section_wrlock(co_old); - if(co_old != co_new) - config_section_wrlock(co_new); - - cv_old = appconfig_option_index_find(co_old, name_old, 0); - if(!cv_old) goto cleanup; - - cv_new = appconfig_option_index_find(co_new, name_new, 0); - if(cv_new) goto cleanup; - - if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old)) - error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", cv_old->name, co_old->name); - - if(co_old->values == cv_old) { - co_old->values = cv_old->next; - } - else { - struct config_option *t; - for(t = 
co_old->values; t && t->next != cv_old ;t = t->next) ; - if(!t || t->next != cv_old) - error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name); - else - t->next = cv_old->next; - } - - freez(cv_old->name); - cv_old->name = strdupz(name_new); - cv_old->hash = simple_hash(cv_old->name); - - cv_new = cv_old; - cv_new->next = co_new->values; - co_new->values = cv_new; - - if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old)) - error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name); - - ret = 0; - -cleanup: - if(co_old != co_new) - config_section_unlock(co_new); - config_section_unlock(co_old); - return ret; -} - -char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value) -{ - struct config_option *cv; - - debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value); - - struct section *co = appconfig_section_find(root, section); - if(!co) co = appconfig_section_create(root, section); - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) { - cv = appconfig_value_create(co, name, default_value); - if(!cv) return NULL; - } - cv->flags |= CONFIG_VALUE_USED; - - if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) { - // this is a loaded value from the config file - // if it is different from the default, mark it - if(!(cv->flags & CONFIG_VALUE_CHECKED)) { - if(strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED; - cv->flags |= CONFIG_VALUE_CHECKED; - } - } - - return(cv->value); -} - -long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value) -{ - char buffer[100], *s; - sprintf(buffer, "%lld", value); - - s = appconfig_get(root, section, name, buffer); - if(!s) return value; - - return strtoll(s, NULL, 0); -} - -LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value) -{ - char buffer[100], *s; - sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value); - - s = appconfig_get(root, section, name, buffer); - if(!s) return value; - - return str2ld(s, NULL); -} - -int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value) -{ - char *s; - if(value) s = "yes"; - else s = "no"; - - s = appconfig_get(root, section, name, s); - if(!s) return value; - - if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand")) return 1; - return 0; -} - -int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value) -{ - char *s; - - if(value == CONFIG_BOOLEAN_AUTO) - s = "auto"; - - else if(value == CONFIG_BOOLEAN_NO) - s = "no"; - - else - s = "yes"; - - s = appconfig_get(root, section, name, s); - if(!s) return value; - - if(!strcmp(s, "yes")) - return CONFIG_BOOLEAN_YES; - else if(!strcmp(s, "no")) - return CONFIG_BOOLEAN_NO; - else if(!strcmp(s, "auto") || !strcmp(s, "on demand")) - return CONFIG_BOOLEAN_AUTO; - - return value; -} - -const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value) -{ - struct config_option *cv; - - debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value); - - struct section *co = appconfig_section_find(root, section); - if(!co) return 
appconfig_set(root, section, name, value); - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) return appconfig_set(root, section, name, value); - - cv->flags |= CONFIG_VALUE_USED; - - if(cv->flags & CONFIG_VALUE_LOADED) - return cv->value; - - if(strcmp(cv->value, value) != 0) { - cv->flags |= CONFIG_VALUE_CHANGED; - - freez(cv->value); - cv->value = strdupz(value); - } - - return cv->value; -} - -const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value) -{ - struct config_option *cv; - - debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value); - - struct section *co = appconfig_section_find(root, section); - if(!co) co = appconfig_section_create(root, section); - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) cv = appconfig_value_create(co, name, value); - cv->flags |= CONFIG_VALUE_USED; - - if(strcmp(cv->value, value) != 0) { - cv->flags |= CONFIG_VALUE_CHANGED; - - freez(cv->value); - cv->value = strdupz(value); - } - - return value; -} - -long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value) -{ - char buffer[100]; - sprintf(buffer, "%lld", value); - - appconfig_set(root, section, name, buffer); - - return value; -} - -LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value) -{ - char buffer[100]; - sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value); - - appconfig_set(root, section, name, buffer); - - return value; -} - -int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value) -{ - char *s; - if(value) s = "yes"; - else s = "no"; - - appconfig_set(root, section, name, s); - - return value; -} - - -// ---------------------------------------------------------------------------- -// config load/save - -int appconfig_load(struct config *root, char *filename, int overwrite_used) -{ - int line = 0; - struct section *co = NULL; - - char buffer[CONFIG_FILE_LINE_MAX + 1], *s; - - if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME; - - debug(D_CONFIG, "CONFIG: opening config file '%s'", filename); - - FILE *fp = fopen(filename, "r"); - if(!fp) { - error("CONFIG: cannot open file '%s'", filename); - return 0; - } - - while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) { - buffer[CONFIG_FILE_LINE_MAX] = '\0'; - line++; - - s = trim(buffer); - if(!s || *s == '#') { - debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename); - continue; - } - - int len = (int) strlen(s); - if(*s == '[' && s[len - 1] == ']') { - // new section - s[len - 1] = '\0'; - s++; - - co = appconfig_section_find(root, s); - if(!co) co = appconfig_section_create(root, s); - - continue; - } - - if(!co) { - // line outside a section - error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename); - continue; - } - - char *name = s; - char *value = strchr(s, '='); - if(!value) { - error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename); - continue; - } - *value = '\0'; - value++; - - name = trim(name); - value = trim(value); - - if(!name || *name == '#') { - error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename); - continue; - } - if(!value) { - debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', value is empty.", line, filename); - continue; - } - - struct config_option *cv = appconfig_option_index_find(co, name, 0); - - if(!cv) 
cv = appconfig_value_create(co, name, value); - else { - if(((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) { - debug(D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name); - freez(cv->value); - cv->value = strdupz(value); - } - else - debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.", line, filename, co->name, cv->name); - } - cv->flags |= CONFIG_VALUE_LOADED; - } - - fclose(fp); - - return 1; -} - -void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) -{ - int i, pri; - struct section *co; - struct config_option *cv; - - for(i = 0; i < 3 ;i++) { - switch(i) { - case 0: - buffer_strcat(wb, - "# netdata configuration\n" - "#\n" - "# You can download the latest version of this file, using:\n" - "#\n" - "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" - "# or\n" - "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" - "#\n" - "# You can uncomment and change any of the options below.\n" - "# The value shown in the commented settings, is the default value.\n" - "#\n" - "\n# global netdata configuration\n"); - break; - - case 1: - buffer_strcat(wb, "\n\n# per plugin configuration\n"); - break; - - case 2: - buffer_strcat(wb, "\n\n# per chart configuration\n"); - break; - } - - appconfig_wrlock(root); - for(co = root->sections; co ; co = co->next) { - if(!strcmp(co->name, CONFIG_SECTION_GLOBAL) - || !strcmp(co->name, CONFIG_SECTION_WEB) - || !strcmp(co->name, CONFIG_SECTION_STATSD) - || !strcmp(co->name, CONFIG_SECTION_PLUGINS) - || !strcmp(co->name, CONFIG_SECTION_REGISTRY) - || !strcmp(co->name, CONFIG_SECTION_HEALTH) - || !strcmp(co->name, CONFIG_SECTION_BACKEND) - || !strcmp(co->name, CONFIG_SECTION_STREAM) - ) - pri = 0; - else if(!strncmp(co->name, "plugin:", 7)) pri = 1; - else pri = 2; - - if(i == pri) { - int loaded = 0; - int used = 0; - int changed = 0; - int count = 0; - - config_section_wrlock(co); - for(cv = co->values; cv ; cv = cv->next) { - used += (cv->flags & CONFIG_VALUE_USED)?1:0; - loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0; - changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0; - count++; - } - config_section_unlock(co); - - if(!count) continue; - if(only_changed && !changed && !loaded) continue; - - if(!used) { - buffer_sprintf(wb, "\n# section '%s' is not used.", co->name); - } - - buffer_sprintf(wb, "\n[%s]\n", co->name); - - config_section_wrlock(co); - for(cv = co->values; cv ; cv = cv->next) { - - if(used && !(cv->flags & CONFIG_VALUE_USED)) { - buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name); - } - buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value); - } - config_section_unlock(co); - } - } - appconfig_unlock(root); - } -} diff --git a/src/appconfig.h b/src/appconfig.h deleted file mode 100644 index 7d056e6be..000000000 --- a/src/appconfig.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef NETDATA_CONFIG_H -#define NETDATA_CONFIG_H 1 - -#define CONFIG_FILENAME "netdata.conf" - -#define CONFIG_SECTION_GLOBAL "global" -#define CONFIG_SECTION_WEB "web" -#define CONFIG_SECTION_STATSD "statsd" -#define CONFIG_SECTION_PLUGINS "plugins" -#define CONFIG_SECTION_REGISTRY "registry" -#define CONFIG_SECTION_HEALTH "health" -#define CONFIG_SECTION_BACKEND "backend" -#define CONFIG_SECTION_STREAM "stream" - -// these are used to limit the 
configuration names and values lengths -// they are not enforced by config.c functions (they will strdup() all strings, no matter of their length) -#define CONFIG_MAX_NAME 1024 -#define CONFIG_MAX_VALUE 2048 - -struct config { - struct section *sections; - netdata_mutex_t mutex; - avl_tree_lock index; -}; - -extern struct config - netdata_config, - stream_config; - -#define CONFIG_BOOLEAN_NO 0 -#define CONFIG_BOOLEAN_YES 1 -#define CONFIG_BOOLEAN_AUTO 2 - -extern int appconfig_load(struct config *root, char *filename, int overwrite_used); - -extern char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value); -extern long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value); -extern LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value); -extern int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value); -extern int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value); - -extern const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value); -extern const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value); -extern long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value); -extern LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value); -extern int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value); - -extern int appconfig_exists(struct config *root, const char *section, const char *name); -extern int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new); - -extern void appconfig_generate(struct config *root, BUFFER *wb, int only_changed); - -// ---------------------------------------------------------------------------- -// shortcuts for the default netdata configuration - -#define config_load(filename, overwrite_used) appconfig_load(&netdata_config, filename, overwrite_used) -#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value) -#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value) -#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value) -#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value) -#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value) - -#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value) -#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value) -#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value) -#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value) -#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value) - -#define config_exists(section, name) appconfig_exists(&netdata_config, section, name) -#define config_move(section_old, name_old, section_new, name_new) 
appconfig_move(&netdata_config, section_old, name_old, section_new, name_new) - -#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed) - -#endif /* NETDATA_CONFIG_H */ diff --git a/src/apps_plugin.c b/src/apps_plugin.c deleted file mode 100644 index 8595da6c2..000000000 --- a/src/apps_plugin.c +++ /dev/null @@ -1,3639 +0,0 @@ - -/* - * netdata apps.plugin - * (C) Copyright 2016-2017 Costa Tsaousis <costa@tsaousis.gr> - * Released under GPL v3+ - */ - -#include "common.h" - -#ifdef __FreeBSD__ -#include <sys/user.h> -#endif - -// ---------------------------------------------------------------------------- -// per O/S configuration - -// the minimum PID of the system -// this is also the pid of the init process -#define INIT_PID 1 - -// if apps.plugin is to read the entire process list, -// including the resource utilization of each process, instantly, -// set this to 1 -// when set to 0, apps.plugin builds a sorted list of processes, in order -// to process child processes before parent processes -#ifdef __FreeBSD__ -#define ALL_PIDS_ARE_READ_INSTANTLY 1 -#else -#define ALL_PIDS_ARE_READ_INSTANTLY 0 -#endif - -// ---------------------------------------------------------------------------- -// string lengths - -#define MAX_COMPARE_NAME 100 -#define MAX_NAME 100 -#define MAX_CMDLINE 16384 - -// ---------------------------------------------------------------------------- -// the rates we are going to send to netdata will have this detail; a value of: -// - 1 will send just integer parts to netdata -// - 100 will send 2 decimal points -// - 1000 will send 3 decimal points -// etc. -#define RATES_DETAIL 10000ULL - - -// ---------------------------------------------------------------------------- -// to avoid reallocating too frequently, we can increase the number of spare -// file descriptors used by processes. -// IMPORTANT: -// having a lot of spares increases the CPU utilization of the plugin. -#define MAX_SPARE_FDS 1 - - -// ---------------------------------------------------------------------------- -// command line options - -static int - debug = 0, - update_every = 1, - enable_guest_charts = 0, -#ifdef __FreeBSD__ - enable_file_charts = 0, -#else - enable_file_charts = 1, -#endif - enable_users_charts = 1, - enable_groups_charts = 1, - include_exited_childs = 1; - - -// will be changed to getenv(NETDATA_CONFIG_DIR) if it exists -static char *config_dir = CONFIG_DIR; - -// ---------------------------------------------------------------------------- -// internal flags -// handled in code (automatically set) - -static int - show_guest_time = 0, // 1 when guest values are collected - show_guest_time_old = 0, - proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/<pid>/cmdline - - -// ---------------------------------------------------------------------------- -// internal counters - -static size_t - global_iterations_counter = 1, - calls_counter = 0, - file_counter = 0, - targets_assignment_counter = 0; - - -// ---------------------------------------------------------------------------- -// Normalization -// -// With normalization we lower the collected metrics by a factor to make them -// match the total utilization of the system. -// The discrepancy exists because apps.plugin needs some time to collect all -// the metrics. This results in utilization that exceeds the total utilization -// of the system. -// -// With normalization we align the per-process utilization to the total of -// the system.
We first consume the exited children utilization and, if the -// collected values are above the total, we proportionally scale each reported -// metric. - - // the total system time, as reported by /proc/stat -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -static kernel_uint_t - global_utime = 0, - global_stime = 0, - global_gtime = 0; -#endif - -// the normalization ratios, as calculated by normalize_utilization() -double utime_fix_ratio = 1.0, - stime_fix_ratio = 1.0, - gtime_fix_ratio = 1.0, - minflt_fix_ratio = 1.0, - majflt_fix_ratio = 1.0, - cutime_fix_ratio = 1.0, - cstime_fix_ratio = 1.0, - cgtime_fix_ratio = 1.0, - cminflt_fix_ratio = 1.0, - cmajflt_fix_ratio = 1.0; - -// ---------------------------------------------------------------------------- -// target -// -// target is the structure into which processes are aggregated, to be reported -// to netdata. -// -// - Each entry in /etc/apps_groups.conf creates a target. -// - Each user and group used by a process in the system creates a target. - -struct target { - char compare[MAX_COMPARE_NAME + 1]; - uint32_t comparehash; - size_t comparelen; - - char id[MAX_NAME + 1]; - uint32_t idhash; - - char name[MAX_NAME + 1]; - - uid_t uid; - gid_t gid; - - kernel_uint_t minflt; - kernel_uint_t cminflt; - kernel_uint_t majflt; - kernel_uint_t cmajflt; - kernel_uint_t utime; - kernel_uint_t stime; - kernel_uint_t gtime; - kernel_uint_t cutime; - kernel_uint_t cstime; - kernel_uint_t cgtime; - kernel_uint_t num_threads; - // kernel_uint_t rss; - - kernel_uint_t status_vmsize; - kernel_uint_t status_vmrss; - kernel_uint_t status_vmshared; - kernel_uint_t status_rssfile; - kernel_uint_t status_rssshmem; - kernel_uint_t status_vmswap; - - kernel_uint_t io_logical_bytes_read; - kernel_uint_t io_logical_bytes_written; - // kernel_uint_t io_read_calls; - // kernel_uint_t io_write_calls; - kernel_uint_t io_storage_bytes_read; - kernel_uint_t io_storage_bytes_written; - // kernel_uint_t io_cancelled_write_bytes; - - int *target_fds; - int target_fds_size; - - kernel_uint_t openfiles; - kernel_uint_t openpipes; - kernel_uint_t opensockets; - kernel_uint_t openinotifies; - kernel_uint_t openeventfds; - kernel_uint_t opentimerfds; - kernel_uint_t opensignalfds; - kernel_uint_t openeventpolls; - kernel_uint_t openother; - - unsigned int processes; // how many processes have been merged to this - int exposed; // if set, we have sent this to netdata - int hidden; // if set, we set the hidden flag on the dimension - int debug; - int ends_with; - int starts_with; // if set, the compare string matches only the - // beginning of the command - - struct target *target; // the one that will be reported to netdata - struct target *next; -}; - -struct target - *apps_groups_default_target = NULL, // the default target - *apps_groups_root_target = NULL, // apps_groups.conf defined - *users_root_target = NULL, // users - *groups_root_target = NULL; // user groups - -size_t - apps_groups_targets_count = 0; // # of apps_groups.conf targets - - -// ---------------------------------------------------------------------------- -// pid_stat -// -// structure to store data for each process running -// see: man proc for the description of the fields - -struct pid_stat { - int32_t pid; - char comm[MAX_COMPARE_NAME + 1]; - char *cmdline; - - uint32_t log_thrown; - - // char state; - int32_t ppid; - // int32_t pgrp; - // int32_t session; - // int32_t tty_nr; - // int32_t tpgid; - // uint64_t flags; - - // these are raw values collected - kernel_uint_t minflt_raw; - kernel_uint_t cminflt_raw; -
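// (illustrative aside, not part of the original source) the *_raw members
// hold the counters exactly as read from the kernel, while the same-named
// members without the suffix (declared right below) hold the rates derived
// from consecutive readings; the *_fix_ratio globals above are then meant
// to be applied multiplicatively (e.g. p->utime * utime_fix_ratio) whenever
// the summed per-process rates exceed the totals read from /proc/stat.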
kernel_uint_t majflt_raw; - kernel_uint_t cmajflt_raw; - kernel_uint_t utime_raw; - kernel_uint_t stime_raw; - kernel_uint_t gtime_raw; // guest_time - kernel_uint_t cutime_raw; - kernel_uint_t cstime_raw; - kernel_uint_t cgtime_raw; // cguest_time - - // these are rates - kernel_uint_t minflt; - kernel_uint_t cminflt; - kernel_uint_t majflt; - kernel_uint_t cmajflt; - kernel_uint_t utime; - kernel_uint_t stime; - kernel_uint_t gtime; - kernel_uint_t cutime; - kernel_uint_t cstime; - kernel_uint_t cgtime; - - // int64_t priority; - // int64_t nice; - int32_t num_threads; - // int64_t itrealvalue; - // kernel_uint_t starttime; - // kernel_uint_t vsize; - // kernel_uint_t rss; - // kernel_uint_t rsslim; - // kernel_uint_t starcode; - // kernel_uint_t endcode; - // kernel_uint_t startstack; - // kernel_uint_t kstkesp; - // kernel_uint_t kstkeip; - // uint64_t signal; - // uint64_t blocked; - // uint64_t sigignore; - // uint64_t sigcatch; - // uint64_t wchan; - // uint64_t nswap; - // uint64_t cnswap; - // int32_t exit_signal; - // int32_t processor; - // uint32_t rt_priority; - // uint32_t policy; - // kernel_uint_t delayacct_blkio_ticks; - - uid_t uid; - gid_t gid; - - kernel_uint_t status_vmsize; - kernel_uint_t status_vmrss; - kernel_uint_t status_vmshared; - kernel_uint_t status_rssfile; - kernel_uint_t status_rssshmem; - kernel_uint_t status_vmswap; -#ifndef __FreeBSD__ - ARL_BASE *status_arl; -#endif - - kernel_uint_t io_logical_bytes_read_raw; - kernel_uint_t io_logical_bytes_written_raw; - // kernel_uint_t io_read_calls_raw; - // kernel_uint_t io_write_calls_raw; - kernel_uint_t io_storage_bytes_read_raw; - kernel_uint_t io_storage_bytes_written_raw; - // kernel_uint_t io_cancelled_write_bytes_raw; - - kernel_uint_t io_logical_bytes_read; - kernel_uint_t io_logical_bytes_written; - // kernel_uint_t io_read_calls; - // kernel_uint_t io_write_calls; - kernel_uint_t io_storage_bytes_read; - kernel_uint_t io_storage_bytes_written; - // kernel_uint_t io_cancelled_write_bytes; - - int *fds; // array of fds it uses - int fds_size; // the size of the fds array - - int children_count; // number of processes directly referencing this - char keep:1; // 1 when we need to keep this process in memory even after it exited - int keeploops; // increases by 1 every time keep is 1 and updated 0 - char updated:1; // 1 when the process is currently running - char merged:1; // 1 when it has been merged to its parent - char read:1; // 1 when we have already read this process for this iteration - - int sortlist; // higher numbers = top on the process tree - // each process gets a unique number - - struct target *target; // app_groups.conf targets - struct target *user_target; // uid based targets - struct target *group_target; // gid based targets - - usec_t stat_collected_usec; - usec_t last_stat_collected_usec; - - usec_t io_collected_usec; - usec_t last_io_collected_usec; - - char *fds_dirname; // the full directory name in /proc/PID/fd - - char *stat_filename; - char *status_filename; - char *io_filename; - char *cmdline_filename; - - struct pid_stat *parent; - struct pid_stat *prev; - struct pid_stat *next; -}; - -size_t pagesize; - -// log each problem once per process -// log flood protection flags (log_thrown) -#define PID_LOG_IO 0x00000001 -#define PID_LOG_STATUS 0x00000002 -#define PID_LOG_CMDLINE 0x00000004 -#define PID_LOG_FDS 0x00000008 -#define PID_LOG_STAT 0x00000010 - -static struct pid_stat - *root_of_pids = NULL, // global list of all processes running - **all_pids = NULL; // to avoid 
allocations, we pre-allocate - // the entire pid space. - -static size_t - all_pids_count = 0; // the number of processes running - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -// Another pre-allocated list of all possible pids. -// We need it to sort pids and assign them a unique sortlist id, so that we -// read parents before children. This is needed to prevent a situation where -// a child is found running, but, by the time we read its parent, it has exited and -// its parent has accumulated its resources. -static pid_t - *all_pids_sortlist = NULL; -#endif - -// ---------------------------------------------------------------------------- -// file descriptor -// -// this is used to keep a global list of all open files of the system. -// it is needed in order to calculate the unique files processes have open. - -#define FILE_DESCRIPTORS_INCREASE_STEP 100 - -// types for struct file_descriptor->type -typedef enum fd_filetype { - FILETYPE_OTHER, - FILETYPE_FILE, - FILETYPE_PIPE, - FILETYPE_SOCKET, - FILETYPE_INOTIFY, - FILETYPE_EVENTFD, - FILETYPE_EVENTPOLL, - FILETYPE_TIMERFD, - FILETYPE_SIGNALFD -} FD_FILETYPE; - -struct file_descriptor { - avl avl; - -#ifdef NETDATA_INTERNAL_CHECKS - uint32_t magic; -#endif /* NETDATA_INTERNAL_CHECKS */ - - const char *name; - uint32_t hash; - - FD_FILETYPE type; - int count; - int pos; -} *all_files = NULL; - -static int - all_files_len = 0, - all_files_size = 0; - -// ---------------------------------------------------------------------------- -// callback required by fatal() - -void netdata_cleanup_and_exit(int ret) { - exit(ret); -} - -// ---------------------------------------------------------------------------- -// apps_groups.conf -// aggregate all processes in groups, to have a limited number of dimensions - -static struct target *get_users_target(uid_t uid) { - struct target *w; - for(w = users_root_target ; w ; w = w->next) - if(w->uid == uid) return w; - - w = callocz(sizeof(struct target), 1); - snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid); - w->comparehash = simple_hash(w->compare); - w->comparelen = strlen(w->compare); - - snprintfz(w->id, MAX_NAME, "%u", uid); - w->idhash = simple_hash(w->id); - - struct passwd *pw = getpwuid(uid); - if(!pw || !pw->pw_name || !*pw->pw_name) - snprintfz(w->name, MAX_NAME, "%u", uid); - else - snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); - - netdata_fix_chart_name(w->name); - - w->uid = uid; - - w->next = users_root_target; - users_root_target = w; - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: added uid %u ('%s') target\n", w->uid, w->name); - - return w; -} - -struct target *get_groups_target(gid_t gid) -{ - struct target *w; - for(w = groups_root_target ; w ; w = w->next) - if(w->gid == gid) return w; - - w = callocz(sizeof(struct target), 1); - snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid); - w->comparehash = simple_hash(w->compare); - w->comparelen = strlen(w->compare); - - snprintfz(w->id, MAX_NAME, "%u", gid); - w->idhash = simple_hash(w->id); - - struct group *gr = getgrgid(gid); - if(!gr || !gr->gr_name || !*gr->gr_name) - snprintfz(w->name, MAX_NAME, "%u", gid); - else - snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); - - netdata_fix_chart_name(w->name); - - w->gid = gid; - - w->next = groups_root_target; - groups_root_target = w; - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: added gid %u ('%s') target\n", w->gid, w->name); - - return w; -} - -// find or create a new target -// there are targets that are just aggregated to another target (the second argument)
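// (illustration, not part of the original file) an apps_groups.conf line
// maps one or more process patterns to a single named group, e.g.:
//
//     sql: mysqld* postgres*
//     media: mplayer vlc
//
// and the option prefixes parsed below adjust the match: a trailing '*'
// turns the comparison into a prefix match, a leading '*' into a suffix
// match, '-' hides the resulting dimension and '+' enables debug output
// for the target.
static struct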
target *get_apps_groups_target(const char *id, struct target *target, const char *name) { - int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0; - const char *nid = id; - - // extract the options - while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') { - if(nid[0] == '-') thidden = 1; - if(nid[0] == '+') tdebug = 1; - if(nid[0] == '*') ends_with = 1; - nid++; - } - uint32_t hash = simple_hash(id); - - // find if it already exists - struct target *w, *last = apps_groups_root_target; - for(w = apps_groups_root_target ; w ; w = w->next) { - if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0) - return w; - - last = w; - } - - // find an existing target - if(unlikely(!target)) { - while(*name == '-') { - if(*name == '-') thidden = 1; - name++; - } - - for(target = apps_groups_root_target ; target != NULL ; target = target->next) { - if(!target->target && strcmp(name, target->name) == 0) - break; - } - - if(unlikely(debug)) { - if(unlikely(target)) - fprintf(stderr, "apps.plugin: REUSING TARGET NAME '%s' on ID '%s'\n", target->name, target->id); - else - fprintf(stderr, "apps.plugin: NEW TARGET NAME '%s' on ID '%s'\n", name, id); - } - } - - if(target && target->target) - fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id); - - w = callocz(sizeof(struct target), 1); - strncpyz(w->id, nid, MAX_NAME); - w->idhash = simple_hash(w->id); - - if(unlikely(!target)) - // copy the name - strncpyz(w->name, name, MAX_NAME); - else - // copy the id - strncpyz(w->name, nid, MAX_NAME); - - strncpyz(w->compare, nid, MAX_COMPARE_NAME); - size_t len = strlen(w->compare); - if(w->compare[len - 1] == '*') { - w->compare[len - 1] = '\0'; - w->starts_with = 1; - } - w->ends_with = ends_with; - - if(w->starts_with && w->ends_with) - proc_pid_cmdline_is_needed = 1; - - w->comparehash = simple_hash(w->compare); - w->comparelen = strlen(w->compare); - - w->hidden = thidden; - w->debug = tdebug; - w->target = target; - - // append it, to maintain the order in apps_groups.conf - if(last) last->next = w; - else apps_groups_root_target = w; - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s\n" - , w->id - , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact")) - , w->target?w->target->name:w->name - , (w->hidden)?"hidden":"-" - , (w->debug)?"debug":"-" - ); - - return w; -} - -// read the apps_groups.conf file -static int read_apps_groups_conf(const char *file) -{ - char filename[FILENAME_MAX + 1]; - - snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", config_dir, file); - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: process groups file: '%s'\n", filename); - - // ---------------------------------------- - - procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT); - if(!ff) return 1; - - procfile_set_quotes(ff, "'\""); - - ff = procfile_readall(ff); - if(!ff) - return 1; - - size_t line, lines = procfile_lines(ff); - - for(line = 0; line < lines ;line++) { - size_t word, words = procfile_linewords(ff, line); - if(!words) continue; - - char *name = procfile_lineword(ff, line, 0); - if(!name || !*name) continue; - - // find a possibly existing target - struct target *w = NULL; - - // loop through all words, skipping the first one (the name) - for(word = 0; word < words ;word++) { - char *s = procfile_lineword(ff, line, word); - if(!s || 
!*s) continue; - if(*s == '#') break; - - // is this the first word? skip it - if(s == name) continue; - - // add this target - struct target *n = get_apps_groups_target(s, w, name); - if(!n) { - error("Cannot create target '%s' (line %zu, word %zu)", s, line, word); - continue; - } - - // just some optimization - // to avoid searching for a target for each process - if(!w) w = n->target?n->target:n; - } - } - - procfile_close(ff); - - apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing - if(!apps_groups_default_target) - fatal("Cannot create default target"); - - // allow the user to override group 'other' - if(apps_groups_default_target->target) - apps_groups_default_target = apps_groups_default_target->target; - - return 0; -} - - -// ---------------------------------------------------------------------------- -// struct pid_stat management - -static inline struct pid_stat *get_pid_entry(pid_t pid) { - if(unlikely(all_pids[pid])) - return all_pids[pid]; - - struct pid_stat *p = callocz(sizeof(struct pid_stat), 1); - p->fds = callocz(sizeof(int), MAX_SPARE_FDS); - p->fds_size = MAX_SPARE_FDS; - - if(likely(root_of_pids)) - root_of_pids->prev = p; - - p->next = root_of_pids; - root_of_pids = p; - - p->pid = pid; - - all_pids[pid] = p; - all_pids_count++; - - return p; -} - -static inline void del_pid_entry(pid_t pid) { - struct pid_stat *p = all_pids[pid]; - - if(unlikely(!p)) { - error("attempted to free pid %d that is not allocated.", pid); - return; - } - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: process %d %s exited, deleting it.\n", pid, p->comm); - - if(root_of_pids == p) - root_of_pids = p->next; - - if(p->next) p->next->prev = p->prev; - if(p->prev) p->prev->next = p->next; - - freez(p->fds); - freez(p->fds_dirname); - freez(p->stat_filename); - freez(p->status_filename); -#ifndef __FreeBSD__ - arl_free(p->status_arl); -#endif - freez(p->io_filename); - freez(p->cmdline_filename); - freez(p->cmdline); - freez(p); - - all_pids[pid] = NULL; - all_pids_count--; -} - -// ---------------------------------------------------------------------------- - -static inline int managed_log(struct pid_stat *p, uint32_t log, int status) { - if(unlikely(!status)) { - // error("command failed log %u, errno %d", log, errno); - - if(unlikely(debug || errno != ENOENT)) { - if(unlikely(debug || !(p->log_thrown & log))) { - p->log_thrown |= log; - switch(log) { - case PID_LOG_IO: - #ifdef __FreeBSD__ - error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm); - #else - error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_STATUS: - #ifdef __FreeBSD__ - error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm); - #else - error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_CMDLINE: - #ifdef __FreeBSD__ - error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm); - #else - error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_FDS: - #ifdef __FreeBSD__ - error("Cannot fetch process %d files (command '%s')", p->pid, p->comm); - #else - error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_STAT: - break; - - default: - 
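// (illustrative aside, not part of the original source) log_thrown is a
// plain bitmask, so every failure class is reported once per process: the
// first failure sets the corresponding PID_LOG_* bit and logs, repeats are
// silenced while the bit is set, and a later successful read (status != 0)
// clears the bit again so a new failure will be reported.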
error("unhandled error for pid %d, command '%s'", p->pid, p->comm); - break; - } - } - } - errno = 0; - } - else if(unlikely(p->log_thrown & log)) { - // error("unsetting log %u on pid %d", log, p->pid); - p->log_thrown &= ~log; - } - - return status; -} - -static inline void assign_target_to_pid(struct pid_stat *p) { - targets_assignment_counter++; - - uint32_t hash = simple_hash(p->comm); - size_t pclen = strlen(p->comm); - - struct target *w; - for(w = apps_groups_root_target; w ; w = w->next) { - // if(debug || (p->target && p->target->debug)) fprintf(stderr, "apps.plugin: \t\tcomparing '%s' with '%s'\n", w->compare, p->comm); - - // find it - 4 cases: - // 1. the target is not a pattern - // 2. the target has the prefix - // 3. the target has the suffix - // 4. the target is something inside cmdline - - if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm)) - || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen)) - || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen])) - || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare)) - ))) { - - if(w->target) p->target = w->target; - else p->target = w; - - if(debug || (p->target && p->target->debug)) - fprintf(stderr, "apps.plugin: \t\t%s linked to target %s\n", p->comm, p->target->name); - - break; - } - } -} - - -// ---------------------------------------------------------------------------- -// update pids from proc - -static inline int read_proc_pid_cmdline(struct pid_stat *p) { - static char cmdline[MAX_CMDLINE + 1]; - -#ifdef __FreeBSD__ - size_t i, bytes = MAX_CMDLINE; - int mib[4]; - - mib[0] = CTL_KERN; - mib[1] = KERN_PROC; - mib[2] = KERN_PROC_ARGS; - mib[3] = p->pid; - if (unlikely(sysctl(mib, 4, cmdline, &bytes, NULL, 0))) - goto cleanup; -#else - if(unlikely(!p->cmdline_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid); - p->cmdline_filename = strdupz(filename); - } - - int fd = open(p->cmdline_filename, O_RDONLY, 0666); - if(unlikely(fd == -1)) goto cleanup; - - ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE); - close(fd); - - if(unlikely(bytes < 0)) goto cleanup; -#endif - - cmdline[bytes] = '\0'; - for(i = 0; i < bytes ; i++) - if(unlikely(!cmdline[i])) cmdline[i] = ' '; - - p->cmdline = strdupz(cmdline); - - if(unlikely(debug)) - fprintf(stderr, "Read file '%s' contents: %s\n", p->cmdline_filename, p->cmdline); - - return 1; - -cleanup: - // copy the command to the command line - p->cmdline = strdupz(p->comm); - return 0; -} - -// ---------------------------------------------------------------------------- -// macro to calculate the incremental rate of a value -// each parameter is accessed only ONCE - so it is safe to pass function calls -// or other macros as parameters - -#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) { \ - kernel_uint_t _new_tmp = new_kernel_value; \ - (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \ - (last_kernel_variable) = _new_tmp; \ - } - -// the same macro for struct pid members -#define pid_incremental_rate(type, var, value) \ - incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec) - - -// 
---------------------------------------------------------------------------- - -#ifndef __FreeBSD__ -struct arl_callback_ptr { - struct pid_stat *p; - procfile *ff; - size_t line; -}; - -void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; - - //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1); - const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2); - //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3); - //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4); - - if(likely(effective_uid && *effective_uid)) - aptr->p->uid = (uid_t)str2l(effective_uid); -} - -void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; - - //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1); - const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2); - //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3); - //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4); - - if(likely(effective_gid && *effective_gid)) - aptr->p->gid = (uid_t)str2l(effective_gid); -} - -void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_rssshmem = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} -#endif // !__FreeBSD__ - -static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) { - p->status_vmsize = 0; - p->status_vmrss = 0; - p->status_vmshared = 0; - p->status_rssfile = 
0; - p->status_rssshmem = 0; - p->status_vmswap = 0; - -#ifdef __FreeBSD__ - struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; - - p->uid = proc_info->ki_uid; - p->gid = proc_info->ki_groups[0]; - p->status_vmsize = proc_info->ki_size / 1024; // in kB - p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in kB - // FIXME: what about shared and swap memory on FreeBSD? - return 1; -#else - (void)ptr; - - static struct arl_callback_ptr arl_ptr; - static procfile *ff = NULL; - - if(unlikely(!p->status_arl)) { - p->status_arl = arl_create("/proc/pid/status", NULL, 60); - arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr); - arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr); - arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr); - arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr); - arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr); - arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr); - arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr); - } - - if(unlikely(!p->status_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid); - p->status_filename = strdupz(filename); - } - - ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(unlikely(!ff)) return 0; - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; - - calls_counter++; - - // let ARL use this pid - arl_ptr.p = p; - arl_ptr.ff = ff; - - size_t lines = procfile_lines(ff), l; - arl_begin(p->status_arl); - - for(l = 0; l < lines ;l++) { - // fprintf(stderr, "CHECK: line %zu of %zu, key '%s' = '%s'\n", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1)); - arl_ptr.line = l; - if(unlikely(arl_check(p->status_arl, - procfile_lineword(ff, l, 0), - procfile_lineword(ff, l, 1)))) break; - } - - p->status_vmshared = p->status_rssfile + p->status_rssshmem; - - // fprintf(stderr, "%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu\n", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared); - - return 1; -#endif -} - - -// ---------------------------------------------------------------------------- - -static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr) { - (void)ptr; - -#ifdef __FreeBSD__ - struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; - - if (unlikely(proc_info->ki_tdflags & TDF_IDLETD)) - goto cleanup; -#else - static procfile *ff = NULL; - - if(unlikely(!p->stat_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid); - p->stat_filename = strdupz(filename); - } - - int set_quotes = (!ff)?1:0; - - ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(unlikely(!ff)) goto cleanup; - - // if(set_quotes) procfile_set_quotes(ff, "()"); - if(unlikely(set_quotes)) - procfile_set_open_close(ff, "(", ")"); - - ff = procfile_readall(ff); - if(unlikely(!ff)) goto cleanup; -#endif - - p->last_stat_collected_usec = p->stat_collected_usec; - p->stat_collected_usec = now_monotonic_usec(); - calls_counter++; - -#ifdef __FreeBSD__ - char *comm = proc_info->ki_comm; - p->ppid = proc_info->ki_ppid; -#else - // p->pid = 
str2pid_t(procfile_lineword(ff, 0, 0)); - char *comm = procfile_lineword(ff, 0, 1); - // p->state = *(procfile_lineword(ff, 0, 2)); - p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3)); - // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4)); - // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5)); - // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6)); - // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7)); - // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8)); -#endif - - if(strcmp(p->comm, comm) != 0) { - if(unlikely(debug)) { - if(p->comm[0]) - fprintf(stderr, "apps.plugin: \tpid %d (%s) changed name to '%s'\n", p->pid, p->comm, comm); - else - fprintf(stderr, "apps.plugin: \tJust added %d (%s)\n", p->pid, comm); - } - - strncpyz(p->comm, comm, MAX_COMPARE_NAME); - - // /proc/<pid>/cmdline - if(likely(proc_pid_cmdline_is_needed)) - managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p)); - - assign_target_to_pid(p); - } - -#ifdef __FreeBSD__ - pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt); - pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt); - pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt); - pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt); - pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000); - pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000); - pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000); - pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000); - - p->num_threads = proc_info->ki_numthreads; - - if(enable_guest_charts) { - enable_guest_charts = 0; - info("Guest charts aren't supported by FreeBSD"); - } -#else - pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9))); - pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10))); - pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11))); - pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12))); - pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13))); - pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14))); - pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15))); - pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16))); - // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17)); - // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18)); - p->num_threads = (int32_t)str2uint32_t(procfile_lineword(ff, 0, 19)); - // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20)); - // p->starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)); - // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22)); - // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23)); - // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24)); - // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25)); - // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26)); - // p->startstack = 
str2kernel_uint_t(procfile_lineword(ff, 0, 27)); - // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28)); - // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29)); - // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30)); - // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31)); - // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32)); - // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33)); - // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34)); - // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35)); - // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36)); - // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37)); - // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38)); - // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39)); - // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40)); - // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41)); - - if(enable_guest_charts) { - - pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42))); - pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43))); - - if (show_guest_time || p->gtime || p->cgtime) { - p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime; - p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime; - show_guest_time = 1; - } - } -#endif - - if(unlikely(debug || (p->target && p->target->debug))) - fprintf(stderr, "apps.plugin: READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d\n", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads); - - if(unlikely(global_iterations_counter == 1)) { - p->minflt = 0; - p->cminflt = 0; - p->majflt = 0; - p->cmajflt = 0; - p->utime = 0; - p->stime = 0; - p->gtime = 0; - p->cutime = 0; - p->cstime = 0; - p->cgtime = 0; - } - - return 1; - -cleanup: - p->minflt = 0; - p->cminflt = 0; - p->majflt = 0; - p->cmajflt = 0; - p->utime = 0; - p->stime = 0; - p->gtime = 0; - p->cutime = 0; - p->cstime = 0; - p->cgtime = 0; - p->num_threads = 0; - // p->rss = 0; - return 0; -} - -static inline int read_proc_pid_io(struct pid_stat *p, void *ptr) { - (void)ptr; -#ifdef __FreeBSD__ - struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; -#else - static procfile *ff = NULL; - - if(unlikely(!p->io_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid); - p->io_filename = strdupz(filename); - } - - // open the file - ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(unlikely(!ff)) goto cleanup; - - ff = procfile_readall(ff); - if(unlikely(!ff)) goto cleanup; -#endif - - calls_counter++; - - p->last_io_collected_usec = p->io_collected_usec; - p->io_collected_usec = now_monotonic_usec(); - -#ifdef __FreeBSD__ - pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock); - pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock); -#else - 
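// (illustration, not part of the original file) the fixed line/word
// indices below mirror the layout of /proc/<pid>/io, e.g.:
//     rchar: 323934931    <- line 0, word 1 (logical bytes read)
//     wchar: 323929600    <- line 1, word 1 (logical bytes written)
//     syscr: 632          <- line 2 (read calls, commented out below)
//     syscw: 157          <- line 3 (write calls, commented out below)
//     read_bytes: 0       <- line 4, word 1 (storage bytes read)
//     write_bytes: 4096   <- line 5, word 1 (storage bytes written)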
pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1))); - pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1))); - // pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1))); - // pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1))); - pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1))); - pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1))); - // pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1))); -#endif - - if(unlikely(global_iterations_counter == 1)) { - p->io_logical_bytes_read = 0; - p->io_logical_bytes_written = 0; - // p->io_read_calls = 0; - // p->io_write_calls = 0; - p->io_storage_bytes_read = 0; - p->io_storage_bytes_written = 0; - // p->io_cancelled_write_bytes = 0; - } - - return 1; - -#ifndef __FreeBSD__ -cleanup: - p->io_logical_bytes_read = 0; - p->io_logical_bytes_written = 0; - // p->io_read_calls = 0; - // p->io_write_calls = 0; - p->io_storage_bytes_read = 0; - p->io_storage_bytes_written = 0; - // p->io_cancelled_write_bytes = 0; - return 0; -#endif -} - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -static inline int read_proc_stat() { - static char filename[FILENAME_MAX + 1] = ""; - static procfile *ff = NULL; - static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0; - static usec_t collected_usec = 0, last_collected_usec = 0; - - if(unlikely(!ff)) { - snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix); - ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) goto cleanup; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) goto cleanup; - - last_collected_usec = collected_usec; - collected_usec = now_monotonic_usec(); - - calls_counter++; - - // temporary - it is added global_ntime; - kernel_uint_t global_ntime = 0; - - incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec); - incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec); - incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec); - incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec); - - global_utime += global_ntime; - - if(enable_guest_charts) { - // temporary - it is added global_ntime; - kernel_uint_t global_gntime = 0; - - // guest nice time, on guest time - incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec); - - global_gtime += global_gntime; - - // remove guest time from user time - global_utime -= (global_utime > global_gtime) ? 
global_gtime : global_utime; - } - - if(unlikely(global_iterations_counter == 1)) { - global_utime = 0; - global_stime = 0; - global_gtime = 0; - } - - return 1; - -cleanup: - global_utime = 0; - global_stime = 0; - global_gtime = 0; - return 0; -} -#else -static inline int read_proc_stat() { - return 0; -} -#endif - -// ---------------------------------------------------------------------------- - -int file_descriptor_compare(void* a, void* b) { -#ifdef NETDATA_INTERNAL_CHECKS - if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE) - error("Corrupted index data detected. Please report this."); -#endif /* NETDATA_INTERNAL_CHECKS */ - - if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash) - return -1; - - else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash) - return 1; - - else - return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name); -} - -int file_descriptor_iterator(avl *a) { if(a) {}; return 0; } - -avl_tree all_files_index = { - NULL, - file_descriptor_compare -}; - -static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) { - struct file_descriptor tmp; - tmp.hash = (hash)?hash:simple_hash(name); - tmp.name = name; - tmp.count = 0; - tmp.pos = 0; -#ifdef NETDATA_INTERNAL_CHECKS - tmp.magic = 0x0BADCAFE; -#endif /* NETDATA_INTERNAL_CHECKS */ - - return (struct file_descriptor *)avl_search(&all_files_index, (avl *) &tmp); -} - -#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl *)(fd)) -#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl *)(fd)) - -// ---------------------------------------------------------------------------- - -static inline void file_descriptor_not_used(int id) -{ - if(id > 0 && id < all_files_size) { - -#ifdef NETDATA_INTERNAL_CHECKS - if(all_files[id].magic != 0x0BADCAFE) { - error("Ignoring request to remove empty file id %d.", id); - return; - } -#endif /* NETDATA_INTERNAL_CHECKS */ - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: decreasing slot %d (count = %d).\n", id, all_files[id].count); - - if(all_files[id].count > 0) { - all_files[id].count--; - - if(!all_files[id].count) { - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> slot %d is empty.\n", id); - - if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id])) - error("INTERNAL ERROR: removal of unused fd from index, removed a different fd"); - -#ifdef NETDATA_INTERNAL_CHECKS - all_files[id].magic = 0x00000000; -#endif /* NETDATA_INTERNAL_CHECKS */ - all_files_len--; - } - } - else - error("Request to decrease counter of fd %d (%s), while the use counter is 0", id, all_files[id].name); - } - else error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", id, all_files_size); -} - -static inline void all_files_grow() { - void *old = all_files; - int i; - - // there is no empty slot - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: extending fd array to %d entries\n", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); - - all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor)); - - // if the address changed, we have to rebuild the index - // since all pointers are now invalid - - if(unlikely(old && old != (void *)all_files)) { - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> re-indexing.\n"); - - all_files_index.root = NULL; - for(i = 0; i < all_files_size; i++) { - 
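// (illustrative aside, not part of the original source) the avl index
// stores pointers into the all_files array itself, not slot numbers, so
// when reallocz() moves the block every node in the tree dangles; dropping
// the root and re-inserting each occupied slot, as done here, rebuilds the
// index at the new addresses.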
if(!all_files[i].count) continue; - if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i])) - error("INTERNAL ERROR: duplicate indexing of fd during realloc."); - } - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> re-indexing done.\n"); - } - - // initialize the newly added entries - - for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) { - all_files[i].count = 0; - all_files[i].name = NULL; -#ifdef NETDATA_INTERNAL_CHECKS - all_files[i].magic = 0x00000000; -#endif /* NETDATA_INTERNAL_CHECKS */ - all_files[i].pos = i; - } - - if(unlikely(!all_files_size)) all_files_len = 1; - all_files_size += FILE_DESCRIPTORS_INCREASE_STEP; -} - -static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) { - // check we have enough memory to add it - if(!all_files || all_files_len == all_files_size) - all_files_grow(); - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> searching for empty slot.\n"); - - // search for an empty slot - - static int last_pos = 0; - int i, c; - for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) { - if(c >= all_files_size) c = 0; - if(c == 0) continue; - - if(!all_files[c].count) { - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> Examining slot %d.\n", c); - -#ifdef NETDATA_INTERNAL_CHECKS - if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash)) - error("fd on position %d is not cleared properly. It still has %s in it.\n", c, all_files[c].name); -#endif /* NETDATA_INTERNAL_CHECKS */ - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> %s fd position %d for %s (last name: %s)\n", all_files[c].name?"re-using":"using", c, name, all_files[c].name); - - freez((void *)all_files[c].name); - all_files[c].name = NULL; - last_pos = c; - break; - } - } - - all_files_len++; - - if(i == all_files_size) { - fatal("We should find an empty slot, but there isn't any"); - exit(1); - } - // else we have an empty slot in 'c' - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> updating slot %d.\n", c); - - all_files[c].name = strdupz(name); - all_files[c].hash = hash; - all_files[c].type = type; - all_files[c].pos = c; - all_files[c].count = 1; -#ifdef NETDATA_INTERNAL_CHECKS - all_files[c].magic = 0x0BADCAFE; -#endif /* NETDATA_INTERNAL_CHECKS */ - if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c])) - error("INTERNAL ERROR: duplicate indexing of fd."); - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: using fd position %d (name: %s)\n", c, all_files[c].name); - - return c; -} - -static inline int file_descriptor_find_or_add(const char *name) -{ - uint32_t hash = simple_hash(name); - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: adding or finding name '%s' with hash %u\n", name, hash); - - struct file_descriptor *fd = file_descriptor_find(name, hash); - if(fd) { - // found - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: >> found on slot %d\n", fd->pos); - - fd->count++; - return fd->pos; - } - // not found - - FD_FILETYPE type; - if(likely(name[0] == '/')) type = FILETYPE_FILE; - else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE; - else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET; - else if(likely(strncmp(name, "anon_inode:", 11) == 0)) { - const char *t = &name[11]; - - if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY; - else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD; - else if(strcmp(t, 
"[eventpoll]") == 0) type = FILETYPE_EVENTPOLL; - else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD; - else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD; - else { - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: FIXME: unknown anonymous inode: %s\n", name); - - type = FILETYPE_OTHER; - } - } - else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY; - else { - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: FIXME: cannot understand linkname: %s\n", name); - - type = FILETYPE_OTHER; - } - - return file_descriptor_set_on_empty_slot(name, hash, type); -} - -static inline void make_all_pid_fds_negative(struct pid_stat *p) { - int *fd = p->fds, *end = &p->fds[p->fds_size]; - while(fd < end) { - *fd = -(*fd); - fd++; - } -} - -static inline void cleanup_negative_pid_fds(struct pid_stat *p) { - int *fd = p->fds, *fdend = &p->fds[p->fds_size]; - - while(fd < fdend) { - if(unlikely(*fd < 0)) { - file_descriptor_not_used(-(*fd)); - *fd++ = 0; - } - else - fd++; - } -} - -static inline void zero_pid_fds(struct pid_stat *p, int first, int size) { - int *fd = &p->fds[first], *end = &p->fds[first + size]; - while(fd < end) *fd++ = 0; -} - -static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) { - (void)ptr; -#ifdef __FreeBSD__ - int mib[4]; - size_t size; - struct kinfo_file *fds; - static char *fdsbuf; - char *bfdsbuf, *efdsbuf; - char fdsname[FILENAME_MAX + 1]; - - // we make all pid fds negative, so that - // we can detect unused file descriptors - // at the end, to free them - make_all_pid_fds_negative(p); - - mib[0] = CTL_KERN; - mib[1] = KERN_PROC; - mib[2] = KERN_PROC_FILEDESC; - mib[3] = p->pid; - - if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) { - error("sysctl error: Can't get file descriptors data size for pid %d", p->pid); - return 0; - } - if (likely(size > 0)) - fdsbuf = reallocz(fdsbuf, size); - if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) { - error("sysctl error: Can't get file descriptors data for pid %d", p->pid); - return 0; - } - - bfdsbuf = fdsbuf; - efdsbuf = fdsbuf + size; - while (bfdsbuf < efdsbuf) { - fds = (struct kinfo_file *)(uintptr_t)bfdsbuf; - if (unlikely(fds->kf_structsize == 0)) - break; - - // do not process file descriptors for current working directory, root directory, - // jail directory, ktrace vnode, text vnode and controlling terminal - if (unlikely(fds->kf_fd < 0)) { - bfdsbuf += fds->kf_structsize; - continue; - } - - // get file descriptors array index - int fdid = fds->kf_fd; - - // check if the fds array is small - if (unlikely(fdid >= p->fds_size)) { - // it is small, extend it - - if (unlikely(debug)) - fprintf(stderr, "apps.plugin: extending fd memory slots for %s from %d to %d\n", p->comm, p->fds_size, fdid + MAX_SPARE_FDS); - - p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(int)); - - // and initialize it - zero_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size); - p->fds_size = fdid + MAX_SPARE_FDS; - } - - if (unlikely(p->fds[fdid] == 0)) { - // we don't know this fd, get it - - switch (fds->kf_type) { - case KF_TYPE_FIFO: - case KF_TYPE_VNODE: - if (unlikely(!fds->kf_path[0])) { - sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid); - break; - } - sprintf(fdsname, "%s", fds->kf_path); - break; - case KF_TYPE_SOCKET: - switch (fds->kf_sock_domain) { - case AF_INET: - case AF_INET6: - if (fds->kf_sock_protocol == IPPROTO_TCP) - sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb); - else - 
sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb); - break; - case AF_UNIX: - /* print address of pcb and connected pcb */ - sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn); - break; - default: - /* print protocol number and socket address */ -#if __FreeBSD_version < 1200031 - sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2); -#else - sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2); -#endif - } - break; - case KF_TYPE_PIPE: - sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer); - break; - case KF_TYPE_PTS: -#if __FreeBSD_version < 1200031 - sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev); -#else - sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev); -#endif - break; - case KF_TYPE_SHM: - sprintf(fdsname, "other: shm: %s size: %lu", fds->kf_path, fds->kf_un.kf_file.kf_file_size); - break; - case KF_TYPE_SEM: - sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value); - break; - default: - sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd); - } - - // if another process already has this, we will get - // the same id - p->fds[fdid] = file_descriptor_find_or_add(fdsname); - } - - // else make it positive again, we need it - // of course, the actual file may have changed, but we don't care so much - // FIXME: we could compare the inode as returned by readdir dirent structure - - else - p->fds[fdid] = -p->fds[fdid]; - - bfdsbuf += fds->kf_structsize; - } -#else - if(unlikely(!p->fds_dirname)) { - char dirname[FILENAME_MAX+1]; - snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid); - p->fds_dirname = strdupz(dirname); - } - - DIR *fds = opendir(p->fds_dirname); - if(unlikely(!fds)) return 0; - - struct dirent *de; - char fdname[FILENAME_MAX + 1]; - char linkname[FILENAME_MAX + 1]; - - // we make all pid fds negative, so that - // we can detect unused file descriptors - // at the end, to free them - make_all_pid_fds_negative(p); - - while((de = readdir(fds))) { - // we need only files with numeric names - - if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9')) - continue; - - // get its number - int fdid = (int) str2l(de->d_name); - if(unlikely(fdid < 0)) continue; - - // check if the fds array is small - if(unlikely(fdid >= p->fds_size)) { - // it is small, extend it - - if(unlikely(debug)) - fprintf(stderr - , "apps.plugin: extending fd memory slots for %s from %d to %d\n" - , p->comm - , p->fds_size - , fdid + MAX_SPARE_FDS - ); - - p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(int)); - - // and initialize it - zero_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size); - p->fds_size = fdid + MAX_SPARE_FDS; - } - - if(unlikely(p->fds[fdid] == 0)) { - // we don't know this fd, get it - - sprintf(fdname, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name); - ssize_t l = readlink(fdname, linkname, FILENAME_MAX); - if(unlikely(l == -1)) { - if(debug || (p->target && p->target->debug)) { - if(debug || (p->target && p->target->debug)) - error("Cannot read link %s", fdname); - } - continue; - } - else - linkname[l] = '\0'; - - file_counter++; - - // if another process already has this, we will get - // the same id - p->fds[fdid] = 
file_descriptor_find_or_add(linkname); - } - - // else make it positive again, we need it - // of course, the actual file may have changed, but we don't care so much - // FIXME: we could compare the inode as returned by readdir dirent structure - // UPDATE: no we cannot use inodes - under /proc inodes don't change when the link is changed - - else - p->fds[fdid] = -p->fds[fdid]; - } - - closedir(fds); -#endif - cleanup_negative_pid_fds(p); - - return 1; -} - -// ---------------------------------------------------------------------------- - -static inline int print_process_and_parents(struct pid_stat *p, usec_t time) { - char *prefix = "\\_ "; - int indent = 0; - - if(p->parent) - indent = print_process_and_parents(p->parent, p->stat_collected_usec); - else - prefix = " > "; - - char buffer[indent + 1]; - int i; - - for(i = 0; i < indent ;i++) buffer[i] = ' '; - buffer[i] = '\0'; - - fprintf(stderr, " %s %s%s (%d %s %llu" - , buffer - , prefix - , p->comm - , p->pid - , p->updated?"running":"exited" - , p->stat_collected_usec - time - ); - - if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime); - if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime); - if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime); - if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime); - if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime); - if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime); - if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt); - if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt); - if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt); - if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt); - fprintf(stderr, ")\n"); - - return indent + 1; -} - -static inline void print_process_tree(struct pid_stat *p, char *msg) { - fprintf(stderr, "%s: process %s (%d, %s) with parents:\n", msg, p->comm, p->pid, p->updated?"running":"exited"); - print_process_and_parents(p, p->stat_collected_usec); -} - -static inline void find_lost_child_debug(struct pid_stat *pe, kernel_uint_t lost, int type) { - int found = 0; - struct pid_stat *p = NULL; - - for(p = root_of_pids; p ; p = p->next) { - if(p == pe) continue; - - switch(type) { - case 1: - if(p->cminflt > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 2: - if(p->cmajflt > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 3: - if(p->cutime > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 4: - if(p->cstime > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 5: - if(p->cgtime > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - } - } - - if(!found) { - switch(type) { - case 1: - fprintf(stderr, " > cannot find any 
process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 2: - fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 3: - fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 4: - fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 5: - fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - } - } -} - -static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) { - kernel_uint_t absorbed = 0; - - if(*field > *pfield) { - absorbed += *pfield; - *field -= *pfield; - *pfield = 0; - } - else { - absorbed += *field; - *pfield -= *field; - *field = 0; - } - - return absorbed; -} - -static inline void process_exited_processes() { - struct pid_stat *p; - - for(p = root_of_pids; p ; p = p->next) { - if(p->updated || !p->stat_collected_usec) - continue; - - kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - - if(utime + stime + gtime + minflt + majflt == 0) - continue; - - if(unlikely(debug)) { - fprintf(stderr, "Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")\n" - , p->comm - , p->pid - , p->updated?"running":"exited" - , utime - , stime - , gtime - , minflt - , majflt - ); - print_process_tree(p, "Searching parents"); - } - - struct pid_stat *pp; - for(pp = p->parent; pp ; pp = pp->parent) { - if(!pp->updated) continue; - - kernel_uint_t absorbed; - absorbed = remove_exited_child_from_parent(&utime, &pp->cutime); - if(unlikely(debug && absorbed)) - fprintf(stderr, " > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")\n", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime); - - absorbed = remove_exited_child_from_parent(&stime, &pp->cstime); - if(unlikely(debug && absorbed)) - fprintf(stderr, " > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")\n", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime); - - absorbed = remove_exited_child_from_parent(&gtime, &pp->cgtime); - if(unlikely(debug && absorbed)) - fprintf(stderr, " > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")\n", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime); - - absorbed = 
remove_exited_child_from_parent(&minflt, &pp->cminflt); - if(unlikely(debug && absorbed)) - fprintf(stderr, " > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")\n", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt); - - absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt); - if(unlikely(debug && absorbed)) - fprintf(stderr, " > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")\n", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt); - } - - if(unlikely(utime + stime + gtime + minflt + majflt > 0)) { - if(unlikely(debug)) { - if(utime) find_lost_child_debug(p, utime, 3); - if(stime) find_lost_child_debug(p, stime, 4); - if(gtime) find_lost_child_debug(p, gtime, 5); - if(minflt) find_lost_child_debug(p, minflt, 1); - if(majflt) find_lost_child_debug(p, majflt, 2); - } - - p->keep = 1; - - if(unlikely(debug)) - fprintf(stderr, " > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")\n" - , p->comm - , p->pid - , p->updated?"running":"exited" - , utime - , stime - , gtime - , minflt - , majflt - ); - - for(pp = p->parent; pp ; pp = pp->parent) { - if(pp->updated) break; - pp->keep = 1; - - if(unlikely(debug)) - fprintf(stderr, " > - KEEP - parent for another loop: %s (%d %s)\n" - , pp->comm - , pp->pid - , pp->updated?"running":"exited" - ); - } - - p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0; - - if(unlikely(debug)) - fprintf(stderr, "\n"); - } - else if(unlikely(debug)) { - fprintf(stderr, " > totally absorbed - DONE - %s (%d %s)\n" - , p->comm - , p->pid - , p->updated?"running":"exited" - ); - } - } -} - -static inline void link_all_processes_to_their_parents(void) { - struct pid_stat *p, *pp; - - // link all children to their parents - // and update children count on parents - for(p = root_of_pids; p ; p = p->next) { - // for each process found - - p->sortlist = 0; - p->parent = NULL; - - if(unlikely(!p->ppid)) { - p->parent = NULL; - continue; - } - - pp = all_pids[p->ppid]; - if(likely(pp)) { - p->parent = pp; - pp->children_count++; - - if(unlikely(debug || (p->target && p->target->debug))) - fprintf(stderr, "apps.plugin: \tchild %d (%s, %s) on target '%s' has parent %d (%s, %s). 
Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "\n", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt); - } - else { - p->parent = NULL; - error("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid); - } - } -} - -// ---------------------------------------------------------------------------- - -// 1. read all files in /proc -// 2. for each numeric directory: -// i. read /proc/pid/stat -// ii. read /proc/pid/status -// iii. read /proc/pid/io (requires root access) -// iii. read the entries in directory /proc/pid/fd (requires root access) -// for each entry: -// a. find or create a struct file_descriptor -// b. cleanup any old/unused file_descriptors - -// after all these, some pids may be linked to targets, while others may not - -// in case of errors, only 1 every 1000 errors is printed -// to avoid filling up all disk space -// if debug is enabled, all errors are printed - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -static int compar_pid(const void *pid1, const void *pid2) { - - struct pid_stat *p1 = all_pids[*((pid_t *)pid1)]; - struct pid_stat *p2 = all_pids[*((pid_t *)pid2)]; - - if(p1->sortlist > p2->sortlist) - return -1; - else - return 1; -} -#endif - -static inline int collect_data_for_pid(pid_t pid, void *ptr) { - if(unlikely(pid < 0 || pid > pid_max)) { - error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); - return 0; - } - - struct pid_stat *p = get_pid_entry(pid); - if(unlikely(!p || p->read)) return 0; - p->read = 1; - - // fprintf(stderr, "Reading process %d (%s), sortlist %d\n", p->pid, p->comm, p->sortlist); - - // -------------------------------------------------------------------- - // /proc/<pid>/stat - - if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr)))) - // there is no reason to proceed if we cannot get its status - return 0; - - // check its parent pid - if(unlikely(p->ppid < 0 || p->ppid > pid_max)) { - error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid); - p->ppid = 0; - } - - // -------------------------------------------------------------------- - // /proc/<pid>/io - - managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr)); - - // -------------------------------------------------------------------- - // /proc/<pid>/status - - if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr)))) - // there is no reason to proceed if we cannot get its status - return 0; - - // -------------------------------------------------------------------- - // /proc/<pid>/fd - - if(enable_file_charts) - managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr)); - - // -------------------------------------------------------------------- - // done! 
- - if(unlikely(debug && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read)) - fprintf(stderr, "Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read\n", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist); - - // mark it as updated - p->updated = 1; - p->keep = 0; - p->keeploops = 0; - - return 1; -} - -static int collect_data_for_all_processes(void) { - struct pid_stat *p = NULL; - -#ifdef __FreeBSD__ - int i, procnum; - - static size_t procbase_size = 0; - static struct kinfo_proc *procbase = NULL; - - size_t new_procbase_size; - - int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC }; - if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) { - error("sysctl error: Can't get processes data size"); - return 0; - } - - // give it some air for processes that may be started - // during this little time. - new_procbase_size += 100 * sizeof(struct kinfo_proc); - - // increase the buffer if needed - if(new_procbase_size > procbase_size) { - procbase_size = new_procbase_size; - procbase = reallocz(procbase, procbase_size); - } - - // sysctl() gets from new_procbase_size the buffer size - // and also returns to it the amount of data filled in - new_procbase_size = procbase_size; - - // get the processes from the system - if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) { - error("sysctl error: Can't get processes data"); - return 0; - } - - // based on the amount of data filled in - // calculate the number of processes we got - procnum = new_procbase_size / sizeof(struct kinfo_proc); - -#endif - - if(all_pids_count) { -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) - size_t slc = 0; -#endif - for(p = root_of_pids; p ; p = p->next) { - p->read = 0; // mark it as not read, so that collect_data_for_pid() will read it - p->updated = 0; - p->merged = 0; - p->children_count = 0; - p->parent = NULL; - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) - all_pids_sortlist[slc++] = p->pid; -#endif - } - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) - if(unlikely(slc != all_pids_count)) { - error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc); - all_pids_count = slc; - } - - if(include_exited_childs) { - // Read parents before childs - // This is needed to prevent a situation where - // a child is found running, but until we read - // its parent, it has exited and its parent - // has accumulated its resources. 
- - qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid); - - // read all running processes, parents first - // collect_data_for_pid() is smart enough - // not to read the same pid twice per iteration - for(slc = 0; slc < all_pids_count; slc++) - collect_data_for_pid(all_pids_sortlist[slc], NULL); - } -#endif - } - -#ifdef __FreeBSD__ - for (i = 0 ; i < procnum ; ++i) { - pid_t pid = procbase[i].ki_pid; - collect_data_for_pid(pid, &procbase[i]); - } -#else - char dirname[FILENAME_MAX + 1]; - - snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix); - DIR *dir = opendir(dirname); - if(!dir) return 0; - - struct dirent *de = NULL; - - while((de = readdir(dir))) { - char *endptr = de->d_name; - - if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9')) - continue; - - pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10); - - // make sure we read a valid number - if(unlikely(endptr == de->d_name || *endptr != '\0')) - continue; - - collect_data_for_pid(pid, NULL); - } - closedir(dir); -#endif - - if(!all_pids_count) - return 0; - - // we need /proc/stat to normalize the cpu consumption of the exited children - read_proc_stat(); - - // build the process tree - link_all_processes_to_their_parents(); - - // normally we are done here - // however, some processes may have exited while we were collecting values - // so let's find the exited ones - // we do this by collecting the ownership of each process - // if we manage to get the ownership, the process is still running - process_exited_processes(); - - return 1; -} - -// ---------------------------------------------------------------------------- -// update statistics on the targets - -// 1. link all children to their parents -// 2. go from bottom to top, marking as merged all children into their parents -// this step links all parents without a target to the child target, if any -// 3. link all top level processes (the ones not merged) to the default target -// 4. go from top to bottom, linking all children without a target to their parent target -// after this step, all processes have a target -// [5. for each killed pid (updated = 0), remove its usage from its target] -// 6. zero all apps_groups_targets -// 7. concentrate all values on the apps_groups_targets -// 8. remove all killed processes -// 9. 
find the unique file count for each target -// check: update_apps_groups_statistics() - -static void cleanup_exited_pids(void) { - int c; - struct pid_stat *p = NULL; - - for(p = root_of_pids; p ;) { - if(!p->updated && (!p->keep || p->keeploops > 0)) { - if(unlikely(debug && (p->keep || p->keeploops))) - fprintf(stderr, " > CLEANUP cannot keep exited process %d (%s) anymore - removing it.\n", p->pid, p->comm); - - for(c = 0; c < p->fds_size; c++) - if(p->fds[c] > 0) { - file_descriptor_not_used(p->fds[c]); - p->fds[c] = 0; - } - - pid_t r = p->pid; - p = p->next; - del_pid_entry(r); - } - else { - if(unlikely(p->keep)) p->keeploops++; - p->keep = 0; - p = p->next; - } - } -} - -static void apply_apps_groups_targets_inheritance(void) { - struct pid_stat *p = NULL; - - // children that do not have a target - // inherit their target from their parent - int found = 1, loops = 0; - while(found) { - if(unlikely(debug)) loops++; - found = 0; - for(p = root_of_pids; p ; p = p->next) { - // if this process does not have a target - // and it has a parent - // and its parent has a target - // then, give this process its parent's target - if(unlikely(!p->target && p->parent && p->parent->target)) { - p->target = p->parent->target; - found++; - - if(debug || (p->target && p->target->debug)) - fprintf(stderr, "apps.plugin: \t\tTARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).\n", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm); - } - } - } - - // find all the procs with no children and merge them into their parents - // repeat, until nothing more can be done. - int sortlist = 1; - found = 1; - while(found) { - if(unlikely(debug)) loops++; - found = 0; - - for(p = root_of_pids; p ; p = p->next) { - if(unlikely(!p->sortlist && !p->children_count)) - p->sortlist = sortlist++; - - if(unlikely( - !p->children_count // if this process does not have any children - && !p->merged // and is not already merged - && p->parent // and has a parent - && p->parent->children_count // and its parent has children - // and the target of this process and its parent is the same, - // or the parent does not have a target - && (p->target == p->parent->target || !p->parent->target) - && p->ppid != INIT_PID // and its parent is not init - )) { - // mark it as merged - p->parent->children_count--; - p->merged = 1; - - // the parent inherits the child's target, if it does not have a target itself - if(unlikely(p->target && !p->parent->target)) { - p->parent->target = p->target; - - if(debug || (p->target && p->target->debug)) - fprintf(stderr, "apps.plugin: \t\tTARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).\n", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm); - } - - found++; - } - } - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: TARGET INHERITANCE: merged %d processes\n", found); - } - - // init always goes to the default target - if(all_pids[INIT_PID]) - all_pids[INIT_PID]->target = apps_groups_default_target; - - // pid 0 always goes to the default target - if(all_pids[0]) - all_pids[0]->target = apps_groups_default_target; - - // give a default target to all top level processes - if(unlikely(debug)) loops++; - for(p = root_of_pids; p ; p = p->next) { - // if the process is not merged itself - // then it is a top level process - if(unlikely(!p->merged && !p->target)) - p->target = apps_groups_default_target; - - // make sure all processes have a sortlist - if(unlikely(!p->sortlist)) - p->sortlist = sortlist++; - } - - if(all_pids[1]) - 
all_pids[1]->sortlist = sortlist++; - - // give a target to all merged child processes - found = 1; - while(found) { - if(unlikely(debug)) loops++; - found = 0; - for(p = root_of_pids; p ; p = p->next) { - if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) { - p->target = p->parent->target; - found++; - - if(debug || (p->target && p->target->debug)) - fprintf(stderr, "apps.plugin: \t\tTARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.\n", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm); - } - } - } - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: apply_apps_groups_targets_inheritance() made %d loops on the process tree\n", loops); -} - -static size_t zero_all_targets(struct target *root) { - struct target *w; - size_t count = 0; - - for (w = root; w ; w = w->next) { - count++; - - w->minflt = 0; - w->majflt = 0; - w->utime = 0; - w->stime = 0; - w->gtime = 0; - w->cminflt = 0; - w->cmajflt = 0; - w->cutime = 0; - w->cstime = 0; - w->cgtime = 0; - w->num_threads = 0; - // w->rss = 0; - w->processes = 0; - - w->status_vmsize = 0; - w->status_vmrss = 0; - w->status_vmshared = 0; - w->status_rssfile = 0; - w->status_rssshmem = 0; - w->status_vmswap = 0; - - w->io_logical_bytes_read = 0; - w->io_logical_bytes_written = 0; - // w->io_read_calls = 0; - // w->io_write_calls = 0; - w->io_storage_bytes_read = 0; - w->io_storage_bytes_written = 0; - // w->io_cancelled_write_bytes = 0; - - // zero file counters - if(w->target_fds) { - memset(w->target_fds, 0, sizeof(int) * w->target_fds_size); - w->openfiles = 0; - w->openpipes = 0; - w->opensockets = 0; - w->openinotifies = 0; - w->openeventfds = 0; - w->opentimerfds = 0; - w->opensignalfds = 0; - w->openeventpolls = 0; - w->openother = 0; - } - } - - return count; -} - -static inline void reallocate_target_fds(struct target *w) { - if(unlikely(!w)) - return; - - if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) { - w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size); - memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size)); - w->target_fds_size = all_files_size; - } -} - -static inline void aggregate_fd_on_target(int fd, struct target *w) { - if(unlikely(!w)) - return; - - if(unlikely(w->target_fds[fd])) { - // it is already aggregated - // just increase its usage counter - w->target_fds[fd]++; - return; - } - - // increase its usage counter - // so that we will not add it again - w->target_fds[fd]++; - - switch(all_files[fd].type) { - case FILETYPE_FILE: - w->openfiles++; - break; - - case FILETYPE_PIPE: - w->openpipes++; - break; - - case FILETYPE_SOCKET: - w->opensockets++; - break; - - case FILETYPE_INOTIFY: - w->openinotifies++; - break; - - case FILETYPE_EVENTFD: - w->openeventfds++; - break; - - case FILETYPE_TIMERFD: - w->opentimerfds++; - break; - - case FILETYPE_SIGNALFD: - w->opensignalfds++; - break; - - case FILETYPE_EVENTPOLL: - w->openeventpolls++; - break; - - case FILETYPE_OTHER: - w->openother++; - break; - } -} - -static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) { - - if(unlikely(!p->updated)) { - // the process is not running - return; - } - - struct target *w = p->target, *u = p->user_target, *g = p->group_target; - - reallocate_target_fds(w); - reallocate_target_fds(u); - reallocate_target_fds(g); - - int c, size = p->fds_size, *fds = p->fds; - for(c = 0; c < size ;c++) { - int fd = fds[c]; - - if(likely(fd <= 0 || fd >= all_files_size)) - continue; - - 
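/*
 * Editor's note, not part of the original source: each open fd below is
 * aggregated independently on up to three targets - the apps group target
 * (w), the user target (u) and the user group target (g) - so a single
 * process contributes its open files, sockets and pipes to all three
 * chart families at once.
 */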
aggregate_fd_on_target(fd, w); - aggregate_fd_on_target(fd, u); - aggregate_fd_on_target(fd, g); - } -} - -static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) { - (void)o; - - if(unlikely(!p->updated)) { - // the process is not running - return; - } - - if(unlikely(!w)) { - error("pid %d %s was left without a target!", p->pid, p->comm); - return; - } - - w->cutime += p->cutime; - w->cstime += p->cstime; - w->cgtime += p->cgtime; - w->cminflt += p->cminflt; - w->cmajflt += p->cmajflt; - - w->utime += p->utime; - w->stime += p->stime; - w->gtime += p->gtime; - w->minflt += p->minflt; - w->majflt += p->majflt; - - // w->rss += p->rss; - - w->status_vmsize += p->status_vmsize; - w->status_vmrss += p->status_vmrss; - w->status_vmshared += p->status_vmshared; - w->status_rssfile += p->status_rssfile; - w->status_rssshmem += p->status_rssshmem; - w->status_vmswap += p->status_vmswap; - - w->io_logical_bytes_read += p->io_logical_bytes_read; - w->io_logical_bytes_written += p->io_logical_bytes_written; - // w->io_read_calls += p->io_read_calls; - // w->io_write_calls += p->io_write_calls; - w->io_storage_bytes_read += p->io_storage_bytes_read; - w->io_storage_bytes_written += p->io_storage_bytes_written; - // w->io_cancelled_write_bytes += p->io_cancelled_write_bytes; - - w->processes++; - w->num_threads += p->num_threads; - - if(unlikely(debug || w->debug)) - fprintf(stderr, "apps.plugin: \taggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "\n", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt); -} - -static void calculate_netdata_statistics(void) { - - apply_apps_groups_targets_inheritance(); - - zero_all_targets(users_root_target); - zero_all_targets(groups_root_target); - apps_groups_targets_count = zero_all_targets(apps_groups_root_target); - - // this has to be done, before the cleanup - struct pid_stat *p = NULL; - struct target *w = NULL, *o = NULL; - - // concentrate everything on the targets - for(p = root_of_pids; p ; p = p->next) { - - // -------------------------------------------------------------------- - // apps_groups target - - aggregate_pid_on_target(p->target, p, NULL); - - - // -------------------------------------------------------------------- - // user target - - o = p->user_target; - if(likely(p->user_target && p->user_target->uid == p->uid)) - w = p->user_target; - else { - if(unlikely(debug && p->user_target)) - fprintf(stderr, "apps.plugin: \t\tpid %d (%s) switched user from %u (%s) to %u.\n", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid); - - w = p->user_target = get_users_target(p->uid); - } - - aggregate_pid_on_target(w, p, o); - - - // -------------------------------------------------------------------- - // user group target - - o = p->group_target; - if(likely(p->group_target && p->group_target->gid == p->gid)) - w = p->group_target; - else { - if(unlikely(debug && p->group_target)) - fprintf(stderr, "apps.plugin: \t\tpid %d (%s) switched group from %u (%s) to %u.\n", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid); - - w = p->group_target = get_groups_target(p->gid); - } - - aggregate_pid_on_target(w, 
p, o); - - - // -------------------------------------------------------------------- - // aggregate all file descriptors - - if(enable_file_charts) - aggregate_pid_fds_on_targets(p); - } - - cleanup_exited_pids(); -} - -// ---------------------------------------------------------------------------- -// update chart dimensions - -int print_calculated_number(char *str, calculated_number value) { (void)str; (void)value; return 0; } - -static inline void send_BEGIN(const char *type, const char *id, usec_t usec) { - fprintf(stdout, "BEGIN %s.%s %llu\n", type, id, usec); -} - -static inline void send_SET(const char *name, kernel_uint_t value) { - fprintf(stdout, "SET %s = " KERNEL_UINT_FORMAT "\n", name, value); -} - -static inline void send_END(void) { - fprintf(stdout, "END\n"); -} - -void send_resource_usage_to_netdata(usec_t dt) { - static struct timeval last = { 0, 0 }; - static struct rusage me_last; - - struct timeval now; - struct rusage me; - - usec_t cpuuser; - usec_t cpusyst; - - if(!last.tv_sec) { - now_monotonic_timeval(&last); - getrusage(RUSAGE_SELF, &me_last); - - cpuuser = 0; - cpusyst = 0; - } - else { - now_monotonic_timeval(&now); - getrusage(RUSAGE_SELF, &me); - - cpuuser = me.ru_utime.tv_sec * USEC_PER_SEC + me.ru_utime.tv_usec; - cpusyst = me.ru_stime.tv_sec * USEC_PER_SEC + me.ru_stime.tv_usec; - - memmove(&last, &now, sizeof(struct timeval)); - memmove(&me_last, &me, sizeof(struct rusage)); - } - - static char created_charts = 0; - if(unlikely(!created_charts)) { - created_charts = 1; - - fprintf(stdout, - "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n" - "DIMENSION user '' incremental 1 1000\n" - "DIMENSION system '' incremental 1 1000\n" - "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n" - "DIMENSION calls '' incremental 1 1\n" - "DIMENSION files '' incremental 1 1\n" - "DIMENSION pids '' absolute 1 1\n" - "DIMENSION fds '' absolute 1 1\n" - "DIMENSION targets '' absolute 1 1\n" - "DIMENSION new_pids 'new pids' incremental 1 1\n" - , update_every - ); - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) - fprintf(stdout, - "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n" - "DIMENSION utime '' absolute 1 %2$llu\n" - "DIMENSION stime '' absolute 1 %2$llu\n" - "DIMENSION gtime '' absolute 1 %2$llu\n" - "DIMENSION minflt '' absolute 1 %2$llu\n" - "DIMENSION majflt '' absolute 1 %2$llu\n" - , update_every - , RATES_DETAIL - ); - - if(include_exited_childs) - fprintf(stdout, - "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n" - "DIMENSION cutime '' absolute 1 %2$llu\n" - "DIMENSION cstime '' absolute 1 %2$llu\n" - "DIMENSION cgtime '' absolute 1 %2$llu\n" - "DIMENSION cminflt '' absolute 1 %2$llu\n" - "DIMENSION cmajflt '' absolute 1 %2$llu\n" - , update_every - , RATES_DETAIL - ); -#endif - - } - - fprintf(stdout, - "BEGIN netdata.apps_cpu %llu\n" - "SET user = %llu\n" - "SET system = %llu\n" - "END\n" - "BEGIN netdata.apps_sizes %llu\n" - "SET calls = %zu\n" - "SET files = %zu\n" - "SET pids = %zu\n" - "SET fds = %d\n" - "SET targets = %zu\n" - "SET new_pids = %zu\n" - "END\n" - , dt - , cpuuser - , cpusyst - , dt - , calls_counter - , file_counter - , all_pids_count - , all_files_len - , apps_groups_targets_count - , targets_assignment_counter - ); - -#if 
(ALL_PIDS_ARE_READ_INSTANTLY == 0) - fprintf(stdout, - "BEGIN netdata.apps_fix %llu\n" - "SET utime = %u\n" - "SET stime = %u\n" - "SET gtime = %u\n" - "SET minflt = %u\n" - "SET majflt = %u\n" - "END\n" - , dt - , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL) - ); - - if(include_exited_childs) - fprintf(stdout, - "BEGIN netdata.apps_children_fix %llu\n" - "SET cutime = %u\n" - "SET cstime = %u\n" - "SET cgtime = %u\n" - "SET cminflt = %u\n" - "SET cmajflt = %u\n" - "END\n" - , dt - , (unsigned int)(cutime_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(cstime_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(cgtime_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL) - , (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL) - ); -#endif -} - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -static void normalize_utilization(struct target *root) { - struct target *w; - - // children processing introduces spikes - // here we try to eliminate them by disabling children processing, either for specific dimensions - // or entirely. Either way, we disable it for a single iteration only. - - kernel_uint_t max_time = processors * hz * RATES_DETAIL; - kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0; - - if(global_utime > max_time) global_utime = max_time; - if(global_stime > max_time) global_stime = max_time; - if(global_gtime > max_time) global_gtime = max_time; - - for(w = root; w ; w = w->next) { - if(w->target || (!w->processes && !w->exposed)) continue; - - utime += w->utime; - stime += w->stime; - gtime += w->gtime; - cutime += w->cutime; - cstime += w->cstime; - cgtime += w->cgtime; - - minflt += w->minflt; - majflt += w->majflt; - cminflt += w->cminflt; - cmajflt += w->cmajflt; - } - - if((global_utime || global_stime || global_gtime) && (utime || stime || gtime)) { - if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) { - // everything we collected fits - utime_fix_ratio = - stime_fix_ratio = - gtime_fix_ratio = - cutime_fix_ratio = - cstime_fix_ratio = - cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime); - } - else if(global_utime + global_stime > utime + stime) { - // children's resources are too high - // lower only the children's resources - utime_fix_ratio = - stime_fix_ratio = - gtime_fix_ratio = 1.0; - cutime_fix_ratio = - cstime_fix_ratio = - cgtime_fix_ratio = (double)((global_utime + global_stime) - (utime + stime)) / (double)(cutime + cstime); - } - else { - // even the running processes' resources are unrealistic - // zero the children's resources - // lower the running processes' resources - utime_fix_ratio = - stime_fix_ratio = - gtime_fix_ratio = (double)(global_utime + global_stime) / (double)(utime + stime); - cutime_fix_ratio = - cstime_fix_ratio = - cgtime_fix_ratio = 0.0; - } - } - else { - utime_fix_ratio = - stime_fix_ratio = - gtime_fix_ratio = - cutime_fix_ratio = - cstime_fix_ratio = - cgtime_fix_ratio = 0.0; - } - - if(utime_fix_ratio > 1.0) utime_fix_ratio = 1.0; - if(cutime_fix_ratio > 1.0) cutime_fix_ratio = 1.0; - if(stime_fix_ratio > 1.0) stime_fix_ratio = 1.0; - if(cstime_fix_ratio > 1.0) cstime_fix_ratio = 1.0; - 
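/*
 * Editor's note - a worked example with made-up numbers, not part of the
 * original source: the ratios computed above scale the collected times so
 * that their sum never exceeds what /proc/stat reported for the whole
 * system. If the system reported global_utime + global_stime = 1000 ticks,
 * the running processes accounted for utime + stime = 800 and their exited
 * children for cutime + cstime = 400, the middle branch applies and
 *
 *     cutime_fix_ratio = (1000 - 800) / 400 = 0.5
 *
 * i.e. only half of the children's time is charged, while the running
 * processes keep a ratio of 1.0. The clamps around this note then cap
 * every ratio at 1.0.
 */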
if(gtime_fix_ratio > 1.0) gtime_fix_ratio = 1.0; - if(cgtime_fix_ratio > 1.0) cgtime_fix_ratio = 1.0; - - // if(utime_fix_ratio < 0.0) utime_fix_ratio = 0.0; - // if(cutime_fix_ratio < 0.0) cutime_fix_ratio = 0.0; - // if(stime_fix_ratio < 0.0) stime_fix_ratio = 0.0; - // if(cstime_fix_ratio < 0.0) cstime_fix_ratio = 0.0; - // if(gtime_fix_ratio < 0.0) gtime_fix_ratio = 0.0; - // if(cgtime_fix_ratio < 0.0) cgtime_fix_ratio = 0.0; - - // FIXME - // we use cpu time to normalize page faults - // the problem is that to find the proper max values - // for page faults we have to parse /proc/vmstat - // which is quite big to do it again (netdata does it already) - // - // a better solution could be to somehow have netdata - // do this normalization for us - - if(utime || stime || gtime) - majflt_fix_ratio = - minflt_fix_ratio = (double)(utime * utime_fix_ratio + stime * stime_fix_ratio + gtime * gtime_fix_ratio) / (double)(utime + stime + gtime); - else - minflt_fix_ratio = - majflt_fix_ratio = 1.0; - - if(cutime || cstime || cgtime) - cmajflt_fix_ratio = - cminflt_fix_ratio = (double)(cutime * cutime_fix_ratio + cstime * cstime_fix_ratio + cgtime * cgtime_fix_ratio) / (double)(cutime + cstime + cgtime); - else - cminflt_fix_ratio = - cmajflt_fix_ratio = 1.0; - - // the report - - if(unlikely(debug)) { - fprintf(stderr, - "SYSTEM: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " " - "COLLECTED: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " " - "DELTA: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " " - "FIX: u=%0.2f s=%0.2f g=%0.2f cu=%0.2f cs=%0.2f cg=%0.2f " - "FINALLY: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " " - "\n" - , global_utime - , global_stime - , global_gtime - , utime - , stime - , gtime - , cutime - , cstime - , cgtime - , utime + cutime - global_utime - , stime + cstime - global_stime - , gtime + cgtime - global_gtime - , utime_fix_ratio - , stime_fix_ratio - , gtime_fix_ratio - , cutime_fix_ratio - , cstime_fix_ratio - , cgtime_fix_ratio - , (kernel_uint_t)(utime * utime_fix_ratio) - , (kernel_uint_t)(stime * stime_fix_ratio) - , (kernel_uint_t)(gtime * gtime_fix_ratio) - , (kernel_uint_t)(cutime * cutime_fix_ratio) - , (kernel_uint_t)(cstime * cstime_fix_ratio) - , (kernel_uint_t)(cgtime * cgtime_fix_ratio) - ); - } -} -#else // ALL_PIDS_ARE_READ_INSTANTLY == 1 -static void normalize_utilization(struct target *root) { - (void)root; -} -#endif // ALL_PIDS_ARE_READ_INSTANTLY - -static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) { - struct target *w; - - send_BEGIN(type, "cpu", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (kernel_uint_t)(w->stime * stime_fix_ratio) + (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio) + (kernel_uint_t)(w->cstime * cstime_fix_ratio) + (kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL)); - } - send_END(); - - send_BEGIN(type, "cpu_user", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio)):0ULL)); - } - send_END(); - - send_BEGIN(type, 
"cpu_system", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cstime * cstime_fix_ratio)):0ULL)); - } - send_END(); - - if(show_guest_time) { - send_BEGIN(type, "cpu_guest", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL)); - } - send_END(); - } - - send_BEGIN(type, "threads", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->num_threads); - } - send_END(); - - send_BEGIN(type, "processes", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->processes); - } - send_END(); - - send_BEGIN(type, "mem", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared):0ULL); - } - send_END(); - - send_BEGIN(type, "vmem", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->status_vmsize); - } - send_END(); - -#ifndef __FreeBSD__ - send_BEGIN(type, "swap", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->status_vmswap); - } - send_END(); -#endif - - send_BEGIN(type, "minor_faults", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)):0ULL)); - } - send_END(); - - send_BEGIN(type, "major_faults", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)):0ULL)); - } - send_END(); - -#ifndef __FreeBSD__ - send_BEGIN(type, "lreads", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->io_logical_bytes_read); - } - send_END(); - - send_BEGIN(type, "lwrites", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->io_logical_bytes_written); - } - send_END(); -#endif - - send_BEGIN(type, "preads", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->io_storage_bytes_read); - } - send_END(); - - send_BEGIN(type, "pwrites", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - send_SET(w->name, w->io_storage_bytes_written); - } - send_END(); - - if(enable_file_charts) { - send_BEGIN(type, "files", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - send_SET(w->name, w->openfiles); - } - send_END(); - - send_BEGIN(type, "sockets", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - send_SET(w->name, w->opensockets); - } - send_END(); - - send_BEGIN(type, "pipes", dt); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - send_SET(w->name, w->openpipes); - } - send_END(); - } -} - - -// ---------------------------------------------------------------------------- -// generate the charts - -static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *title) -{ - struct target *w; - int newly_added = 0; - - for(w = root ; w ; w = w->next) { - if (w->target) continue; - - if (!w->exposed && w->processes) { - newly_added++; - w->exposed = 1; - if (debug || w->debug) 
fprintf(stderr, "apps.plugin: %s just added - regenerating charts.\n", w->name); - } - } - - // nothing more to show - if(!newly_added && show_guest_time == show_guest_time_old) return; - - // we have something new to show - // update the charts - fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, hz * RATES_DETAIL / 100, w->hidden ? "hidden" : ""); - } - - fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); - } - - fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); - } - - fprintf(stdout, "CHART %s.threads '' '%s Threads' 'threads' processes %s.threads stacked 20006 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - - fprintf(stdout, "CHART %s.processes '' '%s Processes' 'processes' processes %s.processes stacked 20007 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - - fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, hz * RATES_DETAIL / 100LLU); - } - - fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, hz * RATES_DETAIL / 100LLU); - } - - if(show_guest_time) { - fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? 
"s" : "", type, update_every); - for (w = root; w; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, hz * RATES_DETAIL / 100LLU); - } - } - -#ifndef __FreeBSD__ - fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); - } -#endif - - fprintf(stdout, "CHART %s.major_faults '' '%s Major Page Faults (swap read)' 'page faults/s' swap %s.major_faults stacked 20012 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - - fprintf(stdout, "CHART %s.minor_faults '' '%s Minor Page Faults' 'page faults/s' mem %s.minor_faults stacked 20011 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - -#ifdef __FreeBSD__ - fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'blocks/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } - - fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'blocks/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); - } -#else - fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'kilobytes/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - - fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'kilobytes/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - - fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'kilobytes/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } - - fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'kilobytes/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); - } -#endif - - if(enable_file_charts) { - fprintf(stdout, "CHART %s.files '' '%s Open Files' 'open files' disk %s.files stacked 20050 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - - fprintf(stdout, "CHART %s.sockets '' '%s Open Sockets' 'open sockets' net %s.sockets stacked 20051 %d\n", - type, title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - - fprintf(stdout, "CHART %s.pipes '' '%s Pipes' 'open 
pipes' processes %s.pipes stacked 20053 %d\n", type, - title, type, update_every); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } - } -} - - -// ---------------------------------------------------------------------------- -// parse command line arguments - -int check_proc_1_io() { - int ret = 0; - - procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(!ff) goto cleanup; - - ff = procfile_readall(ff); - if(!ff) goto cleanup; - - ret = 1; - -cleanup: - procfile_close(ff); - return ret; -} - -static void parse_args(int argc, char **argv) -{ - int i, freq = 0; - char *name = NULL; - - for(i = 1; i < argc; i++) { - if(!freq) { - int n = (int)str2l(argv[i]); - if(n > 0) { - freq = n; - continue; - } - } - - if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) { - printf("apps.plugin %s\n", VERSION); - exit(0); - } - - if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) { - if(!check_proc_1_io()) { - perror("Tried to read /proc/1/io and it failed"); - exit(1); - } - printf("OK\n"); - exit(0); - } - - if(strcmp("debug", argv[i]) == 0) { - debug = 1; - // debug_flags = 0xffffffff; - continue; - } - - if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) { - include_exited_childs = 0; - continue; - } - - if(strcmp("with-childs", argv[i]) == 0) { - include_exited_childs = 1; - continue; - } - - if(strcmp("with-guest", argv[i]) == 0) { - enable_guest_charts = 1; - continue; - } - - if(strcmp("no-guest", argv[i]) == 0 || strcmp("without-guest", argv[i]) == 0) { - enable_guest_charts = 0; - continue; - } - - if(strcmp("with-files", argv[i]) == 0) { - enable_file_charts = 1; - continue; - } - - if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) { - enable_file_charts = 0; - continue; - } - - if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) { - enable_users_charts = 0; - continue; - } - - if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) { - enable_groups_charts = 0; - continue; - } - - if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) { - fprintf(stderr, - "\n" - " netdata apps.plugin %s\n" - " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n" - " Released under GNU General Public License v3 or later.\n" - " All rights reserved.\n" - "\n" - " This program is a data collector plugin for netdata.\n" - "\n" - " Available command line options:\n" - "\n" - " SECONDS set the data collection frequency\n" - "\n" - " debug enable debugging (lot of output)\n" - "\n" - " with-childs\n" - " without-childs enable / disable aggregating exited\n" - " children resources into parents\n" - " (default is enabled)\n" - "\n" - " with-guest\n" - " without-guest enable / disable reporting guest charts\n" - " (default is disabled)\n" - "\n" - " with-files\n" - " without-files enable / disable reporting files, sockets, pipes\n" - " (default is enabled)\n" - "\n" - " NAME read apps_NAME.conf instead of\n" - " apps_groups.conf\n" - " (default NAME=groups)\n" - "\n" - " version or -v or -V print program version and exit\n" - "\n" - , VERSION - ); - exit(1); - } - - if(!name) { - name = argv[i]; - continue; - } - - error("Cannot understand option %s", argv[i]); - exit(1); - } - - if(freq > 0) update_every = freq; - if(!name) name = "groups"; - 
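/*
 * Editor's note - an illustrative usage example, not part of the original
 * source: in parse_args() above, the first bare positive number becomes the
 * collection frequency and the first unrecognized word becomes NAME
 * (default "groups"), so both invocations below collect every 5 seconds
 * using apps_groups.conf:
 *
 *     apps.plugin 5
 *     apps.plugin 5 groups
 *
 * read_apps_groups_conf() below then loads apps_NAME.conf from the
 * configured config directory, as the failure message indicates.
 */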
- if(read_apps_groups_conf(name)) { - error("Cannot read process groups '%s/apps_%s.conf'. There are no internal defaults. Failing.", config_dir, name); - exit(1); - } -} - -static int am_i_running_as_root() { - uid_t uid = getuid(), euid = geteuid(); - - if(uid == 0 || euid == 0) { - if(debug) info("I am running with escalated privileges, uid = %u, euid = %u.", uid, euid); - return 1; - } - - if(debug) info("I am not running with escalated privileges, uid = %u, euid = %u.", uid, euid); - return 0; -} - -#ifdef HAVE_CAPABILITY -static int check_capabilities() { - cap_t caps = cap_get_proc(); - if(!caps) { - error("Cannot get current capabilities."); - return 0; - } - else if(debug) - info("Received my capabilities from the system."); - - int ret = 1; - - cap_flag_value_t cfv = CAP_CLEAR; - if(cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) { - error("Cannot find if CAP_DAC_READ_SEARCH is effective."); - ret = 0; - } - else { - if(cfv != CAP_SET) { - error("apps.plugin should run with CAP_DAC_READ_SEARCH."); - ret = 0; - } - else if(debug) - info("apps.plugin runs with CAP_DAC_READ_SEARCH."); - } - - cfv = CAP_CLEAR; - if(cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &cfv) == -1) { - error("Cannot find if CAP_SYS_PTRACE is effective."); - ret = 0; - } - else { - if(cfv != CAP_SET) { - error("apps.plugin should run with CAP_SYS_PTRACE."); - ret = 0; - } - else if(debug) - info("apps.plugin runs with CAP_SYS_PTRACE."); - } - - cap_free(caps); - - return ret; -} -#else -static int check_capabilities() { - return 0; -} -#endif - -int main(int argc, char **argv) { - // debug_flags = D_PROCFILE; - - pagesize = (size_t)sysconf(_SC_PAGESIZE); - - // set the name for logging - program_name = "apps.plugin"; - - // disable syslog for apps.plugin - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; - - netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); - if(netdata_configured_host_prefix == NULL) { - // info("NETDATA_HOST_PREFIX is not passed from netdata"); - netdata_configured_host_prefix = ""; - } - // else info("Found NETDATA_HOST_PREFIX='%s'", netdata_configured_host_prefix); - - config_dir = getenv("NETDATA_CONFIG_DIR"); - if(config_dir == NULL) { - // info("NETDATA_CONFIG_DIR is not passed from netdata"); - config_dir = CONFIG_DIR; - } - // else info("Found NETDATA_CONFIG_DIR='%s'", config_dir); - -#ifdef NETDATA_INTERNAL_CHECKS - if(debug_flags != 0) { - struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; - if(setrlimit(RLIMIT_CORE, &rl) != 0) - info("Cannot request unlimited core dumps for debugging... Proceeding anyway..."); -#ifdef HAVE_SYS_PRCTL_H - prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); -#endif - } -#endif /* NETDATA_INTERNAL_CHECKS */ - - procfile_adaptive_initial_allocation = 1; - - time_t started_t = now_monotonic_sec(); - get_system_HZ(); - get_system_pid_max(); - get_system_cpus(); - - parse_args(argc, argv); - - if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) { - uid_t uid = getuid(), euid = geteuid(); -#ifdef HAVE_CAPABILITY - error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " - "Without these, apps.plugin cannot report disk I/O utilization of other processes. 
" - "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; " - "To enable setuid to root run: sudo chown root %s; sudo chmod 4755 %s; " - , uid, euid, argv[0], argv[0], argv[0] - ); -#else - error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " - "Without these, apps.plugin cannot report disk I/O utilization of other processes. " - "Your system does not support capabilities. " - "To enable setuid to root run: sudo chown root %s; sudo chmod 4755 %s; " - , uid, euid, argv[0], argv[0] - ); -#endif - } - - info("started on pid %d", getpid()); - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) - all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max); -#endif - - all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max); - - usec_t step = update_every * USEC_PER_SEC; - global_iterations_counter = 1; - heartbeat_t hb; - heartbeat_init(&hb); - for(;1; global_iterations_counter++) { - -#ifdef NETDATA_PROFILING -#warning "compiling for profiling" - static int profiling_count=0; - profiling_count++; - if(unlikely(profiling_count > 2000)) exit(0); - usec_t dt = update_every * USEC_PER_SEC; -#else - usec_t dt = heartbeat_next(&hb, step); -#endif - - if(!collect_data_for_all_processes()) { - error("Cannot collect /proc data for running processes. Disabling apps.plugin..."); - printf("DISABLE\n"); - exit(1); - } - - calculate_netdata_statistics(); - normalize_utilization(apps_groups_root_target); - - send_resource_usage_to_netdata(dt); - - // this is smart enough to show only newly added apps, when needed - send_charts_updates_to_netdata(apps_groups_root_target, "apps", "Apps"); - - if(likely(enable_users_charts)) - send_charts_updates_to_netdata(users_root_target, "users", "Users"); - - if(likely(enable_groups_charts)) - send_charts_updates_to_netdata(groups_root_target, "groups", "User Groups"); - - send_collected_data_to_netdata(apps_groups_root_target, "apps", dt); - - if(likely(enable_users_charts)) - send_collected_data_to_netdata(users_root_target, "users", dt); - - if(likely(enable_groups_charts)) - send_collected_data_to_netdata(groups_root_target, "groups", dt); - - fflush(stdout); - - show_guest_time_old = show_guest_time; - - if(unlikely(debug)) - fprintf(stderr, "apps.plugin: done Loop No %zu\n", global_iterations_counter); - - // restart check (14400 seconds) - if(now_monotonic_sec() - started_t > 14400) exit(0); - } -} diff --git a/src/avl.c b/src/avl.c deleted file mode 100644 index a2c6911e7..000000000 --- a/src/avl.c +++ /dev/null @@ -1,403 +0,0 @@ -#include "common.h" - -/* ------------------------------------------------------------------------- */ -/* - * avl_insert(), avl_remove() and avl_search() - * are adaptations (by Costa Tsaousis) of the AVL algorithm found in libavl - * v2.0.3, so that they do not use any memory allocations and their memory - * footprint is optimized (by eliminating non-necessary data members). - * - * libavl - library for manipulation of binary trees. - * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004 Free Software - * Foundation, Inc. - * GNU Lesser General Public License -*/ - - -/* Search |tree| for an item matching |item|, and return it if found. - Otherwise return |NULL|. 
*/ -avl *avl_search(avl_tree *tree, avl *item) { - avl *p; - - // assert (tree != NULL && item != NULL); - - for (p = tree->root; p != NULL; ) { - int cmp = tree->compar(item, p); - - if (cmp < 0) - p = p->avl_link[0]; - else if (cmp > 0) - p = p->avl_link[1]; - else /* |cmp == 0| */ - return p; - } - - return NULL; -} - -/* Inserts |item| into |tree| and returns a pointer to |item|'s address. - If a duplicate item is found in the tree, - returns a pointer to the duplicate without inserting |item|. - */ -avl *avl_insert(avl_tree *tree, avl *item) { - avl *y, *z; /* Top node to update balance factor, and parent. */ - avl *p, *q; /* Iterator, and parent. */ - avl *n; /* Newly inserted node. */ - avl *w; /* New root of rebalanced subtree. */ - int dir; /* Direction to descend. */ - - unsigned char da[AVL_MAX_HEIGHT]; /* Cached comparison results. */ - int k = 0; /* Number of cached results. */ - - // assert(tree != NULL && item != NULL); - - z = (avl *) &tree->root; - y = tree->root; - dir = 0; - for (q = z, p = y; p != NULL; q = p, p = p->avl_link[dir]) { - int cmp = tree->compar(item, p); - if (cmp == 0) - return p; - - if (p->avl_balance != 0) - z = q, y = p, k = 0; - da[k++] = dir = (cmp > 0); - } - - n = q->avl_link[dir] = item; - - // tree->avl_count++; - n->avl_link[0] = n->avl_link[1] = NULL; - n->avl_balance = 0; - if (y == NULL) return n; - - for (p = y, k = 0; p != n; p = p->avl_link[da[k]], k++) - if (da[k] == 0) - p->avl_balance--; - else - p->avl_balance++; - - if (y->avl_balance == -2) { - avl *x = y->avl_link[0]; - if (x->avl_balance == -1) { - w = x; - y->avl_link[0] = x->avl_link[1]; - x->avl_link[1] = y; - x->avl_balance = y->avl_balance = 0; - } - else { - // assert (x->avl_balance == +1); - w = x->avl_link[1]; - x->avl_link[1] = w->avl_link[0]; - w->avl_link[0] = x; - y->avl_link[0] = w->avl_link[1]; - w->avl_link[1] = y; - if (w->avl_balance == -1) - x->avl_balance = 0, y->avl_balance = +1; - else if (w->avl_balance == 0) - x->avl_balance = y->avl_balance = 0; - else /* |w->avl_balance == +1| */ - x->avl_balance = -1, y->avl_balance = 0; - w->avl_balance = 0; - } - } - else if (y->avl_balance == +2) { - avl *x = y->avl_link[1]; - if (x->avl_balance == +1) { - w = x; - y->avl_link[1] = x->avl_link[0]; - x->avl_link[0] = y; - x->avl_balance = y->avl_balance = 0; - } - else { - // assert (x->avl_balance == -1); - w = x->avl_link[0]; - x->avl_link[0] = w->avl_link[1]; - w->avl_link[1] = x; - y->avl_link[1] = w->avl_link[0]; - w->avl_link[0] = y; - if (w->avl_balance == +1) - x->avl_balance = 0, y->avl_balance = -1; - else if (w->avl_balance == 0) - x->avl_balance = y->avl_balance = 0; - else /* |w->avl_balance == -1| */ - x->avl_balance = +1, y->avl_balance = 0; - w->avl_balance = 0; - } - } - else return n; - - z->avl_link[y != z->avl_link[0]] = w; - - // tree->avl_generation++; - return n; -} - -/* Deletes from |tree| and returns an item matching |item|. - Returns a null pointer if no matching item found. */ -avl *avl_remove(avl_tree *tree, avl *item) { - /* Stack of nodes. */ - avl *pa[AVL_MAX_HEIGHT]; /* Nodes. */ - unsigned char da[AVL_MAX_HEIGHT]; /* |avl_link[]| indexes. */ - int k; /* Stack pointer. */ - - avl *p; /* Traverses tree to find node to delete. */ - int cmp; /* Result of comparison between |item| and |p|. 
*/ - - // assert (tree != NULL && item != NULL); - - k = 0; - p = (avl *) &tree->root; - for(cmp = -1; cmp != 0; cmp = tree->compar(item, p)) { - int dir = (cmp > 0); - - pa[k] = p; - da[k++] = dir; - - p = p->avl_link[dir]; - if(p == NULL) return NULL; - } - - item = p; - - if (p->avl_link[1] == NULL) - pa[k - 1]->avl_link[da[k - 1]] = p->avl_link[0]; - else { - avl *r = p->avl_link[1]; - if (r->avl_link[0] == NULL) { - r->avl_link[0] = p->avl_link[0]; - r->avl_balance = p->avl_balance; - pa[k - 1]->avl_link[da[k - 1]] = r; - da[k] = 1; - pa[k++] = r; - } - else { - avl *s; - int j = k++; - - for (;;) { - da[k] = 0; - pa[k++] = r; - s = r->avl_link[0]; - if (s->avl_link[0] == NULL) break; - - r = s; - } - - s->avl_link[0] = p->avl_link[0]; - r->avl_link[0] = s->avl_link[1]; - s->avl_link[1] = p->avl_link[1]; - s->avl_balance = p->avl_balance; - - pa[j - 1]->avl_link[da[j - 1]] = s; - da[j] = 1; - pa[j] = s; - } - } - - // assert (k > 0); - while (--k > 0) { - avl *y = pa[k]; - - if (da[k] == 0) { - y->avl_balance++; - if (y->avl_balance == +1) break; - else if (y->avl_balance == +2) { - avl *x = y->avl_link[1]; - if (x->avl_balance == -1) { - avl *w; - // assert (x->avl_balance == -1); - w = x->avl_link[0]; - x->avl_link[0] = w->avl_link[1]; - w->avl_link[1] = x; - y->avl_link[1] = w->avl_link[0]; - w->avl_link[0] = y; - if (w->avl_balance == +1) - x->avl_balance = 0, y->avl_balance = -1; - else if (w->avl_balance == 0) - x->avl_balance = y->avl_balance = 0; - else /* |w->avl_balance == -1| */ - x->avl_balance = +1, y->avl_balance = 0; - w->avl_balance = 0; - pa[k - 1]->avl_link[da[k - 1]] = w; - } - else { - y->avl_link[1] = x->avl_link[0]; - x->avl_link[0] = y; - pa[k - 1]->avl_link[da[k - 1]] = x; - if (x->avl_balance == 0) { - x->avl_balance = -1; - y->avl_balance = +1; - break; - } - else x->avl_balance = y->avl_balance = 0; - } - } - } - else - { - y->avl_balance--; - if (y->avl_balance == -1) break; - else if (y->avl_balance == -2) { - avl *x = y->avl_link[0]; - if (x->avl_balance == +1) { - avl *w; - // assert (x->avl_balance == +1); - w = x->avl_link[1]; - x->avl_link[1] = w->avl_link[0]; - w->avl_link[0] = x; - y->avl_link[0] = w->avl_link[1]; - w->avl_link[1] = y; - if (w->avl_balance == -1) - x->avl_balance = 0, y->avl_balance = +1; - else if (w->avl_balance == 0) - x->avl_balance = y->avl_balance = 0; - else /* |w->avl_balance == +1| */ - x->avl_balance = -1, y->avl_balance = 0; - w->avl_balance = 0; - pa[k - 1]->avl_link[da[k - 1]] = w; - } - else { - y->avl_link[0] = x->avl_link[1]; - x->avl_link[1] = y; - pa[k - 1]->avl_link[da[k - 1]] = x; - if (x->avl_balance == 0) { - x->avl_balance = +1; - y->avl_balance = -1; - break; - } - else x->avl_balance = y->avl_balance = 0; - } - } - } - } - - // tree->avl_count--; - // tree->avl_generation++; - return item; -} - -/* ------------------------------------------------------------------------- */ -// below are functions by (C) Costa Tsaousis - -// --------------------------- -// traversing - -int avl_walker(avl *node, int (*callback)(void *entry, void *data), void *data) { - int total = 0, ret = 0; - - if(node->avl_link[0]) { - ret = avl_walker(node->avl_link[0], callback, data); - if(ret < 0) return ret; - total += ret; - } - - ret = callback(node, data); - if(ret < 0) return ret; - total += ret; - - if(node->avl_link[1]) { - ret = avl_walker(node->avl_link[1], callback, data); - if (ret < 0) return ret; - total += ret; - } - - return total; -} - -int avl_traverse(avl_tree *t, int (*callback)(void *entry, void *data), void *data) 
{ - if(t->root) - return avl_walker(t->root, callback, data); - else - return 0; -} - -// --------------------------- -// locks - -void avl_read_lock(avl_tree_lock *t) { -#ifndef AVL_WITHOUT_PTHREADS -#ifdef AVL_LOCK_WITH_MUTEX - netdata_mutex_lock(&t->mutex); -#else - netdata_rwlock_rdlock(&t->rwlock); -#endif -#endif /* AVL_WITHOUT_PTHREADS */ -} - -void avl_write_lock(avl_tree_lock *t) { -#ifndef AVL_WITHOUT_PTHREADS -#ifdef AVL_LOCK_WITH_MUTEX - netdata_mutex_lock(&t->mutex); -#else - netdata_rwlock_wrlock(&t->rwlock); -#endif -#endif /* AVL_WITHOUT_PTHREADS */ -} - -void avl_unlock(avl_tree_lock *t) { -#ifndef AVL_WITHOUT_PTHREADS -#ifdef AVL_LOCK_WITH_MUTEX - netdata_mutex_unlock(&t->mutex); -#else - netdata_rwlock_unlock(&t->rwlock); -#endif -#endif /* AVL_WITHOUT_PTHREADS */ -} - -// --------------------------- -// operations with locking - -void avl_init_lock(avl_tree_lock *t, int (*compar)(void *a, void *b)) { - avl_init(&t->avl_tree, compar); - -#ifndef AVL_WITHOUT_PTHREADS - int lock; - -#ifdef AVL_LOCK_WITH_MUTEX - lock = netdata_mutex_init(&t->mutex, NULL); -#else - lock = netdata_rwlock_init(&t->rwlock); -#endif - - if(lock != 0) - fatal("Failed to initialize AVL mutex/rwlock, error: %d", lock); - -#endif /* AVL_WITHOUT_PTHREADS */ -} - -avl *avl_search_lock(avl_tree_lock *t, avl *a) { - avl_read_lock(t); - avl *ret = avl_search(&t->avl_tree, a); - avl_unlock(t); - return ret; -} - -avl * avl_remove_lock(avl_tree_lock *t, avl *a) { - avl_write_lock(t); - avl *ret = avl_remove(&t->avl_tree, a); - avl_unlock(t); - return ret; -} - -avl *avl_insert_lock(avl_tree_lock *t, avl *a) { - avl_write_lock(t); - avl * ret = avl_insert(&t->avl_tree, a); - avl_unlock(t); - return ret; -} - -int avl_traverse_lock(avl_tree_lock *t, int (*callback)(void *entry, void *data), void *data) { - int ret; - avl_read_lock(t); - ret = avl_traverse(&t->avl_tree, callback, data); - avl_unlock(t); - return ret; -} - -void avl_init(avl_tree *t, int (*compar)(void *a, void *b)) { - t->root = NULL; - t->compar = compar; -} - -// ------------------ \ No newline at end of file diff --git a/src/avl.h b/src/avl.h deleted file mode 100644 index 19648cd1d..000000000 --- a/src/avl.h +++ /dev/null @@ -1,86 +0,0 @@ - -#ifndef _AVL_H -#define _AVL_H 1 - -/* Maximum AVL tree height. */ -#ifndef AVL_MAX_HEIGHT -#define AVL_MAX_HEIGHT 92 -#endif - -#ifndef AVL_WITHOUT_PTHREADS -#include <pthread.h> - -// #define AVL_LOCK_WITH_MUTEX 1 - -#ifdef AVL_LOCK_WITH_MUTEX -#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER -#else /* AVL_LOCK_WITH_MUTEX */ -#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER -#endif /* AVL_LOCK_WITH_MUTEX */ - -#else /* AVL_WITHOUT_PTHREADS */ -#define AVL_LOCK_INITIALIZER -#endif /* AVL_WITHOUT_PTHREADS */ - -/* Data structures */ - -/* One element of the AVL tree */ -typedef struct avl { - struct avl *avl_link[2]; /* Subtrees. */ - signed char avl_balance; /* Balance factor. 
*/ -} avl; - -/* An AVL tree */ -typedef struct avl_tree { - avl *root; - int (*compar)(void *a, void *b); -} avl_tree; - -typedef struct avl_tree_lock { - avl_tree avl_tree; - -#ifndef AVL_WITHOUT_PTHREADS -#ifdef AVL_LOCK_WITH_MUTEX - netdata_mutex_t mutex; -#else /* AVL_LOCK_WITH_MUTEX */ - netdata_rwlock_t rwlock; -#endif /* AVL_LOCK_WITH_MUTEX */ -#endif /* AVL_WITHOUT_PTHREADS */ -} avl_tree_lock; - -/* Public methods */ - -/* Insert element a into the AVL tree t - * returns the added element a, or a pointer to the - * element that is equal to a (as returned by t->compar()) - * a is linked directly to the tree, so it has to - * be properly allocated by the caller. - */ -avl *avl_insert_lock(avl_tree_lock *t, avl *a) NEVERNULL WARNUNUSED; -avl *avl_insert(avl_tree *t, avl *a) NEVERNULL WARNUNUSED; - -/* Remove an element a from the AVL tree t - * returns a pointer to the removed element - * or NULL if an element equal to a is not found - * (equal as returned by t->compar()) - */ -avl *avl_remove_lock(avl_tree_lock *t, avl *a) WARNUNUSED; -avl *avl_remove(avl_tree *t, avl *a) WARNUNUSED; - -/* Find the element in the tree that is equal to a - * (equal as returned by t->compar()) - * returns NULL if no element is equal to a - */ -avl *avl_search_lock(avl_tree_lock *t, avl *a); -avl *avl_search(avl_tree *t, avl *a); - -/* Initialize the avl_tree_lock - */ -void avl_init_lock(avl_tree_lock *t, int (*compar)(void *a, void *b)); -void avl_init(avl_tree *t, int (*compar)(void *a, void *b)); - - -int avl_traverse_lock(avl_tree_lock *t, int (*callback)(void *entry, void *data), void *data); -int avl_traverse(avl_tree *t, int (*callback)(void *entry, void *data), void *data); - -#endif /* avl.h */ diff --git a/src/backend_prometheus.c b/src/backend_prometheus.c deleted file mode 100644 index bfcda9297..000000000 --- a/src/backend_prometheus.c +++ /dev/null @@ -1,479 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// PROMETHEUS -// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts - -static struct prometheus_server { - const char *server; - uint32_t hash; - RRDHOST *host; - time_t last_access; - struct prometheus_server *next; -} *prometheus_server_root = NULL; - -static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now) { - static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER; - - uint32_t hash = simple_hash(server); - - netdata_mutex_lock(&prometheus_server_root_mutex); - - struct prometheus_server *ps; - for(ps = prometheus_server_root; ps ;ps = ps->next) { - if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) { - time_t last = ps->last_access; - ps->last_access = now; - netdata_mutex_unlock(&prometheus_server_root_mutex); - return last; - } - } - - ps = callocz(1, sizeof(struct prometheus_server)); - ps->server = strdupz(server); - ps->hash = hash; - ps->host = host; - ps->last_access = now; - ps->next = prometheus_server_root; - prometheus_server_root = ps; - - netdata_mutex_unlock(&prometheus_server_root_mutex); - return 0; -} - -static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) { - size_t n; - - for(n = 0; *s && n < usable ; d++, s++, n++) { - register char c = *s; - - if(!isalnum(c)) *d = '_'; - else *d = c; - } - *d = '\0'; - - return n; - } - -static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) { - size_t n; - - // make sure we can escape one
character without overflowing the buffer - usable--; - - for(n = 0; *s && n < usable ; d++, s++, n++) { - register char c = *s; - - if(unlikely(c == '"' || c == '\\' || c == '\n')) { - *d++ = '\\'; - n++; - } - *d = c; - } - *d = '\0'; - - return n; -} - -static inline char *prometheus_units_copy(char *d, const char *s, size_t usable) { - const char *sorig = s; - char *ret = d; - size_t n; - - *d++ = '_'; - for(n = 1; *s && n < usable ; d++, s++, n++) { - register char c = *s; - - if(!isalnum(c)) *d = '_'; - else *d = c; - } - - if(n == 2 && sorig[0] == '%') { - n = 0; - d = ret; - s = "_percent"; - for( ; *s && n < usable ; n++) *d++ = *s++; - } - else if(n > 3 && sorig[n-3] == '/' && sorig[n-2] == 's') { - n = n - 2; - d -= 2; - s = "_persec"; - for( ; *s && n < usable ; n++) *d++ = *s++; - } - - *d = '\0'; - - return ret; -} - - -#define PROMETHEUS_ELEMENT_MAX 256 -#define PROMETHEUS_LABELS_MAX 1024 - -static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb, const char *prefix, uint32_t options, time_t after, time_t before, int allhosts, int help, int types, int names, int timestamps) { - rrdhost_rdlock(host); - - char hostname[PROMETHEUS_ELEMENT_MAX + 1]; - prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX); - - char labels[PROMETHEUS_LABELS_MAX + 1] = ""; - if(allhosts) { - if(timestamps) - buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS); - else - buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version); - - if(host->tags && *(host->tags)) { - if(timestamps) { - buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS); - - // deprecated, exists only for compatibility with older queries - buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS); - } - else { - buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, host->tags); - - // deprecated, exists only for compatibility with older queries - buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, host->tags); - } - - } - - snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname); - } - else { - if(timestamps) - buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS); - else - buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version); - - if(host->tags && *(host->tags)) { - if(timestamps) { - buffer_sprintf(wb, "netdata_host_tags_info{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS); - - // deprecated, exists only for compatibility with older queries - buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS); - } - else { - buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", host->tags); - - // deprecated, exists only for compatibility with older queries - buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", host->tags); - } - } - } - - // for each chart - RRDSET *st; - rrdset_foreach_read(st, host) { - char chart[PROMETHEUS_ELEMENT_MAX + 1]; - char context[PROMETHEUS_ELEMENT_MAX + 1]; - char 
family[PROMETHEUS_ELEMENT_MAX + 1]; - char units[PROMETHEUS_ELEMENT_MAX + 1] = ""; - - prometheus_label_copy(chart, (names && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX); - prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX); - prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX); - - if(likely(backends_can_send_rrdset(options, st))) { - rrdset_rdlock(st); - - int as_collected = ((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED); - int homogeneus = 1; - if(as_collected) { - if(rrdset_flag_check(st, RRDSET_FLAG_HOMEGENEOUS_CHECK)) - rrdset_update_heterogeneous_flag(st); - - if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) - homogeneus = 0; - } - else { - if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AVERAGE) - prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX); - } - - if(unlikely(help)) - buffer_sprintf(wb, "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n" - , (homogeneus)?"homogeneus":"heterogeneous" - , (names && st->name) ? st->name : st->id - , st->context - , st->family - , st->units - ); - - // for each dimension - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rd->collections_counter) { - char dimension[PROMETHEUS_ELEMENT_MAX + 1]; - char *suffix = ""; - - if (as_collected) { - // we need as-collected / raw data - - const char *t = "gauge", *h = "gives"; - if(rd->algorithm == RRD_ALGORITHM_INCREMENTAL || - rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) { - t = "counter"; - h = "delta gives"; - suffix = "_total"; - } - - if(homogeneus) { - // all the dimensions of the chart, has the same algorithm, multiplier and divisor - // we add all dimensions as labels - - prometheus_label_copy(dimension, (names && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX); - - if(unlikely(help)) - buffer_sprintf(wb - , "# COMMENT %s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n" - , prefix - , context - , suffix - , (names && st->name) ? st->name : st->id - , st->context - , st->family - , (names && rd->name) ? rd->name : rd->id - , rd->multiplier - , rd->divisor - , h - , st->units - , t - ); - - if(unlikely(types)) - buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s %s\n" - , prefix - , context - , suffix - , t - ); - - if(timestamps) - buffer_sprintf(wb - , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n" - , prefix - , context - , suffix - , chart - , family - , dimension - , labels - , rd->last_collected_value - , timeval_msec(&rd->last_collected_time) - ); - else - buffer_sprintf(wb - , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n" - , prefix - , context - , suffix - , chart - , family - , dimension - , labels - , rd->last_collected_value - ); - } - else { - // the dimensions of the chart, do not have the same algorithm, multiplier or divisor - // we create a metric per dimension - - prometheus_name_copy(dimension, (names && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX); - - if(unlikely(help)) - buffer_sprintf(wb - , "# COMMENT %s_%s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n" - , prefix - , context - , dimension - , suffix - , (names && st->name) ? st->name : st->id - , st->context - , st->family - , (names && rd->name) ? 
rd->name : rd->id - , rd->multiplier - , rd->divisor - , h - , st->units - , t - ); - - if(unlikely(types)) - buffer_sprintf(wb, "# COMMENT TYPE %s_%s_%s%s %s\n" - , prefix - , context - , dimension - , suffix - , t - ); - - if(timestamps) - buffer_sprintf(wb - , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n" - , prefix - , context - , dimension - , suffix - , chart - , family - , labels - , rd->last_collected_value - , timeval_msec(&rd->last_collected_time) - ); - else - buffer_sprintf(wb - , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n" - , prefix - , context - , dimension - , suffix - , chart - , family - , labels - , rd->last_collected_value - ); - } - } - else { - // we need average or sum of the data - - time_t first_t = after, last_t = before; - calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t); - - if(!isnan(value) && !isinf(value)) { - - if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AVERAGE) - suffix = "_average"; - else if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_SUM) - suffix = "_sum"; - - prometheus_label_copy(dimension, (names && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX); - - if (unlikely(help)) - buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n" - , prefix - , context - , units - , suffix - , (names && rd->name) ? rd->name : rd->id - , st->units - , (unsigned long long)first_t - , (unsigned long long)last_t - ); - - if (unlikely(types)) - buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s%s gauge\n" - , prefix - , context - , units - , suffix - ); - - if(timestamps) - buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT " %llu\n" - , prefix - , context - , units - , suffix - , chart - , family - , dimension - , labels - , value - , last_t * MSEC_PER_SEC - ); - else - buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT "\n" - , prefix - , context - , units - , suffix - , chart - , family - , dimension - , labels - , value - ); - } - } - } - } - - rrdset_unlock(st); - } - } - - rrdhost_unlock(host); -} - -static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, uint32_t options, const char *server, time_t now, int help) { - if(!server || !*server) server = "default"; - - time_t after = prometheus_server_last_access(server, host, now); - - int first_seen = 0; - if(!after) { - after = now - backend_update_every; - first_seen = 1; - } - - if(after > now) { - // oops! 
this should never happen - after = now - backend_update_every; - } - - if(help) { - int show_range = 1; - char *mode; - if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED) { - mode = "as collected"; - show_range = 0; - } - else if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AVERAGE) - mode = "average"; - else if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_SUM) - mode = "sum"; - else - mode = "unknown"; - - buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s" - , host->hostname - , (first_seen)?"FIRST SEEN ":"" - , server - , mode - , (unsigned long)((first_seen)?0:(now - after)) - , (first_seen)?"never":"seconds ago" - ); - - if(show_range) - buffer_sprintf(wb, ", time range %lu to %lu", (unsigned long)after, (unsigned long)now); - - buffer_strcat(wb, "\n\n"); - } - - return after; -} - -void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names, int timestamps) { - time_t before = now_realtime_sec(); - - // we start at the point we had stopped before - time_t after = prometheus_preparation(host, wb, options, server, before, help); - - rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, options, after, before, 0, help, types, names, timestamps); -} - -void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names, int timestamps) { - time_t before = now_realtime_sec(); - - // we start at the point we had stopped before - time_t after = prometheus_preparation(host, wb, options, server, before, help); - - rrd_rdlock(); - rrdhost_foreach_read(host) { - rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, options, after, before, 1, help, types, names, timestamps); - } - rrd_unlock(); -} diff --git a/src/backend_prometheus.h b/src/backend_prometheus.h deleted file mode 100644 index b1a021baa..000000000 --- a/src/backend_prometheus.h +++ /dev/null @@ -1,11 +0,0 @@ -// -// Created by costa on 09/07/17. -// - -#ifndef NETDATA_BACKEND_PROMETHEUS_H -#define NETDATA_BACKEND_PROMETHEUS_H - -extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names, int timestamps); -extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names, int timestamps); - -#endif //NETDATA_BACKEND_PROMETHEUS_H diff --git a/src/backends.c b/src/backends.c deleted file mode 100644 index 1360638f2..000000000 --- a/src/backends.c +++ /dev/null @@ -1,969 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// How backends work in netdata: -// -// 1. There is an independent thread that runs at the required interval -// (for example, once every 10 seconds) -// -// 2. Every time it wakes, it calls the backend formatting functions to build -// a buffer of data. This is a very fast, memory only operation. -// -// 3. If the buffer already includes data, the new data are appended. -// If the buffer becomes too big, because the data cannot be sent, a -// log is written and the buffer is discarded. -// -// 4. Then it tries to send all the data. 
It blocks until all the data are sent -// or the socket returns an error. -// If the time required for this is above the interval, it starts skipping -// intervals, but the calculated values include the entire database, without -// gaps (it remembers the timestamps and continues from where it stopped). -// -// 5. It repeats the above forever. -// - -const char *backend_prefix = "netdata"; -int backend_send_names = 1; -int backend_update_every = 10; -uint32_t backend_options = BACKEND_SOURCE_DATA_AVERAGE; - -// ---------------------------------------------------------------------------- -// helper functions for backends - -static inline size_t backend_name_copy(char *d, const char *s, size_t usable) { - size_t n; - - for(n = 0; *s && n < usable ; d++, s++, n++) { - char c = *s; - - if(c != '.' && !isalnum(c)) *d = '_'; - else *d = c; - } - *d = '\0'; - - return n; -} - -// calculate the SUM or AVERAGE of a dimension, for any timeframe -// may return NAN if the database does not have any value in the given timeframe - -inline calculated_number backend_calculate_value_from_stored_data( - RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap - , time_t *first_timestamp // the first point of the database used in this response - , time_t *last_timestamp // the timestamp that should be reported to backend -) { - RRDHOST *host = st->rrdhost; - - // find the edges of the rrd database for this chart - time_t first_t = rrdset_first_entry_t(st); - time_t last_t = rrdset_last_entry_t(st); - time_t update_every = st->update_every; - - // step back a little, to make sure we have complete data collection - // for all metrics - after -= update_every * 2; - before -= update_every * 2; - - // align the time-frame - after = after - (after % update_every); - before = before - (before % update_every); - - // for before, lose another iteration - // the latest point will be reported the next time - before -= update_every; - - if(unlikely(after > before)) - // this can happen when update_every > before - after - after = before; - - if(unlikely(after < first_t)) - after = first_t; - - if(unlikely(before > last_t)) - before = last_t; - - if(unlikely(before < first_t || after > last_t)) { - // the chart has not been updated in the wanted timeframe - debug(D_BACKEND, "BACKEND: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu", - host->hostname, st->id, rd->id, - (unsigned long)after, (unsigned long)before, - (unsigned long)first_t, (unsigned long)last_t - ); - return NAN; - } - - *first_timestamp = after; - *last_timestamp = before; - - size_t counter = 0; - calculated_number sum = 0; - - long start_at_slot = rrdset_time2slot(st, before), - stop_at_slot = rrdset_time2slot(st, after), - slot, stop_now = 0; - - for(slot = start_at_slot; !stop_now ; slot--) { - - if(unlikely(slot < 0)) slot = st->entries - 1; - if(unlikely(slot == stop_at_slot)) stop_now = 1; - - storage_number n = rd->values[slot]; - - if(unlikely(!does_storage_number_exist(n))) { - // not collected - continue; - } - - calculated_number value = unpack_storage_number(n); - sum += value; - - counter++; - } - - if(unlikely(!counter)) { - debug(D_BACKEND, "BACKEND: %s.%s.%s: no values stored in database for range %lu to %lu", - host->hostname, st->id, rd->id, - (unsigned long)after, (unsigned long)before - ); - return NAN; - } - - if(unlikely((options & BACKEND_SOURCE_BITS) ==
BACKEND_SOURCE_DATA_SUM)) - return sum; - - return sum / (calculated_number)counter; -} - - -// discard a response received by a backend -// after logging a sample of it to error.log - -static inline int discard_response(BUFFER *b, const char *backend) { - char sample[1024]; - const char *s = buffer_tostring(b); - char *d = sample, *e = &sample[sizeof(sample) - 1]; - - for(; *s && d < e ;s++) { - char c = *s; - if(unlikely(!isprint(c))) c = ' '; - *d++ = c; - } - *d = '\0'; - - info("BACKEND: received %zu bytes from %s backend. Ignoring them. Sample: '%s'", buffer_strlen(b), backend, sample); - buffer_flush(b); - return 0; -} - - -// ---------------------------------------------------------------------------- -// graphite backend - -static inline int format_dimension_collected_graphite_plaintext( - BUFFER *b // the buffer to write data to - , const char *prefix // the prefix to use - , RRDHOST *host // the host this chart comes from - , const char *hostname // the hostname (to override host->hostname) - , RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap -) { - (void)host; - (void)after; - (void)before; - (void)options; - - char chart_name[RRD_ID_LENGTH_MAX + 1]; - char dimension_name[RRD_ID_LENGTH_MAX + 1]; - backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); - backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); - - buffer_sprintf( - b - , "%s.%s.%s.%s " COLLECTED_NUMBER_FORMAT " %u\n" - , prefix - , hostname - , chart_name - , dimension_name - , rd->last_collected_value - , (uint32_t)rd->last_collected_time.tv_sec - ); - - return 1; -} - -static inline int format_dimension_stored_graphite_plaintext( - BUFFER *b // the buffer to write data to - , const char *prefix // the prefix to use - , RRDHOST *host // the host this chart comes from - , const char *hostname // the hostname (to override host->hostname) - , RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap -) { - (void)host; - - char chart_name[RRD_ID_LENGTH_MAX + 1]; - char dimension_name[RRD_ID_LENGTH_MAX + 1]; - backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); - backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); - - time_t first_t = after, last_t = before; - calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t); - - if(!isnan(value)) { - - buffer_sprintf( - b - , "%s.%s.%s.%s " CALCULATED_NUMBER_FORMAT " %u\n" - , prefix - , hostname - , chart_name - , dimension_name - , value - , (uint32_t) last_t - ); - - return 1; - } - return 0; -} - -static inline int process_graphite_response(BUFFER *b) { - return discard_response(b, "graphite"); -} - - -// ---------------------------------------------------------------------------- -// opentsdb backend - -static inline int format_dimension_collected_opentsdb_telnet( - BUFFER *b // the buffer to write data to - , const char *prefix // the prefix to use - , RRDHOST *host // the host this chart comes from - , const char *hostname // the hostname (to override host->hostname) - , RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp
- , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap -) { - (void)host; - (void)after; - (void)before; - (void)options; - - char chart_name[RRD_ID_LENGTH_MAX + 1]; - char dimension_name[RRD_ID_LENGTH_MAX + 1]; - backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); - backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); - - buffer_sprintf( - b - , "put %s.%s.%s %u " COLLECTED_NUMBER_FORMAT " host=%s%s%s\n" - , prefix - , chart_name - , dimension_name - , (uint32_t)rd->last_collected_time.tv_sec - , rd->last_collected_value - , hostname - , (host->tags)?" ":"" - , (host->tags)?host->tags:"" - ); - - return 1; -} - -static inline int format_dimension_stored_opentsdb_telnet( - BUFFER *b // the buffer to write data to - , const char *prefix // the prefix to use - , RRDHOST *host // the host this chart comes from - , const char *hostname // the hostname (to override host->hostname) - , RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap -) { - (void)host; - - time_t first_t = after, last_t = before; - calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t); - - char chart_name[RRD_ID_LENGTH_MAX + 1]; - char dimension_name[RRD_ID_LENGTH_MAX + 1]; - backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX); - backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX); - - if(!isnan(value)) { - - buffer_sprintf( - b - , "put %s.%s.%s %u " CALCULATED_NUMBER_FORMAT " host=%s%s%s\n" - , prefix - , chart_name - , dimension_name - , (uint32_t) last_t - , value - , hostname - , (host->tags)?" 
":"" - , (host->tags)?host->tags:"" - ); - - return 1; - } - return 0; -} - -static inline int process_opentsdb_response(BUFFER *b) { - return discard_response(b, "opentsdb"); -} - - -// ---------------------------------------------------------------------------- -// json backend - -static inline int format_dimension_collected_json_plaintext( - BUFFER *b // the buffer to write data to - , const char *prefix // the prefix to use - , RRDHOST *host // the host this chart comes from - , const char *hostname // the hostname (to override host->hostname) - , RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap -) { - (void)host; - (void)after; - (void)before; - (void)options; - - const char *tags_pre = "", *tags_post = "", *tags = host->tags; - if(!tags) tags = ""; - - if(*tags) { - if(*tags == '{' || *tags == '[' || *tags == '"') { - tags_pre = "\"host_tags\":"; - tags_post = ","; - } - else { - tags_pre = "\"host_tags\":\""; - tags_post = "\","; - } - } - - buffer_sprintf(b, "{" - "\"prefix\":\"%s\"," - "\"hostname\":\"%s\"," - "%s%s%s" - - "\"chart_id\":\"%s\"," - "\"chart_name\":\"%s\"," - "\"chart_family\":\"%s\"," - "\"chart_context\": \"%s\"," - "\"chart_type\":\"%s\"," - "\"units\": \"%s\"," - - "\"id\":\"%s\"," - "\"name\":\"%s\"," - "\"value\":" COLLECTED_NUMBER_FORMAT "," - - "\"timestamp\": %u}\n", - prefix, - hostname, - tags_pre, tags, tags_post, - - st->id, - st->name, - st->family, - st->context, - st->type, - st->units, - - rd->id, - rd->name, - rd->last_collected_value, - - (uint32_t)rd->last_collected_time.tv_sec - ); - - return 1; -} - -static inline int format_dimension_stored_json_plaintext( - BUFFER *b // the buffer to write data to - , const char *prefix // the prefix to use - , RRDHOST *host // the host this chart comes from - , const char *hostname // the hostname (to override host->hostname) - , RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap -) { - (void)host; - - time_t first_t = after, last_t = before; - calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t); - - if(!isnan(value)) { - const char *tags_pre = "", *tags_post = "", *tags = host->tags; - if(!tags) tags = ""; - - if(*tags) { - if(*tags == '{' || *tags == '[' || *tags == '"') { - tags_pre = "\"host_tags\":"; - tags_post = ","; - } - else { - tags_pre = "\"host_tags\":\""; - tags_post = "\","; - } - } - - buffer_sprintf(b, "{" - "\"prefix\":\"%s\"," - "\"hostname\":\"%s\"," - "%s%s%s" - - "\"chart_id\":\"%s\"," - "\"chart_name\":\"%s\"," - "\"chart_family\":\"%s\"," - "\"chart_context\": \"%s\"," - "\"chart_type\":\"%s\"," - "\"units\": \"%s\"," - - "\"id\":\"%s\"," - "\"name\":\"%s\"," - "\"value\":" CALCULATED_NUMBER_FORMAT "," - - "\"timestamp\": %u}\n", - prefix, - hostname, - tags_pre, tags, tags_post, - - st->id, - st->name, - st->family, - st->context, - st->type, - st->units, - - rd->id, - rd->name, - value, - - (uint32_t) last_t - ); - - return 1; - } - return 0; -} - -static inline int process_json_response(BUFFER *b) { - return discard_response(b, "json"); -} - - -// ---------------------------------------------------------------------------- -// the backend thread - -static SIMPLE_PATTERN *charts_pattern = NULL; -static SIMPLE_PATTERN *hosts_pattern = NULL; - -inline int 
backends_can_send_rrdset(uint32_t options, RRDSET *st) { - RRDHOST *host = st->rrdhost; - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE))) - return 0; - - if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) { - // we have not checked this chart - if(simple_pattern_matches(charts_pattern, st->id) || simple_pattern_matches(charts_pattern, st->name)) - rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND); - else { - rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE); - debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname); - return 0; - } - } - - if(unlikely(!rrdset_is_available_for_backends(st))) { - debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname); - return 0; - } - - if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED))) { - debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode)); - return 0; - } - - return 1; -} - -inline uint32_t backend_parse_data_source(const char *source, uint32_t mode) { - if(!strcmp(source, "raw") || !strcmp(source, "as collected") || !strcmp(source, "as-collected") || !strcmp(source, "as_collected") || !strcmp(source, "ascollected")) { - mode |= BACKEND_SOURCE_DATA_AS_COLLECTED; - mode &= ~(BACKEND_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AS_COLLECTED); - } - else if(!strcmp(source, "average")) { - mode |= BACKEND_SOURCE_DATA_AVERAGE; - mode &= ~(BACKEND_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AVERAGE); - } - else if(!strcmp(source, "sum") || !strcmp(source, "volume")) { - mode |= BACKEND_SOURCE_DATA_SUM; - mode &= ~(BACKEND_SOURCE_BITS ^ BACKEND_SOURCE_DATA_SUM); - } - else { - error("BACKEND: invalid data source method '%s'.", source); - } - - return mode; -} - -static void backends_main_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *backends_main(void *ptr) { - netdata_thread_cleanup_push(backends_main_cleanup, ptr); - - int default_port = 0; - int sock = -1; - BUFFER *b = buffer_create(1), *response = buffer_create(1); - int (*backend_request_formatter)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, uint32_t) = NULL; - int (*backend_response_checker)(BUFFER *) = NULL; - - // ------------------------------------------------------------------------ - // collect configuration options - - struct timeval timeout = { - .tv_sec = 0, - .tv_usec = 0 - }; - int enabled = config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", 0); - const char *source = config_get(CONFIG_SECTION_BACKEND, "data source", "average"); - const char *type = config_get(CONFIG_SECTION_BACKEND, "type", "graphite"); - const char *destination = config_get(CONFIG_SECTION_BACKEND, "destination", "localhost"); - backend_prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata"); - const char *hostname = config_get(CONFIG_SECTION_BACKEND, "hostname", localhost->hostname); - backend_update_every = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", backend_update_every); - int buffer_on_failures = (int)config_get_number(CONFIG_SECTION_BACKEND, "buffer on 
failures", 10); - long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", backend_update_every * 2 * 1000); - backend_send_names = config_get_boolean(CONFIG_SECTION_BACKEND, "send names instead of ids", backend_send_names); - - charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT); - hosts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT); - - - // ------------------------------------------------------------------------ - // validate configuration options - // and prepare for sending data to our backend - - backend_options = backend_parse_data_source(source, backend_options); - - if(timeoutms < 1) { - error("BACKEND: invalid timeout %ld ms given. Assuming %d ms.", timeoutms, backend_update_every * 2 * 1000); - timeoutms = backend_update_every * 2 * 1000; - } - timeout.tv_sec = (timeoutms * 1000) / 1000000; - timeout.tv_usec = (timeoutms * 1000) % 1000000; - - if(!enabled || backend_update_every < 1) - goto cleanup; - - // ------------------------------------------------------------------------ - // select the backend type - - if(!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) { - - default_port = 2003; - backend_response_checker = process_graphite_response; - - if((backend_options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED) - backend_request_formatter = format_dimension_collected_graphite_plaintext; - else - backend_request_formatter = format_dimension_stored_graphite_plaintext; - - } - else if(!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) { - - default_port = 4242; - backend_response_checker = process_opentsdb_response; - - if((backend_options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED) - backend_request_formatter = format_dimension_collected_opentsdb_telnet; - else - backend_request_formatter = format_dimension_stored_opentsdb_telnet; - - } - else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) { - - default_port = 5448; - backend_response_checker = process_json_response; - - if ((backend_options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED) - backend_request_formatter = format_dimension_collected_json_plaintext; - else - backend_request_formatter = format_dimension_stored_json_plaintext; - - } - else { - error("BACKEND: Unknown backend type '%s'", type); - goto cleanup; - } - - if(backend_request_formatter == NULL || backend_response_checker == NULL) { - error("BACKEND: backend is misconfigured - disabling it."); - goto cleanup; - } - - - // ------------------------------------------------------------------------ - // prepare the charts for monitoring the backend operation - - struct rusage thread; - - collected_number - chart_buffered_metrics = 0, - chart_lost_metrics = 0, - chart_sent_metrics = 0, - chart_buffered_bytes = 0, - chart_received_bytes = 0, - chart_sent_bytes = 0, - chart_receptions = 0, - chart_transmission_successes = 0, - chart_transmission_failures = 0, - chart_data_lost_events = 0, - chart_lost_bytes = 0, - chart_backend_reconnects = 0, - chart_backend_latency = 0; - - RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", "backends", NULL, 130600, backend_update_every, RRDSET_TYPE_LINE); - rrddim_add(chart_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_metrics, "lost", NULL, 1, 1, 
RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", "backends", NULL, 130610, backend_update_every, RRDSET_TYPE_AREA); - rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - - RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", "backends", NULL, 130630, backend_update_every, RRDSET_TYPE_LINE); - rrddim_add(chart_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(chart_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - /* - * this is misleading - we can only measure the time we need to send data - * this time is not related to the time required for the data to travel to - * the backend database and the time that server needed to process them - * - * issue #1432 and https://www.softlab.ntua.gr/facilities/documentation/unix/unix-socket-faq/unix-socket-faq-2.html - * - RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", "backends", NULL, 130620, backend_update_every, RRDSET_TYPE_AREA); - rrddim_add(chart_latency, "latency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - */ - - RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "NetData Backend Thread CPU usage", "milliseconds/s", "backends", NULL, 130630, backend_update_every, RRDSET_TYPE_STACKED); - rrddim_add(chart_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(chart_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - - - // ------------------------------------------------------------------------ - // prepare the backend main loop - - info("BACKEND: configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, backend_update_every, hostname, backend_prefix); - - usec_t step_ut = backend_update_every * USEC_PER_SEC; - time_t after = now_realtime_sec(); - int failures = 0; - heartbeat_t hb; - heartbeat_init(&hb); - - while(!netdata_exit) { - - // ------------------------------------------------------------------------ - // Wait for the next iteration point. 
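/*
 * Aside: the loop that follows implements the buffer-and-retry protocol
 * described in the "How backends work" comment at the top of this file.
 * Below is a compact, self-contained sketch of that protocol; it is
 * illustrative only -- format_metrics() and send_all() are stand-ins,
 * not netdata APIs.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MAX_FAILURES 10                     /* cf. the "buffer on failures" option */

static char buf[1 << 16];                   /* the accumulation buffer */
static size_t len = 0;

/* stand-in for the graphite/opentsdb/json formatters: append, never overwrite */
static void format_metrics(time_t before) {
    len += (size_t)snprintf(buf + len, sizeof(buf) - len,
                            "netdata.example.metric 1 %ld\n", (long)before);
}

/* stand-in for send(2) towards the backend server */
static int send_all(const char *p, size_t n) {
    return fwrite(p, 1, n, stdout) == n;
}

int main(void) {
    int failures = 0;
    for(int i = 0; i < 3; i++) {            /* the real loop runs until netdata exits */
        sleep(1);                           /* heartbeat: wake once per interval */
        format_metrics(time(NULL));         /* new data is appended to the buffer */
        if(send_all(buf, len)) {
            len = 0;                        /* sent successfully: empty the buffer */
            failures = 0;
        }
        else if(++failures > MAX_FAILURES) {
            len = 0;                        /* too many failures: discard (data loss) */
            failures = 0;
        }
    }
    return 0;
}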
- - heartbeat_next(&hb, step_ut); - time_t before = now_realtime_sec(); - debug(D_BACKEND, "BACKEND: preparing buffer for timeframe %lu to %lu", (unsigned long)after, (unsigned long)before); - - // ------------------------------------------------------------------------ - // add to the buffer the data we need to send to the backend - - netdata_thread_disable_cancelability(); - - size_t count_hosts = 0; - size_t count_charts_total = 0; - size_t count_dims_total = 0; - - rrd_rdlock(); - RRDHOST *host; - rrdhost_foreach_read(host) { - if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND|RRDHOST_FLAG_BACKEND_DONT_SEND))) { - char *name = (host == localhost)?"localhost":host->hostname; - if (!hosts_pattern || simple_pattern_matches(hosts_pattern, name)) { - rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_SEND); - info("enabled backend for host '%s'", name); - } - else { - rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_DONT_SEND); - info("disabled backend for host '%s'", name); - } - } - - if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND))) - continue; - - rrdhost_rdlock(host); - - count_hosts++; - size_t count_charts = 0; - size_t count_dims = 0; - size_t count_dims_skipped = 0; - - const char *__hostname = (host == localhost)?hostname:host->hostname; - - RRDSET *st; - rrdset_foreach_read(st, host) { - if(likely(backends_can_send_rrdset(backend_options, st))) { - rrdset_rdlock(st); - - count_charts++; - - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if (likely(rd->last_collected_time.tv_sec >= after)) { - chart_buffered_metrics += backend_request_formatter(b, backend_prefix, host, __hostname, st, rd, after, before, backend_options); - count_dims++; - } - else { - debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before); - count_dims_skipped++; - } - } - - rrdset_unlock(st); - } - } - - debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. 
Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped); - count_charts_total += count_charts; - count_dims_total += count_dims; - - rrdhost_unlock(host); - } - rrd_unlock(); - - netdata_thread_enable_cancelability(); - - debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts); - - // ------------------------------------------------------------------------ - - chart_buffered_bytes = (collected_number)buffer_strlen(b); - - // reset the monitoring chart counters - chart_received_bytes = - chart_sent_bytes = - chart_sent_metrics = - chart_lost_metrics = - chart_transmission_successes = - chart_transmission_failures = - chart_data_lost_events = - chart_lost_bytes = - chart_backend_reconnects = - chart_backend_latency = 0; - - if(unlikely(netdata_exit)) break; - - //fprintf(stderr, "\nBACKEND BEGIN:\n%s\nBACKEND END\n", buffer_tostring(b)); // FIXME - //fprintf(stderr, "after = %lu, before = %lu\n", after, before); - - // prepare for the next iteration - // to add incrementally data to buffer - after = before; - - // ------------------------------------------------------------------------ - // if we are connected, receive a response, without blocking - - if(likely(sock != -1)) { - errno = 0; - - // loop through to collect all data - while(sock != -1 && errno != EWOULDBLOCK) { - buffer_need_bytes(response, 4096); - - ssize_t r = recv(sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT); - if(likely(r > 0)) { - // we received some data - response->len += r; - chart_received_bytes += r; - chart_receptions++; - } - else if(r == 0) { - error("BACKEND: '%s' closed the socket", destination); - close(sock); - sock = -1; - } - else { - // failed to receive data - if(errno != EAGAIN && errno != EWOULDBLOCK) { - error("BACKEND: cannot receive data from backend '%s'.", destination); - } - } - } - - // if we received data, process them - if(buffer_strlen(response)) - backend_response_checker(response); - } - - // ------------------------------------------------------------------------ - // if we are not connected, connect to a backend server - - if(unlikely(sock == -1)) { - usec_t start_ut = now_monotonic_usec(); - size_t reconnects = 0; - - sock = connect_to_one_of(destination, default_port, &timeout, &reconnects, NULL, 0); - - chart_backend_reconnects += reconnects; - chart_backend_latency += now_monotonic_usec() - start_ut; - } - - if(unlikely(netdata_exit)) break; - - // ------------------------------------------------------------------------ - // if we are connected, send our buffer to the backend server - - if(likely(sock != -1)) { - size_t len = buffer_strlen(b); - usec_t start_ut = now_monotonic_usec(); - int flags = 0; -#ifdef MSG_NOSIGNAL - flags += MSG_NOSIGNAL; -#endif - - ssize_t written = send(sock, buffer_tostring(b), len, flags); - chart_backend_latency += now_monotonic_usec() - start_ut; - if(written != -1 && (size_t)written == len) { - // we sent the data successfully - chart_transmission_successes++; - chart_sent_bytes += written; - chart_sent_metrics = chart_buffered_metrics; - - // reset the failures count - failures = 0; - - // empty the buffer - buffer_flush(b); - } - else { - // oops! we couldn't send (all or some of the) data - error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. 
Will re-connect.", destination, len, written); - chart_transmission_failures++; - - if(written != -1) - chart_sent_bytes += written; - - // increment the counter we check for data loss - failures++; - - // close the socket - we will re-open it next time - close(sock); - sock = -1; - } - } - else { - error("BACKEND: failed to update database backend '%s'", destination); - chart_transmission_failures++; - - // increment the counter we check for data loss - failures++; - } - - if(failures > buffer_on_failures) { - // too bad! we are going to lose data - chart_lost_bytes += buffer_strlen(b); - error("BACKEND: reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination); - buffer_flush(b); - failures = 0; - chart_data_lost_events++; - chart_lost_metrics = chart_buffered_metrics; - } - - if(unlikely(netdata_exit)) break; - - // ------------------------------------------------------------------------ - // update the monitoring charts - - if(likely(chart_ops->counter_done)) rrdset_next(chart_ops); - rrddim_set(chart_ops, "read", chart_receptions); - rrddim_set(chart_ops, "write", chart_transmission_successes); - rrddim_set(chart_ops, "discard", chart_data_lost_events); - rrddim_set(chart_ops, "failure", chart_transmission_failures); - rrddim_set(chart_ops, "reconnect", chart_backend_reconnects); - rrdset_done(chart_ops); - - if(likely(chart_metrics->counter_done)) rrdset_next(chart_metrics); - rrddim_set(chart_metrics, "buffered", chart_buffered_metrics); - rrddim_set(chart_metrics, "lost", chart_lost_metrics); - rrddim_set(chart_metrics, "sent", chart_sent_metrics); - rrdset_done(chart_metrics); - - if(likely(chart_bytes->counter_done)) rrdset_next(chart_bytes); - rrddim_set(chart_bytes, "buffered", chart_buffered_bytes); - rrddim_set(chart_bytes, "lost", chart_lost_bytes); - rrddim_set(chart_bytes, "sent", chart_sent_bytes); - rrddim_set(chart_bytes, "received", chart_received_bytes); - rrdset_done(chart_bytes); - - /* - if(likely(chart_latency->counter_done)) rrdset_next(chart_latency); - rrddim_set(chart_latency, "latency", chart_backend_latency); - rrdset_done(chart_latency); - */ - - getrusage(RUSAGE_THREAD, &thread); - if(likely(chart_rusage->counter_done)) rrdset_next(chart_rusage); - rrddim_set(chart_rusage, "user", thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); - rrddim_set(chart_rusage, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); - rrdset_done(chart_rusage); - - if(likely(buffer_strlen(b) == 0)) - chart_buffered_metrics = 0; - - if(unlikely(netdata_exit)) break; - } - -cleanup: - if(sock != -1) - close(sock); - - buffer_free(b); - buffer_free(response); - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/backends.h b/src/backends.h deleted file mode 100644 index e882f3db1..000000000 --- a/src/backends.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef NETDATA_BACKENDS_H -#define NETDATA_BACKENDS_H 1 - -#define BACKEND_SOURCE_DATA_AS_COLLECTED 0x00000001 -#define BACKEND_SOURCE_DATA_AVERAGE 0x00000002 -#define BACKEND_SOURCE_DATA_SUM 0x00000004 - -#define BACKEND_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM) - -extern int backend_send_names; -extern int backend_update_every; -extern uint32_t backend_options; -extern const char *backend_prefix; - -extern void *backends_main(void *ptr); - -extern int backends_can_send_rrdset(uint32_t options, RRDSET *st); -extern uint32_t 
backend_parse_data_source(const char *source, uint32_t mode); - -extern calculated_number backend_calculate_value_from_stored_data( - RRDSET *st // the chart - , RRDDIM *rd // the dimension - , time_t after // the start timestamp - , time_t before // the end timestamp - , uint32_t options // BACKEND_SOURCE_* bitmap - , time_t *first_timestamp // the timestamp of the first point used in this response - , time_t *last_timestamp // the timestamp that should be reported to backend -); - -#endif /* NETDATA_BACKENDS_H */ diff --git a/src/cgroup-network.c b/src/cgroup-network.c deleted file mode 100644 index 0e2d5163a..000000000 --- a/src/cgroup-network.c +++ /dev/null @@ -1,651 +0,0 @@ -#include "common.h" -#include <libgen.h> - -#ifdef HAVE_SETNS -#ifndef _GNU_SOURCE -#define _GNU_SOURCE /* See feature_test_macros(7) */ -#endif -#include <sched.h> -#endif - -char *host_prefix = ""; - -char environment_variable2[FILENAME_MAX + 50] = ""; -char *environment[] = { - "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin", - environment_variable2, - NULL -}; - - -// ---------------------------------------------------------------------------- -// callback required by fatal() - -void netdata_cleanup_and_exit(int ret) { - exit(ret); -} -void health_reload(void) {}; -void rrdhost_save_all(void) {}; - - -// ---------------------------------------------------------------------------- - -struct iface { - const char *device; - uint32_t hash; - - unsigned int ifindex; - unsigned int iflink; - - struct iface *next; -}; - -unsigned int read_iface_iflink(const char *prefix, const char *iface) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix?prefix:"", iface); - - unsigned long long iflink = 0; - int ret = read_single_number_file(filename, &iflink); - if(ret) error("Cannot read '%s'.", filename); - - return (unsigned int)iflink; -} - -unsigned int read_iface_ifindex(const char *prefix, const char *iface) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix?prefix:"", iface); - - unsigned long long ifindex = 0; - int ret = read_single_number_file(filename, &ifindex); - if(ret) error("Cannot read '%s'.", filename); - - return (unsigned int)ifindex; -} - -struct iface *read_proc_net_dev(const char *prefix) { - procfile *ff = NULL; - char filename[FILENAME_MAX + 1]; - - snprintfz(filename, FILENAME_MAX, "%s%s", prefix?prefix:"", "/proc/net/dev"); - ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - error("Cannot open file '%s'", filename); - return NULL; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - error("Cannot read file '%s'", filename); - return NULL; - } - - size_t lines = procfile_lines(ff), l; - struct iface *root = NULL; - for(l = 2; l < lines ;l++) { - if (unlikely(procfile_linewords(ff, l) < 1)) continue; - - struct iface *t = callocz(1, sizeof(struct iface)); - t->device = strdupz(procfile_lineword(ff, l, 0)); - t->hash = simple_hash(t->device); - t->ifindex = read_iface_ifindex(prefix, t->device); - t->iflink = read_iface_iflink(prefix, t->device); - t->next = root; - root = t; - } - - procfile_close(ff); - - return root; -} - -void free_iface(struct iface *iface) { - freez((void *)iface->device); - freez(iface); -} - -void free_host_ifaces(struct iface *iface) { - while(iface) { - struct iface *t = iface->next; - free_iface(iface); - iface = t; - } -} - -int iface_is_eligible(struct iface *iface) { - if(iface->iflink != 
iface->ifindex) - return 1; - - return 0; -} - -int eligible_ifaces(struct iface *root) { - int eligible = 0; - - struct iface *t; - for(t = root; t ; t = t->next) - if(iface_is_eligible(t)) - eligible++; - - return eligible; -} - -static void continue_as_child(void) { - pid_t child = fork(); - int status; - pid_t ret; - - if (child < 0) - error("fork() failed"); - - /* Only the child returns */ - if (child == 0) - return; - - for (;;) { - ret = waitpid(child, &status, WUNTRACED); - if ((ret == child) && (WIFSTOPPED(status))) { - /* The child suspended so suspend us as well */ - kill(getpid(), SIGSTOP); - kill(child, SIGCONT); - } else { - break; - } - } - - /* Return the child's exit code if possible */ - if (WIFEXITED(status)) { - exit(WEXITSTATUS(status)); - } else if (WIFSIGNALED(status)) { - kill(getpid(), WTERMSIG(status)); - } - - exit(EXIT_FAILURE); -} - -int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix?prefix:"", (int)pid, ns); - int fd = open(filename, O_RDONLY); - - if(fd == -1) - error("Cannot open file '%s'", filename); - - return fd; -} - -static struct ns { - int nstype; - int fd; - int status; - const char *name; - const char *path; -} all_ns[] = { - // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" }, - // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" }, - // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" }, - // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" }, - { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" }, - { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" }, - { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" }, - - // terminator - { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL } }; - -int switch_namespace(const char *prefix, pid_t pid) { -#ifdef HAVE_SETNS - - int i; - for(i = 0; all_ns[i].name ; i++) - all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid); - - int root_fd = proc_pid_fd(prefix, "root", pid); - int cwd_fd = proc_pid_fd(prefix, "cwd", pid); - - setgroups(0, NULL); - - // 2 passes - found in the nsenter source code - // this is related to CLONE_NEWUSER functionality - - // FIXME: this code cannot switch user namespace - // Fortunately, we don't need it.
- - int pass, errors = 0; - for(pass = 0; pass < 2 ;pass++) { - for(i = 0; all_ns[i].name ; i++) { - if (all_ns[i].fd != -1 && all_ns[i].status == -1) { - if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) { - if(pass == 1) { - all_ns[i].status = 0; - error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid); - errors++; - } - } - else - all_ns[i].status = 1; - } - } - } - - setgroups(0, NULL); - - if(root_fd != -1) { - if(fchdir(root_fd) < 0) - error("Cannot fchdir() to pid %d root directory", (int)pid); - - if(chroot(".") < 0) - error("Cannot chroot() to pid %d root directory", (int)pid); - - close(root_fd); - } - - if(cwd_fd != -1) { - if(fchdir(cwd_fd) < 0) - error("Cannot fchdir() to pid %d current working directory", (int)pid); - - close(cwd_fd); - } - - int do_fork = 0; - for(i = 0; all_ns[i].name ; i++) - if(all_ns[i].fd != -1) { - - // CLONE_NEWPID requires a fork() to become effective - if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status) - do_fork = 1; - - close(all_ns[i].fd); - } - - if(do_fork) - continue_as_child(); - - return 0; - -#else - - errno = ENOSYS; - error("setns() is missing on this system."); - return 1; - -#endif -} - -pid_t read_pid_from_cgroup_file(const char *filename) { - FILE *fp = fopen(filename, "r"); - if(!fp) { - error("Cannot read file '%s'.", filename); - return 0; - } - - char buffer[100 + 1]; - pid_t pid = 0; - char *s; - while((s = fgets(buffer, 100, fp))) { - buffer[100] = '\0'; - pid = atoi(s); - if(pid > 0) break; - } - - fclose(fp); - return pid; -} - -pid_t read_pid_from_cgroup_files(const char *path) { - char filename[FILENAME_MAX + 1]; - - snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path); - pid_t pid = read_pid_from_cgroup_file(filename); - if(pid > 0) return pid; - - snprintfz(filename, FILENAME_MAX, "%s/tasks", path); - return read_pid_from_cgroup_file(filename); -} - -pid_t read_pid_from_cgroup(const char *path) { - pid_t pid = read_pid_from_cgroup_files(path); - if (pid > 0) return pid; - - DIR *dir = opendir(path); - if (!dir) { - error("cannot read directory '%s'", path); - return 0; - } - - struct dirent *de = NULL; - while ((de = readdir(dir))) { - if (de->d_type == DT_DIR - && ( - (de->d_name[0] == '.' && de->d_name[1] == '\0') - || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - )) - continue; - - if (de->d_type == DT_DIR) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name); - pid = read_pid_from_cgroup(filename); - if(pid > 0) break; - } - } - closedir(dir); - return pid; -} - -// ---------------------------------------------------------------------------- -// send the result to netdata - -struct found_device { - const char *host_device; - const char *guest_device; - - uint32_t host_device_hash; - - struct found_device *next; -} *detected_devices = NULL; - -void add_device(const char *host, const char *guest) { - uint32_t hash = simple_hash(host); - - if(guest && (!*guest || strcmp(host, guest) == 0)) - guest = NULL; - - struct found_device *f; - for(f = detected_devices; f ; f = f->next) { - if(f->host_device_hash == hash && strcmp(host, f->host_device) == 0) { - - if(guest && !f->guest_device) - f->guest_device = strdupz(guest); - - return; - } - } - - f = mallocz(sizeof(struct found_device)); - f->host_device = strdupz(host); - f->host_device_hash = hash; - f->guest_device = (guest)?strdupz(guest):NULL; - f->next = detected_devices; - detected_devices = f; -} - -int send_devices(void) { - int found = 0; - - struct found_device *f; - for(f = detected_devices; f ; f = f->next) { - found++; - printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device); - } - - return found; -} - -// ---------------------------------------------------------------------------- -// this function should be called only **ONCE** -// also it has to be the **LAST** to be called -// since it switches namespaces, so after this call, everything is different! - -void detect_veth_interfaces(pid_t pid) { - struct iface *host, *cgroup, *h, *c; - const char *prefix = getenv("NETDATA_HOST_PREFIX"); - - host = read_proc_net_dev(prefix); - if(!host) { - errno = 0; - error("cannot read host interface list."); - return; - } - - if(!eligible_ifaces(host)) { - errno = 0; - error("there are no double-linked host interfaces available."); - goto cleanup; - } - - if(switch_namespace(prefix, pid)) { - errno = 0; - error("cannot switch to the namespace of pid %u", (unsigned int) pid); - goto cleanup; - } - - cgroup = read_proc_net_dev(NULL); - if(!cgroup) { - errno = 0; - error("cannot read cgroup interface list."); - goto cleanup; - } - - if(!eligible_ifaces(cgroup)) { - errno = 0; - error("there are no double-linked cgroup interfaces available."); - goto cleanup; - } - - for(h = host; h ; h = h->next) { - if(iface_is_eligible(h)) { - for (c = cgroup; c; c = c->next) { - if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) { - add_device(h->device, c->device); - } - } - } - } - -cleanup: - free_host_ifaces(host); -} - -// ---------------------------------------------------------------------------- -// call the external helper - -#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 -void call_the_helper(pid_t pid, const char *cgroup) { - if(setresuid(0, 0, 0) == -1) - error("setresuid(0, 0, 0) failed."); - - char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; - if(cgroup) - snprintfz(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup); - else - snprintfz(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid); - - info("running: %s", buffer); - - pid_t cgroup_pid; - FILE *fp = mypopene(buffer, &cgroup_pid, environment); - if(fp) { - char *s; - while((s = fgets(buffer,
CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) { - trim(s); - - if(*s && *s != '\n') { - char *t = s; - while(*t && *t != ' ') t++; - if(*t == ' ') { - *t = '\0'; - t++; - } - - if(!*s || !*t) continue; - add_device(s, t); - } - } - - mypclose(fp, cgroup_pid); - } - else - error("cannot execute cgroup-network helper script: %s", buffer); -} - -int is_valid_path_symbol(char c) { - switch(c) { - case '/': // path separators - case '\\': // needed for virsh domains \x2d1\x2dname - case ' ': // space - case '-': // hyphen - case '_': // underscore - case '.': // dot - case ',': // comma - return 1; - - default: - return 0; - } -} - -// we will pass this path to a shell script running as root -// so, we need to make sure the path will be valid -// and will not include anything that could allow -// the caller to use shell expansion for gaining escalated -// privileges. -int verify_path(const char *path) { - struct stat sb; - - char c; - const char *s = path; - while((c = *s++)) { - if(!( isalnum(c) || is_valid_path_symbol(c) )) { - error("invalid character in path '%s'", path); - return -1; - } - } - - if(strstr(path, "\\") && !strstr(path, "\\x")) { - error("invalid escape sequence in path '%s'", path); - return -1; - } - - if(strstr(path, "/../")) { - error("invalid parent path sequence detected in '%s'", path); - return -1; - } - - if(path[0] != '/') { - error("only absolute path names are supported - invalid path '%s'", path); - return -1; - } - - if (stat(path, &sb) == -1) { - error("cannot stat() path '%s'", path); - return -1; - } - - if((sb.st_mode & S_IFMT) != S_IFDIR) { - error("path '%s' is not a directory", path); - return -1; - } - - return 0; -} - -/* -char *fix_path_variable(void) { - const char *path = getenv("PATH"); - if(!path || !*path) return 0; - - char *p = strdupz(path); - char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1); - strcpy(safe_path, "PATH="); - - int added = 0; - char *ptr = p; - while(ptr && *ptr) { - char *s = strsep(&ptr, ":"); - if(s && *s) { - if(verify_path(s) == -1) { - error("the PATH variable includes an invalid path '%s' - removed it.", s); - } - else { - info("the PATH variable includes a valid path '%s'.", s); - if(added) strcat(safe_path, ":"); - strcat(safe_path, s); - added++; - } - } - } - - info("unsafe PATH: '%s'.", path); - info(" safe PATH: '%s'.", safe_path); - - freez(p); - return safe_path; -} -*/ - -// ---------------------------------------------------------------------------- -// main - -void usage(void) { - fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name); - exit(1); -} - -int main(int argc, char **argv) { - pid_t pid = 0; - - program_name = argv[0]; - program_version = VERSION; - error_log_syslog = 0; - - - // ------------------------------------------------------------------------ - // make sure NETDATA_HOST_PREFIX is safe - - host_prefix = getenv("NETDATA_HOST_PREFIX"); - if(!host_prefix || !*host_prefix) - host_prefix = ""; - - if(host_prefix[0] != '\0' && verify_path(host_prefix) == -1) - fatal("invalid NETDATA_HOST_PREFIX '%s'", host_prefix); - - // ------------------------------------------------------------------------ - // build a safe environment for our script - - // the first environment variable is a fixed PATH=; the second passes NETDATA_HOST_PREFIX to the helper - snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", host_prefix); - - // ------------------------------------------------------------------------ - - if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") ||
!strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) { - fprintf(stderr, "cgroup-network %s\n", VERSION); - exit(0); - } - - if(argc != 3) - usage(); - - if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) { - pid = atoi(argv[2]); - - if(pid <= 0) { - errno = 0; - error("Invalid pid %d given", (int) pid); - return 2; - } - - call_the_helper(pid, NULL); - } - else if(!strcmp(argv[1], "--cgroup")) { - char *cgroup = argv[2]; - if(verify_path(cgroup) == -1) - fatal("cgroup '%s' does not exist or is not valid.", cgroup); - - pid = read_pid_from_cgroup(cgroup); - call_the_helper(pid, cgroup); - - if(pid <= 0 && !detected_devices) { - errno = 0; - error("Cannot find a cgroup PID from cgroup '%s'", cgroup); - } - } - else - usage(); - - if(pid > 0) - detect_veth_interfaces(pid); - - int found = send_devices(); - if(found <= 0) return 1; - return 0; -} diff --git a/src/clocks.c b/src/clocks.c deleted file mode 100644 index 2b1c36e3f..000000000 --- a/src/clocks.c +++ /dev/null @@ -1,137 +0,0 @@ -#include "common.h" - -#ifndef HAVE_CLOCK_GETTIME -inline int clock_gettime(clockid_t clk_id, struct timespec *ts) { - struct timeval tv; - if(unlikely(gettimeofday(&tv, NULL) == -1)) - return -1; - ts->tv_sec = tv.tv_sec; - ts->tv_nsec = (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC; - return 0; -} -#endif - -static inline time_t now_sec(clockid_t clk_id) { - struct timespec ts; - if(unlikely(clock_gettime(clk_id, &ts) == -1)) - return 0; - return ts.tv_sec; -} - -static inline usec_t now_usec(clockid_t clk_id) { - struct timespec ts; - if(unlikely(clock_gettime(clk_id, &ts) == -1)) - return 0; - return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC; -} - -static inline int now_timeval(clockid_t clk_id, struct timeval *tv) { - struct timespec ts; - - if(unlikely(clock_gettime(clk_id, &ts) == -1)) { - tv->tv_sec = 0; - tv->tv_usec = 0; - return -1; - } - - tv->tv_sec = ts.tv_sec; - tv->tv_usec = (suseconds_t)((ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC); - return 0; -} - -inline time_t now_realtime_sec(void) { - return now_sec(CLOCK_REALTIME); -} - -inline usec_t now_realtime_usec(void) { - return now_usec(CLOCK_REALTIME); -} - -inline int now_realtime_timeval(struct timeval *tv) { - return now_timeval(CLOCK_REALTIME, tv); -} - -inline time_t now_monotonic_sec(void) { - return now_sec(CLOCK_MONOTONIC); -} - -inline usec_t now_monotonic_usec(void) { - return now_usec(CLOCK_MONOTONIC); -} - -inline int now_monotonic_timeval(struct timeval *tv) { - return now_timeval(CLOCK_MONOTONIC, tv); -} - -inline time_t now_boottime_sec(void) { - return now_sec(CLOCK_BOOTTIME); -} - -inline usec_t now_boottime_usec(void) { - return now_usec(CLOCK_BOOTTIME); -} - -inline int now_boottime_timeval(struct timeval *tv) { - return now_timeval(CLOCK_BOOTTIME, tv); -} - -inline usec_t timeval_usec(struct timeval *tv) { - return (usec_t)tv->tv_sec * USEC_PER_SEC + (tv->tv_usec % USEC_PER_SEC); -} - -inline msec_t timeval_msec(struct timeval *tv) { - return (msec_t)tv->tv_sec * MSEC_PER_SEC + ((tv->tv_usec % USEC_PER_SEC) / MSEC_PER_SEC); -} - -inline susec_t dt_usec_signed(struct timeval *now, struct timeval *old) { - usec_t ts1 = timeval_usec(now); - usec_t ts2 = timeval_usec(old); - - if(likely(ts1 >= ts2)) return (susec_t)(ts1 - ts2); - return -((susec_t)(ts2 - ts1)); -} - -inline usec_t dt_usec(struct timeval *now, struct timeval *old) { - usec_t ts1 = timeval_usec(now); - usec_t ts2 = timeval_usec(old); - return (ts1 > ts2) ? 
(ts1 - ts2) : (ts2 - ts1); -} - -inline void heartbeat_init(heartbeat_t *hb) -{ - *hb = 0ULL; -} - -usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) -{ - heartbeat_t now = now_monotonic_usec(); - usec_t next = now - (now % tick) + tick; - - while(now < next) { - sleep_usec(next - now); - now = now_monotonic_usec(); - } - - if(likely(*hb != 0ULL)) { - usec_t dt = now - *hb; - *hb = now; - - if(unlikely(dt >= tick + tick / 2)) { - errno = 0; - error("heartbeat missed %llu microseconds", dt - tick); - } - - return dt; - } - else { - *hb = now; - return 0ULL; - } -} - -inline usec_t heartbeat_dt_usec(heartbeat_t *hb) -{ - if(!*hb) - return 0ULL; - return now_monotonic_usec() - *hb; -} diff --git a/src/clocks.h b/src/clocks.h deleted file mode 100644 index ca5715254..000000000 --- a/src/clocks.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef NETDATA_CLOCKS_H -#define NETDATA_CLOCKS_H 1 - -#ifndef HAVE_STRUCT_TIMESPEC -struct timespec { - time_t tv_sec; /* seconds */ - long tv_nsec; /* nanoseconds */ -}; -#endif - -#ifndef HAVE_CLOCKID_T -typedef int clockid_t; -#endif - -typedef unsigned long long nsec_t; -typedef unsigned long long msec_t; -typedef unsigned long long usec_t; -typedef long long susec_t; - -typedef usec_t heartbeat_t; - -/* Linux value is as good as any other */ -#ifndef CLOCK_REALTIME -#define CLOCK_REALTIME 0 -#endif - -#ifndef CLOCK_MONOTONIC -/* fallback to CLOCK_REALTIME if not available */ -#define CLOCK_MONOTONIC CLOCK_REALTIME -#endif - -#ifndef CLOCK_BOOTTIME - -#ifdef CLOCK_UPTIME -/* CLOCK_BOOTTIME falls back to CLOCK_UPTIME on FreeBSD */ -#define CLOCK_BOOTTIME CLOCK_UPTIME -#else // CLOCK_UPTIME -/* CLOCK_BOOTTIME falls back to CLOCK_MONOTONIC */ -#define CLOCK_BOOTTIME CLOCK_MONOTONIC -#endif // CLOCK_UPTIME - -#else // CLOCK_BOOTTIME - -#ifdef HAVE_CLOCK_GETTIME -#define CLOCK_BOOTTIME_IS_AVAILABLE 1 // required for /proc/uptime -#endif // HAVE_CLOCK_GETTIME - -#endif // CLOCK_BOOTTIME - -#define NSEC_PER_MSEC 1000000ULL - -#define NSEC_PER_SEC 1000000000ULL -#define NSEC_PER_USEC 1000ULL - -#define USEC_PER_SEC 1000000ULL -#define MSEC_PER_SEC 1000ULL - -#define USEC_PER_MS 1000ULL - -#ifndef HAVE_CLOCK_GETTIME -/* Fallback function for POSIX.1-2001 clock_gettime() function. - * - * We use a realtime clock from gettimeofday(), this will - * make systems without clock_gettime() support sensitive - * to time jumps or hibernation/suspend side effects. - */ -extern int clock_gettime(clockid_t clk_id, struct timespec *ts); -#endif - -/* - * Three clocks are available (cf. man 3 clock_gettime): - * - * REALTIME clock (i.e. wall-clock): - * This clock is affected by discontinuous jumps in the system time - * (e.g., if the system administrator manually changes the clock), and by the incremental adjustments performed by adjtime(3) and NTP. - * - * MONOTONIC clock - * Clock that cannot be set and represents monotonic time since some unspecified starting point. - * This clock is not affected by discontinuous jumps in the system time - * (e.g., if the system administrator manually changes the clock), but is affected by the incremental adjustments performed by adjtime(3) and NTP. - * If not available on the system, this clock falls back to REALTIME clock. - * - * BOOTTIME clock - * Identical to CLOCK_MONOTONIC, except it also includes any time that the system is suspended. 
- * This allows applications to get a suspend-aware monotonic clock without having to deal with the complications of CLOCK_REALTIME, - * which may have discontinuities if the time is changed using settimeofday(2). - * If not available on the system, this clock falls back to MONOTONIC clock. - * - * All now_*_timeval() functions fill the `struct timeval` with the time from the appropriate clock. - * Those functions return 0 on success, -1 otherwise with errno set appropriately. - * - * All now_*_sec() functions return the time in seconds from the appropriate clock, or 0 on error. - * All now_*_usec() functions return the time in microseconds from the appropriate clock, or 0 on error. - */ -extern int now_realtime_timeval(struct timeval *tv); -extern time_t now_realtime_sec(void); -extern usec_t now_realtime_usec(void); - -extern int now_monotonic_timeval(struct timeval *tv); -extern time_t now_monotonic_sec(void); -extern usec_t now_monotonic_usec(void); - -extern int now_boottime_timeval(struct timeval *tv); -extern time_t now_boottime_sec(void); -extern usec_t now_boottime_usec(void); - - -extern usec_t timeval_usec(struct timeval *ts); -extern msec_t timeval_msec(struct timeval *tv); - -extern usec_t dt_usec(struct timeval *now, struct timeval *old); -extern susec_t dt_usec_signed(struct timeval *now, struct timeval *old); - -extern void heartbeat_init(heartbeat_t *hb); - -/* Sleeps until next multiple of tick using monotonic clock. - * Returns elapsed time in microseconds since previous heartbeat - */ -extern usec_t heartbeat_next(heartbeat_t *hb, usec_t tick); - -/* Returns elapsed time in microseconds since last heartbeat */ -extern usec_t heartbeat_dt_usec(heartbeat_t *hb); - -#endif /* NETDATA_CLOCKS_H */ diff --git a/src/common.c b/src/common.c deleted file mode 100644 index 94fd5e429..000000000 --- a/src/common.c +++ /dev/null @@ -1,1361 +0,0 @@ -#include "common.h" - -#ifdef __APPLE__ -#define INHERIT_NONE 0 -#endif /* __APPLE__ */ -#if defined(__FreeBSD__) || defined(__APPLE__) -# define O_NOATIME 0 -# define MADV_DONTFORK INHERIT_NONE -#endif /* __FreeBSD__ || __APPLE__*/ - -char *netdata_configured_hostname = NULL; -char *netdata_configured_config_dir = NULL; -char *netdata_configured_log_dir = NULL; -char *netdata_configured_plugins_dir = NULL; -char *netdata_configured_web_dir = NULL; -char *netdata_configured_cache_dir = NULL; -char *netdata_configured_varlib_dir = NULL; -char *netdata_configured_home_dir = NULL; -char *netdata_configured_host_prefix = NULL; -char *netdata_configured_timezone = NULL; - -struct rlimit rlimit_nofile = { .rlim_cur = 1024, .rlim_max = 1024 }; -int enable_ksm = 1; - -volatile sig_atomic_t netdata_exit = 0; -const char *os_type = NETDATA_OS_TYPE; -const char *program_version = VERSION; - -// ---------------------------------------------------------------------------- -// memory allocation functions that handle failures - -// although netdata does not use memory allocations too often (netdata tries to -// keep its memory footprint stable during runtime, i.e. all buffers are -// allocated during initialization and are adapted to current use throughout -// its lifetime), these can be used to override the default system allocation -// routines.
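The comment above describes a fail-fast allocation policy: the wrappers below never return NULL, they abort the process on allocation failure, so call sites need no error handling. A minimal, self-contained sketch of that pattern (the names xmalloc/xstrdup and the error text are illustrative stand-ins, not netdata's API; netdata's own mallocz()/strdupz() below route the failure through fatal() instead):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* fail-fast malloc: on failure, log and exit, so callers never check for NULL */
static void *xmalloc(size_t size) {
    void *p = malloc(size);
    if(!p) {
        fprintf(stderr, "FATAL: cannot allocate %zu bytes of memory\n", size);
        exit(1);
    }
    return p;
}

/* the same pattern applied to string duplication */
static char *xstrdup(const char *s) {
    size_t len = strlen(s) + 1;
    char *t = xmalloc(len);
    memcpy(t, s, len);
    return t;
}

int main(void) {
    char *name = xstrdup("netdata");  /* no NULL check needed at the call site */
    printf("%s\n", name);
    free(name);
    return 0;
}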
- -#ifdef NETDATA_LOG_ALLOCATIONS -static struct memory_statistics { - volatile size_t malloc_calls_made; - volatile size_t calloc_calls_made; - volatile size_t realloc_calls_made; - volatile size_t strdup_calls_made; - volatile size_t free_calls_made; - volatile size_t memory_calls_made; - volatile size_t allocated_memory; - volatile size_t mmapped_memory; -} memory_statistics; - -static inline void print_allocations(const char *file, const char *function, const unsigned long line) { - static struct memory_statistics old = { 0, 0, 0, 0, 0, 0, 0, 0 }; - - //if(unlikely(!(memory_statistics.memory_calls_made % 5))) { - fprintf(stderr, "(%04lu@%-10.10s:%-15.15s): Allocated %zu KB (+%zu B), mmapped %zu KB (+%zu B): malloc %zu (+%zu), calloc %zu (+%zu), realloc %zu (+%zu), strdup %zu (+%zu), free %zu (+%zu)\n", - line, file, function, - (memory_statistics.allocated_memory + 512) / 1024, memory_statistics.allocated_memory - old.allocated_memory, - (memory_statistics.mmapped_memory + 512) / 1024, memory_statistics.mmapped_memory - old.mmapped_memory, - memory_statistics.malloc_calls_made, memory_statistics.malloc_calls_made - old.malloc_calls_made, - memory_statistics.calloc_calls_made, memory_statistics.calloc_calls_made - old.calloc_calls_made, - memory_statistics.realloc_calls_made, memory_statistics.realloc_calls_made - old.realloc_calls_made, - memory_statistics.strdup_calls_made, memory_statistics.strdup_calls_made - old.strdup_calls_made, - memory_statistics.free_calls_made, memory_statistics.free_calls_made - old.free_calls_made - ); - - memcpy(&old, &memory_statistics, sizeof(struct memory_statistics)); - //} -} - -static inline void malloc_accounting(const char *file, const char *function, const unsigned long line, size_t size) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.malloc_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.allocated_memory, size, __ATOMIC_SEQ_CST); -#else - // this is for debugging - we don't care locking it - memory_statistics.memory_calls_made++; - memory_statistics.malloc_calls_made++; - memory_statistics.allocated_memory += size; -#endif - print_allocations(file, function, line); -} - -static inline void mmap_accounting(size_t size) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&memory_statistics.malloc_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.mmapped_memory, size, __ATOMIC_SEQ_CST); -#else - // this is for debugging - we don't care locking it - memory_statistics.memory_calls_made++; - memory_statistics.mmapped_memory += size; -#endif -} - -static inline void calloc_accounting(const char *file, const char *function, const unsigned long line, size_t size) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.calloc_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.allocated_memory, size, __ATOMIC_SEQ_CST); -#else - // this is for debugging - we don't care locking it - memory_statistics.memory_calls_made++; - memory_statistics.calloc_calls_made++; - memory_statistics.allocated_memory += size; -#endif - print_allocations(file, function, line); -} - -static inline void realloc_accounting(const char *file, const char *function, const unsigned long 
line, void *ptr, size_t size) { - (void)ptr; - -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.realloc_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.allocated_memory, size, __ATOMIC_SEQ_CST); -#else - // this is for debugging - we don't care locking it - memory_statistics.memory_calls_made++; - memory_statistics.realloc_calls_made++; - memory_statistics.allocated_memory += size; -#endif - print_allocations(file, function, line); -} - -static inline void strdup_accounting(const char *file, const char *function, const unsigned long line, const char *s) { - size_t size = strlen(s) + 1; - -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.strdup_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.allocated_memory, size, __ATOMIC_SEQ_CST); -#else - // this is for debugging - we don't care locking it - memory_statistics.memory_calls_made++; - memory_statistics.strdup_calls_made++; - memory_statistics.allocated_memory += size; -#endif - print_allocations(file, function, line); -} - -static inline void free_accounting(const char *file, const char *function, const unsigned long line, void *ptr) { - (void)file; - (void)function; - (void)line; - - if(likely(ptr)) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&memory_statistics.free_calls_made, 1, __ATOMIC_SEQ_CST); -#else - // this is for debugging - we don't care locking it - memory_statistics.memory_calls_made++; - memory_statistics.free_calls_made++; -#endif - } -} -#endif - -#ifdef NETDATA_LOG_ALLOCATIONS -char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s) { - strdup_accounting(file, function, line, s); -#else -char *strdupz(const char *s) { -#endif - - char *t = strdup(s); - if (unlikely(!t)) fatal("Cannot strdup() string '%s'", s); - return t; -} - -#ifdef NETDATA_LOG_ALLOCATIONS -void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size) { - malloc_accounting(file, function, line, size); -#else -void *mallocz(size_t size) { -#endif - - void *p = malloc(size); - if (unlikely(!p)) fatal("Cannot allocate %zu bytes of memory.", size); - return p; -} - -#ifdef NETDATA_LOG_ALLOCATIONS -void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size) { - calloc_accounting(file, function, line, nmemb * size); -#else -void *callocz(size_t nmemb, size_t size) { -#endif - - void *p = calloc(nmemb, size); - if (unlikely(!p)) fatal("Cannot allocate %zu bytes of memory.", nmemb * size); - return p; -} - -#ifdef NETDATA_LOG_ALLOCATIONS -void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size) { - realloc_accounting(file, function, line, ptr, size); -#else -void *reallocz(void *ptr, size_t size) { -#endif - - void *p = realloc(ptr, size); - if (unlikely(!p)) fatal("Cannot re-allocate memory to %zu bytes.", size); - return p; -} - -#ifdef NETDATA_LOG_ALLOCATIONS -void freez_int(const char *file, const char *function, const unsigned long line, void *ptr) { - free_accounting(file, function, line, ptr); -#else -void 
freez(void *ptr) { -#endif - - free(ptr); -} - -void json_escape_string(char *dst, const char *src, size_t size) { - const char *t; - char *d = dst, *e = &dst[size - 1]; - - for(t = src; *t && d < e ;t++) { - if(unlikely(*t == '\\' || *t == '"')) { - if(unlikely(d + 1 >= e)) break; - *d++ = '\\'; - } - *d++ = *t; - } - - *d = '\0'; -} - -void json_fix_string(char *s) { - unsigned char c; - while((c = (unsigned char)*s)) { - if(unlikely(c == '\\')) - *s++ = '/'; - else if(unlikely(c == '"')) - *s++ = '\''; - else if(unlikely(isspace(c) || iscntrl(c))) - *s++ = ' '; - else if(unlikely(!isprint(c) || c > 127)) - *s++ = '_'; - else - s++; - } -} - -int sleep_usec(usec_t usec) { - -#ifndef NETDATA_WITH_USLEEP - // we expect microseconds (1.000.000 per second) - // but timespec is nanoseconds (1.000.000.000 per second) - struct timespec rem, req = { - .tv_sec = (time_t) (usec / 1000000), - .tv_nsec = (suseconds_t) ((usec % 1000000) * 1000) - }; - - while (nanosleep(&req, &rem) == -1) { - if (likely(errno == EINTR)) { - debug(D_SYSTEM, "nanosleep() interrupted (while sleeping for %llu microseconds).", usec); - req.tv_sec = rem.tv_sec; - req.tv_nsec = rem.tv_nsec; - } else { - error("Cannot nanosleep() for %llu microseconds.", usec); - break; - } - } - - return 0; -#else - int ret = usleep(usec); - if(unlikely(ret == -1 && errno == EINVAL)) { - // on certain systems, usec has to be up to 999999 - if(usec > 999999) { - int counter = usec / 999999; - while(counter--) - usleep(999999); - - usleep(usec % 999999); - } - else { - error("Cannot usleep() for %llu microseconds.", usec); - return ret; - } - } - - if(ret != 0) - error("usleep() failed for %llu microseconds.", usec); - - return ret; -#endif -} - -unsigned char netdata_map_chart_names[256] = { - [0] = '\0', // - [1] = '_', // - [2] = '_', // - [3] = '_', // - [4] = '_', // - [5] = '_', // - [6] = '_', // - [7] = '_', // - [8] = '_', // - [9] = '_', // - [10] = '_', // - [11] = '_', // - [12] = '_', // - [13] = '_', // - [14] = '_', // - [15] = '_', // - [16] = '_', // - [17] = '_', // - [18] = '_', // - [19] = '_', // - [20] = '_', // - [21] = '_', // - [22] = '_', // - [23] = '_', // - [24] = '_', // - [25] = '_', // - [26] = '_', // - [27] = '_', // - [28] = '_', // - [29] = '_', // - [30] = '_', // - [31] = '_', // - [32] = '_', // - [33] = '_', // ! - [34] = '_', // " - [35] = '_', // # - [36] = '_', // $ - [37] = '_', // % - [38] = '_', // & - [39] = '_', // ' - [40] = '_', // ( - [41] = '_', // ) - [42] = '_', // * - [43] = '_', // + - [44] = '.', // , - [45] = '-', // - - [46] = '.', // . - [47] = '/', // / - [48] = '0', // 0 - [49] = '1', // 1 - [50] = '2', // 2 - [51] = '3', // 3 - [52] = '4', // 4 - [53] = '5', // 5 - [54] = '6', // 6 - [55] = '7', // 7 - [56] = '8', // 8 - [57] = '9', // 9 - [58] = '_', // : - [59] = '_', // ; - [60] = '_', // < - [61] = '_', // = - [62] = '_', // > - [63] = '_', // ? 
- [64] = '_', // @ - [65] = 'a', // A - [66] = 'b', // B - [67] = 'c', // C - [68] = 'd', // D - [69] = 'e', // E - [70] = 'f', // F - [71] = 'g', // G - [72] = 'h', // H - [73] = 'i', // I - [74] = 'j', // J - [75] = 'k', // K - [76] = 'l', // L - [77] = 'm', // M - [78] = 'n', // N - [79] = 'o', // O - [80] = 'p', // P - [81] = 'q', // Q - [82] = 'r', // R - [83] = 's', // S - [84] = 't', // T - [85] = 'u', // U - [86] = 'v', // V - [87] = 'w', // W - [88] = 'x', // X - [89] = 'y', // Y - [90] = 'z', // Z - [91] = '_', // [ - [92] = '/', // backslash - [93] = '_', // ] - [94] = '_', // ^ - [95] = '_', // _ - [96] = '_', // ` - [97] = 'a', // a - [98] = 'b', // b - [99] = 'c', // c - [100] = 'd', // d - [101] = 'e', // e - [102] = 'f', // f - [103] = 'g', // g - [104] = 'h', // h - [105] = 'i', // i - [106] = 'j', // j - [107] = 'k', // k - [108] = 'l', // l - [109] = 'm', // m - [110] = 'n', // n - [111] = 'o', // o - [112] = 'p', // p - [113] = 'q', // q - [114] = 'r', // r - [115] = 's', // s - [116] = 't', // t - [117] = 'u', // u - [118] = 'v', // v - [119] = 'w', // w - [120] = 'x', // x - [121] = 'y', // y - [122] = 'z', // z - [123] = '_', // { - [124] = '_', // | - [125] = '_', // } - [126] = '_', // ~ - [127] = '_', // - [128] = '_', // - [129] = '_', // - [130] = '_', // - [131] = '_', // - [132] = '_', // - [133] = '_', // - [134] = '_', // - [135] = '_', // - [136] = '_', // - [137] = '_', // - [138] = '_', // - [139] = '_', // - [140] = '_', // - [141] = '_', // - [142] = '_', // - [143] = '_', // - [144] = '_', // - [145] = '_', // - [146] = '_', // - [147] = '_', // - [148] = '_', // - [149] = '_', // - [150] = '_', // - [151] = '_', // - [152] = '_', // - [153] = '_', // - [154] = '_', // - [155] = '_', // - [156] = '_', // - [157] = '_', // - [158] = '_', // - [159] = '_', // - [160] = '_', // - [161] = '_', // - [162] = '_', // - [163] = '_', // - [164] = '_', // - [165] = '_', // - [166] = '_', // - [167] = '_', // - [168] = '_', // - [169] = '_', // - [170] = '_', // - [171] = '_', // - [172] = '_', // - [173] = '_', // - [174] = '_', // - [175] = '_', // - [176] = '_', // - [177] = '_', // - [178] = '_', // - [179] = '_', // - [180] = '_', // - [181] = '_', // - [182] = '_', // - [183] = '_', // - [184] = '_', // - [185] = '_', // - [186] = '_', // - [187] = '_', // - [188] = '_', // - [189] = '_', // - [190] = '_', // - [191] = '_', // - [192] = '_', // - [193] = '_', // - [194] = '_', // - [195] = '_', // - [196] = '_', // - [197] = '_', // - [198] = '_', // - [199] = '_', // - [200] = '_', // - [201] = '_', // - [202] = '_', // - [203] = '_', // - [204] = '_', // - [205] = '_', // - [206] = '_', // - [207] = '_', // - [208] = '_', // - [209] = '_', // - [210] = '_', // - [211] = '_', // - [212] = '_', // - [213] = '_', // - [214] = '_', // - [215] = '_', // - [216] = '_', // - [217] = '_', // - [218] = '_', // - [219] = '_', // - [220] = '_', // - [221] = '_', // - [222] = '_', // - [223] = '_', // - [224] = '_', // - [225] = '_', // - [226] = '_', // - [227] = '_', // - [228] = '_', // - [229] = '_', // - [230] = '_', // - [231] = '_', // - [232] = '_', // - [233] = '_', // - [234] = '_', // - [235] = '_', // - [236] = '_', // - [237] = '_', // - [238] = '_', // - [239] = '_', // - [240] = '_', // - [241] = '_', // - [242] = '_', // - [243] = '_', // - [244] = '_', // - [245] = '_', // - [246] = '_', // - [247] = '_', // - [248] = '_', // - [249] = '_', // - [250] = '_', // - [251] = '_', // - [252] = '_', // - [253] = '_', // - [254] = '_', // - [255] = '_' // 
-}; - -// make sure the supplied string -// is good for a netdata chart/dimension ID/NAME -void netdata_fix_chart_name(char *s) { - while ((*s = netdata_map_chart_names[(unsigned char) *s])) s++; -} - -unsigned char netdata_map_chart_ids[256] = { - [0] = '\0', // - [1] = '_', // - [2] = '_', // - [3] = '_', // - [4] = '_', // - [5] = '_', // - [6] = '_', // - [7] = '_', // - [8] = '_', // - [9] = '_', // - [10] = '_', // - [11] = '_', // - [12] = '_', // - [13] = '_', // - [14] = '_', // - [15] = '_', // - [16] = '_', // - [17] = '_', // - [18] = '_', // - [19] = '_', // - [20] = '_', // - [21] = '_', // - [22] = '_', // - [23] = '_', // - [24] = '_', // - [25] = '_', // - [26] = '_', // - [27] = '_', // - [28] = '_', // - [29] = '_', // - [30] = '_', // - [31] = '_', // - [32] = '_', // - [33] = '_', // ! - [34] = '_', // " - [35] = '_', // # - [36] = '_', // $ - [37] = '_', // % - [38] = '_', // & - [39] = '_', // ' - [40] = '_', // ( - [41] = '_', // ) - [42] = '_', // * - [43] = '_', // + - [44] = '.', // , - [45] = '-', // - - [46] = '.', // . - [47] = '_', // / - [48] = '0', // 0 - [49] = '1', // 1 - [50] = '2', // 2 - [51] = '3', // 3 - [52] = '4', // 4 - [53] = '5', // 5 - [54] = '6', // 6 - [55] = '7', // 7 - [56] = '8', // 8 - [57] = '9', // 9 - [58] = '_', // : - [59] = '_', // ; - [60] = '_', // < - [61] = '_', // = - [62] = '_', // > - [63] = '_', // ? - [64] = '_', // @ - [65] = 'a', // A - [66] = 'b', // B - [67] = 'c', // C - [68] = 'd', // D - [69] = 'e', // E - [70] = 'f', // F - [71] = 'g', // G - [72] = 'h', // H - [73] = 'i', // I - [74] = 'j', // J - [75] = 'k', // K - [76] = 'l', // L - [77] = 'm', // M - [78] = 'n', // N - [79] = 'o', // O - [80] = 'p', // P - [81] = 'q', // Q - [82] = 'r', // R - [83] = 's', // S - [84] = 't', // T - [85] = 'u', // U - [86] = 'v', // V - [87] = 'w', // W - [88] = 'x', // X - [89] = 'y', // Y - [90] = 'z', // Z - [91] = '_', // [ - [92] = '/', // backslash - [93] = '_', // ] - [94] = '_', // ^ - [95] = '_', // _ - [96] = '_', // ` - [97] = 'a', // a - [98] = 'b', // b - [99] = 'c', // c - [100] = 'd', // d - [101] = 'e', // e - [102] = 'f', // f - [103] = 'g', // g - [104] = 'h', // h - [105] = 'i', // i - [106] = 'j', // j - [107] = 'k', // k - [108] = 'l', // l - [109] = 'm', // m - [110] = 'n', // n - [111] = 'o', // o - [112] = 'p', // p - [113] = 'q', // q - [114] = 'r', // r - [115] = 's', // s - [116] = 't', // t - [117] = 'u', // u - [118] = 'v', // v - [119] = 'w', // w - [120] = 'x', // x - [121] = 'y', // y - [122] = 'z', // z - [123] = '_', // { - [124] = '_', // | - [125] = '_', // } - [126] = '_', // ~ - [127] = '_', // - [128] = '_', // - [129] = '_', // - [130] = '_', // - [131] = '_', // - [132] = '_', // - [133] = '_', // - [134] = '_', // - [135] = '_', // - [136] = '_', // - [137] = '_', // - [138] = '_', // - [139] = '_', // - [140] = '_', // - [141] = '_', // - [142] = '_', // - [143] = '_', // - [144] = '_', // - [145] = '_', // - [146] = '_', // - [147] = '_', // - [148] = '_', // - [149] = '_', // - [150] = '_', // - [151] = '_', // - [152] = '_', // - [153] = '_', // - [154] = '_', // - [155] = '_', // - [156] = '_', // - [157] = '_', // - [158] = '_', // - [159] = '_', // - [160] = '_', // - [161] = '_', // - [162] = '_', // - [163] = '_', // - [164] = '_', // - [165] = '_', // - [166] = '_', // - [167] = '_', // - [168] = '_', // - [169] = '_', // - [170] = '_', // - [171] = '_', // - [172] = '_', // - [173] = '_', // - [174] = '_', // - [175] = '_', // - [176] = '_', // - [177] = '_', // - [178] = '_', 
// - [179] = '_', // - [180] = '_', // - [181] = '_', // - [182] = '_', // - [183] = '_', // - [184] = '_', // - [185] = '_', // - [186] = '_', // - [187] = '_', // - [188] = '_', // - [189] = '_', // - [190] = '_', // - [191] = '_', // - [192] = '_', // - [193] = '_', // - [194] = '_', // - [195] = '_', // - [196] = '_', // - [197] = '_', // - [198] = '_', // - [199] = '_', // - [200] = '_', // - [201] = '_', // - [202] = '_', // - [203] = '_', // - [204] = '_', // - [205] = '_', // - [206] = '_', // - [207] = '_', // - [208] = '_', // - [209] = '_', // - [210] = '_', // - [211] = '_', // - [212] = '_', // - [213] = '_', // - [214] = '_', // - [215] = '_', // - [216] = '_', // - [217] = '_', // - [218] = '_', // - [219] = '_', // - [220] = '_', // - [221] = '_', // - [222] = '_', // - [223] = '_', // - [224] = '_', // - [225] = '_', // - [226] = '_', // - [227] = '_', // - [228] = '_', // - [229] = '_', // - [230] = '_', // - [231] = '_', // - [232] = '_', // - [233] = '_', // - [234] = '_', // - [235] = '_', // - [236] = '_', // - [237] = '_', // - [238] = '_', // - [239] = '_', // - [240] = '_', // - [241] = '_', // - [242] = '_', // - [243] = '_', // - [244] = '_', // - [245] = '_', // - [246] = '_', // - [247] = '_', // - [248] = '_', // - [249] = '_', // - [250] = '_', // - [251] = '_', // - [252] = '_', // - [253] = '_', // - [254] = '_', // - [255] = '_' // -}; - -// make sure the supplied string -// is good for a netdata chart/dimension ID/NAME -void netdata_fix_chart_id(char *s) { - while ((*s = netdata_map_chart_ids[(unsigned char) *s])) s++; -} - -/* -// http://stackoverflow.com/questions/7666509/hash-function-for-string -uint32_t simple_hash(const char *name) -{ - const char *s = name; - uint32_t hash = 5381; - int i; - - while((i = *s++)) hash = ((hash << 5) + hash) + i; - - // fprintf(stderr, "HASH: %lu %s\n", hash, name); - - return hash; -} -*/ - -/* -// http://isthe.com/chongo/tech/comp/fnv/#FNV-1a -uint32_t simple_hash(const char *name) { - unsigned char *s = (unsigned char *) name; - uint32_t hval = 0x811c9dc5; - - // FNV-1a algorithm - while (*s) { - // multiply by the 32 bit FNV magic prime mod 2^32 - // NOTE: No need to optimize with left shifts. - // GCC will use imul instruction anyway. - // Tested with 'gcc -O3 -S' - //hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24); - hval *= 16777619; - - // xor the bottom with the current octet - hval ^= (uint32_t) *s++; - } - - // fprintf(stderr, "HASH: %u = %s\n", hval, name); - return hval; -} - -uint32_t simple_uhash(const char *name) { - unsigned char *s = (unsigned char *) name; - uint32_t hval = 0x811c9dc5, c; - - // FNV-1a algorithm - while ((c = *s++)) { - if (unlikely(c >= 'A' && c <= 'Z')) c += 'a' - 'A'; - hval *= 16777619; - hval ^= c; - } - return hval; -} -*/ - -/* -// http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx -// one at a time hash -uint32_t simple_hash(const char *name) { - unsigned char *s = (unsigned char *)name; - uint32_t h = 0; - - while(*s) { - h += *s++; - h += (h << 10); - h ^= (h >> 6); - } - - h += (h << 3); - h ^= (h >> 11); - h += (h << 15); - - // fprintf(stderr, "HASH: %u = %s\n", h, name); - - return h; -} -*/ - -void strreverse(char *begin, char *end) { - while (end > begin) { - // clearer code. 
- char aux = *end; - *end-- = *begin; - *begin++ = aux; - } -} - -char *strsep_on_1char(char **ptr, char c) { - if(unlikely(!ptr || !*ptr)) - return NULL; - - // remember the position we started - char *s = *ptr; - - // skip separators in front - while(*s == c) s++; - char *ret = s; - - // find the next separator - while(*s++) { - if(unlikely(*s == c)) { - *s++ = '\0'; - *ptr = s; - return ret; - } - } - - *ptr = NULL; - return ret; -} - -char *mystrsep(char **ptr, char *s) { - char *p = ""; - while (p && !p[0] && *ptr) p = strsep(ptr, s); - return (p); -} - -char *trim(char *s) { - // skip leading spaces - while (*s && isspace(*s)) s++; - if (!*s) return NULL; - - // skip trailing spaces - // this way is way faster. Writes only one NUL char. - ssize_t l = strlen(s); - if (--l >= 0) { - char *p = s + l; - while (p > s && isspace(*p)) p--; - *++p = '\0'; - } - - if (!*s) return NULL; - - return s; -} - -inline char *trim_all(char *buffer) { - char *d = buffer, *s = buffer; - - // skip spaces - while(isspace(*s)) s++; - - while(*s) { - // copy the non-space part - while(*s && !isspace(*s)) *d++ = *s++; - - // add a space if we have to - if(*s && isspace(*s)) { - *d++ = ' '; - s++; - } - - // skip spaces - while(isspace(*s)) s++; - } - - *d = '\0'; - - if(d > buffer) { - d--; - if(isspace(*d)) *d = '\0'; - } - - if(!buffer[0]) return NULL; - return buffer; -} - -static int memory_file_open(const char *filename, size_t size) { - // info("memory_file_open('%s', %zu", filename, size); - - int fd = open(filename, O_RDWR | O_CREAT | O_NOATIME, 0664); - if (fd != -1) { - if (lseek(fd, size, SEEK_SET) == (off_t) size) { - if (write(fd, "", 1) == 1) { - if (ftruncate(fd, size)) - error("Cannot truncate file '%s' to size %zu. Will use the larger file.", filename, size); - } - else error("Cannot write to file '%s' at position %zu.", filename, size); - } - else error("Cannot seek file '%s' to size %zu.", filename, size); - } - else error("Cannot create/open file '%s'.", filename); - - return fd; -} - -// mmap_shared is used for memory mode = map -static void *memory_file_mmap(const char *filename, size_t size, int flags) { - // info("memory_file_mmap('%s', %zu", filename, size); - static int log_madvise = 1; - - int fd = -1; - if(filename) { - fd = memory_file_open(filename, size); - if(fd == -1) return MAP_FAILED; - } - - void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0); - if (mem != MAP_FAILED) { -#ifdef NETDATA_LOG_ALLOCATIONS - mmap_accounting(size); -#endif - int advise = MADV_SEQUENTIAL | MADV_DONTFORK; - if (flags & MAP_SHARED) advise |= MADV_WILLNEED; - - if (madvise(mem, size, advise) != 0 && log_madvise) { - error("Cannot advise the kernel about shared memory usage."); - log_madvise--; - } - } - - if(fd != -1) - close(fd); - - return mem; -} - -#ifdef MADV_MERGEABLE -static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) { - // info("memory_file_mmap_ksm('%s', %zu", filename, size); - static int log_madvise_2 = 1, log_madvise_3 = 1; - - int fd = -1; - if(filename) { - fd = memory_file_open(filename, size); - if(fd == -1) return MAP_FAILED; - } - - void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0); - if (mem != MAP_FAILED) { -#ifdef NETDATA_LOG_ALLOCATIONS - mmap_accounting(size); -#endif - if(fd != -1) { - if (lseek(fd, 0, SEEK_SET) == 0) { - if (read(fd, mem, size) != (ssize_t) size) - error("Cannot read from file '%s'", filename); - } - else error("Cannot seek to beginning of file '%s'.", filename); - } - - // don't
use MADV_SEQUENTIAL|MADV_DONTFORK, they disable MADV_MERGEABLE - if (madvise(mem, size, MADV_SEQUENTIAL | MADV_DONTFORK) != 0 && log_madvise_2) { - error("Cannot advise the kernel about the memory usage (MADV_SEQUENTIAL|MADV_DONTFORK) of file '%s'.", filename); - log_madvise_2--; - } - - if (madvise(mem, size, MADV_MERGEABLE) != 0 && log_madvise_3) { - error("Cannot advise the kernel about the memory usage (MADV_MERGEABLE) of file '%s'.", filename); - log_madvise_3--; - } - } - - if(fd != -1) - close(fd); - - return mem; -} -#else -static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) { - // info("memory_file_mmap_ksm FALLBACK ('%s', %zu", filename, size); - - if(filename) - return memory_file_mmap(filename, size, flags); - - // when KSM is not available and no filename is given (memory mode = ram), - // we just report failure - return MAP_FAILED; -} -#endif - -void *mymmap(const char *filename, size_t size, int flags, int ksm) { - void *mem = NULL; - - if (filename && (flags & MAP_SHARED || !enable_ksm || !ksm)) - // memory mode = map | save - // when KSM is not enabled - // MAP_SHARED is used for memory mode = map (no KSM possible) - mem = memory_file_mmap(filename, size, flags); - - else - // memory mode = save | ram - // when KSM is enabled - // for memory mode = ram, the filename is NULL - mem = memory_file_mmap_ksm(filename, size, flags); - - if(mem == MAP_FAILED) return NULL; - - errno = 0; - return mem; -} - -int memory_file_save(const char *filename, void *mem, size_t size) { - char tmpfilename[FILENAME_MAX + 1]; - - snprintfz(tmpfilename, FILENAME_MAX, "%s.%ld.tmp", filename, (long) getpid()); - - int fd = open(tmpfilename, O_RDWR | O_CREAT | O_NOATIME, 0664); - if (fd < 0) { - error("Cannot create/open file '%s'.", filename); - return -1; - } - - if (write(fd, mem, size) != (ssize_t) size) { - error("Cannot write to file '%s' %ld bytes.", filename, (long) size); - close(fd); - return -1; - } - - close(fd); - - if (rename(tmpfilename, filename)) { - error("Cannot rename '%s' to '%s'", tmpfilename, filename); - return -1; - } - - return 0; -} - -int fd_is_valid(int fd) { - return fcntl(fd, F_GETFD) != -1 || errno != EBADF; -} - -char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len) { - char *s = fgets(buf, (int)buf_size, fp); - if (!s) return NULL; - - char *t = s; - if (*t != '\0') { - // find the string end - while (*++t != '\0'); - - // trim trailing spaces/newlines/tabs - while (--t > s && *t == '\n') - *t = '\0'; - } - - if (len) - *len = t - s + 1; - - return s; -} - -int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args) { - int size = vsnprintf(dst, n, fmt, args); - - if (unlikely((size_t) size > n)) { - // truncated - size = (int)n; - } - - dst[size] = '\0'; - return size; -} - -int snprintfz(char *dst, size_t n, const char *fmt, ...) 
{ - va_list args; - - va_start(args, fmt); - int ret = vsnprintfz(dst, n, fmt, args); - va_end(args); - - return ret; -} - -// ---------------------------------------------------------------------------- -// system functions -// to retrieve settings of the system - -int processors = 1; -long get_system_cpus(void) { - processors = 1; - - #ifdef __APPLE__ - int32_t tmp_processors; - - if (unlikely(GETSYSCTL_BY_NAME("hw.logicalcpu", tmp_processors))) { - error("Assuming system has %d processors.", processors); - } else { - processors = tmp_processors; - } - - return processors; - #elif __FreeBSD__ - int32_t tmp_processors; - - if (unlikely(GETSYSCTL_BY_NAME("hw.ncpu", tmp_processors))) { - error("Assuming system has %d processors.", processors); - } else { - processors = tmp_processors; - } - - return processors; - #else - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix); - - procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT); - if(!ff) { - error("Cannot open file '%s'. Assuming system has %d processors.", filename, processors); - return processors; - } - - ff = procfile_readall(ff); - if(!ff) { - error("Cannot read file '%s'. Assuming system has %d processors.", filename, processors); - return processors; - } - - processors = 0; - unsigned int i; - for(i = 0; i < procfile_lines(ff); i++) { - if(!procfile_linewords(ff, i)) continue; - - if(strncmp(procfile_lineword(ff, i, 0), "cpu", 3) == 0) processors++; - } - processors--; - if(processors < 1) processors = 1; - - procfile_close(ff); - - debug(D_SYSTEM, "System has %d processors.", processors); - return processors; - - #endif /* __APPLE__, __FreeBSD__ */ -} - -pid_t pid_max = 32768; -pid_t get_system_pid_max(void) { - #ifdef __APPLE__ - // As we currently do not know a way to query pid_max from the OS - // we use the number defined in bsd/sys/proc_internal.h in XNU sources - pid_max = 99999; - return pid_max; - #elif __FreeBSD__ - int32_t tmp_pid_max; - - if (unlikely(GETSYSCTL_BY_NAME("kern.pid_max", tmp_pid_max))) { - pid_max = 99999; - error("Assuming system's maximum pid is %d.", pid_max); - } else { - pid_max = tmp_pid_max; - } - - return pid_max; - #else - - static char read = 0; - if(unlikely(read)) return pid_max; - read = 1; - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/pid_max", netdata_configured_host_prefix); - - unsigned long long max = 0; - if(read_single_number_file(filename, &max) != 0) { - error("Cannot open file '%s'. Assuming system supports %d pids.", filename, pid_max); - return pid_max; - } - - if(!max) { - error("Cannot parse file '%s'.
Assuming system supports %d pids.", filename, pid_max); - return pid_max; - } - - pid_max = (pid_t) max; - return pid_max; - - #endif /* __APPLE__, __FreeBSD__ */ -} - -unsigned int hz; -void get_system_HZ(void) { - long ticks; - - if ((ticks = sysconf(_SC_CLK_TCK)) == -1) { - error("Cannot get system clock ticks"); - } - - hz = (unsigned int) ticks; -} - -/* -// poor man cycle counting -static unsigned long tsc; -void begin_tsc(void) { - unsigned long a, d; - asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx"); - tsc = ((unsigned long)d << 32) | (unsigned long)a; -} -unsigned long end_tsc(void) { - unsigned long a, d; - asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx"); - return (((unsigned long)d << 32) | (unsigned long)a) - tsc; -} -*/ - -int recursively_delete_dir(const char *path, const char *reason) { - DIR *dir = opendir(path); - if(!dir) { - error("Cannot read %s directory to be deleted '%s'", reason?reason:"", path); - return -1; - } - - int ret = 0; - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR - && ( - (de->d_name[0] == '.' && de->d_name[1] == '\0') - || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0') - )) - continue; - - char fullpath[FILENAME_MAX + 1]; - snprintfz(fullpath, FILENAME_MAX, "%s/%s", path, de->d_name); - - if(de->d_type == DT_DIR) { - int r = recursively_delete_dir(fullpath, reason); - if(r > 0) ret += r; - continue; - } - - info("Deleting %s file '%s'", reason?reason:"", fullpath); - if(unlikely(unlink(fullpath) == -1)) - error("Cannot delete %s file '%s'", reason?reason:"", fullpath); - else - ret++; - } - - info("Deleting empty directory '%s'", path); - if(unlikely(rmdir(path) == -1)) - error("Cannot delete empty directory '%s'", path); - else - ret++; - - closedir(dir); - - return ret; -} diff --git a/src/common.h b/src/common.h deleted file mode 100644 index 15fc50a6a..000000000 --- a/src/common.h +++ /dev/null @@ -1,360 +0,0 @@ -#ifndef NETDATA_COMMON_H -#define NETDATA_COMMON_H 1 - -#ifdef HAVE_CONFIG_H -#include <config.h> -#endif - - -// ---------------------------------------------------------------------------- -// system include files for all netdata C programs - -/* select the memory allocator, based on autoconf findings */ -#if defined(ENABLE_JEMALLOC) - -#if defined(HAVE_JEMALLOC_JEMALLOC_H) -#include <jemalloc/jemalloc.h> -#else -#include <malloc.h> -#endif - -#elif defined(ENABLE_TCMALLOC) - -#include <google/tcmalloc.h> - -#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ - -#if !(defined(__FreeBSD__) || defined(__APPLE__)) -#include <malloc.h> -#endif /* __FreeBSD__ || __APPLE__ */ - -#endif - -#include <pthread.h> -#include <errno.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdarg.h> -#include <stddef.h> -#include <ctype.h> -#include <string.h> -#include <strings.h> -#include <arpa/inet.h> -#include <netinet/tcp.h> -#include <sys/ioctl.h> - -#ifdef HAVE_NETINET_IN_H -#include <netinet/in.h> -#endif - -#ifdef HAVE_RESOLV_H -#include <resolv.h> -#endif - -#include <dirent.h> -#include <fcntl.h> -#include <getopt.h> -#include <grp.h> -#include <pwd.h> -#include <locale.h> - -#ifdef HAVE_NETDB_H -#include <netdb.h> -#endif - -#include <net/if.h> - -#include <poll.h> -#include <signal.h> -#include <syslog.h> -#include <sys/mman.h> - -#ifdef HAVE_SYS_PRCTL_H -#include <sys/prctl.h> -#endif - -#include <sys/resource.h> -#include <sys/socket.h> -#include <sys/stat.h> -#include <sys/statvfs.h> -#include <sys/syscall.h> -#include 
<sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <sys/un.h>
-#include <time.h>
-#include <unistd.h>
-#include <uuid/uuid.h>
-
-// #1408
-#ifdef MAJOR_IN_MKDEV
-#include <sys/mkdev.h>
-#endif
-#ifdef MAJOR_IN_SYSMACROS
-#include <sys/sysmacros.h>
-#endif
-
-/*
-#include <mntent.h>
-*/
-
-#ifdef STORAGE_WITH_MATH
-#include <math.h>
-#include <float.h>
-#endif
-
-#if defined(HAVE_INTTYPES_H)
-#include <inttypes.h>
-#elif defined(HAVE_STDINT_H)
-#include <stdint.h>
-#endif
-
-#ifdef NETDATA_WITH_ZLIB
-#include <zlib.h>
-#endif
-
-#ifdef HAVE_CAPABILITY
-#include <sys/capability.h>
-#endif
-
-// ----------------------------------------------------------------------------
-// netdata chart priorities
-
-// This is a work in progress - the scope is to collect here all chart priorities.
-// These should be based on the CONTEXT of the charts + the chart id when needed
-// - for each SECTION +1000 (or +X000 for big sections)
-// - for each FAMILY +100
-// - for each CHART +10
-
-// Memory Section - 1xxx
-#define NETDATA_CHART_PRIO_MEM_SYSTEM 1000
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1020
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1030
-#define NETDATA_CHART_PRIO_MEM_KERNEL 1100
-#define NETDATA_CHART_PRIO_MEM_SLAB 1200
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250
-#define NETDATA_CHART_PRIO_MEM_KSM 1300
-#define NETDATA_CHART_PRIO_MEM_NUMA 1400
-#define NETDATA_CHART_PRIO_MEM_HW 1500
-
-
-// ----------------------------------------------------------------------------
-// netdata common definitions
-
-#if (SIZEOF_VOID_P == 8)
-#define ENVIRONMENT64
-#elif (SIZEOF_VOID_P == 4)
-#define ENVIRONMENT32
-#else
-#error "Cannot detect if this is a 32 or 64 bit CPU"
-#endif
-
-#ifdef __GNUC__
-#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#endif // __GNUC__
-
-#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
-#define NEVERNULL __attribute__((returns_nonnull))
-#else
-#define NEVERNULL
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE
-#define NOINLINE __attribute__((noinline))
-#else
-#define NOINLINE
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
-#define MALLOCLIKE __attribute__((malloc))
-#else
-#define MALLOCLIKE
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
-#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
-#else
-#define PRINTFLIKE(f, a)
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN
-#define NORETURN __attribute__ ((noreturn))
-#else
-#define NORETURN
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
-#define WARNUNUSED __attribute__ ((warn_unused_result))
-#else
-#define WARNUNUSED
-#endif
-
-#ifdef abs
-#undef abs
-#endif
-#define abs(x) (((x) < 0)?
(-(x)) : (x)) - -#define GUID_LEN 36 - -// ---------------------------------------------------------------------------- -// netdata include files - -#include "clocks.h" -#include "log.h" -#include "threads.h" -#include "locks.h" -#include "simple_pattern.h" -#include "avl.h" -#include "global_statistics.h" -#include "storage_number.h" -#include "web_buffer.h" -#include "web_buffer_svg.h" -#include "url.h" -#include "popen.h" - -#include "procfile.h" -#include "appconfig.h" -#include "dictionary.h" -#include "proc_self_mountinfo.h" -#include "plugin_checks.h" -#include "plugin_idlejitter.h" -#include "plugin_nfacct.h" - -#if defined(__FreeBSD__) -#include <pthread_np.h> -#include "plugin_freebsd.h" -#define NETDATA_OS_TYPE "freebsd" -#elif defined(__APPLE__) -#include "plugin_macos.h" -#define NETDATA_OS_TYPE "macos" -#else -#include "plugin_proc.h" -#include "plugin_proc_diskspace.h" -#define NETDATA_OS_TYPE "linux" -#endif /* __FreeBSD__, __APPLE__*/ - -typedef enum rrdcalc_status { - RRDCALC_STATUS_REMOVED = -2, - RRDCALC_STATUS_UNDEFINED = -1, - RRDCALC_STATUS_UNINITIALIZED = 0, - RRDCALC_STATUS_CLEAR = 1, - RRDCALC_STATUS_RAISED = 2, - RRDCALC_STATUS_WARNING = 3, - RRDCALC_STATUS_CRITICAL = 4 -} RRDCALC_STATUS; - -#include "eval.h" -#include "health.h" - -#include "statistical.h" -#include "socket.h" -#include "rrd.h" -#include "plugin_tc.h" -#include "plugins_d.h" -#include "statsd.h" -#include "rrd2json.h" -#include "rrd2json_api_old.h" -#include "web_client.h" -#include "web_server.h" -#include "registry.h" -#include "signals.h" -#include "daemon.h" -#include "main.h" -#include "unit_test.h" -#include "ipc.h" -#include "backends.h" -#include "backend_prometheus.h" -#include "inlined.h" -#include "adaptive_resortable_list.h" -#include "rrdpush.h" -#include "web_api_v1.h" -#include "web_api_old.h" - -extern char *netdata_configured_hostname; -extern char *netdata_configured_config_dir; -extern char *netdata_configured_log_dir; -extern char *netdata_configured_plugins_dir_base; -extern char *netdata_configured_plugins_dir; -extern char *netdata_configured_web_dir; -extern char *netdata_configured_cache_dir; -extern char *netdata_configured_varlib_dir; -extern char *netdata_configured_home_dir; -extern char *netdata_configured_host_prefix; -extern char *netdata_configured_timezone; - -extern void netdata_fix_chart_id(char *s); -extern void netdata_fix_chart_name(char *s); - -extern void strreverse(char* begin, char* end); -extern char *mystrsep(char **ptr, char *s); -extern char *trim(char *s); // remove leading and trailing spaces; may return NULL -extern char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL - -extern int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args); -extern int snprintfz(char *dst, size_t n, const char *fmt, ...) 
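The snprintfz()/vsnprintfz() pair declared here differ from plain snprintf() in one respect: n is the maximum content length, the destination buffer must hold n+1 bytes, and the result is always NUL-terminated even when the output is truncated. A self-contained sketch of the same contract, for illustration only (my_snprintfz is a hypothetical stand-in, not the netdata symbol):

#include <stdarg.h>
#include <stdio.h>

// Illustrative re-implementation of the snprintfz() contract:
// 'n' is the maximum content length, dst must hold n+1 bytes,
// and dst is always NUL-terminated, even on truncation.
static int my_snprintfz(char *dst, size_t n, const char *fmt, ...) {
    va_list args;
    va_start(args, fmt);
    int size = vsnprintf(dst, n, fmt, args);  // returns the would-be length
    va_end(args);

    if ((size_t)size > n)
        size = (int)n;   // clamp the reported length on truncation

    dst[size] = '\0';    // writes at most dst[n], which the caller reserved
    return size;
}

int main(void) {
    char buf[8 + 1];     // 8 content bytes plus the guaranteed terminator
    int len = my_snprintfz(buf, 8, "pid=%d", 123456789);
    printf("'%s' (reported len=%d)\n", buf, len);  // truncated, still NUL-terminated
    return 0;
}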
PRINTFLIKE(3, 4); - -// memory allocation functions that handle failures -#ifdef NETDATA_LOG_ALLOCATIONS -#define strdupz(s) strdupz_int(__FILE__, __FUNCTION__, __LINE__, s) -#define callocz(nmemb, size) callocz_int(__FILE__, __FUNCTION__, __LINE__, nmemb, size) -#define mallocz(size) mallocz_int(__FILE__, __FUNCTION__, __LINE__, size) -#define reallocz(ptr, size) reallocz_int(__FILE__, __FUNCTION__, __LINE__, ptr, size) -#define freez(ptr) freez_int(__FILE__, __FUNCTION__, __LINE__, ptr) - -extern char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s); -extern void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size); -extern void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size); -extern void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size); -extern void freez_int(const char *file, const char *function, const unsigned long line, void *ptr); -#else -extern char *strdupz(const char *s) MALLOCLIKE NEVERNULL; -extern void *callocz(size_t nmemb, size_t size) MALLOCLIKE NEVERNULL; -extern void *mallocz(size_t size) MALLOCLIKE NEVERNULL; -extern void *reallocz(void *ptr, size_t size) MALLOCLIKE NEVERNULL; -extern void freez(void *ptr); -#endif - -extern void json_escape_string(char *dst, const char *src, size_t size); -extern void json_fix_string(char *s); - -extern void *mymmap(const char *filename, size_t size, int flags, int ksm); -extern int memory_file_save(const char *filename, void *mem, size_t size); - -extern int fd_is_valid(int fd); - -extern struct rlimit rlimit_nofile; - -extern int enable_ksm; - -extern int sleep_usec(usec_t usec); - -extern char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len); - -extern int processors; -extern long get_system_cpus(void); - -extern pid_t pid_max; -extern pid_t get_system_pid_max(void); - -/* Number of ticks per second */ -extern unsigned int hz; -extern void get_system_HZ(void); - -extern int recursively_delete_dir(const char *path, const char *reason); - -extern volatile sig_atomic_t netdata_exit; -extern const char *os_type; - -extern const char *program_version; - -/* fix for alpine linux */ -#ifndef RUSAGE_THREAD -#ifdef RUSAGE_CHILDREN -#define RUSAGE_THREAD RUSAGE_CHILDREN -#endif -#endif - -#define BITS_IN_A_KILOBIT 1000 - -#endif /* NETDATA_COMMON_H */ diff --git a/src/daemon.c b/src/daemon.c deleted file mode 100644 index 471c62c6e..000000000 --- a/src/daemon.c +++ /dev/null @@ -1,396 +0,0 @@ -#include "common.h" -#include <sched.h> - -char pidfile[FILENAME_MAX + 1] = ""; - -static void chown_open_file(int fd, uid_t uid, gid_t gid) { - if(fd == -1) return; - - struct stat buf; - - if(fstat(fd, &buf) == -1) { - error("Cannot fstat() fd %d", fd); - return; - } - - if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { - if(fchown(fd, uid, gid) == -1) - error("Cannot fchown() fd %d.", fd); - } -} - -void create_needed_dir(const char *dir, uid_t uid, gid_t gid) -{ - // attempt to create the directory - if(mkdir(dir, 0755) == 0) { - // we created it - - // chown it to match the required user - if(chown(dir, uid, gid) == -1) - error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid); - } - else if(errno != EEXIST) - // log an error only if the directory does not exist - error("Cannot create directory '%s'", dir); -} - -int become_user(const char *username, int pid_fd) { - int am_i_root = 
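The strdupz()/callocz()/mallocz()/reallocz() family declared here never returns NULL; on allocation failure netdata logs and aborts, which is why call sites carry no NULL checks (and why the prototypes carry NEVERNULL). A minimal standalone sketch of that fail-fast pattern; xmalloc/xstrdup are hypothetical names, and the real wrappers additionally support the NETDATA_LOG_ALLOCATIONS tracing variants shown above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Fail-fast allocator sketch: abort on out-of-memory instead of
// returning NULL, so every caller can assume success.
static void *xmalloc(size_t size) {
    void *p = malloc(size);
    if (!p) {
        fprintf(stderr, "FATAL: cannot allocate %zu bytes\n", size);
        exit(1);
    }
    return p;
}

static char *xstrdup(const char *s) {
    size_t len = strlen(s) + 1;
    char *p = xmalloc(len);
    memcpy(p, s, len);
    return p;
}

int main(void) {
    char *name = xstrdup("netdata");
    printf("%s\n", name);   // no NULL check needed
    free(name);
    return 0;
}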
(getuid() == 0)?1:0; - - struct passwd *pw = getpwnam(username); - if(!pw) { - error("User %s is not present.", username); - return -1; - } - - uid_t uid = pw->pw_uid; - gid_t gid = pw->pw_gid; - - create_needed_dir(netdata_configured_cache_dir, uid, gid); - create_needed_dir(netdata_configured_varlib_dir, uid, gid); - - if(pidfile[0]) { - if(chown(pidfile, uid, gid) == -1) - error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid); - } - - int ngroups = (int)sysconf(_SC_NGROUPS_MAX); - gid_t *supplementary_groups = NULL; - if(ngroups > 0) { - supplementary_groups = mallocz(sizeof(gid_t) * ngroups); - if(getgrouplist(username, gid, supplementary_groups, &ngroups) == -1) { - if(am_i_root) - error("Cannot get supplementary groups of user '%s'.", username); - - ngroups = 0; - } - } - - chown_open_file(STDOUT_FILENO, uid, gid); - chown_open_file(STDERR_FILENO, uid, gid); - chown_open_file(stdaccess_fd, uid, gid); - chown_open_file(pid_fd, uid, gid); - - if(supplementary_groups && ngroups > 0) { - if(setgroups((size_t)ngroups, supplementary_groups) == -1) { - if(am_i_root) - error("Cannot set supplementary groups for user '%s'", username); - } - ngroups = 0; - } - - if(supplementary_groups) - freez(supplementary_groups); - -#ifdef __APPLE__ - if(setregid(gid, gid) != 0) { -#else - if(setresgid(gid, gid, gid) != 0) { -#endif /* __APPLE__ */ - error("Cannot switch to user's %s group (gid: %u).", username, gid); - return -1; - } - -#ifdef __APPLE__ - if(setreuid(uid, uid) != 0) { -#else - if(setresuid(uid, uid, uid) != 0) { -#endif /* __APPLE__ */ - error("Cannot switch to user %s (uid: %u).", username, uid); - return -1; - } - - if(setgid(gid) != 0) { - error("Cannot switch to user's %s group (gid: %u).", username, gid); - return -1; - } - if(setegid(gid) != 0) { - error("Cannot effectively switch to user's %s group (gid: %u).", username, gid); - return -1; - } - if(setuid(uid) != 0) { - error("Cannot switch to user %s (uid: %u).", username, uid); - return -1; - } - if(seteuid(uid) != 0) { - error("Cannot effectively switch to user %s (uid: %u).", username, uid); - return -1; - } - - return(0); -} - -#ifndef OOM_SCORE_ADJ_MAX -#define OOM_SCORE_ADJ_MAX (1000) -#endif -#ifndef OOM_SCORE_ADJ_MIN -#define OOM_SCORE_ADJ_MIN (-1000) -#endif - -static void oom_score_adj(void) { - char buf[30 + 1]; - long long int old_score, wanted_score = OOM_SCORE_ADJ_MAX, final_score = 0; - - // read the existing score - if(read_single_signed_number_file("/proc/self/oom_score_adj", &old_score)) { - error("Out-Of-Memory (OOM) score setting is not supported on this system."); - return; - } - - if(old_score != 0) - wanted_score = old_score; - - // check the environment - char *s = getenv("OOMScoreAdjust"); - if(!s || !*s) { - snprintfz(buf, 30, "%d", (int)wanted_score); - s = buf; - } - - // check netdata.conf configuration - s = config_get(CONFIG_SECTION_GLOBAL, "OOM score", s); - if(s && *s && (isdigit(*s) || *s == '-' || *s == '+')) - wanted_score = atoll(s); - else { - info("Out-Of-Memory (OOM) score not changed due to non-numeric setting: '%s' (running with %d)", s, (int)old_score); - return; - } - - if(wanted_score < OOM_SCORE_ADJ_MIN) { - error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN); - wanted_score = OOM_SCORE_ADJ_MIN; - } - - if(wanted_score > OOM_SCORE_ADJ_MAX) { - error("Wanted Out-Of-Memory (OOM) score %d is too big. 
Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX); - wanted_score = OOM_SCORE_ADJ_MAX; - } - - if(old_score == wanted_score) { - info("Out-Of-Memory (OOM) score is already set to the wanted value %d", (int)old_score); - return; - } - - int written = 0; - int fd = open("/proc/self/oom_score_adj", O_WRONLY); - if(fd != -1) { - snprintfz(buf, 30, "%d", (int)wanted_score); - ssize_t len = strlen(buf); - if(len > 0 && write(fd, buf, (size_t)len) == len) written = 1; - close(fd); - - if(written) { - if(read_single_signed_number_file("/proc/self/oom_score_adj", &final_score)) - error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score); - else if(final_score == wanted_score) - info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score); - else - error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score); - } - else - error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score); - } - else - error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing."); -} - -static void process_nice_level(void) { -#ifdef HAVE_NICE - int nice_level = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process nice level", 19); - if(nice(nice_level) == -1) error("Cannot set netdata CPU nice level to %d.", nice_level); - else debug(D_SYSTEM, "Set netdata nice level to %d.", nice_level); -#endif // HAVE_NICE -}; - -#ifdef HAVE_SCHED_SETSCHEDULER - -#define SCHED_FLAG_NONE 0x00 -#define SCHED_FLAG_PRIORITY_CONFIGURABLE 0x01 // the priority is user configurable -#define SCHED_FLAG_KEEP_AS_IS 0x04 // do not attempt to set policy, priority or nice() -#define SCHED_FLAG_USE_NICE 0x08 // use nice() after setting this policy - -struct sched_def { - char *name; - int policy; - int priority; - uint8_t flags; -} scheduler_defaults[] = { - - // the order of array members is important! - // the first defined is the default used by netdata - - // the available members are important too! 
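oom_score_adj() above is a read-clamp-write-verify cycle against /proc/self/oom_score_adj: Linux accepts values in [-1000, 1000], and an unprivileged process may only raise its own score. A stripped-down sketch of the same cycle, with the netdata.conf and OOMScoreAdjust environment handling left out:

#include <stdio.h>

// Minimal read-clamp-write-verify against /proc/self/oom_score_adj
// (Linux only; values outside [-1000, 1000] are clamped).
static int set_oom_score_adj(long wanted) {
    if (wanted < -1000) wanted = -1000;
    if (wanted >  1000) wanted =  1000;

    FILE *fp = fopen("/proc/self/oom_score_adj", "r+");
    if (!fp) return -1;

    long old = 0;
    if (fscanf(fp, "%ld", &old) != 1) { fclose(fp); return -1; }
    rewind(fp);                        // reposition before switching to writing
    fprintf(fp, "%ld\n", wanted);
    fclose(fp);

    fp = fopen("/proc/self/oom_score_adj", "r");  // verify by re-reading
    if (!fp) return -1;
    long now = 0;
    int ok = (fscanf(fp, "%ld", &now) == 1 && now == wanted);
    fclose(fp);

    fprintf(stderr, "OOM score: %ld -> %ld (%s)\n", old, now, ok ? "ok" : "failed");
    return ok ? 0 : -1;
}

int main(void) {
    return set_oom_score_adj(900) ? 1 : 0;  // raising the score needs no privilege
}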
- // these are all the possible scheduling policies supported by netdata - -#ifdef SCHED_IDLE - { "idle", SCHED_IDLE, 0, SCHED_FLAG_NONE }, -#endif - -#ifdef SCHED_OTHER - { "nice", SCHED_OTHER, 0, SCHED_FLAG_USE_NICE }, - { "other", SCHED_OTHER, 0, SCHED_FLAG_USE_NICE }, -#endif - -#ifdef SCHED_RR - { "rr", SCHED_RR, 0, SCHED_FLAG_PRIORITY_CONFIGURABLE }, -#endif - -#ifdef SCHED_FIFO - { "fifo", SCHED_FIFO, 0, SCHED_FLAG_PRIORITY_CONFIGURABLE }, -#endif - -#ifdef SCHED_BATCH - { "batch", SCHED_BATCH, 0, SCHED_FLAG_USE_NICE }, -#endif - - // do not change the scheduling priority - { "keep", 0, 0, SCHED_FLAG_KEEP_AS_IS }, - { "none", 0, 0, SCHED_FLAG_KEEP_AS_IS }, - - // array termination - { NULL, 0, 0, 0 } -}; - -static void sched_setscheduler_set(void) { - - if(scheduler_defaults[0].name) { - const char *name = scheduler_defaults[0].name; - int policy = scheduler_defaults[0].policy, priority = scheduler_defaults[0].priority; - uint8_t flags = scheduler_defaults[0].flags; - int found = 0; - - // read the configuration - name = config_get(CONFIG_SECTION_GLOBAL, "process scheduling policy", name); - int i; - for(i = 0 ; scheduler_defaults[i].name ; i++) { - if(!strcmp(name, scheduler_defaults[i].name)) { - found = 1; - policy = scheduler_defaults[i].policy; - priority = scheduler_defaults[i].priority; - flags = scheduler_defaults[i].flags; - - if(flags & SCHED_FLAG_KEEP_AS_IS) - return; - - if(flags & SCHED_FLAG_PRIORITY_CONFIGURABLE) - priority = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process scheduling priority", priority); - -#ifdef HAVE_SCHED_GET_PRIORITY_MIN - errno = 0; - if(priority < sched_get_priority_min(policy)) { - error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy)); - priority = sched_get_priority_min(policy); - } -#endif -#ifdef HAVE_SCHED_GET_PRIORITY_MAX - errno = 0; - if(priority > sched_get_priority_max(policy)) { - error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy)); - priority = sched_get_priority_max(policy); - } -#endif - break; - } - } - - if(!found) { - error("Unknown scheduling policy '%s' - falling back to nice", name); - goto fallback; - } - - const struct sched_param param = { - .sched_priority = priority - }; - - errno = 0; - i = sched_setscheduler(0, policy, ¶m); - if(i != 0) { - error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. 
Falling back to nice.", name, policy, priority); - } - else { - info("Adjusted netdata scheduling policy to %s (%d), with priority %d.", name, policy, priority); - if(!(flags & SCHED_FLAG_USE_NICE)) - return; - } - } - -fallback: - process_nice_level(); -} -#else -static void sched_setscheduler_set(void) { - process_nice_level(); -} -#endif - -int become_daemon(int dont_fork, const char *user) -{ - if(!dont_fork) { - int i = fork(); - if(i == -1) { - perror("cannot fork"); - exit(1); - } - if(i != 0) { - exit(0); // the parent - } - - // become session leader - if (setsid() < 0) { - perror("Cannot become session leader."); - exit(2); - } - - // fork() again - i = fork(); - if(i == -1) { - perror("cannot fork"); - exit(1); - } - if(i != 0) { - exit(0); // the parent - } - } - - // generate our pid file - int pidfd = -1; - if(pidfile[0]) { - pidfd = open(pidfile, O_WRONLY | O_CREAT, 0644); - if(pidfd >= 0) { - if(ftruncate(pidfd, 0) != 0) - error("Cannot truncate pidfile '%s'.", pidfile); - - char b[100]; - sprintf(b, "%d\n", getpid()); - ssize_t i = write(pidfd, b, strlen(b)); - if(i <= 0) - error("Cannot write pidfile '%s'.", pidfile); - } - else error("Failed to open pidfile '%s'.", pidfile); - } - - // Set new file permissions - umask(0007); - - // adjust my Out-Of-Memory score - oom_score_adj(); - - // never become a problem - sched_setscheduler_set(); - - if(user && *user) { - if(become_user(user, pidfd) != 0) { - error("Cannot become user '%s'. Continuing as we are.", user); - } - else debug(D_SYSTEM, "Successfully became user '%s'.", user); - } - else { - create_needed_dir(netdata_configured_cache_dir, getuid(), getgid()); - create_needed_dir(netdata_configured_varlib_dir, getuid(), getgid()); - } - - if(pidfd != -1) - close(pidfd); - - return(0); -} diff --git a/src/daemon.h b/src/daemon.h deleted file mode 100644 index 150d74e3a..000000000 --- a/src/daemon.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef NETDATA_DAEMON_H -#define NETDATA_DAEMON_H 1 - -extern int become_user(const char *username, int pid_fd); - -extern int become_daemon(int dont_fork, const char *user); - -extern void netdata_cleanup_and_exit(int i); - -extern char pidfile[]; - -#endif /* NETDATA_DAEMON_H */ diff --git a/src/dictionary.c b/src/dictionary.c deleted file mode 100644 index 512b4bbe6..000000000 --- a/src/dictionary.c +++ /dev/null @@ -1,292 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// dictionary statistics - -static inline void NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(DICTIONARY *dict) { - if(likely(dict->stats)) - dict->stats->inserts++; -} -static inline void NETDATA_DICTIONARY_STATS_DELETES_PLUS1(DICTIONARY *dict) { - if(likely(dict->stats)) - dict->stats->deletes++; -} -static inline void NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(DICTIONARY *dict) { - if(likely(dict->stats)) - dict->stats->searches++; -} -static inline void NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(DICTIONARY *dict) { - if(likely(dict->stats)) - dict->stats->entries++; -} -static inline void NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(DICTIONARY *dict) { - if(likely(dict->stats)) - dict->stats->entries--; -} - - -// ---------------------------------------------------------------------------- -// dictionary locks - -static inline void dictionary_read_lock(DICTIONARY *dict) { - if(likely(dict->rwlock)) { - // debug(D_DICTIONARY, "Dictionary READ lock"); - netdata_rwlock_rdlock(dict->rwlock); - } -} - -static inline void dictionary_write_lock(DICTIONARY *dict) { - 
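become_daemon() above follows the classic double-fork recipe: the first fork() plus setsid() detaches from the parent and the controlling terminal, and the second fork() gives up session leadership so a terminal can never be reacquired. Condensed into a standalone sketch, with the pidfile, OOM and scheduling steps omitted:

#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

// Classic double-fork daemonization, as used by become_daemon().
static void daemonize(void) {
    pid_t pid = fork();
    if (pid < 0) exit(1);
    if (pid > 0) exit(0);        // first parent exits

    if (setsid() < 0) exit(2);   // become session leader, drop the terminal

    pid = fork();                // second fork: no longer a session leader,
    if (pid < 0) exit(1);        // so a controlling terminal cannot be reacquired
    if (pid > 0) exit(0);

    umask(0007);                 // same permissions mask as become_daemon()
}

int main(void) {
    daemonize();
    // daemon work would continue here
    return 0;
}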
if(likely(dict->rwlock)) { - // debug(D_DICTIONARY, "Dictionary WRITE lock"); - netdata_rwlock_wrlock(dict->rwlock); - } -} - -static inline void dictionary_unlock(DICTIONARY *dict) { - if(likely(dict->rwlock)) { - // debug(D_DICTIONARY, "Dictionary UNLOCK lock"); - netdata_rwlock_unlock(dict->rwlock); - } -} - - -// ---------------------------------------------------------------------------- -// avl index - -static int name_value_compare(void* a, void* b) { - if(((NAME_VALUE *)a)->hash < ((NAME_VALUE *)b)->hash) return -1; - else if(((NAME_VALUE *)a)->hash > ((NAME_VALUE *)b)->hash) return 1; - else return strcmp(((NAME_VALUE *)a)->name, ((NAME_VALUE *)b)->name); -} - -static inline NAME_VALUE *dictionary_name_value_index_find_nolock(DICTIONARY *dict, const char *name, uint32_t hash) { - NAME_VALUE tmp; - tmp.hash = (hash)?hash:simple_hash(name); - tmp.name = (char *)name; - - NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(dict); - return (NAME_VALUE *)avl_search(&(dict->values_index), (avl *) &tmp); -} - -// ---------------------------------------------------------------------------- -// internal methods - -static NAME_VALUE *dictionary_name_value_create_nolock(DICTIONARY *dict, const char *name, void *value, size_t value_len, uint32_t hash) { - debug(D_DICTIONARY, "Creating name value entry for name '%s'.", name); - - NAME_VALUE *nv = callocz(1, sizeof(NAME_VALUE)); - - if(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE) - nv->name = (char *)name; - else { - nv->name = strdupz(name); - } - - nv->hash = (hash)?hash:simple_hash(nv->name); - - if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) - nv->value = value; - else { - nv->value = mallocz(value_len); - memcpy(nv->value, value, value_len); - } - - // index it - NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(dict); - if(unlikely(avl_insert(&((dict)->values_index), (avl *)(nv)) != (avl *)nv)) - error("dictionary: INTERNAL ERROR: duplicate insertion to dictionary."); - - NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(dict); - - return nv; -} - -static void dictionary_name_value_destroy_nolock(DICTIONARY *dict, NAME_VALUE *nv) { - debug(D_DICTIONARY, "Destroying name value entry for name '%s'.", nv->name); - - NETDATA_DICTIONARY_STATS_DELETES_PLUS1(dict); - if(unlikely(avl_remove(&(dict->values_index), (avl *)(nv)) != (avl *)nv)) - error("dictionary: INTERNAL ERROR: dictionary invalid removal of node."); - - NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(dict); - - if(!(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)) { - debug(D_REGISTRY, "Dictionary freeing value of '%s'", nv->name); - freez(nv->value); - } - - if(!(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)) { - debug(D_REGISTRY, "Dictionary freeing name '%s'", nv->name); - freez(nv->name); - } - - freez(nv); -} - -// ---------------------------------------------------------------------------- -// API - basic methods - -DICTIONARY *dictionary_create(uint8_t flags) { - debug(D_DICTIONARY, "Creating dictionary."); - - DICTIONARY *dict = callocz(1, sizeof(DICTIONARY)); - - if(flags & DICTIONARY_FLAG_WITH_STATISTICS) - dict->stats = callocz(1, sizeof(struct dictionary_stats)); - - if(!(flags & DICTIONARY_FLAG_SINGLE_THREADED)) { - dict->rwlock = callocz(1, sizeof(netdata_rwlock_t)); - netdata_rwlock_init(dict->rwlock); - } - - avl_init(&dict->values_index, name_value_compare); - dict->flags = flags; - - return dict; -} - -void dictionary_destroy(DICTIONARY *dict) { - debug(D_DICTIONARY, "Destroying dictionary."); - - dictionary_write_lock(dict); - - while(dict->values_index.root) - 
dictionary_name_value_destroy_nolock(dict, (NAME_VALUE *)dict->values_index.root);
-
-    dictionary_unlock(dict);
-
-    if(dict->stats)
-        freez(dict->stats);
-
-    if(dict->rwlock) {
-        netdata_rwlock_destroy(dict->rwlock);
-        freez(dict->rwlock);
-    }
-
-    freez(dict);
-}
-
-// ----------------------------------------------------------------------------
-
-void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) {
-    debug(D_DICTIONARY, "SET dictionary entry with name '%s'.", name);
-
-    uint32_t hash = simple_hash(name);
-
-    dictionary_write_lock(dict);
-
-    NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, hash);
-    if(unlikely(!nv)) {
-        debug(D_DICTIONARY, "Dictionary entry with name '%s' not found. Creating a new one.", name);
-
-        nv = dictionary_name_value_create_nolock(dict, name, value, value_len, hash);
-        if(unlikely(!nv))
-            fatal("Cannot create name_value.");
-    }
-    else {
-        debug(D_DICTIONARY, "Dictionary entry with name '%s' found. Changing its value.", name);
-
-        if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) {
-            debug(D_REGISTRY, "Dictionary: linking value to '%s'", name);
-            nv->value = value;
-        }
-        else {
-            debug(D_REGISTRY, "Dictionary: cloning value to '%s'", name);
-
-            // copy the new value without breaking
-            // any other thread accessing the same entry
-            void *new = mallocz(value_len),
-                 *old = nv->value;
-
-            memcpy(new, value, value_len);
-            nv->value = new;
-
-            debug(D_REGISTRY, "Dictionary: freeing old value of '%s'", name);
-            freez(old);
-        }
-    }
-
-    dictionary_unlock(dict);
-
-    return nv->value;
-}
-
-void *dictionary_get(DICTIONARY *dict, const char *name) {
-    debug(D_DICTIONARY, "GET dictionary entry with name '%s'.", name);
-
-    dictionary_read_lock(dict);
-    NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
-    dictionary_unlock(dict);
-
-    if(unlikely(!nv)) {
-        debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
-        return NULL;
-    }
-
-    debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
-    return nv->value;
-}
-
-int dictionary_del(DICTIONARY *dict, const char *name) {
-    int ret;
-
-    debug(D_DICTIONARY, "DEL dictionary entry with name '%s'.", name);
-
-    dictionary_write_lock(dict);
-
-    NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
-    if(unlikely(!nv)) {
-        debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
-        ret = -1;
-    }
-    else {
-        debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
-        dictionary_name_value_destroy_nolock(dict, nv);
-        ret = 0;
-    }
-
-    dictionary_unlock(dict);
-
-    return ret;
-}
-
-
-// ----------------------------------------------------------------------------
-// API - walk through the dictionary
-// the dictionary is locked for reading while this happens
-// do not use other dictionary calls while walking the dictionary - deadlock!
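Taken together, the calls above form a small thread-safe key/value store: values are cloned by default, reads take the rwlock shared, and writes take it exclusive. A hedged usage sketch, assuming it is compiled inside the netdata tree so that common.h supplies these declarations (and stdio):

// Illustrative use of the dictionary API above; values are cloned by
// default, so passing stack variables is safe.
static int print_entry(void *entry, void *data) {
    (void)data;
    printf("value: %d\n", *(int *)entry);
    return 0;   // a negative return would abort the walk
}

void dictionary_example(void) {
    DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_DEFAULT);

    int v1 = 1, v2 = 2;
    dictionary_set(dict, "alpha", &v1, sizeof(v1));
    dictionary_set(dict, "beta",  &v2, sizeof(v2));

    int *found = (int *)dictionary_get(dict, "alpha");
    if (found) printf("alpha = %d\n", *found);

    // read-locked walk; per the comment above, do not call other
    // dictionary functions from inside the callback
    dictionary_get_all(dict, print_entry, NULL);

    dictionary_del(dict, "beta");
    dictionary_destroy(dict);
}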
- -static int dictionary_walker(avl *a, int (*callback)(void *entry, void *data), void *data) { - int total = 0, ret = 0; - - if(a->avl_link[0]) { - ret = dictionary_walker(a->avl_link[0], callback, data); - if(ret < 0) return ret; - total += ret; - } - - ret = callback(((NAME_VALUE *)a)->value, data); - if(ret < 0) return ret; - total += ret; - - if(a->avl_link[1]) { - ret = dictionary_walker(a->avl_link[1], callback, data); - if (ret < 0) return ret; - total += ret; - } - - return total; -} - -int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data), void *data) { - int ret = 0; - - dictionary_read_lock(dict); - - if(likely(dict->values_index.root)) - ret = dictionary_walker(dict->values_index.root, callback, data); - - dictionary_unlock(dict); - - return ret; -} diff --git a/src/dictionary.h b/src/dictionary.h deleted file mode 100644 index f028dbb30..000000000 --- a/src/dictionary.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef NETDATA_DICTIONARY_H -#define NETDATA_DICTIONARY_H 1 - -struct dictionary_stats { - unsigned long long inserts; - unsigned long long deletes; - unsigned long long searches; - unsigned long long entries; -}; - -typedef struct name_value { - avl avl; // the index - this has to be first! - - uint32_t hash; // a simple hash to speed up searching - // we first compare hashes, and only if the hashes are equal we do string comparisons - - char *name; - void *value; -} NAME_VALUE; - -typedef struct dictionary { - avl_tree values_index; - - uint8_t flags; - - struct dictionary_stats *stats; - netdata_rwlock_t *rwlock; -} DICTIONARY; - -#define DICTIONARY_FLAG_DEFAULT 0x00000000 -#define DICTIONARY_FLAG_SINGLE_THREADED 0x00000001 -#define DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE 0x00000002 -#define DICTIONARY_FLAG_NAME_LINK_DONT_CLONE 0x00000004 -#define DICTIONARY_FLAG_WITH_STATISTICS 0x00000008 - -extern DICTIONARY *dictionary_create(uint8_t flags); -extern void dictionary_destroy(DICTIONARY *dict); -extern void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len); -extern void *dictionary_get(DICTIONARY *dict, const char *name); -extern int dictionary_del(DICTIONARY *dict, const char *name); - -extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data); - -#endif /* NETDATA_DICTIONARY_H */ diff --git a/src/eval.c b/src/eval.c deleted file mode 100644 index 84369f6d4..000000000 --- a/src/eval.c +++ /dev/null @@ -1,1188 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// data structures for storing the parsed expression in memory - -typedef struct eval_value { - int type; - - union { - calculated_number number; - EVAL_VARIABLE *variable; - struct eval_node *expression; - }; -} EVAL_VALUE; - -typedef struct eval_node { - int id; - unsigned char operator; - int precedence; - - int count; - EVAL_VALUE ops[]; -} EVAL_NODE; - -// these are used for EVAL_NODE.operator -// they are used as internal IDs to identify an operator -// THEY ARE NOT USED FOR PARSING OPERATORS LIKE THAT -#define EVAL_OPERATOR_NOP '\0' -#define EVAL_OPERATOR_EXPRESSION_OPEN '(' -#define EVAL_OPERATOR_EXPRESSION_CLOSE ')' -#define EVAL_OPERATOR_NOT '!' 
-#define EVAL_OPERATOR_PLUS '+' -#define EVAL_OPERATOR_MINUS '-' -#define EVAL_OPERATOR_AND '&' -#define EVAL_OPERATOR_OR '|' -#define EVAL_OPERATOR_GREATER_THAN_OR_EQUAL 'G' -#define EVAL_OPERATOR_LESS_THAN_OR_EQUAL 'L' -#define EVAL_OPERATOR_NOT_EQUAL '~' -#define EVAL_OPERATOR_EQUAL '=' -#define EVAL_OPERATOR_LESS '<' -#define EVAL_OPERATOR_GREATER '>' -#define EVAL_OPERATOR_MULTIPLY '*' -#define EVAL_OPERATOR_DIVIDE '/' -#define EVAL_OPERATOR_SIGN_PLUS 'P' -#define EVAL_OPERATOR_SIGN_MINUS 'M' -#define EVAL_OPERATOR_ABS 'A' -#define EVAL_OPERATOR_IF_THEN_ELSE '?' - -// ---------------------------------------------------------------------------- -// forward function definitions - -static inline void eval_node_free(EVAL_NODE *op); -static inline EVAL_NODE *parse_full_expression(const char **string, int *error); -static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error); -static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error); -static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error); -static inline void print_parsed_as_constant(BUFFER *out, calculated_number n); - -// ---------------------------------------------------------------------------- -// evaluation of expressions - -static inline calculated_number eval_variable(EVAL_EXPRESSION *exp, EVAL_VARIABLE *v, int *error) { - static uint32_t this_hash = 0, now_hash = 0, after_hash = 0, before_hash = 0, status_hash = 0, removed_hash = 0, uninitialized_hash = 0, undefined_hash = 0, clear_hash = 0, warning_hash = 0, critical_hash = 0; - calculated_number n; - - if(unlikely(this_hash == 0)) { - this_hash = simple_hash("this"); - now_hash = simple_hash("now"); - after_hash = simple_hash("after"); - before_hash = simple_hash("before"); - status_hash = simple_hash("status"); - removed_hash = simple_hash("REMOVED"); - uninitialized_hash = simple_hash("UNINITIALIZED"); - undefined_hash = simple_hash("UNDEFINED"); - clear_hash = simple_hash("CLEAR"); - warning_hash = simple_hash("WARNING"); - critical_hash = simple_hash("CRITICAL"); - } - - if(unlikely(v->hash == this_hash && !strcmp(v->name, "this"))) { - n = (exp->this)?*exp->this:NAN; - buffer_strcat(exp->error_msg, "[ $this = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == after_hash && !strcmp(v->name, "after"))) { - n = (exp->after && *exp->after)?*exp->after:NAN; - buffer_strcat(exp->error_msg, "[ $after = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == before_hash && !strcmp(v->name, "before"))) { - n = (exp->before && *exp->before)?*exp->before:NAN; - buffer_strcat(exp->error_msg, "[ $before = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == now_hash && !strcmp(v->name, "now"))) { - n = now_realtime_sec(); - buffer_strcat(exp->error_msg, "[ $now = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == status_hash && !strcmp(v->name, "status"))) { - n = (exp->status)?*exp->status:RRDCALC_STATUS_UNINITIALIZED; - buffer_strcat(exp->error_msg, "[ $status = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == removed_hash && !strcmp(v->name, "REMOVED"))) { - n = RRDCALC_STATUS_REMOVED; - 
buffer_strcat(exp->error_msg, "[ $REMOVED = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == uninitialized_hash && !strcmp(v->name, "UNINITIALIZED"))) { - n = RRDCALC_STATUS_UNINITIALIZED; - buffer_strcat(exp->error_msg, "[ $UNINITIALIZED = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == undefined_hash && !strcmp(v->name, "UNDEFINED"))) { - n = RRDCALC_STATUS_UNDEFINED; - buffer_strcat(exp->error_msg, "[ $UNDEFINED = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == clear_hash && !strcmp(v->name, "CLEAR"))) { - n = RRDCALC_STATUS_CLEAR; - buffer_strcat(exp->error_msg, "[ $CLEAR = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == warning_hash && !strcmp(v->name, "WARNING"))) { - n = RRDCALC_STATUS_WARNING; - buffer_strcat(exp->error_msg, "[ $WARNING = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(unlikely(v->hash == critical_hash && !strcmp(v->name, "CRITICAL"))) { - n = RRDCALC_STATUS_CRITICAL; - buffer_strcat(exp->error_msg, "[ $CRITICAL = "); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - if(exp->rrdcalc && health_variable_lookup(v->name, v->hash, exp->rrdcalc, &n)) { - buffer_sprintf(exp->error_msg, "[ ${%s} = ", v->name); - print_parsed_as_constant(exp->error_msg, n); - buffer_strcat(exp->error_msg, " ] "); - return n; - } - - *error = EVAL_ERROR_UNKNOWN_VARIABLE; - buffer_sprintf(exp->error_msg, "[ undefined variable '%s' ] ", v->name); - return 0; -} - -static inline calculated_number eval_value(EVAL_EXPRESSION *exp, EVAL_VALUE *v, int *error) { - calculated_number n; - - switch(v->type) { - case EVAL_VALUE_EXPRESSION: - n = eval_node(exp, v->expression, error); - break; - - case EVAL_VALUE_NUMBER: - n = v->number; - break; - - case EVAL_VALUE_VARIABLE: - n = eval_variable(exp, v->variable, error); - break; - - default: - *error = EVAL_ERROR_INVALID_VALUE; - n = 0; - break; - } - - return n; -} - -static inline int is_true(calculated_number n) { - if(isnan(n)) return 0; - if(isinf(n)) return 1; - if(n == 0) return 0; - return 1; -} - -calculated_number eval_and(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - return is_true(eval_value(exp, &op->ops[0], error)) && is_true(eval_value(exp, &op->ops[1], error)); -} -calculated_number eval_or(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - return is_true(eval_value(exp, &op->ops[0], error)) || is_true(eval_value(exp, &op->ops[1], error)); -} -calculated_number eval_greater_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - return isgreaterequal(n1, n2); -} -calculated_number eval_less_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - return islessequal(n1, n2); -} -calculated_number eval_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - if(isnan(n1) && isnan(n2)) 
return 1; - if(isinf(n1) && isinf(n2)) return 1; - if(isnan(n1) || isnan(n2)) return 0; - if(isinf(n1) || isinf(n2)) return 0; - return calculated_number_equal(n1, n2); -} -calculated_number eval_not_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - return !eval_equal(exp, op, error); -} -calculated_number eval_less(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - return isless(n1, n2); -} -calculated_number eval_greater(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - return isgreater(n1, n2); -} -calculated_number eval_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - if(isnan(n1) || isnan(n2)) return NAN; - if(isinf(n1) || isinf(n2)) return INFINITY; - return n1 + n2; -} -calculated_number eval_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - if(isnan(n1) || isnan(n2)) return NAN; - if(isinf(n1) || isinf(n2)) return INFINITY; - return n1 - n2; -} -calculated_number eval_multiply(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - if(isnan(n1) || isnan(n2)) return NAN; - if(isinf(n1) || isinf(n2)) return INFINITY; - return n1 * n2; -} -calculated_number eval_divide(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - calculated_number n2 = eval_value(exp, &op->ops[1], error); - if(isnan(n1) || isnan(n2)) return NAN; - if(isinf(n1) || isinf(n2)) return INFINITY; - return n1 / n2; -} -calculated_number eval_nop(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - return eval_value(exp, &op->ops[0], error); -} -calculated_number eval_not(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - return !is_true(eval_value(exp, &op->ops[0], error)); -} -calculated_number eval_sign_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - return eval_value(exp, &op->ops[0], error); -} -calculated_number eval_sign_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - if(isnan(n1)) return NAN; - if(isinf(n1)) return INFINITY; - return -n1; -} -calculated_number eval_abs(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - calculated_number n1 = eval_value(exp, &op->ops[0], error); - if(isnan(n1)) return NAN; - if(isinf(n1)) return INFINITY; - return abs(n1); -} -calculated_number eval_if_then_else(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - if(is_true(eval_value(exp, &op->ops[0], error))) - return eval_value(exp, &op->ops[1], error); - else - return eval_value(exp, &op->ops[2], error); -} - -static struct operator { - const char *print_as; - char precedence; - char parameters; - char isfunction; - calculated_number (*eval)(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error); -} operators[256] = { - // this is a random access array - // we always access it with a known EVAL_OPERATOR_X - - [EVAL_OPERATOR_AND] = { "&&", 2, 2, 0, eval_and }, - [EVAL_OPERATOR_OR] = { "||", 2, 2, 0, eval_or }, - 
[EVAL_OPERATOR_GREATER_THAN_OR_EQUAL] = { ">=", 3, 2, 0, eval_greater_than_or_equal }, - [EVAL_OPERATOR_LESS_THAN_OR_EQUAL] = { "<=", 3, 2, 0, eval_less_than_or_equal }, - [EVAL_OPERATOR_NOT_EQUAL] = { "!=", 3, 2, 0, eval_not_equal }, - [EVAL_OPERATOR_EQUAL] = { "==", 3, 2, 0, eval_equal }, - [EVAL_OPERATOR_LESS] = { "<", 3, 2, 0, eval_less }, - [EVAL_OPERATOR_GREATER] = { ">", 3, 2, 0, eval_greater }, - [EVAL_OPERATOR_PLUS] = { "+", 4, 2, 0, eval_plus }, - [EVAL_OPERATOR_MINUS] = { "-", 4, 2, 0, eval_minus }, - [EVAL_OPERATOR_MULTIPLY] = { "*", 5, 2, 0, eval_multiply }, - [EVAL_OPERATOR_DIVIDE] = { "/", 5, 2, 0, eval_divide }, - [EVAL_OPERATOR_NOT] = { "!", 6, 1, 0, eval_not }, - [EVAL_OPERATOR_SIGN_PLUS] = { "+", 6, 1, 0, eval_sign_plus }, - [EVAL_OPERATOR_SIGN_MINUS] = { "-", 6, 1, 0, eval_sign_minus }, - [EVAL_OPERATOR_ABS] = { "abs(",6,1, 1, eval_abs }, - [EVAL_OPERATOR_IF_THEN_ELSE] = { "?", 7, 3, 0, eval_if_then_else }, - [EVAL_OPERATOR_NOP] = { NULL, 8, 1, 0, eval_nop }, - [EVAL_OPERATOR_EXPRESSION_OPEN] = { NULL, 8, 1, 0, eval_nop }, - - // this should exist in our evaluation list - [EVAL_OPERATOR_EXPRESSION_CLOSE] = { NULL, 99, 1, 0, eval_nop } -}; - -#define eval_precedence(operator) (operators[(unsigned char)(operator)].precedence) - -static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) { - if(unlikely(op->count != operators[op->operator].parameters)) { - *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS; - return 0; - } - - calculated_number n = operators[op->operator].eval(exp, op, error); - - return n; -} - -// ---------------------------------------------------------------------------- -// parsed-as generation - -static inline void print_parsed_as_variable(BUFFER *out, EVAL_VARIABLE *v, int *error) { - (void)error; - buffer_sprintf(out, "${%s}", v->name); -} - -static inline void print_parsed_as_constant(BUFFER *out, calculated_number n) { - if(unlikely(isnan(n))) { - buffer_strcat(out, "nan"); - return; - } - - if(unlikely(isinf(n))) { - buffer_strcat(out, "inf"); - return; - } - - char b[100+1], *s; - snprintfz(b, 100, CALCULATED_NUMBER_FORMAT, n); - - s = &b[strlen(b) - 1]; - while(s > b && *s == '0') { - *s ='\0'; - s--; - } - - if(s > b && *s == '.') - *s = '\0'; - - buffer_strcat(out, b); -} - -static inline void print_parsed_as_value(BUFFER *out, EVAL_VALUE *v, int *error) { - switch(v->type) { - case EVAL_VALUE_EXPRESSION: - print_parsed_as_node(out, v->expression, error); - break; - - case EVAL_VALUE_NUMBER: - print_parsed_as_constant(out, v->number); - break; - - case EVAL_VALUE_VARIABLE: - print_parsed_as_variable(out, v->variable, error); - break; - - default: - *error = EVAL_ERROR_INVALID_VALUE; - break; - } -} - -static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error) { - if(unlikely(op->count != operators[op->operator].parameters)) { - *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS; - return; - } - - if(operators[op->operator].parameters == 1) { - - if(operators[op->operator].print_as) - buffer_sprintf(out, "%s", operators[op->operator].print_as); - - //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN) - // buffer_strcat(out, "("); - - print_parsed_as_value(out, &op->ops[0], error); - - //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN) - // buffer_strcat(out, ")"); - } - - else if(operators[op->operator].parameters == 2) { - buffer_strcat(out, "("); - print_parsed_as_value(out, &op->ops[0], error); - - if(operators[op->operator].print_as) - buffer_sprintf(out, " %s ", 
operators[op->operator].print_as); - - print_parsed_as_value(out, &op->ops[1], error); - buffer_strcat(out, ")"); - } - else if(op->operator == EVAL_OPERATOR_IF_THEN_ELSE && operators[op->operator].parameters == 3) { - buffer_strcat(out, "("); - print_parsed_as_value(out, &op->ops[0], error); - - if(operators[op->operator].print_as) - buffer_sprintf(out, " %s ", operators[op->operator].print_as); - - print_parsed_as_value(out, &op->ops[1], error); - buffer_strcat(out, " : "); - print_parsed_as_value(out, &op->ops[2], error); - buffer_strcat(out, ")"); - } - - if(operators[op->operator].isfunction) - buffer_strcat(out, ")"); -} - -// ---------------------------------------------------------------------------- -// parsing expressions - -// skip spaces -static inline void skip_spaces(const char **string) { - const char *s = *string; - while(isspace(*s)) s++; - *string = s; -} - -// what character can appear just after an operator keyword -// like NOT AND OR ? -static inline int isoperatorterm_word(const char s) { - if(isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s) - return 1; - - return 0; -} - -// what character can appear just after an operator symbol? -static inline int isoperatorterm_symbol(const char s) { - if(isoperatorterm_word(s) || isalpha(s)) - return 1; - - return 0; -} - -// return 1 if the character should never appear in a variable -static inline int isvariableterm(const char s) { - if(isalnum(s) || s == '.' || s == '_') - return 0; - - return 1; -} - -// ---------------------------------------------------------------------------- -// parse operators - -static inline int parse_and(const char **string) { - const char *s = *string; - - // AND - if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && isoperatorterm_word(s[3])) { - *string = &s[4]; - return 1; - } - - // && - if(s[0] == '&' && s[1] == '&' && isoperatorterm_symbol(s[2])) { - *string = &s[2]; - return 1; - } - - return 0; -} - -static inline int parse_or(const char **string) { - const char *s = *string; - - // OR - if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && isoperatorterm_word(s[2])) { - *string = &s[3]; - return 1; - } - - // || - if(s[0] == '|' && s[1] == '|' && isoperatorterm_symbol(s[2])) { - *string = &s[2]; - return 1; - } - - return 0; -} - -static inline int parse_greater_than_or_equal(const char **string) { - const char *s = *string; - - // >= - if(s[0] == '>' && s[1] == '=' && isoperatorterm_symbol(s[2])) { - *string = &s[2]; - return 1; - } - - return 0; -} - -static inline int parse_less_than_or_equal(const char **string) { - const char *s = *string; - - // <= - if (s[0] == '<' && s[1] == '=' && isoperatorterm_symbol(s[2])) { - *string = &s[2]; - return 1; - } - - return 0; -} - -static inline int parse_greater(const char **string) { - const char *s = *string; - - // > - if(s[0] == '>' && isoperatorterm_symbol(s[1])) { - *string = &s[1]; - return 1; - } - - return 0; -} - -static inline int parse_less(const char **string) { - const char *s = *string; - - // < - if(s[0] == '<' && isoperatorterm_symbol(s[1])) { - *string = &s[1]; - return 1; - } - - return 0; -} - -static inline int parse_equal(const char **string) { - const char *s = *string; - - // == - if(s[0] == '=' && s[1] == '=' && isoperatorterm_symbol(s[2])) { - *string = &s[2]; - return 1; - } - - // = - if(s[0] == '=' && isoperatorterm_symbol(s[1])) { - *string = &s[1]; - return 1; - } - - return 0; -} - -static inline int 
parse_not_equal(const char **string) {
-    const char *s = *string;
-
-    // !=
-    if(s[0] == '!' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
-        *string = &s[2];
-        return 1;
-    }
-
-    // <>
-    if(s[0] == '<' && s[1] == '>' && isoperatorterm_symbol(s[2])) {
-        *string = &s[2];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_not(const char **string) {
-    const char *s = *string;
-
-    // NOT
-    if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && isoperatorterm_word(s[3])) {
-        *string = &s[3];
-        return 1;
-    }
-
-    if(s[0] == '!') {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_multiply(const char **string) {
-    const char *s = *string;
-
-    // *
-    if(s[0] == '*' && isoperatorterm_symbol(s[1])) {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_divide(const char **string) {
-    const char *s = *string;
-
-    // /
-    if(s[0] == '/' && isoperatorterm_symbol(s[1])) {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_minus(const char **string) {
-    const char *s = *string;
-
-    // -
-    if(s[0] == '-' && isoperatorterm_symbol(s[1])) {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_plus(const char **string) {
-    const char *s = *string;
-
-    // +
-    if(s[0] == '+' && isoperatorterm_symbol(s[1])) {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_open_subexpression(const char **string) {
-    const char *s = *string;
-
-    // (
-    if(s[0] == '(') {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-#define parse_close_function(x) parse_close_subexpression(x)
-
-static inline int parse_close_subexpression(const char **string) {
-    const char *s = *string;
-
-    // )
-    if(s[0] == ')') {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_variable(const char **string, char *buffer, size_t len) {
-    const char *s = *string;
-
-    // $
-    if(*s == '$') {
-        size_t i = 0;
-        s++;
-
-        if(*s == '{') {
-            // ${variable_name}
-
-            s++;
-            while (*s && *s != '}' && i < len)
-                buffer[i++] = *s++;
-
-            if(*s == '}')
-                s++;
-        }
-        else {
-            // $variable_name
-
-            while (*s && !isvariableterm(*s) && i < len)
-                buffer[i++] = *s++;
-        }
-
-        buffer[i] = '\0';
-
-        if (buffer[0]) {
-            *string = s;
-            return 1;
-        }
-    }
-
-    return 0;
-}
-
-static inline int parse_constant(const char **string, calculated_number *number) {
-    char *end = NULL;
-    calculated_number n = str2ld(*string, &end);
-    if(unlikely(!end || *string == end)) {
-        *number = 0;
-        return 0;
-    }
-    *number = n;
-    *string = end;
-    return 1;
-}
-
-static inline int parse_abs(const char **string) {
-    const char *s = *string;
-
-    // ABS
-    if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'B' || s[1] == 'b') && (s[2] == 'S' || s[2] == 's') && s[3] == '(') {
-        *string = &s[3];
-        return 1;
-    }
-
-    return 0;
-}
-
-static inline int parse_if_then_else(const char **string) {
-    const char *s = *string;
-
-    // ?
-    if(s[0] == '?') {
-        *string = &s[1];
-        return 1;
-    }
-
-    return 0;
-}
-
-static struct operator_parser {
-    unsigned char id;
-    int (*parse)(const char **);
-} operator_parsers[] = {
-    // the order in this list is important!
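The ordering rule stated in this comment is the usual longest-match-first discipline for overlapping operator tokens. A tiny standalone demonstration of why ">=" must be tried before ">":

#include <stdio.h>
#include <string.h>

// Longest-match-first operator scanning: if ">" were listed before
// ">=", the input ">=" would mis-parse as ">" followed by a stray "=".
static const char *ops_ordered[] = { ">=", "<=", "!=", "==", ">", "<", NULL };

static const char *match_operator(const char **s) {
    for (int i = 0; ops_ordered[i]; i++) {
        size_t len = strlen(ops_ordered[i]);
        if (strncmp(*s, ops_ordered[i], len) == 0) {
            *s += len;                 // consume the matched operator
            return ops_ordered[i];
        }
    }
    return NULL;
}

int main(void) {
    const char *input = ">= 10";
    const char *op = match_operator(&input);
    printf("matched '%s', rest '%s'\n", op ? op : "(none)", input); // '>=', ' 10'
    return 0;
}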
- // the first matching will be used - // so place the longer of overlapping ones - // at the top - - { EVAL_OPERATOR_AND, parse_and }, - { EVAL_OPERATOR_OR, parse_or }, - { EVAL_OPERATOR_GREATER_THAN_OR_EQUAL, parse_greater_than_or_equal }, - { EVAL_OPERATOR_LESS_THAN_OR_EQUAL, parse_less_than_or_equal }, - { EVAL_OPERATOR_NOT_EQUAL, parse_not_equal }, - { EVAL_OPERATOR_EQUAL, parse_equal }, - { EVAL_OPERATOR_LESS, parse_less }, - { EVAL_OPERATOR_GREATER, parse_greater }, - { EVAL_OPERATOR_PLUS, parse_plus }, - { EVAL_OPERATOR_MINUS, parse_minus }, - { EVAL_OPERATOR_MULTIPLY, parse_multiply }, - { EVAL_OPERATOR_DIVIDE, parse_divide }, - { EVAL_OPERATOR_IF_THEN_ELSE, parse_if_then_else }, - - /* we should not put in this list the following: - * - * - NOT - * - ( - * - ) - * - * these are handled in code - */ - - // termination - { EVAL_OPERATOR_NOP, NULL } -}; - -static inline unsigned char parse_operator(const char **string, int *precedence) { - skip_spaces(string); - - int i; - for(i = 0 ; operator_parsers[i].parse != NULL ; i++) - if(operator_parsers[i].parse(string)) { - if(precedence) *precedence = eval_precedence(operator_parsers[i].id); - return operator_parsers[i].id; - } - - return EVAL_OPERATOR_NOP; -} - -// ---------------------------------------------------------------------------- -// memory management - -static inline EVAL_NODE *eval_node_alloc(int count) { - static int id = 1; - - EVAL_NODE *op = callocz(1, sizeof(EVAL_NODE) + (sizeof(EVAL_VALUE) * count)); - - op->id = id++; - op->operator = EVAL_OPERATOR_NOP; - op->precedence = eval_precedence(EVAL_OPERATOR_NOP); - op->count = count; - return op; -} - -static inline void eval_node_set_value_to_node(EVAL_NODE *op, int pos, EVAL_NODE *value) { - if(pos >= op->count) - fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1); - - op->ops[pos].type = EVAL_VALUE_EXPRESSION; - op->ops[pos].expression = value; -} - -static inline void eval_node_set_value_to_constant(EVAL_NODE *op, int pos, calculated_number value) { - if(pos >= op->count) - fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1); - - op->ops[pos].type = EVAL_VALUE_NUMBER; - op->ops[pos].number = value; -} - -static inline void eval_node_set_value_to_variable(EVAL_NODE *op, int pos, const char *variable) { - if(pos >= op->count) - fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1); - - op->ops[pos].type = EVAL_VALUE_VARIABLE; - op->ops[pos].variable = callocz(1, sizeof(EVAL_VARIABLE)); - op->ops[pos].variable->name = strdupz(variable); - op->ops[pos].variable->hash = simple_hash(op->ops[pos].variable->name); -} - -static inline void eval_variable_free(EVAL_VARIABLE *v) { - freez(v->name); - freez(v); -} - -static inline void eval_value_free(EVAL_VALUE *v) { - switch(v->type) { - case EVAL_VALUE_EXPRESSION: - eval_node_free(v->expression); - break; - - case EVAL_VALUE_VARIABLE: - eval_variable_free(v->variable); - break; - - default: - break; - } -} - -static inline void eval_node_free(EVAL_NODE *op) { - if(op->count) { - int i; - for(i = op->count - 1; i >= 0 ;i--) - eval_value_free(&op->ops[i]); - } - - freez(op); -} - -// ---------------------------------------------------------------------------- -// the parsing logic - -// helper function to avoid allocations all over the place -static inline EVAL_NODE *parse_next_operand_given_its_operator(const char **string, unsigned char operator_type, int *error) { - EVAL_NODE 
- -// ---------------------------------------------------------------------------- -// the parsing logic - -// helper function to avoid allocations all over the place -static inline EVAL_NODE *parse_next_operand_given_its_operator(const char **string, unsigned char operator_type, int *error) { - EVAL_NODE *sub = parse_one_full_operand(string, error); - if(!sub) return NULL; - - EVAL_NODE *op = eval_node_alloc(1); - op->operator = operator_type; - eval_node_set_value_to_node(op, 0, sub); - return op; -} - -// parse a full operand, including its sign or other associative operator (e.g. NOT) -static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error) { - char variable_buffer[EVAL_MAX_VARIABLE_NAME_LENGTH + 1]; - EVAL_NODE *op1 = NULL; - calculated_number number; - - *error = EVAL_ERROR_OK; - - skip_spaces(string); - if(!(**string)) { - *error = EVAL_ERROR_MISSING_OPERAND; - return NULL; - } - - if(parse_not(string)) { - op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_NOT, error); - if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_NOT); - } - else if(parse_plus(string)) { - op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_PLUS, error); - if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_PLUS); - } - else if(parse_minus(string)) { - op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_MINUS, error); - if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_MINUS); - } - else if(parse_abs(string)) { - op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_ABS, error); - if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_ABS); - } - else if(parse_open_subexpression(string)) { - EVAL_NODE *sub = parse_full_expression(string, error); - if(sub) { - op1 = eval_node_alloc(1); - op1->operator = EVAL_OPERATOR_EXPRESSION_OPEN; - op1->precedence = eval_precedence(EVAL_OPERATOR_EXPRESSION_OPEN); - eval_node_set_value_to_node(op1, 0, sub); - if(!parse_close_subexpression(string)) { - *error = EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION; - eval_node_free(op1); - return NULL; - } - } - } - else if(parse_variable(string, variable_buffer, EVAL_MAX_VARIABLE_NAME_LENGTH)) { - op1 = eval_node_alloc(1); - op1->operator = EVAL_OPERATOR_NOP; - eval_node_set_value_to_variable(op1, 0, variable_buffer); - } - else if(parse_constant(string, &number)) { - op1 = eval_node_alloc(1); - op1->operator = EVAL_OPERATOR_NOP; - eval_node_set_value_to_constant(op1, 0, number); - } - else if(**string) - *error = EVAL_ERROR_UNKNOWN_OPERAND; - else - *error = EVAL_ERROR_MISSING_OPERAND; - - return op1; -} - -// parse an operator and the rest of the expression -// precedence processing is handled here -static inline EVAL_NODE *parse_rest_of_expression(const char **string, int *error, EVAL_NODE *op1) { - EVAL_NODE *op2 = NULL; - unsigned char operator; - int precedence; - - operator = parse_operator(string, &precedence); - skip_spaces(string); - - if(operator != EVAL_OPERATOR_NOP) { - op2 = parse_one_full_operand(string, error); - if(!op2) { - // error is already reported - eval_node_free(op1); - return NULL; - } - - EVAL_NODE *op = eval_node_alloc(operators[operator].parameters); - op->operator = operator; - op->precedence = precedence; - - if(operator == EVAL_OPERATOR_IF_THEN_ELSE && op->count == 3) { - skip_spaces(string); - - if(**string != ':') { - eval_node_free(op); - eval_node_free(op1); - eval_node_free(op2); - *error = EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE; - return NULL; - } - (*string)++; - - skip_spaces(string); - - EVAL_NODE *op3 = parse_one_full_operand(string, error); - if(!op3) { - eval_node_free(op); - eval_node_free(op1); - eval_node_free(op2); - // error is already reported - return NULL; - } - - eval_node_set_value_to_node(op, 2, op3); - } - - eval_node_set_value_to_node(op, 1, op2); - - // precedence processing - // if this operator has
a higher precedence compared to its next - // put the next operator on top of us (top = evaluated later) - // function recursion does the rest... - if(op->precedence > op1->precedence && op1->count == 2 && op1->operator != '(' && op1->ops[1].type == EVAL_VALUE_EXPRESSION) { - eval_node_set_value_to_node(op, 0, op1->ops[1].expression); - op1->ops[1].expression = op; - op = op1; - } - else - eval_node_set_value_to_node(op, 0, op1); - - return parse_rest_of_expression(string, error, op); - } - else if(**string == ')') { - ; - } - else if(**string) { - eval_node_free(op1); - op1 = NULL; - *error = EVAL_ERROR_MISSING_OPERATOR; - } - - return op1; -} - -// high level function to parse an expression or a sub-expression -static inline EVAL_NODE *parse_full_expression(const char **string, int *error) { - EVAL_NODE *op1 = parse_one_full_operand(string, error); - if(!op1) { - *error = EVAL_ERROR_MISSING_OPERAND; - return NULL; - } - - return parse_rest_of_expression(string, error, op1); -} - -// ---------------------------------------------------------------------------- -// public API - -int expression_evaluate(EVAL_EXPRESSION *exp) { - exp->error = EVAL_ERROR_OK; - - buffer_reset(exp->error_msg); - exp->result = eval_node(exp, (EVAL_NODE *)exp->nodes, &exp->error); - - if(unlikely(isnan(exp->result))) { - if(exp->error == EVAL_ERROR_OK) - exp->error = EVAL_ERROR_VALUE_IS_NAN; - } - else if(unlikely(isinf(exp->result))) { - if(exp->error == EVAL_ERROR_OK) - exp->error = EVAL_ERROR_VALUE_IS_INFINITE; - } - else if(unlikely(exp->error == EVAL_ERROR_UNKNOWN_VARIABLE)) { - // although there is an unknown variable - // the expression was evaluated successfully - exp->error = EVAL_ERROR_OK; - } - - if(exp->error != EVAL_ERROR_OK) { - exp->result = NAN; - - if(buffer_strlen(exp->error_msg)) - buffer_strcat(exp->error_msg, "; "); - - buffer_sprintf(exp->error_msg, "failed to evaluate expression with error %d (%s)", exp->error, expression_strerror(exp->error)); - return 0; - } - - return 1; -} - -EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error) { - const char *s = string; - int err = EVAL_ERROR_OK; - - EVAL_NODE *op = parse_full_expression(&s, &err); - - if(*s) { - if(op) { - eval_node_free(op); - op = NULL; - } - err = EVAL_ERROR_REMAINING_GARBAGE; - } - - if (failed_at) *failed_at = s; - if (error) *error = err; - - if(!op) { - unsigned long pos = s - string + 1; - error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s); - return NULL; - } - - BUFFER *out = buffer_create(1024); - print_parsed_as_node(out, op, &err); - if(err != EVAL_ERROR_OK) { - error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err)); - eval_node_free(op); - buffer_free(out); - return NULL; - } - - EVAL_EXPRESSION *exp = callocz(1, sizeof(EVAL_EXPRESSION)); - - exp->source = strdupz(string); - exp->parsed_as = strdupz(buffer_tostring(out)); - buffer_free(out); - - exp->error_msg = buffer_create(100); - exp->nodes = (void *)op; - - return exp; -} - -void expression_free(EVAL_EXPRESSION *exp) { - if(!exp) return; - - if(exp->nodes) eval_node_free((EVAL_NODE *)exp->nodes); - freez((void *)exp->source); - freez((void *)exp->parsed_as); - buffer_free(exp->error_msg); - freez(exp); -} - -const char *expression_strerror(int error) { - switch(error) { - case EVAL_ERROR_OK: - return "success"; - - case EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION: - return "missing closing parenthesis"; - - case 
EVAL_ERROR_UNKNOWN_OPERAND: - return "unknown operand"; - - case EVAL_ERROR_MISSING_OPERAND: - return "expected operand"; - - case EVAL_ERROR_MISSING_OPERATOR: - return "expected operator"; - - case EVAL_ERROR_REMAINING_GARBAGE: - return "remaining characters after expression"; - - case EVAL_ERROR_INVALID_VALUE: - return "invalid value structure - internal error"; - - case EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS: - return "wrong number of operands for operation - internal error"; - - case EVAL_ERROR_VALUE_IS_NAN: - return "value is unset"; - - case EVAL_ERROR_VALUE_IS_INFINITE: - return "computed value is infinite"; - - case EVAL_ERROR_UNKNOWN_VARIABLE: - return "undefined variable"; - - case EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE: - return "missing second sub-expression of inline conditional"; - - default: - return "unknown error"; - } -} diff --git a/src/eval.h b/src/eval.h deleted file mode 100644 index 6a5562fd4..000000000 --- a/src/eval.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef NETDATA_EVAL_H -#define NETDATA_EVAL_H - -#define EVAL_MAX_VARIABLE_NAME_LENGTH 300 - -typedef struct eval_variable { - char *name; - uint32_t hash; - struct eval_variable *next; -} EVAL_VARIABLE; - -typedef struct eval_expression { - const char *source; - const char *parsed_as; - - RRDCALC_STATUS *status; - calculated_number *this; - time_t *after; - time_t *before; - - calculated_number result; - - int error; - BUFFER *error_msg; - - // hidden EVAL_NODE * - void *nodes; - - // custom data to be used for looking up variables - struct rrdcalc *rrdcalc; -} EVAL_EXPRESSION; - -#define EVAL_VALUE_INVALID 0 -#define EVAL_VALUE_NUMBER 1 -#define EVAL_VALUE_VARIABLE 2 -#define EVAL_VALUE_EXPRESSION 3 - -// parsing and evaluation -#define EVAL_ERROR_OK 0 - -// parsing errors -#define EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION 1 -#define EVAL_ERROR_UNKNOWN_OPERAND 2 -#define EVAL_ERROR_MISSING_OPERAND 3 -#define EVAL_ERROR_MISSING_OPERATOR 4 -#define EVAL_ERROR_REMAINING_GARBAGE 5 -#define EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE 6 - -// evaluation errors -#define EVAL_ERROR_INVALID_VALUE 101 -#define EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS 102 -#define EVAL_ERROR_VALUE_IS_NAN 103 -#define EVAL_ERROR_VALUE_IS_INFINITE 104 -#define EVAL_ERROR_UNKNOWN_VARIABLE 105 - -// parse the given string as an expression and return: -// a pointer to an expression if it parsed OK -// NULL on failure, in which case the error pointer holds the error code -extern EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error); - -// free all resources allocated for an expression -extern void expression_free(EVAL_EXPRESSION *op); - -// convert an error code to a message -extern const char *expression_strerror(int error); - -// evaluate an expression and return -// 1 = OK, the result is in: expression->result -// 0 = FAILED, the error message is in: buffer_tostring(expression->error_msg) -extern int expression_evaluate(EVAL_EXPRESSION *expression); - -#endif //NETDATA_EVAL_H diff --git a/src/freebsd_devstat.c b/src/freebsd_devstat.c deleted file mode 100644 index ed7466ead..000000000 --- a/src/freebsd_devstat.c +++ /dev/null @@ -1,778 +0,0 @@ -#include "common.h" - -#include <sys/devicestat.h> - -struct disk { - char *name; - uint32_t hash; - size_t len; - - // flags - int configured; - int enabled; - int updated; - - int do_io; - int do_ops; - int do_qops; - int do_util; - int do_iotime; - int do_await; - int do_avagsz; - int do_svctm; - - - // data for differential charts - - struct prev_dstat { - collected_number bytes_read; -
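Taken together, the API in src/eval.h above is driven as parse, evaluate, free. A minimal usage sketch, assuming netdata's common.h is included, that calculated_number is a long double, and an expression string chosen purely for illustration:

    const char *failed_at = NULL;
    int err = EVAL_ERROR_OK;

    EVAL_EXPRESSION *exp = expression_parse("($this > 90) ? 1 : 0", &failed_at, &err);
    if(!exp) {
        /* on failure, failed_at points into the source string */
        fprintf(stderr, "parse failed at '%s': %s\n", failed_at, expression_strerror(err));
    }
    else {
        if(expression_evaluate(exp))
            printf("%s = %Lf\n", exp->parsed_as, (long double)exp->result);
        else
            fprintf(stderr, "evaluation failed: %s\n", buffer_tostring(exp->error_msg));
        expression_free(exp);
    }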
collected_number bytes_write; - collected_number bytes_free; - collected_number operations_read; - collected_number operations_write; - collected_number operations_other; - collected_number operations_free; - collected_number duration_read_ms; - collected_number duration_write_ms; - collected_number duration_other_ms; - collected_number duration_free_ms; - collected_number busy_time_ms; - } prev_dstat; - - // charts and dimensions - - RRDSET *st_io; - RRDDIM *rd_io_in; - RRDDIM *rd_io_out; - RRDDIM *rd_io_free; - - RRDSET *st_ops; - RRDDIM *rd_ops_in; - RRDDIM *rd_ops_out; - RRDDIM *rd_ops_other; - RRDDIM *rd_ops_free; - - RRDSET *st_qops; - RRDDIM *rd_qops; - - RRDSET *st_util; - RRDDIM *rd_util; - - RRDSET *st_iotime; - RRDDIM *rd_iotime_in; - RRDDIM *rd_iotime_out; - RRDDIM *rd_iotime_other; - RRDDIM *rd_iotime_free; - - RRDSET *st_await; - RRDDIM *rd_await_in; - RRDDIM *rd_await_out; - RRDDIM *rd_await_other; - RRDDIM *rd_await_free; - - RRDSET *st_avagsz; - RRDDIM *rd_avagsz_in; - RRDDIM *rd_avagsz_out; - RRDDIM *rd_avagsz_free; - - RRDSET *st_svctm; - RRDDIM *rd_svctm; - - struct disk *next; -}; - -static struct disk *disks_root = NULL, *disks_last_used = NULL; - -static size_t disks_added = 0, disks_found = 0; - -static void disk_free(struct disk *dm) { - if (likely(dm->st_io)) - rrdset_is_obsolete(dm->st_io); - if (likely(dm->st_ops)) - rrdset_is_obsolete(dm->st_ops); - if (likely(dm->st_qops)) - rrdset_is_obsolete(dm->st_qops); - if (likely(dm->st_util)) - rrdset_is_obsolete(dm->st_util); - if (likely(dm->st_iotime)) - rrdset_is_obsolete(dm->st_iotime); - if (likely(dm->st_await)) - rrdset_is_obsolete(dm->st_await); - if (likely(dm->st_avagsz)) - rrdset_is_obsolete(dm->st_avagsz); - if (likely(dm->st_svctm)) - rrdset_is_obsolete(dm->st_svctm); - - disks_added--; - freez(dm->name); - freez(dm); -} - -static void disks_cleanup() { - if (likely(disks_found == disks_added)) return; - - struct disk *dm = disks_root, *last = NULL; - while(dm) { - if (unlikely(!dm->updated)) { - // info("Removing disk '%s', linked after '%s'", dm->name, last?last->name:"ROOT"); - - if (disks_last_used == dm) - disks_last_used = last; - - struct disk *t = dm; - - if (dm == disks_root || !last) - disks_root = dm = dm->next; - - else - last->next = dm = dm->next; - - t->next = NULL; - disk_free(t); - } - else { - last = dm; - dm->updated = 0; - dm = dm->next; - } - } -} - -static struct disk *get_disk(const char *name) { - struct disk *dm; - - uint32_t hash = simple_hash(name); - - // search it, from the last position to the end - for(dm = disks_last_used ; dm ; dm = dm->next) { - if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) { - disks_last_used = dm->next; - return dm; - } - } - - // search it from the beginning to the last position we used - for(dm = disks_root ; dm != disks_last_used ; dm = dm->next) { - if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) { - disks_last_used = dm->next; - return dm; - } - } - - // create a new one - dm = callocz(1, sizeof(struct disk)); - dm->name = strdupz(name); - dm->hash = simple_hash(dm->name); - dm->len = strlen(dm->name); - disks_added++; - - // link it to the end - if (disks_root) { - struct disk *e; - for(e = disks_root; e->next ; e = e->next) ; - e->next = dm; - } - else - disks_root = dm; - - return dm; -} - -// -------------------------------------------------------------------------------------------------------------------- -// kern.devstat - -int do_kern_devstat(int update_every, usec_t dt) { - -#define DELAULT_EXLUDED_DISKS "" -#define 
CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat" -#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64 - - static int enable_new_disks = -1; - static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1, - do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1; - static SIMPLE_PATTERN *excluded_disks = NULL; - - if (unlikely(enable_new_disks == -1)) { - enable_new_disks = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, - "enable new disks detected at runtime", CONFIG_BOOLEAN_AUTO); - - enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, - "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO); - - do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks", - CONFIG_BOOLEAN_YES); - - do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks", - CONFIG_BOOLEAN_AUTO); - do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks", - CONFIG_BOOLEAN_AUTO); - do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all disks", - CONFIG_BOOLEAN_AUTO); - do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks", - CONFIG_BOOLEAN_AUTO); - do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks", - CONFIG_BOOLEAN_AUTO); - do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks", - CONFIG_BOOLEAN_AUTO); - do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks", - CONFIG_BOOLEAN_AUTO); - do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks", - CONFIG_BOOLEAN_AUTO); - - excluded_disks = simple_pattern_create( - config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching", DELAULT_EXLUDED_DISKS) - , NULL - , SIMPLE_PATTERN_EXACT - ); - } - - if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) { - static int mib_numdevs[3] = {0, 0, 0}; - int numdevs; - int common_error = 0; - - if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) { - common_error = 1; - } else { - static int mib_devstat[3] = {0, 0, 0}; - static void *devstat_data = NULL; - static int old_numdevs = 0; - - if (unlikely(numdevs != old_numdevs)) { - devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) * - numdevs); // there is generation number before devstat structures - old_numdevs = numdevs; - } - if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data, - sizeof(long) + sizeof(struct devstat) * numdevs))) { - common_error = 1; - } else { - struct devstat *dstat; - int i; - collected_number total_disk_kbytes_read = 0; - collected_number total_disk_kbytes_write = 0; - - disks_found = 0; - - dstat = devstat_data + sizeof(long); // skip generation number - - for (i = 0; i < numdevs; i++) { - if (likely(do_system_io)) { - if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || - ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) { - total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR; - total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR; - } - } - - if (unlikely(!enable_pass_devices)) - if 
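FreeBSD's devstat durations and busy_time are struct bintime values: whole seconds plus a 64-bit binary fraction of a second, so milliseconds come out as sec * 1000 + frac * 1000 / 2^64, which is exactly what the BINTIME_SCALE constant above encodes. The conversion in isolation (a sketch with simplified types, not code from this file):

    #include <stdint.h>

    /* 1000 / 2^64, the same value as BINTIME_SCALE above */
    static const double bintime_ms_scale = 1000.0 / 18446744073709551616.0;

    /* convert a bintime-style pair (seconds, 2^-64 fractions of a second) to ms */
    static long long bintime_to_ms(long long sec, uint64_t frac) {
        return sec * 1000 + (long long)((double)frac * bintime_ms_scale);
    }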
((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS) - continue; - - if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || - ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) { - char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1]; - struct cur_dstat { - collected_number duration_read_ms; - collected_number duration_write_ms; - collected_number duration_other_ms; - collected_number duration_free_ms; - collected_number busy_time_ms; - } cur_dstat; - - sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number); - - struct disk *dm = get_disk(disk); - dm->updated = 1; - disks_found++; - - if(unlikely(!dm->configured)) { - char var_name[4096 + 1]; - - // this is the first time we see this disk - - // remember we configured it - dm->configured = 1; - - dm->enabled = enable_new_disks; - - if (likely(dm->enabled)) - dm->enabled = !simple_pattern_matches(excluded_disks, disk); - - snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk); - dm->enabled = config_get_boolean_ondemand(var_name, "enabled", dm->enabled); - - dm->do_io = config_get_boolean_ondemand(var_name, "bandwidth", do_io); - dm->do_ops = config_get_boolean_ondemand(var_name, "operations", do_ops); - dm->do_qops = config_get_boolean_ondemand(var_name, "queued operations", do_qops); - dm->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", do_util); - dm->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", do_iotime); - dm->do_await = config_get_boolean_ondemand(var_name, "average completed i/o time", - do_await); - dm->do_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth", - do_avagsz); - dm->do_svctm = config_get_boolean_ondemand(var_name, "average service time", do_svctm); - - // initialise data for differential charts - - dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ]; - dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE]; - dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE]; - dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ]; - dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE]; - dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA]; - dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE]; - dm->prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000 - + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE; - dm->prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000 - + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE; - dm->prev_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000 - + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE; - dm->prev_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000 - + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE; - dm->prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 - + dstat[i].busy_time.frac * BINTIME_SCALE; - } - - cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000 - + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE; - cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000 - + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE; - cur_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000 - + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE; - cur_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000 - + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE; - - cur_dstat.busy_time_ms = 
dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE; - - // -------------------------------------------------------------------- - - if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO && - (dstat[i].bytes[DEVSTAT_READ] || - dstat[i].bytes[DEVSTAT_WRITE] || - dstat[i].bytes[DEVSTAT_FREE]))) { - if (unlikely(!dm->st_io)) { - dm->st_io = rrdset_create_localhost("disk", - disk, - NULL, - disk, - "disk.io", - "Disk I/O Bandwidth", - "kilobytes/s", - "freebsd", - "devstat", - 2000, - update_every, - RRDSET_TYPE_AREA - ); - - dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_io_free = rrddim_add(dm->st_io, "frees", NULL, -1, KILO_FACTOR, - RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(dm->st_io); - - rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]); - rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]); - rrddim_set_by_pointer(dm->st_io, dm->rd_io_free, dstat[i].bytes[DEVSTAT_FREE]); - rrdset_done(dm->st_io); - } - - // -------------------------------------------------------------------- - - if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO && - (dstat[i].operations[DEVSTAT_READ] || - dstat[i].operations[DEVSTAT_WRITE] || - dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { - if (unlikely(!dm->st_ops)) { - dm->st_ops = rrdset_create_localhost("disk_ops", - disk, - NULL, - disk, - "disk.ops", - "Disk Completed I/O Operations", - "operations/s", - "freebsd", - "devstat", - 2001, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL); - - dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_ops_other = rrddim_add(dm->st_ops, "other", NULL, 1, 1, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_ops_free = rrddim_add(dm->st_ops, "frees", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(dm->st_ops); - - rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]); - rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]); - rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_other, dstat[i].operations[DEVSTAT_NO_DATA]); - rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_free, dstat[i].operations[DEVSTAT_FREE]); - rrdset_done(dm->st_ops); - } - - // -------------------------------------------------------------------- - - if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO && - (dstat[i].start_count || dstat[i].end_count))) { - if (unlikely(!dm->st_qops)) { - dm->st_qops = rrdset_create_localhost("disk_qops", - disk, - NULL, - disk, - "disk.qops", - "Disk Current I/O Operations", - "operations", - "freebsd", - "devstat", - 2002, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL); - - dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(dm->st_qops); - - rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count); - rrdset_done(dm->st_qops); - } - - // -------------------------------------------------------------------- - - if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO && - 
cur_dstat.busy_time_ms)) { - if (unlikely(!dm->st_util)) { - dm->st_util = rrdset_create_localhost("disk_util", - disk, - NULL, - disk, - "disk.util", - "Disk Utilization Time", - "% of time working", - "freebsd", - "devstat", - 2004, - update_every, - RRDSET_TYPE_AREA - ); - - rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL); - - dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10, - RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(dm->st_util); - - rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms); - rrdset_done(dm->st_util); - } - - // -------------------------------------------------------------------- - - if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO && - (cur_dstat.duration_read_ms || - cur_dstat.duration_write_ms || - cur_dstat.duration_other_ms || - cur_dstat.duration_free_ms))) { - if (unlikely(!dm->st_iotime)) { - dm->st_iotime = rrdset_create_localhost("disk_iotime", - disk, - NULL, - disk, - "disk.iotime", - "Disk Total I/O Time", - "milliseconds/s", - "freebsd", - "devstat", - 2022, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL); - - dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_iotime_other = rrddim_add(dm->st_iotime, "other", NULL, 1, 1, - RRD_ALGORITHM_INCREMENTAL); - dm->rd_iotime_free = rrddim_add(dm->st_iotime, "frees", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(dm->st_iotime); - - rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms); - rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms); - rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_other, cur_dstat.duration_other_ms); - rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_free, cur_dstat.duration_free_ms); - rrdset_done(dm->st_iotime); - } - - // -------------------------------------------------------------------- - // calculate differential charts - // only if this is not the first time we run - - if (likely(dt)) { - - // -------------------------------------------------------------------- - - if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO && - (dstat[i].operations[DEVSTAT_READ] || - dstat[i].operations[DEVSTAT_WRITE] || - dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { - if (unlikely(!dm->st_await)) { - dm->st_await = rrdset_create_localhost("disk_await", - disk, - NULL, - disk, - "disk.await", - "Average Completed I/O Operation Time", - "ms per operation", - "freebsd", - "devstat", - 2005, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL); - - dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1, - RRD_ALGORITHM_ABSOLUTE); - dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1, - RRD_ALGORITHM_ABSOLUTE); - dm->rd_await_other = rrddim_add(dm->st_await, "other", NULL, 1, 1, - RRD_ALGORITHM_ABSOLUTE); - dm->rd_await_free = rrddim_add(dm->st_await, "frees", NULL, -1, 1, - RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(dm->st_await); - - rrddim_set_by_pointer(dm->st_await, dm->rd_await_in, - (dstat[i].operations[DEVSTAT_READ] - - dm->prev_dstat.operations_read) ? 
- (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) / - (dstat[i].operations[DEVSTAT_READ] - - dm->prev_dstat.operations_read) : - 0); - rrddim_set_by_pointer(dm->st_await, dm->rd_await_out, - (dstat[i].operations[DEVSTAT_WRITE] - - dm->prev_dstat.operations_write) ? - (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) / - (dstat[i].operations[DEVSTAT_WRITE] - - dm->prev_dstat.operations_write) : - 0); - rrddim_set_by_pointer(dm->st_await, dm->rd_await_other, - (dstat[i].operations[DEVSTAT_NO_DATA] - - dm->prev_dstat.operations_other) ? - (cur_dstat.duration_other_ms - dm->prev_dstat.duration_other_ms) / - (dstat[i].operations[DEVSTAT_NO_DATA] - - dm->prev_dstat.operations_other) : - 0); - rrddim_set_by_pointer(dm->st_await, dm->rd_await_free, - (dstat[i].operations[DEVSTAT_FREE] - - dm->prev_dstat.operations_free) ? - (cur_dstat.duration_free_ms - dm->prev_dstat.duration_free_ms) / - (dstat[i].operations[DEVSTAT_FREE] - - dm->prev_dstat.operations_free) : - 0); - rrdset_done(dm->st_await); - } - - // -------------------------------------------------------------------- - - if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO && - (dstat[i].operations[DEVSTAT_READ] || - dstat[i].operations[DEVSTAT_WRITE] || - dstat[i].operations[DEVSTAT_FREE]))) { - if (unlikely(!dm->st_avagsz)) { - dm->st_avagsz = rrdset_create_localhost("disk_avgsz", - disk, - NULL, - disk, - "disk.avgsz", - "Average Completed I/O Operation Bandwidth", - "kilobytes per operation", - "freebsd", - "devstat", - 2006, - update_every, - RRDSET_TYPE_AREA - ); - - rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL); - - dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR, - RRD_ALGORITHM_ABSOLUTE); - dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR, - RRD_ALGORITHM_ABSOLUTE); - dm->rd_avagsz_free = rrddim_add(dm->st_avagsz, "frees", NULL, -1, KILO_FACTOR, - RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(dm->st_avagsz); - - rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in, - (dstat[i].operations[DEVSTAT_READ] - - dm->prev_dstat.operations_read) ? - (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) / - (dstat[i].operations[DEVSTAT_READ] - - dm->prev_dstat.operations_read) : - 0); - rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out, - (dstat[i].operations[DEVSTAT_WRITE] - - dm->prev_dstat.operations_write) ? - (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) / - (dstat[i].operations[DEVSTAT_WRITE] - - dm->prev_dstat.operations_write) : - 0); - rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_free, - (dstat[i].operations[DEVSTAT_FREE] - - dm->prev_dstat.operations_free) ? 
- (dstat[i].bytes[DEVSTAT_FREE] - dm->prev_dstat.bytes_free) / - (dstat[i].operations[DEVSTAT_FREE] - - dm->prev_dstat.operations_free) : - 0); - rrdset_done(dm->st_avagsz); - } - - // -------------------------------------------------------------------- - - if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO && - (dstat[i].operations[DEVSTAT_READ] || - dstat[i].operations[DEVSTAT_WRITE] || - dstat[i].operations[DEVSTAT_NO_DATA] || - dstat[i].operations[DEVSTAT_FREE]))) { - if (unlikely(!dm->st_svctm)) { - dm->st_svctm = rrdset_create_localhost("disk_svctm", - disk, - NULL, - disk, - "disk.svctm", - "Average Service Time", - "ms per operation", - "freebsd", - "devstat", - 2007, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL); - - dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1, - RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(dm->st_svctm); - - rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm, - ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) + - (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) + - (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) + - (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) ? - (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) / - ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) + - (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) + - (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) + - (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) : - 0); - rrdset_done(dm->st_svctm); - } - - // -------------------------------------------------------------------- - - dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ]; - dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE]; - dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE]; - dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ]; - dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE]; - dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA]; - dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE]; - dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms; - dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms; - dm->prev_dstat.duration_other_ms = cur_dstat.duration_other_ms; - dm->prev_dstat.duration_free_ms = cur_dstat.duration_free_ms; - dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms; - } - } - } - - // -------------------------------------------------------------------- - - if (likely(do_system_io)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost("system", - "io", - NULL, - "disk", - NULL, - "Disk I/O", - "kilobytes/s", - "freebsd", - "devstat", - 150, - update_every, - RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read); - rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write); - rrdset_done(st); - } - } - } - if (unlikely(common_error)) { - do_system_io = 0; - error("DISABLED: system.io chart"); - do_io = 0; - error("DISABLED: disk.* charts"); - do_ops = 0; - error("DISABLED: disk_ops.* charts"); - do_qops = 0; - error("DISABLED: disk_qops.* charts"); - do_util = 0; - 
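The await, avgsz and svctm dimensions above all compute the same guarded delta ratio: the change in a duration (or byte) counter divided by the change in an operations counter, with an idle interval reporting 0 instead of dividing by zero. The pattern in isolation (function and parameter names are illustrative):

    /* e.g. await_reads = delta_ratio(cur_duration_ms, prev_duration_ms, cur_ops, prev_ops) */
    static long long delta_ratio(long long num, long long prev_num,
                                 long long den, long long prev_den) {
        long long d = den - prev_den;           /* operations completed this cycle */
        return d ? (num - prev_num) / d : 0;    /* ms (or bytes) per operation */
    }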
error("DISABLED: disk_util.* charts"); - do_iotime = 0; - error("DISABLED: disk_iotime.* charts"); - do_await = 0; - error("DISABLED: disk_await.* charts"); - do_avagsz = 0; - error("DISABLED: disk_avgsz.* charts"); - do_svctm = 0; - error("DISABLED: disk_svctm.* charts"); - error("DISABLED: kern.devstat module"); - return 1; - } - } else { - error("DISABLED: kern.devstat module"); - return 1; - } - - disks_cleanup(); - - return 0; -} diff --git a/src/freebsd_getifaddrs.c b/src/freebsd_getifaddrs.c deleted file mode 100644 index 73f8f1824..000000000 --- a/src/freebsd_getifaddrs.c +++ /dev/null @@ -1,506 +0,0 @@ -#include "common.h" - -#include <ifaddrs.h> - -struct cgroup_network_interface { - char *name; - uint32_t hash; - size_t len; - - // flags - int configured; - int enabled; - int updated; - - int do_bandwidth; - int do_packets; - int do_errors; - int do_drops; - int do_events; - - // charts and dimensions - - RRDSET *st_bandwidth; - RRDDIM *rd_bandwidth_in; - RRDDIM *rd_bandwidth_out; - - RRDSET *st_packets; - RRDDIM *rd_packets_in; - RRDDIM *rd_packets_out; - RRDDIM *rd_packets_m_in; - RRDDIM *rd_packets_m_out; - - RRDSET *st_errors; - RRDDIM *rd_errors_in; - RRDDIM *rd_errors_out; - - RRDSET *st_drops; - RRDDIM *rd_drops_in; - RRDDIM *rd_drops_out; - - RRDSET *st_events; - RRDDIM *rd_events_coll; - - struct cgroup_network_interface *next; -}; - -static struct cgroup_network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL; - -static size_t network_interfaces_added = 0, network_interfaces_found = 0; - -static void network_interface_free(struct cgroup_network_interface *ifm) { - if (likely(ifm->st_bandwidth)) - rrdset_is_obsolete(ifm->st_bandwidth); - if (likely(ifm->st_packets)) - rrdset_is_obsolete(ifm->st_packets); - if (likely(ifm->st_errors)) - rrdset_is_obsolete(ifm->st_errors); - if (likely(ifm->st_drops)) - rrdset_is_obsolete(ifm->st_drops); - if (likely(ifm->st_events)) - rrdset_is_obsolete(ifm->st_events); - - network_interfaces_added--; - freez(ifm->name); - freez(ifm); -} - -static void network_interfaces_cleanup() { - if (likely(network_interfaces_found == network_interfaces_added)) return; - - struct cgroup_network_interface *ifm = network_interfaces_root, *last = NULL; - while(ifm) { - if (unlikely(!ifm->updated)) { - // info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT"); - - if (network_interfaces_last_used == ifm) - network_interfaces_last_used = last; - - struct cgroup_network_interface *t = ifm; - - if (ifm == network_interfaces_root || !last) - network_interfaces_root = ifm = ifm->next; - - else - last->next = ifm = ifm->next; - - t->next = NULL; - network_interface_free(t); - } - else { - last = ifm; - ifm->updated = 0; - ifm = ifm->next; - } - } -} - -static struct cgroup_network_interface *get_network_interface(const char *name) { - struct cgroup_network_interface *ifm; - - uint32_t hash = simple_hash(name); - - // search it, from the last position to the end - for(ifm = network_interfaces_last_used ; ifm ; ifm = ifm->next) { - if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) { - network_interfaces_last_used = ifm->next; - return ifm; - } - } - - // search it from the beginning to the last position we used - for(ifm = network_interfaces_root ; ifm != network_interfaces_last_used ; ifm = ifm->next) { - if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) { - network_interfaces_last_used = ifm->next; - return ifm; - } - } - - // create a new one - ifm = callocz(1, 
sizeof(struct cgroup_network_interface)); - ifm->name = strdupz(name); - ifm->hash = simple_hash(ifm->name); - ifm->len = strlen(ifm->name); - network_interfaces_added++; - - // link it to the end - if (network_interfaces_root) { - struct cgroup_network_interface *e; - for(e = network_interfaces_root; e->next ; e = e->next) ; - e->next = ifm; - } - else - network_interfaces_root = ifm; - - return ifm; -} - -// -------------------------------------------------------------------------------------------------------------------- -// getifaddrs - -int do_getifaddrs(int update_every, usec_t dt) { - (void)dt; - -#define DELAULT_EXLUDED_INTERFACES "lo*" -#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs" - - static int enable_new_interfaces = -1; - static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1, - do_errors = -1, do_drops = -1, do_events = -1; - static SIMPLE_PATTERN *excluded_interfaces = NULL; - - if (unlikely(enable_new_interfaces == -1)) { - enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, - "enable new interfaces detected at runtime", - CONFIG_BOOLEAN_AUTO); - - do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces", - CONFIG_BOOLEAN_AUTO); - do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces", - CONFIG_BOOLEAN_AUTO); - do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces", - CONFIG_BOOLEAN_AUTO); - do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces", - CONFIG_BOOLEAN_AUTO); - do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces", - CONFIG_BOOLEAN_AUTO); - do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces", - CONFIG_BOOLEAN_AUTO); - do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces", - CONFIG_BOOLEAN_AUTO); - - excluded_interfaces = simple_pattern_create( - config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching", DELAULT_EXLUDED_INTERFACES) - , NULL - , SIMPLE_PATTERN_EXACT - ); - } - - if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors || - do_drops || do_events)) { - struct ifaddrs *ifap; - - if (unlikely(getifaddrs(&ifap))) { - error("FREEBSD: getifaddrs() failed"); - do_bandwidth_ipv4 = 0; - error("DISABLED: system.ipv4 chart"); - do_bandwidth_ipv6 = 0; - error("DISABLED: system.ipv6 chart"); - do_bandwidth = 0; - error("DISABLED: net.* charts"); - do_packets = 0; - error("DISABLED: net_packets.* charts"); - do_errors = 0; - error("DISABLED: net_errors.* charts"); - do_drops = 0; - error("DISABLED: net_drops.* charts"); - do_events = 0; - error("DISABLED: net_events.* charts"); - error("DISABLED: getifaddrs module"); - return 1; - } else { -#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s) - struct ifaddrs *ifa; - struct iftot { - u_long ift_ibytes; - u_long ift_obytes; - } iftot = {0, 0}; - - // -------------------------------------------------------------------- - - if (likely(do_bandwidth_ipv4)) { - iftot.ift_ibytes = iftot.ift_obytes = 0; - for (ifa = ifap; ifa; ifa = ifa->ifa_next) { - if (ifa->ifa_addr->sa_family != AF_INET) - continue; - iftot.ift_ibytes += IFA_DATA(ibytes); - iftot.ift_obytes += IFA_DATA(obytes); - } - - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, 
*rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost("system", - "ipv4", - NULL, - "network", - NULL, - "IPv4 Bandwidth", - "kilobits/s", - "freebsd", - "getifaddrs", - 500, - update_every, - RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes); - rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_bandwidth_ipv6)) { - iftot.ift_ibytes = iftot.ift_obytes = 0; - for (ifa = ifap; ifa; ifa = ifa->ifa_next) { - if (ifa->ifa_addr->sa_family != AF_INET6) - continue; - iftot.ift_ibytes += IFA_DATA(ibytes); - iftot.ift_obytes += IFA_DATA(obytes); - } - - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost("system", - "ipv6", - NULL, - "network", - NULL, - "IPv6 Bandwidth", - "kilobits/s", - "freebsd", - "getifaddrs", - 500, - update_every, - RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes); - rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - network_interfaces_found = 0; - - for (ifa = ifap; ifa; ifa = ifa->ifa_next) { - if (ifa->ifa_addr->sa_family != AF_LINK) - continue; - - struct cgroup_network_interface *ifm = get_network_interface(ifa->ifa_name); - ifm->updated = 1; - network_interfaces_found++; - - if (unlikely(!ifm->configured)) { - char var_name[4096 + 1]; - - // this is the first time we see this network interface - - // remember we configured it - ifm->configured = 1; - - ifm->enabled = enable_new_interfaces; - - if (likely(ifm->enabled)) - ifm->enabled = !simple_pattern_matches(excluded_interfaces, ifa->ifa_name); - - snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name); - ifm->enabled = config_get_boolean_ondemand(var_name, "enabled", ifm->enabled); - - if (unlikely(ifm->enabled == CONFIG_BOOLEAN_NO)) - continue; - - ifm->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth); - ifm->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets); - ifm->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors); - ifm->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops); - ifm->do_events = config_get_boolean_ondemand(var_name, "events", do_events); - } - - if (unlikely(!ifm->enabled)) - continue; - - // -------------------------------------------------------------------- - - if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ibytes) || IFA_DATA(obytes)))) { - if (unlikely(!ifm->st_bandwidth)) { - ifm->st_bandwidth = rrdset_create_localhost("net", - ifa->ifa_name, - NULL, - ifa->ifa_name, - "net.net", - "Bandwidth", - "kilobits/s", - "freebsd", - "getifaddrs", - 7000, - update_every, - RRDSET_TYPE_AREA - ); - - ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - 
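Each chart in these collectors is guarded by the same tri-state test: CONFIG_BOOLEAN_YES always renders it, CONFIG_BOOLEAN_NO never does, and CONFIG_BOOLEAN_AUTO renders it only once the counters involved have reported non-zero data. Reduced to a sketch (the helper name is illustrative; the constants are netdata's):

    static int chart_should_render(int mode, int has_data) {
        return mode == CONFIG_BOOLEAN_YES || (mode == CONFIG_BOOLEAN_AUTO && has_data);
    }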
ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(ifm->st_bandwidth); - - rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes)); - rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes)); - rrdset_done(ifm->st_bandwidth); - } - - // -------------------------------------------------------------------- - - if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) { - if (unlikely(!ifm->st_packets)) { - ifm->st_packets = rrdset_create_localhost("net_packets", - ifa->ifa_name, - NULL, - ifa->ifa_name, - "net.packets", - "Packets", - "packets/s", - "freebsd", - "getifaddrs", - 7001, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL); - - ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1, - RRD_ALGORITHM_INCREMENTAL); - ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1, - RRD_ALGORITHM_INCREMENTAL); - ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(ifm->st_packets); - - rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets)); - rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets)); - rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts)); - rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts)); - rrdset_done(ifm->st_packets); - } - - // -------------------------------------------------------------------- - - if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) { - if (unlikely(!ifm->st_errors)) { - ifm->st_errors = rrdset_create_localhost("net_errors", - ifa->ifa_name, - NULL, - ifa->ifa_name, - "net.errors", - "Interface Errors", - "errors/s", - "freebsd", - "getifaddrs", - 7002, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL); - - ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(ifm->st_errors); - - rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors)); - rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors)); - rrdset_done(ifm->st_errors); - } - // -------------------------------------------------------------------- - - if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO && - (IFA_DATA(iqdrops) - #if __FreeBSD__ >= 11 - || IFA_DATA(oqdrops) -#endif - ))) { - if (unlikely(!ifm->st_drops)) { - ifm->st_drops = rrdset_create_localhost("net_drops", - ifa->ifa_name, - NULL, - ifa->ifa_name, - "net.drops", - "Interface Drops", - "drops/s", - "freebsd", - "getifaddrs", - 7003, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL); - - ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); -#if __FreeBSD__ >= 11 - ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, 
RRD_ALGORITHM_INCREMENTAL); -#endif - } else - rrdset_next(ifm->st_drops); - - rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops)); -#if __FreeBSD__ >= 11 - rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops)); -#endif - rrdset_done(ifm->st_drops); - } - - // -------------------------------------------------------------------- - - if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO && - IFA_DATA(collisions))) { - if (unlikely(!ifm->st_events)) { - ifm->st_events = rrdset_create_localhost("net_events", - ifa->ifa_name, - NULL, - ifa->ifa_name, - "net.events", - "Network Interface Events", - "events/s", - "freebsd", - "getifaddrs", - 7006, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL); - - ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1, - RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(ifm->st_events); - - rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions)); - rrdset_done(ifm->st_events); - } - } - - freeifaddrs(ifap); - } - } else { - error("DISABLED: getifaddrs module"); - return 1; - } - - network_interfaces_cleanup(); - - return 0; -} diff --git a/src/freebsd_getmntinfo.c b/src/freebsd_getmntinfo.c deleted file mode 100644 index ea82b9fd1..000000000 --- a/src/freebsd_getmntinfo.c +++ /dev/null @@ -1,299 +0,0 @@ -#include "common.h" - -#include <sys/mount.h> - -struct mount_point { - char *name; - uint32_t hash; - size_t len; - - // flags - int configured; - int enabled; - int updated; - - int do_space; - int do_inodes; - - size_t collected; // the number of times this has been collected - - // charts and dimensions - - RRDSET *st_space; - RRDDIM *rd_space_used; - RRDDIM *rd_space_avail; - RRDDIM *rd_space_reserved; - - RRDSET *st_inodes; - RRDDIM *rd_inodes_used; - RRDDIM *rd_inodes_avail; - - struct mount_point *next; -}; - -static struct mount_point *mount_points_root = NULL, *mount_points_last_used = NULL; - -static size_t mount_points_added = 0, mount_points_found = 0; - -static void mount_point_free(struct mount_point *m) { - if (likely(m->st_space)) - rrdset_is_obsolete(m->st_space); - if (likely(m->st_inodes)) - rrdset_is_obsolete(m->st_inodes); - - mount_points_added--; - freez(m->name); - freez(m); -} - -static void mount_points_cleanup() { - if (likely(mount_points_found == mount_points_added)) return; - - struct mount_point *m = mount_points_root, *last = NULL; - while(m) { - if (unlikely(!m->updated)) { - // info("Removing mount point '%s', linked after '%s'", m->name, last?last->name:"ROOT"); - - if (mount_points_last_used == m) - mount_points_last_used = last; - - struct mount_point *t = m; - - if (m == mount_points_root || !last) - mount_points_root = m = m->next; - - else - last->next = m = m->next; - - t->next = NULL; - mount_point_free(t); - } - else { - last = m; - m->updated = 0; - m = m->next; - } - } -} - -static struct mount_point *get_mount_point(const char *name) { - struct mount_point *m; - - uint32_t hash = simple_hash(name); - - // search it, from the last position to the end - for(m = mount_points_last_used ; m ; m = m->next) { - if (unlikely(hash == m->hash && !strcmp(name, m->name))) { - mount_points_last_used = m->next; - return m; - } - } - - // search it from the beginning to the last position we used - for(m = mount_points_root ; m != mount_points_last_used ; m = m->next) { - if (unlikely(hash == m->hash && !strcmp(name, m->name))) { - mount_points_last_used = m->next; - 
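get_disk(), get_network_interface() and get_mount_point() share the lookup strategy visible here: because the kernel reports items in a stable order on every collection cycle, the search resumes just after the previous hit and wraps to the head only on a miss, so the common case costs a single comparison. A condensed sketch (the item struct and names are illustrative):

    #include <stdint.h>
    #include <string.h>

    struct item { char *name; uint32_t hash; struct item *next; };
    static struct item *root = NULL, *last_used = NULL;

    static struct item *find_cached(const char *name, uint32_t hash) {
        struct item *i;
        for(i = last_used; i; i = i->next)          /* resume after the previous hit */
            if(hash == i->hash && !strcmp(name, i->name)) { last_used = i->next; return i; }
        for(i = root; i != last_used; i = i->next)  /* wrap around to the head */
            if(hash == i->hash && !strcmp(name, i->name)) { last_used = i->next; return i; }
        return NULL;                                /* caller allocates and appends */
    }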
return m; - } - } - - // create a new one - m = callocz(1, sizeof(struct mount_point)); - m->name = strdupz(name); - m->hash = simple_hash(m->name); - m->len = strlen(m->name); - mount_points_added++; - - // link it to the end - if (mount_points_root) { - struct mount_point *e; - for(e = mount_points_root; e->next ; e = e->next) ; - e->next = m; - } - else - mount_points_root = m; - - return m; -} - -// -------------------------------------------------------------------------------------------------------------------- -// getmntinfo - -int do_getmntinfo(int update_every, usec_t dt) { - (void)dt; - -#define DELAULT_EXCLUDED_PATHS "/proc/*" -// taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes -#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none" -#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo" - - static int enable_new_mount_points = -1; - static int do_space = -1, do_inodes = -1; - static SIMPLE_PATTERN *excluded_mountpoints = NULL; - static SIMPLE_PATTERN *excluded_filesystems = NULL; - - if (unlikely(enable_new_mount_points == -1)) { - enable_new_mount_points = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, - "enable new mount points detected at runtime", - CONFIG_BOOLEAN_AUTO); - - do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO); - do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO); - - excluded_mountpoints = simple_pattern_create( - config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths", - DELAULT_EXCLUDED_PATHS) - , NULL - , SIMPLE_PATTERN_EXACT - ); - - excluded_filesystems = simple_pattern_create( - config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems", - DEFAULT_EXCLUDED_FILESYSTEMS) - , NULL - , SIMPLE_PATTERN_EXACT - ); - } - - if (likely(do_space || do_inodes)) { - struct statfs *mntbuf; - int mntsize; - - // there is no mount info in sysctl MIBs - if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) { - error("FREEBSD: getmntinfo() failed"); - do_space = 0; - error("DISABLED: disk_space.* charts"); - do_inodes = 0; - error("DISABLED: disk_inodes.* charts"); - error("DISABLED: getmntinfo module"); - return 1; - } else { - int i; - - mount_points_found = 0; - - for (i = 0; i < mntsize; i++) { - char title[4096 + 1]; - - struct mount_point *m = get_mount_point(mntbuf[i].f_mntonname); - m->updated = 1; - mount_points_found++; - - if (unlikely(!m->configured)) { - char var_name[4096 + 1]; - - // this is the first time we see this filesystem - - // remember we configured it - m->configured = 1; - - m->enabled = enable_new_mount_points; - - if (likely(m->enabled)) - m->enabled = !(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname) - || simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename)); - - snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname); - m->enabled = config_get_boolean_ondemand(var_name, "enabled", m->enabled); - - if (unlikely(m->enabled == CONFIG_BOOLEAN_NO)) - continue; - - m->do_space = config_get_boolean_ondemand(var_name, "space usage", do_space); - m->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", do_inodes); - } - - if (unlikely(!m->enabled)) - continue; - - if (unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected)) - continue; - - // -------------------------------------------------------------------------- - - int rendered = 0; - - if 
(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_blocks > 2))) { - if (unlikely(!m->st_space)) { - snprintfz(title, 4096, "Disk Space Usage for %s [%s]", - mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); - m->st_space = rrdset_create_localhost("disk_space", - mntbuf[i].f_mntonname, - NULL, - mntbuf[i].f_mntonname, - "disk.space", - title, - "GB", - "freebsd", - "getmntinfo", - 2023, - update_every, - RRDSET_TYPE_STACKED - ); - - m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, - mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - m->rd_space_used = rrddim_add(m->st_space, "used", NULL, - mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", - mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(m->st_space); - - rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail); - rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks - - mntbuf[i].f_bfree)); - rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree - - mntbuf[i].f_bavail)); - rrdset_done(m->st_space); - - rendered++; - } - - // -------------------------------------------------------------------------- - - if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) { - if (unlikely(!m->st_inodes)) { - snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", - mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname); - m->st_inodes = rrdset_create_localhost("disk_inodes", - mntbuf[i].f_mntonname, - NULL, - mntbuf[i].f_mntonname, - "disk.inodes", - title, - "Inodes", - "freebsd", - "getmntinfo", - 2024, - update_every, - RRDSET_TYPE_STACKED - ); - - m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(m->st_inodes); - - rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree); - rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files - - mntbuf[i].f_ffree)); - rrdset_done(m->st_inodes); - - rendered++; - } - - if (likely(rendered)) - m->collected++; - } - } - } else { - error("DISABLED: getmntinfo module"); - return 1; - } - - mount_points_cleanup(); - - return 0; -} diff --git a/src/freebsd_ipfw.c b/src/freebsd_ipfw.c deleted file mode 100644 index 81264b3f3..000000000 --- a/src/freebsd_ipfw.c +++ /dev/null @@ -1,370 +0,0 @@ -#include "common.h" - -#include <netinet/ip_fw.h> - -#define FREE_MEM_THRESHOLD 10000 // number of unused chunks that trigger memory freeing - -#define COMMON_IPFW_ERROR() error("DISABLED: ipfw.packets chart"); \ - error("DISABLED: ipfw.bytes chart"); \ - error("DISABLED: ipfw.dyn_active chart"); \ - error("DISABLED: ipfw.dyn_expired chart"); \ - error("DISABLED: ipfw.mem chart"); - -// -------------------------------------------------------------------------------------------------------------------- -// ipfw - -int do_ipfw(int update_every, usec_t dt) { - (void)dt; -#if __FreeBSD__ >= 11 - - static int do_static = -1, do_dynamic = -1, do_mem = -1; - - if (unlikely(do_static == -1)) { - do_static = config_get_boolean("plugin:freebsd:ipfw", "counters for static rules", 1); - do_dynamic = config_get_boolean("plugin:freebsd:ipfw", "number of dynamic rules", 
1); - do_mem = config_get_boolean("plugin:freebsd:ipfw", "allocated memory", 1); - } - - // variables for getting ipfw configuration - - int error; - static int ipfw_socket = -1; - static ipfw_cfg_lheader *cfg = NULL; - ip_fw3_opheader *op3 = NULL; - static socklen_t *optlen = NULL, cfg_size = 0; - - // variables for static rules handling - - ipfw_obj_ctlv *ctlv = NULL; - ipfw_obj_tlv *rbase = NULL; - int rcnt = 0; - - int n, seen; - struct ip_fw_rule *rule; - struct ip_fw_bcounter *cntr; - int c = 0; - - char rule_num_str[12]; - - // variables for dynamic rules handling - - caddr_t dynbase = NULL; - size_t dynsz = 0; - size_t readsz = sizeof(*cfg);; - int ttype = 0; - ipfw_obj_tlv *tlv; - ipfw_dyn_rule *dyn_rule; - uint16_t rulenum, prev_rulenum = IPFW_DEFAULT_RULE; - unsigned srn, static_rules_num = 0; - static size_t dyn_rules_num_size = 0; - - static struct dyn_rule_num { - uint16_t rule_num; - uint32_t active_rules; - uint32_t expired_rules; - } *dyn_rules_num = NULL; - - uint32_t *dyn_rules_counter; - - if (likely(do_static | do_dynamic | do_mem)) { - - // initialize the smallest ipfw_cfg_lheader possible - - if (unlikely((optlen == NULL) || (cfg == NULL))) { - optlen = reallocz(optlen, sizeof(socklen_t)); - *optlen = cfg_size = 32; - cfg = reallocz(cfg, *optlen); - } - - // get socket descriptor and initialize ipfw_cfg_lheader structure - - if (unlikely(ipfw_socket == -1)) - ipfw_socket = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); - if (unlikely(ipfw_socket == -1)) { - error("FREEBSD: can't get socket for ipfw configuration"); - error("FREEBSD: run netdata as root to get access to ipfw data"); - COMMON_IPFW_ERROR(); - return 1; - } - - bzero(cfg, 32); - cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES; - op3 = &cfg->opheader; - op3->opcode = IP_FW_XGET; - - // get ifpw configuration size than get configuration - - *optlen = cfg_size; - error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen); - if (error) - if (errno != ENOMEM) { - error("FREEBSD: ipfw socket reading error"); - COMMON_IPFW_ERROR(); - return 1; - } - if ((cfg->size > cfg_size) || ((cfg_size - cfg->size) > sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) { - *optlen = cfg_size = cfg->size; - cfg = reallocz(cfg, *optlen); - bzero(cfg, 32); - cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES; - op3 = &cfg->opheader; - op3->opcode = IP_FW_XGET; - error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen); - if (error) { - error("FREEBSD: ipfw socket reading error"); - COMMON_IPFW_ERROR(); - return 1; - } - } - - // go through static rules configuration structures - - ctlv = (ipfw_obj_ctlv *) (cfg + 1); - - if (cfg->flags & IPFW_CFG_GET_STATIC) { - /* We've requested static rules */ - if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) { - readsz += ctlv->head.length; - ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + - ctlv->head.length); - } - - if (ctlv->head.type == IPFW_TLV_RULE_LIST) { - rbase = (ipfw_obj_tlv *) (ctlv + 1); - rcnt = ctlv->count; - readsz += ctlv->head.length; - ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + ctlv->head.length); - } - } - - if ((cfg->flags & IPFW_CFG_GET_STATES) && (readsz != *optlen)) { - /* We may have some dynamic states */ - dynsz = *optlen - readsz; - /* Skip empty header */ - if (dynsz != sizeof(ipfw_obj_ctlv)) - dynbase = (caddr_t) ctlv; - else - dynsz = 0; - } - - // -------------------------------------------------------------------- - - if (likely(do_mem)) { - static RRDSET *st_mem = NULL; - static RRDDIM *rd_dyn_mem = 
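/*
 * A sketch (assuming FreeBSD 11+ headers) of the grow-and-retry
 * getsockopt(2) handshake used above: ipfw answers IP_FW_XGET through a
 * raw socket, and when the caller's buffer is too small the call fails
 * with ENOMEM but reports the needed size in cfg->size, so one retry
 * with that size normally suffices. Struct and constant names come from
 * <netinet/ip_fw.h> as used in the deleted code; the socket must be
 * created with socket(AF_INET, SOCK_RAW, IPPROTO_RAW), which requires
 * root.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

static ipfw_cfg_lheader *fetch_ipfw_cfg(int sock, socklen_t *len) {
    socklen_t size = sizeof(ipfw_cfg_lheader);
    ipfw_cfg_lheader *cfg = NULL;

    for (int pass = 0; pass < 2; pass++) {
        cfg = realloc(cfg, size);
        memset(cfg, 0, sizeof(*cfg));            /* zero the header only */
        cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
        cfg->opheader.opcode = IP_FW_XGET;

        *len = size;
        if (getsockopt(sock, IPPROTO_IP, IP_FW3, &cfg->opheader, len) == 0)
            return cfg;                          /* buffer was large enough */
        if (errno != ENOMEM || cfg->size <= size)
            break;                               /* a real error, give up */
        size = cfg->size;                        /* kernel reported what it needs */
    }
    free(cfg);
    return NULL;
}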
NULL; - static RRDDIM *rd_stat_mem = NULL; - - if (unlikely(!st_mem)) { - st_mem = rrdset_create_localhost("ipfw", - "mem", - NULL, - "memory allocated", - NULL, - "Memory allocated by rules", - "bytes", - "freebsd", - "ipfw", - 3005, - update_every, - RRDSET_TYPE_STACKED - ); - rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL); - - rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st_mem); - - rrddim_set_by_pointer(st_mem, rd_dyn_mem, dynsz); - rrddim_set_by_pointer(st_mem, rd_stat_mem, *optlen - dynsz); - rrdset_done(st_mem); - } - - // -------------------------------------------------------------------- - - static RRDSET *st_packets = NULL, *st_bytes = NULL; - RRDDIM *rd_packets = NULL, *rd_bytes = NULL; - - if (likely(do_static || do_dynamic)) { - if (likely(do_static)) { - if (unlikely(!st_packets)) - st_packets = rrdset_create_localhost("ipfw", - "packets", - NULL, - "static rules", - NULL, - "Packets", - "packets/s", - "freebsd", - "ipfw", - 3001, - update_every, - RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_packets); - - if (unlikely(!st_bytes)) - st_bytes = rrdset_create_localhost("ipfw", - "bytes", - NULL, - "static rules", - NULL, - "Bytes", - "bytes/s", - "freebsd", - "ipfw", - 3002, - update_every, - RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_bytes); - } - - for (n = seen = 0; n < rcnt; n++, rbase = (ipfw_obj_tlv *) ((caddr_t) rbase + rbase->length)) { - cntr = (struct ip_fw_bcounter *) (rbase + 1); - rule = (struct ip_fw_rule *) ((caddr_t) cntr + cntr->size); - if (rule->rulenum != prev_rulenum) - static_rules_num++; - if (rule->rulenum > IPFW_DEFAULT_RULE) - break; - - if (likely(do_static)) { - sprintf(rule_num_str, "%d_%d", rule->rulenum, rule->id); - - rd_packets = rrddim_find(st_packets, rule_num_str); - if (unlikely(!rd_packets)) - rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt); - - rd_bytes = rrddim_find(st_bytes, rule_num_str); - if (unlikely(!rd_bytes)) - rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt); - } - - c += rbase->length; - seen++; - } - - if (likely(do_static)) { - rrdset_done(st_packets); - rrdset_done(st_bytes); - } - } - - // -------------------------------------------------------------------- - - // go through dynamic rules configuration structures - - if (likely(do_dynamic && (dynsz > 0))) { - if ((dyn_rules_num_size < sizeof(struct dyn_rule_num) * static_rules_num) || - ((dyn_rules_num_size - sizeof(struct dyn_rule_num) * static_rules_num) > - sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) { - dyn_rules_num_size = sizeof(struct dyn_rule_num) * static_rules_num; - dyn_rules_num = reallocz(dyn_rules_num, dyn_rules_num_size); - } - bzero(dyn_rules_num, sizeof(struct dyn_rule_num) * static_rules_num); - dyn_rules_num->rule_num = IPFW_DEFAULT_RULE; - - if (dynsz > 0 && ctlv->head.type == IPFW_TLV_DYNSTATE_LIST) { - dynbase += sizeof(*ctlv); - dynsz -= sizeof(*ctlv); - ttype = IPFW_TLV_DYN_ENT; - } - - while (dynsz > 0) { - tlv = (ipfw_obj_tlv *) dynbase; - if (tlv->type != ttype) - break; - - dyn_rule = (ipfw_dyn_rule *) (tlv + 1); - bcopy(&dyn_rule->rule, &rulenum, sizeof(rulenum)); - - for (srn = 0; srn < (static_rules_num - 1); srn++) { - if (dyn_rule->expire > 0) - dyn_rules_counter = &dyn_rules_num[srn].active_rules; 
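/*
 * A sketch of the variable-length record walk used for the rule list
 * above: every ipfw object begins with a header that carries its own
 * length, so the cursor advances by rec->length bytes instead of a
 * fixed stride. The toy record type below is hypothetical; only the
 * walking idiom mirrors the deleted code.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct rec {            /* stand-in for ipfw_obj_tlv: type plus total length */
    uint16_t type;
    uint16_t length;    /* header plus payload, in bytes */
};

static void walk(const void *buf, size_t bufsz) {
    const char *p = buf;
    const char *end = p + bufsz;

    while ((size_t)(end - p) >= sizeof(struct rec)) {
        const struct rec *r = (const struct rec *)p;
        if (r->length < sizeof(struct rec) || r->length > (size_t)(end - p))
            break;                      /* malformed record: stop, do not overrun */
        printf("type=%u length=%u\n", r->type, r->length);
        p += r->length;                 /* advance by the record's own length */
    }
}

int main(void) {
    unsigned char buf[12] = {0};
    struct rec a = {1, 6}, b = {2, 6};  /* 4-byte header plus 2 bytes of payload */
    memcpy(buf, &a, sizeof(a));
    memcpy(buf + 6, &b, sizeof(b));
    walk(buf, sizeof(buf));
    return 0;
}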
- else - dyn_rules_counter = &dyn_rules_num[srn].expired_rules; - if (dyn_rules_num[srn].rule_num == rulenum) { - (*dyn_rules_counter)++; - break; - } - if (dyn_rules_num[srn].rule_num == IPFW_DEFAULT_RULE) { - dyn_rules_num[srn].rule_num = rulenum; - dyn_rules_num[srn + 1].rule_num = IPFW_DEFAULT_RULE; - (*dyn_rules_counter)++; - break; - } - } - - dynsz -= tlv->length; - dynbase += tlv->length; - } - - // -------------------------------------------------------------------- - - static RRDSET *st_active = NULL, *st_expired = NULL; - RRDDIM *rd_active = NULL, *rd_expired = NULL; - - if (unlikely(!st_active)) - st_active = rrdset_create_localhost("ipfw", - "active", - NULL, - "dynamic_rules", - NULL, - "Active rules", - "rules", - "freebsd", - "ipfw", - 3003, - update_every, - RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_active); - - if (unlikely(!st_expired)) - st_expired = rrdset_create_localhost("ipfw", - "expired", - NULL, - "dynamic_rules", - NULL, - "Expired rules", - "rules", - "freebsd", - "ipfw", - 3004, - update_every, - RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_expired); - - for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) { - sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num); - - rd_active = rrddim_find(st_active, rule_num_str); - if (unlikely(!rd_active)) - rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules); - - rd_expired = rrddim_find(st_expired, rule_num_str); - if (unlikely(!rd_expired)) - rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules); - } - - rrdset_done(st_active); - rrdset_done(st_expired); - } - } - - return 0; -#else - error("FREEBSD: ipfw charts supported for FreeBSD 11.0 and newer releases only"); - COMMON_IPFW_ERROR(); - return 1; -#endif -} diff --git a/src/freebsd_kstat_zfs.c b/src/freebsd_kstat_zfs.c deleted file mode 100644 index 1bd48d4bf..000000000 --- a/src/freebsd_kstat_zfs.c +++ /dev/null @@ -1,298 +0,0 @@ -#include "common.h" -#include "zfs_common.h" - -extern struct arcstats arcstats; - -// -------------------------------------------------------------------------------------------------------------------- -// kstat.zfs.misc.arcstats - -int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) { - (void)dt; - - unsigned long long l2_size; - size_t uint64_t_size = sizeof(uint64_t); - static struct mibs { - int hits[5]; - int misses[5]; - int demand_data_hits[5]; - int demand_data_misses[5]; - int demand_metadata_hits[5]; - int demand_metadata_misses[5]; - int prefetch_data_hits[5]; - int prefetch_data_misses[5]; - int prefetch_metadata_hits[5]; - int prefetch_metadata_misses[5]; - int mru_hits[5]; - int mru_ghost_hits[5]; - int mfu_hits[5]; - int mfu_ghost_hits[5]; - int deleted[5]; - int mutex_miss[5]; - int evict_skip[5]; - int evict_not_enough[5]; - int evict_l2_cached[5]; - int evict_l2_eligible[5]; - int evict_l2_ineligible[5]; - int evict_l2_skip[5]; - int hash_elements[5]; - int hash_elements_max[5]; - int hash_collisions[5]; - int hash_chains[5]; - int hash_chain_max[5]; - int p[5]; - int c[5]; - int c_min[5]; - int c_max[5]; - int size[5]; - int hdr_size[5]; - int data_size[5]; - int metadata_size[5]; - int other_size[5]; - int anon_size[5]; - int anon_evictable_data[5]; - int anon_evictable_metadata[5]; - int mru_size[5]; - int 
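/*
 * A sketch of the sentinel-terminated tally table built above:
 * dyn_rules_num[] is scanned linearly, the first slot holding the
 * sentinel marks the end of the used entries, and inserting a new rule
 * number moves the sentinel one slot right. SENTINEL stands in for
 * IPFW_DEFAULT_RULE here.
 */
#include <stdint.h>
#include <stdio.h>

#define SENTINEL 65535              /* stand-in for IPFW_DEFAULT_RULE */

struct tally {
    uint16_t rule_num;
    uint32_t count;
};

static void bump(struct tally *t, size_t slots, uint16_t rule) {
    for (size_t i = 0; i + 1 < slots; i++) {
        if (t[i].rule_num == rule) {            /* existing entry */
            t[i].count++;
            return;
        }
        if (t[i].rule_num == SENTINEL) {        /* free slot: claim it */
            t[i].rule_num = rule;
            t[i].count = 1;
            t[i + 1].rule_num = SENTINEL;       /* move the terminator */
            return;
        }
    }
}

int main(void) {
    struct tally t[4] = {{SENTINEL, 0}};
    bump(t, 4, 100);
    bump(t, 4, 200);
    bump(t, 4, 100);
    for (size_t i = 0; t[i].rule_num != SENTINEL; i++)
        printf("rule %u: %u\n", t[i].rule_num, t[i].count);
    return 0;
}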
mru_evictable_data[5]; - int mru_evictable_metadata[5]; - int mru_ghost_size[5]; - int mru_ghost_evictable_data[5]; - int mru_ghost_evictable_metadata[5]; - int mfu_size[5]; - int mfu_evictable_data[5]; - int mfu_evictable_metadata[5]; - int mfu_ghost_size[5]; - int mfu_ghost_evictable_data[5]; - int mfu_ghost_evictable_metadata[5]; - int l2_hits[5]; - int l2_misses[5]; - int l2_feeds[5]; - int l2_rw_clash[5]; - int l2_read_bytes[5]; - int l2_write_bytes[5]; - int l2_writes_sent[5]; - int l2_writes_done[5]; - int l2_writes_error[5]; - int l2_writes_lock_retry[5]; - int l2_evict_lock_retry[5]; - int l2_evict_reading[5]; - int l2_evict_l1cached[5]; - int l2_free_on_write[5]; - int l2_cdata_free_on_write[5]; - int l2_abort_lowmem[5]; - int l2_cksum_bad[5]; - int l2_io_error[5]; - int l2_size[5]; - int l2_asize[5]; - int l2_hdr_size[5]; - int l2_compress_successes[5]; - int l2_compress_zeros[5]; - int l2_compress_failures[5]; - int memory_throttle_count[5]; - int duplicate_buffers[5]; - int duplicate_buffers_size[5]; - int duplicate_reads[5]; - int memory_direct_count[5]; - int memory_indirect_count[5]; - int arc_no_grow[5]; - int arc_tempreserve[5]; - int arc_loaned_bytes[5]; - int arc_prune[5]; - int arc_meta_used[5]; - int arc_meta_limit[5]; - int arc_meta_max[5]; - int arc_meta_min[5]; - int arc_need_free[5]; - int arc_sys_free[5]; - } mibs; - - arcstats.l2exist = -1; - - if(unlikely(sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &uint64_t_size, NULL, 0))) - return 0; - - if(likely(l2_size)) - arcstats.l2exist = 1; - else - arcstats.l2exist = 0; - - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hits", mibs.hits, arcstats.hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.misses", mibs.misses, arcstats.misses); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_hits", mibs.demand_data_hits, arcstats.demand_data_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_misses", mibs.demand_data_misses, arcstats.demand_data_misses); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_hits", mibs.demand_metadata_hits, arcstats.demand_metadata_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_misses", mibs.demand_metadata_misses, arcstats.demand_metadata_misses); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_hits", mibs.prefetch_data_hits, arcstats.prefetch_data_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_misses", mibs.prefetch_data_misses, arcstats.prefetch_data_misses); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_hits", mibs.prefetch_metadata_hits, arcstats.prefetch_metadata_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_misses", mibs.prefetch_metadata_misses, arcstats.prefetch_metadata_misses); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_hits", mibs.mru_hits, arcstats.mru_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_hits", mibs.mru_ghost_hits, arcstats.mru_ghost_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_hits", mibs.mfu_hits, arcstats.mfu_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_hits", mibs.mfu_ghost_hits, arcstats.mfu_ghost_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.deleted", mibs.deleted, arcstats.deleted); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mutex_miss", mibs.mutex_miss, arcstats.mutex_miss); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_skip", mibs.evict_skip, arcstats.evict_skip); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_not_enough", mibs.evict_not_enough, 
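/*
 * A standalone sketch of the probe above that decides whether an L2ARC
 * device exists: read kstat.zfs.misc.arcstats.l2_size as a single
 * uint64_t with sysctlbyname(3) and treat a non-zero value as "L2ARC
 * present".
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t l2_size = 0;
    size_t len = sizeof(l2_size);

    if (sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &len, NULL, 0) != 0) {
        perror("sysctlbyname");     /* MIB absent: no ZFS on this system */
        return 1;
    }
    printf("l2exist=%d (l2_size=%ju)\n", l2_size ? 1 : 0, (uintmax_t)l2_size);
    return 0;
}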
arcstats.evict_not_enough); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_cached", mibs.evict_l2_cached, arcstats.evict_l2_cached); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_eligible", mibs.evict_l2_eligible, arcstats.evict_l2_eligible); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_ineligible", mibs.evict_l2_ineligible, arcstats.evict_l2_ineligible); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_skip", mibs.evict_l2_skip, arcstats.evict_l2_skip); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements", mibs.hash_elements, arcstats.hash_elements); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements_max", mibs.hash_elements_max, arcstats.hash_elements_max); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_collisions", mibs.hash_collisions, arcstats.hash_collisions); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chains", mibs.hash_chains, arcstats.hash_chains); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chain_max", mibs.hash_chain_max, arcstats.hash_chain_max); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.p", mibs.p, arcstats.p); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c", mibs.c, arcstats.c); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_min", mibs.c_min, arcstats.c_min); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_max", mibs.c_max, arcstats.c_max); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.size", mibs.size, arcstats.size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hdr_size", mibs.hdr_size, arcstats.hdr_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.data_size", mibs.data_size, arcstats.data_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.metadata_size", mibs.metadata_size, arcstats.metadata_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.other_size", mibs.other_size, arcstats.other_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_size", mibs.anon_size, arcstats.anon_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_data", mibs.anon_evictable_data, arcstats.anon_evictable_data); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_metadata", mibs.anon_evictable_metadata, arcstats.anon_evictable_metadata); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_size", mibs.mru_size, arcstats.mru_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_data", mibs.mru_evictable_data, arcstats.mru_evictable_data); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_metadata", mibs.mru_evictable_metadata, arcstats.mru_evictable_metadata); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_size", mibs.mru_ghost_size, arcstats.mru_ghost_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_data", mibs.mru_ghost_evictable_data, arcstats.mru_ghost_evictable_data); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata", mibs.mru_ghost_evictable_metadata, arcstats.mru_ghost_evictable_metadata); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_size", mibs.mfu_size, arcstats.mfu_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_data", mibs.mfu_evictable_data, arcstats.mfu_evictable_data); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_metadata", mibs.mfu_evictable_metadata, arcstats.mfu_evictable_metadata); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_size", mibs.mfu_ghost_size, 
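/*
 * A sketch of why every GETSYSCTL_SIMPLE() call above carries a static
 * int[5]: translating a sysctl name to its numeric MIB is the costly
 * step, so the collector resolves it once with sysctlnametomib(3) and
 * then reuses the cached MIB with plain sysctl(2) on every update. The
 * helper name below is hypothetical.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

static int read_u64_cached(const char *name, int *mib, u_int *miblen, uint64_t *out) {
    if (*miblen == 0) {                     /* first call: resolve and cache */
        size_t n = 5;                       /* kstat.zfs.misc.arcstats.* has 5 components */
        if (sysctlnametomib(name, mib, &n) != 0)
            return -1;
        *miblen = (u_int)n;
    }
    size_t len = sizeof(*out);
    return sysctl(mib, *miblen, out, &len, NULL, 0);
}

int main(void) {
    static int mib[5];
    static u_int miblen = 0;
    uint64_t hits;

    if (read_u64_cached("kstat.zfs.misc.arcstats.hits", mib, &miblen, &hits) == 0)
        printf("arc hits: %ju\n", (uintmax_t)hits);
    return 0;
}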
arcstats.mfu_ghost_size); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_data", mibs.mfu_ghost_evictable_data, arcstats.mfu_ghost_evictable_data); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata", mibs.mfu_ghost_evictable_metadata, arcstats.mfu_ghost_evictable_metadata); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hits", mibs.l2_hits, arcstats.l2_hits); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_misses", mibs.l2_misses, arcstats.l2_misses); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_feeds", mibs.l2_feeds, arcstats.l2_feeds); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_rw_clash", mibs.l2_rw_clash, arcstats.l2_rw_clash); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_read_bytes", mibs.l2_read_bytes, arcstats.l2_read_bytes); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_write_bytes", mibs.l2_write_bytes, arcstats.l2_write_bytes); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_sent", mibs.l2_writes_sent, arcstats.l2_writes_sent); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_done", mibs.l2_writes_done, arcstats.l2_writes_done); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_error", mibs.l2_writes_error, arcstats.l2_writes_error); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_lock_retry", mibs.l2_writes_lock_retry, arcstats.l2_writes_lock_retry); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_lock_retry", mibs.l2_evict_lock_retry, arcstats.l2_evict_lock_retry); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_reading", mibs.l2_evict_reading, arcstats.l2_evict_reading); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_l1cached", mibs.l2_evict_l1cached, arcstats.l2_evict_l1cached); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_free_on_write", mibs.l2_free_on_write, arcstats.l2_free_on_write); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cdata_free_on_write", mibs.l2_cdata_free_on_write, arcstats.l2_cdata_free_on_write); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_abort_lowmem", mibs.l2_abort_lowmem, arcstats.l2_abort_lowmem); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cksum_bad", mibs.l2_cksum_bad, arcstats.l2_cksum_bad); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_io_error", mibs.l2_io_error, arcstats.l2_io_error); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_size", mibs.l2_size, arcstats.l2_size); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_asize", mibs.l2_asize, arcstats.l2_asize); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hdr_size", mibs.l2_hdr_size, arcstats.l2_hdr_size); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_successes", mibs.l2_compress_successes, arcstats.l2_compress_successes); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_zeros", mibs.l2_compress_zeros, arcstats.l2_compress_zeros); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_failures", mibs.l2_compress_failures, arcstats.l2_compress_failures); - GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_throttle_count", mibs.memory_throttle_count, arcstats.memory_throttle_count); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers", mibs.duplicate_buffers, arcstats.duplicate_buffers); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers_size", 
mibs.duplicate_buffers_size, arcstats.duplicate_buffers_size); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_reads", mibs.duplicate_reads, arcstats.duplicate_reads); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_direct_count", mibs.memory_direct_count, arcstats.memory_direct_count); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_indirect_count", mibs.memory_indirect_count, arcstats.memory_indirect_count); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_no_grow", mibs.arc_no_grow, arcstats.arc_no_grow); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_tempreserve", mibs.arc_tempreserve, arcstats.arc_tempreserve); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_loaned_bytes", mibs.arc_loaned_bytes, arcstats.arc_loaned_bytes); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_prune", mibs.arc_prune, arcstats.arc_prune); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_used", mibs.arc_meta_used, arcstats.arc_meta_used); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_limit", mibs.arc_meta_limit, arcstats.arc_meta_limit); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_max", mibs.arc_meta_max, arcstats.arc_meta_max); - // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_min", mibs.arc_meta_min, arcstats.arc_meta_min); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free); - // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free); - - generate_charts_arcstats("freebsd", update_every); - generate_charts_arc_summary("freebsd", update_every); - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// kstat.zfs.misc.zio_trim - -int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { - (void)dt; - static int mib_bytes[5] = {0, 0, 0, 0, 0}, mib_success[5] = {0, 0, 0, 0, 0}, - mib_failed[5] = {0, 0, 0, 0, 0}, mib_unsupported[5] = {0, 0, 0, 0, 0}; - uint64_t bytes, success, failed, unsupported; - - if (unlikely(GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.bytes", mib_bytes, bytes) || - GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.success", mib_success, success) || - GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.failed", mib_failed, failed) || - GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.unsupported", mib_unsupported, unsupported))) { - error("DISABLED: zfs.trim_bytes chart"); - error("DISABLED: zfs.trim_success chart"); - error("DISABLED: kstat.zfs.misc.zio_trim module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st_bytes = NULL; - static RRDDIM *rd_bytes = NULL; - - if (unlikely(!st_bytes)) { - st_bytes = rrdset_create_localhost( - "zfs", - "trim_bytes", - NULL, - "trim", - NULL, - "Successfully TRIMmed bytes", - "bytes", - "freebsd", - "zfs", - 2320, - update_every, - RRDSET_TYPE_LINE - ); - - rd_bytes = rrddim_add(st_bytes, "TRIMmed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_bytes); - - rrddim_set_by_pointer(st_bytes, rd_bytes, bytes); - rrdset_done(st_bytes); - - // -------------------------------------------------------------------- - - static RRDSET *st_requests = NULL; - static RRDDIM *rd_successful = NULL, *rd_failed = NULL, *rd_unsupported = NULL; - - if (unlikely(!st_requests)) { - st_requests = 
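/*
 * A note on the RRD_ALGORITHM_INCREMENTAL dimensions above: the kernel
 * exports the zio_trim values as ever-growing totals, and the charting
 * layer derives a rate from consecutive samples. A minimal sketch of
 * that derivation, assuming two samples taken dt microseconds apart:
 */
#include <stdint.h>
#include <stdio.h>

static double counter_rate(uint64_t prev, uint64_t cur, uint64_t dt_usec) {
    if (cur < prev || dt_usec == 0)
        return 0.0;                 /* counter reset or bad interval */
    return (double)(cur - prev) * 1e6 / (double)dt_usec;
}

int main(void) {
    /* 4096 bytes TRIMmed between two samples one second apart */
    printf("%.1f bytes/s\n", counter_rate(1000000, 1004096, 1000000));
    return 0;
}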
rrdset_create_localhost( - "zfs", - "trim_requests", - NULL, - "trim", - NULL, - "TRIM requests", - "requests", - "freebsd", - "zfs", - 2321, - update_every, - RRDSET_TYPE_STACKED - ); - - rd_successful = rrddim_add(st_requests, "successful", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st_requests, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_unsupported = rrddim_add(st_requests, "unsupported", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_requests); - - rrddim_set_by_pointer(st_requests, rd_successful, success); - rrddim_set_by_pointer(st_requests, rd_failed, failed); - rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported); - rrdset_done(st_requests); - - } - - return 0; -} \ No newline at end of file diff --git a/src/freebsd_sysctl.c b/src/freebsd_sysctl.c deleted file mode 100644 index 1e11255aa..000000000 --- a/src/freebsd_sysctl.c +++ /dev/null @@ -1,3171 +0,0 @@ -#include "common.h" - -#include <sys/vmmeter.h> -#include <vm/vm_param.h> - -#define _KERNEL -#include <sys/sem.h> -#include <sys/shm.h> -#include <sys/msg.h> -#undef _KERNEL - -#include <net/netisr.h> - -#include <netinet/ip.h> -#include <netinet/ip_var.h> -#include <netinet/ip_icmp.h> -#include <netinet/icmp_var.h> -#include <netinet6/ip6_var.h> -#include <netinet/icmp6.h> -#include <netinet/tcp_var.h> -#include <netinet/tcp_fsm.h> -#include <netinet/udp.h> -#include <netinet/udp_var.h> - -// -------------------------------------------------------------------------------------------------------------------- -// common definitions and variables - -int system_pagesize = PAGE_SIZE; -int number_of_cpus = 1; -#if __FreeBSD_version >= 1200029 -struct __vmmeter { - uint64_t v_swtch; - uint64_t v_trap; - uint64_t v_syscall; - uint64_t v_intr; - uint64_t v_soft; - uint64_t v_vm_faults; - uint64_t v_io_faults; - uint64_t v_cow_faults; - uint64_t v_cow_optim; - uint64_t v_zfod; - uint64_t v_ozfod; - uint64_t v_swapin; - uint64_t v_swapout; - uint64_t v_swappgsin; - uint64_t v_swappgsout; - uint64_t v_vnodein; - uint64_t v_vnodeout; - uint64_t v_vnodepgsin; - uint64_t v_vnodepgsout; - uint64_t v_intrans; - uint64_t v_reactivated; - uint64_t v_pdwakeups; - uint64_t v_pdpages; - uint64_t v_pdshortfalls; - uint64_t v_dfree; - uint64_t v_pfree; - uint64_t v_tfree; - uint64_t v_forks; - uint64_t v_vforks; - uint64_t v_rforks; - uint64_t v_kthreads; - uint64_t v_forkpages; - uint64_t v_vforkpages; - uint64_t v_rforkpages; - uint64_t v_kthreadpages; - u_int v_page_size; - u_int v_page_count; - u_int v_free_reserved; - u_int v_free_target; - u_int v_free_min; - u_int v_free_count; - u_int v_wire_count; - u_int v_active_count; - u_int v_inactive_target; - u_int v_inactive_count; - u_int v_laundry_count; - u_int v_pageout_free_min; - u_int v_interrupt_free_min; - u_int v_free_severe; -}; -typedef struct __vmmeter vmmeter_t; -#else -typedef struct vmmeter vmmeter_t; -#endif - -// -------------------------------------------------------------------------------------------------------------------- -// FreeBSD plugin initialization - -int freebsd_plugin_init() -{ - system_pagesize = getpagesize(); - if (system_pagesize <= 0) { - error("FREEBSD: can't get system page size"); - return 1; - } - - if (unlikely(GETSYSCTL_BY_NAME("kern.smp.cpus", number_of_cpus))) { - error("FREEBSD: can't get number of cpus"); - return 1; - } - - if (unlikely(!number_of_cpus)) { - error("FREEBSD: wrong number of cpus"); - return 1; - } - - return 0; -} - -// 
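/*
 * A standalone sketch of the two probes freebsd_plugin_init() performs
 * above: the VM page size (used later to scale page counts into bytes)
 * and the CPU count from kern.smp.cpus (used to size per-CPU arrays).
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
    int pagesize = getpagesize();
    int ncpu = 0;
    size_t len = sizeof(ncpu);

    if (pagesize <= 0 || sysctlbyname("kern.smp.cpus", &ncpu, &len, NULL, 0) != 0 || ncpu <= 0) {
        fprintf(stderr, "cannot size the collector\n");
        return 1;
    }
    printf("page size: %d bytes, cpus: %d\n", pagesize, ncpu);
    return 0;
}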
-------------------------------------------------------------------------------------------------------------------- -// vm.loadavg - -// FreeBSD calculates load averages once every 5 seconds -#define MIN_LOADAVG_UPDATE_EVERY 5 - -int do_vm_loadavg(int update_every, usec_t dt){ - static usec_t next_loadavg_dt = 0; - - if (next_loadavg_dt <= dt) { - static int mib[2] = {0, 0}; - struct loadavg sysload; - - if (unlikely(GETSYSCTL_SIMPLE("vm.loadavg", mib, sysload))) { - error("DISABLED: system.load chart"); - error("DISABLED: vm.loadavg module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd_load1 = NULL, *rd_load2 = NULL, *rd_load3 = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "load", - NULL, - "load", - NULL, - "System Load Average", - "load", - "freebsd", - "vm.loadavg", - 100, - (update_every < MIN_LOADAVG_UPDATE_EVERY) ? - MIN_LOADAVG_UPDATE_EVERY : update_every, RRDSET_TYPE_LINE - ); - rd_load1 = rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_load2 = rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_load3 = rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_load1, (collected_number) ((double) sysload.ldavg[0] / sysload.fscale * 1000)); - rrddim_set_by_pointer(st, rd_load2, (collected_number) ((double) sysload.ldavg[1] / sysload.fscale * 1000)); - rrddim_set_by_pointer(st, rd_load3, (collected_number) ((double) sysload.ldavg[2] / sysload.fscale * 1000)); - rrdset_done(st); - - next_loadavg_dt = st->update_every * USEC_PER_SEC; - } - } - else - next_loadavg_dt -= dt; - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.vmtotal - -int do_vm_vmtotal(int update_every, usec_t dt) { - (void)dt; - static int do_all_processes = -1, do_processes = -1, do_committed = -1; - - if (unlikely(do_all_processes == -1)) { - do_all_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "enable total processes", 1); - do_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "processes running", 1); - do_committed = config_get_boolean("plugin:freebsd:vm.vmtotal", "committed memory", 1); - } - - if (likely(do_all_processes | do_processes | do_committed)) { - static int mib[2] = {0, 0}; - struct vmtotal vmtotal_data; - - if (unlikely(GETSYSCTL_SIMPLE("vm.vmtotal", mib, vmtotal_data))) { - do_all_processes = 0; - error("DISABLED: system.active_processes chart"); - do_processes = 0; - error("DISABLED: system.processes chart"); - do_committed = 0; - error("DISABLED: mem.committed chart"); - error("DISABLED: vm.vmtotal module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - if (likely(do_all_processes)) { - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "active_processes", - NULL, - "processes", - NULL, - "System Active Processes", - "processes", - "freebsd", - "vm.vmtotal", - 750, - update_every, - RRDSET_TYPE_LINE - ); - rd = rrddim_add(st, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, (vmtotal_data.t_rq + vmtotal_data.t_dw + vmtotal_data.t_pw + vmtotal_data.t_sl + vmtotal_data.t_sw)); - rrdset_done(st); - } - - // 
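/*
 * A sketch of the fixed-point conversion above: vm.loadavg returns a
 * struct loadavg whose ldavg[] entries are scaled integers, and the
 * kernel-supplied fscale divisor turns them back into load averages
 * (the chart keeps them multiplied by 1000). FreeBSD refreshes these
 * only about every 5 seconds, hence the MIN_LOADAVG_UPDATE_EVERY
 * throttle.
 */
#include <sys/types.h>
#include <sys/resource.h>   /* struct loadavg */
#include <sys/sysctl.h>
#include <stdio.h>

int main(void) {
    struct loadavg la;
    size_t len = sizeof(la);

    if (sysctlbyname("vm.loadavg", &la, &len, NULL, 0) != 0) {
        perror("sysctlbyname");
        return 1;
    }
    printf("load: %.2f %.2f %.2f\n",
           (double)la.ldavg[0] / la.fscale,
           (double)la.ldavg[1] / la.fscale,
           (double)la.ldavg[2] / la.fscale);
    return 0;
}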
-------------------------------------------------------------------- - - if (likely(do_processes)) { - static RRDSET *st = NULL; - static RRDDIM *rd_running = NULL, *rd_blocked = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "processes", - NULL, - "processes", - NULL, - "System Processes", - "processes", - "freebsd", - "vm.vmtotal", - 600, - update_every, - RRDSET_TYPE_LINE - ); - - rd_running = rrddim_add(st, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_blocked = rrddim_add(st, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_running, vmtotal_data.t_rq); - rrddim_set_by_pointer(st, rd_blocked, (vmtotal_data.t_dw + vmtotal_data.t_pw)); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_committed)) { - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "mem", - "committed", - NULL, - "system", - NULL, - "Committed (Allocated) Memory", - "MB", - "freebsd", - "vm.vmtotal", - NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED, - update_every, - RRDSET_TYPE_AREA - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd = rrddim_add(st, "Committed_AS", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, vmtotal_data.t_rm); - rrdset_done(st); - } - } - } else { - error("DISABLED: vm.vmtotal module"); - return 1; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// kern.cp_time - -int do_kern_cp_time(int update_every, usec_t dt) { - (void)dt; - - if (unlikely(CPUSTATES != 5)) { - error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES); - error("DISABLED: system.cpu chart"); - error("DISABLED: kern.cp_time module"); - return 1; - } else { - static int mib[2] = {0, 0}; - long cp_time[CPUSTATES]; - - if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) { - error("DISABLED: system.cpu chart"); - error("DISABLED: kern.cp_time module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd_nice = NULL, *rd_system = NULL, *rd_user = NULL, *rd_interrupt = NULL, *rd_idle = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "cpu", - NULL, - "cpu", - "system.cpu", - "Total CPU utilization", - "percentage", - "freebsd", - "kern.cp_time", - 100, update_every, - RRDSET_TYPE_STACKED - ); - - rd_nice = rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_system = rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_user = rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_interrupt = rrddim_add(st, "interrupt", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_idle = rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rrddim_hide(st, "idle"); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_nice, cp_time[1]); - rrddim_set_by_pointer(st, rd_system, cp_time[2]); - rrddim_set_by_pointer(st, rd_user, cp_time[0]); - rrddim_set_by_pointer(st, rd_interrupt, cp_time[3]); - rrddim_set_by_pointer(st, rd_idle, cp_time[4]); - rrdset_done(st); - } - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// 
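/*
 * A sketch of what RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL computes for the
 * chart above: take two kern.cp_time snapshots, diff each of the
 * CPUSTATES counters, and express every state as a share of the summed
 * deltas. cp_time orders the states user, nice, system, interrupt,
 * idle, which is why the deleted code indexes them 0 through 4 the way
 * it does.
 */
#include <sys/types.h>
#include <sys/resource.h>   /* CPUSTATES */
#include <sys/sysctl.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
    long a[CPUSTATES], b[CPUSTATES], total = 0;
    size_t len = sizeof(a);

    if (sysctlbyname("kern.cp_time", a, &len, NULL, 0) != 0)
        return 1;
    sleep(1);
    len = sizeof(b);
    if (sysctlbyname("kern.cp_time", b, &len, NULL, 0) != 0)
        return 1;

    for (int s = 0; s < CPUSTATES; s++)
        total += b[s] - a[s];

    const char *names[] = { "user", "nice", "system", "interrupt", "idle" };
    for (int s = 0; s < CPUSTATES && total > 0; s++)
        printf("%-9s %6.2f%%\n", names[s], 100.0 * (double)(b[s] - a[s]) / (double)total);
    return 0;
}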
kern.cp_times - -int do_kern_cp_times(int update_every, usec_t dt) { - (void)dt; - - if (unlikely(CPUSTATES != 5)) { - error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES); - error("DISABLED: cpu.cpuXX charts"); - error("DISABLED: kern.cp_times module"); - return 1; - } else { - static int mib[2] = {0, 0}; - long cp_time[CPUSTATES]; - static long *pcpu_cp_time = NULL; - static int old_number_of_cpus = 0; - - if(unlikely(number_of_cpus != old_number_of_cpus)) - pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus); - if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) { - error("DISABLED: cpu.cpuXX charts"); - error("DISABLED: kern.cp_times module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - int i; - static struct cpu_chart { - char cpuid[MAX_INT_DIGITS + 4]; - RRDSET *st; - RRDDIM *rd_user; - RRDDIM *rd_nice; - RRDDIM *rd_system; - RRDDIM *rd_interrupt; - RRDDIM *rd_idle; - } *all_cpu_charts = NULL; - - if(unlikely(number_of_cpus > old_number_of_cpus)) { - all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus); - memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus)); - } - - for (i = 0; i < number_of_cpus; i++) { - if (unlikely(!all_cpu_charts[i].st)) { - snprintfz(all_cpu_charts[i].cpuid, MAX_INT_DIGITS, "cpu%d", i); - all_cpu_charts[i].st = rrdset_create_localhost( - "cpu", - all_cpu_charts[i].cpuid, - NULL, - "utilization", - "cpu.cpu", - "Core utilization", - "percentage", - "freebsd", - "kern.cp_times", - 1000, - update_every, - RRDSET_TYPE_STACKED - ); - - all_cpu_charts[i].rd_nice = rrddim_add(all_cpu_charts[i].st, "nice", NULL, 1, 1, - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - all_cpu_charts[i].rd_system = rrddim_add(all_cpu_charts[i].st, "system", NULL, 1, 1, - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - all_cpu_charts[i].rd_user = rrddim_add(all_cpu_charts[i].st, "user", NULL, 1, 1, - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - all_cpu_charts[i].rd_interrupt = rrddim_add(all_cpu_charts[i].st, "interrupt", NULL, 1, 1, - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - all_cpu_charts[i].rd_idle = rrddim_add(all_cpu_charts[i].st, "idle", NULL, 1, 1, - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rrddim_hide(all_cpu_charts[i].st, "idle"); - } else rrdset_next(all_cpu_charts[i].st); - - rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_nice, pcpu_cp_time[i * 5 + 1]); - rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_system, pcpu_cp_time[i * 5 + 2]); - rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_user, pcpu_cp_time[i * 5 + 0]); - rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_interrupt, pcpu_cp_time[i * 5 + 3]); - rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_idle, pcpu_cp_time[i * 5 + 4]); - rrdset_done(all_cpu_charts[i].st); - } - } - - old_number_of_cpus = number_of_cpus; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// dev.cpu.temperature - -int do_dev_cpu_temperature(int update_every, usec_t dt) { - (void)dt; - - int i; - static int *mib = NULL; - static int *pcpu_temperature = NULL; - static int old_number_of_cpus = 0; - char char_mib[MAX_INT_DIGITS + 21]; - char char_rd[MAX_INT_DIGITS + 9]; - - if (unlikely(number_of_cpus != old_number_of_cpus)) { - pcpu_temperature = 
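/*
 * A sketch of the per-CPU variant above: kern.cp_times returns one
 * CPUSTATES-sized block per core in a single flat array, so core i,
 * state s lives at index i * CPUSTATES + s, and the buffer must be
 * resized whenever the CPU count changes.
 */
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int ncpu = 0;
    size_t len = sizeof(ncpu);
    if (sysctlbyname("kern.smp.cpus", &ncpu, &len, NULL, 0) != 0 || ncpu <= 0)
        return 1;

    long *cp = malloc(sizeof(long) * CPUSTATES * ncpu);
    if (cp == NULL)
        return 1;
    len = sizeof(long) * CPUSTATES * ncpu;
    if (sysctlbyname("kern.cp_times", cp, &len, NULL, 0) != 0)
        return 1;

    for (int i = 0; i < ncpu; i++)
        printf("cpu%d idle ticks: %ld\n", i, cp[i * CPUSTATES + 4]);   /* 4 == idle */
    free(cp);
    return 0;
}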
reallocz(pcpu_temperature, sizeof(int) * number_of_cpus); - mib = reallocz(mib, sizeof(int) * number_of_cpus * 4); - if (unlikely(number_of_cpus > old_number_of_cpus)) - memset(&mib[old_number_of_cpus * 4], 0, sizeof(RRDDIM) * (number_of_cpus - old_number_of_cpus)); - } - for (i = 0; i < number_of_cpus; i++) { - if (unlikely(!(mib[i * 4]))) - sprintf(char_mib, "dev.cpu.%d.temperature", i); - if (unlikely(getsysctl_simple(char_mib, &mib[i * 4], 4, &pcpu_temperature[i], sizeof(int)))) { - error("DISABLED: cpu.temperature chart"); - error("DISABLED: dev.cpu.temperature module"); - return 1; - } - } - - // -------------------------------------------------------------------- - - static RRDSET *st; - static RRDDIM **rd_pcpu_temperature; - - if (unlikely(number_of_cpus != old_number_of_cpus)) { - rd_pcpu_temperature = reallocz(rd_pcpu_temperature, sizeof(RRDDIM) * number_of_cpus); - if (unlikely(number_of_cpus > old_number_of_cpus)) - memset(&rd_pcpu_temperature[old_number_of_cpus], 0, sizeof(RRDDIM) * (number_of_cpus - old_number_of_cpus)); - } - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "cpu", - "temperature", - NULL, - "temperature", - "cpu.temperatute", - "Core temperature", - "Celsius", - "freebsd", - "dev.cpu.temperature", - 1050, - update_every, - RRDSET_TYPE_LINE - ); - } - else rrdset_next(st); - - for (i = 0; i < number_of_cpus; i++) { - if (unlikely(!rd_pcpu_temperature[i])) { - sprintf(char_rd, "cpu%d.temp", i); - rd_pcpu_temperature[i] = rrddim_add(st, char_rd, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st, rd_pcpu_temperature[i], (collected_number) ((double)pcpu_temperature[i] / 10 - 273.15)); - } - - rrdset_done(st); - - old_number_of_cpus = number_of_cpus; - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// dev.cpu.0.freq - -int do_dev_cpu_0_freq(int update_every, usec_t dt) { - (void)dt; - static int mib[4] = {0, 0, 0, 0}; - int cpufreq; - - if (unlikely(GETSYSCTL_SIMPLE("dev.cpu.0.freq", mib, cpufreq))) { - error("DISABLED: cpu.scaling_cur_freq chart"); - error("DISABLED: dev.cpu.0.freq module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "cpu", - "scaling_cur_freq", - NULL, - "cpufreq", - NULL, - "Current CPU Scaling Frequency", - "MHz", - "freebsd", - "dev.cpu.0.freq", - 5003, - update_every, - RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "frequency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, cpufreq); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// hw.intrcnt - -int do_hw_intcnt(int update_every, usec_t dt) { - (void)dt; - static int mib_hw_intrcnt[2] = {0, 0}; - size_t intrcnt_size = 0; - unsigned long i; - - if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) { - error("DISABLED: system.intr chart"); - error("DISABLED: system.interrupts chart"); - error("DISABLED: hw.intrcnt module"); - return 1; - } else { - unsigned long nintr = 0; - static unsigned long old_nintr = 0; - static unsigned long *intrcnt = NULL; - unsigned long long totalintr = 0; - - nintr = intrcnt_size / sizeof(u_long); - if (unlikely(nintr != old_nintr)) - intrcnt = reallocz(intrcnt, nintr * sizeof(u_long)); 
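/*
 * A sketch of the unit handling above: dev.cpu.N.temperature reports an
 * int in tenths of a kelvin, so the chart converts to Celsius with
 * value / 10 - 273.15. Reading it requires a temperature driver such as
 * coretemp(4) or amdtemp(4) to be loaded.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main(void) {
    int decikelvin = 0;
    size_t len = sizeof(decikelvin);

    if (sysctlbyname("dev.cpu.0.temperature", &decikelvin, &len, NULL, 0) != 0) {
        perror("sysctlbyname");
        return 1;
    }
    printf("cpu0: %.1f C\n", (double)decikelvin / 10.0 - 273.15);
    return 0;
}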
- if (unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) { - error("DISABLED: system.intr chart"); - error("DISABLED: system.interrupts chart"); - error("DISABLED: hw.intrcnt module"); - return 1; - } else { - for (i = 0; i < nintr; i++) - totalintr += intrcnt[i]; - - // -------------------------------------------------------------------- - - static RRDSET *st_intr = NULL; - static RRDDIM *rd_intr = NULL; - - if (unlikely(!st_intr)) { - st_intr = rrdset_create_localhost( - "system", - "intr", - NULL, - "interrupts", - NULL, - "Total Hardware Interrupts", - "interrupts/s", - "freebsd", - "hw.intrcnt", - 900, - update_every, - RRDSET_TYPE_LINE - ); - rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL); - - rd_intr = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st_intr); - - rrddim_set_by_pointer(st_intr, rd_intr, totalintr); - rrdset_done(st_intr); - - // -------------------------------------------------------------------- - - size_t size; - static int mib_hw_intrnames[2] = {0, 0}; - static char *intrnames = NULL; - - size = nintr * (MAXCOMLEN + 1); - if (unlikely(nintr != old_nintr)) - intrnames = reallocz(intrnames, size); - if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) { - error("DISABLED: system.intr chart"); - error("DISABLED: system.interrupts chart"); - error("DISABLED: hw.intrcnt module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st_interrupts = NULL; - void *p; - - if (unlikely(!st_interrupts)) - st_interrupts = rrdset_create_localhost( - "system", - "interrupts", - NULL, - "interrupts", - NULL, - "System interrupts", - "interrupts/s", - "freebsd", - "hw.intrcnt", - 1000, - update_every, - RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_interrupts); - - for (i = 0; i < nintr; i++) { - p = intrnames + i * (MAXCOMLEN + 1); - if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) { - RRDDIM *rd_interrupts = rrddim_find(st_interrupts, p); - - if (unlikely(!rd_interrupts)) - rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]); - } - } - rrdset_done(st_interrupts); - } - } - - old_nintr = nintr; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.stats.sys.v_intr - -int do_vm_stats_sys_v_intr(int update_every, usec_t dt) { - (void)dt; - static int mib[4] = {0, 0, 0, 0}; - u_int int_number; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_intr", mib, int_number))) { - error("DISABLED: system.dev_intr chart"); - error("DISABLED: vm.stats.sys.v_intr module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "dev_intr", - NULL, - "interrupts", - NULL, - "Device Interrupts", - "interrupts/s", - "freebsd", - "vm.stats.sys.v_intr", - 1000, - update_every, - RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, int_number); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.stats.sys.v_soft - -int 
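/*
 * A sketch (assuming u_long counters, as the deleted code does) of the
 * size-then-fetch idiom above: a sysctl call with a NULL destination
 * only reports the payload size, which both sizes the buffer and yields
 * the interrupt vector count; hw.intrnames is then a parallel array of
 * fixed-width name slots of MAXCOMLEN + 1 bytes each, empty for unused
 * vectors.
 */
#include <sys/param.h>      /* MAXCOMLEN */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    size_t cntsz = 0;
    if (sysctlbyname("hw.intrcnt", NULL, &cntsz, NULL, 0) != 0)     /* size probe */
        return 1;

    unsigned long nintr = cntsz / sizeof(u_long);
    size_t namesz = nintr * (MAXCOMLEN + 1);
    u_long *cnt = malloc(cntsz);
    char *names = malloc(namesz);
    if (cnt == NULL || names == NULL)
        return 1;

    if (sysctlbyname("hw.intrcnt", cnt, &cntsz, NULL, 0) != 0 ||
        sysctlbyname("hw.intrnames", names, &namesz, NULL, 0) != 0)
        return 1;

    for (unsigned long i = 0; i < nintr; i++) {
        const char *name = names + i * (MAXCOMLEN + 1);
        if (cnt[i] != 0 && *name != '\0')
            printf("%-20s %lu\n", name, cnt[i]);
    }
    free(cnt);
    free(names);
    return 0;
}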
do_vm_stats_sys_v_soft(int update_every, usec_t dt) { - (void)dt; - static int mib[4] = {0, 0, 0, 0}; - u_int soft_intr_number; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_soft", mib, soft_intr_number))) { - error("DISABLED: system.dev_intr chart"); - error("DISABLED: vm.stats.sys.v_soft module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "soft_intr", - NULL, - "interrupts", - NULL, - "Software Interrupts", - "interrupts/s", - "freebsd", - "vm.stats.sys.v_soft", - 1100, - update_every, - RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, soft_intr_number); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.stats.sys.v_swtch - -int do_vm_stats_sys_v_swtch(int update_every, usec_t dt) { - (void)dt; - static int mib[4] = {0, 0, 0, 0}; - u_int ctxt_number; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_swtch", mib, ctxt_number))) { - error("DISABLED: system.ctxt chart"); - error("DISABLED: vm.stats.sys.v_swtch module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "ctxt", - NULL, - "processes", - NULL, - "CPU Context Switches", - "context switches/s", - "freebsd", - "vm.stats.sys.v_swtch", - 800, - update_every, - RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, ctxt_number); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.stats.vm.v_forks - -int do_vm_stats_sys_v_forks(int update_every, usec_t dt) { - (void)dt; - static int mib[4] = {0, 0, 0, 0}; - u_int forks_number; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_forks", mib, forks_number))) { - error("DISABLED: system.forks chart"); - error("DISABLED: vm.stats.sys.v_swtch module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "forks", - NULL, - "processes", - NULL, - "Started Processes", - "processes/s", - "freebsd", - "vm.stats.sys.v_swtch", - 700, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, forks_number); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.swap_info - -int do_vm_swap_info(int update_every, usec_t dt) { - (void)dt; - static int mib[3] = {0, 0, 0}; - - if (unlikely(getsysctl_mib("vm.swap_info", mib, 2))) { - error("DISABLED: system.swap chart"); - error("DISABLED: vm.swap_info module"); - return 1; - } else { - int i; - struct xswdev xsw; - struct total_xsw { - collected_number bytes_used; - collected_number bytes_total; - } 
total_xsw = {0, 0}; - - for (i = 0; ; i++) { - size_t size; - - mib[2] = i; - size = sizeof(xsw); - if (unlikely(sysctl(mib, 3, &xsw, &size, NULL, 0) == -1 )) { - if (unlikely(errno != ENOENT)) { - error("FREEBSD: sysctl(%s...) failed: %s", "vm.swap_info", strerror(errno)); - error("DISABLED: system.swap chart"); - error("DISABLED: vm.swap_info module"); - return 1; - } else { - if (unlikely(size != sizeof(xsw))) { - error("FREEBSD: sysctl(%s...) expected %lu, got %lu", "vm.swap_info", (unsigned long)sizeof(xsw), (unsigned long)size); - error("DISABLED: system.swap chart"); - error("DISABLED: vm.swap_info module"); - return 1; - } else break; - } - } - total_xsw.bytes_used += xsw.xsw_used; - total_xsw.bytes_total += xsw.xsw_nblks; - } - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd_free = NULL, *rd_used = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "swap", - NULL, - "swap", - NULL, - "System Swap", - "MB", - "freebsd", - "vm.swap_info", - 201, - update_every, - RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_free, total_xsw.bytes_total - total_xsw.bytes_used); - rrddim_set_by_pointer(st, rd_used, total_xsw.bytes_used); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// system.ram - -int do_system_ram(int update_every, usec_t dt) { - (void)dt; - static int mib_active_count[4] = {0, 0, 0, 0}, mib_inactive_count[4] = {0, 0, 0, 0}, mib_wire_count[4] = {0, 0, 0, 0}, - mib_cache_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0}, mib_free_count[4] = {0, 0, 0, 0}; - vmmeter_t vmmeter_data; - int vfs_bufspace_count; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) || -#if __FreeBSD_version < 1200016 - GETSYSCTL_SIMPLE("vm.stats.vm.v_cache_count", mib_cache_count, vmmeter_data.v_cache_count) || -#endif - GETSYSCTL_SIMPLE("vfs.bufspace", mib_vfs_bufspace, vfs_bufspace_count) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_free_count", mib_free_count, vmmeter_data.v_free_count))) { - error("DISABLED: system.ram chart"); - error("DISABLED: system.ram module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, - *rd_wired = NULL, *rd_cache = NULL, *rd_buffers = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "ram", - NULL, - "ram", - NULL, - "System RAM", - "MB", - "freebsd", - "system.ram", - 200, - update_every, - RRDSET_TYPE_STACKED - ); - - rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - rd_active = rrddim_add(st, "active", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - rd_inactive = rrddim_add(st, "inactive", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - rd_wired = rrddim_add(st, "wired", NULL, 
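/*
 * A sketch of the per-device swap walk above: vm.swap_info is an
 * indexed node, so the third MIB component selects the device and the
 * kernel answers ENOENT once the index runs past the last one;
 * xsw_used and xsw_nblks are counted in pages.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h>    /* struct xswdev */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
    int mib[3];
    size_t miblen = 2;
    if (sysctlnametomib("vm.swap_info", mib, &miblen) != 0)
        return 1;

    long pagesize = sysconf(_SC_PAGESIZE);
    unsigned long long used = 0, total = 0;

    for (int i = 0; ; i++) {
        struct xswdev xsw;
        size_t len = sizeof(xsw);

        mib[2] = i;                                 /* device index */
        if (sysctl(mib, 3, &xsw, &len, NULL, 0) == -1) {
            if (errno == ENOENT)
                break;                              /* walked past the last device */
            return 1;
        }
        used  += (unsigned long long)xsw.xsw_used  * (unsigned long long)pagesize;
        total += (unsigned long long)xsw.xsw_nblks * (unsigned long long)pagesize;
    }
    printf("swap: %llu of %llu bytes used\n", used, total);
    return 0;
}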
system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); -#if __FreeBSD_version < 1200016 - rd_cache = rrddim_add(st, "cache", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); -#endif - rd_buffers = rrddim_add(st, "buffers", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_free, vmmeter_data.v_free_count); - rrddim_set_by_pointer(st, rd_active, vmmeter_data.v_active_count); - rrddim_set_by_pointer(st, rd_inactive, vmmeter_data.v_inactive_count); - rrddim_set_by_pointer(st, rd_wired, vmmeter_data.v_wire_count); -#if __FreeBSD_version < 1200016 - rrddim_set_by_pointer(st, rd_cache, vmmeter_data.v_cache_count); -#endif - rrddim_set_by_pointer(st, rd_buffers, vfs_bufspace_count); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.stats.vm.v_swappgs - -int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) { - (void)dt; - static int mib_swappgsin[4] = {0, 0, 0, 0}, mib_swappgsout[4] = {0, 0, 0, 0}; - vmmeter_t vmmeter_data; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsin", mib_swappgsin, vmmeter_data.v_swappgsin) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsout", mib_swappgsout, vmmeter_data.v_swappgsout))) { - error("DISABLED: system.swapio chart"); - error("DISABLED: vm.stats.vm.v_swappgs module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "swapio", - NULL, - "swap", - NULL, - "Swap I/O", - "kilobytes/s", - "freebsd", - "vm.stats.vm.v_swappgs", - 250, - update_every, - RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st, "in", NULL, system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "out", NULL, -system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, vmmeter_data.v_swappgsin); - rrddim_set_by_pointer(st, rd_out, vmmeter_data.v_swappgsout); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// vm.stats.vm.v_pgfaults - -int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) { - (void)dt; - static int mib_vm_faults[4] = {0, 0, 0, 0}, mib_io_faults[4] = {0, 0, 0, 0}, mib_cow_faults[4] = {0, 0, 0, 0}, - mib_cow_optim[4] = {0, 0, 0, 0}, mib_intrans[4] = {0, 0, 0, 0}; - vmmeter_t vmmeter_data; - - if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_vm_faults", mib_vm_faults, vmmeter_data.v_vm_faults) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_io_faults", mib_io_faults, vmmeter_data.v_io_faults) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_faults", mib_cow_faults, vmmeter_data.v_cow_faults) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_optim", mib_cow_optim, vmmeter_data.v_cow_optim) || - GETSYSCTL_SIMPLE("vm.stats.vm.v_intrans", mib_intrans, vmmeter_data.v_intrans))) { - error("DISABLED: mem.pgfaults chart"); - error("DISABLED: vm.stats.vm.v_pgfaults module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd_memory = NULL, *rd_io_requiring = NULL, *rd_cow = NULL, - *rd_cow_optimized = NULL, *rd_in_transit = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "mem", - "pgfaults", - NULL, - "system", - 
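/*
 * A note on the dimension scaling above: the vm.stats.vm.* values are
 * page counts, so each dimension multiplies by the page size and
 * divides by MEGA_FACTOR (presumably 1048576, judging by the name) to
 * plot megabytes. A one-line sketch of the same conversion:
 */
#include <unistd.h>
#include <stdio.h>

int main(void) {
    unsigned long pages = 262144;   /* e.g. a v_free_count sample */
    double mb = (double)pages * (double)sysconf(_SC_PAGESIZE) / (1024.0 * 1024.0);
    printf("%lu pages = %.1f MB\n", pages, mb);
    return 0;
}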
NULL, - "Memory Page Faults", - "page faults/s", - "freebsd", - "vm.stats.vm.v_pgfaults", - NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_cow = rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_cow_optimized = rrddim_add(st, "cow_optimized", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_transit = rrddim_add(st, "in_transit", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_memory, vmmeter_data.v_vm_faults); - rrddim_set_by_pointer(st, rd_io_requiring, vmmeter_data.v_io_faults); - rrddim_set_by_pointer(st, rd_cow, vmmeter_data.v_cow_faults); - rrddim_set_by_pointer(st, rd_cow_optimized, vmmeter_data.v_cow_optim); - rrddim_set_by_pointer(st, rd_in_transit, vmmeter_data.v_intrans); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// kern.ipc.sem - -int do_kern_ipc_sem(int update_every, usec_t dt) { - (void)dt; - static int mib_semmni[3] = {0, 0, 0}, mib_sema[3] = {0, 0, 0}; - struct ipc_sem { - int semmni; - collected_number sets; - collected_number semaphores; - } ipc_sem = {0, 0, 0}; - - if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.semmni", mib_semmni, ipc_sem.semmni))) { - error("DISABLED: system.ipc_semaphores chart"); - error("DISABLED: system.ipc_semaphore_arrays chart"); - error("DISABLED: kern.ipc.sem module"); - return 1; - } else { - static struct semid_kernel *ipc_sem_data = NULL; - static int old_semmni = 0; - - if (unlikely(ipc_sem.semmni != old_semmni)) { - ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni); - old_semmni = ipc_sem.semmni; - } - if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) { - error("DISABLED: system.ipc_semaphores chart"); - error("DISABLED: system.ipc_semaphore_arrays chart"); - error("DISABLED: kern.ipc.sem module"); - return 1; - } else { - int i; - - for (i = 0; i < ipc_sem.semmni; i++) { - if (unlikely(ipc_sem_data[i].u.sem_perm.mode & SEM_ALLOC)) { - ipc_sem.sets += 1; - ipc_sem.semaphores += ipc_sem_data[i].u.sem_nsems; - } - } - - // -------------------------------------------------------------------- - - static RRDSET *st_semaphores = NULL, *st_semaphore_arrays = NULL; - static RRDDIM *rd_semaphores = NULL, *rd_semaphore_arrays = NULL; - - if (unlikely(!st_semaphores)) { - st_semaphores = rrdset_create_localhost( - "system", - "ipc_semaphores", - NULL, - "ipc semaphores", - NULL, - "IPC Semaphores", - "semaphores", - "freebsd", - "kern.ipc.sem", - 1000, - update_every, - RRDSET_TYPE_AREA - ); - - rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_semaphores); - - rrddim_set_by_pointer(st_semaphores, rd_semaphores, ipc_sem.semaphores); - rrdset_done(st_semaphores); - - // -------------------------------------------------------------------- - - if (unlikely(!st_semaphore_arrays)) { - st_semaphore_arrays = rrdset_create_localhost( - "system", - "ipc_semaphore_arrays", - NULL, - "ipc semaphores", - NULL, - "IPC Semaphore Arrays", - "arrays", - "freebsd", - "kern.ipc.sem", - 1000, - update_every, - RRDSET_TYPE_AREA - ); - - rd_semaphore_arrays = 
rrddim_add(st_semaphore_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_semaphore_arrays); - - rrddim_set_by_pointer(st_semaphore_arrays, rd_semaphore_arrays, ipc_sem.sets); - rrdset_done(st_semaphore_arrays); - } - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// kern.ipc.shm - -int do_kern_ipc_shm(int update_every, usec_t dt) { - (void)dt; - static int mib_shmmni[3] = {0, 0, 0}, mib_shmsegs[3] = {0, 0, 0}; - struct ipc_shm { - u_long shmmni; - collected_number segs; - collected_number segsize; - } ipc_shm = {0, 0, 0}; - - if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.shmmni", mib_shmmni, ipc_shm.shmmni))) { - error("DISABLED: system.ipc_shared_mem_segs chart"); - error("DISABLED: system.ipc_shared_mem_size chart"); - error("DISABLED: kern.ipc.shmmodule"); - return 1; - } else { - static struct shmid_kernel *ipc_shm_data = NULL; - static u_long old_shmmni = 0; - - if (unlikely(ipc_shm.shmmni != old_shmmni)) { - ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni); - old_shmmni = ipc_shm.shmmni; - } - if (unlikely( - GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) { - error("DISABLED: system.ipc_shared_mem_segs chart"); - error("DISABLED: system.ipc_shared_mem_size chart"); - error("DISABLED: kern.ipc.shmmodule"); - return 1; - } else { - unsigned long i; - - for (i = 0; i < ipc_shm.shmmni; i++) { - if (unlikely(ipc_shm_data[i].u.shm_perm.mode & 0x0800)) { - ipc_shm.segs += 1; - ipc_shm.segsize += ipc_shm_data[i].u.shm_segsz; - } - } - - // -------------------------------------------------------------------- - - static RRDSET *st_segs = NULL, *st_size = NULL; - static RRDDIM *rd_segments = NULL, *rd_allocated = NULL; - - if (unlikely(!st_segs)) { - st_segs = rrdset_create_localhost( - "system", - "ipc_shared_mem_segs", - NULL, - "ipc shared memory", - NULL, - "IPC Shared Memory Segments", - "segments", - "freebsd", - "kern.ipc.shm", - 1000, - update_every, - RRDSET_TYPE_AREA - ); - - rd_segments = rrddim_add(st_segs, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_segs); - - rrddim_set_by_pointer(st_segs, rd_segments, ipc_shm.segs); - rrdset_done(st_segs); - - // -------------------------------------------------------------------- - - if (unlikely(!st_size)) { - st_size = rrdset_create_localhost( - "system", - "ipc_shared_mem_size", - NULL, - "ipc shared memory", - NULL, - "IPC Shared Memory Segments Size", - "kilobytes", - "freebsd", - "kern.ipc.shm", - 1000, - update_every, - RRDSET_TYPE_AREA - ); - - rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, KILO_FACTOR, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_size); - - rrddim_set_by_pointer(st_size, rd_allocated, ipc_shm.segsize); - rrdset_done(st_size); - } - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// kern.ipc.msq - -int do_kern_ipc_msq(int update_every, usec_t dt) { - (void)dt; - static int mib_msgmni[3] = {0, 0, 0}, mib_msqids[3] = {0, 0, 0}; - struct ipc_msq { - int msgmni; - collected_number queues; - collected_number messages; - collected_number usedsize; - collected_number allocsize; - } ipc_msq = {0, 0, 0, 0, 0}; - - if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.msgmni", mib_msgmni, ipc_msq.msgmni))) { - error("DISABLED: system.ipc_msq_queues chart"); - error("DISABLED: 
system.ipc_msq_messages chart"); - error("DISABLED: system.ipc_msq_size chart"); - error("DISABLED: kern.ipc.msg module"); - return 1; - } else { - static struct msqid_kernel *ipc_msq_data = NULL; - static int old_msgmni = 0; - - if (unlikely(ipc_msq.msgmni != old_msgmni)) { - ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni); - old_msgmni = ipc_msq.msgmni; - } - if (unlikely( - GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) { - error("DISABLED: system.ipc_msq_queues chart"); - error("DISABLED: system.ipc_msq_messages chart"); - error("DISABLED: system.ipc_msq_size chart"); - error("DISABLED: kern.ipc.msg module"); - return 1; - } else { - int i; - - for (i = 0; i < ipc_msq.msgmni; i++) { - if (unlikely(ipc_msq_data[i].u.msg_qbytes != 0)) { - ipc_msq.queues += 1; - ipc_msq.messages += ipc_msq_data[i].u.msg_qnum; - ipc_msq.usedsize += ipc_msq_data[i].u.msg_cbytes; - ipc_msq.allocsize += ipc_msq_data[i].u.msg_qbytes; - } - } - - // -------------------------------------------------------------------- - - static RRDSET *st_queues = NULL, *st_messages = NULL, *st_size = NULL; - static RRDDIM *rd_queues = NULL, *rd_messages = NULL, *rd_allocated = NULL, *rd_used = NULL; - - if (unlikely(!st_queues)) { - st_queues = rrdset_create_localhost( - "system", - "ipc_msq_queues", - NULL, - "ipc message queues", - NULL, - "Number of IPC Message Queues", - "queues", - "freebsd", - "kern.ipc.msq", - 990, - update_every, - RRDSET_TYPE_AREA - ); - - rd_queues = rrddim_add(st_queues, "queues", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_queues); - - rrddim_set_by_pointer(st_queues, rd_queues, ipc_msq.queues); - rrdset_done(st_queues); - - // -------------------------------------------------------------------- - - if (unlikely(!st_messages)) { - st_messages = rrdset_create_localhost( - "system", - "ipc_msq_messages", - NULL, - "ipc message queues", - NULL, - "Number of Messages in IPC Message Queues", - "messages", - "freebsd", - "kern.ipc.msq", - 1000, - update_every, - RRDSET_TYPE_AREA - ); - - rd_messages = rrddim_add(st_messages, "messages", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_messages); - - rrddim_set_by_pointer(st_messages, rd_messages, ipc_msq.messages); - rrdset_done(st_messages); - - // -------------------------------------------------------------------- - - if (unlikely(!st_size)) { - st_size = rrdset_create_localhost( - "system", - "ipc_msq_size", - NULL, - "ipc message queues", - NULL, - "Size of IPC Message Queues", - "bytes", - "freebsd", - "kern.ipc.msq", - 1100, - update_every, - RRDSET_TYPE_LINE - ); - - rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_size, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_size); - - rrddim_set_by_pointer(st_size, rd_allocated, ipc_msq.allocsize); - rrddim_set_by_pointer(st_size, rd_used, ipc_msq.usedsize); - rrdset_done(st_size); - } - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// uptime - -int do_uptime(int update_every, usec_t dt) { - (void)dt; - struct timespec up_time; - - clock_gettime(CLOCK_UPTIME, &up_time); - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "uptime", - NULL, - 
"uptime", - NULL, - "System Uptime", - "seconds", - "freebsd", - "uptime", - 1000, - update_every, - RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, up_time.tv_sec); - rrdset_done(st); - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.isr - -int do_net_isr(int update_every, usec_t dt) { - (void)dt; - static int do_netisr = -1, do_netisr_per_core = -1; - - if (unlikely(do_netisr == -1)) { - do_netisr = config_get_boolean("plugin:freebsd:net.isr", "netisr", 1); - do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1); - } - - static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0}; - int common_error = 0; - size_t netisr_workstream_size = 0, netisr_work_size = 0; - unsigned long num_netisr_workstreams = 0, num_netisr_works = 0; - static struct sysctl_netisr_workstream *netisr_workstream = NULL; - static struct sysctl_netisr_work *netisr_work = NULL; - static struct netisr_stats { - collected_number dispatched; - collected_number hybrid_dispatched; - collected_number qdrops; - collected_number queued; - } *netisr_stats = NULL; - - if (likely(do_netisr || do_netisr_per_core)) { - if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) { - common_error = 1; - } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) { - common_error = 1; - } else { - static size_t old_netisr_workstream_size = 0; - - num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream); - if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) { - netisr_workstream = reallocz(netisr_workstream, - num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)); - old_netisr_workstream_size = netisr_workstream_size; - } - if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream, - num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){ - common_error = 1; - } else { - static size_t old_netisr_work_size = 0; - - num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work); - if (unlikely(netisr_work_size != old_netisr_work_size)) { - netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work)); - old_netisr_work_size = netisr_work_size; - } - if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work, - num_netisr_works * sizeof(struct sysctl_netisr_work)))){ - common_error = 1; - } - } - } - if (unlikely(common_error)) { - do_netisr = 0; - error("DISABLED: system.softnet_stat chart"); - do_netisr_per_core = 0; - error("DISABLED: system.cpuX_softnet_stat chart"); - common_error = 0; - error("DISABLED: net.isr module"); - return 1; - } else { - unsigned long i, n; - int j; - static int old_number_of_cpus = 0; - - if (unlikely(number_of_cpus != old_number_of_cpus)) { - netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats)); - old_number_of_cpus = number_of_cpus; - } - memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats)); - for (i = 0; i < num_netisr_workstreams; i++) { - for (n = 0; n < num_netisr_works; n++) { - if (netisr_workstream[i].snws_wsid == netisr_work[n].snw_wsid) { - netisr_stats[netisr_workstream[i].snws_cpu].dispatched += netisr_work[n].snw_dispatched; - netisr_stats[netisr_workstream[i].snws_cpu].hybrid_dispatched += 
netisr_work[n].snw_hybrid_dispatched; - netisr_stats[netisr_workstream[i].snws_cpu].qdrops += netisr_work[n].snw_qdrops; - netisr_stats[netisr_workstream[i].snws_cpu].queued += netisr_work[n].snw_queued; - } - } - } - for (j = 0; j < number_of_cpus; j++) { - netisr_stats[number_of_cpus].dispatched += netisr_stats[j].dispatched; - netisr_stats[number_of_cpus].hybrid_dispatched += netisr_stats[j].hybrid_dispatched; - netisr_stats[number_of_cpus].qdrops += netisr_stats[j].qdrops; - netisr_stats[number_of_cpus].queued += netisr_stats[j].queued; - } - } - } else { - error("DISABLED: net.isr module"); - return 1; - } - - // -------------------------------------------------------------------- - - if (likely(do_netisr)) { - static RRDSET *st = NULL; - static RRDDIM *rd_dispatched = NULL, *rd_hybrid_dispatched = NULL, *rd_qdrops = NULL, *rd_queued = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "softnet_stat", - NULL, - "softnet_stat", - NULL, - "System softnet_stat", - "events/s", - "freebsd", - "net.isr", - 955, - update_every, - RRDSET_TYPE_LINE - ); - - rd_dispatched = rrddim_add(st, "dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_hybrid_dispatched = rrddim_add(st, "hybrid_dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_qdrops = rrddim_add(st, "qdrops", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_queued = rrddim_add(st, "queued", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_dispatched, netisr_stats[number_of_cpus].dispatched); - rrddim_set_by_pointer(st, rd_hybrid_dispatched, netisr_stats[number_of_cpus].hybrid_dispatched); - rrddim_set_by_pointer(st, rd_qdrops, netisr_stats[number_of_cpus].qdrops); - rrddim_set_by_pointer(st, rd_queued, netisr_stats[number_of_cpus].queued); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_netisr_per_core)) { - static struct softnet_chart { - char netisr_cpuid[MAX_INT_DIGITS + 17]; - RRDSET *st; - RRDDIM *rd_dispatched; - RRDDIM *rd_hybrid_dispatched; - RRDDIM *rd_qdrops; - RRDDIM *rd_queued; - } *all_softnet_charts = NULL; - static int old_number_of_cpus = 0; - int i; - - if(unlikely(number_of_cpus > old_number_of_cpus)) { - all_softnet_charts = reallocz(all_softnet_charts, sizeof(struct softnet_chart) * number_of_cpus); - memset(&all_softnet_charts[old_number_of_cpus], 0, sizeof(struct softnet_chart) * (number_of_cpus - old_number_of_cpus)); - old_number_of_cpus = number_of_cpus; - } - - for (i = 0; i < number_of_cpus ;i++) { - snprintfz(all_softnet_charts[i].netisr_cpuid, MAX_INT_DIGITS + 17, "cpu%d_softnet_stat", i); - - if (unlikely(!all_softnet_charts[i].st)) { - all_softnet_charts[i].st = rrdset_create_localhost( - "cpu", - all_softnet_charts[i].netisr_cpuid, - NULL, - "softnet_stat", - NULL, - "Per CPU netisr statistics", - "events/s", - "freebsd", - "net.isr", - 1101 + i, - update_every, - RRDSET_TYPE_LINE - ); - - all_softnet_charts[i].rd_dispatched = rrddim_add(all_softnet_charts[i].st, "dispatched", - NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - all_softnet_charts[i].rd_hybrid_dispatched = rrddim_add(all_softnet_charts[i].st, "hybrid_dispatched", - NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - all_softnet_charts[i].rd_qdrops = rrddim_add(all_softnet_charts[i].st, "qdrops", - NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - all_softnet_charts[i].rd_queued = rrddim_add(all_softnet_charts[i].st, "queued", - NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else 
rrdset_next(all_softnet_charts[i].st); - - rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_dispatched, - netisr_stats[i].dispatched); - rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_hybrid_dispatched, - netisr_stats[i].hybrid_dispatched); - rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_qdrops, - netisr_stats[i].qdrops); - rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_queued, - netisr_stats[i].queued); - rrdset_done(all_softnet_charts[i].st); - } - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet.tcp.states - -int do_net_inet_tcp_states(int update_every, usec_t dt) { - (void)dt; - static int mib[4] = {0, 0, 0, 0}; - uint64_t tcps_states[TCP_NSTATES]; - - // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html - if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.states", mib, tcps_states))) { - error("DISABLED: ipv4.tcpsock chart"); - error("DISABLED: net.inet.tcp.states module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcpsock", - NULL, - "tcp", - NULL, - "IPv4 TCP Connections", - "active connections", - "freebsd", - "net.inet.tcp.states", - 2500, - update_every, - RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd, tcps_states[TCPS_ESTABLISHED]); - rrdset_done(st); - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet.tcp.stats - -int do_net_inet_tcp_stats(int update_every, usec_t dt) { - (void)dt; - static int do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcpext_connaborts = -1, do_tcpext_ofo = -1, - do_tcpext_syncookies = -1, do_tcpext_listen = -1, do_ecn = -1; - - if (unlikely(do_tcp_packets == -1)) { - do_tcp_packets = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP packets", 1); - do_tcp_errors = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP errors", 1); - do_tcp_handshake = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP handshake issues", 1); - do_tcpext_connaborts = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP connection aborts", - CONFIG_BOOLEAN_AUTO); - do_tcpext_ofo = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP out-of-order queue", - CONFIG_BOOLEAN_AUTO); - do_tcpext_syncookies = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP SYN cookies", - CONFIG_BOOLEAN_AUTO); - do_tcpext_listen = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP listen issues", - CONFIG_BOOLEAN_AUTO); - do_ecn = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "ECN packets", - CONFIG_BOOLEAN_AUTO); - } - - // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html - if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || - do_tcpext_syncookies || do_tcpext_listen || do_ecn)) { - static int mib[4] = {0, 0, 0, 0}; - struct tcpstat tcpstat; - - if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.stats", mib, tcpstat))) { - do_tcp_packets = 0; - error("DISABLED: ipv4.tcppackets chart"); 
- do_tcp_errors = 0; - error("DISABLED: ipv4.tcperrors chart"); - do_tcp_handshake = 0; - error("DISABLED: ipv4.tcphandshake chart"); - do_tcpext_connaborts = 0; - error("DISABLED: ipv4.tcpconnaborts chart"); - do_tcpext_ofo = 0; - error("DISABLED: ipv4.tcpofo chart"); - do_tcpext_syncookies = 0; - error("DISABLED: ipv4.tcpsyncookies chart"); - do_tcpext_listen = 0; - error("DISABLED: ipv4.tcplistenissues chart"); - do_ecn = 0; - error("DISABLED: ipv4.ecnpkts chart"); - error("DISABLED: net.inet.tcp.stats module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - if (likely(do_tcp_packets)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in_segs = NULL, *rd_out_segs = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcppackets", - NULL, - "tcp", - NULL, - "IPv4 TCP Packets", - "packets/s", - "freebsd", - "net.inet.tcp.stats", - 2600, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in_segs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_segs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_segs, tcpstat.tcps_rcvtotal); - rrddim_set_by_pointer(st, rd_out_segs, tcpstat.tcps_sndtotal); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_tcp_errors)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in_errs = NULL, *rd_in_csum_errs = NULL, *rd_retrans_segs = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcperrors", - NULL, - "tcp", - NULL, - "IPv4 TCP Errors", - "packets/s", - "freebsd", - "net.inet.tcp.stats", - 2700, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - -#if __FreeBSD__ >= 11 - rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvreassfull + - tcpstat.tcps_rcvshort); -#else - rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort); -#endif - rrddim_set_by_pointer(st, rd_in_csum_errs, tcpstat.tcps_rcvbadsum); - rrddim_set_by_pointer(st, rd_retrans_segs, tcpstat.tcps_sndrexmitpack); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_tcp_handshake)) { - static RRDSET *st = NULL; - static RRDDIM *rd_estab_resets = NULL, *rd_active_opens = NULL, *rd_passive_opens = NULL, - *rd_attempt_fails = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcphandshake", - NULL, - "tcp", - NULL, - "IPv4 TCP Handshake Issues", - "events/s", - "freebsd", - "net.inet.tcp.stats", - 2900, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_attempt_fails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_estab_resets, tcpstat.tcps_drops); - 
rrddim_set_by_pointer(st, rd_active_opens, tcpstat.tcps_connattempt); - rrddim_set_by_pointer(st, rd_passive_opens, tcpstat.tcps_accepts); - rrddim_set_by_pointer(st, rd_attempt_fails, tcpstat.tcps_conndrops); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop || tcpstat.tcps_finwait2_drops))) { - do_tcpext_connaborts = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_on_data = NULL, *rd_on_close = NULL, *rd_on_memory = NULL, - *rd_on_timeout = NULL, *rd_on_linger = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcpconnaborts", - NULL, - "tcp", - NULL, - "TCP Connection Aborts", - "connections/s", - "freebsd", - "net.inet.tcp.stats", - 3010, - update_every, - RRDSET_TYPE_LINE - ); - - rd_on_data = rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_on_close = rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_on_memory = rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_on_timeout = rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_on_linger = rrddim_add(st, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_on_data, tcpstat.tcps_rcvpackafterwin); - rrddim_set_by_pointer(st, rd_on_close, tcpstat.tcps_rcvafterclose); - rrddim_set_by_pointer(st, rd_on_memory, tcpstat.tcps_rcvmemdrop); - rrddim_set_by_pointer(st, rd_on_timeout, tcpstat.tcps_persistdrop); - rrddim_set_by_pointer(st, rd_on_linger, tcpstat.tcps_finwait2_drops); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { - do_tcpext_ofo = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_ofo_queue = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcpofo", - NULL, - "tcp", - NULL, - "TCP Out-Of-Order Queue", - "packets/s", - "freebsd", - "net.inet.tcp.stats", - 3050, - update_every, - RRDSET_TYPE_LINE - ); - - rd_ofo_queue = rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ofo_queue, tcpstat.tcps_rcvoopack); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { - do_tcpext_syncookies = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_recv = NULL, *rd_send = NULL, *rd_failed = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "tcpsyncookies", - NULL, - "tcp", - NULL, - "TCP SYN Cookies", - "packets/s", - "freebsd", - "net.inet.tcp.stats", - 3100, - update_every, - RRDSET_TYPE_LINE - ); - - rd_recv = rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_send = rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, 
RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_recv, tcpstat.tcps_sc_recvcookie); - rrddim_set_by_pointer(st, rd_send, tcpstat.tcps_sc_sendcookie); - rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) { - do_tcpext_listen = CONFIG_BOOLEAN_YES; - - static RRDSET *st_listen = NULL; - static RRDDIM *rd_overflows = NULL; - - if(unlikely(!st_listen)) { - - st_listen = rrdset_create_localhost( - "ipv4", - "tcplistenissues", - NULL, - "tcp", - NULL, - "TCP Listen Socket Issues", - "packets/s", - "freebsd", - "net.inet.tcp.stats", - 3015, - update_every, - RRDSET_TYPE_LINE - ); - - rd_overflows = rrddim_add(st_listen, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_listen); - - rrddim_set_by_pointer(st_listen, rd_overflows, tcpstat.tcps_listendrop); - - rrdset_done(st_listen); - } - - // -------------------------------------------------------------------- - - if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_ce || tcpstat.tcps_ecn_ect0 || tcpstat.tcps_ecn_ect1))) { - do_ecn = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_ce = NULL, *rd_no_ect = NULL, *rd_ect0 = NULL, *rd_ect1 = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "ecnpkts", - NULL, - "ecn", - NULL, - "IPv4 ECN Statistics", - "packets/s", - "freebsd", - "net.inet.tcp.stats", - 8700, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ce = rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_no_ect = rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ect0 = rrddim_add(st, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ect1 = rrddim_add(st, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ce, tcpstat.tcps_ecn_ce); - rrddim_set_by_pointer(st, rd_no_ect, tcpstat.tcps_ecn_ce - (tcpstat.tcps_ecn_ect0 + - tcpstat.tcps_ecn_ect1)); - rrddim_set_by_pointer(st, rd_ect0, tcpstat.tcps_ecn_ect0); - rrddim_set_by_pointer(st, rd_ect1, tcpstat.tcps_ecn_ect1); - rrdset_done(st); - } - - } - } else { - error("DISABLED: net.inet.tcp.stats module"); - return 1; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet.udp.stats - -int do_net_inet_udp_stats(int update_every, usec_t dt) { - (void)dt; - static int do_udp_packets = -1, do_udp_errors = -1; - - if (unlikely(do_udp_packets == -1)) { - do_udp_packets = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP packets", 1); - do_udp_errors = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP errors", 1); - } - - // see http://net-snmp.sourceforge.net/docs/mibs/udp.html - if (likely(do_udp_packets || do_udp_errors)) { - static int mib[4] = {0, 0, 0, 0}; - struct udpstat udpstat; - - if (unlikely(GETSYSCTL_SIMPLE("net.inet.udp.stats", mib, udpstat))) { - do_udp_packets = 0; - error("DISABLED: ipv4.udppackets chart"); - do_udp_errors = 0; - error("DISABLED: ipv4.udperrors chart"); - error("DISABLED: net.inet.udp.stats module"); - return 1; - } else { - - // 
-------------------------------------------------------------------- - - if (likely(do_udp_packets)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "udppackets", - NULL, - "udp", - NULL, - "IPv4 UDP Packets", - "packets/s", - "freebsd", - "net.inet.udp.stats", - 2601, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, udpstat.udps_ipackets); - rrddim_set_by_pointer(st, rd_out, udpstat.udps_opackets); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_udp_errors)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in_errors = NULL, *rd_no_ports = NULL, *rd_recv_buf_errors = NULL, - *rd_in_csum_errors = NULL, *rd_ignored_multi = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "udperrors", - NULL, - "udp", - NULL, - "IPv4 UDP Errors", - "events/s", - "freebsd", - "net.inet.udp.stats", - 2701, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ignored_multi = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_errors, udpstat.udps_hdrops + udpstat.udps_badlen); - rrddim_set_by_pointer(st, rd_no_ports, udpstat.udps_noport); - rrddim_set_by_pointer(st, rd_recv_buf_errors, udpstat.udps_fullsock); - rrddim_set_by_pointer(st, rd_in_csum_errors, udpstat.udps_badsum + udpstat.udps_nosum); - rrddim_set_by_pointer(st, rd_ignored_multi, udpstat.udps_filtermcast); - rrdset_done(st); - } - } - } else { - error("DISABLED: net.inet.udp.stats module"); - return 1; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet.icmp.stats - -int do_net_inet_icmp_stats(int update_every, usec_t dt) { - (void)dt; - static int do_icmp_packets = -1, do_icmp_errors = -1, do_icmpmsg = -1; - - if (unlikely(do_icmp_packets == -1)) { - do_icmp_packets = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP packets", 1); - do_icmp_errors = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP errors", 1); - do_icmpmsg = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP messages", 1); - } - - if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) { - static int mib[4] = {0, 0, 0, 0}; - struct icmpstat icmpstat; - int i; - struct icmp_total { - u_long msgs_in; - u_long msgs_out; - } icmp_total = {0, 0}; - - if (unlikely(GETSYSCTL_SIMPLE("net.inet.icmp.stats", mib, icmpstat))) { - do_icmp_packets = 0; - error("DISABLED: ipv4.icmp chart"); - do_icmp_errors = 0; - error("DISABLED: ipv4.icmp_errors chart"); - do_icmpmsg = 0; - error("DISABLED: ipv4.icmpmsg chart"); - error("DISABLED: net.inet.icmp.stats module"); - return 1; - } else { - for (i = 0; i <= ICMP_MAXTYPE; i++) { - icmp_total.msgs_in += 
icmpstat.icps_inhist[i]; - icmp_total.msgs_out += icmpstat.icps_outhist[i]; - } - icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort; - - // -------------------------------------------------------------------- - - if (likely(do_icmp_packets)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "icmp" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Packets" - , "packets/s" - , "freebsd" - , "net.inet.icmp.stats" - , 2602 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_in = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, icmp_total.msgs_in); - rrddim_set_by_pointer(st, rd_out, icmp_total.msgs_out); - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_icmp_errors)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_csum = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "icmp_errors" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Errors" - , "packets/s" - , "freebsd" - , "net.inet.icmp.stats" - , 2603 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_in = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_csum = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, icmpstat.icps_badcode + icmpstat.icps_badlen + - icmpstat.icps_checksum + icmpstat.icps_tooshort); - rrddim_set_by_pointer(st, rd_out, icmpstat.icps_error); - rrddim_set_by_pointer(st, rd_in_csum, icmpstat.icps_checksum); - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_icmpmsg)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in_reps = NULL, *rd_out_reps = NULL, *rd_in = NULL, *rd_out = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "icmpmsg" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Messages" - , "packets/s" - , "freebsd" - , "net.inet.icmp.stats" - , 2604 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_in_reps = rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_reps = rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_reps, icmpstat.icps_inhist[ICMP_ECHOREPLY]); - rrddim_set_by_pointer(st, rd_out_reps, icmpstat.icps_outhist[ICMP_ECHOREPLY]); - rrddim_set_by_pointer(st, rd_in, icmpstat.icps_inhist[ICMP_ECHO]); - rrddim_set_by_pointer(st, rd_out, icmpstat.icps_outhist[ICMP_ECHO]); - - rrdset_done(st); - } - } - } else { - error("DISABLED: net.inet.icmp.stats module"); - return 1; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet.ip.stats - -int do_net_inet_ip_stats(int update_every, usec_t dt) { - (void)dt; - static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1; - - if (unlikely(do_ip_packets == -1)) 
{ - do_ip_packets = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 packets", 1); - do_ip_fragsout = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments sent", 1); - do_ip_fragsin = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments assembly", 1); - do_ip_errors = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 errors", 1); - } - - // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html - if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) { - static int mib[4] = {0, 0, 0, 0}; - struct ipstat ipstat; - - if (unlikely(GETSYSCTL_SIMPLE("net.inet.ip.stats", mib, ipstat))) { - do_ip_packets = 0; - error("DISABLED: ipv4.packets chart"); - do_ip_fragsout = 0; - error("DISABLED: ipv4.fragsout chart"); - do_ip_fragsin = 0; - error("DISABLED: ipv4.fragsin chart"); - do_ip_errors = 0; - error("DISABLED: ipv4.errors chart"); - error("DISABLED: net.inet.ip.stats module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - if (likely(do_ip_packets)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in_receives = NULL, *rd_out_requests = NULL, *rd_forward_datagrams = NULL, - *rd_in_delivers = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "packets", - NULL, - "packets", - NULL, - "IPv4 Packets", - "packets/s", - "freebsd", - "net.inet.ip.stats", - 3000, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in_receives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_requests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_forward_datagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_delivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_receives, ipstat.ips_total); - rrddim_set_by_pointer(st, rd_out_requests, ipstat.ips_localout); - rrddim_set_by_pointer(st, rd_forward_datagrams, ipstat.ips_forward); - rrddim_set_by_pointer(st, rd_in_delivers, ipstat.ips_delivered); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_ip_fragsout)) { - static RRDSET *st = NULL; - static RRDDIM *rd_ok = NULL, *rd_fails = NULL, *rd_created = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "fragsout", - NULL, - "fragments", - NULL, - "IPv4 Fragments Sent", - "packets/s", - "freebsd", - "net.inet.ip.stats", - 3010, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragmented); - rrddim_set_by_pointer(st, rd_fails, ipstat.ips_cantfrag); - rrddim_set_by_pointer(st, rd_created, ipstat.ips_ofragments); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_ip_fragsin)) { - static RRDSET *st = NULL; - static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "fragsin", - NULL, - "fragments", - NULL, - "IPv4 Fragments Reassembly", - "packets/s", - "freebsd", - 
"net.inet.ip.stats", - 3011, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragments); - rrddim_set_by_pointer(st, rd_failed, ipstat.ips_fragdropped); - rrddim_set_by_pointer(st, rd_all, ipstat.ips_reassembled); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_ip_errors)) { - static RRDSET *st = NULL; - static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL, - *rd_in_hdr_errors = NULL, *rd_out_no_routes = NULL, - *rd_in_addr_errors = NULL, *rd_in_unknown_protos = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4", - "errors", - NULL, - "errors", - NULL, - "IPv4 Errors", - "packets/s", - "freebsd", - "net.inet.ip.stats", - 3002, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_unknown_protos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_discards, ipstat.ips_badsum + ipstat.ips_tooshort + - ipstat.ips_toosmall + ipstat.ips_toolong); - rrddim_set_by_pointer(st, rd_out_discards, ipstat.ips_odropped); - rrddim_set_by_pointer(st, rd_in_hdr_errors, ipstat.ips_badhlen + ipstat.ips_badlen + - ipstat.ips_badoptions + ipstat.ips_badvers); - rrddim_set_by_pointer(st, rd_out_no_routes, ipstat.ips_noroute); - rrddim_set_by_pointer(st, rd_in_addr_errors, ipstat.ips_badaddr); - rrddim_set_by_pointer(st, rd_in_unknown_protos, ipstat.ips_noproto); - rrdset_done(st); - } - } - } else { - error("DISABLED: net.inet.ip.stats module"); - return 1; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet6.ip6.stats - -int do_net_inet6_ip6_stats(int update_every, usec_t dt) { - (void)dt; - static int do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1; - - if (unlikely(do_ip6_packets == -1)) { - do_ip6_packets = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 packets", - CONFIG_BOOLEAN_AUTO); - do_ip6_fragsout = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments sent", - CONFIG_BOOLEAN_AUTO); - do_ip6_fragsin = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments assembly", - CONFIG_BOOLEAN_AUTO); - do_ip6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 errors", - CONFIG_BOOLEAN_AUTO); - } - - if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) { - static int mib[4] = {0, 0, 0, 0}; - struct ip6stat ip6stat; - - if (unlikely(GETSYSCTL_SIMPLE("net.inet6.ip6.stats", mib, ip6stat))) { - do_ip6_packets = 0; - 
error("DISABLED: ipv6.packets chart"); - do_ip6_fragsout = 0; - error("DISABLED: ipv6.fragsout chart"); - do_ip6_fragsin = 0; - error("DISABLED: ipv6.fragsin chart"); - do_ip6_errors = 0; - error("DISABLED: ipv6.errors chart"); - error("DISABLED: net.inet6.ip6.stats module"); - return 1; - } else { - - // -------------------------------------------------------------------- - - if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_localout || ip6stat.ip6s_total || - ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { - do_ip6_packets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_forwarded = NULL, *rd_delivers = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "packets", - NULL, - "packets", - NULL, - "IPv6 Packets", - "packets/s", - "freebsd", - "net.inet6.ip6.stats", - 3000, - update_every, - RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_forwarded = rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_delivers = rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_sent, ip6stat.ip6s_localout); - rrddim_set_by_pointer(st, rd_received, ip6stat.ip6s_total); - rrddim_set_by_pointer(st, rd_forwarded, ip6stat.ip6s_forward); - rrddim_set_by_pointer(st, rd_delivers, ip6stat.ip6s_delivered); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || - ip6stat.ip6s_ofragments))) { - do_ip6_fragsout = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "fragsout", - NULL, - "fragments", - NULL, - "IPv6 Fragments Sent", - "packets/s", - "freebsd", - "net.inet6.ip6.stats", - 3010, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_fragmented); - rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_cantfrag); - rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_ofragments); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || - ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { - do_ip6_fragsin = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_timeout = NULL, *rd_all = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "fragsin", - NULL, - "fragments", - NULL, - "IPv6 Fragments Reassembly", - "packets/s", - "freebsd", - "net.inet6.ip6.stats", - 3011, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = 
rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_reassembled); - rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_fragdropped); - rrddim_set_by_pointer(st, rd_timeout, ip6stat.ip6s_fragtimeout); - rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_fragments); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( - ip6stat.ip6s_toosmall || - ip6stat.ip6s_odropped || - ip6stat.ip6s_badoptions || - ip6stat.ip6s_badvers || - ip6stat.ip6s_exthdrtoolong || - ip6stat.ip6s_sources_none || - ip6stat.ip6s_tooshort || - ip6stat.ip6s_cantforward || - ip6stat.ip6s_noroute))) { - do_ip6_errors = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL, - *rd_in_hdr_errors = NULL, *rd_in_addr_errors = NULL, *rd_in_truncated_pkts = NULL, - *rd_in_no_routes = NULL, *rd_out_no_routes = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "errors", - NULL, - "errors", - NULL, - "IPv6 Errors", - "packets/s", - "freebsd", - "net.inet6.ip6.stats", - 3002, - update_every, - RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_truncated_pkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_no_routes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_discards, ip6stat.ip6s_toosmall); - rrddim_set_by_pointer(st, rd_out_discards, ip6stat.ip6s_odropped); - rrddim_set_by_pointer(st, rd_in_hdr_errors, ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + - ip6stat.ip6s_exthdrtoolong); - rrddim_set_by_pointer(st, rd_in_addr_errors, ip6stat.ip6s_sources_none); - rrddim_set_by_pointer(st, rd_in_truncated_pkts, ip6stat.ip6s_tooshort); - rrddim_set_by_pointer(st, rd_in_no_routes, ip6stat.ip6s_cantforward); - rrddim_set_by_pointer(st, rd_out_no_routes, ip6stat.ip6s_noroute); - rrdset_done(st); - } - } - } else { - error("DISABLED: net.inet6.ip6.stats module"); - return 1; - } - - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- -// net.inet6.icmp6.stats - -int do_net_inet6_icmp6_stats(int update_every, usec_t dt) { - (void)dt; - static int do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1, do_icmp6_router = -1, - do_icmp6_neighbor = -1, do_icmp6_types = -1; - - if (unlikely(do_icmp6 == -1)) { - do_icmp6 = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp", - CONFIG_BOOLEAN_AUTO); - do_icmp6_redir = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp redirects", - CONFIG_BOOLEAN_AUTO); - do_icmp6_errors = 
config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp errors", - CONFIG_BOOLEAN_AUTO); - do_icmp6_echos = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp echos", - CONFIG_BOOLEAN_AUTO); - do_icmp6_router = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp router", - CONFIG_BOOLEAN_AUTO); - do_icmp6_neighbor = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp neighbor", - CONFIG_BOOLEAN_AUTO); - do_icmp6_types = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp types", - CONFIG_BOOLEAN_AUTO); - } - - if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) { - static int mib[4] = {0, 0, 0, 0}; - struct icmp6stat icmp6stat; - - if (unlikely(GETSYSCTL_SIMPLE("net.inet6.icmp6.stats", mib, icmp6stat))) { - do_icmp6 = 0; - error("DISABLED: ipv6.icmp chart"); - do_icmp6_redir = 0; - error("DISABLED: ipv6.icmpredir chart"); - do_icmp6_errors = 0; - error("DISABLED: ipv6.icmperrors chart"); - do_icmp6_echos = 0; - error("DISABLED: ipv6.icmpechos chart"); - do_icmp6_router = 0; - error("DISABLED: ipv6.icmprouter chart"); - do_icmp6_neighbor = 0; - error("DISABLED: ipv6.icmpneighbor chart"); - do_icmp6_types = 0; - error("DISABLED: ipv6.icmptypes chart"); - error("DISABLED: net.inet6.icmp6.stats module"); - return 1; - } else { - int i; - struct icmp6_total { - u_long msgs_in; - u_long msgs_out; - } icmp6_total = {0, 0}; - - for (i = 0; i <= ICMP6_MAXTYPE; i++) { - icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i]; - icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i]; - } - icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort; - - // -------------------------------------------------------------------- - - if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) { - do_icmp6 = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, *rd_sent = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmp", - NULL, - "icmp", - NULL, - "IPv6 ICMP Messages", - "messages/s", - "freebsd", - "net.inet6.icmp6.stats", - 10000, - update_every, - RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_received, icmp6_total.msgs_out); - rrddim_set_by_pointer(st, rd_sent, icmp6_total.msgs_in); - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { - do_icmp6_redir = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, *rd_sent = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmpredir", - NULL, - "icmp", - NULL, - "IPv6 ICMP Redirects", - "redirects/s", - "freebsd", - "net.inet6.icmp6.stats", - 10050, - update_every, - RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_received, 
icmp6stat.icp6s_outhist[ND_REDIRECT]); - rrddim_set_by_pointer(st, rd_sent, icmp6stat.icp6s_inhist[ND_REDIRECT]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_badcode || - icmp6stat.icp6s_badlen || - icmp6stat.icp6s_checksum || - icmp6stat.icp6s_tooshort || - icmp6stat.icp6s_error || - icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || - icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { - do_icmp6_errors = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_in_errors = NULL, *rd_out_errors = NULL, *rd_in_csum_errors = NULL, - *rd_in_dest_unreachs = NULL, *rd_in_pkt_too_bigs = NULL, *rd_in_time_excds = NULL, - *rd_in_parm_problems = NULL, *rd_out_dest_unreachs = NULL, *rd_out_time_excds = NULL, - *rd_out_parm_problems = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmperrors", - NULL, "icmp", - NULL, - "IPv6 ICMP Errors", - "errors/s", - "freebsd", - "net.inet6.icmp6.stats", - 10100, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_errors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_dest_unreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_pkt_too_bigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_time_excds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_parm_problems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_dest_unreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_time_excds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_parm_problems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_errors, icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + - icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort); - rrddim_set_by_pointer(st, rd_out_errors, icmp6stat.icp6s_error); - rrddim_set_by_pointer(st, rd_in_csum_errors, icmp6stat.icp6s_checksum); - rrddim_set_by_pointer(st, rd_in_dest_unreachs, icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]); - rrddim_set_by_pointer(st, rd_in_pkt_too_bigs, icmp6stat.icp6s_badlen); - rrddim_set_by_pointer(st, rd_in_time_excds, icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]); - rrddim_set_by_pointer(st, rd_in_parm_problems, icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]); - rrddim_set_by_pointer(st, rd_out_dest_unreachs, icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]); - rrddim_set_by_pointer(st, rd_out_time_excds, icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]); - rrddim_set_by_pointer(st, rd_out_parm_problems, icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || - 
icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { - do_icmp6_echos = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_replies = NULL, *rd_out_replies = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmpechos", - NULL, - "icmp", - NULL, - "IPv6 ICMP Echo", - "messages/s", - "freebsd", - "net.inet6.icmp6.stats", - 10200, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_replies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_replies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in, icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]); - rrddim_set_by_pointer(st, rd_out, icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]); - rrddim_set_by_pointer(st, rd_in_replies, icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]); - rrddim_set_by_pointer(st, rd_out_replies, icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || - icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { - do_icmp6_router = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL, - *rd_in_advertisements = NULL, *rd_out_advertisements = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmprouter", - NULL, - "icmp", - NULL, - "IPv6 Router Messages", - "messages/s", - "freebsd", - "net.inet6.icmp6.stats", - 10400, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]); - rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]); - rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]); - rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { - do_icmp6_neighbor = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL, - *rd_in_advertisements = NULL, *rd_out_advertisements = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmpneighbor", - NULL, - "icmp", - NULL, - "IPv6 Neighbor Messages", - "messages/s", - 
"freebsd", - "net.inet6.icmp6.stats", - 10500, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]); - rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]); - rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]); - rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[1] || - icmp6stat.icp6s_inhist[128] || - icmp6stat.icp6s_inhist[129] || - icmp6stat.icp6s_inhist[136] || - icmp6stat.icp6s_outhist[1] || - icmp6stat.icp6s_outhist[128] || - icmp6stat.icp6s_outhist[129] || - icmp6stat.icp6s_outhist[133] || - icmp6stat.icp6s_outhist[135] || - icmp6stat.icp6s_outhist[136]))) { - do_icmp6_types = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_in_1 = NULL, *rd_in_128 = NULL, *rd_in_129 = NULL, *rd_in_136 = NULL, - *rd_out_1 = NULL, *rd_out_128 = NULL, *rd_out_129 = NULL, *rd_out_133 = NULL, - *rd_out_135 = NULL, *rd_out_143 = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6", - "icmptypes", - NULL, - "icmp", - NULL, - "IPv6 ICMP Types", - "messages/s", - "freebsd", - "net.inet6.icmp6.stats", - 10700, - update_every, - RRDSET_TYPE_LINE - ); - - rd_in_1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_in_136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out_143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd_in_1, icmp6stat.icp6s_inhist[1]); - rrddim_set_by_pointer(st, rd_in_128, icmp6stat.icp6s_inhist[128]); - rrddim_set_by_pointer(st, rd_in_129, icmp6stat.icp6s_inhist[129]); - rrddim_set_by_pointer(st, rd_in_136, icmp6stat.icp6s_inhist[136]); - rrddim_set_by_pointer(st, rd_out_1, icmp6stat.icp6s_outhist[1]); - rrddim_set_by_pointer(st, rd_out_128, icmp6stat.icp6s_outhist[128]); - rrddim_set_by_pointer(st, rd_out_129, icmp6stat.icp6s_outhist[129]); - rrddim_set_by_pointer(st, rd_out_133, icmp6stat.icp6s_outhist[133]); - rrddim_set_by_pointer(st, rd_out_135, icmp6stat.icp6s_outhist[135]); - rrddim_set_by_pointer(st, rd_out_143, icmp6stat.icp6s_outhist[143]); - rrdset_done(st); - } - } - } else { - error("DISABLED: net.inet6.icmp6.stats 
module"); - return 1; - } - - return 0; -} diff --git a/src/freeipmi_plugin.c b/src/freeipmi_plugin.c deleted file mode 100644 index df4c019a4..000000000 --- a/src/freeipmi_plugin.c +++ /dev/null @@ -1,1680 +0,0 @@ -/* - * netdata freeipmi.plugin - * Copyright (C) 2017 Costa Tsaousis - * GPL v3+ - * - * Based on: - * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp - * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp - * - * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC. - * Copyright (C) 2006-2007 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Albert Chu <chu11@llnl.gov> - * UCRL-CODE-222073 - */ - -#include "common.h" - -#include <stdio.h> -#include <stdlib.h> -#include <stdint.h> -#include <string.h> -#include <assert.h> -#include <errno.h> -#include <unistd.h> -#include <sys/time.h> - -#ifdef HAVE_FREEIPMI - -#include <ipmi_monitoring.h> -#include <ipmi_monitoring_bitmasks.h> - -/* Communication Configuration - Initialize accordingly */ - -/* Hostname, NULL for In-band communication, non-null for a hostname */ -char *hostname = NULL; - -/* In-band Communication Configuration */ -int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS; /* or -1 for default */ -int disable_auto_probe = 0; /* probe for in-band device */ -unsigned int driver_address = 0; /* not used if probing */ -unsigned int register_spacing = 0; /* not used if probing */ -char *driver_device = NULL; /* not used if probing */ - -/* Out-of-band Communication Configuration */ -int protocol_version = -1; //IPMI_MONITORING_PROTOCOL_VERSION_1_5; /* or -1 for default */ -char *username = "foousername"; -char *password = "foopassword"; -unsigned char *k_g = NULL; -unsigned int k_g_len = 0; -int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER; /* or -1 for default */ -int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5; /* or -1 for default */ -int cipher_suite_id = 0; /* or -1 for default */ -int session_timeout = 0; /* 0 for default */ -int retransmission_timeout = 0; /* 0 for default */ - -/* Workarounds - specify workaround flags if necessary */ -unsigned int workaround_flags = 0; - -/* Initialize w/ record id numbers to only monitor specific record ids */ -unsigned int record_ids[] = {0}; -unsigned int record_ids_length = 0; - -/* Initialize w/ sensor types to only monitor specific sensor types - * see ipmi_monitoring.h sensor types list. - */ -unsigned int sensor_types[] = {0}; -unsigned int sensor_types_length = 0; - -/* Set to an appropriate alternate if desired */ -char *sdr_cache_directory = "/tmp"; -char *sensor_config_file = NULL; - -/* Set to 1 or 0 to enable these sensor reading flags - * - See ipmi_monitoring.h for descriptions of these flags. - */ -int reread_sdr_cache = 0; -int ignore_non_interpretable_sensors = 1; -int bridge_sensors = 0; -int interpret_oem_data = 0; -int shared_sensors = 0; -int discrete_reading = 0; -int ignore_scanning_disabled = 0; -int assume_bmc_owner = 0; -int entity_sensor_names = 0; - -/* Initialization flags - * - * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or - * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging - * information. 
- */ -unsigned int ipmimonitoring_init_flags = 0; - -int errnum; - -// ---------------------------------------------------------------------------- -// SEL only variables - -/* Initialize w/ date range to only monitoring specific date range */ -char *date_begin = NULL; /* use MM/DD/YYYY format */ -char *date_end = NULL; /* use MM/DD/YYYY format */ - -int assume_system_event_record = 0; - -char *sel_config_file = NULL; - - -// ---------------------------------------------------------------------------- -// functions common to sensors and SEL - -static void -_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config) -{ - assert (ipmi_config); - - ipmi_config->driver_type = driver_type; - ipmi_config->disable_auto_probe = disable_auto_probe; - ipmi_config->driver_address = driver_address; - ipmi_config->register_spacing = register_spacing; - ipmi_config->driver_device = driver_device; - - ipmi_config->protocol_version = protocol_version; - ipmi_config->username = username; - ipmi_config->password = password; - ipmi_config->k_g = k_g; - ipmi_config->k_g_len = k_g_len; - ipmi_config->privilege_level = privilege_level; - ipmi_config->authentication_type = authentication_type; - ipmi_config->cipher_suite_id = cipher_suite_id; - ipmi_config->session_timeout_len = session_timeout; - ipmi_config->retransmission_timeout_len = retransmission_timeout; - - ipmi_config->workaround_flags = workaround_flags; -} - -#ifdef NETDATA_COMMENTED -static const char * -_get_sensor_type_string (int sensor_type) -{ - switch (sensor_type) - { - case IPMI_MONITORING_SENSOR_TYPE_RESERVED: - return ("Reserved"); - case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE: - return ("Temperature"); - case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE: - return ("Voltage"); - case IPMI_MONITORING_SENSOR_TYPE_CURRENT: - return ("Current"); - case IPMI_MONITORING_SENSOR_TYPE_FAN: - return ("Fan"); - case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY: - return ("Physical Security"); - case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT: - return ("Platform Security Violation Attempt"); - case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR: - return ("Processor"); - case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY: - return ("Power Supply"); - case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT: - return ("Power Unit"); - case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE: - return ("Cooling Device"); - case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR: - return ("Other Units Based Sensor"); - case IPMI_MONITORING_SENSOR_TYPE_MEMORY: - return ("Memory"); - case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT: - return ("Drive Slot"); - case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE: - return ("POST Memory Resize"); - case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS: - return ("System Firmware Progress"); - case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED: - return ("Event Logging Disabled"); - case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1: - return ("Watchdog 1"); - case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT: - return ("System Event"); - case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT: - return ("Critical Interrupt"); - case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH: - return ("Button/Switch"); - case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD: - return ("Module/Board"); - case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR: - return ("Microcontroller/Coprocessor"); - case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD: - return ("Add In Card"); - case IPMI_MONITORING_SENSOR_TYPE_CHASSIS: - return ("Chassis"); - case 
IPMI_MONITORING_SENSOR_TYPE_CHIP_SET: - return ("Chip Set"); - case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU: - return ("Other Fru"); - case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT: - return ("Cable/Interconnect"); - case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR: - return ("Terminator"); - case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED: - return ("System Boot Initiated"); - case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR: - return ("Boot Error"); - case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT: - return ("OS Boot"); - case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP: - return ("OS Critical Stop"); - case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR: - return ("Slot/Connector"); - case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE: - return ("System ACPI Power State"); - case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2: - return ("Watchdog 2"); - case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT: - return ("Platform Alert"); - case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE: - return ("Entity Presence"); - case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC: - return ("Monitor ASIC/IC"); - case IPMI_MONITORING_SENSOR_TYPE_LAN: - return ("LAN"); - case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH: - return ("Management Subsystem Health"); - case IPMI_MONITORING_SENSOR_TYPE_BATTERY: - return ("Battery"); - case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT: - return ("Session Audit"); - case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE: - return ("Version Change"); - case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE: - return ("FRU State"); - } - - return ("Unrecognized"); -} -#endif // NETDATA_COMMENTED - - -// ---------------------------------------------------------------------------- -// BEGIN NETDATA CODE - -static int debug = 0; - -static int netdata_update_every = 5; // this is the minimum update frequency -static int netdata_priority = 90000; -static int netdata_do_sel = 1; - -static size_t netdata_sensors_updated = 0; -static size_t netdata_sensors_collected = 0; -static size_t netdata_sel_events = 0; -static size_t netdata_sensors_states_nominal = 0; -static size_t netdata_sensors_states_warning = 0; -static size_t netdata_sensors_states_critical = 0; - -struct sensor { - int record_id; - int sensor_number; - int sensor_type; - int sensor_state; - int sensor_units; - char *sensor_name; - - int sensor_reading_type; - union { - uint8_t bool_value; - uint32_t uint32_value; - double double_value; - } sensor_reading; - - int sent; - int ignore; - int exposed; - int updated; - struct sensor *next; -} *sensors_root = NULL; - -static void netdata_mark_as_not_updated() { - struct sensor *sn; - for(sn = sensors_root; sn ;sn = sn->next) - sn->updated = sn->sent = 0; - - netdata_sensors_updated = 0; - netdata_sensors_collected = 0; - netdata_sel_events = 0; - - netdata_sensors_states_nominal = 0; - netdata_sensors_states_warning = 0; - netdata_sensors_states_critical = 0; -} - -static void send_chart_to_netdata_for_units(int units) { - struct sensor *sn; - - switch(units) { - case IPMI_MONITORING_SENSOR_UNITS_CELSIUS: - printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n" - , netdata_priority + 10 - , netdata_update_every - ); - break; - - case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT: - printf("CHART ipmi.temperatures_f '' 'System Fahrenheit Temperatures read by IPMI' 'Fahrenheit' 'temperatures' 'ipmi.temperatures_f' 'line' %d %d\n" - , netdata_priority + 11 - , netdata_update_every - ); - break; - - case 
IPMI_MONITORING_SENSOR_UNITS_VOLTS: - printf("CHART ipmi.volts '' 'System Voltages read by IPMI' 'Volts' 'voltages' 'ipmi.voltages' 'line' %d %d\n" - , netdata_priority + 12 - , netdata_update_every - ); - break; - - case IPMI_MONITORING_SENSOR_UNITS_AMPS: - printf("CHART ipmi.amps '' 'System Current read by IPMI' 'Amps' 'current' 'ipmi.amps' 'line' %d %d\n" - , netdata_priority + 13 - , netdata_update_every - ); - break; - - case IPMI_MONITORING_SENSOR_UNITS_RPM: - printf("CHART ipmi.rpm '' 'System Fans read by IPMI' 'RPM' 'fans' 'ipmi.rpm' 'line' %d %d\n" - , netdata_priority + 14 - , netdata_update_every - ); - break; - - case IPMI_MONITORING_SENSOR_UNITS_WATTS: - printf("CHART ipmi.watts '' 'System Power read by IPMI' 'Watts' 'power' 'ipmi.watts' 'line' %d %d\n" - , netdata_priority + 5 - , netdata_update_every - ); - break; - - case IPMI_MONITORING_SENSOR_UNITS_PERCENT: - printf("CHART ipmi.percent '' 'System Metrics read by IPMI' '%%' 'other' 'ipmi.percent' 'line' %d %d\n" - , netdata_priority + 15 - , netdata_update_every - ); - break; - - default: - for(sn = sensors_root; sn; sn = sn->next) - if(sn->sensor_units == units) - sn->ignore = 1; - return; - } - - for(sn = sensors_root; sn; sn = sn->next) { - if(sn->sensor_units == units && sn->updated && !sn->ignore) { - sn->exposed = 1; - - switch(sn->sensor_reading_type) { - case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: - case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: - printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1\n" - , sn->sensor_number - , sn->record_id - , sn->sensor_reading_type - , sn->sensor_name - , sn->sensor_number - ); - break; - - case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: - printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1000\n" - , sn->sensor_number - , sn->record_id - , sn->sensor_reading_type - , sn->sensor_name - , sn->sensor_number - ); - break; - - default: - sn->ignore = 1; - break; - } - } - } -} - -static void send_metrics_to_netdata_for_units(int units) { - struct sensor *sn; - - switch(units) { - case IPMI_MONITORING_SENSOR_UNITS_CELSIUS: - printf("BEGIN ipmi.temperatures_c\n"); - break; - - case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT: - printf("BEGIN ipmi.temperatures_f\n"); - break; - - case IPMI_MONITORING_SENSOR_UNITS_VOLTS: - printf("BEGIN ipmi.volts\n"); - break; - - case IPMI_MONITORING_SENSOR_UNITS_AMPS: - printf("BEGIN ipmi.amps\n"); - break; - - case IPMI_MONITORING_SENSOR_UNITS_RPM: - printf("BEGIN ipmi.rpm\n"); - break; - - case IPMI_MONITORING_SENSOR_UNITS_WATTS: - printf("BEGIN ipmi.watts\n"); - break; - - case IPMI_MONITORING_SENSOR_UNITS_PERCENT: - printf("BEGIN ipmi.percent\n"); - break; - - default: - for(sn = sensors_root; sn; sn = sn->next) - if(sn->sensor_units == units) - sn->ignore = 1; - return; - } - - for(sn = sensors_root; sn; sn = sn->next) { - if(sn->sensor_units == units && sn->updated && !sn->sent && !sn->ignore) { - netdata_sensors_updated++; - - sn->sent = 1; - - switch(sn->sensor_reading_type) { - case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: - printf("SET i%d_n%d_r%d = %u\n" - , sn->sensor_number - , sn->record_id - , sn->sensor_reading_type - , sn->sensor_reading.bool_value - ); - break; - - case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: - printf("SET i%d_n%d_r%d = %u\n" - , sn->sensor_number - , sn->record_id - , sn->sensor_reading_type - , sn->sensor_reading.uint32_value - ); - break; - - case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: - printf("SET i%d_n%d_r%d = %lld\n" - , sn->sensor_number 
- , sn->record_id - , sn->sensor_reading_type - , (long long int)(sn->sensor_reading.double_value * 1000) - ); - break; - - default: - sn->ignore = 1; - break; - } - } - } - - printf("END\n"); -} - -static void send_metrics_to_netdata() { - static int sel_chart_generated = 0, sensors_states_chart_generated = 0; - struct sensor *sn; - - if(netdata_do_sel && !sel_chart_generated) { - sel_chart_generated = 1; - printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d\n" - , netdata_priority + 2 - , netdata_update_every - ); - printf("DIMENSION events '' absolute 1 1\n"); - } - - if(!sensors_states_chart_generated) { - sensors_states_chart_generated = 1; - printf("CHART ipmi.sensors_states '' 'IPMI Sensors State' 'sensors' 'states' ipmi.sensors_states line %d %d\n" - , netdata_priority + 1 - , netdata_update_every - ); - printf("DIMENSION nominal '' absolute 1 1\n"); - printf("DIMENSION critical '' absolute 1 1\n"); - printf("DIMENSION warning '' absolute 1 1\n"); - } - - // generate the CHART/DIMENSION lines, if we have to - for(sn = sensors_root; sn; sn = sn->next) - if(sn->updated && !sn->exposed && !sn->ignore) - send_chart_to_netdata_for_units(sn->sensor_units); - - if(netdata_do_sel) { - printf( - "BEGIN ipmi.events\n" - "SET events = %zu\n" - "END\n" - , netdata_sel_events - ); - } - - printf( - "BEGIN ipmi.sensors_states\n" - "SET nominal = %zu\n" - "SET warning = %zu\n" - "SET critical = %zu\n" - "END\n" - , netdata_sensors_states_nominal - , netdata_sensors_states_warning - , netdata_sensors_states_critical - ); - - // send metrics to netdata - for(sn = sensors_root; sn; sn = sn->next) - if(sn->updated && sn->exposed && !sn->sent && !sn->ignore) - send_metrics_to_netdata_for_units(sn->sensor_units); - -} - -static int *excluded_record_ids = NULL; -size_t excluded_record_ids_length = 0; - -static void excluded_record_ids_parse(const char *s) { - if(!s) return; - - while(*s) { - while(*s && !isdigit(*s)) s++; - - if(isdigit(*s)) { - char *e; - unsigned long n = strtoul(s, &e, 10); - s = e; - - if(n != 0) { - excluded_record_ids = realloc(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int)); - if(!excluded_record_ids) { - fprintf(stderr, "freeipmi.plugin: failed to allocate memory. 
Exiting."); - exit(1); - } - excluded_record_ids[excluded_record_ids_length++] = (int)n; - } - } - } - - if(debug) { - fprintf(stderr, "freeipmi.plugin: excluded record ids:"); - size_t i; - for(i = 0; i < excluded_record_ids_length; i++) { - fprintf(stderr, " %d", excluded_record_ids[i]); - } - fprintf(stderr, "\n"); - } -} - - -static int excluded_record_ids_check(int record_id) { - size_t i; - - for(i = 0; i < excluded_record_ids_length; i++) { - if(excluded_record_ids[i] == record_id) - return 1; - } - - return 0; -} - -static void netdata_get_sensor( - int record_id - , int sensor_number - , int sensor_type - , int sensor_state - , int sensor_units - , int sensor_reading_type - , char *sensor_name - , void *sensor_reading -) { - // find the sensor record - struct sensor *sn; - for(sn = sensors_root; sn ;sn = sn->next) - if( sn->record_id == record_id && - sn->sensor_number == sensor_number && - sn->sensor_reading_type == sensor_reading_type && - sn->sensor_units == sensor_units && - !strcmp(sn->sensor_name, sensor_name) - ) - break; - - if(!sn) { - // not found, create it - - // check if it is excluded - if(excluded_record_ids_check(record_id)) - return; - - sn = calloc(1, sizeof(struct sensor)); - if(!sn) { - fatal("cannot allocate %zu bytes of memory.", sizeof(struct sensor)); - } - - sn->record_id = record_id; - sn->sensor_number = sensor_number; - sn->sensor_type = sensor_type; - sn->sensor_state = sensor_state; - sn->sensor_units = sensor_units; - sn->sensor_reading_type = sensor_reading_type; - sn->sensor_name = strdup(sensor_name); - if(!sn->sensor_name) { - fatal("cannot allocate %zu bytes of memory.", strlen(sensor_name)); - } - - sn->next = sensors_root; - sensors_root = sn; - } - - switch(sensor_reading_type) { - case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: - sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading); - sn->updated = 1; - netdata_sensors_collected++; - break; - - case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32: - sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading); - sn->updated = 1; - netdata_sensors_collected++; - break; - - case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE: - sn->sensor_reading.double_value = *((double *)sensor_reading); - sn->updated = 1; - netdata_sensors_collected++; - break; - - default: - sn->ignore = 1; - break; - } - - switch(sensor_state) { - case IPMI_MONITORING_STATE_NOMINAL: - netdata_sensors_states_nominal++; - break; - - case IPMI_MONITORING_STATE_WARNING: - netdata_sensors_states_warning++; - break; - - case IPMI_MONITORING_STATE_CRITICAL: - netdata_sensors_states_critical++; - break; - - default: - break; - } -} - -static void netdata_get_sel( - int record_id - , int record_type_class - , int sel_state -) { - (void)record_id; - (void)record_type_class; - (void)sel_state; - - netdata_sel_events++; -} - - -void netdata_cleanup_and_exit(int ret) { - exit(ret); -} - -// END NETDATA CODE -// ---------------------------------------------------------------------------- - - -static int -_ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config) -{ - ipmi_monitoring_ctx_t ctx = NULL; - unsigned int sensor_reading_flags = 0; - int i; - int sensor_count; - int rv = -1; - - if (!(ctx = ipmi_monitoring_ctx_create ())) { - error("ipmi_monitoring_ctx_create()"); - goto cleanup; - } - - if (sdr_cache_directory) - { - if (ipmi_monitoring_ctx_sdr_cache_directory (ctx, - sdr_cache_directory) < 0) - { - error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n", - 
ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - - /* Must call otherwise only default interpretations ever used */ - if (sensor_config_file) - { - if (ipmi_monitoring_ctx_sensor_config_file (ctx, - sensor_config_file) < 0) - { - error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else - { - if (ipmi_monitoring_ctx_sensor_config_file (ctx, NULL) < 0) - { - error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - - if (reread_sdr_cache) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE; - - if (ignore_non_interpretable_sensors) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS; - - if (bridge_sensors) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS; - - if (interpret_oem_data) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA; - - if (shared_sensors) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS; - - if (discrete_reading) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING; - - if (ignore_scanning_disabled) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED; - - if (assume_bmc_owner) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER; - -#ifdef IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES - if (entity_sensor_names) - sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES; -#endif // IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES - - if (!record_ids_length && !sensor_types_length) - { - if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx, - hostname, - ipmi_config, - sensor_reading_flags, - NULL, - 0, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sensor_readings_by_record_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else if (record_ids_length) - { - if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx, - hostname, - ipmi_config, - sensor_reading_flags, - record_ids, - record_ids_length, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sensor_readings_by_record_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else - { - if ((sensor_count = ipmi_monitoring_sensor_readings_by_sensor_type (ctx, - hostname, - ipmi_config, - sensor_reading_flags, - sensor_types, - sensor_types_length, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sensor_readings_by_sensor_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - -#ifdef NETDATA_COMMENTED - printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n", - "Record ID", - "Sensor Name", - "Sensor Number", - "Sensor Type", - "Sensor State", - "Sensor Reading", - "Sensor Units", - "Sensor Event/Reading Type Code", - "Sensor Event Bitmask", - "Sensor Event String"); -#endif // NETDATA_COMMENTED - - for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx)) - { - int record_id, sensor_number, sensor_type, sensor_state, sensor_units, - sensor_reading_type; - -#ifdef NETDATA_COMMENTED - int sensor_bitmask_type, sensor_bitmask, event_reading_type_code; - char **sensor_bitmask_strings = NULL; - const char *sensor_type_str; - const char *sensor_state_str; -#endif // NETDATA_COMMENTED - - char *sensor_name = NULL; - void *sensor_reading; - - if ((record_id = 
ipmi_monitoring_sensor_read_record_id (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_record_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sensor_number = ipmi_monitoring_sensor_read_sensor_number (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_sensor_number(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sensor_type = ipmi_monitoring_sensor_read_sensor_type (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_sensor_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if (!(sensor_name = ipmi_monitoring_sensor_read_sensor_name (ctx))) - { - error( "ipmi_monitoring_sensor_read_sensor_name(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sensor_state = ipmi_monitoring_sensor_read_sensor_state (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_sensor_state(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_sensor_units(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - -#ifdef NETDATA_COMMENTED - if ((sensor_bitmask_type = ipmi_monitoring_sensor_read_sensor_bitmask_type (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_sensor_bitmask_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - if ((sensor_bitmask = ipmi_monitoring_sensor_read_sensor_bitmask (ctx)) < 0) - { - error( - "ipmi_monitoring_sensor_read_sensor_bitmask(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx))) - { - error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } -#endif // NETDATA_COMMENTED - - if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_sensor_reading_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - sensor_reading = ipmi_monitoring_sensor_read_sensor_reading (ctx); - -#ifdef NETDATA_COMMENTED - if ((event_reading_type_code = ipmi_monitoring_sensor_read_event_reading_type_code (ctx)) < 0) - { - error( "ipmi_monitoring_sensor_read_event_reading_type_code(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } -#endif // NETDATA_COMMENTED - - netdata_get_sensor( - record_id - , sensor_number - , sensor_type - , sensor_state - , sensor_units - , sensor_reading_type - , sensor_name - , sensor_reading - ); - -#ifdef NETDATA_COMMENTED - if (!strlen (sensor_name)) - sensor_name = "N/A"; - - sensor_type_str = _get_sensor_type_string (sensor_type); - - printf ("%d, %s, %d, %s", - record_id, - sensor_name, - sensor_number, - sensor_type_str); - - if (sensor_state == IPMI_MONITORING_STATE_NOMINAL) - sensor_state_str = "Nominal"; - else if (sensor_state == IPMI_MONITORING_STATE_WARNING) - sensor_state_str = "Warning"; - else if (sensor_state == IPMI_MONITORING_STATE_CRITICAL) - sensor_state_str = "Critical"; - else - sensor_state_str = "N/A"; - - printf (", %s", sensor_state_str); - - if (sensor_reading) - { - const char *sensor_units_str; - - if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL) - printf (", %s", - (*((uint8_t *)sensor_reading) ? 
"true" : "false")); - else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32) - printf (", %u", - *((uint32_t *)sensor_reading)); - else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE) - printf (", %.2f", - *((double *)sensor_reading)); - else - printf (", N/A"); - - if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_CELSIUS) - sensor_units_str = "C"; - else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT) - sensor_units_str = "F"; - else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_VOLTS) - sensor_units_str = "V"; - else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_AMPS) - sensor_units_str = "A"; - else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_RPM) - sensor_units_str = "RPM"; - else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_WATTS) - sensor_units_str = "W"; - else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_PERCENT) - sensor_units_str = "%"; - else - sensor_units_str = "N/A"; - - printf (", %s", sensor_units_str); - } - else - printf (", N/A, N/A"); - - printf (", %Xh", event_reading_type_code); - - /* It is possible you may want to monitor specific event - * conditions that may occur. If that is the case, you may want - * to check out what specific bitmask type and bitmask events - * occurred. See ipmi_monitoring_bitmasks.h for a list of - * bitmasks and types. - */ - - if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN) - printf (", %Xh", sensor_bitmask); - else - printf (", N/A"); - - if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN) - { - unsigned int i = 0; - - printf (","); - - while (sensor_bitmask_strings[i]) - { - printf (" "); - - printf ("'%s'", - sensor_bitmask_strings[i]); - - i++; - } - } - else - printf (", N/A"); - - printf ("\n"); -#endif // NETDATA_COMMENTED - } - - rv = 0; - cleanup: - if (ctx) - ipmi_monitoring_ctx_destroy (ctx); - return (rv); -} - - -static int -_ipmimonitoring_sel (struct ipmi_monitoring_ipmi_config *ipmi_config) -{ - ipmi_monitoring_ctx_t ctx = NULL; - unsigned int sel_flags = 0; - int i; - int sel_count; - int rv = -1; - - if (!(ctx = ipmi_monitoring_ctx_create ())) - { - error("ipmi_monitoring_ctx_create()"); - goto cleanup; - } - - if (sdr_cache_directory) - { - if (ipmi_monitoring_ctx_sdr_cache_directory (ctx, - sdr_cache_directory) < 0) - { - error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - - /* Must call otherwise only default interpretations ever used */ - if (sel_config_file) - { - if (ipmi_monitoring_ctx_sel_config_file (ctx, - sel_config_file) < 0) - { - error( "ipmi_monitoring_ctx_sel_config_file(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else - { - if (ipmi_monitoring_ctx_sel_config_file (ctx, NULL) < 0) - { - error( "ipmi_monitoring_ctx_sel_config_file(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - - if (reread_sdr_cache) - sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE; - - if (interpret_oem_data) - sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA; - - if (assume_system_event_record) - sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD; - -#ifdef IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES - if (entity_sensor_names) - sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES; -#endif // IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES - - if (record_ids_length) - { - if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx, - hostname, - 
ipmi_config, - sel_flags, - record_ids, - record_ids_length, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sel_by_record_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else if (sensor_types_length) - { - if ((sel_count = ipmi_monitoring_sel_by_sensor_type (ctx, - hostname, - ipmi_config, - sel_flags, - sensor_types, - sensor_types_length, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sel_by_sensor_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else if (date_begin - || date_end) - { - if ((sel_count = ipmi_monitoring_sel_by_date_range (ctx, - hostname, - ipmi_config, - sel_flags, - date_begin, - date_end, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sel_by_date_range(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - else - { - if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx, - hostname, - ipmi_config, - sel_flags, - NULL, - 0, - NULL, - NULL)) < 0) - { - error( "ipmi_monitoring_sel_by_record_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - } - -#ifdef NETDATA_COMMENTED - printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n", - "Record ID", - "Record Type", - "SEL State", - "Timestamp", - "Sensor Name", - "Sensor Type", - "Event Direction", - "Event Type Code", - "Event Data", - "Event Offset", - "Event Offset String"); -#endif // NETDATA_COMMENTED - - for (i = 0; i < sel_count; i++, ipmi_monitoring_sel_iterator_next (ctx)) - { - int record_id, record_type, sel_state, record_type_class; -#ifdef NETDATA_COMMENTED - int sensor_type, sensor_number, event_direction, - event_offset_type, event_offset, event_type_code, manufacturer_id; - unsigned int timestamp, event_data1, event_data2, event_data3; - char *event_offset_string = NULL; - const char *sensor_type_str; - const char *event_direction_str; - const char *sel_state_str; - char *sensor_name = NULL; - unsigned char oem_data[64]; - int oem_data_len; - unsigned int j; -#endif // NETDATA_COMMENTED - - if ((record_id = ipmi_monitoring_sel_read_record_id (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_record_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((record_type = ipmi_monitoring_sel_read_record_type (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_record_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((record_type_class = ipmi_monitoring_sel_read_record_type_class (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_record_type_class(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sel_state = ipmi_monitoring_sel_read_sel_state (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_sel_state(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - netdata_get_sel( - record_id - , record_type_class - , sel_state - ); - -#ifdef NETDATA_COMMENTED - if (sel_state == IPMI_MONITORING_STATE_NOMINAL) - sel_state_str = "Nominal"; - else if (sel_state == IPMI_MONITORING_STATE_WARNING) - sel_state_str = "Warning"; - else if (sel_state == IPMI_MONITORING_STATE_CRITICAL) - sel_state_str = "Critical"; - else - sel_state_str = "N/A"; - - printf ("%d, %d, %s", - record_id, - record_type, - sel_state_str); - - if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD - || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD) - { - - if (ipmi_monitoring_sel_read_timestamp (ctx, &timestamp) < 0) - { - error( "ipmi_monitoring_sel_read_timestamp(): %s", - 
ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - /* XXX: This should be converted to a nice date output using - * your favorite timestamp -> string conversion functions. - */ - printf (", %u", timestamp); - } - else - printf (", N/A"); - - if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD) - { - /* If you are integrating ipmimonitoring SEL into a monitoring application, - * you may wish to count the number of times a specific error occurred - * and report that to the monitoring application. - * - * In this particular case, you'll probably want to check out - * what sensor type each SEL event is reporting, the - * event offset type, and the specific event offset that occurred. - * - * See ipmi_monitoring_offsets.h for a list of event offsets - * and types. - */ - - if (!(sensor_name = ipmi_monitoring_sel_read_sensor_name (ctx))) - { - error( "ipmi_monitoring_sel_read_sensor_name(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sensor_type = ipmi_monitoring_sel_read_sensor_type (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_sensor_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((sensor_number = ipmi_monitoring_sel_read_sensor_number (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_sensor_number(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((event_direction = ipmi_monitoring_sel_read_event_direction (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_event_direction(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((event_type_code = ipmi_monitoring_sel_read_event_type_code (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_event_type_code(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if (ipmi_monitoring_sel_read_event_data (ctx, - &event_data1, - &event_data2, - &event_data3) < 0) - { - error( "ipmi_monitoring_sel_read_event_data(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((event_offset_type = ipmi_monitoring_sel_read_event_offset_type (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_event_offset_type(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if ((event_offset = ipmi_monitoring_sel_read_event_offset (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_event_offset(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if (!(event_offset_string = ipmi_monitoring_sel_read_event_offset_string (ctx))) - { - error( "ipmi_monitoring_sel_read_event_offset_string(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - if (!strlen (sensor_name)) - sensor_name = "N/A"; - - sensor_type_str = _get_sensor_type_string (sensor_type); - - if (event_direction == IPMI_MONITORING_SEL_EVENT_DIRECTION_ASSERTION) - event_direction_str = "Assertion"; - else - event_direction_str = "Deassertion"; - - printf (", %s, %s, %d, %s, %Xh, %Xh-%Xh-%Xh", - sensor_name, - sensor_type_str, - sensor_number, - event_direction_str, - event_type_code, - event_data1, - event_data2, - event_data3); - - if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN) - printf (", %Xh", event_offset); - else - printf (", N/A"); - - if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN) - printf (", %s", event_offset_string); - else - printf (", N/A"); - } - else if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD - || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_NON_TIMESTAMPED_OEM_RECORD) - { - 
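/* A sketch of the "nice date output" the XXX note above asks for, using
 * POSIX localtime_r() and strftime(); tbuf and tm_buf are illustrative
 * names, and the format mirrors the MM/DD/YYYY convention used for
 * date_begin/date_end:
 *
 *   char tbuf[32];
 *   time_t t = (time_t)timestamp;
 *   struct tm tm_buf;
 *   if (localtime_r(&t, &tm_buf) &&
 *       strftime(tbuf, sizeof(tbuf), "%m/%d/%Y %H:%M:%S", &tm_buf))
 *       printf(", %s", tbuf);
 */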
if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD) - { - if ((manufacturer_id = ipmi_monitoring_sel_read_manufacturer_id (ctx)) < 0) - { - error( "ipmi_monitoring_sel_read_manufacturer_id(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - printf (", Manufacturer ID = %Xh", manufacturer_id); - } - - if ((oem_data_len = ipmi_monitoring_sel_read_oem_data (ctx, oem_data, sizeof(oem_data))) < 0) - { - error( "ipmi_monitoring_sel_read_oem_data(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } - - printf (", OEM Data = "); - - for (j = 0; j < oem_data_len; j++) - printf ("%02Xh ", oem_data[j]); - } - else - printf (", N/A, N/A, N/A, N/A, N/A, N/A, N/A"); - - printf ("\n"); -#endif // NETDATA_COMMENTED - } - - rv = 0; - cleanup: - if (ctx) - ipmi_monitoring_ctx_destroy (ctx); - return (rv); -} - -// ---------------------------------------------------------------------------- -// MAIN PROGRAM FOR NETDATA PLUGIN - -int ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config) { - errno = 0; - - if (_ipmimonitoring_sensors(ipmi_config) < 0) return -1; - - if(netdata_do_sel) { - if(_ipmimonitoring_sel(ipmi_config) < 0) return -2; - } - - return 0; -} - -int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) { - int i, checks = 10; - unsigned long long total = 0; - - for(i = 0 ; i < checks ; i++) { - if(debug) fprintf(stderr, "freeipmi.plugin: checking data collection speed iteration %d of %d\n", i+1, checks); - - // measure the time a data collection needs - unsigned long long start = now_realtime_usec(); - if(ipmi_collect_data(ipmi_config) < 0) - fatal("freeipmi.plugin: data collection failed."); - - unsigned long long end = now_realtime_usec(); - - if(debug) fprintf(stderr, "freeipmi.plugin: data collection speed was %llu usec\n", end - start); - - // add it to our total - total += end - start; - - // wait the same time - // to avoid flooding the IPMI processor with requests - sleep_usec(end - start); - } - - // so, we assume it needed 2x the time - // we find the average in microseconds - // and we round-up to the closest second - - return (int)(( total * 2 / checks / 1000000 ) + 1); -} - -int main (int argc, char **argv) { - - // ------------------------------------------------------------------------ - // initialization of netdata plugin - - program_name = "freeipmi.plugin"; - - // disable syslog - error_log_syslog = 0; - - // set errors flood protection to 100 logs per hour - error_log_errors_per_period = 100; - error_log_throttle_period = 3600; - - - // ------------------------------------------------------------------------ - // parse command line parameters - - int i, freq = 0; - for(i = 1; i < argc ; i++) { - if(isdigit(*argv[i]) && !freq) { - int n = atoi(argv[i]); - if(n > 0 && n < 86400) { - freq = n; - continue; - } - } - else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) { - printf("freeipmi.plugin %s\n", VERSION); - exit(0); - } - else if(strcmp("debug", argv[i]) == 0) { - debug = 1; - continue; - } - else if(strcmp("sel", argv[i]) == 0) { - netdata_do_sel = 1; - continue; - } - else if(strcmp("no-sel", argv[i]) == 0) { - netdata_do_sel = 0; - continue; - } - else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) { - fprintf(stderr, - "\n" - " netdata freeipmi.plugin %s\n" - " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n" - " Released under 
GNU General Public License v3 or later.\n" - " All rights reserved.\n" - "\n" - " This program is a data collector plugin for netdata.\n" - "\n" - " Available command line options:\n" - "\n" - " SECONDS data collection frequency\n" - " minimum: %d\n" - "\n" - " debug enable verbose output\n" - " default: disabled\n" - "\n" - " sel\n" - " no-sel enable/disable SEL collection\n" - " default: %s\n" - "\n" - " hostname HOST\n" - " username USER\n" - " password PASS connect to remote IPMI host\n" - " default: local IPMI processor\n" - "\n" - " sdr-cache-dir PATH directory for SDR cache files\n" - " default: %s\n" - "\n" - " sensor-config-file FILE filename to read sensor configuration\n" - " default: %s\n" - "\n" - " ignore N1,N2,N3,... sensor IDs to ignore\n" - " default: none\n" - "\n" - " -v\n" - " -V\n" - " version print version and exit\n" - "\n" - " Linux kernel module for IPMI is CPU hungry.\n" - " On Linux run this to lower kipmiN CPU utilization:\n" - " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n" - "\n" - " or create: /etc/modprobe.d/ipmi.conf with these contents:\n" - " options ipmi_si kipmid_max_busy_us=10\n" - "\n" - " For more information:\n" - " https://github.com/firehol/netdata/wiki/monitoring-IPMI\n" - "\n" - , VERSION - , netdata_update_every - , netdata_do_sel?"enabled":"disabled" - , sdr_cache_directory?sdr_cache_directory:"system default" - , sensor_config_file?sensor_config_file:"system default" - ); - exit(1); - } - else if(i < argc && strcmp("hostname", argv[i]) == 0) { - hostname = strdupz(argv[++i]); - char *s = argv[i]; - // mask it to be hidden from the process tree - while(*s) *s++ = 'x'; - if(debug) fprintf(stderr, "freeipmi.plugin: hostname set to '%s'\n", hostname); - continue; - } - else if(i < argc && strcmp("username", argv[i]) == 0) { - username = strdupz(argv[++i]); - char *s = argv[i]; - // mask it to be hidden from the process tree - while(*s) *s++ = 'x'; - if(debug) fprintf(stderr, "freeipmi.plugin: username set to '%s'\n", username); - continue; - } - else if(i < argc && strcmp("password", argv[i]) == 0) { - password = strdupz(argv[++i]); - char *s = argv[i]; - // mask it to be hidden from the process tree - while(*s) *s++ = 'x'; - if(debug) fprintf(stderr, "freeipmi.plugin: password set to '%s'\n", password); - continue; - } - else if(i < argc && strcmp("sdr-cache-dir", argv[i]) == 0) { - sdr_cache_directory = argv[++i]; - if(debug) fprintf(stderr, "freeipmi.plugin: SDR cache directory set to '%s'\n", sdr_cache_directory); - continue; - } - else if(i < argc && strcmp("sensor-config-file", argv[i]) == 0) { - sensor_config_file = argv[++i]; - if(debug) fprintf(stderr, "freeipmi.plugin: sensor config file set to '%s'\n", sensor_config_file); - continue; - } - else if(i < argc && strcmp("ignore", argv[i]) == 0) { - excluded_record_ids_parse(argv[++i]); - continue; - } - - error("freeipmi.plugin: ignoring parameter '%s'", argv[i]); - } - - errno = 0; - - if(freq > netdata_update_every) - netdata_update_every = freq; - - else if(freq) - error("update frequency %d seconds is too small for IPMI. 
Using %d.", freq, netdata_update_every); - - - // ------------------------------------------------------------------------ - // initialize IPMI - - struct ipmi_monitoring_ipmi_config ipmi_config; - - if(debug) fprintf(stderr, "freeipmi.plugin: calling _init_ipmi_config()\n"); - - _init_ipmi_config(&ipmi_config); - - if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_monitoring_init()\n"); - - if(ipmi_monitoring_init(ipmimonitoring_init_flags, &errnum) < 0) - fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(errnum)); - - if(debug) fprintf(stderr, "freeipmi.plugin: detecting IPMI minimum update frequency...\n"); - freq = ipmi_detect_speed_secs(&ipmi_config); - if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated to %d seconds.\n", freq); - - if(freq > netdata_update_every) { - info("enforcing minimum data collection frequency, calculated to %d seconds.", freq); - netdata_update_every = freq; - } - - - // ------------------------------------------------------------------------ - // the main loop - - if(debug) fprintf(stderr, "freeipmi.plugin: starting data collection\n"); - - time_t started_t = now_monotonic_sec(); - - size_t iteration = 0; - usec_t step = netdata_update_every * USEC_PER_SEC; - - heartbeat_t hb; - heartbeat_init(&hb); - for(iteration = 0; 1 ; iteration++) { - usec_t dt = heartbeat_next(&hb, step); - - if(debug && iteration) - fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n" - , iteration - , dt - , netdata_sensors_collected - , netdata_sensors_updated - ); - - netdata_mark_as_not_updated(); - - if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_collect_data()\n"); - if(ipmi_collect_data(&ipmi_config) < 0) - fatal("data collection failed."); - - if(debug) fprintf(stderr, "freeipmi.plugin: calling send_metrics_to_netdata()\n"); - send_metrics_to_netdata(); - fflush(stdout); - - // restart check (14400 seconds) - if(now_monotonic_sec() - started_t > 14400) exit(0); - } -} - -#else // !HAVE_FREEIPMI - -int main(int argc, char **argv) { - fatal("freeipmi.plugin is not compiled."); -} - -#endif // !HAVE_FREEIPMI diff --git a/src/global_statistics.c b/src/global_statistics.c deleted file mode 100644 index 4f34e92df..000000000 --- a/src/global_statistics.c +++ /dev/null @@ -1,414 +0,0 @@ -#include "common.h" - -volatile struct global_statistics global_statistics = { - .connected_clients = 0, - .web_requests = 0, - .web_usec = 0, - .bytes_received = 0, - .bytes_sent = 0, - .content_size = 0, - .compressed_content_size = 0, - .web_client_count = 1 -}; - -netdata_mutex_t global_statistics_mutex = NETDATA_MUTEX_INITIALIZER; - -inline void global_statistics_lock(void) { - netdata_mutex_lock(&global_statistics_mutex); -} - -inline void global_statistics_unlock(void) { - netdata_mutex_unlock(&global_statistics_mutex); -} - -void finished_web_request_statistics(uint64_t dt, - uint64_t bytes_received, - uint64_t bytes_sent, - uint64_t content_size, - uint64_t compressed_content_size) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - uint64_t old_web_usec_max = global_statistics.web_usec_max; - while(dt > old_web_usec_max) - __atomic_compare_exchange(&global_statistics.web_usec_max, &old_web_usec_max, &dt, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); - - __atomic_fetch_add(&global_statistics.web_requests, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&global_statistics.web_usec, dt, __ATOMIC_SEQ_CST); - 
__atomic_fetch_add(&global_statistics.bytes_received, bytes_received, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&global_statistics.bytes_sent, bytes_sent, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&global_statistics.content_size, content_size, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&global_statistics.compressed_content_size, compressed_content_size, __ATOMIC_SEQ_CST); -#else -#warning NOT using atomic operations - using locks for global statistics - if (web_server_is_multithreaded) - global_statistics_lock(); - - if (dt > global_statistics.web_usec_max) - global_statistics.web_usec_max = dt; - - global_statistics.web_requests++; - global_statistics.web_usec += dt; - global_statistics.bytes_received += bytes_received; - global_statistics.bytes_sent += bytes_sent; - global_statistics.content_size += content_size; - global_statistics.compressed_content_size += compressed_content_size; - - if (web_server_is_multithreaded) - global_statistics_unlock(); -#endif -} - -uint64_t web_client_connected(void) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_add(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST); - uint64_t id = __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_SEQ_CST); -#else - if (web_server_is_multithreaded) - global_statistics_lock(); - - global_statistics.connected_clients++; - uint64_t id = global_statistics.web_client_count++; - - if (web_server_is_multithreaded) - global_statistics_unlock(); -#endif - - return id; -} - -void web_client_disconnected(void) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - __atomic_fetch_sub(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST); -#else - if (web_server_is_multithreaded) - global_statistics_lock(); - - global_statistics.connected_clients--; - - if (web_server_is_multithreaded) - global_statistics_unlock(); -#endif -} - - -inline void global_statistics_copy(struct global_statistics *gs, uint8_t options) { -#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) - gs->connected_clients = __atomic_fetch_add(&global_statistics.connected_clients, 0, __ATOMIC_SEQ_CST); - gs->web_requests = __atomic_fetch_add(&global_statistics.web_requests, 0, __ATOMIC_SEQ_CST); - gs->web_usec = __atomic_fetch_add(&global_statistics.web_usec, 0, __ATOMIC_SEQ_CST); - gs->web_usec_max = __atomic_fetch_add(&global_statistics.web_usec_max, 0, __ATOMIC_SEQ_CST); - gs->bytes_received = __atomic_fetch_add(&global_statistics.bytes_received, 0, __ATOMIC_SEQ_CST); - gs->bytes_sent = __atomic_fetch_add(&global_statistics.bytes_sent, 0, __ATOMIC_SEQ_CST); - gs->content_size = __atomic_fetch_add(&global_statistics.content_size, 0, __ATOMIC_SEQ_CST); - gs->compressed_content_size = __atomic_fetch_add(&global_statistics.compressed_content_size, 0, __ATOMIC_SEQ_CST); - gs->web_client_count = __atomic_fetch_add(&global_statistics.web_client_count, 0, __ATOMIC_SEQ_CST); - - if(options & GLOBAL_STATS_RESET_WEB_USEC_MAX) { - uint64_t n = 0; - __atomic_compare_exchange(&global_statistics.web_usec_max, &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST, - __ATOMIC_SEQ_CST); - } -#else - global_statistics_lock(); - - memcpy(gs, (const void *)&global_statistics, sizeof(struct global_statistics)); - - if (options & GLOBAL_STATS_RESET_WEB_USEC_MAX) - global_statistics.web_usec_max = 0; - - global_statistics_unlock(); -#endif -} - -void global_statistics_charts(void) { - static unsigned long long old_web_requests = 0, - old_web_usec = 0, - old_content_size = 0, - 
old_compressed_content_size = 0; - - static collected_number compression_ratio = -1, - average_response_time = -1; - - struct global_statistics gs; - struct rusage me, thread; - - global_statistics_copy(&gs, GLOBAL_STATS_RESET_WEB_USEC_MAX); - getrusage(RUSAGE_THREAD, &thread); - getrusage(RUSAGE_SELF, &me); - - { - static RRDSET *st_cpu_thread = NULL; - static RRDDIM *rd_cpu_thread_user = NULL, - *rd_cpu_thread_system = NULL; - -#ifdef __FreeBSD__ - if (unlikely(!st_cpu_thread)) { - st_cpu_thread = rrdset_create_localhost( - "netdata" - , "plugin_freebsd_cpu" - , NULL - , "freebsd" - , NULL - , "NetData FreeBSD Plugin CPU usage" - , "milliseconds/s" - , "netdata" - , "stats" - , 132000 - , localhost->rrd_update_every - , RRDSET_TYPE_STACKED - ); -#else - if (unlikely(!st_cpu_thread)) { - st_cpu_thread = rrdset_create_localhost( - "netdata" - , "plugin_proc_cpu" - , NULL - , "proc" - , NULL - , "NetData Proc Plugin CPU usage" - , "milliseconds/s" - , "netdata" - , "stats" - , 132000 - , localhost->rrd_update_every - , RRDSET_TYPE_STACKED - ); -#endif - - rd_cpu_thread_user = rrddim_add(st_cpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_cpu_thread_system = rrddim_add(st_cpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_cpu_thread); - - rrddim_set_by_pointer(st_cpu_thread, rd_cpu_thread_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); - rrddim_set_by_pointer(st_cpu_thread, rd_cpu_thread_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); - rrdset_done(st_cpu_thread); - } - - // ---------------------------------------------------------------- - - { - static RRDSET *st_cpu = NULL; - static RRDDIM *rd_cpu_user = NULL, - *rd_cpu_system = NULL; - - if (unlikely(!st_cpu)) { - st_cpu = rrdset_create_localhost( - "netdata" - , "server_cpu" - , NULL - , "netdata" - , NULL - , "NetData CPU usage" - , "milliseconds/s" - , "netdata" - , "stats" - , 130000 - , localhost->rrd_update_every - , RRDSET_TYPE_STACKED - ); - - rd_cpu_user = rrddim_add(st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_cpu_system = rrddim_add(st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_cpu); - - rrddim_set_by_pointer(st_cpu, rd_cpu_user, me.ru_utime.tv_sec * 1000000ULL + me.ru_utime.tv_usec); - rrddim_set_by_pointer(st_cpu, rd_cpu_system, me.ru_stime.tv_sec * 1000000ULL + me.ru_stime.tv_usec); - rrdset_done(st_cpu); - } - - // ---------------------------------------------------------------- - - { - static RRDSET *st_clients = NULL; - static RRDDIM *rd_clients = NULL; - - if (unlikely(!st_clients)) { - st_clients = rrdset_create_localhost( - "netdata" - , "clients" - , NULL - , "netdata" - , NULL - , "NetData Web Clients" - , "connected clients" - , "netdata" - , "stats" - , 130200 - , localhost->rrd_update_every - , RRDSET_TYPE_LINE - ); - - rd_clients = rrddim_add(st_clients, "clients", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_clients); - - rrddim_set_by_pointer(st_clients, rd_clients, gs.connected_clients); - rrdset_done(st_clients); - } - - // ---------------------------------------------------------------- - - { - static RRDSET *st_reqs = NULL; - static RRDDIM *rd_requests = NULL; - - if (unlikely(!st_reqs)) { - st_reqs = rrdset_create_localhost( - "netdata" - , "requests" - , NULL - , "netdata" - , NULL - , "NetData Web Requests" - , "requests/s" - , "netdata" - , "stats" - , 130300 - , localhost->rrd_update_every - , RRDSET_TYPE_LINE - ); - 
- rd_requests = rrddim_add(st_reqs, "requests", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_reqs); - - rrddim_set_by_pointer(st_reqs, rd_requests, (collected_number) gs.web_requests); - rrdset_done(st_reqs); - } - - // ---------------------------------------------------------------- - - { - static RRDSET *st_bytes = NULL; - static RRDDIM *rd_in = NULL, - *rd_out = NULL; - - if (unlikely(!st_bytes)) { - st_bytes = rrdset_create_localhost( - "netdata" - , "net" - , NULL - , "netdata" - , NULL - , "NetData Network Traffic" - , "kilobits/s" - , "netdata" - , "stats" - , 130000 - , localhost->rrd_update_every - , RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st_bytes, "in", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_bytes, "out", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_bytes); - - rrddim_set_by_pointer(st_bytes, rd_in, (collected_number) gs.bytes_received); - rrddim_set_by_pointer(st_bytes, rd_out, (collected_number) gs.bytes_sent); - rrdset_done(st_bytes); - } - - // ---------------------------------------------------------------- - - { - static RRDSET *st_duration = NULL; - static RRDDIM *rd_average = NULL, - *rd_max = NULL; - - if (unlikely(!st_duration)) { - st_duration = rrdset_create_localhost( - "netdata" - , "response_time" - , NULL - , "netdata" - , NULL - , "NetData API Response Time" - , "ms/request" - , "netdata" - , "stats" - , 130400 - , localhost->rrd_update_every - , RRDSET_TYPE_LINE - ); - - rd_average = rrddim_add(st_duration, "average", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_max = rrddim_add(st_duration, "max", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_duration); - - uint64_t gweb_usec = gs.web_usec; - uint64_t gweb_requests = gs.web_requests; - - uint64_t web_usec = (gweb_usec >= old_web_usec) ? gweb_usec - old_web_usec : 0; - uint64_t web_requests = (gweb_requests >= old_web_requests) ? 
gweb_requests - old_web_requests : 0; - - old_web_usec = gweb_usec; - old_web_requests = gweb_requests; - - if (web_requests) - average_response_time = (collected_number) (web_usec / web_requests); - - if (unlikely(average_response_time != -1)) - rrddim_set_by_pointer(st_duration, rd_average, average_response_time); - else - rrddim_set_by_pointer(st_duration, rd_average, 0); - - rrddim_set_by_pointer(st_duration, rd_max, ((gs.web_usec_max)?(collected_number)gs.web_usec_max:average_response_time)); - rrdset_done(st_duration); - } - - // ---------------------------------------------------------------- - - { - static RRDSET *st_compression = NULL; - static RRDDIM *rd_savings = NULL; - - if (unlikely(!st_compression)) { - st_compression = rrdset_create_localhost( - "netdata" - , "compression_ratio" - , NULL - , "netdata" - , NULL - , "NetData API Responses Compression Savings Ratio" - , "percentage" - , "netdata" - , "stats" - , 130500 - , localhost->rrd_update_every - , RRDSET_TYPE_LINE - ); - - rd_savings = rrddim_add(st_compression, "savings", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_compression); - - // since we don't lock here to read the global statistics - // read the smaller value first - unsigned long long gcompressed_content_size = gs.compressed_content_size; - unsigned long long gcontent_size = gs.content_size; - - unsigned long long compressed_content_size = gcompressed_content_size - old_compressed_content_size; - unsigned long long content_size = gcontent_size - old_content_size; - - old_compressed_content_size = gcompressed_content_size; - old_content_size = gcontent_size; - - if (content_size && content_size >= compressed_content_size) - compression_ratio = ((content_size - compressed_content_size) * 100 * 1000) / content_size; - - if (compression_ratio != -1) - rrddim_set_by_pointer(st_compression, rd_savings, compression_ratio); - - rrdset_done(st_compression); - } -} diff --git a/src/global_statistics.h b/src/global_statistics.h deleted file mode 100644 index 62fee6e36..000000000 --- a/src/global_statistics.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef NETDATA_GLOBAL_STATISTICS_H -#define NETDATA_GLOBAL_STATISTICS_H 1 - -// ---------------------------------------------------------------------------- -// global statistics - -struct global_statistics { - volatile uint16_t connected_clients; - - volatile uint64_t web_requests; - volatile uint64_t web_usec; - volatile uint64_t web_usec_max; - volatile uint64_t bytes_received; - volatile uint64_t bytes_sent; - volatile uint64_t content_size; - volatile uint64_t compressed_content_size; - - volatile uint64_t web_client_count; -}; - -extern volatile struct global_statistics global_statistics; - -extern void global_statistics_lock(void); -extern void global_statistics_unlock(void); -extern void finished_web_request_statistics(uint64_t dt, - uint64_t bytes_received, - uint64_t bytes_sent, - uint64_t content_size, - uint64_t compressed_content_size); - -extern uint64_t web_client_connected(void); -extern void web_client_disconnected(void); - -#define GLOBAL_STATS_RESET_WEB_USEC_MAX 0x01 -extern void global_statistics_copy(struct global_statistics *gs, uint8_t options); -extern void global_statistics_charts(void); - -#endif /* NETDATA_GLOBAL_STATISTICS_H */ diff --git a/src/health.c b/src/health.c deleted file mode 100644 index 04e04f089..000000000 --- a/src/health.c +++ /dev/null @@ -1,737 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -int default_health_enabled = 1; - -// 
---------------------------------------------------------------------------- -// health initialization - -inline char *health_config_dir(void) { - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_config_dir); - return config_get(CONFIG_SECTION_HEALTH, "health configuration directory", buffer); -} - -void health_init(void) { - debug(D_HEALTH, "Health configuration initializing"); - - if(!(default_health_enabled = config_get_boolean(CONFIG_SECTION_HEALTH, "enabled", 1))) { - debug(D_HEALTH, "Health is disabled."); - return; - } -} - -// ---------------------------------------------------------------------------- -// re-load health configuration - -void health_reload_host(RRDHOST *host) { - if(unlikely(!host->health_enabled)) - return; - - char *path = health_config_dir(); - - // free all running alarms - rrdhost_wrlock(host); - - while(host->templates) - rrdcalctemplate_unlink_and_free(host, host->templates); - - while(host->alarms) - rrdcalc_unlink_and_free(host, host->alarms); - - rrdhost_unlock(host); - - // invalidate all previous entries in the alarm log - ALARM_ENTRY *t; - for(t = host->health_log.alarms ; t ; t = t->next) { - if(t->new_status != RRDCALC_STATUS_REMOVED) - t->flags |= HEALTH_ENTRY_FLAG_UPDATED; - } - - rrdhost_rdlock(host); - // reset all thresholds to all charts - RRDSET *st; - rrdset_foreach_read(st, host) { - st->green = NAN; - st->red = NAN; - } - rrdhost_unlock(host); - - // load the new alarms - rrdhost_wrlock(host); - health_readdir(host, path); - - // link the loaded alarms to their charts - rrdset_foreach_write(st, host) { - rrdsetcalc_link_matching(st); - rrdcalctemplate_link_matching(st); - } - - rrdhost_unlock(host); -} - -void health_reload(void) { - - rrd_rdlock(); - - RRDHOST *host; - rrdhost_foreach_read(host) - health_reload_host(host); - - rrd_unlock(); -} - -// ---------------------------------------------------------------------------- -// health main thread and friends - -static inline RRDCALC_STATUS rrdcalc_value2status(calculated_number n) { - if(isnan(n) || isinf(n)) return RRDCALC_STATUS_UNDEFINED; - if(n) return RRDCALC_STATUS_RAISED; - return RRDCALC_STATUS_CLEAR; -} - -#define ALARM_EXEC_COMMAND_LENGTH 8192 - -static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { - ae->flags |= HEALTH_ENTRY_FLAG_PROCESSED; - - if(unlikely(ae->new_status < RRDCALC_STATUS_CLEAR)) { - // do not send notifications for internal statuses - debug(D_HEALTH, "Health not sending notification for alarm '%s.%s' status %s (internal statuses)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); - goto done; - } - - if(unlikely(ae->new_status <= RRDCALC_STATUS_CLEAR && (ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) { - // do not send notifications for disabled statuses - debug(D_HEALTH, "Health not sending notification for alarm '%s.%s' status %s (it has no-clear-notification enabled)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); - // mark it as run, so that we will send the same alarm if it happens again - goto done; - } - - // find the previous notification for the same alarm - // which we have run the exec script - // exception: alarms with HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION set - if(likely(!(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) { - uint32_t id = ae->alarm_id; - ALARM_ENTRY *t; - for(t = ae->next; t ; t = t->next) { - if(t->alarm_id == id && t->flags & HEALTH_ENTRY_FLAG_EXEC_RUN) - break; - } - - if(likely(t)) { - // we have executed this alarm 
notification in the past - if(t && t->new_status == ae->new_status) { - // don't send the notification for the same status again - debug(D_HEALTH, "Health not sending notification again for alarm '%s.%s' status %s", ae->chart, ae->name - , rrdcalc_status2string(ae->new_status)); - goto done; - } - } - else { - // we have not executed this alarm notification in the past - // so, don't send CLEAR notifications - if(unlikely(ae->new_status == RRDCALC_STATUS_CLEAR)) { - debug(D_HEALTH, "Health not sending notification for first initialization of alarm '%s.%s' status %s" - , ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); - goto done; - } - } - } - - static char command_to_run[ALARM_EXEC_COMMAND_LENGTH + 1]; - pid_t command_pid; - - const char *exec = (ae->exec) ? ae->exec : host->health_default_exec; - const char *recipient = (ae->recipient) ? ae->recipient : host->health_default_recipient; - - snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s'", - exec, - recipient, - host->registry_hostname, - ae->unique_id, - ae->alarm_id, - ae->alarm_event_id, - (unsigned long)ae->when, - ae->name, - ae->chart?ae->chart:"NOCHART", - ae->family?ae->family:"NOFAMILY", - rrdcalc_status2string(ae->new_status), - rrdcalc_status2string(ae->old_status), - ae->new_value, - ae->old_value, - ae->source?ae->source:"UNKNOWN", - (uint32_t)ae->duration, - (uint32_t)ae->non_clear_duration, - ae->units?ae->units:"", - ae->info?ae->info:"", - ae->new_value_string, - ae->old_value_string - ); - - ae->flags |= HEALTH_ENTRY_FLAG_EXEC_RUN; - ae->exec_run_timestamp = now_realtime_sec(); - - debug(D_HEALTH, "executing command '%s'", command_to_run); - FILE *fp = mypopen(command_to_run, &command_pid); - if(!fp) { - error("HEALTH: Cannot popen(\"%s\", \"r\").", command_to_run); - goto done; - } - debug(D_HEALTH, "HEALTH reading from command"); - char *s = fgets(command_to_run, FILENAME_MAX, fp); - (void)s; - ae->exec_code = mypclose(fp, command_pid); - debug(D_HEALTH, "done executing command - returned with code %d", ae->exec_code); - - if(ae->exec_code != 0) - ae->flags |= HEALTH_ENTRY_FLAG_EXEC_FAILED; - -done: - health_alarm_log_save(host, ae); -} - -static inline void health_process_notifications(RRDHOST *host, ALARM_ENTRY *ae) { - debug(D_HEALTH, "Health alarm '%s.%s' = " CALCULATED_NUMBER_FORMAT_AUTO " - changed status from %s to %s", - ae->chart?ae->chart:"NOCHART", ae->name, - ae->new_value, - rrdcalc_status2string(ae->old_status), - rrdcalc_status2string(ae->new_status) - ); - - health_alarm_execute(host, ae); -} - -static inline void health_alarm_log_process(RRDHOST *host) { - uint32_t first_waiting = (host->health_log.alarms)?host->health_log.alarms->unique_id:0; - time_t now = now_realtime_sec(); - - netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); - - ALARM_ENTRY *ae; - for(ae = host->health_log.alarms; ae && ae->unique_id >= host->health_last_processed_id ; ae = ae->next) { - if(unlikely( - !(ae->flags & HEALTH_ENTRY_FLAG_PROCESSED) && - !(ae->flags & HEALTH_ENTRY_FLAG_UPDATED) - )) { - - if(unlikely(ae->unique_id < first_waiting)) - first_waiting = ae->unique_id; - - if(likely(now >= ae->delay_up_to_timestamp)) - health_process_notifications(host, ae); - } - } - - // remember this for the next iteration - host->health_last_processed_id = first_waiting; - - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); - 
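    // the alarm log is kept newest-first, so the trimming below keeps
    // the first max * 2 / 3 entries and frees everything after them;
    // e.g. with a configured max of 1500 entries, the log is cut back
    // to the newest 1000 once it overflows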
if(host->health_log.count <= host->health_log.max) - return; - - // cleanup excess entries in the log - netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock); - - ALARM_ENTRY *last = NULL; - unsigned int count = host->health_log.max * 2 / 3; - for(ae = host->health_log.alarms; ae && count ; count--, last = ae, ae = ae->next) ; - - if(ae && last && last->next == ae) - last->next = NULL; - else - ae = NULL; - - while(ae) { - debug(D_HEALTH, "Health removing alarm log entry with id: %u", ae->unique_id); - - ALARM_ENTRY *t = ae->next; - - health_alarm_log_free_one_nochecks_nounlink(ae); - - ae = t; - host->health_log.count--; - } - - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); -} - -static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run) { - if(unlikely(!rc->rrdset)) { - debug(D_HEALTH, "Health not running alarm '%s.%s'. It is not linked to a chart.", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - if(unlikely(rc->next_update > now)) { - if (unlikely(*next_run > rc->next_update)) { - // update the next_run time of the main loop - // to run this alarm precisely the time required - *next_run = rc->next_update; - } - - debug(D_HEALTH, "Health not examining alarm '%s.%s' yet (will do in %d secs).", rc->chart?rc->chart:"NOCHART", rc->name, (int) (rc->next_update - now)); - return 0; - } - - if(unlikely(!rc->update_every)) { - debug(D_HEALTH, "Health not running alarm '%s.%s'. It does not have an update frequency", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - if(unlikely(rrdset_flag_check(rc->rrdset, RRDSET_FLAG_OBSOLETE))) { - debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart has been marked as obsolete", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - if(unlikely(!rrdset_flag_check(rc->rrdset, RRDSET_FLAG_ENABLED))) { - debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart is not enabled", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - if(unlikely(!rc->rrdset->last_collected_time.tv_sec || rc->rrdset->counter_done < 2)) { - debug(D_HEALTH, "Health not running alarm '%s.%s'. Chart is not fully collected yet.", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - int update_every = rc->rrdset->update_every; - time_t first = rrdset_first_entry_t(rc->rrdset); - time_t last = rrdset_last_entry_t(rc->rrdset); - - if(unlikely(now + update_every < first /* || now - update_every > last */)) { - debug(D_HEALTH - , "Health not examining alarm '%s.%s' yet (wanted time is out of bounds - we need %lu but got %lu - %lu)." - , rc->chart ? rc->chart : "NOCHART", rc->name, (unsigned long) now, (unsigned long) first - , (unsigned long) last); - return 0; - } - - if(RRDCALC_HAS_DB_LOOKUP(rc)) { - time_t needed = now + rc->before + rc->after; - - if(needed + update_every < first || needed - update_every > last) { - debug(D_HEALTH - , "Health not examining alarm '%s.%s' yet (not enough data yet - we need %lu but got %lu - %lu)." - , rc->chart ? 
rc->chart : "NOCHART", rc->name, (unsigned long) needed, (unsigned long) first - , (unsigned long) last); - return 0; - } - } - - return 1; -} - -static inline int check_if_resumed_from_suspention(void) { - static usec_t last_realtime = 0, last_monotonic = 0; - usec_t realtime = now_realtime_usec(), monotonic = now_monotonic_usec(); - int ret = 0; - - // detect if monotonic and realtime have twice the difference - // in which case we assume the system was just waken from hibernation - - if(last_realtime && last_monotonic && realtime - last_realtime > 2 * (monotonic - last_monotonic)) - ret = 1; - - last_realtime = realtime; - last_monotonic = monotonic; - - return ret; -} - -static void health_main_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *health_main(void *ptr) { - netdata_thread_cleanup_push(health_main_cleanup, ptr); - - int min_run_every = (int)config_get_number(CONFIG_SECTION_HEALTH, "run at least every seconds", 10); - if(min_run_every < 1) min_run_every = 1; - - time_t now = now_realtime_sec(); - time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60); - - unsigned int loop = 0; - while(!netdata_exit) { - loop++; - debug(D_HEALTH, "Health monitoring iteration no %u started", loop); - - int runnable = 0, apply_hibernation_delay = 0; - time_t next_run = now + min_run_every; - RRDCALC *rc; - - if(unlikely(check_if_resumed_from_suspention())) { - apply_hibernation_delay = 1; - - info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension." - , hibernation_delay - ); - } - - rrd_rdlock(); - - RRDHOST *host; - rrdhost_foreach_read(host) { - if(unlikely(!host->health_enabled)) - continue; - - if(unlikely(apply_hibernation_delay)) { - - info("Postponing health checks for %ld seconds, on host '%s'." - , hibernation_delay - , host->hostname - ); - - host->health_delay_up_to = now + hibernation_delay; - } - - if(unlikely(host->health_delay_up_to)) { - if(unlikely(now < host->health_delay_up_to)) - continue; - - info("Resuming health checks on host '%s'.", host->hostname); - host->health_delay_up_to = 0; - } - - rrdhost_rdlock(host); - - // the first loop is to lookup values from the db - for(rc = host->alarms; rc; rc = rc->next) { - if(unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) { - if(unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE)) - rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE; - continue; - } - - runnable++; - rc->old_value = rc->value; - rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE; - - // ------------------------------------------------------------ - // if there is database lookup, do it - - if(unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) { - /* time_t old_db_timestamp = rc->db_before; */ - int value_is_null = 0; - - int ret = rrdset2value_api_v1(rc->rrdset - , NULL - , &rc->value - , rc->dimensions - , 1 - , rc->after - , rc->before - , rc->group - , 0 - , rc->options - , &rc->db_after - , &rc->db_before - , &value_is_null - ); - - if(unlikely(ret != 200)) { - // database lookup failed - rc->value = NAN; - rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': database lookup returned error %d" - , host->hostname - , rc->chart ? 
rc->chart : "NOCHART" - , rc->name - , ret - ); - } - else - rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR; - - /* - RRDCALC_FLAG_DB_STALE not currently used - if (unlikely(old_db_timestamp == rc->db_before)) { - // database is stale - - debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); - - if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) { - rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE; - error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); - } - } - else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE)) - rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE; - */ - - if(unlikely(value_is_null)) { - // collected value is null - rc->value = NAN; - rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - ); - } - else - rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': database lookup gave value " CALCULATED_NUMBER_FORMAT - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->value - ); - } - - // ------------------------------------------------------------ - // if there is calculation expression, run it - - if(unlikely(rc->calculation)) { - if(unlikely(!expression_evaluate(rc->calculation))) { - // calculation failed - rc->value = NAN; - rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->calculation->parsed_as - , buffer_tostring(rc->calculation->error_msg) - ); - } - else { - rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR; - - debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->calculation->parsed_as - , rc->calculation->result - , buffer_tostring(rc->calculation->error_msg) - , rc->source - ); - - rc->value = rc->calculation->result; - } - } - } - rrdhost_unlock(host); - - if(unlikely(runnable && !netdata_exit)) { - rrdhost_rdlock(host); - - for(rc = host->alarms; rc; rc = rc->next) { - if(unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))) - continue; - - RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED; - RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED; - - // -------------------------------------------------------- - // check the warning expression - - if(likely(rc->warning)) { - if(unlikely(!expression_evaluate(rc->warning))) { - // calculation failed - rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , buffer_tostring(rc->warning->error_msg) - ); - } - else { - rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR; - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': warning expression gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" - , host->hostname - , rc->chart ? 
rc->chart : "NOCHART" - , rc->name - , rc->warning->result - , buffer_tostring(rc->warning->error_msg) - , rc->source - ); - warning_status = rrdcalc_value2status(rc->warning->result); - } - } - - // -------------------------------------------------------- - // check the critical expression - - if(likely(rc->critical)) { - if(unlikely(!expression_evaluate(rc->critical))) { - // calculation failed - rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , buffer_tostring(rc->critical->error_msg) - ); - } - else { - rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR; - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': critical expression gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->critical->result - , buffer_tostring(rc->critical->error_msg) - , rc->source - ); - critical_status = rrdcalc_value2status(rc->critical->result); - } - } - - // -------------------------------------------------------- - // decide the final alarm status - - RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED; - - switch(warning_status) { - case RRDCALC_STATUS_CLEAR: - status = RRDCALC_STATUS_CLEAR; - break; - - case RRDCALC_STATUS_RAISED: - status = RRDCALC_STATUS_WARNING; - break; - - default: - break; - } - - switch(critical_status) { - case RRDCALC_STATUS_CLEAR: - if(status == RRDCALC_STATUS_UNDEFINED) - status = RRDCALC_STATUS_CLEAR; - break; - - case RRDCALC_STATUS_RAISED: - status = RRDCALC_STATUS_CRITICAL; - break; - - default: - break; - } - - // -------------------------------------------------------- - // check if the new status and the old differ - - if(status != rc->status) { - int delay = 0; - - // apply trigger hysteresis - - if(now > rc->delay_up_to_timestamp) { - rc->delay_up_current = rc->delay_up_duration; - rc->delay_down_current = rc->delay_down_duration; - rc->delay_last = 0; - rc->delay_up_to_timestamp = 0; - } - else { - rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier); - if(rc->delay_up_current > rc->delay_max_duration) - rc->delay_up_current = rc->delay_max_duration; - - rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier); - if(rc->delay_down_current > rc->delay_max_duration) - rc->delay_down_current = rc->delay_max_duration; - } - - if(status > rc->status) - delay = rc->delay_up_current; - else - delay = rc->delay_down_current; - - // COMMENTED: because we do need to send raising alarms - // if(now + delay < rc->delay_up_to_timestamp) - // delay = (int)(rc->delay_up_to_timestamp - now); - - rc->delay_last = delay; - rc->delay_up_to_timestamp = now + delay; - - // add the alarm into the log - - health_alarm_log( - host - , rc->id - , rc->next_event_id++ - , now - , rc->name - , rc->rrdset->id - , rc->rrdset->family - , rc->exec - , rc->recipient - , now - rc->last_status_change - , rc->old_value - , rc->value - , rc->status - , status - , rc->source - , rc->units - , rc->info - , rc->delay_last - , (rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION) ? 
HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0 - ); - - rc->last_status_change = now; - rc->status = status; - } - - rc->last_updated = now; - rc->next_update = now + rc->update_every; - - if(next_run > rc->next_update) - next_run = rc->next_update; - } - - rrdhost_unlock(host); - } - - if(unlikely(netdata_exit)) - break; - - // execute notifications - // and cleanup - health_alarm_log_process(host); - - if(unlikely(netdata_exit)) - break; - - } /* rrdhost_foreach */ - - rrd_unlock(); - - if(unlikely(netdata_exit)) - break; - - now = now_realtime_sec(); - if(now < next_run) { - debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration in %d secs", loop, (int) (next_run - now)); - sleep_usec(USEC_PER_SEC * (usec_t) (next_run - now)); - now = now_realtime_sec(); - } - else - debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration now", loop); - - } // forever - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/health.h b/src/health.h deleted file mode 100644 index f41571803..000000000 --- a/src/health.h +++ /dev/null @@ -1,438 +0,0 @@ -#ifndef NETDATA_HEALTH_H -#define NETDATA_HEALTH_H - -extern int default_health_enabled; - -extern int rrdvar_compare(void *a, void *b); - -typedef enum rrdvar_type { - RRDVAR_TYPE_CALCULATED = 1, - RRDVAR_TYPE_TIME_T = 2, - RRDVAR_TYPE_COLLECTED = 3, - RRDVAR_TYPE_TOTAL = 4, - RRDVAR_TYPE_INT = 5, - RRDVAR_TYPE_CALCULATED_ALLOCATED = 6 // a custom variable, allocated on purpose (ie. not inherited from charts) - // used only for custom host global variables -} RRDVAR_TYPE; - -// the variables as stored in the variables indexes -// there are 3 indexes: -// 1. at each chart (RRDSET.rrdvar_root_index) -// 2. at each context (RRDFAMILY.rrdvar_root_index) -// 3. at each host (RRDHOST.rrdvar_root_index) -typedef struct rrdvar { - avl avl; - - char *name; - uint32_t hash; - - RRDVAR_TYPE type; - void *value; - - time_t last_updated; -} RRDVAR; - -// variables linked to charts -// We link variables to point to the values that are already -// calculated / processed by the normal data collection process -// This means, there will be no speed penalty for using -// these variables - -typedef enum rrdvar_options { - RRDVAR_OPTION_DEFAULT = (0 << 0), - RRDVAR_OPTION_ALLOCATED = (1 << 0) // the value ptr is allocated (not a reference) - // future use -} RRDVAR_OPTIONS; - -typedef struct rrdsetvar { - char *variable; // variable name - uint32_t hash; // variable name hash - - char *key_fullid; // chart type.chart id.variable - char *key_fullname; // chart type.chart name.variable - - RRDVAR_TYPE type; - void *value; - - RRDVAR_OPTIONS options; - - RRDVAR *var_local; - RRDVAR *var_family; - RRDVAR *var_host; - RRDVAR *var_family_name; - RRDVAR *var_host_name; - - struct rrdset *rrdset; - - struct rrdsetvar *next; -} RRDSETVAR; - - -// variables linked to individual dimensions -// We link variables to point the values that are already -// calculated / processed by the normal data collection process -// This means, there will be no speed penalty for using -// these variables -typedef struct rrddimvar { - char *prefix; - char *suffix; - - char *key_id; // dimension id - char *key_name; // dimension name - char *key_contextid; // context + dimension id - char *key_contextname; // context + dimension name - char *key_fullidid; // chart type.chart id + dimension id - char *key_fullidname; // chart type.chart id + dimension name - char *key_fullnameid; // chart type.chart name + dimension id - char *key_fullnamename; // chart type.chart 
name + dimension name - - RRDVAR_TYPE type; - void *value; - - RRDVAR_OPTIONS options; - - RRDVAR *var_local_id; - RRDVAR *var_local_name; - - RRDVAR *var_family_id; - RRDVAR *var_family_name; - RRDVAR *var_family_contextid; - RRDVAR *var_family_contextname; - - RRDVAR *var_host_chartidid; - RRDVAR *var_host_chartidname; - RRDVAR *var_host_chartnameid; - RRDVAR *var_host_chartnamename; - - struct rrddim *rrddim; - - struct rrddimvar *next; -} RRDDIMVAR; - -// calculated variables (defined in health configuration) -// These aggregate time-series data at fixed intervals -// (defined in their update_every member below) -// They increase the overhead of netdata. -// -// These calculations are allocated and linked (->next) -// under RRDHOST. -// Then are also linked to RRDSET (of course only when the -// chart is found, via ->rrdset_next and ->rrdset_prev). -// This double-linked list is maintained sorted at all times -// having as RRDSET.calculations the RRDCALC to be processed -// next. - -#define RRDCALC_FLAG_DB_ERROR 0x00000001 -#define RRDCALC_FLAG_DB_NAN 0x00000002 -/* #define RRDCALC_FLAG_DB_STALE 0x00000004 */ -#define RRDCALC_FLAG_CALC_ERROR 0x00000008 -#define RRDCALC_FLAG_WARN_ERROR 0x00000010 -#define RRDCALC_FLAG_CRIT_ERROR 0x00000020 -#define RRDCALC_FLAG_RUNNABLE 0x00000040 -#define RRDCALC_FLAG_NO_CLEAR_NOTIFICATION 0x80000000 - -typedef struct rrdcalc { - uint32_t id; // the unique id of this alarm - uint32_t next_event_id; // the next event id that will be used for this alarm - - char *name; // the name of this alarm - uint32_t hash; - - char *exec; // the command to execute when this alarm switches state - char *recipient; // the recipient of the alarm (the first parameter to exec) - - char *chart; // the chart id this should be linked to - uint32_t hash_chart; - - char *source; // the source of this alarm - char *units; // the units of the alarm - char *info; // a short description of the alarm - - int update_every; // update frequency for the alarm - - // the red and green threshold of this alarm (to be set to the chart) - calculated_number green; - calculated_number red; - - // ------------------------------------------------------------------------ - // database lookup settings - - char *dimensions; // the chart dimensions - int group; // grouping method: average, max, etc. 
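    // the lookup window is given as offsets in seconds relative to now,
    // negative values pointing to the past, as in the data query API;
    // e.g. "lookup: average -10s" parses to group=average, after=-10,
    // before=0, and (unless 'every' is given) an update frequency of
    // abs(after) = 10 seconds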
- int before; // ending point in time-series - int after; // starting point in time-series - uint32_t options; // calculation options - - // ------------------------------------------------------------------------ - // expressions related to the alarm - - EVAL_EXPRESSION *calculation; // expression to calculate the value of the alarm - EVAL_EXPRESSION *warning; // expression to check the warning condition - EVAL_EXPRESSION *critical; // expression to check the critical condition - - // ------------------------------------------------------------------------ - // notification delay settings - - int delay_up_duration; // duration to delay notifications when alarm raises - int delay_down_duration; // duration to delay notifications when alarm lowers - int delay_max_duration; // the absolute max delay to apply to this alarm - float delay_multiplier; // multiplier for all delays when alarms switch status - // while now < delay_up_to - - // ------------------------------------------------------------------------ - // runtime information - - RRDCALC_STATUS status; // the current status of the alarm - - calculated_number value; // the current value of the alarm - calculated_number old_value; // the previous value of the alarm - - uint32_t rrdcalc_flags; // check RRDCALC_FLAG_* - - time_t last_updated; // the last update timestamp of the alarm - time_t next_update; // the next update timestamp of the alarm - time_t last_status_change; // the timestamp of the last time this alarm changed status - - time_t db_after; // the first timestamp evaluated by the db lookup - time_t db_before; // the last timestamp evaluated by the db lookup - - time_t delay_up_to_timestamp; // the timestamp up to which we should delay notifications - int delay_up_current; // the current up notification delay duration - int delay_down_current; // the current down notification delay duration - int delay_last; // the last delay we used - - // ------------------------------------------------------------------------ - // variables this alarm exposes to the rest of the alarms - - RRDVAR *local; - RRDVAR *family; - RRDVAR *hostid; - RRDVAR *hostname; - - // ------------------------------------------------------------------------ - // the chart this alarm is linked to - - struct rrdset *rrdset; - - // linking of this alarm on its chart - struct rrdcalc *rrdset_next; - struct rrdcalc *rrdset_prev; - - struct rrdcalc *next; -} RRDCALC; - -#define RRDCALC_HAS_DB_LOOKUP(rc) ((rc)->after) - -// RRDCALCTEMPLATE -// these are to be applied to charts found dynamically -// based on their context. -typedef struct rrdcalctemplate { - char *name; - uint32_t hash_name; - - char *exec; - char *recipient; - - char *context; - uint32_t hash_context; - - char *family_match; - SIMPLE_PATTERN *family_pattern; - - char *source; // the source of this alarm - char *units; // the units of the alarm - char *info; // a short description of the alarm - - int update_every; // update frequency for the alarm - - // the red and green threshold of this alarm (to be set to the chart) - calculated_number green; - calculated_number red; - - // ------------------------------------------------------------------------ - // database lookup settings - - char *dimensions; // the chart dimensions - int group; // grouping method: average, max, etc. 
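    // same lookup-window semantics as in RRDCALC above: 'after' and
    // 'before' are offsets in seconds relative to now, negative = past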
- int before; // ending point in time-series - int after; // starting point in time-series - uint32_t options; // calculation options - - // ------------------------------------------------------------------------ - // notification delay settings - - int delay_up_duration; // duration to delay notifications when alarm raises - int delay_down_duration; // duration to delay notifications when alarm lowers - int delay_max_duration; // the absolute max delay to apply to this alarm - float delay_multiplier; // multiplier for all delays when alarms switch status - - // ------------------------------------------------------------------------ - // expressions related to the alarm - - EVAL_EXPRESSION *calculation; - EVAL_EXPRESSION *warning; - EVAL_EXPRESSION *critical; - - struct rrdcalctemplate *next; -} RRDCALCTEMPLATE; - -#define RRDCALCTEMPLATE_HAS_CALCULATION(rt) ((rt)->after) - -#define HEALTH_ENTRY_FLAG_PROCESSED 0x00000001 -#define HEALTH_ENTRY_FLAG_UPDATED 0x00000002 -#define HEALTH_ENTRY_FLAG_EXEC_RUN 0x00000004 -#define HEALTH_ENTRY_FLAG_EXEC_FAILED 0x00000008 -#define HEALTH_ENTRY_FLAG_SAVED 0x10000000 -#define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000 - -typedef struct alarm_entry { - uint32_t unique_id; - uint32_t alarm_id; - uint32_t alarm_event_id; - - time_t when; - time_t duration; - time_t non_clear_duration; - - char *name; - uint32_t hash_name; - - char *chart; - uint32_t hash_chart; - - char *family; - - char *exec; - char *recipient; - time_t exec_run_timestamp; - int exec_code; - - char *source; - char *units; - char *info; - - calculated_number old_value; - calculated_number new_value; - - char *old_value_string; - char *new_value_string; - - RRDCALC_STATUS old_status; - RRDCALC_STATUS new_status; - - uint32_t flags; - - int delay; - time_t delay_up_to_timestamp; - - uint32_t updated_by_id; - uint32_t updates_id; - - struct alarm_entry *next; -} ALARM_ENTRY; - -typedef struct alarm_log { - uint32_t next_log_id; - uint32_t next_alarm_id; - unsigned int count; - unsigned int max; - ALARM_ENTRY *alarms; - netdata_rwlock_t alarm_log_rwlock; -} ALARM_LOG; - -#include "rrd.h" - -extern void rrdsetvar_rename_all(RRDSET *st); -extern RRDSETVAR *rrdsetvar_create(RRDSET *st, const char *variable, RRDVAR_TYPE type, void *value, RRDVAR_OPTIONS options); -extern void rrdsetvar_free(RRDSETVAR *rs); - -extern void rrddimvar_rename_all(RRDDIM *rd); -extern RRDDIMVAR *rrddimvar_create(RRDDIM *rd, RRDVAR_TYPE type, const char *prefix, const char *suffix, void *value, RRDVAR_OPTIONS options); -extern void rrddimvar_free(RRDDIMVAR *rs); - -extern void rrdsetcalc_link_matching(RRDSET *st); -extern void rrdsetcalc_unlink(RRDCALC *rc); -extern void rrdcalctemplate_link_matching(RRDSET *st); -extern RRDCALC *rrdcalc_find(RRDSET *st, const char *name); - -extern void health_init(void); -extern void *health_main(void *ptr); - -extern void health_reload(void); - -extern int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result); -extern void health_alarms2json(RRDHOST *host, BUFFER *wb, int all); -extern void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after); - -void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf); - -extern RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name); -extern void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value); - -extern RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name); -extern void 
rrdsetvar_custom_chart_variable_set(RRDSETVAR *rv, calculated_number value); - -extern void rrdvar_free_remaining_variables(RRDHOST *host, avl_tree_lock *tree_lock); - -extern const char *rrdcalc_status2string(RRDCALC_STATUS status); - - -extern int health_alarm_log_open(RRDHOST *host); -extern void health_alarm_log_close(RRDHOST *host); -extern void health_log_rotate(RRDHOST *host); -extern void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae); -extern ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename); -extern void health_alarm_log_load(RRDHOST *host); -extern void health_alarm_log( - RRDHOST *host, - uint32_t alarm_id, - uint32_t alarm_event_id, - time_t when, - const char *name, - const char *chart, - const char *family, - const char *exec, - const char *recipient, - time_t duration, - calculated_number old_value, - calculated_number new_value, - RRDCALC_STATUS old_status, - RRDCALC_STATUS new_status, - const char *source, - const char *units, - const char *info, - int delay, - uint32_t flags -); - -extern void health_readdir(RRDHOST *host, const char *path); -extern char *health_config_dir(void); -extern void health_reload_host(RRDHOST *host); -extern void health_alarm_log_free(RRDHOST *host); - -extern void rrdcalc_free(RRDCALC *rc); -extern void rrdcalc_unlink_and_free(RRDHOST *host, RRDCALC *rc); - -extern void rrdcalctemplate_free(RRDCALCTEMPLATE *rt); -extern void rrdcalctemplate_unlink_and_free(RRDHOST *host, RRDCALCTEMPLATE *rt); - -extern int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback)(void *rrdvar, void *data), void *data); - -#ifdef NETDATA_HEALTH_INTERNALS -#define RRDVAR_MAX_LENGTH 1024 - -extern int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, uint32_t hash_chart, uint32_t hash_name); -extern uint32_t rrdcalc_get_unique_id(RRDHOST *host, const char *chart, const char *name, uint32_t *next_event_id); -extern int rrdvar_fix_name(char *variable); - -extern RRDCALC *rrdcalc_create(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *chart); -extern void rrdcalc_create_part2(RRDHOST *host, RRDCALC *rc); - -extern RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, void *value); -extern void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv); - -extern void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae); - -#endif // NETDATA_HEALTH_INTERNALS - -#endif //NETDATA_HEALTH_H diff --git a/src/health_config.c b/src/health_config.c deleted file mode 100644 index a25ee7227..000000000 --- a/src/health_config.c +++ /dev/null @@ -1,900 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -#define HEALTH_CONF_MAX_LINE 4096 - -#define HEALTH_ALARM_KEY "alarm" -#define HEALTH_TEMPLATE_KEY "template" -#define HEALTH_ON_KEY "on" -#define HEALTH_HOST_KEY "hosts" -#define HEALTH_OS_KEY "os" -#define HEALTH_FAMILIES_KEY "families" -#define HEALTH_LOOKUP_KEY "lookup" -#define HEALTH_CALC_KEY "calc" -#define HEALTH_EVERY_KEY "every" -#define HEALTH_GREEN_KEY "green" -#define HEALTH_RED_KEY "red" -#define HEALTH_WARN_KEY "warn" -#define HEALTH_CRIT_KEY "crit" -#define HEALTH_EXEC_KEY "exec" -#define HEALTH_RECIPIENT_KEY "to" -#define HEALTH_UNITS_KEY "units" -#define HEALTH_INFO_KEY "info" -#define HEALTH_DELAY_KEY "delay" -#define HEALTH_OPTIONS_KEY "options" - -static inline int rrdcalc_add_alarm_from_config(RRDHOST *host, RRDCALC *rc) { - if(!rc->chart) { - error("Health configuration for alarm '%s' does not have a chart", rc->name); - 
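        // an alarm that cannot name its chart can never be linked to an
        // RRDSET, so it is rejected outright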
return 0; - } - - if(!rc->update_every) { - error("Health configuration for alarm '%s.%s' has no frequency (parameter 'every'). Ignoring it.", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - if(!RRDCALC_HAS_DB_LOOKUP(rc) && !rc->calculation && !rc->warning && !rc->critical) { - error("Health configuration for alarm '%s.%s' is useless (no db lookup, no calculation, no warning and no critical expressions)", rc->chart?rc->chart:"NOCHART", rc->name); - return 0; - } - - if (rrdcalc_exists(host, rc->chart, rc->name, rc->hash_chart, rc->hash)) - return 0; - - rc->id = rrdcalc_get_unique_id(host, rc->chart, rc->name, &rc->next_event_id); - - debug(D_HEALTH, "Health configuration adding alarm '%s.%s' (%u): exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f", - rc->chart?rc->chart:"NOCHART", - rc->name, - rc->id, - (rc->exec)?rc->exec:"DEFAULT", - (rc->recipient)?rc->recipient:"DEFAULT", - rc->green, - rc->red, - rc->group, - rc->after, - rc->before, - rc->options, - (rc->dimensions)?rc->dimensions:"NONE", - rc->update_every, - (rc->calculation)?rc->calculation->parsed_as:"NONE", - (rc->warning)?rc->warning->parsed_as:"NONE", - (rc->critical)?rc->critical->parsed_as:"NONE", - rc->source, - rc->delay_up_duration, - rc->delay_down_duration, - rc->delay_max_duration, - rc->delay_multiplier - ); - - rrdcalc_create_part2(host, rc); - return 1; -} - -static inline int rrdcalctemplate_add_template_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) { - if(unlikely(!rt->context)) { - error("Health configuration for template '%s' does not have a context", rt->name); - return 0; - } - - if(unlikely(!rt->update_every)) { - error("Health configuration for template '%s' has no frequency (parameter 'every'). 
Ignoring it.", rt->name); - return 0; - } - - if(unlikely(!RRDCALCTEMPLATE_HAS_CALCULATION(rt) && !rt->warning && !rt->critical)) { - error("Health configuration for template '%s' is useless (no calculation, no warning and no critical evaluation)", rt->name); - return 0; - } - - RRDCALCTEMPLATE *t, *last = NULL; - for (t = host->templates; t ; last = t, t = t->next) { - if(unlikely(t->hash_name == rt->hash_name - && !strcmp(t->name, rt->name) - && !strcmp(t->family_match?t->family_match:"*", rt->family_match?rt->family_match:"*") - )) { - error("Health configuration template '%s' already exists for host '%s'.", rt->name, host->hostname); - return 0; - } - } - - debug(D_HEALTH, "Health configuration adding template '%s': context '%s', exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f", - rt->name, - (rt->context)?rt->context:"NONE", - (rt->exec)?rt->exec:"DEFAULT", - (rt->recipient)?rt->recipient:"DEFAULT", - rt->green, - rt->red, - rt->group, - rt->after, - rt->before, - rt->options, - (rt->dimensions)?rt->dimensions:"NONE", - rt->update_every, - (rt->calculation)?rt->calculation->parsed_as:"NONE", - (rt->warning)?rt->warning->parsed_as:"NONE", - (rt->critical)?rt->critical->parsed_as:"NONE", - rt->source, - rt->delay_up_duration, - rt->delay_down_duration, - rt->delay_max_duration, - rt->delay_multiplier - ); - - if(likely(last)) { - last->next = rt; - } - else { - rt->next = host->templates; - host->templates = rt; - } - - return 1; -} - -static inline int health_parse_duration(char *string, int *result) { - // make sure it is a number - if(!*string || !(isdigit(*string) || *string == '+' || *string == '-')) { - *result = 0; - return 0; - } - - char *e = NULL; - calculated_number n = str2ld(string, &e); - if(e && *e) { - switch (*e) { - case 'Y': - *result = (int) (n * 86400 * 365); - break; - case 'M': - *result = (int) (n * 86400 * 30); - break; - case 'w': - *result = (int) (n * 86400 * 7); - break; - case 'd': - *result = (int) (n * 86400); - break; - case 'h': - *result = (int) (n * 3600); - break; - case 'm': - *result = (int) (n * 60); - break; - - default: - case 's': - *result = (int) (n); - break; - } - } - else - *result = (int)(n); - - return 1; -} - -static inline int health_parse_delay( - size_t line, const char *path, const char *file, char *string, - int *delay_up_duration, - int *delay_down_duration, - int *delay_max_duration, - float *delay_multiplier) { - - char given_up = 0; - char given_down = 0; - char given_max = 0; - char given_multiplier = 0; - - char *s = string; - while(*s) { - char *key = s; - - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - - if(!*key) break; - - char *value = s; - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - - if(!strcasecmp(key, "up")) { - if (!health_parse_duration(value, delay_up_duration)) { - error("Health configuration at line %zu of file '%s/%s': invalid value '%s' for '%s' keyword", - line, path, file, value, key); - } - else given_up = 1; - } - else if(!strcasecmp(key, "down")) { - if (!health_parse_duration(value, delay_down_duration)) { - error("Health configuration at line %zu of file '%s/%s': invalid value '%s' for '%s' keyword", - line, path, file, value, key); - } - else given_down = 1; - } - else 
if(!strcasecmp(key, "multiplier")) { - *delay_multiplier = strtof(value, NULL); - if(isnan(*delay_multiplier) || isinf(*delay_multiplier) || islessequal(*delay_multiplier, 0)) { - error("Health configuration at line %zu of file '%s/%s': invalid value '%s' for '%s' keyword", - line, path, file, value, key); - } - else given_multiplier = 1; - } - else if(!strcasecmp(key, "max")) { - if (!health_parse_duration(value, delay_max_duration)) { - error("Health configuration at line %zu of file '%s/%s': invalid value '%s' for '%s' keyword", - line, path, file, value, key); - } - else given_max = 1; - } - else { - error("Health configuration at line %zu of file '%s/%s': unknown keyword '%s'", - line, path, file, key); - } - } - - if(!given_up) - *delay_up_duration = 0; - - if(!given_down) - *delay_down_duration = 0; - - if(!given_multiplier) - *delay_multiplier = 1.0; - - if(!given_max) { - if((*delay_max_duration) < (*delay_up_duration) * (*delay_multiplier)) - *delay_max_duration = (int)((*delay_up_duration) * (*delay_multiplier)); - - if((*delay_max_duration) < (*delay_down_duration) * (*delay_multiplier)) - *delay_max_duration = (int)((*delay_down_duration) * (*delay_multiplier)); - } - - return 1; -} - -static inline uint32_t health_parse_options(const char *s) { - uint32_t options = 0; - char buf[100+1] = ""; - - while(*s) { - buf[0] = '\0'; - - // skip spaces - while(*s && isspace(*s)) - s++; - - // find the next space - size_t count = 0; - while(*s && count < 100 && !isspace(*s)) - buf[count++] = *s++; - - if(buf[0]) { - buf[count] = '\0'; - - if(!strcasecmp(buf, "no-clear-notification") || !strcasecmp(buf, "no-clear")) - options |= RRDCALC_FLAG_NO_CLEAR_NOTIFICATION; - else - error("Ignoring unknown alarm option '%s'", buf); - } - } - - return options; -} - -static inline int health_parse_db_lookup( - size_t line, const char *path, const char *file, char *string, - int *group_method, int *after, int *before, int *every, - uint32_t *options, char **dimensions -) { - debug(D_HEALTH, "Health configuration parsing database lookup %zu@%s/%s: %s", line, path, file, string); - - if(*dimensions) freez(*dimensions); - *dimensions = NULL; - *after = 0; - *before = 0; - *every = 0; - *options = 0; - - char *s = string, *key; - - // first is the group method - key = s; - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - if(!*s) { - error("Health configuration invalid chart calculation at line %zu of file '%s/%s': expected group method followed by the 'after' time, but got '%s'", - line, path, file, key); - return 0; - } - - if((*group_method = web_client_api_request_v1_data_group(key, -1)) == -1) { - error("Health configuration at line %zu of file '%s/%s': invalid group method '%s'", - line, path, file, key); - return 0; - } - - // then is the 'after' time - key = s; - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - - if(!health_parse_duration(key, after)) { - error("Health configuration at line %zu of file '%s/%s': invalid duration '%s' after group method", - line, path, file, key); - return 0; - } - - // sane defaults - *every = abs(*after); - - // now we may have optional parameters - while(*s) { - key = s; - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - if(!*key) break; - - if(!strcasecmp(key, "at")) { - char *value = s; - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - - if (!health_parse_duration(value, before)) { - error("Health configuration at line %zu of file '%s/%s': invalid duration '%s' for 
'%s' keyword", - line, path, file, value, key); - } - } - else if(!strcasecmp(key, HEALTH_EVERY_KEY)) { - char *value = s; - while(*s && !isspace(*s)) s++; - while(*s && isspace(*s)) *s++ = '\0'; - - if (!health_parse_duration(value, every)) { - error("Health configuration at line %zu of file '%s/%s': invalid duration '%s' for '%s' keyword", - line, path, file, value, key); - } - } - else if(!strcasecmp(key, "absolute") || !strcasecmp(key, "abs") || !strcasecmp(key, "absolute_sum")) { - *options |= RRDR_OPTION_ABSOLUTE; - } - else if(!strcasecmp(key, "min2max")) { - *options |= RRDR_OPTION_MIN2MAX; - } - else if(!strcasecmp(key, "null2zero")) { - *options |= RRDR_OPTION_NULL2ZERO; - } - else if(!strcasecmp(key, "percentage")) { - *options |= RRDR_OPTION_PERCENTAGE; - } - else if(!strcasecmp(key, "unaligned")) { - *options |= RRDR_OPTION_NOT_ALIGNED; - } - else if(!strcasecmp(key, "match-ids") || !strcasecmp(key, "match_ids")) { - *options |= RRDR_OPTION_MATCH_IDS; - } - else if(!strcasecmp(key, "match-names") || !strcasecmp(key, "match_names")) { - *options |= RRDR_OPTION_MATCH_NAMES; - } - else if(!strcasecmp(key, "of")) { - if(*s && strcasecmp(s, "all") != 0) - *dimensions = strdupz(s); - break; - } - else { - error("Health configuration at line %zu of file '%s/%s': unknown keyword '%s'", - line, path, file, key); - } - } - - return 1; -} - -static inline char *health_source_file(size_t line, const char *path, const char *filename) { - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "%zu@%s/%s", line, path, filename); - return strdupz(buffer); -} - -static inline void strip_quotes(char *s) { - while(*s) { - if(*s == '\'' || *s == '"') *s = ' '; - s++; - } -} - -int health_readfile(RRDHOST *host, const char *path, const char *filename) { - debug(D_HEALTH, "Health configuration reading file '%s/%s'", path, filename); - - static uint32_t - hash_alarm = 0, - hash_template = 0, - hash_os = 0, - hash_on = 0, - hash_host = 0, - hash_families = 0, - hash_calc = 0, - hash_green = 0, - hash_red = 0, - hash_warn = 0, - hash_crit = 0, - hash_exec = 0, - hash_every = 0, - hash_lookup = 0, - hash_units = 0, - hash_info = 0, - hash_recipient = 0, - hash_delay = 0, - hash_options = 0; - - char buffer[HEALTH_CONF_MAX_LINE + 1]; - - if(unlikely(!hash_alarm)) { - hash_alarm = simple_uhash(HEALTH_ALARM_KEY); - hash_template = simple_uhash(HEALTH_TEMPLATE_KEY); - hash_on = simple_uhash(HEALTH_ON_KEY); - hash_os = simple_uhash(HEALTH_OS_KEY); - hash_host = simple_uhash(HEALTH_HOST_KEY); - hash_families = simple_uhash(HEALTH_FAMILIES_KEY); - hash_calc = simple_uhash(HEALTH_CALC_KEY); - hash_lookup = simple_uhash(HEALTH_LOOKUP_KEY); - hash_green = simple_uhash(HEALTH_GREEN_KEY); - hash_red = simple_uhash(HEALTH_RED_KEY); - hash_warn = simple_uhash(HEALTH_WARN_KEY); - hash_crit = simple_uhash(HEALTH_CRIT_KEY); - hash_exec = simple_uhash(HEALTH_EXEC_KEY); - hash_every = simple_uhash(HEALTH_EVERY_KEY); - hash_units = simple_hash(HEALTH_UNITS_KEY); - hash_info = simple_hash(HEALTH_INFO_KEY); - hash_recipient = simple_hash(HEALTH_RECIPIENT_KEY); - hash_delay = simple_uhash(HEALTH_DELAY_KEY); - hash_options = simple_uhash(HEALTH_OPTIONS_KEY); - } - - snprintfz(buffer, HEALTH_CONF_MAX_LINE, "%s/%s", path, filename); - FILE *fp = fopen(buffer, "r"); - if(!fp) { - error("Health configuration cannot read file '%s'.", buffer); - return 0; - } - - RRDCALC *rc = NULL; - RRDCALCTEMPLATE *rt = NULL; - - int ignore_this = 0; - size_t line = 0, append = 0; - char *s; - while((s = fgets(&buffer[append], 
(int)(HEALTH_CONF_MAX_LINE - append), fp)) || append) { - int stop_appending = !s; - line++; - s = trim(buffer); - if(!s || *s == '#') continue; - - append = strlen(s); - if(!stop_appending && s[append - 1] == '\\') { - s[append - 1] = ' '; - append = &s[append] - buffer; - if(append < HEALTH_CONF_MAX_LINE) - continue; - else { - error("Health configuration has a multi-line entry that is too long at line %zu of file '%s/%s'.", line, path, filename); - } - } - append = 0; - - char *key = s; - while(*s && *s != ':') s++; - if(!*s) { - error("Health configuration has invalid line %zu of file '%s/%s'. It does not contain a ':'. Ignoring it.", line, path, filename); - continue; - } - *s = '\0'; - s++; - - char *value = s; - key = trim_all(key); - value = trim_all(value); - - if(!key) { - error("Health configuration has invalid line %zu of file '%s/%s'. Keyword is empty. Ignoring it.", line, path, filename); - continue; - } - - if(!value) { - error("Health configuration has invalid line %zu of file '%s/%s'. value is empty. Ignoring it.", line, path, filename); - continue; - } - - uint32_t hash = simple_uhash(key); - - if(hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) { - if (rc && (ignore_this || !rrdcalc_add_alarm_from_config(host, rc))) - rrdcalc_free(rc); - - if(rt) { - if (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)) - rrdcalctemplate_free(rt); - - rt = NULL; - } - - rc = callocz(1, sizeof(RRDCALC)); - rc->next_event_id = 1; - rc->name = strdupz(value); - rc->hash = simple_hash(rc->name); - rc->source = health_source_file(line, path, filename); - rc->green = NAN; - rc->red = NAN; - rc->value = NAN; - rc->old_value = NAN; - rc->delay_multiplier = 1.0; - - if(rrdvar_fix_name(rc->name)) - error("Health configuration renamed alarm '%s' to '%s'", value, rc->name); - - ignore_this = 0; - } - else if(hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) { - if(rc) { - if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) - rrdcalc_free(rc); - - rc = NULL; - } - - if(rt && (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))) - rrdcalctemplate_free(rt); - - rt = callocz(1, sizeof(RRDCALCTEMPLATE)); - rt->name = strdupz(value); - rt->hash_name = simple_hash(rt->name); - rt->source = health_source_file(line, path, filename); - rt->green = NAN; - rt->red = NAN; - rt->delay_multiplier = 1.0; - - if(rrdvar_fix_name(rt->name)) - error("Health configuration renamed template '%s' to '%s'", value, rt->name); - - ignore_this = 0; - } - else if(hash == hash_os && !strcasecmp(key, HEALTH_OS_KEY)) { - char *os_match = value; - SIMPLE_PATTERN *os_pattern = simple_pattern_create(os_match, NULL, SIMPLE_PATTERN_EXACT); - - if(!simple_pattern_matches(os_pattern, host->os)) { - if(rc) - debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s/%s: host O/S does not match '%s'", host->hostname, rc->name, line, path, filename, os_match); - - if(rt) - debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s/%s: host O/S does not match '%s'", host->hostname, rt->name, line, path, filename, os_match); - - ignore_this = 1; - } - - simple_pattern_free(os_pattern); - } - else if(hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) { - char *host_match = value; - SIMPLE_PATTERN *host_pattern = simple_pattern_create(host_match, NULL, SIMPLE_PATTERN_EXACT); - - if(!simple_pattern_matches(host_pattern, host->hostname)) { - if(rc) - debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s/%s: hostname does not match '%s'", host->hostname, 
rc->name, line, path, filename, host_match); - - if(rt) - debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s/%s: hostname does not match '%s'", host->hostname, rt->name, line, path, filename, host_match); - - ignore_this = 1; - } - - simple_pattern_free(host_pattern); - } - else if(rc) { - if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) { - if(rc->chart) { - if(strcmp(rc->chart, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rc->name, key, rc->chart, value, value); - - freez(rc->chart); - } - rc->chart = strdupz(value); - rc->hash_chart = simple_hash(rc->chart); - } - else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) { - health_parse_db_lookup(line, path, filename, value, &rc->group, &rc->after, &rc->before, - &rc->update_every, - &rc->options, &rc->dimensions); - } - else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { - if(!health_parse_duration(value, &rc->update_every)) - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' cannot parse duration: '%s'.", - line, path, filename, rc->name, key, value); - } - else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { - char *e; - rc->green = str2ld(value, &e); - if(e && *e) { - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", - line, path, filename, rc->name, key, e); - } - } - else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { - char *e; - rc->red = str2ld(value, &e); - if(e && *e) { - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", - line, path, filename, rc->name, key, e); - } - } - else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { - const char *failed_at = NULL; - int error = 0; - rc->calculation = expression_parse(value, &failed_at, &error); - if(!rc->calculation) { - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, path, filename, rc->name, key, value, expression_strerror(error), failed_at); - } - } - else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) { - const char *failed_at = NULL; - int error = 0; - rc->warning = expression_parse(value, &failed_at, &error); - if(!rc->warning) { - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, path, filename, rc->name, key, value, expression_strerror(error), failed_at); - } - } - else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) { - const char *failed_at = NULL; - int error = 0; - rc->critical = expression_parse(value, &failed_at, &error); - if(!rc->critical) { - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, path, filename, rc->name, key, value, expression_strerror(error), failed_at); - } - } - else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) { - if(rc->exec) { - if(strcmp(rc->exec, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", - line, path, filename, rc->name, key, rc->exec, value, value); - - freez(rc->exec); - } - rc->exec = strdupz(value); - } - else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) { - if(rc->recipient) { - if(strcmp(rc->recipient, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rc->name, key, rc->recipient, value, value); - - freez(rc->recipient); - } - rc->recipient = strdupz(value); - } - else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) { - if(rc->units) { - if(strcmp(rc->units, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rc->name, key, rc->units, value, value); - - freez(rc->units); - } - rc->units = strdupz(value); - strip_quotes(rc->units); - } - else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) { - if(rc->info) { - if(strcmp(rc->info, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rc->name, key, rc->info, value, value); - - freez(rc->info); - } - rc->info = strdupz(value); - strip_quotes(rc->info); - } - else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) { - health_parse_delay(line, path, filename, value, &rc->delay_up_duration, &rc->delay_down_duration, &rc->delay_max_duration, &rc->delay_multiplier); - } - else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) { - rc->options |= health_parse_options(value); - } - else { - error("Health configuration at line %zu of file '%s/%s' for alarm '%s' has unknown key '%s'.", - line, path, filename, rc->name, key); - } - } - else if(rt) { - if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) { - if(rt->context) { - if(strcmp(rt->context, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", - line, path, filename, rt->name, key, rt->context, value, value); - - freez(rt->context); - } - rt->context = strdupz(value); - rt->hash_context = simple_hash(rt->context); - } - else if(hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) { - freez(rt->family_match); - simple_pattern_free(rt->family_pattern); - - rt->family_match = strdupz(value); - rt->family_pattern = simple_pattern_create(rt->family_match, NULL, SIMPLE_PATTERN_EXACT); - } - else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) { - health_parse_db_lookup(line, path, filename, value, &rt->group, &rt->after, &rt->before, - &rt->update_every, &rt->options, &rt->dimensions); - } - else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { - if(!health_parse_duration(value, &rt->update_every)) - error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' cannot parse duration: '%s'.", - line, path, filename, rt->name, key, value); - } - else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { - char *e; - rt->green = str2ld(value, &e); - if(e && *e) { - error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", - line, path, filename, rt->name, key, e); - } - } - else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { - char *e; - rt->red = str2ld(value, &e); - if(e && *e) { - error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", - line, path, filename, rt->name, key, e); - } - } - else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { - const char *failed_at = NULL; - int error = 0; - rt->calculation = expression_parse(value, &failed_at, &error); - if(!rt->calculation) { - error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, path, filename, rt->name, key, value, expression_strerror(error), failed_at); - } - } - else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) { - const char *failed_at = NULL; - int error = 0; - rt->warning = expression_parse(value, &failed_at, &error); - if(!rt->warning) { - error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, path, filename, rt->name, key, value, expression_strerror(error), failed_at); - } - } - else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) { - const char *failed_at = NULL; - int error = 0; - rt->critical = expression_parse(value, &failed_at, &error); - if(!rt->critical) { - error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, path, filename, rt->name, key, value, expression_strerror(error), failed_at); - } - } - else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) { - if(rt->exec) { - if(strcmp(rt->exec, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", - line, path, filename, rt->name, key, rt->exec, value, value); - - freez(rt->exec); - } - rt->exec = strdupz(value); - } - else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) { - if(rt->recipient) { - if(strcmp(rt->recipient, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rt->name, key, rt->recipient, value, value); - - freez(rt->recipient); - } - rt->recipient = strdupz(value); - } - else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) { - if(rt->units) { - if(strcmp(rt->units, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rt->name, key, rt->units, value, value); - - freez(rt->units); - } - rt->units = strdupz(value); - strip_quotes(rt->units); - } - else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) { - if(rt->info) { - if(strcmp(rt->info, value) != 0) - error("Health configuration at line %zu of file '%s/%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, path, filename, rt->name, key, rt->info, value, value); - - freez(rt->info); - } - rt->info = strdupz(value); - strip_quotes(rt->info); - } - else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) { - health_parse_delay(line, path, filename, value, &rt->delay_up_duration, &rt->delay_down_duration, &rt->delay_max_duration, &rt->delay_multiplier); - } - else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) { - rt->options |= health_parse_options(value); - } - else { - error("Health configuration at line %zu of file '%s/%s' for template '%s' has unknown key '%s'.", - line, path, filename, rt->name, key); - } - } - else { - error("Health configuration at line %zu of file '%s/%s' has unknown key '%s'. Expected either '" HEALTH_ALARM_KEY "' or '" HEALTH_TEMPLATE_KEY "'.", - line, path, filename, key); - } - } - - if(rc && (ignore_this || !rrdcalc_add_alarm_from_config(host, rc))) - rrdcalc_free(rc); - - if(rt && (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))) - rrdcalctemplate_free(rt); - - fclose(fp); - return 1; -} - -void health_readdir(RRDHOST *host, const char *path) { - if(!host->health_enabled) return; - - size_t pathlen = strlen(path); - - debug(D_HEALTH, "Health configuration reading directory '%s'", path); - - DIR *dir = opendir(path); - if (!dir) { - error("Health configuration cannot open directory '%s'.", path); - return; - } - - struct dirent *de = NULL; - while ((de = readdir(dir))) { - size_t len = strlen(de->d_name); - - if(de->d_type == DT_DIR - && ( - (de->d_name[0] == '.' && de->d_name[1] == '\0') - || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - )) { - debug(D_HEALTH, "Ignoring directory '%s'", de->d_name); - continue; - } - - else if(de->d_type == DT_DIR) { - char *s = mallocz(pathlen + strlen(de->d_name) + 2); - strcpy(s, path); - strcat(s, "/"); - strcat(s, de->d_name); - health_readdir(host, s); - freez(s); - continue; - } - - else if((de->d_type == DT_LNK || de->d_type == DT_REG || de->d_type == DT_UNKNOWN) && - len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { - health_readfile(host, path, de->d_name); - } - - else debug(D_HEALTH, "Ignoring file '%s'", de->d_name); - } - - closedir(dir); -} - - diff --git a/src/health_json.c b/src/health_json.c deleted file mode 100644 index aba7425d7..000000000 --- a/src/health_json.c +++ /dev/null @@ -1,260 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -static inline void health_string2json(BUFFER *wb, const char *prefix, const char *label, const char *value, const char *suffix) { - if(value && *value) { - buffer_sprintf(wb, "%s\"%s\":\"", prefix, label); - buffer_strcat_htmlescape(wb, value); - buffer_strcat(wb, "\""); - buffer_strcat(wb, suffix); - } - else - buffer_sprintf(wb, "%s\"%s\":null%s", prefix, label, suffix); -} - -static inline void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) { - buffer_sprintf(wb, - "\n\t{\n" - "\t\t\"hostname\": \"%s\",\n" - "\t\t\"unique_id\": %u,\n" - "\t\t\"alarm_id\": %u,\n" - "\t\t\"alarm_event_id\": %u,\n" - "\t\t\"name\": \"%s\",\n" - "\t\t\"chart\": \"%s\",\n" - "\t\t\"family\": \"%s\",\n" - "\t\t\"processed\": %s,\n" - "\t\t\"updated\": %s,\n" - "\t\t\"exec_run\": %lu,\n" - "\t\t\"exec_failed\": %s,\n" - "\t\t\"exec\": \"%s\",\n" - "\t\t\"recipient\": \"%s\",\n" - "\t\t\"exec_code\": %d,\n" - "\t\t\"source\": \"%s\",\n" - "\t\t\"units\": \"%s\",\n" - "\t\t\"when\": %lu,\n" - "\t\t\"duration\": %lu,\n" - "\t\t\"non_clear_duration\": %lu,\n" - "\t\t\"status\": \"%s\",\n" - "\t\t\"old_status\": \"%s\",\n" - "\t\t\"delay\": %d,\n" - "\t\t\"delay_up_to_timestamp\": %lu,\n" - "\t\t\"updated_by_id\": %u,\n" - "\t\t\"updates_id\": %u,\n" - "\t\t\"value_string\": \"%s\",\n" - "\t\t\"old_value_string\": \"%s\",\n" - , host->hostname - , ae->unique_id - , ae->alarm_id - , ae->alarm_event_id - , ae->name - , ae->chart - , ae->family - , (ae->flags & HEALTH_ENTRY_FLAG_PROCESSED)?"true":"false" - , (ae->flags & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false" - , (unsigned long)ae->exec_run_timestamp - , (ae->flags & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false" - , ae->exec?ae->exec:host->health_default_exec - , ae->recipient?ae->recipient:host->health_default_recipient - , ae->exec_code - , ae->source - , ae->units?ae->units:"" - , (unsigned long)ae->when - , (unsigned long)ae->duration - , (unsigned long)ae->non_clear_duration - , rrdcalc_status2string(ae->new_status) - , rrdcalc_status2string(ae->old_status) - , ae->delay - , (unsigned long)ae->delay_up_to_timestamp - , ae->updated_by_id - , ae->updates_id - , ae->new_value_string - , ae->old_value_string - ); - - health_string2json(wb, "\t\t", "info", ae->info?ae->info:"", ",\n"); - - if(unlikely(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)) { - buffer_strcat(wb, "\t\t\"no_clear_notification\": true,\n"); - } - - buffer_strcat(wb, "\t\t\"value\":"); - buffer_rrd_value(wb, ae->new_value); - buffer_strcat(wb, ",\n"); - - buffer_strcat(wb, "\t\t\"old_value\":"); - buffer_rrd_value(wb, ae->old_value); - buffer_strcat(wb, "\n"); - - buffer_strcat(wb, "\t}"); -} - -void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t 
after) { - netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); - - buffer_strcat(wb, "["); - - unsigned int max = host->health_log.max; - unsigned int count = 0; - ALARM_ENTRY *ae; - for(ae = host->health_log.alarms; ae && count < max ; count++, ae = ae->next) { - if(ae->unique_id > after) { - if(likely(count)) buffer_strcat(wb, ","); - health_alarm_entry2json_nolock(wb, ae, host); - } - } - - buffer_strcat(wb, "\n]\n"); - - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); -} - -static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC *rc) { - char value_string[100 + 1]; - format_value_and_unit(value_string, 100, rc->value, rc->units, -1); - - buffer_sprintf(wb, - "\t\t\"%s.%s\": {\n" - "\t\t\t\"id\": %lu,\n" - "\t\t\t\"name\": \"%s\",\n" - "\t\t\t\"chart\": \"%s\",\n" - "\t\t\t\"family\": \"%s\",\n" - "\t\t\t\"active\": %s,\n" - "\t\t\t\"exec\": \"%s\",\n" - "\t\t\t\"recipient\": \"%s\",\n" - "\t\t\t\"source\": \"%s\",\n" - "\t\t\t\"units\": \"%s\",\n" - "\t\t\t\"info\": \"%s\",\n" - "\t\t\t\"status\": \"%s\",\n" - "\t\t\t\"last_status_change\": %lu,\n" - "\t\t\t\"last_updated\": %lu,\n" - "\t\t\t\"next_update\": %lu,\n" - "\t\t\t\"update_every\": %d,\n" - "\t\t\t\"delay_up_duration\": %d,\n" - "\t\t\t\"delay_down_duration\": %d,\n" - "\t\t\t\"delay_max_duration\": %d,\n" - "\t\t\t\"delay_multiplier\": %f,\n" - "\t\t\t\"delay\": %d,\n" - "\t\t\t\"delay_up_to_timestamp\": %lu,\n" - "\t\t\t\"value_string\": \"%s\",\n" - , rc->chart, rc->name - , (unsigned long)rc->id - , rc->name - , rc->chart - , (rc->rrdset && rc->rrdset->family)?rc->rrdset->family:"" - , (rc->rrdset)?"true":"false" - , rc->exec?rc->exec:host->health_default_exec - , rc->recipient?rc->recipient:host->health_default_recipient - , rc->source - , rc->units?rc->units:"" - , rc->info?rc->info:"" - , rrdcalc_status2string(rc->status) - , (unsigned long)rc->last_status_change - , (unsigned long)rc->last_updated - , (unsigned long)rc->next_update - , rc->update_every - , rc->delay_up_duration - , rc->delay_down_duration - , rc->delay_max_duration - , rc->delay_multiplier - , rc->delay_last - , (unsigned long)rc->delay_up_to_timestamp - , value_string - ); - - if(unlikely(rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION)) { - buffer_strcat(wb, "\t\t\t\"no_clear_notification\": true,\n"); - } - - if(RRDCALC_HAS_DB_LOOKUP(rc)) { - if(rc->dimensions && *rc->dimensions) - health_string2json(wb, "\t\t\t", "lookup_dimensions", rc->dimensions, ",\n"); - - buffer_sprintf(wb, - "\t\t\t\"db_after\": %lu,\n" - "\t\t\t\"db_before\": %lu,\n" - "\t\t\t\"lookup_method\": \"%s\",\n" - "\t\t\t\"lookup_after\": %d,\n" - "\t\t\t\"lookup_before\": %d,\n" - "\t\t\t\"lookup_options\": \"", - (unsigned long) rc->db_after, - (unsigned long) rc->db_before, - group_method2string(rc->group), - rc->after, - rc->before - ); - buffer_data_options2string(wb, rc->options); - buffer_strcat(wb, "\",\n"); - } - - if(rc->calculation) { - health_string2json(wb, "\t\t\t", "calc", rc->calculation->source, ",\n"); - health_string2json(wb, "\t\t\t", "calc_parsed", rc->calculation->parsed_as, ",\n"); - } - - if(rc->warning) { - health_string2json(wb, "\t\t\t", "warn", rc->warning->source, ",\n"); - health_string2json(wb, "\t\t\t", "warn_parsed", rc->warning->parsed_as, ",\n"); - } - - if(rc->critical) { - health_string2json(wb, "\t\t\t", "crit", rc->critical->source, ",\n"); - health_string2json(wb, "\t\t\t", "crit_parsed", rc->critical->parsed_as, ",\n"); - } - - buffer_strcat(wb, "\t\t\t\"green\":"); - buffer_rrd_value(wb, 
rc->green); - buffer_strcat(wb, ",\n"); - - buffer_strcat(wb, "\t\t\t\"red\":"); - buffer_rrd_value(wb, rc->red); - buffer_strcat(wb, ",\n"); - - buffer_strcat(wb, "\t\t\t\"value\":"); - buffer_rrd_value(wb, rc->value); - buffer_strcat(wb, "\n"); - - buffer_strcat(wb, "\t\t}"); -} - -//void health_rrdcalctemplate2json_nolock(BUFFER *wb, RRDCALCTEMPLATE *rt) { -// -//} - -void health_alarms2json(RRDHOST *host, BUFFER *wb, int all) { - int i; - - rrdhost_rdlock(host); - buffer_sprintf(wb, "{\n\t\"hostname\": \"%s\"," - "\n\t\"latest_alarm_log_unique_id\": %u," - "\n\t\"status\": %s," - "\n\t\"now\": %lu," - "\n\t\"alarms\": {\n", - host->hostname, - (host->health_log.next_log_id > 0)?(host->health_log.next_log_id - 1):0, - host->health_enabled?"true":"false", - (unsigned long)now_realtime_sec()); - - RRDCALC *rc; - for(i = 0, rc = host->alarms; rc ; rc = rc->next) { - if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) - continue; - - if(likely(!all && !(rc->status == RRDCALC_STATUS_WARNING || rc->status == RRDCALC_STATUS_CRITICAL))) - continue; - - if(likely(i)) buffer_strcat(wb, ",\n"); - health_rrdcalc2json_nolock(host, wb, rc); - i++; - } - -// buffer_strcat(wb, "\n\t},\n\t\"templates\": {"); -// RRDCALCTEMPLATE *rt; -// for(rt = host->templates; rt ; rt = rt->next) -// health_rrdcalctemplate2json_nolock(wb, rt); - - buffer_strcat(wb, "\n\t}\n}\n"); - rrdhost_unlock(host); -} - - - diff --git a/src/health_log.c b/src/health_log.c deleted file mode 100644 index a44fbadb0..000000000 --- a/src/health_log.c +++ /dev/null @@ -1,463 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -// ---------------------------------------------------------------------------- -// health alarm log load/save -// no need for locking - only one thread is reading / writing the alarms log - -inline int health_alarm_log_open(RRDHOST *host) { - if(host->health_log_fp) - fclose(host->health_log_fp); - - host->health_log_fp = fopen(host->health_log_filename, "a"); - - if(host->health_log_fp) { - if (setvbuf(host->health_log_fp, NULL, _IOLBF, 0) != 0) - error("HEALTH [%s]: cannot set line buffering on health log file '%s'.", host->hostname, host->health_log_filename); - return 0; - } - - error("HEALTH [%s]: cannot open health log file '%s'. 
Health data will be lost in case of netdata or server crash.", host->hostname, host->health_log_filename); - return -1; -} - -inline void health_alarm_log_close(RRDHOST *host) { - if(host->health_log_fp) { - fclose(host->health_log_fp); - host->health_log_fp = NULL; - } -} - -inline void health_log_rotate(RRDHOST *host) { - static size_t rotate_every = 0; - - if(unlikely(rotate_every == 0)) { - rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000); - if(rotate_every < 100) rotate_every = 100; - } - - if(unlikely(host->health_log_entries_written > rotate_every)) { - health_alarm_log_close(host); - - char old_filename[FILENAME_MAX + 1]; - snprintfz(old_filename, FILENAME_MAX, "%s.old", host->health_log_filename); - - if(unlink(old_filename) == -1 && errno != ENOENT) - error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, old_filename); - - if(link(host->health_log_filename, old_filename) == -1 && errno != ENOENT) - error("HEALTH [%s]: cannot move file '%s' to '%s'.", host->hostname, host->health_log_filename, old_filename); - - if(unlink(host->health_log_filename) == -1 && errno != ENOENT) - error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, host->health_log_filename); - - // open it with truncate - host->health_log_fp = fopen(host->health_log_filename, "w"); - - if(host->health_log_fp) - fclose(host->health_log_fp); - else - error("HEALTH [%s]: cannot truncate health log '%s'", host->hostname, host->health_log_filename); - - host->health_log_fp = NULL; - - host->health_log_entries_written = 0; - health_alarm_log_open(host); - } -} - -inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) { - health_log_rotate(host); - - if(likely(host->health_log_fp)) { - if(unlikely(fprintf(host->health_log_fp - , "%c\t%s" - "\t%08x\t%08x\t%08x\t%08x\t%08x" - "\t%08x\t%08x\t%08x" - "\t%08x\t%08x\t%08x" - "\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" - "\t%d\t%d\t%d\t%d" - "\t" CALCULATED_NUMBER_FORMAT_AUTO "\t" CALCULATED_NUMBER_FORMAT_AUTO - "\n" - , (ae->flags & HEALTH_ENTRY_FLAG_SAVED)?'U':'A' - , host->hostname - - , ae->unique_id - , ae->alarm_id - , ae->alarm_event_id - , ae->updated_by_id - , ae->updates_id - - , (uint32_t)ae->when - , (uint32_t)ae->duration - , (uint32_t)ae->non_clear_duration - , (uint32_t)ae->flags - , (uint32_t)ae->exec_run_timestamp - , (uint32_t)ae->delay_up_to_timestamp - - , (ae->name)?ae->name:"" - , (ae->chart)?ae->chart:"" - , (ae->family)?ae->family:"" - , (ae->exec)?ae->exec:"" - , (ae->recipient)?ae->recipient:"" - , (ae->source)?ae->source:"" - , (ae->units)?ae->units:"" - , (ae->info)?ae->info:"" - - , ae->exec_code - , ae->new_status - , ae->old_status - , ae->delay - - , ae->new_value - , ae->old_value - ) < 0)) - error("HEALTH [%s]: failed to save alarm log entry to '%s'. 
Health data may be lost in case of abnormal restart.", host->hostname, host->health_log_filename); - else { - ae->flags |= HEALTH_ENTRY_FLAG_SAVED; - host->health_log_entries_written++; - } - } -} - -inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename) { - errno = 0; - - char *s, *buf = mallocz(65536 + 1); - size_t line = 0, len = 0; - ssize_t loaded = 0, updated = 0, errored = 0, duplicate = 0; - - netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); - - while((s = fgets_trim_len(buf, 65536, fp, &len))) { - host->health_log_entries_written++; - line++; - - int max_entries = 30, entries = 0; - char *pointers[max_entries]; - - pointers[entries++] = s++; - while(*s) { - if(unlikely(*s == '\t')) { - *s = '\0'; - pointers[entries++] = ++s; - if(entries >= max_entries) { - error("HEALTH [%s]: line %zu of file '%s' has more than %d entries. Ignoring excessive entries.", host->hostname, line, filename, max_entries); - break; - } - } - else s++; - } - - if(likely(*pointers[0] == 'U' || *pointers[0] == 'A')) { - ALARM_ENTRY *ae = NULL; - - if(entries < 26) { - error("HEALTH [%s]: line %zu of file '%s' should have at least 26 entries, but it has %d. Ignoring it.", host->hostname, line, filename, entries); - errored++; - continue; - } - - // check that we have valid ids - uint32_t unique_id = (uint32_t)strtoul(pointers[2], NULL, 16); - if(!unique_id) { - error("HEALTH [%s]: line %zu of file '%s' states alarm entry with invalid unique id %u (%s). Ignoring it.", host->hostname, line, filename, unique_id, pointers[2]); - errored++; - continue; - } - - uint32_t alarm_id = (uint32_t)strtoul(pointers[3], NULL, 16); - if(!alarm_id) { - error("HEALTH [%s]: line %zu of file '%s' states alarm entry for invalid alarm id %u (%s). Ignoring it.", host->hostname, line, filename, alarm_id, pointers[3]); - errored++; - continue; - } - - if(unlikely(*pointers[0] == 'A')) { - // make sure it is properly numbered - if(unlikely(host->health_log.alarms && unique_id < host->health_log.alarms->unique_id)) { - error("HEALTH [%s]: line %zu of file '%s' has alarm log entry %u in wrong order. Ignoring it.", host->hostname, line, filename, unique_id); - errored++; - continue; - } - - ae = callocz(1, sizeof(ALARM_ENTRY)); - } - else if(unlikely(*pointers[0] == 'U')) { - // find the original - for(ae = host->health_log.alarms; ae; ae = ae->next) { - if(unlikely(unique_id == ae->unique_id)) { - if(unlikely(*pointers[0] == 'A')) { - error("HEALTH [%s]: line %zu of file '%s' adds duplicate alarm log entry %u. Using the later." 
- , host->hostname, line, filename, unique_id); - *pointers[0] = 'U'; - duplicate++; - } - break; - } - else if(unlikely(unique_id > ae->unique_id)) { - // no need to continue - // the linked list is sorted - ae = NULL; - break; - } - } - } - - // if not found, skip this line - if(unlikely(!ae)) { - // error("HEALTH [%s]: line %zu of file '%s' updates alarm log entry with unique id %u, but it is not found.", host->hostname, line, filename, unique_id); - continue; - } - - // check for a possible host mismatch - //if(strcmp(pointers[1], host->hostname)) - // error("HEALTH [%s]: line %zu of file '%s' provides an alarm for host '%s' but this is named '%s'.", host->hostname, line, filename, pointers[1], host->hostname); - - ae->unique_id = unique_id; - ae->alarm_id = alarm_id; - ae->alarm_event_id = (uint32_t)strtoul(pointers[4], NULL, 16); - ae->updated_by_id = (uint32_t)strtoul(pointers[5], NULL, 16); - ae->updates_id = (uint32_t)strtoul(pointers[6], NULL, 16); - - ae->when = (uint32_t)strtoul(pointers[7], NULL, 16); - ae->duration = (uint32_t)strtoul(pointers[8], NULL, 16); - ae->non_clear_duration = (uint32_t)strtoul(pointers[9], NULL, 16); - - ae->flags = (uint32_t)strtoul(pointers[10], NULL, 16); - ae->flags |= HEALTH_ENTRY_FLAG_SAVED; - - ae->exec_run_timestamp = (uint32_t)strtoul(pointers[11], NULL, 16); - ae->delay_up_to_timestamp = (uint32_t)strtoul(pointers[12], NULL, 16); - - freez(ae->name); - ae->name = strdupz(pointers[13]); - ae->hash_name = simple_hash(ae->name); - - freez(ae->chart); - ae->chart = strdupz(pointers[14]); - ae->hash_chart = simple_hash(ae->chart); - - freez(ae->family); - ae->family = strdupz(pointers[15]); - - freez(ae->exec); - ae->exec = strdupz(pointers[16]); - if(!*ae->exec) { freez(ae->exec); ae->exec = NULL; } - - freez(ae->recipient); - ae->recipient = strdupz(pointers[17]); - if(!*ae->recipient) { freez(ae->recipient); ae->recipient = NULL; } - - freez(ae->source); - ae->source = strdupz(pointers[18]); - if(!*ae->source) { freez(ae->source); ae->source = NULL; } - - freez(ae->units); - ae->units = strdupz(pointers[19]); - if(!*ae->units) { freez(ae->units); ae->units = NULL; } - - freez(ae->info); - ae->info = strdupz(pointers[20]); - if(!*ae->info) { freez(ae->info); ae->info = NULL; } - - ae->exec_code = str2i(pointers[21]); - ae->new_status = str2i(pointers[22]); - ae->old_status = str2i(pointers[23]); - ae->delay = str2i(pointers[24]); - - ae->new_value = str2l(pointers[25]); - ae->old_value = str2l(pointers[26]); - - char value_string[100 + 1]; - freez(ae->old_value_string); - freez(ae->new_value_string); - ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1)); - ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1)); - - // add it to host if not already there - if(unlikely(*pointers[0] == 'A')) { - ae->next = host->health_log.alarms; - host->health_log.alarms = ae; - loaded++; - } - else updated++; - - if(unlikely(ae->unique_id > host->health_max_unique_id)) - host->health_max_unique_id = ae->unique_id; - - if(unlikely(ae->alarm_id >= host->health_max_alarm_id)) - host->health_max_alarm_id = ae->alarm_id; - } - else { - error("HEALTH [%s]: line %zu of file '%s' is invalid (unrecognized entry type '%s').", host->hostname, line, filename, pointers[0]); - errored++; - } - } - - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); - - freez(buf); - - if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec(); -
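The load path here mirrors the save path: health_alarm_log_save() writes one record per line, starting with 'A' (new entry) or 'U' (update to an existing one), followed by the hostname and tab-separated fields — fixed-width hex for ids, timestamps and flags, raw strings for the name/chart/exec/recipient/source/units/info fields, and decimals for statuses and the two values. Below is a minimal, self-contained sketch of the in-place tab-splitting step only; the field list is abbreviated and illustrative (real records carry over two dozen fields), so it is a reduction of the pointers[] loop above, not the original code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        /* one abbreviated record: marker, hostname, three hex ids, a name */
        char line[] = "A\texample-host\t00000001\t0000000a\t00000001\tcpu_usage";

        char *fields[8];
        int n = 0;

        /* split on tabs in place, as the pointers[] loop above does */
        for (char *s = line; s && n < 8; n++) {
            fields[n] = s;
            s = strchr(s, '\t');
            if (s) *s++ = '\0';
        }

        if (n >= 6 && (*fields[0] == 'A' || *fields[0] == 'U')) {
            unsigned long unique_id = strtoul(fields[2], NULL, 16); /* ids are stored in hex */
            printf("%s entry %lu for host '%s', alarm '%s'\n",
                   (*fields[0] == 'A') ? "new" : "updated",
                   unique_id, fields[1], fields[5]);
        }
        return 0;
    }

Splitting in place with '\0' terminators is what lets pointers[] reference substrings of the single 64KiB read buffer without any per-field allocation.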
if(!host->health_max_alarm_id) host->health_max_alarm_id = (uint32_t)now_realtime_sec(); - - host->health_log.next_log_id = host->health_max_unique_id + 1; - host->health_log.next_alarm_id = host->health_max_alarm_id + 1; - - debug(D_HEALTH, "HEALTH [%s]: loaded file '%s' with %zd new alarm entries, updated %zd alarms, errors %zd entries, duplicate %zd", host->hostname, filename, loaded, updated, errored, duplicate); - return loaded; -} - -inline void health_alarm_log_load(RRDHOST *host) { - health_alarm_log_close(host); - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s.old", host->health_log_filename); - FILE *fp = fopen(filename, "r"); - if(!fp) - error("HEALTH [%s]: cannot open health file: %s", host->hostname, filename); - else { - health_alarm_log_read(host, fp, filename); - fclose(fp); - } - - host->health_log_entries_written = 0; - fp = fopen(host->health_log_filename, "r"); - if(!fp) - error("HEALTH [%s]: cannot open health file: %s", host->hostname, host->health_log_filename); - else { - health_alarm_log_read(host, fp, host->health_log_filename); - fclose(fp); - } - - health_alarm_log_open(host); -} - - -// ---------------------------------------------------------------------------- -// health alarm log management - -inline void health_alarm_log( - RRDHOST *host, - uint32_t alarm_id, - uint32_t alarm_event_id, - time_t when, - const char *name, - const char *chart, - const char *family, - const char *exec, - const char *recipient, - time_t duration, - calculated_number old_value, - calculated_number new_value, - RRDCALC_STATUS old_status, - RRDCALC_STATUS new_status, - const char *source, - const char *units, - const char *info, - int delay, - uint32_t flags -) { - debug(D_HEALTH, "Health adding alarm log entry with id: %u", host->health_log.next_log_id); - - ALARM_ENTRY *ae = callocz(1, sizeof(ALARM_ENTRY)); - ae->name = strdupz(name); - ae->hash_name = simple_hash(ae->name); - - if(chart) { - ae->chart = strdupz(chart); - ae->hash_chart = simple_hash(ae->chart); - } - - if(family) - ae->family = strdupz(family); - - if(exec) ae->exec = strdupz(exec); - if(recipient) ae->recipient = strdupz(recipient); - if(source) ae->source = strdupz(source); - if(units) ae->units = strdupz(units); - if(info) ae->info = strdupz(info); - - ae->unique_id = host->health_log.next_log_id++; - ae->alarm_id = alarm_id; - ae->alarm_event_id = alarm_event_id; - ae->when = when; - ae->old_value = old_value; - ae->new_value = new_value; - - char value_string[100 + 1]; - ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1)); - ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1)); - - ae->old_status = old_status; - ae->new_status = new_status; - ae->duration = duration; - ae->delay = delay; - ae->delay_up_to_timestamp = when + delay; - - ae->flags |= flags; - - if(ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL) - ae->non_clear_duration += ae->duration; - - // link it - netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock); - ae->next = host->health_log.alarms; - host->health_log.alarms = ae; - host->health_log.count++; - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); - - // match previous alarms - netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); - ALARM_ENTRY *t; - for(t = host->health_log.alarms ; t ; t = t->next) { - if(t != ae && t->alarm_id == ae->alarm_id) { - if(!(t->flags & HEALTH_ENTRY_FLAG_UPDATED) && 
!t->updated_by_id) { - t->flags |= HEALTH_ENTRY_FLAG_UPDATED; - t->updated_by_id = ae->unique_id; - ae->updates_id = t->unique_id; - - if((t->new_status == RRDCALC_STATUS_WARNING || t->new_status == RRDCALC_STATUS_CRITICAL) && - (t->old_status == RRDCALC_STATUS_WARNING || t->old_status == RRDCALC_STATUS_CRITICAL)) - ae->non_clear_duration += t->non_clear_duration; - - health_alarm_log_save(host, t); - } - - // no need to continue - break; - } - } - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); - - health_alarm_log_save(host, ae); -} - -inline void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae) { - freez(ae->name); - freez(ae->chart); - freez(ae->family); - freez(ae->exec); - freez(ae->recipient); - freez(ae->source); - freez(ae->units); - freez(ae->info); - freez(ae->old_value_string); - freez(ae->new_value_string); - freez(ae); -} - -inline void health_alarm_log_free(RRDHOST *host) { - rrdhost_check_wrlock(host); - - netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock); - - ALARM_ENTRY *ae; - while((ae = host->health_log.alarms)) { - host->health_log.alarms = ae->next; - health_alarm_log_free_one_nochecks_nounlink(ae); - } - - netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); -} diff --git a/src/inlined.h b/src/inlined.h deleted file mode 100644 index 9ab2dca73..000000000 --- a/src/inlined.h +++ /dev/null @@ -1,291 +0,0 @@ -#ifndef NETDATA_INLINED_H -#define NETDATA_INLINED_H - -#include "common.h" - -#ifdef KERNEL_32BIT -typedef uint32_t kernel_uint_t; -#define str2kernel_uint_t(string) str2uint32_t(string) -#define KERNEL_UINT_FORMAT "%u" -#else -typedef uint64_t kernel_uint_t; -#define str2kernel_uint_t(string) str2uint64_t(string) -#define KERNEL_UINT_FORMAT "%" PRIu64 -#endif - -#define str2pid_t(string) str2uint32_t(string) - - -// for faster execution, allow the compiler to inline -// these functions that are called thousands of times per second - -static inline uint32_t simple_hash(const char *name) { - unsigned char *s = (unsigned char *) name; - uint32_t hval = 0x811c9dc5; - while (*s) { - hval *= 16777619; - hval ^= (uint32_t) *s++; - } - return hval; -} - -static inline uint32_t simple_uhash(const char *name) { - unsigned char *s = (unsigned char *) name; - uint32_t hval = 0x811c9dc5, c; - while ((c = *s++)) { - if (unlikely(c >= 'A' && c <= 'Z')) c += 'a' - 'A'; - hval *= 16777619; - hval ^= c; - } - return hval; -} - -static inline int simple_hash_strcmp(const char *name, const char *b, uint32_t *hash) { - unsigned char *s = (unsigned char *) name; - uint32_t hval = 0x811c9dc5; - int ret = 0; - while (*s) { - if(!ret) ret = *s - *b++; - hval *= 16777619; - hval ^= (uint32_t) *s++; - } - *hash = hval; - return ret; -} - -static inline int str2i(const char *s) { - int n = 0; - char c, negative = (*s == '-'); - - for(c = (negative)?*(++s):*s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n += c - '0'; - } - - if(unlikely(negative)) - return -n; - - return n; -} - -static inline long str2l(const char *s) { - long n = 0; - char c, negative = (*s == '-'); - - for(c = (negative)?*(++s):*s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n += c - '0'; - } - - if(unlikely(negative)) - return -n; - - return n; -} - -static inline uint32_t str2uint32_t(const char *s) { - uint32_t n = 0; - char c; - for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n += c - '0'; - } - return n; -} - -static inline uint64_t str2uint64_t(const char *s) { - uint64_t n = 0; - char c; - for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n 
+= c - '0'; - } - return n; -} - -static inline unsigned long str2ul(const char *s) { - unsigned long n = 0; - char c; - for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n += c - '0'; - } - return n; -} - -static inline unsigned long long str2ull(const char *s) { - unsigned long long n = 0; - char c; - for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n += c - '0'; - } - return n; -} - -static inline long long str2ll(const char *s, char **endptr) { - int negative = 0; - - if(unlikely(*s == '-')) { - s++; - negative = 1; - } - else if(unlikely(*s == '+')) - s++; - - long long n = 0; - char c; - for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { - n *= 10; - n += c - '0'; - } - - if(unlikely(endptr)) - *endptr = (char *)s; - - if(unlikely(negative)) - return -n; - else - return n; -} - -static inline long double str2ld(const char *s, char **endptr) { - int negative = 0; - const char *start = s; - unsigned long long integer_part = 0; - unsigned long decimal_part = 0; - size_t decimal_digits = 0; - - switch(*s) { - case '-': - s++; - negative = 1; - break; - - case '+': - s++; - break; - - case 'n': - if(s[1] == 'a' && s[2] == 'n') { - if(endptr) *endptr = (char *)&s[3]; - return NAN; - } - break; - - case 'i': - if(s[1] == 'n' && s[2] == 'f') { - if(endptr) *endptr = (char *)&s[3]; - return INFINITY; - } - break; - - default: - break; - } - - while (*s >= '0' && *s <= '9') { - integer_part = (integer_part * 10) + (*s - '0'); - s++; - } - - if(unlikely(*s == '.')) { - decimal_part = 0; - s++; - - while (*s >= '0' && *s <= '9') { - decimal_part = (decimal_part * 10) + (*s - '0'); - s++; - decimal_digits++; - } - } - - if(unlikely(*s == 'e' || *s == 'E')) - return strtold(start, endptr); - - if(unlikely(endptr)) - *endptr = (char *)s; - - if(unlikely(negative)) { - if(unlikely(decimal_digits)) - return -((long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits)); - else - return -((long double)integer_part); - } - else { - if(unlikely(decimal_digits)) - return (long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits); - else - return (long double)integer_part; - } -} - -#ifdef NETDATA_STRCMP_OVERRIDE -#ifdef strcmp -#undef strcmp -#endif -#define strcmp(a, b) strsame(a, b) -#endif // NETDATA_STRCMP_OVERRIDE - -static inline int strsame(const char *a, const char *b) { - if(unlikely(a == b)) return 0; - while(*a && *a == *b) { a++; b++; } - return *a - *b; -} - -static inline char *strncpyz(char *dst, const char *src, size_t n) { - char *p = dst; - - while (*src && n--) - *dst++ = *src++; - - *dst = '\0'; - - return p; -} - -static inline int read_file(const char *filename, char *buffer, size_t size) { - int fd = open(filename, O_RDONLY, 0666); - if(unlikely(fd == -1)) - return 1; - - ssize_t r = read(fd, buffer, size); - if(unlikely(r == -1)) { - close(fd); - return 2; - } - buffer[r] = '\0'; - - close(fd); - return 0; -} - -static inline int read_single_number_file(const char *filename, unsigned long long *result) { - char buffer[30 + 1]; - - int ret = read_file(filename, buffer, 30); - if(unlikely(ret)) { - *result = 0; - return ret; - } - - buffer[30] = '\0'; - *result = str2ull(buffer); - return 0; -} - -static inline int read_single_signed_number_file(const char *filename, long long *result) { - char buffer[30 + 1]; - - int ret = read_file(filename, buffer, 30); - if(unlikely(ret)) { - *result = 0; - return ret; - } - - buffer[30] = '\0'; - *result = atoll(buffer); - return 0; -} - -#endif //NETDATA_INLINED_H diff --git 
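For reference, simple_hash() and simple_uhash() above implement the 32-bit FNV-1 hash (offset basis 0x811c9dc5, FNV prime 16777619), the uhash variant folding ASCII upper case to lower case before hashing. Callers such as health_readfile() use the hash only as a cheap pre-filter and keep strcasecmp() as the authoritative comparison. A compact, self-contained sketch of that pattern, with illustrative key names:

    #include <inttypes.h>
    #include <stdio.h>
    #include <strings.h>

    /* same scheme as simple_uhash() above: 32-bit FNV-1 with case folding */
    static uint32_t fnv1_uhash(const char *name) {
        const unsigned char *s = (const unsigned char *) name;
        uint32_t hval = 0x811c9dc5, c;
        while ((c = *s++)) {
            if (c >= 'A' && c <= 'Z') c += 'a' - 'A';  /* fold to lower case */
            hval *= 16777619;
            hval ^= c;
        }
        return hval;
    }

    int main(void) {
        /* in the original these hashes are computed once, before parsing */
        uint32_t hash_alarm = fnv1_uhash("alarm");

        const char *key = "ALARM";          /* keys match case-insensitively */
        uint32_t hash = fnv1_uhash(key);

        /* cheap hash pre-filter first, authoritative string compare second */
        if (hash == hash_alarm && !strcasecmp(key, "alarm"))
            printf("key '%s' matched (hash 0x%08" PRIx32 ")\n", key, hash);

        return 0;
    }

Comparing precomputed hashes first skips the string comparison for almost every non-matching keyword, which keeps the long if/else keyword chain in health_readfile() cheap.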
a/src/ipc.c b/src/ipc.c deleted file mode 100644 index a9076fca4..000000000 --- a/src/ipc.c +++ /dev/null @@ -1,261 +0,0 @@ -#include "common.h" - -#include <sys/sem.h> -#include <sys/msg.h> -#include <sys/shm.h> - - -#ifndef SEMVMX -#define SEMVMX 32767 /* <= 32767 semaphore maximum value */ -#endif - -/* Some versions of libc only define IPC_INFO when __USE_GNU is defined. */ -#ifndef IPC_INFO -#define IPC_INFO 3 -#endif - -struct ipc_limits { - uint64_t shmmni; /* max number of segments */ - uint64_t shmmax; /* max segment size */ - uint64_t shmall; /* max total shared memory */ - uint64_t shmmin; /* min segment size */ - - int semmni; /* max number of arrays */ - int semmsl; /* max semaphores per array */ - int semmns; /* max semaphores system wide */ - int semopm; /* max ops per semop call */ - unsigned int semvmx; /* semaphore max value (constant) */ - - int msgmni; /* max queues system wide */ - size_t msgmax; /* max size of message */ - int msgmnb; /* default max size of queue */ -}; - -struct ipc_status { - int semusz; /* current number of arrays */ - int semaem; /* current semaphores system wide */ -}; - -/* - * The last arg of semctl is a union semun, but where is it defined? X/OPEN - * tells us to define it ourselves, but until recently Linux include files - * would also define it. - */ -#ifndef HAVE_UNION_SEMUN -/* according to X/OPEN we have to define it ourselves */ -union semun { - int val; - struct semid_ds *buf; - unsigned short int *array; - struct seminfo *__buf; -}; -#endif - -static inline int ipc_sem_get_limits(struct ipc_limits *lim) { - static procfile *ff = NULL; - static int error_shown = 0; - static char filename[FILENAME_MAX + 1] = ""; - - if(unlikely(!filename[0])) - snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/sem", netdata_configured_host_prefix); - - if(unlikely(!ff)) { - ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - if(unlikely(!error_shown)) { - error("IPC: Cannot open file '%s'.", filename); - error_shown = 1; - } - goto ipc; - } - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - if(unlikely(!error_shown)) { - error("IPC: Cannot read file '%s'.", filename); - error_shown = 1; - } - goto ipc; - } - - if(procfile_lines(ff) >= 1 && procfile_linewords(ff, 0) >= 4) { - lim->semvmx = SEMVMX; - lim->semmsl = str2i(procfile_lineword(ff, 0, 0)); - lim->semmns = str2i(procfile_lineword(ff, 0, 1)); - lim->semopm = str2i(procfile_lineword(ff, 0, 2)); - lim->semmni = str2i(procfile_lineword(ff, 0, 3)); - return 0; - } - else { - if(unlikely(!error_shown)) { - error("IPC: Invalid content in file '%s'.", filename); - error_shown = 1; - } - goto ipc; - } - -ipc: - // cannot do it from the file - // query IPC - { - struct seminfo seminfo = {.semmni = 0}; - union semun arg = {.array = (ushort *) &seminfo}; - - if(unlikely(semctl(0, 0, IPC_INFO, arg) < 0)) { - error("IPC: Failed to read '%s' and request IPC_INFO with semctl().", filename); - goto error; - } - - lim->semvmx = SEMVMX; - lim->semmni = seminfo.semmni; - lim->semmsl = seminfo.semmsl; - lim->semmns = seminfo.semmns; - lim->semopm = seminfo.semopm; - return 0; - } - -error: - lim->semvmx = 0; - lim->semmni = 0; - lim->semmsl = 0; - lim->semmns = 0; - lim->semopm = 0; - return -1; -} - -/* -printf ("------ Semaphore Limits --------\n"); -printf ("max number of arrays = %d\n", limits.semmni); -printf ("max semaphores per array = %d\n", limits.semmsl); -printf ("max semaphores system wide = %d\n", limits.semmns); -printf ("max ops per semop call = %d\n", 
limits.semopm); -printf ("semaphore max value = %u\n", limits.semvmx); - -printf ("------ Semaphore Status --------\n"); -printf ("used arrays = %d\n", status.semusz); -printf ("allocated semaphores = %d\n", status.semaem); -*/ - -static inline int ipc_sem_get_status(struct ipc_status *st) { - struct seminfo seminfo; - union semun arg; - - arg.array = (ushort *) (void *) &seminfo; - - if(unlikely(semctl (0, 0, SEM_INFO, arg) < 0)) { - /* kernel not configured for semaphores */ - static int error_shown = 0; - if(unlikely(!error_shown)) { - error("IPC: kernel is not configured for semaphores"); - error_shown = 1; - } - st->semusz = 0; - st->semaem = 0; - return -1; - } - - st->semusz = seminfo.semusz; - st->semaem = seminfo.semaem; - return 0; -} - -int do_ipc(int update_every, usec_t dt) { - (void)dt; - - static int initialized = 0, read_limits_next = -1; - static struct ipc_limits limits; - static struct ipc_status status; - static RRDSETVAR *arrays_max = NULL, *semaphores_max = NULL; - static RRDSET *st_semaphores = NULL, *st_arrays = NULL; - static RRDDIM *rd_semaphores = NULL, *rd_arrays = NULL; - - if(unlikely(!initialized)) { - initialized = 1; - - // make sure it works - if(ipc_sem_get_limits(&limits) == -1) { - error("unable to fetch semaphore limits"); - return 1; - } - - // make sure it works - if(ipc_sem_get_status(&status) == -1) { - error("unable to fetch semaphore statistics"); - return 1; - } - - // create the charts - if(unlikely(!st_semaphores)) { - st_semaphores = rrdset_create_localhost( - "system" - , "ipc_semaphores" - , NULL - , "ipc semaphores" - , NULL - , "IPC Semaphores" - , "semaphores" - , "linux" - , "ipc" - , 1000 - , localhost->rrd_update_every - , RRDSET_TYPE_AREA - ); - rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - if(unlikely(!st_arrays)) { - st_arrays = rrdset_create_localhost( - "system" - , "ipc_semaphore_arrays" - , NULL - , "ipc semaphores" - , NULL - , "IPC Semaphore Arrays" - , "arrays" - , "linux" - , "ipc" - , 1000 - , localhost->rrd_update_every - , RRDSET_TYPE_AREA - ); - rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - // variables - semaphores_max = rrdsetvar_custom_chart_variable_create(st_semaphores, "ipc.semaphores.max"); - arrays_max = rrdsetvar_custom_chart_variable_create(st_arrays, "ipc.semaphores.arrays.max"); - } - - if(unlikely(read_limits_next < 0)) { - if(unlikely(ipc_sem_get_limits(&limits) == -1)) { - error("Unable to fetch semaphore limits."); - } - else { - if(semaphores_max) rrdsetvar_custom_chart_variable_set(semaphores_max, limits.semmns); - if(arrays_max) rrdsetvar_custom_chart_variable_set(arrays_max, limits.semmni); - - st_arrays->red = limits.semmni; - st_semaphores->red = limits.semmns; - - read_limits_next = 60 / update_every; - } - } - else - read_limits_next--; - - if(unlikely(ipc_sem_get_status(&status) == -1)) { - error("Unable to get semaphore statistics"); - return 0; - } - - if(st_semaphores->counter_done) rrdset_next(st_semaphores); - rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem); - rrdset_done(st_semaphores); - - if(st_arrays->counter_done) rrdset_next(st_arrays); - rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz); - rrdset_done(st_arrays); - - return 0; -} diff --git a/src/ipc.h b/src/ipc.h deleted file mode 100644 index 04f9df5cd..000000000 --- a/src/ipc.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef NETDATA_PLUGIN_IPC_H -#define NETDATA_PLUGIN_IPC_H 1 - -extern int do_ipc(int update_every, 
usec_t dt); - -#endif /* NETDATA_PLUGIN_IPC_H */ - diff --git a/src/locks.c b/src/locks.c deleted file mode 100644 index c5b42c921..000000000 --- a/src/locks.c +++ /dev/null @@ -1,319 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// automatic thread cancelability management, based on locks - -static __thread int netdata_thread_first_cancelability = 0; -static __thread int netdata_thread_lock_cancelability = 0; - -inline void netdata_thread_disable_cancelability(void) { - int old; - int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old); - if(ret != 0) - error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret); - else { - if(!netdata_thread_lock_cancelability) - netdata_thread_first_cancelability = old; - - netdata_thread_lock_cancelability++; - } -} - -inline void netdata_thread_enable_cancelability(void) { - if(netdata_thread_lock_cancelability < 1) { - error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d on thread %s - results will be undefined - please report this!", netdata_thread_lock_cancelability, netdata_thread_tag()); - } - else if(netdata_thread_lock_cancelability == 1) { - int old = 1; - int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old); - if(ret != 0) - error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret); - else { - if(old != PTHREAD_CANCEL_DISABLE) - error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, (old == PTHREAD_CANCEL_ENABLE)?"ENABLED":"UNKNOWN", old); - } - - netdata_thread_lock_cancelability = 0; - } - else - netdata_thread_lock_cancelability--; -} - -// ---------------------------------------------------------------------------- -// mutex - -int __netdata_mutex_init(netdata_mutex_t *mutex) { - int ret = pthread_mutex_init(mutex, NULL); - if(unlikely(ret != 0)) - error("MUTEX_LOCK: failed to initialize (code %d).", ret); - return ret; -} - -int __netdata_mutex_lock(netdata_mutex_t *mutex) { - netdata_thread_disable_cancelability(); - - int ret = pthread_mutex_lock(mutex); - if(unlikely(ret != 0)) { - netdata_thread_enable_cancelability(); - error("MUTEX_LOCK: failed to get lock (code %d)", ret); - } - return ret; -} - -int __netdata_mutex_trylock(netdata_mutex_t *mutex) { - netdata_thread_disable_cancelability(); - - int ret = pthread_mutex_trylock(mutex); - if(ret != 0) - netdata_thread_enable_cancelability(); - - return ret; -} - -int __netdata_mutex_unlock(netdata_mutex_t *mutex) { - int ret = pthread_mutex_unlock(mutex); - if(unlikely(ret != 0)) - error("MUTEX_LOCK: failed to unlock (code %d).", ret); - else - netdata_thread_enable_cancelability(); - - return ret; -} - -int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { - usec_t start = 0; - - if(unlikely(debug_flags & D_LOCKS)) { - start = now_boottime_usec(); - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) from %lu@%s, %s()", mutex, line, file, function); - } - - int ret = __netdata_mutex_init(mutex); - - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); - - return ret; -} - -int 
netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { - usec_t start = 0; - - if(unlikely(debug_flags & D_LOCKS)) { - start = now_boottime_usec(); - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) from %lu@%s, %s()", mutex, line, file, function); - } - - int ret = __netdata_mutex_lock(mutex); - - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); - - return ret; -} - -int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { - usec_t start = 0; - - if(unlikely(debug_flags & D_LOCKS)) { - start = now_boottime_usec(); - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) from %lu@%s, %s()", mutex, line, file, function); - } - - int ret = __netdata_mutex_trylock(mutex); - - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); - - return ret; -} - -int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) { - usec_t start = 0; - - if(unlikely(debug_flags & D_LOCKS)) { - start = now_boottime_usec(); - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) from %lu@%s, %s()", mutex, line, file, function); - } - - int ret = __netdata_mutex_unlock(mutex); - - debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function); - - return ret; -} - - -// ---------------------------------------------------------------------------- -// r/w lock - -int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) { - int ret = pthread_rwlock_destroy(rwlock); - if(unlikely(ret != 0)) - error("RW_LOCK: failed to destroy lock (code %d)", ret); - return ret; -} - -int __netdata_rwlock_init(netdata_rwlock_t *rwlock) { - int ret = pthread_rwlock_init(rwlock, NULL); - if(unlikely(ret != 0)) - error("RW_LOCK: failed to initialize lock (code %d)", ret); - return ret; -} - -int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - - int ret = pthread_rwlock_rdlock(rwlock); - if(unlikely(ret != 0)) { - netdata_thread_enable_cancelability(); - error("RW_LOCK: failed to obtain read lock (code %d)", ret); - } - - return ret; -} - -int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - - int ret = pthread_rwlock_wrlock(rwlock); - if(unlikely(ret != 0)) { - error("RW_LOCK: failed to obtain write lock (code %d)", ret); - netdata_thread_enable_cancelability(); - } - - return ret; -} - -int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) { - int ret = pthread_rwlock_unlock(rwlock); - if(unlikely(ret != 0)) - error("RW_LOCK: failed to release lock (code %d)", ret); - else - netdata_thread_enable_cancelability(); - - return ret; -} - -int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - - int ret = pthread_rwlock_tryrdlock(rwlock); - if(ret != 0) - netdata_thread_enable_cancelability(); - - return ret; -} - -int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - - int ret = pthread_rwlock_trywrlock(rwlock); - if(ret != 0) - netdata_thread_enable_cancelability(); - - return ret; -} - - -int netdata_rwlock_destroy_debug( const char *file, const char 
*function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_destroy(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
-
-int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_init(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
-
-int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_rdlock(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
-
-int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_wrlock(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
-
-int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_unlock(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
-
-int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_tryrdlock(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
-
-int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
-    usec_t start = 0;
-
-    if(unlikely(debug_flags & D_LOCKS)) {
-        start = now_boottime_usec();
-        debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
-    }
-
-    int ret = __netdata_rwlock_trywrlock(rwlock);
-
-    debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
-    return ret;
-}
diff --git a/src/locks.h b/src/locks.h
deleted file mode 100644
index 36962fef2..000000000
--- a/src/locks.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef NETDATA_LOCKS_H
-#define NETDATA_LOCKS_H
-
-typedef pthread_mutex_t netdata_mutex_t;
-#define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
-
-typedef pthread_rwlock_t netdata_rwlock_t;
-#define NETDATA_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
-
-extern int __netdata_mutex_init(netdata_mutex_t *mutex);
-extern int __netdata_mutex_lock(netdata_mutex_t *mutex);
-extern int __netdata_mutex_trylock(netdata_mutex_t *mutex);
-extern int __netdata_mutex_unlock(netdata_mutex_t *mutex);
-
-extern int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_init(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock);
-
-extern int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-extern int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-extern int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-extern int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-
-extern int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-
-extern void netdata_thread_disable_cancelability(void);
-extern void netdata_thread_enable_cancelability(void);
-
-#ifdef NETDATA_INTERNAL_CHECKS
-
-#define netdata_mutex_init(mutex) netdata_mutex_init_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-#define netdata_mutex_lock(mutex) netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-#define netdata_mutex_trylock(mutex) netdata_mutex_trylock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-#define netdata_mutex_unlock(mutex) netdata_mutex_unlock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-
-#define netdata_rwlock_destroy(rwlock) netdata_rwlock_destroy_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_init(rwlock) netdata_rwlock_init_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_rdlock(rwlock) netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_wrlock(rwlock) netdata_rwlock_wrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_unlock(rwlock) netdata_rwlock_unlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_tryrdlock(rwlock) netdata_rwlock_tryrdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_trywrlock(rwlock) netdata_rwlock_trywrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-
-#else // !NETDATA_INTERNAL_CHECKS
-
-#define netdata_mutex_init(mutex) __netdata_mutex_init(mutex)
-#define netdata_mutex_lock(mutex) __netdata_mutex_lock(mutex)
-#define netdata_mutex_trylock(mutex) __netdata_mutex_trylock(mutex)
-#define netdata_mutex_unlock(mutex) __netdata_mutex_unlock(mutex)
-
-#define netdata_rwlock_destroy(rwlock) __netdata_rwlock_destroy(rwlock)
-#define netdata_rwlock_init(rwlock) __netdata_rwlock_init(rwlock)
-#define netdata_rwlock_rdlock(rwlock) __netdata_rwlock_rdlock(rwlock)
-#define netdata_rwlock_wrlock(rwlock) __netdata_rwlock_wrlock(rwlock)
-#define netdata_rwlock_unlock(rwlock) __netdata_rwlock_unlock(rwlock)
-#define netdata_rwlock_tryrdlock(rwlock) __netdata_rwlock_tryrdlock(rwlock)
-#define netdata_rwlock_trywrlock(rwlock) __netdata_rwlock_trywrlock(rwlock)
-
-#endif // NETDATA_INTERNAL_CHECKS
-
-#endif //NETDATA_LOCKS_H
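The deleted wrappers above all follow one pattern: capture the call site, time the underlying pthread call, and log both. A minimal standalone sketch of that pattern, using plain pthreads and CLOCK_MONOTONIC in place of netdata's now_boottime_usec() and debug() helpers (names here are illustrative, not from the source):

    /* Sketch: a timed rwlock wrapper, selected via a macro so release
     * builds can alias the raw pthread call instead. */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static unsigned long long monotonic_usec(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000ULL;
    }

    static int rwlock_wrlock_debug(const char *file, const char *func,
                                   unsigned long line, pthread_rwlock_t *lock) {
        unsigned long long start = monotonic_usec();
        int ret = pthread_rwlock_wrlock(lock);
        fprintf(stderr, "RW_LOCK: wrlock(%p) = %d in %llu usec, from %lu@%s, %s()\n",
                (void *)lock, ret, monotonic_usec() - start, line, file, func);
        return ret;
    }

    /* the macro captures the caller's location, exactly as locks.h does below */
    #define rwlock_wrlock(lock) rwlock_wrlock_debug(__FILE__, __func__, __LINE__, lock)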
diff --git a/src/log.c b/src/log.c
deleted file mode 100644
index edb4482a0..000000000
--- a/src/log.c
+++ /dev/null
@@ -1,427 +0,0 @@
-#include "common.h"
-
-int web_server_is_multithreaded = 1;
-
-const char *program_name = "";
-uint64_t debug_flags = DEBUG;
-
-int access_log_syslog = 1;
-int error_log_syslog = 1;
-int output_log_syslog = 1;  // debug log
-
-int stdaccess_fd = -1;
-FILE *stdaccess = NULL;
-
-const char *stdaccess_filename = NULL;
-const char *stderr_filename = NULL;
-const char *stdout_filename = NULL;
-
-void syslog_init(void) {
-    static int i = 0;
-
-    if(!i) {
-        openlog(program_name, LOG_PID, LOG_DAEMON);
-        i = 1;
-    }
-}
-
-#define LOG_DATE_LENGTH 26
-
-static inline void log_date(char *buffer, size_t len) {
-    if(unlikely(!buffer || !len))
-        return;
-
-    time_t t;
-    struct tm *tmp, tmbuf;
-
-    t = now_realtime_sec();
-    tmp = localtime_r(&t, &tmbuf);
-
-    if (tmp == NULL) {
-        buffer[0] = '\0';
-        return;
-    }
-
-    if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
-        buffer[0] = '\0';
-
-    buffer[len - 1] = '\0';
-}
-
-static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER;
-static inline void log_lock() {
-    netdata_mutex_lock(&log_mutex);
-}
-static inline void log_unlock() {
-    netdata_mutex_unlock(&log_mutex);
-}
-
-int open_log_file(int fd, FILE **fp, const char *filename, int *enabled_syslog) {
-    int f, devnull = 0;
-
-    if(!filename || !*filename || !strcmp(filename, "none") || !strcmp(filename, "/dev/null")) {
-        filename = "/dev/null";
-        devnull = 1;
-    }
-
-    if(!strcmp(filename, "syslog")) {
-        filename = "/dev/null";
-        devnull = 1;
-        syslog_init();
-        if(enabled_syslog) *enabled_syslog = 1;
-    }
-    else if(enabled_syslog) *enabled_syslog = 0;
-
-    // don't do anything if the user is willing
-    // to have the standard one
-    if(!strcmp(filename, "system")) {
-        if(fd != -1 && fp != &stdaccess)
-            return fd;
-
-        filename = "stderr";
-    }
-
-    if(!strcmp(filename, "stdout"))
-        f = STDOUT_FILENO;
-
-    else if(!strcmp(filename, "stderr"))
-        f = STDERR_FILENO;
-
-    else {
-        f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
-        if(f == -1) {
-            error("Cannot open file '%s'. Leaving %d to its default.", filename, fd);
-            return fd;
-        }
-    }
-
-    if(devnull && fp == &stdaccess) {
-        fd = -1;
-        *fp = NULL;
-    }
-
-    // if there is a level-2 file pointer
-    // flush it before switching the level-1 fds
-    if(fp && *fp)
-        fflush(*fp);
-
-    if(fd != f && fd != -1) {
-        // it automatically closes
-        int t = dup2(f, fd);
-        if (t == -1) {
-            error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
-            close(f);
-            return fd;
-        }
-        // info("dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
-        close(f);
-    }
-    else fd = f;
-
-    if(fp && !*fp) {
-        *fp = fdopen(fd, "a");
-        if (!*fp)
-            error("Cannot fdopen() fd %d ('%s')", fd, filename);
-        else {
-            if (setvbuf(*fp, NULL, _IOLBF, 0) != 0)
-                error("Cannot set line buffering on fd %d ('%s')", fd, filename);
-        }
-    }
-
-    return fd;
-}
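open_log_file() above redirects one of the process's well-known file descriptors to a target file and re-attaches a buffered stream to it. A reduced sketch of the same open() + dup2() dance, with the error handling trimmed (the redirect_fd_to_file() name is illustrative, not from the source):

    /* Sketch: point an existing fd (e.g. STDERR_FILENO) at a log file. */
    #include <fcntl.h>
    #include <unistd.h>

    int redirect_fd_to_file(int fd, const char *filename) {
        int f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
        if (f == -1) return fd;          /* keep the old fd on failure */
        if (f != fd) {
            if (dup2(f, fd) == -1) {     /* atomically closes and replaces fd */
                close(f);
                return fd;
            }
            close(f);                    /* the temporary duplicate is no longer needed */
        }
        return fd;
    }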
-
-void reopen_all_log_files() {
-    if(stdout_filename)
-        open_log_file(STDOUT_FILENO, (FILE **)&stdout, stdout_filename, &output_log_syslog);
-
-    if(stderr_filename)
-        open_log_file(STDERR_FILENO, (FILE **)&stderr, stderr_filename, &error_log_syslog);
-
-    if(stdaccess_filename)
-        stdaccess_fd = open_log_file(stdaccess_fd, (FILE **)&stdaccess, stdaccess_filename, &access_log_syslog);
-}
-
-void open_all_log_files() {
-    // disable stdin
-    open_log_file(STDIN_FILENO, (FILE **)&stdin, "/dev/null", NULL);
-
-    open_log_file(STDOUT_FILENO, (FILE **)&stdout, stdout_filename, &output_log_syslog);
-    open_log_file(STDERR_FILENO, (FILE **)&stderr, stderr_filename, &error_log_syslog);
-    stdaccess_fd = open_log_file(stdaccess_fd, (FILE **)&stdaccess, stdaccess_filename, &access_log_syslog);
-}
-
-// ----------------------------------------------------------------------------
-// error log throttling
-
-time_t error_log_throttle_period_backup = 0;
-time_t error_log_throttle_period = 1200;
-unsigned long error_log_errors_per_period = 200;
-
-int error_log_limit(int reset) {
-    static time_t start = 0;
-    static unsigned long counter = 0, prevented = 0;
-
-    // do not throttle if the period is 0
-    if(error_log_throttle_period == 0)
-        return 0;
-
-    // prevent all logs if the errors per period is 0
-    if(error_log_errors_per_period == 0)
-#ifdef NETDATA_INTERNAL_CHECKS
-        return 0;
-#else
-        return 1;
-#endif
-
-    time_t now = now_monotonic_sec();
-    if(!start) start = now;
-
-    if(reset) {
-        if(prevented) {
-            char date[LOG_DATE_LENGTH];
-            log_date(date, LOG_DATE_LENGTH);
-            fprintf(stderr, "%s: %s Resetting logging for process '%s' (prevented %lu logs in the last %ld seconds).\n"
-                    , date
-                    , program_name
-                    , program_name
-                    , prevented
-                    , now - start
-            );
-        }
-
-        start = now;
-        counter = 0;
-        prevented = 0;
-    }
-
-    // detect if we log too much
-    counter++;
-
-    if(now - start > error_log_throttle_period) {
-        if(prevented) {
-            char date[LOG_DATE_LENGTH];
-            log_date(date, LOG_DATE_LENGTH);
-            fprintf(stderr, "%s: %s Resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n"
-                    , date
-                    , program_name
-                    , program_name
-                    , prevented
-                    , error_log_throttle_period
-            );
-        }
-
-        // restart the period accounting
-        start = now;
-        counter = 1;
-        prevented = 0;
-
-        // log this error
-        return 0;
-    }
-
-    if(counter > error_log_errors_per_period) {
-        if(!prevented) {
-            char date[LOG_DATE_LENGTH];
-            log_date(date, LOG_DATE_LENGTH);
-            fprintf(stderr, "%s: %s Too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n"
                    , date
                    , program_name
                    , counter
                    , now - start
                    , error_log_errors_per_period
                    , error_log_throttle_period
                    , program_name
                    , start + error_log_throttle_period - now);
-        }
-
-        prevented++;
-
-        // prevent logging this error
-#ifdef NETDATA_INTERNAL_CHECKS
-        return 0;
-#else
-        return 1;
-#endif
-    }
-
-    return 0;
-}
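Callers are expected to consult error_log_limit(0) before emitting each line, as info_int() and error_int() below do; a non-zero return means the current window's budget is spent. A minimal usage sketch (the log_error_line() helper is illustrative only):

    /* Sketch: how a logger consults the limiter above before writing. */
    #include <stdio.h>

    extern int error_log_limit(int reset);   /* from log.c above */

    void log_error_line(const char *msg) {
        if (error_log_limit(0))              /* non-zero: over budget, drop the line */
            return;
        fprintf(stderr, "ERROR: %s\n", msg);
    }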
-
-// ----------------------------------------------------------------------------
-// debug log
-
-void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
-    va_list args;
-
-    char date[LOG_DATE_LENGTH];
-    log_date(date, LOG_DATE_LENGTH);
-
-    va_start( args, fmt );
-    printf("%s: %s DEBUG : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
-    vprintf(fmt, args);
-    va_end( args );
-    putchar('\n');
-
-    if(output_log_syslog) {
-        va_start( args, fmt );
-        vsyslog(LOG_ERR, fmt, args );
-        va_end( args );
-    }
-
-    fflush(stdout);
-}
-
-// ----------------------------------------------------------------------------
-// info log
-
-void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... )
-{
-    va_list args;
-
-    // prevent logging too much
-    if(error_log_limit(0)) return;
-
-    if(error_log_syslog) {
-        va_start( args, fmt );
-        vsyslog(LOG_INFO, fmt, args );
-        va_end( args );
-    }
-
-    char date[LOG_DATE_LENGTH];
-    log_date(date, LOG_DATE_LENGTH);
-
-    log_lock();
-
-    va_start( args, fmt );
-    if(debug_flags) fprintf(stderr, "%s: %s INFO : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
-    else fprintf(stderr, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
-    vfprintf( stderr, fmt, args );
-    va_end( args );
-
-    fputc('\n', stderr);
-
-    log_unlock();
-}
-
-// ----------------------------------------------------------------------------
-// error log
-
-#if defined(STRERROR_R_CHAR_P)
-// GLIBC version of strerror_r
-static const char *strerror_result(const char *a, const char *b) { (void)b; return a; }
-#elif defined(HAVE_STRERROR_R)
-// POSIX version of strerror_r
-static const char *strerror_result(int a, const char *b) { (void)a; return b; }
-#elif defined(HAVE_C__GENERIC)
-
-// what a trick!
-// http://stackoverflow.com/questions/479207/function-overloading-in-c
-static const char *strerror_result_int(int a, const char *b) { (void)a; return b; }
-static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; }
-
-#define strerror_result(a, b) _Generic((a), \
-        int: strerror_result_int, \
-        char *: strerror_result_string \
-    )(a, b)
-
-#else
-#error "cannot detect the format of function strerror_r()"
-#endif
-
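The _Generic selection above dispatches on the return type of strerror_r(), which is int in the POSIX variant and char * in the GNU variant. A compilable C11 toy showing the same trick, with hypothetical pick_int()/pick_str() handlers:

    /* Sketch: compile-time overloading on the type of the first argument. */
    #include <stdio.h>

    static const char *pick_int(int a, const char *b) { (void)a; return b; }
    static const char *pick_str(const char *a, const char *b) { (void)b; return a; }

    #define strerror_pick(a, b) _Generic((a), \
            int:    pick_int,                 \
            char *: pick_str                  \
        )(a, b)

    int main(void) {
        char buf[16] = "fallback";
        printf("%s\n", strerror_pick(0, buf));              /* int -> use the buffer */
        printf("%s\n", strerror_pick((char *)"gnu", buf));  /* char * -> returned pointer */
        return 0;
    }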
-void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
-    // save a copy of errno - just in case this function generates a new error
-    int __errno = errno;
-
-    va_list args;
-
-    // prevent logging too much
-    if(error_log_limit(0)) return;
-
-    if(error_log_syslog) {
-        va_start( args, fmt );
-        vsyslog(LOG_ERR, fmt, args );
-        va_end( args );
-    }
-
-    char date[LOG_DATE_LENGTH];
-    log_date(date, LOG_DATE_LENGTH);
-
-    log_lock();
-
-    va_start( args, fmt );
-    if(debug_flags) fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function);
-    else fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
-    vfprintf( stderr, fmt, args );
-    va_end( args );
-
-    if(__errno) {
-        char buf[1024];
-        fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
-        errno = 0;
-    }
-    else
-        fputc('\n', stderr);
-
-    log_unlock();
-}
-
-void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
-    va_list args;
-
-    if(error_log_syslog) {
-        va_start( args, fmt );
-        vsyslog(LOG_CRIT, fmt, args );
-        va_end( args );
-    }
-
-    char date[LOG_DATE_LENGTH];
-    log_date(date, LOG_DATE_LENGTH);
-
-    log_lock();
-
-    va_start( args, fmt );
-    if(debug_flags) fprintf(stderr, "%s: %s FATAL : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
-    else fprintf(stderr, "%s: %s FATAL : %s :", date, program_name, netdata_thread_tag());
-    vfprintf( stderr, fmt, args );
-    va_end( args );
-
-    perror(" # ");
-    fputc('\n', stderr);
-
-    log_unlock();
-
-    netdata_cleanup_and_exit(1);
-}
-
-// ----------------------------------------------------------------------------
-// access log
-
-void log_access( const char *fmt, ... ) {
-    va_list args;
-
-    if(access_log_syslog) {
-        va_start( args, fmt );
-        vsyslog(LOG_INFO, fmt, args );
-        va_end( args );
-    }
-
-    if(stdaccess) {
-        static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER;
-
-        if(web_server_is_multithreaded)
-            netdata_mutex_lock(&access_mutex);
-
-        char date[LOG_DATE_LENGTH];
-        log_date(date, LOG_DATE_LENGTH);
-        fprintf(stdaccess, "%s: ", date);
-
-        va_start( args, fmt );
-        vfprintf( stdaccess, fmt, args );
-        va_end( args );
-        fputc('\n', stdaccess);
-
-        if(web_server_is_multithreaded)
-            netdata_mutex_unlock(&access_mutex);
-    }
-}
diff --git a/src/log.h b/src/log.h
deleted file mode 100644
index 81c85481b..000000000
--- a/src/log.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef NETDATA_LOG_H
-#define NETDATA_LOG_H 1
-
-#define D_WEB_BUFFER        0x0000000000000001
-#define D_WEB_CLIENT        0x0000000000000002
-#define D_LISTENER          0x0000000000000004
-#define D_WEB_DATA          0x0000000000000008
-#define D_OPTIONS           0x0000000000000010
-#define D_PROCNETDEV_LOOP   0x0000000000000020
-#define D_RRD_STATS         0x0000000000000040
-#define D_WEB_CLIENT_ACCESS 0x0000000000000080
-#define D_TC_LOOP           0x0000000000000100
-#define D_DEFLATE           0x0000000000000200
-#define D_CONFIG            0x0000000000000400
-#define D_PLUGINSD          0x0000000000000800
-#define D_CHILDS            0x0000000000001000
-#define D_EXIT              0x0000000000002000
-#define D_CHECKS            0x0000000000004000
-#define D_NFACCT_LOOP       0x0000000000008000
-#define D_PROCFILE          0x0000000000010000
-#define D_RRD_CALLS         0x0000000000020000
-#define D_DICTIONARY        0x0000000000040000
-#define D_MEMORY            0x0000000000080000
-#define D_CGROUP            0x0000000000100000
-#define D_REGISTRY          0x0000000000200000
-#define D_VARIABLES         0x0000000000400000
-#define D_HEALTH            0x0000000000800000
-#define D_CONNECT_TO        0x0000000001000000
-#define D_RRDHOST           0x0000000002000000
-#define D_LOCKS             0x0000000004000000
-#define D_BACKEND           0x0000000008000000
-#define D_STATSD            0x0000000010000000
-#define D_POLLFD            0x0000000020000000
-#define D_STREAM            0x0000000040000000
-#define D_SYSTEM            0x8000000000000000
-
-//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
-//#define DEBUG 0xffffffff
-#define DEBUG (0)
-
-extern int web_server_is_multithreaded;
-
-extern uint64_t debug_flags;
-
-extern const char *program_name;
-
-extern int stdaccess_fd;
-extern FILE *stdaccess;
-
-extern const char *stdaccess_filename;
-extern const char *stderr_filename;
-extern const char *stdout_filename;
-
-extern int access_log_syslog;
-extern int error_log_syslog;
-extern int output_log_syslog;
-
-extern time_t error_log_throttle_period, error_log_throttle_period_backup;
-extern unsigned long error_log_errors_per_period;
-extern int error_log_limit(int reset);
-
-extern void open_all_log_files();
-extern void reopen_all_log_files();
-
-static inline void debug_dummy(void) {}
-
-#define error_log_limit_reset() do { error_log_throttle_period = error_log_throttle_period_backup; error_log_limit(1); } while(0)
-#define error_log_limit_unlimited() do { error_log_throttle_period = 0; } while(0)
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
-#else
-#define debug(type, args...) debug_dummy()
-#endif
-
-#define info(args...)    info_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define error(args...)   error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define fatal(args...)   fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-
-extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
-extern void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
-extern void log_access( const char *fmt, ... ) PRINTFLIKE(1, 2);
-
-#endif /* NETDATA_LOG_H */
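With NETDATA_INTERNAL_CHECKS defined, the debug() macro above expands to a bitmask test against debug_flags plus call-site capture; otherwise it compiles to a no-op. A self-contained sketch of the masked-logging idea (simplified: no call-site capture):

    /* Sketch: subsystem-masked debug logging in the style of log.h above. */
    #include <stdint.h>
    #include <stdio.h>

    #define D_LOCKS  0x0000000004000000ULL
    #define D_HEALTH 0x0000000000800000ULL

    static uint64_t debug_flags = D_LOCKS;   /* e.g. parsed from the command line */

    #define debug(type, ...) do {                                   \
            if (debug_flags & (type)) {                             \
                fprintf(stderr, __VA_ARGS__);                       \
                fputc('\n', stderr);                                \
            }                                                       \
        } while (0)

    int main(void) {
        debug(D_LOCKS,  "lock traffic is printed");    /* flag set: printed */
        debug(D_HEALTH, "health is filtered out");     /* flag clear: skipped */
        return 0;
    }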
diff --git a/src/macos_fw.c b/src/macos_fw.c
deleted file mode 100644
index 5e8ce0ee4..000000000
--- a/src/macos_fw.c
+++ /dev/null
@@ -1,684 +0,0 @@
-#include "common.h"
-#include <CoreFoundation/CoreFoundation.h>
-#include <IOKit/IOKitLib.h>
-#include <IOKit/storage/IOBlockStorageDriver.h>
-#include <IOKit/IOBSD.h>
-// NEEDED BY do_space, do_inodes
-#include <sys/mount.h>
-// NEEDED BY: struct ifaddrs, getifaddrs()
-#include <net/if.h>
-#include <ifaddrs.h>
-
-// NEEDED BY: do_bandwidth
-#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
-
-#define MAXDRIVENAME 31
-
-#define KILO_FACTOR 1024
-#define MEGA_FACTOR 1048576     // 1024 * 1024
-#define GIGA_FACTOR 1073741824  // 1024 * 1024 * 1024
-
-int do_macos_iokit(int update_every, usec_t dt) {
-    (void)dt;
-
-    static int do_io = -1, do_space = -1, do_inodes = -1, do_bandwidth = -1;
-
-    if (unlikely(do_io == -1)) {
-        do_io = config_get_boolean("plugin:macos:iokit", "disk i/o", 1);
-        do_space = config_get_boolean("plugin:macos:sysctl", "space usage for all disks", 1);
-        do_inodes = config_get_boolean("plugin:macos:sysctl", "inodes usage for all disks", 1);
-        do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
-    }
-
-    RRDSET *st;
-
-    mach_port_t master_port;
-    io_registry_entry_t drive, drive_media;
-    io_iterator_t drive_list;
-    CFDictionaryRef properties, statistics;
-    CFStringRef name;
-    CFNumberRef number;
-    kern_return_t status;
-    collected_number total_disk_reads = 0;
-    collected_number total_disk_writes = 0;
-    struct diskstat {
-        char name[MAXDRIVENAME];
-        collected_number bytes_read;
-        collected_number bytes_write;
-        collected_number reads;
-        collected_number writes;
-        collected_number time_read;
-        collected_number time_write;
-        collected_number latency_read;
-        collected_number latency_write;
-    } diskstat;
-    struct cur_diskstat {
-        collected_number duration_read_ns;
-        collected_number duration_write_ns;
-        collected_number busy_time_ns;
-    } cur_diskstat;
-    struct prev_diskstat {
-        collected_number bytes_read;
-        collected_number bytes_write;
-        collected_number operations_read;
-        collected_number operations_write;
-        collected_number duration_read_ns;
-        collected_number duration_write_ns;
-        collected_number busy_time_ns;
-    } prev_diskstat;
-
-    // NEEDED BY: do_space, do_inodes
-    struct statfs *mntbuf;
-    int mntsize, i;
-    char mntonname[MNAMELEN + 1];
-    char title[4096 + 1];
-
-    // NEEDED BY: do_bandwidth
-    struct ifaddrs *ifa, *ifap;
-
-    /* Get ports and services for drive statistics. */
-    if (unlikely(IOMasterPort(bootstrap_port, &master_port))) {
-        error("MACOS: IOMasterPort() failed");
-        do_io = 0;
-        error("DISABLED: system.io");
-    /* Get the list of all drive objects. */
-    } else if (unlikely(IOServiceGetMatchingServices(master_port, IOServiceMatching("IOBlockStorageDriver"), &drive_list))) {
-        error("MACOS: IOServiceGetMatchingServices() failed");
-        do_io = 0;
-        error("DISABLED: system.io");
-    } else {
-        while ((drive = IOIteratorNext(drive_list)) != 0) {
-            properties = 0;
-            statistics = 0;
-            number = 0;
-            bzero(&diskstat, sizeof(diskstat));
-
-            /* Get drive media object. */
-            status = IORegistryEntryGetChildEntry(drive, kIOServicePlane, &drive_media);
-            if (unlikely(status != KERN_SUCCESS)) {
-                IOObjectRelease(drive);
-                continue;
-            }
-
-            /* Get drive media properties. */
-            if (likely(!IORegistryEntryCreateCFProperties(drive_media, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
-                /* Get disk name. */
-                if (likely(name = (CFStringRef)CFDictionaryGetValue(properties, CFSTR(kIOBSDNameKey)))) {
-                    CFStringGetCString(name, diskstat.name, MAXDRIVENAME, kCFStringEncodingUTF8);
-                }
-            }
-
-            /* Release. */
-            CFRelease(properties);
-            IOObjectRelease(drive_media);
-
-            if(unlikely(!diskstat.name || !*diskstat.name)) {
-                IOObjectRelease(drive);
-                continue;
-            }
-
-            /* Obtain the properties for this drive object. */
-            if (unlikely(IORegistryEntryCreateCFProperties(drive, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
-                IOObjectRelease(drive);
-                error("MACOS: IORegistryEntryCreateCFProperties() failed");
-                do_io = 0;
-                error("DISABLED: system.io");
-                break;
-            } else if (likely(properties)) {
-                /* Obtain the statistics from the drive properties. */
-                if (likely(statistics = (CFDictionaryRef)CFDictionaryGetValue(properties, CFSTR(kIOBlockStorageDriverStatisticsKey)))) {
-
-                    // --------------------------------------------------------------------
-
-                    /* Get bytes read. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_read);
-                        total_disk_reads += diskstat.bytes_read;
-                    }
-
-                    /* Get bytes written. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_write);
-                        total_disk_writes += diskstat.bytes_write;
-                    }
-
-                    st = rrdset_find_bytype_localhost("disk", diskstat.name);
-                    if (unlikely(!st)) {
-                        st = rrdset_create_localhost(
-                                "disk"
-                                , diskstat.name
-                                , NULL
-                                , diskstat.name
-                                , "disk.io"
-                                , "Disk I/O Bandwidth"
-                                , "kilobytes/s"
-                                , "macos"
-                                , "iokit"
-                                , 2000
-                                , update_every
-                                , RRDSET_TYPE_AREA
-                        );
-
-                        rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
-                        rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
-                    }
-                    else rrdset_next(st);
-
-                    prev_diskstat.bytes_read = rrddim_set(st, "reads", diskstat.bytes_read);
-                    prev_diskstat.bytes_write = rrddim_set(st, "writes", diskstat.bytes_write);
-                    rrdset_done(st);
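Every counter in the loop above is fetched the same way: look up the statistics dictionary in the drive's properties, look up one key, and copy the CFNumber out as a signed 64-bit value. A sketch of that lookup, factored into a hypothetical stat_counter() helper:

    /* Sketch: pull one 64-bit counter out of an IOBlockStorageDriver
     * statistics dictionary, as the loop above does for every metric. */
    #include <CoreFoundation/CoreFoundation.h>
    #include <IOKit/storage/IOBlockStorageDriver.h>

    static int64_t stat_counter(CFDictionaryRef properties, CFStringRef key) {
        int64_t value = 0;
        CFDictionaryRef stats = CFDictionaryGetValue(properties,
                CFSTR(kIOBlockStorageDriverStatisticsKey));
        if (stats) {
            CFNumberRef num = CFDictionaryGetValue(stats, key);
            if (num)
                CFNumberGetValue(num, kCFNumberSInt64Type, &value);
        }
        return value;
    }

    /* usage: stat_counter(props, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)) */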
-
-                    // --------------------------------------------------------------------
-
-                    /* Get number of reads. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsReadsKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.reads);
-                    }
-
-                    /* Get number of writes. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsWritesKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.writes);
-                    }
-
-                    st = rrdset_find_bytype_localhost("disk_ops", diskstat.name);
-                    if (unlikely(!st)) {
-                        st = rrdset_create_localhost(
-                                "disk_ops"
-                                , diskstat.name
-                                , NULL
-                                , diskstat.name
-                                , "disk.ops"
-                                , "Disk Completed I/O Operations"
-                                , "operations/s"
-                                , "macos"
-                                , "iokit"
-                                , 2001
-                                , update_every
-                                , RRDSET_TYPE_LINE
-                        );
-                        rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                        rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                        rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    }
-                    else rrdset_next(st);
-
-                    prev_diskstat.operations_read = rrddim_set(st, "reads", diskstat.reads);
-                    prev_diskstat.operations_write = rrddim_set(st, "writes", diskstat.writes);
-                    rrdset_done(st);
-
-                    // --------------------------------------------------------------------
-
-                    /* Get reads time. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_read);
-                    }
-
-                    /* Get writes time. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_write);
-                    }
-
-                    st = rrdset_find_bytype_localhost("disk_util", diskstat.name);
-                    if (unlikely(!st)) {
-                        st = rrdset_create_localhost(
-                                "disk_util"
-                                , diskstat.name
-                                , NULL
-                                , diskstat.name
-                                , "disk.util"
-                                , "Disk Utilization Time"
-                                , "% of time working"
-                                , "macos"
-                                , "iokit"
-                                , 2004
-                                , update_every
-                                , RRDSET_TYPE_AREA
-                        );
-                        rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                        rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL);
-                    }
-                    else rrdset_next(st);
-
-                    cur_diskstat.busy_time_ns = (diskstat.time_read + diskstat.time_write);
-                    prev_diskstat.busy_time_ns = rrddim_set(st, "utilization", cur_diskstat.busy_time_ns);
-                    rrdset_done(st);
-
-                    // --------------------------------------------------------------------
-
-                    /* Get reads latency. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentReadTimeKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_read);
-                    }
-
-                    /* Get writes latency. */
-                    if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentWriteTimeKey)))) {
-                        CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_write);
-                    }
-
-                    st = rrdset_find_bytype_localhost("disk_iotime", diskstat.name);
-                    if (unlikely(!st)) {
-                        st = rrdset_create_localhost(
-                                "disk_iotime"
-                                , diskstat.name
-                                , NULL
-                                , diskstat.name
-                                , "disk.iotime"
-                                , "Disk Total I/O Time"
-                                , "milliseconds/s"
-                                , "macos"
-                                , "iokit"
-                                , 2022
-                                , update_every
-                                , RRDSET_TYPE_LINE
-                        );
-                        rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                        rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
-                        rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL);
-                    }
-                    else rrdset_next(st);
-
-                    cur_diskstat.duration_read_ns = diskstat.time_read + diskstat.latency_read;
-                    cur_diskstat.duration_write_ns = diskstat.time_write + diskstat.latency_write;
-                    prev_diskstat.duration_read_ns = rrddim_set(st, "reads", cur_diskstat.duration_read_ns);
-                    prev_diskstat.duration_write_ns = rrddim_set(st, "writes", cur_diskstat.duration_write_ns);
-                    rrdset_done(st);
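The differential charts that follow divide the change in accumulated time by the change in the operation count since the previous iteration, guarding against a zero delta on idle intervals. The same arithmetic as a standalone helper (collected_number is typedef'd here only to keep the sketch self-contained):

    /* Sketch: the derived "await" metric is delta(time) / delta(ops). */
    typedef long long collected_number;

    collected_number avg_wait_ns(collected_number ops, collected_number prev_ops,
                                 collected_number dur_ns, collected_number prev_dur_ns) {
        collected_number dops = ops - prev_ops;
        return dops ? (dur_ns - prev_dur_ns) / dops : 0;   /* 0 on quiet intervals */
    }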
-
-                    // --------------------------------------------------------------------
-                    // calculate differential charts
-                    // only if this is not the first time we run
-
-                    if (likely(dt)) {
-
-                        // --------------------------------------------------------------------
-
-                        st = rrdset_find_bytype_localhost("disk_await", diskstat.name);
-                        if (unlikely(!st)) {
-                            st = rrdset_create_localhost(
-                                    "disk_await"
-                                    , diskstat.name
-                                    , NULL
-                                    , diskstat.name
-                                    , "disk.await"
-                                    , "Average Completed I/O Operation Time"
-                                    , "ms per operation"
-                                    , "macos"
-                                    , "iokit"
-                                    , 2005
-                                    , update_every
-                                    , RRDSET_TYPE_LINE
-                            );
-                            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                            rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
-                            rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE);
-                        }
-                        else rrdset_next(st);
-
-                        rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
-                            (cur_diskstat.duration_read_ns - prev_diskstat.duration_read_ns) / (diskstat.reads - prev_diskstat.operations_read) : 0);
-                        rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
-                            (cur_diskstat.duration_write_ns - prev_diskstat.duration_write_ns) / (diskstat.writes - prev_diskstat.operations_write) : 0);
-                        rrdset_done(st);
-
-                        // --------------------------------------------------------------------
-
-                        st = rrdset_find_bytype_localhost("disk_avgsz", diskstat.name);
-                        if (unlikely(!st)) {
-                            st = rrdset_create_localhost(
-                                    "disk_avgsz"
-                                    , diskstat.name
-                                    , NULL
-                                    , diskstat.name
-                                    , "disk.avgsz"
-                                    , "Average Completed I/O Operation Bandwidth"
-                                    , "kilobytes per operation"
-                                    , "macos"
-                                    , "iokit"
-                                    , 2006
-                                    , update_every
-                                    , RRDSET_TYPE_AREA
-                            );
-                            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                            rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-                            rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
-                        }
-                        else rrdset_next(st);
-
-                        rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
-                            (diskstat.bytes_read - prev_diskstat.bytes_read) / (diskstat.reads - prev_diskstat.operations_read) : 0);
-                        rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
-                            (diskstat.bytes_write - prev_diskstat.bytes_write) / (diskstat.writes - prev_diskstat.operations_write) : 0);
-                        rrdset_done(st);
-
-                        // --------------------------------------------------------------------
-
-                        st = rrdset_find_bytype_localhost("disk_svctm", diskstat.name);
-                        if (unlikely(!st)) {
-                            st = rrdset_create_localhost(
-                                    "disk_svctm"
-                                    , diskstat.name
-                                    , NULL
-                                    , diskstat.name
-                                    , "disk.svctm"
-                                    , "Average Service Time"
-                                    , "ms per operation"
-                                    , "macos"
-                                    , "iokit"
-                                    , 2007
-                                    , update_every
-                                    , RRDSET_TYPE_LINE
-                            );
-                            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                            rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
-                        }
-                        else rrdset_next(st);
-
-                        rrddim_set(st, "svctm", ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) ?
-                            (cur_diskstat.busy_time_ns - prev_diskstat.busy_time_ns) / ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) : 0);
-                        rrdset_done(st);
-                    }
-                }
-
-                /* Release. */
-                CFRelease(properties);
-            }
-
-            /* Release. */
-            IOObjectRelease(drive);
-        }
-        IOIteratorReset(drive_list);
-
-        /* Release. */
-        IOObjectRelease(drive_list);
-    }
-
-    if (likely(do_io)) {
-        st = rrdset_find_bytype_localhost("system", "io");
-        if (unlikely(!st)) {
-            st = rrdset_create_localhost(
-                    "system"
-                    , "io"
-                    , NULL
-                    , "disk"
-                    , NULL
-                    , "Disk I/O"
-                    , "kilobytes/s"
-                    , "macos"
-                    , "iokit"
-                    , 150
-                    , update_every
-                    , RRDSET_TYPE_AREA
-            );
-            rrddim_add(st, "in", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
-            rrddim_add(st, "out", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else rrdset_next(st);
-
-        rrddim_set(st, "in", total_disk_reads);
-        rrddim_set(st, "out", total_disk_writes);
-        rrdset_done(st);
-    }
-
-    // Can be merged with FreeBSD plugin
-    // --------------------------------------------------------------------------
-
-    if (likely(do_space || do_inodes)) {
-        // there is no mount info in sysctl MIBs
-        if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
-            error("MACOS: getmntinfo() failed");
-            do_space = 0;
-            error("DISABLED: disk_space.X");
-            do_inodes = 0;
-            error("DISABLED: disk_inodes.X");
-        } else {
-            for (i = 0; i < mntsize; i++) {
-                if (mntbuf[i].f_flags == MNT_RDONLY ||
-                        mntbuf[i].f_blocks == 0 ||
-                        // taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes
-                        strcmp(mntbuf[i].f_fstypename, "autofs") == 0 ||
-                        strcmp(mntbuf[i].f_fstypename, "procfs") == 0 ||
-                        strcmp(mntbuf[i].f_fstypename, "subfs") == 0 ||
-                        strcmp(mntbuf[i].f_fstypename, "devfs") == 0 ||
-                        strcmp(mntbuf[i].f_fstypename, "none") == 0)
-                    continue;
-
-                // --------------------------------------------------------------------------
-
-                if (likely(do_space)) {
-                    st = rrdset_find_bytype_localhost("disk_space", mntbuf[i].f_mntonname);
-                    if (unlikely(!st)) {
-                        snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
-                        st = rrdset_create_localhost(
-                                "disk_space"
-                                , mntbuf[i].f_mntonname
-                                , NULL
-                                , mntbuf[i].f_mntonname
-                                , "disk.space"
-                                , title
-                                , "GB"
-                                , "macos"
-                                , "iokit"
-                                , 2023
-                                , update_every
-                                , RRDSET_TYPE_STACKED
-                        );
-
-                        rrddim_add(st, "avail", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
-                        rrddim_add(st, "used", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
-                        rrddim_add(st, "reserved_for_root", "reserved for root", mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
-                    } else
-                        rrdset_next(st);
-
-                    rrddim_set(st, "avail", (collected_number) mntbuf[i].f_bavail);
-                    rrddim_set(st, "used", (collected_number) (mntbuf[i].f_blocks - mntbuf[i].f_bfree));
-                    rrddim_set(st, "reserved_for_root", (collected_number) (mntbuf[i].f_bfree - mntbuf[i].f_bavail));
-                    rrdset_done(st);
-                }
-
-                // --------------------------------------------------------------------------
-
-                if (likely(do_inodes)) {
-                    st = rrdset_find_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname);
-                    if (unlikely(!st)) {
-                        snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
-                        st = rrdset_create_localhost(
-                                "disk_inodes"
-                                , mntbuf[i].f_mntonname
-                                , NULL
-                                , mntbuf[i].f_mntonname
-                                , "disk.inodes"
-                                , title
-                                , "Inodes"
-                                , "macos"
-                                , "iokit"
-                                , 2024
-                                , update_every
-                                , RRDSET_TYPE_STACKED
-                        );
-
-                        rrddim_add(st, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                        rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                        rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                    } else
-                        rrdset_next(st);
-
-                    rrddim_set(st, "avail", (collected_number) mntbuf[i].f_ffree);
-                    rrddim_set(st, "used", (collected_number) (mntbuf[i].f_files - mntbuf[i].f_ffree));
-                    rrdset_done(st);
-                }
-            }
-        }
-    }
-
-    // Can be merged with FreeBSD plugin
-    // --------------------------------------------------------------------
-
-    if (likely(do_bandwidth)) {
-        if (unlikely(getifaddrs(&ifap))) {
-            error("MACOS: getifaddrs()");
-            do_bandwidth = 0;
-            error("DISABLED: system.ipv4");
-        } else {
-            for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
-                if (ifa->ifa_addr->sa_family != AF_LINK)
-                    continue;
-
-                // --------------------------------------------------------------------
-
-                st = rrdset_find_bytype_localhost("net", ifa->ifa_name);
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "net"
-                            , ifa->ifa_name
-                            , NULL
-                            , ifa->ifa_name
-                            , "net.net"
-                            , "Bandwidth"
-                            , "kilobits/s"
-                            , "macos"
-                            , "iokit"
-                            , 7000
-                            , update_every
-                            , RRDSET_TYPE_AREA
-                    );
-
-                    rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "received", IFA_DATA(ibytes));
-                rrddim_set(st, "sent", IFA_DATA(obytes));
-                rrdset_done(st);
-
-                // --------------------------------------------------------------------
-
-                st = rrdset_find_bytype_localhost("net_packets", ifa->ifa_name);
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "net_packets"
-                            , ifa->ifa_name
-                            , NULL
-                            , ifa->ifa_name
-                            , "net.packets"
-                            , "Packets"
-                            , "packets/s"
-                            , "macos"
-                            , "iokit"
-                            , 7001
-                            , update_every
-                            , RRDSET_TYPE_LINE
-                    );
-                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                    rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "received", IFA_DATA(ipackets));
-                rrddim_set(st, "sent", IFA_DATA(opackets));
-                rrddim_set(st, "multicast_received", IFA_DATA(imcasts));
-                rrddim_set(st, "multicast_sent", IFA_DATA(omcasts));
-                rrdset_done(st);
-
-                // --------------------------------------------------------------------
-
-                st = rrdset_find_bytype_localhost("net_errors", ifa->ifa_name);
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "net_errors"
-                            , ifa->ifa_name
-                            , NULL
-                            , ifa->ifa_name
-                            , "net.errors"
-                            , "Interface Errors"
-                            , "errors/s"
-                            , "macos"
-                            , "iokit"
-                            , 7002
-                            , update_every
-                            , RRDSET_TYPE_LINE
-                    );
-                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                    rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "inbound", IFA_DATA(ierrors));
-                rrddim_set(st, "outbound", IFA_DATA(oerrors));
-                rrdset_done(st);
-
-                // --------------------------------------------------------------------
-
-                st = rrdset_find_bytype_localhost("net_drops", ifa->ifa_name);
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "net_drops"
-                            , ifa->ifa_name
-                            , NULL
-                            , ifa->ifa_name
-                            , "net.drops"
-                            , "Interface Drops"
-                            , "drops/s"
-                            , "macos"
-                            , "iokit"
-                            , 7003
-                            , update_every
-                            , RRDSET_TYPE_LINE
-                    );
-                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                    rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "inbound", IFA_DATA(iqdrops));
-                rrdset_done(st);
-
-                // --------------------------------------------------------------------
-
-                st = rrdset_find_bytype_localhost("net_events", ifa->ifa_name);
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "net_events"
-                            , ifa->ifa_name
-                            , NULL
-                            , ifa->ifa_name
-                            , "net.events"
-                            , "Network Interface Events"
-                            , "events/s"
-                            , "macos"
-                            , "iokit"
-                            , 7006
-                            , update_every
-                            , RRDSET_TYPE_LINE
-                    );
-                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                    rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "collisions", IFA_DATA(collisions));
-                rrdset_done(st);
-            }
-
-            freeifaddrs(ifap);
-        }
-    }
-
-
-    return 0;
-}
diff --git a/src/macos_mach_smi.c b/src/macos_mach_smi.c
deleted file mode 100644
index 47d32a9f7..000000000
--- a/src/macos_mach_smi.c
+++ /dev/null
@@ -1,238 +0,0 @@
-#include "common.h"
-#include <mach/mach.h>
-
-int do_macos_mach_smi(int update_every, usec_t dt) {
-    (void)dt;
-
-    static int do_cpu = -1, do_ram = -1, do_swapio = -1, do_pgfaults = -1;
-
-    if (unlikely(do_cpu == -1)) {
-        do_cpu = config_get_boolean("plugin:macos:mach_smi", "cpu utilization", 1);
-        do_ram = config_get_boolean("plugin:macos:mach_smi", "system ram", 1);
-        do_swapio = config_get_boolean("plugin:macos:mach_smi", "swap i/o", 1);
-        do_pgfaults = config_get_boolean("plugin:macos:mach_smi", "memory page faults", 1);
-    }
-
-    RRDSET *st;
-
-    kern_return_t kr;
-    mach_msg_type_number_t count;
-    host_t host;
-    vm_size_t system_pagesize;
-
-
-    // NEEDED BY: do_cpu
-    natural_t cp_time[CPU_STATE_MAX];
-
-    // NEEDED BY: do_ram, do_swapio, do_pgfaults
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
-    vm_statistics64_data_t vm_statistics;
-#else
-    vm_statistics_data_t vm_statistics;
-#endif
-
-    host = mach_host_self();
-    kr = host_page_size(host, &system_pagesize);
-    if (unlikely(kr != KERN_SUCCESS))
-        return -1;
-
-    // --------------------------------------------------------------------
-
-    if (likely(do_cpu)) {
-        if (unlikely(HOST_CPU_LOAD_INFO_COUNT != 4)) {
-            error("MACOS: There are %d CPU states (4 was expected)", HOST_CPU_LOAD_INFO_COUNT);
-            do_cpu = 0;
-            error("DISABLED: system.cpu");
-        } else {
-            count = HOST_CPU_LOAD_INFO_COUNT;
-            kr = host_statistics(host, HOST_CPU_LOAD_INFO, (host_info_t)cp_time, &count);
-            if (unlikely(kr != KERN_SUCCESS)) {
-                error("MACOS: host_statistics() failed: %s", mach_error_string(kr));
-                do_cpu = 0;
-                error("DISABLED: system.cpu");
-            } else {
-
-                st = rrdset_find_bytype_localhost("system", "cpu");
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "system"
-                            , "cpu"
-                            , NULL
-                            , "cpu"
-                            , "system.cpu"
-                            , "Total CPU utilization"
-                            , "percentage"
-                            , "macos"
-                            , "mach_smi"
-                            , 100
-                            , update_every
-                            , RRDSET_TYPE_STACKED
-                    );
-
-                    rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
-                    rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
-                    rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
-                    rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
-                    rrddim_hide(st, "idle");
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "user", cp_time[CPU_STATE_USER]);
-                rrddim_set(st, "nice", cp_time[CPU_STATE_NICE]);
-                rrddim_set(st, "system", cp_time[CPU_STATE_SYSTEM]);
-                rrddim_set(st, "idle", cp_time[CPU_STATE_IDLE]);
-                rrdset_done(st);
-            }
-        }
-    }
-
-    // --------------------------------------------------------------------
-
-    if (likely(do_ram || do_swapio || do_pgfaults)) {
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
-        count = sizeof(vm_statistics64_data_t);
-        kr = host_statistics64(host, HOST_VM_INFO64, (host_info64_t)&vm_statistics, &count);
-#else
-        count = sizeof(vm_statistics_data_t);
-        kr = host_statistics(host, HOST_VM_INFO, (host_info_t)&vm_statistics, &count);
-#endif
-        if (unlikely(kr != KERN_SUCCESS)) {
-            error("MACOS: host_statistics64() failed: %s", mach_error_string(kr));
-            do_ram = 0;
-            error("DISABLED: system.ram");
-            do_swapio = 0;
-            error("DISABLED: system.swapio");
-            do_pgfaults = 0;
-            error("DISABLED: mem.pgfaults");
-        } else {
-            if (likely(do_ram)) {
-                st = rrdset_find_localhost("system.ram");
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "system"
-                            , "ram"
-                            , NULL
-                            , "ram"
-                            , NULL
-                            , "System RAM"
-                            , "MB"
-                            , "macos"
-                            , "mach_smi"
-                            , 200
-                            , update_every
-                            , RRDSET_TYPE_STACKED
-                    );
-
-                    rrddim_add(st, "active", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-                    rrddim_add(st, "wired", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
-                    rrddim_add(st, "throttled", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-                    rrddim_add(st, "compressor", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-#endif
-                    rrddim_add(st, "inactive", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-                    rrddim_add(st, "purgeable", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-                    rrddim_add(st, "speculative", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-                    rrddim_add(st, "free", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "active", vm_statistics.active_count);
-                rrddim_set(st, "wired", vm_statistics.wire_count);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
-                rrddim_set(st, "throttled", vm_statistics.throttled_count);
-                rrddim_set(st, "compressor", vm_statistics.compressor_page_count);
-#endif
-                rrddim_set(st, "inactive", vm_statistics.inactive_count);
-                rrddim_set(st, "purgeable", vm_statistics.purgeable_count);
-                rrddim_set(st, "speculative", vm_statistics.speculative_count);
-                rrddim_set(st, "free", (vm_statistics.free_count - vm_statistics.speculative_count));
-                rrdset_done(st);
-            }
-
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
-            // --------------------------------------------------------------------
-
-            if (likely(do_swapio)) {
-                st = rrdset_find_localhost("system.swapio");
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "system"
-                            , "swapio"
-                            , NULL
-                            , "swap"
-                            , NULL
-                            , "Swap I/O"
-                            , "kilobytes/s"
-                            , "macos"
-                            , "mach_smi"
-                            , 250
-                            , update_every
-                            , RRDSET_TYPE_AREA
-                    );
-
-                    rrddim_add(st, "in", NULL, system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "out", NULL, -system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "in", vm_statistics.swapins);
-                rrddim_set(st, "out", vm_statistics.swapouts);
-                rrdset_done(st);
-            }
-#endif
-
-            // --------------------------------------------------------------------
-
-            if (likely(do_pgfaults)) {
-                st = rrdset_find_localhost("mem.pgfaults");
-                if (unlikely(!st)) {
-                    st = rrdset_create_localhost(
-                            "mem"
-                            , "pgfaults"
-                            , NULL
-                            , "system"
-                            , NULL
-                            , "Memory Page Faults"
-                            , "page faults/s"
-                            , "macos"
-                            , "mach_smi"
-                            , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
-                            , update_every
-                            , RRDSET_TYPE_LINE
-                    );
-                    rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-                    rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "pagein", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "pageout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
-                    rrddim_add(st, "compress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "decompress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
-                    rrddim_add(st, "zero_fill", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "reactivate", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                    rrddim_add(st, "purge", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                }
-                else rrdset_next(st);
-
-                rrddim_set(st, "memory", vm_statistics.faults);
-                rrddim_set(st, "cow", vm_statistics.cow_faults);
-                rrddim_set(st, "pagein", vm_statistics.pageins);
-                rrddim_set(st, "pageout", vm_statistics.pageouts);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
-                rrddim_set(st, "compress", vm_statistics.compressions);
-                rrddim_set(st, "decompress", vm_statistics.decompressions);
-#endif
-                rrddim_set(st, "zero_fill", vm_statistics.zero_fill_count);
-                rrddim_set(st, "reactivate", vm_statistics.reactivations);
-                rrddim_set(st, "purge", vm_statistics.purges);
-                rrdset_done(st);
-            }
-        }
-    }
-
-    // --------------------------------------------------------------------
-
-    return 0;
-}
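host_statistics64() fills the vm_statistics64_data_t from which the RAM, swap i/o and page-fault charts above are built. A minimal standalone call, using the documented HOST_VM_INFO64_COUNT for the size argument (the deleted code passes sizeof() instead):

    /* Sketch: one host_statistics64() read of the Mach VM counters. */
    #include <mach/mach.h>
    #include <stdio.h>

    int main(void) {
        vm_statistics64_data_t vm;
        mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;

        kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                             (host_info64_t)&vm, &count);
        if (kr != KERN_SUCCESS) {
            fprintf(stderr, "host_statistics64: %s\n", mach_error_string(kr));
            return 1;
        }

        printf("active=%llu wired=%llu free=%llu pages\n",
               (unsigned long long)vm.active_count,
               (unsigned long long)vm.wire_count,
               (unsigned long long)vm.free_count);
        return 0;
    }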
diff --git a/src/macos_sysctl.c b/src/macos_sysctl.c
deleted file mode 100644
index cb6fa8af9..000000000
--- a/src/macos_sysctl.c
+++ /dev/null
@@ -1,1504 +0,0 @@
-#include "common.h"
-#include <Availability.h>
-#include <sys/sysctl.h>
-// NEEDED BY: do_bandwidth
-#include <net/route.h>
-// NEEDED BY do_tcp...
-#include <sys/socketvar.h>
-#include <netinet/tcp_var.h>
-#include <netinet/tcp_fsm.h>
-// NEEDED BY do_udp..., do_ip...
-#include <netinet/ip_var.h>
-// NEEDED BY do_udp...
-#include <netinet/udp.h>
-#include <netinet/udp_var.h>
-// NEEDED BY do_icmp...
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <netinet/icmp_var.h>
-// NEEDED BY do_icmp6...
-#include <netinet/icmp6.h>
-// NEEDED BY do_uptime
-#include <time.h>
-
-// MacOS calculates load averages once every 5 seconds
-#define MIN_LOADAVG_UPDATE_EVERY 5
-
-int do_macos_sysctl(int update_every, usec_t dt) {
-    static int do_loadavg = -1, do_swap = -1, do_bandwidth = -1,
-               do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_ecn = -1,
-               do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1,
-               do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1,
-               do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
-               do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1,
-               do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1,
-               do_icmp6_router = -1, do_icmp6_neighbor = -1, do_icmp6_types = -1, do_uptime = -1;
-
-
-    if (unlikely(do_loadavg == -1)) {
-        do_loadavg = config_get_boolean("plugin:macos:sysctl", "enable load average", 1);
-        do_swap = config_get_boolean("plugin:macos:sysctl", "system swap", 1);
-        do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
-        do_tcp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP packets", 1);
-        do_tcp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP errors", 1);
-        do_tcp_handshake = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP handshake issues", 1);
-        do_ecn = config_get_boolean_ondemand("plugin:macos:sysctl", "ECN packets", CONFIG_BOOLEAN_AUTO);
-        do_tcpext_syscookies = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
-        do_tcpext_ofo = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
-        do_tcpext_connaborts = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
-        do_udp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP packets", 1);
-        do_udp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP errors", 1);
-        do_icmp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP packets", 1);
-        do_icmpmsg = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP messages", 1);
-        do_ip_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 packets", 1);
-        do_ip_fragsout = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments sent", 1);
-        do_ip_fragsin = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments assembly", 1);
-        do_ip_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 errors", 1);
-        do_ip6_packets = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
-        do_ip6_fragsout = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
-        do_ip6_fragsin = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
-        do_ip6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
-        do_icmp6 = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp", CONFIG_BOOLEAN_AUTO);
-        do_icmp6_redir = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp redirects", CONFIG_BOOLEAN_AUTO);
-        do_icmp6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp errors", CONFIG_BOOLEAN_AUTO);
-        do_icmp6_echos = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp echos", CONFIG_BOOLEAN_AUTO);
-        do_icmp6_router = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp router", CONFIG_BOOLEAN_AUTO);
-        do_icmp6_neighbor = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
-        do_icmp6_types = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp types", CONFIG_BOOLEAN_AUTO);
-        do_uptime = config_get_boolean("plugin:macos:sysctl", "system uptime", 1);
-    }
-
-    RRDSET *st;
-
-    int system_pagesize = getpagesize(); // wouldn't it be better to get value directly from hw.pagesize?
-    int i, n;
-    int common_error = 0;
-    size_t size;
-
-    // NEEDED BY: do_loadavg
-    static usec_t next_loadavg_dt = 0;
-    struct loadavg sysload;
-
-    // NEEDED BY: do_swap
-    struct xsw_usage swap_usage;
-
-    // NEEDED BY: do_bandwidth
-    int mib[6];
-    static char *ifstatdata = NULL;
-    char *lim, *next;
-    struct if_msghdr *ifm;
-    struct iftot {
-        u_long  ift_ibytes;
-        u_long  ift_obytes;
-    } iftot = {0, 0};
-
-    // NEEDED BY: do_tcp...
-    struct tcpstat tcpstat;
-    uint64_t tcps_states[TCP_NSTATES];
-
-    // NEEDED BY: do_udp...
-    struct udpstat udpstat;
-
-    // NEEDED BY: do_icmp...
-    struct icmpstat icmpstat;
-    struct icmp_total {
-        u_long  msgs_in;
-        u_long  msgs_out;
-    } icmp_total = {0, 0};
-
-    // NEEDED BY: do_ip...
-    struct ipstat ipstat;
-
-    // NEEDED BY: do_ip6...
-    /*
-     * Dirty workaround for /usr/include/netinet6/ip6_var.h absence.
-     * Struct ip6stat was copied from bsd/netinet6/ip6_var.h from xnu sources.
-     * Do the same for previously missing scope6_var.h on OS X < 10.11.
-     */
-#define IP6S_SRCRULE_COUNT 16
-
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100)
-#ifndef _NETINET6_SCOPE6_VAR_H_
-#define _NETINET6_SCOPE6_VAR_H_
-#include <sys/appleapiopts.h>
-
-#define SCOPE6_ID_MAX 16
-#endif
-#else
-#include <netinet6/scope6_var.h>
-#endif
-
-    struct ip6stat {
-        u_quad_t ip6s_total;            /* total packets received */
-        u_quad_t ip6s_tooshort;         /* packet too short */
-        u_quad_t ip6s_toosmall;         /* not enough data */
-        u_quad_t ip6s_fragments;        /* fragments received */
-        u_quad_t ip6s_fragdropped;      /* frags dropped(dups, out of space) */
-        u_quad_t ip6s_fragtimeout;      /* fragments timed out */
-        u_quad_t ip6s_fragoverflow;     /* fragments that exceeded limit */
-        u_quad_t ip6s_forward;          /* packets forwarded */
-        u_quad_t ip6s_cantforward;      /* packets rcvd for unreachable dest */
-        u_quad_t ip6s_redirectsent;     /* packets forwarded on same net */
-        u_quad_t ip6s_delivered;        /* datagrams delivered to upper level */
-        u_quad_t ip6s_localout;         /* total ip packets generated here */
-        u_quad_t ip6s_odropped;         /* lost packets due to nobufs, etc. */
-        u_quad_t ip6s_reassembled;      /* total packets reassembled ok */
-        u_quad_t ip6s_atmfrag_rcvd;     /* atomic fragments received */
-        u_quad_t ip6s_fragmented;       /* datagrams successfully fragmented */
-        u_quad_t ip6s_ofragments;       /* output fragments created */
-        u_quad_t ip6s_cantfrag;         /* don't fragment flag was set, etc. */
-        u_quad_t ip6s_badoptions;       /* error in option processing */
-        u_quad_t ip6s_noroute;          /* packets discarded due to no route */
-        u_quad_t ip6s_badvers;          /* ip6 version != 6 */
-        u_quad_t ip6s_rawout;           /* total raw ip packets generated */
-        u_quad_t ip6s_badscope;         /* scope error */
-        u_quad_t ip6s_notmember;        /* don't join this multicast group */
-        u_quad_t ip6s_nxthist[256];     /* next header history */
-        u_quad_t ip6s_m1;               /* one mbuf */
-        u_quad_t ip6s_m2m[32];          /* two or more mbuf */
-        u_quad_t ip6s_mext1;            /* one ext mbuf */
-        u_quad_t ip6s_mext2m;           /* two or more ext mbuf */
-        u_quad_t ip6s_exthdrtoolong;    /* ext hdr are not continuous */
-        u_quad_t ip6s_nogif;            /* no match gif found */
-        u_quad_t ip6s_toomanyhdr;       /* discarded due to too many headers */
-
-        /*
-         * statistics for improvement of the source address selection
-         * algorithm:
-         */
-        /* number of times that address selection fails */
-        u_quad_t ip6s_sources_none;
-        /* number of times that an address on the outgoing I/F is chosen */
-        u_quad_t ip6s_sources_sameif[SCOPE6_ID_MAX];
-        /* number of times that an address on a non-outgoing I/F is chosen */
-        u_quad_t ip6s_sources_otherif[SCOPE6_ID_MAX];
-        /*
-         * number of times that an address that has the same scope
-         * from the destination is chosen.
-         */
-        u_quad_t ip6s_sources_samescope[SCOPE6_ID_MAX];
-        /*
-         * number of times that an address that has a different scope
-         * from the destination is chosen.
-         */
-        u_quad_t ip6s_sources_otherscope[SCOPE6_ID_MAX];
-        /* number of times that a deprecated address is chosen */
-        u_quad_t ip6s_sources_deprecated[SCOPE6_ID_MAX];
-
-        u_quad_t ip6s_forward_cachehit;
-        u_quad_t ip6s_forward_cachemiss;
-
-        /* number of times that each rule of source selection is applied. */
-        u_quad_t ip6s_sources_rule[IP6S_SRCRULE_COUNT];
-
-        /* number of times we ignored address on expensive secondary interfaces */
-        u_quad_t ip6s_sources_skip_expensive_secondary_if;
-
-        /* pkt dropped, no mbufs for control data */
-        u_quad_t ip6s_pktdropcntrl;
-
-        /* total packets trimmed/adjusted */
-        u_quad_t ip6s_adj;
-        /* hwcksum info discarded during adjustment */
-        u_quad_t ip6s_adj_hwcsum_clr;
-
-        /* duplicate address detection collisions */
-        u_quad_t ip6s_dad_collide;
-
-        /* DAD NS looped back */
-        u_quad_t ip6s_dad_loopcount;
-    } ip6stat;
-
-    // NEEDED BY: do_icmp6...
-    struct icmp6stat icmp6stat;
-    struct icmp6_total {
-        u_long  msgs_in;
-        u_long  msgs_out;
-    } icmp6_total = {0, 0};
-
-    // NEEDED BY: do_uptime
-    struct timespec boot_time, cur_time;
-
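The GETSYSCTL_BY_NAME() calls below appear to wrap a fixed-size sysctlbyname() read that fails when the kernel returns an unexpected length. A sketch of that wrapper as a plain function (the getsysctl_by_name() name is hypothetical), reading vm.loadavg the way the next block does:

    /* Sketch: a size-checked sysctlbyname() read. */
    #include <stdio.h>
    #include <sys/resource.h>
    #include <sys/sysctl.h>

    static int getsysctl_by_name(const char *name, void *ptr, size_t len) {
        size_t nlen = len;
        if (sysctlbyname(name, ptr, &nlen, NULL, 0) == -1) {
            perror(name);
            return 1;
        }
        if (nlen != len) {               /* kernel returned an unexpected size */
            fprintf(stderr, "%s: expected %zu bytes, got %zu\n", name, len, nlen);
            return 1;
        }
        return 0;
    }

    int main(void) {
        struct loadavg sysload;
        if (getsysctl_by_name("vm.loadavg", &sysload, sizeof(sysload)))
            return 1;
        printf("load1 = %.2f\n", (double)sysload.ldavg[0] / sysload.fscale);
        return 0;
    }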
MIN_LOADAVG_UPDATE_EVERY : update_every - , RRDSET_TYPE_LINE - ); - rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set(st, "load1", (collected_number) ((double)sysload.ldavg[0] / sysload.fscale * 1000)); - rrddim_set(st, "load5", (collected_number) ((double)sysload.ldavg[1] / sysload.fscale * 1000)); - rrddim_set(st, "load15", (collected_number) ((double)sysload.ldavg[2] / sysload.fscale * 1000)); - rrdset_done(st); - } - } - - next_loadavg_dt = st->update_every * USEC_PER_SEC; - } - else next_loadavg_dt -= dt; - - // -------------------------------------------------------------------- - - if (likely(do_swap)) { - if (unlikely(GETSYSCTL_BY_NAME("vm.swapusage", swap_usage))) { - do_swap = 0; - error("DISABLED: system.swap"); - } else { - st = rrdset_find_localhost("system.swap"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system" - , "swap" - , NULL - , "swap" - , NULL - , "System Swap" - , "MB" - , "macos" - , "sysctl" - , 201 - , update_every - , RRDSET_TYPE_STACKED - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set(st, "free", swap_usage.xsu_avail); - rrddim_set(st, "used", swap_usage.xsu_used); - rrdset_done(st); - } - } - - // -------------------------------------------------------------------- - - if (likely(do_bandwidth)) { - mib[0] = CTL_NET; - mib[1] = PF_ROUTE; - mib[2] = 0; - mib[3] = AF_INET; - mib[4] = NET_RT_IFLIST2; - mib[5] = 0; - if (unlikely(sysctl(mib, 6, NULL, &size, NULL, 0))) { - error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno)); - do_bandwidth = 0; - error("DISABLED: system.ipv4"); - } else { - ifstatdata = reallocz(ifstatdata, size); - if (unlikely(sysctl(mib, 6, ifstatdata, &size, NULL, 0) < 0)) { - error("MACOS: sysctl(%s...) 
failed: %s", "net interfaces", strerror(errno)); - do_bandwidth = 0; - error("DISABLED: system.ipv4"); - } else { - lim = ifstatdata + size; - iftot.ift_ibytes = iftot.ift_obytes = 0; - for (next = ifstatdata; next < lim; ) { - ifm = (struct if_msghdr *)next; - next += ifm->ifm_msglen; - - if (ifm->ifm_type == RTM_IFINFO2) { - struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm; - - iftot.ift_ibytes += if2m->ifm_data.ifi_ibytes; - iftot.ift_obytes += if2m->ifm_data.ifi_obytes; - } - } - st = rrdset_find_localhost("system.ipv4"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system" - , "ipv4" - , NULL - , "network" - , NULL - , "IPv4 Bandwidth" - , "kilobits/s" - , "macos" - , "sysctl" - , 500 - , update_every - , RRDSET_TYPE_AREA - ); - - rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "InOctets", iftot.ift_ibytes); - rrddim_set(st, "OutOctets", iftot.ift_obytes); - rrdset_done(st); - } - } - } - - // -------------------------------------------------------------------- - - // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html - if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || do_tcpext_syscookies || do_ecn)) { - if (unlikely(GETSYSCTL_BY_NAME("net.inet.tcp.stats", tcpstat))){ - do_tcp_packets = 0; - error("DISABLED: ipv4.tcppackets"); - do_tcp_errors = 0; - error("DISABLED: ipv4.tcperrors"); - do_tcp_handshake = 0; - error("DISABLED: ipv4.tcphandshake"); - do_tcpext_connaborts = 0; - error("DISABLED: ipv4.tcpconnaborts"); - do_tcpext_ofo = 0; - error("DISABLED: ipv4.tcpofo"); - do_tcpext_syscookies = 0; - error("DISABLED: ipv4.tcpsyncookies"); - do_ecn = 0; - error("DISABLED: ipv4.ecnpkts"); - } else { - if (likely(do_tcp_packets)) { - st = rrdset_find_localhost("ipv4.tcppackets"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "tcppackets" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Packets" - , "packets/s" - , "macos" - , "sysctl" - , 2600 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InSegs", tcpstat.tcps_rcvtotal); - rrddim_set(st, "OutSegs", tcpstat.tcps_sndtotal); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_tcp_errors)) { - st = rrdset_find_localhost("ipv4.tcperrors"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "tcperrors" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Errors" - , "packets/s" - , "macos" - , "sysctl" - , 2700 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InErrs", tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort); - rrddim_set(st, "InCsumErrors", tcpstat.tcps_rcvbadsum); - rrddim_set(st, "RetransSegs", tcpstat.tcps_sndrexmitpack); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_tcp_handshake)) { - st = rrdset_find_localhost("ipv4.tcphandshake"); - if 
(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "tcphandshake" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Handshake Issues" - , "events/s" - , "macos" - , "sysctl" - , 2900 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "EstabResets", tcpstat.tcps_drops); - rrddim_set(st, "ActiveOpens", tcpstat.tcps_connattempt); - rrddim_set(st, "PassiveOpens", tcpstat.tcps_accepts); - rrddim_set(st, "AttemptFails", tcpstat.tcps_conndrops); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop))) { - do_tcpext_connaborts = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv4.tcpconnaborts"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "tcpconnaborts" - , NULL - , "tcp" - , NULL - , "TCP Connection Aborts" - , "connections/s" - , "macos" - , "sysctl" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "TCPAbortOnData", tcpstat.tcps_rcvpackafterwin); - rrddim_set(st, "TCPAbortOnClose", tcpstat.tcps_rcvafterclose); - rrddim_set(st, "TCPAbortOnMemory", tcpstat.tcps_rcvmemdrop); - rrddim_set(st, "TCPAbortOnTimeout", tcpstat.tcps_persistdrop); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) { - do_tcpext_ofo = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv4.tcpofo"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "tcpofo" - , NULL - , "tcp" - , NULL - , "TCP Out-Of-Order Queue" - , "packets/s" - , "macos" - , "sysctl" - , 3050 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "TCPOFOQueue", tcpstat.tcps_rcvoopack); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) { - do_tcpext_syscookies = CONFIG_BOOLEAN_YES; - - st = rrdset_find_localhost("ipv4.tcpsyncookies"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "tcpsyncookies" - , NULL - , "tcp" - , NULL - , "TCP SYN Cookies" - , "packets/s" - , "macos" - , "sysctl" - , 3100 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "SyncookiesSent", "sent", -1, 1, 
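/*
 * NOTE: a negative multiplier makes netdata plot a dimension below the
 * zero axis, so "sent"/"failed" mirror "received" within one chart.
 * The bandwidth chart earlier in this function uses the same
 * convention:
 *
 *     rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT,
 *                RRD_ALGORITHM_INCREMENTAL);  // bytes -> negative kilobits/s
 */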
RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "SyncookiesRecv", tcpstat.tcps_sc_recvcookie); - rrddim_set(st, "SyncookiesSent", tcpstat.tcps_sc_sendcookie); - rrddim_set(st, "SyncookiesFailed", tcpstat.tcps_sc_zonefail); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - -#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100) - if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_recv_ce || tcpstat.tcps_ecn_not_supported))) { - do_ecn = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv4.ecnpkts"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "ecnpkts" - , NULL - , "ecn" - , NULL - , "IPv4 ECN Statistics" - , "packets/s" - , "macos" - , "sysctl" - , 8700 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "InCEPkts", tcpstat.tcps_ecn_recv_ce); - rrddim_set(st, "InNoECTPkts", tcpstat.tcps_ecn_not_supported); - rrdset_done(st); - } -#endif - - } - } - - // -------------------------------------------------------------------- - - // see http://net-snmp.sourceforge.net/docs/mibs/udp.html - if (likely(do_udp_packets || do_udp_errors)) { - if (unlikely(GETSYSCTL_BY_NAME("net.inet.udp.stats", udpstat))) { - do_udp_packets = 0; - error("DISABLED: ipv4.udppackets"); - do_udp_errors = 0; - error("DISABLED: ipv4.udperrors"); - } else { - if (likely(do_udp_packets)) { - st = rrdset_find_localhost("ipv4.udppackets"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "udppackets" - , NULL - , "udp" - , NULL - , "IPv4 UDP Packets" - , "packets/s" - , "macos" - , "sysctl" - , 2601 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InDatagrams", udpstat.udps_ipackets); - rrddim_set(st, "OutDatagrams", udpstat.udps_opackets); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_udp_errors)) { - st = rrdset_find_localhost("ipv4.udperrors"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "udperrors" - , NULL - , "udp" - , NULL - , "IPv4 UDP Errors" - , "events/s" - , "macos" - , "sysctl" - , 2701 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); -#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090) - rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); -#endif - } else - rrdset_next(st); - - rrddim_set(st, "InErrors", udpstat.udps_hdrops + udpstat.udps_badlen); - rrddim_set(st, "NoPorts", udpstat.udps_noport); - rrddim_set(st, "RcvbufErrors", udpstat.udps_fullsock); -#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090) - 
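/*
 * NOTE: udps_nosum and udps_filtermcast are assumed to exist only in
 * the xnu headers from OS X 10.9 onwards (version code 1090), so
 * IgnoredMulti and the nosum component of InCsumErrors are compiled in
 * conditionally; the alternative branch below falls back to
 * udps_badsum alone.
 */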
rrddim_set(st, "InCsumErrors", udpstat.udps_badsum + udpstat.udps_nosum); - rrddim_set(st, "IgnoredMulti", udpstat.udps_filtermcast); -#else - rrddim_set(st, "InCsumErrors", udpstat.udps_badsum); -#endif - rrdset_done(st); - } - } - } - - // -------------------------------------------------------------------- - - if (likely(do_icmp_packets || do_icmpmsg)) { - if (unlikely(GETSYSCTL_BY_NAME("net.inet.icmp.stats", icmpstat))) { - do_icmp_packets = 0; - error("DISABLED: ipv4.icmp"); - error("DISABLED: ipv4.icmp_errors"); - do_icmpmsg = 0; - error("DISABLED: ipv4.icmpmsg"); - } else { - for (i = 0; i <= ICMP_MAXTYPE; i++) { - icmp_total.msgs_in += icmpstat.icps_inhist[i]; - icmp_total.msgs_out += icmpstat.icps_outhist[i]; - } - icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort; - - // -------------------------------------------------------------------- - - if (likely(do_icmp_packets)) { - st = rrdset_find_localhost("ipv4.icmp"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "icmp" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Packets" - , "packets/s" - , "macos" - , "sysctl" - , 2602 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InMsgs", icmp_total.msgs_in); - rrddim_set(st, "OutMsgs", icmp_total.msgs_out); - - rrdset_done(st); - - // -------------------------------------------------------------------- - - st = rrdset_find_localhost("ipv4.icmp_errors"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "icmp_errors" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Errors" - , "packets/s" - , "macos" - , "sysctl" - , 2603 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InErrors", icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort); - rrddim_set(st, "OutErrors", icmpstat.icps_error); - rrddim_set(st, "InCsumErrors", icmpstat.icps_checksum); - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_icmpmsg)) { - st = rrdset_find_localhost("ipv4.icmpmsg"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "icmpmsg" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Messages" - , "packets/s" - , "macos" - , "sysctl" - , 2604 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InEchoReps", icmpstat.icps_inhist[ICMP_ECHOREPLY]); - rrddim_set(st, "OutEchoReps", icmpstat.icps_outhist[ICMP_ECHOREPLY]); - rrddim_set(st, "InEchos", icmpstat.icps_inhist[ICMP_ECHO]); - rrddim_set(st, "OutEchos", icmpstat.icps_outhist[ICMP_ECHO]); - - rrdset_done(st); - } - } - } - - // -------------------------------------------------------------------- - - // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html - if (likely(do_ip_packets || 
do_ip_fragsout || do_ip_fragsin || do_ip_errors)) { - if (unlikely(GETSYSCTL_BY_NAME("net.inet.ip.stats", ipstat))) { - do_ip_packets = 0; - error("DISABLED: ipv4.packets"); - do_ip_fragsout = 0; - error("DISABLED: ipv4.fragsout"); - do_ip_fragsin = 0; - error("DISABLED: ipv4.fragsin"); - do_ip_errors = 0; - error("DISABLED: ipv4.errors"); - } else { - if (likely(do_ip_packets)) { - st = rrdset_find_localhost("ipv4.packets"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "packets" - , NULL - , "packets" - , NULL - , "IPv4 Packets" - , "packets/s" - , "macos" - , "sysctl" - , 3000 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "OutRequests", ipstat.ips_localout); - rrddim_set(st, "InReceives", ipstat.ips_total); - rrddim_set(st, "ForwDatagrams", ipstat.ips_forward); - rrddim_set(st, "InDelivers", ipstat.ips_delivered); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_ip_fragsout)) { - st = rrdset_find_localhost("ipv4.fragsout"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "fragsout" - , NULL - , "fragments" - , NULL - , "IPv4 Fragments Sent" - , "packets/s" - , "macos" - , "sysctl" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "FragOKs", ipstat.ips_fragmented); - rrddim_set(st, "FragFails", ipstat.ips_cantfrag); - rrddim_set(st, "FragCreates", ipstat.ips_ofragments); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_ip_fragsin)) { - st = rrdset_find_localhost("ipv4.fragsin"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "fragsin" - , NULL - , "fragments" - , NULL - , "IPv4 Fragments Reassembly" - , "packets/s" - , "macos" - , "sysctl" - , 3011 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "ReasmOKs", ipstat.ips_fragments); - rrddim_set(st, "ReasmFails", ipstat.ips_fragdropped); - rrddim_set(st, "ReasmReqds", ipstat.ips_reassembled); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (likely(do_ip_errors)) { - st = rrdset_find_localhost("ipv4.errors"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "errors" - , NULL - , "errors" - , NULL - , "IPv4 Errors" - , "packets/s" - , "macos" - , "sysctl" - , 3002 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_add(st, 
"InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InDiscards", ipstat.ips_badsum + ipstat.ips_tooshort + ipstat.ips_toosmall + ipstat.ips_toolong); - rrddim_set(st, "OutDiscards", ipstat.ips_odropped); - rrddim_set(st, "InHdrErrors", ipstat.ips_badhlen + ipstat.ips_badlen + ipstat.ips_badoptions + ipstat.ips_badvers); - rrddim_set(st, "InAddrErrors", ipstat.ips_badaddr); - rrddim_set(st, "InUnknownProtos", ipstat.ips_noproto); - rrddim_set(st, "OutNoRoutes", ipstat.ips_noroute); - rrdset_done(st); - } - } - } - - // -------------------------------------------------------------------- - - if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) { - if (unlikely(GETSYSCTL_BY_NAME("net.inet6.ip6.stats", ip6stat))) { - do_ip6_packets = 0; - error("DISABLED: ipv6.packets"); - do_ip6_fragsout = 0; - error("DISABLED: ipv6.fragsout"); - do_ip6_fragsin = 0; - error("DISABLED: ipv6.fragsin"); - do_ip6_errors = 0; - error("DISABLED: ipv6.errors"); - } else { - if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_localout || ip6stat.ip6s_total || - ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) { - do_ip6_packets = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.packets"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "packets" - , NULL - , "packets" - , NULL - , "IPv6 Packets" - , "packets/s" - , "macos" - , "sysctl" - , 3000 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "sent", ip6stat.ip6s_localout); - rrddim_set(st, "received", ip6stat.ip6s_total); - rrddim_set(st, "forwarded", ip6stat.ip6s_forward); - rrddim_set(st, "delivers", ip6stat.ip6s_delivered); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag || - ip6stat.ip6s_ofragments))) { - do_ip6_fragsout = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.fragsout"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "fragsout" - , NULL - , "fragments" - , NULL - , "IPv6 Fragments Sent" - , "packets/s" - , "macos" - , "sysctl" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "ok", ip6stat.ip6s_fragmented); - rrddim_set(st, "failed", ip6stat.ip6s_cantfrag); - rrddim_set(st, "all", ip6stat.ip6s_ofragments); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO && - (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped || - 
ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) { - do_ip6_fragsin = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.fragsin"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "fragsin" - , NULL - , "fragments" - , NULL - , "IPv6 Fragments Reassembly" - , "packets/s" - , "macos" - , "sysctl" - , 3011 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "ok", ip6stat.ip6s_reassembled); - rrddim_set(st, "failed", ip6stat.ip6s_fragdropped); - rrddim_set(st, "timeout", ip6stat.ip6s_fragtimeout); - rrddim_set(st, "all", ip6stat.ip6s_fragments); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && ( - ip6stat.ip6s_toosmall || - ip6stat.ip6s_odropped || - ip6stat.ip6s_badoptions || - ip6stat.ip6s_badvers || - ip6stat.ip6s_exthdrtoolong || - ip6stat.ip6s_sources_none || - ip6stat.ip6s_tooshort || - ip6stat.ip6s_cantforward || - ip6stat.ip6s_noroute))) { - do_ip6_errors = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.errors"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "errors" - , NULL - , "errors" - , NULL - , "IPv6 Errors" - , "packets/s" - , "macos" - , "sysctl" - , 3002 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InDiscards", ip6stat.ip6s_toosmall); - rrddim_set(st, "OutDiscards", ip6stat.ip6s_odropped); - - rrddim_set(st, "InHdrErrors", - ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + ip6stat.ip6s_exthdrtoolong); - rrddim_set(st, "InAddrErrors", ip6stat.ip6s_sources_none); - rrddim_set(st, "InTruncatedPkts", ip6stat.ip6s_tooshort); - rrddim_set(st, "InNoRoutes", ip6stat.ip6s_cantforward); - - rrddim_set(st, "OutNoRoutes", ip6stat.ip6s_noroute); - rrdset_done(st); - } - } - } - - // -------------------------------------------------------------------- - - if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) { - if (unlikely(GETSYSCTL_BY_NAME("net.inet6.icmp6.stats", icmp6stat))) { - do_icmp6 = 0; - error("DISABLED: ipv6.icmp"); - } else { - for (i = 0; i <= ICMP6_MAXTYPE; i++) { - icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i]; - icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i]; - } - icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort; - if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) { - do_icmp6 = CONFIG_BOOLEAN_YES; - st = 
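/*
 * NOTE: icmp6_total sums the per-type in/out histograms
 * (0..ICMP6_MAXTYPE) and then folds the malformed-input counters
 * (badcode, badlen, checksum, tooshort) into msgs_in, mirroring what
 * the IPv4 ICMP chart above does with icps_inhist/icps_outhist.
 */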
rrdset_find_localhost("ipv6.icmp"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "icmp" - , NULL - , "icmp" - , NULL - , "IPv6 ICMP Messages" - , "messages/s" - , "macos" - , "sysctl" - , 10000 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "sent", icmp6_total.msgs_in); - rrddim_set(st, "received", icmp6_total.msgs_out); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) { - do_icmp6_redir = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.icmpredir"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "icmpredir" - , NULL - , "icmp" - , NULL - , "IPv6 ICMP Redirects" - , "redirects/s" - , "macos" - , "sysctl" - , 10050 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "sent", icmp6stat.icp6s_inhist[ND_REDIRECT]); - rrddim_set(st, "received", icmp6stat.icp6s_outhist[ND_REDIRECT]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_badcode || - icmp6stat.icp6s_badlen || - icmp6stat.icp6s_checksum || - icmp6stat.icp6s_tooshort || - icmp6stat.icp6s_error || - icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] || - icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] || - icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] || - icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) { - do_icmp6_errors = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.icmperrors"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "icmperrors" - , NULL - , "icmp" - , NULL - , "IPv6 ICMP Errors" - , "errors/s" - , "macos" - , "sysctl" - , 10100 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InErrors", icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort); - rrddim_set(st, "OutErrors", icmp6stat.icp6s_error); - rrddim_set(st, "InCsumErrors", icmp6stat.icp6s_checksum); - rrddim_set(st, "InDestUnreachs", icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]); - rrddim_set(st, "InPktTooBigs", icmp6stat.icp6s_badlen); - rrddim_set(st, "InTimeExcds", 
icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]); - rrddim_set(st, "InParmProblems", icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]); - rrddim_set(st, "OutDestUnreachs", icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]); - rrddim_set(st, "OutTimeExcds", icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]); - rrddim_set(st, "OutParmProblems", icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] || - icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] || - icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) { - do_icmp6_echos = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.icmpechos"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "icmpechos" - , NULL - , "icmp" - , NULL - , "IPv6 ICMP Echo" - , "messages/s" - , "macos" - , "sysctl" - , 10200 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InEchos", icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]); - rrddim_set(st, "OutEchos", icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]); - rrddim_set(st, "InEchoReplies", icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]); - rrddim_set(st, "OutEchoReplies", icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] || - icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] || - icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) { - do_icmp6_router = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.icmprouter"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "icmprouter" - , NULL - , "icmp" - , NULL - , "IPv6 Router Messages" - , "messages/s" - , "macos" - , "sysctl" - , 10400 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]); - rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]); - rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]); - rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] || - icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] || - icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) { - do_icmp6_neighbor = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.icmpneighbor"); - if (unlikely(!st)) { - st = 
rrdset_create_localhost( - "ipv6" - , "icmpneighbor" - , NULL - , "icmp" - , NULL - , "IPv6 Neighbor Messages" - , "messages/s" - , "macos" - , "sysctl" - , 10500 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]); - rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]); - rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]); - rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && ( - icmp6stat.icp6s_inhist[1] || - icmp6stat.icp6s_inhist[128] || - icmp6stat.icp6s_inhist[129] || - icmp6stat.icp6s_inhist[136] || - icmp6stat.icp6s_outhist[1] || - icmp6stat.icp6s_outhist[128] || - icmp6stat.icp6s_outhist[129] || - icmp6stat.icp6s_outhist[133] || - icmp6stat.icp6s_outhist[135] || - icmp6stat.icp6s_outhist[143]))) { - do_icmp6_types = CONFIG_BOOLEAN_YES; - st = rrdset_find_localhost("ipv6.icmptypes"); - if (unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "icmptypes" - , NULL - , "icmp" - , NULL - , "IPv6 ICMP Types" - , "messages/s" - , "macos" - , "sysctl" - , 10700 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); - - rrddim_set(st, "InType1", icmp6stat.icp6s_inhist[1]); - rrddim_set(st, "InType128", icmp6stat.icp6s_inhist[128]); - rrddim_set(st, "InType129", icmp6stat.icp6s_inhist[129]); - rrddim_set(st, "InType136", icmp6stat.icp6s_inhist[136]); - rrddim_set(st, "OutType1", icmp6stat.icp6s_outhist[1]); - rrddim_set(st, "OutType128", icmp6stat.icp6s_outhist[128]); - rrddim_set(st, "OutType129", icmp6stat.icp6s_outhist[129]); - rrddim_set(st, "OutType133", icmp6stat.icp6s_outhist[133]); - rrddim_set(st, "OutType135", icmp6stat.icp6s_outhist[135]); - rrddim_set(st, "OutType143", icmp6stat.icp6s_outhist[143]); - rrdset_done(st); - } - } - } - - // -------------------------------------------------------------------- - - if (likely(do_uptime)) { - if (unlikely(GETSYSCTL_BY_NAME("kern.boottime", boot_time))) { - do_uptime = 0; - error("DISABLED: system.uptime"); - } else { - clock_gettime(CLOCK_REALTIME, &cur_time); - st = rrdset_find_localhost("system.uptime"); - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "system" - , "uptime" - , NULL - , "uptime" - , NULL - , "System Uptime" - , "seconds" - , "macos" 
- , "sysctl" - , 1000 - , update_every - , RRDSET_TYPE_LINE - ); - rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set(st, "uptime", cur_time.tv_sec - boot_time.tv_sec); - rrdset_done(st); - } - } - - return 0; -} - -int getsysctl_by_name(const char *name, void *ptr, size_t len) -{ - size_t nlen = len; - - if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) { - error("MACOS: sysctl(%s...) failed: %s", name, strerror(errno)); - return 1; - } - if (unlikely(nlen != len)) { - error("MACOS: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); - return 1; - } - return 0; -} diff --git a/src/main.c b/src/main.c deleted file mode 100644 index 798c7f0fc..000000000 --- a/src/main.c +++ /dev/null @@ -1,1071 +0,0 @@ -#include "common.h" - -extern void *cgroups_main(void *ptr); - -void netdata_cleanup_and_exit(int ret) { - // enabling this, is wrong - // because the threads will be cancelled while cleaning up - // netdata_exit = 1; - - error_log_limit_unlimited(); - info("EXIT: netdata prepares to exit with code %d...", ret); - - // cleanup/save the database and exit - info("EXIT: cleaning up the database..."); - rrdhost_cleanup_all(); - - if(!ret) { - // exit cleanly - - // stop everything - info("EXIT: stopping master threads..."); - cancel_main_threads(); - - // free the database - info("EXIT: freeing database memory..."); - rrdhost_free_all(); - } - - // unlink the pid - if(pidfile[0]) { - info("EXIT: removing netdata PID file '%s'...", pidfile); - if(unlink(pidfile) != 0) - error("EXIT: cannot unlink pidfile '%s'.", pidfile); - } - - info("EXIT: all done - netdata is now exiting - bye bye..."); - exit(ret); -} - -struct netdata_static_thread static_threads[] = { - -#ifdef INTERNAL_PLUGIN_NFACCT - // nfacct requires root access - // so, we build it as an external plugin with setuid to root - {"PLUGIN[nfacct]", CONFIG_SECTION_PLUGINS, "nfacct", 1, NULL, NULL, nfacct_main}, -#endif - -#ifdef NETDATA_INTERNAL_CHECKS - // debugging plugin - {"PLUGIN[check]", CONFIG_SECTION_PLUGINS, "checks", 0, NULL, NULL, checks_main}, -#endif - -#if defined(__FreeBSD__) - // FreeBSD internal plugins - {"PLUGIN[freebsd]", CONFIG_SECTION_PLUGINS, "freebsd", 1, NULL, NULL, freebsd_main}, -#elif defined(__APPLE__) - // macOS internal plugins - {"PLUGIN[macos]", CONFIG_SECTION_PLUGINS, "macos", 1, NULL, NULL, macos_main}, -#else - // linux internal plugins - {"PLUGIN[proc]", CONFIG_SECTION_PLUGINS, "proc", 1, NULL, NULL, proc_main}, - {"PLUGIN[diskspace]", CONFIG_SECTION_PLUGINS, "diskspace", 1, NULL, NULL, proc_diskspace_main}, - {"PLUGIN[cgroup]", CONFIG_SECTION_PLUGINS, "cgroups", 1, NULL, NULL, cgroups_main}, - {"PLUGIN[tc]", CONFIG_SECTION_PLUGINS, "tc", 1, NULL, NULL, tc_main}, -#endif /* __FreeBSD__, __APPLE__*/ - - // common plugins for all systems - {"PLUGIN[idlejitter]", CONFIG_SECTION_PLUGINS, "idlejitter", 1, NULL, NULL, cpuidlejitter_main}, - {"BACKENDS", NULL, NULL, 1, NULL, NULL, backends_main}, - {"HEALTH", NULL, NULL, 1, NULL, NULL, health_main}, - {"PLUGINSD", NULL, NULL, 1, NULL, NULL, pluginsd_main}, - {"WEB_SERVER[multi]", NULL, NULL, 1, NULL, NULL, socket_listen_main_multi_threaded}, - {"WEB_SERVER[single]", NULL, NULL, 0, NULL, NULL, socket_listen_main_single_threaded}, - {"WEB_SERVER[static1]", NULL, NULL, 0, NULL, NULL, socket_listen_main_static_threaded}, - {"STREAM", NULL, NULL, 0, NULL, NULL, rrdpush_sender_thread}, - {"STATSD", NULL, NULL, 1, NULL, NULL, statsd_main}, - - {NULL, NULL, NULL, 0, NULL, 
NULL, NULL} -}; - -void web_server_threading_selection(void) { - web_server_mode = web_server_mode_id(config_get(CONFIG_SECTION_WEB, "mode", web_server_mode_name(web_server_mode))); - - int multi_threaded = (web_server_mode == WEB_SERVER_MODE_MULTI_THREADED); - int single_threaded = (web_server_mode == WEB_SERVER_MODE_SINGLE_THREADED); - int static_threaded = (web_server_mode == WEB_SERVER_MODE_STATIC_THREADED); - - int i; - for (i = 0; static_threads[i].name; i++) { - if (static_threads[i].start_routine == socket_listen_main_multi_threaded) - static_threads[i].enabled = multi_threaded; - - if (static_threads[i].start_routine == socket_listen_main_single_threaded) - static_threads[i].enabled = single_threaded; - - if (static_threads[i].start_routine == socket_listen_main_static_threaded) - static_threads[i].enabled = static_threaded; - } -} - -void web_server_config_options(void) { - web_client_timeout = (int) config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", web_client_timeout); - web_client_first_request_timeout = (int) config_get_number(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout); - - respect_web_browser_do_not_track_policy = config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy); - web_x_frame_options = config_get(CONFIG_SECTION_WEB, "x-frame-options response header", ""); - if(!*web_x_frame_options) web_x_frame_options = NULL; - - web_allow_connections_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow connections from", "localhost *"), NULL, SIMPLE_PATTERN_EXACT); - web_allow_dashboard_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow dashboard from", "localhost *"), NULL, SIMPLE_PATTERN_EXACT); - web_allow_badges_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow badges from", "*"), NULL, SIMPLE_PATTERN_EXACT); - web_allow_registry_from = simple_pattern_create(config_get(CONFIG_SECTION_REGISTRY, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT); - web_allow_streaming_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow streaming from", "*"), NULL, SIMPLE_PATTERN_EXACT); - web_allow_netdataconf_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow netdata.conf from", "localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*"), NULL, SIMPLE_PATTERN_EXACT); - -#ifdef NETDATA_WITH_ZLIB - web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip); - - char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default"); - if(!strcmp(s, "default")) - web_gzip_strategy = Z_DEFAULT_STRATEGY; - else if(!strcmp(s, "filtered")) - web_gzip_strategy = Z_FILTERED; - else if(!strcmp(s, "huffman only")) - web_gzip_strategy = Z_HUFFMAN_ONLY; - else if(!strcmp(s, "rle")) - web_gzip_strategy = Z_RLE; - else if(!strcmp(s, "fixed")) - web_gzip_strategy = Z_FIXED; - else { - error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s); - web_gzip_strategy = Z_DEFAULT_STRATEGY; - } - - web_gzip_level = (int)config_get_number(CONFIG_SECTION_WEB, "gzip compression level", 3); - if(web_gzip_level < 1) { - error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). 
Proceeding with level 1 (fastest compression).", web_gzip_level); - web_gzip_level = 1; - } - else if(web_gzip_level > 9) { - error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 9 (best compression).", web_gzip_level); - web_gzip_level = 9; - } -#endif /* NETDATA_WITH_ZLIB */ -} - - -int killpid(pid_t pid, int sig) -{ - int ret = -1; - debug(D_EXIT, "Request to kill pid %d", pid); - - errno = 0; - if(kill(pid, 0) == -1) { - switch(errno) { - case ESRCH: - error("Request to kill pid %d, but it is not running.", pid); - break; - - case EPERM: - error("Request to kill pid %d, but I do not have enough permissions.", pid); - break; - - default: - error("Request to kill pid %d, but I received an error.", pid); - break; - } - } - else { - errno = 0; - ret = kill(pid, sig); - if(ret == -1) { - switch(errno) { - case ESRCH: - error("Cannot kill pid %d. It is not running.", pid); - break; - - case EPERM: - error("Cannot kill pid %d. I do not have enough permissions.", pid); - break; - - default: - error("Cannot kill pid %d. I received an error.", pid); - break; - } - } - } - - return ret; -} - -void cancel_main_threads() { - error_log_limit_unlimited(); - - int i, found = 0, max = 5 * USEC_PER_SEC, step = 100000; - for (i = 0; static_threads[i].name != NULL ; i++) { - if(static_threads[i].enabled == NETDATA_MAIN_THREAD_RUNNING) { - info("EXIT: Stopping master thread: %s", static_threads[i].name); - netdata_thread_cancel(*static_threads[i].thread); - found++; - } - } - - while(found && max > 0) { - max -= step; - info("Waiting for %d threads to finish...", found); - sleep_usec(step); - found = 0; - for (i = 0; static_threads[i].name != NULL ; i++) { - if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED) - found++; - } - } - - if(found) { - for (i = 0; static_threads[i].name != NULL ; i++) { - if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED) - error("Master thread %s is taking too long to exit. Giving up...", static_threads[i].name); - } - } - else - info("All threads finished."); -} - -struct option_def option_definitions[] = { - // opt description arg name default value - { 'c', "Configuration file to load.", "filename", CONFIG_DIR "/" CONFIG_FILENAME}, - { 'D', "Do not fork. 
Run in the foreground.", NULL, "run in the background"}, - { 'h', "Display this help message.", NULL, NULL}, - { 'P', "File to save a pid while running.", "filename", "do not save pid to a file"}, - { 'i', "The IP address to listen to.", "IP", "all IP addresses IPv4 and IPv6"}, - { 'p', "API/Web port to use.", "port", "19999"}, - { 's', "Prefix for /proc and /sys (for containers).", "path", "no prefix"}, - { 't', "The internal clock of netdata.", "seconds", "1"}, - { 'u', "Run as user.", "username", "netdata"}, - { 'v', "Print netdata version and exit.", NULL, NULL}, - { 'V', "Print netdata version and exit.", NULL, NULL}, - { 'W', "See Advanced options below.", "options", NULL}, -}; - -int help(int exitcode) { - FILE *stream; - if(exitcode == 0) - stream = stdout; - else - stream = stderr; - - int num_opts = sizeof(option_definitions) / sizeof(struct option_def); - int i; - int max_len_arg = 0; - - // Compute maximum argument length - for( i = 0; i < num_opts; i++ ) { - if(option_definitions[i].arg_name) { - int len_arg = (int)strlen(option_definitions[i].arg_name); - if(len_arg > max_len_arg) max_len_arg = len_arg; - } - } - - if(max_len_arg > 30) max_len_arg = 30; - if(max_len_arg < 20) max_len_arg = 20; - - fprintf(stream, "%s", "\n" - " ^\n" - " |.-. .-. .-. .-. . netdata \n" - " | '-' '-' '-' '-' real-time performance monitoring, done right! \n" - " +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->\n" - "\n" - " Copyright (C) 2016-2017, Costa Tsaousis <costa@tsaousis.gr>\n" - " Released under GNU General Public License v3 or later.\n" - " All rights reserved.\n" - "\n" - " Home Page : https://my-netdata.io\n" - " Source Code: https://github.com/firehol/netdata\n" - " Wiki / Docs: https://github.com/firehol/netdata/wiki\n" - " Support : https://github.com/firehol/netdata/issues\n" - " License : https://github.com/firehol/netdata/blob/master/LICENSE.md\n" - "\n" - " Twitter : https://twitter.com/linuxnetdata\n" - " Facebook : https://www.facebook.com/linuxnetdata/\n" - "\n" - " netdata is a https://firehol.org project.\n" - "\n" - "\n" - ); - - fprintf(stream, " SYNOPSIS: netdata [options]\n"); - fprintf(stream, "\n"); - fprintf(stream, " Options:\n\n"); - - // Output options description. - for( i = 0; i < num_opts; i++ ) { - fprintf(stream, " -%c %-*s %s", option_definitions[i].val, max_len_arg, option_definitions[i].arg_name ? option_definitions[i].arg_name : "", option_definitions[i].description); - if(option_definitions[i].default_value) { - fprintf(stream, "\n %c %-*s Default: %s\n", ' ', max_len_arg, "", option_definitions[i].default_value); - } else { - fprintf(stream, "\n"); - } - fprintf(stream, "\n"); - } - - fprintf(stream, "\n Advanced options:\n\n" - " -W stacksize=N Set the stacksize (in bytes).\n\n" - " -W debug_flags=N Set runtime tracing to debug.log.\n\n" - " -W unittest Run internal unittests and exit.\n\n" - " -W set section option value\n" - " set netdata.conf option from the command line.\n\n" - " -W simple-pattern pattern string\n" - " Check if string matches pattern and exit.\n\n" - ); - - fprintf(stream, "\n Signals netdata handles:\n\n" - " - HUP Close and reopen log files.\n" - " - USR1 Save internal DB to disk.\n" - " - USR2 Reload health configuration.\n" - "\n" - ); - - fflush(stream); - return exitcode; -} - -// TODO: Remove this function with the nix major release. -void remove_option(int opt_index, int *argc, char **argv) { - int i = opt_index; - // remove the options. 
- do { - *argc = *argc - 1; - for(i = opt_index; i < *argc; i++) { - argv[i] = argv[i+1]; - } - i = opt_index; - } while(argv[i][0] != '-' && opt_index >= *argc); -} - -static const char *verify_required_directory(const char *dir) { - if(chdir(dir) == -1) - fatal("Cannot cd to directory '%s'", dir); - - DIR *d = opendir(dir); - if(!d) - fatal("Cannot examine the contents of directory '%s'", dir); - closedir(d); - - return dir; -} - -void log_init(void) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/debug.log", netdata_configured_log_dir); - stdout_filename = config_get(CONFIG_SECTION_GLOBAL, "debug log", filename); - - snprintfz(filename, FILENAME_MAX, "%s/error.log", netdata_configured_log_dir); - stderr_filename = config_get(CONFIG_SECTION_GLOBAL, "error log", filename); - - snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir); - stdaccess_filename = config_get(CONFIG_SECTION_GLOBAL, "access log", filename); - - error_log_throttle_period_backup = - error_log_throttle_period = config_get_number(CONFIG_SECTION_GLOBAL, "errors flood protection period", error_log_throttle_period); - error_log_errors_per_period = (unsigned long)config_get_number(CONFIG_SECTION_GLOBAL, "errors to trigger flood protection", (long long int)error_log_errors_per_period); - - setenv("NETDATA_ERRORS_THROTTLE_PERIOD", config_get(CONFIG_SECTION_GLOBAL, "errors flood protection period" , ""), 1); - setenv("NETDATA_ERRORS_PER_PERIOD", config_get(CONFIG_SECTION_GLOBAL, "errors to trigger flood protection", ""), 1); -} - -static void backwards_compatible_config() { - // allow existing configurations to work with the current version of netdata - - if(config_exists(CONFIG_SECTION_GLOBAL, "multi threaded web server")) { - int mode = config_get_boolean(CONFIG_SECTION_GLOBAL, "multi threaded web server", 1); - web_server_mode = (mode)?WEB_SERVER_MODE_MULTI_THREADED:WEB_SERVER_MODE_SINGLE_THREADED; - } - - // move [global] options to the [web] section - config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog", - CONFIG_SECTION_WEB, "listen backlog"); - - config_move(CONFIG_SECTION_GLOBAL, "bind socket to IP", - CONFIG_SECTION_WEB, "bind to"); - - config_move(CONFIG_SECTION_GLOBAL, "bind to", - CONFIG_SECTION_WEB, "bind to"); - - config_move(CONFIG_SECTION_GLOBAL, "port", - CONFIG_SECTION_WEB, "default port"); - - config_move(CONFIG_SECTION_GLOBAL, "default port", - CONFIG_SECTION_WEB, "default port"); - - config_move(CONFIG_SECTION_GLOBAL, "disconnect idle web clients after seconds", - CONFIG_SECTION_WEB, "disconnect idle clients after seconds"); - - config_move(CONFIG_SECTION_GLOBAL, "respect web browser do not track policy", - CONFIG_SECTION_WEB, "respect do not track policy"); - - config_move(CONFIG_SECTION_GLOBAL, "web x-frame-options header", - CONFIG_SECTION_WEB, "x-frame-options response header"); - - config_move(CONFIG_SECTION_GLOBAL, "enable web responses gzip compression", - CONFIG_SECTION_WEB, "enable gzip compression"); - - config_move(CONFIG_SECTION_GLOBAL, "web compression strategy", - CONFIG_SECTION_WEB, "gzip compression strategy"); - - config_move(CONFIG_SECTION_GLOBAL, "web compression level", - CONFIG_SECTION_WEB, "gzip compression level"); - - config_move(CONFIG_SECTION_GLOBAL, "web files owner", - CONFIG_SECTION_WEB, "web files owner"); - - config_move(CONFIG_SECTION_GLOBAL, "web files group", - CONFIG_SECTION_WEB, "web files group"); - - config_move(CONFIG_SECTION_BACKEND, "opentsdb host tags", - CONFIG_SECTION_BACKEND, "host tags"); -} - -static 
void get_netdata_configured_variables() { - backwards_compatible_config(); - - // ------------------------------------------------------------------------ - // get the hostname - - char buf[HOSTNAME_MAX + 1]; - if(gethostname(buf, HOSTNAME_MAX) == -1) - error("Cannot get machine hostname."); - - netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf); - debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname); - - // ------------------------------------------------------------------------ - // get default database size - - default_rrd_history_entries = (int) config_get_number(CONFIG_SECTION_GLOBAL, "history", align_entries_to_pagesize(default_rrd_memory_mode, RRD_DEFAULT_HISTORY_ENTRIES)); - - long h = align_entries_to_pagesize(default_rrd_memory_mode, default_rrd_history_entries); - if(h != default_rrd_history_entries) { - config_set_number(CONFIG_SECTION_GLOBAL, "history", h); - default_rrd_history_entries = (int)h; - } - - if(default_rrd_history_entries < 5 || default_rrd_history_entries > RRD_HISTORY_ENTRIES_MAX) { - error("Invalid history entries %d given. Defaulting to %d.", default_rrd_history_entries, RRD_DEFAULT_HISTORY_ENTRIES); - default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES; - } - - // ------------------------------------------------------------------------ - // get default database update frequency - - default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_GLOBAL, "update every", UPDATE_EVERY); - if(default_rrd_update_every < 1 || default_rrd_update_every > 600) { - error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY_MAX); - default_rrd_update_every = UPDATE_EVERY; - } - - // ------------------------------------------------------------------------ - // get system paths - - netdata_configured_config_dir = config_get(CONFIG_SECTION_GLOBAL, "config directory", CONFIG_DIR); - netdata_configured_log_dir = config_get(CONFIG_SECTION_GLOBAL, "log directory", LOG_DIR); - netdata_configured_web_dir = config_get(CONFIG_SECTION_GLOBAL, "web files directory", WEB_DIR); - netdata_configured_cache_dir = config_get(CONFIG_SECTION_GLOBAL, "cache directory", CACHE_DIR); - netdata_configured_varlib_dir = config_get(CONFIG_SECTION_GLOBAL, "lib directory", VARLIB_DIR); - netdata_configured_home_dir = config_get(CONFIG_SECTION_GLOBAL, "home directory", CACHE_DIR); - - { - char plugins_dirs[(FILENAME_MAX * 2) + 1]; - snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR); - netdata_configured_plugins_dir_base = strdupz(config_get(CONFIG_SECTION_GLOBAL, "plugins directory", plugins_dirs)); - quoted_strings_splitter(netdata_configured_plugins_dir_base, plugin_directories, PLUGINSD_MAX_DIRECTORIES, config_isspace); - netdata_configured_plugins_dir = plugin_directories[0]; - } - - // ------------------------------------------------------------------------ - // get default memory mode for the database - - default_rrd_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_GLOBAL, "memory mode", rrd_memory_mode_name(default_rrd_memory_mode))); - - // ------------------------------------------------------------------------ - - netdata_configured_host_prefix = config_get(CONFIG_SECTION_GLOBAL, "host access prefix", ""); - - // -------------------------------------------------------------------- - // get KSM settings - -#ifdef MADV_MERGEABLE - enable_ksm = config_get_boolean(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)", 
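/*
 * NOTE: KSM support is compiled in only where the platform defines
 * MADV_MERGEABLE (Linux); the boolean read here is assumed to control
 * whether netdata madvise()s its database mappings as mergeable so the
 * kernel samepage merging daemon can deduplicate them.
 */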
-static void get_system_timezone(void) {
-    // avoid flood calls to stat(/etc/localtime)
-    // http://stackoverflow.com/questions/4554271/how-to-avoid-excessive-stat-etc-localtime-calls-in-strftime-on-linux
-    const char *tz = getenv("TZ");
-    if(!tz || !*tz)
-        setenv("TZ", config_get(CONFIG_SECTION_GLOBAL, "TZ environment variable", ":/etc/localtime"), 0);
-
-    char buffer[FILENAME_MAX + 1] = "";
-    const char *timezone = NULL;
-    ssize_t ret;
-
-    // use the TZ variable
-    if(tz && *tz && *tz != ':') {
-        timezone = tz;
-        // info("TIMEZONE: using TZ variable '%s'", timezone);
-    }
-
-    // use the contents of /etc/timezone
-    if(!timezone && !read_file("/etc/timezone", buffer, FILENAME_MAX)) {
-        timezone = buffer;
-        // info("TIMEZONE: using the contents of /etc/timezone: '%s'", timezone);
-    }
-
-    // read the link /etc/localtime
-    if(!timezone) {
-        ret = readlink("/etc/localtime", buffer, FILENAME_MAX);
-
-        if(ret > 0) {
-            buffer[ret] = '\0';
-
-            char *cmp = "/usr/share/zoneinfo/";
-            size_t cmp_len = strlen(cmp);
-
-            char *s = strstr(buffer, cmp);
-            if (s && s[cmp_len]) {
-                timezone = &s[cmp_len];
-                // info("TIMEZONE: using the link of /etc/localtime: '%s'", timezone);
-            }
-        }
-        else
-            buffer[0] = '\0';
-    }
-
-    // find the timezone from strftime()
-    if(!timezone) {
-        time_t t;
-        struct tm *tmp, tmbuf;
-
-        t = now_realtime_sec();
-        tmp = localtime_r(&t, &tmbuf);
-
-        if (tmp != NULL) {
-            if(strftime(buffer, FILENAME_MAX, "%Z", tmp) == 0)
-                buffer[0] = '\0';
-            else {
-                buffer[FILENAME_MAX] = '\0';
-                timezone = buffer;
-                // info("TIMEZONE: using strftime(): '%s'", timezone);
-            }
-        }
-    }
-
-    if(timezone && *timezone) {
-        // make sure it does not have illegal characters
-        // info("TIMEZONE: fixing '%s'", timezone);
-
-        size_t len = strlen(timezone);
-        char tmp[len + 1];
-        char *d = tmp;
-        *d = '\0';
-
-        while(*timezone) {
-            if(isalnum(*timezone) || *timezone == '_' || *timezone == '/')
-                *d++ = *timezone++;
-            else
-                timezone++;
-        }
-        *d = '\0';
-        strncpyz(buffer, tmp, len);
-        timezone = buffer;
-        // info("TIMEZONE: fixed as '%s'", timezone);
-    }
-
-    if(!timezone || !*timezone)
-        timezone = "unknown";
-
-    netdata_configured_timezone = config_get(CONFIG_SECTION_GLOBAL, "timezone", timezone);
-}
-
-void set_global_environment() {
-    {
-        char b[16];
-        snprintfz(b, 15, "%d", default_rrd_update_every);
-        setenv("NETDATA_UPDATE_EVERY", b, 1);
-    }
-
-    setenv("NETDATA_HOSTNAME"   , netdata_configured_hostname, 1);
-    setenv("NETDATA_CONFIG_DIR" , verify_required_directory(netdata_configured_config_dir), 1);
-    setenv("NETDATA_PLUGINS_DIR", verify_required_directory(netdata_configured_plugins_dir), 1);
-    setenv("NETDATA_WEB_DIR"    , verify_required_directory(netdata_configured_web_dir), 1);
-    setenv("NETDATA_CACHE_DIR"  , verify_required_directory(netdata_configured_cache_dir), 1);
-    setenv("NETDATA_LIB_DIR"    , verify_required_directory(netdata_configured_varlib_dir), 1);
-    setenv("NETDATA_LOG_DIR"    , verify_required_directory(netdata_configured_log_dir), 1);
-    setenv("HOME"               , verify_required_directory(netdata_configured_home_dir), 1);
-    setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1);
-
-    get_system_timezone();
-
-    // set the path we need
-    char path[1024 + 1], *p = getenv("PATH");
-    if(!p) p = "/bin:/usr/bin";
-    snprintfz(path, 1024, "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin");
-    setenv("PATH", config_get(CONFIG_SECTION_PLUGINS, "PATH environment variable", path), 1);
-
-    // python options
-    p = getenv("PYTHONPATH");
-    if(!p) p = "";
-    setenv("PYTHONPATH", config_get(CONFIG_SECTION_PLUGINS, "PYTHONPATH environment variable", p), 1);
-
-    // disable buffering for python plugins
-    setenv("PYTHONUNBUFFERED", "1", 1);
-
-    // switch to standard locale for plugins
-    setenv("LC_ALL", "C", 1);
-}
-
-int main(int argc, char **argv) {
-    int i;
-    int config_loaded = 0;
-    int dont_fork = 0;
-    size_t default_stacksize;
-
-    // set the name for logging
-    program_name = "netdata";
-
-    // parse deprecated options
-    // TODO: Remove this block with the next major release.
-    {
-        i = 1;
-        while(i < argc) {
-            if(strcmp(argv[i], "-pidfile") == 0 && (i+1) < argc) {
-                strncpyz(pidfile, argv[i+1], FILENAME_MAX);
-                fprintf(stderr, "%s: deprecated option -- %s -- please use -P instead.\n", argv[0], argv[i]);
-                remove_option(i, &argc, argv);
-            }
-            else if(strcmp(argv[i], "-nodaemon") == 0 || strcmp(argv[i], "-nd") == 0) {
-                dont_fork = 1;
-                fprintf(stderr, "%s: deprecated option -- %s -- please use -D instead.\n", argv[0], argv[i]);
-                remove_option(i, &argc, argv);
-            }
-            else if(strcmp(argv[i], "-ch") == 0 && (i+1) < argc) {
-                config_set(CONFIG_SECTION_GLOBAL, "host access prefix", argv[i+1]);
-                fprintf(stderr, "%s: deprecated option -- %s -- please use -s instead.\n", argv[0], argv[i]);
-                remove_option(i, &argc, argv);
-            }
-            else if(strcmp(argv[i], "-l") == 0 && (i+1) < argc) {
-                config_set(CONFIG_SECTION_GLOBAL, "history", argv[i+1]);
-                fprintf(stderr, "%s: deprecated option -- %s -- This option will be removed with V2.*.\n", argv[0], argv[i]);
-                remove_option(i, &argc, argv);
-            }
-            else i++;
-        }
-    }
-
-    // parse options
-    {
-        int num_opts = sizeof(option_definitions) / sizeof(struct option_def);
-        char optstring[(num_opts * 2) + 1];
-
-        int string_i = 0;
-        for( i = 0; i < num_opts; i++ ) {
-            optstring[string_i] = option_definitions[i].val;
-            string_i++;
-            if(option_definitions[i].arg_name) {
-                optstring[string_i] = ':';
-                string_i++;
-            }
-        }
-        // terminate optstring
-        optstring[string_i] = '\0';
-
-        int opt;
-        while( (opt = getopt(argc, argv, optstring)) != -1 ) {
-            switch(opt) {
-                case 'c':
-                    if(config_load(optarg, 1) != 1) {
-                        error("Cannot load configuration file %s.", optarg);
-                        return 1;
-                    }
-                    else {
-                        debug(D_OPTIONS, "Configuration loaded from %s.", optarg);
-                        config_loaded = 1;
-                    }
-                    break;
-                case 'D':
-                    dont_fork = 1;
-                    break;
-                case 'h':
-                    return help(0);
-                case 'i':
-                    config_set(CONFIG_SECTION_WEB, "bind to", optarg);
-                    break;
-                case 'P':
-                    strncpy(pidfile, optarg, FILENAME_MAX);
-                    pidfile[FILENAME_MAX] = '\0';
-                    break;
-                case 'p':
-                    config_set(CONFIG_SECTION_GLOBAL, "default port", optarg);
-                    break;
-                case 's':
-                    config_set(CONFIG_SECTION_GLOBAL, "host access prefix", optarg);
-                    break;
-                case 't':
-                    config_set(CONFIG_SECTION_GLOBAL, "update every", optarg);
-                    break;
-                case 'u':
-                    config_set(CONFIG_SECTION_GLOBAL, "run as user", optarg);
-                    break;
-                case 'v':
-                case 'V':
-                    printf("%s %s\n", program_name, program_version);
-                    return 0;
-                case 'W':
-                    {
-                        char* stacksize_string = "stacksize=";
-                        char* debug_flags_string = "debug_flags=";
-
-                        if(strcmp(optarg, "unittest") == 0) {
-                            if(unit_test_buffer()) return 1;
-                            if(unit_test_str2ld()) return 1;
-                            //default_rrd_update_every = 1;
-                            //default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
-                            //if(!config_loaded) config_load(NULL, 0);
-                            get_netdata_configured_variables();
-                            default_rrd_update_every = 1;
-                            default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
-                            default_health_enabled = 0;
-                            rrd_init("unittest");
-                            default_rrdpush_enabled = 0;
-                            if(run_all_mockup_tests()) return 1;
-                            if(unit_test_storage()) return 1;
-                            fprintf(stderr, "\n\nALL TESTS PASSED\n\n");
-                            return 0;
-                        }
-                        else if(strcmp(optarg, "simple-pattern") == 0) {
-                            if(optind + 2 > argc) {
-                                fprintf(stderr, "%s", "\nUSAGE: -W simple-pattern 'pattern' 'string'\n\n"
-                                        " Checks if 'pattern' matches the given 'string'.\n"
-                                        " - 'pattern' can be one or more space separated words.\n"
-                                        " - each 'word' can contain one or more asterisks.\n"
-                                        " - words starting with '!' give negative matches.\n"
-                                        " - words are processed left to right\n"
-                                        "\n"
-                                        "Examples:\n"
-                                        "\n"
-                                        " > match all veth interfaces, except veth0:\n"
-                                        "\n"
-                                        "   -W simple-pattern '!veth0 veth*' 'veth12'\n"
-                                        "\n"
-                                        "\n"
-                                        " > match all *.ext files directly in /path/:\n"
-                                        "   (this will not match *.ext files in a subdir of /path/)\n"
-                                        "\n"
-                                        "   -W simple-pattern '!/path/*/*.ext /path/*.ext' '/path/test.ext'\n"
-                                        "\n"
-                                );
-                                return 1;
-                            }
-
-                            const char *haystack = argv[optind];
-                            const char *needle = argv[optind + 1];
-                            size_t len = strlen(needle) + 1;
-                            char wildcarded[len];
-
-                            SIMPLE_PATTERN *p = simple_pattern_create(haystack, NULL, SIMPLE_PATTERN_EXACT);
-                            int ret = simple_pattern_matches_extract(p, needle, wildcarded, len);
-                            simple_pattern_free(p);
-
-                            if(ret) {
-                                fprintf(stdout, "RESULT: MATCHED - pattern '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
-                                return 0;
-                            }
-                            else {
-                                fprintf(stdout, "RESULT: NOT MATCHED - pattern '%s' does not match '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
-                                return 1;
-                            }
-                        }
-                        else if(strncmp(optarg, stacksize_string, strlen(stacksize_string)) == 0) {
-                            optarg += strlen(stacksize_string);
-                            config_set(CONFIG_SECTION_GLOBAL, "pthread stack size", optarg);
-                        }
-                        else if(strncmp(optarg, debug_flags_string, strlen(debug_flags_string)) == 0) {
-                            optarg += strlen(debug_flags_string);
-                            config_set(CONFIG_SECTION_GLOBAL, "debug flags", optarg);
-                            debug_flags = strtoull(optarg, NULL, 0);
-                        }
-                        else if(strcmp(optarg, "set") == 0) {
-                            if(optind + 3 > argc) {
-                                fprintf(stderr, "%s", "\nUSAGE: -W set 'section' 'key' 'value'\n\n"
-                                        " Overwrites settings of netdata.conf.\n"
-                                        "\n"
-                                        " These options interact with: -c netdata.conf\n"
-                                        " If -c netdata.conf is given on the command line,\n"
-                                        " before -W set... the user may overwrite command\n"
-                                        " line parameters at netdata.conf\n"
-                                        " If -c netdata.conf is given after (or missing)\n"
-                                        " -W set... the user cannot overwrite the command line\n"
-                                        " parameters."
-                                        "\n"
-                                );
-                                return 1;
-                            }
-                            const char *section = argv[optind];
-                            const char *key = argv[optind + 1];
-                            const char *value = argv[optind + 2];
-                            optind += 3;
-
-                            // set this one as the default
-                            // only if it is not already set in the config file
-                            // so the caller can use -c netdata.conf before or
-                            // after this parameter to prevent or allow overwriting
-                            // variables at netdata.conf
-                            config_set_default(section, key, value);
-
-                            // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
-                        }
-                        else if(strcmp(optarg, "get") == 0) {
-                            if(optind + 3 > argc) {
-                                fprintf(stderr, "%s", "\nUSAGE: -W get 'section' 'key' 'default'\n\n"
-                                        " Prints settings of netdata.conf.\n"
-                                        "\n"
-                                        " These options interact with: -c netdata.conf\n"
-                                        " -c netdata.conf has to be given before -W get.\n"
-                                        "\n"
-                                );
-                                return 1;
-                            }
-
-                            if(!config_loaded) {
-                                fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE, before -W get. Using default config.\n");
-                                config_load(NULL, 0);
-                            }
-
-                            backwards_compatible_config();
-                            get_netdata_configured_variables();
-
-                            const char *section = argv[optind];
-                            const char *key = argv[optind + 1];
-                            const char *def = argv[optind + 2];
-                            const char *value = config_get(section, key, def);
-                            printf("%s\n", value);
-                            return 0;
-                        }
-                        else {
-                            fprintf(stderr, "Unknown -W parameter '%s'\n", optarg);
-                            return help(1);
-                        }
-                    }
-                    break;
-
-                default: /* ? */
-                    fprintf(stderr, "Unknown parameter '%c'\n", opt);
-                    return help(1);
-            }
-        }
-    }
-
-#ifdef _SC_OPEN_MAX
-    // close all open file descriptors, except the standard ones
-    // the caller may have left open files (lxc-attach has this issue)
-    {
-        int fd;
-        for(fd = (int) (sysconf(_SC_OPEN_MAX) - 1); fd > 2; fd--)
-            if(fd_is_valid(fd)) close(fd);
-    }
-#endif
-
-    if(!config_loaded)
-        config_load(NULL, 0);
-
-    // ------------------------------------------------------------------------
-    // initialize netdata
-    {
-        char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1");
-        if(pmax && *pmax)
-            setenv("MALLOC_ARENA_MAX", pmax, 1);
-
-#if defined(HAVE_C_MALLOPT)
-        i = (int)config_get_number(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for netdata", 1);
-        if(i > 0)
-            mallopt(M_ARENA_MAX, i);
-#endif
-
-        // prepare configuration environment variables for the plugins
-
-        get_netdata_configured_variables();
-        set_global_environment();
-
-        // stay cd'ed into config_dir, so the plugins can refer
-        // to their config files using relative filenames
-        if(chdir(netdata_configured_config_dir) == -1)
-            fatal("Cannot cd to '%s'", netdata_configured_config_dir);
-    }
-
-    char *user = NULL;
-
-    {
-        // --------------------------------------------------------------------
-        // get the debugging flags from the configuration file
-
-        char *flags = config_get(CONFIG_SECTION_GLOBAL, "debug flags", "0x0000000000000000");
-        setenv("NETDATA_DEBUG_FLAGS", flags, 1);
-
-        debug_flags = strtoull(flags, NULL, 0);
-        debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags);
-
-        if(debug_flags != 0) {
-            struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
-            if(setrlimit(RLIMIT_CORE, &rl) != 0)
-                error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
-
-#ifdef HAVE_SYS_PRCTL_H
-            prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
-#endif
-        }
-
-
-        // --------------------------------------------------------------------
-        // get log filenames and settings
-
-        log_init();
-        error_log_limit_unlimited();
-
-
-        // --------------------------------------------------------------------
-        // load stream.conf
-        {
-            char filename[FILENAME_MAX + 1];
-            snprintfz(filename, FILENAME_MAX, "%s/stream.conf", netdata_configured_config_dir);
-            appconfig_load(&stream_config, filename, 0);
-        }
-
-
-        // --------------------------------------------------------------------
-        // setup process signals
-
-        // block signals while initializing threads.
-        // this causes the threads to block signals.
-        signals_block();
-
-        // setup the signals we want to use
-        signals_init();
-
-        // setup threads configs
-        default_stacksize = netdata_threads_init();
-
-
-        // --------------------------------------------------------------------
-        // check which threads are enabled and initialize them
-
-        for (i = 0; static_threads[i].name != NULL ; i++) {
-            struct netdata_static_thread *st = &static_threads[i];
-
-            if(st->config_name)
-                st->enabled = config_get_boolean(st->config_section, st->config_name, st->enabled);
-
-            if(st->enabled && st->init_routine)
-                st->init_routine();
-        }
-
-
-        // --------------------------------------------------------------------
-        // get the user we should run as
-
-        // IMPORTANT: this is required before web_files_uid()
-        if(getuid() == 0) {
-            user = config_get(CONFIG_SECTION_GLOBAL, "run as user", NETDATA_USER);
-        }
-        else {
-            struct passwd *passwd = getpwuid(getuid());
-            user = config_get(CONFIG_SECTION_GLOBAL, "run as user", (passwd && passwd->pw_name)?passwd->pw_name:"");
-        }
-
-        // --------------------------------------------------------------------
-        // create the listening sockets
-
-        web_client_api_v1_init();
-        web_server_threading_selection();
-
-        if(web_server_mode != WEB_SERVER_MODE_NONE)
-            api_listen_sockets_setup();
-    }
-
-    // initialize the log files
-    open_all_log_files();
-
-#ifdef NETDATA_INTERNAL_CHECKS
-    if(debug_flags != 0) {
-        struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
-        if(setrlimit(RLIMIT_CORE, &rl) != 0)
-            error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
-#ifdef HAVE_SYS_PRCTL_H
-        prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
-#endif
-    }
-#endif /* NETDATA_INTERNAL_CHECKS */
-
-    // get the max file limit
-    if(getrlimit(RLIMIT_NOFILE, &rlimit_nofile) != 0)
-        error("getrlimit(RLIMIT_NOFILE) failed");
-    else
-        info("resources control: allowed file descriptors: soft = %zu, max = %zu", rlimit_nofile.rlim_cur, rlimit_nofile.rlim_max);
-
-    // fork, switch user, create pid file, set process priority
-    if(become_daemon(dont_fork, user) == -1)
-        fatal("Cannot daemonize myself.");
-
-    info("netdata started on pid %d.", getpid());
-
-    // IMPORTANT: these have to run once, while single threaded
-    // but after we have switched user
-    web_files_uid();
-    web_files_gid();
-
-    netdata_threads_init_after_fork((size_t)config_get_number(CONFIG_SECTION_GLOBAL, "pthread stack size", (long)default_stacksize));
-
-    // ------------------------------------------------------------------------
-    // initialize rrd, registry, health, rrdpush, etc.
-
-    rrd_init(netdata_configured_hostname);
-
-
-    // ------------------------------------------------------------------------
-    // enable log flood protection
-
-    error_log_limit_reset();
-
-
-    // ------------------------------------------------------------------------
-    // spawn the threads
-
-    web_server_config_options();
-
-    for (i = 0; static_threads[i].name != NULL ; i++) {
-        struct netdata_static_thread *st = &static_threads[i];
-
-        if(st->enabled) {
-            st->thread = mallocz(sizeof(netdata_thread_t));
-            debug(D_SYSTEM, "Starting thread %s.", st->name);
-            netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, st);
-        }
-        else debug(D_SYSTEM, "Not starting thread %s.", st->name);
-    }
-
-    info("netdata initialization completed. Enjoy real-time performance monitoring!");
-
-
-    // ------------------------------------------------------------------------
-    // unblock signals
-
-    signals_unblock();
-
-    // ------------------------------------------------------------------------
-    // Handle signals
-
-    signals_handle();
-
-    // should never reach this point
-    // but we need it for rpmlint #2752
-    return 1;
-}
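The -W simple-pattern helper above exercises netdata's simple patterns: space-separated words, '*' wildcards, '!' for negative matches, evaluated left to right with the first matching word deciding. A rough standalone approximation using POSIX fnmatch(3) (illustration only; the real matcher is libnetdata's simple_pattern, which also extracts the wildcarded part and does not treat '?' or '[' specially):

    #include <fnmatch.h>
    #include <stdio.h>
    #include <string.h>

    /* Returns 1 if 'str' matches the space-separated pattern list, 0 otherwise.
     * The first matching word decides; a leading '!' makes it a negative match. */
    static int simple_pattern_sketch(const char *pattern, const char *str) {
        char copy[1024];
        strncpy(copy, pattern, sizeof(copy) - 1);
        copy[sizeof(copy) - 1] = '\0';

        char *saveptr = NULL;
        for(char *word = strtok_r(copy, " ", &saveptr); word; word = strtok_r(NULL, " ", &saveptr)) {
            int negative = (*word == '!');
            if(negative) word++;
            if(fnmatch(word, str, 0) == 0)
                return negative ? 0 : 1;
        }
        return 0;
    }

    int main(void) {
        /* mirrors the usage example in the help text above */
        printf("%d\n", simple_pattern_sketch("!veth0 veth*", "veth12")); /* prints 1 */
        printf("%d\n", simple_pattern_sketch("!veth0 veth*", "veth0"));  /* prints 0 */
        return 0;
    }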
diff --git a/src/main.h b/src/main.h
deleted file mode 100644
index d29bf74e7..000000000
--- a/src/main.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef NETDATA_MAIN_H
-#define NETDATA_MAIN_H 1
-
-#define NETDATA_MAIN_THREAD_RUNNING   CONFIG_BOOLEAN_YES
-#define NETDATA_MAIN_THREAD_EXITING   (CONFIG_BOOLEAN_YES + 1)
-#define NETDATA_MAIN_THREAD_EXITED    CONFIG_BOOLEAN_NO
-
-/**
- * This struct contains information about command line options.
- */
-struct option_def {
-    /** The option character */
-    const char val;
-    /** Short description of what the option does */
-    const char *description;
-    /** Name of the argument displayed in SYNOPSIS */
-    const char *arg_name;
-    /** Default value if not set */
-    const char *default_value;
-};
-
-struct netdata_static_thread {
-    char *name;
-
-    char *config_section;
-    char *config_name;
-
-    volatile sig_atomic_t enabled;
-
-    netdata_thread_t *thread;
-
-    void (*init_routine) (void);
-    void *(*start_routine) (void *);
-};
-
-extern void cancel_main_threads(void);
-extern int killpid(pid_t pid, int signal);
-extern void netdata_cleanup_and_exit(int ret) NORETURN;
-
-#endif /* NETDATA_MAIN_H */
diff --git a/src/plugin_checks.c b/src/plugin_checks.c
deleted file mode 100644
index b99b97d40..000000000
--- a/src/plugin_checks.c
+++ /dev/null
@@ -1,127 +0,0 @@
-#include "common.h"
-
-#ifdef NETDATA_INTERNAL_CHECKS
-
-static void checks_main_cleanup(void *ptr) {
-    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
-    info("cleaning up...");
-
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *checks_main(void *ptr) {
-    netdata_thread_cleanup_push(checks_main_cleanup, ptr);
-
-    usec_t usec = 0, susec = localhost->rrd_update_every * USEC_PER_SEC, loop_usec = 0, total_susec = 0;
-    struct timeval now, last, loop;
-
-    RRDSET *check1, *check2, *check3, *apps_cpu = NULL;
-
-    check1 = rrdset_create_localhost(
-            "netdata"
-            , "check1"
-            , NULL
-            , "netdata"
-            , NULL
-            , "Caller gives microseconds"
-            , "a million !"
-            , "netdata"
-            , "checks"
-            , 99999
-            , localhost->rrd_update_every
-            , RRDSET_TYPE_LINE
-    );
-
-    rrddim_add(check1, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
-    rrddim_add(check1, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-    check2 = rrdset_create_localhost(
-            "netdata"
-            , "check2"
-            , NULL
-            , "netdata"
-            , NULL
-            , "Netdata calcs microseconds"
-            , "a million !"
-            , "netdata"
-            , "checks"
-            , 99999
-            , localhost->rrd_update_every
-            , RRDSET_TYPE_LINE
-    );
-    rrddim_add(check2, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
-    rrddim_add(check2, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-    check3 = rrdset_create_localhost(
-            "netdata"
-            , "checkdt"
-            , NULL
-            , "netdata"
-            , NULL
-            , "Clock difference"
-            , "microseconds diff"
-            , "netdata"
-            , "checks"
-            , 99999
-            , localhost->rrd_update_every
-            , RRDSET_TYPE_LINE
-    );
-    rrddim_add(check3, "caller", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-    rrddim_add(check3, "netdata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-    rrddim_add(check3, "apps.plugin", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
-    now_realtime_timeval(&last);
-    while(!netdata_exit) {
-        usleep(susec);
-
-        // find the time to sleep in order to wait exactly update_every seconds
-        now_realtime_timeval(&now);
-        loop_usec = dt_usec(&now, &last);
-        usec = loop_usec - susec;
-        debug(D_PROCNETDEV_LOOP, "CHECK: last loop took %llu usec (worked for %llu, slept for %llu).", loop_usec, usec, susec);
-
-        if(usec < (localhost->rrd_update_every * USEC_PER_SEC / 2ULL)) susec = (localhost->rrd_update_every * USEC_PER_SEC) - usec;
-        else susec = localhost->rrd_update_every * USEC_PER_SEC / 2ULL;
-
-        // --------------------------------------------------------------------
-        // Calculate loop time
-
-        last.tv_sec = now.tv_sec;
-        last.tv_usec = now.tv_usec;
-        total_susec += loop_usec;
-
-        // --------------------------------------------------------------------
-        // check chart 1
-
-        if(check1->counter_done) rrdset_next_usec(check1, loop_usec);
-        rrddim_set(check1, "absolute", 1000000);
-        rrddim_set(check1, "incremental", total_susec);
-        rrdset_done(check1);
-
-        // --------------------------------------------------------------------
-        // check chart 2
-
-        if(check2->counter_done) rrdset_next(check2);
-        rrddim_set(check2, "absolute", 1000000);
-        rrddim_set(check2, "incremental", total_susec);
-        rrdset_done(check2);
-
-        // --------------------------------------------------------------------
-        // check chart 3
-
-        if(!apps_cpu) apps_cpu = rrdset_find_localhost("apps.cpu");
-        if(check3->counter_done) rrdset_next_usec(check3, loop_usec);
-        now_realtime_timeval(&loop);
-        rrddim_set(check3, "caller", (long long) dt_usec(&loop, &check1->last_collected_time));
-        rrddim_set(check3, "netdata", (long long) dt_usec(&loop, &check2->last_collected_time));
-        if(apps_cpu) rrddim_set(check3, "apps.plugin", (long long) dt_usec(&loop, &apps_cpu->last_collected_time));
-        rrdset_done(check3);
-    }
-
-    netdata_thread_cleanup_pop(1);
-    return NULL;
-}
-
-#endif // NETDATA_INTERNAL_CHECKS
diff --git a/src/plugin_checks.h b/src/plugin_checks.h
deleted file mode 100644
index 05a40bea0..000000000
--- a/src/plugin_checks.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef NETDATA_PLUGIN_CHECKS_H
-#define NETDATA_PLUGIN_CHECKS_H 1
-
-#ifdef NETDATA_INTERNAL_CHECKS
-void *checks_main(void *ptr);
-#endif // NETDATA_INTERNAL_CHECKS
-
-#endif /* NETDATA_PLUGIN_CHECKS_H */
diff --git a/src/plugin_freebsd.c b/src/plugin_freebsd.c
deleted file mode 100644
index a0d3dc2ea..000000000
--- a/src/plugin_freebsd.c
+++ /dev/null
@@ -1,173 +0,0 @@
-#include "common.h"
-
-static struct freebsd_module {
-    const char *name;
-    const char *dim;
-
-    int enabled;
-
-    int (*func)(int update_every, usec_t dt);
-    usec_t duration;
-
-    RRDDIM *rd;
-
-} freebsd_modules[] = {
-
-    // system metrics
-    { .name = "kern.cp_time",            .dim = "cp_time",         .enabled = 1, .func = do_kern_cp_time },
-    { .name = "vm.loadavg",              .dim = "loadavg",         .enabled = 1, .func = do_vm_loadavg },
-    { .name = "system.ram",              .dim = "system_ram",      .enabled = 1, .func = do_system_ram },
-    { .name = "vm.swap_info",            .dim = "swap",            .enabled = 1, .func = do_vm_swap_info },
-    { .name = "vm.stats.vm.v_swappgs",   .dim = "swap_io",         .enabled = 1, .func = do_vm_stats_sys_v_swappgs },
-    { .name = "vm.vmtotal",              .dim = "vmtotal",         .enabled = 1, .func = do_vm_vmtotal },
-    { .name = "vm.stats.vm.v_forks",     .dim = "forks",           .enabled = 1, .func = do_vm_stats_sys_v_forks },
-    { .name = "vm.stats.sys.v_swtch",    .dim = "context_swtch",   .enabled = 1, .func = do_vm_stats_sys_v_swtch },
-    { .name = "hw.intrcnt",              .dim = "hw_intr",         .enabled = 1, .func = do_hw_intcnt },
-    { .name = "vm.stats.sys.v_intr",     .dim = "dev_intr",        .enabled = 1, .func = do_vm_stats_sys_v_intr },
-    { .name = "vm.stats.sys.v_soft",     .dim = "soft_intr",       .enabled = 1, .func = do_vm_stats_sys_v_soft },
-    { .name = "net.isr",                 .dim = "net_isr",         .enabled = 1, .func = do_net_isr },
-    { .name = "kern.ipc.sem",            .dim = "semaphores",      .enabled = 1, .func = do_kern_ipc_sem },
-    { .name = "kern.ipc.shm",            .dim = "shared_memory",   .enabled = 1, .func = do_kern_ipc_shm },
-    { .name = "kern.ipc.msq",            .dim = "message_queues",  .enabled = 1, .func = do_kern_ipc_msq },
-    { .name = "uptime",                  .dim = "uptime",          .enabled = 1, .func = do_uptime },
-
-    // memory metrics
-    { .name = "vm.stats.vm.v_pgfaults",  .dim = "pgfaults",        .enabled = 1, .func = do_vm_stats_sys_v_pgfaults },
-
-    // CPU metrics
-    { .name = "kern.cp_times",           .dim = "cp_times",        .enabled = 1, .func = do_kern_cp_times },
-    { .name = "dev.cpu.temperature",     .dim = "cpu_temperature", .enabled = 1, .func = do_dev_cpu_temperature },
-    { .name = "dev.cpu.0.freq",          .dim = "cpu_frequency",   .enabled = 1, .func = do_dev_cpu_0_freq },
-
-    // disk metrics
-    { .name = "kern.devstat",            .dim = "kern_devstat",    .enabled = 1, .func = do_kern_devstat },
-    { .name = "getmntinfo",              .dim = "getmntinfo",      .enabled = 1, .func = do_getmntinfo },
-
-    // network metrics
-    { .name = "net.inet.tcp.states",     .dim = "tcp_states",      .enabled = 1, .func = do_net_inet_tcp_states },
-    { .name = "net.inet.tcp.stats",      .dim = "tcp_stats",       .enabled = 1, .func = do_net_inet_tcp_stats },
-    { .name = "net.inet.udp.stats",      .dim = "udp_stats",       .enabled = 1, .func = do_net_inet_udp_stats },
-    { .name = "net.inet.icmp.stats",     .dim = "icmp_stats",      .enabled = 1, .func = do_net_inet_icmp_stats },
-    { .name = "net.inet.ip.stats",       .dim = "ip_stats",        .enabled = 1, .func = do_net_inet_ip_stats },
-    { .name = "net.inet6.ip6.stats",     .dim = "ip6_stats",       .enabled = 1, .func = do_net_inet6_ip6_stats },
-    { .name = "net.inet6.icmp6.stats",   .dim = "icmp6_stats",     .enabled = 1, .func = do_net_inet6_icmp6_stats },
-
-    // network interfaces metrics
-    { .name = "getifaddrs",              .dim = "getifaddrs",      .enabled = 1, .func = do_getifaddrs },
-
-    // ZFS metrics
-    { .name = "kstat.zfs.misc.arcstats", .dim = "arcstats",        .enabled = 1, .func = do_kstat_zfs_misc_arcstats },
-    { .name = "kstat.zfs.misc.zio_trim", .dim = "trim",            .enabled = 1, .func = do_kstat_zfs_misc_zio_trim },
-
-    // ipfw metrics
-    { .name = "ipfw",                    .dim = "ipfw",            .enabled = 1, .func = do_ipfw },
-
-    // the terminator of this array
-    { .name = NULL, .dim = NULL, .enabled = 0, .func = NULL }
-};
-
-static void freebsd_main_cleanup(void *ptr) {
-    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
-    info("cleaning up...");
-
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *freebsd_main(void *ptr) {
-    netdata_thread_cleanup_push(freebsd_main_cleanup, ptr);
-
-    int vdo_cpu_netdata = config_get_boolean("plugin:freebsd", "netdata server resources", 1);
-
-    // initialize FreeBSD plugin
-    if (freebsd_plugin_init())
-        netdata_cleanup_and_exit(1);
-
-    // check the enabled status for each module
-    int i;
-    for(i = 0 ; freebsd_modules[i].name ;i++) {
-        struct freebsd_module *pm = &freebsd_modules[i];
-
-        pm->enabled = config_get_boolean("plugin:freebsd", pm->name, pm->enabled);
-        pm->duration = 0ULL;
-        pm->rd = NULL;
-    }
-
-    usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
-    heartbeat_t hb;
-    heartbeat_init(&hb);
-
-    while(!netdata_exit) {
-        usec_t hb_dt = heartbeat_next(&hb, step);
-        usec_t duration = 0ULL;
-
-        if(unlikely(netdata_exit)) break;
-
-        // BEGIN -- the job to be done
-
-        for(i = 0 ; freebsd_modules[i].name ;i++) {
-            struct freebsd_module *pm = &freebsd_modules[i];
-            if(unlikely(!pm->enabled)) continue;
-
-            debug(D_PROCNETDEV_LOOP, "FREEBSD calling %s.", pm->name);
-
-            pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
-            pm->duration = heartbeat_dt_usec(&hb) - duration;
-            duration += pm->duration;
-
-            if(unlikely(netdata_exit)) break;
-        }
-
-        // END -- the job is done
-
-        // --------------------------------------------------------------------
-
-        if(vdo_cpu_netdata) {
-            static RRDSET *st = NULL;
-
-            if(unlikely(!st)) {
-                st = rrdset_find_bytype_localhost("netdata", "plugin_freebsd_modules");
-
-                if(!st) {
-                    st = rrdset_create_localhost(
-                            "netdata"
-                            , "plugin_freebsd_modules"
-                            , NULL
-                            , "freebsd"
-                            , NULL
-                            , "NetData FreeBSD Plugin Modules Durations"
-                            , "milliseconds/run"
-                            , "netdata"
-                            , "stats"
-                            , 132001
-                            , localhost->rrd_update_every
-                            , RRDSET_TYPE_STACKED
-                    );
-
-                    for(i = 0 ; freebsd_modules[i].name ;i++) {
-                        struct freebsd_module *pm = &freebsd_modules[i];
-                        if(unlikely(!pm->enabled)) continue;
-
-                        pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
-                    }
-                }
-            }
-            else rrdset_next(st);
-
-            for(i = 0 ; freebsd_modules[i].name ;i++) {
-                struct freebsd_module *pm = &freebsd_modules[i];
-                if(unlikely(!pm->enabled)) continue;
-
-                rrddim_set_by_pointer(st, pm->rd, pm->duration);
-            }
-            rrdset_done(st);
-
-            global_statistics_charts();
-            registry_statistics();
-        }
-    }
-
-    netdata_thread_cleanup_pop(1);
-    return NULL;
-}
diff --git a/src/plugin_freebsd.h b/src/plugin_freebsd.h
deleted file mode 100644
index 0a6f40c15..000000000
--- a/src/plugin_freebsd.h
+++ /dev/null
@@ -1,130 +0,0 @@
-#ifndef NETDATA_PLUGIN_FREEBSD_H
-#define NETDATA_PLUGIN_FREEBSD_H 1
-
-#include <sys/sysctl.h>
-
-#define KILO_FACTOR 1024
-#define MEGA_FACTOR 1048576     // 1024 * 1024
-#define GIGA_FACTOR 1073741824  // 1024 * 1024 * 1024
-
-#define MAX_INT_DIGITS 10 // maximum number of digits for int
-
-void *freebsd_main(void *ptr);
-
-extern int freebsd_plugin_init();
-
-extern int do_vm_loadavg(int update_every, usec_t dt);
-extern int do_vm_vmtotal(int update_every, usec_t dt);
-extern int do_kern_cp_time(int update_every, usec_t dt);
-extern int do_kern_cp_times(int update_every, usec_t dt);
-extern int do_dev_cpu_temperature(int update_every, usec_t dt);
-extern int do_dev_cpu_0_freq(int update_every, usec_t dt);
-extern int do_hw_intcnt(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_swtch(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_forks(int update_every, usec_t dt);
-extern int do_vm_swap_info(int update_every, usec_t dt);
-extern int do_system_ram(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt);
-extern int do_kern_ipc_sem(int update_every, usec_t dt);
-extern int do_kern_ipc_shm(int update_every, usec_t dt);
-extern int do_kern_ipc_msq(int update_every, usec_t dt);
-extern int do_uptime(int update_every, usec_t dt);
-extern int do_net_isr(int update_every, usec_t dt);
-extern int do_net_inet_tcp_states(int update_every, usec_t dt);
-extern int do_net_inet_tcp_stats(int update_every, usec_t dt);
-extern int do_net_inet_udp_stats(int update_every, usec_t dt);
-extern int do_net_inet_icmp_stats(int update_every, usec_t dt);
-extern int do_net_inet_ip_stats(int update_every, usec_t dt);
-extern int do_net_inet6_ip6_stats(int update_every, usec_t dt);
-extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
-extern int do_getifaddrs(int update_every, usec_t dt);
-extern int do_getmntinfo(int update_every, usec_t dt);
-extern int do_kern_devstat(int update_every, usec_t dt);
-extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
-extern int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt);
-extern int do_ipfw(int update_every, usec_t dt);
-
-#define GETSYSCTL_MIB(name, mib) getsysctl_mib(name, mib, sizeof(mib)/sizeof(int))
-
-static inline int getsysctl_mib(const char *name, int *mib, size_t len)
-{
-    size_t nlen = len;
-
-    if (unlikely(sysctlnametomib(name, mib, &nlen) == -1)) {
-        error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
-        return 1;
-    }
-    if (unlikely(nlen != len)) {
-        error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
-        return 1;
-    }
-    return 0;
-}
-
-#define GETSYSCTL_SIMPLE(name, mib, var) getsysctl_simple(name, mib, sizeof(mib)/sizeof(int), &(var), sizeof(var))
-#define GETSYSCTL_WSIZE(name, mib, var, size) getsysctl_simple(name, mib, sizeof(mib)/sizeof(int), var, size)
-
-static inline int getsysctl_simple(const char *name, int *mib, size_t miblen, void *ptr, size_t len)
-{
-    size_t nlen = len;
-
-    if (unlikely(!mib[0]))
-        if (unlikely(getsysctl_mib(name, mib, miblen)))
-            return 1;
-
-    if (unlikely(sysctl(mib, miblen, ptr, &nlen, NULL, 0) == -1)) {
-        error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
-        return 1;
-    }
-    if (unlikely(nlen != len)) {
-        error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
-        return 1;
-    }
-
-    return 0;
-}
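A usage sketch for the helpers above (FreeBSD only, illustration): the static mib is zero-initialized, so the expensive name-to-mib lookup in getsysctl_mib() runs only once and later calls reuse the cached mib. struct loadavg comes from <sys/resource.h>:

    static inline int read_loadavg_sketch(double out[3]) {
        static int mib[2] = { 0, 0 };   /* mib[0] == 0 triggers the one-time lookup */
        struct loadavg la;
        int i;

        if(GETSYSCTL_SIMPLE("vm.loadavg", mib, la))
            return 1;

        for(i = 0; i < 3; i++)
            out[i] = (double)la.ldavg[i] / (double)la.fscale;
        return 0;
    }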
-#define GETSYSCTL_SIZE(name, mib, size) getsysctl(name, mib, sizeof(mib)/sizeof(int), NULL, &(size))
-#define GETSYSCTL(name, mib, var, size) getsysctl(name, mib, sizeof(mib)/sizeof(int), &(var), &(size))
-
-static inline int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len)
-{
-    size_t nlen = *len;
-
-    if (unlikely(!mib[0]))
-        if (unlikely(getsysctl_mib(name, mib, miblen)))
-            return 1;
-
-    if (unlikely(sysctl(mib, miblen, ptr, len, NULL, 0) == -1)) {
-        error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
-        return 1;
-    }
-    if (unlikely(ptr != NULL && nlen != *len)) {
-        error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)*len, (unsigned long)nlen);
-        return 1;
-    }
-
-    return 0;
-}
-
-#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
-
-static inline int getsysctl_by_name(const char *name, void *ptr, size_t len)
-{
-    size_t nlen = len;
-
-    if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) {
-        error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
-        return 1;
-    }
-    if (unlikely(nlen != len)) {
-        error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
-        return 1;
-    }
-    return 0;
-}
-
-#endif /* NETDATA_PLUGIN_FREEBSD_H */
diff --git a/src/plugin_idlejitter.c b/src/plugin_idlejitter.c
deleted file mode 100644
index 77bd95d55..000000000
--- a/src/plugin_idlejitter.c
+++ /dev/null
@@ -1,90 +0,0 @@
-#include "common.h"
-
-#define CPU_IDLEJITTER_SLEEP_TIME_MS 20
-
-static void cpuidlejitter_main_cleanup(void *ptr) {
-    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
-    info("cleaning up...");
-
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *cpuidlejitter_main(void *ptr) {
-    netdata_thread_cleanup_push(cpuidlejitter_main_cleanup, ptr);
-
-    usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
-    if(sleep_ut <= 0) {
-        config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
-        sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
-    }
-
-    RRDSET *st = rrdset_create_localhost(
-            "system"
-            , "idlejitter"
-            , NULL
-            , "idlejitter"
-            , NULL
-            , "CPU Idle Jitter"
-            , "microseconds lost/s"
-            , "idlejitter"
-            , NULL
-            , 800
-            , localhost->rrd_update_every
-            , RRDSET_TYPE_AREA
-    );
-    RRDDIM *rd_min = rrddim_add(st, "min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-    RRDDIM *rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-    RRDDIM *rd_avg = rrddim_add(st, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
-    usec_t update_every_ut = localhost->rrd_update_every * USEC_PER_SEC;
-    struct timeval before, after;
-    unsigned long long counter;
-
-    for(counter = 0; 1 ;counter++) {
-        int iterations = 0;
-        usec_t error_total = 0,
-               error_min = 0,
-               error_max = 0,
-               elapsed = 0;
-
-        if(netdata_exit) break;
-
-        while(elapsed < update_every_ut) {
-            now_monotonic_timeval(&before);
-            sleep_usec(sleep_ut);
-            now_monotonic_timeval(&after);
-
-            usec_t dt = dt_usec(&after, &before);
-            elapsed += dt;
-
-            usec_t error = dt - sleep_ut;
-            error_total += error;
-
-            if(unlikely(!iterations))
-                error_min = error;
-            else if(error < error_min)
-                error_min = error;
-
-            if(error > error_max)
-                error_max = error;
-
-            iterations++;
-        }
-
-        if(netdata_exit) break;
-
-        if(iterations) {
-            if (likely(counter)) rrdset_next(st);
-            rrddim_set_by_pointer(st, rd_min, error_min);
-            rrddim_set_by_pointer(st, rd_max, error_max);
-            rrddim_set_by_pointer(st, rd_avg, error_total / iterations);
-            rrdset_done(st);
-        }
-    }
-
-    netdata_thread_cleanup_pop(1);
-    return NULL;
-}
-
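The measurement above boils down to requesting a fixed sleep, timing how long the sleep really took, and charting the overshoot. A standalone sketch of that core (illustration, not the plugin):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Sleep 20ms a few times and print how many microseconds each sleep overshot. */
    int main(void) {
        const long request_us = 20000;
        int i;
        for(i = 0; i < 5; i++) {
            struct timespec a, b;
            clock_gettime(CLOCK_MONOTONIC, &a);
            usleep(request_us);
            clock_gettime(CLOCK_MONOTONIC, &b);
            long elapsed_us = (b.tv_sec - a.tv_sec) * 1000000L + (b.tv_nsec - a.tv_nsec) / 1000L;
            printf("idle jitter: %ld us\n", elapsed_us - request_us);
        }
        return 0;
    }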
diff --git a/src/plugin_idlejitter.h b/src/plugin_idlejitter.h
deleted file mode 100644
index dc82f052f..000000000
--- a/src/plugin_idlejitter.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef NETDATA_PLUGIN_IDLEJITTER_H
-#define NETDATA_PLUGIN_IDLEJITTER_H 1
-
-extern void *cpuidlejitter_main(void *ptr);
-
-#endif /* NETDATA_PLUGIN_IDLEJITTER_H */
diff --git a/src/plugin_macos.c b/src/plugin_macos.c
deleted file mode 100644
index 6ac3d25d1..000000000
--- a/src/plugin_macos.c
+++ /dev/null
@@ -1,67 +0,0 @@
-#include "common.h"
-
-static void macos_main_cleanup(void *ptr) {
-    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
-    info("cleaning up...");
-
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *macos_main(void *ptr) {
-    netdata_thread_cleanup_push(macos_main_cleanup, ptr);
-
-    // when ZERO, attempt to do it
-    int vdo_cpu_netdata    = !config_get_boolean("plugin:macos", "netdata server resources", 1);
-    int vdo_macos_sysctl   = !config_get_boolean("plugin:macos", "sysctl", 1);
-    int vdo_macos_mach_smi = !config_get_boolean("plugin:macos", "mach system management interface", 1);
-    int vdo_macos_iokit    = !config_get_boolean("plugin:macos", "iokit", 1);
-
-    // keep track of the time each module was called
-    unsigned long long sutime_macos_sysctl   = 0ULL;
-    unsigned long long sutime_macos_mach_smi = 0ULL;
-    unsigned long long sutime_macos_iokit    = 0ULL;
-
-    usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
-    heartbeat_t hb;
-    heartbeat_init(&hb);
-
-    while(!netdata_exit) {
-        usec_t hb_dt = heartbeat_next(&hb, step);
-
-        if(unlikely(netdata_exit)) break;
-
-        // BEGIN -- the job to be done
-
-        if(!vdo_macos_sysctl) {
-            debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_sysctl().");
-            vdo_macos_sysctl = do_macos_sysctl(localhost->rrd_update_every, hb_dt);
-        }
-        if(unlikely(netdata_exit)) break;
-
-        if(!vdo_macos_mach_smi) {
-            debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_mach_smi().");
-            vdo_macos_mach_smi = do_macos_mach_smi(localhost->rrd_update_every, hb_dt);
-        }
-        if(unlikely(netdata_exit)) break;
-
-        if(!vdo_macos_iokit) {
-            debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_iokit().");
-            vdo_macos_iokit = do_macos_iokit(localhost->rrd_update_every, hb_dt);
-        }
-        if(unlikely(netdata_exit)) break;
-
-        // END -- the job is done
-
-        // --------------------------------------------------------------------
-
-        if(!vdo_cpu_netdata) {
-            global_statistics_charts();
-            registry_statistics();
-        }
-    }
-
-    netdata_thread_cleanup_pop(1);
-    return NULL;
-}
diff --git a/src/plugin_macos.h b/src/plugin_macos.h
deleted file mode 100644
index 6ccf3e861..000000000
--- a/src/plugin_macos.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef NETDATA_PLUGIN_MACOS_H
-#define NETDATA_PLUGIN_MACOS_H 1
-
-void *macos_main(void *ptr);
-
-#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
-
-extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
-
-extern int do_macos_sysctl(int update_every, usec_t dt);
-extern int do_macos_mach_smi(int update_every, usec_t dt);
-extern int do_macos_iokit(int update_every, usec_t dt);
-
-#endif /* NETDATA_PLUGIN_MACOS_H */
diff --git a/src/plugin_nfacct.c b/src/plugin_nfacct.c
deleted file mode 100644
index 02815ef04..000000000
--- a/src/plugin_nfacct.c
+++ /dev/null
@@ -1,819 +0,0 @@
-#include "common.h"
-
-#ifdef INTERNAL_PLUGIN_NFACCT
-
-#ifdef HAVE_LIBMNL
-#include <libmnl/libmnl.h>
-
-static inline size_t mnl_buffer_size() {
-    long s = MNL_SOCKET_BUFFER_SIZE;
-    if(s <= 0) return 8192;
-    return (size_t)s;
-}
-
-// ----------------------------------------------------------------------------
-// DO_NFSTAT - collect netfilter connection tracker statistics via netlink
-// example: https://github.com/formorer/pkg-conntrack-tools/blob/master/src/conntrack.c
-
-#ifdef HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
-#define DO_NFSTAT 1
-
-#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
-#define RRD_TYPE_NET_STAT_CONNTRACK "netlink" // FIXME: should be "conntrack" when merged with the /proc plugin
-
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-static struct {
-    int update_every;
-    char *buf;
-    size_t buf_size;
-    struct mnl_socket *mnl;
-    struct nlmsghdr *nlh;
-    struct nfgenmsg *nfh;
-    unsigned int seq;
-    uint32_t portid;
-
-    struct nlattr *tb[CTA_STATS_MAX+1];
-    const char *attr2name[CTA_STATS_MAX+1];
-    kernel_uint_t metrics[CTA_STATS_MAX+1];
-
-    struct nlattr *tb_exp[CTA_STATS_EXP_MAX+1];
-    const char *attr2name_exp[CTA_STATS_EXP_MAX+1];
-    kernel_uint_t metrics_exp[CTA_STATS_EXP_MAX+1];
-} nfstat_root = {
-    .update_every = 1,
-    .buf = NULL,
-    .buf_size = 0,
-    .mnl = NULL,
-    .nlh = NULL,
-    .nfh = NULL,
-    .seq = 0,
-    .portid = 0,
-    .tb = {},
-    .attr2name = {
-        [CTA_STATS_SEARCHED]       = "searched",
-        [CTA_STATS_FOUND]          = "found",
-        [CTA_STATS_NEW]            = "new",
-        [CTA_STATS_INVALID]        = "invalid",
-        [CTA_STATS_IGNORE]         = "ignore",
-        [CTA_STATS_DELETE]         = "delete",
-        [CTA_STATS_DELETE_LIST]    = "delete_list",
-        [CTA_STATS_INSERT]         = "insert",
-        [CTA_STATS_INSERT_FAILED]  = "insert_failed",
-        [CTA_STATS_DROP]           = "drop",
-        [CTA_STATS_EARLY_DROP]     = "early_drop",
-        [CTA_STATS_ERROR]          = "icmp_error",
-        [CTA_STATS_SEARCH_RESTART] = "search_restart",
-    },
-    .metrics = {},
-    .tb_exp = {},
-    .attr2name_exp = {
-        [CTA_STATS_EXP_NEW]    = "new",
-        [CTA_STATS_EXP_CREATE] = "created",
-        [CTA_STATS_EXP_DELETE] = "deleted",
-    },
-    .metrics_exp = {}
-};
-
-
-static int nfstat_init(int update_every) {
-    nfstat_root.update_every = update_every;
-
-    nfstat_root.buf_size = mnl_buffer_size();
-    nfstat_root.buf = mallocz(nfstat_root.buf_size);
-
-    nfstat_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
-    if(!nfstat_root.mnl) {
-        error("NFSTAT: mnl_socket_open() failed");
-        return 1;
-    }
-
-    nfstat_root.seq = (unsigned int)now_realtime_sec() - 1;
-
-    if(mnl_socket_bind(nfstat_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
-        error("NFSTAT: mnl_socket_bind() failed");
-        return 1;
-    }
-    nfstat_root.portid = mnl_socket_get_portid(nfstat_root.mnl);
-
-    return 0;
-}
-
-static void nfstat_cleanup() {
-    if(nfstat_root.mnl) {
-        mnl_socket_close(nfstat_root.mnl);
-        nfstat_root.mnl = NULL;
-    }
-
-    freez(nfstat_root.buf);
-    nfstat_root.buf = NULL;
-    nfstat_root.buf_size = 0;
-}
-
-static struct nlmsghdr * nfct_mnl_nlmsghdr_put(char *buf, uint16_t subsys, uint16_t type, uint8_t family, uint32_t seq) {
-    struct nlmsghdr *nlh;
-    struct nfgenmsg *nfh;
-
-    nlh = mnl_nlmsg_put_header(buf);
-    nlh->nlmsg_type = (subsys << 8) | type;
-    nlh->nlmsg_flags = NLM_F_REQUEST|NLM_F_DUMP;
-    nlh->nlmsg_seq = seq;
-
-    nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
-    nfh->nfgen_family = family;
-    nfh->version = NFNETLINK_V0;
-    nfh->res_id = 0;
-
-    return nlh;
-}
-
-static int nfct_stats_attr_cb(const struct nlattr *attr, void *data) {
-    const struct nlattr **tb = data;
-    int type = mnl_attr_get_type(attr);
-
-    if (mnl_attr_type_valid(attr, CTA_STATS_MAX) < 0)
-        return MNL_CB_OK;
-
-    if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
-        error("NFSTAT: mnl_attr_validate() failed");
-        return MNL_CB_ERROR;
-    }
-
-    tb[type] = attr;
-    return MNL_CB_OK;
-}
-
-static int nfstat_callback(const struct nlmsghdr *nlh, void *data) {
-    (void)data;
-
-    struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
-
-    mnl_attr_parse(nlh, sizeof(*nfg), nfct_stats_attr_cb, nfstat_root.tb);
-
-    // printf("cpu=%-4u\t", ntohs(nfg->res_id));
-
-    int i;
-    // add the metrics of this CPU into the metrics
-    for (i = 0; i < CTA_STATS_MAX+1; i++) {
-        if (nfstat_root.tb[i]) {
-            // printf("%s=%u ", nfstat_root.attr2name[i], ntohl(mnl_attr_get_u32(nfstat_root.tb[i])));
-            nfstat_root.metrics[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb[i]));
-        }
-    }
-    // printf("\n");
-
-    return MNL_CB_OK;
-}
-
-static int nfstat_collect_conntrack() {
-    // zero all metrics - we will sum the metrics of all CPUs later
-    int i;
-    for (i = 0; i < CTA_STATS_MAX+1; i++)
-        nfstat_root.metrics[i] = 0;
-
-    // prepare the request
-    nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
-
-    // send the request
-    if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
-        error("NFSTAT: mnl_socket_sendto() failed");
-        return 1;
-    }
-
-    // get the reply
-    ssize_t ret;
-    while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
-        if(mnl_cb_run(
-                nfstat_root.buf
-                , (size_t)ret
-                , nfstat_root.nlh->nlmsg_seq
-                , nfstat_root.portid
-                , nfstat_callback
-                , NULL
-        ) <= MNL_CB_STOP)
-            break;
-    }
-
-    // verify we run without issues
-    if (ret == -1) {
-        error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
-        return 1;
-    }
-
-    return 0;
-}
-
-static int nfexp_stats_attr_cb(const struct nlattr *attr, void *data)
-{
-    const struct nlattr **tb = data;
-    int type = mnl_attr_get_type(attr);
-
-    if (mnl_attr_type_valid(attr, CTA_STATS_EXP_MAX) < 0)
-        return MNL_CB_OK;
-
-    if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
-        error("NFSTAT EXP: mnl_attr_validate() failed");
-        return MNL_CB_ERROR;
-    }
-
-    tb[type] = attr;
-    return MNL_CB_OK;
-}
-
-static int nfstat_callback_exp(const struct nlmsghdr *nlh, void *data) {
-    (void)data;
-
-    struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
-
-    mnl_attr_parse(nlh, sizeof(*nfg), nfexp_stats_attr_cb, nfstat_root.tb_exp);
-
-    int i;
-    for (i = 0; i < CTA_STATS_EXP_MAX+1; i++) {
-        if (nfstat_root.tb_exp[i]) {
-            nfstat_root.metrics_exp[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb_exp[i]));
-        }
-    }
-
-    return MNL_CB_OK;
-}
-
-static int nfstat_collect_conntrack_expectations() {
-    // zero all metrics - we will sum the metrics of all CPUs later
-    int i;
-    for (i = 0; i < CTA_STATS_EXP_MAX+1; i++)
-        nfstat_root.metrics_exp[i] = 0;
-
-    // prepare the request
-    nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK_EXP, IPCTNL_MSG_EXP_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
-
-    // send the request
-    if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
-        error("NFSTAT: mnl_socket_sendto() failed");
-        return 1;
-    }
-
-    // get the reply
-    ssize_t ret;
-    while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
-        if(mnl_cb_run(
-                nfstat_root.buf
-                , (size_t)ret
-                , nfstat_root.nlh->nlmsg_seq
-                , nfstat_root.portid
-                , nfstat_callback_exp
-                , NULL
-        ) <= MNL_CB_STOP)
-            break;
-    }
-
-    // verify we run without issues
-    if (ret == -1) {
-        error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
-        return 1;
-    }
-
-    return 0;
-}
-
-static int nfstat_collect() {
-    nfstat_root.seq++;
-
-    if(nfstat_collect_conntrack())
-        return 1;
-
-    if(nfstat_collect_conntrack_expectations())
-        return 1;
-
-    return 0;
-}
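nfstat_collect_conntrack() and nfstat_collect_conntrack_expectations() repeat the same libmnl dump-request skeleton. As a sketch, the shared part could be factored like this (hypothetical helper, not present in the original source; it reuses the nfstat_root names defined above):

    static int nfstat_dump_sketch(struct nlmsghdr *req, mnl_cb_t cb) {
        /* send the prepared request */
        if(mnl_socket_sendto(nfstat_root.mnl, req, req->nlmsg_len) < 0) {
            error("NFSTAT: mnl_socket_sendto() failed");
            return 1;
        }

        /* read replies until the dump is complete */
        ssize_t ret;
        while((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
            if(mnl_cb_run(nfstat_root.buf, (size_t)ret, req->nlmsg_seq, nfstat_root.portid, cb, NULL) <= MNL_CB_STOP)
                break;
        }

        return ret == -1;   /* 1 on error, 0 on success */
    }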
-static void nfstat_send_metrics() {
-
-    {
-        static RRDSET *st_new = NULL;
-        static RRDDIM *rd_new = NULL, *rd_ignore = NULL, *rd_invalid = NULL;
-
-        if(!st_new) {
-            st_new = rrdset_create_localhost(
-                    RRD_TYPE_NET_STAT_NETFILTER
-                    , RRD_TYPE_NET_STAT_CONNTRACK "_new"
-                    , NULL
-                    , RRD_TYPE_NET_STAT_CONNTRACK
-                    , NULL
-                    , "Connection Tracker New Connections"
-                    , "connections/s"
-                    , "nfacct"
-                    , NULL
-                    , 3001
-                    , nfstat_root.update_every
-                    , RRDSET_TYPE_LINE
-            );
-
-            rd_new     = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_NEW], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_ignore  = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_IGNORE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_invalid = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_INVALID], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else
-            rrdset_next(st_new);
-
-        rrddim_set_by_pointer(st_new, rd_new, (collected_number) nfstat_root.metrics[CTA_STATS_NEW]);
-        rrddim_set_by_pointer(st_new, rd_ignore, (collected_number) nfstat_root.metrics[CTA_STATS_IGNORE]);
-        rrddim_set_by_pointer(st_new, rd_invalid, (collected_number) nfstat_root.metrics[CTA_STATS_INVALID]);
-
-        rrdset_done(st_new);
-    }
-
-    // ----------------------------------------------------------------
-
-    {
-        static RRDSET *st_changes = NULL;
-        static RRDDIM *rd_inserted = NULL, *rd_deleted = NULL, *rd_delete_list = NULL;
-
-        if(!st_changes) {
-            st_changes = rrdset_create_localhost(
-                    RRD_TYPE_NET_STAT_NETFILTER
-                    , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
-                    , NULL
-                    , RRD_TYPE_NET_STAT_CONNTRACK
-                    , NULL
-                    , "Connection Tracker Changes"
-                    , "changes/s"
-                    , "nfacct"
-                    , NULL
-                    , 3002
-                    , nfstat_root.update_every
-                    , RRDSET_TYPE_LINE
-            );
-            rrdset_flag_set(st_changes, RRDSET_FLAG_DETAIL);
-
-            rd_inserted    = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_INSERT], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_deleted     = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_DELETE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_delete_list = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_DELETE_LIST], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else
-            rrdset_next(st_changes);
-
-        rrddim_set_by_pointer(st_changes, rd_inserted, (collected_number) nfstat_root.metrics[CTA_STATS_INSERT]);
-        rrddim_set_by_pointer(st_changes, rd_deleted, (collected_number) nfstat_root.metrics[CTA_STATS_DELETE]);
-        rrddim_set_by_pointer(st_changes, rd_delete_list, (collected_number) nfstat_root.metrics[CTA_STATS_DELETE_LIST]);
-
-        rrdset_done(st_changes);
-    }
-
-    // ----------------------------------------------------------------
-
-    {
-        static RRDSET *st_search = NULL;
-        static RRDDIM *rd_searched = NULL, *rd_restarted = NULL, *rd_found = NULL;
-
-        if(!st_search) {
-            st_search = rrdset_create_localhost(
-                    RRD_TYPE_NET_STAT_NETFILTER
-                    , RRD_TYPE_NET_STAT_CONNTRACK "_search"
-                    , NULL
-                    , RRD_TYPE_NET_STAT_CONNTRACK
-                    , NULL
-                    , "Connection Tracker Searches"
-                    , "searches/s"
-                    , "nfacct"
-                    , NULL
-                    , 3010
-                    , nfstat_root.update_every
-                    , RRDSET_TYPE_LINE
-            );
-            rrdset_flag_set(st_search, RRDSET_FLAG_DETAIL);
-
-            rd_searched  = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_SEARCHED], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_restarted = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_found     = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_FOUND], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else
-            rrdset_next(st_search);
-
-        rrddim_set_by_pointer(st_search, rd_searched, (collected_number) nfstat_root.metrics[CTA_STATS_SEARCHED]);
-        rrddim_set_by_pointer(st_search, rd_restarted, (collected_number) nfstat_root.metrics[CTA_STATS_SEARCH_RESTART]);
-        rrddim_set_by_pointer(st_search, rd_found, (collected_number) nfstat_root.metrics[CTA_STATS_FOUND]);
-
-        rrdset_done(st_search);
-    }
-
-    // ----------------------------------------------------------------
-
-    {
-        static RRDSET *st_errors = NULL;
-        static RRDDIM *rd_error = NULL, *rd_insert_failed = NULL, *rd_drop = NULL, *rd_early_drop = NULL;
-
-        if(!st_errors) {
-            st_errors = rrdset_create_localhost(
-                    RRD_TYPE_NET_STAT_NETFILTER
-                    , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
-                    , NULL
-                    , RRD_TYPE_NET_STAT_CONNTRACK
-                    , NULL
-                    , "Connection Tracker Errors"
-                    , "events/s"
-                    , "nfacct"
-                    , NULL
-                    , 3005
-                    , nfstat_root.update_every
-                    , RRDSET_TYPE_LINE
-            );
-            rrdset_flag_set(st_errors, RRDSET_FLAG_DETAIL);
-
-            rd_error         = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_ERROR], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_insert_failed = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_INSERT_FAILED], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_drop          = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_DROP], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_early_drop    = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_EARLY_DROP], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else
-            rrdset_next(st_errors);
-
-        rrddim_set_by_pointer(st_errors, rd_error, (collected_number) nfstat_root.metrics[CTA_STATS_ERROR]);
-        rrddim_set_by_pointer(st_errors, rd_insert_failed, (collected_number) nfstat_root.metrics[CTA_STATS_INSERT_FAILED]);
-        rrddim_set_by_pointer(st_errors, rd_drop, (collected_number) nfstat_root.metrics[CTA_STATS_DROP]);
-        rrddim_set_by_pointer(st_errors, rd_early_drop, (collected_number) nfstat_root.metrics[CTA_STATS_EARLY_DROP]);
-
-        rrdset_done(st_errors);
-    }
-
-    // ----------------------------------------------------------------
-
-    {
-        static RRDSET *st_expect = NULL;
-        static RRDDIM *rd_new = NULL, *rd_created = NULL, *rd_deleted = NULL;
-
-        if(!st_expect) {
-            st_expect = rrdset_create_localhost(
-                    RRD_TYPE_NET_STAT_NETFILTER
-                    , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
-                    , NULL
-                    , RRD_TYPE_NET_STAT_CONNTRACK
-                    , NULL
-                    , "Connection Tracker Expectations"
-                    , "expectations/s"
-                    , "nfacct"
-                    , NULL
-                    , 3003
-                    , nfstat_root.update_every
-                    , RRDSET_TYPE_LINE
-            );
-            rrdset_flag_set(st_expect, RRDSET_FLAG_DETAIL);
-
-            rd_created = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_CREATE], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_deleted = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_DELETE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_new     = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_NEW], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else
-            rrdset_next(st_expect);
-
-        rrddim_set_by_pointer(st_expect, rd_created, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_CREATE]);
-        rrddim_set_by_pointer(st_expect, rd_deleted, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_DELETE]);
-        rrddim_set_by_pointer(st_expect, rd_new, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_NEW]);
-
-        rrdset_done(st_expect);
-    }
-
-}
-
-#endif // HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
-
-
-// ----------------------------------------------------------------------------
-// DO_NFACCT - collect netfilter accounting statistics via netlink
-
-#ifdef HAVE_LIBNETFILTER_ACCT
-#define DO_NFACCT 1
-
-#include <libnetfilter_acct/libnetfilter_acct.h>
-
-struct nfacct_data {
-    char *name;
-    uint32_t hash;
-
-    uint64_t pkts;
-    uint64_t bytes;
-
-    RRDDIM *rd_bytes;
-    RRDDIM *rd_packets;
-
-    int updated;
-
-    struct nfacct_data *next;
-};
-
-static struct {
-    int update_every;
-    char *buf;
-    size_t buf_size;
-    struct mnl_socket *mnl;
-    struct nlmsghdr *nlh;
-    unsigned int seq;
-    uint32_t portid;
-    struct nfacct *nfacct_buffer;
-    struct nfacct_data *nfacct_metrics;
-} nfacct_root = {
-    .update_every = 1,
-    .buf = NULL,
-    .buf_size = 0,
-    .mnl = NULL,
-    .nlh = NULL,
-    .seq = 0,
-    .portid = 0,
-    .nfacct_buffer = NULL,
-    .nfacct_metrics = NULL
-};
-
-static inline struct nfacct_data *nfacct_data_get(const char *name, uint32_t hash) {
-    struct nfacct_data *d = NULL, *last = NULL;
-    for(d = nfacct_root.nfacct_metrics; d ; last = d, d = d->next) {
-        if(unlikely(d->hash == hash && !strcmp(d->name, name)))
-            return d;
-    }
-
-    d = callocz(1, sizeof(struct nfacct_data));
-    d->name = strdupz(name);
-    d->hash = hash;
-
-    if(!last) {
-        d->next = nfacct_root.nfacct_metrics;
-        nfacct_root.nfacct_metrics = d;
-    }
-    else {
-        d->next = last->next;
-        last->next = d;
-    }
-
-    return d;
-}
-
-static int nfacct_init(int update_every) {
-    nfacct_root.update_every = update_every;
-
-    nfacct_root.buf_size = mnl_buffer_size();
-    nfacct_root.buf = mallocz(nfacct_root.buf_size);
-
-    nfacct_root.nfacct_buffer = nfacct_alloc();
-    if(!nfacct_root.nfacct_buffer) {
-        error("nfacct.plugin: nfacct_alloc() failed.");
-        return 1;
-    }
-
-    nfacct_root.seq = (unsigned int)now_realtime_sec() - 1;
-
-    nfacct_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
-    if(!nfacct_root.mnl) {
-        error("nfacct.plugin: mnl_socket_open() failed");
-        return 1;
-    }
-
-    if(mnl_socket_bind(nfacct_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
-        error("nfacct.plugin: mnl_socket_bind() failed");
-        return 1;
-    }
-    nfacct_root.portid = mnl_socket_get_portid(nfacct_root.mnl);
-
-    return 0;
-}
-
-static void nfacct_cleanup() {
-    if(nfacct_root.mnl) {
-        mnl_socket_close(nfacct_root.mnl);
-        nfacct_root.mnl = NULL;
-    }
-
-    if(nfacct_root.nfacct_buffer) {
-        nfacct_free(nfacct_root.nfacct_buffer);
-        nfacct_root.nfacct_buffer = NULL;
-    }
-
-    freez(nfacct_root.buf);
-    nfacct_root.buf = NULL;
-    nfacct_root.buf_size = 0;
-
-    // FIXME: cleanup the metrics linked list
-}
-
-static int nfacct_callback(const struct nlmsghdr *nlh, void *data) {
-    (void)data;
-
-    if(nfacct_nlmsg_parse_payload(nlh, nfacct_root.nfacct_buffer) < 0) {
-        error("NFACCT: nfacct_nlmsg_parse_payload() failed.");
-        return MNL_CB_OK;
-    }
-
-    const char *name = nfacct_attr_get_str(nfacct_root.nfacct_buffer, NFACCT_ATTR_NAME);
-    uint32_t hash = simple_hash(name);
-
-    struct nfacct_data *d = nfacct_data_get(name, hash);
-
-    d->pkts  = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_PKTS);
-    d->bytes = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_BYTES);
-    d->updated = 1;
-
-    return MNL_CB_OK;
-}
-
-static int nfacct_collect() {
-    // mark all old metrics as not-updated
-    struct nfacct_data *d;
-    for(d = nfacct_root.nfacct_metrics; d ; d = d->next)
-        d->updated = 0;
-
-    // prepare the request
-    nfacct_root.seq++;
-    nfacct_root.nlh = nfacct_nlmsg_build_hdr(nfacct_root.buf, NFNL_MSG_ACCT_GET, NLM_F_DUMP, (uint32_t)nfacct_root.seq);
-    if(!nfacct_root.nlh) {
failed"); - return 1; - } - - // send the request - if(mnl_socket_sendto(nfacct_root.mnl, nfacct_root.nlh, nfacct_root.nlh->nlmsg_len) < 0) { - error("NFACCT: mnl_socket_sendto() failed"); - return 1; - } - - // get the reply - ssize_t ret; - while((ret = mnl_socket_recvfrom(nfacct_root.mnl, nfacct_root.buf, nfacct_root.buf_size)) > 0) { - if(mnl_cb_run( - nfacct_root.buf - , (size_t)ret - , nfacct_root.seq - , nfacct_root.portid - , nfacct_callback - , NULL - ) <= 0) - break; - } - - // verify we run without issues - if (ret == -1) { - error("NFACCT: error communicating with kernel. This plugin can only work when netdata runs as root."); - return 1; - } - - return 0; -} - -static void nfacct_send_metrics() { - static RRDSET *st_bytes = NULL, *st_packets = NULL; - - if(!nfacct_root.nfacct_metrics) return; - struct nfacct_data *d; - - if(!st_packets) { - st_packets = rrdset_create_localhost( - "netfilter" - , "nfacct_packets" - , NULL - , "nfacct" - , NULL - , "Netfilter Accounting Packets" - , "packets/s" - , "nfacct" - , NULL - , 3206 - , nfacct_root.update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st_packets); - - for(d = nfacct_root.nfacct_metrics; d ; d = d->next) { - if(likely(d->updated)) { - if(unlikely(!d->rd_packets)) - d->rd_packets = rrddim_add( - st_packets - , d->name - , NULL - , 1 - , nfacct_root.update_every - , RRD_ALGORITHM_INCREMENTAL - ); - - rrddim_set_by_pointer( - st_packets - , d->rd_packets - , (collected_number)d->pkts - ); - } - } - - rrdset_done(st_packets); - - // ---------------------------------------------------------------- - - st_bytes = rrdset_find_bytype_localhost("netfilter", "nfacct_bytes"); - if(!st_bytes) { - st_bytes = rrdset_create_localhost( - "netfilter" - , "nfacct_bytes" - , NULL - , "nfacct" - , NULL - , "Netfilter Accounting Bandwidth" - , "kilobytes/s" - , "nfacct" - , NULL - , 3207 - , nfacct_root.update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st_bytes); - - for(d = nfacct_root.nfacct_metrics; d ; d = d->next) { - if(likely(d->updated)) { - if(unlikely(!d->rd_bytes)) - d->rd_bytes = rrddim_add( - st_bytes - , d->name - , NULL - , 1 - , 1000 * nfacct_root.update_every - , RRD_ALGORITHM_INCREMENTAL - ); - - rrddim_set_by_pointer( - st_bytes - , d->rd_bytes - , (collected_number)d->bytes - ); - } - } - - rrdset_done(st_bytes); -} - -#endif // HAVE_LIBNETFILTER_ACCT -#endif // HAVE_LIBMNL - -// ---------------------------------------------------------------------------- - -static void nfacct_main_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - info("cleaning up..."); - -#ifdef DO_NFACCT - nfacct_cleanup(); -#endif - -#ifdef DO_NFSTAT - nfstat_cleanup(); -#endif - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *nfacct_main(void *ptr) { - netdata_thread_cleanup_push(nfacct_main_cleanup, ptr); - - int update_every = (int)config_get_number("plugin:netfilter", "update every", localhost->rrd_update_every); - if(update_every < localhost->rrd_update_every) - update_every = localhost->rrd_update_every; - -#ifdef DO_NFACCT - int nfacct = !nfacct_init(update_every); -#endif - -#ifdef DO_NFSTAT - int nfstat = !nfstat_init(update_every); -#endif - - // ------------------------------------------------------------------------ - - usec_t step = update_every * USEC_PER_SEC; - heartbeat_t hb; - heartbeat_init(&hb); - for(;;) { - heartbeat_dt_usec(&hb); - heartbeat_next(&hb, step); - - 
if(unlikely(netdata_exit)) break; - -#ifdef DO_NFACCT - if(likely(nfacct)) { - nfacct = !nfacct_collect(); - - if(likely(nfacct)) - nfacct_send_metrics(); - } -#endif - -#ifdef DO_NFSTAT - if(likely(nfstat)) { - nfstat = !nfstat_collect(); - - if(likely(nfstat)) - nfstat_send_metrics(); - } -#endif - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - -#endif // INTERNAL_PLUGIN_NFACCT diff --git a/src/plugin_nfacct.h b/src/plugin_nfacct.h deleted file mode 100644 index 88a3a9230..000000000 --- a/src/plugin_nfacct.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef NETDATA_NFACCT_H -#define NETDATA_NFACCT_H 1 - -extern void *nfacct_main(void *ptr); - -#endif /* NETDATA_NFACCT_H */ - diff --git a/src/plugin_proc.c b/src/plugin_proc.c deleted file mode 100644 index e0afb0d6d..000000000 --- a/src/plugin_proc.c +++ /dev/null @@ -1,201 +0,0 @@ -#include "common.h" - -static struct proc_module { - const char *name; - const char *dim; - - int enabled; - - int (*func)(int update_every, usec_t dt); - usec_t duration; - - RRDDIM *rd; - -} proc_modules[] = { - - // system metrics - { .name = "/proc/stat", .dim = "stat", .func = do_proc_stat }, - { .name = "/proc/uptime", .dim = "uptime", .func = do_proc_uptime }, - { .name = "/proc/loadavg", .dim = "loadavg", .func = do_proc_loadavg }, - { .name = "/proc/sys/kernel/random/entropy_avail", .dim = "entropy", .func = do_proc_sys_kernel_random_entropy_avail }, - - // CPU metrics - { .name = "/proc/interrupts", .dim = "interrupts", .func = do_proc_interrupts }, - { .name = "/proc/softirqs", .dim = "softirqs", .func = do_proc_softirqs }, - - // memory metrics - { .name = "/proc/vmstat", .dim = "vmstat", .func = do_proc_vmstat }, - { .name = "/proc/meminfo", .dim = "meminfo", .func = do_proc_meminfo }, - { .name = "/sys/kernel/mm/ksm", .dim = "ksm", .func = do_sys_kernel_mm_ksm }, - { .name = "/sys/devices/system/edac/mc", .dim = "ecc", .func = do_proc_sys_devices_system_edac_mc }, - { .name = "/sys/devices/system/node", .dim = "numa", .func = do_proc_sys_devices_system_node }, - - // network metrics - { .name = "/proc/net/dev", .dim = "netdev", .func = do_proc_net_dev }, - { .name = "/proc/net/sockstat", .dim = "sockstat", .func = do_proc_net_sockstat }, - { .name = "/proc/net/sockstat6", .dim = "sockstat6", .func = do_proc_net_sockstat6 }, - { .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat }, // this has to be before /proc/net/snmp, because there is a shared metric - { .name = "/proc/net/snmp", .dim = "snmp", .func = do_proc_net_snmp }, - { .name = "/proc/net/snmp6", .dim = "snmp6", .func = do_proc_net_snmp6 }, - { .name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat }, - { .name = "/proc/net/ip_vs/stats", .dim = "ipvs", .func = do_proc_net_ip_vs_stats }, - - // firewall metrics - { .name = "/proc/net/stat/conntrack", .dim = "conntrack", .func = do_proc_net_stat_conntrack }, - { .name = "/proc/net/stat/synproxy", .dim = "synproxy", .func = do_proc_net_stat_synproxy }, - - // disk metrics - { .name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats }, - - // NFS metrics - { .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd }, - { .name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs }, - - // ZFS metrics - { .name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats }, - - // BTRFS metrics - { .name = "/sys/fs/btrfs", .dim = "btrfs", .func = do_sys_fs_btrfs }, - - // IPC metrics - { .name = "ipc", .dim = 
"ipc", .func = do_ipc }, - - // the terminator of this array - { .name = NULL, .dim = NULL, .func = NULL } -}; - -static void proc_main_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *proc_main(void *ptr) { - netdata_thread_cleanup_push(proc_main_cleanup, ptr); - - int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1); - - // check the enabled status for each module - int i; - for(i = 0 ; proc_modules[i].name ;i++) { - struct proc_module *pm = &proc_modules[i]; - - pm->enabled = config_get_boolean("plugin:proc", pm->name, 1); - pm->duration = 0ULL; - pm->rd = NULL; - } - - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; - heartbeat_t hb; - heartbeat_init(&hb); - - while(!netdata_exit) { - usec_t hb_dt = heartbeat_next(&hb, step); - usec_t duration = 0ULL; - - if(unlikely(netdata_exit)) break; - - // BEGIN -- the job to be done - - for(i = 0 ; proc_modules[i].name ;i++) { - struct proc_module *pm = &proc_modules[i]; - if(unlikely(!pm->enabled)) continue; - - debug(D_PROCNETDEV_LOOP, "PROC calling %s.", pm->name); - - pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt); - pm->duration = heartbeat_dt_usec(&hb) - duration; - duration += pm->duration; - - if(unlikely(netdata_exit)) break; - } - - // END -- the job is done - - // -------------------------------------------------------------------- - - if(vdo_cpu_netdata) { - static RRDSET *st = NULL; - - if(unlikely(!st)) { - st = rrdset_find_bytype_localhost("netdata", "plugin_proc_modules"); - - if(!st) { - st = rrdset_create_localhost( - "netdata" - , "plugin_proc_modules" - , NULL - , "proc" - , NULL - , "NetData Proc Plugin Modules Durations" - , "milliseconds/run" - , "netdata" - , "stats" - , 132001 - , localhost->rrd_update_every - , RRDSET_TYPE_STACKED - ); - - for(i = 0 ; proc_modules[i].name ;i++) { - struct proc_module *pm = &proc_modules[i]; - if(unlikely(!pm->enabled)) continue; - - pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - } - } - else rrdset_next(st); - - for(i = 0 ; proc_modules[i].name ;i++) { - struct proc_module *pm = &proc_modules[i]; - if(unlikely(!pm->enabled)) continue; - - rrddim_set_by_pointer(st, pm->rd, pm->duration); - } - rrdset_done(st); - - global_statistics_charts(); - registry_statistics(); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - -int get_numa_node_count(void) -{ - static int numa_node_count = -1; - - if (numa_node_count != -1) - return numa_node_count; - - numa_node_count = 0; - - char name[FILENAME_MAX + 1]; - snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); - char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); - - DIR *dir = opendir(dirname); - if(dir) { - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type != DT_DIR) - continue; - - if(strncmp(de->d_name, "node", 4) != 0) - continue; - - if(!isdigit(de->d_name[4])) - continue; - - numa_node_count++; - } - closedir(dir); - } - - return numa_node_count; -} diff --git a/src/plugin_proc.h b/src/plugin_proc.h deleted file mode 100644 index a7f9b4e38..000000000 --- a/src/plugin_proc.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef NETDATA_PLUGIN_PROC_H -#define NETDATA_PLUGIN_PROC_H 1 - -void *proc_main(void *ptr); - -extern int 
do_proc_net_dev(int update_every, usec_t dt); -extern int do_proc_diskstats(int update_every, usec_t dt); -extern int do_proc_net_snmp(int update_every, usec_t dt); -extern int do_proc_net_snmp6(int update_every, usec_t dt); -extern int do_proc_net_netstat(int update_every, usec_t dt); -extern int do_proc_net_stat_conntrack(int update_every, usec_t dt); -extern int do_proc_net_ip_vs_stats(int update_every, usec_t dt); -extern int do_proc_stat(int update_every, usec_t dt); -extern int do_proc_meminfo(int update_every, usec_t dt); -extern int do_proc_vmstat(int update_every, usec_t dt); -extern int do_proc_net_rpc_nfs(int update_every, usec_t dt); -extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt); -extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt); -extern int do_proc_interrupts(int update_every, usec_t dt); -extern int do_proc_softirqs(int update_every, usec_t dt); -extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt); -extern int do_proc_loadavg(int update_every, usec_t dt); -extern int do_proc_net_stat_synproxy(int update_every, usec_t dt); -extern int do_proc_net_softnet_stat(int update_every, usec_t dt); -extern int do_proc_uptime(int update_every, usec_t dt); -extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt); -extern int do_proc_sys_devices_system_node(int update_every, usec_t dt); -extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt); -extern int do_sys_fs_btrfs(int update_every, usec_t dt); -extern int do_proc_net_sockstat(int update_every, usec_t dt); -extern int do_proc_net_sockstat6(int update_every, usec_t dt); - -extern int get_numa_node_count(void); - -// metrics that need to be shared among data collectors -extern unsigned long long tcpext_TCPSynRetrans; - -// netdev renames -extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name); -extern void netdev_rename_device_del(const char *host_device); - -#endif /* NETDATA_PLUGIN_PROC_H */ diff --git a/src/plugin_proc_diskspace.c b/src/plugin_proc_diskspace.c deleted file mode 100644 index 0a229f38e..000000000 --- a/src/plugin_proc_diskspace.c +++ /dev/null @@ -1,460 +0,0 @@ -#include "common.h" - -#define DELAULT_EXCLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*" -#define DEFAULT_EXCLUDED_FILESYSTEMS "" -#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace" - -static struct mountinfo *disk_mountinfo_root = NULL; -static int check_for_new_mountpoints_every = 15; -static int cleanup_mount_points = 1; - -static inline void mountinfo_reload(int force) { - static time_t last_loaded = 0; - time_t now = now_realtime_sec(); - - if(force || now - last_loaded >= check_for_new_mountpoints_every) { - // mountinfo_free_all() can be called with NULL disk_mountinfo_root - mountinfo_free_all(disk_mountinfo_root); - - // re-read mountinfo in case something changed - disk_mountinfo_root = mountinfo_read(0); - - last_loaded = now; - } -} - -// Data to be stored in DICTIONARY dict_mountpoints used by do_disk_space_stats(). -// This DICTIONARY is used to lookup the settings of the mount point on each iteration. 
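
/*
 * The space math used by do_disk_space_stats() below mirrors get_fs_usage()
 * from coreutils, as the code itself notes. A minimal standalone sketch of
 * just that calculation, assuming only libc; print_disk_space() is an
 * illustrative name, not part of this plugin:
 */
#include <stdio.h>
#include <sys/statvfs.h>

static int print_disk_space(const char *mount_point) {
    struct statvfs s;
    if(statvfs(mount_point, &s) < 0)
        return -1;

    // prefer the fragment size; fall back to the block size when unset
    unsigned long long bsize = s.f_frsize ? s.f_frsize : s.f_bsize;

    // f_bavail: blocks available to unprivileged users
    // f_bfree:  blocks available to root; the difference is the root reserve
    unsigned long long avail    = s.f_bavail;
    unsigned long long reserved = s.f_bfree - s.f_bavail;
    unsigned long long used     = s.f_blocks - s.f_bfree;

    printf("%s: used=%llu avail=%llu reserved=%llu (bytes)\n",
           mount_point, used * bsize, avail * bsize, reserved * bsize);
    return 0;
}
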
-struct mount_point_metadata { - int do_space; - int do_inodes; - int shown_error; - int updated; - - size_t collected; // the number of times this has been collected - - RRDSET *st_space; - RRDDIM *rd_space_used; - RRDDIM *rd_space_avail; - RRDDIM *rd_space_reserved; - - RRDSET *st_inodes; - RRDDIM *rd_inodes_used; - RRDDIM *rd_inodes_avail; - RRDDIM *rd_inodes_reserved; -}; - -static DICTIONARY *dict_mountpoints = NULL; - -#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st) - -int mount_point_cleanup(void *entry, void *data) { - (void)data; - - struct mount_point_metadata *mp = (struct mount_point_metadata *)entry; - if(!mp) return 0; - - if(likely(mp->updated)) { - mp->updated = 0; - return 0; - } - - if(likely(cleanup_mount_points && mp->collected)) { - mp->collected = 0; - mp->updated = 0; - mp->shown_error = 0; - - mp->rd_space_avail = NULL; - mp->rd_space_used = NULL; - mp->rd_space_reserved = NULL; - - mp->rd_inodes_avail = NULL; - mp->rd_inodes_used = NULL; - mp->rd_inodes_reserved = NULL; - - rrdset_obsolete_and_pointer_null(mp->st_space); - rrdset_obsolete_and_pointer_null(mp->st_inodes); - } - - return 0; -} - -static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { - const char *family = mi->mount_point; - const char *disk = mi->persistent_id; - - static SIMPLE_PATTERN *excluded_mountpoints = NULL; - static SIMPLE_PATTERN *excluded_filesystems = NULL; - int do_space, do_inodes; - - if(unlikely(!dict_mountpoints)) { - SIMPLE_PREFIX_MODE mode = SIMPLE_PATTERN_EXACT; - - if(config_move("plugin:proc:/proc/diskstats", "exclude space metrics on paths", CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths") != -1) { - // old configuration, enable backwards compatibility - mode = SIMPLE_PATTERN_PREFIX; - } - - excluded_mountpoints = simple_pattern_create( - config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths", DELAULT_EXCLUDED_PATHS) - , NULL - , mode - ); - - excluded_filesystems = simple_pattern_create( - config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS) - , NULL - , SIMPLE_PATTERN_EXACT - ); - - dict_mountpoints = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED); - } - - struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point); - if(unlikely(!m)) { - char var_name[4096 + 1]; - snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point); - - int def_space = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "space usage for all disks", CONFIG_BOOLEAN_AUTO); - int def_inodes = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO); - - if(unlikely(simple_pattern_matches(excluded_mountpoints, mi->mount_point))) { - def_space = CONFIG_BOOLEAN_NO; - def_inodes = CONFIG_BOOLEAN_NO; - } - - if(unlikely(simple_pattern_matches(excluded_filesystems, mi->filesystem))) { - def_space = CONFIG_BOOLEAN_NO; - def_inodes = CONFIG_BOOLEAN_NO; - } - - // check if the mount point is a directory #2407 - { - struct stat bs; - if(stat(mi->mount_point, &bs) == -1) { - error("DISKSPACE: Cannot stat() mount point '%s' (disk '%s', filesystem '%s', root '%s')." 
- , mi->mount_point - , disk - , mi->filesystem?mi->filesystem:"" - , mi->root?mi->root:"" - ); - def_space = CONFIG_BOOLEAN_NO; - def_inodes = CONFIG_BOOLEAN_NO; - } - else { - if((bs.st_mode & S_IFMT) != S_IFDIR) { - error("DISKSPACE: Mount point '%s' (disk '%s', filesystem '%s', root '%s') is not a directory." - , mi->mount_point - , disk - , mi->filesystem?mi->filesystem:"" - , mi->root?mi->root:"" - ); - def_space = CONFIG_BOOLEAN_NO; - def_inodes = CONFIG_BOOLEAN_NO; - } - } - } - - do_space = config_get_boolean_ondemand(var_name, "space usage", def_space); - do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes); - - struct mount_point_metadata mp = { - .do_space = do_space, - .do_inodes = do_inodes, - .shown_error = 0, - .updated = 0, - - .collected = 0, - - .st_space = NULL, - .rd_space_avail = NULL, - .rd_space_used = NULL, - .rd_space_reserved = NULL, - - .st_inodes = NULL, - .rd_inodes_avail = NULL, - .rd_inodes_used = NULL, - .rd_inodes_reserved = NULL - }; - - m = dictionary_set(dict_mountpoints, mi->mount_point, &mp, sizeof(struct mount_point_metadata)); - } - - m->updated = 1; - - if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO)) - return; - - if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected)) - return; - - struct statvfs buff_statvfs; - if (statvfs(mi->mount_point, &buff_statvfs) < 0) { - if(!m->shown_error) { - error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')" - , mi->mount_point - , disk - , mi->filesystem?mi->filesystem:"" - , mi->root?mi->root:"" - ); - m->shown_error = 1; - } - return; - } - m->shown_error = 0; - - // logic found at get_fs_usage() in coreutils - unsigned long bsize = (buff_statvfs.f_frsize) ? buff_statvfs.f_frsize : buff_statvfs.f_bsize; - - fsblkcnt_t bavail = buff_statvfs.f_bavail; - fsblkcnt_t btotal = buff_statvfs.f_blocks; - fsblkcnt_t bavail_root = buff_statvfs.f_bfree; - fsblkcnt_t breserved_root = bavail_root - bavail; - fsblkcnt_t bused; - if(likely(btotal >= bavail_root)) - bused = btotal - bavail_root; - else - bused = bavail_root - btotal; - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(btotal != bavail + breserved_root + bused)) - error("DISKSPACE: disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused); -#endif - - // -------------------------------------------------------------------------- - - fsfilcnt_t favail = buff_statvfs.f_favail; - fsfilcnt_t ftotal = buff_statvfs.f_files; - fsfilcnt_t favail_root = buff_statvfs.f_ffree; - fsfilcnt_t freserved_root = favail_root - favail; - fsfilcnt_t fused = ftotal - favail_root; - - if(m->do_inodes == CONFIG_BOOLEAN_AUTO && favail == (fsfilcnt_t)-1) { - // this file system does not support inodes reporting - // eg. 
cephfs
-        m->do_inodes = CONFIG_BOOLEAN_NO;
-    }
-
-#ifdef NETDATA_INTERNAL_CHECKS
-    if(unlikely(ftotal != favail + freserved_root + fused))
-        error("DISKSPACE: disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
-#endif
-
-    // --------------------------------------------------------------------------
-
-    int rendered = 0;
-
-    if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (bavail || breserved_root || bused))) {
-        if(unlikely(!m->st_space)) {
-            m->do_space = CONFIG_BOOLEAN_YES;
-            m->st_space = rrdset_find_bytype_localhost("disk_space", disk);
-            if(unlikely(!m->st_space)) {
-                char title[4096 + 1];
-                snprintfz(title, 4096, "Disk Space Usage for %s [%s]", family, mi->mount_source);
-                m->st_space = rrdset_create_localhost(
-                        "disk_space"
-                        , disk
-                        , NULL
-                        , family
-                        , "disk.space"
-                        , title
-                        , "GB"
-                        , "diskspace"
-                        , NULL
-                        , 2023
-                        , update_every
-                        , RRDSET_TYPE_STACKED
-                );
-            }
-
-            m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-            m->rd_space_used = rrddim_add(m->st_space, "used", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-            m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-        }
-        else
-            rrdset_next(m->st_space);
-
-        rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number)bavail);
-        rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number)bused);
-        rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number)breserved_root);
-        rrdset_done(m->st_space);
-
-        rendered++;
-    }
-
-    // --------------------------------------------------------------------------
-
-    if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (favail || freserved_root || fused))) {
-        if(unlikely(!m->st_inodes)) {
-            m->do_inodes = CONFIG_BOOLEAN_YES;
-            m->st_inodes = rrdset_find_bytype_localhost("disk_inodes", disk);
-            if(unlikely(!m->st_inodes)) {
-                char title[4096 + 1];
-                snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", family, mi->mount_source);
-                m->st_inodes = rrdset_create_localhost(
-                        "disk_inodes"
-                        , disk
-                        , NULL
-                        , family
-                        , "disk.inodes"
-                        , title
-                        , "Inodes"
-                        , "diskspace"
-                        , NULL
-                        , 2024
-                        , update_every
-                        , RRDSET_TYPE_STACKED
-                );
-            }
-
-            m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-            m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-            m->rd_inodes_reserved = rrddim_add(m->st_inodes, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
-        }
-        else
-            rrdset_next(m->st_inodes);
-
-        rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number)favail);
-        rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number)fused);
-        rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_reserved, (collected_number)freserved_root);
-        rrdset_done(m->st_inodes);
-
-        rendered++;
-    }
-
-    // --------------------------------------------------------------------------
-
-    if(likely(rendered))
-        m->collected++;
-}
-
-static void diskspace_main_cleanup(void *ptr) {
-    struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
-    
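/* Scaling note for the disk_space charts above: the dimensions are added with
 * multiplier = bsize and divisor = 1024^3 while raw block counts are collected,
 * so the rendered value is blocks * bsize / 2^30 (binary gigabytes, to match
 * the chart's "GB" unit). For example, 2,621,440 blocks of 4,096 bytes each
 * render as 2621440 * 4096 / 1073741824 = 10. */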
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *proc_diskspace_main(void *ptr) { - netdata_thread_cleanup_push(diskspace_main_cleanup, ptr); - - int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1); - - cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points); - - int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every); - if(update_every < localhost->rrd_update_every) - update_every = localhost->rrd_update_every; - - check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every); - if(check_for_new_mountpoints_every < update_every) - check_for_new_mountpoints_every = update_every; - - struct rusage thread; - - usec_t duration = 0; - usec_t step = update_every * USEC_PER_SEC; - heartbeat_t hb; - heartbeat_init(&hb); - while(!netdata_exit) { - duration = heartbeat_dt_usec(&hb); - /* usec_t hb_dt = */ heartbeat_next(&hb, step); - - if(unlikely(netdata_exit)) break; - - - // -------------------------------------------------------------------------- - // this is smart enough not to reload it every time - - mountinfo_reload(0); - - - // -------------------------------------------------------------------------- - // disk space metrics - - struct mountinfo *mi; - for(mi = disk_mountinfo_root; mi; mi = mi->next) { - - if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND))) - continue; - - do_disk_space_stats(mi, update_every); - if(unlikely(netdata_exit)) break; - } - - if(unlikely(netdata_exit)) break; - - if(dict_mountpoints) - dictionary_get_all(dict_mountpoints, mount_point_cleanup, NULL); - - if(vdo_cpu_netdata) { - static RRDSET *stcpu_thread = NULL, *st_duration = NULL; - static RRDDIM *rd_user = NULL, *rd_system = NULL, *rd_duration = NULL; - - // ---------------------------------------------------------------- - - getrusage(RUSAGE_THREAD, &thread); - - if(unlikely(!stcpu_thread)) { - stcpu_thread = rrdset_create_localhost( - "netdata" - , "plugin_diskspace" - , NULL - , "diskspace" - , NULL - , "NetData Disk Space Plugin CPU usage" - , "milliseconds/s" - , "diskspace" - , NULL - , 132020 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(stcpu_thread); - - rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); - rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); - rrdset_done(stcpu_thread); - - // ---------------------------------------------------------------- - - if(unlikely(!st_duration)) { - st_duration = rrdset_create_localhost( - "netdata" - , "plugin_diskspace_dt" - , NULL - , "diskspace" - , NULL - , "NetData Disk Space Plugin Duration" - , "milliseconds/run" - , "diskspace" - , NULL - , 132021 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_duration = rrddim_add(st_duration, "duration", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_duration); - - rrddim_set_by_pointer(st_duration, rd_duration, duration); - rrdset_done(st_duration); - - // 
---------------------------------------------------------------- - - if(unlikely(netdata_exit)) break; - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/plugin_proc_diskspace.h b/src/plugin_proc_diskspace.h deleted file mode 100644 index dcec28f75..000000000 --- a/src/plugin_proc_diskspace.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H -#define NETDATA_PLUGIN_PROC_DISKSPACE_H - -extern void *proc_diskspace_main(void *ptr); - -#endif //NETDATA_PLUGIN_PROC_DISKSPACE_H diff --git a/src/plugin_tc.c b/src/plugin_tc.c deleted file mode 100644 index 4b6d84e11..000000000 --- a/src/plugin_tc.c +++ /dev/null @@ -1,1164 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_TC "tc" - -// ---------------------------------------------------------------------------- -// /sbin/tc processor -// this requires the script plugins.d/tc-qos-helper.sh - -#define TC_LINE_MAX 1024 - -struct tc_class { - avl avl; - - char *id; - uint32_t hash; - - char *name; - - char *leafid; - uint32_t leaf_hash; - - char *parentid; - uint32_t parent_hash; - - char hasparent; - char isleaf; - char isqdisc; - char render; - - unsigned long long bytes; - unsigned long long packets; - unsigned long long dropped; - unsigned long long overlimits; - unsigned long long requeues; - unsigned long long lended; - unsigned long long borrowed; - unsigned long long giants; - unsigned long long tokens; - unsigned long long ctokens; - - RRDDIM *rd_bytes; - RRDDIM *rd_packets; - RRDDIM *rd_dropped; - RRDDIM *rd_tokens; - RRDDIM *rd_ctokens; - - char name_updated; - char updated; // updated bytes - int unupdated; // the number of times, this has been found un-updated - - struct tc_class *next; - struct tc_class *prev; -}; - -struct tc_device { - avl avl; - - char *id; - uint32_t hash; - - char *name; - char *family; - - char name_updated; - char family_updated; - - char enabled; - char enabled_bytes; - char enabled_packets; - char enabled_dropped; - char enabled_tokens; - char enabled_ctokens; - char enabled_all_classes_qdiscs; - - RRDSET *st_bytes; - RRDSET *st_packets; - RRDSET *st_dropped; - RRDSET *st_tokens; - RRDSET *st_ctokens; - - avl_tree classes_index; - - struct tc_class *classes; - struct tc_class *last_class; - - struct tc_device *next; - struct tc_device *prev; -}; - - -struct tc_device *tc_device_root = NULL; - -// ---------------------------------------------------------------------------- -// tc_device index - -static int tc_device_compare(void* a, void* b) { - if(((struct tc_device *)a)->hash < ((struct tc_device *)b)->hash) return -1; - else if(((struct tc_device *)a)->hash > ((struct tc_device *)b)->hash) return 1; - else return strcmp(((struct tc_device *)a)->id, ((struct tc_device *)b)->id); -} - -avl_tree tc_device_root_index = { - NULL, - tc_device_compare -}; - -#define tc_device_index_add(st) (struct tc_device *)avl_insert(&tc_device_root_index, (avl *)(st)) -#define tc_device_index_del(st) (struct tc_device *)avl_remove(&tc_device_root_index, (avl *)(st)) - -static inline struct tc_device *tc_device_index_find(const char *id, uint32_t hash) { - struct tc_device tmp; - tmp.id = (char *)id; - tmp.hash = (hash)?hash:simple_hash(tmp.id); - - return (struct tc_device *)avl_search(&(tc_device_root_index), (avl *)&tmp); -} - - -// ---------------------------------------------------------------------------- -// tc_class index - -static int tc_class_compare(void* a, void* b) { - if(((struct tc_class *)a)->hash < ((struct tc_class *)b)->hash) return -1; - else if(((struct tc_class 
*)a)->hash > ((struct tc_class *)b)->hash) return 1; - else return strcmp(((struct tc_class *)a)->id, ((struct tc_class *)b)->id); -} - -#define tc_class_index_add(st, rd) (struct tc_class *)avl_insert(&((st)->classes_index), (avl *)(rd)) -#define tc_class_index_del(st, rd) (struct tc_class *)avl_remove(&((st)->classes_index), (avl *)(rd)) - -static inline struct tc_class *tc_class_index_find(struct tc_device *st, const char *id, uint32_t hash) { - struct tc_class tmp; - tmp.id = (char *)id; - tmp.hash = (hash)?hash:simple_hash(tmp.id); - - return (struct tc_class *)avl_search(&(st->classes_index), (avl *) &tmp); -} - -// ---------------------------------------------------------------------------- - -static inline void tc_class_free(struct tc_device *n, struct tc_class *c) { - if(c == n->classes) { - if(likely(c->next)) - n->classes = c->next; - else - n->classes = c->prev; - } - - if(c == n->last_class) { - if(unlikely(c->next)) - n->last_class = c->next; - else - n->last_class = c->prev; - } - - if(c->next) c->next->prev = c->prev; - if(c->prev) c->prev->next = c->next; - - debug(D_TC_LOOP, "Removing from device '%s' class '%s', parentid '%s', leafid '%s', unused=%d", n->id, c->id, c->parentid?c->parentid:"", c->leafid?c->leafid:"", c->unupdated); - - if(unlikely(tc_class_index_del(n, c) != c)) - error("plugin_tc: INTERNAL ERROR: attempt remove class '%s' from device '%s': removed a different calls", c->id, n->id); - - freez(c->id); - freez(c->name); - freez(c->leafid); - freez(c->parentid); - freez(c); -} - -static inline void tc_device_classes_cleanup(struct tc_device *d) { - static int cleanup_every = 999; - - if(unlikely(cleanup_every > 0)) { - cleanup_every = (int) config_get_number("plugin:tc", "cleanup unused classes every", 120); - if(cleanup_every < 0) cleanup_every = -cleanup_every; - } - - d->name_updated = 0; - d->family_updated = 0; - - struct tc_class *c = d->classes; - while(c) { - if(unlikely(cleanup_every && c->unupdated >= cleanup_every)) { - struct tc_class *nc = c->next; - tc_class_free(d, c); - c = nc; - } - else { - c->updated = 0; - c->name_updated = 0; - - c = c->next; - } - } -} - -static inline void tc_device_commit(struct tc_device *d) { - static int enable_new_interfaces = -1, enable_bytes = -1, enable_packets = -1, enable_dropped = -1, enable_tokens = -1, enable_ctokens = -1, enabled_all_classes_qdiscs = -1; - - if(unlikely(enable_new_interfaces == -1)) { - enable_new_interfaces = config_get_boolean_ondemand("plugin:tc", "enable new interfaces detected at runtime", CONFIG_BOOLEAN_YES); - enable_bytes = config_get_boolean_ondemand("plugin:tc", "enable traffic charts for all interfaces", CONFIG_BOOLEAN_AUTO); - enable_packets = config_get_boolean_ondemand("plugin:tc", "enable packets charts for all interfaces", CONFIG_BOOLEAN_AUTO); - enable_dropped = config_get_boolean_ondemand("plugin:tc", "enable dropped charts for all interfaces", CONFIG_BOOLEAN_AUTO); - enable_tokens = config_get_boolean_ondemand("plugin:tc", "enable tokens charts for all interfaces", CONFIG_BOOLEAN_NO); - enable_ctokens = config_get_boolean_ondemand("plugin:tc", "enable ctokens charts for all interfaces", CONFIG_BOOLEAN_NO); - enabled_all_classes_qdiscs = config_get_boolean_ondemand("plugin:tc", "enable show all classes and qdiscs for all interfaces", CONFIG_BOOLEAN_NO); - } - - if(unlikely(d->enabled == (char)-1)) { - char var_name[CONFIG_MAX_NAME + 1]; - snprintfz(var_name, CONFIG_MAX_NAME, "qos for %s", d->id); - - d->enabled = (char)config_get_boolean_ondemand("plugin:tc", var_name, 
enable_new_interfaces); - - snprintfz(var_name, CONFIG_MAX_NAME, "traffic chart for %s", d->id); - d->enabled_bytes = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_bytes); - - snprintfz(var_name, CONFIG_MAX_NAME, "packets chart for %s", d->id); - d->enabled_packets = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_packets); - - snprintfz(var_name, CONFIG_MAX_NAME, "dropped packets chart for %s", d->id); - d->enabled_dropped = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_dropped); - - snprintfz(var_name, CONFIG_MAX_NAME, "tokens chart for %s", d->id); - d->enabled_tokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_tokens); - - snprintfz(var_name, CONFIG_MAX_NAME, "ctokens chart for %s", d->id); - d->enabled_ctokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_ctokens); - - snprintfz(var_name, CONFIG_MAX_NAME, "show all classes for %s", d->id); - d->enabled_all_classes_qdiscs = (char)config_get_boolean_ondemand("plugin:tc", var_name, enabled_all_classes_qdiscs); - } - - // we only need to add leaf classes - struct tc_class *c, *x /*, *root = NULL */; - unsigned long long bytes_sum = 0, packets_sum = 0, dropped_sum = 0, tokens_sum = 0, ctokens_sum = 0; - int active_nodes = 0, updated_classes = 0, updated_qdiscs = 0; - - // prepare all classes - // we set reasonable defaults for the rest of the code below - - for(c = d->classes ; c ; c = c->next) { - c->render = 0; // do not render this class - - c->isleaf = 1; // this is a leaf class - c->hasparent = 0; // without a parent - - if(unlikely(!c->updated)) - c->unupdated++; // increase its unupdated counter - else { - c->unupdated = 0; // reset its unupdated counter - - // count how many of each kind - if(c->isqdisc) - updated_qdiscs++; - else - updated_classes++; - } - } - - if(unlikely(!d->enabled || (!updated_classes && !updated_qdiscs))) { - debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. It is not enabled/updated.", d->name?d->name:d->id); - tc_device_classes_cleanup(d); - return; - } - - if(unlikely(updated_classes && updated_qdiscs)) { - error("TC: device '%s' has active both classes (%d) and qdiscs (%d). Will render only qdiscs.", d->id, updated_classes, updated_qdiscs); - - // set all classes to !updated - for(c = d->classes ; c ; c = c->next) - if(unlikely(!c->isqdisc && c->updated)) - c->updated = 0; - - updated_classes = 0; - } - - // mark the classes as leafs and parents - // - // TC is hierarchical: - // - classes can have other classes in them - // - the same is true for qdiscs (i.e. qdiscs have classes, that have other qdiscs) - // - // we need to present a chart with leaf nodes only, so that the sum - // of all dimensions of the chart, will be the total utilization - // of the interface. 
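    // (worked example: for the hierarchy  1:  ->  1:1  ->  {1:10, 1:11},
    //  the nested scan below clears isleaf on "1:" and "1:1", because other
    //  nodes name them as parentid, and sets hasparent on "1:1", "1:10" and
    //  "1:11"; only "1:10" and "1:11" end up with both isleaf and hasparent
    //  set, so only they are rendered and the chart sums to the interface total)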
- // - // here we try to find the ones we need to report - // by default all nodes are marked with: isleaf = 1 (see above) - // - // so, here we remove the isleaf flag from nodes in the middle - // and we add the hasparent flag to leaf nodes we found their parent - if(likely(!d->enabled_all_classes_qdiscs)) { - for(c = d->classes; c; c = c->next) { - if(unlikely(!c->updated)) continue; - - //debug(D_TC_LOOP, "TC: In device '%s', %s '%s' has leafid: '%s' and parentid '%s'.", - // d->id, - // c->isqdisc?"qdisc":"class", - // c->id, - // c->leafid?c->leafid:"NULL", - // c->parentid?c->parentid:"NULL"); - - // find if c is leaf or not - for(x = d->classes; x; x = x->next) { - if(unlikely(!x->updated || c == x || !x->parentid)) continue; - - // classes have both parentid and leafid - // qdiscs have only parentid - // the following works for both (it is an OR) - - if((c->hash == x->parent_hash && strcmp(c->id, x->parentid) == 0) || - (c->leafid && c->leaf_hash == x->parent_hash && strcmp(c->leafid, x->parentid) == 0)) { - // debug(D_TC_LOOP, "TC: In device '%s', %s '%s' (leafid: '%s') has as leaf %s '%s' (parentid: '%s').", d->name?d->name:d->id, c->isqdisc?"qdisc":"class", c->name?c->name:c->id, c->leafid?c->leafid:c->id, x->isqdisc?"qdisc":"class", x->name?x->name:x->id, x->parentid?x->parentid:x->id); - c->isleaf = 0; - x->hasparent = 1; - } - } - } - } - - for(c = d->classes ; c ; c = c->next) { - if(unlikely(!c->updated)) continue; - - // debug(D_TC_LOOP, "TC: device '%s', %s '%s' isleaf=%d, hasparent=%d", d->id, (c->isqdisc)?"qdisc":"class", c->id, c->isleaf, c->hasparent); - - if(unlikely((c->isleaf && c->hasparent) || d->enabled_all_classes_qdiscs)) { - c->render = 1; - active_nodes++; - bytes_sum += c->bytes; - packets_sum += c->packets; - dropped_sum += c->dropped; - tokens_sum += c->tokens; - ctokens_sum += c->ctokens; - } - - //if(unlikely(!c->hasparent)) { - // if(root) error("TC: multiple root class/qdisc for device '%s' (old: '%s', new: '%s')", d->id, root->id, c->id); - // root = c; - // debug(D_TC_LOOP, "TC: found root class/qdisc '%s'", root->id); - //} - } - -#ifdef NETDATA_INTERNAL_CHECKS - // dump all the list to see what we know - - if(unlikely(debug_flags & D_TC_LOOP)) { - for(c = d->classes ; c ; c = c->next) { - if(c->render) debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, OK", d->name, c->id); - else debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, IGNORE (updated: %d, isleaf: %d, hasparent: %d, parent: %s)", d->name?d->name:d->id, c->id, c->updated, c->isleaf, c->hasparent, c->parentid?c->parentid:"(unset)"); - } - } -#endif - - if(unlikely(!active_nodes)) { - debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. No useful classes/qdiscs.", d->name?d->name:d->id); - tc_device_classes_cleanup(d); - return; - } - - debug(D_TC_LOOP, "TC: evaluating TC device '%s'. 
enabled = %d/%d (bytes: %d/%d, packets: %d/%d, dropped: %d/%d, tokens: %d/%d, ctokens: %d/%d, all_classes_qdiscs: %d/%d), classes: (bytes = %llu, packets = %llu, dropped = %llu, tokens = %llu, ctokens = %llu).", - d->name?d->name:d->id, - d->enabled, enable_new_interfaces, - d->enabled_bytes, enable_bytes, - d->enabled_packets, enable_packets, - d->enabled_dropped, enable_dropped, - d->enabled_tokens, enable_tokens, - d->enabled_ctokens, enable_ctokens, - d->enabled_all_classes_qdiscs, enabled_all_classes_qdiscs, - bytes_sum, - packets_sum, - dropped_sum, - tokens_sum, - ctokens_sum - ); - - // -------------------------------------------------------------------- - // bytes - - if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && bytes_sum)) { - d->enabled_bytes = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_bytes)) - d->st_bytes = rrdset_create_localhost( - RRD_TYPE_TC - , d->id - , d->name ? d->name : d->id - , d->family ? d->family : d->id - , RRD_TYPE_TC ".qos" - , "Class Usage" - , "kilobits/s" - , "tc" - , NULL - , 7000 - , localhost->rrd_update_every - , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED - ); - - else { - rrdset_next(d->st_bytes); - if(unlikely(d->name_updated)) rrdset_set_name(d->st_bytes, d->name); - - // FIXME - // update the family - } - - for(c = d->classes ; c ; c = c->next) { - if(unlikely(!c->render)) continue; - - if(unlikely(!c->rd_bytes)) - c->rd_bytes = rrddim_add(d->st_bytes, c->id, c->name?c->name:c->id, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - else if(unlikely(c->name_updated)) - rrddim_set_name(d->st_bytes, c->rd_bytes, c->name); - - rrddim_set_by_pointer(d->st_bytes, c->rd_bytes, c->bytes); - } - rrdset_done(d->st_bytes); - } - - // -------------------------------------------------------------------- - // packets - - if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && packets_sum)) { - d->enabled_packets = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_packets)) { - char id[RRD_ID_LENGTH_MAX + 1]; - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(id, RRD_ID_LENGTH_MAX, "%s_packets", d->id); - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id); - - d->st_packets = rrdset_create_localhost( - RRD_TYPE_TC - , id - , name - , d->family ? d->family : d->id - , RRD_TYPE_TC ".qos_packets" - , "Class Packets" - , "packets/s" - , "tc" - , NULL - , 7010 - , localhost->rrd_update_every - , d->enabled_all_classes_qdiscs ? 
RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED - ); - } - else { - rrdset_next(d->st_packets); - - if(unlikely(d->name_updated)) { - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id); - rrdset_set_name(d->st_packets, name); - } - - // FIXME - // update the family - } - - for(c = d->classes ; c ; c = c->next) { - if(unlikely(!c->render)) continue; - - if(unlikely(!c->rd_packets)) - c->rd_packets = rrddim_add(d->st_packets, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL); - else if(unlikely(c->name_updated)) - rrddim_set_name(d->st_packets, c->rd_packets, c->name); - - rrddim_set_by_pointer(d->st_packets, c->rd_packets, c->packets); - } - rrdset_done(d->st_packets); - } - - // -------------------------------------------------------------------- - // dropped - - if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && dropped_sum)) { - d->enabled_dropped = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_dropped)) { - char id[RRD_ID_LENGTH_MAX + 1]; - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(id, RRD_ID_LENGTH_MAX, "%s_dropped", d->id); - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id); - - d->st_dropped = rrdset_create_localhost( - RRD_TYPE_TC - , id - , name - , d->family ? d->family : d->id - , RRD_TYPE_TC ".qos_dropped" - , "Class Dropped Packets" - , "packets/s" - , "tc" - , NULL - , 7020 - , localhost->rrd_update_every - , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED - ); - } - else { - rrdset_next(d->st_dropped); - - if(unlikely(d->name_updated)) { - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id); - rrdset_set_name(d->st_dropped, name); - } - - // FIXME - // update the family - } - - for(c = d->classes ; c ; c = c->next) { - if(unlikely(!c->render)) continue; - - if(unlikely(!c->rd_dropped)) - c->rd_dropped = rrddim_add(d->st_dropped, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL); - else if(unlikely(c->name_updated)) - rrddim_set_name(d->st_dropped, c->rd_dropped, c->name); - - rrddim_set_by_pointer(d->st_dropped, c->rd_dropped, c->dropped); - } - rrdset_done(d->st_dropped); - } - - // -------------------------------------------------------------------- - // tokens - - if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && tokens_sum)) { - d->enabled_tokens = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_tokens)) { - char id[RRD_ID_LENGTH_MAX + 1]; - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(id, RRD_ID_LENGTH_MAX, "%s_tokens", d->id); - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id); - - d->st_tokens = rrdset_create_localhost( - RRD_TYPE_TC - , id - , name - , d->family ? 
d->family : d->id - , RRD_TYPE_TC ".qos_tokens" - , "Class Tokens" - , "tokens" - , "tc" - , NULL - , 7030 - , localhost->rrd_update_every - , RRDSET_TYPE_LINE - ); - } - else { - rrdset_next(d->st_tokens); - - if(unlikely(d->name_updated)) { - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id); - rrdset_set_name(d->st_tokens, name); - } - - // FIXME - // update the family - } - - for(c = d->classes ; c ; c = c->next) { - if(unlikely(!c->render)) continue; - - if(unlikely(!c->rd_tokens)) { - c->rd_tokens = rrddim_add(d->st_tokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else if(unlikely(c->name_updated)) - rrddim_set_name(d->st_tokens, c->rd_tokens, c->name); - - rrddim_set_by_pointer(d->st_tokens, c->rd_tokens, c->tokens); - } - rrdset_done(d->st_tokens); - } - - // -------------------------------------------------------------------- - // ctokens - - if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && ctokens_sum)) { - d->enabled_ctokens = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_ctokens)) { - char id[RRD_ID_LENGTH_MAX + 1]; - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(id, RRD_ID_LENGTH_MAX, "%s_ctokens", d->id); - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id); - - d->st_ctokens = rrdset_create_localhost( - RRD_TYPE_TC - , id - , name - , d->family ? d->family : d->id - , RRD_TYPE_TC ".qos_ctokens" - , "Class cTokens" - , "ctokens" - , "tc" - , NULL - , 7040 - , localhost->rrd_update_every - , RRDSET_TYPE_LINE - ); - } - else { - debug(D_TC_LOOP, "TC: Updating _ctokens chart for device '%s'", d->name?d->name:d->id); - rrdset_next(d->st_ctokens); - - if(unlikely(d->name_updated)) { - char name[RRD_ID_LENGTH_MAX + 1]; - snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id); - rrdset_set_name(d->st_ctokens, name); - } - - // FIXME - // update the family - } - - for(c = d->classes ; c ; c = c->next) { - if(unlikely(!c->render)) continue; - - if(unlikely(!c->rd_ctokens)) - c->rd_ctokens = rrddim_add(d->st_ctokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE); - else if(unlikely(c->name_updated)) - rrddim_set_name(d->st_ctokens, c->rd_ctokens, c->name); - - rrddim_set_by_pointer(d->st_ctokens, c->rd_ctokens, c->ctokens); - } - rrdset_done(d->st_ctokens); - } - - tc_device_classes_cleanup(d); -} - -static inline void tc_device_set_class_name(struct tc_device *d, char *id, char *name) { - if(unlikely(!name || !*name)) return; - - struct tc_class *c = tc_class_index_find(d, id, 0); - if(likely(c)) { - if(likely(c->name)) { - if(!strcmp(c->name, name)) return; - freez(c->name); - c->name = NULL; - } - - if(likely(name && *name && strcmp(c->id, name) != 0)) { - debug(D_TC_LOOP, "TC: Setting device '%s', class '%s' name to '%s'", d->id, id, name); - c->name = strdupz(name); - c->name_updated = 1; - } - } -} - -static inline void tc_device_set_device_name(struct tc_device *d, char *name) { - if(unlikely(!name || !*name)) return; - - if(d->name) { - if(!strcmp(d->name, name)) return; - freez(d->name); - d->name = NULL; - } - - if(likely(name && *name && strcmp(d->id, name) != 0)) { - debug(D_TC_LOOP, "TC: Setting device '%s' name to '%s'", d->id, name); - d->name = strdupz(name); - d->name_updated = 1; - } -} - -static inline void tc_device_set_device_family(struct tc_device *d, char *family) { - freez(d->family); - d->family = NULL; - - if(likely(family && *family && strcmp(d->id, family) != 0)) { - debug(D_TC_LOOP, 
"TC: Setting device '%s' family to '%s'", d->id, family); - d->family = strdupz(family); - d->family_updated = 1; - } - // no need for null termination - it is already null -} - -static inline struct tc_device *tc_device_create(char *id) -{ - struct tc_device *d = tc_device_index_find(id, 0); - - if(!d) { - debug(D_TC_LOOP, "TC: Creating device '%s'", id); - - d = callocz(1, sizeof(struct tc_device)); - - d->id = strdupz(id); - d->hash = simple_hash(d->id); - d->enabled = (char)-1; - - avl_init(&d->classes_index, tc_class_compare); - if(unlikely(tc_device_index_add(d) != d)) - error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", d->id); - - if(!tc_device_root) { - tc_device_root = d; - } - else { - d->next = tc_device_root; - tc_device_root->prev = d; - tc_device_root = d; - } - } - - return(d); -} - -static inline struct tc_class *tc_class_add(struct tc_device *n, char *id, char qdisc, char *parentid, char *leafid) -{ - struct tc_class *c = tc_class_index_find(n, id, 0); - - if(!c) { - debug(D_TC_LOOP, "TC: Creating in device '%s', class id '%s', parentid '%s', leafid '%s'", n->id, id, parentid?parentid:"", leafid?leafid:""); - - c = callocz(1, sizeof(struct tc_class)); - - if(unlikely(!n->classes)) - n->classes = c; - - else if(likely(n->last_class)) { - n->last_class->next = c; - c->prev = n->last_class; - } - - n->last_class = c; - - c->id = strdupz(id); - c->hash = simple_hash(c->id); - - c->isqdisc = qdisc; - if(parentid && *parentid) { - c->parentid = strdupz(parentid); - c->parent_hash = simple_hash(c->parentid); - } - - if(leafid && *leafid) { - c->leafid = strdupz(leafid); - c->leaf_hash = simple_hash(c->leafid); - } - - if(unlikely(tc_class_index_add(n, c) != c)) - error("plugin_tc: INTERNAL ERROR: attempt index class '%s' on device '%s': already exists", c->id, n->id); - } - return(c); -} - -static inline void tc_device_free(struct tc_device *n) -{ - if(n->next) n->next->prev = n->prev; - if(n->prev) n->prev->next = n->next; - if(tc_device_root == n) { - if(n->next) tc_device_root = n->next; - else tc_device_root = n->prev; - } - - if(unlikely(tc_device_index_del(n) != n)) - error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", n->id); - - while(n->classes) tc_class_free(n, n->classes); - - freez(n->id); - freez(n->name); - freez(n->family); - freez(n); -} - -static inline void tc_device_free_all() -{ - while(tc_device_root) - tc_device_free(tc_device_root); -} - -#define PLUGINSD_MAX_WORDS 20 - -static inline int tc_space(char c) { - switch(c) { - case ' ': - case '\t': - case '\r': - case '\n': - return 1; - - default: - return 0; - } -} - -static inline void tc_split_words(char *str, char **words, int max_words) { - char *s = str; - int i = 0; - - // skip all white space - while(tc_space(*s)) s++; - - // store the first word - words[i++] = s; - - // while we have something - while(*s) { - // if it is a space - if(unlikely(tc_space(*s))) { - - // terminate the word - *s++ = '\0'; - - // skip all white space - while(tc_space(*s)) s++; - - // if we reached the end, stop - if(!*s) break; - - // store the next word - if(i < max_words) words[i++] = s; - else break; - } - else s++; - } - - // terminate the words - while(i < max_words) words[i++] = NULL; -} - -static pid_t tc_child_pid = 0; - -static void tc_main_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - if(tc_child_pid) 
{ - info("TC: killing with SIGTERM tc-qos-helper process %d", tc_child_pid); - if(killpid(tc_child_pid, SIGTERM) != -1) { - siginfo_t info; - - info("TC: waiting for tc plugin child process pid %d to exit...", tc_child_pid); - waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED); - // info("TC: finished tc plugin child process pid %d.", tc_child_pid); - } - - tc_child_pid = 0; - } - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *tc_main(void *ptr) { - netdata_thread_cleanup_push(tc_main_cleanup, ptr); - - struct rusage thread; - - char buffer[TC_LINE_MAX+1] = ""; - char *words[PLUGINSD_MAX_WORDS] = { NULL }; - - uint32_t BEGIN_HASH = simple_hash("BEGIN"); - uint32_t END_HASH = simple_hash("END"); - uint32_t QDISC_HASH = simple_hash("qdisc"); - uint32_t CLASS_HASH = simple_hash("class"); - uint32_t SENT_HASH = simple_hash("Sent"); - uint32_t LENDED_HASH = simple_hash("lended:"); - uint32_t TOKENS_HASH = simple_hash("tokens:"); - uint32_t SETDEVICENAME_HASH = simple_hash("SETDEVICENAME"); - uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP"); - uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME"); - uint32_t WORKTIME_HASH = simple_hash("WORKTIME"); -#ifdef DETACH_PLUGINS_FROM_NETDATA - uint32_t MYPID_HASH = simple_hash("MYPID"); -#endif - uint32_t first_hash; - - snprintfz(buffer, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_plugins_dir); - char *tc_script = config_get("plugin:tc", "script to run to get tc values", buffer); - - while(!netdata_exit) { - FILE *fp; - struct tc_device *device = NULL; - struct tc_class *class = NULL; - - snprintfz(buffer, TC_LINE_MAX, "exec %s %d", tc_script, localhost->rrd_update_every); - debug(D_TC_LOOP, "executing '%s'", buffer); - - fp = mypopen(buffer, (pid_t *)&tc_child_pid); - if(unlikely(!fp)) { - error("TC: Cannot popen(\"%s\", \"r\").", buffer); - goto cleanup; - } - - while(fgets(buffer, TC_LINE_MAX, fp) != NULL) { - if(unlikely(netdata_exit)) break; - - buffer[TC_LINE_MAX] = '\0'; - // debug(D_TC_LOOP, "TC: read '%s'", buffer); - - tc_split_words(buffer, words, PLUGINSD_MAX_WORDS); - - if(unlikely(!words[0] || !*words[0])) { - // debug(D_TC_LOOP, "empty line"); - continue; - } - // else debug(D_TC_LOOP, "First word is '%s'", words[0]); - - first_hash = simple_hash(words[0]); - - if(unlikely(device && ((first_hash == CLASS_HASH && strcmp(words[0], "class") == 0) || (first_hash == QDISC_HASH && strcmp(words[0], "qdisc") == 0)))) { - // debug(D_TC_LOOP, "CLASS line on class id='%s', parent='%s', parentid='%s', leaf='%s', leafid='%s'", words[2], words[3], words[4], words[5], words[6]); - - char *type = words[1]; // the class/qdisc type: htb, fq_codel, etc - char *id = words[2]; // the class/qdisc major:minor - char *parent = words[3]; // the word 'parent' or 'root' - char *parentid = words[4]; // parentid - char *leaf = words[5]; // the word 'leaf' - char *leafid = words[6]; // leafid - - int parent_is_root = 0; - int parent_is_parent = 0; - if(likely(parent)) { - parent_is_parent = !strcmp(parent, "parent"); - - if(!parent_is_parent) - parent_is_root = !strcmp(parent, "root"); - } - - if(likely(type && id && (parent_is_root || parent_is_parent))) { - char qdisc = 0; - - if(first_hash == QDISC_HASH) { - qdisc = 1; - - if(!strcmp(type, "ingress")) { - // we don't want to get the ingress qdisc - // there should be an IFB interface for this - - class = NULL; - continue; - } - - if(parent_is_parent && parentid) { - // eliminate the minor number from parentid - // why: parentid is the id of the parent class - // but 
major: is also the id of the parent qdisc
-
-                            char *s = parentid;
-                            while(*s && *s != ':') s++;
-                            if(*s == ':') s[1] = '\0';
-                        }
-                    }
-
-                    if(parent_is_root) {
-                        parentid = NULL;
-                        leafid = NULL;
-                    }
-                    else if(!leaf || strcmp(leaf, "leaf") != 0)
-                        leafid = NULL;
-
-                    char leafbuf[20 + 1] = "";
-                    if(leafid && leafid[strlen(leafid) - 1] == ':') {
-                        strncpyz(leafbuf, leafid, 20 - 1);
-                        strcat(leafbuf, "1");
-                        leafid = leafbuf;
-                    }
-
-                    class = tc_class_add(device, id, qdisc, parentid, leafid);
-                }
-                else {
-                    // clear the last class
-                    class = NULL;
-                }
-            }
-            else if(unlikely(first_hash == END_HASH && strcmp(words[0], "END") == 0)) {
-                // debug(D_TC_LOOP, "END line");
-
-                if(likely(device)) {
-                    netdata_thread_disable_cancelability();
-                    tc_device_commit(device);
-                    // tc_device_free(device);
-                    netdata_thread_enable_cancelability();
-                }
-
-                device = NULL;
-                class = NULL;
-            }
-            else if(unlikely(first_hash == BEGIN_HASH && strcmp(words[0], "BEGIN") == 0)) {
-                // debug(D_TC_LOOP, "BEGIN line on device '%s'", words[1]);
-
-                if(likely(words[1] && *words[1])) {
-                    device = tc_device_create(words[1]);
-                }
-                else {
-                    // tc_device_free(device);
-                    device = NULL;
-                }
-
-                class = NULL;
-            }
-            else if(unlikely(device && class && first_hash == SENT_HASH && strcmp(words[0], "Sent") == 0)) {
-                // debug(D_TC_LOOP, "SENT line '%s'", words[1]);
-                if(likely(words[1] && *words[1])) {
-                    class->bytes = str2ull(words[1]);
-                    class->updated = 1;
-                }
-                else {
-                    class->updated = 0;
-                }
-
-                if(likely(words[3] && *words[3]))
-                    class->packets = str2ull(words[3]);
-
-                if(likely(words[6] && *words[6]))
-                    class->dropped = str2ull(words[6]);
-
-                if(likely(words[8] && *words[8]))
-                    class->overlimits = str2ull(words[8]);
-
-                if(likely(words[10] && *words[10]))
-                    class->requeues = str2ull(words[10]);
-            }
-            else if(unlikely(device && class && class->updated && first_hash == LENDED_HASH && strcmp(words[0], "lended:") == 0)) {
-                // debug(D_TC_LOOP, "LENDED line '%s'", words[1]);
-                if(likely(words[1] && *words[1]))
-                    class->lended = str2ull(words[1]);
-
-                if(likely(words[3] && *words[3]))
-                    class->borrowed = str2ull(words[3]);
-
-                if(likely(words[5] && *words[5]))
-                    class->giants = str2ull(words[5]);
-            }
-            else if(unlikely(device && class && class->updated && first_hash == TOKENS_HASH && strcmp(words[0], "tokens:") == 0)) {
-                // debug(D_TC_LOOP, "TOKENS line '%s'", words[1]);
-                if(likely(words[1] && *words[1]))
-                    class->tokens = str2ull(words[1]);
-
-                if(likely(words[3] && *words[3]))
-                    class->ctokens = str2ull(words[3]);
-            }
-            else if(unlikely(device && first_hash == SETDEVICENAME_HASH && strcmp(words[0], "SETDEVICENAME") == 0)) {
-                // debug(D_TC_LOOP, "SETDEVICENAME line '%s'", words[1]);
-                if(likely(words[1] && *words[1]))
-                    tc_device_set_device_name(device, words[1]);
-            }
-            else if(unlikely(device && first_hash == SETDEVICEGROUP_HASH && strcmp(words[0], "SETDEVICEGROUP") == 0)) {
-                // debug(D_TC_LOOP, "SETDEVICEGROUP line '%s'", words[1]);
-                if(likely(words[1] && *words[1]))
-                    tc_device_set_device_family(device, words[1]);
-            }
-            else if(unlikely(device && first_hash == SETCLASSNAME_HASH && strcmp(words[0], "SETCLASSNAME") == 0)) {
-                // debug(D_TC_LOOP, "SETCLASSNAME line '%s' '%s'", words[1], words[2]);
-                char *id = words[1];
-                char *path = words[2];
-                if(likely(id && *id && path && *path))
-                    tc_device_set_class_name(device, id, path);
-            }
-            else if(unlikely(first_hash == WORKTIME_HASH && strcmp(words[0], "WORKTIME") == 0)) {
-                // debug(D_TC_LOOP, "WORKTIME line '%s' '%s'", words[1], words[2]);
-                getrusage(RUSAGE_THREAD, &thread);
-
-                static 
RRDSET *stcpu = NULL; - static RRDDIM *rd_user = NULL, *rd_system = NULL; - - if(unlikely(!stcpu)) { - stcpu = rrdset_create_localhost( - "netdata" - , "plugin_tc_cpu" - , NULL - , "tc.helper" - , NULL - , "NetData TC CPU usage" - , "milliseconds/s" - , "tc" - , NULL - , 135000 - , localhost->rrd_update_every - , RRDSET_TYPE_STACKED - ); - rd_user = rrddim_add(stcpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_system = rrddim_add(stcpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(stcpu); - - rrddim_set_by_pointer(stcpu, rd_user , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); - rrddim_set_by_pointer(stcpu, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); - rrdset_done(stcpu); - - static RRDSET *sttime = NULL; - static RRDDIM *rd_run_time = NULL; - - if(unlikely(!sttime)) { - sttime = rrdset_create_localhost( - "netdata" - , "plugin_tc_time" - , NULL - , "tc.helper" - , NULL - , "NetData TC script execution" - , "milliseconds/run" - , "tc" - , NULL - , 135001 - , localhost->rrd_update_every - , RRDSET_TYPE_AREA - ); - rd_run_time = rrddim_add(sttime, "run_time", "run time", 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(sttime); - - rrddim_set_by_pointer(sttime, rd_run_time, str2ll(words[1], NULL)); - rrdset_done(sttime); - - } -#ifdef DETACH_PLUGINS_FROM_NETDATA - else if(unlikely(first_hash == MYPID_HASH && (strcmp(words[0], "MYPID") == 0))) { - // debug(D_TC_LOOP, "MYPID line '%s'", words[1]); - char *id = words[1]; - pid_t pid = atol(id); - - if(likely(pid)) tc_child_pid = pid; - - debug(D_TC_LOOP, "TC: Child PID is %d.", tc_child_pid); - } -#endif - //else { - // debug(D_TC_LOOP, "IGNORED line"); - //} - } - - // fgets() failed or loop broke - int code = mypclose(fp, (pid_t)tc_child_pid); - tc_child_pid = 0; - - if(unlikely(device)) { - // tc_device_free(device); - device = NULL; - class = NULL; - } - - if(unlikely(netdata_exit)) { - tc_device_free_all(); - goto cleanup; - } - - if(code == 1 || code == 127) { - // 1 = DISABLE - // 127 = cannot even run it - error("TC: tc-qos-helper.sh exited with code %d. 
Disabling it.", code); - - tc_device_free_all(); - goto cleanup; - } - - sleep((unsigned int) localhost->rrd_update_every); - } - -cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/plugin_tc.h b/src/plugin_tc.h deleted file mode 100644 index 792c6496f..000000000 --- a/src/plugin_tc.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef NETDATA_PLUGIN_TC_H -#define NETDATA_PLUGIN_TC_H 1 - -extern void *tc_main(void *ptr); - -#endif /* NETDATA_PLUGIN_TC_H */ - diff --git a/src/plugins_d.c b/src/plugins_d.c deleted file mode 100644 index 5693dda06..000000000 --- a/src/plugins_d.c +++ /dev/null @@ -1,694 +0,0 @@ -#include "common.h" - -char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { NULL }; -char *netdata_configured_plugins_dir_base; - -struct plugind *pluginsd_root = NULL; - -static inline int pluginsd_space(char c) { - switch(c) { - case ' ': - case '\t': - case '\r': - case '\n': - case '=': - return 1; - - default: - return 0; - } -} - -inline int config_isspace(char c) { - switch(c) { - case ' ': - case '\t': - case '\r': - case '\n': - case ',': - return 1; - - default: - return 0; - } -} - -// split a text into words, respecting quotes -inline int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char)) { - char *s = str, quote = 0; - int i = 0, j; - - // skip all white space - while(unlikely(custom_isspace(*s))) s++; - - // check for quote - if(unlikely(*s == '\'' || *s == '"')) { - quote = *s; // remember the quote - s++; // skip the quote - } - - // store the first word - words[i++] = s; - - // while we have something - while(likely(*s)) { - // if it is escape - if(unlikely(*s == '\\' && s[1])) { - s += 2; - continue; - } - - // if it is quote - else if(unlikely(*s == quote)) { - quote = 0; - *s = ' '; - continue; - } - - // if it is a space - else if(unlikely(quote == 0 && custom_isspace(*s))) { - - // terminate the word - *s++ = '\0'; - - // skip all white space - while(likely(custom_isspace(*s))) s++; - - // check for quote - if(unlikely(*s == '\'' || *s == '"')) { - quote = *s; // remember the quote - s++; // skip the quote - } - - // if we reached the end, stop - if(unlikely(!*s)) break; - - // store the next word - if(likely(i < max_words)) words[i++] = s; - else break; - } - - // anything else - else s++; - } - - // terminate the words - j = i; - while(likely(j < max_words)) words[j++] = NULL; - - return i; -} - -inline int pluginsd_split_words(char *str, char **words, int max_words) { - return quoted_strings_splitter(str, words, max_words, pluginsd_space); -} - -inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations) { - int enabled = cd->enabled; - - if(!fp || !enabled) { - cd->enabled = 0; - return 0; - } - - size_t count = 0; - - char line[PLUGINSD_LINE_MAX + 1]; - - char *words[PLUGINSD_MAX_WORDS] = { NULL }; - uint32_t BEGIN_HASH = simple_hash(PLUGINSD_KEYWORD_BEGIN); - uint32_t END_HASH = simple_hash(PLUGINSD_KEYWORD_END); - uint32_t FLUSH_HASH = simple_hash(PLUGINSD_KEYWORD_FLUSH); - uint32_t CHART_HASH = simple_hash(PLUGINSD_KEYWORD_CHART); - uint32_t DIMENSION_HASH = simple_hash(PLUGINSD_KEYWORD_DIMENSION); - uint32_t DISABLE_HASH = simple_hash(PLUGINSD_KEYWORD_DISABLE); - uint32_t VARIABLE_HASH = simple_hash(PLUGINSD_KEYWORD_VARIABLE); - - RRDSET *st = NULL; - uint32_t hash; - - errno = 0; - clearerr(fp); - - if(unlikely(fileno(fp) == -1)) { - error("file descriptor given is not a valid 
stream"); - goto cleanup; - } - - while(!ferror(fp)) { - if(unlikely(netdata_exit)) break; - - char *r = fgets(line, PLUGINSD_LINE_MAX, fp); - if(unlikely(!r)) { - error("read failed"); - break; - } - - if(unlikely(netdata_exit)) break; - - line[PLUGINSD_LINE_MAX] = '\0'; - - int w = pluginsd_split_words(line, words, PLUGINSD_MAX_WORDS); - char *s = words[0]; - if(unlikely(!s || !*s || !w)) { - continue; - } - - // debug(D_PLUGINSD, "PLUGINSD: words 0='%s' 1='%s' 2='%s' 3='%s' 4='%s' 5='%s' 6='%s' 7='%s' 8='%s' 9='%s'", words[0], words[1], words[2], words[3], words[4], words[5], words[6], words[7], words[8], words[9]); - - if(likely(!simple_hash_strcmp(s, "SET", &hash))) { - char *dimension = words[1]; - char *value = words[2]; - - if(unlikely(!dimension || !*dimension)) { - error("requested a SET on chart '%s' of host '%s', without a dimension. Disabling it.", st->id, host->hostname); - enabled = 0; - break; - } - - if(unlikely(!value || !*value)) value = NULL; - - if(unlikely(!st)) { - error("requested a SET on dimension %s with value %s on host '%s', without a BEGIN. Disabling it.", dimension, value?value:"<nothing>", host->hostname); - enabled = 0; - break; - } - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - debug(D_PLUGINSD, "is setting dimension %s/%s to %s", st->id, dimension, value?value:"<nothing>"); - - if(value) { - RRDDIM *rd = rrddim_find(st, dimension); - if(unlikely(!rd)) { - error("requested a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.", dimension, st->name, st->id, st->rrdhost->hostname); - enabled = 0; - break; - } - else - rrddim_set_by_pointer(st, rd, strtoll(value, NULL, 0)); - } - } - else if(likely(hash == BEGIN_HASH && !strcmp(s, PLUGINSD_KEYWORD_BEGIN))) { - char *id = words[1]; - char *microseconds_txt = words[2]; - - if(unlikely(!id)) { - error("requested a BEGIN without a chart id for host '%s'. Disabling it.", host->hostname); - enabled = 0; - break; - } - - st = rrdset_find(host, id); - if(unlikely(!st)) { - error("requested a BEGIN on chart '%s', which does not exist on host '%s'. Disabling it.", id, host->hostname); - enabled = 0; - break; - } - - if(likely(st->counter_done)) { - usec_t microseconds = 0; - if(microseconds_txt && *microseconds_txt) microseconds = str2ull(microseconds_txt); - - if(likely(microseconds)) { - if(trust_durations) - rrdset_next_usec_unfiltered(st, microseconds); - else - rrdset_next_usec(st, microseconds); - } - else rrdset_next(st); - } - } - else if(likely(hash == END_HASH && !strcmp(s, PLUGINSD_KEYWORD_END))) { - if(unlikely(!st)) { - error("requested an END, without a BEGIN on host '%s'. 
Disabling it.", host->hostname); - enabled = 0; - break; - } - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - debug(D_PLUGINSD, "requested an END on chart %s", st->id); - - rrdset_done(st); - st = NULL; - - count++; - } - else if(likely(hash == CHART_HASH && !strcmp(s, PLUGINSD_KEYWORD_CHART))) { - st = NULL; - - char *type = words[1]; - char *name = words[2]; - char *title = words[3]; - char *units = words[4]; - char *family = words[5]; - char *context = words[6]; - char *chart = words[7]; - char *priority_s = words[8]; - char *update_every_s = words[9]; - char *options = words[10]; - char *plugin = words[11]; - char *module = words[12]; - - // parse the id from type - char *id = NULL; - if(likely(type && (id = strchr(type, '.')))) { - *id = '\0'; - id++; - } - - // make sure we have the required variables - if(unlikely(!type || !*type || !id || !*id)) { - error("requested a CHART, without a type.id, on host '%s'. Disabling it.", host->hostname); - enabled = 0; - break; - } - - // parse the name, and make sure it does not include 'type.' - if(unlikely(name && *name)) { - // when data are coming from slaves - // name will be type.name - // so we have to remove 'type.' from name too - size_t len = strlen(type); - if(strncmp(type, name, len) == 0 && name[len] == '.') - name = &name[len + 1]; - - // if the name is the same with the id, - // or is just 'NULL', clear it. - if(unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0)) - name = NULL; - } - - int priority = 1000; - if(likely(priority_s && *priority_s)) priority = str2i(priority_s); - - int update_every = cd->update_every; - if(likely(update_every_s && *update_every_s)) update_every = str2i(update_every_s); - if(unlikely(!update_every)) update_every = cd->update_every; - - RRDSET_TYPE chart_type = RRDSET_TYPE_LINE; - if(unlikely(chart)) chart_type = rrdset_type_id(chart); - - if(unlikely(name && !*name)) name = NULL; - if(unlikely(family && !*family)) family = NULL; - if(unlikely(context && !*context)) context = NULL; - if(unlikely(!title)) title = ""; - if(unlikely(!units)) units = "unknown"; - - debug(D_PLUGINSD, "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d" - , type, id - , name?name:"" - , family?family:"" - , context?context:"" - , rrdset_type_name(chart_type) - , priority - , update_every - ); - - st = rrdset_create( - host - , type - , id - , name - , family - , context - , title - , units - , (plugin && *plugin)?plugin:cd->filename - , module - , priority - , update_every - , chart_type - ); - - if(options && *options) { - if(strstr(options, "obsolete")) - rrdset_is_obsolete(st); - else - rrdset_isnot_obsolete(st); - - if(strstr(options, "detail")) - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - else - rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); - - if(strstr(options, "hidden")) - rrdset_flag_set(st, RRDSET_FLAG_HIDDEN); - else - rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN); - - if(strstr(options, "store_first")) - rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST); - else - rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); - } - else { - rrdset_isnot_obsolete(st); - rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); - rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); - } - } - else if(likely(hash == DIMENSION_HASH && !strcmp(s, PLUGINSD_KEYWORD_DIMENSION))) { - char *id = words[1]; - char *name = words[2]; - char *algorithm = words[3]; - char *multiplier_s = words[4]; - char *divisor_s = words[5]; - char *options = words[6]; 
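/*
 * For reference, the stream consumed by this parser looks like the
 * following (a minimal sketch; "example.random" and "random1" are
 * hypothetical chart and dimension names, and the field order matches
 * the words[] indices used by the CHART, DIMENSION, BEGIN, SET and END
 * handlers in this function):
 *
 *   CHART example.random '' 'A random number' 'value' random random line 90000 1
 *   DIMENSION random1 '' absolute 1 1
 *   BEGIN example.random
 *   SET random1 = 42
 *   END
 */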
- - if(unlikely(!id || !*id)) { - error("requested a DIMENSION, without an id, host '%s' and chart '%s'. Disabling it.", host->hostname, st?st->id:"UNSET"); - enabled = 0; - break; - } - - if(unlikely(!st)) { - error("requested a DIMENSION, without a CHART, on host '%s'. Disabling it.", host->hostname); - enabled = 0; - break; - } - - long multiplier = 1; - if(multiplier_s && *multiplier_s) multiplier = strtol(multiplier_s, NULL, 0); - if(unlikely(!multiplier)) multiplier = 1; - - long divisor = 1; - if(likely(divisor_s && *divisor_s)) divisor = strtol(divisor_s, NULL, 0); - if(unlikely(!divisor)) divisor = 1; - - if(unlikely(!algorithm || !*algorithm)) algorithm = "absolute"; - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - debug(D_PLUGINSD, "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'" - , st->id - , id - , name?name:"" - , rrd_algorithm_name(rrd_algorithm_id(algorithm)) - , multiplier - , divisor - , options?options:"" - ); - - RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm)); - rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN); - rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS); - if(options && *options) { - if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN); - if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS); - if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS); - } - } - else if(likely(hash == VARIABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_VARIABLE))) { - char *name = words[1]; - char *value = words[2]; - int global = (st)?0:1; - - if(name && *name) { - if((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) { - global = 1; - name = words[2]; - value = words[3]; - } - else if((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) { - global = 0; - name = words[2]; - value = words[3]; - } - } - - if(unlikely(!name || !*name)) { - error("requested a VARIABLE on host '%s', without a variable name. 
Disabling it.", host->hostname); - enabled = 0; - break; - } - - if(unlikely(!value || !*value)) - value = NULL; - - if(value) { - char *endptr = NULL; - calculated_number v = (calculated_number)str2ld(value, &endptr); - - if(unlikely(endptr && *endptr)) { - if(endptr == value) - error("the value '%s' of VARIABLE '%s' on host '%s' cannot be parsed as a number", value, name, host->hostname); - else - error("the value '%s' of VARIABLE '%s' on host '%s' has leftovers: '%s'", value, name, host->hostname, endptr); - } - - if(global) { - RRDVAR *rv = rrdvar_custom_host_variable_create(host, name); - if (rv) rrdvar_custom_host_variable_set(host, rv, v); - else error("cannot find/create HOST VARIABLE '%s' on host '%s'", name, host->hostname); - } - else if(st) { - RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, name); - if (rs) rrdsetvar_custom_chart_variable_set(rs, v); - else error("cannot find/create CHART VARIABLE '%s' on host '%s', chart '%s'", name, host->hostname, st->id); - } - else - error("cannot find/create CHART VARIABLE '%s' on host '%s' without a chart", name, host->hostname); - } - else - error("cannot set %s VARIABLE '%s' on host '%s' to an empty value", (global)?"HOST":"CHART", name, host->hostname); - } - else if(likely(hash == FLUSH_HASH && !strcmp(s, PLUGINSD_KEYWORD_FLUSH))) { - debug(D_PLUGINSD, "requested a FLUSH"); - st = NULL; - } - else if(unlikely(hash == DISABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_DISABLE))) { - info("called DISABLE. Disabling it."); - enabled = 0; - break; - } - else { - error("sent command '%s' which is not known by netdata, for host '%s'. Disabling it.", s, host->hostname); - enabled = 0; - break; - } - } - -cleanup: - cd->enabled = enabled; - - if(likely(count)) { - cd->successful_collections += count; - cd->serial_failures = 0; - } - else - cd->serial_failures++; - - return count; -} - -static void pluginsd_worker_thread_cleanup(void *arg) { - struct plugind *cd = (struct plugind *)arg; - - if(cd->enabled && !cd->obsolete) { - cd->obsolete = 1; - - info("data collection thread exiting"); - - if (cd->pid) { - siginfo_t info; - info("killing child process pid %d", cd->pid); - if (killpid(cd->pid, SIGTERM) != -1) { - info("waiting for child process pid %d to exit...", cd->pid); - waitid(P_PID, (id_t) cd->pid, &info, WEXITED); - } - cd->pid = 0; - } - } -} - -void *pluginsd_worker_thread(void *arg) { - netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg); - - struct plugind *cd = (struct plugind *)arg; - - cd->obsolete = 0; - size_t count = 0; - - while(!netdata_exit) { - FILE *fp = mypopen(cd->cmd, &cd->pid); - if(unlikely(!fp)) { - error("Cannot popen(\"%s\", \"r\").", cd->cmd); - break; - } - - info("connected to '%s' running on pid %d", cd->fullfilename, cd->pid); - count = pluginsd_process(localhost, cd, fp, 0); - error("'%s' (pid %d) disconnected after %zu successful data collections (ENDs).", cd->fullfilename, cd->pid, count); - killpid(cd->pid, SIGTERM); - - // get the return code - int code = mypclose(fp, cd->pid); - - if(code != 0) { - // the plugin reports failure - - if(likely(!cd->successful_collections)) { - // nothing collected - disable it - error("'%s' (pid %d) exited with error code %d. Disabling it.", cd->fullfilename, cd->pid, code); - cd->enabled = 0; - } - else { - // we have collected something - - if(likely(cd->serial_failures <= 10)) { - error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). 
%s", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is disabled."); - sleep((unsigned int) (cd->update_every * 10)); - } - else { - error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). We tried %zu times to restart it, but it failed to generate data. Disabling it.", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->serial_failures); - cd->enabled = 0; - } - } - } - else { - // the plugin reports success - - if(unlikely(!cd->successful_collections)) { - // we have collected nothing so far - - if(likely(cd->serial_failures <= 10)) { - error("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", cd->fullfilename, cd->pid, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is now disabled."); - sleep((unsigned int) (cd->update_every * 10)); - } - else { - error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0), but we have tried %zu times to collect something. Disabling it.", cd->fullfilename, cd->pid, cd->serial_failures); - cd->enabled = 0; - } - } - else - sleep((unsigned int) cd->update_every); - } - cd->pid = 0; - - if(unlikely(!cd->enabled)) break; - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - -static void pluginsd_main_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - info("cleaning up..."); - - struct plugind *cd; - for (cd = pluginsd_root; cd; cd = cd->next) { - if (cd->enabled && !cd->obsolete) { - info("stopping plugin thread: %s", cd->id); - netdata_thread_cancel(cd->thread); - } - } - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *pluginsd_main(void *ptr) { - netdata_thread_cleanup_push(pluginsd_main_cleanup, ptr); - - int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1); - int scan_frequency = (int) config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60); - if(scan_frequency < 1) scan_frequency = 1; - - // store the errno for each plugins directory - // so that we don't log broken directories on each loop - int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 }; - - while(!netdata_exit) { - int idx; - const char *directory_name; - - for( idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]) ; idx++ ) { - if(unlikely(netdata_exit)) break; - - errno = 0; - DIR *dir = opendir(directory_name); - if(unlikely(!dir)) { - if(directory_errors[idx] != errno) { - directory_errors[idx] = errno; - error("cannot open plugins directory '%s'", directory_name); - } - continue; - } - - struct dirent *file = NULL; - while(likely((file = readdir(dir)))) { - if(unlikely(netdata_exit)) break; - - debug(D_PLUGINSD, "examining file '%s'", file->d_name); - - if(unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0)) continue; - - int len = (int) strlen(file->d_name); - if(unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN)) continue; - if(unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) { - debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX); - continue; - } - - char pluginname[CONFIG_MAX_NAME + 1]; - snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - 
PLUGINSD_FILE_SUFFIX_LEN), file->d_name); - int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run); - - if(unlikely(!enabled)) { - debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name); - continue; - } - - // check if it runs already - struct plugind *cd; - for(cd = pluginsd_root ; cd ; cd = cd->next) - if(unlikely(strcmp(cd->filename, file->d_name) == 0)) break; - - if(likely(cd && !cd->obsolete)) { - debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename); - continue; - } - - // it is not running - // allocate a new one, or use the obsolete one - if(unlikely(!cd)) { - cd = callocz(sizeof(struct plugind), 1); - - snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname); - - strncpyz(cd->filename, file->d_name, FILENAME_MAX); - snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename); - - cd->enabled = enabled; - cd->update_every = (int) config_get_number(cd->id, "update every", localhost->rrd_update_every); - cd->started_t = now_realtime_sec(); - - char *def = ""; - snprintfz(cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every, config_get(cd->id, "command options", def)); - - // link it - if(likely(pluginsd_root)) cd->next = pluginsd_root; - pluginsd_root = cd; - - // it is not currently running - cd->obsolete = 1; - - if(cd->enabled) { - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PLUGINSD[%s]", pluginname); - // spawn a new thread for it - netdata_thread_create(&cd->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, pluginsd_worker_thread, cd); - } - } - } - - closedir(dir); - } - - sleep((unsigned int) scan_frequency); - } - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/plugins_d.h b/src/plugins_d.h deleted file mode 100644 index 692d7cae1..000000000 --- a/src/plugins_d.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef NETDATA_PLUGINS_D_H -#define NETDATA_PLUGINS_D_H 1 - -#define PLUGINSD_FILE_SUFFIX ".plugin" -#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX) -#define PLUGINSD_CMD_MAX (FILENAME_MAX*2) - -#define PLUGINSD_KEYWORD_CHART "CHART" -#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION" -#define PLUGINSD_KEYWORD_BEGIN "BEGIN" -#define PLUGINSD_KEYWORD_END "END" -#define PLUGINSD_KEYWORD_FLUSH "FLUSH" -#define PLUGINSD_KEYWORD_DISABLE "DISABLE" -#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE" - -#define PLUGINSD_LINE_MAX 1024 -#define PLUGINSD_MAX_WORDS 20 - -#define PLUGINSD_MAX_DIRECTORIES 20 -extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES]; - -struct plugind { - char id[CONFIG_MAX_NAME+1]; // config node id - - char filename[FILENAME_MAX+1]; // just the filename - char fullfilename[FILENAME_MAX+1]; // with path - char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes - - volatile pid_t pid; - netdata_thread_t thread; - - size_t successful_collections; // the number of times we have seen - // values collected from this plugin - - size_t serial_failures; // the number of times the plugin started - // without collecting values - - int update_every; // the plugin default data collection frequency - volatile sig_atomic_t obsolete; // do not touch this structure after setting this to 1 - volatile sig_atomic_t enabled; // if this is enabled or not - - time_t started_t; - - struct plugind *next; -}; - -extern struct plugind *pluginsd_root; - -extern void *pluginsd_main(void *ptr); - -extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations); -extern int pluginsd_split_words(char 
*str, char **words, int max_words); - -extern int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char)); -extern int config_isspace(char c); - -#endif /* NETDATA_PLUGINS_D_H */ diff --git a/src/popen.c b/src/popen.c deleted file mode 100644 index eda1c05b9..000000000 --- a/src/popen.c +++ /dev/null @@ -1,204 +0,0 @@ -#include "common.h" - -/* -struct mypopen { - pid_t pid; - FILE *fp; - struct mypopen *next; - struct mypopen *prev; -}; - -static struct mypopen *mypopen_root = NULL; - -static void mypopen_add(FILE *fp, pid_t pid) { - struct mypopen *mp = malloc(sizeof(struct mypopen)); - if(!mp) { - fatal("Cannot allocate %zu bytes", sizeof(struct mypopen)); - return; - } - - mp->fp = fp; - mp->pid = pid; - mp->next = mypopen_root; - mp->prev = NULL; - if(mypopen_root) mypopen_root->prev = mp; - mypopen_root = mp; -} - -static void mypopen_del(FILE *fp) { - struct mypopen *mp; - - for(mp = mypopen_root; mp; mp = mp->next) - if(mp->fp == fp) break; - - if(!mp) error("Cannot find mypopen() file pointer in open children."); - else { - if(mp->next) mp->next->prev = mp->prev; - if(mp->prev) mp->prev->next = mp->next; - if(mypopen_root == mp) mypopen_root = mp->next; - free(mp); - } - } -*/ -#define PIPE_READ 0 -#define PIPE_WRITE 1 - -FILE *mypopen(const char *command, volatile pid_t *pidptr) -{ - int pipefd[2]; - - if(pipe(pipefd) == -1) return NULL; - - int pid = fork(); - if(pid == -1) { - close(pipefd[PIPE_READ]); - close(pipefd[PIPE_WRITE]); - return NULL; - } - if(pid != 0) { - // the parent - *pidptr = pid; - close(pipefd[PIPE_WRITE]); - FILE *fp = fdopen(pipefd[PIPE_READ], "r"); - /*mypopen_add(fp, pid);*/ - return(fp); - } - // the child - - // close all files - int i; - for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i > 0; i--) - if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); - - // move the pipe to stdout - if(pipefd[PIPE_WRITE] != STDOUT_FILENO) { - dup2(pipefd[PIPE_WRITE], STDOUT_FILENO); - close(pipefd[PIPE_WRITE]); - } - -#ifdef DETACH_PLUGINS_FROM_NETDATA - // this was an attempt to detach the child and use the suspend mode charts.d - // unfortunately it does not work as expected. - - // fork again to become session leader - pid = fork(); - if(pid == -1) - error("pre-execution of command '%s' on pid %d: Cannot fork 2nd time.", command, getpid()); - - if(pid != 0) { - // the parent - exit(0); - } - - // set a new process group id for just this child - if( setpgid(0, 0) != 0 ) - error("pre-execution of command '%s' on pid %d: Cannot set a new process group.", command, getpid()); - - if( getpgid(0) != getpid() ) - error("pre-execution of command '%s' on pid %d: Cannot set a new process group. Process group set is incorrect. 
Expected %d, found %d", command, getpid(), getpid(), getpgid(0)); - - if( setsid() == -1 ) - error("pre-execution of command '%s' on pid %d: Cannot set session id.", command, getpid()); - - fprintf(stdout, "MYPID %d\n", getpid()); - fflush(NULL); -#endif - - // reset all signals - signals_unblock(); - signals_reset(); - - debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid()); - execl("/bin/sh", "sh", "-c", command, NULL); - exit(1); -} - -FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) { - int pipefd[2]; - - if(pipe(pipefd) == -1) - return NULL; - - int pid = fork(); - if(pid == -1) { - close(pipefd[PIPE_READ]); - close(pipefd[PIPE_WRITE]); - return NULL; - } - if(pid != 0) { - // the parent - *pidptr = pid; - close(pipefd[PIPE_WRITE]); - FILE *fp = fdopen(pipefd[PIPE_READ], "r"); - return(fp); - } - // the child - - // close all files - int i; - for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i > 0; i--) - if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i); - - // move the pipe to stdout - if(pipefd[PIPE_WRITE] != STDOUT_FILENO) { - dup2(pipefd[PIPE_WRITE], STDOUT_FILENO); - close(pipefd[PIPE_WRITE]); - } - - execle("/bin/sh", "sh", "-c", command, NULL, env); - exit(1); -} - -int mypclose(FILE *fp, pid_t pid) { - debug(D_EXIT, "Request to mypclose() on pid %d", pid); - - /*mypopen_del(fp);*/ - - // close the pipe fd - // this is required in musl - // without it the children do not exit - close(fileno(fp)); - - // close the pipe file pointer - fclose(fp); - - errno = 0; - - siginfo_t info; - if(waitid(P_PID, (id_t) pid, &info, WEXITED) != -1) { - switch(info.si_code) { - case CLD_EXITED: - if(info.si_status) - error("child pid %d exited with code %d.", info.si_pid, info.si_status); - return(info.si_status); - - case CLD_KILLED: - error("child pid %d killed by signal %d.", info.si_pid, info.si_status); - return(-1); - - case CLD_DUMPED: - error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status); - return(-2); - - case CLD_STOPPED: - error("child pid %d stopped by signal %d.", info.si_pid, info.si_status); - return(0); - - case CLD_TRAPPED: - error("child pid %d trapped by signal %d.", info.si_pid, info.si_status); - return(-4); - - case CLD_CONTINUED: - error("child pid %d continued by signal %d.", info.si_pid, info.si_status); - return(0); - - default: - error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status); - return(-5); - } - } - else - error("Cannot waitid() for pid %d", pid); - - return 0; -} diff --git a/src/popen.h b/src/popen.h deleted file mode 100644 index 3dd79bb4d..000000000 --- a/src/popen.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef NETDATA_POPEN_H -#define NETDATA_POPEN_H 1 - -#define PIPE_READ 0 -#define PIPE_WRITE 1 - -extern FILE *mypopen(const char *command, volatile pid_t *pidptr); -extern FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env); -extern int mypclose(FILE *fp, pid_t pid); - -#endif /* NETDATA_POPEN_H */ diff --git a/src/proc_diskstats.c b/src/proc_diskstats.c deleted file mode 100644 index 8cde3334b..000000000 --- a/src/proc_diskstats.c +++ /dev/null @@ -1,1438 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_DISK "disk" - -#define DISK_TYPE_UNKNOWN 0 -#define DISK_TYPE_PHYSICAL 1 -#define DISK_TYPE_PARTITION 2 -#define DISK_TYPE_VIRTUAL 3 - -#define CONFIG_SECTION_DISKSTATS "plugin:proc:/proc/diskstats" -#define DEFAULT_EXCLUDED_DISKS "loop* ram*" - -static struct disk { - char *disk; // the name of the 
disk (sda, sdb, etc, after being looked up) - char *device; // the device of the disk (before being looked up) - unsigned long major; - unsigned long minor; - int sector_size; - int type; - - char *mount_point; - - // disk options caching - int do_io; - int do_ops; - int do_mops; - int do_iotime; - int do_qops; - int do_util; - int do_backlog; - int do_bcache; - - int updated; - - int device_is_bcache; - - char *bcache_filename_dirty_data; - char *bcache_filename_writeback_rate; - char *bcache_filename_cache_congested; - char *bcache_filename_cache_available_percent; - char *bcache_filename_stats_five_minute_cache_hit_ratio; - char *bcache_filename_stats_hour_cache_hit_ratio; - char *bcache_filename_stats_day_cache_hit_ratio; - char *bcache_filename_stats_total_cache_hit_ratio; - char *bcache_filename_stats_total_cache_hits; - char *bcache_filename_stats_total_cache_misses; - char *bcache_filename_stats_total_cache_miss_collisions; - char *bcache_filename_stats_total_cache_bypass_hits; - char *bcache_filename_stats_total_cache_bypass_misses; - char *bcache_filename_stats_total_cache_readaheads; - - RRDSET *st_io; - RRDDIM *rd_io_reads; - RRDDIM *rd_io_writes; - - RRDSET *st_ops; - RRDDIM *rd_ops_reads; - RRDDIM *rd_ops_writes; - - RRDSET *st_qops; - RRDDIM *rd_qops_operations; - - RRDSET *st_backlog; - RRDDIM *rd_backlog_backlog; - - RRDSET *st_util; - RRDDIM *rd_util_utilization; - - RRDSET *st_mops; - RRDDIM *rd_mops_reads; - RRDDIM *rd_mops_writes; - - RRDSET *st_iotime; - RRDDIM *rd_iotime_reads; - RRDDIM *rd_iotime_writes; - - RRDSET *st_await; - RRDDIM *rd_await_reads; - RRDDIM *rd_await_writes; - - RRDSET *st_avgsz; - RRDDIM *rd_avgsz_reads; - RRDDIM *rd_avgsz_writes; - - RRDSET *st_svctm; - RRDDIM *rd_svctm_svctm; - - RRDSET *st_bcache_size; - RRDDIM *rd_bcache_dirty_size; - - RRDSET *st_bcache_usage; - RRDDIM *rd_bcache_available_percent; - - RRDSET *st_bcache_hit_ratio; - RRDDIM *rd_bcache_hit_ratio_5min; - RRDDIM *rd_bcache_hit_ratio_1hour; - RRDDIM *rd_bcache_hit_ratio_1day; - RRDDIM *rd_bcache_hit_ratio_total; - - RRDSET *st_bcache; - RRDDIM *rd_bcache_hits; - RRDDIM *rd_bcache_misses; - RRDDIM *rd_bcache_miss_collisions; - - RRDSET *st_bcache_bypass; - RRDDIM *rd_bcache_bypass_hits; - RRDDIM *rd_bcache_bypass_misses; - - RRDSET *st_bcache_rates; - RRDDIM *rd_bcache_rate_congested; - RRDDIM *rd_bcache_readaheads; - RRDDIM *rd_bcache_rate_writeback; - - struct disk *next; -} *disk_root = NULL; - -#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st) - -// static char *path_to_get_hw_sector_size = NULL; -// static char *path_to_get_hw_sector_size_partitions = NULL; -static char *path_to_sys_dev_block_major_minor_string = NULL; -static char *path_to_sys_block_device = NULL; -static char *path_to_sys_block_device_bcache = NULL; -static char *path_to_sys_devices_virtual_block_device = NULL; -static char *path_to_device_mapper = NULL; -static char *path_to_device_label = NULL; -static char *path_to_device_id = NULL; -static int name_disks_by_id = CONFIG_BOOLEAN_NO; - -static int global_enable_new_disks_detected_at_runtime = CONFIG_BOOLEAN_YES, - global_enable_performance_for_physical_disks = CONFIG_BOOLEAN_AUTO, - global_enable_performance_for_virtual_disks = CONFIG_BOOLEAN_AUTO, - global_enable_performance_for_partitions = CONFIG_BOOLEAN_NO, - global_do_io = CONFIG_BOOLEAN_AUTO, - global_do_ops = CONFIG_BOOLEAN_AUTO, - global_do_mops = CONFIG_BOOLEAN_AUTO, - global_do_iotime = CONFIG_BOOLEAN_AUTO, - global_do_qops = 
CONFIG_BOOLEAN_AUTO, - global_do_util = CONFIG_BOOLEAN_AUTO, - global_do_backlog = CONFIG_BOOLEAN_AUTO, - global_do_bcache = CONFIG_BOOLEAN_AUTO, - globals_initialized = 0, - global_cleanup_removed_disks = 1; - -static SIMPLE_PATTERN *excluded_disks = NULL; - -static unsigned long long int bcache_read_number_with_units(const char *filename) { - char buffer[50 + 1]; - if(read_file(filename, buffer, 50) == 0) { - static int unknown_units_error = 10; - - char *end = NULL; - long double value = str2ld(buffer, &end); - if(end && *end) { - if(*end == 'k') - return (unsigned long long int)(value * 1024.0); - else if(*end == 'M') - return (unsigned long long int)(value * 1024.0 * 1024.0); - else if(*end == 'G') - return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0); - else if(unknown_units_error > 0) { - error("bcache file '%s' provides value '%s' with unknown units '%s'", filename, buffer, end); - unknown_units_error--; - } - } - - return (unsigned long long int)value; - } - - return 0; -} - -static inline int is_major_enabled(int major) { - static int8_t *major_configs = NULL; - static size_t major_size = 0; - - if(major < 0) return 1; - - size_t wanted_size = (size_t)major + 1; - - if(major_size < wanted_size) { - major_configs = reallocz(major_configs, wanted_size * sizeof(int8_t)); - - size_t i; - for(i = major_size; i < wanted_size ; i++) - major_configs[i] = -1; - - major_size = wanted_size; - } - - if(major_configs[major] == -1) { - char buffer[CONFIG_MAX_NAME + 1]; - snprintfz(buffer, CONFIG_MAX_NAME, "performance metrics for disks with major %d", major); - major_configs[major] = (char)config_get_boolean(CONFIG_SECTION_DISKSTATS, buffer, 1); - } - - return (int)major_configs[major]; -} - -static inline int get_disk_name_from_path(const char *path, char *result, size_t result_size, unsigned long major, unsigned long minor, char *disk) { - char filename[FILENAME_MAX + 1]; - int found = 0; - - result_size--; - - DIR *dir = opendir(path); - if (!dir) { - error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'. 
Disabling device-mapper support.", disk, major, minor, path); - goto cleanup; - } - - struct dirent *de = NULL; - while ((de = readdir(dir))) { - if(de->d_type != DT_LNK) continue; - - snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name); - ssize_t len = readlink(filename, result, result_size); - if(len <= 0) { - error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot read link '%s'.", disk, major, minor, filename); - continue; - } - - result[len] = '\0'; - if(result[0] != '/') - snprintfz(filename, FILENAME_MAX, "%s/%s", path, result); - else - strncpyz(filename, result, FILENAME_MAX); - - struct stat sb; - if(stat(filename, &sb) == -1) { - error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot stat() file '%s'.", disk, major, minor, filename); - continue; - } - - if((sb.st_mode & S_IFMT) != S_IFBLK) { - // info("DEVICE-MAPPER ('%s', %lu:%lu): file '%s' is not a block device.", disk, major, minor, filename); - continue; - } - - if(major(sb.st_rdev) != major || minor(sb.st_rdev) != minor) { - // info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' does not match %lu:%lu.", disk, major, minor, filename, (unsigned long)major(sb.st_rdev), (unsigned long)minor(sb.st_rdev)); - continue; - } - - // info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' matches.", disk, major, minor, filename); - - strncpy(result, de->d_name, result_size); - found = 1; - break; - } - closedir(dir); - - -cleanup: - - if(!found) - result[0] = '\0'; - - return found; -} - -static inline char *get_disk_name(unsigned long major, unsigned long minor, char *disk) { - char result[FILENAME_MAX + 1] = ""; - - if(!path_to_device_mapper || !*path_to_device_mapper || !get_disk_name_from_path(path_to_device_mapper, result, FILENAME_MAX + 1, major, minor, disk)) - if(!path_to_device_label || !*path_to_device_label || !get_disk_name_from_path(path_to_device_label, result, FILENAME_MAX + 1, major, minor, disk)) - if(name_disks_by_id != CONFIG_BOOLEAN_YES || !path_to_device_id || !*path_to_device_id || !get_disk_name_from_path(path_to_device_id, result, FILENAME_MAX + 1, major, minor, disk)) - strncpy(result, disk, FILENAME_MAX); - - if(!result[0]) - strncpy(result, disk, FILENAME_MAX); - - netdata_fix_chart_name(result); - return strdup(result); -} - -static void get_disk_config(struct disk *d) { - int def_enable = global_enable_new_disks_detected_at_runtime; - - if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk))) - def_enable = CONFIG_BOOLEAN_NO; - - char var_name[4096 + 1]; - snprintfz(var_name, 4096, "plugin:proc:/proc/diskstats:%s", d->disk); - - def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable); - if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) { - // the user does not want any metrics for this disk - d->do_io = CONFIG_BOOLEAN_NO; - d->do_ops = CONFIG_BOOLEAN_NO; - d->do_mops = CONFIG_BOOLEAN_NO; - d->do_iotime = CONFIG_BOOLEAN_NO; - d->do_qops = CONFIG_BOOLEAN_NO; - d->do_util = CONFIG_BOOLEAN_NO; - d->do_backlog = CONFIG_BOOLEAN_NO; - d->do_bcache = CONFIG_BOOLEAN_NO; - } - else { - // this disk is enabled - // check its direct settings - - int def_performance = CONFIG_BOOLEAN_AUTO; - - // since this is 'on demand' we can figure the performance settings - // based on the type of disk - - if(!d->device_is_bcache) { - switch(d->type) { - default: - case DISK_TYPE_UNKNOWN: - break; - - case DISK_TYPE_PHYSICAL: - def_performance = global_enable_performance_for_physical_disks; - break; - - case DISK_TYPE_PARTITION: - def_performance = 
global_enable_performance_for_partitions; - break; - - case DISK_TYPE_VIRTUAL: - def_performance = global_enable_performance_for_virtual_disks; - break; - } - } - - // check if we have to disable performance for this disk - if(def_performance) - def_performance = is_major_enabled((int)d->major); - - // ------------------------------------------------------------ - // now we have def_performance and def_space - // to work further - - // def_performance - // check the user configuration (this will also show our 'on demand' decision) - def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance); - - int ddo_io = CONFIG_BOOLEAN_NO, - ddo_ops = CONFIG_BOOLEAN_NO, - ddo_mops = CONFIG_BOOLEAN_NO, - ddo_iotime = CONFIG_BOOLEAN_NO, - ddo_qops = CONFIG_BOOLEAN_NO, - ddo_util = CONFIG_BOOLEAN_NO, - ddo_backlog = CONFIG_BOOLEAN_NO, - ddo_bcache = CONFIG_BOOLEAN_NO; - - // we enable individual performance charts only when def_performance is not disabled - if(unlikely(def_performance != CONFIG_BOOLEAN_NO)) { - ddo_io = global_do_io, - ddo_ops = global_do_ops, - ddo_mops = global_do_mops, - ddo_iotime = global_do_iotime, - ddo_qops = global_do_qops, - ddo_util = global_do_util, - ddo_backlog = global_do_backlog, - ddo_bcache = global_do_bcache; - } - - d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io); - d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops); - d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops); - d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime); - d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops); - d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util); - d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog); - - if(d->device_is_bcache) - d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache); - else - d->do_bcache = 0; - } -} - -static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) { - static struct mountinfo *disk_mountinfo_root = NULL; - - struct disk *d; - - // search for it in our RAM list. - // this is sequential, but since we just walk through - // and the number of disks / partitions in a system - // should not be that many, it should be acceptable - for(d = disk_root; d ; d = d->next) - if(unlikely(d->major == major && d->minor == minor)) - return d; - - // not found - // create a new disk structure - d = (struct disk *)callocz(1, sizeof(struct disk)); - - d->disk = get_disk_name(major, minor, disk); - d->device = strdupz(disk); - d->major = major; - d->minor = minor; - d->type = DISK_TYPE_UNKNOWN; // Default type. Changed later if not correct. - d->sector_size = 512; // the default, will be changed below - d->next = NULL; - - // append it to the list - if(unlikely(!disk_root)) - disk_root = d; - else { - struct disk *last; - for(last = disk_root; last->next ;last = last->next); - last->next = d; - } - - char buffer[FILENAME_MAX + 1]; - - // find if it is a physical disk - // by checking if /sys/block/DISK is readable. - snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device, disk); - if(likely(access(buffer, R_OK) == 0)) { - // assign it here, but it will be overwritten if it is not a physical disk - d->type = DISK_TYPE_PHYSICAL; - } - - // find if it is a partition - // by checking if /sys/dev/block/MAJOR:MINOR/partition is readable. 
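/*
 * Summary of the sysfs probes in this block (a descriptive note; the paths
 * shown are the defaults and all of them are configurable above): a readable
 * /sys/block/DISK marks the device DISK_TYPE_PHYSICAL; a readable
 * /sys/dev/block/MAJOR:MINOR/partition re-marks it DISK_TYPE_PARTITION; a
 * readable /sys/devices/virtual/block/DISK, or a non-empty
 * /sys/dev/block/MAJOR:MINOR/slaves/ directory, marks it DISK_TYPE_VIRTUAL.
 * For example, the first partition of sda is typically 8:1, so
 * /sys/dev/block/8:1/partition exists and it is classified as a partition.
 */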
- snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "partition"); - if(likely(access(buffer, R_OK) == 0)) { - d->type = DISK_TYPE_PARTITION; - } - else { - // find if it is a virtual disk - // by checking if /sys/devices/virtual/block/DISK is readable. - snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_block_device, disk); - if(likely(access(buffer, R_OK) == 0)) { - d->type = DISK_TYPE_VIRTUAL; - } - else { - // find if it is a virtual device - // by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries - snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "slaves/"); - DIR *dirp = opendir(buffer); - if (likely(dirp != NULL)) { - struct dirent *dp; - while ((dp = readdir(dirp))) { - // . and .. are also files in empty folders. - if (unlikely(strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0)) { - continue; - } - - d->type = DISK_TYPE_VIRTUAL; - - // Stop the loop after we found one file. - break; - } - if (unlikely(closedir(dirp) == -1)) - error("Unable to close dir %s", buffer); - } - } - } - - // ------------------------------------------------------------------------ - // check if we can find its mount point - - // mountinfo_find() can be called with NULL disk_mountinfo_root - struct mountinfo *mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor); - if(unlikely(!mi)) { - // mountinfo_free_all can be called with NULL - mountinfo_free_all(disk_mountinfo_root); - disk_mountinfo_root = mountinfo_read(0); - mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor); - } - - if(unlikely(mi)) - d->mount_point = strdupz(mi->mount_point); - else - d->mount_point = NULL; - - // ------------------------------------------------------------------------ - // find the disk sector size - - /* - * sector size is always 512 bytes inside the kernel #3481 - * - { - char tf[FILENAME_MAX + 1], *t; - strncpyz(tf, d->device, FILENAME_MAX); - - // replace all / with ! - for(t = tf; *t ;t++) - if(unlikely(*t == '/')) *t = '!'; - - if(likely(d->type == DISK_TYPE_PARTITION)) - snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size_partitions, d->major, d->minor, tf); - else - snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size, tf); - - FILE *fpss = fopen(buffer, "r"); - if(likely(fpss)) { - char buffer2[1024 + 1]; - char *tmp = fgets(buffer2, 1024, fpss); - - if(likely(tmp)) { - d->sector_size = str2i(tmp); - if(unlikely(d->sector_size <= 0)) { - error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->device, buffer); - d->sector_size = 512; - } - } - else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->device, buffer); - - fclose(fpss); - } - else error("Cannot read sector size for device %s from %s. 
Assuming 512.", d->device, buffer); - } - */ - - // ------------------------------------------------------------------------ - // check if the device is a bcache - - struct stat bcache; - snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device_bcache, disk); - if(unlikely(stat(buffer, &bcache) == 0 && (bcache.st_mode & S_IFMT) == S_IFDIR)) { - // we have the 'bcache' directory - d->device_is_bcache = 1; - - char buffer2[FILENAME_MAX + 1]; - - snprintfz(buffer2, FILENAME_MAX, "%s/cache/congested", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_cache_congested = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/readahead", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_readaheads = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/dirty_data", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_dirty_data = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/writeback_rate", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_writeback_rate = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache_available_percent", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_cache_available_percent = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hits", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_hits = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_five_minute/cache_hit_ratio", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_five_minute_cache_hit_ratio = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_hour/cache_hit_ratio", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_hour_cache_hit_ratio = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_day/cache_hit_ratio", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_day_cache_hit_ratio = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hit_ratio", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_hit_ratio = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_misses", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_misses = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_hits", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_bypass_hits = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_misses", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_bypass_misses = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - - snprintfz(buffer2, FILENAME_MAX, 
"%s/stats_total/cache_miss_collisions", buffer); - if(access(buffer2, R_OK) == 0) - d->bcache_filename_stats_total_cache_miss_collisions = strdupz(buffer2); - else - error("bcache file '%s' cannot be read.", buffer2); - } - - get_disk_config(d); - return d; -} - -int do_proc_diskstats(int update_every, usec_t dt) { - static procfile *ff = NULL; - - if(unlikely(!globals_initialized)) { - globals_initialized = 1; - - global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime); - global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks); - global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks); - global_enable_performance_for_partitions = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for partitions", global_enable_performance_for_partitions); - - global_do_io = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "bandwidth for all disks", global_do_io); - global_do_ops = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "operations for all disks", global_do_ops); - global_do_mops = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "merged operations for all disks", global_do_mops); - global_do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "i/o time for all disks", global_do_iotime); - global_do_qops = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "queued operations for all disks", global_do_qops); - global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "utilization percentage for all disks", global_do_util); - global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "backlog for all disks", global_do_backlog); - global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "bcache for all disks", global_do_bcache); - - global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks); - - char buffer[FILENAME_MAX + 1]; - - snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s"); - path_to_sys_block_device = config_get(CONFIG_SECTION_DISKSTATS, "path to get block device", buffer); - - snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/bcache"); - path_to_sys_block_device_bcache = config_get(CONFIG_SECTION_DISKSTATS, "path to get block device bcache", buffer); - - snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/block/%s"); - path_to_sys_devices_virtual_block_device = config_get(CONFIG_SECTION_DISKSTATS, "path to get virtual block device", buffer); - - snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s"); - path_to_sys_dev_block_major_minor_string = config_get(CONFIG_SECTION_DISKSTATS, "path to get block device infos", buffer); - - //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size"); - //path_to_get_hw_sector_size = config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size", buffer); - - //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, 
"/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size"); - //path_to_get_hw_sector_size_partitions = config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size for partitions", buffer); - - snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix); - path_to_device_mapper = config_get(CONFIG_SECTION_DISKSTATS, "path to device mapper", buffer); - - snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix); - path_to_device_label = config_get(CONFIG_SECTION_DISKSTATS, "path to /dev/disk/by-label", buffer); - - snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-id", netdata_configured_host_prefix); - path_to_device_id = config_get(CONFIG_SECTION_DISKSTATS, "path to /dev/disk/by-id", buffer); - - name_disks_by_id = config_get_boolean(CONFIG_SECTION_DISKSTATS, "name disks by id", name_disks_by_id); - - excluded_disks = simple_pattern_create( - config_get(CONFIG_SECTION_DISKSTATS, "exclude disks", DEFAULT_EXCLUDED_DISKS) - , NULL - , SIMPLE_PATTERN_EXACT - ); - } - - // -------------------------------------------------------------------------- - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/diskstats"); - ff = procfile_open(config_get(CONFIG_SECTION_DISKSTATS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); - } - if(unlikely(!ff)) return 0; - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - collected_number system_read_kb = 0, system_write_kb = 0; - - for(l = 0; l < lines ;l++) { - // -------------------------------------------------------------------------- - // Read parameters - - char *disk; - unsigned long major = 0, minor = 0; - - collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0, - writes = 0, mwrites = 0, writesectors = 0, writems = 0, - queued_ios = 0, busy_ms = 0, backlog_ms = 0; - - collected_number last_reads = 0, last_readsectors = 0, last_readms = 0, - last_writes = 0, last_writesectors = 0, last_writems = 0, - last_busy_ms = 0; - - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 14)) continue; - - major = str2ul(procfile_lineword(ff, l, 0)); - minor = str2ul(procfile_lineword(ff, l, 1)); - disk = procfile_lineword(ff, l, 2); - - // # of reads completed # of writes completed - // This is the total number of reads or writes completed successfully. - reads = str2ull(procfile_lineword(ff, l, 3)); // rd_ios - writes = str2ull(procfile_lineword(ff, l, 7)); // wr_ios - - // # of reads merged # of writes merged - // Reads and writes which are adjacent to each other may be merged for - // efficiency. Thus two 4K reads may become one 8K read before it is - // ultimately handed to the disk, and so it will be counted (and queued) - mreads = str2ull(procfile_lineword(ff, l, 4)); // rd_merges_or_rd_sec - mwrites = str2ull(procfile_lineword(ff, l, 8)); // wr_merges - - // # of sectors read # of sectors written - // This is the total number of sectors read or written successfully. - readsectors = str2ull(procfile_lineword(ff, l, 5)); // rd_sec_or_wr_ios - writesectors = str2ull(procfile_lineword(ff, l, 9)); // wr_sec - - // # of milliseconds spent reading # of milliseconds spent writing - // This is the total number of milliseconds spent by all reads or writes (as - // measured from __make_request() to end_that_request_last()). 
- readms = str2ull(procfile_lineword(ff, l, 6)); // rd_ticks_or_wr_sec - writems = str2ull(procfile_lineword(ff, l, 10)); // wr_ticks - - // # of I/Os currently in progress - // The only field that should go to zero. Incremented as requests are - // given to appropriate struct request_queue and decremented as they finish. - queued_ios = str2ull(procfile_lineword(ff, l, 11)); // ios_pgr - - // # of milliseconds spent doing I/Os - // This field increases so long as field queued_ios is nonzero. - busy_ms = str2ull(procfile_lineword(ff, l, 12)); // tot_ticks - - // weighted # of milliseconds spent doing I/Os - // This field is incremented at each I/O start, I/O completion, I/O - // merge, or read of these stats by the number of I/Os in progress - // (field queued_ios) times the number of milliseconds spent doing I/O since the - // last update of this field. This can provide an easy measure of both - // I/O completion time and the backlog that may be accumulating. - backlog_ms = str2ull(procfile_lineword(ff, l, 13)); // rq_ticks - - - // -------------------------------------------------------------------------- - // remove slashes from disk names - char *s; - for(s = disk; *s ;s++) - if(*s == '/') *s = '_'; - - // -------------------------------------------------------------------------- - // get a disk structure for the disk - - struct disk *d = get_disk(major, minor, disk); - d->updated = 1; - - // -------------------------------------------------------------------------- - // count the global system disk I/O of physical disks - - if(unlikely(d->type == DISK_TYPE_PHYSICAL)) { - system_read_kb += readsectors * d->sector_size / 1024; - system_write_kb += writesectors * d->sector_size / 1024; - } - - // -------------------------------------------------------------------------- - // Set its family based on mount point - - char *family = d->mount_point; - if(!family) family = d->disk; - - - // -------------------------------------------------------------------------- - // Do performance metrics - - if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) { - d->do_io = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_io)) { - d->st_io = rrdset_create_localhost( - RRD_TYPE_DISK - , d->device - , d->disk - , family - , "disk.io" - , "Disk I/O Bandwidth" - , "kilobytes/s" - , "proc" - , "diskstats" - , 2000 - , update_every - , RRDSET_TYPE_AREA - ); - - d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_INCREMENTAL); - d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_io); - - last_readsectors = rrddim_set_by_pointer(d->st_io, d->rd_io_reads, readsectors); - last_writesectors = rrddim_set_by_pointer(d->st_io, d->rd_io_writes, writesectors); - rrdset_done(d->st_io); - } - - // -------------------------------------------------------------------- - - if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes))) { - d->do_ops = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_ops)) { - d->st_ops = rrdset_create_localhost( - "disk_ops" - , d->device - , d->disk - , family - , "disk.ops" - , "Disk Completed I/O Operations" - , "operations/s" - , "proc" - , "diskstats" - , 2001 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL); - - d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_ops_writes = rrddim_add(d->st_ops, "writes", 
NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_ops); - - last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads); - last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes); - rrdset_done(d->st_ops); - } - - // -------------------------------------------------------------------- - - if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && queued_ios)) { - d->do_qops = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_qops)) { - d->st_qops = rrdset_create_localhost( - "disk_qops" - , d->device - , d->disk - , family - , "disk.qops" - , "Disk Current I/O Operations" - , "operations" - , "proc" - , "diskstats" - , 2002 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL); - - d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_qops); - - rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios); - rrdset_done(d->st_qops); - } - - // -------------------------------------------------------------------- - - if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && backlog_ms)) { - d->do_backlog = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_backlog)) { - d->st_backlog = rrdset_create_localhost( - "disk_backlog" - , d->device - , d->disk - , family - , "disk.backlog" - , "Disk Backlog" - , "backlog (ms)" - , "proc" - , "diskstats" - , 2003 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL); - - d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_backlog); - - rrddim_set_by_pointer(d->st_backlog, d->rd_backlog_backlog, backlog_ms); - rrdset_done(d->st_backlog); - } - - // -------------------------------------------------------------------- - - if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) { - d->do_util = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_util)) { - d->st_util = rrdset_create_localhost( - "disk_util" - , d->device - , d->disk - , family - , "disk.util" - , "Disk Utilization Time" - , "% of time working" - , "proc" - , "diskstats" - , 2004 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL); - - d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_util); - - last_busy_ms = rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, busy_ms); - rrdset_done(d->st_util); - } - - // -------------------------------------------------------------------- - - if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && (mreads || mwrites))) { - d->do_mops = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_mops)) { - d->st_mops = rrdset_create_localhost( - "disk_mops" - , d->device - , d->disk - , family - , "disk.mops" - , "Disk Merged Operations" - , "merged operations/s" - , "proc" - , "diskstats" - , 2021 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL); - - d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_mops); - - rrddim_set_by_pointer(d->st_mops, d->rd_mops_reads, mreads); - rrddim_set_by_pointer(d->st_mops, d->rd_mops_writes, mwrites); - rrdset_done(d->st_mops); - } - - // 
-------------------------------------------------------------------- - - if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) { - d->do_iotime = CONFIG_BOOLEAN_YES; - - if(unlikely(!d->st_iotime)) { - d->st_iotime = rrdset_create_localhost( - "disk_iotime" - , d->device - , d->disk - , family - , "disk.iotime" - , "Disk Total I/O Time" - , "milliseconds/s" - , "proc" - , "diskstats" - , 2022 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL); - - d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_iotime); - - last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms); - last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems); - rrdset_done(d->st_iotime); - } - - // -------------------------------------------------------------------- - // calculate differential charts - // only if this is not the first time we run - - if(likely(dt)) { - if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { - - if(unlikely(!d->st_await)) { - d->st_await = rrdset_create_localhost( - "disk_await" - , d->device - , d->disk - , family - , "disk.await" - , "Average Completed I/O Operation Time" - , "ms per operation" - , "proc" - , "diskstats" - , 2005 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL); - - d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_await); - - rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (reads - last_reads) ? (readms - last_readms) / (reads - last_reads) : 0); - rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (writes - last_writes) ? (writems - last_writems) / (writes - last_writes) : 0); - rrdset_done(d->st_await); - } - - if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { - - if(unlikely(!d->st_avgsz)) { - d->st_avgsz = rrdset_create_localhost( - "disk_avgsz" - , d->device - , d->disk - , family - , "disk.avgsz" - , "Average Completed I/O Operation Bandwidth" - , "kilobytes per operation" - , "proc" - , "diskstats" - , 2006 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL); - - d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_ABSOLUTE); - d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_avgsz); - - rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0); - rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? 
(writesectors - last_writesectors) / (writes - last_writes) : 0); - rrdset_done(d->st_avgsz); - } - - if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) && - (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) { - - if(unlikely(!d->st_svctm)) { - d->st_svctm = rrdset_create_localhost( - "disk_svctm" - , d->device - , d->disk - , family - , "disk.svctm" - , "Average Service Time" - , "ms per operation" - , "proc" - , "diskstats" - , 2007 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL); - - d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_svctm); - - rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, ((reads - last_reads) + (writes - last_writes)) ? (busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : 0); - rrdset_done(d->st_svctm); - } - } - - // -------------------------------------------------------------------------- - // read bcache metrics and generate the bcache charts - - if(d->device_is_bcache && d->do_bcache != CONFIG_BOOLEAN_NO) { - unsigned long long int - stats_total_cache_bypass_hits = 0, - stats_total_cache_bypass_misses = 0, - stats_total_cache_hits = 0, - stats_total_cache_miss_collisions = 0, - stats_total_cache_misses = 0, - stats_five_minute_cache_hit_ratio = 0, - stats_hour_cache_hit_ratio = 0, - stats_day_cache_hit_ratio = 0, - stats_total_cache_hit_ratio = 0, - cache_available_percent = 0, - cache_readaheads = 0, - cache_congested = 0, - dirty_data = 0, - writeback_rate = 0; - - // read the bcache values - - if(d->bcache_filename_dirty_data) - dirty_data = bcache_read_number_with_units(d->bcache_filename_dirty_data); - - if(d->bcache_filename_writeback_rate) - writeback_rate = bcache_read_number_with_units(d->bcache_filename_writeback_rate); - - if(d->bcache_filename_cache_congested) - cache_congested = bcache_read_number_with_units(d->bcache_filename_cache_congested); - - if(d->bcache_filename_cache_available_percent) - read_single_number_file(d->bcache_filename_cache_available_percent, &cache_available_percent); - - if(d->bcache_filename_stats_five_minute_cache_hit_ratio) - read_single_number_file(d->bcache_filename_stats_five_minute_cache_hit_ratio, &stats_five_minute_cache_hit_ratio); - - if(d->bcache_filename_stats_hour_cache_hit_ratio) - read_single_number_file(d->bcache_filename_stats_hour_cache_hit_ratio, &stats_hour_cache_hit_ratio); - - if(d->bcache_filename_stats_day_cache_hit_ratio) - read_single_number_file(d->bcache_filename_stats_day_cache_hit_ratio, &stats_day_cache_hit_ratio); - - if(d->bcache_filename_stats_total_cache_hit_ratio) - read_single_number_file(d->bcache_filename_stats_total_cache_hit_ratio, &stats_total_cache_hit_ratio); - - if(d->bcache_filename_stats_total_cache_hits) - read_single_number_file(d->bcache_filename_stats_total_cache_hits, &stats_total_cache_hits); - - if(d->bcache_filename_stats_total_cache_misses) - read_single_number_file(d->bcache_filename_stats_total_cache_misses, &stats_total_cache_misses); - - if(d->bcache_filename_stats_total_cache_miss_collisions) - read_single_number_file(d->bcache_filename_stats_total_cache_miss_collisions, &stats_total_cache_miss_collisions); - - if(d->bcache_filename_stats_total_cache_bypass_hits) - read_single_number_file(d->bcache_filename_stats_total_cache_bypass_hits, &stats_total_cache_bypass_hits); - - if(d->bcache_filename_stats_total_cache_bypass_misses) - 
read_single_number_file(d->bcache_filename_stats_total_cache_bypass_misses, &stats_total_cache_bypass_misses); - - if(d->bcache_filename_stats_total_cache_readaheads) - cache_readaheads = bcache_read_number_with_units(d->bcache_filename_stats_total_cache_readaheads); - - - // update the charts - - { - - if(unlikely(!d->st_bcache_hit_ratio)) { - d->st_bcache_hit_ratio = rrdset_create_localhost( - "disk_bcache_hit_ratio" - , d->device - , d->disk - , family - , "disk.bcache_hit_ratio" - , "BCache Cache Hit Ratio" - , "percentage" - , "proc" - , "diskstats" - , 2120 - , update_every - , RRDSET_TYPE_LINE - ); - - d->rd_bcache_hit_ratio_5min = rrddim_add(d->st_bcache_hit_ratio, "5min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - d->rd_bcache_hit_ratio_1hour = rrddim_add(d->st_bcache_hit_ratio, "1hour", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - d->rd_bcache_hit_ratio_1day = rrddim_add(d->st_bcache_hit_ratio, "1day", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - d->rd_bcache_hit_ratio_total = rrddim_add(d->st_bcache_hit_ratio, "ever", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_bcache_hit_ratio); - - rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_5min, stats_five_minute_cache_hit_ratio); - rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1hour, stats_hour_cache_hit_ratio); - rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1day, stats_day_cache_hit_ratio); - rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_total, stats_total_cache_hit_ratio); - rrdset_done(d->st_bcache_hit_ratio); - } - - { - - if(unlikely(!d->st_bcache_rates)) { - d->st_bcache_rates = rrdset_create_localhost( - "disk_bcache_rates" - , d->device - , d->disk - , family - , "disk.bcache_rates" - , "BCache Rates" - , "KB/s" - , "proc" - , "diskstats" - , 2121 - , update_every - , RRDSET_TYPE_AREA - ); - - d->rd_bcache_rate_congested = rrddim_add(d->st_bcache_rates, "congested", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - d->rd_bcache_rate_writeback = rrddim_add(d->st_bcache_rates, "writeback", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_bcache_rates); - - rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_writeback, writeback_rate); - rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_congested, cache_congested); - rrdset_done(d->st_bcache_rates); - } - - { - if(unlikely(!d->st_bcache_size)) { - d->st_bcache_size = rrdset_create_localhost( - "disk_bcache_size" - , d->device - , d->disk - , family - , "disk.bcache_size" - , "BCache Cache Sizes" - , "MB" - , "proc" - , "diskstats" - , 2122 - , update_every - , RRDSET_TYPE_AREA - ); - - d->rd_bcache_dirty_size = rrddim_add(d->st_bcache_size, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_bcache_size); - - rrddim_set_by_pointer(d->st_bcache_size, d->rd_bcache_dirty_size, dirty_data); - rrdset_done(d->st_bcache_size); - } - - { - if(unlikely(!d->st_bcache_usage)) { - d->st_bcache_usage = rrdset_create_localhost( - "disk_bcache_usage" - , d->device - , d->disk - , family - , "disk.bcache_usage" - , "BCache Cache Usage" - , "percent" - , "proc" - , "diskstats" - , 2123 - , update_every - , RRDSET_TYPE_AREA - ); - - d->rd_bcache_available_percent = rrddim_add(d->st_bcache_usage, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(d->st_bcache_usage); - - rrddim_set_by_pointer(d->st_bcache_usage, d->rd_bcache_available_percent, cache_available_percent); - rrdset_done(d->st_bcache_usage); - } - - 
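The bcache counters gathered above come from sysfs attributes that bcache pretty-prints with unit suffixes (dirty_data, for example, may read "3.1M"), which is why they go through bcache_read_number_with_units() rather than read_single_number_file(). That helper is defined elsewhere in this file; the sketch below only illustrates the idea, and the binary k/M/G/T suffix table is an assumption of the example:

    #include <stdlib.h>

    /* parse a string such as "3.1M" into a plain number */
    static unsigned long long parse_number_with_units(const char *s) {
        char *e = NULL;
        long double v = strtold(s, &e);      /* "3.1M" -> v = 3.1, e -> "M" */

        if(e) switch(*e) {
            case 'k': v *= 1024.0L; break;
            case 'M': v *= 1024.0L * 1024.0L; break;
            case 'G': v *= 1024.0L * 1024.0L * 1024.0L; break;
            case 'T': v *= 1024.0L * 1024.0L * 1024.0L * 1024.0L; break;
            default : break;                 /* no suffix: plain number */
        }

        return (unsigned long long)v;
    }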
if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_hits != 0 || stats_total_cache_misses != 0 || stats_total_cache_miss_collisions != 0))) { - - if(unlikely(!d->st_bcache)) { - d->st_bcache = rrdset_create_localhost( - "disk_bcache" - , d->device - , d->disk - , family - , "disk.bcache" - , "BCache Cache I/O Operations" - , "operations/s" - , "proc" - , "diskstats" - , 2124 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL); - - d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_bcache_readaheads = rrddim_add(d->st_bcache, "readaheads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_bcache); - - rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_hits, stats_total_cache_hits); - rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_misses, stats_total_cache_misses); - rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_miss_collisions, stats_total_cache_miss_collisions); - rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_readaheads, cache_readaheads); - rrdset_done(d->st_bcache); - } - - if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_bypass_hits != 0 || stats_total_cache_bypass_misses != 0))) { - - if(unlikely(!d->st_bcache_bypass)) { - d->st_bcache_bypass = rrdset_create_localhost( - "disk_bcache_bypass" - , d->device - , d->disk - , family - , "disk.bcache_bypass" - , "BCache Cache Bypass I/O Operations" - , "operations/s" - , "proc" - , "diskstats" - , 2125 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL); - - d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(d->st_bcache_bypass); - - rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_hits, stats_total_cache_bypass_hits); - rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_misses, stats_total_cache_bypass_misses); - rrdset_done(d->st_bcache_bypass); - } - } - } - - - // ------------------------------------------------------------------------ - // update the system total I/O - - if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && (system_read_kb || system_write_kb))) { - static RRDSET *st_io = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_io)) { - st_io = rrdset_create_localhost( - "system" - , "io" - , NULL - , "disk" - , NULL - , "Disk I/O" - , "kilobytes/s" - , "proc" - , "diskstats" - , 150 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_io); - - rrddim_set_by_pointer(st_io, rd_in, system_read_kb); - rrddim_set_by_pointer(st_io, rd_out, system_write_kb); - rrdset_done(st_io); - } - - - // ------------------------------------------------------------------------ - // cleanup removed disks - - struct disk *d = disk_root, *last = NULL; - while(d) { - if(unlikely(global_cleanup_removed_disks && !d->updated)) { - struct disk *t = 
d; - - rrdset_obsolete_and_pointer_null(d->st_avgsz); - rrdset_obsolete_and_pointer_null(d->st_await); - rrdset_obsolete_and_pointer_null(d->st_backlog); - rrdset_obsolete_and_pointer_null(d->st_io); - rrdset_obsolete_and_pointer_null(d->st_iotime); - rrdset_obsolete_and_pointer_null(d->st_mops); - rrdset_obsolete_and_pointer_null(d->st_ops); - rrdset_obsolete_and_pointer_null(d->st_qops); - rrdset_obsolete_and_pointer_null(d->st_svctm); - rrdset_obsolete_and_pointer_null(d->st_util); - rrdset_obsolete_and_pointer_null(d->st_bcache); - rrdset_obsolete_and_pointer_null(d->st_bcache_bypass); - rrdset_obsolete_and_pointer_null(d->st_bcache_rates); - rrdset_obsolete_and_pointer_null(d->st_bcache_size); - rrdset_obsolete_and_pointer_null(d->st_bcache_usage); - rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio); - - if(d == disk_root) { - disk_root = d = d->next; - last = NULL; - } - else if(last) { - last->next = d = d->next; - } - - freez(t->bcache_filename_dirty_data); - freez(t->bcache_filename_writeback_rate); - freez(t->bcache_filename_cache_congested); - freez(t->bcache_filename_cache_available_percent); - freez(t->bcache_filename_stats_five_minute_cache_hit_ratio); - freez(t->bcache_filename_stats_hour_cache_hit_ratio); - freez(t->bcache_filename_stats_day_cache_hit_ratio); - freez(t->bcache_filename_stats_total_cache_hit_ratio); - freez(t->bcache_filename_stats_total_cache_hits); - freez(t->bcache_filename_stats_total_cache_misses); - freez(t->bcache_filename_stats_total_cache_miss_collisions); - freez(t->bcache_filename_stats_total_cache_bypass_hits); - freez(t->bcache_filename_stats_total_cache_bypass_misses); - freez(t->bcache_filename_stats_total_cache_readaheads); - - freez(t->disk); - freez(t->device); - freez(t->mount_point); - freez(t); - } - else { - d->updated = 0; - last = d; - d = d->next; - } - } - - return 0; -} diff --git a/src/proc_interrupts.c b/src/proc_interrupts.c deleted file mode 100644 index 867f39eb2..000000000 --- a/src/proc_interrupts.c +++ /dev/null @@ -1,253 +0,0 @@ -#include "common.h" - -#define MAX_INTERRUPT_NAME 50 - -struct cpu_interrupt { - unsigned long long value; - RRDDIM *rd; -}; - -struct interrupt { - int used; - char *id; - char name[MAX_INTERRUPT_NAME + 1]; - RRDDIM *rd; - unsigned long long total; - struct cpu_interrupt cpu[]; -}; - -// since each interrupt is variable in size -// we use this to calculate its record size -#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt))) - -// given a base, get a pointer to each record -#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)]) - -static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) { - static struct interrupt *irrs = NULL; - static size_t allocated = 0; - - if(unlikely(lines != allocated)) { - size_t l; - int c; - - irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus)); - - // reset all interrupt RRDDIM pointers as any line could have shifted - for(l = 0; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - irr->rd = NULL; - irr->name[0] = '\0'; - for(c = 0; c < cpus ;c++) - irr->cpu[c].rd = NULL; - } - - allocated = lines; - } - - return irrs; -} - -int do_proc_interrupts(int update_every, usec_t dt) { - (void)dt; - static procfile *ff = NULL; - static int cpus = -1, do_per_core = -1; - struct interrupt *irrs = NULL; - - if(unlikely(do_per_core == -1)) - do_per_core = config_get_boolean("plugin:proc:/proc/interrupts", "interrupts per core", 1); - 
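Since struct interrupt ends in a flexible array member sized by the CPU count, records cannot be addressed with ordinary struct interrupt* arithmetic; the recordsize()/irrindex() macros defined above do the byte-offset math instead. A self-contained illustration of the same technique (all names here are invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    struct rec {
        unsigned long long total;
        unsigned long long per_cpu[];    /* flexible array member */
    };

    /* bytes occupied by one record holding 'cpus' counters */
    #define RECSIZE(cpus) (sizeof(struct rec) + (cpus) * sizeof(unsigned long long))

    /* address of record 'i' inside a flat buffer of such records */
    #define RECIDX(base, i, cpus) ((struct rec *)&((char *)(base))[(i) * RECSIZE(cpus)])

    int main(void) {
        int cpus = 4, records = 3;
        struct rec *base = calloc(records, RECSIZE(cpus));

        RECIDX(base, 2, cpus)->per_cpu[1] = 42;   /* third record, second CPU */
        printf("%llu\n", RECIDX(base, 2, cpus)->per_cpu[1]);

        free(base);
        return 0;
    }

This flat layout is also why get_interrupts_array() above resets every RRDDIM pointer after reallocz(): growing the buffer can move the whole block, invalidating anything cached inside it.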
- if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts"); - ff = procfile_open(config_get("plugin:proc:/proc/interrupts", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); - } - if(unlikely(!ff)) - return 1; - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - size_t words = procfile_linewords(ff, 0); - - if(unlikely(!lines)) { - error("Cannot read /proc/interrupts, zero lines reported."); - return 1; - } - - // find how many CPUs are there - if(unlikely(cpus == -1)) { - uint32_t w; - cpus = 0; - for(w = 0; w < words ; w++) { - if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0)) - cpus++; - } - } - - if(unlikely(!cpus)) { - error("PLUGIN: PROC_INTERRUPTS: Cannot find the number of CPUs in /proc/interrupts"); - return 1; - } - - // allocate the size we need; - irrs = get_interrupts_array(lines, cpus); - irrs[0].used = 0; - - // loop through all lines - for(l = 1; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - irr->used = 0; - irr->total = 0; - - words = procfile_linewords(ff, l); - if(unlikely(!words)) continue; - - irr->id = procfile_lineword(ff, l, 0); - if(unlikely(!irr->id || !irr->id[0])) continue; - - size_t idlen = strlen(irr->id); - if(unlikely(idlen && irr->id[idlen - 1] == ':')) - irr->id[idlen - 1] = '\0'; - - int c; - for(c = 0; c < cpus ;c++) { - if(likely((c + 1) < (int)words)) - irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1))); - else - irr->cpu[c].value = 0; - - irr->total += irr->cpu[c].value; - } - - if(unlikely(isdigit(irr->id[0]) && (uint32_t)(cpus + 2) < words)) { - strncpyz(irr->name, procfile_lineword(ff, l, words - 1), MAX_INTERRUPT_NAME); - size_t nlen = strlen(irr->name); - idlen = strlen(irr->id); - if(likely(nlen + 1 + idlen <= MAX_INTERRUPT_NAME)) { - irr->name[nlen] = '_'; - strncpyz(&irr->name[nlen + 1], irr->id, MAX_INTERRUPT_NAME - nlen - 1); - } - else { - irr->name[MAX_INTERRUPT_NAME - idlen - 1] = '_'; - strncpyz(&irr->name[MAX_INTERRUPT_NAME - idlen], irr->id, idlen); - } - } - else { - strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME); - } - - irr->used = 1; - } - - // -------------------------------------------------------------------- - - static RRDSET *st_system_interrupts = NULL; - if(unlikely(!st_system_interrupts)) - st_system_interrupts = rrdset_create_localhost( - "system" - , "interrupts" - , NULL - , "interrupts" - , NULL - , "System interrupts" - , "interrupts/s" - , "proc" - , "interrupts" - , 1000 - , update_every - , RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_system_interrupts); - - for(l = 0; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - if(unlikely(!irr->used)) continue; - - // some interrupt may have changed without changing the total number of lines - // if the same number of interrupts have been added and removed between two - // calls of this function. 
- if(unlikely(!irr->rd || strncmp(irr->rd->name, irr->name, MAX_INTERRUPT_NAME) != 0)) { - irr->rd = rrddim_find(st_system_interrupts, irr->id); - - if(unlikely(!irr->rd)) - irr->rd = rrddim_add(st_system_interrupts, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); - else - rrddim_set_name(st_system_interrupts, irr->rd, irr->name); - - // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop - if(likely(do_per_core)) { - int c; - for (c = 0; c < cpus ;c++) irr->cpu[c].rd = NULL; - } - } - - rrddim_set_by_pointer(st_system_interrupts, irr->rd, irr->total); - } - - rrdset_done(st_system_interrupts); - - // -------------------------------------------------------------------- - - if(likely(do_per_core)) { - static RRDSET **core_st = NULL; - static int old_cpus = 0; - - if(old_cpus < cpus) { - core_st = reallocz(core_st, sizeof(RRDSET *) * cpus); - memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus)); - old_cpus = cpus; - } - - int c; - - for(c = 0; c < cpus ;c++) { - if(unlikely(!core_st[c])) { - char id[50+1]; - snprintfz(id, 50, "cpu%d_interrupts", c); - - char title[100+1]; - snprintfz(title, 100, "CPU%d Interrupts", c); - core_st[c] = rrdset_create_localhost( - "cpu" - , id - , NULL - , "interrupts" - , "cpu.interrupts" - , title - , "interrupts/s" - , "proc" - , "interrupts" - , 1100 + c - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(core_st[c]); - - for(l = 0; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - - if(unlikely(!irr->used)) continue; - - if(unlikely(!irr->cpu[c].rd)) { - irr->cpu[c].rd = rrddim_find(core_st[c], irr->id); - - if(unlikely(!irr->cpu[c].rd)) - irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); - else - rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name); - } - - rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value); - } - - rrdset_done(core_st[c]); - } - } - - return 0; -} diff --git a/src/proc_loadavg.c b/src/proc_loadavg.c deleted file mode 100644 index 868f7d50a..000000000 --- a/src/proc_loadavg.c +++ /dev/null @@ -1,119 +0,0 @@ -#include "common.h" - -// linux calculates this once every 5 seconds -#define MIN_LOADAVG_UPDATE_EVERY 5 - -int do_proc_loadavg(int update_every, usec_t dt) { - static procfile *ff = NULL; - static int do_loadavg = -1, do_all_processes = -1; - static usec_t next_loadavg_dt = 0; - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/loadavg"); - - ff = procfile_open(config_get("plugin:proc:/proc/loadavg", "filename to monitor", filename), " \t,:|/", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) - return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - if(unlikely(do_loadavg == -1)) { - do_loadavg = config_get_boolean("plugin:proc:/proc/loadavg", "enable load average", 1); - do_all_processes = config_get_boolean("plugin:proc:/proc/loadavg", "enable total processes", 1); - } - - if(unlikely(procfile_lines(ff) < 1)) { - error("/proc/loadavg has no lines."); - return 1; - } - if(unlikely(procfile_linewords(ff, 0) < 6)) { - error("/proc/loadavg has less than 6 words in it."); - return 1; - } - - double load1 = strtod(procfile_lineword(ff, 0, 0), NULL); - double load5 = strtod(procfile_lineword(ff, 0, 1), NULL); - double load15 = strtod(procfile_lineword(ff, 0, 2), NULL); - - //unsigned long long running_processes = 
str2ull(procfile_lineword(ff, 0, 3)); - unsigned long long active_processes = str2ull(procfile_lineword(ff, 0, 4)); - //unsigned long long next_pid = str2ull(procfile_lineword(ff, 0, 5)); - - - // -------------------------------------------------------------------- - - if(next_loadavg_dt <= dt) { - if(likely(do_loadavg)) { - static RRDSET *load_chart = NULL; - static RRDDIM *rd_load1 = NULL, *rd_load5 = NULL, *rd_load15 = NULL; - - if(unlikely(!load_chart)) { - load_chart = rrdset_create_localhost( - "system" - , "load" - , NULL - , "load" - , NULL - , "System Load Average" - , "load" - , "proc" - , "loadavg" - , 100 - , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every - , RRDSET_TYPE_LINE - ); - - rd_load1 = rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_load5 = rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_load15 = rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(load_chart); - - rrddim_set_by_pointer(load_chart, rd_load1, (collected_number) (load1 * 1000)); - rrddim_set_by_pointer(load_chart, rd_load5, (collected_number) (load5 * 1000)); - rrddim_set_by_pointer(load_chart, rd_load15, (collected_number) (load15 * 1000)); - rrdset_done(load_chart); - - next_loadavg_dt = load_chart->update_every * USEC_PER_SEC; - } - else next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC; - } - else next_loadavg_dt -= dt; - - // -------------------------------------------------------------------- - - if(likely(do_all_processes)) { - static RRDSET *processes_chart = NULL; - static RRDDIM *rd_active = NULL; - - if(unlikely(!processes_chart)) { - processes_chart = rrdset_create_localhost( - "system" - , "active_processes" - , NULL - , "processes" - , NULL - , "System Active Processes" - , "processes" - , "proc" - , "loadavg" - , 750 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(processes_chart); - - rrddim_set_by_pointer(processes_chart, rd_active, active_processes); - rrdset_done(processes_chart); - } - - return 0; -} diff --git a/src/proc_meminfo.c b/src/proc_meminfo.c deleted file mode 100644 index 3915bf0e9..000000000 --- a/src/proc_meminfo.c +++ /dev/null @@ -1,514 +0,0 @@ -#include "common.h" - -int do_proc_meminfo(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - static int do_ram = -1, do_swap = -1, do_hwcorrupt = -1, do_committed = -1, do_writeback = -1, do_kernel = -1, do_slab = -1, do_hugepages = -1, do_transparent_hugepages = -1; - - static ARL_BASE *arl_base = NULL; - static ARL_ENTRY *arl_hwcorrupted = NULL, *arl_memavailable = NULL; - - static unsigned long long - MemTotal = 0, - MemFree = 0, - MemAvailable = 0, - Buffers = 0, - Cached = 0, - //SwapCached = 0, - //Active = 0, - //Inactive = 0, - //ActiveAnon = 0, - //InactiveAnon = 0, - //ActiveFile = 0, - //InactiveFile = 0, - //Unevictable = 0, - //Mlocked = 0, - SwapTotal = 0, - SwapFree = 0, - Dirty = 0, - Writeback = 0, - //AnonPages = 0, - //Mapped = 0, - //Shmem = 0, - Slab = 0, - SReclaimable = 0, - SUnreclaim = 0, - KernelStack = 0, - PageTables = 0, - NFS_Unstable = 0, - Bounce = 0, - WritebackTmp = 0, - //CommitLimit = 0, - Committed_AS = 0, - //VmallocTotal = 0, - VmallocUsed = 0, - //VmallocChunk = 0, - AnonHugePages = 0, - ShmemHugePages = 0, - HugePages_Total = 0, - HugePages_Free = 0, - HugePages_Rsvd = 0, - HugePages_Surp = 0, - Hugepagesize 
= 0, - //DirectMap4k = 0, - //DirectMap2M = 0, - HardwareCorrupted = 0; - - if(unlikely(!arl_base)) { - do_ram = config_get_boolean("plugin:proc:/proc/meminfo", "system ram", 1); - do_swap = config_get_boolean_ondemand("plugin:proc:/proc/meminfo", "system swap", CONFIG_BOOLEAN_AUTO); - do_hwcorrupt = config_get_boolean_ondemand("plugin:proc:/proc/meminfo", "hardware corrupted ECC", CONFIG_BOOLEAN_AUTO); - do_committed = config_get_boolean("plugin:proc:/proc/meminfo", "committed memory", 1); - do_writeback = config_get_boolean("plugin:proc:/proc/meminfo", "writeback memory", 1); - do_kernel = config_get_boolean("plugin:proc:/proc/meminfo", "kernel memory", 1); - do_slab = config_get_boolean("plugin:proc:/proc/meminfo", "slab memory", 1); - do_hugepages = config_get_boolean_ondemand("plugin:proc:/proc/meminfo", "hugepages", CONFIG_BOOLEAN_AUTO); - do_transparent_hugepages = config_get_boolean_ondemand("plugin:proc:/proc/meminfo", "transparent hugepages", CONFIG_BOOLEAN_AUTO); - - arl_base = arl_create("meminfo", NULL, 60); - arl_expect(arl_base, "MemTotal", &MemTotal); - arl_expect(arl_base, "MemFree", &MemFree); - arl_memavailable = arl_expect(arl_base, "MemAvailable", &MemAvailable); - arl_expect(arl_base, "Buffers", &Buffers); - arl_expect(arl_base, "Cached", &Cached); - //arl_expect(arl_base, "SwapCached", &SwapCached); - //arl_expect(arl_base, "Active", &Active); - //arl_expect(arl_base, "Inactive", &Inactive); - //arl_expect(arl_base, "ActiveAnon", &ActiveAnon); - //arl_expect(arl_base, "InactiveAnon", &InactiveAnon); - //arl_expect(arl_base, "ActiveFile", &ActiveFile); - //arl_expect(arl_base, "InactiveFile", &InactiveFile); - //arl_expect(arl_base, "Unevictable", &Unevictable); - //arl_expect(arl_base, "Mlocked", &Mlocked); - arl_expect(arl_base, "SwapTotal", &SwapTotal); - arl_expect(arl_base, "SwapFree", &SwapFree); - arl_expect(arl_base, "Dirty", &Dirty); - arl_expect(arl_base, "Writeback", &Writeback); - //arl_expect(arl_base, "AnonPages", &AnonPages); - //arl_expect(arl_base, "Mapped", &Mapped); - //arl_expect(arl_base, "Shmem", &Shmem); - arl_expect(arl_base, "Slab", &Slab); - arl_expect(arl_base, "SReclaimable", &SReclaimable); - arl_expect(arl_base, "SUnreclaim", &SUnreclaim); - arl_expect(arl_base, "KernelStack", &KernelStack); - arl_expect(arl_base, "PageTables", &PageTables); - arl_expect(arl_base, "NFS_Unstable", &NFS_Unstable); - arl_expect(arl_base, "Bounce", &Bounce); - arl_expect(arl_base, "WritebackTmp", &WritebackTmp); - //arl_expect(arl_base, "CommitLimit", &CommitLimit); - arl_expect(arl_base, "Committed_AS", &Committed_AS); - //arl_expect(arl_base, "VmallocTotal", &VmallocTotal); - arl_expect(arl_base, "VmallocUsed", &VmallocUsed); - //arl_expect(arl_base, "VmallocChunk", &VmallocChunk); - arl_hwcorrupted = arl_expect(arl_base, "HardwareCorrupted", &HardwareCorrupted); - arl_expect(arl_base, "AnonHugePages", &AnonHugePages); - arl_expect(arl_base, "ShmemHugePages", &ShmemHugePages); - arl_expect(arl_base, "HugePages_Total", &HugePages_Total); - arl_expect(arl_base, "HugePages_Free", &HugePages_Free); - arl_expect(arl_base, "HugePages_Rsvd", &HugePages_Rsvd); - arl_expect(arl_base, "HugePages_Surp", &HugePages_Surp); - arl_expect(arl_base, "Hugepagesize", &Hugepagesize); - //arl_expect(arl_base, "DirectMap4k", &DirectMap4k); - //arl_expect(arl_base, "DirectMap2M", &DirectMap2M); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo"); - ff = 
procfile_open(config_get("plugin:proc:/proc/meminfo", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) - return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - arl_begin(arl_base); - - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 2)) continue; - - if(unlikely(arl_check(arl_base, - procfile_lineword(ff, l, 0), - procfile_lineword(ff, l, 1)))) break; - } - - // -------------------------------------------------------------------- - - // http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux - unsigned long long MemCached = Cached + Slab; - unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers; - - if(do_ram) { - { - static RRDSET *st_system_ram = NULL; - static RRDDIM *rd_free = NULL, *rd_used = NULL, *rd_cached = NULL, *rd_buffers = NULL; - - if(unlikely(!st_system_ram)) { - st_system_ram = rrdset_create_localhost( - "system" - , "ram" - , NULL - , "ram" - , NULL - , "System RAM" - , "MB" - , "proc" - , "meminfo" - , 200 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_system_ram); - - rrddim_set_by_pointer(st_system_ram, rd_free, MemFree); - rrddim_set_by_pointer(st_system_ram, rd_used, MemUsed); - rrddim_set_by_pointer(st_system_ram, rd_cached, MemCached); - rrddim_set_by_pointer(st_system_ram, rd_buffers, Buffers); - - rrdset_done(st_system_ram); - } - - if(arl_memavailable->flags & ARL_ENTRY_FLAG_FOUND) { - static RRDSET *st_mem_available = NULL; - static RRDDIM *rd_avail = NULL; - - if(unlikely(!st_mem_available)) { - st_mem_available = rrdset_create_localhost( - "mem" - , "available" - , NULL - , "system" - , NULL - , "Available RAM for applications" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE - , update_every - , RRDSET_TYPE_AREA - ); - - rd_avail = rrddim_add(st_mem_available, "MemAvailable", "avail", 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_available); - - rrddim_set_by_pointer(st_mem_available, rd_avail, MemAvailable); - - rrdset_done(st_mem_available); - } - } - - // -------------------------------------------------------------------- - - unsigned long long SwapUsed = SwapTotal - SwapFree; - - if(do_swap == CONFIG_BOOLEAN_YES || SwapTotal || SwapUsed || SwapFree) { - do_swap = CONFIG_BOOLEAN_YES; - - static RRDSET *st_system_swap = NULL; - static RRDDIM *rd_free = NULL, *rd_used = NULL; - - if(unlikely(!st_system_swap)) { - st_system_swap = rrdset_create_localhost( - "system" - , "swap" - , NULL - , "swap" - , NULL - , "System Swap" - , "MB" - , "proc" - , "meminfo" - , 201 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL); - - rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_system_swap); - - rrddim_set_by_pointer(st_system_swap, rd_used, SwapUsed); - rrddim_set_by_pointer(st_system_swap, 
rd_free, SwapFree); - - rrdset_done(st_system_swap); - } - - // -------------------------------------------------------------------- - - if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && HardwareCorrupted > 0))) { - do_hwcorrupt = CONFIG_BOOLEAN_YES; - - static RRDSET *st_mem_hwcorrupt = NULL; - static RRDDIM *rd_corrupted = NULL; - - if(unlikely(!st_mem_hwcorrupt)) { - st_mem_hwcorrupt = rrdset_create_localhost( - "mem" - , "hwcorrupt" - , NULL - , "ecc" - , NULL - , "Corrupted Memory, detected by ECC" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_HW - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL); - - rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_hwcorrupt); - - rrddim_set_by_pointer(st_mem_hwcorrupt, rd_corrupted, HardwareCorrupted); - - rrdset_done(st_mem_hwcorrupt); - } - - // -------------------------------------------------------------------- - - if(do_committed) { - static RRDSET *st_mem_committed = NULL; - static RRDDIM *rd_committed = NULL; - - if(unlikely(!st_mem_committed)) { - st_mem_committed = rrdset_create_localhost( - "mem" - , "committed" - , NULL - , "system" - , NULL - , "Committed (Allocated) Memory" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL); - - rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_committed); - - rrddim_set_by_pointer(st_mem_committed, rd_committed, Committed_AS); - - rrdset_done(st_mem_committed); - } - - // -------------------------------------------------------------------- - - if(do_writeback) { - static RRDSET *st_mem_writeback = NULL; - static RRDDIM *rd_dirty = NULL, *rd_writeback = NULL, *rd_fusewriteback = NULL, *rd_nfs_writeback = NULL, *rd_bounce = NULL; - - if(unlikely(!st_mem_writeback)) { - st_mem_writeback = rrdset_create_localhost( - "mem" - , "writeback" - , NULL - , "kernel" - , NULL - , "Writeback Memory" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_KERNEL - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL); - - rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_fusewriteback = rrddim_add(st_mem_writeback, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_nfs_writeback = rrddim_add(st_mem_writeback, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_bounce = rrddim_add(st_mem_writeback, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_writeback); - - rrddim_set_by_pointer(st_mem_writeback, rd_dirty, Dirty); - rrddim_set_by_pointer(st_mem_writeback, rd_writeback, Writeback); - rrddim_set_by_pointer(st_mem_writeback, rd_fusewriteback, WritebackTmp); - rrddim_set_by_pointer(st_mem_writeback, rd_nfs_writeback, NFS_Unstable); - rrddim_set_by_pointer(st_mem_writeback, rd_bounce, Bounce); - - rrdset_done(st_mem_writeback); - } - - // -------------------------------------------------------------------- - - if(do_kernel) { - static RRDSET *st_mem_kernel = NULL; - static RRDDIM *rd_slab = NULL, *rd_kernelstack = NULL, *rd_pagetables = NULL, 
*rd_vmallocused = NULL; - - if(unlikely(!st_mem_kernel)) { - st_mem_kernel = rrdset_create_localhost( - "mem" - , "kernel" - , NULL - , "kernel" - , NULL - , "Memory Used by Kernel" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_KERNEL + 1 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL); - - rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_vmallocused = rrddim_add(st_mem_kernel, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_kernel); - - rrddim_set_by_pointer(st_mem_kernel, rd_slab, Slab); - rrddim_set_by_pointer(st_mem_kernel, rd_kernelstack, KernelStack); - rrddim_set_by_pointer(st_mem_kernel, rd_pagetables, PageTables); - rrddim_set_by_pointer(st_mem_kernel, rd_vmallocused, VmallocUsed); - - rrdset_done(st_mem_kernel); - } - - // -------------------------------------------------------------------- - - if(do_slab) { - static RRDSET *st_mem_slab = NULL; - static RRDDIM *rd_reclaimable = NULL, *rd_unreclaimable = NULL; - - if(unlikely(!st_mem_slab)) { - st_mem_slab = rrdset_create_localhost( - "mem" - , "slab" - , NULL - , "slab" - , NULL - , "Reclaimable Kernel Memory" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_SLAB - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL); - - rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_slab); - - rrddim_set_by_pointer(st_mem_slab, rd_reclaimable, SReclaimable); - rrddim_set_by_pointer(st_mem_slab, rd_unreclaimable, SUnreclaim); - - rrdset_done(st_mem_slab); - } - - // -------------------------------------------------------------------- - - if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && Hugepagesize != 0 && HugePages_Total != 0)) { - do_hugepages = CONFIG_BOOLEAN_YES; - - static RRDSET *st_mem_hugepages = NULL; - static RRDDIM *rd_used = NULL, *rd_free = NULL, *rd_rsvd = NULL, *rd_surp = NULL; - - if(unlikely(!st_mem_hugepages)) { - st_mem_hugepages = rrdset_create_localhost( - "mem" - , "hugepages" - , NULL - , "hugepages" - , NULL - , "Dedicated HugePages Memory" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL); - - rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_rsvd = rrddim_add(st_mem_hugepages, "reserved", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_hugepages); - - rrddim_set_by_pointer(st_mem_hugepages, rd_used, HugePages_Total - HugePages_Free - HugePages_Rsvd); - rrddim_set_by_pointer(st_mem_hugepages, rd_free, HugePages_Free); - rrddim_set_by_pointer(st_mem_hugepages, rd_rsvd, HugePages_Rsvd); - rrddim_set_by_pointer(st_mem_hugepages, rd_surp, HugePages_Surp); - - rrdset_done(st_mem_hugepages); - } - - // 
-------------------------------------------------------------------- - - if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && (AnonHugePages != 0 || ShmemHugePages != 0))) { - do_transparent_hugepages = CONFIG_BOOLEAN_YES; - - static RRDSET *st_mem_transparent_hugepages = NULL; - static RRDDIM *rd_anonymous = NULL, *rd_shared = NULL; - - if(unlikely(!st_mem_transparent_hugepages)) { - st_mem_transparent_hugepages = rrdset_create_localhost( - "mem" - , "transparent_hugepages" - , NULL - , "hugepages" - , NULL - , "Transparent HugePages Memory" - , "MB" - , "proc" - , "meminfo" - , NETDATA_CHART_PRIO_MEM_HUGEPAGES - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL); - - rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_mem_transparent_hugepages); - - rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_anonymous, AnonHugePages); - rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_shared, ShmemHugePages); - - rrdset_done(st_mem_transparent_hugepages); - } - - return 0; -} - diff --git a/src/proc_net_dev.c b/src/proc_net_dev.c deleted file mode 100644 index 341b9e0ca..000000000 --- a/src/proc_net_dev.c +++ /dev/null @@ -1,894 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// netdev list - -static struct netdev { - char *name; - uint32_t hash; - size_t len; - - // flags - int virtual; - int configured; - int enabled; - int updated; - - int do_bandwidth; - int do_packets; - int do_errors; - int do_drops; - int do_fifo; - int do_compressed; - int do_events; - - const char *chart_type_net_bytes; - const char *chart_type_net_packets; - const char *chart_type_net_errors; - const char *chart_type_net_fifo; - const char *chart_type_net_events; - const char *chart_type_net_drops; - const char *chart_type_net_compressed; - - const char *chart_id_net_bytes; - const char *chart_id_net_packets; - const char *chart_id_net_errors; - const char *chart_id_net_fifo; - const char *chart_id_net_events; - const char *chart_id_net_drops; - const char *chart_id_net_compressed; - - const char *chart_family; - - int flipped; - unsigned long priority; - - // data collected - kernel_uint_t rbytes; - kernel_uint_t rpackets; - kernel_uint_t rerrors; - kernel_uint_t rdrops; - kernel_uint_t rfifo; - kernel_uint_t rframe; - kernel_uint_t rcompressed; - kernel_uint_t rmulticast; - - kernel_uint_t tbytes; - kernel_uint_t tpackets; - kernel_uint_t terrors; - kernel_uint_t tdrops; - kernel_uint_t tfifo; - kernel_uint_t tcollisions; - kernel_uint_t tcarrier; - kernel_uint_t tcompressed; - - // charts - RRDSET *st_bandwidth; - RRDSET *st_packets; - RRDSET *st_errors; - RRDSET *st_drops; - RRDSET *st_fifo; - RRDSET *st_compressed; - RRDSET *st_events; - - // dimensions - RRDDIM *rd_rbytes; - RRDDIM *rd_rpackets; - RRDDIM *rd_rerrors; - RRDDIM *rd_rdrops; - RRDDIM *rd_rfifo; - RRDDIM *rd_rframe; - RRDDIM *rd_rcompressed; - RRDDIM *rd_rmulticast; - - RRDDIM *rd_tbytes; - RRDDIM *rd_tpackets; - RRDDIM *rd_terrors; - RRDDIM *rd_tdrops; - RRDDIM *rd_tfifo; - RRDDIM *rd_tcollisions; - RRDDIM *rd_tcarrier; - RRDDIM *rd_tcompressed; - - struct netdev *next; -} *netdev_root = NULL, *netdev_last_used = NULL; - -static size_t netdev_added = 0, netdev_found = 0; - -// 
---------------------------------------------------------------------------- - -static void netdev_charts_release(struct netdev *d) { - if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth); - if(d->st_packets) rrdset_is_obsolete(d->st_packets); - if(d->st_errors) rrdset_is_obsolete(d->st_errors); - if(d->st_drops) rrdset_is_obsolete(d->st_drops); - if(d->st_fifo) rrdset_is_obsolete(d->st_fifo); - if(d->st_compressed) rrdset_is_obsolete(d->st_compressed); - if(d->st_events) rrdset_is_obsolete(d->st_events); - - d->st_bandwidth = NULL; - d->st_compressed = NULL; - d->st_drops = NULL; - d->st_errors = NULL; - d->st_events = NULL; - d->st_fifo = NULL; - d->st_packets = NULL; - - d->rd_rbytes = NULL; - d->rd_rpackets = NULL; - d->rd_rerrors = NULL; - d->rd_rdrops = NULL; - d->rd_rfifo = NULL; - d->rd_rframe = NULL; - d->rd_rcompressed = NULL; - d->rd_rmulticast = NULL; - - d->rd_tbytes = NULL; - d->rd_tpackets = NULL; - d->rd_terrors = NULL; - d->rd_tdrops = NULL; - d->rd_tfifo = NULL; - d->rd_tcollisions = NULL; - d->rd_tcarrier = NULL; - d->rd_tcompressed = NULL; -} - -static void netdev_free_strings(struct netdev *d) { - freez((void *)d->chart_type_net_bytes); - freez((void *)d->chart_type_net_compressed); - freez((void *)d->chart_type_net_drops); - freez((void *)d->chart_type_net_errors); - freez((void *)d->chart_type_net_events); - freez((void *)d->chart_type_net_fifo); - freez((void *)d->chart_type_net_packets); - - freez((void *)d->chart_id_net_bytes); - freez((void *)d->chart_id_net_compressed); - freez((void *)d->chart_id_net_drops); - freez((void *)d->chart_id_net_errors); - freez((void *)d->chart_id_net_events); - freez((void *)d->chart_id_net_fifo); - freez((void *)d->chart_id_net_packets); - - freez((void *)d->chart_family); -} - -static void netdev_free(struct netdev *d) { - netdev_charts_release(d); - netdev_free_strings(d); - - freez((void *)d->name); - freez((void *)d); - netdev_added--; -} - - -// ---------------------------------------------------------------------------- -// netdev renames - -static struct netdev_rename { - const char *host_device; - uint32_t hash; - - const char *container_device; - const char *container_name; - - int processed; - - struct netdev_rename *next; -} *netdev_rename_root = NULL; - -static int netdev_pending_renames = 0; -static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER; - -static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) { - struct netdev_rename *r; - - for(r = netdev_rename_root; r ; r = r->next) - if(r->hash == hash && !strcmp(host_device, r->host_device)) - return r; - - return NULL; -} - -// other threads can call this function to register a rename to a netdev -void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name) { - netdata_mutex_lock(&netdev_rename_mutex); - - uint32_t hash = simple_hash(host_device); - struct netdev_rename *r = netdev_rename_find(host_device, hash); - if(!r) { - r = callocz(1, sizeof(struct netdev_rename)); - r->host_device = strdupz(host_device); - r->container_device = strdupz(container_device); - r->container_name = strdupz(container_name); - r->hash = hash; - r->next = netdev_rename_root; - r->processed = 0; - netdev_rename_root = r; - netdev_pending_renames++; - info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); - } - else { - if(strcmp(r->container_device, container_device) != 0 || strcmp(r->container_name, 
container_name) != 0) { - freez((void *) r->container_device); - freez((void *) r->container_name); - - r->container_device = strdupz(container_device); - r->container_name = strdupz(container_name); - r->processed = 0; - netdev_pending_renames++; - info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); - } - } - - netdata_mutex_unlock(&netdev_rename_mutex); -} - -// other threads can call this function to delete a rename to a netdev -void netdev_rename_device_del(const char *host_device) { - netdata_mutex_lock(&netdev_rename_mutex); - - struct netdev_rename *r, *last = NULL; - - uint32_t hash = simple_hash(host_device); - for(r = netdev_rename_root; r ; last = r, r = r->next) { - if (r->hash == hash && !strcmp(host_device, r->host_device)) { - if (netdev_rename_root == r) - netdev_rename_root = r->next; - else if (last) - last->next = r->next; - - if(!r->processed) - netdev_pending_renames--; - - info("CGROUP: unregistered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); - - freez((void *) r->host_device); - freez((void *) r->container_name); - freez((void *) r->container_device); - freez((void *) r); - break; - } - } - - netdata_mutex_unlock(&netdev_rename_mutex); -} - -static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *r) { - info("CGROUP: renaming network interface '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name); - - netdev_charts_release(d); - netdev_free_strings(d); - - char buffer[RRD_ID_LENGTH_MAX + 1]; - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "cgroup_%s", r->container_name); - d->chart_type_net_bytes = strdupz(buffer); - d->chart_type_net_compressed = strdupz(buffer); - d->chart_type_net_drops = strdupz(buffer); - d->chart_type_net_errors = strdupz(buffer); - d->chart_type_net_events = strdupz(buffer); - d->chart_type_net_fifo = strdupz(buffer); - d->chart_type_net_packets = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_%s", r->container_device); - d->chart_id_net_bytes = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_compressed_%s", r->container_device); - d->chart_id_net_compressed = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_drops_%s", r->container_device); - d->chart_id_net_drops = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_errors_%s", r->container_device); - d->chart_id_net_errors = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_events_%s", r->container_device); - d->chart_id_net_events = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_fifo_%s", r->container_device); - d->chart_id_net_fifo = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_packets_%s", r->container_device); - d->chart_id_net_packets = strdupz(buffer); - - snprintfz(buffer, RRD_ID_LENGTH_MAX, "net %s", r->container_device); - d->chart_family = strdupz(buffer); - - d->priority = 43000; - d->flipped = 1; -} - -static inline void netdev_rename(struct netdev *d) { - struct netdev_rename *r = netdev_rename_find(d->name, d->hash); - if(unlikely(r && !r->processed)) { - netdev_rename_cgroup(d, r); - r->processed = 1; - netdev_pending_renames--; - } -} - -static inline void netdev_rename_lock(struct netdev *d) { - netdata_mutex_lock(&netdev_rename_mutex); - netdev_rename(d); - netdata_mutex_unlock(&netdev_rename_mutex); -} - -static inline void netdev_rename_all_lock(void) { - 
netdata_mutex_lock(&netdev_rename_mutex); - - struct netdev *d; - for(d = netdev_root; d ; d = d->next) - netdev_rename(d); - - netdev_pending_renames = 0; - netdata_mutex_unlock(&netdev_rename_mutex); -} - -// ---------------------------------------------------------------------------- -// netdev data collection - -static void netdev_cleanup() { - if(likely(netdev_found == netdev_added)) return; - - netdev_added = 0; - struct netdev *d = netdev_root, *last = NULL; - while(d) { - if(unlikely(!d->updated)) { - // info("Removing network device '%s', linked after '%s'", d->name, last?last->name:"ROOT"); - - if(netdev_last_used == d) - netdev_last_used = last; - - struct netdev *t = d; - - if(d == netdev_root || !last) - netdev_root = d = d->next; - - else - last->next = d = d->next; - - t->next = NULL; - netdev_free(t); - } - else { - netdev_added++; - last = d; - d->updated = 0; - d = d->next; - } - } -} - -static struct netdev *get_netdev(const char *name) { - struct netdev *d; - - uint32_t hash = simple_hash(name); - - // search it, from the last position to the end - for(d = netdev_last_used ; d ; d = d->next) { - if(unlikely(hash == d->hash && !strcmp(name, d->name))) { - netdev_last_used = d->next; - return d; - } - } - - // search it from the beginning to the last position we used - for(d = netdev_root ; d != netdev_last_used ; d = d->next) { - if(unlikely(hash == d->hash && !strcmp(name, d->name))) { - netdev_last_used = d->next; - return d; - } - } - - // create a new one - d = callocz(1, sizeof(struct netdev)); - d->name = strdupz(name); - d->hash = simple_hash(d->name); - d->len = strlen(d->name); - - d->chart_type_net_bytes = strdupz("net"); - d->chart_type_net_compressed = strdupz("net_compressed"); - d->chart_type_net_drops = strdupz("net_drops"); - d->chart_type_net_errors = strdupz("net_errors"); - d->chart_type_net_events = strdupz("net_events"); - d->chart_type_net_fifo = strdupz("net_fifo"); - d->chart_type_net_packets = strdupz("net_packets"); - - d->chart_id_net_bytes = strdupz(d->name); - d->chart_id_net_compressed = strdupz(d->name); - d->chart_id_net_drops = strdupz(d->name); - d->chart_id_net_errors = strdupz(d->name); - d->chart_id_net_events = strdupz(d->name); - d->chart_id_net_fifo = strdupz(d->name); - d->chart_id_net_packets = strdupz(d->name); - - d->chart_family = strdupz(d->name); - d->priority = 7000; - - netdev_rename_lock(d); - - netdev_added++; - - // link it to the end - if(netdev_root) { - struct netdev *e; - for(e = netdev_root; e->next ; e = e->next) ; - e->next = d; - } - else - netdev_root = d; - - return d; -} - -int do_proc_net_dev(int update_every, usec_t dt) { - (void)dt; - static SIMPLE_PATTERN *disabled_list = NULL; - static procfile *ff = NULL; - static int enable_new_interfaces = -1; - static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1, do_events = -1; - static char *path_to_sys_devices_virtual_net = NULL; - - if(unlikely(enable_new_interfaces == -1)) { - char filename[FILENAME_MAX + 1]; - - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/net/%s"); - path_to_sys_devices_virtual_net = config_get("plugin:proc:/proc/net/dev", "path to get virtual interfaces", filename); - - enable_new_interfaces = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO); - - do_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "bandwidth for all interfaces", 
CONFIG_BOOLEAN_AUTO); - do_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "packets for all interfaces", CONFIG_BOOLEAN_AUTO); - do_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "errors for all interfaces", CONFIG_BOOLEAN_AUTO); - do_drops = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "drops for all interfaces", CONFIG_BOOLEAN_AUTO); - do_fifo = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "fifo for all interfaces", CONFIG_BOOLEAN_AUTO); - do_compressed = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "compressed packets for all interfaces", CONFIG_BOOLEAN_AUTO); - do_events = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "frames, collisions, carrier counters for all interfaces", CONFIG_BOOLEAN_AUTO); - - disabled_list = simple_pattern_create(config_get("plugin:proc:/proc/net/dev", "disable by default interfaces matching", "lo fireqos* *-ifb"), NULL, SIMPLE_PATTERN_EXACT); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/dev"); - ff = procfile_open(config_get("plugin:proc:/proc/net/dev", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - // rename all the devices, if we have pending renames - if(unlikely(netdev_pending_renames)) - netdev_rename_all_lock(); - - netdev_found = 0; - - kernel_uint_t system_rbytes = 0; - kernel_uint_t system_tbytes = 0; - - size_t lines = procfile_lines(ff), l; - for(l = 2; l < lines ;l++) { - // require 17 words on each line - if(unlikely(procfile_linewords(ff, l) < 17)) continue; - - struct netdev *d = get_netdev(procfile_lineword(ff, l, 0)); - d->updated = 1; - netdev_found++; - - if(unlikely(!d->configured)) { - // this is the first time we see this interface - - // remember we configured it - d->configured = 1; - - d->enabled = enable_new_interfaces; - - if(d->enabled) - d->enabled = !simple_pattern_matches(disabled_list, d->name); - - char buffer[FILENAME_MAX + 1]; - - snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name); - if(likely(access(buffer, R_OK) == 0)) { - d->virtual = 1; - } - else - d->virtual = 0; - - snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name); - d->enabled = config_get_boolean_ondemand(buffer, "enabled", d->enabled); - d->virtual = config_get_boolean(buffer, "virtual", d->virtual); - - if(d->enabled == CONFIG_BOOLEAN_NO) - continue; - - d->do_bandwidth = config_get_boolean_ondemand(buffer, "bandwidth", do_bandwidth); - d->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets); - d->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors); - d->do_drops = config_get_boolean_ondemand(buffer, "drops", do_drops); - d->do_fifo = config_get_boolean_ondemand(buffer, "fifo", do_fifo); - d->do_compressed = config_get_boolean_ondemand(buffer, "compressed", do_compressed); - d->do_events = config_get_boolean_ondemand(buffer, "events", do_events); - } - - if(unlikely(!d->enabled)) - continue; - - if(likely(d->do_bandwidth != CONFIG_BOOLEAN_NO || !d->virtual)) { - d->rbytes = str2kernel_uint_t(procfile_lineword(ff, l, 1)); - d->tbytes = str2kernel_uint_t(procfile_lineword(ff, l, 9)); - - if(likely(!d->virtual)) { - system_rbytes += d->rbytes; - system_tbytes += d->tbytes; - } - } - - if(likely(d->do_packets != 
CONFIG_BOOLEAN_NO)) {
-            d->rpackets = str2kernel_uint_t(procfile_lineword(ff, l, 2));
-            d->rmulticast = str2kernel_uint_t(procfile_lineword(ff, l, 8));
-            d->tpackets = str2kernel_uint_t(procfile_lineword(ff, l, 10));
-        }
-
-        if(likely(d->do_errors != CONFIG_BOOLEAN_NO)) {
-            d->rerrors = str2kernel_uint_t(procfile_lineword(ff, l, 3));
-            d->terrors = str2kernel_uint_t(procfile_lineword(ff, l, 11));
-        }
-
-        if(likely(d->do_drops != CONFIG_BOOLEAN_NO)) {
-            d->rdrops = str2kernel_uint_t(procfile_lineword(ff, l, 4));
-            d->tdrops = str2kernel_uint_t(procfile_lineword(ff, l, 12));
-        }
-
-        if(likely(d->do_fifo != CONFIG_BOOLEAN_NO)) {
-            d->rfifo = str2kernel_uint_t(procfile_lineword(ff, l, 5));
-            d->tfifo = str2kernel_uint_t(procfile_lineword(ff, l, 13));
-        }
-
-        if(likely(d->do_compressed != CONFIG_BOOLEAN_NO)) {
-            d->rcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 7));
-            d->tcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 16));
-        }
-
-        if(likely(d->do_events != CONFIG_BOOLEAN_NO)) {
-            d->rframe = str2kernel_uint_t(procfile_lineword(ff, l, 6));
-            d->tcollisions = str2kernel_uint_t(procfile_lineword(ff, l, 14));
-            d->tcarrier = str2kernel_uint_t(procfile_lineword(ff, l, 15));
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_bandwidth == CONFIG_BOOLEAN_AUTO && (d->rbytes || d->tbytes))))
-            d->do_bandwidth = CONFIG_BOOLEAN_YES;
-
-        if(d->do_bandwidth == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_bandwidth)) {
-
-                d->st_bandwidth = rrdset_create_localhost(
-                        d->chart_type_net_bytes
-                        , d->chart_id_net_bytes
-                        , NULL
-                        , d->chart_family
-                        , "net.net"
-                        , "Bandwidth"
-                        , "kilobits/s"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority
-                        , update_every
-                        , RRDSET_TYPE_AREA
-                );
-
-                d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-
-                if(d->flipped) {
-                    // flip receive/transmit
-
-                    RRDDIM *td = d->rd_rbytes;
-                    d->rd_rbytes = d->rd_tbytes;
-                    d->rd_tbytes = td;
-                }
-            }
-            else rrdset_next(d->st_bandwidth);
-
-            rrddim_set_by_pointer(d->st_bandwidth, d->rd_rbytes, (collected_number)d->rbytes);
-            rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes);
-            rrdset_done(d->st_bandwidth);
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_packets == CONFIG_BOOLEAN_AUTO && (d->rpackets || d->tpackets || d->rmulticast))))
-            d->do_packets = CONFIG_BOOLEAN_YES;
-
-        if(d->do_packets == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_packets)) {
-
-                d->st_packets = rrdset_create_localhost(
-                        d->chart_type_net_packets
-                        , d->chart_id_net_packets
-                        , NULL
-                        , d->chart_family
-                        , "net.packets"
-                        , "Packets"
-                        , "packets/s"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority + 1
-                        , update_every
-                        , RRDSET_TYPE_LINE
-                );
-
-                rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
-
-                d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-                if(d->flipped) {
-                    // flip receive/transmit
-
-                    RRDDIM *td = d->rd_rpackets;
-                    d->rd_rpackets = d->rd_tpackets;
-                    d->rd_tpackets = td;
-                }
-            }
-            else rrdset_next(d->st_packets);
-
-            rrddim_set_by_pointer(d->st_packets, d->rd_rpackets, (collected_number)d->rpackets);
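
Note: the receive/transmit flip above exists because a renamed (cgroup) interface is one end of a veth pair, so bytes the host receives are bytes the container sent. A minimal standalone sketch of that pointer swap, using a hypothetical stand-in type rather than netdata's RRDDIM:

/* flip_demo.c -- sketch of the receive/transmit flip for container
 * interfaces; struct dim is a hypothetical stand-in for RRDDIM. */
#include <stdio.h>

struct dim { const char *name; };

int main(void) {
    struct dim received = { "received" }, sent = { "sent" };
    struct dim *rd_rx = &received, *rd_tx = &sent;

    int flipped = 1;            // set for interfaces renamed into a cgroup
    if(flipped) {
        // swap once at chart creation; collection keeps using rd_rx/rd_tx
        struct dim *td = rd_rx;
        rd_rx = rd_tx;
        rd_tx = td;
    }

    // the host's rbytes now land on the "sent" dimension, matching the
    // container's point of view
    printf("rbytes -> %s, tbytes -> %s\n", rd_rx->name, rd_tx->name);
    return 0;
}
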
-            rrddim_set_by_pointer(d->st_packets, d->rd_tpackets, (collected_number)d->tpackets);
-            rrddim_set_by_pointer(d->st_packets, d->rd_rmulticast, (collected_number)d->rmulticast);
-            rrdset_done(d->st_packets);
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_errors == CONFIG_BOOLEAN_AUTO && (d->rerrors || d->terrors))))
-            d->do_errors = CONFIG_BOOLEAN_YES;
-
-        if(d->do_errors == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_errors)) {
-
-                d->st_errors = rrdset_create_localhost(
-                        d->chart_type_net_errors
-                        , d->chart_id_net_errors
-                        , NULL
-                        , d->chart_family
-                        , "net.errors"
-                        , "Interface Errors"
-                        , "errors/s"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority + 2
-                        , update_every
-                        , RRDSET_TYPE_LINE
-                );
-
-                rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
-
-                d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-                if(d->flipped) {
-                    // flip receive/transmit
-
-                    RRDDIM *td = d->rd_rerrors;
-                    d->rd_rerrors = d->rd_terrors;
-                    d->rd_terrors = td;
-                }
-            }
-            else rrdset_next(d->st_errors);
-
-            rrddim_set_by_pointer(d->st_errors, d->rd_rerrors, (collected_number)d->rerrors);
-            rrddim_set_by_pointer(d->st_errors, d->rd_terrors, (collected_number)d->terrors);
-            rrdset_done(d->st_errors);
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_drops == CONFIG_BOOLEAN_AUTO && (d->rdrops || d->tdrops))))
-            d->do_drops = CONFIG_BOOLEAN_YES;
-
-        if(d->do_drops == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_drops)) {
-
-                d->st_drops = rrdset_create_localhost(
-                        d->chart_type_net_drops
-                        , d->chart_id_net_drops
-                        , NULL
-                        , d->chart_family
-                        , "net.drops"
-                        , "Interface Drops"
-                        , "drops/s"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority + 3
-                        , update_every
-                        , RRDSET_TYPE_LINE
-                );
-
-                rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
-
-                d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-                if(d->flipped) {
-                    // flip receive/transmit
-
-                    RRDDIM *td = d->rd_rdrops;
-                    d->rd_rdrops = d->rd_tdrops;
-                    d->rd_tdrops = td;
-                }
-            }
-            else rrdset_next(d->st_drops);
-
-            rrddim_set_by_pointer(d->st_drops, d->rd_rdrops, (collected_number)d->rdrops);
-            rrddim_set_by_pointer(d->st_drops, d->rd_tdrops, (collected_number)d->tdrops);
-            rrdset_done(d->st_drops);
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_fifo == CONFIG_BOOLEAN_AUTO && (d->rfifo || d->tfifo))))
-            d->do_fifo = CONFIG_BOOLEAN_YES;
-
-        if(d->do_fifo == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_fifo)) {
-
-                d->st_fifo = rrdset_create_localhost(
-                        d->chart_type_net_fifo
-                        , d->chart_id_net_fifo
-                        , NULL
-                        , d->chart_family
-                        , "net.fifo"
-                        , "Interface FIFO Buffer Errors"
-                        , "errors"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority + 4
-                        , update_every
-                        , RRDSET_TYPE_LINE
-                );
-
-                rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
-
-                d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-                if(d->flipped) {
-                    // flip receive/transmit
-
-                    RRDDIM *td = d->rd_rfifo;
-                    d->rd_rfifo = d->rd_tfifo;
-                    d->rd_tfifo = td;
-                }
-            }
-            else rrdset_next(d->st_fifo);
-
-            rrddim_set_by_pointer(d->st_fifo, d->rd_rfifo, (collected_number)d->rfifo);
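
Note: every chart above follows the same CONFIG_BOOLEAN_AUTO promotion pattern: in AUTO mode a chart stays hidden until its counters first become non-zero, and is then latched to YES for the rest of the run. A small self-contained sketch of that latch, with hypothetical constants in place of netdata's config enums:

/* auto_latch_demo.c -- sketch of the CONFIG_BOOLEAN_AUTO latch pattern. */
#include <stdio.h>

enum { BOOL_NO = 0, BOOL_YES = 1, BOOL_AUTO = 2 };

static int do_fifo = BOOL_AUTO;      // as read from the configuration

static void collect(unsigned long long rfifo, unsigned long long tfifo) {
    if(do_fifo == BOOL_AUTO && (rfifo || tfifo))
        do_fifo = BOOL_YES;          // latch: never drops back to AUTO

    if(do_fifo == BOOL_YES)
        printf("updating fifo chart: %llu %llu\n", rfifo, tfifo);
}

int main(void) {
    collect(0, 0);                   // nothing seen yet: chart not created
    collect(3, 0);                   // first error seen: chart appears
    collect(0, 0);                   // chart stays visible with zero values
    return 0;
}
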
-            rrddim_set_by_pointer(d->st_fifo, d->rd_tfifo, (collected_number)d->tfifo);
-            rrdset_done(d->st_fifo);
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_compressed == CONFIG_BOOLEAN_AUTO && (d->rcompressed || d->tcompressed))))
-            d->do_compressed = CONFIG_BOOLEAN_YES;
-
-        if(d->do_compressed == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_compressed)) {
-
-                d->st_compressed = rrdset_create_localhost(
-                        d->chart_type_net_compressed
-                        , d->chart_id_net_compressed
-                        , NULL
-                        , d->chart_family
-                        , "net.compressed"
-                        , "Compressed Packets"
-                        , "packets/s"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority + 5
-                        , update_every
-                        , RRDSET_TYPE_LINE
-                );
-
-                rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
-
-                d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
-                if(d->flipped) {
-                    // flip receive/transmit
-
-                    RRDDIM *td = d->rd_rcompressed;
-                    d->rd_rcompressed = d->rd_tcompressed;
-                    d->rd_tcompressed = td;
-                }
-            }
-            else rrdset_next(d->st_compressed);
-
-            rrddim_set_by_pointer(d->st_compressed, d->rd_rcompressed, (collected_number)d->rcompressed);
-            rrddim_set_by_pointer(d->st_compressed, d->rd_tcompressed, (collected_number)d->tcompressed);
-            rrdset_done(d->st_compressed);
-        }
-
-        // --------------------------------------------------------------------
-
-        if(unlikely((d->do_events == CONFIG_BOOLEAN_AUTO && (d->rframe || d->tcollisions || d->tcarrier))))
-            d->do_events = CONFIG_BOOLEAN_YES;
-
-        if(d->do_events == CONFIG_BOOLEAN_YES) {
-            if(unlikely(!d->st_events)) {
-
-                d->st_events = rrdset_create_localhost(
-                        d->chart_type_net_events
-                        , d->chart_id_net_events
-                        , NULL
-                        , d->chart_family
-                        , "net.events"
-                        , "Network Interface Events"
-                        , "events/s"
-                        , "proc"
-                        , "net/dev"
-                        , d->priority + 6
-                        , update_every
-                        , RRDSET_TYPE_LINE
-                );
-
-                rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
-
-                d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-                d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            }
-            else rrdset_next(d->st_events);
-
-            rrddim_set_by_pointer(d->st_events, d->rd_rframe, (collected_number)d->rframe);
-            rrddim_set_by_pointer(d->st_events, d->rd_tcollisions, (collected_number)d->tcollisions);
-            rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier);
-            rrdset_done(d->st_events);
-        }
-    }
-
-    if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (system_rbytes || system_tbytes))) {
-        do_bandwidth = CONFIG_BOOLEAN_YES;
-        static RRDSET *st_system_net = NULL;
-        static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
-        if(unlikely(!st_system_net)) {
-            st_system_net = rrdset_create_localhost(
-                    "system"
-                    , "net"
-                    , NULL
-                    , "network"
-                    , NULL
-                    , "Physical Network Interfaces Aggregated Bandwidth"
-                    , "kilobits/s"
-                    , "proc"
-                    , "net/dev"
-                    , 500
-                    , update_every
-                    , RRDSET_TYPE_AREA
-            );
-
-            rd_in = rrddim_add(st_system_net, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-            rd_out = rrddim_add(st_system_net, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else
-            rrdset_next(st_system_net);
-
-        rrddim_set_by_pointer(st_system_net, rd_in, (collected_number)system_rbytes);
-        rrddim_set_by_pointer(st_system_net,
rd_out, (collected_number)system_tbytes); - - rrdset_done(st_system_net); - } - - netdev_cleanup(); - - return 0; -} diff --git a/src/proc_net_ip_vs_stats.c b/src/proc_net_ip_vs_stats.c deleted file mode 100644 index d76972f3c..000000000 --- a/src/proc_net_ip_vs_stats.c +++ /dev/null @@ -1,129 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_NET_IPVS "ipvs" - -int do_proc_net_ip_vs_stats(int update_every, usec_t dt) { - (void)dt; - static int do_bandwidth = -1, do_sockets = -1, do_packets = -1; - static procfile *ff = NULL; - - if(do_bandwidth == -1) do_bandwidth = config_get_boolean("plugin:proc:/proc/net/ip_vs_stats", "IPVS bandwidth", 1); - if(do_sockets == -1) do_sockets = config_get_boolean("plugin:proc:/proc/net/ip_vs_stats", "IPVS connections", 1); - if(do_packets == -1) do_packets = config_get_boolean("plugin:proc:/proc/net/ip_vs_stats", "IPVS packets", 1); - - if(!ff) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/ip_vs_stats"); - ff = procfile_open(config_get("plugin:proc:/proc/net/ip_vs_stats", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT); - } - if(!ff) return 1; - - ff = procfile_readall(ff); - if(!ff) return 0; // we return 0, so that we will retry to open it next time - - // make sure we have 3 lines - if(procfile_lines(ff) < 3) return 1; - - // make sure we have 5 words on the 3rd line - if(procfile_linewords(ff, 2) < 5) return 1; - - unsigned long long entries, InPackets, OutPackets, InBytes, OutBytes; - - entries = strtoull(procfile_lineword(ff, 2, 0), NULL, 16); - InPackets = strtoull(procfile_lineword(ff, 2, 1), NULL, 16); - OutPackets = strtoull(procfile_lineword(ff, 2, 2), NULL, 16); - InBytes = strtoull(procfile_lineword(ff, 2, 3), NULL, 16); - OutBytes = strtoull(procfile_lineword(ff, 2, 4), NULL, 16); - - - // -------------------------------------------------------------------- - - if(do_sockets) { - static RRDSET *st = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_IPVS - , "sockets" - , NULL - , RRD_TYPE_NET_IPVS - , NULL - , "IPVS New Connections" - , "connections/s" - , "proc" - , "net/ip_vs_stats" - , 3101 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "connections", entries); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_packets) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_IPVS - , "packets" - , NULL - , RRD_TYPE_NET_IPVS - , NULL - , "IPVS Packets" - , "packets/s" - , "proc" - , "net/ip_vs_stats" - , 3102 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "received", InPackets); - rrddim_set(st, "sent", OutPackets); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_bandwidth) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_IPVS - , "net" - , NULL - , RRD_TYPE_NET_IPVS - , NULL - , "IPVS Bandwidth" - , "kilobits/s" - , "proc" - , "net/ip_vs_stats" - , 3100 - , update_every - , RRDSET_TYPE_AREA - ); - - rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "sent", NULL, -8, 
BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "received", InBytes); - rrddim_set(st, "sent", OutBytes); - rrdset_done(st); - } - - return 0; -} diff --git a/src/proc_net_netstat.c b/src/proc_net_netstat.c deleted file mode 100644 index dd070e4c3..000000000 --- a/src/proc_net_netstat.c +++ /dev/null @@ -1,761 +0,0 @@ -#include "common.h" - -unsigned long long tcpext_TCPSynRetrans; - -static void parse_line_pair(procfile *ff, ARL_BASE *base, size_t header_line, size_t values_line) { - size_t hwords = procfile_linewords(ff, header_line); - size_t vwords = procfile_linewords(ff, values_line); - size_t w; - - if(unlikely(vwords > hwords)) { - error("File /proc/net/netstat on header line %zu has %zu words, but on value line %zu has %zu words.", header_line, hwords, values_line, vwords); - vwords = hwords; - } - - for(w = 1; w < vwords ;w++) { - if(unlikely(arl_check(base, procfile_lineword(ff, header_line, w), procfile_lineword(ff, values_line, w)))) - break; - } -} - -int do_proc_net_netstat(int update_every, usec_t dt) { - (void)dt; - - static int do_bandwidth = -1, do_inerrors = -1, do_mcast = -1, do_bcast = -1, do_mcast_p = -1, do_bcast_p = -1, do_ecn = -1, \ - do_tcpext_reorder = -1, do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1, do_tcpext_memory = -1, - do_tcpext_listen = -1; - - static uint32_t hash_ipext = 0, hash_tcpext = 0; - static procfile *ff = NULL; - - static ARL_BASE *arl_tcpext = NULL; - static ARL_BASE *arl_ipext = NULL; - - // -------------------------------------------------------------------- - // IPv4 - - // IPv4 bandwidth - static unsigned long long ipext_InOctets = 0; - static unsigned long long ipext_OutOctets = 0; - - // IPv4 input errors - static unsigned long long ipext_InNoRoutes = 0; - static unsigned long long ipext_InTruncatedPkts = 0; - static unsigned long long ipext_InCsumErrors = 0; - - // IPv4 multicast bandwidth - static unsigned long long ipext_InMcastOctets = 0; - static unsigned long long ipext_OutMcastOctets = 0; - - // IPv4 multicast packets - static unsigned long long ipext_InMcastPkts = 0; - static unsigned long long ipext_OutMcastPkts = 0; - - // IPv4 broadcast bandwidth - static unsigned long long ipext_InBcastOctets = 0; - static unsigned long long ipext_OutBcastOctets = 0; - - // IPv4 broadcast packets - static unsigned long long ipext_InBcastPkts = 0; - static unsigned long long ipext_OutBcastPkts = 0; - - // IPv4 ECN - static unsigned long long ipext_InNoECTPkts = 0; - static unsigned long long ipext_InECT1Pkts = 0; - static unsigned long long ipext_InECT0Pkts = 0; - static unsigned long long ipext_InCEPkts = 0; - - // -------------------------------------------------------------------- - // IPv4 TCP - - // IPv4 TCP Reordering - static unsigned long long tcpext_TCPRenoReorder = 0; - static unsigned long long tcpext_TCPFACKReorder = 0; - static unsigned long long tcpext_TCPSACKReorder = 0; - static unsigned long long tcpext_TCPTSReorder = 0; - - // IPv4 TCP SYN Cookies - static unsigned long long tcpext_SyncookiesSent = 0; - static unsigned long long tcpext_SyncookiesRecv = 0; - static unsigned long long tcpext_SyncookiesFailed = 0; - - // IPv4 TCP Out Of Order Queue - // http://www.spinics.net/lists/netdev/msg204696.html - static unsigned long long tcpext_TCPOFOQueue = 0; // Number of packets queued in OFO queue - static unsigned long long tcpext_TCPOFODrop = 0; // Number of packets meant to be queued in OFO but dropped because socket rcvbuf limit hit. 
- static unsigned long long tcpext_TCPOFOMerge = 0; // Number of packets in OFO that were merged with other packets. - static unsigned long long tcpext_OfoPruned = 0; // packets dropped from out-of-order queue because of socket buffer overrun - - // IPv4 TCP connection resets - // https://github.com/ecki/net-tools/blob/bd8bceaed2311651710331a7f8990c3e31be9840/statistics.c - static unsigned long long tcpext_TCPAbortOnData = 0; // connections reset due to unexpected data - static unsigned long long tcpext_TCPAbortOnClose = 0; // connections reset due to early user close - static unsigned long long tcpext_TCPAbortOnMemory = 0; // connections aborted due to memory pressure - static unsigned long long tcpext_TCPAbortOnTimeout = 0; // connections aborted due to timeout - static unsigned long long tcpext_TCPAbortOnLinger = 0; // connections aborted after user close in linger timeout - static unsigned long long tcpext_TCPAbortFailed = 0; // times unable to send RST due to no memory - - // https://perfchron.com/2015/12/26/investigating-linux-network-issues-with-netstat-and-nstat/ - static unsigned long long tcpext_ListenOverflows = 0; // times the listen queue of a socket overflowed - static unsigned long long tcpext_ListenDrops = 0; // SYNs to LISTEN sockets ignored - - // IPv4 TCP memory pressures - static unsigned long long tcpext_TCPMemoryPressures = 0; - - // shared: tcpext_TCPSynRetrans - - - if(unlikely(!arl_ipext)) { - hash_ipext = simple_hash("IpExt"); - hash_tcpext = simple_hash("TcpExt"); - - do_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "bandwidth", CONFIG_BOOLEAN_AUTO); - do_inerrors = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "input errors", CONFIG_BOOLEAN_AUTO); - do_mcast = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "multicast bandwidth", CONFIG_BOOLEAN_AUTO); - do_bcast = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "broadcast bandwidth", CONFIG_BOOLEAN_AUTO); - do_mcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "multicast packets", CONFIG_BOOLEAN_AUTO); - do_bcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "broadcast packets", CONFIG_BOOLEAN_AUTO); - do_ecn = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "ECN packets", CONFIG_BOOLEAN_AUTO); - - do_tcpext_reorder = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "TCP reorders", CONFIG_BOOLEAN_AUTO); - do_tcpext_syscookies = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "TCP SYN cookies", CONFIG_BOOLEAN_AUTO); - do_tcpext_ofo = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO); - do_tcpext_connaborts = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "TCP connection aborts", CONFIG_BOOLEAN_AUTO); - do_tcpext_memory = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "TCP memory pressures", CONFIG_BOOLEAN_AUTO); - do_tcpext_listen = config_get_boolean_ondemand("plugin:proc:/proc/net/netstat", "TCP listen issues", CONFIG_BOOLEAN_AUTO); - - arl_ipext = arl_create("netstat/ipext", NULL, 60); - arl_tcpext = arl_create("netstat/tcpext", NULL, 60); - - // -------------------------------------------------------------------- - // IPv4 - - if(do_bandwidth != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InOctets", &ipext_InOctets); - arl_expect(arl_ipext, "OutOctets", &ipext_OutOctets); - } - - if(do_inerrors != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InNoRoutes", &ipext_InNoRoutes); - 
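
Note: the arl_expect() calls around here bind each expected keyword to the variable that should receive its value, and a later arl_check() pass fills those variables while scanning a header/value line pair. An illustrative sketch of that contract, assuming a plain linear lookup rather than netdata's actual adaptive re-sortable list (which re-orders itself so keywords match in file order with near-constant cost):

/* arl_demo.c -- sketch of the expect/check contract, not the real ARL. */
#include <stdio.h>
#include <string.h>

struct expectation {
    const char *name;
    unsigned long long *dst;
};

static unsigned long long in_octets, out_octets;

static struct expectation ipext[] = {
    { "InOctets",  &in_octets  },
    { "OutOctets", &out_octets },
    { NULL, NULL }
};

static void check(struct expectation *base, const char *key, unsigned long long v) {
    // store the value into whichever variable was bound to this keyword
    for(struct expectation *e = base; e->name; e++)
        if(!strcmp(e->name, key)) { *e->dst = v; return; }
}

int main(void) {
    // values as they would arrive from a parsed header/value line pair
    check(ipext, "InOctets", 123456ULL);
    check(ipext, "OutOctets", 654321ULL);
    printf("in=%llu out=%llu\n", in_octets, out_octets);
    return 0;
}
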
arl_expect(arl_ipext, "InTruncatedPkts", &ipext_InTruncatedPkts); - arl_expect(arl_ipext, "InCsumErrors", &ipext_InCsumErrors); - } - - if(do_mcast != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InMcastOctets", &ipext_InMcastOctets); - arl_expect(arl_ipext, "OutMcastOctets", &ipext_OutMcastOctets); - } - - if(do_mcast_p != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InMcastPkts", &ipext_InMcastPkts); - arl_expect(arl_ipext, "OutMcastPkts", &ipext_OutMcastPkts); - } - - if(do_bcast != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InBcastPkts", &ipext_InBcastPkts); - arl_expect(arl_ipext, "OutBcastPkts", &ipext_OutBcastPkts); - } - - if(do_bcast_p != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InBcastOctets", &ipext_InBcastOctets); - arl_expect(arl_ipext, "OutBcastOctets", &ipext_OutBcastOctets); - } - - if(do_ecn != CONFIG_BOOLEAN_NO) { - arl_expect(arl_ipext, "InNoECTPkts", &ipext_InNoECTPkts); - arl_expect(arl_ipext, "InECT1Pkts", &ipext_InECT1Pkts); - arl_expect(arl_ipext, "InECT0Pkts", &ipext_InECT0Pkts); - arl_expect(arl_ipext, "InCEPkts", &ipext_InCEPkts); - } - - // -------------------------------------------------------------------- - // IPv4 TCP - - if(do_tcpext_reorder != CONFIG_BOOLEAN_NO) { - arl_expect(arl_tcpext, "TCPFACKReorder", &tcpext_TCPFACKReorder); - arl_expect(arl_tcpext, "TCPSACKReorder", &tcpext_TCPSACKReorder); - arl_expect(arl_tcpext, "TCPRenoReorder", &tcpext_TCPRenoReorder); - arl_expect(arl_tcpext, "TCPTSReorder", &tcpext_TCPTSReorder); - } - - if(do_tcpext_syscookies != CONFIG_BOOLEAN_NO) { - arl_expect(arl_tcpext, "SyncookiesSent", &tcpext_SyncookiesSent); - arl_expect(arl_tcpext, "SyncookiesRecv", &tcpext_SyncookiesRecv); - arl_expect(arl_tcpext, "SyncookiesFailed", &tcpext_SyncookiesFailed); - } - - if(do_tcpext_ofo != CONFIG_BOOLEAN_NO) { - arl_expect(arl_tcpext, "TCPOFOQueue", &tcpext_TCPOFOQueue); - arl_expect(arl_tcpext, "TCPOFODrop", &tcpext_TCPOFODrop); - arl_expect(arl_tcpext, "TCPOFOMerge", &tcpext_TCPOFOMerge); - arl_expect(arl_tcpext, "OfoPruned", &tcpext_OfoPruned); - } - - if(do_tcpext_connaborts != CONFIG_BOOLEAN_NO) { - arl_expect(arl_tcpext, "TCPAbortOnData", &tcpext_TCPAbortOnData); - arl_expect(arl_tcpext, "TCPAbortOnClose", &tcpext_TCPAbortOnClose); - arl_expect(arl_tcpext, "TCPAbortOnMemory", &tcpext_TCPAbortOnMemory); - arl_expect(arl_tcpext, "TCPAbortOnTimeout", &tcpext_TCPAbortOnTimeout); - arl_expect(arl_tcpext, "TCPAbortOnLinger", &tcpext_TCPAbortOnLinger); - arl_expect(arl_tcpext, "TCPAbortFailed", &tcpext_TCPAbortFailed); - } - - if(do_tcpext_memory != CONFIG_BOOLEAN_NO) { - arl_expect(arl_tcpext, "TCPMemoryPressures", &tcpext_TCPMemoryPressures); - } - - if(do_tcpext_listen != CONFIG_BOOLEAN_NO) { - arl_expect(arl_tcpext, "ListenOverflows", &tcpext_ListenOverflows); - arl_expect(arl_tcpext, "ListenDrops", &tcpext_ListenDrops); - } - - // shared metrics - arl_expect(arl_tcpext, "TCPSynRetrans", &tcpext_TCPSynRetrans); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/netstat"); - ff = procfile_open(config_get("plugin:proc:/proc/net/netstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - size_t words; - - arl_begin(arl_ipext); - arl_begin(arl_tcpext); - - for(l = 0; l < lines ;l++) { - char *key = 
procfile_lineword(ff, l, 0); - uint32_t hash = simple_hash(key); - - if(unlikely(hash == hash_ipext && strcmp(key, "IpExt") == 0)) { - size_t h = l++; - - words = procfile_linewords(ff, l); - if(unlikely(words < 2)) { - error("Cannot read /proc/net/netstat IpExt line. Expected 2+ params, read %zu.", words); - continue; - } - - parse_line_pair(ff, arl_ipext, h, l); - - // -------------------------------------------------------------------- - - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (ipext_InOctets || ipext_OutOctets))) { - do_bandwidth = CONFIG_BOOLEAN_YES; - static RRDSET *st_system_ipv4 = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_system_ipv4)) { - st_system_ipv4 = rrdset_create_localhost( - "system" - , "ipv4" - , NULL - , "network" - , NULL - , "IPv4 Bandwidth" - , "kilobits/s" - , "proc" - , "net/netstat" - , 501 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st_system_ipv4, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_system_ipv4, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_system_ipv4); - - rrddim_set_by_pointer(st_system_ipv4, rd_in, ipext_InOctets); - rrddim_set_by_pointer(st_system_ipv4, rd_out, ipext_OutOctets); - - rrdset_done(st_system_ipv4); - } - - // -------------------------------------------------------------------- - - if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && (ipext_InNoRoutes || ipext_InTruncatedPkts))) { - do_inerrors = CONFIG_BOOLEAN_YES; - static RRDSET *st_ipv4_inerrors = NULL; - static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL; - - if(unlikely(!st_ipv4_inerrors)) { - st_ipv4_inerrors = rrdset_create_localhost( - "ipv4" - , "inerrors" - , NULL - , "errors" - , NULL - , "IPv4 Input Errors" - , "packets/s" - , "proc" - , "net/netstat" - , 4000 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_ipv4_inerrors, RRDSET_FLAG_DETAIL); - - rd_noroutes = rrddim_add(st_ipv4_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_truncated = rrddim_add(st_ipv4_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_checksum = rrddim_add(st_ipv4_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_ipv4_inerrors); - - rrddim_set_by_pointer(st_ipv4_inerrors, rd_noroutes, ipext_InNoRoutes); - rrddim_set_by_pointer(st_ipv4_inerrors, rd_truncated, ipext_InTruncatedPkts); - rrddim_set_by_pointer(st_ipv4_inerrors, rd_checksum, ipext_InCsumErrors); - - rrdset_done(st_ipv4_inerrors); - } - - // -------------------------------------------------------------------- - - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets))) { - do_mcast = CONFIG_BOOLEAN_YES; - static RRDSET *st_ipv4_mcast = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_ipv4_mcast)) { - st_ipv4_mcast = rrdset_create_localhost( - "ipv4" - , "mcast" - , NULL - , "multicast" - , NULL - , "IPv4 Multicast Bandwidth" - , "kilobits/s" - , "proc" - , "net/netstat" - , 9000 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(st_ipv4_mcast, RRDSET_FLAG_DETAIL); - - rd_in = rrddim_add(st_ipv4_mcast, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_ipv4_mcast, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, 
RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_ipv4_mcast); - - rrddim_set_by_pointer(st_ipv4_mcast, rd_in, ipext_InMcastOctets); - rrddim_set_by_pointer(st_ipv4_mcast, rd_out, ipext_OutMcastOctets); - - rrdset_done(st_ipv4_mcast); - } - - // -------------------------------------------------------------------- - - if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (ipext_InBcastOctets || ipext_OutBcastOctets))) { - do_bcast = CONFIG_BOOLEAN_YES; - - static RRDSET *st_ipv4_bcast = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_ipv4_bcast)) { - st_ipv4_bcast = rrdset_create_localhost( - "ipv4" - , "bcast" - , NULL - , "broadcast" - , NULL - , "IPv4 Broadcast Bandwidth" - , "kilobits/s" - , "proc" - , "net/netstat" - , 8000 - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(st_ipv4_bcast, RRDSET_FLAG_DETAIL); - - rd_in = rrddim_add(st_ipv4_bcast, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_ipv4_bcast, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_ipv4_bcast); - - rrddim_set_by_pointer(st_ipv4_bcast, rd_in, ipext_InBcastOctets); - rrddim_set_by_pointer(st_ipv4_bcast, rd_out, ipext_OutBcastOctets); - - rrdset_done(st_ipv4_bcast); - } - - // -------------------------------------------------------------------- - - if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InMcastPkts || ipext_OutMcastPkts))) { - do_mcast_p = CONFIG_BOOLEAN_YES; - - static RRDSET *st_ipv4_mcastpkts = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_ipv4_mcastpkts)) { - st_ipv4_mcastpkts = rrdset_create_localhost( - "ipv4" - , "mcastpkts" - , NULL - , "multicast" - , NULL - , "IPv4 Multicast Packets" - , "packets/s" - , "proc" - , "net/netstat" - , 8600 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_ipv4_mcastpkts, RRDSET_FLAG_DETAIL); - - rd_in = rrddim_add(st_ipv4_mcastpkts, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_ipv4_mcastpkts, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_ipv4_mcastpkts); - - rrddim_set_by_pointer(st_ipv4_mcastpkts, rd_in, ipext_InMcastPkts); - rrddim_set_by_pointer(st_ipv4_mcastpkts, rd_out, ipext_OutMcastPkts); - - rrdset_done(st_ipv4_mcastpkts); - } - - // -------------------------------------------------------------------- - - if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InBcastPkts || ipext_OutBcastPkts))) { - do_bcast_p = CONFIG_BOOLEAN_YES; - - static RRDSET *st_ipv4_bcastpkts = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_ipv4_bcastpkts)) { - st_ipv4_bcastpkts = rrdset_create_localhost( - "ipv4" - , "bcastpkts" - , NULL - , "broadcast" - , NULL - , "IPv4 Broadcast Packets" - , "packets/s" - , "proc" - , "net/netstat" - , 8500 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_ipv4_bcastpkts, RRDSET_FLAG_DETAIL); - - rd_in = rrddim_add(st_ipv4_bcastpkts, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_ipv4_bcastpkts, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_ipv4_bcastpkts); - - rrddim_set_by_pointer(st_ipv4_bcastpkts, rd_in, ipext_InBcastPkts); - rrddim_set_by_pointer(st_ipv4_bcastpkts, rd_out, ipext_OutBcastPkts); - - rrdset_done(st_ipv4_bcastpkts); - } - - // 
-------------------------------------------------------------------- - - if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (ipext_InCEPkts || ipext_InECT0Pkts || ipext_InECT1Pkts || ipext_InNoECTPkts))) { - do_ecn = CONFIG_BOOLEAN_YES; - - static RRDSET *st_ecnpkts = NULL; - static RRDDIM *rd_cep = NULL, *rd_noectp = NULL, *rd_ectp0 = NULL, *rd_ectp1 = NULL; - - if(unlikely(!st_ecnpkts)) { - st_ecnpkts = rrdset_create_localhost( - "ipv4" - , "ecnpkts" - , NULL - , "ecn" - , NULL - , "IPv4 ECN Statistics" - , "packets/s" - , "proc" - , "net/netstat" - , 8700 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL); - - rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ectp1 = rrddim_add(st_ecnpkts, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_ecnpkts); - - rrddim_set_by_pointer(st_ecnpkts, rd_cep, ipext_InCEPkts); - rrddim_set_by_pointer(st_ecnpkts, rd_noectp, ipext_InNoECTPkts); - rrddim_set_by_pointer(st_ecnpkts, rd_ectp0, ipext_InECT0Pkts); - rrddim_set_by_pointer(st_ecnpkts, rd_ectp1, ipext_InECT1Pkts); - - rrdset_done(st_ecnpkts); - } - } - else if(unlikely(hash == hash_tcpext && strcmp(key, "TcpExt") == 0)) { - size_t h = l++; - - words = procfile_linewords(ff, l); - if(unlikely(words < 2)) { - error("Cannot read /proc/net/netstat TcpExt line. Expected 2+ params, read %zu.", words); - continue; - } - - parse_line_pair(ff, arl_tcpext, h, l); - - // -------------------------------------------------------------------- - - if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && (tcpext_TCPMemoryPressures))) { - do_tcpext_memory = CONFIG_BOOLEAN_YES; - - static RRDSET *st_tcpmemorypressures = NULL; - static RRDDIM *rd_pressures = NULL; - - if(unlikely(!st_tcpmemorypressures)) { - st_tcpmemorypressures = rrdset_create_localhost( - "ipv4" - , "tcpmemorypressures" - , NULL - , "tcp" - , NULL - , "TCP Memory Pressures" - , "events/s" - , "proc" - , "net/netstat" - , 3000 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_pressures = rrddim_add(st_tcpmemorypressures, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_tcpmemorypressures); - - rrddim_set_by_pointer(st_tcpmemorypressures, rd_pressures, tcpext_TCPMemoryPressures); - - rrdset_done(st_tcpmemorypressures); - } - - // -------------------------------------------------------------------- - - if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpext_TCPAbortOnData || tcpext_TCPAbortOnClose || tcpext_TCPAbortOnMemory || tcpext_TCPAbortOnTimeout || tcpext_TCPAbortOnLinger || tcpext_TCPAbortFailed))) { - do_tcpext_connaborts = CONFIG_BOOLEAN_YES; - - static RRDSET *st_tcpconnaborts = NULL; - static RRDDIM *rd_baddata = NULL, *rd_userclosed = NULL, *rd_nomemory = NULL, *rd_timeout = NULL, *rd_linger = NULL, *rd_failed = NULL; - - if(unlikely(!st_tcpconnaborts)) { - st_tcpconnaborts = rrdset_create_localhost( - "ipv4" - , "tcpconnaborts" - , NULL - , "tcp" - , NULL - , "TCP Connection Aborts" - , "connections/s" - , "proc" - , "net/netstat" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_baddata = rrddim_add(st_tcpconnaborts, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL); - 
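
Note: each "IpExt"/"TcpExt" section handled here is a header/value line pair that parse_line_pair() walks word by word, pairing word N of the value line with word N of the header line. A compact standalone sketch of that layout, using fixed strings and strtok_r() instead of the procfile API:

/* pair_demo.c -- sketch of the /proc/net/netstat header/value pair layout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
    // a line pair as found in /proc/net/netstat
    char h[] = "TcpExt: ListenOverflows ListenDrops";
    char v[] = "TcpExt: 7 42";

    char *hs = NULL, *vs = NULL;
    strtok_r(h, " ", &hs);           // skip the shared "TcpExt:" prefix
    strtok_r(v, " ", &vs);

    // walk both lines in lock-step: header word names the value word
    char *hw, *vw;
    while((hw = strtok_r(NULL, " ", &hs)) && (vw = strtok_r(NULL, " ", &vs)))
        printf("%s = %llu\n", hw, strtoull(vw, NULL, 10));

    return 0;
}
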
rd_userclosed = rrddim_add(st_tcpconnaborts, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_nomemory = rrddim_add(st_tcpconnaborts, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_timeout = rrddim_add(st_tcpconnaborts, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_linger = rrddim_add(st_tcpconnaborts, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st_tcpconnaborts, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_tcpconnaborts); - - rrddim_set_by_pointer(st_tcpconnaborts, rd_baddata, tcpext_TCPAbortOnData); - rrddim_set_by_pointer(st_tcpconnaborts, rd_userclosed, tcpext_TCPAbortOnClose); - rrddim_set_by_pointer(st_tcpconnaborts, rd_nomemory, tcpext_TCPAbortOnMemory); - rrddim_set_by_pointer(st_tcpconnaborts, rd_timeout, tcpext_TCPAbortOnTimeout); - rrddim_set_by_pointer(st_tcpconnaborts, rd_linger, tcpext_TCPAbortOnLinger); - rrddim_set_by_pointer(st_tcpconnaborts, rd_failed, tcpext_TCPAbortFailed); - - rrdset_done(st_tcpconnaborts); - } - - // -------------------------------------------------------------------- - - if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && (tcpext_TCPRenoReorder || tcpext_TCPFACKReorder || tcpext_TCPSACKReorder || tcpext_TCPTSReorder))) { - do_tcpext_reorder = CONFIG_BOOLEAN_YES; - - static RRDSET *st_tcpreorders = NULL; - static RRDDIM *rd_timestamp = NULL, *rd_sack = NULL, *rd_fack = NULL, *rd_reno = NULL; - - if(unlikely(!st_tcpreorders)) { - st_tcpreorders = rrdset_create_localhost( - "ipv4" - , "tcpreorders" - , NULL - , "tcp" - , NULL - , "TCP Reordered Packets by Detection Method" - , "packets/s" - , "proc" - , "net/netstat" - , 3020 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_timestamp = rrddim_add(st_tcpreorders, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sack = rrddim_add(st_tcpreorders, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_fack = rrddim_add(st_tcpreorders, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_reno = rrddim_add(st_tcpreorders, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_tcpreorders); - - rrddim_set_by_pointer(st_tcpreorders, rd_timestamp, tcpext_TCPTSReorder); - rrddim_set_by_pointer(st_tcpreorders, rd_sack, tcpext_TCPSACKReorder); - rrddim_set_by_pointer(st_tcpreorders, rd_fack, tcpext_TCPFACKReorder); - rrddim_set_by_pointer(st_tcpreorders, rd_reno, tcpext_TCPRenoReorder); - - rrdset_done(st_tcpreorders); - } - - // -------------------------------------------------------------------- - - if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && (tcpext_TCPOFOQueue || tcpext_TCPOFODrop || tcpext_TCPOFOMerge))) { - do_tcpext_ofo = CONFIG_BOOLEAN_YES; - - static RRDSET *st_ipv4_tcpofo = NULL; - static RRDDIM *rd_inqueue = NULL, *rd_dropped = NULL, *rd_merged = NULL, *rd_pruned = NULL; - - if(unlikely(!st_ipv4_tcpofo)) { - - st_ipv4_tcpofo = rrdset_create_localhost( - "ipv4" - , "tcpofo" - , NULL - , "tcp" - , NULL - , "TCP Out-Of-Order Queue" - , "packets/s" - , "proc" - , "net/netstat" - , 3050 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inqueue = rrddim_add(st_ipv4_tcpofo, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_dropped = rrddim_add(st_ipv4_tcpofo, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_merged = rrddim_add(st_ipv4_tcpofo, "TCPOFOMerge", "merged", 1, 
1, RRD_ALGORITHM_INCREMENTAL); - rd_pruned = rrddim_add(st_ipv4_tcpofo, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_ipv4_tcpofo); - - rrddim_set_by_pointer(st_ipv4_tcpofo, rd_inqueue, tcpext_TCPOFOQueue); - rrddim_set_by_pointer(st_ipv4_tcpofo, rd_dropped, tcpext_TCPOFODrop); - rrddim_set_by_pointer(st_ipv4_tcpofo, rd_merged, tcpext_TCPOFOMerge); - rrddim_set_by_pointer(st_ipv4_tcpofo, rd_pruned, tcpext_OfoPruned); - - rrdset_done(st_ipv4_tcpofo); - } - - // -------------------------------------------------------------------- - - if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpext_SyncookiesSent || tcpext_SyncookiesRecv || tcpext_SyncookiesFailed))) { - do_tcpext_syscookies = CONFIG_BOOLEAN_YES; - - static RRDSET *st_syncookies = NULL; - static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_failed = NULL; - - if(unlikely(!st_syncookies)) { - - st_syncookies = rrdset_create_localhost( - "ipv4" - , "tcpsyncookies" - , NULL - , "tcp" - , NULL - , "TCP SYN Cookies" - , "packets/s" - , "proc" - , "net/netstat" - , 3100 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st_syncookies, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st_syncookies, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st_syncookies, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_syncookies); - - rrddim_set_by_pointer(st_syncookies, rd_received, tcpext_SyncookiesRecv); - rrddim_set_by_pointer(st_syncookies, rd_sent, tcpext_SyncookiesSent); - rrddim_set_by_pointer(st_syncookies, rd_failed, tcpext_SyncookiesFailed); - - rrdset_done(st_syncookies); - } - - // -------------------------------------------------------------------- - - if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && (tcpext_ListenOverflows || tcpext_ListenDrops))) { - do_tcpext_listen = CONFIG_BOOLEAN_YES; - - static RRDSET *st_listen = NULL; - static RRDDIM *rd_overflows = NULL, *rd_drops = NULL; - - if(unlikely(!st_listen)) { - - st_listen = rrdset_create_localhost( - "ipv4" - , "tcplistenissues" - , NULL - , "tcp" - , NULL - , "TCP Listen Socket Issues" - , "packets/s" - , "proc" - , "net/netstat" - , 3015 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_overflows = rrddim_add(st_listen, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_drops = rrddim_add(st_listen, "ListenDrops", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_listen); - - rrddim_set_by_pointer(st_listen, rd_overflows, tcpext_ListenOverflows); - rrddim_set_by_pointer(st_listen, rd_drops, tcpext_ListenDrops); - - rrdset_done(st_listen); - } - } - } - - return 0; -} diff --git a/src/proc_net_rpc_nfs.c b/src/proc_net_rpc_nfs.c deleted file mode 100644 index a4c778cba..000000000 --- a/src/proc_net_rpc_nfs.c +++ /dev/null @@ -1,449 +0,0 @@ -#include "common.h" - -struct nfs_procs { - char name[30]; - unsigned long long value; - int present; - RRDDIM *rd; -}; - -struct nfs_procs nfs_proc2_values[] = { - { "null" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"root" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"readlink", 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"wrcache" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"link" , 
0ULL, 0, NULL} - , {"symlink" , 0ULL, 0, NULL} - , {"mkdir" , 0ULL, 0, NULL} - , {"rmdir" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"fsstat" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - -struct nfs_procs nfs_proc3_values[] = { - { "null" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"access" , 0ULL, 0, NULL} - , {"readlink" , 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"mkdir" , 0ULL, 0, NULL} - , {"symlink" , 0ULL, 0, NULL} - , {"mknod" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rmdir" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"link" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"readdirplus", 0ULL, 0, NULL} - , {"fsstat" , 0ULL, 0, NULL} - , {"fsinfo" , 0ULL, 0, NULL} - , {"pathconf" , 0ULL, 0, NULL} - , {"commit" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - -struct nfs_procs nfs_proc4_values[] = { - { "null" , 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"commit" , 0ULL, 0, NULL} - , {"open" , 0ULL, 0, NULL} - , {"open_conf" , 0ULL, 0, NULL} - , {"open_noat" , 0ULL, 0, NULL} - , {"open_dgrd" , 0ULL, 0, NULL} - , {"close" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"fsinfo" , 0ULL, 0, NULL} - , {"renew" , 0ULL, 0, NULL} - , {"setclntid" , 0ULL, 0, NULL} - , {"confirm" , 0ULL, 0, NULL} - , {"lock" , 0ULL, 0, NULL} - , {"lockt" , 0ULL, 0, NULL} - , {"locku" , 0ULL, 0, NULL} - , {"access" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"lookup_root" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"link" , 0ULL, 0, NULL} - , {"symlink" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"pathconf" , 0ULL, 0, NULL} - , {"statfs" , 0ULL, 0, NULL} - , {"readlink" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"server_caps" , 0ULL, 0, NULL} - , {"delegreturn" , 0ULL, 0, NULL} - , {"getacl" , 0ULL, 0, NULL} - , {"setacl" , 0ULL, 0, NULL} - , {"fs_locations" , 0ULL, 0, NULL} - , {"rel_lkowner" , 0ULL, 0, NULL} - , {"secinfo" , 0ULL, 0, NULL} - , {"fsid_present" , 0ULL, 0, NULL} - , - - /* nfsv4.1 client ops */ - { "exchange_id" , 0ULL, 0, NULL} - , {"create_session" , 0ULL, 0, NULL} - , {"destroy_session" , 0ULL, 0, NULL} - , {"sequence" , 0ULL, 0, NULL} - , {"get_lease_time" , 0ULL, 0, NULL} - , {"reclaim_comp" , 0ULL, 0, NULL} - , {"layoutget" , 0ULL, 0, NULL} - , {"getdevinfo" , 0ULL, 0, NULL} - , {"layoutcommit" , 0ULL, 0, NULL} - , {"layoutreturn" , 0ULL, 0, NULL} - , {"secinfo_no" , 0ULL, 0, NULL} - , {"test_stateid" , 0ULL, 0, NULL} - , {"free_stateid" , 0ULL, 0, NULL} - , {"getdevicelist" , 0ULL, 0, NULL} - , {"bind_conn_to_ses", 0ULL, 0, NULL} - , {"destroy_clientid", 0ULL, 0, NULL} - , - - /* nfsv4.2 client ops */ - { "seek" , 0ULL, 0, NULL} - , {"allocate" , 0ULL, 0, NULL} - , {"deallocate" , 0ULL, 0, NULL} - , {"layoutstats" , 0ULL, 0, NULL} - , {"clone" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - -int do_proc_net_rpc_nfs(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - static int do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1; - static int proc2_warning = 0, proc3_warning = 0, proc4_warning = 0; - - if(!ff) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfs"); - ff = 
procfile_open(config_get("plugin:proc:/proc/net/rpc/nfs", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
-    }
-    if(!ff) return 1;
-
-    ff = procfile_readall(ff);
-    if(!ff) return 0; // we return 0, so that we will retry to open it next time
-
-    if(do_net == -1) do_net = config_get_boolean("plugin:proc:/proc/net/rpc/nfs", "network", 1);
-    if(do_rpc == -1) do_rpc = config_get_boolean("plugin:proc:/proc/net/rpc/nfs", "rpc", 1);
-    if(do_proc2 == -1) do_proc2 = config_get_boolean("plugin:proc:/proc/net/rpc/nfs", "NFS v2 procedures", 1);
-    if(do_proc3 == -1) do_proc3 = config_get_boolean("plugin:proc:/proc/net/rpc/nfs", "NFS v3 procedures", 1);
-    if(do_proc4 == -1) do_proc4 = config_get_boolean("plugin:proc:/proc/net/rpc/nfs", "NFS v4 procedures", 1);
-
-    // if they are enabled, reset them to 1
-    // later we do them =2 to avoid doing strcmp() for all lines
-    if(do_net) do_net = 1;
-    if(do_rpc) do_rpc = 1;
-    if(do_proc2) do_proc2 = 1;
-    if(do_proc3) do_proc3 = 1;
-    if(do_proc4) do_proc4 = 1;
-
-    size_t lines = procfile_lines(ff), l;
-
-    char *type;
-    unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
-    unsigned long long rpc_calls = 0, rpc_retransmits = 0, rpc_auth_refresh = 0;
-
-    for(l = 0; l < lines ;l++) {
-        size_t words = procfile_linewords(ff, l);
-        if(!words) continue;
-
-        type = procfile_lineword(ff, l, 0);
-
-        if(do_net == 1 && strcmp(type, "net") == 0) {
-            if(words < 5) {
-                error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 5);
-                continue;
-            }
-
-            net_count = str2ull(procfile_lineword(ff, l, 1));
-            net_udp_count = str2ull(procfile_lineword(ff, l, 2));
-            net_tcp_count = str2ull(procfile_lineword(ff, l, 3));
-            net_tcp_connections = str2ull(procfile_lineword(ff, l, 4));
-
-            unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
-            if(sum == 0ULL) do_net = -1;
-            else do_net = 2;
-        }
-        else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
-            if(words < 4) {
-                error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 4);
-                continue;
-            }
-
-            rpc_calls = str2ull(procfile_lineword(ff, l, 1));
-            rpc_retransmits = str2ull(procfile_lineword(ff, l, 2));
-            rpc_auth_refresh = str2ull(procfile_lineword(ff, l, 3));
-
-            unsigned long long sum = rpc_calls + rpc_retransmits + rpc_auth_refresh;
-            if(sum == 0ULL) do_rpc = -1;
-            else do_rpc = 2;
-        }
-        else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
-            // the first number is the count of numbers present
-            // so we start from word 2
-
-            unsigned long long sum = 0;
-            unsigned int i, j;
-            for(i = 0, j = 2; j < words && nfs_proc2_values[i].name[0] ; i++, j++) {
-                nfs_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j));
-                nfs_proc2_values[i].present = 1;
-                sum += nfs_proc2_values[i].value;
-            }
-
-            if(sum == 0ULL) {
-                if(!proc2_warning) {
-                    error("Disabling /proc/net/rpc/nfs v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
-                    proc2_warning = 1;
-                }
-                do_proc2 = 0;
-            }
-            else do_proc2 = 2;
-        }
-        else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
-            // the first number is the count of numbers present
-            // so we start from word 2
-
-            unsigned long long sum = 0;
-            unsigned int i, j;
-            for(i = 0, j = 2; j < words && nfs_proc3_values[i].name[0] ; i++, j++) {
-                nfs_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j));
-                nfs_proc3_values[i].present = 1;
-                sum += nfs_proc3_values[i].value;
-            }
-
-            if(sum == 0ULL) {
-                if(!proc3_warning) {
-                    info("Disabling /proc/net/rpc/nfs v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
-                    proc3_warning = 1;
-                }
-                do_proc3 = 0;
-            }
-            else do_proc3 = 2;
-        }
-        else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
-            // the first number is the count of numbers present
-            // so we start from word 2
-
-            unsigned long long sum = 0;
-            unsigned int i, j;
-            for(i = 0, j = 2; j < words && nfs_proc4_values[i].name[0] ; i++, j++) {
-                nfs_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j));
-                nfs_proc4_values[i].present = 1;
-                sum += nfs_proc4_values[i].value;
-            }
-
-            if(sum == 0ULL) {
-                if(!proc4_warning) {
-                    info("Disabling /proc/net/rpc/nfs v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
-                    proc4_warning = 1;
-                }
-                do_proc4 = 0;
-            }
-            else do_proc4 = 2;
-        }
-    }
-
-    // --------------------------------------------------------------------
-
-    if(do_net == 2) {
-        static RRDSET *st = NULL;
-        static RRDDIM *rd_udp = NULL,
-                      *rd_tcp = NULL;
-
-        if(unlikely(!st)) {
-            st = rrdset_create_localhost(
-                    "nfs"
-                    , "net"
-                    , NULL
-                    , "network"
-                    , NULL
-                    , "NFS Client Network"
-                    , "operations/s"
-                    , "proc"
-                    , "net/rpc/nfs"
-                    , 2207
-                    , update_every
-                    , RRDSET_TYPE_STACKED
-            );
-
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-            rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else rrdset_next(st);
-
-        // ignore net_count, net_tcp_connections
-        (void)net_count;
-        (void)net_tcp_connections;
-
-        rrddim_set_by_pointer(st, rd_udp, net_udp_count);
-        rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
-        rrdset_done(st);
-    }
-
-    // --------------------------------------------------------------------
-
-    if(do_rpc == 2) {
-        static RRDSET *st = NULL;
-        static RRDDIM *rd_calls = NULL,
-                      *rd_retransmits = NULL,
-                      *rd_auth_refresh = NULL;
-
-        if(unlikely(!st)) {
-            st = rrdset_create_localhost(
-                    "nfs"
-                    , "rpc"
-                    , NULL
-                    , "rpc"
-                    , NULL
-                    , "NFS Client Remote Procedure Calls Statistics"
-                    , "calls/s"
-                    , "proc"
-                    , "net/rpc/nfs"
-                    , 2208
-                    , update_every
-                    , RRDSET_TYPE_LINE
-            );
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
-            rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_auth_refresh = rrddim_add(st, "auth_refresh", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-        }
-        else rrdset_next(st);
-
-        rrddim_set_by_pointer(st, rd_calls, rpc_calls);
-        rrddim_set_by_pointer(st, rd_retransmits, rpc_retransmits);
-        rrddim_set_by_pointer(st, rd_auth_refresh, rpc_auth_refresh);
-        rrdset_done(st);
-    }
-
-    // --------------------------------------------------------------------
-
-    if(do_proc2 == 2) {
-        static RRDSET *st = NULL;
-        if(unlikely(!st)) {
-            st = rrdset_create_localhost(
-                    "nfs"
-                    , "proc2"
- , NULL - , "nfsv2rpc" - , NULL - , "NFS v2 Client Remote Procedure Calls" - , "calls/s" - , "proc" - , "net/rpc/nfs" - , 2209 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfs_proc2_values[i].present ; i++) { - if(unlikely(!nfs_proc2_values[i].rd)) - nfs_proc2_values[i].rd = rrddim_add(st, nfs_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfs_proc2_values[i].rd, nfs_proc2_values[i].value); - } - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_proc3 == 2) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfs" - , "proc3" - , NULL - , "nfsv3rpc" - , NULL - , "NFS v3 Client Remote Procedure Calls" - , "calls/s" - , "proc" - , "net/rpc/nfs" - , 2210 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfs_proc3_values[i].present ; i++) { - if(unlikely(!nfs_proc3_values[i].rd)) - nfs_proc3_values[i].rd = rrddim_add(st, nfs_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfs_proc3_values[i].rd, nfs_proc3_values[i].value); - } - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_proc4 == 2) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfs" - , "proc4" - , NULL - , "nfsv4rpc" - , NULL - , "NFS v4 Client Remote Procedure Calls" - , "calls/s" - , "proc" - , "net/rpc/nfs" - , 2211 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfs_proc4_values[i].present ; i++) { - if(unlikely(!nfs_proc4_values[i].rd)) - nfs_proc4_values[i].rd = rrddim_add(st, nfs_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfs_proc4_values[i].rd, nfs_proc4_values[i].value); - } - - rrdset_done(st); - } - - return 0; -} diff --git a/src/proc_net_rpc_nfsd.c b/src/proc_net_rpc_nfsd.c deleted file mode 100644 index 8aca31aed..000000000 --- a/src/proc_net_rpc_nfsd.c +++ /dev/null @@ -1,1002 +0,0 @@ -#include "common.h" - -struct nfsd_procs { - char name[30]; - unsigned long long value; - int present; - RRDDIM *rd; -}; - -struct nfsd_procs nfsd_proc2_values[] = { - { "null" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"root" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"readlink", 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"wrcache" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"link" , 0ULL, 0, NULL} - , {"symlink" , 0ULL, 0, NULL} - , {"mkdir" , 0ULL, 0, NULL} - , {"rmdir" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"fsstat" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - -struct nfsd_procs nfsd_proc3_values[] = { - { "null" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"access" , 0ULL, 0, NULL} - , {"readlink" , 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"mkdir" , 0ULL, 0, NULL} - , {"symlink" , 0ULL, 0, NULL} - , {"mknod" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rmdir" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"link" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"readdirplus", 0ULL, 0, NULL} - , 
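/* Each of these procedure tables is terminated by a sentinel entry with an
   empty name, which is what stops the parsing loops later in this file. A
   minimal sketch of the walk (matching the proc3 loop below):

       for(i = 0, j = 2; j < words && nfsd_proc3_values[i].name[0]; i++, j++)
           nfsd_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j));
*/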
{"fsstat" , 0ULL, 0, NULL} - , {"fsinfo" , 0ULL, 0, NULL} - , {"pathconf" , 0ULL, 0, NULL} - , {"commit" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - -struct nfsd_procs nfsd_proc4_values[] = { - { "null" , 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"commit" , 0ULL, 0, NULL} - , {"open" , 0ULL, 0, NULL} - , {"open_conf" , 0ULL, 0, NULL} - , {"open_noat" , 0ULL, 0, NULL} - , {"open_dgrd" , 0ULL, 0, NULL} - , {"close" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"fsinfo" , 0ULL, 0, NULL} - , {"renew" , 0ULL, 0, NULL} - , {"setclntid" , 0ULL, 0, NULL} - , {"confirm" , 0ULL, 0, NULL} - , {"lock" , 0ULL, 0, NULL} - , {"lockt" , 0ULL, 0, NULL} - , {"locku" , 0ULL, 0, NULL} - , {"access" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"lookup_root" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"link" , 0ULL, 0, NULL} - , {"symlink" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"pathconf" , 0ULL, 0, NULL} - , {"statfs" , 0ULL, 0, NULL} - , {"readlink" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"server_caps" , 0ULL, 0, NULL} - , {"delegreturn" , 0ULL, 0, NULL} - , {"getacl" , 0ULL, 0, NULL} - , {"setacl" , 0ULL, 0, NULL} - , {"fs_locations" , 0ULL, 0, NULL} - , {"rel_lkowner" , 0ULL, 0, NULL} - , {"secinfo" , 0ULL, 0, NULL} - , {"fsid_present" , 0ULL, 0, NULL} - , - - /* nfsv4.1 client ops */ - { "exchange_id" , 0ULL, 0, NULL} - , {"create_session" , 0ULL, 0, NULL} - , {"destroy_session" , 0ULL, 0, NULL} - , {"sequence" , 0ULL, 0, NULL} - , {"get_lease_time" , 0ULL, 0, NULL} - , {"reclaim_comp" , 0ULL, 0, NULL} - , {"layoutget" , 0ULL, 0, NULL} - , {"getdevinfo" , 0ULL, 0, NULL} - , {"layoutcommit" , 0ULL, 0, NULL} - , {"layoutreturn" , 0ULL, 0, NULL} - , {"secinfo_no" , 0ULL, 0, NULL} - , {"test_stateid" , 0ULL, 0, NULL} - , {"free_stateid" , 0ULL, 0, NULL} - , {"getdevicelist" , 0ULL, 0, NULL} - , {"bind_conn_to_ses", 0ULL, 0, NULL} - , {"destroy_clientid", 0ULL, 0, NULL} - , - - /* nfsv4.2 client ops */ - { "seek" , 0ULL, 0, NULL} - , {"allocate" , 0ULL, 0, NULL} - , {"deallocate" , 0ULL, 0, NULL} - , {"layoutstats" , 0ULL, 0, NULL} - , {"clone" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - -struct nfsd_procs nfsd4_ops_values[] = { - { "unused_op0" , 0ULL, 0, NULL} - , {"unused_op1" , 0ULL, 0, NULL} - , {"future_op2" , 0ULL, 0, NULL} - , {"access" , 0ULL, 0, NULL} - , {"close" , 0ULL, 0, NULL} - , {"commit" , 0ULL, 0, NULL} - , {"create" , 0ULL, 0, NULL} - , {"delegpurge" , 0ULL, 0, NULL} - , {"delegreturn" , 0ULL, 0, NULL} - , {"getattr" , 0ULL, 0, NULL} - , {"getfh" , 0ULL, 0, NULL} - , {"link" , 0ULL, 0, NULL} - , {"lock" , 0ULL, 0, NULL} - , {"lockt" , 0ULL, 0, NULL} - , {"locku" , 0ULL, 0, NULL} - , {"lookup" , 0ULL, 0, NULL} - , {"lookup_root" , 0ULL, 0, NULL} - , {"nverify" , 0ULL, 0, NULL} - , {"open" , 0ULL, 0, NULL} - , {"openattr" , 0ULL, 0, NULL} - , {"open_confirm" , 0ULL, 0, NULL} - , {"open_downgrade" , 0ULL, 0, NULL} - , {"putfh" , 0ULL, 0, NULL} - , {"putpubfh" , 0ULL, 0, NULL} - , {"putrootfh" , 0ULL, 0, NULL} - , {"read" , 0ULL, 0, NULL} - , {"readdir" , 0ULL, 0, NULL} - , {"readlink" , 0ULL, 0, NULL} - , {"remove" , 0ULL, 0, NULL} - , {"rename" , 0ULL, 0, NULL} - , {"renew" , 0ULL, 0, NULL} - , {"restorefh" , 0ULL, 0, NULL} - , {"savefh" , 0ULL, 0, NULL} - , {"secinfo" , 0ULL, 0, NULL} - , {"setattr" , 0ULL, 0, NULL} - , {"setclientid" , 0ULL, 0, NULL} - , {"setclientid_confirm" , 0ULL, 
0, NULL} - , {"verify" , 0ULL, 0, NULL} - , {"write" , 0ULL, 0, NULL} - , {"release_lockowner" , 0ULL, 0, NULL} - , - - /* nfs41 */ - { "backchannel_ctl" , 0ULL, 0, NULL} - , {"bind_conn_to_session", 0ULL, 0, NULL} - , {"exchange_id" , 0ULL, 0, NULL} - , {"create_session" , 0ULL, 0, NULL} - , {"destroy_session" , 0ULL, 0, NULL} - , {"free_stateid" , 0ULL, 0, NULL} - , {"get_dir_delegation" , 0ULL, 0, NULL} - , {"getdeviceinfo" , 0ULL, 0, NULL} - , {"getdevicelist" , 0ULL, 0, NULL} - , {"layoutcommit" , 0ULL, 0, NULL} - , {"layoutget" , 0ULL, 0, NULL} - , {"layoutreturn" , 0ULL, 0, NULL} - , {"secinfo_no_name" , 0ULL, 0, NULL} - , {"sequence" , 0ULL, 0, NULL} - , {"set_ssv" , 0ULL, 0, NULL} - , {"test_stateid" , 0ULL, 0, NULL} - , {"want_delegation" , 0ULL, 0, NULL} - , {"destroy_clientid" , 0ULL, 0, NULL} - , {"reclaim_complete" , 0ULL, 0, NULL} - , - - /* nfs42 */ - { "allocate" , 0ULL, 0, NULL} - , {"copy" , 0ULL, 0, NULL} - , {"copy_notify" , 0ULL, 0, NULL} - , {"deallocate" , 0ULL, 0, NULL} - , {"ioadvise" , 0ULL, 0, NULL} - , {"layouterror" , 0ULL, 0, NULL} - , {"layoutstats" , 0ULL, 0, NULL} - , {"offload_cancel" , 0ULL, 0, NULL} - , {"offload_status" , 0ULL, 0, NULL} - , {"read_plus" , 0ULL, 0, NULL} - , {"seek" , 0ULL, 0, NULL} - , {"write_same" , 0ULL, 0, NULL} - , - - /* termination */ - { "" , 0ULL, 0, NULL} -}; - - -int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { - (void)dt; - static procfile *ff = NULL; - static int do_rc = -1, do_fh = -1, do_io = -1, do_th = -1, do_ra = -1, do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1, do_proc4ops = -1; - static int ra_warning = 0, th_warning = 0, proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0; - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfsd"); - ff = procfile_open(config_get("plugin:proc:/proc/net/rpc/nfsd", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - if(unlikely(do_rc == -1)) { - do_rc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read cache", 1); - do_fh = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "file handles", 1); - do_io = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "I/O", 1); - do_th = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "threads", 1); - do_ra = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read ahead", 1); - do_net = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "network", 1); - do_rpc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "rpc", 1); - do_proc2 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v2 procedures", 1); - do_proc3 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v3 procedures", 1); - do_proc4 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 procedures", 1); - do_proc4ops = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 operations", 1); - } - - // if they are enabled, reset them to 1 - // later we do them = 2 to avoid doing strcmp() for all lines - if(do_rc) do_rc = 1; - if(do_fh) do_fh = 1; - if(do_io) do_io = 1; - if(do_th) do_th = 1; - if(do_ra) do_ra = 1; - if(do_net) do_net = 1; - if(do_rpc) do_rpc = 1; - if(do_proc2) do_proc2 = 1; - if(do_proc3) do_proc3 = 1; - if(do_proc4) do_proc4 = 1; - if(do_proc4ops) do_proc4ops = 1; - - size_t lines = 
procfile_lines(ff), l; - - char *type; - unsigned long long rc_hits = 0, rc_misses = 0, rc_nocache = 0; - unsigned long long fh_stale = 0, fh_total_lookups = 0, fh_anonymous_lookups = 0, fh_dir_not_in_dcache = 0, fh_non_dir_not_in_dcache = 0; - unsigned long long io_read = 0, io_write = 0; - unsigned long long th_threads = 0, th_fullcnt = 0, th_hist10 = 0, th_hist20 = 0, th_hist30 = 0, th_hist40 = 0, th_hist50 = 0, th_hist60 = 0, th_hist70 = 0, th_hist80 = 0, th_hist90 = 0, th_hist100 = 0; - unsigned long long ra_size = 0, ra_hist10 = 0, ra_hist20 = 0, ra_hist30 = 0, ra_hist40 = 0, ra_hist50 = 0, ra_hist60 = 0, ra_hist70 = 0, ra_hist80 = 0, ra_hist90 = 0, ra_hist100 = 0, ra_none = 0; - unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0; - unsigned long long rpc_calls = 0, rpc_bad_format = 0, rpc_bad_auth = 0, rpc_bad_client = 0; - - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(!words)) continue; - - type = procfile_lineword(ff, l, 0); - - if(do_rc == 1 && strcmp(type, "rc") == 0) { - if(unlikely(words < 4)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 4); - continue; - } - - rc_hits = str2ull(procfile_lineword(ff, l, 1)); - rc_misses = str2ull(procfile_lineword(ff, l, 2)); - rc_nocache = str2ull(procfile_lineword(ff, l, 3)); - - unsigned long long sum = rc_hits + rc_misses + rc_nocache; - if(sum == 0ULL) do_rc = -1; - else do_rc = 2; - } - else if(do_fh == 1 && strcmp(type, "fh") == 0) { - if(unlikely(words < 6)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6); - continue; - } - - fh_stale = str2ull(procfile_lineword(ff, l, 1)); - fh_total_lookups = str2ull(procfile_lineword(ff, l, 2)); - fh_anonymous_lookups = str2ull(procfile_lineword(ff, l, 3)); - fh_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 4)); - fh_non_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 5)); - - unsigned long long sum = fh_stale + fh_total_lookups + fh_anonymous_lookups + fh_dir_not_in_dcache + fh_non_dir_not_in_dcache; - if(sum == 0ULL) do_fh = -1; - else do_fh = 2; - } - else if(do_io == 1 && strcmp(type, "io") == 0) { - if(unlikely(words < 3)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 3); - continue; - } - - io_read = str2ull(procfile_lineword(ff, l, 1)); - io_write = str2ull(procfile_lineword(ff, l, 2)); - - unsigned long long sum = io_read + io_write; - if(sum == 0ULL) do_io = -1; - else do_io = 2; - } - else if(do_th == 1 && strcmp(type, "th") == 0) { - if(unlikely(words < 13)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13); - continue; - } - - th_threads = str2ull(procfile_lineword(ff, l, 1)); - th_fullcnt = str2ull(procfile_lineword(ff, l, 2)); - th_hist10 = (unsigned long long)(atof(procfile_lineword(ff, l, 3)) * 1000.0); - th_hist20 = (unsigned long long)(atof(procfile_lineword(ff, l, 4)) * 1000.0); - th_hist30 = (unsigned long long)(atof(procfile_lineword(ff, l, 5)) * 1000.0); - th_hist40 = (unsigned long long)(atof(procfile_lineword(ff, l, 6)) * 1000.0); - th_hist50 = (unsigned long long)(atof(procfile_lineword(ff, l, 7)) * 1000.0); - th_hist60 = (unsigned long long)(atof(procfile_lineword(ff, l, 8)) * 1000.0); - th_hist70 = (unsigned long long)(atof(procfile_lineword(ff, l, 9)) * 1000.0); - th_hist80 = (unsigned long long)(atof(procfile_lineword(ff, l, 10)) * 1000.0); - th_hist90 = (unsigned long long)(atof(procfile_lineword(ff, l, 11)) * 1000.0); - 
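/* The th histogram columns are decimal percentages (e.g. "12.345"), while rrd
   dimensions collect integers, so each value is stored multiplied by 1000 and
   the chart divides by 1000 again (divisor 1000 in the threads_histogram
   rrddim_add() calls below), preserving three decimal places. A sketch of the
   round trip:

       collected = (unsigned long long)(atof("12.345") * 1000.0);  // 12345
       // charted value = 12345 / 1000 = 12.345
*/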
th_hist100 = (unsigned long long)(atof(procfile_lineword(ff, l, 12)) * 1000.0); - - // threads histogram has been disabled on recent kernels - // http://permalink.gmane.org/gmane.linux.nfs/24528 - unsigned long long sum = th_hist10 + th_hist20 + th_hist30 + th_hist40 + th_hist50 + th_hist60 + th_hist70 + th_hist80 + th_hist90 + th_hist100; - if(sum == 0ULL) { - if(!th_warning) { - info("Disabling /proc/net/rpc/nfsd threads histogram. It seems unused on this machine. It will be enabled automatically when found with data in it."); - th_warning = 1; - } - do_th = -1; - } - else do_th = 2; - } - else if(do_ra == 1 && strcmp(type, "ra") == 0) { - if(unlikely(words < 13)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13); - continue; - } - - ra_size = str2ull(procfile_lineword(ff, l, 1)); - ra_hist10 = str2ull(procfile_lineword(ff, l, 2)); - ra_hist20 = str2ull(procfile_lineword(ff, l, 3)); - ra_hist30 = str2ull(procfile_lineword(ff, l, 4)); - ra_hist40 = str2ull(procfile_lineword(ff, l, 5)); - ra_hist50 = str2ull(procfile_lineword(ff, l, 6)); - ra_hist60 = str2ull(procfile_lineword(ff, l, 7)); - ra_hist70 = str2ull(procfile_lineword(ff, l, 8)); - ra_hist80 = str2ull(procfile_lineword(ff, l, 9)); - ra_hist90 = str2ull(procfile_lineword(ff, l, 10)); - ra_hist100 = str2ull(procfile_lineword(ff, l, 11)); - ra_none = str2ull(procfile_lineword(ff, l, 12)); - - unsigned long long sum = ra_hist10 + ra_hist20 + ra_hist30 + ra_hist40 + ra_hist50 + ra_hist60 + ra_hist70 + ra_hist80 + ra_hist90 + ra_hist100 + ra_none; - if(sum == 0ULL) { - if(!ra_warning) { - info("Disabling /proc/net/rpc/nfsd read ahead histogram. It seems unused on this machine. It will be enabled automatically when found with data in it."); - ra_warning = 1; - } - do_ra = -1; - } - else do_ra = 2; - } - else if(do_net == 1 && strcmp(type, "net") == 0) { - if(unlikely(words < 5)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 5); - continue; - } - - net_count = str2ull(procfile_lineword(ff, l, 1)); - net_udp_count = str2ull(procfile_lineword(ff, l, 2)); - net_tcp_count = str2ull(procfile_lineword(ff, l, 3)); - net_tcp_connections = str2ull(procfile_lineword(ff, l, 4)); - - unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections; - if(sum == 0ULL) do_net = -1; - else do_net = 2; - } - else if(do_rpc == 1 && strcmp(type, "rpc") == 0) { - if(unlikely(words < 6)) { - error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6); - continue; - } - - rpc_calls = str2ull(procfile_lineword(ff, l, 1)); - rpc_bad_format = str2ull(procfile_lineword(ff, l, 2)); - rpc_bad_auth = str2ull(procfile_lineword(ff, l, 3)); - rpc_bad_client = str2ull(procfile_lineword(ff, l, 4)); - - unsigned long long sum = rpc_calls + rpc_bad_format + rpc_bad_auth + rpc_bad_client; - if(sum == 0ULL) do_rpc = -1; - else do_rpc = 2; - } - else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) { - // the first number is the count of numbers present - // so we start from word 2 - - unsigned long long sum = 0; - unsigned int i, j; - for(i = 0, j = 2; j < words && nfsd_proc2_values[i].name[0] ; i++, j++) { - nfsd_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j)); - nfsd_proc2_values[i].present = 1; - sum += nfsd_proc2_values[i].value; - } - - if(sum == 0ULL) { - if(!proc2_warning) { - info("Disabling /proc/net/rpc/nfsd v2 procedure calls chart. It seems unused on this machine. 
It will be enabled automatically when found with data in it."); - proc2_warning = 1; - } - do_proc2 = 0; - } - else do_proc2 = 2; - } - else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) { - // the first number is the count of numbers present - // so we start from word 2 - - unsigned long long sum = 0; - unsigned int i, j; - for(i = 0, j = 2; j < words && nfsd_proc3_values[i].name[0] ; i++, j++) { - nfsd_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j)); - nfsd_proc3_values[i].present = 1; - sum += nfsd_proc3_values[i].value; - } - - if(sum == 0ULL) { - if(!proc3_warning) { - info("Disabling /proc/net/rpc/nfsd v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); - proc3_warning = 1; - } - do_proc3 = 0; - } - else do_proc3 = 2; - } - else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) { - // the first number is the count of numbers present - // so we start from word 2 - - unsigned long long sum = 0; - unsigned int i, j; - for(i = 0, j = 2; j < words && nfsd_proc4_values[i].name[0] ; i++, j++) { - nfsd_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j)); - nfsd_proc4_values[i].present = 1; - sum += nfsd_proc4_values[i].value; - } - - if(sum == 0ULL) { - if(!proc4_warning) { - info("Disabling /proc/net/rpc/nfsd v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it."); - proc4_warning = 1; - } - do_proc4 = 0; - } - else do_proc4 = 2; - } - else if(do_proc4ops == 1 && strcmp(type, "proc4ops") == 0) { - // the first number is the count of numbers present - // so we start from word 2 - - unsigned long long sum = 0; - unsigned int i, j; - for(i = 0, j = 2; j < words && nfsd4_ops_values[i].name[0] ; i++, j++) { - nfsd4_ops_values[i].value = str2ull(procfile_lineword(ff, l, j)); - nfsd4_ops_values[i].present = 1; - sum += nfsd4_ops_values[i].value; - } - - if(sum == 0ULL) { - if(!proc4ops_warning) { - info("Disabling /proc/net/rpc/nfsd v4 operations chart. It seems unused on this machine. 
It will be enabled automatically when found with data in it."); - proc4ops_warning = 1; - } - do_proc4ops = 0; - } - else do_proc4ops = 2; - } - } - - // -------------------------------------------------------------------- - - if(do_rc == 2) { - static RRDSET *st = NULL; - static RRDDIM *rd_hits = NULL, - *rd_misses = NULL, - *rd_nocache = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "readcache" - , NULL - , "cache" - , NULL - , "NFS Server Read Cache" - , "reads/s" - , "proc" - , "net/rpc/nfsd" - , 2100 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_hits = rrddim_add(st, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_misses = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_nocache = rrddim_add(st, "nocache", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_hits, rc_hits); - rrddim_set_by_pointer(st, rd_misses, rc_misses); - rrddim_set_by_pointer(st, rd_nocache, rc_nocache); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_fh == 2) { - static RRDSET *st = NULL; - static RRDDIM *rd_stale = NULL, - *rd_total_lookups = NULL, - *rd_anonymous_lookups = NULL, - *rd_dir_not_in_dcache = NULL, - *rd_non_dir_not_in_dcache = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "filehandles" - , NULL - , "filehandles" - , NULL - , "NFS Server File Handles" - , "handles/s" - , "proc" - , "net/rpc/nfsd" - , 2101 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_total_lookups = rrddim_add(st, "total_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_anonymous_lookups = rrddim_add(st, "anonymous_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_dir_not_in_dcache = rrddim_add(st, "dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_non_dir_not_in_dcache = rrddim_add(st, "non_dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_stale, fh_stale); - rrddim_set_by_pointer(st, rd_total_lookups, fh_total_lookups); - rrddim_set_by_pointer(st, rd_anonymous_lookups, fh_anonymous_lookups); - rrddim_set_by_pointer(st, rd_dir_not_in_dcache, fh_dir_not_in_dcache); - rrddim_set_by_pointer(st, rd_non_dir_not_in_dcache, fh_non_dir_not_in_dcache); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_io == 2) { - static RRDSET *st = NULL; - static RRDDIM *rd_read = NULL, - *rd_write = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "io" - , NULL - , "io" - , NULL - , "NFS Server I/O" - , "kilobytes/s" - , "proc" - , "net/rpc/nfsd" - , 2102 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_read = rrddim_add(st, "read", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_write = rrddim_add(st, "write", NULL, -1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_read, io_read); - rrddim_set_by_pointer(st, rd_write, io_write); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_th == 2) { - { - static RRDSET *st = NULL; - static RRDDIM *rd_threads = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "threads" - , NULL - , "threads" - , NULL - , "NFS Server Threads" - , "threads" - , "proc" - , "net/rpc/nfsd" - , 2103 - , 
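/* The single th line feeds three charts sharing the "threads" family at
   consecutive priorities: nfsd.threads (2103), nfsd.threads_fullcnt (2104)
   and nfsd.threads_histogram (2105), as created in this block. */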
update_every - , RRDSET_TYPE_LINE - ); - - rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_threads, th_threads); - rrdset_done(st); - } - - { - static RRDSET *st = NULL; - static RRDDIM *rd_full_count = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "threads_fullcnt" - , NULL - , "threads" - , NULL - , "NFS Server Threads Full Count" - , "ops/s" - , "proc" - , "net/rpc/nfsd" - , 2104 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_full_count = rrddim_add(st, "full_count", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_full_count, th_fullcnt); - rrdset_done(st); - } - - { - static RRDSET *st = NULL; - static RRDDIM *rd_th_hist10 = NULL, - *rd_th_hist20 = NULL, - *rd_th_hist30 = NULL, - *rd_th_hist40 = NULL, - *rd_th_hist50 = NULL, - *rd_th_hist60 = NULL, - *rd_th_hist70 = NULL, - *rd_th_hist80 = NULL, - *rd_th_hist90 = NULL, - *rd_th_hist100 = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "threads_histogram" - , NULL - , "threads" - , NULL - , "NFS Server Threads Usage Histogram" - , "percentage" - , "proc" - , "net/rpc/nfsd" - , 2105 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_th_hist10 = rrddim_add(st, "0%-10%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist20 = rrddim_add(st, "10%-20%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist30 = rrddim_add(st, "20%-30%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist40 = rrddim_add(st, "30%-40%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist50 = rrddim_add(st, "40%-50%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist60 = rrddim_add(st, "50%-60%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist70 = rrddim_add(st, "60%-70%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist80 = rrddim_add(st, "70%-80%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist90 = rrddim_add(st, "80%-90%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist100 = rrddim_add(st, "90%-100%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_th_hist10, th_hist10); - rrddim_set_by_pointer(st, rd_th_hist20, th_hist20); - rrddim_set_by_pointer(st, rd_th_hist30, th_hist30); - rrddim_set_by_pointer(st, rd_th_hist40, th_hist40); - rrddim_set_by_pointer(st, rd_th_hist50, th_hist50); - rrddim_set_by_pointer(st, rd_th_hist60, th_hist60); - rrddim_set_by_pointer(st, rd_th_hist70, th_hist70); - rrddim_set_by_pointer(st, rd_th_hist80, th_hist80); - rrddim_set_by_pointer(st, rd_th_hist90, th_hist90); - rrddim_set_by_pointer(st, rd_th_hist100, th_hist100); - rrdset_done(st); - } - } - - // -------------------------------------------------------------------- - - if(do_ra == 2) { - static RRDSET *st = NULL; - static RRDDIM *rd_ra_hist10 = NULL, - *rd_ra_hist20 = NULL, - *rd_ra_hist30 = NULL, - *rd_ra_hist40 = NULL, - *rd_ra_hist50 = NULL, - *rd_ra_hist60 = NULL, - *rd_ra_hist70 = NULL, - *rd_ra_hist80 = NULL, - *rd_ra_hist90 = NULL, - *rd_ra_hist100 = NULL, - *rd_ra_none = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "readahead" - , NULL - , "readahead" - , NULL - , "NFS Server Read Ahead Depth" - , "percentage" - , "proc" - , "net/rpc/nfsd" - , 2105 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_ra_hist10 = rrddim_add(st, "10%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist20 = rrddim_add(st, "20%", NULL, 1, 1, 
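/* RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL renders each dimension as a percentage
   of the sum of all dimensions' increments over the interval, so this stacked
   readahead chart always totals 100%. (This description of the algorithm is
   our reading of its use here, not a quote from the rrd headers.) */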
RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist30 = rrddim_add(st, "30%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist40 = rrddim_add(st, "40%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist50 = rrddim_add(st, "50%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist60 = rrddim_add(st, "60%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist70 = rrddim_add(st, "70%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist80 = rrddim_add(st, "80%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist90 = rrddim_add(st, "90%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_hist100 = rrddim_add(st, "100%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_ra_none = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else rrdset_next(st); - - // ignore ra_size - (void)ra_size; - - rrddim_set_by_pointer(st, rd_ra_hist10, ra_hist10); - rrddim_set_by_pointer(st, rd_ra_hist20, ra_hist20); - rrddim_set_by_pointer(st, rd_ra_hist30, ra_hist30); - rrddim_set_by_pointer(st, rd_ra_hist40, ra_hist40); - rrddim_set_by_pointer(st, rd_ra_hist50, ra_hist50); - rrddim_set_by_pointer(st, rd_ra_hist60, ra_hist60); - rrddim_set_by_pointer(st, rd_ra_hist70, ra_hist70); - rrddim_set_by_pointer(st, rd_ra_hist80, ra_hist80); - rrddim_set_by_pointer(st, rd_ra_hist90, ra_hist90); - rrddim_set_by_pointer(st, rd_ra_hist100,ra_hist100); - rrddim_set_by_pointer(st, rd_ra_none, ra_none); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_net == 2) { - static RRDSET *st = NULL; - static RRDDIM *rd_udp = NULL, - *rd_tcp = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "net" - , NULL - , "network" - , NULL - , "NFS Server Network Statistics" - , "packets/s" - , "proc" - , "net/rpc/nfsd" - , 2107 - , update_every - , RRDSET_TYPE_STACKED - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - // ignore net_count, net_tcp_connections - (void)net_count; - (void)net_tcp_connections; - - rrddim_set_by_pointer(st, rd_udp, net_udp_count); - rrddim_set_by_pointer(st, rd_tcp, net_tcp_count); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_rpc == 2) { - static RRDSET *st = NULL; - static RRDDIM *rd_calls = NULL, - *rd_bad_format = NULL, - *rd_bad_auth = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "rpc" - , NULL - , "rpc" - , NULL - , "NFS Server Remote Procedure Calls Statistics" - , "calls/s" - , "proc" - , "net/rpc/nfsd" - , 2108 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_bad_auth = rrddim_add(st, "bad_auth", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - // ignore rpc_bad_client - (void)rpc_bad_client; - - rrddim_set_by_pointer(st, rd_calls, rpc_calls); - rrddim_set_by_pointer(st, rd_bad_format, rpc_bad_format); - rrddim_set_by_pointer(st, rd_bad_auth, rpc_bad_auth); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_proc2 == 2) { - static RRDSET *st = NULL; - 
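/* Every chart block below follows the same lazy-create pattern: the RRDSET
   (and its dimensions) live in function-local statics, created on the first
   pass and merely advanced afterwards. A minimal sketch:

       static RRDSET *st = NULL;
       if(unlikely(!st)) st = rrdset_create_localhost(...); // first pass only
       else              rrdset_next(st);                   // later passes
       rrddim_set_by_pointer(st, rd, value);
       rrdset_done(st);
*/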
if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "proc2" - , NULL - , "nfsv2rpc" - , NULL - , "NFS v2 Server Remote Procedure Calls" - , "calls/s" - , "proc" - , "net/rpc/nfsd" - , 2109 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfsd_proc2_values[i].present ; i++) { - if(unlikely(!nfsd_proc2_values[i].rd)) - nfsd_proc2_values[i].rd = rrddim_add(st, nfsd_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfsd_proc2_values[i].rd, nfsd_proc2_values[i].value); - } - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_proc3 == 2) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "proc3" - , NULL - , "nfsv3rpc" - , NULL - , "NFS v3 Server Remote Procedure Calls" - , "calls/s" - , "proc" - , "net/rpc/nfsd" - , 2110 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfsd_proc3_values[i].present ; i++) { - if(unlikely(!nfsd_proc3_values[i].rd)) - nfsd_proc3_values[i].rd = rrddim_add(st, nfsd_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfsd_proc3_values[i].rd, nfsd_proc3_values[i].value); - } - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_proc4 == 2) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "proc4" - , NULL - , "nfsv4rpc" - , NULL - , "NFS v4 Server Remote Procedure Calls" - , "calls/s" - , "proc" - , "net/rpc/nfsd" - , 2111 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfsd_proc4_values[i].present ; i++) { - if(unlikely(!nfsd_proc4_values[i].rd)) - nfsd_proc4_values[i].rd = rrddim_add(st, nfsd_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfsd_proc4_values[i].rd, nfsd_proc4_values[i].value); - } - - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_proc4ops == 2) { - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "proc4ops" - , NULL - , "nfsv4ops" - , NULL - , "NFS v4 Server Operations" - , "operations/s" - , "proc" - , "net/rpc/nfsd" - , 2112 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else rrdset_next(st); - - size_t i; - for(i = 0; nfsd4_ops_values[i].present ; i++) { - if(unlikely(!nfsd4_ops_values[i].rd)) - nfsd4_ops_values[i].rd = rrddim_add(st, nfsd4_ops_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st, nfsd4_ops_values[i].rd, nfsd4_ops_values[i].value); - } - - rrdset_done(st); - } - - return 0; -} diff --git a/src/proc_net_snmp.c b/src/proc_net_snmp.c deleted file mode 100644 index 43c010c14..000000000 --- a/src/proc_net_snmp.c +++ /dev/null @@ -1,1020 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_NET_SNMP "ipv4" - -static struct proc_net_snmp { - // kernel_uint_t ip_Forwarding; - kernel_uint_t ip_DefaultTTL; - kernel_uint_t ip_InReceives; - kernel_uint_t ip_InHdrErrors; - kernel_uint_t ip_InAddrErrors; - kernel_uint_t ip_ForwDatagrams; - kernel_uint_t ip_InUnknownProtos; - kernel_uint_t ip_InDiscards; - kernel_uint_t ip_InDelivers; - kernel_uint_t ip_OutRequests; - kernel_uint_t ip_OutDiscards; - kernel_uint_t ip_OutNoRoutes; - kernel_uint_t ip_ReasmTimeout; - kernel_uint_t ip_ReasmReqds; - kernel_uint_t 
ip_ReasmOKs; - kernel_uint_t ip_ReasmFails; - kernel_uint_t ip_FragOKs; - kernel_uint_t ip_FragFails; - kernel_uint_t ip_FragCreates; - - kernel_uint_t icmp_InMsgs; - kernel_uint_t icmp_OutMsgs; - kernel_uint_t icmp_InErrors; - kernel_uint_t icmp_OutErrors; - kernel_uint_t icmp_InCsumErrors; - - kernel_uint_t icmpmsg_InEchoReps; - kernel_uint_t icmpmsg_OutEchoReps; - kernel_uint_t icmpmsg_InDestUnreachs; - kernel_uint_t icmpmsg_OutDestUnreachs; - kernel_uint_t icmpmsg_InRedirects; - kernel_uint_t icmpmsg_OutRedirects; - kernel_uint_t icmpmsg_InEchos; - kernel_uint_t icmpmsg_OutEchos; - kernel_uint_t icmpmsg_InRouterAdvert; - kernel_uint_t icmpmsg_OutRouterAdvert; - kernel_uint_t icmpmsg_InRouterSelect; - kernel_uint_t icmpmsg_OutRouterSelect; - kernel_uint_t icmpmsg_InTimeExcds; - kernel_uint_t icmpmsg_OutTimeExcds; - kernel_uint_t icmpmsg_InParmProbs; - kernel_uint_t icmpmsg_OutParmProbs; - kernel_uint_t icmpmsg_InTimestamps; - kernel_uint_t icmpmsg_OutTimestamps; - kernel_uint_t icmpmsg_InTimestampReps; - kernel_uint_t icmpmsg_OutTimestampReps; - - //kernel_uint_t tcp_RtoAlgorithm; - //kernel_uint_t tcp_RtoMin; - //kernel_uint_t tcp_RtoMax; - ssize_t tcp_MaxConn; - kernel_uint_t tcp_ActiveOpens; - kernel_uint_t tcp_PassiveOpens; - kernel_uint_t tcp_AttemptFails; - kernel_uint_t tcp_EstabResets; - kernel_uint_t tcp_CurrEstab; - kernel_uint_t tcp_InSegs; - kernel_uint_t tcp_OutSegs; - kernel_uint_t tcp_RetransSegs; - kernel_uint_t tcp_InErrs; - kernel_uint_t tcp_OutRsts; - kernel_uint_t tcp_InCsumErrors; - - kernel_uint_t udp_InDatagrams; - kernel_uint_t udp_NoPorts; - kernel_uint_t udp_InErrors; - kernel_uint_t udp_OutDatagrams; - kernel_uint_t udp_RcvbufErrors; - kernel_uint_t udp_SndbufErrors; - kernel_uint_t udp_InCsumErrors; - kernel_uint_t udp_IgnoredMulti; - - kernel_uint_t udplite_InDatagrams; - kernel_uint_t udplite_NoPorts; - kernel_uint_t udplite_InErrors; - kernel_uint_t udplite_OutDatagrams; - kernel_uint_t udplite_RcvbufErrors; - kernel_uint_t udplite_SndbufErrors; - kernel_uint_t udplite_InCsumErrors; - kernel_uint_t udplite_IgnoredMulti; -} snmp_root = { 0 }; - -int do_proc_net_snmp(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1, - do_tcp_sockets = -1, do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcp_opens = -1, - do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1, do_udplite_packets = -1; - static uint32_t hash_ip = 0, hash_icmp = 0, hash_tcp = 0, hash_udp = 0, hash_icmpmsg = 0, hash_udplite = 0; - - static ARL_BASE *arl_ip = NULL, - *arl_icmp = NULL, - *arl_icmpmsg = NULL, - *arl_tcp = NULL, - *arl_udp = NULL, - *arl_udplite = NULL; - - static RRDVAR *tcp_max_connections_var = NULL; - static ssize_t last_max_connections = 0; - - if(unlikely(!arl_ip)) { - do_ip_packets = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 packets", 1); - do_ip_fragsout = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 fragments sent", 1); - do_ip_fragsin = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 fragments assembly", 1); - do_ip_errors = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 errors", 1); - do_tcp_sockets = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 TCP connections", 1); - do_tcp_packets = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 TCP packets", 1); - do_tcp_errors = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 TCP errors", 1); - do_tcp_opens = 
config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 TCP opens", 1); - do_tcp_handshake = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 TCP handshake issues", 1); - do_udp_packets = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 UDP packets", 1); - do_udp_errors = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 UDP errors", 1); - do_icmp_packets = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 ICMP packets", 1); - do_icmpmsg = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 ICMP messages", 1); - do_udplite_packets = config_get_boolean("plugin:proc:/proc/net/snmp", "ipv4 UDPLite packets", 1); - - hash_ip = simple_hash("Ip"); - hash_tcp = simple_hash("Tcp"); - hash_udp = simple_hash("Udp"); - hash_icmp = simple_hash("Icmp"); - hash_icmpmsg = simple_hash("IcmpMsg"); - hash_udplite = simple_hash("UdpLite"); - - arl_ip = arl_create("snmp/Ip", arl_callback_str2kernel_uint_t, 60); - // arl_expect(arl_ip, "Forwarding", &snmp_root.ip_Forwarding); - arl_expect(arl_ip, "DefaultTTL", &snmp_root.ip_DefaultTTL); - arl_expect(arl_ip, "InReceives", &snmp_root.ip_InReceives); - arl_expect(arl_ip, "InHdrErrors", &snmp_root.ip_InHdrErrors); - arl_expect(arl_ip, "InAddrErrors", &snmp_root.ip_InAddrErrors); - arl_expect(arl_ip, "ForwDatagrams", &snmp_root.ip_ForwDatagrams); - arl_expect(arl_ip, "InUnknownProtos", &snmp_root.ip_InUnknownProtos); - arl_expect(arl_ip, "InDiscards", &snmp_root.ip_InDiscards); - arl_expect(arl_ip, "InDelivers", &snmp_root.ip_InDelivers); - arl_expect(arl_ip, "OutRequests", &snmp_root.ip_OutRequests); - arl_expect(arl_ip, "OutDiscards", &snmp_root.ip_OutDiscards); - arl_expect(arl_ip, "OutNoRoutes", &snmp_root.ip_OutNoRoutes); - arl_expect(arl_ip, "ReasmTimeout", &snmp_root.ip_ReasmTimeout); - arl_expect(arl_ip, "ReasmReqds", &snmp_root.ip_ReasmReqds); - arl_expect(arl_ip, "ReasmOKs", &snmp_root.ip_ReasmOKs); - arl_expect(arl_ip, "ReasmFails", &snmp_root.ip_ReasmFails); - arl_expect(arl_ip, "FragOKs", &snmp_root.ip_FragOKs); - arl_expect(arl_ip, "FragFails", &snmp_root.ip_FragFails); - arl_expect(arl_ip, "FragCreates", &snmp_root.ip_FragCreates); - - arl_icmp = arl_create("snmp/Icmp", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_icmp, "InMsgs", &snmp_root.icmp_InMsgs); - arl_expect(arl_icmp, "OutMsgs", &snmp_root.icmp_OutMsgs); - arl_expect(arl_icmp, "InErrors", &snmp_root.icmp_InErrors); - arl_expect(arl_icmp, "OutErrors", &snmp_root.icmp_OutErrors); - arl_expect(arl_icmp, "InCsumErrors", &snmp_root.icmp_InCsumErrors); - - arl_icmpmsg = arl_create("snmp/Icmpmsg", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_icmpmsg, "InType0", &snmp_root.icmpmsg_InEchoReps); - arl_expect(arl_icmpmsg, "OutType0", &snmp_root.icmpmsg_OutEchoReps); - arl_expect(arl_icmpmsg, "InType3", &snmp_root.icmpmsg_InDestUnreachs); - arl_expect(arl_icmpmsg, "OutType3", &snmp_root.icmpmsg_OutDestUnreachs); - arl_expect(arl_icmpmsg, "InType5", &snmp_root.icmpmsg_InRedirects); - arl_expect(arl_icmpmsg, "OutType5", &snmp_root.icmpmsg_OutRedirects); - arl_expect(arl_icmpmsg, "InType8", &snmp_root.icmpmsg_InEchos); - arl_expect(arl_icmpmsg, "OutType8", &snmp_root.icmpmsg_OutEchos); - arl_expect(arl_icmpmsg, "InType9", &snmp_root.icmpmsg_InRouterAdvert); - arl_expect(arl_icmpmsg, "OutType9", &snmp_root.icmpmsg_OutRouterAdvert); - arl_expect(arl_icmpmsg, "InType10", &snmp_root.icmpmsg_InRouterSelect); - arl_expect(arl_icmpmsg, "OutType10", &snmp_root.icmpmsg_OutRouterSelect); - arl_expect(arl_icmpmsg, "InType11", &snmp_root.icmpmsg_InTimeExcds); - 
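/* arl_expect() only registers key -> destination mappings; no parsing happens
   here. During collection, arl_begin()/arl_check() walk the header/value line
   pair of /proc/net/snmp and store each matched value into the registered
   kernel_uint_t, stopping early once everything expected has been seen. A
   sketch of that flow (it appears in the parsing loop further down):

       arl_begin(arl_icmpmsg);
       for(w = 1; w < words; w++)
           if(arl_check(arl_icmpmsg,
                        procfile_lineword(ff, h, w),    // key, from header line
                        procfile_lineword(ff, l, w)))   // value, from data line
               break;
*/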
arl_expect(arl_icmpmsg, "OutType11", &snmp_root.icmpmsg_OutTimeExcds); - arl_expect(arl_icmpmsg, "InType12", &snmp_root.icmpmsg_InParmProbs); - arl_expect(arl_icmpmsg, "OutType12", &snmp_root.icmpmsg_OutParmProbs); - arl_expect(arl_icmpmsg, "InType13", &snmp_root.icmpmsg_InTimestamps); - arl_expect(arl_icmpmsg, "OutType13", &snmp_root.icmpmsg_OutTimestamps); - arl_expect(arl_icmpmsg, "InType14", &snmp_root.icmpmsg_InTimestampReps); - arl_expect(arl_icmpmsg, "OutType14", &snmp_root.icmpmsg_OutTimestampReps); - - arl_tcp = arl_create("snmp/Tcp", arl_callback_str2kernel_uint_t, 60); - // arl_expect(arl_tcp, "RtoAlgorithm", &snmp_root.tcp_RtoAlgorithm); - // arl_expect(arl_tcp, "RtoMin", &snmp_root.tcp_RtoMin); - // arl_expect(arl_tcp, "RtoMax", &snmp_root.tcp_RtoMax); - arl_expect(arl_tcp, "MaxConn", &snmp_root.tcp_MaxConn); - arl_expect(arl_tcp, "ActiveOpens", &snmp_root.tcp_ActiveOpens); - arl_expect(arl_tcp, "PassiveOpens", &snmp_root.tcp_PassiveOpens); - arl_expect(arl_tcp, "AttemptFails", &snmp_root.tcp_AttemptFails); - arl_expect(arl_tcp, "EstabResets", &snmp_root.tcp_EstabResets); - arl_expect(arl_tcp, "CurrEstab", &snmp_root.tcp_CurrEstab); - arl_expect(arl_tcp, "InSegs", &snmp_root.tcp_InSegs); - arl_expect(arl_tcp, "OutSegs", &snmp_root.tcp_OutSegs); - arl_expect(arl_tcp, "RetransSegs", &snmp_root.tcp_RetransSegs); - arl_expect(arl_tcp, "InErrs", &snmp_root.tcp_InErrs); - arl_expect(arl_tcp, "OutRsts", &snmp_root.tcp_OutRsts); - arl_expect(arl_tcp, "InCsumErrors", &snmp_root.tcp_InCsumErrors); - - arl_udp = arl_create("snmp/Udp", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_udp, "InDatagrams", &snmp_root.udp_InDatagrams); - arl_expect(arl_udp, "NoPorts", &snmp_root.udp_NoPorts); - arl_expect(arl_udp, "InErrors", &snmp_root.udp_InErrors); - arl_expect(arl_udp, "OutDatagrams", &snmp_root.udp_OutDatagrams); - arl_expect(arl_udp, "RcvbufErrors", &snmp_root.udp_RcvbufErrors); - arl_expect(arl_udp, "SndbufErrors", &snmp_root.udp_SndbufErrors); - arl_expect(arl_udp, "InCsumErrors", &snmp_root.udp_InCsumErrors); - arl_expect(arl_udp, "IgnoredMulti", &snmp_root.udp_IgnoredMulti); - - arl_udplite = arl_create("snmp/Udplite", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_udplite, "InDatagrams", &snmp_root.udplite_InDatagrams); - arl_expect(arl_udplite, "NoPorts", &snmp_root.udplite_NoPorts); - arl_expect(arl_udplite, "InErrors", &snmp_root.udplite_InErrors); - arl_expect(arl_udplite, "OutDatagrams", &snmp_root.udplite_OutDatagrams); - arl_expect(arl_udplite, "RcvbufErrors", &snmp_root.udplite_RcvbufErrors); - arl_expect(arl_udplite, "SndbufErrors", &snmp_root.udplite_SndbufErrors); - arl_expect(arl_udplite, "InCsumErrors", &snmp_root.udplite_InCsumErrors); - arl_expect(arl_udplite, "IgnoredMulti", &snmp_root.udplite_IgnoredMulti); - - tcp_max_connections_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_connections"); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp"); - ff = procfile_open(config_get("plugin:proc:/proc/net/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - size_t words, w; - - for(l = 0; l < lines ;l++) { - char *key = procfile_lineword(ff, l, 0); - uint32_t hash = simple_hash(key); - - if(unlikely(hash == hash_ip && strcmp(key, 
"Ip") == 0)) { - size_t h = l++; - - if(strcmp(procfile_lineword(ff, l, 0), "Ip") != 0) { - error("Cannot read Ip line from /proc/net/snmp."); - break; - } - - words = procfile_linewords(ff, l); - if(words < 3) { - error("Cannot read /proc/net/snmp Ip line. Expected 3+ params, read %zu.", words); - continue; - } - - arl_begin(arl_ip); - for(w = 1; w < words ; w++) { - if (unlikely(arl_check(arl_ip, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) - break; - } - - // -------------------------------------------------------------------- - - if(do_ip_packets) { - static RRDSET *st = NULL; - static RRDDIM *rd_InReceives = NULL, - *rd_OutRequests = NULL, - *rd_ForwDatagrams = NULL, - *rd_InDelivers = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "packets" - , NULL - , "packets" - , NULL - , "IPv4 Packets" - , "packets/s" - , "proc" - , "net/snmp" - , 2450 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InReceives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutRequests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ForwDatagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InDelivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_OutRequests, (collected_number)snmp_root.ip_OutRequests); - rrddim_set_by_pointer(st, rd_InReceives, (collected_number)snmp_root.ip_InReceives); - rrddim_set_by_pointer(st, rd_ForwDatagrams, (collected_number)snmp_root.ip_ForwDatagrams); - rrddim_set_by_pointer(st, rd_InDelivers, (collected_number)snmp_root.ip_InDelivers); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ip_fragsout) { - static RRDSET *st = NULL; - static RRDDIM *rd_FragOKs = NULL, - *rd_FragFails = NULL, - *rd_FragCreates = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "fragsout" - , NULL - , "fragments" - , NULL - , "IPv4 Fragments Sent" - , "packets/s" - , "proc" - , "net/snmp" - , 3020 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_FragCreates = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_FragOKs, (collected_number)snmp_root.ip_FragOKs); - rrddim_set_by_pointer(st, rd_FragFails, (collected_number)snmp_root.ip_FragFails); - rrddim_set_by_pointer(st, rd_FragCreates, (collected_number)snmp_root.ip_FragCreates); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ip_fragsin) { - static RRDSET *st = NULL; - static RRDDIM *rd_ReasmOKs = NULL, - *rd_ReasmFails = NULL, - *rd_ReasmReqds = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "fragsin" - , NULL - , "fragments" - , NULL - , "IPv4 Fragments Reassembly" - , "packets/s" - , "proc" - , "net/snmp" - , 3030 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ReasmReqds = rrddim_add(st, 
"ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ReasmOKs, (collected_number)snmp_root.ip_ReasmOKs); - rrddim_set_by_pointer(st, rd_ReasmFails, (collected_number)snmp_root.ip_ReasmFails); - rrddim_set_by_pointer(st, rd_ReasmReqds, (collected_number)snmp_root.ip_ReasmReqds); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ip_errors) { - static RRDSET *st = NULL; - static RRDDIM *rd_InDiscards = NULL, - *rd_OutDiscards = NULL, - *rd_InHdrErrors = NULL, - *rd_OutNoRoutes = NULL, - *rd_InAddrErrors = NULL, - *rd_InUnknownProtos = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "errors" - , NULL - , "errors" - , NULL - , "IPv4 Errors" - , "packets/s" - , "proc" - , "net/snmp" - , 2470 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards); - rrddim_set_by_pointer(st, rd_OutDiscards, (collected_number)snmp_root.ip_OutDiscards); - rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors); - rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors); - rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos); - rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes); - rrdset_done(st); - } - } - else if(unlikely(hash == hash_icmp && strcmp(key, "Icmp") == 0)) { - size_t h = l++; - - if(strcmp(procfile_lineword(ff, l, 0), "Icmp") != 0) { - error("Cannot read Icmp line from /proc/net/snmp."); - break; - } - - words = procfile_linewords(ff, l); - if(words < 3) { - error("Cannot read /proc/net/snmp Icmp line. 
Expected 3+ params, read %zu.", words); - continue; - } - - arl_begin(arl_icmp); - for(w = 1; w < words ; w++) { - if (unlikely(arl_check(arl_icmp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) - break; - } - - // -------------------------------------------------------------------- - - if(do_icmp_packets) { - { - static RRDSET *st_packets = NULL; - static RRDDIM *rd_InMsgs = NULL, - *rd_OutMsgs = NULL; - - if(unlikely(!st_packets)) { - st_packets = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "icmp" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Packets" - , "packets/s" - , "proc" - , "net/snmp" - , 2602 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InMsgs = rrddim_add(st_packets, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutMsgs = rrddim_add(st_packets, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_packets); - - rrddim_set_by_pointer(st_packets, rd_InMsgs, (collected_number)snmp_root.icmp_InMsgs); - rrddim_set_by_pointer(st_packets, rd_OutMsgs, (collected_number)snmp_root.icmp_OutMsgs); - - rrdset_done(st_packets); - } - - { - static RRDSET *st_errors = NULL; - static RRDDIM *rd_InErrors = NULL, - *rd_OutErrors = NULL, - *rd_InCsumErrors = NULL; - - if(unlikely(!st_errors)) { - st_errors = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "icmp_errors" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Errors" - , "packets/s" - , "proc" - , "net/snmp" - , 2603 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InErrors = rrddim_add(st_errors, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutErrors = rrddim_add(st_errors, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st_errors, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_errors); - - rrddim_set_by_pointer(st_errors, rd_InErrors, (collected_number)snmp_root.icmp_InErrors); - rrddim_set_by_pointer(st_errors, rd_OutErrors, (collected_number)snmp_root.icmp_OutErrors); - rrddim_set_by_pointer(st_errors, rd_InCsumErrors, (collected_number)snmp_root.icmp_InCsumErrors); - - rrdset_done(st_errors); - } - } - } - else if(unlikely(hash == hash_icmpmsg && strcmp(key, "IcmpMsg") == 0)) { - size_t h = l++; - - if(strcmp(procfile_lineword(ff, l, 0), "IcmpMsg") != 0) { - error("Cannot read IcmpMsg line from /proc/net/snmp."); - break; - } - - words = procfile_linewords(ff, l); - if(words < 3) { - error("Cannot read /proc/net/snmp IcmpMsg line. 
Expected 3+ params, read %zu.", words); - continue; - } - - arl_begin(arl_icmpmsg); - for(w = 1; w < words ; w++) { - if (unlikely(arl_check(arl_icmpmsg, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) - break; - } - - // -------------------------------------------------------------------- - - if(do_icmpmsg) { - static RRDSET *st = NULL; - static RRDDIM *rd_InEchoReps = NULL, - *rd_OutEchoReps = NULL, - *rd_InDestUnreachs = NULL, - *rd_OutDestUnreachs = NULL, - *rd_InRedirects = NULL, - *rd_OutRedirects = NULL, - *rd_InEchos = NULL, - *rd_OutEchos = NULL, - *rd_InRouterAdvert = NULL, - *rd_OutRouterAdvert = NULL, - *rd_InRouterSelect = NULL, - *rd_OutRouterSelect = NULL, - *rd_InTimeExcds = NULL, - *rd_OutTimeExcds = NULL, - *rd_InParmProbs = NULL, - *rd_OutParmProbs = NULL, - *rd_InTimestamps = NULL, - *rd_OutTimestamps = NULL, - *rd_InTimestampReps = NULL, - *rd_OutTimestampReps = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "icmpmsg" - , NULL - , "icmp" - , NULL - , "IPv4 ICMP Messages" - , "packets/s" - , "proc" - , "net/snmp" - , 2604 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InEchoReps = rrddim_add(st, "InType0", "InEchoReps", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutEchoReps = rrddim_add(st, "OutType0", "OutEchoReps", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InDestUnreachs = rrddim_add(st, "InType3", "InDestUnreachs", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutDestUnreachs = rrddim_add(st, "OutType3", "OutDestUnreachs", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InRedirects = rrddim_add(st, "InType5", "InRedirects", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutRedirects = rrddim_add(st, "OutType5", "OutRedirects", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InEchos = rrddim_add(st, "InType8", "InEchos", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutEchos = rrddim_add(st, "OutType8", "OutEchos", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InRouterAdvert = rrddim_add(st, "InType9", "InRouterAdvert", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutRouterAdvert = rrddim_add(st, "OutType9", "OutRouterAdvert", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InRouterSelect = rrddim_add(st, "InType10", "InRouterSelect", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutRouterSelect = rrddim_add(st, "OutType10", "OutRouterSelect", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InTimeExcds = rrddim_add(st, "InType11", "InTimeExcds", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutTimeExcds = rrddim_add(st, "OutType11", "OutTimeExcds", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InParmProbs = rrddim_add(st, "InType12", "InParmProbs", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutParmProbs = rrddim_add(st, "OutType12", "OutParmProbs", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InTimestamps = rrddim_add(st, "InType13", "InTimestamps", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutTimestamps = rrddim_add(st, "OutType13", "OutTimestamps", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InTimestampReps = rrddim_add(st, "InType14", "InTimestampReps", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutTimestampReps = rrddim_add(st, "OutType14", "OutTimestampReps", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InEchoReps, (collected_number)snmp_root.icmpmsg_InEchoReps); - rrddim_set_by_pointer(st, rd_OutEchoReps, (collected_number)snmp_root.icmpmsg_OutEchoReps); - rrddim_set_by_pointer(st, rd_InDestUnreachs, (collected_number)snmp_root.icmpmsg_InDestUnreachs); - rrddim_set_by_pointer(st, rd_OutDestUnreachs, (collected_number)snmp_root.icmpmsg_OutDestUnreachs); - 
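/* The multiplier encodes direction on this chart: In* dimensions were added
   with multiplier 1 and Out* dimensions with -1, so received message types
   plot above zero and sent ones mirror below it, e.g. from the rrddim_add()
   calls above:

       rrddim_add(st, "InType5",  "InRedirects",   1, 1, RRD_ALGORITHM_INCREMENTAL);
       rrddim_add(st, "OutType5", "OutRedirects", -1, 1, RRD_ALGORITHM_INCREMENTAL);
*/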
rrddim_set_by_pointer(st, rd_InRedirects, (collected_number)snmp_root.icmpmsg_InRedirects); - rrddim_set_by_pointer(st, rd_OutRedirects, (collected_number)snmp_root.icmpmsg_OutRedirects); - rrddim_set_by_pointer(st, rd_InEchos, (collected_number)snmp_root.icmpmsg_InEchos); - rrddim_set_by_pointer(st, rd_OutEchos, (collected_number)snmp_root.icmpmsg_OutEchos); - rrddim_set_by_pointer(st, rd_InRouterAdvert, (collected_number)snmp_root.icmpmsg_InRouterAdvert); - rrddim_set_by_pointer(st, rd_OutRouterAdvert, (collected_number)snmp_root.icmpmsg_OutRouterAdvert); - rrddim_set_by_pointer(st, rd_InRouterSelect, (collected_number)snmp_root.icmpmsg_InRouterSelect); - rrddim_set_by_pointer(st, rd_OutRouterSelect, (collected_number)snmp_root.icmpmsg_OutRouterSelect); - rrddim_set_by_pointer(st, rd_InTimeExcds, (collected_number)snmp_root.icmpmsg_InTimeExcds); - rrddim_set_by_pointer(st, rd_OutTimeExcds, (collected_number)snmp_root.icmpmsg_OutTimeExcds); - rrddim_set_by_pointer(st, rd_InParmProbs, (collected_number)snmp_root.icmpmsg_InParmProbs); - rrddim_set_by_pointer(st, rd_OutParmProbs, (collected_number)snmp_root.icmpmsg_OutParmProbs); - rrddim_set_by_pointer(st, rd_InTimestamps, (collected_number)snmp_root.icmpmsg_InTimestamps); - rrddim_set_by_pointer(st, rd_OutTimestamps, (collected_number)snmp_root.icmpmsg_OutTimestamps); - rrddim_set_by_pointer(st, rd_InTimestampReps, (collected_number)snmp_root.icmpmsg_InTimestampReps); - rrddim_set_by_pointer(st, rd_OutTimestampReps, (collected_number)snmp_root.icmpmsg_OutTimestampReps); - - rrdset_done(st); - } - } - else if(unlikely(hash == hash_tcp && strcmp(key, "Tcp") == 0)) { - size_t h = l++; - - if(strcmp(procfile_lineword(ff, l, 0), "Tcp") != 0) { - error("Cannot read Tcp line from /proc/net/snmp."); - break; - } - - words = procfile_linewords(ff, l); - if(words < 3) { - error("Cannot read /proc/net/snmp Tcp line. 
Expected 3+ params, read %zu.", words); - continue; - } - - arl_begin(arl_tcp); - for(w = 1; w < words ; w++) { - if (unlikely(arl_check(arl_tcp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) - break; - } - - // -------------------------------------------------------------------- - - if(snmp_root.tcp_MaxConn != last_max_connections) { - last_max_connections = snmp_root.tcp_MaxConn; - rrdvar_custom_host_variable_set(localhost, tcp_max_connections_var, last_max_connections); - } - - // -------------------------------------------------------------------- - - // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html - if(do_tcp_sockets) { - static RRDSET *st = NULL; - static RRDDIM *rd_CurrEstab = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "tcpsock" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Connections" - , "active connections" - , "proc" - , "net/snmp" - , 2501 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_CurrEstab = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_CurrEstab, (collected_number)snmp_root.tcp_CurrEstab); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_tcp_packets) { - static RRDSET *st = NULL; - static RRDDIM *rd_InSegs = NULL, - *rd_OutSegs = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "tcppackets" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Packets" - , "packets/s" - , "proc" - , "net/snmp" - , 2510 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InSegs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutSegs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InSegs, (collected_number)snmp_root.tcp_InSegs); - rrddim_set_by_pointer(st, rd_OutSegs, (collected_number)snmp_root.tcp_OutSegs); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_tcp_errors) { - static RRDSET *st = NULL; - static RRDDIM *rd_InErrs = NULL, - *rd_InCsumErrors = NULL, - *rd_RetransSegs = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "tcperrors" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Errors" - , "packets/s" - , "proc" - , "net/snmp" - , 2525 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_RetransSegs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InErrs, (collected_number)snmp_root.tcp_InErrs); - rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.tcp_InCsumErrors); - rrddim_set_by_pointer(st, rd_RetransSegs, (collected_number)snmp_root.tcp_RetransSegs); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_tcp_opens) { - static RRDSET *st = NULL; - static RRDDIM *rd_ActiveOpens = NULL, - *rd_PassiveOpens = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "tcpopens" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Opens" - , "connections/s" - , "proc" - , "net/snmp" - , 2502 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, 
RRDSET_FLAG_DETAIL); - - rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ActiveOpens, (collected_number)snmp_root.tcp_ActiveOpens); - rrddim_set_by_pointer(st, rd_PassiveOpens, (collected_number)snmp_root.tcp_PassiveOpens); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_tcp_handshake) { - static RRDSET *st = NULL; - static RRDDIM *rd_EstabResets = NULL, - *rd_OutRsts = NULL, - *rd_AttemptFails = NULL, - *rd_TCPSynRetrans = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "tcphandshake" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Handshake Issues" - , "events/s" - , "proc" - , "net/snmp" - , 2530 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_AttemptFails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_TCPSynRetrans = rrddim_add(st, "TCPSynRetrans", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_EstabResets, (collected_number)snmp_root.tcp_EstabResets); - rrddim_set_by_pointer(st, rd_OutRsts, (collected_number)snmp_root.tcp_OutRsts); - rrddim_set_by_pointer(st, rd_AttemptFails, (collected_number)snmp_root.tcp_AttemptFails); - rrddim_set_by_pointer(st, rd_TCPSynRetrans, tcpext_TCPSynRetrans); - rrdset_done(st); - } - } - else if(unlikely(hash == hash_udp && strcmp(key, "Udp") == 0)) { - size_t h = l++; - - if(strcmp(procfile_lineword(ff, l, 0), "Udp") != 0) { - error("Cannot read Udp line from /proc/net/snmp."); - break; - } - - words = procfile_linewords(ff, l); - if(words < 3) { - error("Cannot read /proc/net/snmp Udp line. 
Expected 3+ params, read %zu.", words); - continue; - } - - arl_begin(arl_udp); - for(w = 1; w < words ; w++) { - if (unlikely(arl_check(arl_udp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) - break; - } - - // -------------------------------------------------------------------- - - // see http://net-snmp.sourceforge.net/docs/mibs/udp.html - if(do_udp_packets) { - static RRDSET *st = NULL; - static RRDDIM *rd_InDatagrams = NULL, - *rd_OutDatagrams = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "udppackets" - , NULL - , "udp" - , NULL - , "IPv4 UDP Packets" - , "packets/s" - , "proc" - , "net/snmp" - , 2602 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udp_InDatagrams); - rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udp_OutDatagrams); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_udp_errors) { - static RRDSET *st = NULL; - static RRDDIM *rd_RcvbufErrors = NULL, - *rd_SndbufErrors = NULL, - *rd_InErrors = NULL, - *rd_NoPorts = NULL, - *rd_InCsumErrors = NULL, - *rd_IgnoredMulti = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "udperrors" - , NULL - , "udp" - , NULL - , "IPv4 UDP Errors" - , "events/s" - , "proc" - , "net/snmp" - , 2701 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udp_InErrors); - rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udp_NoPorts); - rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udp_RcvbufErrors); - rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udp_SndbufErrors); - rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udp_InCsumErrors); - rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udp_IgnoredMulti); - rrdset_done(st); - } - } - else if(unlikely(hash == hash_udplite && strcmp(key, "UdpLite") == 0)) { - size_t h = l++; - - if(strcmp(procfile_lineword(ff, l, 0), "UdpLite") != 0) { - error("Cannot read UdpLite line from /proc/net/snmp."); - break; - } - - words = procfile_linewords(ff, l); - if(words < 3) { - error("Cannot read /proc/net/snmp UdpLite line. 
Expected 3+ params, read %zu.", words); - continue; - } - - arl_begin(arl_udplite); - for(w = 1; w < words ; w++) { - if (unlikely(arl_check(arl_udplite, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0)) - break; - } - - // -------------------------------------------------------------------- - - if(do_udplite_packets) { - { - static RRDSET *st = NULL; - static RRDDIM *rd_InDatagrams = NULL, - *rd_OutDatagrams = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "udplite" - , NULL - , "udplite" - , NULL - , "IPv4 UDPLite Packets" - , "packets/s" - , "proc" - , "net/snmp" - , 2603 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udplite_InDatagrams); - rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udplite_OutDatagrams); - rrdset_done(st); - } - - { - static RRDSET *st = NULL; - static RRDDIM *rd_RcvbufErrors = NULL, - *rd_SndbufErrors = NULL, - *rd_InErrors = NULL, - *rd_NoPorts = NULL, - *rd_InCsumErrors = NULL, - *rd_IgnoredMulti = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP - , "udplite_errors" - , NULL - , "udplite" - , NULL - , "IPv4 UDPLite Errors" - , "packets/s" - , "proc" - , "net/snmp" - , 2604 - , update_every - , RRDSET_TYPE_LINE); - - rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udplite_NoPorts); - rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udplite_InErrors); - rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udplite_InCsumErrors); - rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udplite_RcvbufErrors); - rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udplite_SndbufErrors); - rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udplite_IgnoredMulti); - rrdset_done(st); - } - } - } - } - - return 0; -} - diff --git a/src/proc_net_snmp6.c b/src/proc_net_snmp6.c deleted file mode 100644 index bd71b391a..000000000 --- a/src/proc_net_snmp6.c +++ /dev/null @@ -1,1265 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_NET_SNMP6 "ipv6" - -int do_proc_net_snmp6(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - - static int do_ip_packets = -1, - do_ip_fragsout = -1, - do_ip_fragsin = -1, - do_ip_errors = -1, - do_udplite_packets = -1, - do_udplite_errors = -1, - do_udp_packets = -1, - do_udp_errors = -1, - do_bandwidth = -1, - do_mcast = -1, - do_bcast = -1, - do_mcast_p = -1, - do_icmp = -1, - do_icmp_redir = -1, - do_icmp_errors = -1, - do_icmp_echos = -1, - do_icmp_groupmemb = -1, - do_icmp_router = -1, - do_icmp_neighbor = -1, - do_icmp_mldv2 = -1, - do_icmp_types = -1, - do_ect = -1; - - 
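// All the /proc parsers in this patch share the ARL ("adaptive re-sortable list")
// idiom visible above and below: arl_create() builds a keyword table, arl_expect()
// binds each expected keyword to the address that should receive its value, and on
// every pass arl_begin() resets the walk while arl_check() consumes one name/value
// pair, returning non-zero once all expected values have been filled so the caller
// can break out of the parse loop early. A minimal self-contained sketch of that
// idea follows; it is illustrative only and omits the real ARL's hashing and
// frequency-based re-sorting, and all names in it are hypothetical:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct expected_kv {
    const char *key;            // keyword as it appears in the /proc file
    unsigned long long *dst;    // where its parsed value should land
    int seen;                   // starts at 0; set once the key has been matched
};

// Returns non-zero when every expected key has been seen, mirroring arl_check()'s
// "stop early" contract.
static int expected_kv_check(struct expected_kv *tab, size_t n, size_t *filled,
                             const char *key, const char *value) {
    size_t i;
    for(i = 0; i < n; i++) {
        if(strcmp(tab[i].key, key) == 0) {
            *tab[i].dst = strtoull(value, NULL, 10);
            if(!tab[i].seen) {
                tab[i].seen = 1;
                if(++(*filled) == n) return 1;  // everything found: caller may stop
            }
            break;
        }
    }
    return 0;
}

// Usage would mirror the parse loop further below: for each file line, pass word 0
// as the key and word 1 as the value, and break once the function returns non-zero.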
static ARL_BASE *arl_base = NULL; - - static unsigned long long Ip6InReceives = 0ULL; - static unsigned long long Ip6InHdrErrors = 0ULL; - static unsigned long long Ip6InTooBigErrors = 0ULL; - static unsigned long long Ip6InNoRoutes = 0ULL; - static unsigned long long Ip6InAddrErrors = 0ULL; - static unsigned long long Ip6InUnknownProtos = 0ULL; - static unsigned long long Ip6InTruncatedPkts = 0ULL; - static unsigned long long Ip6InDiscards = 0ULL; - static unsigned long long Ip6InDelivers = 0ULL; - static unsigned long long Ip6OutForwDatagrams = 0ULL; - static unsigned long long Ip6OutRequests = 0ULL; - static unsigned long long Ip6OutDiscards = 0ULL; - static unsigned long long Ip6OutNoRoutes = 0ULL; - static unsigned long long Ip6ReasmTimeout = 0ULL; - static unsigned long long Ip6ReasmReqds = 0ULL; - static unsigned long long Ip6ReasmOKs = 0ULL; - static unsigned long long Ip6ReasmFails = 0ULL; - static unsigned long long Ip6FragOKs = 0ULL; - static unsigned long long Ip6FragFails = 0ULL; - static unsigned long long Ip6FragCreates = 0ULL; - static unsigned long long Ip6InMcastPkts = 0ULL; - static unsigned long long Ip6OutMcastPkts = 0ULL; - static unsigned long long Ip6InOctets = 0ULL; - static unsigned long long Ip6OutOctets = 0ULL; - static unsigned long long Ip6InMcastOctets = 0ULL; - static unsigned long long Ip6OutMcastOctets = 0ULL; - static unsigned long long Ip6InBcastOctets = 0ULL; - static unsigned long long Ip6OutBcastOctets = 0ULL; - static unsigned long long Ip6InNoECTPkts = 0ULL; - static unsigned long long Ip6InECT1Pkts = 0ULL; - static unsigned long long Ip6InECT0Pkts = 0ULL; - static unsigned long long Ip6InCEPkts = 0ULL; - static unsigned long long Icmp6InMsgs = 0ULL; - static unsigned long long Icmp6InErrors = 0ULL; - static unsigned long long Icmp6OutMsgs = 0ULL; - static unsigned long long Icmp6OutErrors = 0ULL; - static unsigned long long Icmp6InCsumErrors = 0ULL; - static unsigned long long Icmp6InDestUnreachs = 0ULL; - static unsigned long long Icmp6InPktTooBigs = 0ULL; - static unsigned long long Icmp6InTimeExcds = 0ULL; - static unsigned long long Icmp6InParmProblems = 0ULL; - static unsigned long long Icmp6InEchos = 0ULL; - static unsigned long long Icmp6InEchoReplies = 0ULL; - static unsigned long long Icmp6InGroupMembQueries = 0ULL; - static unsigned long long Icmp6InGroupMembResponses = 0ULL; - static unsigned long long Icmp6InGroupMembReductions = 0ULL; - static unsigned long long Icmp6InRouterSolicits = 0ULL; - static unsigned long long Icmp6InRouterAdvertisements = 0ULL; - static unsigned long long Icmp6InNeighborSolicits = 0ULL; - static unsigned long long Icmp6InNeighborAdvertisements = 0ULL; - static unsigned long long Icmp6InRedirects = 0ULL; - static unsigned long long Icmp6InMLDv2Reports = 0ULL; - static unsigned long long Icmp6OutDestUnreachs = 0ULL; - static unsigned long long Icmp6OutPktTooBigs = 0ULL; - static unsigned long long Icmp6OutTimeExcds = 0ULL; - static unsigned long long Icmp6OutParmProblems = 0ULL; - static unsigned long long Icmp6OutEchos = 0ULL; - static unsigned long long Icmp6OutEchoReplies = 0ULL; - static unsigned long long Icmp6OutGroupMembQueries = 0ULL; - static unsigned long long Icmp6OutGroupMembResponses = 0ULL; - static unsigned long long Icmp6OutGroupMembReductions = 0ULL; - static unsigned long long Icmp6OutRouterSolicits = 0ULL; - static unsigned long long Icmp6OutRouterAdvertisements = 0ULL; - static unsigned long long Icmp6OutNeighborSolicits = 0ULL; - static unsigned long long Icmp6OutNeighborAdvertisements = 
0ULL; - static unsigned long long Icmp6OutRedirects = 0ULL; - static unsigned long long Icmp6OutMLDv2Reports = 0ULL; - static unsigned long long Icmp6InType1 = 0ULL; - static unsigned long long Icmp6InType128 = 0ULL; - static unsigned long long Icmp6InType129 = 0ULL; - static unsigned long long Icmp6InType136 = 0ULL; - static unsigned long long Icmp6OutType1 = 0ULL; - static unsigned long long Icmp6OutType128 = 0ULL; - static unsigned long long Icmp6OutType129 = 0ULL; - static unsigned long long Icmp6OutType133 = 0ULL; - static unsigned long long Icmp6OutType135 = 0ULL; - static unsigned long long Icmp6OutType143 = 0ULL; - static unsigned long long Udp6InDatagrams = 0ULL; - static unsigned long long Udp6NoPorts = 0ULL; - static unsigned long long Udp6InErrors = 0ULL; - static unsigned long long Udp6OutDatagrams = 0ULL; - static unsigned long long Udp6RcvbufErrors = 0ULL; - static unsigned long long Udp6SndbufErrors = 0ULL; - static unsigned long long Udp6InCsumErrors = 0ULL; - static unsigned long long Udp6IgnoredMulti = 0ULL; - static unsigned long long UdpLite6InDatagrams = 0ULL; - static unsigned long long UdpLite6NoPorts = 0ULL; - static unsigned long long UdpLite6InErrors = 0ULL; - static unsigned long long UdpLite6OutDatagrams = 0ULL; - static unsigned long long UdpLite6RcvbufErrors = 0ULL; - static unsigned long long UdpLite6SndbufErrors = 0ULL; - static unsigned long long UdpLite6InCsumErrors = 0ULL; - - if(unlikely(!arl_base)) { - do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 packets", CONFIG_BOOLEAN_AUTO); - do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO); - do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO); - do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 errors", CONFIG_BOOLEAN_AUTO); - do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP packets", CONFIG_BOOLEAN_AUTO); - do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP errors", CONFIG_BOOLEAN_AUTO); - do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite packets", CONFIG_BOOLEAN_AUTO); - do_udplite_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite errors", CONFIG_BOOLEAN_AUTO); - do_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "bandwidth", CONFIG_BOOLEAN_AUTO); - do_mcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast bandwidth", CONFIG_BOOLEAN_AUTO); - do_bcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "broadcast bandwidth", CONFIG_BOOLEAN_AUTO); - do_mcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast packets", CONFIG_BOOLEAN_AUTO); - do_icmp = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp", CONFIG_BOOLEAN_AUTO); - do_icmp_redir = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp redirects", CONFIG_BOOLEAN_AUTO); - do_icmp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp errors", CONFIG_BOOLEAN_AUTO); - do_icmp_echos = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp echos", CONFIG_BOOLEAN_AUTO); - do_icmp_groupmemb = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp group membership", CONFIG_BOOLEAN_AUTO); - do_icmp_router = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp 
router", CONFIG_BOOLEAN_AUTO); - do_icmp_neighbor = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp neighbor", CONFIG_BOOLEAN_AUTO); - do_icmp_mldv2 = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp mldv2", CONFIG_BOOLEAN_AUTO); - do_icmp_types = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp types", CONFIG_BOOLEAN_AUTO); - do_ect = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ect", CONFIG_BOOLEAN_AUTO); - - arl_base = arl_create("snmp6", NULL, 60); - arl_expect(arl_base, "Ip6InReceives", &Ip6InReceives); - arl_expect(arl_base, "Ip6InHdrErrors", &Ip6InHdrErrors); - arl_expect(arl_base, "Ip6InTooBigErrors", &Ip6InTooBigErrors); - arl_expect(arl_base, "Ip6InNoRoutes", &Ip6InNoRoutes); - arl_expect(arl_base, "Ip6InAddrErrors", &Ip6InAddrErrors); - arl_expect(arl_base, "Ip6InUnknownProtos", &Ip6InUnknownProtos); - arl_expect(arl_base, "Ip6InTruncatedPkts", &Ip6InTruncatedPkts); - arl_expect(arl_base, "Ip6InDiscards", &Ip6InDiscards); - arl_expect(arl_base, "Ip6InDelivers", &Ip6InDelivers); - arl_expect(arl_base, "Ip6OutForwDatagrams", &Ip6OutForwDatagrams); - arl_expect(arl_base, "Ip6OutRequests", &Ip6OutRequests); - arl_expect(arl_base, "Ip6OutDiscards", &Ip6OutDiscards); - arl_expect(arl_base, "Ip6OutNoRoutes", &Ip6OutNoRoutes); - arl_expect(arl_base, "Ip6ReasmTimeout", &Ip6ReasmTimeout); - arl_expect(arl_base, "Ip6ReasmReqds", &Ip6ReasmReqds); - arl_expect(arl_base, "Ip6ReasmOKs", &Ip6ReasmOKs); - arl_expect(arl_base, "Ip6ReasmFails", &Ip6ReasmFails); - arl_expect(arl_base, "Ip6FragOKs", &Ip6FragOKs); - arl_expect(arl_base, "Ip6FragFails", &Ip6FragFails); - arl_expect(arl_base, "Ip6FragCreates", &Ip6FragCreates); - arl_expect(arl_base, "Ip6InMcastPkts", &Ip6InMcastPkts); - arl_expect(arl_base, "Ip6OutMcastPkts", &Ip6OutMcastPkts); - arl_expect(arl_base, "Ip6InOctets", &Ip6InOctets); - arl_expect(arl_base, "Ip6OutOctets", &Ip6OutOctets); - arl_expect(arl_base, "Ip6InMcastOctets", &Ip6InMcastOctets); - arl_expect(arl_base, "Ip6OutMcastOctets", &Ip6OutMcastOctets); - arl_expect(arl_base, "Ip6InBcastOctets", &Ip6InBcastOctets); - arl_expect(arl_base, "Ip6OutBcastOctets", &Ip6OutBcastOctets); - arl_expect(arl_base, "Ip6InNoECTPkts", &Ip6InNoECTPkts); - arl_expect(arl_base, "Ip6InECT1Pkts", &Ip6InECT1Pkts); - arl_expect(arl_base, "Ip6InECT0Pkts", &Ip6InECT0Pkts); - arl_expect(arl_base, "Ip6InCEPkts", &Ip6InCEPkts); - arl_expect(arl_base, "Icmp6InMsgs", &Icmp6InMsgs); - arl_expect(arl_base, "Icmp6InErrors", &Icmp6InErrors); - arl_expect(arl_base, "Icmp6OutMsgs", &Icmp6OutMsgs); - arl_expect(arl_base, "Icmp6OutErrors", &Icmp6OutErrors); - arl_expect(arl_base, "Icmp6InCsumErrors", &Icmp6InCsumErrors); - arl_expect(arl_base, "Icmp6InDestUnreachs", &Icmp6InDestUnreachs); - arl_expect(arl_base, "Icmp6InPktTooBigs", &Icmp6InPktTooBigs); - arl_expect(arl_base, "Icmp6InTimeExcds", &Icmp6InTimeExcds); - arl_expect(arl_base, "Icmp6InParmProblems", &Icmp6InParmProblems); - arl_expect(arl_base, "Icmp6InEchos", &Icmp6InEchos); - arl_expect(arl_base, "Icmp6InEchoReplies", &Icmp6InEchoReplies); - arl_expect(arl_base, "Icmp6InGroupMembQueries", &Icmp6InGroupMembQueries); - arl_expect(arl_base, "Icmp6InGroupMembResponses", &Icmp6InGroupMembResponses); - arl_expect(arl_base, "Icmp6InGroupMembReductions", &Icmp6InGroupMembReductions); - arl_expect(arl_base, "Icmp6InRouterSolicits", &Icmp6InRouterSolicits); - arl_expect(arl_base, "Icmp6InRouterAdvertisements", &Icmp6InRouterAdvertisements); - arl_expect(arl_base, "Icmp6InNeighborSolicits", 
&Icmp6InNeighborSolicits); - arl_expect(arl_base, "Icmp6InNeighborAdvertisements", &Icmp6InNeighborAdvertisements); - arl_expect(arl_base, "Icmp6InRedirects", &Icmp6InRedirects); - arl_expect(arl_base, "Icmp6InMLDv2Reports", &Icmp6InMLDv2Reports); - arl_expect(arl_base, "Icmp6OutDestUnreachs", &Icmp6OutDestUnreachs); - arl_expect(arl_base, "Icmp6OutPktTooBigs", &Icmp6OutPktTooBigs); - arl_expect(arl_base, "Icmp6OutTimeExcds", &Icmp6OutTimeExcds); - arl_expect(arl_base, "Icmp6OutParmProblems", &Icmp6OutParmProblems); - arl_expect(arl_base, "Icmp6OutEchos", &Icmp6OutEchos); - arl_expect(arl_base, "Icmp6OutEchoReplies", &Icmp6OutEchoReplies); - arl_expect(arl_base, "Icmp6OutGroupMembQueries", &Icmp6OutGroupMembQueries); - arl_expect(arl_base, "Icmp6OutGroupMembResponses", &Icmp6OutGroupMembResponses); - arl_expect(arl_base, "Icmp6OutGroupMembReductions", &Icmp6OutGroupMembReductions); - arl_expect(arl_base, "Icmp6OutRouterSolicits", &Icmp6OutRouterSolicits); - arl_expect(arl_base, "Icmp6OutRouterAdvertisements", &Icmp6OutRouterAdvertisements); - arl_expect(arl_base, "Icmp6OutNeighborSolicits", &Icmp6OutNeighborSolicits); - arl_expect(arl_base, "Icmp6OutNeighborAdvertisements", &Icmp6OutNeighborAdvertisements); - arl_expect(arl_base, "Icmp6OutRedirects", &Icmp6OutRedirects); - arl_expect(arl_base, "Icmp6OutMLDv2Reports", &Icmp6OutMLDv2Reports); - arl_expect(arl_base, "Icmp6InType1", &Icmp6InType1); - arl_expect(arl_base, "Icmp6InType128", &Icmp6InType128); - arl_expect(arl_base, "Icmp6InType129", &Icmp6InType129); - arl_expect(arl_base, "Icmp6InType136", &Icmp6InType136); - arl_expect(arl_base, "Icmp6OutType1", &Icmp6OutType1); - arl_expect(arl_base, "Icmp6OutType128", &Icmp6OutType128); - arl_expect(arl_base, "Icmp6OutType129", &Icmp6OutType129); - arl_expect(arl_base, "Icmp6OutType133", &Icmp6OutType133); - arl_expect(arl_base, "Icmp6OutType135", &Icmp6OutType135); - arl_expect(arl_base, "Icmp6OutType143", &Icmp6OutType143); - arl_expect(arl_base, "Udp6InDatagrams", &Udp6InDatagrams); - arl_expect(arl_base, "Udp6NoPorts", &Udp6NoPorts); - arl_expect(arl_base, "Udp6InErrors", &Udp6InErrors); - arl_expect(arl_base, "Udp6OutDatagrams", &Udp6OutDatagrams); - arl_expect(arl_base, "Udp6RcvbufErrors", &Udp6RcvbufErrors); - arl_expect(arl_base, "Udp6SndbufErrors", &Udp6SndbufErrors); - arl_expect(arl_base, "Udp6InCsumErrors", &Udp6InCsumErrors); - arl_expect(arl_base, "Udp6IgnoredMulti", &Udp6IgnoredMulti); - arl_expect(arl_base, "UdpLite6InDatagrams", &UdpLite6InDatagrams); - arl_expect(arl_base, "UdpLite6NoPorts", &UdpLite6NoPorts); - arl_expect(arl_base, "UdpLite6InErrors", &UdpLite6InErrors); - arl_expect(arl_base, "UdpLite6OutDatagrams", &UdpLite6OutDatagrams); - arl_expect(arl_base, "UdpLite6RcvbufErrors", &UdpLite6RcvbufErrors); - arl_expect(arl_base, "UdpLite6SndbufErrors", &UdpLite6SndbufErrors); - arl_expect(arl_base, "UdpLite6InCsumErrors", &UdpLite6InCsumErrors); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp6"); - ff = procfile_open(config_get("plugin:proc:/proc/net/snmp6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) - return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - arl_begin(arl_base); - - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 2)) { - 
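// Each /proc/net/snmp6 line is a flat "Name Value" pair (one statistic per line,
// unlike /proc/net/snmp's header/value row pairs), so fewer than two words means
// either a blank line, skipped silently, or a malformed one, reported below.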
if(unlikely(words)) error("Cannot read /proc/net/snmp6 line %zu. Expected 2 params, read %zu.", l, words); - continue; - } - - if(unlikely(arl_check(arl_base, - procfile_lineword(ff, l, 0), - procfile_lineword(ff, l, 1)))) break; - } - - // -------------------------------------------------------------------- - - if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (Ip6InOctets || Ip6OutOctets))) { - do_bandwidth = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, - *rd_sent = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "system" - , "ipv6" - , NULL - , "network" - , NULL - , "IPv6 Bandwidth" - , "kilobits/s" - , "proc" - , "net/snmp6" - , 502 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_received = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_received, Ip6InOctets); - rrddim_set_by_pointer(st, rd_sent, Ip6OutOctets); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (Ip6InReceives || Ip6OutRequests || Ip6InDelivers || Ip6OutForwDatagrams))) { - do_ip_packets = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, - *rd_sent = NULL, - *rd_forwarded = NULL, - *rd_delivers = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "packets" - , NULL - , "packets" - , NULL - , "IPv6 Packets" - , "packets/s" - , "proc" - , "net/snmp6" - , 3000 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_forwarded = rrddim_add(st, "OutForwDatagrams", "forwarded", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_delivers = rrddim_add(st, "InDelivers", "delivers", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_received, Ip6InReceives); - rrddim_set_by_pointer(st, rd_sent, Ip6OutRequests); - rrddim_set_by_pointer(st, rd_forwarded, Ip6OutForwDatagrams); - rrddim_set_by_pointer(st, rd_delivers, Ip6InDelivers); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (Ip6FragOKs || Ip6FragFails || Ip6FragCreates))) { - do_ip_fragsout = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_ok = NULL, - *rd_failed = NULL, - *rd_all = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "fragsout" - , NULL - , "fragments6" - , NULL - , "IPv6 Fragments Sent" - , "packets/s" - , "proc" - , "net/snmp6" - , 3011 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_all = rrddim_add(st, "FragCreates", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ok, Ip6FragOKs); - rrddim_set_by_pointer(st, rd_failed, Ip6FragFails); - rrddim_set_by_pointer(st, rd_all, Ip6FragCreates); - rrdset_done(st); - } - - // 
-------------------------------------------------------------------- - - if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO - && ( - Ip6ReasmOKs - || Ip6ReasmFails - || Ip6ReasmTimeout - || Ip6ReasmReqds - ))) { - do_ip_fragsin = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_ok = NULL, - *rd_failed = NULL, - *rd_timeout = NULL, - *rd_all = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "fragsin" - , NULL - , "fragments6" - , NULL - , "IPv6 Fragments Reassembly" - , "packets/s" - , "proc" - , "net/snmp6" - , 3012 - , update_every - , RRDSET_TYPE_LINE); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_timeout = rrddim_add(st, "ReasmTimeout", "timeout", -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_ok, Ip6ReasmOKs); - rrddim_set_by_pointer(st, rd_failed, Ip6ReasmFails); - rrddim_set_by_pointer(st, rd_timeout, Ip6ReasmTimeout); - rrddim_set_by_pointer(st, rd_all, Ip6ReasmReqds); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO - && ( - Ip6InDiscards - || Ip6OutDiscards - || Ip6InHdrErrors - || Ip6InAddrErrors - || Ip6InUnknownProtos - || Ip6InTooBigErrors - || Ip6InTruncatedPkts - || Ip6InNoRoutes - ))) { - do_ip_errors = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InDiscards = NULL, - *rd_OutDiscards = NULL, - *rd_InHdrErrors = NULL, - *rd_InAddrErrors = NULL, - *rd_InUnknownProtos = NULL, - *rd_InTooBigErrors = NULL, - *rd_InTruncatedPkts = NULL, - *rd_InNoRoutes = NULL, - *rd_OutNoRoutes = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "errors" - , NULL - , "errors" - , NULL - , "IPv6 Errors" - , "packets/s" - , "proc" - , "net/snmp6" - , 3002 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InTooBigErrors = rrddim_add(st, "InTooBigErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InDiscards, Ip6InDiscards); - rrddim_set_by_pointer(st, rd_OutDiscards, Ip6OutDiscards); - rrddim_set_by_pointer(st, rd_InHdrErrors, Ip6InHdrErrors); - rrddim_set_by_pointer(st, rd_InAddrErrors, Ip6InAddrErrors); - rrddim_set_by_pointer(st, rd_InUnknownProtos, Ip6InUnknownProtos); - rrddim_set_by_pointer(st, rd_InTooBigErrors, Ip6InTooBigErrors); - rrddim_set_by_pointer(st, rd_InTruncatedPkts, Ip6InTruncatedPkts); - 
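// The guard on this chart (and on every other chart in this function) shows the
// CONFIG_BOOLEAN_AUTO latch: a chart configured as "auto" stays hidden until one
// of its source counters first becomes non-zero, at which point the do_* flag is
// overwritten with CONFIG_BOOLEAN_YES so the chart keeps updating even if the
// counters later read zero again. Schematically (a sketch with placeholder names):
//
//     if(do_chart == CONFIG_BOOLEAN_YES ||
//        (do_chart == CONFIG_BOOLEAN_AUTO && (counter_a || counter_b))) {
//         do_chart = CONFIG_BOOLEAN_YES;   /* latch on permanently */
//         /* create the chart once, then update its dimensions */
//     }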
rrddim_set_by_pointer(st, rd_InNoRoutes, Ip6InNoRoutes); - rrddim_set_by_pointer(st, rd_OutNoRoutes, Ip6OutNoRoutes); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (Udp6InDatagrams || Udp6OutDatagrams))) { - do_udp_packets = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, - *rd_sent = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "udppackets" - , NULL - , "udp6" - , NULL - , "IPv6 UDP Packets" - , "packets/s" - , "proc" - , "net/snmp6" - , 3601 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_received, Udp6InDatagrams); - rrddim_set_by_pointer(st, rd_sent, Udp6OutDatagrams); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO - && ( - Udp6InErrors - || Udp6NoPorts - || Udp6RcvbufErrors - || Udp6SndbufErrors - || Udp6InCsumErrors - || Udp6IgnoredMulti - ))) { - do_udp_errors = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_RcvbufErrors = NULL, - *rd_SndbufErrors = NULL, - *rd_InErrors = NULL, - *rd_NoPorts = NULL, - *rd_InCsumErrors = NULL, - *rd_IgnoredMulti = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "udperrors" - , NULL - , "udp6" - , NULL - , "IPv6 UDP Errors" - , "events/s" - , "proc" - , "net/snmp6" - , 3701 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_RcvbufErrors, Udp6RcvbufErrors); - rrddim_set_by_pointer(st, rd_SndbufErrors, Udp6SndbufErrors); - rrddim_set_by_pointer(st, rd_InErrors, Udp6InErrors); - rrddim_set_by_pointer(st, rd_NoPorts, Udp6NoPorts); - rrddim_set_by_pointer(st, rd_InCsumErrors, Udp6InCsumErrors); - rrddim_set_by_pointer(st, rd_IgnoredMulti, Udp6IgnoredMulti); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && (UdpLite6InDatagrams || UdpLite6OutDatagrams))) { - do_udplite_packets = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, - *rd_sent = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "udplitepackets" - , NULL - , "udplite6" - , NULL - , "IPv6 UDPlite Packets" - , "packets/s" - , "proc" - , "net/snmp6" - , 3602 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, 
"OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_received, UdpLite6InDatagrams); - rrddim_set_by_pointer(st, rd_sent, UdpLite6OutDatagrams); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO - && ( - UdpLite6InErrors - || UdpLite6NoPorts - || UdpLite6RcvbufErrors - || UdpLite6SndbufErrors - || Udp6InCsumErrors - || UdpLite6InCsumErrors - ))) { - do_udplite_errors = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_RcvbufErrors = NULL, - *rd_SndbufErrors = NULL, - *rd_InErrors = NULL, - *rd_NoPorts = NULL, - *rd_InCsumErrors = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "udpliteerrors" - , NULL - , "udplite6" - , NULL - , "IPv6 UDP Lite Errors" - , "events/s" - , "proc" - , "net/snmp6" - , 3701 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InErrors, UdpLite6InErrors); - rrddim_set_by_pointer(st, rd_NoPorts, UdpLite6NoPorts); - rrddim_set_by_pointer(st, rd_RcvbufErrors, UdpLite6RcvbufErrors); - rrddim_set_by_pointer(st, rd_SndbufErrors, UdpLite6SndbufErrors); - rrddim_set_by_pointer(st, rd_InCsumErrors, UdpLite6InCsumErrors); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastOctets || Ip6InMcastOctets))) { - do_mcast = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_Ip6InMcastOctets = NULL, - *rd_Ip6OutMcastOctets = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "mcast" - , NULL - , "multicast6" - , NULL - , "IPv6 Multicast Bandwidth" - , "kilobits/s" - , "proc" - , "net/snmp6" - , 9000 - , update_every - , RRDSET_TYPE_AREA - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_Ip6InMcastOctets = rrddim_add(st, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_Ip6OutMcastOctets = rrddim_add(st, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_Ip6InMcastOctets, Ip6InMcastOctets); - rrddim_set_by_pointer(st, rd_Ip6OutMcastOctets, Ip6OutMcastOctets); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (Ip6OutBcastOctets || Ip6InBcastOctets))) { - do_bcast = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_Ip6InBcastOctets = NULL, - *rd_Ip6OutBcastOctets = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "bcast" - , NULL - , "broadcast6" - , NULL - , "IPv6 Broadcast Bandwidth" - , "kilobits/s" - , "proc" - , "net/snmp6" - , 8000 - , update_every - , RRDSET_TYPE_AREA - ); - rrdset_flag_set(st, 
RRDSET_FLAG_DETAIL); - - rd_Ip6InBcastOctets = rrddim_add(st, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_Ip6OutBcastOctets = rrddim_add(st, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_Ip6InBcastOctets, Ip6InBcastOctets); - rrddim_set_by_pointer(st, rd_Ip6OutBcastOctets, Ip6OutBcastOctets); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastPkts || Ip6InMcastPkts))) { - do_mcast_p = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_Ip6InMcastPkts = NULL, - *rd_Ip6OutMcastPkts = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "mcastpkts" - , NULL - , "multicast6" - , NULL - , "IPv6 Multicast Packets" - , "packets/s" - , "proc" - , "net/snmp6" - , 9500 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_Ip6InMcastPkts = rrddim_add(st, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_Ip6OutMcastPkts = rrddim_add(st, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_Ip6InMcastPkts, Ip6InMcastPkts); - rrddim_set_by_pointer(st, rd_Ip6OutMcastPkts, Ip6OutMcastPkts); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && (Icmp6InMsgs || Icmp6OutMsgs))) { - do_icmp = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_Icmp6InMsgs = NULL, - *rd_Icmp6OutMsgs = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmp" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP Messages" - , "messages/s" - , "proc" - , "net/snmp6" - , 10000 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_Icmp6InMsgs = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_Icmp6OutMsgs = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_Icmp6InMsgs, Icmp6InMsgs); - rrddim_set_by_pointer(st, rd_Icmp6OutMsgs, Icmp6OutMsgs); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && (Icmp6InRedirects || Icmp6OutRedirects))) { - do_icmp_redir = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_Icmp6InRedirects = NULL, - *rd_Icmp6OutRedirects = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmpredir" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP Redirects" - , "redirects/s" - , "proc" - , "net/snmp6" - , 10050 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_Icmp6InRedirects = rrddim_add(st, "InRedirects", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_Icmp6OutRedirects = rrddim_add(st, "OutRedirects", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_Icmp6InRedirects, Icmp6InRedirects); - rrddim_set_by_pointer(st, rd_Icmp6OutRedirects, Icmp6OutRedirects); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InErrors - || Icmp6OutErrors 
- || Icmp6InCsumErrors - || Icmp6InDestUnreachs - || Icmp6InPktTooBigs - || Icmp6InTimeExcds - || Icmp6InParmProblems - || Icmp6OutDestUnreachs - || Icmp6OutPktTooBigs - || Icmp6OutTimeExcds - || Icmp6OutParmProblems - ))) { - do_icmp_errors = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InErrors = NULL, - *rd_OutErrors = NULL, - *rd_InCsumErrors = NULL, - *rd_InDestUnreachs = NULL, - *rd_InPktTooBigs = NULL, - *rd_InTimeExcds = NULL, - *rd_InParmProblems = NULL, - *rd_OutDestUnreachs = NULL, - *rd_OutPktTooBigs = NULL, - *rd_OutTimeExcds = NULL, - *rd_OutParmProblems = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmperrors" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP Errors" - , "errors/s" - , "proc" - , "net/snmp6" - , 10100 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutErrors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InDestUnreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InPktTooBigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InTimeExcds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InParmProblems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutDestUnreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutPktTooBigs = rrddim_add(st, "OutPktTooBigs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutTimeExcds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutParmProblems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InErrors, Icmp6InErrors); - rrddim_set_by_pointer(st, rd_OutErrors, Icmp6OutErrors); - rrddim_set_by_pointer(st, rd_InCsumErrors, Icmp6InCsumErrors); - rrddim_set_by_pointer(st, rd_InDestUnreachs, Icmp6InDestUnreachs); - rrddim_set_by_pointer(st, rd_InPktTooBigs, Icmp6InPktTooBigs); - rrddim_set_by_pointer(st, rd_InTimeExcds, Icmp6InTimeExcds); - rrddim_set_by_pointer(st, rd_InParmProblems, Icmp6InParmProblems); - rrddim_set_by_pointer(st, rd_OutDestUnreachs, Icmp6OutDestUnreachs); - rrddim_set_by_pointer(st, rd_OutPktTooBigs, Icmp6OutPktTooBigs); - rrddim_set_by_pointer(st, rd_OutTimeExcds, Icmp6OutTimeExcds); - rrddim_set_by_pointer(st, rd_OutParmProblems, Icmp6OutParmProblems); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InEchos - || Icmp6OutEchos - || Icmp6InEchoReplies - || Icmp6OutEchoReplies - ))) { - do_icmp_echos = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InEchos = NULL, - *rd_OutEchos = NULL, - *rd_InEchoReplies = NULL, - *rd_OutEchoReplies = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmpechos" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP Echo" - , "messages/s" - , "proc" - , "net/snmp6" - , 10200 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InEchos = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutEchos = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InEchoReplies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, 
RRD_ALGORITHM_INCREMENTAL); - rd_OutEchoReplies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InEchos, Icmp6InEchos); - rrddim_set_by_pointer(st, rd_OutEchos, Icmp6OutEchos); - rrddim_set_by_pointer(st, rd_InEchoReplies, Icmp6InEchoReplies); - rrddim_set_by_pointer(st, rd_OutEchoReplies, Icmp6OutEchoReplies); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InGroupMembQueries - || Icmp6OutGroupMembQueries - || Icmp6InGroupMembResponses - || Icmp6OutGroupMembResponses - || Icmp6InGroupMembReductions - || Icmp6OutGroupMembReductions - ))) { - do_icmp_groupmemb = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InQueries = NULL, - *rd_OutQueries = NULL, - *rd_InResponses = NULL, - *rd_OutResponses = NULL, - *rd_InReductions = NULL, - *rd_OutReductions = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "groupmemb" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP Group Membership" - , "messages/s" - , "proc" - , "net/snmp6" - , 10300 - , update_every - , RRDSET_TYPE_LINE); - - rd_InQueries = rrddim_add(st, "InQueries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutQueries = rrddim_add(st, "OutQueries", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InResponses = rrddim_add(st, "InResponses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutResponses = rrddim_add(st, "OutResponses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InReductions = rrddim_add(st, "InReductions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutReductions = rrddim_add(st, "OutReductions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InQueries, Icmp6InGroupMembQueries); - rrddim_set_by_pointer(st, rd_OutQueries, Icmp6OutGroupMembQueries); - rrddim_set_by_pointer(st, rd_InResponses, Icmp6InGroupMembResponses); - rrddim_set_by_pointer(st, rd_OutResponses, Icmp6OutGroupMembResponses); - rrddim_set_by_pointer(st, rd_InReductions, Icmp6InGroupMembReductions); - rrddim_set_by_pointer(st, rd_OutReductions, Icmp6OutGroupMembReductions); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InRouterSolicits - || Icmp6OutRouterSolicits - || Icmp6InRouterAdvertisements - || Icmp6OutRouterAdvertisements - ))) { - do_icmp_router = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InSolicits = NULL, - *rd_OutSolicits = NULL, - *rd_InAdvertisements = NULL, - *rd_OutAdvertisements = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmprouter" - , NULL - , "icmp6" - , NULL - , "IPv6 Router Messages" - , "messages/s" - , "proc" - , "net/snmp6" - , 10400 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InRouterSolicits); - rrddim_set_by_pointer(st, 
rd_OutSolicits, Icmp6OutRouterSolicits); - rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InRouterAdvertisements); - rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutRouterAdvertisements); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InNeighborSolicits - || Icmp6OutNeighborSolicits - || Icmp6InNeighborAdvertisements - || Icmp6OutNeighborAdvertisements - ))) { - do_icmp_neighbor = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InSolicits = NULL, - *rd_OutSolicits = NULL, - *rd_InAdvertisements = NULL, - *rd_OutAdvertisements = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmpneighbor" - , NULL - , "icmp6" - , NULL - , "IPv6 Neighbor Messages" - , "messages/s" - , "proc" - , "net/snmp6" - , 10500 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InNeighborSolicits); - rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutNeighborSolicits); - rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InNeighborAdvertisements); - rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutNeighborAdvertisements); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && (Icmp6InMLDv2Reports || Icmp6OutMLDv2Reports))) { - do_icmp_mldv2 = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InMLDv2Reports = NULL, - *rd_OutMLDv2Reports = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmpmldv2" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP MLDv2 Reports" - , "reports/s" - , "proc" - , "net/snmp6" - , 10600 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InMLDv2Reports = rrddim_add(st, "InMLDv2Reports", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutMLDv2Reports = rrddim_add(st, "OutMLDv2Reports", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InMLDv2Reports, Icmp6InMLDv2Reports); - rrddim_set_by_pointer(st, rd_OutMLDv2Reports, Icmp6OutMLDv2Reports); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO - && ( - Icmp6InType1 - || Icmp6InType128 - || Icmp6InType129 - || Icmp6InType136 - || Icmp6OutType1 - || Icmp6OutType128 - || Icmp6OutType129 - || Icmp6OutType133 - || Icmp6OutType135 - || Icmp6OutType143 - ))) { - do_icmp_types = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InType1 = NULL, - *rd_InType128 = NULL, - *rd_InType129 = NULL, - *rd_InType136 = NULL, - *rd_OutType1 = NULL, - *rd_OutType128 = NULL, - *rd_OutType129 = NULL, - *rd_OutType133 = NULL, - *rd_OutType135 = NULL, - *rd_OutType143 = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "icmptypes" - , NULL - , "icmp6" - , NULL - , "IPv6 ICMP 
Types" - , "messages/s" - , "proc" - , "net/snmp6" - , 10700 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InType1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InType128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InType129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InType136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutType1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutType128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutType129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutType133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutType135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_OutType143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InType1, Icmp6InType1); - rrddim_set_by_pointer(st, rd_InType128, Icmp6InType128); - rrddim_set_by_pointer(st, rd_InType129, Icmp6InType129); - rrddim_set_by_pointer(st, rd_InType136, Icmp6InType136); - rrddim_set_by_pointer(st, rd_OutType1, Icmp6OutType1); - rrddim_set_by_pointer(st, rd_OutType128, Icmp6OutType128); - rrddim_set_by_pointer(st, rd_OutType129, Icmp6OutType129); - rrddim_set_by_pointer(st, rd_OutType133, Icmp6OutType133); - rrddim_set_by_pointer(st, rd_OutType135, Icmp6OutType135); - rrddim_set_by_pointer(st, rd_OutType143, Icmp6OutType143); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO - && ( - Ip6InNoECTPkts - || Ip6InECT1Pkts - || Ip6InECT0Pkts - || Ip6InCEPkts - ))) { - do_ect = CONFIG_BOOLEAN_YES; - static RRDSET *st = NULL; - static RRDDIM *rd_InNoECTPkts = NULL, - *rd_InECT1Pkts = NULL, - *rd_InECT0Pkts = NULL, - *rd_InCEPkts = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_SNMP6 - , "ect" - , NULL - , "packets" - , NULL - , "IPv6 ECT Packets" - , "packets/s" - , "proc" - , "net/snmp6" - , 10800 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_InNoECTPkts = rrddim_add(st, "InNoECTPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InECT1Pkts = rrddim_add(st, "InECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InECT0Pkts = rrddim_add(st, "InECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_InCEPkts = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_InNoECTPkts, Ip6InNoECTPkts); - rrddim_set_by_pointer(st, rd_InECT1Pkts, Ip6InECT1Pkts); - rrddim_set_by_pointer(st, rd_InECT0Pkts, Ip6InECT0Pkts); - rrddim_set_by_pointer(st, rd_InCEPkts, Ip6InCEPkts); - rrdset_done(st); - } - - return 0; -} - diff --git a/src/proc_net_sockstat.c b/src/proc_net_sockstat.c deleted file mode 100644 index db3070660..000000000 --- a/src/proc_net_sockstat.c +++ /dev/null @@ -1,514 +0,0 @@ -#include "common.h" - -static struct proc_net_sockstat { - kernel_uint_t sockets_used; - - kernel_uint_t tcp_inuse; - kernel_uint_t tcp_orphan; - kernel_uint_t tcp_tw; - kernel_uint_t tcp_alloc; - kernel_uint_t tcp_mem; - - kernel_uint_t udp_inuse; - kernel_uint_t udp_mem; - - kernel_uint_t udplite_inuse; - - kernel_uint_t raw_inuse; - - kernel_uint_t frag_inuse; - kernel_uint_t frag_memory; -} sockstat_root = { 0 }; - - -static int read_tcp_mem(void) { - static 
char *filename = NULL; - static RRDVAR *tcp_mem_low_threshold = NULL, - *tcp_mem_pressure_threshold = NULL, - *tcp_mem_high_threshold = NULL; - - if(unlikely(!tcp_mem_low_threshold)) { - tcp_mem_low_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_low"); - tcp_mem_pressure_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_pressure"); - tcp_mem_high_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_high"); - } - - if(unlikely(!filename)) { - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_mem", netdata_configured_host_prefix); - filename = strdupz(buffer); - } - - char buffer[200 + 1], *start, *end; - if(read_file(filename, buffer, 200) != 0) return 1; - buffer[200] = '\0'; - - unsigned long long low = 0, pressure = 0, high = 0; - - start = buffer; - low = strtoull(start, &end, 10); - - start = end; - pressure = strtoull(start, &end, 10); - - start = end; - high = strtoull(start, &end, 10); - - // fprintf(stderr, "TCP MEM low = %llu, pressure = %llu, high = %llu\n", low, pressure, high); - - rrdvar_custom_host_variable_set(localhost, tcp_mem_low_threshold, low * sysconf(_SC_PAGESIZE) / 1024); - rrdvar_custom_host_variable_set(localhost, tcp_mem_pressure_threshold, pressure * sysconf(_SC_PAGESIZE) / 1024); - rrdvar_custom_host_variable_set(localhost, tcp_mem_high_threshold, high * sysconf(_SC_PAGESIZE) / 1024); - - return 0; -} - -static kernel_uint_t read_tcp_max_orphans(void) { - static char *filename = NULL; - static RRDVAR *tcp_max_orphans_var = NULL; - - if(unlikely(!filename)) { - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_max_orphans", netdata_configured_host_prefix); - filename = strdupz(buffer); - } - - unsigned long long tcp_max_orphans = 0; - if(read_single_number_file(filename, &tcp_max_orphans) == 0) { - - if(unlikely(!tcp_max_orphans_var)) - tcp_max_orphans_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_orphans"); - - rrdvar_custom_host_variable_set(localhost, tcp_max_orphans_var, tcp_max_orphans); - return tcp_max_orphans; - } - - return 0; -} - -int do_proc_net_sockstat(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - - static uint32_t hash_sockets = 0, - hash_raw = 0, - hash_frag = 0, - hash_tcp = 0, - hash_udp = 0, - hash_udplite = 0; - - static long long update_constants_every = 60, update_constants_count = 0; - - static ARL_BASE *arl_sockets = NULL; - static ARL_BASE *arl_tcp = NULL; - static ARL_BASE *arl_udp = NULL; - static ARL_BASE *arl_udplite = NULL; - static ARL_BASE *arl_raw = NULL; - static ARL_BASE *arl_frag = NULL; - - static int do_sockets = -1, do_tcp_sockets = -1, do_tcp_mem = -1, do_udp_sockets = -1, do_udp_mem = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1, do_frag_mem = -1; - - static char *keys[7] = { NULL }; - static uint32_t hashes[7] = { 0 }; - static ARL_BASE *bases[7] = { NULL }; - - if(unlikely(!arl_sockets)) { - do_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 sockets", CONFIG_BOOLEAN_AUTO); - do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP sockets", CONFIG_BOOLEAN_AUTO); - do_tcp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP memory", CONFIG_BOOLEAN_AUTO); - do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP sockets", CONFIG_BOOLEAN_AUTO); - do_udp_mem = 
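/* editor's note: config_get_boolean_ondemand() yields CONFIG_BOOLEAN_YES,
   CONFIG_BOOLEAN_NO or CONFIG_BOOLEAN_AUTO. Charts configured "auto"
   stay hidden until a non-zero value is seen, at which point the flag is
   latched to YES further below, so a chart never disappears again once
   it has shown data. */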
config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP memory", CONFIG_BOOLEAN_AUTO); - do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDPLITE sockets", CONFIG_BOOLEAN_AUTO); - do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 RAW sockets", CONFIG_BOOLEAN_AUTO); - do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO); - do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO); - - update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every); - update_constants_count = update_constants_every; - - arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_sockets, "used", &sockstat_root.sockets_used); - - arl_tcp = arl_create("sockstat/TCP", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_tcp, "inuse", &sockstat_root.tcp_inuse); - arl_expect(arl_tcp, "orphan", &sockstat_root.tcp_orphan); - arl_expect(arl_tcp, "tw", &sockstat_root.tcp_tw); - arl_expect(arl_tcp, "alloc", &sockstat_root.tcp_alloc); - arl_expect(arl_tcp, "mem", &sockstat_root.tcp_mem); - - arl_udp = arl_create("sockstat/UDP", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_udp, "inuse", &sockstat_root.udp_inuse); - arl_expect(arl_udp, "mem", &sockstat_root.udp_mem); - - arl_udplite = arl_create("sockstat/UDPLITE", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_udplite, "inuse", &sockstat_root.udplite_inuse); - - arl_raw = arl_create("sockstat/RAW", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_raw, "inuse", &sockstat_root.raw_inuse); - - arl_frag = arl_create("sockstat/FRAG", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_frag, "inuse", &sockstat_root.frag_inuse); - arl_expect(arl_frag, "memory", &sockstat_root.frag_memory); - - hash_sockets = simple_hash("sockets"); - hash_tcp = simple_hash("TCP"); - hash_udp = simple_hash("UDP"); - hash_udplite = simple_hash("UDPLITE"); - hash_raw = simple_hash("RAW"); - hash_frag = simple_hash("FRAG"); - - keys[0] = "sockets"; hashes[0] = hash_sockets; bases[0] = arl_sockets; - keys[1] = "TCP"; hashes[1] = hash_tcp; bases[1] = arl_tcp; - keys[2] = "UDP"; hashes[2] = hash_udp; bases[2] = arl_udp; - keys[3] = "UDPLITE"; hashes[3] = hash_udplite; bases[3] = arl_udplite; - keys[4] = "RAW"; hashes[4] = hash_raw; bases[4] = arl_raw; - keys[5] = "FRAG"; hashes[5] = hash_frag; bases[5] = arl_frag; - keys[6] = NULL; // terminator - } - - update_constants_count += update_every; - if(unlikely(update_constants_count > update_constants_every)) { - read_tcp_max_orphans(); - read_tcp_mem(); - update_constants_count = 0; - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat"); - ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - char *key = procfile_lineword(ff, l, 0); - uint32_t hash = simple_hash(key); - - int k; - for(k = 0; keys[k] ; k++) { - if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 
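/* editor's note: the 32-bit simple_hash() of the section keyword is
   compared before strcmp(), so non-matching lines are rejected with a
   single integer comparison and the string compare only runs on a hash
   hit. */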
0)) { - // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words); - ARL_BASE *arl = bases[k]; - arl_begin(arl); - size_t w = 1; - - while(w + 1 < words) { - char *name = procfile_lineword(ff, l, w); w++; - char *value = procfile_lineword(ff, l, w); w++; - // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words); - if(unlikely(arl_check(arl, name, value) != 0)) - break; - } - - break; - } - } - } - - // ------------------------------------------------------------------------ - - if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.sockets_used)) { - do_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_used = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_sockets" - , NULL - , "sockets" - , NULL - , "IPv4 Sockets Used" - , "sockets" - , "proc" - , "net/sockstat" - , 2400 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_used = rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_used, (collected_number)sockstat_root.sockets_used); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat_root.tcp_inuse || sockstat_root.tcp_orphan || sockstat_root.tcp_tw || sockstat_root.tcp_alloc))) { - do_tcp_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL, - *rd_orphan = NULL, - *rd_timewait = NULL, - *rd_alloc = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_tcp_sockets" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Sockets" - , "sockets" - , "proc" - , "net/sockstat" - , 2500 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_alloc = rrddim_add(st, "alloc", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_orphan = rrddim_add(st, "orphan", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_timewait = rrddim_add(st, "timewait", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.tcp_inuse); - rrddim_set_by_pointer(st, rd_orphan, (collected_number)sockstat_root.tcp_orphan); - rrddim_set_by_pointer(st, rd_timewait, (collected_number)sockstat_root.tcp_tw); - rrddim_set_by_pointer(st, rd_alloc, (collected_number)sockstat_root.tcp_alloc); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.tcp_mem)) { - do_tcp_mem = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_mem = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_tcp_mem" - , NULL - , "tcp" - , NULL - , "IPv4 TCP Sockets Memory" - , "KB" - , "proc" - , "net/sockstat" - , 4000 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.tcp_mem); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_inuse)) { - do_udp_sockets = CONFIG_BOOLEAN_YES; 
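For reference, the /proc/net/sockstat format parsed above is a section
keyword followed by name/value pairs, e.g. "sockets: used 230" or
"TCP: inuse 12 orphan 0 tw 5 alloc 17 mem 3". A minimal standalone sketch
of the same name/value walk, without netdata's procfile and ARL helpers
(parse_sockstat_line() is an illustrative name, not a netdata function):

#include <stdio.h>
#include <string.h>

/* print the name/value pairs of one sockstat section, e.g. "TCP" */
static void parse_sockstat_line(char *line, const char *section) {
    char *save = NULL;
    char *key = strtok_r(line, " \t:", &save);        /* section keyword */
    if(!key || strcmp(key, section) != 0) return;

    for(;;) {
        char *name  = strtok_r(NULL, " \t",   &save); /* metric name */
        char *value = strtok_r(NULL, " \t\n", &save); /* metric value */
        if(!name || !value) break;
        printf("%s.%s = %s\n", section, name, value);
    }
}

int main(void) {
    FILE *fp = fopen("/proc/net/sockstat", "r");
    if(!fp) return 1;

    char line[256];
    while(fgets(line, sizeof(line), fp))
        parse_sockstat_line(line, "TCP");

    fclose(fp);
    return 0;
}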
- - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_udp_sockets" - , NULL - , "udp" - , NULL - , "IPv4 UDP Sockets" - , "sockets" - , "proc" - , "net/sockstat" - , 2600 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udp_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_mem)) { - do_udp_mem = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_mem = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_udp_mem" - , NULL - , "udp" - , NULL - , "IPv4 UDP Sockets Memory" - , "KB" - , "proc" - , "net/sockstat" - , 2603 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.udp_mem); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udplite_inuse)) { - do_udplite_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_udplite_sockets" - , NULL - , "udplite" - , NULL - , "IPv4 UDPLITE Sockets" - , "sockets" - , "proc" - , "net/sockstat" - , 2602 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udplite_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.raw_inuse)) { - do_raw_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_raw_sockets" - , NULL - , "raw" - , NULL - , "IPv4 RAW Sockets" - , "sockets" - , "proc" - , "net/sockstat" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.raw_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_inuse)) { - do_frag_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_frag_sockets" - , NULL - , "fragments" - , NULL - , "IPv4 FRAG Sockets" - , "fragments" - , "proc" - , "net/sockstat" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.frag_inuse); - rrdset_done(st); 
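The kernel reports TCP and UDP socket memory in pages but fragment memory
in bytes, which is why the "mem" dimensions above scale by
sysconf(_SC_PAGESIZE) with a 1024 divisor while the FRAG memory chart
below divides by 1024 alone. The arithmetic rrddim_add() encodes with its
multiplier/divisor pair is simply value * multiplier / divisor; a small
self-contained illustration (the sample values are made up):

#include <stdio.h>
#include <unistd.h>

int main(void) {
    long long tcp_mem_pages  = 300;    /* sample value, in pages */
    long long frag_mem_bytes = 262144; /* sample value, in bytes */
    long page = sysconf(_SC_PAGESIZE);

    /* multiplier = page size, divisor = 1024 -> pages to KiB */
    printf("tcp mem:  %lld KiB\n", tcp_mem_pages * page / 1024);

    /* multiplier = 1, divisor = 1024 -> bytes to KiB */
    printf("frag mem: %lld KiB\n", frag_mem_bytes / 1024);
    return 0;
}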
- } - - // ------------------------------------------------------------------------ - - if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_memory)) { - do_frag_mem = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_mem = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv4" - , "sockstat_frag_mem" - , NULL - , "fragments" - , NULL - , "IPv4 FRAG Sockets Memory" - , "KB" - , "proc" - , "net/sockstat" - , 3020 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_mem = rrddim_add(st, "mem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.frag_memory); - rrdset_done(st); - } - - return 0; -} - diff --git a/src/proc_net_sockstat6.c b/src/proc_net_sockstat6.c deleted file mode 100644 index 97175ccf7..000000000 --- a/src/proc_net_sockstat6.c +++ /dev/null @@ -1,269 +0,0 @@ -#include "common.h" - -static struct proc_net_sockstat6 { - kernel_uint_t tcp6_inuse; - kernel_uint_t udp6_inuse; - kernel_uint_t udplite6_inuse; - kernel_uint_t raw6_inuse; - kernel_uint_t frag6_inuse; -} sockstat6_root = { 0 }; - -int do_proc_net_sockstat6(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - - static uint32_t hash_raw = 0, - hash_frag = 0, - hash_tcp = 0, - hash_udp = 0, - hash_udplite = 0; - - static ARL_BASE *arl_tcp = NULL; - static ARL_BASE *arl_udp = NULL; - static ARL_BASE *arl_udplite = NULL; - static ARL_BASE *arl_raw = NULL; - static ARL_BASE *arl_frag = NULL; - - static int do_tcp_sockets = -1, do_udp_sockets = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1; - - static char *keys[6] = { NULL }; - static uint32_t hashes[6] = { 0 }; - static ARL_BASE *bases[6] = { NULL }; - - if(unlikely(!arl_tcp)) { - do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 TCP sockets", CONFIG_BOOLEAN_AUTO); - do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDP sockets", CONFIG_BOOLEAN_AUTO); - do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDPLITE sockets", CONFIG_BOOLEAN_AUTO); - do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 RAW sockets", CONFIG_BOOLEAN_AUTO); - do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 FRAG sockets", CONFIG_BOOLEAN_AUTO); - - arl_tcp = arl_create("sockstat6/TCP6", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_tcp, "inuse", &sockstat6_root.tcp6_inuse); - - arl_udp = arl_create("sockstat6/UDP6", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_udp, "inuse", &sockstat6_root.udp6_inuse); - - arl_udplite = arl_create("sockstat6/UDPLITE6", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_udplite, "inuse", &sockstat6_root.udplite6_inuse); - - arl_raw = arl_create("sockstat6/RAW6", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_raw, "inuse", &sockstat6_root.raw6_inuse); - - arl_frag = arl_create("sockstat6/FRAG6", arl_callback_str2kernel_uint_t, 60); - arl_expect(arl_frag, "inuse", &sockstat6_root.frag6_inuse); - - hash_tcp = simple_hash("TCP6"); - hash_udp = simple_hash("UDP6"); - hash_udplite = simple_hash("UDPLITE6"); - hash_raw = simple_hash("RAW6"); - hash_frag = simple_hash("FRAG6"); - - keys[0] = "TCP6"; hashes[0] = hash_tcp; bases[0] = arl_tcp; - keys[1] = "UDP6"; hashes[1] = hash_udp; bases[1] = arl_udp; - keys[2] = "UDPLITE6"; hashes[2] = hash_udplite; bases[2] 
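/* editor's note: unlike IPv4's sockstat, sockstat6 exposes only "inuse"
   per protocol (there are no IPv6 counterparts to TCP's orphan, tw,
   alloc and mem counters, nor to the "sockets: used" total), so each of
   the ARLs created above expects just that single value. */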
= arl_udplite; - keys[3] = "RAW6"; hashes[3] = hash_raw; bases[3] = arl_raw; - keys[4] = "FRAG6"; hashes[4] = hash_frag; bases[4] = arl_frag; - keys[5] = NULL; // terminator - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat6"); - ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - char *key = procfile_lineword(ff, l, 0); - uint32_t hash = simple_hash(key); - - int k; - for(k = 0; keys[k] ; k++) { - if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) { - // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words); - ARL_BASE *arl = bases[k]; - arl_begin(arl); - size_t w = 1; - - while(w + 1 < words) { - char *name = procfile_lineword(ff, l, w); w++; - char *value = procfile_lineword(ff, l, w); w++; - // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words); - if(unlikely(arl_check(arl, name, value) != 0)) - break; - } - - break; - } - } - } - - // ------------------------------------------------------------------------ - - if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat6_root.tcp6_inuse))) { - do_tcp_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "sockstat6_tcp_sockets" - , NULL - , "tcp6" - , NULL - , "IPv6 TCP Sockets" - , "sockets" - , "proc" - , "net/sockstat6" - , 3599 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.tcp6_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udp6_inuse)) { - do_udp_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "sockstat6_udp_sockets" - , NULL - , "udp6" - , NULL - , "IPv6 UDP Sockets" - , "sockets" - , "proc" - , "net/sockstat6" - , 3600 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udp6_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udplite6_inuse)) { - do_udplite_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "sockstat6_udplite_sockets" - , NULL - , "udplite6" - , NULL - , "IPv6 UDPLITE Sockets" - , "sockets" - , "proc" - , "net/sockstat6" - , 3601 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else 
rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udplite6_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.raw6_inuse)) { - do_raw_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "sockstat6_raw_sockets" - , NULL - , "raw6" - , NULL - , "IPv6 RAW Sockets" - , "sockets" - , "proc" - , "net/sockstat6" - , 3700 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.raw6_inuse); - rrdset_done(st); - } - - // ------------------------------------------------------------------------ - - if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.frag6_inuse)) { - do_frag_sockets = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - static RRDDIM *rd_inuse = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "ipv6" - , "sockstat6_frag_sockets" - , NULL - , "fragments6" - , NULL - , "IPv6 FRAG Sockets" - , "fragments" - , "proc" - , "net/sockstat6" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.frag6_inuse); - rrdset_done(st); - } - - return 0; -} diff --git a/src/proc_net_softnet_stat.c b/src/proc_net_softnet_stat.c deleted file mode 100644 index f3c117e16..000000000 --- a/src/proc_net_softnet_stat.c +++ /dev/null @@ -1,147 +0,0 @@ -#include "common.h" - -static inline char *softnet_column_name(size_t column) { - switch(column) { - // https://github.com/torvalds/linux/blob/a7fd20d1c476af4563e66865213474a2f9f473a4/net/core/net-procfs.c#L161-L166 - case 0: return "processed"; - case 1: return "dropped"; - case 2: return "squeezed"; - case 9: return "received_rps"; - case 10: return "flow_limit_count"; - default: return NULL; - } -} - -int do_proc_net_softnet_stat(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - static int do_per_core = -1; - static size_t allocated_lines = 0, allocated_columns = 0; - static uint32_t *data = NULL; - - if(unlikely(do_per_core == -1)) do_per_core = config_get_boolean("plugin:proc:/proc/net/softnet_stat", "softnet_stat per core", 1); - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/softnet_stat"); - ff = procfile_open(config_get("plugin:proc:/proc/net/softnet_stat", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - size_t words = procfile_linewords(ff, 0), w; - - if(unlikely(!lines || !words)) { - error("Cannot read /proc/net/softnet_stat, %zu lines and %zu columns reported.", lines, words); - return 1; - } - - if(unlikely(lines > 200)) lines = 200; - if(unlikely(words > 50)) words = 50; - - if(unlikely(!data || lines > allocated_lines || words > allocated_columns)) { - freez(data); - allocated_lines = lines; - allocated_columns 
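/* editor's note: the buffer allocated below holds (lines + 1) rows of
   allocated_columns 32-bit counters: row 0 accumulates per-column totals
   across all CPUs, and rows 1..lines keep the per-CPU values, which is
   why the per-core charts index it as ((l + 1) * allocated_columns) + w.
   The values in /proc/net/softnet_stat are hexadecimal, hence
   strtoul(..., NULL, 16). */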
= words; - data = mallocz((allocated_lines + 1) * allocated_columns * sizeof(uint32_t)); - } - - // initialize to zero - memset(data, 0, (allocated_lines + 1) * allocated_columns * sizeof(uint32_t)); - - // parse the values - for(l = 0; l < lines ;l++) { - words = procfile_linewords(ff, l); - if(unlikely(!words)) continue; - - if(unlikely(words > allocated_columns)) - words = allocated_columns; - - for(w = 0; w < words ; w++) { - if(unlikely(softnet_column_name(w))) { - uint32_t t = (uint32_t)strtoul(procfile_lineword(ff, l, w), NULL, 16); - data[w] += t; - data[((l + 1) * allocated_columns) + w] = t; - } - } - } - - if(unlikely(data[(lines * allocated_columns)] == 0)) - lines--; - - RRDSET *st; - - // -------------------------------------------------------------------- - - st = rrdset_find_bytype_localhost("system", "softnet_stat"); - if(unlikely(!st)) { - st = rrdset_create_localhost( - "system" - , "softnet_stat" - , NULL - , "softnet_stat" - , "system.softnet_stat" - , "System softnet_stat" - , "events/s" - , "proc" - , "net/softnet_stat" - , 955 - , update_every - , RRDSET_TYPE_LINE - ); - for(w = 0; w < allocated_columns ;w++) - if(unlikely(softnet_column_name(w))) - rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - for(w = 0; w < allocated_columns ;w++) - if(unlikely(softnet_column_name(w))) - rrddim_set(st, softnet_column_name(w), data[w]); - - rrdset_done(st); - - if(do_per_core) { - for(l = 0; l < lines ;l++) { - char id[50+1]; - snprintfz(id, 50, "cpu%zu_softnet_stat", l); - - st = rrdset_find_bytype_localhost("cpu", id); - if(unlikely(!st)) { - char title[100+1]; - snprintfz(title, 100, "CPU%zu softnet_stat", l); - - st = rrdset_create_localhost( - "cpu" - , id - , NULL - , "softnet_stat" - , "cpu.softnet_stat" - , title - , "events/s" - , "proc" - , "net/softnet_stat" - , 4101 + l - , update_every - , RRDSET_TYPE_LINE - ); - for(w = 0; w < allocated_columns ;w++) - if(unlikely(softnet_column_name(w))) - rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - for(w = 0; w < allocated_columns ;w++) - if(unlikely(softnet_column_name(w))) - rrddim_set(st, softnet_column_name(w), data[((l + 1) * allocated_columns) + w]); - - rrdset_done(st); - } - } - - return 0; -} diff --git a/src/proc_net_stat_conntrack.c b/src/proc_net_stat_conntrack.c deleted file mode 100644 index 363fbc199..000000000 --- a/src/proc_net_stat_conntrack.c +++ /dev/null @@ -1,348 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_NET_STAT_NETFILTER "netfilter" -#define RRD_TYPE_NET_STAT_CONNTRACK "conntrack" - -int do_proc_net_stat_conntrack(int update_every, usec_t dt) { - static procfile *ff = NULL; - static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1; - static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0; - static int read_full = 1; - static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename; - static RRDVAR *rrdvar_max = NULL; - - unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0, - ainsert = 0, ainsert_failed = 0, adrop = 0, aearly_drop = 0, aicmp_error = 0, aexpect_new = 0, aexpect_create = 0, aexpect_delete = 0, asearch_restart = 0; - - if(unlikely(do_sockets == -1)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/nf_conntrack"); 
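The initialization that follows prefers the per-CPU statistics file and,
when it cannot be opened, falls back to reading the single-valued
nf_conntrack_count sysctl, so at least the connections chart survives on
systems where the per-CPU file is unavailable. A hedged sketch of such a
single-number read, independent of netdata's read_single_number_file()
helper (read_number_file() is an illustrative name):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

/* read one unsigned integer from a /proc or /sys file; 0 on success */
static int read_number_file(const char *path, unsigned long long *out) {
    char buf[64];
    int fd = open(path, O_RDONLY);
    if(fd == -1) return 1;

    ssize_t r = read(fd, buf, sizeof(buf) - 1);
    close(fd);
    if(r <= 0) return 1;

    buf[r] = '\0';
    *out = strtoull(buf, NULL, 10);
    return 0;
}

int main(void) {
    unsigned long long count = 0;
    if(read_number_file("/proc/sys/net/netfilter/nf_conntrack_count", &count) == 0)
        printf("tracked connections: %llu\n", count);
    return 0;
}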
- nf_conntrack_filename = config_get("plugin:proc:/proc/net/stat/nf_conntrack", "filename to monitor", filename); - - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_max"); - nf_conntrack_max_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "filename to monitor", filename); - usec_since_last_max = get_max_every = config_get_number("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "read every seconds", 10) * USEC_PER_SEC; - - read_full = 1; - ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT); - if(!ff) read_full = 0; - - do_new = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter new connections", read_full); - do_changes = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection changes", read_full); - do_expect = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection expectations", read_full); - do_search = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection searches", read_full); - do_errors = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter errors", read_full); - - do_sockets = 1; - if(!read_full) { - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_count"); - nf_conntrack_count_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count", "filename to monitor", filename); - - if(read_single_number_file(nf_conntrack_count_filename, &aentries)) - do_sockets = 0; - } - - do_sockets = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connections", do_sockets); - - if(!do_sockets && !read_full) - return 1; - - rrdvar_max = rrdvar_custom_host_variable_create(localhost, "netfilter.conntrack.max"); - } - - if(likely(read_full)) { - if(unlikely(!ff)) { - ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - for(l = 1; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 17)) { - if(unlikely(words)) error("Cannot read /proc/net/stat/nf_conntrack line. 
Expected 17 params, read %zu.", words); - continue; - } - - unsigned long long tentries = 0, tsearched = 0, tfound = 0, tnew = 0, tinvalid = 0, tignore = 0, tdelete = 0, tdelete_list = 0, tinsert = 0, tinsert_failed = 0, tdrop = 0, tearly_drop = 0, ticmp_error = 0, texpect_new = 0, texpect_create = 0, texpect_delete = 0, tsearch_restart = 0; - - tentries = strtoull(procfile_lineword(ff, l, 0), NULL, 16); - tsearched = strtoull(procfile_lineword(ff, l, 1), NULL, 16); - tfound = strtoull(procfile_lineword(ff, l, 2), NULL, 16); - tnew = strtoull(procfile_lineword(ff, l, 3), NULL, 16); - tinvalid = strtoull(procfile_lineword(ff, l, 4), NULL, 16); - tignore = strtoull(procfile_lineword(ff, l, 5), NULL, 16); - tdelete = strtoull(procfile_lineword(ff, l, 6), NULL, 16); - tdelete_list = strtoull(procfile_lineword(ff, l, 7), NULL, 16); - tinsert = strtoull(procfile_lineword(ff, l, 8), NULL, 16); - tinsert_failed = strtoull(procfile_lineword(ff, l, 9), NULL, 16); - tdrop = strtoull(procfile_lineword(ff, l, 10), NULL, 16); - tearly_drop = strtoull(procfile_lineword(ff, l, 11), NULL, 16); - ticmp_error = strtoull(procfile_lineword(ff, l, 12), NULL, 16); - texpect_new = strtoull(procfile_lineword(ff, l, 13), NULL, 16); - texpect_create = strtoull(procfile_lineword(ff, l, 14), NULL, 16); - texpect_delete = strtoull(procfile_lineword(ff, l, 15), NULL, 16); - tsearch_restart = strtoull(procfile_lineword(ff, l, 16), NULL, 16); - - if(unlikely(!aentries)) aentries = tentries; - - // sum all the cpus together - asearched += tsearched; // conntrack.search - afound += tfound; // conntrack.search - anew += tnew; // conntrack.new - ainvalid += tinvalid; // conntrack.new - aignore += tignore; // conntrack.new - adelete += tdelete; // conntrack.changes - adelete_list += tdelete_list; // conntrack.changes - ainsert += tinsert; // conntrack.changes - ainsert_failed += tinsert_failed; // conntrack.errors - adrop += tdrop; // conntrack.errors - aearly_drop += tearly_drop; // conntrack.errors - aicmp_error += ticmp_error; // conntrack.errors - aexpect_new += texpect_new; // conntrack.expect - aexpect_create += texpect_create; // conntrack.expect - aexpect_delete += texpect_delete; // conntrack.expect - asearch_restart += tsearch_restart; // conntrack.search - } - } - else { - if(unlikely(read_single_number_file(nf_conntrack_count_filename, &aentries))) - return 0; // we return 0, so that we will retry to open it next time - } - - usec_since_last_max += dt; - if(unlikely(rrdvar_max && usec_since_last_max >= get_max_every)) { - usec_since_last_max = 0; - - unsigned long long max; - if(likely(!read_single_number_file(nf_conntrack_max_filename, &max))) - rrdvar_custom_host_variable_set(localhost, rrdvar_max, max); - } - - // -------------------------------------------------------------------- - - if(do_sockets) { - static RRDSET *st = NULL; - static RRDDIM *rd_connections = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_CONNTRACK "_sockets" - , NULL - , RRD_TYPE_NET_STAT_CONNTRACK - , NULL - , "Connection Tracker Connections" - , "active connections" - , "proc" - , "net/stat/nf_conntrack" - , 3000 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_connections = rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_connections, aentries); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_new) { - static RRDSET *st = NULL; - 
static RRDDIM - *rd_new = NULL, - *rd_ignore = NULL, - *rd_invalid = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_CONNTRACK "_new" - , NULL - , RRD_TYPE_NET_STAT_CONNTRACK - , NULL - , "Connection Tracker New Connections" - , "connections/s" - , "proc" - , "net/stat/nf_conntrack" - , 3001 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_ignore = rrddim_add(st, "ignore", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_invalid = rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_new, anew); - rrddim_set_by_pointer(st, rd_ignore, aignore); - rrddim_set_by_pointer(st, rd_invalid, ainvalid); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_changes) { - static RRDSET *st = NULL; - static RRDDIM - *rd_inserted = NULL, - *rd_deleted = NULL, - *rd_delete_list = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_CONNTRACK "_changes" - , NULL - , RRD_TYPE_NET_STAT_CONNTRACK - , NULL - , "Connection Tracker Changes" - , "changes/s" - , "proc" - , "net/stat/nf_conntrack" - , 3002 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_delete_list = rrddim_add(st, "delete_list", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_inserted, ainsert); - rrddim_set_by_pointer(st, rd_deleted, adelete); - rrddim_set_by_pointer(st, rd_delete_list, adelete_list); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_expect) { - static RRDSET *st = NULL; - static RRDDIM *rd_created = NULL, - *rd_deleted = NULL, - *rd_new = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_CONNTRACK "_expect" - , NULL - , RRD_TYPE_NET_STAT_CONNTRACK - , NULL - , "Connection Tracker Expectations" - , "expectations/s" - , "proc" - , "net/stat/nf_conntrack" - , 3003 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_created, aexpect_create); - rrddim_set_by_pointer(st, rd_deleted, aexpect_delete); - rrddim_set_by_pointer(st, rd_new, aexpect_new); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_search) { - static RRDSET *st = NULL; - static RRDDIM *rd_searched = NULL, - *rd_restarted = NULL, - *rd_found = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_CONNTRACK "_search" - , NULL - , RRD_TYPE_NET_STAT_CONNTRACK - , NULL - , "Connection Tracker Searches" - , "searches/s" - , "proc" - , "net/stat/nf_conntrack" - , 3010 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_searched = rrddim_add(st, "searched", NULL, 1, 1, 
RRD_ALGORITHM_INCREMENTAL); - rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_found = rrddim_add(st, "found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_searched, asearched); - rrddim_set_by_pointer(st, rd_restarted, asearch_restart); - rrddim_set_by_pointer(st, rd_found, afound); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if(do_errors) { - static RRDSET *st = NULL; - static RRDDIM *rd_icmp_error = NULL, - *rd_insert_failed = NULL, - *rd_drop = NULL, - *rd_early_drop = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_CONNTRACK "_errors" - , NULL - , RRD_TYPE_NET_STAT_CONNTRACK - , NULL - , "Connection Tracker Errors" - , "events/s" - , "proc" - , "net/stat/nf_conntrack" - , 3005 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - - rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_drop = rrddim_add(st, "drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_early_drop = rrddim_add(st, "early_drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_icmp_error, aicmp_error); - rrddim_set_by_pointer(st, rd_insert_failed, ainsert_failed); - rrddim_set_by_pointer(st, rd_drop, adrop); - rrddim_set_by_pointer(st, rd_early_drop, aearly_drop); - rrdset_done(st); - } - - return 0; -} diff --git a/src/proc_net_stat_synproxy.c b/src/proc_net_stat_synproxy.c deleted file mode 100644 index 0d6f6ee03..000000000 --- a/src/proc_net_stat_synproxy.c +++ /dev/null @@ -1,181 +0,0 @@ -#include "common.h" - -#define RRD_TYPE_NET_STAT_NETFILTER "netfilter" -#define RRD_TYPE_NET_STAT_SYNPROXY "synproxy" - -int do_proc_net_stat_synproxy(int update_every, usec_t dt) { - (void)dt; - - static int do_entries = -1, do_cookies = -1, do_syns = -1, do_reopened = -1; - static procfile *ff = NULL; - - if(unlikely(do_entries == -1)) { - do_entries = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY entries", CONFIG_BOOLEAN_AUTO); - do_cookies = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY cookies", CONFIG_BOOLEAN_AUTO); - do_syns = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY SYN received", CONFIG_BOOLEAN_AUTO); - do_reopened = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY connections reopened", CONFIG_BOOLEAN_AUTO); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/synproxy"); - ff = procfile_open(config_get("plugin:proc:/proc/net/stat/synproxy", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) - return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - // make sure we have 3 lines - size_t lines = procfile_lines(ff), l; - if(unlikely(lines < 2)) { - error("/proc/net/stat/synproxy has %zu lines, expected no less than 2. 
Disabling it.", lines); - return 1; - } - - unsigned long long entries = 0, syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0; - - // synproxy gives its values per CPU - for(l = 1; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 6)) - continue; - - entries += strtoull(procfile_lineword(ff, l, 0), NULL, 16); - syn_received += strtoull(procfile_lineword(ff, l, 1), NULL, 16); - cookie_invalid += strtoull(procfile_lineword(ff, l, 2), NULL, 16); - cookie_valid += strtoull(procfile_lineword(ff, l, 3), NULL, 16); - cookie_retrans += strtoull(procfile_lineword(ff, l, 4), NULL, 16); - conn_reopened += strtoull(procfile_lineword(ff, l, 5), NULL, 16); - } - - unsigned long long events = entries + syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened; - - // -------------------------------------------------------------------- - - if((do_entries == CONFIG_BOOLEAN_AUTO && events) || do_entries == CONFIG_BOOLEAN_YES) { - do_entries = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_SYNPROXY "_entries" - , NULL - , RRD_TYPE_NET_STAT_SYNPROXY - , NULL - , "SYNPROXY Entries Used" - , "entries" - , "proc" - , "net/stat/synproxy" - , 3304 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "entries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set(st, "entries", entries); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if((do_syns == CONFIG_BOOLEAN_AUTO && events) || do_syns == CONFIG_BOOLEAN_YES) { - do_syns = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_SYNPROXY "_syn_received" - , NULL - , RRD_TYPE_NET_STAT_SYNPROXY - , NULL - , "SYNPROXY SYN Packets received" - , "SYN/s" - , "proc" - , "net/stat/synproxy" - , 3301 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "received", syn_received); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if((do_reopened == CONFIG_BOOLEAN_AUTO && events) || do_reopened == CONFIG_BOOLEAN_YES) { - do_reopened = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_SYNPROXY "_conn_reopened" - , NULL - , RRD_TYPE_NET_STAT_SYNPROXY - , NULL - , "SYNPROXY Connections Reopened" - , "connections/s" - , "proc" - , "net/stat/synproxy" - , 3303 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "reopened", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "reopened", conn_reopened); - rrdset_done(st); - } - - // -------------------------------------------------------------------- - - if((do_cookies == CONFIG_BOOLEAN_AUTO && events) || do_cookies == CONFIG_BOOLEAN_YES) { - do_cookies = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_SYNPROXY "_cookies" - , NULL - , RRD_TYPE_NET_STAT_SYNPROXY - , NULL - , "SYNPROXY TCP Cookies" - , "cookies/s" - , "proc" - , "net/stat/synproxy" - , 3302 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "valid", 
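/* editor's note: "valid" counts up while "invalid" below carries a -1
   multiplier, so the two cookie outcomes render on opposite sides of the
   zero line. As with the conntrack file, the synproxy counters are
   per-CPU hexadecimal values, summed across lines before charting. */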
NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(st, "retransmits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); - - rrddim_set(st, "valid", cookie_valid); - rrddim_set(st, "invalid", cookie_invalid); - rrddim_set(st, "retransmits", cookie_retrans); - rrdset_done(st); - } - - return 0; -} diff --git a/src/proc_self_mountinfo.c b/src/proc_self_mountinfo.c deleted file mode 100644 index 4ccdddff1..000000000 --- a/src/proc_self_mountinfo.c +++ /dev/null @@ -1,401 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// taken from gnulib/mountlist.c - -#ifndef ME_REMOTE -/* A file system is "remote" if its Fs_name contains a ':' - or if (it is of type (smbfs or cifs) and its Fs_name starts with '//') - or Fs_name is equal to "-hosts" (used by autofs to mount remote fs). */ -# define ME_REMOTE(Fs_name, Fs_type) \ - (strchr (Fs_name, ':') != NULL \ - || ((Fs_name)[0] == '/' \ - && (Fs_name)[1] == '/' \ - && (strcmp (Fs_type, "smbfs") == 0 \ - || strcmp (Fs_type, "cifs") == 0)) \ - || (strcmp("-hosts", Fs_name) == 0)) -#endif - -#define ME_DUMMY_0(Fs_name, Fs_type) \ - (strcmp (Fs_type, "autofs") == 0 \ - || strcmp (Fs_type, "proc") == 0 \ - || strcmp (Fs_type, "subfs") == 0 \ - /* for Linux 2.6/3.x */ \ - || strcmp (Fs_type, "debugfs") == 0 \ - || strcmp (Fs_type, "devpts") == 0 \ - || strcmp (Fs_type, "fusectl") == 0 \ - || strcmp (Fs_type, "mqueue") == 0 \ - || strcmp (Fs_type, "rpc_pipefs") == 0 \ - || strcmp (Fs_type, "sysfs") == 0 \ - /* FreeBSD, Linux 2.4 */ \ - || strcmp (Fs_type, "devfs") == 0 \ - /* for NetBSD 3.0 */ \ - || strcmp (Fs_type, "kernfs") == 0 \ - /* for Irix 6.5 */ \ - || strcmp (Fs_type, "ignore") == 0) - -/* Historically, we have marked as "dummy" any file system of type "none", - but now that programs like du need to know about bind-mounted directories, - we grant an exception to any with "bind" in its list of mount options. - I.e., those are *not* dummy entries. 
*/ -# define ME_DUMMY(Fs_name, Fs_type) \ - (ME_DUMMY_0 (Fs_name, Fs_type) || strcmp (Fs_type, "none") == 0) - -// ---------------------------------------------------------------------------- - -// find the mount info with the given major:minor -// in the supplied linked list of mountinfo structures -struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor) { - struct mountinfo *mi; - - for(mi = root; mi ; mi = mi->next) - if(unlikely(mi->major == major && mi->minor == minor)) - return mi; - - return NULL; -} - -// find the mount info with the given filesystem and mount_source -// in the supplied linked list of mountinfo structures -struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source) { - struct mountinfo *mi; - uint32_t filesystem_hash = simple_hash(filesystem), mount_source_hash = simple_hash(mount_source); - - for(mi = root; mi ; mi = mi->next) - if(unlikely(mi->filesystem - && mi->mount_source - && mi->filesystem_hash == filesystem_hash - && mi->mount_source_hash == mount_source_hash - && !strcmp(mi->filesystem, filesystem) - && !strcmp(mi->mount_source, mount_source))) - return mi; - - return NULL; -} - -struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options) { - struct mountinfo *mi; - uint32_t filesystem_hash = simple_hash(filesystem); - - size_t solen = strlen(super_options); - - for(mi = root; mi ; mi = mi->next) - if(unlikely(mi->filesystem - && mi->super_options - && mi->filesystem_hash == filesystem_hash - && !strcmp(mi->filesystem, filesystem))) { - - // super_options is a comma separated list - char *s = mi->super_options, *e; - while(*s) { - e = s + 1; - while(*e && *e != ',') e++; - - size_t len = e - s; - if(unlikely(len == solen && !strncmp(s, super_options, len))) - return mi; - - if(*e == ',') s = ++e; - else s = e; - } - } - - return NULL; -} - -static void mountinfo_free(struct mountinfo *mi) { - freez(mi->root); - freez(mi->mount_point); - freez(mi->mount_options); - freez(mi->persistent_id); -/* - if(mi->optional_fields_count) { - int i; - for(i = 0; i < mi->optional_fields_count ; i++) - free(*mi->optional_fields[i]); - } - free(mi->optional_fields); -*/ - freez(mi->filesystem); - freez(mi->mount_source); - freez(mi->super_options); - freez(mi); -} - -// free a linked list of mountinfo structures -void mountinfo_free_all(struct mountinfo *mi) { - while(mi) { - struct mountinfo *t = mi; - mi = mi->next; - - mountinfo_free(t); - } -} - -static char *strdupz_decoding_octal(const char *string) { - char *buffer = strdupz(string); - - char *d = buffer; - const char *s = string; - - while(*s) { - if(unlikely(*s == '\\')) { - s++; - if(likely(isdigit(*s) && isdigit(s[1]) && isdigit(s[2]))) { - char c = *s++ - '0'; - c <<= 3; - c |= *s++ - '0'; - c <<= 3; - c |= *s++ - '0'; - *d++ = c; - } - else *d++ = '_'; - } - else *d++ = *s++; - } - *d = '\0'; - - return buffer; -} - -static inline int is_read_only(const char *s) { - if(!s) return 0; - - size_t len = strlen(s); - if(len < 2) return 0; - if(len == 2) { - if(!strcmp(s, "ro")) return 1; - return 0; - } - if(!strncmp(s, "ro,", 3)) return 1; - if(!strncmp(&s[len - 3], ",ro", 3)) return 1; - if(strstr(s, ",ro,")) return 1; - return 0; -} - -// read the whole mountinfo into a linked list -struct mountinfo *mountinfo_read(int do_statvfs) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, 
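/* editor's note: mount points containing spaces are octal-escaped in
   this file (a space appears as \040), which is what
   strdupz_decoding_octal() above reverses. If /proc/self/mountinfo
   cannot be opened, the code below retries with PID 1's view of the
   mount table, a fallback that can matter in containerized setups where
   /proc is read through netdata_configured_host_prefix. */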
"%s/proc/self/mountinfo", netdata_configured_host_prefix); - procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix); - ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return NULL; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return NULL; - - struct mountinfo *root = NULL, *last = NULL, *mi = NULL; - - unsigned long l, lines = procfile_lines(ff); - for(l = 0; l < lines ;l++) { - if(unlikely(procfile_linewords(ff, l) < 5)) - continue; - - mi = mallocz(sizeof(struct mountinfo)); - - unsigned long w = 0; - mi->id = str2ul(procfile_lineword(ff, l, w)); w++; - mi->parentid = str2ul(procfile_lineword(ff, l, w)); w++; - - char *major = procfile_lineword(ff, l, w), *minor; w++; - for(minor = major; *minor && *minor != ':' ;minor++) ; - - if(unlikely(!*minor)) { - error("Cannot parse major:minor on '%s' at line %lu of '%s'", major, l + 1, filename); - freez(mi); - continue; - } - - *minor = '\0'; - minor++; - - mi->flags = 0; - - mi->major = str2ul(major); - mi->minor = str2ul(minor); - - mi->root = strdupz(procfile_lineword(ff, l, w)); w++; - mi->root_hash = simple_hash(mi->root); - - mi->mount_point = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++; - mi->mount_point_hash = simple_hash(mi->mount_point); - - mi->persistent_id = strdupz(mi->mount_point); - netdata_fix_chart_id(mi->persistent_id); - mi->persistent_id_hash = simple_hash(mi->persistent_id); - - mi->mount_options = strdupz(procfile_lineword(ff, l, w)); w++; - - if(unlikely(is_read_only(mi->mount_options))) - mi->flags |= MOUNTINFO_READONLY; - - // count the optional fields -/* - unsigned long wo = w; -*/ - mi->optional_fields_count = 0; - char *s = procfile_lineword(ff, l, w); - while(*s && *s != '-') { - w++; - s = procfile_lineword(ff, l, w); - mi->optional_fields_count++; - } - -/* - if(unlikely(mi->optional_fields_count)) { - // we have some optional fields - // read them into a new array of pointers; - - mi->optional_fields = mallocz(mi->optional_fields_count * sizeof(char *)); - - int i; - for(i = 0; i < mi->optional_fields_count ; i++) { - *mi->optional_fields[wo] = strdupz(procfile_lineword(ff, l, w)); - wo++; - } - } - else - mi->optional_fields = NULL; -*/ - - if(likely(*s == '-')) { - w++; - - mi->filesystem = strdupz(procfile_lineword(ff, l, w)); w++; - mi->filesystem_hash = simple_hash(mi->filesystem); - - mi->mount_source = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++; - mi->mount_source_hash = simple_hash(mi->mount_source); - - mi->super_options = strdupz(procfile_lineword(ff, l, w)); w++; - - if(unlikely(is_read_only(mi->super_options))) - mi->flags |= MOUNTINFO_READONLY; - - if(unlikely(ME_DUMMY(mi->mount_source, mi->filesystem))) - mi->flags |= MOUNTINFO_IS_DUMMY; - - if(unlikely(ME_REMOTE(mi->mount_source, mi->filesystem))) - mi->flags |= MOUNTINFO_IS_REMOTE; - - // mark as BIND the duplicates (i.e. 
same filesystem + same source) - if(do_statvfs) { - struct stat buf; - if(unlikely(stat(mi->mount_point, &buf) == -1)) { - mi->st_dev = 0; - mi->flags |= MOUNTINFO_NO_STAT; - } - else { - mi->st_dev = buf.st_dev; - - struct mountinfo *mt; - for(mt = root; mt; mt = mt->next) { - if(unlikely(mt->st_dev == mi->st_dev && !(mt->flags & MOUNTINFO_IS_SAME_DEV))) { - if(strlen(mi->mount_point) < strlen(mt->mount_point)) - mt->flags |= MOUNTINFO_IS_SAME_DEV; - else - mi->flags |= MOUNTINFO_IS_SAME_DEV; - } - } - } - } - else { - mi->st_dev = 0; - } - } - else { - mi->filesystem = NULL; - mi->filesystem_hash = 0; - - mi->mount_source = NULL; - mi->mount_source_hash = 0; - - mi->super_options = NULL; - - mi->st_dev = 0; - } - - // check if it has size - if(do_statvfs && !(mi->flags & MOUNTINFO_IS_DUMMY)) { - struct statvfs buff_statvfs; - if(unlikely(statvfs(mi->mount_point, &buff_statvfs) < 0)) { - mi->flags |= MOUNTINFO_NO_STAT; - } - else if(unlikely(!buff_statvfs.f_blocks /* || !buff_statvfs.f_files */)) { - mi->flags |= MOUNTINFO_NO_SIZE; - } - } - - // link it - if(unlikely(!root)) - root = mi; - else - last->next = mi; - - last = mi; - mi->next = NULL; - -/* -#ifdef NETDATA_INTERNAL_CHECKS - fprintf(stderr, "MOUNTINFO: %ld %ld %lu:%lu root '%s', persistent id '%s', mount point '%s', mount options '%s', filesystem '%s', mount source '%s', super options '%s'%s%s%s%s%s%s\n", - mi->id, - mi->parentid, - mi->major, - mi->minor, - mi->root, - mi->persistent_id, - (mi->mount_point)?mi->mount_point:"", - (mi->mount_options)?mi->mount_options:"", - (mi->filesystem)?mi->filesystem:"", - (mi->mount_source)?mi->mount_source:"", - (mi->super_options)?mi->super_options:"", - (mi->flags & MOUNTINFO_IS_DUMMY)?" DUMMY":"", - (mi->flags & MOUNTINFO_IS_BIND)?" BIND":"", - (mi->flags & MOUNTINFO_IS_REMOTE)?" REMOTE":"", - (mi->flags & MOUNTINFO_NO_STAT)?" NOSTAT":"", - (mi->flags & MOUNTINFO_NO_SIZE)?" NOSIZE":"", - (mi->flags & MOUNTINFO_IS_SAME_DEV)?" SAMEDEV":"" - ); -#endif -*/ - } - -/* find if the mount options have "bind" in them - { - FILE *fp = setmntent(MOUNTED, "r"); - if (fp != NULL) { - struct mntent mntbuf; - struct mntent *mnt; - char buf[4096 + 1]; - - while ((mnt = getmntent_r(fp, &mntbuf, buf, 4096))) { - char *bind = hasmntopt(mnt, "bind"); - if(unlikely(bind)) { - struct mountinfo *mi; - for(mi = root; mi ; mi = mi->next) { - if(unlikely(strcmp(mnt->mnt_dir, mi->mount_point) == 0)) { - fprintf(stderr, "Mount point '%s' is BIND\n", mi->mount_point); - mi->flags |= MOUNTINFO_IS_BIND; - break; - } - } - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(!mi)) { - error("Mount point '%s' not found in /proc/self/mountinfo", mnt->mnt_dir); - } -#endif - } - } - endmntent(fp); - } - } -*/ - - procfile_close(ff); - return root; -} diff --git a/src/proc_self_mountinfo.h b/src/proc_self_mountinfo.h deleted file mode 100644 index a8d337539..000000000 --- a/src/proc_self_mountinfo.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef NETDATA_PROC_SELF_MOUNTINFO_H -#define NETDATA_PROC_SELF_MOUNTINFO_H 1 - -#define MOUNTINFO_IS_DUMMY 0x00000001 -#define MOUNTINFO_IS_REMOTE 0x00000002 -#define MOUNTINFO_IS_BIND 0x00000004 -#define MOUNTINFO_IS_SAME_DEV 0x00000008 -#define MOUNTINFO_NO_STAT 0x00000010 -#define MOUNTINFO_NO_SIZE 0x00000020 -#define MOUNTINFO_READONLY 0x00000040 - -struct mountinfo { - long id; // mount ID: unique identifier of the mount (may be reused after umount(2)). - long parentid; // parent ID: ID of parent mount (or of self for the top of the mount tree). 
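/* editor's note: this pair is the device number of the mounted
   filesystem, i.e. major(st_dev)/minor(st_dev) as reported by stat(2),
   and is the key mountinfo_find() matches against */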
- unsigned long major; // major:minor: value of st_dev for files on filesystem (see stat(2)). - unsigned long minor; - - char *persistent_id; // a calculated persistent id for the mount point - uint32_t persistent_id_hash; - - char *root; // root: root of the mount within the filesystem. - uint32_t root_hash; - - char *mount_point; // mount point: mount point relative to the process's root. - uint32_t mount_point_hash; - - char *mount_options; // mount options: per-mount options. - - int optional_fields_count; -/* - char ***optional_fields; // optional fields: zero or more fields of the form "tag[:value]". -*/ - char *filesystem; // filesystem type: name of filesystem in the form "type[.subtype]". - uint32_t filesystem_hash; - - char *mount_source; // mount source: filesystem-specific information or "none". - uint32_t mount_source_hash; - - char *super_options; // super options: per-superblock options. - - uint32_t flags; - - dev_t st_dev; // id of device as given by stat() - - struct mountinfo *next; -}; - -extern struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor); -extern struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source); -extern struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options); - -extern void mountinfo_free_all(struct mountinfo *mi); -extern struct mountinfo *mountinfo_read(int do_statvfs); - -#endif /* NETDATA_PROC_SELF_MOUNTINFO_H */ \ No newline at end of file diff --git a/src/proc_softirqs.c b/src/proc_softirqs.c deleted file mode 100644 index cd7440b00..000000000 --- a/src/proc_softirqs.c +++ /dev/null @@ -1,249 +0,0 @@ -#include "common.h" - -#define MAX_INTERRUPT_NAME 50 - -struct cpu_interrupt { - unsigned long long value; - RRDDIM *rd; -}; - -struct interrupt { - int used; - char *id; - char name[MAX_INTERRUPT_NAME + 1]; - RRDDIM *rd; - unsigned long long total; - struct cpu_interrupt cpu[]; -}; - -// since each interrupt is variable in size -// we use this to calculate its record size -#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt))) - -// given a base, get a pointer to each record -#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)]) - -static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) { - static struct interrupt *irrs = NULL; - static size_t allocated = 0; - - if(unlikely(lines != allocated)) { - uint32_t l; - int c; - - irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus)); - - // reset all interrupt RRDDIM pointers as any line could have shifted - for(l = 0; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - irr->rd = NULL; - irr->name[0] = '\0'; - for(c = 0; c < cpus ;c++) - irr->cpu[c].rd = NULL; - } - - allocated = lines; - } - - return irrs; -} - -int do_proc_softirqs(int update_every, usec_t dt) { - (void)dt; - static procfile *ff = NULL; - static int cpus = -1, do_per_core = -1; - struct interrupt *irrs = NULL; - - if(unlikely(do_per_core == -1)) do_per_core = config_get_boolean("plugin:proc:/proc/softirqs", "interrupts per core", 1); - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/softirqs"); - ff = procfile_open(config_get("plugin:proc:/proc/softirqs", "filename to monitor", filename), " \t", 
PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - size_t words = procfile_linewords(ff, 0); - - if(unlikely(!lines)) { - error("Cannot read /proc/softirqs, zero lines reported."); - return 1; - } - - // find how many CPUs are there - if(unlikely(cpus == -1)) { - uint32_t w; - cpus = 0; - for(w = 0; w < words ; w++) { - if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0)) - cpus++; - } - } - - if(unlikely(!cpus)) { - error("PLUGIN: PROC_SOFTIRQS: Cannot find the number of CPUs in /proc/softirqs"); - return 1; - } - - // allocate the size we need; - irrs = get_interrupts_array(lines, cpus); - irrs[0].used = 0; - - // loop through all lines - for(l = 1; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - irr->used = 0; - irr->total = 0; - - words = procfile_linewords(ff, l); - if(unlikely(!words)) continue; - - irr->id = procfile_lineword(ff, l, 0); - if(unlikely(!irr->id || !irr->id[0])) continue; - - size_t idlen = strlen(irr->id); - if(unlikely(idlen && irr->id[idlen - 1] == ':')) - irr->id[idlen - 1] = '\0'; - - int c; - for(c = 0; c < cpus ;c++) { - if(likely((c + 1) < (int)words)) - irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1))); - else - irr->cpu[c].value = 0; - - irr->total += irr->cpu[c].value; - } - - strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME); - - irr->used = 1; - } - - // -------------------------------------------------------------------- - - static RRDSET *st_system_softirqs = NULL; - if(unlikely(!st_system_softirqs)) - st_system_softirqs = rrdset_create_localhost( - "system" - , "softirqs" - , NULL - , "softirqs" - , NULL - , "System softirqs" - , "softirqs/s" - , "proc" - , "softirqs" - , 950 - , update_every - , RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_system_softirqs); - - for(l = 0; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - - if(unlikely(!irr->used)) continue; - - // some interrupt may have changed without changing the total number of lines - // if the same number of interrupts have been added and removed between two - // calls of this function. 
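        // Added annotation, with illustrative values that are not from the
        // original source: /proc/softirqs is laid out as
        //
        //                     CPU0       CPU1
        //           HI:          1          0
        //        TIMER:    3742311    3737230
        //       NET_RX:      81234      54321
        //
        // Word 0 of each data line is the softirq name with a trailing ':'
        // (stripped above); words 1..cpus are the per-CPU counters that are
        // summed into irr->total.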
- if(unlikely(!irr->rd || strncmp(irr->name, irr->rd->name, MAX_INTERRUPT_NAME) != 0)) { - irr->rd = rrddim_find(st_system_softirqs, irr->id); - - if(unlikely(!irr->rd)) - irr->rd = rrddim_add(st_system_softirqs, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); - else - rrddim_set_name(st_system_softirqs, irr->rd, irr->name); - - // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop - if(likely(do_per_core)) { - int c; - for (c = 0; c < cpus ;c++) irr->cpu[c].rd = NULL; - } - } - - rrddim_set_by_pointer(st_system_softirqs, irr->rd, irr->total); - } - - rrdset_done(st_system_softirqs); - - // -------------------------------------------------------------------- - - if(do_per_core) { - static RRDSET **core_st = NULL; - static int old_cpus = 0; - - if(old_cpus < cpus) { - core_st = reallocz(core_st, sizeof(RRDSET *) * cpus); - memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus)); - old_cpus = cpus; - } - - int c; - - for(c = 0; c < cpus ; c++) { - if(unlikely(!core_st[c])) { - // find if everything is just zero - unsigned long long core_sum = 0; - - for (l = 0; l < lines; l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - if (unlikely(!irr->used)) continue; - core_sum += irr->cpu[c].value; - } - - if (unlikely(core_sum == 0)) continue; // try next core - - char id[50 + 1]; - snprintfz(id, 50, "cpu%d_softirqs", c); - - char title[100 + 1]; - snprintfz(title, 100, "CPU%d softirqs", c); - - core_st[c] = rrdset_create_localhost( - "cpu" - , id - , NULL - , "softirqs" - , "cpu.softirqs" - , title - , "softirqs/s" - , "proc" - , "softirqs" - , 3000 + c - , update_every - , RRDSET_TYPE_STACKED - ); - } - else - rrdset_next(core_st[c]); - - for(l = 0; l < lines ;l++) { - struct interrupt *irr = irrindex(irrs, l, cpus); - - if(unlikely(!irr->used)) continue; - - if(unlikely(!irr->cpu[c].rd)) { - irr->cpu[c].rd = rrddim_find(core_st[c], irr->id); - - if(unlikely(!irr->cpu[c].rd)) - irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL); - else - rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name); - } - - rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value); - } - - rrdset_done(core_st[c]); - } - } - - return 0; -} diff --git a/src/proc_spl_kstat_zfs.c b/src/proc_spl_kstat_zfs.c deleted file mode 100644 index 9d489d8e4..000000000 --- a/src/proc_spl_kstat_zfs.c +++ /dev/null @@ -1,153 +0,0 @@ -#include "common.h" -#include "zfs_common.h" - -#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats" - -extern struct arcstats arcstats; - -int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - static ARL_BASE *arl_base = NULL; - - arcstats.l2exist = -1; - - if(unlikely(!arl_base)) { - arl_base = arl_create("arcstats", NULL, 60); - - arl_expect(arl_base, "hits", &arcstats.hits); - arl_expect(arl_base, "misses", &arcstats.misses); - arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits); - arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses); - arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits); - arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses); - arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits); - arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses); - arl_expect(arl_base, "prefetch_metadata_hits", &arcstats.prefetch_metadata_hits); - arl_expect(arl_base, "prefetch_metadata_misses", 
&arcstats.prefetch_metadata_misses); - arl_expect(arl_base, "mru_hits", &arcstats.mru_hits); - arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits); - arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits); - arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits); - arl_expect(arl_base, "deleted", &arcstats.deleted); - arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss); - arl_expect(arl_base, "evict_skip", &arcstats.evict_skip); - arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough); - arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached); - arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible); - arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible); - arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip); - arl_expect(arl_base, "hash_elements", &arcstats.hash_elements); - arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max); - arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions); - arl_expect(arl_base, "hash_chains", &arcstats.hash_chains); - arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max); - arl_expect(arl_base, "p", &arcstats.p); - arl_expect(arl_base, "c", &arcstats.c); - arl_expect(arl_base, "c_min", &arcstats.c_min); - arl_expect(arl_base, "c_max", &arcstats.c_max); - arl_expect(arl_base, "size", &arcstats.size); - arl_expect(arl_base, "hdr_size", &arcstats.hdr_size); - arl_expect(arl_base, "data_size", &arcstats.data_size); - arl_expect(arl_base, "metadata_size", &arcstats.metadata_size); - arl_expect(arl_base, "other_size", &arcstats.other_size); - arl_expect(arl_base, "anon_size", &arcstats.anon_size); - arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data); - arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata); - arl_expect(arl_base, "mru_size", &arcstats.mru_size); - arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data); - arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata); - arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size); - arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data); - arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata); - arl_expect(arl_base, "mfu_size", &arcstats.mfu_size); - arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data); - arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata); - arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size); - arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data); - arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata); - arl_expect(arl_base, "l2_hits", &arcstats.l2_hits); - arl_expect(arl_base, "l2_misses", &arcstats.l2_misses); - arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds); - arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash); - arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes); - arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes); - arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent); - arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done); - arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error); - arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry); - arl_expect(arl_base, "l2_evict_lock_retry", &arcstats.l2_evict_lock_retry); - arl_expect(arl_base, 
"l2_evict_reading", &arcstats.l2_evict_reading); - arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached); - arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write); - arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write); - arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem); - arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad); - arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error); - arl_expect(arl_base, "l2_size", &arcstats.l2_size); - arl_expect(arl_base, "l2_asize", &arcstats.l2_asize); - arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size); - arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes); - arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros); - arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures); - arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count); - arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers); - arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size); - arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads); - arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count); - arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count); - arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow); - arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve); - arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes); - arl_expect(arl_base, "arc_prune", &arcstats.arc_prune); - arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used); - arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit); - arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max); - arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min); - arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free); - arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS); - ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) - return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) - return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - arl_begin(arl_base); - - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 3)) { - if(unlikely(words)) error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. 
Expected 3 params, read %zu.", l, words); - continue; - } - - const char *key = procfile_lineword(ff, l, 0); - const char *value = procfile_lineword(ff, l, 2); - - if(unlikely(arcstats.l2exist == -1)) { - if(key[0] == 'l' && key[1] == '2' && key[2] == '_') - arcstats.l2exist = 1; - } - - if(unlikely(arl_check(arl_base, key, value))) break; - } - - if(unlikely(arcstats.l2exist == -1)) - arcstats.l2exist = 0; - - generate_charts_arcstats("proc", update_every); - generate_charts_arc_summary("proc", update_every); - - return 0; -} diff --git a/src/proc_stat.c b/src/proc_stat.c deleted file mode 100644 index d1aefb73e..000000000 --- a/src/proc_stat.c +++ /dev/null @@ -1,557 +0,0 @@ -#include "common.h" - -struct per_core_single_number_file { - char found:1; - const char *filename; - int fd; - collected_number value; - RRDDIM *rd; -}; - -#define CORE_THROTTLE_COUNT_INDEX 0 -#define PACKAGE_THROTTLE_COUNT_INDEX 1 -#define SCALING_CUR_FREQ_INDEX 2 -#define PER_CORE_FILES 3 - -struct cpu_chart { - const char *id; - - RRDSET *st; - RRDDIM *rd_user; - RRDDIM *rd_nice; - RRDDIM *rd_system; - RRDDIM *rd_idle; - RRDDIM *rd_iowait; - RRDDIM *rd_irq; - RRDDIM *rd_softirq; - RRDDIM *rd_steal; - RRDDIM *rd_guest; - RRDDIM *rd_guest_nice; - - struct per_core_single_number_file files[PER_CORE_FILES]; -}; - -static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES; - -static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) { - char buf[50 + 1]; - size_t x, files_read = 0, files_nonzero = 0; - - for(x = 0; x < len ; x++) { - struct per_core_single_number_file *f = &all_cpu_charts[x].files[index]; - - f->found = 0; - - if(unlikely(!f->filename)) - continue; - - if(unlikely(f->fd == -1)) { - f->fd = open(f->filename, O_RDONLY); - if (unlikely(f->fd == -1)) { - error("Cannot open file '%s'", f->filename); - continue; - } - } - - ssize_t ret = read(f->fd, buf, 50); - if(unlikely(ret < 0)) { - // cannot read that file - - error("Cannot read file '%s'", f->filename); - close(f->fd); - f->fd = -1; - continue; - } - else { - // successful read - - // terminate the buffer - buf[ret] = '\0'; - - if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) { - close(f->fd); - f->fd = -1; - } - else if(lseek(f->fd, 0, SEEK_SET) == -1) { - error("Cannot seek in file '%s'", f->filename); - close(f->fd); - f->fd = -1; - } - } - - files_read++; - f->found = 1; - - f->value = str2ll(buf, NULL); - // info("read '%s', parsed as " COLLECTED_NUMBER_FORMAT, buf, f->value); - if(likely(f->value != 0)) - files_nonzero++; - } - - if(files_read == 0) - return -1; - - if(files_nonzero == 0) - return 0; - - return (int)files_nonzero; -} - -static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index, RRDSET *st, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) { - size_t x; - for(x = 0; x < len ; x++) { - struct per_core_single_number_file *f = &all_cpu_charts[x].files[index]; - - if(unlikely(!f->found)) - continue; - - if(unlikely(!f->rd)) - f->rd = rrddim_add(st, all_cpu_charts[x].id, NULL, multiplier, divisor, algorithm); - - rrddim_set_by_pointer(st, f->rd, f->value); - } -} - -int do_proc_stat(int update_every, usec_t dt) { - (void)dt; - - static struct cpu_chart *all_cpu_charts = NULL; - static size_t all_cpu_charts_size = 0; - static procfile *ff = NULL; - static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, 
do_scaling_cur_freq = -1; - static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked; - static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL; - - if(unlikely(do_cpu == -1)) { - do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", CONFIG_BOOLEAN_YES); - do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", CONFIG_BOOLEAN_YES); - do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", CONFIG_BOOLEAN_YES); - do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", CONFIG_BOOLEAN_YES); - do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", CONFIG_BOOLEAN_YES); - do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", CONFIG_BOOLEAN_YES); - - // give sane defaults based on the number of processors - if(processors > 50) { - // the system has too many processors - keep_per_core_fds_open = CONFIG_BOOLEAN_NO; - do_core_throttle_count = CONFIG_BOOLEAN_NO; - do_package_throttle_count = CONFIG_BOOLEAN_NO; - do_scaling_cur_freq = CONFIG_BOOLEAN_NO; - } - else { - // the system has a reasonable number of processors - keep_per_core_fds_open = CONFIG_BOOLEAN_YES; - do_core_throttle_count = CONFIG_BOOLEAN_AUTO; - do_package_throttle_count = CONFIG_BOOLEAN_NO; - do_scaling_cur_freq = CONFIG_BOOLEAN_NO; - } - - keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open); - do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count); - do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count); - do_scaling_cur_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "scaling_cur_freq", do_scaling_cur_freq); - - hash_intr = simple_hash("intr"); - hash_ctxt = simple_hash("ctxt"); - hash_processes = simple_hash("processes"); - hash_procs_running = simple_hash("procs_running"); - hash_procs_blocked = simple_hash("procs_blocked"); - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count"); - core_throttle_count_filename = config_get("plugin:proc:/proc/stat", "core_throttle_count filename to monitor", filename); - - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count"); - package_throttle_count_filename = config_get("plugin:proc:/proc/stat", "package_throttle_count filename to monitor", filename); - - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq"); - scaling_cur_freq_filename = config_get("plugin:proc:/proc/stat", "scaling_cur_freq filename to monitor", filename); - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/stat"); - ff = procfile_open(config_get("plugin:proc:/proc/stat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - size_t words; - - unsigned long long 
processes = 0, running = 0 , blocked = 0; - - for(l = 0; l < lines ;l++) { - char *row_key = procfile_lineword(ff, l, 0); - uint32_t hash = simple_hash(row_key); - - // faster strncmp(row_key, "cpu", 3) == 0 - if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) { - words = procfile_linewords(ff, l); - if(unlikely(words < 9)) { - error("Cannot read /proc/stat cpu line. Expected 9 params, read %zu.", words); - continue; - } - - size_t core = (row_key[3] == '\0') ? 0 : str2ul(&row_key[3]) + 1; - - if(likely((core == 0 && do_cpu) || (core > 0 && do_cpu_cores))) { - char *id; - unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0; - - id = row_key; - user = str2ull(procfile_lineword(ff, l, 1)); - nice = str2ull(procfile_lineword(ff, l, 2)); - system = str2ull(procfile_lineword(ff, l, 3)); - idle = str2ull(procfile_lineword(ff, l, 4)); - iowait = str2ull(procfile_lineword(ff, l, 5)); - irq = str2ull(procfile_lineword(ff, l, 6)); - softirq = str2ull(procfile_lineword(ff, l, 7)); - steal = str2ull(procfile_lineword(ff, l, 8)); - - guest = str2ull(procfile_lineword(ff, l, 9)); - user -= guest; - - guest_nice = str2ull(procfile_lineword(ff, l, 10)); - nice -= guest_nice; - - char *title, *type, *context, *family; - long priority; - - if(core >= all_cpu_charts_size) { - size_t old_cpu_charts_size = all_cpu_charts_size; - all_cpu_charts_size = core + 1; - all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * all_cpu_charts_size); - memset(&all_cpu_charts[old_cpu_charts_size], 0, sizeof(struct cpu_chart) * (all_cpu_charts_size - old_cpu_charts_size)); - } - struct cpu_chart *cpu_chart = &all_cpu_charts[core]; - - if(unlikely(!cpu_chart->st)) { - cpu_chart->id = strdupz(id); - - if(core == 0) { - title = "Total CPU utilization"; - type = "system"; - context = "system.cpu"; - family = id; - priority = 100; - } - else { - title = "Core utilization"; - type = "cpu"; - context = "cpu.cpu"; - family = "utilization"; - priority = 1000; - - // FIXME: check for /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq - // FIXME: check for /sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state - - char filename[FILENAME_MAX + 1]; - struct stat stbuf; - - if(do_core_throttle_count != CONFIG_BOOLEAN_NO) { - snprintfz(filename, FILENAME_MAX, core_throttle_count_filename, id); - if (stat(filename, &stbuf) == 0) { - cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].filename = strdupz(filename); - cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].fd = -1; - do_core_throttle_count = CONFIG_BOOLEAN_YES; - } - } - - if(do_package_throttle_count != CONFIG_BOOLEAN_NO) { - snprintfz(filename, FILENAME_MAX, package_throttle_count_filename, id); - if (stat(filename, &stbuf) == 0) { - cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].filename = strdupz(filename); - cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].fd = -1; - do_package_throttle_count = CONFIG_BOOLEAN_YES; - } - } - - if(do_scaling_cur_freq != CONFIG_BOOLEAN_NO) { - snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, id); - if (stat(filename, &stbuf) == 0) { - cpu_chart->files[SCALING_CUR_FREQ_INDEX].filename = strdupz(filename); - cpu_chart->files[SCALING_CUR_FREQ_INDEX].fd = -1; - do_scaling_cur_freq = CONFIG_BOOLEAN_YES; - } - } - } - - cpu_chart->st = rrdset_create_localhost( - type - , id - , NULL - , family - , context - , title - , "percentage" - , "proc" - , "stat" - , priority - , update_every - , RRDSET_TYPE_STACKED - ); - - long multiplier = 1; - 
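                    // Added annotation: with RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL
                    // (used for every dimension below), each dimension is charted
                    // as the percentage its increment contributes to the sum of
                    // all dimensions' increments between two collections. Only
                    // ratios of deltas matter, so raw jiffies can be fed in
                    // unscaled; that is why the _SC_CLK_TCK divisor below stays
                    // at 1. It is also why guest and guest_nice are subtracted
                    // from user and nice above: the kernel accounts guest time
                    // inside user time (see proc(5)), and without the subtraction
                    // it would be counted twice.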
long divisor = 1; // sysconf(_SC_CLK_TCK); - - cpu_chart->rd_guest_nice = rrddim_add(cpu_chart->st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_guest = rrddim_add(cpu_chart->st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_steal = rrddim_add(cpu_chart->st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_softirq = rrddim_add(cpu_chart->st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_irq = rrddim_add(cpu_chart->st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_user = rrddim_add(cpu_chart->st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_system = rrddim_add(cpu_chart->st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_nice = rrddim_add(cpu_chart->st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_iowait = rrddim_add(cpu_chart->st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - cpu_chart->rd_idle = rrddim_add(cpu_chart->st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rrddim_hide(cpu_chart->st, "idle"); - } - else rrdset_next(cpu_chart->st); - - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_user, user); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_nice, nice); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_system, system); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_idle, idle); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_iowait, iowait); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_irq, irq); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_softirq, softirq); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_steal, steal); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest, guest); - rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest_nice, guest_nice); - rrdset_done(cpu_chart->st); - } - } - else if(unlikely(hash == hash_intr && strcmp(row_key, "intr") == 0)) { - if(likely(do_interrupts)) { - static RRDSET *st_intr = NULL; - static RRDDIM *rd_interrupts = NULL; - unsigned long long value = str2ull(procfile_lineword(ff, l, 1)); - - if(unlikely(!st_intr)) { - st_intr = rrdset_create_localhost( - "system" - , "intr" - , NULL - , "interrupts" - , NULL - , "CPU Interrupts" - , "interrupts/s" - , "proc" - , "stat" - , 900 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL); - - rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_intr); - - rrddim_set_by_pointer(st_intr, rd_interrupts, value); - rrdset_done(st_intr); - } - } - else if(unlikely(hash == hash_ctxt && strcmp(row_key, "ctxt") == 0)) { - if(likely(do_context)) { - static RRDSET *st_ctxt = NULL; - static RRDDIM *rd_switches = NULL; - unsigned long long value = str2ull(procfile_lineword(ff, l, 1)); - - if(unlikely(!st_ctxt)) { - st_ctxt = rrdset_create_localhost( - "system" - , "ctxt" - , NULL - , "processes" - , NULL - , "CPU Context Switches" - , "context switches/s" - , "proc" - , "stat" - , 800 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_ctxt); - - rrddim_set_by_pointer(st_ctxt, rd_switches, value); - rrdset_done(st_ctxt); - } - } - else 
if(unlikely(hash == hash_processes && !processes && strcmp(row_key, "processes") == 0)) { - processes = str2ull(procfile_lineword(ff, l, 1)); - } - else if(unlikely(hash == hash_procs_running && !running && strcmp(row_key, "procs_running") == 0)) { - running = str2ull(procfile_lineword(ff, l, 1)); - } - else if(unlikely(hash == hash_procs_blocked && !blocked && strcmp(row_key, "procs_blocked") == 0)) { - blocked = str2ull(procfile_lineword(ff, l, 1)); - } - } - - // -------------------------------------------------------------------- - - if(likely(do_forks)) { - static RRDSET *st_forks = NULL; - static RRDDIM *rd_started = NULL; - - if(unlikely(!st_forks)) { - st_forks = rrdset_create_localhost( - "system" - , "forks" - , NULL - , "processes" - , NULL - , "Started Processes" - , "processes/s" - , "proc" - , "stat" - , 700 - , update_every - , RRDSET_TYPE_LINE - ); - rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL); - - rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_forks); - - rrddim_set_by_pointer(st_forks, rd_started, processes); - rrdset_done(st_forks); - } - - // -------------------------------------------------------------------- - - if(likely(do_processes)) { - static RRDSET *st_processes = NULL; - static RRDDIM *rd_running = NULL; - static RRDDIM *rd_blocked = NULL; - - if(unlikely(!st_processes)) { - st_processes = rrdset_create_localhost( - "system" - , "processes" - , NULL - , "processes" - , NULL - , "System Processes" - , "processes" - , "proc" - , "stat" - , 600 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st_processes); - - rrddim_set_by_pointer(st_processes, rd_running, running); - rrddim_set_by_pointer(st_processes, rd_blocked, blocked); - rrdset_done(st_processes); - } - - if(likely(all_cpu_charts_size > 1)) { - if(likely(do_core_throttle_count != CONFIG_BOOLEAN_NO)) { - int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX); - if(likely(r != -1 && (do_core_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) { - do_core_throttle_count = CONFIG_BOOLEAN_YES; - - static RRDSET *st_core_throttle_count = NULL; - - if (unlikely(!st_core_throttle_count)) - st_core_throttle_count = rrdset_create_localhost( - "cpu" - , "core_throttling" - , NULL - , "throttling" - , "cpu.core_throttling" - , "Core Thermal Throttling Events" - , "events/s" - , "proc" - , "stat" - , 5001 - , update_every - , RRDSET_TYPE_LINE - ); - else - rrdset_next(st_core_throttle_count); - - chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX, st_core_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrdset_done(st_core_throttle_count); - } - } - - if(likely(do_package_throttle_count != CONFIG_BOOLEAN_NO)) { - int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX); - if(likely(r != -1 && (do_package_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) { - do_package_throttle_count = CONFIG_BOOLEAN_YES; - - static RRDSET *st_package_throttle_count = NULL; - - if(unlikely(!st_package_throttle_count)) - st_package_throttle_count = rrdset_create_localhost( - "cpu" - , "package_throttling" - , NULL - , "throttling" - , "cpu.package_throttling" - , "Package Thermal Throttling Events" - , "events/s" - , "proc" - , "stat" - , 5002 - , 
update_every - , RRDSET_TYPE_LINE - ); - else - rrdset_next(st_package_throttle_count); - - chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX, st_package_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrdset_done(st_package_throttle_count); - } - } - - if(likely(do_scaling_cur_freq != CONFIG_BOOLEAN_NO)) { - int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX); - if(likely(r != -1 && (do_scaling_cur_freq == CONFIG_BOOLEAN_YES || r > 0))) { - do_scaling_cur_freq = CONFIG_BOOLEAN_YES; - - static RRDSET *st_scaling_cur_freq = NULL; - - if(unlikely(!st_scaling_cur_freq)) - st_scaling_cur_freq = rrdset_create_localhost( - "cpu" - , "scaling_cur_freq" - , NULL - , "cpufreq" - , "cpu.scaling_cur_freq" - , "Per CPU Core, Current CPU Scaling Frequency" - , "MHz" - , "proc" - , "stat" - , 5003 - , update_every - , RRDSET_TYPE_LINE - ); - else - rrdset_next(st_scaling_cur_freq); - - chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX, st_scaling_cur_freq, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rrdset_done(st_scaling_cur_freq); - } - } - } - - return 0; -} diff --git a/src/proc_sys_kernel_random_entropy_avail.c b/src/proc_sys_kernel_random_entropy_avail.c deleted file mode 100644 index ca4d7657c..000000000 --- a/src/proc_sys_kernel_random_entropy_avail.c +++ /dev/null @@ -1,47 +0,0 @@ -#include "common.h" - -int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/kernel/random/entropy_avail"); - ff = procfile_open(config_get("plugin:proc:/proc/sys/kernel/random/entropy_avail", "filename to monitor", filename), "", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - unsigned long long entropy = str2ull(procfile_lineword(ff, 0, 0)); - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "system" - , "entropy" - , NULL - , "entropy" - , NULL - , "Available Entropy" - , "entropy" - , "proc" - , "sys/kernel/random/entropy_avail" - , 1000 - , update_every - , RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd, entropy); - rrdset_done(st); - - return 0; -} diff --git a/src/proc_uptime.c b/src/proc_uptime.c deleted file mode 100644 index 259de4760..000000000 --- a/src/proc_uptime.c +++ /dev/null @@ -1,103 +0,0 @@ -#include "common.h" - -static inline collected_number uptime_from_boottime(void) { -#ifdef CLOCK_BOOTTIME_IS_AVAILABLE - return now_boottime_usec() / 1000; -#else - error("uptime cannot be read from CLOCK_BOOTTIME on this system."); - return 0; -#endif -} - -static procfile *read_proc_uptime_ff = NULL; -static inline collected_number read_proc_uptime(void) { - if(unlikely(!read_proc_uptime_ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime"); - - read_proc_uptime_ff = procfile_open(config_get("plugin:proc:/proc/uptime", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!read_proc_uptime_ff)) return 0; - } - - read_proc_uptime_ff = procfile_readall(read_proc_uptime_ff); - 
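    // Added annotation, values illustrative: /proc/uptime holds two
    // space-separated decimal numbers, e.g. "35435.81 93234.53", meaning
    // seconds since boot and the aggregate idle seconds of all CPUs (per
    // proc(5), the second value can exceed wall-clock time on SMP systems).
    // Only the first word is used below, parsed as a long double and scaled
    // to milliseconds.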
if(unlikely(!read_proc_uptime_ff)) return 0; - - if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) { - error("/proc/uptime has no lines."); - return 0; - } - if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) { - error("/proc/uptime has less than 1 word in it."); - return 0; - } - - return (collected_number)(strtold(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0); -} - -int do_proc_uptime(int update_every, usec_t dt) { - (void)dt; - - static int use_boottime = -1; - - if(unlikely(use_boottime == -1)) { - collected_number uptime_boottime = uptime_from_boottime(); - collected_number uptime_proc = read_proc_uptime(); - - long long delta = (long long)uptime_boottime - (long long)uptime_proc; - if(delta < 0) delta = -delta; - - if(delta <= 1000 && uptime_boottime != 0) { - procfile_close(read_proc_uptime_ff); - info("Using now_boottime_usec() for uptime (dt is %lld ms)", delta); - use_boottime = 1; - } - else if(uptime_proc != 0) { - info("Using /proc/uptime for uptime (dt is %lld ms)", delta); - use_boottime = 0; - } - else { - error("Cannot find any way to read uptime on this system."); - return 1; - } - } - - collected_number uptime; - if(use_boottime) - uptime = uptime_from_boottime(); - else - uptime = read_proc_uptime(); - - - // -------------------------------------------------------------------- - - static RRDSET *st = NULL; - static RRDDIM *rd = NULL; - - if(unlikely(!st)) { - - st = rrdset_create_localhost( - "system" - , "uptime" - , NULL - , "uptime" - , NULL - , "System Uptime" - , "seconds" - , "proc" - , "uptime" - , 1000 - , update_every - , RRDSET_TYPE_LINE - ); - - rd = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st); - - rrddim_set_by_pointer(st, rd, uptime); - - rrdset_done(st); - - return 0; -} diff --git a/src/proc_vmstat.c b/src/proc_vmstat.c deleted file mode 100644 index 52e88d888..000000000 --- a/src/proc_vmstat.c +++ /dev/null @@ -1,255 +0,0 @@ -#include "common.h" - -int do_proc_vmstat(int update_every, usec_t dt) { - (void)dt; - - static procfile *ff = NULL; - static int do_swapio = -1, do_io = -1, do_pgfaults = -1, do_numa = -1; - static int has_numa = -1; - - static ARL_BASE *arl_base = NULL; - static unsigned long long numa_foreign = 0ULL; - static unsigned long long numa_hint_faults = 0ULL; - static unsigned long long numa_hint_faults_local = 0ULL; - static unsigned long long numa_huge_pte_updates = 0ULL; - static unsigned long long numa_interleave = 0ULL; - static unsigned long long numa_local = 0ULL; - static unsigned long long numa_other = 0ULL; - static unsigned long long numa_pages_migrated = 0ULL; - static unsigned long long numa_pte_updates = 0ULL; - static unsigned long long pgfault = 0ULL; - static unsigned long long pgmajfault = 0ULL; - static unsigned long long pgpgin = 0ULL; - static unsigned long long pgpgout = 0ULL; - static unsigned long long pswpin = 0ULL; - static unsigned long long pswpout = 0ULL; - - if(unlikely(!arl_base)) { - do_swapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "swap i/o", CONFIG_BOOLEAN_AUTO); - do_io = config_get_boolean("plugin:proc:/proc/vmstat", "disk i/o", 1); - do_pgfaults = config_get_boolean("plugin:proc:/proc/vmstat", "memory page faults", 1); - do_numa = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "system-wide numa metric summary", CONFIG_BOOLEAN_AUTO); - - - arl_base = arl_create("vmstat", NULL, 60); - arl_expect(arl_base, "pgfault", &pgfault); - arl_expect(arl_base, "pgmajfault", &pgmajfault); - 
arl_expect(arl_base, "pgpgin", &pgpgin); - arl_expect(arl_base, "pgpgout", &pgpgout); - arl_expect(arl_base, "pswpin", &pswpin); - arl_expect(arl_base, "pswpout", &pswpout); - - if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && get_numa_node_count() >= 2)) { - arl_expect(arl_base, "numa_foreign", &numa_foreign); - arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local); - arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults); - arl_expect(arl_base, "numa_huge_pte_updates", &numa_huge_pte_updates); - arl_expect(arl_base, "numa_interleave", &numa_interleave); - arl_expect(arl_base, "numa_local", &numa_local); - arl_expect(arl_base, "numa_other", &numa_other); - arl_expect(arl_base, "numa_pages_migrated", &numa_pages_migrated); - arl_expect(arl_base, "numa_pte_updates", &numa_pte_updates); - } - else { - // Do not expect numa metrics when they are not needed. - // By not adding them, the ARL will stop processing the file - // when all the expected metrics are collected. - // Also ARL will not parse their values. - has_numa = 0; - do_numa = CONFIG_BOOLEAN_NO; - } - } - - if(unlikely(!ff)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/vmstat"); - ff = procfile_open(config_get("plugin:proc:/proc/vmstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time - - size_t lines = procfile_lines(ff), l; - - arl_begin(arl_base); - for(l = 0; l < lines ;l++) { - size_t words = procfile_linewords(ff, l); - if(unlikely(words < 2)) { - if(unlikely(words)) error("Cannot read /proc/vmstat line %zu. 
Expected 2 params, read %zu.", l, words); - continue; - } - - if(unlikely(arl_check(arl_base, - procfile_lineword(ff, l, 0), - procfile_lineword(ff, l, 1)))) break; - } - - // -------------------------------------------------------------------- - - if(pswpin || pswpout || do_swapio == CONFIG_BOOLEAN_YES) { - do_swapio = CONFIG_BOOLEAN_YES; - - static RRDSET *st_swapio = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_swapio)) { - st_swapio = rrdset_create_localhost( - "system" - , "swapio" - , NULL - , "swap" - , NULL - , "Swap I/O" - , "kilobytes/s" - , "proc" - , "vmstat" - , 250 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_swapio); - - rrddim_set_by_pointer(st_swapio, rd_in, pswpin); - rrddim_set_by_pointer(st_swapio, rd_out, pswpout); - rrdset_done(st_swapio); - } - - // -------------------------------------------------------------------- - - if(do_io) { - static RRDSET *st_io = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_io)) { - st_io = rrdset_create_localhost( - "system" - , "pgpgio" - , NULL - , "disk" - , NULL - , "Memory Paged from/to disk" - , "kilobytes/s" - , "proc" - , "vmstat" - , 151 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_io); - - rrddim_set_by_pointer(st_io, rd_in, pgpgin); - rrddim_set_by_pointer(st_io, rd_out, pgpgout); - rrdset_done(st_io); - } - - // -------------------------------------------------------------------- - - if(do_pgfaults) { - static RRDSET *st_pgfaults = NULL; - static RRDDIM *rd_minor = NULL, *rd_major = NULL; - - if(unlikely(!st_pgfaults)) { - st_pgfaults = rrdset_create_localhost( - "mem" - , "pgfaults" - , NULL - , "system" - , NULL - , "Memory Page Faults" - , "page faults/s" - , "proc" - , "vmstat" - , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL); - - rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_pgfaults); - - rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault); - rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault); - rrdset_done(st_pgfaults); - } - - // -------------------------------------------------------------------- - - // Ondemand criteria for NUMA. Since this won't change at run time, we - // check it only once. We check whether the node count is >= 2 because - // single-node systems have uninteresting statistics (since all accesses - // are local). - if(unlikely(has_numa == -1)) - - has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates || - numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 
1 : 0; - - if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && has_numa)) { - do_numa = CONFIG_BOOLEAN_YES; - - static RRDSET *st_numa = NULL; - static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL, *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL, *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL; - - if(unlikely(!st_numa)) { - st_numa = rrdset_create_localhost( - "mem" - , "numa" - , NULL - , "numa" - , NULL - , "NUMA events" - , "events/s" - , "proc" - , "vmstat" - , NETDATA_CHART_PRIO_MEM_NUMA - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL); - - // These depend on CONFIG_NUMA in the kernel. - rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - // The following stats depend on CONFIG_NUMA_BALANCING in the - // kernel. - rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st_numa); - - rrddim_set_by_pointer(st_numa, rd_local, numa_local); - rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign); - rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave); - rrddim_set_by_pointer(st_numa, rd_other, numa_other); - - rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates); - rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates); - rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults); - rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local); - rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated); - - rrdset_done(st_numa); - } - - return 0; -} - diff --git a/src/procfile.c b/src/procfile.c deleted file mode 100644 index 044f975b5..000000000 --- a/src/procfile.c +++ /dev/null @@ -1,468 +0,0 @@ -#include "common.h" -#include "procfile.h" - -#define PF_PREFIX "PROCFILE" - -#define PFWORDS_INCREASE_STEP 200 -#define PFLINES_INCREASE_STEP 10 -#define PROCFILE_INCREMENT_BUFFER 512 - -int procfile_adaptive_initial_allocation = 0; - -// if adaptive allocation is set, these store the -// max values we have seen so far -size_t procfile_max_lines = PFLINES_INCREASE_STEP; -size_t procfile_max_words = PFWORDS_INCREASE_STEP; -size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER; - - -// ---------------------------------------------------------------------------- - -char *procfile_filename(procfile *ff) { - if(ff->filename[0]) return ff->filename; - - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "/proc/self/fd/%d", ff->fd); - - ssize_t l = readlink(buffer, ff->filename, FILENAME_MAX); - if(unlikely(l == -1)) - snprintfz(ff->filename, FILENAME_MAX, "unknown filename for fd %d", ff->fd); - else - ff->filename[l] = '\0'; - - // on non-linux systems, something like this will be needed - // fcntl(ff->fd, F_GETPATH, 
ff->filename) - - return ff->filename; -} - -// ---------------------------------------------------------------------------- -// An array of words - -static inline void pfwords_add(procfile *ff, char *str) { - // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str); - - pfwords *fw = ff->words; - if(unlikely(fw->len == fw->size)) { - // debug(D_PROCFILE, PF_PREFIX ": expanding words"); - - ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *)); - fw->size += PFWORDS_INCREASE_STEP; - } - - fw->words[fw->len++] = str; -} - -NEVERNULL -static inline pfwords *pfwords_new(void) { - // debug(D_PROCFILE, PF_PREFIX ": initializing words"); - - size_t size = (procfile_adaptive_initial_allocation) ? procfile_max_words : PFWORDS_INCREASE_STEP; - - pfwords *new = mallocz(sizeof(pfwords) + size * sizeof(char *)); - new->len = 0; - new->size = size; - return new; -} - -static inline void pfwords_reset(pfwords *fw) { - // debug(D_PROCFILE, PF_PREFIX ": reseting words"); - fw->len = 0; -} - -static inline void pfwords_free(pfwords *fw) { - // debug(D_PROCFILE, PF_PREFIX ": freeing words"); - - freez(fw); -} - - -// ---------------------------------------------------------------------------- -// An array of lines - -NEVERNULL -static inline size_t *pflines_add(procfile *ff) { - // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word); - - pflines *fl = ff->lines; - if(unlikely(fl->len == fl->size)) { - // debug(D_PROCFILE, PF_PREFIX ": expanding lines"); - - ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline)); - fl->size += PFLINES_INCREASE_STEP; - } - - ffline *ffl = &fl->lines[fl->len++]; - ffl->words = 0; - ffl->first = ff->words->len; - - return &ffl->words; -} - -NEVERNULL -static inline pflines *pflines_new(void) { - // debug(D_PROCFILE, PF_PREFIX ": initializing lines"); - - size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? 
procfile_max_words : PFLINES_INCREASE_STEP; - - pflines *new = mallocz(sizeof(pflines) + size * sizeof(ffline)); - new->len = 0; - new->size = size; - return new; -} - -static inline void pflines_reset(pflines *fl) { - // debug(D_PROCFILE, PF_PREFIX ": reseting lines"); - - fl->len = 0; -} - -static inline void pflines_free(pflines *fl) { - // debug(D_PROCFILE, PF_PREFIX ": freeing lines"); - - freez(fl); -} - - -// ---------------------------------------------------------------------------- -// The procfile - -void procfile_close(procfile *ff) { - if(unlikely(!ff)) return; - - debug(D_PROCFILE, PF_PREFIX ": Closing file '%s'", procfile_filename(ff)); - - if(likely(ff->lines)) pflines_free(ff->lines); - if(likely(ff->words)) pfwords_free(ff->words); - - if(likely(ff->fd != -1)) close(ff->fd); - freez(ff); -} - -NOINLINE -static void procfile_parser(procfile *ff) { - // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename); - - char *s = ff->data // our current position - , *e = &ff->data[ff->len] // the terminating null - , *t = ff->data; // the first character of a word (or quoted / parenthesized string) - - // the look up array to find our type of character - PF_CHAR_TYPE *separators = ff->separators; - - char quote = 0; // the quote character - only when in quoted string - size_t opened = 0; // counts the number of open parenthesis - - size_t *line_words = pflines_add(ff); - - while(s < e) { - PF_CHAR_TYPE ct = separators[(unsigned char)(*s)]; - - // this is faster than a switch() - // read more here: http://lazarenko.me/switch/ - if(likely(ct == PF_CHAR_IS_WORD)) { - s++; - } - else if(likely(ct == PF_CHAR_IS_SEPARATOR)) { - if(!quote && !opened) { - if (s != t) { - // separator, but we have word before it - *s = '\0'; - pfwords_add(ff, t); - (*line_words)++; - t = ++s; - } - else { - // separator at the beginning - // skip it - t = ++s; - } - } - else { - // we are inside a quote or parenthesized string - s++; - } - } - else if(likely(ct == PF_CHAR_IS_NEWLINE)) { - // end of line - - *s = '\0'; - pfwords_add(ff, t); - (*line_words)++; - t = ++s; - - // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words); - - line_words = pflines_add(ff); - } - else if(likely(ct == PF_CHAR_IS_QUOTE)) { - if(unlikely(!quote && s == t)) { - // quote opened at the beginning - quote = *s; - t = ++s; - } - else if(unlikely(quote && quote == *s)) { - // quote closed - quote = 0; - - *s = '\0'; - pfwords_add(ff, t); - (*line_words)++; - t = ++s; - } - else - s++; - } - else if(likely(ct == PF_CHAR_IS_OPEN)) { - if(s == t) { - opened++; - t = ++s; - } - else if(opened) { - opened++; - s++; - } - else - s++; - } - else if(likely(ct == PF_CHAR_IS_CLOSE)) { - if(opened) { - opened--; - - if(!opened) { - *s = '\0'; - pfwords_add(ff, t); - (*line_words)++; - t = ++s; - } - else - s++; - } - else - s++; - } - else - fatal("Internal Error: procfile_readall() does not handle all the cases."); - } - - if(likely(s > t && t < e)) { - // the last word - if(unlikely(ff->len >= ff->size)) { - // we are going to loose the last byte - s = &ff->data[ff->size - 1]; - } - - *s = '\0'; - pfwords_add(ff, t); - (*line_words)++; - // t = ++s; - } -} - -procfile *procfile_readall(procfile *ff) { - // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename); - - ff->len = 0; // zero the used size - ssize_t r = 1; // read at least once - while(r > 0) { - ssize_t s = ff->len; - ssize_t x = ff->size - s; - - if(unlikely(!x)) { - debug(D_PROCFILE, PF_PREFIX ": Expanding data 
buffer for file '%s'.", procfile_filename(ff)); - ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER); - ff->size += PROCFILE_INCREMENT_BUFFER; - } - - debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s)); - r = read(ff->fd, &ff->data[s], ff->size - s); - if(unlikely(r == -1)) { - if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); - procfile_close(ff); - return NULL; - } - - ff->len += r; - } - - // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename); - if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) { - if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); - procfile_close(ff); - return NULL; - } - - pflines_reset(ff->lines); - pfwords_reset(ff->words); - procfile_parser(ff); - - if(unlikely(procfile_adaptive_initial_allocation)) { - if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len; - if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len; - if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len; - } - - // debug(D_PROCFILE, "File '%s' updated.", ff->filename); - return ff; -} - -NOINLINE -static void procfile_set_separators(procfile *ff, const char *separators) { - static PF_CHAR_TYPE def[256]; - static char initilized = 0; - - if(unlikely(!initilized)) { - // this is thread safe - // if initialized is zero, multiple threads may be executing - // this code at the same time, setting in def[] the exact same values - int i = 256; - while(i--) { - if(unlikely(i == '\n' || i == '\r')) - def[i] = PF_CHAR_IS_NEWLINE; - - else if(unlikely(isspace(i) || !isprint(i))) - def[i] = PF_CHAR_IS_SEPARATOR; - - else - def[i] = PF_CHAR_IS_WORD; - } - - initilized = 1; - } - - // copy the default - PF_CHAR_TYPE *ffs = ff->separators, *ffd = def, *ffe = &def[256]; - while(ffd != ffe) - *ffs++ = *ffd++; - - // set the separators - if(unlikely(!separators)) - separators = " \t=|"; - - ffs = ff->separators; - const char *s = separators; - while(*s) - ffs[(int)*s++] = PF_CHAR_IS_SEPARATOR; -} - -void procfile_set_quotes(procfile *ff, const char *quotes) { - PF_CHAR_TYPE *ffs = ff->separators; - - // remove all quotes - int i = 256; - while(i--) - if(unlikely(ffs[i] == PF_CHAR_IS_QUOTE)) - ffs[i] = PF_CHAR_IS_WORD; - - // if nothing given, return - if(unlikely(!quotes || !*quotes)) - return; - - // set the quotes - const char *s = quotes; - while(*s) - ffs[(int)*s++] = PF_CHAR_IS_QUOTE; -} - -void procfile_set_open_close(procfile *ff, const char *open, const char *close) { - PF_CHAR_TYPE *ffs = ff->separators; - - // remove all open/close - int i = 256; - while(i--) - if(unlikely(ffs[i] == PF_CHAR_IS_OPEN || ffs[i] == PF_CHAR_IS_CLOSE)) - ffs[i] = PF_CHAR_IS_WORD; - - // if nothing given, return - if(unlikely(!open || !*open || !close || !*close)) - return; - - // set the openings - const char *s = open; - while(*s) - ffs[(int)*s++] = PF_CHAR_IS_OPEN; - - // set the closings - s = close; - while(*s) - ffs[(int)*s++] = PF_CHAR_IS_CLOSE; -} - -procfile *procfile_open(const char *filename, const char *separators, uint32_t flags) { - debug(D_PROCFILE, PF_PREFIX ": Opening file '%s'", filename); - - int fd = open(filename, O_RDONLY, 0666); - if(unlikely(fd == -1)) { - if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": 
-procfile *procfile_open(const char *filename, const char *separators, uint32_t flags) {
-    debug(D_PROCFILE, PF_PREFIX ": Opening file '%s'", filename);
-
-    int fd = open(filename, O_RDONLY, 0666);
-    if(unlikely(fd == -1)) {
-        if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot open file '%s'", filename);
-        return NULL;
-    }
-
-    // info("PROCFILE: opened '%s' on fd %d", filename, fd);
-
-    size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_allocation : PROCFILE_INCREMENT_BUFFER;
-    procfile *ff = mallocz(sizeof(procfile) + size);
-
-    //strncpyz(ff->filename, filename, FILENAME_MAX);
-    ff->filename[0] = '\0';
-
-    ff->fd = fd;
-    ff->size = size;
-    ff->len = 0;
-    ff->flags = flags;
-
-    ff->lines = pflines_new();
-    ff->words = pfwords_new();
-
-    procfile_set_separators(ff, separators);
-
-    debug(D_PROCFILE, "File '%s' opened.", filename);
-    return ff;
-}
-
-procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags) {
-    if(unlikely(!ff)) return procfile_open(filename, separators, flags);
-
-    if(likely(ff->fd != -1)) {
-        // info("PROCFILE: closing fd %d", ff->fd);
-        close(ff->fd);
-    }
-
-    ff->fd = open(filename, O_RDONLY, 0666);
-    if(unlikely(ff->fd == -1)) {
-        procfile_close(ff);
-        return NULL;
-    }
-
-    // info("PROCFILE: opened '%s' on fd %d", filename, ff->fd);
-
-    //strncpyz(ff->filename, filename, FILENAME_MAX);
-    ff->filename[0] = '\0';
-    ff->flags = flags;
-
-    // do not do the separators again if NULL is given
-    if(likely(separators)) procfile_set_separators(ff, separators);
-
-    return ff;
-}
-
-// ----------------------------------------------------------------------------
-// example parsing of procfile data
-
-void procfile_print(procfile *ff) {
-    size_t lines = procfile_lines(ff), l;
-    char *s;
-
-    debug(D_PROCFILE, "File '%s' with %zu lines and %zu words", procfile_filename(ff), ff->lines->len, ff->words->len);
-
-    for(l = 0; likely(l < lines) ;l++) {
-        size_t words = procfile_linewords(ff, l);
-
-        debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, ff->lines->lines[l].first, ff->lines->lines[l].words);
-
-        size_t w;
-        for(w = 0; likely(w < words) ;w++) {
-            s = procfile_lineword(ff, l, w);
-            debug(D_PROCFILE, " [%zu.%zu] '%s'", l, w, s);
-        }
-    }
-}
diff --git a/src/procfile.h b/src/procfile.h
deleted file mode 100644
index 012c6efe1..000000000
--- a/src/procfile.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * procfile is a library for reading kernel files from /proc
- *
- * The idea is this:
- *
- * - every file is opened once with procfile_open().
- *
- * - to read updated contents, we rewind it (lseek() to 0) and read again
- *   with procfile_readall().
- *
- * - for every file, we use a buffer that is adjusted to fit its entire
- *   contents in memory, allowing us to read it with a single read() call.
- *   (this provides atomicity / consistency on the data read from the kernel)
- *
- * - once the data are read, we update two arrays of pointers:
- *   - a words array, pointing to each word in the data read
- *   - a lines array, pointing to the first word for each line
- *
- * This is highly optimized. Both arrays are automatically adjusted to
- * fit all contents and are updated in a single pass on the data:
- * - a Raspberry Pi can process 5,000+ files / sec.
- * - a J1900 Celeron processor can process 23,000+ files / sec.
-*/
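A minimal sketch of the call pattern described in the comment above, using the accessor macros declared below (error handling trimmed, file name arbitrary):

    procfile *ff = procfile_open("/proc/stat", NULL, PROCFILE_FLAG_DEFAULT);
    if(ff && (ff = procfile_readall(ff))) {      // readall may realloc ff, so re-assign
        size_t l, lines = procfile_lines(ff);
        for(l = 0; l < lines; l++) {
            size_t w, words = procfile_linewords(ff, l);
            for(w = 0; w < words; w++)
                printf("[%zu.%zu] '%s'\n", l, w, procfile_lineword(ff, l, w));
        }
        procfile_close(ff);                      // a collector would keep it open and re-read it
    }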
-
-
-#ifndef NETDATA_PROCFILE_H
-#define NETDATA_PROCFILE_H 1
-
-// ----------------------------------------------------------------------------
-// An array of words
-
-typedef struct {
-    size_t len;     // used entries
-    size_t size;    // capacity
-    char *words[];  // array of pointers
-} pfwords;
-
-
-// ----------------------------------------------------------------------------
-// An array of lines
-
-typedef struct {
-    size_t words;   // how many words this line has
-    size_t first;   // the id of the first word of this line
-                    // in the words array
-} ffline;
-
-typedef struct {
-    size_t len;     // used entries
-    size_t size;    // capacity
-    ffline lines[]; // array of lines
-} pflines;
-
-
-// ----------------------------------------------------------------------------
-// The procfile
-
-#define PROCFILE_FLAG_DEFAULT             0x00000000
-#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001
-
-typedef enum procfile_separator {
-    PF_CHAR_IS_SEPARATOR,
-    PF_CHAR_IS_NEWLINE,
-    PF_CHAR_IS_WORD,
-    PF_CHAR_IS_QUOTE,
-    PF_CHAR_IS_OPEN,
-    PF_CHAR_IS_CLOSE
-} PF_CHAR_TYPE;
-
-typedef struct {
-    char filename[FILENAME_MAX + 1]; // not populated until procfile_filename() is called
-
-    uint32_t flags;
-    int fd;               // the file descriptor
-    size_t len;           // the bytes we have placed into data
-    size_t size;          // the bytes we have allocated for data
-    pflines *lines;
-    pfwords *words;
-    PF_CHAR_TYPE separators[256];
-    char data[];          // allocated buffer to keep file contents
-} procfile;
-
-// close the proc file and free all related memory
-extern void procfile_close(procfile *ff);
-
-// (re)read and parse the proc file
-extern procfile *procfile_readall(procfile *ff);
-
-// open a /proc or /sys file
-extern procfile *procfile_open(const char *filename, const char *separators, uint32_t flags);
-
-// re-open a file
-// if separators == NULL, the last separators are used
-extern procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags);
-
-// example walk-through a procfile parsed file
-extern void procfile_print(procfile *ff);
-
-extern void procfile_set_quotes(procfile *ff, const char *quotes);
-extern void procfile_set_open_close(procfile *ff, const char *open, const char *close);
-
-extern char *procfile_filename(procfile *ff);
-
-// ----------------------------------------------------------------------------
-
-// set this to 1, to have procfile adapt its initial buffer allocation to the max allocation used so far
-extern int procfile_adaptive_initial_allocation;
-
-// return the number of lines present
-#define procfile_lines(ff) ((ff)->lines->len)
-
-// return the number of words of the Nth line
-#define procfile_linewords(ff, line) (((line) < procfile_lines(ff)) ? (ff)->lines->lines[(line)].words : 0)
-
-// return the Nth word of the file, or empty string
-#define procfile_word(ff, word) (((word) < (ff)->words->len) ? (ff)->words->words[(word)] : "")
-
-// return the first word of the Nth line, or empty string
-#define procfile_line(ff, line) (((line) < procfile_lines(ff)) ? procfile_word((ff), (ff)->lines->lines[(line)].first) : "")
-
-// return the Nth word of the current line
-#define procfile_lineword(ff, line, word) (((line) < procfile_lines(ff) && (word) < procfile_linewords((ff), (line))) ? procfile_word((ff), (ff)->lines->lines[(line)].first + (word)) : "")
-
-#endif /* NETDATA_PROCFILE_H */
diff --git a/src/registry.c b/src/registry.c
deleted file mode 100644
index bbc2ef366..000000000
--- a/src/registry.c
+++ /dev/null
@@ -1,414 +0,0 @@
-#include "common.h"
-
-#include "registry_internals.h"
-
-#define REGISTRY_STATUS_OK "ok"
-#define REGISTRY_STATUS_FAILED "failed"
-#define REGISTRY_STATUS_DISABLED "disabled"
-
-// ----------------------------------------------------------------------------
-// REGISTRY concurrency locking
-
-static inline void registry_lock(void) {
-    netdata_mutex_lock(&registry.lock);
-}
-
-static inline void registry_unlock(void) {
-    netdata_mutex_unlock(&registry.lock);
-}
-
-
-// ----------------------------------------------------------------------------
-// COOKIES
-
-static void registry_set_cookie(struct web_client *w, const char *guid) {
-    char edate[100];
-    time_t et = now_realtime_sec() + registry.persons_expiration;
-    struct tm etmbuf, *etm = gmtime_r(&et, &etmbuf);
-    strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", etm);
-
-    snprintfz(w->cookie1, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Expires=%s", guid, edate);
-
-    if(registry.registry_domain && registry.registry_domain[0])
-        snprintfz(w->cookie2, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Domain=%s; Expires=%s", guid, registry.registry_domain, edate);
-}
-
-static inline void registry_set_person_cookie(struct web_client *w, REGISTRY_PERSON *p) {
-    registry_set_cookie(w, p->guid);
-}
-
-
-// ----------------------------------------------------------------------------
-// JSON GENERATION
-
-static inline void registry_json_header(RRDHOST *host, struct web_client *w, const char *action, const char *status) {
-    buffer_flush(w->response.data);
-    w->response.data->contenttype = CT_APPLICATION_JSON;
-    buffer_sprintf(w->response.data, "{\n\t\"action\": \"%s\",\n\t\"status\": \"%s\",\n\t\"hostname\": \"%s\",\n\t\"machine_guid\": \"%s\"",
-                   action, status, host->registry_hostname, host->machine_guid);
-}
-
-static inline void registry_json_footer(struct web_client *w) {
-    buffer_strcat(w->response.data, "\n}\n");
-}
-
-static inline int registry_json_disabled(RRDHOST *host, struct web_client *w, const char *action) {
-    registry_json_header(host, w, action, REGISTRY_STATUS_DISABLED);
-
-    buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
-                   registry.registry_to_announce);
-
-    registry_json_footer(w);
-    return 200;
-}
-
-
-// ----------------------------------------------------------------------------
-// CALLBACKS FOR WALKING THROUGH REGISTRY OBJECTS
-
-// structure used by the callbacks below
-struct registry_json_walk_person_urls_callback {
-    REGISTRY_PERSON *p;
-    REGISTRY_MACHINE *m;
-    struct web_client *w;
-    int count;
-};
-
-// callback for rendering PERSON_URLs
-static int registry_json_person_url_callback(void *entry, void *data) {
-    REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
-    struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
-    struct web_client *w = c->w;
-
-    if(unlikely(c->count++))
-        buffer_strcat(w->response.data, ",");
-
-    buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u, \"%s\" ]",
-                   pu->machine->guid, pu->url->url, pu->last_t, pu->usages, pu->machine_name);
-
-    return 0;
-}
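Note the "%u000" in the format string above: pu->last_t is kept in seconds, and appending three literal zeros turns it into the millisecond timestamp JavaScript expects, without any 64-bit arithmetic. A rendered tuple looks roughly like this (values invented):

    [ "6ba7b810-9dad-11d1-80b4-00c04fd430c8", "http://localhost:19999/", 1541600000000, 12, "myhost" ]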
-
-// callback for rendering MACHINE_URLs
-static int registry_json_machine_url_callback(void *entry, void *data) {
-    REGISTRY_MACHINE_URL *mu = (REGISTRY_MACHINE_URL *)entry;
-    struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
-    struct web_client *w = c->w;
-    REGISTRY_MACHINE *m = c->m;
-
-    if(unlikely(c->count++))
-        buffer_strcat(w->response.data, ",");
-
-    buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u ]",
-                   m->guid, mu->url->url, mu->last_t, mu->usages);
-
-    return 1;
-}
-
-// ----------------------------------------------------------------------------
-
-// structure used by the callbacks below
-struct registry_person_url_callback_verify_machine_exists_data {
-    REGISTRY_MACHINE *m;
-    int count;
-};
-
-static inline int registry_person_url_callback_verify_machine_exists(void *entry, void *data) {
-    struct registry_person_url_callback_verify_machine_exists_data *d = (struct registry_person_url_callback_verify_machine_exists_data *)data;
-    REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
-    REGISTRY_MACHINE *m = d->m;
-
-    if(pu->machine == m)
-        d->count++;
-
-    return 0;
-}
-
-// ----------------------------------------------------------------------------
-// public HELLO request
-
-int registry_request_hello_json(RRDHOST *host, struct web_client *w) {
-    registry_json_header(host, w, "hello", REGISTRY_STATUS_OK);
-
-    buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
-                   registry.registry_to_announce);
-
-    registry_json_footer(w);
-    return 200;
-}
-
-// ----------------------------------------------------------------------------
-// public ACCESS request
-
-#define REGISTRY_VERIFY_COOKIES_GUID "give-me-back-this-cookie-now--please"
-
-// the main method for registering an access
-int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
-    if(unlikely(!registry.enabled))
-        return registry_json_disabled(host, w, "access");
-
-    // ------------------------------------------------------------------------
-    // verify the browser supports cookies
-
-    if(registry.verify_cookies_redirects > 0 && !person_guid[0]) {
-        buffer_flush(w->response.data);
-        registry_set_cookie(w, REGISTRY_VERIFY_COOKIES_GUID);
-        w->response.data->contenttype = CT_APPLICATION_JSON;
-        buffer_sprintf(w->response.data, "{ \"status\": \"redirect\", \"registry\": \"%s\" }", registry.registry_to_announce);
-        return 200;
-    }
-
-    if(unlikely(person_guid[0] && !strcmp(person_guid, REGISTRY_VERIFY_COOKIES_GUID)))
-        person_guid[0] = '\0';
-
-    // ------------------------------------------------------------------------
-
-    registry_lock();
-
-    REGISTRY_PERSON *p = registry_request_access(person_guid, machine_guid, url, name, when);
-    if(!p) {
-        registry_json_header(host, w, "access", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 412;
-    }
-
-    // set the cookie
-    registry_set_person_cookie(w, p);
-
-    // generate the response
-    registry_json_header(host, w, "access", REGISTRY_STATUS_OK);
-
-    buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\",\n\t\"urls\": [", p->guid);
-    struct registry_json_walk_person_urls_callback c = { p, NULL, w, 0 };
-    avl_traverse(&p->person_urls, registry_json_person_url_callback, &c);
-    buffer_strcat(w->response.data, "\n\t]\n");
-
-    registry_json_footer(w);
-    registry_unlock();
-    return 200;
-}
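Putting the JSON header, the per-URL tuples and the footer together, a successful "access" response has roughly this shape (values invented; the real output is tab-indented):

    {
        "action": "access",
        "status": "ok",
        "hostname": "myhost",
        "machine_guid": "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
        "person_guid": "f47ac10b-58cc-4372-a567-0e02b2c3d479",
        "urls": [
            [ "6ba7b810-9dad-11d1-80b4-00c04fd430c8", "http://localhost:19999/", 1541600000000, 12, "myhost" ]
        ]
    }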
-
-// ----------------------------------------------------------------------------
-// public DELETE request
-
-// the main method for deleting a URL from a person
-int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
-    if(!registry.enabled)
-        return registry_json_disabled(host, w, "delete");
-
-    registry_lock();
-
-    REGISTRY_PERSON *p = registry_request_delete(person_guid, machine_guid, url, delete_url, when);
-    if(!p) {
-        registry_json_header(host, w, "delete", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 412;
-    }
-
-    // generate the response
-    registry_json_header(host, w, "delete", REGISTRY_STATUS_OK);
-    registry_json_footer(w);
-    registry_unlock();
-    return 200;
-}
-
-// ----------------------------------------------------------------------------
-// public SEARCH request
-
-// the main method for searching the URLs of a netdata instance
-int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
-    if(!registry.enabled)
-        return registry_json_disabled(host, w, "search");
-
-    registry_lock();
-
-    REGISTRY_MACHINE *m = registry_request_machine(person_guid, machine_guid, url, request_machine, when);
-    if(!m) {
-        registry_json_header(host, w, "search", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 404;
-    }
-
-    registry_json_header(host, w, "search", REGISTRY_STATUS_OK);
-
-    buffer_strcat(w->response.data, ",\n\t\"urls\": [");
-    struct registry_json_walk_person_urls_callback c = { NULL, m, w, 0 };
-    dictionary_get_all(m->machine_urls, registry_json_machine_url_callback, &c);
-    buffer_strcat(w->response.data, "\n\t]\n");
-
-    registry_json_footer(w);
-    registry_unlock();
-    return 200;
-}
-
-// ----------------------------------------------------------------------------
-// SWITCH REQUEST
-
-// the main method for switching user identity
-int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when) {
-    if(!registry.enabled)
-        return registry_json_disabled(host, w, "switch");
-
-    (void)url;
-    (void)when;
-
-    registry_lock();
-
-    REGISTRY_PERSON *op = registry_person_find(person_guid);
-    if(!op) {
-        registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 430;
-    }
-
-    REGISTRY_PERSON *np = registry_person_find(new_person_guid);
-    if(!np) {
-        registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 431;
-    }
-
-    REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
-    if(!m) {
-        registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 432;
-    }
-
-    struct registry_person_url_callback_verify_machine_exists_data data = { m, 0 };
-
-    // verify the old person has access to this machine
-    avl_traverse(&op->person_urls, registry_person_url_callback_verify_machine_exists, &data);
-    if(!data.count) {
-        registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 433;
-    }
-
-    // verify the new person has access to this machine
-    data.count = 0;
-    avl_traverse(&np->person_urls, registry_person_url_callback_verify_machine_exists, &data);
-    if(!data.count) {
-        registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
-        registry_json_footer(w);
-        registry_unlock();
-        return 434;
-    }
-
-    // set the cookie of the new person
-    // the user just switched identity
-    registry_set_person_cookie(w, np);
-
-    // generate the response
-    registry_json_header(host, w, "switch", REGISTRY_STATUS_OK);
-    buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\"", np->guid);
-    registry_json_footer(w);
-
-    registry_unlock();
-    return 200;
-}
-
-// ----------------------------------------------------------------------------
-// STATISTICS
-
-void registry_statistics(void) {
-    if(!registry.enabled) return;
-
-    static RRDSET *sts = NULL, *stc = NULL, *stm = NULL;
-
-    if(unlikely(!sts)) {
-        sts = rrdset_create_localhost(
-                "netdata"
-                , "registry_sessions"
-                , NULL
-                , "registry"
-                , NULL
-                , "NetData Registry Sessions"
-                , "session"
-                , "registry"
-                , "stats"
-                , 131000
-                , localhost->rrd_update_every
-                , RRDSET_TYPE_LINE
-        );
-
-        rrddim_add(sts, "sessions", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-    }
-    else rrdset_next(sts);
-
-    rrddim_set(sts, "sessions", registry.usages_count);
-    rrdset_done(sts);
-
-    // ------------------------------------------------------------------------
-
-    if(unlikely(!stc)) {
-        stc = rrdset_create_localhost(
-                "netdata"
-                , "registry_entries"
-                , NULL
-                , "registry"
-                , NULL
-                , "NetData Registry Entries"
-                , "entries"
-                , "registry"
-                , "stats"
-                , 131100
-                , localhost->rrd_update_every
-                , RRDSET_TYPE_LINE
-        );
-
-        rrddim_add(stc, "persons", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stc, "machines", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stc, "urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stc, "persons_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stc, "machines_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-    }
-    else rrdset_next(stc);
-
-    rrddim_set(stc, "persons", registry.persons_count);
-    rrddim_set(stc, "machines", registry.machines_count);
-    rrddim_set(stc, "urls", registry.urls_count);
-    rrddim_set(stc, "persons_urls", registry.persons_urls_count);
-    rrddim_set(stc, "machines_urls", registry.machines_urls_count);
-    rrdset_done(stc);
-
-    // ------------------------------------------------------------------------
-
-    if(unlikely(!stm)) {
-        stm = rrdset_create_localhost(
-                "netdata"
-                , "registry_mem"
-                , NULL
-                , "registry"
-                , NULL
-                , "NetData Registry Memory"
-                , "KB"
-                , "registry"
-                , "stats"
-                , 131300
-                , localhost->rrd_update_every
-                , RRDSET_TYPE_STACKED
-        );
-
-        rrddim_add(stm, "persons", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stm, "machines", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stm, "urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stm, "persons_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-        rrddim_add(stm, "machines_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-    }
-    else rrdset_next(stm);
-
-    rrddim_set(stm, "persons", registry.persons_memory + registry.persons_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
-    rrddim_set(stm, "machines", registry.machines_memory + registry.machines_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
-    rrddim_set(stm, "urls", registry.urls_memory);
-    rrddim_set(stm, "persons_urls", registry.persons_urls_memory);
-    rrddim_set(stm, "machines_urls", registry.machines_urls_memory + registry.machines_count * sizeof(DICTIONARY) + registry.machines_urls_count * sizeof(NAME_VALUE));
-    rrdset_done(stm);
-}
diff --git a/src/registry.h b/src/registry.h
deleted file mode 100644
index 9aa241564..000000000
--- a/src/registry.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * netdata registry
- *
- * this header file describes the public interface
- * to the netdata registry
- *
- * only these high level functions are exposed
- *
- */
-
-//
---------------------------------------------------------------------------- -// TODO -// -// 1. the default tracking cookie expires in 1 year, but the persons are not -// removed from the db - this means the database only grows - ideally the -// database should be cleaned in registry_db_save() for both on-disk and -// on-memory entries. -// -// Cleanup: -// i. Find all the PERSONs that have expired cookie -// ii. For each of their PERSON_URLs: -// - decrement the linked MACHINE links -// - if the linked MACHINE has no other links, remove the linked MACHINE too -// - remove the PERSON_URL -// -// 2. add protection to prevent abusing the registry by flooding it with -// requests to fill the memory and crash it. -// -// Possible protections: -// - limit the number of URLs per person -// - limit the number of URLs per machine -// - limit the number of persons -// - limit the number of machines -// - [DONE] limit the size of URLs -// - [DONE] limit the size of PERSON_URL names -// - limit the number of requests that add data to the registry, -// per client IP per hour -// -// 3. lower memory requirements -// -// - embed avl structures directly into registry objects, instead of DICTIONARY -// [DONE for PERSON_URLs, PENDING for MACHINE_URLs] -// - store GUIDs in memory as UUID instead of char * -// - do not track persons using the demo machines only -// (i.e. start tracking them only when they access a non-demo machine) -// - [DONE] do not track custom dashboards by default - - -#ifndef NETDATA_REGISTRY_H -#define NETDATA_REGISTRY_H 1 - -#define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id" - -// initialize the registry -// should only happen when netdata starts -extern int registry_init(void); - -// free all data held by the registry -// should only happen when netdata exits -extern void registry_free(void); - -// HTTP requests handled by the registry -extern int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when); -extern int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when); -extern int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when); -extern int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when); -extern int registry_request_hello_json(RRDHOST *host, struct web_client *w); - -// update the registry monitoring charts -extern void registry_statistics(void); - -extern char *registry_get_this_machine_guid(void); -extern char *registry_get_this_machine_hostname(void); - -extern int regenerate_guid(const char *guid, char *result); - -#endif /* NETDATA_REGISTRY_H */ diff --git a/src/registry_db.c b/src/registry_db.c deleted file mode 100644 index de6c634c3..000000000 --- a/src/registry_db.c +++ /dev/null @@ -1,343 +0,0 @@ -#include "registry_internals.h" - -int registry_db_should_be_saved(void) { - debug(D_REGISTRY, "log entries %llu, max %llu", registry.log_count, registry.save_registry_every_entries); - return registry.log_count > registry.save_registry_every_entries; -} - -// ---------------------------------------------------------------------------- -// INTERNAL FUNCTIONS FOR SAVING REGISTRY OBJECTS - -static int registry_machine_save_url(void *entry, void *file) { - REGISTRY_MACHINE_URL *mu = entry; - 
FILE *fp = file; - - debug(D_REGISTRY, "Registry: registry_machine_save_url('%s')", mu->url->url); - - int ret = fprintf(fp, "V\t%08x\t%08x\t%08x\t%02x\t%s\n", - mu->first_t, - mu->last_t, - mu->usages, - mu->flags, - mu->url->url - ); - - // error handling is done at registry_db_save() - - return ret; -} - -static int registry_machine_save(void *entry, void *file) { - REGISTRY_MACHINE *m = entry; - FILE *fp = file; - - debug(D_REGISTRY, "Registry: registry_machine_save('%s')", m->guid); - - int ret = fprintf(fp, "M\t%08x\t%08x\t%08x\t%s\n", - m->first_t, - m->last_t, - m->usages, - m->guid - ); - - if(ret >= 0) { - int ret2 = dictionary_get_all(m->machine_urls, registry_machine_save_url, fp); - if(ret2 < 0) return ret2; - ret += ret2; - } - - // error handling is done at registry_db_save() - - return ret; -} - -static inline int registry_person_save_url(void *entry, void *file) { - REGISTRY_PERSON_URL *pu = entry; - FILE *fp = file; - - debug(D_REGISTRY, "Registry: registry_person_save_url('%s')", pu->url->url); - - int ret = fprintf(fp, "U\t%08x\t%08x\t%08x\t%02x\t%s\t%s\t%s\n", - pu->first_t, - pu->last_t, - pu->usages, - pu->flags, - pu->machine->guid, - pu->machine_name, - pu->url->url - ); - - // error handling is done at registry_db_save() - - return ret; -} - -static inline int registry_person_save(void *entry, void *file) { - REGISTRY_PERSON *p = entry; - FILE *fp = file; - - debug(D_REGISTRY, "Registry: registry_person_save('%s')", p->guid); - - int ret = fprintf(fp, "P\t%08x\t%08x\t%08x\t%s\n", - p->first_t, - p->last_t, - p->usages, - p->guid - ); - - if(ret >= 0) { - //int ret2 = dictionary_get_all(p->person_urls, registry_person_save_url, fp); - int ret2 = avl_traverse(&p->person_urls, registry_person_save_url, fp); - if (ret2 < 0) return ret2; - ret += ret2; - } - - // error handling is done at registry_db_save() - - return ret; -} - -// ---------------------------------------------------------------------------- -// SAVE THE REGISTRY DATABASE - -int registry_db_save(void) { - if(unlikely(!registry.enabled)) - return -1; - - if(unlikely(!registry_db_should_be_saved())) - return -2; - - error_log_limit_unlimited(); - - char tmp_filename[FILENAME_MAX + 1]; - char old_filename[FILENAME_MAX + 1]; - - snprintfz(old_filename, FILENAME_MAX, "%s.old", registry.db_filename); - snprintfz(tmp_filename, FILENAME_MAX, "%s.tmp", registry.db_filename); - - debug(D_REGISTRY, "Registry: Creating file '%s'", tmp_filename); - FILE *fp = fopen(tmp_filename, "w"); - if(!fp) { - error("Registry: Cannot create file: %s", tmp_filename); - error_log_limit_reset(); - return -1; - } - - // dictionary_get_all() has its own locking, so this is safe to do - - debug(D_REGISTRY, "Saving all machines"); - int bytes1 = dictionary_get_all(registry.machines, registry_machine_save, fp); - if(bytes1 < 0) { - error("Registry: Cannot save registry machines - return value %d", bytes1); - fclose(fp); - error_log_limit_reset(); - return bytes1; - } - debug(D_REGISTRY, "Registry: saving machines took %d bytes", bytes1); - - debug(D_REGISTRY, "Saving all persons"); - int bytes2 = dictionary_get_all(registry.persons, registry_person_save, fp); - if(bytes2 < 0) { - error("Registry: Cannot save registry persons - return value %d", bytes2); - fclose(fp); - error_log_limit_reset(); - return bytes2; - } - debug(D_REGISTRY, "Registry: saving persons took %d bytes", bytes2); - - // save the totals - fprintf(fp, "T\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\n", - registry.persons_count, - registry.machines_count, - 
registry.usages_count + 1, // this is required - it is lost on db rotation - registry.urls_count, - registry.persons_urls_count, - registry.machines_urls_count - ); - - fclose(fp); - - errno = 0; - - // remove the .old db - debug(D_REGISTRY, "Registry: Removing old db '%s'", old_filename); - if(unlink(old_filename) == -1 && errno != ENOENT) - error("Registry: cannot remove old registry file '%s'", old_filename); - - // rename the db to .old - debug(D_REGISTRY, "Registry: Link current db '%s' to .old: '%s'", registry.db_filename, old_filename); - if(link(registry.db_filename, old_filename) == -1 && errno != ENOENT) - error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", registry.db_filename, old_filename); - - else { - // remove the database (it is saved in .old) - debug(D_REGISTRY, "Registry: removing db '%s'", registry.db_filename); - if (unlink(registry.db_filename) == -1 && errno != ENOENT) - error("Registry: cannot remove old registry file '%s'", registry.db_filename); - - // move the .tmp to make it active - debug(D_REGISTRY, "Registry: linking tmp db '%s' to active db '%s'", tmp_filename, registry.db_filename); - if (link(tmp_filename, registry.db_filename) == -1) { - error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename, - registry.db_filename); - - // move the .old back - debug(D_REGISTRY, "Registry: linking old db '%s' to active db '%s'", old_filename, registry.db_filename); - if(link(old_filename, registry.db_filename) == -1) - error("Registry: cannot move file '%s' to '%s'. Recovering the old registry DB failed!", old_filename, registry.db_filename); - } - else { - debug(D_REGISTRY, "Registry: removing tmp db '%s'", tmp_filename); - if(unlink(tmp_filename) == -1) - error("Registry: cannot remove tmp registry file '%s'", tmp_filename); - - // it has been moved successfully - // discard the current registry log - registry_log_recreate(); - registry.log_count = 0; - } - } - - // continue operations - error_log_limit_reset(); - - return -1; -} - -// ---------------------------------------------------------------------------- -// LOAD THE REGISTRY DATABASE - -size_t registry_db_load(void) { - char *s, buf[4096 + 1]; - REGISTRY_PERSON *p = NULL; - REGISTRY_MACHINE *m = NULL; - REGISTRY_URL *u = NULL; - size_t line = 0; - - debug(D_REGISTRY, "Registry: loading active db from: '%s'", registry.db_filename); - FILE *fp = fopen(registry.db_filename, "r"); - if(!fp) { - error("Registry: cannot open registry file: '%s'", registry.db_filename); - return 0; - } - - size_t len = 0; - buf[4096] = '\0'; - while((s = fgets_trim_len(buf, 4096, fp, &len))) { - line++; - - debug(D_REGISTRY, "Registry: read line %zu to length %zu: %s", line, len, s); - switch(*s) { - case 'T': // totals - if(unlikely(len != 103 || s[1] != '\t' || s[18] != '\t' || s[35] != '\t' || s[52] != '\t' || s[69] != '\t' || s[86] != '\t' || s[103] != '\0')) { - error("Registry totals line %zu is wrong (len = %zu).", line, len); - continue; - } - registry.persons_count = strtoull(&s[2], NULL, 16); - registry.machines_count = strtoull(&s[19], NULL, 16); - registry.usages_count = strtoull(&s[36], NULL, 16); - registry.urls_count = strtoull(&s[53], NULL, 16); - registry.persons_urls_count = strtoull(&s[70], NULL, 16); - registry.machines_urls_count = strtoull(&s[87], NULL, 16); - break; - - case 'P': // person - m = NULL; - // verify it is valid - if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) { - error("Registry 
person line %zu is wrong (len = %zu).", line, len);
-                    continue;
-                }
-
-                s[1] = s[10] = s[19] = s[28] = '\0';
-                p = registry_person_allocate(&s[29], strtoul(&s[2], NULL, 16));
-                p->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
-                p->usages = (uint32_t)strtoul(&s[20], NULL, 16);
-                debug(D_REGISTRY, "Registry loaded person '%s', first: %u, last: %u, usages: %u", p->guid, p->first_t, p->last_t, p->usages);
-                break;
-
-            case 'M': // machine
-                p = NULL;
-                // verify it is valid
-                if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
-                    error("Registry machine line %zu is wrong (len = %zu).", line, len);
-                    continue;
-                }
-
-                s[1] = s[10] = s[19] = s[28] = '\0';
-                m = registry_machine_allocate(&s[29], strtoul(&s[2], NULL, 16));
-                m->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
-                m->usages = (uint32_t)strtoul(&s[20], NULL, 16);
-                debug(D_REGISTRY, "Registry loaded machine '%s', first: %u, last: %u, usages: %u", m->guid, m->first_t, m->last_t, m->usages);
-                break;
-
-            case 'U': // person URL
-                if(unlikely(!p)) {
-                    error("Registry: ignoring line %zu, no person loaded: %s", line, s);
-                    continue;
-                }
-
-                // verify it is valid
-                if(len < 69 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t' || s[68] != '\t') {
-                    error("Registry person URL line %zu is wrong (len = %zu).", line, len);
-                    continue;
-                }
-
-                s[1] = s[10] = s[19] = s[28] = s[31] = s[68] = '\0';
-
-                // skip the name to find the url
-                char *url = &s[69];
-                while(*url && *url != '\t') url++;
-                if(!*url) {
-                    error("Registry person URL line %zu does not have a url.", line);
-                    continue;
-                }
-                *url++ = '\0';
-
-                // u = registry_url_allocate_nolock(url, strlen(url));
-                u = registry_url_get(url, strlen(url));
-
-                time_t first_t = strtoul(&s[2], NULL, 16);
-
-                m = registry_machine_find(&s[32]);
-                if(!m) m = registry_machine_allocate(&s[32], first_t);
-
-                REGISTRY_PERSON_URL *pu = registry_person_url_allocate(p, m, u, &s[69], strlen(&s[69]), first_t);
-                pu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
-                pu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
-                pu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
-                debug(D_REGISTRY, "Registry loaded person URL '%s' with name '%s' of machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, pu->machine_name, m->guid, pu->first_t, pu->last_t, pu->usages, pu->flags);
-                break;
-
-            case 'V': // machine URL
-                if(unlikely(!m)) {
-                    error("Registry: ignoring line %zu, no machine loaded: %s", line, s);
-                    continue;
-                }
-
-                // verify it is valid
-                if(len < 32 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t') {
-                    error("Registry machine URL line %zu is wrong (len = %zu).", line, len);
-                    continue;
-                }
-
-                s[1] = s[10] = s[19] = s[28] = s[31] = '\0';
-                // u = registry_url_allocate_nolock(&s[32], strlen(&s[32]));
-                u = registry_url_get(&s[32], strlen(&s[32]));
-
-                REGISTRY_MACHINE_URL *mu = registry_machine_url_allocate(m, u, strtoul(&s[2], NULL, 16));
-                mu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
-                mu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
-                mu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
-                debug(D_REGISTRY, "Registry loaded machine URL '%s', machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, m->guid, mu->first_t, mu->last_t, mu->usages, mu->flags);
-                break;
-
-            default:
-                error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s);
-                break;
-        }
-    }
-    fclose(fp);
-
-    return line;
-}
diff --git a/src/registry_init.c b/src/registry_init.c
deleted file mode 100644 index 654f66d12..000000000 --- a/src/registry_init.c +++ /dev/null @@ -1,143 +0,0 @@ -#include "registry_internals.h" - -int registry_init(void) { - char filename[FILENAME_MAX + 1]; - - // registry enabled? - if(web_server_mode != WEB_SERVER_MODE_NONE) { - registry.enabled = config_get_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0); - } - else { - info("Registry is disabled - use the central netdata"); - config_set_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0); - registry.enabled = 0; - } - - // pathnames - snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir); - registry.pathname = config_get(CONFIG_SECTION_REGISTRY, "registry db directory", filename); - if(mkdir(registry.pathname, 0770) == -1 && errno != EEXIST) - fatal("Cannot create directory '%s'.", registry.pathname); - - // filenames - snprintfz(filename, FILENAME_MAX, "%s/netdata.public.unique.id", registry.pathname); - registry.machine_guid_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata unique id file", filename); - - snprintfz(filename, FILENAME_MAX, "%s/registry.db", registry.pathname); - registry.db_filename = config_get(CONFIG_SECTION_REGISTRY, "registry db file", filename); - - snprintfz(filename, FILENAME_MAX, "%s/registry-log.db", registry.pathname); - registry.log_filename = config_get(CONFIG_SECTION_REGISTRY, "registry log file", filename); - - // configuration options - registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000); - registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400; - registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", ""); - registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io"); - registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname); - registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1); - - setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); - setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); - - registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024); - if(registry.max_url_length < 10) { - registry.max_url_length = 10; - config_set_number(CONFIG_SECTION_REGISTRY, "max URL length", (long long)registry.max_url_length); - } - - registry.max_name_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL name length", 50); - if(registry.max_name_length < 10) { - registry.max_name_length = 10; - config_set_number(CONFIG_SECTION_REGISTRY, "max URL name length", (long long)registry.max_name_length); - } - - // initialize entries counters - registry.persons_count = 0; - registry.machines_count = 0; - registry.usages_count = 0; - registry.urls_count = 0; - registry.persons_urls_count = 0; - registry.machines_urls_count = 0; - - // initialize memory counters - registry.persons_memory = 0; - registry.machines_memory = 0; - registry.urls_memory = 0; - registry.persons_urls_memory = 0; - registry.machines_urls_memory = 0; - - // initialize locks - netdata_mutex_init(®istry.lock); - - // create dictionaries - registry.persons = dictionary_create(DICTIONARY_FLAGS); - registry.machines = dictionary_create(DICTIONARY_FLAGS); - avl_init(®istry.registry_urls_root_index, registry_url_compare); - - // load the 
registry database - if(registry.enabled) { - registry_log_open(); - registry_db_load(); - registry_log_load(); - - if(unlikely(registry_db_should_be_saved())) - registry_db_save(); - } - - return 0; -} - -void registry_free(void) { - if(!registry.enabled) return; - - // we need to destroy the dictionaries ourselves - // since the dictionaries use memory we allocated - - while(registry.persons->values_index.root) { - REGISTRY_PERSON *p = ((NAME_VALUE *)registry.persons->values_index.root)->value; - registry_person_del(p); - } - - while(registry.machines->values_index.root) { - REGISTRY_MACHINE *m = ((NAME_VALUE *)registry.machines->values_index.root)->value; - - // fprintf(stderr, "\nMACHINE: '%s', first: %u, last: %u, usages: %u\n", m->guid, m->first_t, m->last_t, m->usages); - - while(m->machine_urls->values_index.root) { - REGISTRY_MACHINE_URL *mu = ((NAME_VALUE *)m->machine_urls->values_index.root)->value; - - // fprintf(stderr, "\tURL: '%s', first: %u, last: %u, usages: %u, flags: 0x%02x\n", mu->url->url, mu->first_t, mu->last_t, mu->usages, mu->flags); - - //debug(D_REGISTRY, "Registry: destroying persons dictionary from url '%s'", mu->url->url); - //dictionary_destroy(mu->persons); - - debug(D_REGISTRY, "Registry: deleting url '%s' from person '%s'", mu->url->url, m->guid); - dictionary_del(m->machine_urls, mu->url->url); - - debug(D_REGISTRY, "Registry: unlinking url '%s' from machine", mu->url->url); - registry_url_unlink(mu->url); - - debug(D_REGISTRY, "Registry: freeing machine url"); - freez(mu); - } - - debug(D_REGISTRY, "Registry: deleting machine '%s' from machines registry", m->guid); - dictionary_del(registry.machines, m->guid); - - debug(D_REGISTRY, "Registry: destroying URL dictionary of machine '%s'", m->guid); - dictionary_destroy(m->machine_urls); - - debug(D_REGISTRY, "Registry: freeing machine '%s'", m->guid); - freez(m); - } - - // and free the memory of remaining dictionary structures - - debug(D_REGISTRY, "Registry: destroying persons dictionary"); - dictionary_destroy(registry.persons); - - debug(D_REGISTRY, "Registry: destroying machines dictionary"); - dictionary_destroy(registry.machines); -} - diff --git a/src/registry_internals.c b/src/registry_internals.c deleted file mode 100644 index 44b0a1513..000000000 --- a/src/registry_internals.c +++ /dev/null @@ -1,322 +0,0 @@ -#include "registry_internals.h" - -struct registry registry; - -// ---------------------------------------------------------------------------- -// common functions - -// parse a GUID and re-generated to be always lower case -// this is used as a protection against the variations of GUIDs -int regenerate_guid(const char *guid, char *result) { - uuid_t uuid; - if(unlikely(uuid_parse(guid, uuid) == -1)) { - info("Registry: GUID '%s' is not a valid GUID.", guid); - return -1; - } - else { - uuid_unparse_lower(uuid, result); - -#ifdef NETDATA_INTERNAL_CHECKS - if(strcmp(guid, result) != 0) - info("GUID '%s' and re-generated GUID '%s' differ!", guid, result); -#endif /* NETDATA_INTERNAL_CHECKS */ - } - - return 0; -} - -// make sure the names of the machines / URLs do not contain any tabs -// (which are used as our separator in the database files) -// and are properly trimmed (before and after) -static inline char *registry_fix_machine_name(char *name, size_t *len) { - char *s = name?name:""; - - // skip leading spaces - while(*s && isspace(*s)) s++; - - // make sure all spaces are a SPACE - char *t = s; - while(*t) { - if(unlikely(isspace(*t))) - *t = ' '; - - t++; - } - - // remove trailing 
spaces
-    while(--t >= s) {
-        if(*t == ' ')
-            *t = '\0';
-        else
-            break;
-    }
-    t++;
-
-    if(likely(len))
-        *len = (t - s);
-
-    return s;
-}
-
-static inline char *registry_fix_url(char *url, size_t *len) {
-    size_t l = 0;
-    char *s = registry_fix_machine_name(url, &l);
-
-    // protection from too big URLs
-    if(l > registry.max_url_length) {
-        l = registry.max_url_length;
-        s[l] = '\0';
-    }
-
-    if(len) *len = l;
-    return s;
-}
-
-
-// ----------------------------------------------------------------------------
-// HELPERS
-
-// verify the person, the machine and the URL exist in our DB
-REGISTRY_PERSON_URL *registry_verify_request(char *person_guid, char *machine_guid, char *url, REGISTRY_PERSON **pp, REGISTRY_MACHINE **mm) {
-    char pbuf[GUID_LEN + 1], mbuf[GUID_LEN + 1];
-
-    if(!person_guid || !*person_guid || !machine_guid || !*machine_guid || !url || !*url) {
-        info("Registry Request Verification: invalid request! person: '%s', machine '%s', url '%s'", person_guid?person_guid:"UNSET", machine_guid?machine_guid:"UNSET", url?url:"UNSET");
-        return NULL;
-    }
-
-    // normalize the url
-    url = registry_fix_url(url, NULL);
-
-    // make sure the person GUID is valid
-    if(regenerate_guid(person_guid, pbuf) == -1) {
-        info("Registry Request Verification: invalid person GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
-        return NULL;
-    }
-    person_guid = pbuf;
-
-    // make sure the machine GUID is valid
-    if(regenerate_guid(machine_guid, mbuf) == -1) {
-        info("Registry Request Verification: invalid machine GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
-        return NULL;
-    }
-    machine_guid = mbuf;
-
-    // make sure the machine exists
-    REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
-    if(!m) {
-        info("Registry Request Verification: machine not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
-        return NULL;
-    }
-    if(mm) *mm = m;
-
-    // make sure the person exists
-    REGISTRY_PERSON *p = registry_person_find(person_guid);
-    if(!p) {
-        info("Registry Request Verification: person not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
-        return NULL;
-    }
-    if(pp) *pp = p;
-
-    REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, url);
-    if(!pu) {
-        info("Registry Request Verification: URL not found for person, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
-        return NULL;
-    }
-    return pu;
-}
-
-
-// ----------------------------------------------------------------------------
-// REGISTRY REQUESTS
-
-REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
-    debug(D_REGISTRY, "registry_request_access('%s', '%s', '%s'): NEW REQUEST", (person_guid)?person_guid:"", machine_guid, url);
-
-    REGISTRY_MACHINE *m = registry_machine_get(machine_guid, when);
-    if(!m) return NULL;
-
-    // make sure the name is valid
-    size_t namelen;
-    name = registry_fix_machine_name(name, &namelen);
-
-    size_t urllen;
-    url = registry_fix_url(url, &urllen);
-
-    REGISTRY_PERSON *p = registry_person_get(person_guid, when);
-
-    REGISTRY_URL *u = registry_url_get(url, urllen);
-    registry_person_link_to_url(p, m, u, name, namelen, when);
-    registry_machine_link_to_url(m, u, when);
-
-    registry_log('A', p, m, u, name);
-
-    registry.usages_count++;
-
-    return p;
-}
-
-REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
-    (void) when;
-
-    REGISTRY_PERSON *p = NULL;
-
REGISTRY_MACHINE *m = NULL; - REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m); - if(!pu || !p || !m) return NULL; - - // normalize the url - delete_url = registry_fix_url(delete_url, NULL); - - // make sure the user is not deleting the url it uses - if(!strcmp(delete_url, pu->url->url)) { - info("Registry Delete Request: delete URL is the one currently accessed, person: '%s', machine '%s', url '%s', delete url '%s'" - , p->guid, m->guid, pu->url->url, delete_url); - return NULL; - } - - REGISTRY_PERSON_URL *dpu = registry_person_url_index_find(p, delete_url); - if(!dpu) { - info("Registry Delete Request: URL not found for person: '%s', machine '%s', url '%s', delete url '%s'", p->guid - , m->guid, pu->url->url, delete_url); - return NULL; - } - - registry_log('D', p, m, pu->url, dpu->url->url); - registry_person_unlink_from_url(p, dpu); - - return p; -} - - -// a structure to pass to the dictionary_get_all() callback handler -struct machine_request_callback_data { - REGISTRY_MACHINE *find_this_machine; - REGISTRY_PERSON_URL *result; -}; - -// the callback function -// this will be run for every PERSON_URL of this PERSON -static int machine_request_callback(void *entry, void *data) { - REGISTRY_PERSON_URL *mypu = (REGISTRY_PERSON_URL *)entry; - struct machine_request_callback_data *myrdata = (struct machine_request_callback_data *)data; - - if(mypu->machine == myrdata->find_this_machine) { - myrdata->result = mypu; - return -1; // this will also stop the walk through - } - - return 0; // continue -} - -REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) { - (void)when; - - char mbuf[GUID_LEN + 1]; - - REGISTRY_PERSON *p = NULL; - REGISTRY_MACHINE *m = NULL; - REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m); - if(!pu || !p || !m) return NULL; - - // make sure the machine GUID is valid - if(regenerate_guid(request_machine, mbuf) == -1) { - info("Registry Machine URLs request: invalid machine GUID, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, m->guid, pu->url->url, request_machine); - return NULL; - } - request_machine = mbuf; - - // make sure the machine exists - m = registry_machine_find(request_machine); - if(!m) { - info("Registry Machine URLs request: machine not found, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, machine_guid, pu->url->url, request_machine); - return NULL; - } - - // Verify the user has in the past accessed this machine - // We will walk through the PERSON_URLs to find the machine - // linking to our machine - - // a structure to pass to the dictionary_get_all() callback handler - struct machine_request_callback_data rdata = { m, NULL }; - - // request a walk through on the dictionary - avl_traverse(&p->person_urls, machine_request_callback, &rdata); - - if(rdata.result) - return m; - - return NULL; -} - - -// ---------------------------------------------------------------------------- -// REGISTRY THIS MACHINE UNIQUE ID - -static inline int is_machine_guid_blacklisted(const char *guid) { - // these are machine GUIDs that have been included in distribution packages. - // we blacklist them here, so that the next version of netdata will generate - // new ones. 
- - if(!strcmp(guid, "8a795b0c-2311-11e6-8563-000c295076a6") - || !strcmp(guid, "4aed1458-1c3e-11e6-a53f-000c290fc8f5") - ) { - error("Blacklisted machine GUID '%s' found.", guid); - return 1; - } - - return 0; -} - -char *registry_get_this_machine_hostname(void) { - return registry.hostname; -} - -char *registry_get_this_machine_guid(void) { - static char guid[GUID_LEN + 1] = ""; - - if(likely(guid[0])) - return guid; - - // read it from disk - int fd = open(registry.machine_guid_filename, O_RDONLY); - if(fd != -1) { - char buf[GUID_LEN + 1]; - if(read(fd, buf, GUID_LEN) != GUID_LEN) - error("Failed to read machine GUID from '%s'", registry.machine_guid_filename); - else { - buf[GUID_LEN] = '\0'; - if(regenerate_guid(buf, guid) == -1) { - error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.", - buf, registry.machine_guid_filename); - - guid[0] = '\0'; - } - else if(is_machine_guid_blacklisted(guid)) - guid[0] = '\0'; - } - close(fd); - } - - // generate a new one? - if(!guid[0]) { - uuid_t uuid; - - uuid_generate_time(uuid); - uuid_unparse_lower(uuid, guid); - guid[GUID_LEN] = '\0'; - - // save it - fd = open(registry.machine_guid_filename, O_WRONLY|O_CREAT|O_TRUNC, 444); - if(fd == -1) - fatal("Cannot create unique machine id file '%s'. Please fix this.", registry.machine_guid_filename); - - if(write(fd, guid, GUID_LEN) != GUID_LEN) - fatal("Cannot write the unique machine id file '%s'. Please fix this.", registry.machine_guid_filename); - - close(fd); - } - - setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1); - - return guid; -} diff --git a/src/registry_internals.h b/src/registry_internals.h deleted file mode 100644 index cceaf292b..000000000 --- a/src/registry_internals.h +++ /dev/null @@ -1,86 +0,0 @@ -#include "common.h" - -#ifndef NETDATA_REGISTRY_INTERNALS_H_H -#define NETDATA_REGISTRY_INTERNALS_H_H - -#define REGISTRY_URL_FLAGS_DEFAULT 0x00 -#define REGISTRY_URL_FLAGS_EXPIRED 0x01 - -#define DICTIONARY_FLAGS (DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE | DICTIONARY_FLAG_NAME_LINK_DONT_CLONE | DICTIONARY_FLAG_SINGLE_THREADED) - -// ---------------------------------------------------------------------------- -// COMMON structures - -struct registry { - int enabled; - - // entries counters / statistics - unsigned long long persons_count; - unsigned long long machines_count; - unsigned long long usages_count; - unsigned long long urls_count; - unsigned long long persons_urls_count; - unsigned long long machines_urls_count; - unsigned long long log_count; - - // memory counters / statistics - unsigned long long persons_memory; - unsigned long long machines_memory; - unsigned long long urls_memory; - unsigned long long persons_urls_memory; - unsigned long long machines_urls_memory; - - // configuration - unsigned long long save_registry_every_entries; - char *registry_domain; - char *hostname; - char *registry_to_announce; - time_t persons_expiration; // seconds to expire idle persons - int verify_cookies_redirects; - - size_t max_url_length; - size_t max_name_length; - - // file/path names - char *pathname; - char *db_filename; - char *log_filename; - char *machine_guid_filename; - - // open files - FILE *log_fp; - - // the database - DICTIONARY *persons; // dictionary of REGISTRY_PERSON *, with key the REGISTRY_PERSON.guid - DICTIONARY *machines; // dictionary of REGISTRY_MACHINE *, with key the REGISTRY_MACHINE.guid - - avl_tree registry_urls_root_index; - - netdata_mutex_t lock; -}; - -#include 
"registry_url.h" -#include "registry_machine.h" -#include "registry_person.h" -#include "registry.h" - -extern struct registry registry; - -// REGISTRY LOW-LEVEL REQUESTS (in registry-internals.c) -extern REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when); -extern REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when); -extern REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when); - -// REGISTRY LOG (in registry_log.c) -extern void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name); -extern int registry_log_open(void); -extern void registry_log_close(void); -extern void registry_log_recreate(void); -extern ssize_t registry_log_load(void); - -// REGISTRY DB (in registry_db.c) -extern int registry_db_save(void); -extern size_t registry_db_load(void); -extern int registry_db_should_be_saved(void); - -#endif //NETDATA_REGISTRY_INTERNALS_H_H diff --git a/src/registry_log.c b/src/registry_log.c deleted file mode 100644 index cca43b09f..000000000 --- a/src/registry_log.c +++ /dev/null @@ -1,133 +0,0 @@ -#include "registry_internals.h" - -void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) { - if(likely(registry.log_fp)) { - if(unlikely(fprintf(registry.log_fp, "%c\t%08x\t%s\t%s\t%s\t%s\n", - action, - p->last_t, - p->guid, - m->guid, - name, - u->url) < 0)) - error("Registry: failed to save log. Registry data may be lost in case of abnormal restart."); - - // we increase the counter even on failures - // so that the registry will be saved periodically - registry.log_count++; - - // this must be outside the log_lock(), or a deadlock will happen. - // registry_db_save() checks the same inside the log_lock, so only - // one thread will save the db - if(unlikely(registry_db_should_be_saved())) - registry_db_save(); - } -} - -int registry_log_open(void) { - if(registry.log_fp) - fclose(registry.log_fp); - - registry.log_fp = fopen(registry.log_filename, "a"); - if(registry.log_fp) { - if (setvbuf(registry.log_fp, NULL, _IOLBF, 0) != 0) - error("Cannot set line buffering on registry log file."); - return 0; - } - - error("Cannot open registry log file '%s'. 
Registry data will be lost in case of netdata or server crash.", registry.log_filename); - return -1; -} - -void registry_log_close(void) { - if(registry.log_fp) { - fclose(registry.log_fp); - registry.log_fp = NULL; - } -} - -void registry_log_recreate(void) { - if(registry.log_fp != NULL) { - registry_log_close(); - - // open it with truncate - registry.log_fp = fopen(registry.log_filename, "w"); - if(registry.log_fp) fclose(registry.log_fp); - else error("Cannot truncate registry log '%s'", registry.log_filename); - - registry.log_fp = NULL; - registry_log_open(); - } -} - -ssize_t registry_log_load(void) { - ssize_t line = -1; - - // closing the log is required here - // otherwise we will append to it the values we read - registry_log_close(); - - debug(D_REGISTRY, "Registry: loading active db from: %s", registry.log_filename); - FILE *fp = fopen(registry.log_filename, "r"); - if(!fp) - error("Registry: cannot open registry file: %s", registry.log_filename); - else { - char *s, buf[4096 + 1]; - line = 0; - size_t len = 0; - - while ((s = fgets_trim_len(buf, 4096, fp, &len))) { - line++; - - switch (s[0]) { - case 'A': // accesses - case 'D': // deletes - - // verify it is valid - if (unlikely(len < 85 || s[1] != '\t' || s[10] != '\t' || s[47] != '\t' || s[84] != '\t')) { - error("Registry: log line %zd is wrong (len = %zu).", line, len); - continue; - } - s[1] = s[10] = s[47] = s[84] = '\0'; - - // get the variables - time_t when = strtoul(&s[2], NULL, 16); - char *person_guid = &s[11]; - char *machine_guid = &s[48]; - char *name = &s[85]; - - // skip the name to find the url - char *url = name; - while(*url && *url != '\t') url++; - if(!*url) { - error("Registry: log line %zd does not have a url.", line); - continue; - } - *url++ = '\0'; - - // make sure the person exists - // without this, a new person guid will be created - REGISTRY_PERSON *p = registry_person_find(person_guid); - if(!p) p = registry_person_allocate(person_guid, when); - - if(s[0] == 'A') - registry_request_access(p->guid, machine_guid, url, name, when); - else - registry_request_delete(p->guid, machine_guid, url, name, when); - - registry.log_count++; - break; - - default: - error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s); - break; - } - } - - fclose(fp); - } - - // open the log again - registry_log_open(); - - return line; -} diff --git a/src/registry_machine.c b/src/registry_machine.c deleted file mode 100644 index 6dc8200d3..000000000 --- a/src/registry_machine.c +++ /dev/null @@ -1,101 +0,0 @@ -#include "registry_internals.h" - -// ---------------------------------------------------------------------------- -// MACHINE - -REGISTRY_MACHINE *registry_machine_find(const char *machine_guid) { - debug(D_REGISTRY, "Registry: registry_machine_find('%s')", machine_guid); - return dictionary_get(registry.machines, machine_guid); -} - -REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) { - debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): allocating %zu bytes", m->guid, u->url, sizeof(REGISTRY_MACHINE_URL)); - - REGISTRY_MACHINE_URL *mu = mallocz(sizeof(REGISTRY_MACHINE_URL)); - - mu->first_t = mu->last_t = (uint32_t)when; - mu->usages = 1; - mu->url = u; - mu->flags = REGISTRY_URL_FLAGS_DEFAULT; - - registry.machines_urls_memory += sizeof(REGISTRY_MACHINE_URL); - - debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): indexing URL in machine", m->guid, u->url); - dictionary_set(m->machine_urls, u->url, mu, 
sizeof(REGISTRY_MACHINE_URL)); - - registry_url_link(u); - - return mu; -} - -REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when) { - debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating new machine, sizeof(MACHINE)=%zu", machine_guid, sizeof(REGISTRY_MACHINE)); - - REGISTRY_MACHINE *m = mallocz(sizeof(REGISTRY_MACHINE)); - - strncpyz(m->guid, machine_guid, GUID_LEN); - - debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating dictionary of urls", machine_guid); - m->machine_urls = dictionary_create(DICTIONARY_FLAGS); - - m->first_t = m->last_t = (uint32_t)when; - m->usages = 0; - - registry.machines_memory += sizeof(REGISTRY_MACHINE); - - registry.machines_count++; - dictionary_set(registry.machines, m->guid, m, sizeof(REGISTRY_MACHINE)); - - return m; -} - -// 1. validate machine GUID -// 2. if it is valid, find it or create it and return it -// 3. if it is not valid, return NULL -REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when) { - REGISTRY_MACHINE *m = NULL; - - if(likely(machine_guid && *machine_guid)) { - // validate it is a GUID - char buf[GUID_LEN + 1]; - if(unlikely(regenerate_guid(machine_guid, buf) == -1)) - info("Registry: machine guid '%s' is not a valid guid. Ignoring it.", machine_guid); - else { - machine_guid = buf; - m = registry_machine_find(machine_guid); - if(!m) m = registry_machine_allocate(machine_guid, when); - } - } - - return m; -} - - -// ---------------------------------------------------------------------------- -// LINKING OF OBJECTS - -REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) { - debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): searching for URL in machine", m->guid, u->url); - - REGISTRY_MACHINE_URL *mu = dictionary_get(m->machine_urls, u->url); - if(!mu) { - debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): not found", m->guid, u->url); - mu = registry_machine_url_allocate(m, u, when); - registry.machines_urls_count++; - } - else { - debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): found", m->guid, u->url); - mu->usages++; - if(likely(mu->last_t < (uint32_t)when)) mu->last_t = (uint32_t)when; - } - - m->usages++; - if(likely(m->last_t < (uint32_t)when)) m->last_t = (uint32_t)when; - - if(mu->flags & REGISTRY_URL_FLAGS_EXPIRED) { - debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): accessing an expired URL.", m->guid, u->url); - mu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED; - } - - return mu; -} diff --git a/src/registry_machine.h b/src/registry_machine.h deleted file mode 100644 index be824d168..000000000 --- a/src/registry_machine.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef NETDATA_REGISTRY_MACHINE_H -#define NETDATA_REGISTRY_MACHINE_H - -#include "registry_internals.h" - -// ---------------------------------------------------------------------------- -// MACHINE structures - -// For each MACHINE-URL pair we keep this -struct registry_machine_url { - REGISTRY_URL *url; // de-duplicated URL - - uint8_t flags; - - uint32_t first_t; // the first time we saw this - uint32_t last_t; // the last time we saw this - uint32_t usages; // how many times this has been accessed -}; -typedef struct registry_machine_url REGISTRY_MACHINE_URL; - -// A machine -struct registry_machine { - char guid[GUID_LEN + 1]; // the GUID - - uint32_t links; // the number of REGISTRY_PERSON_URL linked to this machine - - DICTIONARY *machine_urls; // MACHINE_URL * - - uint32_t first_t; // the first 
time we saw this - uint32_t last_t; // the last time we saw this - uint32_t usages; // how many times this has been accessed -}; -typedef struct registry_machine REGISTRY_MACHINE; - -extern REGISTRY_MACHINE *registry_machine_find(const char *machine_guid); -extern REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when); -extern REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when); -extern REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when); -extern REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when); - -#endif //NETDATA_REGISTRY_MACHINE_H diff --git a/src/registry_person.c b/src/registry_person.c deleted file mode 100644 index d8b6cd98a..000000000 --- a/src/registry_person.c +++ /dev/null @@ -1,264 +0,0 @@ -#include "registry_internals.h" - -// ---------------------------------------------------------------------------- -// PERSON_URL INDEX - -int person_url_compare(void *a, void *b) { - register uint32_t hash1 = ((REGISTRY_PERSON_URL *)a)->url->hash; - register uint32_t hash2 = ((REGISTRY_PERSON_URL *)b)->url->hash; - - if(hash1 < hash2) return -1; - else if(hash1 > hash2) return 1; - else return strcmp(((REGISTRY_PERSON_URL *)a)->url->url, ((REGISTRY_PERSON_URL *)b)->url->url); -} - -inline REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url) { - debug(D_REGISTRY, "Registry: registry_person_url_index_find('%s', '%s')", p->guid, url); - - char buf[sizeof(REGISTRY_URL) + strlen(url)]; - - REGISTRY_URL *u = (REGISTRY_URL *)&buf; - strcpy(u->url, url); - u->hash = simple_hash(u->url); - - REGISTRY_PERSON_URL tpu = { .url = u }; - - REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)avl_search(&p->person_urls, (void *)&tpu); - return pu; -} - -inline REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { - debug(D_REGISTRY, "Registry: registry_person_url_index_add('%s', '%s')", p->guid, pu->url->url); - REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_insert(&(p->person_urls), (avl *)(pu)); - if(tpu != pu) - error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url); - - return tpu; -} - -inline REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { - debug(D_REGISTRY, "Registry: registry_person_url_index_del('%s', '%s')", p->guid, pu->url->url); - REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_remove(&(p->person_urls), (avl *)(pu)); - if(!tpu) - error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url); - else if(tpu != pu) - error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url); - - return tpu; -} - -// ---------------------------------------------------------------------------- -// PERSON_URL - -REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) { - debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen); - - // protection from too big names - if(namelen > registry.max_name_length) - namelen = registry.max_name_length; - - REGISTRY_PERSON_URL *pu = mallocz(sizeof(REGISTRY_PERSON_URL) + namelen); - - // a simple strcpy() should do the job - // but 
I prefer to be safe, since the caller specified namelen - strncpyz(pu->machine_name, name, namelen); - - pu->machine = m; - pu->first_t = pu->last_t = (uint32_t)when; - pu->usages = 1; - pu->url = u; - pu->flags = REGISTRY_URL_FLAGS_DEFAULT; - m->links++; - - registry.persons_urls_memory += sizeof(REGISTRY_PERSON_URL) + namelen; - - debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): indexing URL in person", p->guid, m->guid, u->url); - REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu); - if(tpu != pu) { - error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid); - free(pu); - pu = tpu; - } - else - registry_url_link(u); - - return pu; -} - -void registry_person_url_free(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { - debug(D_REGISTRY, "registry_person_url_free('%s', '%s')", p->guid, pu->url->url); - - REGISTRY_PERSON_URL *tpu = registry_person_url_index_del(p, pu); - if(tpu) { - registry_url_unlink(tpu->url); - tpu->machine->links--; - registry.persons_urls_memory -= sizeof(REGISTRY_PERSON_URL) + strlen(tpu->machine_name); - freez(tpu); - } -} - -// this function is needed to change the name of a PERSON_URL -REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu) { - debug(D_REGISTRY, "registry_person_url_reallocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen); - - // keep a backup - REGISTRY_PERSON_URL pu2 = { - .first_t = pu->first_t, - .last_t = pu->last_t, - .usages = pu->usages, - .flags = pu->flags, - .machine = pu->machine, - .machine_name = "" - }; - - // remove the existing one from the index - registry_person_url_free(p, pu); - pu = &pu2; - - // allocate a new one - REGISTRY_PERSON_URL *tpu = registry_person_url_allocate(p, m, u, name, namelen, when); - tpu->first_t = pu->first_t; - tpu->last_t = pu->last_t; - tpu->usages = pu->usages; - tpu->flags = pu->flags; - - return tpu; -} - - -// ---------------------------------------------------------------------------- -// PERSON - -REGISTRY_PERSON *registry_person_find(const char *person_guid) { - debug(D_REGISTRY, "Registry: registry_person_find('%s')", person_guid); - return dictionary_get(registry.persons, person_guid); -} - -REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when) { - debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): allocating new person, sizeof(PERSON)=%zu", (person_guid)?person_guid:"", sizeof(REGISTRY_PERSON)); - - REGISTRY_PERSON *p = mallocz(sizeof(REGISTRY_PERSON)); - if(!person_guid) { - for(;;) { - uuid_t uuid; - uuid_generate(uuid); - uuid_unparse_lower(uuid, p->guid); - - debug(D_REGISTRY, "Registry: Checking if the generated person guid '%s' is unique", p->guid); - if (!dictionary_get(registry.persons, p->guid)) { - debug(D_REGISTRY, "Registry: generated person guid '%s' is unique", p->guid); - break; - } - else - info("Registry: generated person guid '%s' found in the registry.
Retrying...", p->guid); - } - } - else - strncpyz(p->guid, person_guid, GUID_LEN); - - debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): creating dictionary of urls", p->guid); - avl_init(&p->person_urls, person_url_compare); - - p->first_t = p->last_t = (uint32_t)when; - p->usages = 0; - - registry.persons_memory += sizeof(REGISTRY_PERSON); - - registry.persons_count++; - dictionary_set(registry.persons, p->guid, p, sizeof(REGISTRY_PERSON)); - - return p; -} - - -// 1. validate person GUID -// 2. if it is valid, find it -// 3. if it is not valid, create a new one -// 4. return it -REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when) { - debug(D_REGISTRY, "Registry: registry_person_get('%s'): creating dictionary of urls", person_guid); - - REGISTRY_PERSON *p = NULL; - - if(person_guid && *person_guid) { - char buf[GUID_LEN + 1]; - // validate it is a GUID - if(unlikely(regenerate_guid(person_guid, buf) == -1)) - info("Registry: person guid '%s' is not a valid guid. Ignoring it.", person_guid); - else { - person_guid = buf; - p = registry_person_find(person_guid); - } - } - - if(!p) p = registry_person_allocate(NULL, when); - - return p; -} - -void registry_person_del(REGISTRY_PERSON *p) { - debug(D_REGISTRY, "Registry: registry_person_del('%s'): creating dictionary of urls", p->guid); - - while(p->person_urls.root) - registry_person_unlink_from_url(p, (REGISTRY_PERSON_URL *)p->person_urls.root); - - debug(D_REGISTRY, "Registry: deleting person '%s' from persons registry", p->guid); - dictionary_del(registry.persons, p->guid); - - debug(D_REGISTRY, "Registry: freeing person '%s'", p->guid); - freez(p); -} - -// ---------------------------------------------------------------------------- -// LINKING OF OBJECTS - -REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) { - debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): searching for URL in person", p->guid, m->guid, u->url); - - REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, u->url); - if(!pu) { - debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): not found", p->guid, m->guid, u->url); - pu = registry_person_url_allocate(p, m, u, name, namelen, when); - registry.persons_urls_count++; - } - else { - debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): found", p->guid, m->guid, u->url); - pu->usages++; - if(likely(pu->last_t < (uint32_t)when)) pu->last_t = (uint32_t)when; - - if(pu->machine != m) { - REGISTRY_MACHINE_URL *mu = dictionary_get(pu->machine->machine_urls, u->url); - if(mu) { - debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - expiring it from previous machine.", - p->guid, m->guid, u->url, pu->machine->guid); - mu->flags |= REGISTRY_URL_FLAGS_EXPIRED; - } - else { - debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - but the URL is not linked to the old machine.", - p->guid, m->guid, u->url, pu->machine->guid); - } - - pu->machine->links--; - pu->machine = m; - } - - if(strcmp(pu->machine_name, name) != 0) { - // the name of the PERSON_URL has changed ! 
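- // (machine_name is a char[1] array sized at allocation time, so the
- // name cannot be changed in place: registry_person_url_reallocate()
- // above frees the entry and allocates a fresh one carrying the new
- // name, while preserving first_t, last_t, usages and flags)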
- pu = registry_person_url_reallocate(p, m, u, name, namelen, when, pu); - } - } - - p->usages++; - if(likely(p->last_t < (uint32_t)when)) p->last_t = (uint32_t)when; - - if(pu->flags & REGISTRY_URL_FLAGS_EXPIRED) { - debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): accessing an expired URL. Re-enabling URL.", p->guid, m->guid, u->url); - pu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED; - } - - return pu; -} - -void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) { - registry_person_url_free(p, pu); -} diff --git a/src/registry_person.h b/src/registry_person.h deleted file mode 100644 index 5f6cc2443..000000000 --- a/src/registry_person.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef NETDATA_REGISTRY_PERSON_H -#define NETDATA_REGISTRY_PERSON_H - -#include "registry_internals.h" - -// ---------------------------------------------------------------------------- -// PERSON structures - -// for each PERSON-URL pair we keep this -struct registry_person_url { - avl avl; // binary tree node - - REGISTRY_URL *url; // de-duplicated URL - REGISTRY_MACHINE *machine; // link the MACHINE of this URL - - uint8_t flags; - - uint32_t first_t; // the first time we saw this - uint32_t last_t; // the last time we saw this - uint32_t usages; // how many times this has been accessed - - char machine_name[1]; // the name of the machine, as known by the user - // dynamically allocated to fit properly -}; -typedef struct registry_person_url REGISTRY_PERSON_URL; - -// A person -struct registry_person { - char guid[GUID_LEN + 1]; // the person GUID - - avl_tree person_urls; // dictionary of PERSON_URLs - - uint32_t first_t; // the first time we saw this - uint32_t last_t; // the last time we saw this - uint32_t usages; // how many times this has been accessed - - //uint32_t flags; - //char *email; -}; -typedef struct registry_person REGISTRY_PERSON; - -// PERSON_URL -extern REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url); -extern REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) NEVERNULL WARNUNUSED; -extern REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) WARNUNUSED; - -extern REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when); -extern REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu); - -// PERSON -extern REGISTRY_PERSON *registry_person_find(const char *person_guid); -extern REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when); -extern REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when); -extern void registry_person_del(REGISTRY_PERSON *p); - -// LINKING PERSON -> PERSON_URL -extern REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when); -extern void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu); - -#endif //NETDATA_REGISTRY_PERSON_H diff --git a/src/registry_url.c b/src/registry_url.c deleted file mode 100644 index 52d36a898..000000000 --- a/src/registry_url.c +++ /dev/null @@ -1,85 +0,0 @@ -#include "registry_internals.h" - -// ---------------------------------------------------------------------------- -// REGISTRY_URL - -int registry_url_compare(void *a, 
void *b) { - if(((REGISTRY_URL *)a)->hash < ((REGISTRY_URL *)b)->hash) return -1; - else if(((REGISTRY_URL *)a)->hash > ((REGISTRY_URL *)b)->hash) return 1; - else return strcmp(((REGISTRY_URL *)a)->url, ((REGISTRY_URL *)b)->url); -} - -inline REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) { - return (REGISTRY_URL *)avl_insert(&(registry.registry_urls_root_index), (avl *)(u)); -} - -inline REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) { - return (REGISTRY_URL *)avl_remove(&(registry.registry_urls_root_index), (avl *)(u)); -} - -REGISTRY_URL *registry_url_get(const char *url, size_t urllen) { - // protection from too big URLs - if(urllen > registry.max_url_length) - urllen = registry.max_url_length; - - debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu)", url, urllen); - - char buf[sizeof(REGISTRY_URL) + urllen]; // no need for +1, 1 is already in REGISTRY_URL - REGISTRY_URL *n = (REGISTRY_URL *)&buf[0]; - n->len = (uint16_t)urllen; - strncpyz(n->url, url, n->len); - n->hash = simple_hash(n->url); - - REGISTRY_URL *u = (REGISTRY_URL *)avl_search(&(registry.registry_urls_root_index), (avl *)n); - if(!u) { - debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu): allocating %zu bytes", url, urllen, sizeof(REGISTRY_URL) + urllen); - u = callocz(1, sizeof(REGISTRY_URL) + urllen); // no need for +1, 1 is already in REGISTRY_URL - - // a simple strcpy() should do the job - // but I prefer to be safe, since the caller specified urllen - u->len = (uint16_t)urllen; - strncpyz(u->url, url, u->len); - u->links = 0; - u->hash = simple_hash(u->url); - - registry.urls_memory += sizeof(REGISTRY_URL) + urllen; // no need for +1, 1 is already in REGISTRY_URL - - debug(D_REGISTRY, "Registry: registry_url_get('%s'): indexing it", url); - n = registry_url_index_add(u); - if(n != u) { - error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url); - free(u); - u = n; - } - else - registry.urls_count++; - } - - return u; -} - -void registry_url_link(REGISTRY_URL *u) { - u->links++; - debug(D_REGISTRY, "Registry: registry_url_link('%s'): URL has now %u links", u->url, u->links); -} - -void registry_url_unlink(REGISTRY_URL *u) { - u->links--; - if(!u->links) { - debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): No more links for this URL", u->url); - REGISTRY_URL *n = registry_url_index_del(u); - if(!n) { - error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url); - } - else { - if(n != u) { - error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url); - } - - registry.urls_memory -= sizeof(REGISTRY_URL) + n->len; // no need for +1, 1 is already in REGISTRY_URL - freez(n); - } - } - else - debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): URL has %u links left", u->url, u->links); -} diff --git a/src/registry_url.h b/src/registry_url.h deleted file mode 100644 index 5ac52f5a5..000000000 --- a/src/registry_url.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef NETDATA_REGISTRY_URL_H -#define NETDATA_REGISTRY_URL_H - -#include "registry_internals.h" - -// ---------------------------------------------------------------------------- -// URL structures -// Save memory by de-duplicating URLs -// so instead of storing URLs all over the place -// we store them here and we keep pointers elsewhere - -struct registry_url { - avl avl; - uint32_t hash; // the index hash - - uint32_t links; // the number of links to this URL - when none is left, we free it - - uint16_t len; // the length of 
the URL in bytes - char url[1]; // the URL - dynamically allocated to more size -}; -typedef struct registry_url REGISTRY_URL; - -// REGISTRY_URL INDEX -extern int registry_url_compare(void *a, void *b); -extern REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) WARNUNUSED; -extern REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) NEVERNULL WARNUNUSED; - -// REGISTRY_URL MANAGEMENT -extern REGISTRY_URL *registry_url_get(const char *url, size_t urllen) NEVERNULL; -extern void registry_url_link(REGISTRY_URL *u); -extern void registry_url_unlink(REGISTRY_URL *u); - -#endif //NETDATA_REGISTRY_URL_H diff --git a/src/rrd.c b/src/rrd.c deleted file mode 100644 index 3be2d8e41..000000000 --- a/src/rrd.c +++ /dev/null @@ -1,148 +0,0 @@ -#define NETDATA_RRD_INTERNALS 1 -#include "common.h" - -// ---------------------------------------------------------------------------- -// globals - -/* -// if not zero it gives the time (in seconds) to remove un-updated dimensions -// DO NOT ENABLE -// if dimensions are removed, the chart generation will have to run again -int rrd_delete_unupdated_dimensions = 0; -*/ - -int default_rrd_update_every = UPDATE_EVERY; -int default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES; -RRD_MEMORY_MODE default_rrd_memory_mode = RRD_MEMORY_MODE_SAVE; -int gap_when_lost_iterations_above = 1; - - -// ---------------------------------------------------------------------------- -// RRD - memory modes - -inline const char *rrd_memory_mode_name(RRD_MEMORY_MODE id) { - switch(id) { - case RRD_MEMORY_MODE_RAM: - return RRD_MEMORY_MODE_RAM_NAME; - - case RRD_MEMORY_MODE_MAP: - return RRD_MEMORY_MODE_MAP_NAME; - - case RRD_MEMORY_MODE_NONE: - return RRD_MEMORY_MODE_NONE_NAME; - - case RRD_MEMORY_MODE_SAVE: - return RRD_MEMORY_MODE_SAVE_NAME; - - case RRD_MEMORY_MODE_ALLOC: - return RRD_MEMORY_MODE_ALLOC_NAME; - } - - return RRD_MEMORY_MODE_SAVE_NAME; -} - -RRD_MEMORY_MODE rrd_memory_mode_id(const char *name) { - if(unlikely(!strcmp(name, RRD_MEMORY_MODE_RAM_NAME))) - return RRD_MEMORY_MODE_RAM; - - else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_MAP_NAME))) - return RRD_MEMORY_MODE_MAP; - - else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_NONE_NAME))) - return RRD_MEMORY_MODE_NONE; - - else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_ALLOC_NAME))) - return RRD_MEMORY_MODE_ALLOC; - - return RRD_MEMORY_MODE_SAVE; -} - - -// ---------------------------------------------------------------------------- -// RRD - algorithms types - -RRD_ALGORITHM rrd_algorithm_id(const char *name) { - if(strcmp(name, RRD_ALGORITHM_INCREMENTAL_NAME) == 0) - return RRD_ALGORITHM_INCREMENTAL; - - else if(strcmp(name, RRD_ALGORITHM_ABSOLUTE_NAME) == 0) - return RRD_ALGORITHM_ABSOLUTE; - - else if(strcmp(name, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME) == 0) - return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL; - - else if(strcmp(name, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME) == 0) - return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL; - - else - return RRD_ALGORITHM_ABSOLUTE; -} - -const char *rrd_algorithm_name(RRD_ALGORITHM algorithm) { - switch(algorithm) { - case RRD_ALGORITHM_ABSOLUTE: - default: - return RRD_ALGORITHM_ABSOLUTE_NAME; - - case RRD_ALGORITHM_INCREMENTAL: - return RRD_ALGORITHM_INCREMENTAL_NAME; - - case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: - return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME; - - case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: - return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME; - } -} - - -// ---------------------------------------------------------------------------- -// RRD - chart types - -inline 
RRDSET_TYPE rrdset_type_id(const char *name) { - if(unlikely(strcmp(name, RRDSET_TYPE_AREA_NAME) == 0)) - return RRDSET_TYPE_AREA; - - else if(unlikely(strcmp(name, RRDSET_TYPE_STACKED_NAME) == 0)) - return RRDSET_TYPE_STACKED; - - else // if(unlikely(strcmp(name, RRDSET_TYPE_LINE_NAME) == 0)) - return RRDSET_TYPE_LINE; -} - -const char *rrdset_type_name(RRDSET_TYPE chart_type) { - switch(chart_type) { - case RRDSET_TYPE_LINE: - default: - return RRDSET_TYPE_LINE_NAME; - - case RRDSET_TYPE_AREA: - return RRDSET_TYPE_AREA_NAME; - - case RRDSET_TYPE_STACKED: - return RRDSET_TYPE_STACKED_NAME; - } -} - - -// ---------------------------------------------------------------------------- -// RRD - cache directory - -char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section) { - char *ret = NULL; - - char b[FILENAME_MAX + 1]; - char n[FILENAME_MAX + 1]; - rrdset_strncpyz_name(b, id, FILENAME_MAX); - - snprintfz(n, FILENAME_MAX, "%s/%s", host->cache_dir, b); - ret = config_get(config_section, "cache directory", n); - - if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) { - int r = mkdir(ret, 0775); - if(r != 0 && errno != EEXIST) - error("Cannot create directory '%s'", ret); - } - - return ret; -} diff --git a/src/rrd.h b/src/rrd.h deleted file mode 100644 index d17daacd2..000000000 --- a/src/rrd.h +++ /dev/null @@ -1,747 +0,0 @@ -#ifndef NETDATA_RRD_H -#define NETDATA_RRD_H 1 - -#define UPDATE_EVERY 1 -#define UPDATE_EVERY_MAX 3600 - -#define RRD_DEFAULT_HISTORY_ENTRIES 3600 -#define RRD_HISTORY_ENTRIES_MAX (86400*365) - -extern int default_rrd_update_every; -extern int default_rrd_history_entries; -extern int gap_when_lost_iterations_above; - -#define RRD_ID_LENGTH_MAX 200 - -#define RRDSET_MAGIC "NETDATA RRD SET FILE V019" -#define RRDDIMENSION_MAGIC "NETDATA RRD DIMENSION FILE V019" - -typedef long long total_number; -#define TOTAL_NUMBER_FORMAT "%lld" - -typedef struct rrdhost RRDHOST; - -// ---------------------------------------------------------------------------- -// chart types - -typedef enum rrdset_type { - RRDSET_TYPE_LINE = 0, - RRDSET_TYPE_AREA = 1, - RRDSET_TYPE_STACKED = 2 -} RRDSET_TYPE; - -#define RRDSET_TYPE_LINE_NAME "line" -#define RRDSET_TYPE_AREA_NAME "area" -#define RRDSET_TYPE_STACKED_NAME "stacked" - -RRDSET_TYPE rrdset_type_id(const char *name); -const char *rrdset_type_name(RRDSET_TYPE chart_type); - - -// ---------------------------------------------------------------------------- -// memory mode - -typedef enum rrd_memory_mode { - RRD_MEMORY_MODE_NONE = 0, - RRD_MEMORY_MODE_RAM = 1, - RRD_MEMORY_MODE_MAP = 2, - RRD_MEMORY_MODE_SAVE = 3, - RRD_MEMORY_MODE_ALLOC = 4 -} RRD_MEMORY_MODE; - -#define RRD_MEMORY_MODE_NONE_NAME "none" -#define RRD_MEMORY_MODE_RAM_NAME "ram" -#define RRD_MEMORY_MODE_MAP_NAME "map" -#define RRD_MEMORY_MODE_SAVE_NAME "save" -#define RRD_MEMORY_MODE_ALLOC_NAME "alloc" - -extern RRD_MEMORY_MODE default_rrd_memory_mode; - -extern const char *rrd_memory_mode_name(RRD_MEMORY_MODE id); -extern RRD_MEMORY_MODE rrd_memory_mode_id(const char *name); - - -// ---------------------------------------------------------------------------- -// algorithms types - -typedef enum rrd_algorithm { - RRD_ALGORITHM_ABSOLUTE = 0, - RRD_ALGORITHM_INCREMENTAL = 1, - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL = 2, - RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL = 3 -} RRD_ALGORITHM; - -#define RRD_ALGORITHM_ABSOLUTE_NAME "absolute" -#define RRD_ALGORITHM_INCREMENTAL_NAME "incremental" -#define 
RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME "percentage-of-incremental-row" -#define RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME "percentage-of-absolute-row" - -extern RRD_ALGORITHM rrd_algorithm_id(const char *name); -extern const char *rrd_algorithm_name(RRD_ALGORITHM algorithm); - -// ---------------------------------------------------------------------------- -// RRD FAMILY - -struct rrdfamily { - avl avl; - - const char *family; - uint32_t hash_family; - - size_t use_count; - - avl_tree_lock rrdvar_root_index; -}; -typedef struct rrdfamily RRDFAMILY; - - -// ---------------------------------------------------------------------------- -// flags -// use this for configuration flags, not for state control -// flags are set/unset in a manner that is not thread safe -// and may lead to missing information. - -typedef enum rrddim_flags { - RRDDIM_FLAG_NONE = 0, - RRDDIM_FLAG_HIDDEN = 1 << 0, // this dimension will not be offered to callers - RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = 1 << 1 // do not offer RESET or OVERFLOW info to callers -} RRDDIM_FLAGS; - -#ifdef HAVE_C___ATOMIC -#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag)) -#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST) -#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST) -#else -#define rrddim_flag_check(rd, flag) ((rd)->flags & (flag)) -#define rrddim_flag_set(rd, flag) (rd)->flags |= (flag) -#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~(flag) -#endif - - -// ---------------------------------------------------------------------------- -// RRD DIMENSION - this is a metric - -struct rrddim { - // ------------------------------------------------------------------------ - // binary indexing structures - - avl avl; // the binary index - this has to be first member! 
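- // (the index code casts structure pointers straight to "avl *" and
- // back, as in avl_insert(&index, (avl *)(item)); that is only valid
- // while this node sits at offset zero - hence the "first member"
- // requirement above)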
- - // ------------------------------------------------------------------------ - // the dimension definition - - const char *id; // the id of this dimension (for internal identification) - const char *name; // the name of this dimension (as presented to user) - // this is a pointer to the config structure - // since the config always has a higher priority - // (the user overwrites the name of the charts) - // DO NOT FREE THIS - IT IS ALLOCATED IN CONFIG - - RRD_ALGORITHM algorithm; // the algorithm that is applied to add new collected values - RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for this dimension - - collected_number multiplier; // the multiplier of the collected values - collected_number divisor; // the divisor of the collected values - - uint32_t flags; // configuration flags for the dimension - - // ------------------------------------------------------------------------ - // members for temporary data we need for calculations - - uint32_t hash; // a simple hash of the id, to speed up searching / indexing - // instead of strcmp() every item in the binary index - // we first compare the hashes - - uint32_t hash_name; // a simple hash of the name - - char *cache_filename; // the filename we load/save from/to this set - - size_t collections_counter; // the number of times we added values to this rrddim - size_t unused[10]; - - int updated:1; // 1 when the dimension has been updated since the last processing - int exposed:1; // 1 when we have sent this dimension to the central netdata - - struct timeval last_collected_time; // when was this dimension last updated - // this is the actual date and time we updated the last_collected_value - // THIS IS DIFFERENT FROM THE SAME MEMBER OF RRDSET - - calculated_number calculated_value; // the current calculated value, after applying the algorithm - resets to zero after being used - calculated_number last_calculated_value; // the last calculated value processed - - calculated_number last_stored_value; // the last value as stored in the database (after interpolation) - - collected_number collected_value; // the current value, as collected - resets to 0 after being used - collected_number last_collected_value; // the last value that was collected, after being processed - - // the *_volume members are used to calculate the accuracy of the rounding done by the - // storage number - they are printed to debug.log when debug is enabled for a set. - calculated_number collected_volume; // the sum of all collected values so far - calculated_number stored_volume; // the sum of all stored values so far - - struct rrddim *next; // linking of dimensions within the same data set - struct rrdset *rrdset; - - // ------------------------------------------------------------------------ - // members for checking the data when loading from disk - - long entries; // how many entries this dimension has in ram - // this is the same as the entries of the data set - // we set it here, to check the data when we load it from disk.
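- // (together with update_every, memsize and the magic string that
- // follow, these members let a file restored from disk be validated
- // before it is reused)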
- - int update_every; // every how many seconds is this updated - - size_t memsize; // the memory allocated for this dimension - - char magic[sizeof(RRDDIMENSION_MAGIC) + 1]; // a string to be saved, used to identify our data file - - struct rrddimvar *variables; - - // ------------------------------------------------------------------------ - // the values stored in this dimension, using our floating point numbers - - storage_number values[]; // the array of values - THIS HAS TO BE THE LAST MEMBER -}; -typedef struct rrddim RRDDIM; - -// ---------------------------------------------------------------------------- -// these loop macros make sure the linked list is accessed with the right lock - -#define rrddim_foreach_read(rd, st) \ - for((rd) = (st)->dimensions, rrdset_check_rdlock(st); (rd) ; (rd) = (rd)->next) - -#define rrddim_foreach_write(rd, st) \ - for((rd) = (st)->dimensions, rrdset_check_wrlock(st); (rd) ; (rd) = (rd)->next) - - -// ---------------------------------------------------------------------------- -// RRDSET - this is a chart - -// use this for configuration flags, not for state control -// flags are set/unset in a manner that is not thread safe -// and may lead to missing information. - -typedef enum rrdset_flags { - RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart - RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another - // (the master data set should be the one that has the same family and is not detail) - RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart - RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete - RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends - RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends - RRDSET_FLAG_EXPOSED_UPSTREAM = 1 << 6, // if set, we have sent this chart to netdata master (streaming) - RRDSET_FLAG_STORE_FIRST = 1 << 7, // if set, do not eliminate the first collection during interpolation - RRDSET_FLAG_HETEROGENEOUS = 1 << 8, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or divisors) - RRDSET_FLAG_HOMEGENEOUS_CHECK= 1 << 9, // if set, the chart should be checked to determine if the dimensions are homogeneous - RRDSET_FLAG_HIDDEN = 1 << 10, // if set, do not show this chart on the dashboard, but use it for backends -} RRDSET_FLAGS; - -#ifdef HAVE_C___ATOMIC -#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag)) -#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST) -#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST) -#else -#define rrdset_flag_check(st, flag) ((st)->flags & (flag)) -#define rrdset_flag_set(st, flag) (st)->flags |= (flag) -#define rrdset_flag_clear(st, flag) (st)->flags &= ~(flag) -#endif - -struct rrdset { - // ------------------------------------------------------------------------ - // binary indexing structures - - avl avl; // the index, with key the id - this has to be first!
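- // (avlname below is a second index node for the same chart, keyed by
- // name; since it is not at offset zero, the by-name index code has to
- // translate between the avlname address and the containing RRDSET)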
- avl avlname; // the index, with key the name - - // ------------------------------------------------------------------------ - // the set configuration - - char id[RRD_ID_LENGTH_MAX + 1]; // id of the data set - - const char *name; // the name of this chart (as presented to user) - // this is a pointer to the config structure - // since the config always has a higher priority - // (the user overwrites the name of the charts) - - char *config_section; // the config section for the chart - - char *type; // the type of graph RRD_TYPE_* (a category, for determining graphing options) - char *family; // grouping sets under the same family - char *title; // title shown to user - char *units; // units of measurement - - char *context; // the template of this data set - uint32_t hash_context; // the hash of the chart's context - - RRDSET_TYPE chart_type; // line, area, stacked - - int update_every; // every how many seconds is this updated? - - long entries; // total number of entries in the data set - - long current_entry; // the entry that is currently being updated - // it goes around in a round-robin fashion - - uint32_t flags; // configuration flags - - int gap_when_lost_iterations_above; // after how many lost iterations a gap should be stored - // netdata will interpolate values for gaps lower than this - - long priority; // the sorting priority of this chart - - - // ------------------------------------------------------------------------ - // members for temporary data we need for calculations - - RRD_MEMORY_MODE rrd_memory_mode; // the memory mode of this chart - - char *cache_dir; // the directory to store dimensions - char cache_filename[FILENAME_MAX+1]; // the filename to store this set - - netdata_rwlock_t rrdset_rwlock; // protects dimensions linked list - - size_t counter; // the number of times we added values to this database - size_t counter_done; // the number of times rrdset_done() has been called - - time_t last_accessed_time; // the last time this RRDSET has been accessed - time_t upstream_resync_time; // the timestamp up to which we should resync clock upstream - - char *plugin_name; // the name of the plugin that generated this - char *module_name; // the name of the plugin module that generated this - - size_t unused[6]; - - uint32_t hash; // a simple hash on the id, to speed up searching - // we first compare hashes, and only if the hashes are equal we do string comparisons - - uint32_t hash_name; // a simple hash on the name - - usec_t usec_since_last_update; // the time in microseconds since the last collection of data - - struct timeval last_updated; // when this data set was last updated (updated every time rrdset_done() runs) - struct timeval last_collected_time; // when this data set last collected values - - total_number collected_total; // used internally to calculate percentages - total_number last_collected_total; // used internally to calculate percentages - - RRDFAMILY *rrdfamily; // pointer to RRDFAMILY this chart belongs to - RRDHOST *rrdhost; // pointer to RRDHOST this chart belongs to - - struct rrdset *next; // linking of rrdsets - - // ------------------------------------------------------------------------ - // local variables - - calculated_number green; // green threshold for this chart - calculated_number red; // red threshold for this chart - - avl_tree_lock rrdvar_root_index; // RRDVAR index for this chart - RRDSETVAR *variables; // RRDSETVAR linked list for this chart (one RRDSETVAR, many RRDVARs) - RRDCALC *alarms; //
RRDCALC linked list for this chart - - // ------------------------------------------------------------------------ - // members for checking the data when loading from disk - - unsigned long memsize; // how much mem we have allocated for this (without dimensions) - - char magic[sizeof(RRDSET_MAGIC) + 1]; // our magic - - // ------------------------------------------------------------------------ - // the dimensions - - avl_tree_lock dimensions_index; // the root of the dimensions index - RRDDIM *dimensions; // the actual data for every dimension - -}; -typedef struct rrdset RRDSET; - -#define rrdset_rdlock(st) netdata_rwlock_rdlock(&((st)->rrdset_rwlock)) -#define rrdset_wrlock(st) netdata_rwlock_wrlock(&((st)->rrdset_rwlock)) -#define rrdset_unlock(st) netdata_rwlock_unlock(&((st)->rrdset_rwlock)) - - -// ---------------------------------------------------------------------------- -// these loop macros make sure the linked list is accessed with the right lock - -#define rrdset_foreach_read(st, host) \ - for((st) = (host)->rrdset_root, rrdhost_check_rdlock(host); st ; (st) = (st)->next) - -#define rrdset_foreach_write(st, host) \ - for((st) = (host)->rrdset_root, rrdhost_check_wrlock(host); st ; (st) = (st)->next) - - -// ---------------------------------------------------------------------------- -// RRDHOST flags -// use this for configuration flags, not for state control -// flags are set/unset in a manner that is not thread safe -// and may lead to missing information. - -typedef enum rrdhost_flags { - RRDHOST_FLAG_ORPHAN = 1 << 0, // this host is orphan (not receiving data) - RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts - RRDHOST_FLAG_DELETE_ORPHAN_HOST = 1 << 2, // delete the entire host when orphan - RRDHOST_FLAG_BACKEND_SEND = 1 << 3, // send it to backends - RRDHOST_FLAG_BACKEND_DONT_SEND = 1 << 4, // don't send it to backends -} RRDHOST_FLAGS; - -#ifdef HAVE_C___ATOMIC -#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag)) -#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST) -#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST) -#else -#define rrdhost_flag_check(host, flag) ((host)->flags & (flag)) -#define rrdhost_flag_set(host, flag) (host)->flags |= (flag) -#define rrdhost_flag_clear(host, flag) (host)->flags &= ~(flag) -#endif - -#ifdef NETDATA_INTERNAL_CHECKS -#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \ - debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, st->name, ##args); } while(0) -#else -#define rrdset_debug(st, fmt, args...) 
debug_dummy() -#endif - - // ---------------------------------------------------------------------------- -// RRD HOST - -struct rrdhost { - avl avl; // the index of hosts - - // ------------------------------------------------------------------------ - // host information - - char *hostname; // the hostname of this host - uint32_t hash_hostname; // the hostname hash - - char *registry_hostname; // the registry hostname for this host - - char machine_guid[GUID_LEN + 1]; // the unique ID of this host - uint32_t hash_machine_guid; // the hash of the unique ID - - const char *os; // the O/S type of the host - const char *tags; // tags for this host - const char *timezone; // the timezone of the host - - uint32_t flags; // flags about this RRDHOST - - int rrd_update_every; // the update frequency of the host - long rrd_history_entries; // the number of history entries for the host's charts - RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for the charts of this host - - char *cache_dir; // the directory to save RRD cache files - char *varlib_dir; // the directory to save the health log - - char *program_name; // the program name that collects metrics for this host - char *program_version; // the program version that collects metrics for this host - - // ------------------------------------------------------------------------ - // streaming of data to remote hosts - rrdpush - - int rrdpush_send_enabled:1; // 1 when this host sends metrics to another netdata - char *rrdpush_send_destination; // where to send metrics to - char *rrdpush_send_api_key; // the api key at the receiving netdata - - // the following are state information for the threading - // streaming metrics from this netdata to an upstream netdata - volatile int rrdpush_sender_spawn:1; // 1 when the sender thread has been spawned - netdata_thread_t rrdpush_sender_thread; // the sender thread - - volatile int rrdpush_sender_connected:1; // 1 when the sender is ready to push metrics - int rrdpush_sender_socket; // the fd of the socket to the remote host, or -1 - - volatile int rrdpush_sender_error_shown:1; // 1 when we have logged a communication error - volatile int rrdpush_sender_join:1; // 1 when we have to join the sending thread - - // metrics may be collected asynchronously - // these synchronize all the threads willing to write to our sending buffer - netdata_mutex_t rrdpush_sender_buffer_mutex; // exclusive access to rrdpush_sender_buffer - int rrdpush_sender_pipe[2]; // collector to sender thread signaling - BUFFER *rrdpush_sender_buffer; // collector fills it, sender sends it - - - // ------------------------------------------------------------------------ - // streaming of data from remote hosts - rrdpush - - volatile size_t connected_senders; // when remote hosts are streaming to this - // host, this is the counter of connected clients - - time_t senders_disconnected_time; // the time the last sender was disconnected - - // ------------------------------------------------------------------------ - // health monitoring options - - int health_enabled:1; // 1 when this host has health enabled - time_t health_delay_up_to; // a timestamp to delay alarms processing up to - char *health_default_exec; // the full path of the alarms notifications program - char *health_default_recipient; // the default recipient for all alarms - char *health_log_filename; // the alarms event log filename - size_t health_log_entries_written; // the number of alarm events written to the alarms event log - FILE *health_log_fp; // the FILE pointer to
the open alarms event log file - - // all RRDCALCs are primarily allocated and linked here - // RRDCALCs may be linked to charts at any point - // (charts may or may not exist when these are loaded) - RRDCALC *alarms; - - ALARM_LOG health_log; // alarms historical events (event log) - uint32_t health_last_processed_id; // the last processed health id from the log - uint32_t health_max_unique_id; // the max alarm log unique id given for the host - uint32_t health_max_alarm_id; // the max alarm id given for the host - - // templates of alarms - // these are used to create alarms when charts - // are created or renamed, that match them - RRDCALCTEMPLATE *templates; - - - // ------------------------------------------------------------------------ - // the charts of the host - - RRDSET *rrdset_root; // the host charts - - - // ------------------------------------------------------------------------ - // locks - - netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list) - - // ------------------------------------------------------------------------ - // indexes - - avl_tree_lock rrdset_root_index; // the host's charts index (by id) - avl_tree_lock rrdset_root_index_name; // the host's charts index (by name) - - avl_tree_lock rrdfamily_root_index; // the host's chart families index - avl_tree_lock rrdvar_root_index; // the host's chart variables index - - struct rrdhost *next; -}; -extern RRDHOST *localhost; - -#define rrdhost_rdlock(host) netdata_rwlock_rdlock(&((host)->rrdhost_rwlock)) -#define rrdhost_wrlock(host) netdata_rwlock_wrlock(&((host)->rrdhost_rwlock)) -#define rrdhost_unlock(host) netdata_rwlock_unlock(&((host)->rrdhost_rwlock)) - -// ---------------------------------------------------------------------------- -// these loop macros make sure the linked list is accessed with the right lock - -#define rrdhost_foreach_read(var) \ - for((var) = localhost, rrd_check_rdlock(); var ; (var) = (var)->next) - -#define rrdhost_foreach_write(var) \ - for((var) = localhost, rrd_check_wrlock(); var ; (var) = (var)->next) - - -// ---------------------------------------------------------------------------- -// global lock for all RRDHOSTs - -extern netdata_rwlock_t rrd_rwlock; - -#define rrd_rdlock() netdata_rwlock_rdlock(&rrd_rwlock) -#define rrd_wrlock() netdata_rwlock_wrlock(&rrd_rwlock) -#define rrd_unlock() netdata_rwlock_unlock(&rrd_rwlock) - -// ---------------------------------------------------------------------------- - -extern size_t rrd_hosts_available; -extern time_t rrdhost_free_orphan_time; - -extern void rrd_init(char *hostname); - -extern RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash); -extern RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash); - -extern RRDHOST *rrdhost_find_or_create( - const char *hostname - , const char *registry_hostname - , const char *guid - , const char *os - , const char *timezone - , const char *tags - , const char *program_name - , const char *program_version - , int update_every - , long history - , RRD_MEMORY_MODE mode - , int health_enabled - , int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key -); - -#if defined(NETDATA_INTERNAL_CHECKS) && defined(NETDATA_VERIFY_LOCKS) -extern void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line); -extern void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line); -extern void __rrdset_check_rdlock(RRDSET *st, const 
char *file, const char *function, const unsigned long line); -extern void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line); -extern void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line); -extern void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line); - -#define rrdhost_check_rdlock(host) __rrdhost_check_rdlock(host, __FILE__, __FUNCTION__, __LINE__) -#define rrdhost_check_wrlock(host) __rrdhost_check_wrlock(host, __FILE__, __FUNCTION__, __LINE__) -#define rrdset_check_rdlock(st) __rrdset_check_rdlock(st, __FILE__, __FUNCTION__, __LINE__) -#define rrdset_check_wrlock(st) __rrdset_check_wrlock(st, __FILE__, __FUNCTION__, __LINE__) -#define rrd_check_rdlock() __rrd_check_rdlock(__FILE__, __FUNCTION__, __LINE__) -#define rrd_check_wrlock() __rrd_check_wrlock(__FILE__, __FUNCTION__, __LINE__) - -#else -#define rrdhost_check_rdlock(host) (void)0 -#define rrdhost_check_wrlock(host) (void)0 -#define rrdset_check_rdlock(st) (void)0 -#define rrdset_check_wrlock(st) (void)0 -#define rrd_check_rdlock() (void)0 -#define rrd_check_wrlock() (void)0 -#endif - -// ---------------------------------------------------------------------------- -// RRDSET functions - -extern int rrdset_set_name(RRDSET *st, const char *name); - -extern RRDSET *rrdset_create_custom(RRDHOST *host - , const char *type - , const char *id - , const char *name - , const char *family - , const char *context - , const char *title - , const char *units - , const char *plugin - , const char *module - , long priority - , int update_every - , RRDSET_TYPE chart_type - , RRD_MEMORY_MODE memory_mode - , long history_entries); - -#define rrdset_create(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \ - rrdset_create_custom(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type, (host)->rrd_memory_mode, (host)->rrd_history_entries) - -#define rrdset_create_localhost(type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \ - rrdset_create(localhost, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) - -extern void rrdhost_free_all(void); -extern void rrdhost_save_all(void); -extern void rrdhost_cleanup_all(void); - -extern void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected); -extern void rrdhost_free(RRDHOST *host); -extern void rrdhost_save_charts(RRDHOST *host); -extern void rrdhost_delete_charts(RRDHOST *host); - -extern int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now); - -extern void rrdset_update_heterogeneous_flag(RRDSET *st); - -extern RRDSET *rrdset_find(RRDHOST *host, const char *id); -#define rrdset_find_localhost(id) rrdset_find(localhost, id) - -extern RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id); -#define rrdset_find_bytype_localhost(type, id) rrdset_find_bytype(localhost, type, id) - -extern RRDSET *rrdset_find_byname(RRDHOST *host, const char *name); -#define rrdset_find_byname_localhost(name) rrdset_find_byname(localhost, name) - -extern void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds); -extern void rrdset_next_usec(RRDSET *st, usec_t microseconds); -#define rrdset_next(st) rrdset_next_usec(st, 0ULL) - -extern void rrdset_done(RRDSET *st); - -extern void rrdset_is_obsolete(RRDSET *st); -extern void rrdset_isnot_obsolete(RRDSET *st); 
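- // A minimal usage sketch of the chart functions above and the
- // dimension functions further below, as a collector would run them
- // once per iteration (illustrative only - the "example/random" chart,
- // its ids and the collected value are invented for this sketch):
- //
- //     static RRDSET *st = NULL;
- //     static RRDDIM *rd = NULL;
- //
- //     if(unlikely(!st)) {
- //         st = rrdset_create_localhost("example", "random", NULL, "random"
- //                 , NULL, "Random Number", "number", "example", NULL
- //                 , 1000, 1, RRDSET_TYPE_LINE);
- //         rd = rrddim_add(st, "random", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- //     }
- //     else rrdset_next(st);   // advance the chart to the new interval
- //
- //     rrddim_set_by_pointer(st, rd, (collected_number)(rand() % 100));
- //     rrdset_done(st);        // interpolate and store this collection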
- -// checks if the RRDSET should be offered to viewers -#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE) -#define rrdset_is_available_for_backends(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions) - -// get the total duration in seconds of the round robin database -#define rrdset_duration(st) ((time_t)( (((st)->counter >= ((unsigned long)(st)->entries))?(unsigned long)(st)->entries:(st)->counter) * (st)->update_every )) - -// get the timestamp of the last entry in the round robin database -#define rrdset_last_entry_t(st) ((time_t)(((st)->last_updated.tv_sec))) - -// get the timestamp of first entry in the round robin database -#define rrdset_first_entry_t(st) ((time_t)(rrdset_last_entry_t(st) - rrdset_duration(st))) - -// get the last slot updated in the round robin database -#define rrdset_last_slot(st) ((unsigned long)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1)) - -// get the first / oldest slot updated in the round robin database -#define rrdset_first_slot(st) ((unsigned long)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? ((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) )) - -// get the slot of the round robin database, for the given timestamp (t) -// it always returns a valid slot, although may not be for the time requested if the time is outside the round robin database -#define rrdset_time2slot(st, t) ( \ - ( (time_t)(t) >= rrdset_last_entry_t(st)) ? ( rrdset_last_slot(st) ) : \ - ( ((time_t)(t) <= rrdset_first_entry_t(st)) ? rrdset_first_slot(st) : \ - ( (rrdset_last_slot(st) >= (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) ? \ - (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) : \ - (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) + (unsigned long)(st)->entries ) \ - ))) - -// get the timestamp of a specific slot in the round robin database -#define rrdset_slot2time(st, slot) ( rrdset_last_entry_t(st) - \ - ((unsigned long)(st)->update_every * ( \ - ( (unsigned long)(slot) > rrdset_last_slot(st)) ? 
\ - ( (rrdset_last_slot(st) - (unsigned long)(slot) + (unsigned long)(st)->entries) ) : \ - ( (rrdset_last_slot(st) - (unsigned long)(slot)) )) \ - )) - -// ---------------------------------------------------------------------------- -// RRD DIMENSION functions - -extern RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode); -#define rrddim_add(st, id, name, multiplier, divisor, algorithm) rrddim_add_custom(st, id, name, multiplier, divisor, algorithm, (st)->rrd_memory_mode) - -extern int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name); -extern int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm); -extern int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier); -extern int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor); - -extern RRDDIM *rrddim_find(RRDSET *st, const char *id); - -extern int rrddim_hide(RRDSET *st, const char *id); -extern int rrddim_unhide(RRDSET *st, const char *id); - -extern collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value); -extern collected_number rrddim_set(RRDSET *st, const char *id, collected_number value); - -extern long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries); - -// ---------------------------------------------------------------------------- -// RRD internal functions - -#ifdef NETDATA_RRD_INTERNALS - -extern avl_tree_lock rrdhost_root_index; - -extern char *rrdset_strncpyz_name(char *to, const char *from, size_t length); -extern char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section); - -extern void rrddim_free(RRDSET *st, RRDDIM *rd); - -extern int rrddim_compare(void* a, void* b); -extern int rrdset_compare(void* a, void* b); -extern int rrdset_compare_name(void* a, void* b); -extern int rrdfamily_compare(void *a, void *b); - -extern RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id); -extern void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc); - -#define rrdset_index_add(host, st) (RRDSET *)avl_insert_lock(&((host)->rrdset_root_index), (avl *)(st)) -#define rrdset_index_del(host, st) (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index), (avl *)(st)) -extern RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st); - -extern void rrdset_free(RRDSET *st); -extern void rrdset_reset(RRDSET *st); -extern void rrdset_save(RRDSET *st); -extern void rrdset_delete(RRDSET *st); - -extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host); - -#endif /* NETDATA_RRD_INTERNALS */ - - -#endif /* NETDATA_RRD_H */ diff --git a/src/rrd2json.c b/src/rrd2json.c deleted file mode 100644 index 24b3da340..000000000 --- a/src/rrd2json.c +++ /dev/null @@ -1,2059 +0,0 @@ -#include "common.h" - -void rrd_stats_api_v1_chart_with_data(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used) -{ - rrdset_rdlock(st); - - buffer_sprintf(wb, - "\t\t{\n" - "\t\t\t\"id\": \"%s\",\n" - "\t\t\t\"name\": \"%s\",\n" - "\t\t\t\"type\": \"%s\",\n" - "\t\t\t\"family\": \"%s\",\n" - "\t\t\t\"context\": \"%s\",\n" - "\t\t\t\"title\": \"%s (%s)\",\n" - "\t\t\t\"priority\": %ld,\n" - "\t\t\t\"plugin\": \"%s\",\n" - "\t\t\t\"module\": \"%s\",\n" - "\t\t\t\"enabled\": %s,\n" - "\t\t\t\"units\": \"%s\",\n" - "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n" - "\t\t\t\"chart_type\": \"%s\",\n" - "\t\t\t\"duration\": %ld,\n" - "\t\t\t\"first_entry\": %ld,\n" - "\t\t\t\"last_entry\": %ld,\n" - 
"\t\t\t\"update_every\": %d,\n" - "\t\t\t\"dimensions\": {\n" - , st->id - , st->name - , st->type - , st->family - , st->context - , st->title, st->name - , st->priority - , st->plugin_name?st->plugin_name:"" - , st->module_name?st->module_name:"" - , rrdset_flag_check(st, RRDSET_FLAG_ENABLED)?"true":"false" - , st->units - , st->name - , rrdset_type_name(st->chart_type) - , st->entries * st->update_every - , rrdset_first_entry_t(st) - , rrdset_last_entry_t(st) - , st->update_every - ); - - unsigned long memory = st->memsize; - - size_t dimensions = 0; - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) continue; - - memory += rd->memsize; - - buffer_sprintf( - wb - , "%s" - "\t\t\t\t\"%s\": { \"name\": \"%s\" }" - , dimensions ? ",\n" : "" - , rd->id - , rd->name - ); - - dimensions++; - } - - if(dimensions_count) *dimensions_count += dimensions; - if(memory_used) *memory_used += memory; - - buffer_strcat(wb, "\n\t\t\t},\n\t\t\t\"green\": "); - buffer_rrd_value(wb, st->green); - buffer_strcat(wb, ",\n\t\t\t\"red\": "); - buffer_rrd_value(wb, st->red); - - buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n"); - size_t alarms = 0; - RRDCALC *rc; - for(rc = st->alarms; rc ; rc = rc->rrdset_next) { - - buffer_sprintf( - wb - , "%s" - "\t\t\t\t\"%s\": {\n" - "\t\t\t\t\t\"id\": %u,\n" - "\t\t\t\t\t\"status\": \"%s\",\n" - "\t\t\t\t\t\"units\": \"%s\",\n" - "\t\t\t\t\t\"update_every\": %d\n" - "\t\t\t\t}" - , (alarms) ? ",\n" : "" - , rc->name - , rc->id - , rrdcalc_status2string(rc->status) - , rc->units - , rc->update_every - ); - - alarms++; - } - - buffer_sprintf(wb, - "\n\t\t\t}\n\t\t}" - ); - - rrdset_unlock(st); -} - -void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) { - rrd_stats_api_v1_chart_with_data(st, wb, NULL, NULL); -} - -void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb) { - static char *custom_dashboard_info_js_filename = NULL; - size_t c, dimensions = 0, memory = 0, alarms = 0; - RRDSET *st; - - time_t now = now_realtime_sec(); - - if(unlikely(!custom_dashboard_info_js_filename)) - custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", ""); - - buffer_sprintf(wb, "{\n" - "\t\"hostname\": \"%s\"" - ",\n\t\"version\": \"%s\"" - ",\n\t\"os\": \"%s\"" - ",\n\t\"timezone\": \"%s\"" - ",\n\t\"update_every\": %d" - ",\n\t\"history\": %ld" - ",\n\t\"custom_info\": \"%s\"" - ",\n\t\"charts\": {" - , host->hostname - , host->program_version - , host->os - , host->timezone - , host->rrd_update_every - , host->rrd_history_entries - , custom_dashboard_info_js_filename - ); - - c = 0; - rrdhost_rdlock(host); - rrdset_foreach_read(st, host) { - if(rrdset_is_available_for_viewers(st)) { - if(c) buffer_strcat(wb, ","); - buffer_strcat(wb, "\n\t\t\""); - buffer_strcat(wb, st->id); - buffer_strcat(wb, "\": "); - rrd_stats_api_v1_chart_with_data(st, wb, &dimensions, &memory); - - c++; - st->last_accessed_time = now; - } - } - - RRDCALC *rc; - for(rc = host->alarms; rc ; rc = rc->next) { - if(rc->rrdset) - alarms++; - } - rrdhost_unlock(host); - - buffer_sprintf(wb - , "\n\t}" - ",\n\t\"charts_count\": %zu" - ",\n\t\"dimensions_count\": %zu" - ",\n\t\"alarms_count\": %zu" - ",\n\t\"rrd_memory_bytes\": %zu" - ",\n\t\"hosts_count\": %zu" - ",\n\t\"hosts\": [" - , c - , dimensions - , alarms - , memory - , rrd_hosts_available - ); - - if(unlikely(rrd_hosts_available > 1)) { - rrd_rdlock(); - - size_t found = 0; - RRDHOST *h; - rrdhost_foreach_read(h) { - if(!rrdhost_should_be_removed(h, host, now)) { - buffer_sprintf(wb 
- , "%s\n\t\t{" - "\n\t\t\t\"hostname\": \"%s\"" - "\n\t\t}" - , (found > 0) ? "," : "" - , h->hostname - ); - - found++; - } - } - - rrd_unlock(); - } - else { - buffer_sprintf(wb - , "\n\t\t{" - "\n\t\t\t\"hostname\": \"%s\"" - "\n\t\t}" - , host->hostname - ); - } - - buffer_sprintf(wb, "\n\t]\n}\n"); -} - -// ---------------------------------------------------------------------------- -// BASH -// /api/v1/allmetrics?format=bash - -static inline size_t shell_name_copy(char *d, const char *s, size_t usable) { - size_t n; - - for(n = 0; *s && n < usable ; d++, s++, n++) { - register char c = *s; - - if(unlikely(!isalnum(c))) *d = '_'; - else *d = (char)toupper(c); - } - *d = '\0'; - - return n; -} - -#define SHELL_ELEMENT_MAX 100 - -void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) { - rrdhost_rdlock(host); - - // for each chart - RRDSET *st; - rrdset_foreach_read(st, host) { - calculated_number total = 0.0; - char chart[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX); - - buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", st->id, st->name); - if(rrdset_is_available_for_viewers(st)) { - rrdset_rdlock(st); - - // for each dimension - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rd->collections_counter) { - char dimension[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX); - - calculated_number n = rd->last_stored_value; - - if(isnan(n) || isinf(n)) - buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units); - else { - if(rd->multiplier < 0 || rd->divisor < 0) n = -n; - n = calculated_number_round(n); - if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n; - buffer_sprintf(wb, "NETDATA_%s_%s=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units); - } - } - } - - total = calculated_number_round(total); - buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, total, st->units); - rrdset_unlock(st); - } - } - - buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n"); - - RRDCALC *rc; - for(rc = host->alarms; rc ;rc = rc->next) { - if(!rc->rrdset) continue; - - char chart[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(chart, rc->rrdset->name?rc->rrdset->name:rc->rrdset->id, SHELL_ELEMENT_MAX); - - char alarm[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX); - - calculated_number n = rc->value; - - if(isnan(n) || isinf(n)) - buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units); - else { - n = calculated_number_round(n); - buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units); - } - - buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status)); - } - - rrdhost_unlock(host); -} - -// ---------------------------------------------------------------------------- - -void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb) { - rrdhost_rdlock(host); - - buffer_strcat(wb, "{"); - - size_t chart_counter = 0; - size_t dimension_counter = 0; - - // for each chart - RRDSET *st; - rrdset_foreach_read(st, host) { - if(rrdset_is_available_for_viewers(st)) { - rrdset_rdlock(st); - - buffer_sprintf(wb, "%s\n" - "\t\"%s\": {\n" - "\t\t\"name\":\"%s\",\n" - "\t\t\"context\":\"%s\",\n" - "\t\t\"units\":\"%s\",\n" - "\t\t\"last_updated\": %ld,\n" - "\t\t\"dimensions\": {" - , chart_counter?",":"" - , st->id - , st->name - , st->context 
- , st->units - , rrdset_last_entry_t(st) - ); - - chart_counter++; - dimension_counter = 0; - - // for each dimension - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rd->collections_counter) { - - buffer_sprintf(wb, "%s\n" - "\t\t\t\"%s\": {\n" - "\t\t\t\t\"name\": \"%s\",\n" - "\t\t\t\t\"value\": " - , dimension_counter?",":"" - , rd->id - , rd->name - ); - - if(isnan(rd->last_stored_value)) - buffer_strcat(wb, "null"); - else - buffer_sprintf(wb, CALCULATED_NUMBER_FORMAT, rd->last_stored_value); - - buffer_strcat(wb, "\n\t\t\t}"); - - dimension_counter++; - } - } - - buffer_strcat(wb, "\n\t\t}\n\t}"); - rrdset_unlock(st); - } - } - - buffer_strcat(wb, "\n}"); - rrdhost_unlock(host); -} - -// ---------------------------------------------------------------------------- - -// RRDR dimension options -#define RRDR_EMPTY 0x01 // the dimension contains / the value is empty (null) -#define RRDR_RESET 0x02 // the dimension contains / the value is reset -#define RRDR_HIDDEN 0x04 // the dimension contains / the value is hidden -#define RRDR_NONZERO 0x08 // the dimension contains / the value is non-zero -#define RRDR_SELECTED 0x10 // the dimension is selected - -// RRDR result options -#define RRDR_RESULT_OPTION_ABSOLUTE 0x00000001 -#define RRDR_RESULT_OPTION_RELATIVE 0x00000002 - -typedef struct rrdresult { - RRDSET *st; // the chart this result refers to - - uint32_t result_options; // RRDR_RESULT_OPTION_* - - int d; // the number of dimensions - long n; // the number of values in the arrays - long rows; // the number of rows used - - uint8_t *od; // the options for the dimensions - - time_t *t; // array of n timestamps - calculated_number *v; // array n x d values - uint8_t *o; // array n x d options - - long c; // current line ( -1 ~ n ), ( -1 = none, use rrdr_rows() to get number of rows ) - - long group; // how many collected values were grouped for each row - int update_every; // what is the suggested update frequency in seconds - - calculated_number min; - calculated_number max; - - time_t before; - time_t after; - - int has_st_lock; // if st is read locked by us -} RRDR; - -#define rrdr_rows(r) ((r)->rows) - -/* -static void rrdr_dump(RRDR *r) -{ - long c, i; - RRDDIM *d; - - fprintf(stderr, "\nCHART %s (%s)\n", r->st->id, r->st->name); - - for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) { - fprintf(stderr, "DIMENSION %s (%s), %s%s%s%s\n" - , d->id - , d->name - , (r->od[c] & RRDR_EMPTY)?"EMPTY ":"" - , (r->od[c] & RRDR_RESET)?"RESET ":"" - , (r->od[c] & RRDR_HIDDEN)?"HIDDEN ":"" - , (r->od[c] & RRDR_NONZERO)?"NONZERO ":"" - ); - } - - if(r->rows <= 0) { - fprintf(stderr, "RRDR does not have any values in it.\n"); - return; - } - - fprintf(stderr, "RRDR includes %d values in it:\n", r->rows); - - // for each line in the array - for(i = 0; i < r->rows ;i++) { - calculated_number *cn = &r->v[ i * r->d ]; - uint8_t *co = &r->o[ i * r->d ]; - - // print the id and the timestamp of the line - fprintf(stderr, "%ld %ld ", i + 1, r->t[i]); - - // for each dimension - for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely(!(r->od[c] & RRDR_NONZERO))) continue; - - if(co[c] & RRDR_EMPTY) - fprintf(stderr, "null "); - else - fprintf(stderr, CALCULATED_NUMBER_FORMAT " %s%s%s%s " - , cn[c] - , (co[c] & RRDR_EMPTY)?"E":" " - , (co[c] & RRDR_RESET)?"R":" " - , (co[c] & RRDR_HIDDEN)?"H":" " - , (co[c] & RRDR_NONZERO)?"N":" " - ); - } - - fprintf(stderr, "\n"); - } -} -*/ - -void rrdr_disable_not_selected_dimensions(RRDR *r, 
uint32_t options, const char *dims) { - rrdset_check_rdlock(r->st); - - if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return; - - int match_ids = 0, match_names = 0; - - if(unlikely(options & RRDR_OPTION_MATCH_IDS)) - match_ids = 1; - if(unlikely(options & RRDR_OPTION_MATCH_NAMES)) - match_names = 1; - - if(likely(!match_ids && !match_names)) - match_ids = match_names = 1; - - SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT); - - RRDDIM *d; - long c, dims_selected = 0, dims_not_hidden_not_zero = 0; - for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) { - if( (match_ids && simple_pattern_matches(pattern, d->id)) - || (match_names && simple_pattern_matches(pattern, d->name)) - ) { - r->od[c] |= RRDR_SELECTED; - if(unlikely(r->od[c] & RRDR_HIDDEN)) r->od[c] &= ~RRDR_HIDDEN; - dims_selected++; - - // since the user needs this dimension - // make it appear as NONZERO, to return it - // even if the dimension has only zeros - // unless option non_zero is set - if(unlikely(!(options & RRDR_OPTION_NONZERO))) - r->od[c] |= RRDR_NONZERO; - - // count the visible dimensions - if(likely(r->od[c] & RRDR_NONZERO)) - dims_not_hidden_not_zero++; - } - else { - r->od[c] |= RRDR_HIDDEN; - if(unlikely(r->od[c] & RRDR_SELECTED)) r->od[c] &= ~RRDR_SELECTED; - } - } - simple_pattern_free(pattern); - - // check if all dimensions are hidden - if(unlikely(!dims_not_hidden_not_zero && dims_selected)) { - // there are a few selected dimensions - // but they are all zero - // enable the selected ones - // to avoid returning an empty chart - for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) - if(unlikely(r->od[c] & RRDR_SELECTED)) - r->od[c] |= RRDR_NONZERO; - } -} - -void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) -{ - switch(format) { - case DATASOURCE_JSON: - buffer_strcat(wb, DATASOURCE_FORMAT_JSON); - break; - - case DATASOURCE_DATATABLE_JSON: - buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSON); - break; - - case DATASOURCE_DATATABLE_JSONP: - buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSONP); - break; - - case DATASOURCE_JSONP: - buffer_strcat(wb, DATASOURCE_FORMAT_JSONP); - break; - - case DATASOURCE_SSV: - buffer_strcat(wb, DATASOURCE_FORMAT_SSV); - break; - - case DATASOURCE_CSV: - buffer_strcat(wb, DATASOURCE_FORMAT_CSV); - break; - - case DATASOURCE_TSV: - buffer_strcat(wb, DATASOURCE_FORMAT_TSV); - break; - - case DATASOURCE_HTML: - buffer_strcat(wb, DATASOURCE_FORMAT_HTML); - break; - - case DATASOURCE_JS_ARRAY: - buffer_strcat(wb, DATASOURCE_FORMAT_JS_ARRAY); - break; - - case DATASOURCE_SSV_COMMA: - buffer_strcat(wb, DATASOURCE_FORMAT_SSV_COMMA); - break; - - default: - buffer_strcat(wb, "unknown"); - break; - } -} - -uint32_t rrdr_check_options(RRDR *r, uint32_t options, const char *dims) -{ - rrdset_check_rdlock(r->st); - - (void)dims; - - if(options & RRDR_OPTION_NONZERO) { - long i; - - // commented due to #1514 - - //if(dims && *dims) { - // the caller wants specific dimensions - // disable NONZERO option - // to make sure we don't accidentally prevent - // the specific dimensions from being returned - // i = 0; - //} - //else { - // find how many dimensions are not zero - long c; - RRDDIM *rd; - for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ; c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely(!(r->od[c] & RRDR_NONZERO))) continue; - i++; - } - //} - - // if with nonzero we get i = 0 (no dimensions will be returned) - // disable nonzero to show all dimensions - 
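        // a minimal illustration (hypothetical caller, not from these sources):
        // when every visible dimension of the chart is all-zero, the loop above
        // leaves i == 0, so the line below clears the flag, e.g.
        //     options = rrdr_check_options(r, RRDR_OPTION_NONZERO, NULL);
        // returns options without RRDR_OPTION_NONZERO, making the API emit all
        // dimensions instead of an empty result.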
if(!i) options &= ~RRDR_OPTION_NONZERO; - } - - return options; -} - -void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) -{ - rrdset_check_rdlock(r->st); - - long rows = rrdr_rows(r); - long c, i; - RRDDIM *rd; - - //info("JSONWRAPPER(): %s: BEGIN", r->st->id); - char kq[2] = "", // key quote - sq[2] = ""; // string quote - - if( options & RRDR_OPTION_GOOGLE_JSON ) { - kq[0] = '\0'; - sq[0] = '\''; - } - else { - kq[0] = '"'; - sq[0] = '"'; - } - - buffer_sprintf(wb, "{\n" - " %sapi%s: 1,\n" - " %sid%s: %s%s%s,\n" - " %sname%s: %s%s%s,\n" - " %sview_update_every%s: %d,\n" - " %supdate_every%s: %d,\n" - " %sfirst_entry%s: %u,\n" - " %slast_entry%s: %u,\n" - " %sbefore%s: %u,\n" - " %safter%s: %u,\n" - " %sdimension_names%s: [" - , kq, kq - , kq, kq, sq, r->st->id, sq - , kq, kq, sq, r->st->name, sq - , kq, kq, r->update_every - , kq, kq, r->st->update_every - , kq, kq, (uint32_t)rrdset_first_entry_t(r->st) - , kq, kq, (uint32_t)rrdset_last_entry_t(r->st) - , kq, kq, (uint32_t)r->before - , kq, kq, (uint32_t)r->after - , kq, kq); - - for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - if(i) buffer_strcat(wb, ", "); - buffer_strcat(wb, sq); - buffer_strcat(wb, rd->name); - buffer_strcat(wb, sq); - i++; - } - if(!i) { -#ifdef NETDATA_INTERNAL_CHECKS - error("RRDR is empty for %s (RRDR has %d dimensions, options is 0x%08x)", r->st->id, r->d, options); -#endif - rows = 0; - buffer_strcat(wb, sq); - buffer_strcat(wb, "no data"); - buffer_strcat(wb, sq); - } - - buffer_sprintf(wb, "],\n" - " %sdimension_ids%s: [" - , kq, kq); - - for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - if(i) buffer_strcat(wb, ", "); - buffer_strcat(wb, sq); - buffer_strcat(wb, rd->id); - buffer_strcat(wb, sq); - i++; - } - if(!i) { - rows = 0; - buffer_strcat(wb, sq); - buffer_strcat(wb, "no data"); - buffer_strcat(wb, sq); - } - - buffer_sprintf(wb, "],\n" - " %slatest_values%s: [" - , kq, kq); - - for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - if(i) buffer_strcat(wb, ", "); - i++; - - storage_number n = rd->values[rrdset_last_slot(r->st)]; - - if(!does_storage_number_exist(n)) - buffer_strcat(wb, "null"); - else - buffer_rrd_value(wb, unpack_storage_number(n)); - } - if(!i) { - rows = 0; - buffer_strcat(wb, "null"); - } - - buffer_sprintf(wb, "],\n" - " %sview_latest_values%s: [" - , kq, kq); - - i = 0; - if(rows) { - calculated_number total = 1; - - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - total = 0; - for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - calculated_number *cn = &r->v[ (0) * r->d ]; - calculated_number n = cn[c]; - - if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - total += n; - } - // prevent a division by zero - if(total == 0) total = 1; - } - - for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - if(i) buffer_strcat(wb, ", "); - 
i++; - - calculated_number *cn = &r->v[ (0) * r->d ]; - uint8_t *co = &r->o[ (0) * r->d ]; - calculated_number n = cn[c]; - - if(co[c] & RRDR_EMPTY) { - if(options & RRDR_OPTION_NULL2ZERO) - buffer_strcat(wb, "0"); - else - buffer_strcat(wb, "null"); - } - else { - if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) - n = n * 100 / total; - - buffer_rrd_value(wb, n); - } - } - } - if(!i) { - rows = 0; - buffer_strcat(wb, "null"); - } - - buffer_sprintf(wb, "],\n" - " %sdimensions%s: %ld,\n" - " %spoints%s: %ld,\n" - " %sformat%s: %s" - , kq, kq, i - , kq, kq, rows - , kq, kq, sq - ); - - rrdr_buffer_print_format(wb, format); - - buffer_sprintf(wb, "%s,\n" - " %sresult%s: " - , sq - , kq, kq - ); - - if(string_value) buffer_strcat(wb, sq); - //info("JSONWRAPPER(): %s: END", r->st->id); -} - -void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) -{ - (void)format; - - char kq[2] = "", // key quote - sq[2] = ""; // string quote - - if( options & RRDR_OPTION_GOOGLE_JSON ) { - kq[0] = '\0'; - sq[0] = '\''; - } - else { - kq[0] = '"'; - sq[0] = '"'; - } - - if(string_value) buffer_strcat(wb, sq); - - buffer_sprintf(wb, ",\n %smin%s: ", kq, kq); - buffer_rrd_value(wb, r->min); - buffer_sprintf(wb, ",\n %smax%s: ", kq, kq); - buffer_rrd_value(wb, r->max); - buffer_strcat(wb, "\n}\n"); -} - -#define JSON_DATES_JS 1 -#define JSON_DATES_TIMESTAMP 2 - -static void rrdr2json(RRDR *r, BUFFER *wb, uint32_t options, int datatable) -{ - rrdset_check_rdlock(r->st); - - //info("RRD2JSON(): %s: BEGIN", r->st->id); - int row_annotations = 0, dates, dates_with_new = 0; - char kq[2] = "", // key quote - sq[2] = "", // string quote - pre_label[101] = "", // before each label - post_label[101] = "", // after each label - pre_date[101] = "", // the beginning of line, to the date - post_date[101] = "", // closing the date - pre_value[101] = "", // before each value - post_value[101] = "", // after each value - post_line[101] = "", // at the end of each row - normal_annotation[201] = "", // default row annotation - overflow_annotation[201] = "", // overflow row annotation - data_begin[101] = "", // between labels and values - finish[101] = ""; // at the end of everything - - if(datatable) { - dates = JSON_DATES_JS; - if( options & RRDR_OPTION_GOOGLE_JSON ) { - kq[0] = '\0'; - sq[0] = '\''; - } - else { - kq[0] = '"'; - sq[0] = '"'; - } - row_annotations = 1; - snprintfz(pre_date, 100, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq); - snprintfz(post_date, 100, "%s}", sq); - snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq); - snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq); - snprintfz(pre_value, 100, ",{%sv%s:", kq, kq); - strcpy(post_value, "}"); - strcpy(post_line, "]}"); - snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq); - strcpy(finish, "\n ]\n}"); - - snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq); - snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq); - - buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq); - buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq); - buffer_sprintf(wb, " 
{%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); - buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); - - // remove the valueobjects flag - // google wants its own keys - if(options & RRDR_OPTION_OBJECTSROWS) - options &= ~RRDR_OPTION_OBJECTSROWS; - } - else { - kq[0] = '"'; - sq[0] = '"'; - if(options & RRDR_OPTION_GOOGLE_JSON) { - dates = JSON_DATES_JS; - dates_with_new = 1; - } - else { - dates = JSON_DATES_TIMESTAMP; - dates_with_new = 0; - } - if( options & RRDR_OPTION_OBJECTSROWS ) - strcpy(pre_date, " { "); - else - strcpy(pre_date, " [ "); - strcpy(pre_label, ", \""); - strcpy(post_label, "\""); - strcpy(pre_value, ", "); - if( options & RRDR_OPTION_OBJECTSROWS ) - strcpy(post_line, "}"); - else - strcpy(post_line, "]"); - snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq); - strcpy(finish, "\n ]\n}"); - - buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq); - buffer_sprintf(wb, "%stime%s", sq, sq); - } - - // ------------------------------------------------------------------------- - // print the JSON header - - long c, i; - RRDDIM *rd; - - // print the header lines - for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - buffer_strcat(wb, pre_label); - buffer_strcat(wb, rd->name); - buffer_strcat(wb, post_label); - i++; - } - if(!i) { - buffer_strcat(wb, pre_label); - buffer_strcat(wb, "no data"); - buffer_strcat(wb, post_label); - } - - // print the begin of row data - buffer_strcat(wb, data_begin); - - // if all dimensions are hidden, print a null - if(!i) { - buffer_strcat(wb, finish); - return; - } - - long start = 0, end = rrdr_rows(r), step = 1; - if((options & RRDR_OPTION_REVERSED)) { - start = rrdr_rows(r) - 1; - end = -1; - step = -1; - } - - // for each line in the array - calculated_number total = 1; - for(i = start; i != end ;i += step) { - calculated_number *cn = &r->v[ i * r->d ]; - uint8_t *co = &r->o[ i * r->d ]; - - time_t now = r->t[i]; - - if(dates == JSON_DATES_JS) { - // generate the local date time - struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); - if(!tm) { error("localtime_r() failed."); continue; } - - if(likely(i != start)) buffer_strcat(wb, ",\n"); - buffer_strcat(wb, pre_date); - - if( options & RRDR_OPTION_OBJECTSROWS ) - buffer_sprintf(wb, "%stime%s: ", kq, kq); - - if(dates_with_new) - buffer_strcat(wb, "new "); - - buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); - - buffer_strcat(wb, post_date); - - if(row_annotations) { - // google supports one annotation per row - int annotation_found = 0; - for(c = 0, rd = r->st->dimensions; rd ;c++, rd = rd->next) { - if(co[c] & RRDR_RESET) { - buffer_strcat(wb, overflow_annotation); - annotation_found = 1; - break; - } - } - if(!annotation_found) - buffer_strcat(wb, normal_annotation); - } - } - else { - // print the timestamp of the line - if(likely(i != start)) buffer_strcat(wb, ",\n"); - buffer_strcat(wb, pre_date); - - if( options & RRDR_OPTION_OBJECTSROWS ) - buffer_sprintf(wb, "%stime%s: ", kq, kq); - - buffer_rrd_value(wb, (calculated_number)r->t[i]); - // in ms - if(options & 
RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000"); - - buffer_strcat(wb, post_date); - } - - int set_min_max = 0; - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - total = 0; - for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - calculated_number n = cn[c]; - - if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - total += n; - } - // prevent a division by zero - if(total == 0) total = 1; - set_min_max = 1; - } - - // for each dimension - for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - calculated_number n = cn[c]; - - buffer_strcat(wb, pre_value); - - if( options & RRDR_OPTION_OBJECTSROWS ) - buffer_sprintf(wb, "%s%s%s: ", kq, rd->name, kq); - - if(co[c] & RRDR_EMPTY) { - if(options & RRDR_OPTION_NULL2ZERO) - buffer_strcat(wb, "0"); - else - buffer_strcat(wb, "null"); - } - else { - if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - n = n * 100 / total; - - if(unlikely(set_min_max)) { - r->min = r->max = n; - set_min_max = 0; - } - - if(n < r->min) r->min = n; - if(n > r->max) r->max = n; - } - - buffer_rrd_value(wb, n); - } - - buffer_strcat(wb, post_value); - } - - buffer_strcat(wb, post_line); - } - - buffer_strcat(wb, finish); - //info("RRD2JSON(): %s: END", r->st->id); -} - -static void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t options, const char *startline, const char *separator, const char *endline, const char *betweenlines) -{ - rrdset_check_rdlock(r->st); - - //info("RRD2CSV(): %s: BEGIN", r->st->id); - long c, i; - RRDDIM *d; - - // print the csv header - for(c = 0, i = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - if(!i) { - buffer_strcat(wb, startline); - if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); - buffer_strcat(wb, "time"); - if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); - } - buffer_strcat(wb, separator); - if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); - buffer_strcat(wb, d->name); - if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); - i++; - } - buffer_strcat(wb, endline); - - if(!i) { - // no dimensions present - return; - } - - long start = 0, end = rrdr_rows(r), step = 1; - if((options & RRDR_OPTION_REVERSED)) { - start = rrdr_rows(r) - 1; - end = -1; - step = -1; - } - - // for each line in the array - calculated_number total = 1; - for(i = start; i != end ;i += step) { - calculated_number *cn = &r->v[ i * r->d ]; - uint8_t *co = &r->o[ i * r->d ]; - - buffer_strcat(wb, betweenlines); - buffer_strcat(wb, startline); - - time_t now = r->t[i]; - - if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) { - // print the timestamp of the line - buffer_rrd_value(wb, (calculated_number)now); - // in ms - if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000"); - } - else { - // generate the local date time - struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); - if(!tm) { error("localtime_r() failed."); continue; } - buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); - } - - int set_min_max = 0; - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - total = 0; - for(c = 0, d = r->st->dimensions; d && c < r->d 
;c++, d = d->next) { - calculated_number n = cn[c]; - - if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - total += n; - } - // prevent a division by zero - if(total == 0) total = 1; - set_min_max = 1; - } - - // for each dimension - for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - buffer_strcat(wb, separator); - - calculated_number n = cn[c]; - - if(co[c] & RRDR_EMPTY) { - if(options & RRDR_OPTION_NULL2ZERO) - buffer_strcat(wb, "0"); - else - buffer_strcat(wb, "null"); - } - else { - if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - n = n * 100 / total; - - if(unlikely(set_min_max)) { - r->min = r->max = n; - set_min_max = 0; - } - - if(n < r->min) r->min = n; - if(n > r->max) r->max = n; - } - - buffer_rrd_value(wb, n); - } - } - - buffer_strcat(wb, endline); - } - //info("RRD2CSV(): %s: END", r->st->id); -} - -inline static calculated_number rrdr2value(RRDR *r, long i, uint32_t options, int *all_values_are_null) { - rrdset_check_rdlock(r->st); - - long c; - RRDDIM *d; - - calculated_number *cn = &r->v[ i * r->d ]; - uint8_t *co = &r->o[ i * r->d ]; - - calculated_number sum = 0, min = 0, max = 0, v; - int all_null = 1, init = 1; - - calculated_number total = 1; - int set_min_max = 0; - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - total = 0; - for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { - calculated_number n = cn[c]; - - if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - total += n; - } - // prevent a division by zero - if(total == 0) total = 1; - set_min_max = 1; - } - - // for each dimension - for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { - if(unlikely(r->od[c] & RRDR_HIDDEN)) continue; - if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_NONZERO))) continue; - - calculated_number n = cn[c]; - - if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) - n = -n; - - if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { - n = n * 100 / total; - - if(unlikely(set_min_max)) { - r->min = r->max = n; - set_min_max = 0; - } - - if(n < r->min) r->min = n; - if(n > r->max) r->max = n; - } - - if(unlikely(init)) { - if(n > 0) { - min = 0; - max = n; - } - else { - min = n; - max = 0; - } - init = 0; - } - - if(likely(!(co[c] & RRDR_EMPTY))) { - all_null = 0; - sum += n; - } - - if(n < min) min = n; - if(n > max) max = n; - } - - if(unlikely(all_null)) { - if(likely(all_values_are_null)) - *all_values_are_null = 1; - return 0; - } - else { - if(likely(all_values_are_null)) - *all_values_are_null = 0; - } - - if(options & RRDR_OPTION_MIN2MAX) - v = max - min; - else - v = sum; - - return v; -} - -static void rrdr2ssv(RRDR *r, BUFFER *wb, uint32_t options, const char *prefix, const char *separator, const char *suffix) -{ - //info("RRD2SSV(): %s: BEGIN", r->st->id); - long i; - - buffer_strcat(wb, prefix); - long start = 0, end = rrdr_rows(r), step = 1; - if((options & RRDR_OPTION_REVERSED)) { - start = rrdr_rows(r) - 1; - end = -1; - step = -1; - } - - // for each line in the array - for(i = start; i != end ;i += step) { - int all_values_are_null = 0; - calculated_number v = rrdr2value(r, i, options, &all_values_are_null); - - if(likely(i != start)) { - if(r->min > v) r->min = v; - if(r->max < v) r->max = v; - } - else { - r->min = v; - r->max = v; - } - - if(likely(i != start)) 
- buffer_strcat(wb, separator); - - if(all_values_are_null) { - if(options & RRDR_OPTION_NULL2ZERO) - buffer_strcat(wb, "0"); - else - buffer_strcat(wb, "null"); - } - else - buffer_rrd_value(wb, v); - } - buffer_strcat(wb, suffix); - //info("RRD2SSV(): %s: END", r->st->id); -} - -inline static calculated_number *rrdr_line_values(RRDR *r) -{ - return &r->v[ r->c * r->d ]; -} - -inline static uint8_t *rrdr_line_options(RRDR *r) -{ - return &r->o[ r->c * r->d ]; -} - -inline static int rrdr_line_init(RRDR *r, time_t t) -{ - r->c++; - - if(unlikely(r->c >= r->n)) { - error("requested to step above RRDR size for chart %s", r->st->name); - r->c = r->n - 1; - } - - // save the time - r->t[r->c] = t; - - return 1; -} - -inline static void rrdr_lock_rrdset(RRDR *r) { - if(unlikely(!r)) { - error("NULL value given!"); - return; - } - - rrdset_rdlock(r->st); - r->has_st_lock = 1; -} - -inline static void rrdr_unlock_rrdset(RRDR *r) { - if(unlikely(!r)) { - error("NULL value given!"); - return; - } - - if(likely(r->has_st_lock)) { - rrdset_unlock(r->st); - r->has_st_lock = 0; - } -} - -inline static void rrdr_free(RRDR *r) -{ - if(unlikely(!r)) { - error("NULL value given!"); - return; - } - - rrdr_unlock_rrdset(r); - freez(r->t); - freez(r->v); - freez(r->o); - freez(r->od); - freez(r); -} - -static inline void rrdr_done(RRDR *r) -{ - r->rows = r->c + 1; - r->c = 0; -} - -static RRDR *rrdr_create(RRDSET *st, long n) -{ - if(unlikely(!st)) { - error("NULL value given!"); - return NULL; - } - - RRDR *r = callocz(1, sizeof(RRDR)); - r->st = st; - - rrdr_lock_rrdset(r); - - RRDDIM *rd; - rrddim_foreach_read(rd, st) r->d++; - - r->n = n; - - r->t = mallocz(n * sizeof(time_t)); - r->v = mallocz(n * r->d * sizeof(calculated_number)); - r->o = mallocz(n * r->d * sizeof(uint8_t)); - r->od = mallocz(r->d * sizeof(uint8_t)); - - // set the hidden flag on hidden dimensions - int c; - for(c = 0, rd = st->dimensions ; rd ; c++, rd = rd->next) { - if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN))) - r->od[c] = RRDR_HIDDEN; - else - r->od[c] = 0; - } - - r->c = -1; - r->group = 1; - r->update_every = 1; - - return r; -} - -RRDR *rrd2rrdr(RRDSET *st, long points, long long after, long long before, int group_method, long group_time, int aligned) -{ -#ifdef NETDATA_INTERNAL_CHECKS - int debug = rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?1:0; -#endif - int absolute_period_requested = -1; - - time_t first_entry_t = rrdset_first_entry_t(st); - time_t last_entry_t = rrdset_last_entry_t(st); - - if(before == 0 && after == 0) { - // dump all the data - before = last_entry_t; - after = first_entry_t; - absolute_period_requested = 0; - } - - // allow relative for before (smaller than API_RELATIVE_TIME_MAX) - if(((before < 0)?-before:before) <= API_RELATIVE_TIME_MAX) { - if(abs(before) % st->update_every) { - // make sure it is a multiple of st->update_every - if(before < 0) before = before - st->update_every - before % st->update_every; - else before = before + st->update_every - before % st->update_every; - } - if(before > 0) before = first_entry_t + before; - else before = last_entry_t + before; - absolute_period_requested = 0; - } - - // allow relative for after (smaller than API_RELATIVE_TIME_MAX) - if(((after < 0)?-after:after) <= API_RELATIVE_TIME_MAX) { - if(after == 0) after = -st->update_every; - if(abs(after) % st->update_every) { - // make sure it is a multiple of st->update_every - if(after < 0) after = after - st->update_every - after % st->update_every; - else after = after + st->update_every - after % 
st->update_every; - } - after = before + after; - absolute_period_requested = 0; - } - - if(absolute_period_requested == -1) - absolute_period_requested = 1; - - // make sure they are within our timeframe - if(before > last_entry_t) before = last_entry_t; - if(before < first_entry_t) before = first_entry_t; - - if(after > last_entry_t) after = last_entry_t; - if(after < first_entry_t) after = first_entry_t; - - // check if they are upside down - if(after > before) { - time_t tmp = before; - before = after; - after = tmp; - } - - // the duration of the chart - time_t duration = before - after; - long available_points = duration / st->update_every; - - if(duration <= 0 || available_points <= 0) - return rrdr_create(st, 1); - - // check the number of wanted points in the result - if(unlikely(points < 0)) points = -points; - if(unlikely(points > available_points)) points = available_points; - if(unlikely(points == 0)) points = available_points; - - // calculate the desired grouping of source data points - long group = available_points / points; - if(unlikely(group <= 0)) group = 1; - if(unlikely(available_points % points > points / 2)) group++; // rounding to the closest integer - - // group_time enforces a certain grouping multiple - calculated_number group_sum_divisor = 1.0; - long group_points = 1; - if(unlikely(group_time > st->update_every)) { - if (unlikely(group_time > duration)) { - // group_time is above the available duration - - #ifdef NETDATA_INTERNAL_CHECKS - info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, group_time, duration); - #endif - - group = points; // use all the points - } - else { - // the points we should group to satisfy gtime - group_points = group_time / st->update_every; - if(unlikely(group_time % group_points)) { - #ifdef NETDATA_INTERNAL_CHECKS - info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, group_time, st->update_every); - #endif - - group_points++; - } - - // adapt group according to group_points - if(unlikely(group < group_points)) group = group_points; // do not allow grouping below the desired one - if(unlikely(group % group_points)) group += group_points - (group % group_points); // make sure group is multiple of group_points - - //group_sum_divisor = group / group_points; - group_sum_divisor = (calculated_number)(group * st->update_every) / (calculated_number)group_time; - } - } - - time_t after_new = after - (after % ( ((aligned)?group:1) * st->update_every )); - time_t before_new = before - (before % ( ((aligned)?group:1) * st->update_every )); - long points_new = (before_new - after_new) / st->update_every / group; - - // find the starting and ending slots in our round robin db - long start_at_slot = rrdset_time2slot(st, before_new), - stop_at_slot = rrdset_time2slot(st, after_new); - -#ifdef NETDATA_INTERNAL_CHECKS - if(after_new < first_entry_t) - error("INTERNAL CHECK: after_new %u is too small, minimum %u", (uint32_t)after_new, (uint32_t)first_entry_t); - - if(after_new > last_entry_t) - error("INTERNAL CHECK: after_new %u is too big, maximum %u", (uint32_t)after_new, (uint32_t)last_entry_t); - - if(before_new < first_entry_t) - error("INTERNAL CHECK: before_new %u is too small, minimum %u", (uint32_t)before_new, (uint32_t)first_entry_t); - - if(before_new > last_entry_t) - error("INTERNAL CHECK: before_new %u is too big, maximum %u", (uint32_t)before_new, (uint32_t)last_entry_t); - - if(start_at_slot < 0 || 
start_at_slot >= st->entries) - error("INTERNAL CHECK: start_at_slot is invalid %ld, expected 0 to %ld", start_at_slot, st->entries - 1); - - if(stop_at_slot < 0 || stop_at_slot >= st->entries) - error("INTERNAL CHECK: stop_at_slot is invalid %ld, expected 0 to %ld", stop_at_slot, st->entries - 1); - - if(points_new > (before_new - after_new) / group / st->update_every + 1) - error("INTERNAL CHECK: points_new %ld is more than points %ld", points_new, (before_new - after_new) / group / st->update_every + 1); - - if(group < group_points) - error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, group_points); - - if(group > group_points && group % group_points) - error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, group_points); -#endif - - //info("RRD2RRDR(): %s: wanted %ld points, got %ld - group=%ld, wanted duration=%u, got %u - wanted %ld - %ld, got %ld - %ld", st->id, points, points_new, group, before - after, before_new - after_new, after, before, after_new, before_new); - - after = after_new; - before = before_new; - duration = before - after; - points = points_new; - - // Now we have: - // before = the end time of the calculation - // after = the start time of the calculation - // duration = the duration of the calculation - // group = the number of source points to aggregate / group together - // method = the method of grouping source points - // points = the number of points to generate - - - // ------------------------------------------------------------------------- - // initialize our result set - - RRDR *r = rrdr_create(st, points); - if(unlikely(!r)) { -#ifdef NETDATA_INTERNAL_CHECKS - error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after, (uint32_t)before, (uint32_t)duration, points); -#endif - return NULL; - } - - if(unlikely(!r->d)) { -#ifdef NETDATA_INTERNAL_CHECKS - error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after, (uint32_t)before, (uint32_t)duration, points); -#endif - return r; - } - - if(unlikely(absolute_period_requested == 1)) - r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE; - else - r->result_options |= RRDR_RESULT_OPTION_RELATIVE; - - // find how many dimensions we have - long dimensions = r->d; - - - // ------------------------------------------------------------------------- - // checks for debugging -#ifdef NETDATA_INTERNAL_CHECKS - if(debug) debug(D_RRD_STATS, "INFO %s first_t: %u, last_t: %u, all_duration: %u, after: %u, before: %u, duration: %u, points: %ld, group: %ld, group_points: %ld" - , st->id - , (uint32_t)first_entry_t - , (uint32_t)last_entry_t - , (uint32_t)(last_entry_t - first_entry_t) - , (uint32_t)after - , (uint32_t)before - , (uint32_t)duration - , points - , group - , group_points - ); -#endif - - // ------------------------------------------------------------------------- - // temp arrays for keeping values per dimension - - calculated_number last_values[dimensions]; // keep the last value of each dimension - calculated_number group_values[dimensions]; // keep sums when grouping - long group_counts[dimensions]; // keep the number of values added to group_values - uint8_t group_options[dimensions]; - uint8_t found_non_zero[dimensions]; - - - // initialize them - RRDDIM *rd; - long c; - rrdset_check_rdlock(st); - for( rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - last_values[c] = 
0; - group_values[c] = (group_method == GROUP_MAX || group_method == GROUP_MIN)?NAN:0; - group_counts[c] = 0; - group_options[c] = 0; - found_non_zero[c] = 0; - } - - - // ------------------------------------------------------------------------- - // the main loop - - time_t now = rrdset_slot2time(st, start_at_slot), - dt = st->update_every, - group_start_t = 0; - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(debug)) debug(D_RRD_STATS, "BEGIN %s after_t: %u (stop_at_t: %ld), before_t: %u (start_at_t: %ld), start_t(now): %u, current_entry: %ld, entries: %ld" - , st->id - , (uint32_t)after - , stop_at_slot - , (uint32_t)before - , start_at_slot - , (uint32_t)now - , st->current_entry - , st->entries - ); -#endif - - r->group = group; - r->update_every = (int)group * st->update_every; - r->before = now; - r->after = now; - - //info("RRD2RRDR(): %s: STARTING", st->id); - - long slot = start_at_slot, counter = 0, stop_now = 0, added = 0, group_count = 0, add_this = 0; - for(; !stop_now ; now -= dt, slot--, counter++) { - if(unlikely(slot < 0)) slot = st->entries - 1; - if(unlikely(slot == stop_at_slot)) stop_now = counter; - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(debug)) debug(D_RRD_STATS, "ROW %s slot: %ld, entries_counter: %ld, group_count: %ld, added: %ld, now: %ld, %s %s" - , st->id - , slot - , counter - , group_count + 1 - , added - , now - , (group_count + 1 == group)?"PRINT":" - " - , (now >= after && now <= before)?"RANGE":" - " - ); -#endif - - // make sure we return data in the proper time range - if(unlikely(now > before)) continue; - if(unlikely(now < after)) break; - - if(unlikely(group_count == 0)) group_start_t = now; - group_count++; - - if(unlikely(group_count == group)) { - if(unlikely(added >= points)) break; - add_this = 1; - } - - // do the calculations - for(rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - storage_number n = rd->values[slot]; - if(unlikely(!does_storage_number_exist(n))) continue; - - group_counts[c]++; - - calculated_number value = unpack_storage_number(n); - if(likely(value != 0.0)) { - group_options[c] |= RRDR_NONZERO; - found_non_zero[c] = 1; - } - - if(unlikely(did_storage_number_reset(n))) - group_options[c] |= RRDR_RESET; - - switch(group_method) { - case GROUP_MIN: - if(unlikely(isnan(group_values[c])) || - calculated_number_fabs(value) < calculated_number_fabs(group_values[c])) - group_values[c] = value; - break; - - case GROUP_MAX: - if(unlikely(isnan(group_values[c])) || - calculated_number_fabs(value) > calculated_number_fabs(group_values[c])) - group_values[c] = value; - break; - - default: - case GROUP_SUM: - case GROUP_AVERAGE: - case GROUP_UNDEFINED: - group_values[c] += value; - break; - - case GROUP_INCREMENTAL_SUM: - if(unlikely(slot == start_at_slot)) - last_values[c] = value; - - group_values[c] += last_values[c] - value; - last_values[c] = value; - break; - } - } - - // added it - if(unlikely(add_this)) { - if(unlikely(!rrdr_line_init(r, group_start_t))) break; - - r->after = now; - - calculated_number *cn = rrdr_line_values(r); - uint8_t *co = rrdr_line_options(r); - - for(rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - - // update the dimension options - if(likely(found_non_zero[c])) r->od[c] |= RRDR_NONZERO; - - // store the specific point options - co[c] = group_options[c]; - - // store the value - if(unlikely(group_counts[c] == 0)) { - cn[c] = 0.0; - co[c] |= RRDR_EMPTY; - group_values[c] = (group_method == GROUP_MAX || group_method == GROUP_MIN)?NAN:0; - } - else { - 
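                        // worked example (assumed values, for illustration only):
                        // with group_method == GROUP_AVERAGE, update_every == 1s,
                        // group == 60 and no gtime (group_points == 1), a minute
                        // whose 60 collected values sum to 120.0 is flushed below
                        // as 120.0 / group_counts[c] == 2.0; when gtime is in use
                        // (group_points != 1), group_sum_divisor is used instead.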
switch(group_method) { - case GROUP_MIN: - case GROUP_MAX: - if(unlikely(isnan(group_values[c]))) - cn[c] = 0; - else { - cn[c] = group_values[c]; - group_values[c] = NAN; - } - break; - - case GROUP_SUM: - case GROUP_INCREMENTAL_SUM: - cn[c] = group_values[c]; - group_values[c] = 0; - break; - - default: - case GROUP_AVERAGE: - case GROUP_UNDEFINED: - if(unlikely(group_points != 1)) - cn[c] = group_values[c] / group_sum_divisor; - else - cn[c] = group_values[c] / group_counts[c]; - - group_values[c] = 0; - break; - } - - if(cn[c] < r->min) r->min = cn[c]; - if(cn[c] > r->max) r->max = cn[c]; - } - - // reset for the next loop - group_counts[c] = 0; - group_options[c] = 0; - } - - added++; - group_count = 0; - add_this = 0; - } - } - - rrdr_done(r); - //info("RRD2RRDR(): %s: END %ld loops made, %ld points generated", st->id, counter, rrdr_rows(r)); - //error("SHIFT: %s: wanted %ld points, got %ld", st->id, points, rrdr_rows(r)); - return r; -} - -int rrdset2value_api_v1( - RRDSET *st - , BUFFER *wb - , calculated_number *n - , const char *dimensions - , long points - , long long after - , long long before - , int group_method - , long group_time - , uint32_t options - , time_t *db_after - , time_t *db_before - , int *value_is_null -) { - RRDR *r = rrd2rrdr(st, points, after, before, group_method, group_time, !(options & RRDR_OPTION_NOT_ALIGNED)); - if(!r) { - if(value_is_null) *value_is_null = 1; - return 500; - } - - if(rrdr_rows(r) == 0) { - rrdr_free(r); - - if(db_after) *db_after = 0; - if(db_before) *db_before = 0; - if(value_is_null) *value_is_null = 1; - - return 400; - } - - if(wb) { - if (r->result_options & RRDR_RESULT_OPTION_RELATIVE) - buffer_no_cacheable(wb); - else if (r->result_options & RRDR_RESULT_OPTION_ABSOLUTE) - buffer_cacheable(wb); - } - - options = rrdr_check_options(r, options, dimensions); - - if(dimensions) - rrdr_disable_not_selected_dimensions(r, options, dimensions); - - if(db_after) *db_after = r->after; - if(db_before) *db_before = r->before; - - long i = (options & RRDR_OPTION_REVERSED)?rrdr_rows(r) - 1:0; - *n = rrdr2value(r, i, options, value_is_null); - - rrdr_free(r); - return 200; -} - -int rrdset2anything_api_v1( - RRDSET *st - , BUFFER *wb - , BUFFER *dimensions - , uint32_t format - , long points - , long long after - , long long before - , int group_method - , long group_time - , uint32_t options - , time_t *latest_timestamp -) { - st->last_accessed_time = now_realtime_sec(); - - RRDR *r = rrd2rrdr(st, points, after, before, group_method, group_time, !(options & RRDR_OPTION_NOT_ALIGNED)); - if(!r) { - buffer_strcat(wb, "Cannot generate output with these parameters on this chart."); - return 500; - } - - if(r->result_options & RRDR_RESULT_OPTION_RELATIVE) - buffer_no_cacheable(wb); - else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE) - buffer_cacheable(wb); - - options = rrdr_check_options(r, options, (dimensions)?buffer_tostring(dimensions):NULL); - - if(dimensions) - rrdr_disable_not_selected_dimensions(r, options, buffer_tostring(dimensions)); - - if(latest_timestamp && rrdr_rows(r) > 0) - *latest_timestamp = r->before; - - switch(format) { - case DATASOURCE_SSV: - if(options & RRDR_OPTION_JSON_WRAP) { - wb->contenttype = CT_APPLICATION_JSON; - rrdr_json_wrapper_begin(r, wb, format, options, 1); - rrdr2ssv(r, wb, options, "", " ", ""); - rrdr_json_wrapper_end(r, wb, format, options, 1); - } - else { - wb->contenttype = CT_TEXT_PLAIN; - rrdr2ssv(r, wb, options, "", " ", ""); - } - break; - - case DATASOURCE_SSV_COMMA: - if(options & 
RRDR_OPTION_JSON_WRAP) { - wb->contenttype = CT_APPLICATION_JSON; - rrdr_json_wrapper_begin(r, wb, format, options, 1); - rrdr2ssv(r, wb, options, "", ",", ""); - rrdr_json_wrapper_end(r, wb, format, options, 1); - } - else { - wb->contenttype = CT_TEXT_PLAIN; - rrdr2ssv(r, wb, options, "", ",", ""); - } - break; - - case DATASOURCE_JS_ARRAY: - if(options & RRDR_OPTION_JSON_WRAP) { - wb->contenttype = CT_APPLICATION_JSON; - rrdr_json_wrapper_begin(r, wb, format, options, 0); - rrdr2ssv(r, wb, options, "[", ",", "]"); - rrdr_json_wrapper_end(r, wb, format, options, 0); - } - else { - wb->contenttype = CT_APPLICATION_JSON; - rrdr2ssv(r, wb, options, "[", ",", "]"); - } - break; - - case DATASOURCE_CSV: - if(options & RRDR_OPTION_JSON_WRAP) { - wb->contenttype = CT_APPLICATION_JSON; - rrdr_json_wrapper_begin(r, wb, format, options, 1); - rrdr2csv(r, wb, options, "", ",", "\\n", ""); - rrdr_json_wrapper_end(r, wb, format, options, 1); - } - else { - wb->contenttype = CT_TEXT_PLAIN; - rrdr2csv(r, wb, options, "", ",", "\r\n", ""); - } - break; - - case DATASOURCE_CSV_JSON_ARRAY: - wb->contenttype = CT_APPLICATION_JSON; - if(options & RRDR_OPTION_JSON_WRAP) { - rrdr_json_wrapper_begin(r, wb, format, options, 0); - buffer_strcat(wb, "[\n"); - rrdr2csv(r, wb, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); - buffer_strcat(wb, "\n]"); - rrdr_json_wrapper_end(r, wb, format, options, 0); - } - else { - wb->contenttype = CT_TEXT_PLAIN; - buffer_strcat(wb, "[\n"); - rrdr2csv(r, wb, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); - buffer_strcat(wb, "\n]"); - } - break; - - case DATASOURCE_TSV: - if(options & RRDR_OPTION_JSON_WRAP) { - wb->contenttype = CT_APPLICATION_JSON; - rrdr_json_wrapper_begin(r, wb, format, options, 1); - rrdr2csv(r, wb, options, "", "\t", "\\n", ""); - rrdr_json_wrapper_end(r, wb, format, options, 1); - } - else { - wb->contenttype = CT_TEXT_PLAIN; - rrdr2csv(r, wb, options, "", "\t", "\r\n", ""); - } - break; - - case DATASOURCE_HTML: - if(options & RRDR_OPTION_JSON_WRAP) { - wb->contenttype = CT_APPLICATION_JSON; - rrdr_json_wrapper_begin(r, wb, format, options, 1); - buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n"); - rrdr2csv(r, wb, options, "<tr><td>", "</td><td>", "</td></tr>\\n", ""); - buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n"); - rrdr_json_wrapper_end(r, wb, format, options, 1); - } - else { - wb->contenttype = CT_TEXT_HTML; - buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n"); - rrdr2csv(r, wb, options, "<tr><td>", "</td><td>", "</td></tr>\n", ""); - buffer_strcat(wb, "</table>\n</center>\n</html>\n"); - } - break; - - case DATASOURCE_DATATABLE_JSONP: - wb->contenttype = CT_APPLICATION_X_JAVASCRIPT; - - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_begin(r, wb, format, options, 0); - - rrdr2json(r, wb, options, 1); - - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_end(r, wb, format, options, 0); - break; - - case DATASOURCE_DATATABLE_JSON: - wb->contenttype = CT_APPLICATION_JSON; - - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_begin(r, wb, format, options, 0); - - rrdr2json(r, wb, options, 1); - - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_end(r, wb, format, options, 0); - break; - - case DATASOURCE_JSONP: - wb->contenttype = CT_APPLICATION_X_JAVASCRIPT; - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_begin(r, wb, format, options, 0); - - rrdr2json(r, wb, options, 0); - - 
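            // note: the JSONP body produced here is identical to the plain
            // DATASOURCE_JSON case below; only the content type differs, so
            // the callback padding is presumably added by the web layer (an
            // assumption -- callback handling is not visible in this file).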
if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_end(r, wb, format, options, 0); - break; - - case DATASOURCE_JSON: - default: - wb->contenttype = CT_APPLICATION_JSON; - - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_begin(r, wb, format, options, 0); - - rrdr2json(r, wb, options, 0); - - if(options & RRDR_OPTION_JSON_WRAP) - rrdr_json_wrapper_end(r, wb, format, options, 0); - break; - } - - rrdr_free(r); - return 200; -} diff --git a/src/rrd2json.h b/src/rrd2json.h deleted file mode 100644 index b41c814ec..000000000 --- a/src/rrd2json.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef NETDATA_RRD2JSON_H -#define NETDATA_RRD2JSON_H 1 - -#define HOSTNAME_MAX 1024 - -#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) - -// types of JSON generation -#define DATASOURCE_INVALID (-1) -#define DATASOURCE_JSON 0 -#define DATASOURCE_DATATABLE_JSON 1 -#define DATASOURCE_DATATABLE_JSONP 2 -#define DATASOURCE_SSV 3 -#define DATASOURCE_CSV 4 -#define DATASOURCE_JSONP 5 -#define DATASOURCE_TSV 6 -#define DATASOURCE_HTML 7 -#define DATASOURCE_JS_ARRAY 8 -#define DATASOURCE_SSV_COMMA 9 -#define DATASOURCE_CSV_JSON_ARRAY 10 - -#define DATASOURCE_FORMAT_JSON "json" -#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable" -#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource" -#define DATASOURCE_FORMAT_JSONP "jsonp" -#define DATASOURCE_FORMAT_SSV "ssv" -#define DATASOURCE_FORMAT_CSV "csv" -#define DATASOURCE_FORMAT_TSV "tsv" -#define DATASOURCE_FORMAT_HTML "html" -#define DATASOURCE_FORMAT_JS_ARRAY "array" -#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma" -#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray" - -#define ALLMETRICS_FORMAT_SHELL "shell" -#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus" -#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts" -#define ALLMETRICS_FORMAT_JSON "json" - -#define ALLMETRICS_SHELL 1 -#define ALLMETRICS_PROMETHEUS 2 -#define ALLMETRICS_JSON 3 -#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4 - -#define GROUP_UNDEFINED 0 -#define GROUP_AVERAGE 1 -#define GROUP_MIN 2 -#define GROUP_MAX 3 -#define GROUP_SUM 4 -#define GROUP_INCREMENTAL_SUM 5 - -#define RRDR_OPTION_NONZERO 0x00000001 // don't output dimensions with just zero values -#define RRDR_OPTION_REVERSED 0x00000002 // output the rows in reverse order (oldest to newest) -#define RRDR_OPTION_ABSOLUTE 0x00000004 // values positive, for DATASOURCE_SSV before summing -#define RRDR_OPTION_MIN2MAX 0x00000008 // when adding dimensions, use max - min, instead of sum -#define RRDR_OPTION_SECONDS 0x00000010 // output seconds, instead of dates -#define RRDR_OPTION_MILLISECONDS 0x00000020 // output milliseconds, instead of dates -#define RRDR_OPTION_NULL2ZERO 0x00000040 // do not show nulls, convert them to zeros -#define RRDR_OPTION_OBJECTSROWS 0x00000080 // each row of values should be an object, not an array -#define RRDR_OPTION_GOOGLE_JSON 0x00000100 // comply with google JSON/JSONP specs -#define RRDR_OPTION_JSON_WRAP 0x00000200 // wrap the response in a JSON header with info about the result -#define RRDR_OPTION_LABEL_QUOTES 0x00000400 // in CSV output, wrap header labels in double quotes -#define RRDR_OPTION_PERCENTAGE 0x00000800 // give values as percentage of total -#define RRDR_OPTION_NOT_ALIGNED 0x00001000 // do not align charts for persistent timeframes -#define RRDR_OPTION_DISPLAY_ABS 0x00002000 // for badges, display the absolute value, but calculate colors with sign -#define RRDR_OPTION_MATCH_IDS 0x00004000 // when filtering dimensions, match only IDs -#define RRDR_OPTION_MATCH_NAMES 0x00008000 // when 
filtering dimensions, match only names - -extern void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb); -extern void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb); - -extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb); -extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb); - -extern int rrdset2anything_api_v1(RRDSET *st, BUFFER *out, BUFFER *dimensions, uint32_t format, long points - , long long after, long long before, int group_method, long group_time, uint32_t options - , time_t *latest_timestamp); - -extern int rrdset2value_api_v1(RRDSET *st, BUFFER *wb, calculated_number *n, const char *dimensions, long points - , long long after, long long before, int group_method, long group_time, uint32_t options - , time_t *db_after, time_t *db_before, int *value_is_null); - -#endif /* NETDATA_RRD2JSON_H */ diff --git a/src/rrd2json_api_old.c b/src/rrd2json_api_old.c deleted file mode 100644 index 003b8626d..000000000 --- a/src/rrd2json_api_old.c +++ /dev/null @@ -1,487 +0,0 @@ -#include "common.h" - -unsigned long rrdset_info2json_api_old(RRDSET *st, char *options, BUFFER *wb) { - time_t now = now_realtime_sec(); - - rrdset_rdlock(st); - - st->last_accessed_time = now; - - buffer_sprintf(wb, - "\t\t{\n" - "\t\t\t\"id\": \"%s\",\n" - "\t\t\t\"name\": \"%s\",\n" - "\t\t\t\"type\": \"%s\",\n" - "\t\t\t\"family\": \"%s\",\n" - "\t\t\t\"context\": \"%s\",\n" - "\t\t\t\"title\": \"%s (%s)\",\n" - "\t\t\t\"priority\": %ld,\n" - "\t\t\t\"enabled\": %d,\n" - "\t\t\t\"units\": \"%s\",\n" - "\t\t\t\"url\": \"/data/%s/%s\",\n" - "\t\t\t\"chart_type\": \"%s\",\n" - "\t\t\t\"counter\": %lu,\n" - "\t\t\t\"entries\": %ld,\n" - "\t\t\t\"first_entry_t\": %ld,\n" - "\t\t\t\"last_entry\": %lu,\n" - "\t\t\t\"last_entry_t\": %ld,\n" - "\t\t\t\"last_entry_secs_ago\": %ld,\n" - "\t\t\t\"update_every\": %d,\n" - "\t\t\t\"isdetail\": %d,\n" - "\t\t\t\"usec_since_last_update\": %llu,\n" - "\t\t\t\"collected_total\": " TOTAL_NUMBER_FORMAT ",\n" - "\t\t\t\"last_collected_total\": " TOTAL_NUMBER_FORMAT ",\n" - "\t\t\t\"dimensions\": [\n" - , st->id - , st->name - , st->type - , st->family - , st->context - , st->title, st->name - , st->priority - , rrdset_flag_check(st, RRDSET_FLAG_ENABLED)?1:0 - , st->units - , st->name, options?options:"" - , rrdset_type_name(st->chart_type) - , st->counter - , st->entries - , rrdset_first_entry_t(st) - , rrdset_last_slot(st) - , rrdset_last_entry_t(st) - , (now < rrdset_last_entry_t(st)) ? 
(time_t)0 : now - rrdset_last_entry_t(st) - , st->update_every - , rrdset_flag_check(st, RRDSET_FLAG_DETAIL)?1:0 - , st->usec_since_last_update - , st->collected_total - , st->last_collected_total - ); - - unsigned long memory = st->memsize; - - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - - memory += rd->memsize; - - buffer_sprintf(wb, - "\t\t\t\t{\n" - "\t\t\t\t\t\"id\": \"%s\",\n" - "\t\t\t\t\t\"name\": \"%s\",\n" - "\t\t\t\t\t\"entries\": %ld,\n" - "\t\t\t\t\t\"isHidden\": %d,\n" - "\t\t\t\t\t\"algorithm\": \"%s\",\n" - "\t\t\t\t\t\"multiplier\": " COLLECTED_NUMBER_FORMAT ",\n" - "\t\t\t\t\t\"divisor\": " COLLECTED_NUMBER_FORMAT ",\n" - "\t\t\t\t\t\"last_entry_t\": %ld,\n" - "\t\t\t\t\t\"collected_value\": " COLLECTED_NUMBER_FORMAT ",\n" - "\t\t\t\t\t\"calculated_value\": " CALCULATED_NUMBER_FORMAT ",\n" - "\t\t\t\t\t\"last_collected_value\": " COLLECTED_NUMBER_FORMAT ",\n" - "\t\t\t\t\t\"last_calculated_value\": " CALCULATED_NUMBER_FORMAT ",\n" - "\t\t\t\t\t\"memory\": %lu\n" - "\t\t\t\t}%s\n" - , rd->id - , rd->name - , rd->entries - , rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)?1:0 - , rrd_algorithm_name(rd->algorithm) - , rd->multiplier - , rd->divisor - , rd->last_collected_time.tv_sec - , rd->collected_value - , rd->calculated_value - , rd->last_collected_value - , rd->last_calculated_value - , rd->memsize - , rd->next?",":"" - ); - } - - buffer_sprintf(wb, - "\t\t\t],\n" - "\t\t\t\"memory\" : %lu\n" - "\t\t}" - , memory - ); - - rrdset_unlock(st); - return memory; -} - -#define RRD_GRAPH_JSON_HEADER "{\n\t\"charts\": [\n" -#define RRD_GRAPH_JSON_FOOTER "\n\t]\n}\n" - -void rrd_graph2json_api_old(RRDSET *st, char *options, BUFFER *wb) -{ - buffer_strcat(wb, RRD_GRAPH_JSON_HEADER); - rrdset_info2json_api_old(st, options, wb); - buffer_strcat(wb, RRD_GRAPH_JSON_FOOTER); -} - -void rrd_all2json_api_old(RRDHOST *host, BUFFER *wb) -{ - unsigned long memory = 0; - long c = 0; - RRDSET *st; - - time_t now = now_realtime_sec(); - - buffer_strcat(wb, RRD_GRAPH_JSON_HEADER); - - rrdhost_rdlock(host); - rrdset_foreach_read(st, host) { - if(rrdset_is_available_for_viewers(st)) { - if(c) buffer_strcat(wb, ",\n"); - memory += rrdset_info2json_api_old(st, NULL, wb); - - c++; - st->last_accessed_time = now; - } - } - rrdhost_unlock(host); - - buffer_sprintf(wb, "\n\t],\n" - "\t\"hostname\": \"%s\",\n" - "\t\"update_every\": %d,\n" - "\t\"history\": %ld,\n" - "\t\"memory\": %lu\n" - "}\n" - , host->hostname - , host->rrd_update_every - , host->rrd_history_entries - , memory - ); -} - -time_t rrdset2json_api_old( - int type - , RRDSET *st - , BUFFER *wb - , long points - , long group - , int group_method - , time_t after - , time_t before - , int only_non_zero -) { - int c; - rrdset_rdlock(st); - - st->last_accessed_time = now_realtime_sec(); - - // ------------------------------------------------------------------------- - // switch from JSON to google JSON - - char kq[2] = "\""; - char sq[2] = "\""; - switch(type) { - case DATASOURCE_DATATABLE_JSON: - case DATASOURCE_DATATABLE_JSONP: - kq[0] = '\0'; - sq[0] = '\''; - break; - - case DATASOURCE_JSON: - default: - break; - } - - - // ------------------------------------------------------------------------- - // validate the parameters - - if(points < 1) points = 1; - if(group < 1) group = 1; - - if(before == 0 || before > rrdset_last_entry_t(st)) before = rrdset_last_entry_t(st); - if(after == 0 || after < rrdset_first_entry_t(st)) after = rrdset_first_entry_t(st); - - // --- - - // our return value (the last timestamp printed) - // this is 
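
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the kq/sq trick in rrdset2json_api_old()
// keeps one set of format strings for both plain JSON and google DataTable
// JSON by switching only the key-quote and string-quote characters. The type
// constants here are local stand-ins for the DATASOURCE_* defines.

#include <stdio.h>

enum { TYPE_JSON, TYPE_DATATABLE };

static void print_pair(int type, const char *key, const char *value) {
    char kq[2] = "\"";              // key quote
    char sq[2] = "\"";              // string quote
    if(type == TYPE_DATATABLE) {
        kq[0] = '\0';               // google JSON: bare keys...
        sq[0] = '\'';               // ...and single-quoted strings
    }
    printf("{%s%s%s:%s%s%s}\n", kq, key, kq, sq, value, sq);
}

int main(void) {
    print_pair(TYPE_JSON, "v", "ok");       // {"v":"ok"}
    print_pair(TYPE_DATATABLE, "v", "ok");  // {v:'ok'}
    return 0;
}
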
required to detect re-transmit in google JSONP - time_t last_timestamp = 0; - - - // ------------------------------------------------------------------------- - // find how many dimensions we have - - int dimensions = 0; - RRDDIM *rd; - rrddim_foreach_read(rd, st) dimensions++; - if(!dimensions) { - rrdset_unlock(st); - buffer_strcat(wb, "No dimensions yet."); - return 0; - } - - - // ------------------------------------------------------------------------- - // prepare various strings, to speed up the loop - - char overflow_annotation[201]; snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq); - char normal_annotation[201]; snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq); - char pre_date[51]; snprintfz(pre_date, 50, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq); - char post_date[21]; snprintfz(post_date, 20, "%s}", sq); - char pre_value[21]; snprintfz(pre_value, 20, ",{%sv%s:", kq, kq); - char post_value[21]; strcpy(post_value, "}"); - - - // ------------------------------------------------------------------------- - // checks for debugging - - if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)) { - debug(D_RRD_STATS, "%s first_entry_t = %ld, last_entry_t = %ld, duration = %ld, after = %ld, before = %ld, duration = %ld, entries_to_show = %ld, group = %ld" - , st->id - , rrdset_first_entry_t(st) - , rrdset_last_entry_t(st) - , rrdset_last_entry_t(st) - rrdset_first_entry_t(st) - , after - , before - , before - after - , points - , group - ); - - if(before < after) - debug(D_RRD_STATS, "WARNING: %s The newest value in the database (%ld) is earlier than the oldest (%ld)", st->name, before, after); - - if((before - after) > st->entries * st->update_every) - debug(D_RRD_STATS, "WARNING: %s The time difference between the oldest and the newest entries (%ld) is higher than the capacity of the database (%ld)", st->name, before - after, st->entries * st->update_every); - } - - - // ------------------------------------------------------------------------- - // temp arrays for keeping values per dimension - - calculated_number group_values[dimensions]; // keep sums when grouping - int print_hidden[dimensions]; // keep hidden flags - int found_non_zero[dimensions]; - int found_non_existing[dimensions]; - - // initialize them - for( rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - group_values[c] = 0; - print_hidden[c] = rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)?1:0; - found_non_zero[c] = 0; - found_non_existing[c] = 0; - } - - - // error("OLD: points=%d after=%d before=%d group=%d, duration=%d", entries_to_show, before - (st->update_every * group * entries_to_show), before, group, before - after + 1); - // rrd2array(st, entries_to_show, before - (st->update_every * group * entries_to_show), before, group_method, only_non_zero); - // rrd2rrdr(st, entries_to_show, before - (st->update_every * group * entries_to_show), before, group_method); - - // ------------------------------------------------------------------------- - // remove dimensions that contain only zeros - - int max_loop = 1; - if(only_non_zero) max_loop = 2; - - for(; max_loop ; max_loop--) { - - // ------------------------------------------------------------------------- - // print the JSON header - - buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq); - buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, 
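
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the snprintfz() calls above precompute the
// small JSON fragments (pre_date, pre_value, ...) once, so the hot loop only
// appends ready-made strings instead of re-running printf formatting per row.
// Standard snprintf() stands in for netdata's snprintfz().

#include <stdio.h>

int main(void) {
    char kq = '"';
    char pre_value[21];
    snprintf(pre_value, sizeof(pre_value), ",{%cv%c:", kq, kq);  // built once

    for(int row = 0; row < 3; row++)
        printf("%s%d}", pre_value, row);     // cheap append per row
    printf("\n");
    return 0;
}
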
kq, sq, sq); - buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); - buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); - - // print the header for each dimension - // and update the print_hidden array for the dimensions that should be hidden - int pc = 0; - for( rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - if(!print_hidden[c]) { - pc++; - buffer_sprintf(wb, ",\n {%sid%s:%s%s,%slabel%s:%s%s%s,%spattern%s:%s%s,%stype%s:%snumber%s}", kq, kq, sq, sq, kq, kq, sq, rd->name, sq, kq, kq, sq, sq, kq, kq, sq, sq); - } - } - if(!pc) { - buffer_sprintf(wb, ",\n {%sid%s:%s%s,%slabel%s:%s%s%s,%spattern%s:%s%s,%stype%s:%snumber%s}", kq, kq, sq, sq, kq, kq, sq, "no data", sq, kq, kq, sq, sq, kq, kq, sq, sq); - } - - // print the begin of row data - buffer_sprintf(wb, "\n ],\n %srows%s:\n [\n", kq, kq); - - - // ------------------------------------------------------------------------- - // the main loop - - int annotate_reset = 0; - int annotation_count = 0; - - long t = rrdset_time2slot(st, before), - stop_at_t = rrdset_time2slot(st, after), - stop_now = 0; - - t -= t % group; - - time_t now = rrdset_slot2time(st, t), - dt = st->update_every; - - long count = 0, printed = 0, group_count = 0; - last_timestamp = 0; - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - debug(D_RRD_STATS, "%s: REQUEST after:%u before:%u, points:%ld, group:%ld, CHART cur:%ld first: %u last:%u, CALC start_t:%ld, stop_t:%ld" - , st->id - , (uint32_t)after - , (uint32_t)before - , points - , group - , st->current_entry - , (uint32_t)rrdset_first_entry_t(st) - , (uint32_t)rrdset_last_entry_t(st) - , t - , stop_at_t - ); - - long counter = 0; - for(; !stop_now ; now -= dt, t--, counter++) { - if(t < 0) t = st->entries - 1; - if(t == stop_at_t) stop_now = counter; - - int print_this = 0; - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - debug(D_RRD_STATS, "%s t = %ld, count = %ld, group_count = %ld, printed = %ld, now = %ld, %s %s" - , st->id - , t - , count + 1 - , group_count + 1 - , printed - , now - , (group_count + 1 == group)?"PRINT":" - " - , (now >= after && now <= before)?"RANGE":" - " - ); - - - // make sure we return data in the proper time range - if(now > before) continue; - if(now < after) break; - - //if(rrdset_slot2time(st, t) != now) - // error("%s: slot=%ld, now=%ld, slot2time=%ld, diff=%ld, last_entry_t=%ld, rrdset_last_slot=%ld", st->id, t, now, rrdset_slot2time(st,t), now - rrdset_slot2time(st,t), rrdset_last_entry_t(st), rrdset_last_slot(st)); - - count++; - group_count++; - - // check if we have to print this now - if(group_count == group) { - if(printed >= points) { - // debug(D_RRD_STATS, "Already printed all rows. 
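
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the core of the grouping loop above.
// Samples are accumulated until `group` of them have been seen, then one
// averaged point is emitted, mirroring the GROUP_AVERAGE branch. Plain
// doubles stand in for calculated_number; the sample data is made up.

#include <stdio.h>

int main(void) {
    const double samples[] = { 1, 2, 3, 4, 5, 6 };
    const int n = 6, group = 2;

    double sum = 0;
    int group_count = 0;

    for(int i = 0; i < n; i++) {
        sum += samples[i];
        if(++group_count == group) {
            printf("point: %.2f\n", sum / group_count); // emit averaged point
            sum = 0;
            group_count = 0;
        }
    }
    return 0;
}
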
Stopping."); - break; - } - - // generate the local date time - struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); - if(!tm) { error("localtime() failed."); continue; } - if(now > last_timestamp) last_timestamp = now; - - if(printed) buffer_strcat(wb, "]},\n"); - buffer_strcat(wb, pre_date); - buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); - buffer_strcat(wb, post_date); - - print_this = 1; - } - - // do the calculations - for(rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - storage_number n = rd->values[t]; - calculated_number value = unpack_storage_number(n); - - if(!does_storage_number_exist(n)) { - value = 0.0; - found_non_existing[c]++; - } - if(did_storage_number_reset(n)) annotate_reset = 1; - - switch(group_method) { - case GROUP_MAX: - if(abs(value) > abs(group_values[c])) group_values[c] = value; - break; - - case GROUP_SUM: - group_values[c] += value; - break; - - default: - case GROUP_AVERAGE: - group_values[c] += value; - if(print_this) group_values[c] /= ( group_count - found_non_existing[c] ); - break; - } - } - - if(print_this) { - if(annotate_reset) { - annotation_count++; - buffer_strcat(wb, overflow_annotation); - annotate_reset = 0; - } - else - buffer_strcat(wb, normal_annotation); - - pc = 0; - for(c = 0 ; c < dimensions ; c++) { - if(found_non_existing[c] == group_count) { - // all entries are non-existing - pc++; - buffer_strcat(wb, pre_value); - buffer_strcat(wb, "null"); - buffer_strcat(wb, post_value); - } - else if(!print_hidden[c]) { - pc++; - buffer_strcat(wb, pre_value); - buffer_rrd_value(wb, group_values[c]); - buffer_strcat(wb, post_value); - - if(group_values[c]) found_non_zero[c]++; - } - - // reset them for the next loop - group_values[c] = 0; - found_non_existing[c] = 0; - } - - // if all dimensions are hidden, print a null - if(!pc) { - buffer_strcat(wb, pre_value); - buffer_strcat(wb, "null"); - buffer_strcat(wb, post_value); - } - - printed++; - group_count = 0; - } - } - - if(printed) buffer_strcat(wb, "]}"); - buffer_strcat(wb, "\n ]\n}\n"); - - if(only_non_zero && max_loop > 1) { - int changed = 0; - for(rd = st->dimensions, c = 0 ; rd && c < dimensions ; rd = rd->next, c++) { - group_values[c] = 0; - found_non_existing[c] = 0; - - if(!print_hidden[c] && !found_non_zero[c]) { - changed = 1; - print_hidden[c] = 1; - } - } - - if(changed) buffer_flush(wb); - else break; - } - else break; - - } // max_loop - - debug(D_RRD_STATS, "RRD_STATS_JSON: %s total %zu bytes", st->name, wb->len); - - rrdset_unlock(st); - return last_timestamp; -} diff --git a/src/rrd2json_api_old.h b/src/rrd2json_api_old.h deleted file mode 100644 index f8c63814f..000000000 --- a/src/rrd2json_api_old.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef NETDATA_RRD2JSON_API_OLD_H -#define NETDATA_RRD2JSON_API_OLD_H - -extern unsigned long rrdset_info2json_api_old(RRDSET *st, char *options, BUFFER *wb); - -extern void rrd_graph2json_api_old(RRDSET *st, char *options, BUFFER *wb); - -extern void rrd_all2json_api_old(RRDHOST *host, BUFFER *wb); - -extern time_t rrdset2json_api_old(int type, RRDSET *st, BUFFER *wb, long entries_to_show, long group, int group_method - , time_t after, time_t before, int only_non_zero); - - -#endif //NETDATA_RRD2JSON_API_OLD_H diff --git a/src/rrdcalc.c b/src/rrdcalc.c deleted file mode 100644 index 4e41539e2..000000000 --- a/src/rrdcalc.c +++ /dev/null @@ -1,424 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -// 
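
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the two-pass max_loop pattern that ends
// rrdset2json_api_old(). Pass one renders everything while recording which
// columns stayed all-zero; if any did, the rendered output is discarded and
// produced again with those columns hidden. A char array stands in for BUFFER.

#include <stdio.h>
#include <string.h>

int main(void) {
    const int values[2][3] = { { 1, 2, 3 }, { 0, 0, 0 } };  // column 1 is all zero
    int hidden[2] = { 0, 0 }, found_non_zero[2];
    char out[256];

    for(int pass = 2; pass; pass--) {
        out[0] = '\0';
        memset(found_non_zero, 0, sizeof(found_non_zero));

        for(int row = 0; row < 3; row++)
            for(int col = 0; col < 2; col++)
                if(!hidden[col]) {
                    char cell[32];
                    snprintf(cell, sizeof(cell), "[%d,%d]", col, values[col][row]);
                    strncat(out, cell, sizeof(out) - strlen(out) - 1);
                    if(values[col][row]) found_non_zero[col] = 1;
                }

        int changed = 0;
        for(int col = 0; col < 2 && pass > 1; col++)
            if(!hidden[col] && !found_non_zero[col]) { hidden[col] = 1; changed = 1; }

        if(!changed) break;   // keep the buffer; otherwise flush and re-render
    }

    printf("%s\n", out);      // only the non-zero column survives
    return 0;
}
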
---------------------------------------------------------------------------- -// RRDCALC management - -inline const char *rrdcalc_status2string(RRDCALC_STATUS status) { - switch(status) { - case RRDCALC_STATUS_REMOVED: - return "REMOVED"; - - case RRDCALC_STATUS_UNDEFINED: - return "UNDEFINED"; - - case RRDCALC_STATUS_UNINITIALIZED: - return "UNINITIALIZED"; - - case RRDCALC_STATUS_CLEAR: - return "CLEAR"; - - case RRDCALC_STATUS_RAISED: - return "RAISED"; - - case RRDCALC_STATUS_WARNING: - return "WARNING"; - - case RRDCALC_STATUS_CRITICAL: - return "CRITICAL"; - - default: - error("Unknown alarm status %d", status); - return "UNKNOWN"; - } -} - -static void rrdsetcalc_link(RRDSET *st, RRDCALC *rc) { - RRDHOST *host = st->rrdhost; - - debug(D_HEALTH, "Health linking alarm '%s.%s' to chart '%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, st->id, host->hostname); - - rc->last_status_change = now_realtime_sec(); - rc->rrdset = st; - - rc->rrdset_next = st->alarms; - rc->rrdset_prev = NULL; - - if(rc->rrdset_next) - rc->rrdset_next->rrdset_prev = rc; - - st->alarms = rc; - - if(rc->update_every < rc->rrdset->update_every) { - error("Health alarm '%s.%s' has update every %d, less than chart update every %d. Setting alarm update frequency to %d.", rc->rrdset->id, rc->name, rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every); - rc->update_every = rc->rrdset->update_every; - } - - if(!isnan(rc->green) && isnan(st->green)) { - debug(D_HEALTH, "Health alarm '%s.%s' green threshold set from " CALCULATED_NUMBER_FORMAT_AUTO " to " CALCULATED_NUMBER_FORMAT_AUTO ".", rc->rrdset->id, rc->name, rc->rrdset->green, rc->green); - st->green = rc->green; - } - - if(!isnan(rc->red) && isnan(st->red)) { - debug(D_HEALTH, "Health alarm '%s.%s' red threshold set from " CALCULATED_NUMBER_FORMAT_AUTO " to " CALCULATED_NUMBER_FORMAT_AUTO ".", rc->rrdset->id, rc->name, rc->rrdset->red, rc->red); - st->red = rc->red; - } - - rc->local = rrdvar_create_and_index("local", &st->rrdvar_root_index, rc->name, RRDVAR_TYPE_CALCULATED, &rc->value); - rc->family = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rc->name, RRDVAR_TYPE_CALCULATED, &rc->value); - - char fullname[RRDVAR_MAX_LENGTH + 1]; - snprintfz(fullname, RRDVAR_MAX_LENGTH, "%s.%s", st->id, rc->name); - rc->hostid = rrdvar_create_and_index("host", &host->rrdvar_root_index, fullname, RRDVAR_TYPE_CALCULATED, &rc->value); - - snprintfz(fullname, RRDVAR_MAX_LENGTH, "%s.%s", st->name, rc->name); - rc->hostname = rrdvar_create_and_index("host", &host->rrdvar_root_index, fullname, RRDVAR_TYPE_CALCULATED, &rc->value); - - if(!rc->units) rc->units = strdupz(st->units); - - { - time_t now = now_realtime_sec(); - health_alarm_log( - host, - rc->id, - rc->next_event_id++, - now, - rc->name, - rc->rrdset->id, - rc->rrdset->family, - rc->exec, - rc->recipient, - now - rc->last_status_change, - rc->old_value, - rc->value, - rc->status, - RRDCALC_STATUS_UNINITIALIZED, - rc->source, - rc->units, - rc->info, - 0, - 0 - ); - } -} - -static inline int rrdcalc_is_matching_this_rrdset(RRDCALC *rc, RRDSET *st) { - if( (rc->hash_chart == st->hash && !strcmp(rc->chart, st->id)) || - (rc->hash_chart == st->hash_name && !strcmp(rc->chart, st->name))) - return 1; - - return 0; -} - -// this has to be called while the RRDHOST is locked -inline void rrdsetcalc_link_matching(RRDSET *st) { - RRDHOST *host = st->rrdhost; - // debug(D_HEALTH, "find matching alarms for chart '%s'", st->id); - - RRDCALC *rc; - for(rc = host->alarms; rc ; rc = 
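
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the head insertion used by
// rrdsetcalc_link() above to attach an alarm to its chart's doubly linked
// alarm list. `struct alarm` is a stand-in for RRDCALC.

#include <stdio.h>

struct alarm {
    const char *name;
    struct alarm *next, *prev;
};

static void link_head(struct alarm **head, struct alarm *a) {
    a->prev = NULL;
    a->next = *head;               // old head becomes our successor
    if(a->next) a->next->prev = a;
    *head = a;
}

int main(void) {
    struct alarm a = { "cpu_high" }, b = { "disk_full" };
    struct alarm *head = NULL;

    link_head(&head, &a);
    link_head(&head, &b);          // list is now: disk_full -> cpu_high

    for(struct alarm *p = head; p; p = p->next)
        printf("%s\n", p->name);
    return 0;
}
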
rc->next) { - if(unlikely(rc->rrdset)) - continue; - - if(unlikely(rrdcalc_is_matching_this_rrdset(rc, st))) - rrdsetcalc_link(st, rc); - } -} - -// this has to be called while the RRDHOST is locked -inline void rrdsetcalc_unlink(RRDCALC *rc) { - RRDSET *st = rc->rrdset; - - if(!st) { - debug(D_HEALTH, "Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rc->chart?rc->chart:"NOCHART", rc->name); - error("Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rc->chart?rc->chart:"NOCHART", rc->name); - return; - } - - RRDHOST *host = st->rrdhost; - - { - time_t now = now_realtime_sec(); - health_alarm_log( - host, - rc->id, - rc->next_event_id++, - now, - rc->name, - rc->rrdset->id, - rc->rrdset->family, - rc->exec, - rc->recipient, - now - rc->last_status_change, - rc->old_value, - rc->value, - rc->status, - RRDCALC_STATUS_REMOVED, - rc->source, - rc->units, - rc->info, - 0, - 0 - ); - } - - debug(D_HEALTH, "Health unlinking alarm '%s.%s' from chart '%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, st->id, host->hostname); - - // unlink it - if(rc->rrdset_prev) - rc->rrdset_prev->rrdset_next = rc->rrdset_next; - - if(rc->rrdset_next) - rc->rrdset_next->rrdset_prev = rc->rrdset_prev; - - if(st->alarms == rc) - st->alarms = rc->rrdset_next; - - rc->rrdset_prev = rc->rrdset_next = NULL; - - rrdvar_free(host, &st->rrdvar_root_index, rc->local); - rc->local = NULL; - - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rc->family); - rc->family = NULL; - - rrdvar_free(host, &host->rrdvar_root_index, rc->hostid); - rc->hostid = NULL; - - rrdvar_free(host, &host->rrdvar_root_index, rc->hostname); - rc->hostname = NULL; - - rc->rrdset = NULL; - - // RRDCALC will remain in RRDHOST - // so that if the matching chart is found in the future - // it will be applied automatically -} - -RRDCALC *rrdcalc_find(RRDSET *st, const char *name) { - RRDCALC *rc; - uint32_t hash = simple_hash(name); - - for( rc = st->alarms; rc ; rc = rc->rrdset_next ) { - if(unlikely(rc->hash == hash && !strcmp(rc->name, name))) - return rc; - } - - return NULL; -} - -inline int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, uint32_t hash_chart, uint32_t hash_name) { - RRDCALC *rc; - - if(unlikely(!chart)) { - error("attempt to find RRDCALC '%s' without giving a chart name", name); - return 1; - } - - if(unlikely(!hash_chart)) hash_chart = simple_hash(chart); - if(unlikely(!hash_name)) hash_name = simple_hash(name); - - // make sure it does not already exist - for(rc = host->alarms; rc ; rc = rc->next) { - if (unlikely(rc->chart && rc->hash == hash_name && rc->hash_chart == hash_chart && !strcmp(name, rc->name) && !strcmp(chart, rc->chart))) { - debug(D_HEALTH, "Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname); - info("Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname); - return 1; - } - } - - return 0; -} - -inline uint32_t rrdcalc_get_unique_id(RRDHOST *host, const char *chart, const char *name, uint32_t *next_event_id) { - if(chart && name) { - uint32_t hash_chart = simple_hash(chart); - uint32_t hash_name = simple_hash(name); - - // re-use old IDs, by looking them up in the alarm log - ALARM_ENTRY *ae; - for(ae = host->health_log.alarms; ae ;ae = ae->next) { - if(unlikely(ae->hash_name == hash_name && ae->hash_chart == hash_chart && !strcmp(name, ae->name) && !strcmp(chart, ae->chart))) { - if(next_event_id) *next_event_id = ae->alarm_event_id + 1; - return ae->alarm_id; - } - } - } - - return 
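
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the lookup pattern of rrdcalc_find() and
// rrdcalc_exists() above - hash the key once, then compare the cached hash
// before falling back to strcmp(), so most mismatches cost one integer
// comparison. hash_str() is a generic djb2 stand-in for netdata's
// simple_hash().

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t hash_str(const char *s) {
    uint32_t h = 5381;                       // djb2, for illustration only
    while(*s) h = h * 33 + (uint8_t)*s++;
    return h;
}

struct node { const char *name; uint32_t hash; struct node *next; };

static struct node *find(struct node *head, const char *name) {
    uint32_t hash = hash_str(name);          // computed once per lookup
    for(struct node *p = head; p; p = p->next)
        if(p->hash == hash && !strcmp(p->name, name))
            return p;
    return NULL;
}

int main(void) {
    struct node b = { "ram_in_use", hash_str("ram_in_use"), NULL };
    struct node a = { "cpu_usage",  hash_str("cpu_usage"),  &b };
    printf("%s\n", find(&a, "ram_in_use") ? "found" : "missing");
    return 0;
}
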
host->health_log.next_alarm_id++; -} - -inline void rrdcalc_create_part2(RRDHOST *host, RRDCALC *rc) { - rrdhost_check_rdlock(host); - - if(rc->calculation) { - rc->calculation->status = &rc->status; - rc->calculation->this = &rc->value; - rc->calculation->after = &rc->db_after; - rc->calculation->before = &rc->db_before; - rc->calculation->rrdcalc = rc; - } - - if(rc->warning) { - rc->warning->status = &rc->status; - rc->warning->this = &rc->value; - rc->warning->after = &rc->db_after; - rc->warning->before = &rc->db_before; - rc->warning->rrdcalc = rc; - } - - if(rc->critical) { - rc->critical->status = &rc->status; - rc->critical->this = &rc->value; - rc->critical->after = &rc->db_after; - rc->critical->before = &rc->db_before; - rc->critical->rrdcalc = rc; - } - - // link it to the host - if(likely(host->alarms)) { - // append it - RRDCALC *t; - for(t = host->alarms; t && t->next ; t = t->next) ; - t->next = rc; - } - else { - host->alarms = rc; - } - - // link it to its chart - RRDSET *st; - rrdset_foreach_read(st, host) { - if(rrdcalc_is_matching_this_rrdset(rc, st)) { - rrdsetcalc_link(st, rc); - break; - } - } -} - -inline RRDCALC *rrdcalc_create(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *chart) { - - debug(D_HEALTH, "Health creating dynamic alarm (from template) '%s.%s'", chart, rt->name); - - if(rrdcalc_exists(host, chart, rt->name, 0, 0)) - return NULL; - - RRDCALC *rc = callocz(1, sizeof(RRDCALC)); - rc->next_event_id = 1; - rc->id = rrdcalc_get_unique_id(host, chart, rt->name, &rc->next_event_id); - rc->name = strdupz(rt->name); - rc->hash = simple_hash(rc->name); - rc->chart = strdupz(chart); - rc->hash_chart = simple_hash(rc->chart); - - if(rt->dimensions) rc->dimensions = strdupz(rt->dimensions); - - rc->green = rt->green; - rc->red = rt->red; - rc->value = NAN; - rc->old_value = NAN; - - rc->delay_up_duration = rt->delay_up_duration; - rc->delay_down_duration = rt->delay_down_duration; - rc->delay_max_duration = rt->delay_max_duration; - rc->delay_multiplier = rt->delay_multiplier; - - rc->group = rt->group; - rc->after = rt->after; - rc->before = rt->before; - rc->update_every = rt->update_every; - rc->options = rt->options; - - if(rt->exec) rc->exec = strdupz(rt->exec); - if(rt->recipient) rc->recipient = strdupz(rt->recipient); - if(rt->source) rc->source = strdupz(rt->source); - if(rt->units) rc->units = strdupz(rt->units); - if(rt->info) rc->info = strdupz(rt->info); - - if(rt->calculation) { - rc->calculation = expression_parse(rt->calculation->source, NULL, NULL); - if(!rc->calculation) - error("Health alarm '%s.%s': failed to parse calculation expression '%s'", chart, rt->name, rt->calculation->source); - } - if(rt->warning) { - rc->warning = expression_parse(rt->warning->source, NULL, NULL); - if(!rc->warning) - error("Health alarm '%s.%s': failed to re-parse warning expression '%s'", chart, rt->name, rt->warning->source); - } - if(rt->critical) { - rc->critical = expression_parse(rt->critical->source, NULL, NULL); - if(!rc->critical) - error("Health alarm '%s.%s': failed to re-parse critical expression '%s'", chart, rt->name, rt->critical->source); - } - - debug(D_HEALTH, "Health runtime added alarm '%s.%s': exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f", - (rc->chart)?rc->chart:"NOCHART", - 
rc->name, - (rc->exec)?rc->exec:"DEFAULT", - (rc->recipient)?rc->recipient:"DEFAULT", - rc->green, - rc->red, - rc->group, - rc->after, - rc->before, - rc->options, - (rc->dimensions)?rc->dimensions:"NONE", - rc->update_every, - (rc->calculation)?rc->calculation->parsed_as:"NONE", - (rc->warning)?rc->warning->parsed_as:"NONE", - (rc->critical)?rc->critical->parsed_as:"NONE", - rc->source, - rc->delay_up_duration, - rc->delay_down_duration, - rc->delay_max_duration, - rc->delay_multiplier - ); - - rrdcalc_create_part2(host, rc); - return rc; -} - -void rrdcalc_free(RRDCALC *rc) { - if(unlikely(!rc)) return; - - expression_free(rc->calculation); - expression_free(rc->warning); - expression_free(rc->critical); - - freez(rc->name); - freez(rc->chart); - freez(rc->family); - freez(rc->dimensions); - freez(rc->exec); - freez(rc->recipient); - freez(rc->source); - freez(rc->units); - freez(rc->info); - freez(rc); -} - -void rrdcalc_unlink_and_free(RRDHOST *host, RRDCALC *rc) { - if(unlikely(!rc)) return; - - debug(D_HEALTH, "Health removing alarm '%s.%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname); - - // unlink it from RRDSET - if(rc->rrdset) rrdsetcalc_unlink(rc); - - // unlink it from RRDHOST - if(unlikely(rc == host->alarms)) - host->alarms = rc->next; - - else { - RRDCALC *t; - for(t = host->alarms; t && t->next != rc; t = t->next) ; - if(t) { - t->next = rc->next; - rc->next = NULL; - } - else - error("Cannot unlink alarm '%s.%s' from host '%s': not found", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname); - } - - rrdcalc_free(rc); -} diff --git a/src/rrdcalctemplate.c b/src/rrdcalctemplate.c deleted file mode 100644 index 75a7002b3..000000000 --- a/src/rrdcalctemplate.c +++ /dev/null @@ -1,69 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -// ---------------------------------------------------------------------------- -// RRDCALCTEMPLATE management - -void rrdcalctemplate_link_matching(RRDSET *st) { - RRDHOST *host = st->rrdhost; - RRDCALCTEMPLATE *rt; - - for(rt = host->templates; rt ; rt = rt->next) { - if(rt->hash_context == st->hash_context && !strcmp(rt->context, st->context) - && (!rt->family_pattern || simple_pattern_matches(rt->family_pattern, st->family))) { - RRDCALC *rc = rrdcalc_create(host, rt, st->id); - if(unlikely(!rc)) - info("Health tried to create alarm from template '%s' on chart '%s' of host '%s', but it failed", rt->name, st->id, host->hostname); - -#ifdef NETDATA_INTERNAL_CHECKS - else if(rc->rrdset != st) - error("Health alarm '%s.%s' should be linked to chart '%s', but it is not", rc->chart?rc->chart:"NOCHART", rc->name, st->id); -#endif - } - } -} - -inline void rrdcalctemplate_free(RRDCALCTEMPLATE *rt) { - if(unlikely(!rt)) return; - - expression_free(rt->calculation); - expression_free(rt->warning); - expression_free(rt->critical); - - freez(rt->family_match); - simple_pattern_free(rt->family_pattern); - - freez(rt->name); - freez(rt->exec); - freez(rt->recipient); - freez(rt->context); - freez(rt->source); - freez(rt->units); - freez(rt->info); - freez(rt->dimensions); - freez(rt); -} - -inline void rrdcalctemplate_unlink_and_free(RRDHOST *host, RRDCALCTEMPLATE *rt) { - if(unlikely(!rt)) return; - - debug(D_HEALTH, "Health removing template '%s' of host '%s'", rt->name, host->hostname); - - if(host->templates == rt) { - host->templates = rt->next; - } - else { - RRDCALCTEMPLATE *t; - for (t = host->templates; t && t->next != rt; t = t->next ) ; - if(t) { - t->next = rt->next; - rt->next = NULL; - } - else - 
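
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the unlink idiom shared by
// rrdcalc_unlink_and_free() and rrdcalctemplate_unlink_and_free() above -
// handle the head specially, otherwise walk to the predecessor and splice
// the node out of the singly linked list.

#include <stdio.h>

struct node { int id; struct node *next; };

static void unlink_node(struct node **head, struct node *n) {
    if(*head == n)
        *head = n->next;
    else {
        struct node *t = *head;
        while(t && t->next != n) t = t->next;
        if(t) t->next = n->next;   // else: not found; netdata logs an error here
    }
    n->next = NULL;
}

int main(void) {
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *head = &a;

    unlink_node(&head, &b);
    for(struct node *p = head; p; p = p->next)
        printf("%d\n", p->id);     // prints 1 then 3
    return 0;
}
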
error("Cannot find RRDCALCTEMPLATE '%s' linked in host '%s'", rt->name, host->hostname); - } - - rrdcalctemplate_free(rt); -} - - diff --git a/src/rrddim.c b/src/rrddim.c deleted file mode 100644 index a54c6452f..000000000 --- a/src/rrddim.c +++ /dev/null @@ -1,387 +0,0 @@ -#define NETDATA_RRD_INTERNALS 1 -#include "common.h" - -// ---------------------------------------------------------------------------- -// RRDDIM index - -int rrddim_compare(void* a, void* b) { - if(((RRDDIM *)a)->hash < ((RRDDIM *)b)->hash) return -1; - else if(((RRDDIM *)a)->hash > ((RRDDIM *)b)->hash) return 1; - else return strcmp(((RRDDIM *)a)->id, ((RRDDIM *)b)->id); -} - -#define rrddim_index_add(st, rd) (RRDDIM *)avl_insert_lock(&((st)->dimensions_index), (avl *)(rd)) -#define rrddim_index_del(st,rd ) (RRDDIM *)avl_remove_lock(&((st)->dimensions_index), (avl *)(rd)) - -static inline RRDDIM *rrddim_index_find(RRDSET *st, const char *id, uint32_t hash) { - RRDDIM tmp = { - .id = id, - .hash = (hash)?hash:simple_hash(id) - }; - return (RRDDIM *)avl_search_lock(&(st->dimensions_index), (avl *) &tmp); -} - - -// ---------------------------------------------------------------------------- -// RRDDIM - find a dimension - -inline RRDDIM *rrddim_find(RRDSET *st, const char *id) { - debug(D_RRD_CALLS, "rrddim_find() for chart %s, dimension %s", st->name, id); - - return rrddim_index_find(st, id, 0); -} - - -// ---------------------------------------------------------------------------- -// RRDDIM rename a dimension - -inline int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name) { - if(unlikely(!name || !*name || !strcmp(rd->name, name))) - return 0; - - debug(D_RRD_CALLS, "rrddim_set_name() from %s.%s to %s.%s", st->name, rd->name, st->name, name); - - char varname[CONFIG_MAX_NAME + 1]; - snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id); - rd->name = config_set_default(st->config_section, varname, name); - rd->hash_name = simple_hash(rd->name); - rrddimvar_rename_all(rd); - rd->exposed = 0; - return 1; -} - -inline int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm) { - if(unlikely(rd->algorithm == algorithm)) - return 0; - - debug(D_RRD_CALLS, "Updating algorithm of dimension '%s/%s' from %s to %s", st->id, rd->name, rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm)); - rd->algorithm = algorithm; - rd->exposed = 0; - rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); - return 1; -} - -inline int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier) { - if(unlikely(rd->multiplier == multiplier)) - return 0; - - debug(D_RRD_CALLS, "Updating multiplier of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->multiplier, multiplier); - rd->multiplier = multiplier; - rd->exposed = 0; - rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); - return 1; -} - -inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor) { - if(unlikely(rd->divisor == divisor)) - return 0; - - debug(D_RRD_CALLS, "Updating divisor of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->divisor, divisor); - rd->divisor = divisor; - rd->exposed = 0; - rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); - return 1; -} - -// ---------------------------------------------------------------------------- -// RRDDIM create a dimension - -RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, 
RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode) { - RRDDIM *rd = rrddim_find(st, id); - if(unlikely(rd)) { - debug(D_RRD_CALLS, "Cannot create rrd dimension '%s/%s', it already exists.", st->id, name?name:"<NONAME>"); - - rrddim_set_name(st, rd, name); - rrddim_set_algorithm(st, rd, algorithm); - rrddim_set_multiplier(st, rd, multiplier); - rrddim_set_divisor(st, rd, divisor); - - return rd; - } - - RRDHOST *host = st->rrdhost; - char filename[FILENAME_MAX + 1]; - char fullfilename[FILENAME_MAX + 1]; - - char varname[CONFIG_MAX_NAME + 1]; - unsigned long size = sizeof(RRDDIM) + (st->entries * sizeof(storage_number)); - - debug(D_RRD_CALLS, "Adding dimension '%s/%s'.", st->id, id); - - rrdset_strncpyz_name(filename, id, FILENAME_MAX); - snprintfz(fullfilename, FILENAME_MAX, "%s/%s.db", st->cache_dir, filename); - - if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP || memory_mode == RRD_MEMORY_MODE_RAM) { - rd = (RRDDIM *)mymmap( - (memory_mode == RRD_MEMORY_MODE_RAM)?NULL:fullfilename - , size - , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE) - , 1 - ); - - if(likely(rd)) { - // we have a file mapped for rd - - memset(&rd->avl, 0, sizeof(avl)); - rd->id = NULL; - rd->name = NULL; - rd->cache_filename = NULL; - rd->variables = NULL; - rd->next = NULL; - rd->rrdset = NULL; - rd->exposed = 0; - - struct timeval now; - now_realtime_timeval(&now); - - if(memory_mode == RRD_MEMORY_MODE_RAM) { - memset(rd, 0, size); - } - else { - int reset = 0; - - if(strcmp(rd->magic, RRDDIMENSION_MAGIC) != 0) { - info("Initializing file %s.", fullfilename); - memset(rd, 0, size); - reset = 1; - } - else if(rd->memsize != size) { - error("File %s does not have the desired size, expected %lu but found %lu. Clearing it.", fullfilename, size, rd->memsize); - memset(rd, 0, size); - reset = 1; - } - else if(rd->update_every != st->update_every) { - error("File %s does not have the same update frequency, expected %d but found %d. Clearing it.", fullfilename, st->update_every, rd->update_every); - memset(rd, 0, size); - reset = 1; - } - else if(dt_usec(&now, &rd->last_collected_time) > (rd->entries * rd->update_every * USEC_PER_SEC)) { - info("File %s is too old (last collected %llu seconds ago, but the database is %ld seconds). Clearing it.", fullfilename, dt_usec(&now, &rd->last_collected_time) / USEC_PER_SEC, rd->entries * rd->update_every); - memset(rd, 0, size); - reset = 1; - } - - if(!reset) { - if(rd->algorithm != algorithm) { - info("File %s does not have the expected algorithm (expected %u '%s', found %u '%s'). Previous values may be wrong.", - fullfilename, algorithm, rrd_algorithm_name(algorithm), rd->algorithm, rrd_algorithm_name(rd->algorithm)); - } - - if(rd->multiplier != multiplier) { - info("File %s does not have the expected multiplier (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, multiplier, rd->multiplier); - } - - if(rd->divisor != divisor) { - info("File %s does not have the expected divisor (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, divisor, rd->divisor); - } - } - } - - // make sure we have the right memory mode - // even if we cleared the memory - rd->rrd_memory_mode = memory_mode; - } - } - - if(unlikely(!rd)) { - // if we didn't manage to get a mmap'd dimension, just create one - rd = callocz(1, size); - rd->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? 
RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC; - } - - rd->memsize = size; - - strcpy(rd->magic, RRDDIMENSION_MAGIC); - - rd->id = strdupz(id); - rd->hash = simple_hash(rd->id); - - rd->cache_filename = strdupz(fullfilename); - - snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id); - rd->name = config_get(st->config_section, varname, (name && *name)?name:rd->id); - rd->hash_name = simple_hash(rd->name); - - snprintfz(varname, CONFIG_MAX_NAME, "dim %s algorithm", rd->id); - rd->algorithm = rrd_algorithm_id(config_get(st->config_section, varname, rrd_algorithm_name(algorithm))); - - snprintfz(varname, CONFIG_MAX_NAME, "dim %s multiplier", rd->id); - rd->multiplier = config_get_number(st->config_section, varname, multiplier); - - snprintfz(varname, CONFIG_MAX_NAME, "dim %s divisor", rd->id); - rd->divisor = config_get_number(st->config_section, varname, divisor); - if(!rd->divisor) rd->divisor = 1; - - rd->entries = st->entries; - rd->update_every = st->update_every; - - if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)) - rd->collections_counter = 1; - else - rd->collections_counter = 0; - - rd->updated = 0; - rd->flags = 0x00000000; - - rd->calculated_value = 0; - rd->last_calculated_value = 0; - rd->collected_value = 0; - rd->last_collected_value = 0; - rd->collected_volume = 0; - rd->stored_volume = 0; - rd->last_stored_value = 0; - rd->values[st->current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS); - rd->last_collected_time.tv_sec = 0; - rd->last_collected_time.tv_usec = 0; - rd->rrdset = st; - - // append this dimension - rrdset_wrlock(st); - if(!st->dimensions) - st->dimensions = rd; - else { - RRDDIM *td = st->dimensions; - - if(td->algorithm != rd->algorithm || abs(td->multiplier) != abs(rd->multiplier) || abs(td->divisor) != abs(rd->divisor)) { - if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) { - #ifdef NETDATA_INTERNAL_CHECKS - info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").", - rd->name, - st->name, - host->hostname, - rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(td->algorithm), - rd->multiplier, td->multiplier, - rd->divisor, td->divisor - ); - #endif - rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS); - } - } - - for(; td->next; td = td->next) ; - td->next = rd; - } - - if(host->health_enabled) { - rrddimvar_create(rd, RRDVAR_TYPE_CALCULATED, NULL, NULL, &rd->last_stored_value, RRDVAR_OPTION_DEFAULT); - rrddimvar_create(rd, RRDVAR_TYPE_COLLECTED, NULL, "_raw", &rd->last_collected_value, RRDVAR_OPTION_DEFAULT); - rrddimvar_create(rd, RRDVAR_TYPE_TIME_T, NULL, "_last_collected_t", &rd->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT); - } - - rrdset_unlock(st); - - if(unlikely(rrddim_index_add(st, rd) != rd)) - error("RRDDIM: INTERNAL ERROR: attempt to index duplicate dimension '%s' on chart '%s'", rd->id, st->id); - - return(rd); -} - -// ---------------------------------------------------------------------------- -// RRDDIM remove / free a dimension - -void rrddim_free(RRDSET *st, RRDDIM *rd) -{ - debug(D_RRD_CALLS, "rrddim_free() %s.%s", st->name, rd->name); - - if(rd == st->dimensions) - st->dimensions = rd->next; - else { - RRDDIM *i; - for (i = st->dimensions; i && i->next != rd; i = i->next) ; - - if (i && i->next == rd) - i->next = rd->next; - else - error("Request to free dimension '%s.%s' but it is 
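
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone, POSIX): the validation rrddim_add_custom()
// performs on a freshly mmap()ed dimension file - check a magic string and
// the recorded size before trusting the contents, and reinitialize the
// mapping when they do not match. "demo.db", MAGIC and the header layout are
// made up for this example.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define MAGIC "DEMO CACHE V1"

struct header { char magic[32]; unsigned long memsize; };

int main(void) {
    const unsigned long size = sizeof(struct header);

    int fd = open("demo.db", O_RDWR | O_CREAT, 0644);
    if(fd == -1 || ftruncate(fd, (off_t)size) == -1) return 1;

    struct header *h = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if(h == MAP_FAILED) return 1;

    if(strcmp(h->magic, MAGIC) != 0 || h->memsize != size) {
        printf("initializing file\n");       // same response as a bad magic/size
        memset(h, 0, size);
        strcpy(h->magic, MAGIC);
        h->memsize = size;
    }
    else
        printf("reusing existing file\n");

    munmap(h, size);
    close(fd);
    return 0;
}
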
not linked.", st->id, rd->name); - } - rd->next = NULL; - - while(rd->variables) - rrddimvar_free(rd->variables); - - if(unlikely(rrddim_index_del(st, rd) != rd)) - error("RRDDIM: INTERNAL ERROR: attempt to remove from index dimension '%s' on chart '%s', removed a different dimension.", rd->id, st->id); - - // free(rd->annotations); - - switch(rd->rrd_memory_mode) { - case RRD_MEMORY_MODE_SAVE: - case RRD_MEMORY_MODE_MAP: - case RRD_MEMORY_MODE_RAM: - debug(D_RRD_CALLS, "Unmapping dimension '%s'.", rd->name); - freez((void *)rd->id); - freez(rd->cache_filename); - munmap(rd, rd->memsize); - break; - - case RRD_MEMORY_MODE_ALLOC: - case RRD_MEMORY_MODE_NONE: - debug(D_RRD_CALLS, "Removing dimension '%s'.", rd->name); - freez((void *)rd->id); - freez(rd->cache_filename); - freez(rd); - break; - } -} - - -// ---------------------------------------------------------------------------- -// RRDDIM - set dimension options - -int rrddim_hide(RRDSET *st, const char *id) { - debug(D_RRD_CALLS, "rrddim_hide() for chart %s, dimension %s", st->name, id); - - RRDHOST *host = st->rrdhost; - - RRDDIM *rd = rrddim_find(st, id); - if(unlikely(!rd)) { - error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname); - return 1; - } - - rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN); - return 0; -} - -int rrddim_unhide(RRDSET *st, const char *id) { - debug(D_RRD_CALLS, "rrddim_unhide() for chart %s, dimension %s", st->name, id); - - RRDHOST *host = st->rrdhost; - RRDDIM *rd = rrddim_find(st, id); - if(unlikely(!rd)) { - error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname); - return 1; - } - - rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN); - return 0; -} - - -// ---------------------------------------------------------------------------- -// RRDDIM - collect values for a dimension - -inline collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value) { - debug(D_RRD_CALLS, "rrddim_set_by_pointer() for chart %s, dimension %s, value " COLLECTED_NUMBER_FORMAT, st->name, rd->name, value); - - now_realtime_timeval(&rd->last_collected_time); - rd->collected_value = value; - rd->updated = 1; - - rd->collections_counter++; - - // fprintf(stderr, "%s.%s %llu " COLLECTED_NUMBER_FORMAT " dt %0.6f" " rate " CALCULATED_NUMBER_FORMAT "\n", st->name, rd->name, st->usec_since_last_update, value, (float)((double)st->usec_since_last_update / (double)1000000), (calculated_number)((value - rd->last_collected_value) * (calculated_number)rd->multiplier / (calculated_number)rd->divisor * 1000000.0 / (calculated_number)st->usec_since_last_update)); - - return rd->last_collected_value; -} - -collected_number rrddim_set(RRDSET *st, const char *id, collected_number value) { - RRDHOST *host = st->rrdhost; - RRDDIM *rd = rrddim_find(st, id); - if(unlikely(!rd)) { - error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname); - return 0; - } - - return rrddim_set_by_pointer(st, rd, value); -} diff --git a/src/rrddimvar.c b/src/rrddimvar.c deleted file mode 100644 index 28a3e7fa6..000000000 --- a/src/rrddimvar.c +++ /dev/null @@ -1,212 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -// ---------------------------------------------------------------------------- -// RRDDIMVAR management -// DIMENSION VARIABLES - -#define RRDDIMVAR_ID_MAX 1024 - -static inline void rrddimvar_free_variables(RRDDIMVAR *rs) { - RRDDIM *rd = rs->rrddim; - RRDSET 
*st = rd->rrdset; - RRDHOST *host = st->rrdhost; - - // CHART VARIABLES FOR THIS DIMENSION - - rrdvar_free(host, &st->rrdvar_root_index, rs->var_local_id); - rs->var_local_id = NULL; - - rrdvar_free(host, &st->rrdvar_root_index, rs->var_local_name); - rs->var_local_name = NULL; - - // FAMILY VARIABLES FOR THIS DIMENSION - - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_id); - rs->var_family_id = NULL; - - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_name); - rs->var_family_name = NULL; - - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_contextid); - rs->var_family_contextid = NULL; - - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_contextname); - rs->var_family_contextname = NULL; - - // HOST VARIABLES FOR THIS DIMENSION - - rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartidid); - rs->var_host_chartidid = NULL; - - rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartidname); - rs->var_host_chartidname = NULL; - - rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartnameid); - rs->var_host_chartnameid = NULL; - - rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartnamename); - rs->var_host_chartnamename = NULL; - - // KEYS - - freez(rs->key_id); - rs->key_id = NULL; - - freez(rs->key_name); - rs->key_name = NULL; - - freez(rs->key_fullidid); - rs->key_fullidid = NULL; - - freez(rs->key_fullidname); - rs->key_fullidname = NULL; - - freez(rs->key_contextid); - rs->key_contextid = NULL; - - freez(rs->key_contextname); - rs->key_contextname = NULL; - - freez(rs->key_fullnameid); - rs->key_fullnameid = NULL; - - freez(rs->key_fullnamename); - rs->key_fullnamename = NULL; -} - -static inline void rrddimvar_create_variables(RRDDIMVAR *rs) { - rrddimvar_free_variables(rs); - - RRDDIM *rd = rs->rrddim; - RRDSET *st = rd->rrdset; - RRDHOST *host = st->rrdhost; - - char buffer[RRDDIMVAR_ID_MAX + 1]; - - // KEYS - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s%s%s", rs->prefix, rd->id, rs->suffix); - rs->key_id = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s%s%s", rs->prefix, rd->name, rs->suffix); - rs->key_name = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->id, rs->key_id); - rs->key_fullidid = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->id, rs->key_name); - rs->key_fullidname = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->context, rs->key_id); - rs->key_contextid = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->context, rs->key_name); - rs->key_contextname = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->name, rs->key_id); - rs->key_fullnameid = strdupz(buffer); - - snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->name, rs->key_name); - rs->key_fullnamename = strdupz(buffer); - - // CHART VARIABLES FOR THIS DIMENSION - // ----------------------------------- - // - // dimensions are available as: - // - $id - // - $name - - rs->var_local_id = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->key_id, rs->type, rs->value); - rs->var_local_name = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->key_name, rs->type, rs->value); - - // FAMILY VARIABLES FOR THIS DIMENSION - // ----------------------------------- - // - // dimensions are available as: - // - $id (only the first, when multiple overlap) - // - $name (only the first, when multiple overlap) - // - $chart-context.id - // - $chart-context.name - - rs->var_family_id = 
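
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the key construction in
// rrddimvar_create_variables() above - one snprintf() per scope gives the
// same dimension a chart-local name, a context-qualified name and a fully
// qualified host-level name. snprintf() replaces snprintfz(); the chart and
// dimension ids are examples.

#include <stdio.h>

int main(void) {
    const char *chart = "system.cpu", *context = "cpu", *dim = "user";
    const char *prefix = "", *suffix = "_raw";

    char key[1025], contextkey[1025], fullkey[1025];
    snprintf(key, sizeof(key), "%s%s%s", prefix, dim, suffix);
    snprintf(contextkey, sizeof(contextkey), "%s.%s", context, key);
    snprintf(fullkey, sizeof(fullkey), "%s.%s", chart, key);

    printf("local:   %s\n", key);        // user_raw
    printf("context: %s\n", contextkey); // cpu.user_raw
    printf("host:    %s\n", fullkey);    // system.cpu.user_raw
    return 0;
}
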
rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_id, rs->type, rs->value); - rs->var_family_name = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_name, rs->type, rs->value); - rs->var_family_contextid = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_contextid, rs->type, rs->value); - rs->var_family_contextname = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_contextname, rs->type, rs->value); - - // HOST VARIABLES FOR THIS DIMENSION - // ----------------------------------- - // - // dimensions are available as: - // - $chart-id.id - // - $chart-id.name - // - $chart-name.id - // - $chart-name.name - - rs->var_host_chartidid = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullidid, rs->type, rs->value); - rs->var_host_chartidname = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullidname, rs->type, rs->value); - rs->var_host_chartnameid = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullnameid, rs->type, rs->value); - rs->var_host_chartnamename = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullnamename, rs->type, rs->value); -} - -RRDDIMVAR *rrddimvar_create(RRDDIM *rd, RRDVAR_TYPE type, const char *prefix, const char *suffix, void *value, RRDVAR_OPTIONS options) { - RRDSET *st = rd->rrdset; - - debug(D_VARIABLES, "RRDDIMSET create for chart id '%s' name '%s', dimension id '%s', name '%s%s%s'", st->id, st->name, rd->id, (prefix)?prefix:"", rd->name, (suffix)?suffix:""); - - if(!prefix) prefix = ""; - if(!suffix) suffix = ""; - - RRDDIMVAR *rs = (RRDDIMVAR *)callocz(1, sizeof(RRDDIMVAR)); - - rs->prefix = strdupz(prefix); - rs->suffix = strdupz(suffix); - - rs->type = type; - rs->value = value; - rs->options = options; - rs->rrddim = rd; - - rs->next = rd->variables; - rd->variables = rs; - - rrddimvar_create_variables(rs); - - return rs; -} - -void rrddimvar_rename_all(RRDDIM *rd) { - RRDSET *st = rd->rrdset; - debug(D_VARIABLES, "RRDDIMSET rename for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name); - - RRDDIMVAR *rs, *next = rd->variables; - while((rs = next)) { - next = rs->next; - rrddimvar_create_variables(rs); - } -} - -void rrddimvar_free(RRDDIMVAR *rs) { - RRDDIM *rd = rs->rrddim; - RRDSET *st = rd->rrdset; - debug(D_VARIABLES, "RRDDIMSET free for chart id '%s' name '%s', dimension id '%s', name '%s', prefix='%s', suffix='%s'", st->id, st->name, rd->id, rd->name, rs->prefix, rs->suffix); - - rrddimvar_free_variables(rs); - - if(rd->variables == rs) { - debug(D_VARIABLES, "RRDDIMSET removing first entry for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name); - rd->variables = rs->next; - } - else { - debug(D_VARIABLES, "RRDDIMSET removing non-first entry for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name); - RRDDIMVAR *t; - for (t = rd->variables; t && t->next != rs; t = t->next) ; - if(!t) error("RRDDIMVAR '%s' not found in dimension '%s/%s' variables linked list", rs->key_name, st->id, rd->id); - else t->next = rs->next; - } - - freez(rs->prefix); - freez(rs->suffix); - freez(rs); -} - diff --git a/src/rrdfamily.c b/src/rrdfamily.c deleted file mode 100644 index 905ae480b..000000000 --- a/src/rrdfamily.c +++ /dev/null @@ -1,59 +0,0 @@ -#define NETDATA_RRD_INTERNALS 1 -#include "common.h" - -// 
---------------------------------------------------------------------------- -// RRDFAMILY index - -int rrdfamily_compare(void *a, void *b) { - if(((RRDFAMILY *)a)->hash_family < ((RRDFAMILY *)b)->hash_family) return -1; - else if(((RRDFAMILY *)a)->hash_family > ((RRDFAMILY *)b)->hash_family) return 1; - else return strcmp(((RRDFAMILY *)a)->family, ((RRDFAMILY *)b)->family); -} - -#define rrdfamily_index_add(host, rc) (RRDFAMILY *)avl_insert_lock(&((host)->rrdfamily_root_index), (avl *)(rc)) -#define rrdfamily_index_del(host, rc) (RRDFAMILY *)avl_remove_lock(&((host)->rrdfamily_root_index), (avl *)(rc)) - -static RRDFAMILY *rrdfamily_index_find(RRDHOST *host, const char *id, uint32_t hash) { - RRDFAMILY tmp; - tmp.family = id; - tmp.hash_family = (hash)?hash:simple_hash(tmp.family); - - return (RRDFAMILY *)avl_search_lock(&(host->rrdfamily_root_index), (avl *) &tmp); -} - -RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id) { - RRDFAMILY *rc = rrdfamily_index_find(host, id, 0); - if(!rc) { - rc = callocz(1, sizeof(RRDFAMILY)); - - rc->family = strdupz(id); - rc->hash_family = simple_hash(rc->family); - - // initialize the variables index - avl_init_lock(&rc->rrdvar_root_index, rrdvar_compare); - - RRDFAMILY *ret = rrdfamily_index_add(host, rc); - if(ret != rc) - error("RRDFAMILY: INTERNAL ERROR: Expected to INSERT RRDFAMILY '%s' into index, but inserted '%s'.", rc->family, (ret)?ret->family:"NONE"); - } - - rc->use_count++; - return rc; -} - -void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc) { - rc->use_count--; - if(!rc->use_count) { - RRDFAMILY *ret = rrdfamily_index_del(host, rc); - if(ret != rc) - error("RRDFAMILY: INTERNAL ERROR: Expected to DELETE RRDFAMILY '%s' from index, but deleted '%s'.", rc->family, (ret)?ret->family:"NONE"); - else { - debug(D_RRD_CALLS, "RRDFAMILY: Cleaning up remaining family variables for host '%s', family '%s'", host->hostname, rc->family); - rrdvar_free_remaining_variables(host, &rc->rrdvar_root_index); - - freez((void *) rc->family); - freez(rc); - } - } -} - diff --git a/src/rrdhost.c b/src/rrdhost.c deleted file mode 100644 index e62e61ae8..000000000 --- a/src/rrdhost.c +++ /dev/null @@ -1,735 +0,0 @@ -#define NETDATA_RRD_INTERNALS 1 -#include "common.h" - -RRDHOST *localhost = NULL; -size_t rrd_hosts_available = 0; -netdata_rwlock_t rrd_rwlock = NETDATA_RWLOCK_INITIALIZER; - -time_t rrdset_free_obsolete_time = 3600; -time_t rrdhost_free_orphan_time = 3600; - -// ---------------------------------------------------------------------------- -// RRDHOST index - -int rrdhost_compare(void* a, void* b) { - if(((RRDHOST *)a)->hash_machine_guid < ((RRDHOST *)b)->hash_machine_guid) return -1; - else if(((RRDHOST *)a)->hash_machine_guid > ((RRDHOST *)b)->hash_machine_guid) return 1; - else return strcmp(((RRDHOST *)a)->machine_guid, ((RRDHOST *)b)->machine_guid); -} - -avl_tree_lock rrdhost_root_index = { - .avl_tree = { NULL, rrdhost_compare }, - .rwlock = AVL_LOCK_INITIALIZER -}; - -RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash) { - debug(D_RRDHOST, "Searching in index for host with guid '%s'", guid); - - RRDHOST tmp; - strncpyz(tmp.machine_guid, guid, GUID_LEN); - tmp.hash_machine_guid = (hash)?hash:simple_hash(tmp.machine_guid); - - return (RRDHOST *)avl_search_lock(&(rrdhost_root_index), (avl *) &tmp); -} - -RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash) { - if(unlikely(!strcmp(hostname, "localhost"))) - return localhost; - - if(unlikely(!hash)) hash = simple_hash(hostname); - - rrd_rdlock(); - RRDHOST *host; - 
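
// ----------------------------------------------------------------------------
// Illustrative sketch (standalone): the use_count discipline of
// rrdfamily_create()/rrdfamily_free() above - every chart sharing a family
// bumps the counter, and the object is only torn down when the last user
// releases it. calloc/free stand in for callocz/freez and the avl index.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct family { char *name; unsigned use_count; };

static struct family *family_acquire(struct family **slot, const char *name) {
    if(!*slot) {                       // first user creates it
        *slot = calloc(1, sizeof(struct family));
        (*slot)->name = strdup(name);
    }
    (*slot)->use_count++;
    return *slot;
}

static void family_release(struct family **slot) {
    if(!--(*slot)->use_count) {        // last user frees it
        free((*slot)->name);
        free(*slot);
        *slot = NULL;
    }
}

int main(void) {
    struct family *index = NULL;       // stand-in for the per-host avl index
    family_acquire(&index, "disk");
    family_acquire(&index, "disk");    // shared by a second chart
    family_release(&index);
    printf("%s still has %u user(s)\n", index->name, index->use_count);
    family_release(&index);            // now freed
    return 0;
}
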
rrdhost_foreach_read(host) { - if(unlikely((hash == host->hash_hostname && !strcmp(hostname, host->hostname)))) { - rrd_unlock(); - return host; - } - } - rrd_unlock(); - - return NULL; -} - -#define rrdhost_index_add(rrdhost) (RRDHOST *)avl_insert_lock(&(rrdhost_root_index), (avl *)(rrdhost)) -#define rrdhost_index_del(rrdhost) (RRDHOST *)avl_remove_lock(&(rrdhost_root_index), (avl *)(rrdhost)) - - -// ---------------------------------------------------------------------------- -// RRDHOST - internal helpers - -static inline void rrdhost_init_tags(RRDHOST *host, const char *tags) { - if(host->tags && tags && !strcmp(host->tags, tags)) - return; - - void *old = (void *)host->tags; - host->tags = (tags && *tags)?strdupz(tags):NULL; - freez(old); -} - -static inline void rrdhost_init_hostname(RRDHOST *host, const char *hostname) { - if(host->hostname && hostname && !strcmp(host->hostname, hostname)) - return; - - void *old = host->hostname; - host->hostname = strdupz(hostname?hostname:"localhost"); - host->hash_hostname = simple_hash(host->hostname); - freez(old); -} - -static inline void rrdhost_init_os(RRDHOST *host, const char *os) { - if(host->os && os && !strcmp(host->os, os)) - return; - - void *old = (void *)host->os; - host->os = strdupz(os?os:"unknown"); - freez(old); -} - -static inline void rrdhost_init_timezone(RRDHOST *host, const char *timezone) { - if(host->timezone && timezone && !strcmp(host->timezone, timezone)) - return; - - void *old = (void *)host->timezone; - host->timezone = strdupz((timezone && *timezone)?timezone:"unknown"); - freez(old); -} - -static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_guid) { - strncpy(host->machine_guid, machine_guid, GUID_LEN); - host->machine_guid[GUID_LEN] = '\0'; - host->hash_machine_guid = simple_hash(host->machine_guid); -} - - -// ---------------------------------------------------------------------------- -// RRDHOST - add a host - -RRDHOST *rrdhost_create(const char *hostname, - const char *registry_hostname, - const char *guid, - const char *os, - const char *timezone, - const char *tags, - const char *program_name, - const char *program_version, - int update_every, - long entries, - RRD_MEMORY_MODE memory_mode, - int health_enabled, - int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - int is_localhost -) { - debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid); - - rrd_check_wrlock(); - - RRDHOST *host = callocz(1, sizeof(RRDHOST)); - - host->rrd_update_every = (update_every > 0)?update_every:1; - host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries); - host->rrd_memory_mode = memory_mode; - host->health_enabled = (memory_mode == RRD_MEMORY_MODE_NONE)? 
0 : health_enabled; - host->rrdpush_send_enabled = (rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key); - host->rrdpush_send_destination = (host->rrdpush_send_enabled)?strdupz(rrdpush_destination):NULL; - host->rrdpush_send_api_key = (host->rrdpush_send_enabled)?strdupz(rrdpush_api_key):NULL; - - host->rrdpush_sender_pipe[0] = -1; - host->rrdpush_sender_pipe[1] = -1; - host->rrdpush_sender_socket = -1; - - netdata_mutex_init(&host->rrdpush_sender_buffer_mutex); - netdata_rwlock_init(&host->rrdhost_rwlock); - - rrdhost_init_hostname(host, hostname); - rrdhost_init_machine_guid(host, guid); - rrdhost_init_os(host, os); - rrdhost_init_timezone(host, timezone); - rrdhost_init_tags(host, tags); - - host->program_name = strdupz((program_name && *program_name)?program_name:"unknown"); - host->program_version = strdupz((program_version && *program_version)?program_version:"unknown"); - host->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname); - - avl_init_lock(&(host->rrdset_root_index), rrdset_compare); - avl_init_lock(&(host->rrdset_root_index_name), rrdset_compare_name); - avl_init_lock(&(host->rrdfamily_root_index), rrdfamily_compare); - avl_init_lock(&(host->rrdvar_root_index), rrdvar_compare); - - if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete obsolete charts files", 1)) - rrdhost_flag_set(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS); - - if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete orphan hosts files", 1) && !is_localhost) - rrdhost_flag_set(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST); - - - // ------------------------------------------------------------------------ - // initialize health variables - - host->health_log.next_log_id = 1; - host->health_log.next_alarm_id = 1; - host->health_log.max = 1000; - host->health_log.next_log_id = - host->health_log.next_alarm_id = (uint32_t)now_realtime_sec(); - - long n = config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", host->health_log.max); - if(n < 10) { - error("Host '%s': health configuration has invalid max log entries %ld. 
Using default %u", host->hostname, n, host->health_log.max); - config_set_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", (long)host->health_log.max); - } - else - host->health_log.max = (unsigned int)n; - - netdata_rwlock_init(&host->health_log.alarm_log_rwlock); - - char filename[FILENAME_MAX + 1]; - - if(is_localhost) { - - host->cache_dir = strdupz(netdata_configured_cache_dir); - host->varlib_dir = strdupz(netdata_configured_varlib_dir); - - } - else { - // this is not localhost - append our GUID to localhost path - - snprintfz(filename, FILENAME_MAX, "%s/%s", netdata_configured_cache_dir, host->machine_guid); - host->cache_dir = strdupz(filename); - - if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) { - int r = mkdir(host->cache_dir, 0775); - if(r != 0 && errno != EEXIST) - error("Host '%s': cannot create directory '%s'", host->hostname, host->cache_dir); - } - - snprintfz(filename, FILENAME_MAX, "%s/%s", netdata_configured_varlib_dir, host->machine_guid); - host->varlib_dir = strdupz(filename); - - if(host->health_enabled) { - int r = mkdir(host->varlib_dir, 0775); - if(r != 0 && errno != EEXIST) - error("Host '%s': cannot create directory '%s'", host->hostname, host->varlib_dir); - } - - } - - if(host->health_enabled) { - snprintfz(filename, FILENAME_MAX, "%s/health", host->varlib_dir); - int r = mkdir(filename, 0775); - if(r != 0 && errno != EEXIST) - error("Host '%s': cannot create directory '%s'", host->hostname, filename); - } - - snprintfz(filename, FILENAME_MAX, "%s/health/health-log.db", host->varlib_dir); - host->health_log_filename = strdupz(filename); - - snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_plugins_dir); - host->health_default_exec = strdupz(config_get(CONFIG_SECTION_HEALTH, "script to execute on alarm", filename)); - host->health_default_recipient = strdup("root"); - - - // ------------------------------------------------------------------------ - // load health configuration - - if(host->health_enabled) { - health_alarm_log_load(host); - health_alarm_log_open(host); - - rrdhost_wrlock(host); - health_readdir(host, health_config_dir()); - rrdhost_unlock(host); - } - - - // ------------------------------------------------------------------------ - // link it and add it to the index - - if(is_localhost) { - host->next = localhost; - localhost = host; - } - else { - if(localhost) { - host->next = localhost->next; - localhost->next = host; - } - else localhost = host; - } - - RRDHOST *t = rrdhost_index_add(host); - - if(t != host) { - error("Host '%s': cannot add host with machine guid '%s' to index. 
It already exists as host '%s' with machine guid '%s'.", host->hostname, host->machine_guid, t->hostname, t->machine_guid); - rrdhost_free(host); - host = NULL; - } - else { - info("Host '%s' (at registry as '%s') with guid '%s' initialized" - ", os '%s'" - ", timezone '%s'" - ", tags '%s'" - ", program_name '%s'" - ", program_version '%s'" - ", update every %d" - ", memory mode %s" - ", history entries %ld" - ", streaming %s" - " (to '%s' with api key '%s')" - ", health %s" - ", cache_dir '%s'" - ", varlib_dir '%s'" - ", health_log '%s'" - ", alarms default handler '%s'" - ", alarms default recipient '%s'" - , host->hostname - , host->registry_hostname - , host->machine_guid - , host->os - , host->timezone - , (host->tags)?host->tags:"" - , host->program_name - , host->program_version - , host->rrd_update_every - , rrd_memory_mode_name(host->rrd_memory_mode) - , host->rrd_history_entries - , host->rrdpush_send_enabled?"enabled":"disabled" - , host->rrdpush_send_destination?host->rrdpush_send_destination:"" - , host->rrdpush_send_api_key?host->rrdpush_send_api_key:"" - , host->health_enabled?"enabled":"disabled" - , host->cache_dir - , host->varlib_dir - , host->health_log_filename - , host->health_default_exec - , host->health_default_recipient - ); - } - - rrd_hosts_available++; - - return host; -} - -RRDHOST *rrdhost_find_or_create( - const char *hostname - , const char *registry_hostname - , const char *guid - , const char *os - , const char *timezone - , const char *tags - , const char *program_name - , const char *program_version - , int update_every - , long history - , RRD_MEMORY_MODE mode - , int health_enabled - , int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key -) { - debug(D_RRDHOST, "Searching for host '%s' with guid '%s'", hostname, guid); - - rrd_wrlock(); - RRDHOST *host = rrdhost_find_by_guid(guid, 0); - if(!host) { - host = rrdhost_create( - hostname - , registry_hostname - , guid - , os - , timezone - , tags - , program_name - , program_version - , update_every - , history - , mode - , health_enabled - , rrdpush_enabled - , rrdpush_destination - , rrdpush_api_key - , 0 - ); - } - else { - host->health_enabled = health_enabled; - - if(strcmp(host->hostname, hostname) != 0) { - info("Host '%s' has been renamed to '%s'. If this is not intentional it may mean multiple hosts are using the same machine_guid.", host->hostname, hostname); - char *t = host->hostname; - host->hostname = strdupz(hostname); - host->hash_hostname = simple_hash(host->hostname); - freez(t); - } - - if(strcmp(host->program_name, program_name) != 0) { - info("Host '%s' switched program name from '%s' to '%s'", host->hostname, host->program_name, program_name); - char *t = host->program_name; - host->program_name = strdupz(program_name); - freez(t); - } - - if(strcmp(host->program_version, program_version) != 0) { - info("Host '%s' switched program version from '%s' to '%s'", host->hostname, host->program_version, program_version); - char *t = host->program_version; - host->program_version = strdupz(program_version); - freez(t); - } - - if(host->rrd_update_every != update_every) - error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. Restart netdata here to apply the new settings.", host->hostname, host->rrd_update_every, update_every); - - if(host->rrd_history_entries < history) - error("Host '%s' has history of %ld entries, but the wanted one is %ld entries. 
Restart netdata here to apply the new settings.", host->hostname, host->rrd_history_entries, history); - - if(host->rrd_memory_mode != mode) - error("Host '%s' has memory mode '%s', but the wanted one is '%s'. Restart netdata here to apply the new settings.", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode)); - - // update host tags - rrdhost_init_tags(host, tags); - } - - rrdhost_cleanup_orphan_hosts_nolock(host); - - rrd_unlock(); - - return host; -} - -inline int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now) { - if(host != protected - && host != localhost - && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN) - && !host->connected_senders - && host->senders_disconnected_time - && host->senders_disconnected_time + rrdhost_free_orphan_time < now) - return 1; - - return 0; -} - -void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected) { - time_t now = now_realtime_sec(); - - RRDHOST *host; - -restart_after_removal: - rrdhost_foreach_write(host) { - if(rrdhost_should_be_removed(host, protected, now)) { - info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", host->hostname, host->machine_guid); - - if(rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST)) - rrdhost_delete_charts(host); - else - rrdhost_save_charts(host); - - rrdhost_free(host); - goto restart_after_removal; - } - } -} - -// ---------------------------------------------------------------------------- -// RRDHOST global / startup initialization - -void rrd_init(char *hostname) { - rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time); - gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above); - if(gap_when_lost_iterations_above < 1) - gap_when_lost_iterations_above = 1; - - health_init(); - registry_init(); - rrdpush_init(); - - debug(D_RRDHOST, "Initializing localhost with hostname '%s'", hostname); - rrd_wrlock(); - localhost = rrdhost_create( - hostname - , registry_get_this_machine_hostname() - , registry_get_this_machine_guid() - , os_type - , netdata_configured_timezone - , config_get(CONFIG_SECTION_BACKEND, "host tags", "") - , program_name - , program_version - , default_rrd_update_every - , default_rrd_history_entries - , default_rrd_memory_mode - , default_health_enabled - , default_rrdpush_enabled - , default_rrdpush_destination - , default_rrdpush_api_key - , 1 - ); - rrd_unlock(); -} - -// ---------------------------------------------------------------------------- -// RRDHOST - lock validations -// there are only used when NETDATA_INTERNAL_CHECKS is set - -void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) { - debug(D_RRDHOST, "Checking read lock on host '%s'", host->hostname); - - int ret = netdata_rwlock_trywrlock(&host->rrdhost_rwlock); - if(ret == 0) - fatal("RRDHOST '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file); -} - -void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) { - debug(D_RRDHOST, "Checking write lock on host '%s'", host->hostname); - - int ret = netdata_rwlock_tryrdlock(&host->rrdhost_rwlock); - if(ret == 0) - fatal("RRDHOST '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file); -} - 
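/*
 * A minimal standalone sketch of the trylock inversion the validators above
 * rely on, assuming plain POSIX rwlocks (the netdata_rwlock_* calls are thin
 * wrappers around them); check_rdlock_held() is an illustrative name, not
 * netdata API. If anyone holds the lock (read or write), trywrlock fails
 * with EBUSY and the assertion passes; if trywrlock succeeds, nobody held
 * the lock and the missing-lock bug is reported. Like the checks above, this
 * only proves that somebody holds the lock, not that the caller does.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void check_rdlock_held(pthread_rwlock_t *lock, const char *what) {
    if(pthread_rwlock_trywrlock(lock) == 0) {
        /* we acquired the write lock, so no read lock was held */
        pthread_rwlock_unlock(lock);
        fprintf(stderr, "FATAL: %s should be read-locked, but it is not\n", what);
        exit(1);
    }
    /* EBUSY (or EDEADLK): the lock is held, as expected - nothing to do */
}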
-void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line) { - debug(D_RRDHOST, "Checking read lock on all RRDs"); - - int ret = netdata_rwlock_trywrlock(&rrd_rwlock); - if(ret == 0) - fatal("RRDs should be read-locked, but they are not, at function %s() at line %lu of file '%s'", function, line, file); -} - -void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line) { - debug(D_RRDHOST, "Checking write lock on all RRDs"); - - int ret = netdata_rwlock_tryrdlock(&rrd_rwlock); - if(ret == 0) - fatal("RRDs should be write-locked, but they are not, at function %s() at line %lu of file '%s'", function, line, file); -} - -// ---------------------------------------------------------------------------- -// RRDHOST - free - -void rrdhost_free(RRDHOST *host) { - if(!host) return; - - info("Freeing all memory for host '%s'...", host->hostname); - - rrd_check_wrlock(); // make sure the RRDs are write locked - - // stop a possibly running thread - rrdpush_sender_thread_stop(host); - - rrdhost_wrlock(host); // lock this RRDHOST - - // ------------------------------------------------------------------------ - // release its children resources - - while(host->rrdset_root) - rrdset_free(host->rrdset_root); - - while(host->alarms) - rrdcalc_unlink_and_free(host, host->alarms); - - while(host->templates) - rrdcalctemplate_unlink_and_free(host, host->templates); - - debug(D_RRD_CALLS, "RRDHOST: Cleaning up remaining host variables for host '%s'", host->hostname); - rrdvar_free_remaining_variables(host, &host->rrdvar_root_index); - - health_alarm_log_free(host); - - // ------------------------------------------------------------------------ - // remove it from the indexes - - if(rrdhost_index_del(host) != host) - error("RRDHOST '%s' removed from index, deleted the wrong entry.", host->hostname); - - - // ------------------------------------------------------------------------ - // unlink it from the hosts list - - if(host == localhost) { - localhost = host->next; - } - else { - // find the previous one - RRDHOST *h; - for(h = localhost; h && h->next != host ; h = h->next) ; - - // bypass it - if(h) h->next = host->next; - else error("Request to free RRDHOST '%s': cannot find it", host->hostname); - } - - // ------------------------------------------------------------------------ - // free it - - freez((void *)host->tags); - freez((void *)host->os); - freez((void *)host->timezone); - freez(host->program_version); - freez(host->program_name); - freez(host->cache_dir); - freez(host->varlib_dir); - freez(host->rrdpush_send_api_key); - freez(host->rrdpush_send_destination); - freez(host->health_default_exec); - freez(host->health_default_recipient); - freez(host->health_log_filename); - freez(host->hostname); - freez(host->registry_hostname); - rrdhost_unlock(host); - netdata_rwlock_destroy(&host->health_log.alarm_log_rwlock); - netdata_rwlock_destroy(&host->rrdhost_rwlock); - freez(host); - - rrd_hosts_available--; -} - -void rrdhost_free_all(void) { - rrd_wrlock(); - while(localhost) rrdhost_free(localhost); - rrd_unlock(); -} - -// ---------------------------------------------------------------------------- -// RRDHOST - save host files - -void rrdhost_save_charts(RRDHOST *host) { - if(!host) return; - - info("Saving/Closing database of host '%s'...", host->hostname); - - RRDSET *st; - - // we get a write lock - // to ensure only one thread is saving the database - rrdhost_wrlock(host); - - rrdset_foreach_write(st, host) { - rrdset_rdlock(st); - 
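/* each chart is saved under its own read lock; the host write lock taken above keeps the chart list stable while we walk it */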
rrdset_save(st); - rrdset_unlock(st); - } - - rrdhost_unlock(host); -} - -// ---------------------------------------------------------------------------- -// RRDHOST - delete host files - -void rrdhost_delete_charts(RRDHOST *host) { - if(!host) return; - - info("Deleting database of host '%s'...", host->hostname); - - RRDSET *st; - - // we get a write lock - // to ensure only one thread is deleting the database - rrdhost_wrlock(host); - - rrdset_foreach_write(st, host) { - rrdset_rdlock(st); - rrdset_delete(st); - rrdset_unlock(st); - } - - recursively_delete_dir(host->cache_dir, "left over host"); - - rrdhost_unlock(host); -} - -// ---------------------------------------------------------------------------- -// RRDHOST - cleanup host files - -void rrdhost_cleanup_charts(RRDHOST *host) { - if(!host) return; - - info("Cleaning up database of host '%s'...", host->hostname); - - RRDSET *st; - uint32_t rrdhost_delete_obsolete_charts = rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS); - - // we get a write lock - // to ensure only one thread is saving the database - rrdhost_wrlock(host); - - rrdset_foreach_write(st, host) { - rrdset_rdlock(st); - - if(rrdhost_delete_obsolete_charts && rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)) - rrdset_delete(st); - else - rrdset_save(st); - - rrdset_unlock(st); - } - - rrdhost_unlock(host); -} - - -// ---------------------------------------------------------------------------- -// RRDHOST - save all hosts to disk - -void rrdhost_save_all(void) { - info("Saving database [%zu host(s)]...", rrd_hosts_available); - - rrd_rdlock(); - - RRDHOST *host; - rrdhost_foreach_read(host) - rrdhost_save_charts(host); - - rrd_unlock(); -} - -// ---------------------------------------------------------------------------- -// RRDHOST - save or delete all hosts from disk - -void rrdhost_cleanup_all(void) { - info("Cleaning up database [%zu host(s)]...", rrd_hosts_available); - - rrd_rdlock(); - - RRDHOST *host; - rrdhost_foreach_read(host) { - if(host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS) && !host->connected_senders) - rrdhost_delete_charts(host); - else - rrdhost_cleanup_charts(host); - } - - rrd_unlock(); -} - - -// ---------------------------------------------------------------------------- -// RRDHOST - save or delete all the host charts from disk - -void rrdhost_cleanup_obsolete_charts(RRDHOST *host) { - time_t now = now_realtime_sec(); - - RRDSET *st; - - uint32_t rrdhost_delete_obsolete_charts = rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS); - -restart_after_removal: - rrdset_foreach_write(st, host) { - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) - && st->last_accessed_time + rrdset_free_obsolete_time < now - && st->last_updated.tv_sec + rrdset_free_obsolete_time < now - && st->last_collected_time.tv_sec + rrdset_free_obsolete_time < now - )) { - - rrdset_rdlock(st); - - if(rrdhost_delete_obsolete_charts) - rrdset_delete(st); - else - rrdset_save(st); - - rrdset_unlock(st); - - rrdset_free(st); - goto restart_after_removal; - } - } -} diff --git a/src/rrdpush.c b/src/rrdpush.c deleted file mode 100644 index 8f71c6d4c..000000000 --- a/src/rrdpush.c +++ /dev/null @@ -1,1160 +0,0 @@ -#include "common.h" - -/* - * rrdpush - * - * 3 threads are involved in all stream operations - * - * 1. a random data collection thread, calling rrdset_done_push() - * this is called for each chart. 
- * - * the output of this work is kept in a BUFFER in RRDHOST - * the sender thread is signalled via a pipe (also in RRDHOST) - * - * 2. a sender thread running at the sending netdata - * this is spawned automatically on the first chart to be pushed - * - * It tries to push the metrics to the remote netdata, as fast - * as possible (i.e. immediately after they are collected). - * - * 3. a receiver thread, running at the receiving netdata - * this is spawned automatically when the sender connects to - * the receiver. - * - */ - -#define START_STREAMING_PROMPT "Hit me baby, push them over..." - -typedef enum { - RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW, - RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW -} RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY; - -int default_rrdpush_enabled = 0; -char *default_rrdpush_destination = NULL; -char *default_rrdpush_api_key = NULL; - -int rrdpush_init() { - default_rrdpush_enabled = appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled); - default_rrdpush_destination = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); - default_rrdpush_api_key = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); - rrdhost_free_orphan_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds", rrdhost_free_orphan_time); - - if(default_rrdpush_enabled && (!default_rrdpush_destination || !*default_rrdpush_destination || !default_rrdpush_api_key || !*default_rrdpush_api_key)) { - error("STREAM [send]: cannot enable sending thread - information is missing."); - default_rrdpush_enabled = 0; - } - - return default_rrdpush_enabled; -} - -#define CONNECTED_TO_SIZE 100 - -// data collection happens from multiple threads -// each of these threads calls rrdset_done() -// which in turn calls rrdset_done_push() -// which uses this pipe to notify the streaming thread -// that there are more data ready to be sent -#define PIPE_READ 0 -#define PIPE_WRITE 1 - -// to have the remote netdata re-sync the charts -// to its current clock, we send for this many -// iterations a BEGIN line without microseconds -// this is for the first iterations of each chart -unsigned int remote_clock_resync_iterations = 60; - -#define rrdpush_buffer_lock(host) netdata_mutex_lock(&((host)->rrdpush_sender_buffer_mutex)) -#define rrdpush_buffer_unlock(host) netdata_mutex_unlock(&((host)->rrdpush_sender_buffer_mutex)) - -// checks if the current chart definition has been sent -static inline int need_to_send_chart_definition(RRDSET *st) { - rrdset_check_rdlock(st); - - if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_EXPOSED_UPSTREAM)))) - return 1; - - RRDDIM *rd; - rrddim_foreach_read(rd, st) - if(unlikely(!rd->exposed)) - return 1; - - return 0; -} - -// sends the current chart definition -static inline void rrdpush_send_chart_definition_nolock(RRDSET *st) { - RRDHOST *host = st->rrdhost; - - rrdset_flag_set(st, RRDSET_FLAG_EXPOSED_UPSTREAM); - - // send the chart - buffer_sprintf( - host->rrdpush_sender_buffer - , "CHART \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" %ld %d \"%s %s %s %s\" \"%s\" \"%s\"\n" - , st->id - , st->name - , st->title - , st->units - , st->family - , st->context - , rrdset_type_name(st->chart_type) - , st->priority - , st->update_every - , rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)?"obsolete":"" - , rrdset_flag_check(st, RRDSET_FLAG_DETAIL)?"detail":"" - , rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)?"store_first":"" - , rrdset_flag_check(st, RRDSET_FLAG_HIDDEN)?"hidden":"" - , 
(st->plugin_name)?st->plugin_name:"" - , (st->module_name)?st->module_name:"" - ); - - // send the dimensions - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - buffer_sprintf( - host->rrdpush_sender_buffer - , "DIMENSION \"%s\" \"%s\" \"%s\" " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " \"%s %s\"\n" - , rd->id - , rd->name - , rrd_algorithm_name(rd->algorithm) - , rd->multiplier - , rd->divisor - , rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)?"hidden":"" - , rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)?"noreset":"" - ); - rd->exposed = 1; - } - - // send the chart local custom variables - RRDSETVAR *rs; - for(rs = st->variables; rs ;rs = rs->next) { - if(unlikely(rs->options & RRDVAR_OPTION_ALLOCATED)) { - calculated_number *value = (calculated_number *) rs->value; - - buffer_sprintf( - host->rrdpush_sender_buffer - , "VARIABLE CHART %s = " CALCULATED_NUMBER_FORMAT "\n" - , rs->variable - , *value - ); - } - } - - st->upstream_resync_time = st->last_collected_time.tv_sec + (remote_clock_resync_iterations * st->update_every); -} - -// sends the current chart dimensions -static inline void rrdpush_send_chart_metrics_nolock(RRDSET *st) { - RRDHOST *host = st->rrdhost; - buffer_sprintf(host->rrdpush_sender_buffer, "BEGIN \"%s\" %llu\n", st->id, (st->upstream_resync_time > st->last_collected_time.tv_sec)?st->usec_since_last_update:0); - - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rd->updated && rd->exposed) - buffer_sprintf(host->rrdpush_sender_buffer - , "SET \"%s\" = " COLLECTED_NUMBER_FORMAT "\n" - , rd->id - , rd->collected_value - ); - } - - buffer_strcat(host->rrdpush_sender_buffer, "END\n"); -} - -static void rrdpush_sender_thread_spawn(RRDHOST *host); - -void rrdset_push_chart_definition(RRDSET *st) { - RRDHOST *host = st->rrdhost; - - rrdset_rdlock(st); - rrdpush_buffer_lock(host); - rrdpush_send_chart_definition_nolock(st); - rrdpush_buffer_unlock(host); - rrdset_unlock(st); -} - -void rrdset_done_push(RRDSET *st) { - RRDHOST *host = st->rrdhost; - - if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_ENABLED))) - return; - - rrdpush_buffer_lock(host); - - if(unlikely(host->rrdpush_send_enabled && !host->rrdpush_sender_spawn)) - rrdpush_sender_thread_spawn(host); - - if(unlikely(!host->rrdpush_sender_buffer || !host->rrdpush_sender_connected)) { - if(unlikely(!host->rrdpush_sender_error_shown)) - error("STREAM %s [send]: not ready - discarding collected metrics.", host->hostname); - - host->rrdpush_sender_error_shown = 1; - - rrdpush_buffer_unlock(host); - return; - } - else if(unlikely(host->rrdpush_sender_error_shown)) { - info("STREAM %s [send]: sending metrics...", host->hostname); - host->rrdpush_sender_error_shown = 0; - } - - if(need_to_send_chart_definition(st)) - rrdpush_send_chart_definition_nolock(st); - - rrdpush_send_chart_metrics_nolock(st); - - // signal the sender that there is more data - if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1 && write(host->rrdpush_sender_pipe[PIPE_WRITE], " ", 1) == -1) - error("STREAM %s [send]: cannot write to internal pipe", host->hostname); - - rrdpush_buffer_unlock(host); -} - -// ---------------------------------------------------------------------------- -// rrdpush sender thread - -static inline void rrdpush_sender_add_host_variable_to_buffer_nolock(RRDHOST *host, RRDVAR *rv) { - calculated_number *value = (calculated_number *)rv->value; - - buffer_sprintf( - host->rrdpush_sender_buffer - , "VARIABLE HOST %s = " CALCULATED_NUMBER_FORMAT "\n" - , rv->name - , *value - ); - - debug(D_STREAM, "RRDVAR pushed 
HOST VARIABLE %s = " CALCULATED_NUMBER_FORMAT, rv->name, *value); -} - -void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv) { - if(host->rrdpush_send_enabled && host->rrdpush_sender_spawn && host->rrdpush_sender_connected) { - rrdpush_buffer_lock(host); - rrdpush_sender_add_host_variable_to_buffer_nolock(host, rv); - rrdpush_buffer_unlock(host); - } -} - -static int rrdpush_sender_thread_custom_host_variables_callback(void *rrdvar_ptr, void *host_ptr) { - RRDVAR *rv = (RRDVAR *)rrdvar_ptr; - RRDHOST *host = (RRDHOST *)host_ptr; - - if(unlikely(rv->type == RRDVAR_TYPE_CALCULATED_ALLOCATED)) { - rrdpush_sender_add_host_variable_to_buffer_nolock(host, rv); - - // return 1, so that the traversal will return the number of variables sent - return 1; - } - - // returning a negative number will break the traversal - return 0; -} - -static void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) { - int ret = rrdvar_callback_for_all_host_variables(host, rrdpush_sender_thread_custom_host_variables_callback, host); - debug(D_STREAM, "RRDVAR sent %d VARIABLES", ret); -} - -// resets all the chart, so that their definitions -// will be resent to the central netdata -static void rrdpush_sender_thread_reset_all_charts(RRDHOST *host) { - rrdhost_rdlock(host); - - RRDSET *st; - rrdset_foreach_read(st, host) { - rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM); - - st->upstream_resync_time = 0; - - rrdset_rdlock(st); - - RRDDIM *rd; - rrddim_foreach_read(rd, st) - rd->exposed = 0; - - rrdset_unlock(st); - } - - rrdhost_unlock(host); -} - -static inline void rrdpush_sender_thread_data_flush(RRDHOST *host) { - rrdpush_buffer_lock(host); - - if(buffer_strlen(host->rrdpush_sender_buffer)) - error("STREAM %s [send]: discarding %zu bytes of metrics already in the buffer.", host->hostname, buffer_strlen(host->rrdpush_sender_buffer)); - - buffer_flush(host->rrdpush_sender_buffer); - - rrdpush_sender_thread_reset_all_charts(host); - rrdpush_sender_thread_send_custom_host_variables(host); - - rrdpush_buffer_unlock(host); -} - -void rrdpush_sender_thread_stop(RRDHOST *host) { - rrdpush_buffer_lock(host); - rrdhost_wrlock(host); - - netdata_thread_t thr = 0; - - if(host->rrdpush_sender_spawn) { - info("STREAM %s [send]: signaling sending thread to stop...", host->hostname); - - // signal the thread that we want to join it - host->rrdpush_sender_join = 1; - - // copy the thread id, so that we will be waiting for the right one - // even if a new one has been spawn - thr = host->rrdpush_sender_thread; - - // signal it to cancel - netdata_thread_cancel(host->rrdpush_sender_thread); - } - - rrdhost_unlock(host); - rrdpush_buffer_unlock(host); - - if(thr != 0) { - info("STREAM %s [send]: waiting for the sending thread to stop...", host->hostname); - void *result; - netdata_thread_join(thr, &result); - info("STREAM %s [send]: sending thread has exited.", host->hostname); - } -} - -static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) { - host->rrdpush_sender_connected = 0; - - if(host->rrdpush_sender_socket != -1) { - close(host->rrdpush_sender_socket); - host->rrdpush_sender_socket = -1; - } -} - -static int rrdpush_sender_thread_connect_to_master(RRDHOST *host, int default_port, int timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) { - struct timeval tv = { - .tv_sec = timeout, - .tv_usec = 0 - }; - - // make sure the socket is closed - rrdpush_sender_thread_close_socket(host); - - debug(D_STREAM, "STREAM: Attempting to connect..."); - 
info("STREAM %s [send to %s]: connecting...", host->hostname, host->rrdpush_send_destination); - - host->rrdpush_sender_socket = connect_to_one_of( - host->rrdpush_send_destination - , default_port - , &tv - , reconnects_counter - , connected_to - , connected_to_size - ); - - if(unlikely(host->rrdpush_sender_socket == -1)) { - error("STREAM %s [send to %s]: failed to connect", host->hostname, host->rrdpush_send_destination); - return 0; - } - - info("STREAM %s [send to %s]: initializing communication...", host->hostname, connected_to); - - #define HTTP_HEADER_SIZE 8192 - char http[HTTP_HEADER_SIZE + 1]; - snprintfz(http, HTTP_HEADER_SIZE, - "STREAM key=%s&hostname=%s®istry_hostname=%s&machine_guid=%s&update_every=%d&os=%s&timezone=%s&tags=%s HTTP/1.1\r\n" - "User-Agent: %s/%s\r\n" - "Accept: */*\r\n\r\n" - , host->rrdpush_send_api_key - , host->hostname - , host->registry_hostname - , host->machine_guid - , default_rrd_update_every - , host->os - , host->timezone - , (host->tags)?host->tags:"" - , host->program_name - , host->program_version - ); - - if(send_timeout(host->rrdpush_sender_socket, http, strlen(http), 0, timeout) == -1) { - error("STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", host->hostname, connected_to); - rrdpush_sender_thread_close_socket(host); - return 0; - } - - info("STREAM %s [send to %s]: waiting response from remote netdata...", host->hostname, connected_to); - - if(recv_timeout(host->rrdpush_sender_socket, http, HTTP_HEADER_SIZE, 0, timeout) == -1) { - error("STREAM %s [send to %s]: remote netdata does not respond.", host->hostname, connected_to); - rrdpush_sender_thread_close_socket(host); - return 0; - } - - if(strncmp(http, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT)) != 0) { - error("STREAM %s [send to %s]: server is not replying properly (is it a netdata?).", host->hostname, connected_to); - rrdpush_sender_thread_close_socket(host); - return 0; - } - - info("STREAM %s [send to %s]: established communication - ready to send metrics...", host->hostname, connected_to); - - if(sock_setnonblock(host->rrdpush_sender_socket) < 0) - error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", host->hostname, connected_to); - - if(sock_enlarge_out(host->rrdpush_sender_socket) < 0) - error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", host->hostname, connected_to); - - debug(D_STREAM, "STREAM: Connected on fd %d...", host->rrdpush_sender_socket); - - return 1; -} - -static void rrdpush_sender_thread_cleanup_callback(void *ptr) { - RRDHOST *host = (RRDHOST *)ptr; - - rrdpush_buffer_lock(host); - rrdhost_wrlock(host); - - info("STREAM %s [send]: sending thread cleans up...", host->hostname); - - rrdpush_sender_thread_close_socket(host); - - // close the pipe - if(host->rrdpush_sender_pipe[PIPE_READ] != -1) { - close(host->rrdpush_sender_pipe[PIPE_READ]); - host->rrdpush_sender_pipe[PIPE_READ] = -1; - } - - if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1) { - close(host->rrdpush_sender_pipe[PIPE_WRITE]); - host->rrdpush_sender_pipe[PIPE_WRITE] = -1; - } - - buffer_free(host->rrdpush_sender_buffer); - host->rrdpush_sender_buffer = NULL; - - if(!host->rrdpush_sender_join) { - info("STREAM %s [send]: sending thread detaches itself.", host->hostname); - netdata_thread_detach(netdata_thread_self()); - } - - host->rrdpush_sender_spawn = 0; - - info("STREAM %s [send]: sending thread now exits.", host->hostname); - - rrdhost_unlock(host); - rrdpush_buffer_unlock(host); -} - -void *rrdpush_sender_thread(void *ptr) { - 
RRDHOST *host = (RRDHOST *)ptr; - - if(!host->rrdpush_send_enabled || !host->rrdpush_send_destination || !*host->rrdpush_send_destination || !host->rrdpush_send_api_key || !*host->rrdpush_send_api_key) { - error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.", host->hostname, gettid()); - return NULL; - } - - info("STREAM %s [send]: thread created (task id %d)", host->hostname, gettid()); - - int timeout = (int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "timeout seconds", 60); - int default_port = (int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "default port", 19999); - size_t max_size = (size_t)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "buffer size bytes", 1024 * 1024); - unsigned int reconnect_delay = (unsigned int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "reconnect delay seconds", 5); - remote_clock_resync_iterations = (unsigned int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "initial clock resync iterations", remote_clock_resync_iterations); - char connected_to[CONNECTED_TO_SIZE + 1] = ""; - - // initialize rrdpush globals - host->rrdpush_sender_buffer = buffer_create(1); - host->rrdpush_sender_connected = 0; - if(pipe(host->rrdpush_sender_pipe) == -1) fatal("STREAM %s [send]: cannot create required pipe.", host->hostname); - - // initialize local variables - size_t begin = 0; - size_t reconnects_counter = 0; - size_t sent_bytes = 0; - size_t sent_bytes_on_this_connection = 0; - - - time_t last_sent_t = 0; - struct pollfd fds[2], *ifd, *ofd; - nfds_t fdmax; - - ifd = &fds[0]; - ofd = &fds[1]; - - size_t not_connected_loops = 0; - - netdata_thread_cleanup_push(rrdpush_sender_thread_cleanup_callback, host); - - for(; host->rrdpush_send_enabled && !netdata_exit ;) { - // check for outstanding cancellation requests - netdata_thread_testcancel(); - - // if we don't have a socket open, let's wait a bit - if(unlikely(host->rrdpush_sender_socket == -1)) { - if(not_connected_loops == 0 && sent_bytes_on_this_connection > 0) { - // fast re-connection on first disconnect - sleep_usec(USEC_PER_MS * 500); // milliseconds - } - else { - // slow re-connection on repeating errors - sleep_usec(USEC_PER_SEC * reconnect_delay); // seconds - } - - if(rrdpush_sender_thread_connect_to_master(host, default_port, timeout, &reconnects_counter, connected_to, CONNECTED_TO_SIZE)) { - last_sent_t = now_monotonic_sec(); - - // reset the buffer, to properly send charts and metrics - rrdpush_sender_thread_data_flush(host); - - // make sure the next reconnection will be immediate - not_connected_loops = 0; - - // reset the bytes we have sent for this session - sent_bytes_on_this_connection = 0; - - // let the data collection threads know we are ready - host->rrdpush_sender_connected = 1; - } - else { - // increase the failed connections counter - not_connected_loops++; - - // reset the number of bytes sent - sent_bytes_on_this_connection = 0; - } - - // loop through - continue; - } - else if(unlikely(now_monotonic_sec() - last_sent_t > timeout)) { - error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, timeout, sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(host); - } - - ifd->fd = host->rrdpush_sender_pipe[PIPE_READ]; - ifd->events = POLLIN; - ifd->revents = 0; - - ofd->fd = host->rrdpush_sender_socket; - ofd->revents = 0; - if(ofd->fd != -1 && begin < 
buffer_strlen(host->rrdpush_sender_buffer)) { - debug(D_STREAM, "STREAM: Requesting data output on streaming socket %d...", ofd->fd); - ofd->events = POLLOUT; - fdmax = 2; - } - else { - debug(D_STREAM, "STREAM: Not requesting data output on streaming socket %d (nothing to send now)...", ofd->fd); - ofd->events = 0; - fdmax = 1; - } - - debug(D_STREAM, "STREAM: Waiting for poll() events (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_sender_buffer)); - if(unlikely(netdata_exit)) break; - int retval = poll(fds, fdmax, 1000); - if(unlikely(netdata_exit)) break; - - if(unlikely(retval == -1)) { - debug(D_STREAM, "STREAM: poll() failed (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_sender_buffer)); - - if(errno == EAGAIN || errno == EINTR) { - debug(D_STREAM, "STREAM: poll() failed with EAGAIN or EINTR..."); - } - else { - error("STREAM %s [send to %s]: failed to poll(). Closing socket.", host->hostname, connected_to); - rrdpush_sender_thread_close_socket(host); - } - - continue; - } - else if(likely(retval)) { - if (ifd->revents & POLLIN || ifd->revents & POLLPRI) { - debug(D_STREAM, "STREAM: Data added to send buffer (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_sender_buffer)); - - char buffer[1000 + 1]; - if (read(host->rrdpush_sender_pipe[PIPE_READ], buffer, 1000) == -1) - error("STREAM %s [send to %s]: cannot read from internal pipe.", host->hostname, connected_to); - } - - if (ofd->revents & POLLOUT) { - if (begin < buffer_strlen(host->rrdpush_sender_buffer)) { - debug(D_STREAM, "STREAM: Sending data (current buffer length %zu bytes, begin = %zu)...", buffer_strlen(host->rrdpush_sender_buffer), begin); - - // BEGIN RRDPUSH LOCKED SESSION - - // during this session, data collectors - // will not be able to append data to our buffer - // but the socket is in non-blocking mode - // so, we will not block at send() - - netdata_thread_disable_cancelability(); - - debug(D_STREAM, "STREAM: Getting exclusive lock on host..."); - rrdpush_buffer_lock(host); - - debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_sender_buffer)); - ssize_t ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); - if (unlikely(ret == -1)) { - if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) { - debug(D_STREAM, "STREAM: Send failed - closing socket..."); - error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(host); - } - else { - debug(D_STREAM, "STREAM: Send failed - will retry..."); - } - } - else if (likely(ret > 0)) { - // DEBUG - dump the string to see it - //char c = host->rrdpush_sender_buffer->buffer[begin + ret]; - //host->rrdpush_sender_buffer->buffer[begin + ret] = '\0'; - //debug(D_STREAM, "STREAM: sent from %zu to %zd:\n%s\n", begin, ret, &host->rrdpush_sender_buffer->buffer[begin]); - //host->rrdpush_sender_buffer->buffer[begin + ret] = c; - - sent_bytes_on_this_connection += ret; - sent_bytes += ret; - begin += ret; - - if (begin == buffer_strlen(host->rrdpush_sender_buffer)) { - // we sent it all - - debug(D_STREAM, "STREAM: Sent %zd bytes (the whole buffer)...", ret); - buffer_flush(host->rrdpush_sender_buffer); - begin = 0; - } - else { - debug(D_STREAM, "STREAM: Sent %zd bytes (part of the data buffer)...", ret);
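/* partial send: 'begin' now points at the unsent tail, so the next POLLOUT resumes exactly where this send() stopped */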
- } - - last_sent_t = now_monotonic_sec(); - } - else { - debug(D_STREAM, "STREAM: send() returned %zd - closing the socket...", ret); - error("STREAM %s [send to %s]: failed to send metrics (send() returned %zd) - closing connection - we have sent %zu bytes on this connection.", - host->hostname, connected_to, ret, sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(host); - } - - debug(D_STREAM, "STREAM: Releasing exclusive lock on host..."); - rrdpush_buffer_unlock(host); - - netdata_thread_enable_cancelability(); - - // END RRDPUSH LOCKED SESSION - } - else { - debug(D_STREAM, "STREAM: we have sent the entire buffer, but we received POLLOUT..."); - } - } - - if(host->rrdpush_sender_socket != -1) { - char *error = NULL; - - if (unlikely(ofd->revents & POLLERR)) - error = "socket reports errors (POLLERR)"; - - else if (unlikely(ofd->revents & POLLHUP)) - error = "connection closed by remote end (POLLHUP)"; - - else if (unlikely(ofd->revents & POLLNVAL)) - error = "connection is invalid (POLLNVAL)"; - - if(unlikely(error)) { - debug(D_STREAM, "STREAM: %s - closing socket...", error); - error("STREAM %s [send to %s]: %s - reopening socket - we have sent %zu bytes on this connection.", host->hostname, connected_to, error, sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(host); - } - } - } - else { - debug(D_STREAM, "STREAM: poll() timed out."); - } - - // protection from overflow - if(buffer_strlen(host->rrdpush_sender_buffer) > max_size) { - debug(D_STREAM, "STREAM: Buffer is too big (%zu bytes), bigger than the max (%zu) - flushing it...", buffer_strlen(host->rrdpush_sender_buffer), max_size); - errno = 0; - error("STREAM %s [send to %s]: too many data pending - buffer is %zu bytes long, %zu unsent - we have sent %zu bytes in total, %zu on this connection. 
Closing connection to flush the data.", host->hostname, connected_to, host->rrdpush_sender_buffer->len, host->rrdpush_sender_buffer->len - begin, sent_bytes, sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(host); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// ---------------------------------------------------------------------------- -// rrdpush receiver thread - -static void log_stream_connection(const char *client_ip, const char *client_port, const char *api_key, const char *machine_guid, const char *host, const char *msg) { - log_access("STREAM: %d '[%s]:%s' '%s' host '%s' api key '%s' machine guid '%s'", gettid(), client_ip, client_port, msg, host, api_key, machine_guid); -} - -static RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY get_multiple_connections_strategy(struct config *c, const char *section, const char *name, RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY def) { - char *value; - switch(def) { - default: - case RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW: - value = "allow"; - break; - - case RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW: - value = "deny"; - break; - } - - value = appconfig_get(c, section, name, value); - - RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY ret = def; - - if(strcasecmp(value, "allow") == 0 || strcasecmp(value, "permit") == 0 || strcasecmp(value, "accept") == 0) - ret = RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW; - - else if(strcasecmp(value, "deny") == 0 || strcasecmp(value, "reject") == 0 || strcasecmp(value, "block") == 0) - ret = RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW; - - else - error("Invalid stream config value at section [%s], setting '%s', value '%s'", section, name, value); - - return ret; -} - -static int rrdpush_receive(int fd - , const char *key - , const char *hostname - , const char *registry_hostname - , const char *machine_guid - , const char *os - , const char *timezone - , const char *tags - , const char *program_name - , const char *program_version - , int update_every - , char *client_ip - , char *client_port -) { - RRDHOST *host; - int history = default_rrd_history_entries; - RRD_MEMORY_MODE mode = default_rrd_memory_mode; - int health_enabled = default_health_enabled; - int rrdpush_enabled = default_rrdpush_enabled; - char *rrdpush_destination = default_rrdpush_destination; - char *rrdpush_api_key = default_rrdpush_api_key; - time_t alarms_delay = 60; - RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY rrdpush_multiple_connections_strategy = RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW; - - update_every = (int)appconfig_get_number(&stream_config, machine_guid, "update every", update_every); - if(update_every < 0) update_every = 1; - - history = (int)appconfig_get_number(&stream_config, key, "default history", history); - history = (int)appconfig_get_number(&stream_config, machine_guid, "history", history); - if(history < 5) history = 5; - - mode = rrd_memory_mode_id(appconfig_get(&stream_config, key, "default memory mode", rrd_memory_mode_name(mode))); - mode = rrd_memory_mode_id(appconfig_get(&stream_config, machine_guid, "memory mode", rrd_memory_mode_name(mode))); - - health_enabled = appconfig_get_boolean_ondemand(&stream_config, key, "health enabled by default", health_enabled); - health_enabled = appconfig_get_boolean_ondemand(&stream_config, machine_guid, "health enabled", health_enabled); - - alarms_delay = appconfig_get_number(&stream_config, key, "default postpone alarms on connect seconds", alarms_delay); - alarms_delay = appconfig_get_number(&stream_config, machine_guid, "postpone alarms on connect seconds", alarms_delay); - - rrdpush_enabled = 
appconfig_get_boolean(&stream_config, key, "default proxy enabled", rrdpush_enabled); - rrdpush_enabled = appconfig_get_boolean(&stream_config, machine_guid, "proxy enabled", rrdpush_enabled); - - rrdpush_destination = appconfig_get(&stream_config, key, "default proxy destination", rrdpush_destination); - rrdpush_destination = appconfig_get(&stream_config, machine_guid, "proxy destination", rrdpush_destination); - - rrdpush_api_key = appconfig_get(&stream_config, key, "default proxy api key", rrdpush_api_key); - rrdpush_api_key = appconfig_get(&stream_config, machine_guid, "proxy api key", rrdpush_api_key); - - rrdpush_multiple_connections_strategy = get_multiple_connections_strategy(&stream_config, key, "multiple connections", rrdpush_multiple_connections_strategy); - rrdpush_multiple_connections_strategy = get_multiple_connections_strategy(&stream_config, machine_guid, "multiple connections", rrdpush_multiple_connections_strategy); - - tags = appconfig_set_default(&stream_config, machine_guid, "host tags", (tags)?tags:""); - if(tags && !*tags) tags = NULL; - - if(!strcmp(machine_guid, "localhost")) - host = localhost; - else - host = rrdhost_find_or_create( - hostname - , registry_hostname - , machine_guid - , os - , timezone - , tags - , program_name - , program_version - , update_every - , history - , mode - , (health_enabled != CONFIG_BOOLEAN_NO) - , (rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key) - , rrdpush_destination - , rrdpush_api_key - ); - - if(!host) { - close(fd); - log_stream_connection(client_ip, client_port, key, machine_guid, hostname, "FAILED - CANNOT ACQUIRE HOST"); - error("STREAM %s [receive from [%s]:%s]: failed to find/create host structure.", hostname, client_ip, client_port); - return 1; - } - -#ifdef NETDATA_INTERNAL_CHECKS - info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s, tags '%s'" - , hostname - , client_ip - , client_port - , host->hostname - , host->machine_guid - , host->rrd_update_every - , host->rrd_history_entries - , rrd_memory_mode_name(host->rrd_memory_mode) - , (health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto") - , host->tags?host->tags:"" - ); -#endif // NETDATA_INTERNAL_CHECKS - - struct plugind cd = { - .enabled = 1, - .update_every = default_rrd_update_every, - .pid = 0, - .serial_failures = 0, - .successful_collections = 0, - .obsolete = 0, - .started_t = now_realtime_sec(), - .next = NULL, - }; - - // put the client IP and port into the buffers used by plugins.d - snprintfz(cd.id, CONFIG_MAX_NAME, "%s:%s", client_ip, client_port); - snprintfz(cd.filename, FILENAME_MAX, "%s:%s", client_ip, client_port); - snprintfz(cd.fullfilename, FILENAME_MAX, "%s:%s", client_ip, client_port); - snprintfz(cd.cmd, PLUGINSD_CMD_MAX, "%s:%s", client_ip, client_port); - - info("STREAM %s [receive from [%s]:%s]: initializing communication...", host->hostname, client_ip, client_port); - if(send_timeout(fd, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT), 0, 60) != strlen(START_STREAMING_PROMPT)) { - log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "FAILED - CANNOT REPLY"); - error("STREAM %s [receive from [%s]:%s]: cannot send ready command.", host->hostname, client_ip, client_port); - close(fd); - return 0; - } - - // remove the non-blocking flag from the socket - 
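// (the plugins.d parser reads the stream with blocking, line-oriented stdio - see the fdopen() below - so the non-blocking flag inherited from the web server must go)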
if(sock_delnonblock(fd) < 0) - error("STREAM %s [receive from [%s]:%s]: cannot remove the non-blocking flag from socket %d", host->hostname, client_ip, client_port, fd); - - // convert the socket to a FILE * - FILE *fp = fdopen(fd, "r"); - if(!fp) { - log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "FAILED - SOCKET ERROR"); - error("STREAM %s [receive from [%s]:%s]: failed to get a FILE for FD %d.", host->hostname, client_ip, client_port, fd); - close(fd); - return 0; - } - - rrdhost_wrlock(host); - if(host->connected_senders > 0) { - switch(rrdpush_multiple_connections_strategy) { - case RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW: - info("STREAM %s [receive from [%s]:%s]: multiple streaming connections for the same host detected. If multiple netdata are pushing metrics for the same charts, at the same time, the result is unexpected.", host->hostname, client_ip, client_port); - break; - - case RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW: - rrdhost_unlock(host); - log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "REJECTED - ALREADY CONNECTED"); - info("STREAM %s [receive from [%s]:%s]: multiple streaming connections for the same host detected. Rejecting new connection.", host->hostname, client_ip, client_port); - fclose(fp); - return 0; - } - } - - rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN); - host->connected_senders++; - host->senders_disconnected_time = 0; - if(health_enabled != CONFIG_BOOLEAN_NO) { - if(alarms_delay > 0) { - host->health_delay_up_to = now_realtime_sec() + alarms_delay; - info("Postponing health checks for %ld seconds, on host '%s', because it was just connected." - , alarms_delay - , host->hostname - ); - } - } - rrdhost_unlock(host); - - // call the plugins.d processor to receive the metrics - info("STREAM %s [receive from [%s]:%s]: receiving metrics...", host->hostname, client_ip, client_port); - log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "CONNECTED"); - - size_t count = pluginsd_process(host, &cd, fp, 1); - - log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "DISCONNECTED"); - error("STREAM %s [receive from [%s]:%s]: disconnected (completed %zu updates).", host->hostname, client_ip, client_port, count); - - rrdhost_wrlock(host); - host->senders_disconnected_time = now_realtime_sec(); - host->connected_senders--; - if(!host->connected_senders) { - rrdhost_flag_set(host, RRDHOST_FLAG_ORPHAN); - if(health_enabled == CONFIG_BOOLEAN_AUTO) - host->health_enabled = 0; - } - rrdhost_unlock(host); - - if(host->connected_senders == 0) - rrdpush_sender_thread_stop(host); - - // cleanup - fclose(fp); - - return (int)count; -} - -struct rrdpush_thread { - int fd; - char *key; - char *hostname; - char *registry_hostname; - char *machine_guid; - char *os; - char *timezone; - char *tags; - char *client_ip; - char *client_port; - char *program_name; - char *program_version; - int update_every; -}; - -static void rrdpush_receiver_thread_cleanup(void *ptr) { - static __thread int executed = 0; - if(!executed) { - executed = 1; - struct rrdpush_thread *rpt = (struct rrdpush_thread *) ptr; - - info("STREAM %s [receive from [%s]:%s]: receive thread ended (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid()); - - freez(rpt->key); - freez(rpt->hostname); - freez(rpt->registry_hostname); - freez(rpt->machine_guid); - freez(rpt->os); - freez(rpt->timezone); - freez(rpt->tags); - freez(rpt->client_ip); - freez(rpt->client_port); - 
freez(rpt->program_name); - freez(rpt->program_version); - freez(rpt); - } -} - -static void *rrdpush_receiver_thread(void *ptr) { - netdata_thread_cleanup_push(rrdpush_receiver_thread_cleanup, ptr); - - struct rrdpush_thread *rpt = (struct rrdpush_thread *)ptr; - info("STREAM %s [%s]:%s: receive thread created (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid()); - - rrdpush_receive( - rpt->fd - , rpt->key - , rpt->hostname - , rpt->registry_hostname - , rpt->machine_guid - , rpt->os - , rpt->timezone - , rpt->tags - , rpt->program_name - , rpt->program_version - , rpt->update_every - , rpt->client_ip - , rpt->client_port - ); - - netdata_thread_cleanup_pop(1); - return NULL; -} - -static void rrdpush_sender_thread_spawn(RRDHOST *host) { - rrdhost_wrlock(host); - - if(!host->rrdpush_sender_spawn) { - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STREAM_SENDER[%s]", host->hostname); - - if(netdata_thread_create(&host->rrdpush_sender_thread, tag, NETDATA_THREAD_OPTION_JOINABLE, rrdpush_sender_thread, (void *) host)) - error("STREAM %s [send]: failed to create new thread for client.", host->hostname); - else - host->rrdpush_sender_spawn = 1; - } - - rrdhost_unlock(host); -} - -int rrdpush_receiver_permission_denied(struct web_client *w) { - // we always respond with the same message and error code - // to prevent an attacker from gaining info about the error - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "You are not permitted to access this. Check the logs for more info."); - return 401; -} - -int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url) { - (void)host; - - info("client wants to STREAM metrics."); - - char *key = NULL, *hostname = NULL, *registry_hostname = NULL, *machine_guid = NULL, *os = "unknown", *timezone = "unknown", *tags = NULL; - int update_every = default_rrd_update_every; - char buf[GUID_LEN + 1]; - - while(url) { - char *value = mystrsep(&url, "?&"); - if(!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if(!strcmp(name, "key")) - key = value; - else if(!strcmp(name, "hostname")) - hostname = value; - else if(!strcmp(name, "registry_hostname")) - registry_hostname = value; - else if(!strcmp(name, "machine_guid")) - machine_guid = value; - else if(!strcmp(name, "update_every")) - update_every = (int)strtoul(value, NULL, 0); - else if(!strcmp(name, "os")) - os = value; - else if(!strcmp(name, "timezone")) - timezone = value; - else if(!strcmp(name, "tags")) - tags = value; - else - info("STREAM [receive from [%s]:%s]: request has parameter '%s' = '%s', which is not used.", w->client_ip, w->client_port, name, value); - } - - if(!key || !*key) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO KEY"); - error("STREAM [receive from [%s]:%s]: request without an API key. Forbidding access.", w->client_ip, w->client_port); - return rrdpush_receiver_permission_denied(w); - } - - if(!hostname || !*hostname) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO HOSTNAME"); - error("STREAM [receive from [%s]:%s]: request without a hostname. 
Forbidding access.", w->client_ip, w->client_port); - return rrdpush_receiver_permission_denied(w); - } - - if(!machine_guid || !*machine_guid) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO MACHINE GUID"); - error("STREAM [receive from [%s]:%s]: request without a machine GUID. Forbidding access.", w->client_ip, w->client_port); - return rrdpush_receiver_permission_denied(w); - } - - if(regenerate_guid(key, buf) == -1) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - INVALID KEY"); - error("STREAM [receive from [%s]:%s]: API key '%s' is not valid GUID (use the command uuidgen to generate one). Forbidding access.", w->client_ip, w->client_port, key); - return rrdpush_receiver_permission_denied(w); - } - - if(regenerate_guid(machine_guid, buf) == -1) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - INVALID MACHINE GUID"); - error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not GUID. Forbidding access.", w->client_ip, w->client_port, machine_guid); - return rrdpush_receiver_permission_denied(w); - } - - if(!appconfig_get_boolean(&stream_config, key, "enabled", 0)) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - KEY NOT ENABLED"); - error("STREAM [receive from [%s]:%s]: API key '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, key); - return rrdpush_receiver_permission_denied(w); - } - - { - SIMPLE_PATTERN *key_allow_from = simple_pattern_create(appconfig_get(&stream_config, key, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT); - if(key_allow_from) { - if(!simple_pattern_matches(key_allow_from, w->client_ip)) { - simple_pattern_free(key_allow_from); - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname) ? hostname : "-", "ACCESS DENIED - KEY NOT ALLOWED FROM THIS IP"); - error("STREAM [receive from [%s]:%s]: API key '%s' is not permitted from this IP. Forbidding access.", w->client_ip, w->client_port, key); - return rrdpush_receiver_permission_denied(w); - } - simple_pattern_free(key_allow_from); - } - } - - if(!appconfig_get_boolean(&stream_config, machine_guid, "enabled", 1)) { - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - MACHINE GUID NOT ENABLED"); - error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, machine_guid); - return rrdpush_receiver_permission_denied(w); - } - - { - SIMPLE_PATTERN *machine_allow_from = simple_pattern_create(appconfig_get(&stream_config, machine_guid, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT); - if(machine_allow_from) { - if(!simple_pattern_matches(machine_allow_from, w->client_ip)) { - simple_pattern_free(machine_allow_from); - log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname) ? 
hostname : "-", "ACCESS DENIED - MACHINE GUID NOT ALLOWED FROM THIS IP"); - error("STREAM [receive from [%s]:%s]: Machine GUID '%s' is not permitted from this IP. Forbidding access.", w->client_ip, w->client_port, machine_guid); - return rrdpush_receiver_permission_denied(w); - } - simple_pattern_free(machine_allow_from); - } - } - - struct rrdpush_thread *rpt = callocz(1, sizeof(struct rrdpush_thread)); - rpt->fd = w->ifd; - rpt->key = strdupz(key); - rpt->hostname = strdupz(hostname); - rpt->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname); - rpt->machine_guid = strdupz(machine_guid); - rpt->os = strdupz(os); - rpt->timezone = strdupz(timezone); - rpt->tags = (tags)?strdupz(tags):NULL; - rpt->client_ip = strdupz(w->client_ip); - rpt->client_port = strdupz(w->client_port); - rpt->update_every = update_every; - - if(w->user_agent && w->user_agent[0]) { - char *t = strchr(w->user_agent, '/'); - if(t && *t) { - *t = '\0'; - t++; - } - - rpt->program_name = strdupz(w->user_agent); - if(t && *t) rpt->program_version = strdupz(t); - } - - netdata_thread_t thread; - - debug(D_SYSTEM, "starting STREAM receive thread."); - - char tag[FILENAME_MAX + 1]; - snprintfz(tag, FILENAME_MAX, "STREAM_RECEIVER[%s,[%s]:%s]", rpt->hostname, w->client_ip, w->client_port); - - if(netdata_thread_create(&thread, tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_receiver_thread, (void *)rpt)) - error("Failed to create new STREAM receive thread for client."); - - // prevent the caller from closing the streaming socket - if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) { - web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET); - } - else { - if(w->ifd == w->ofd) - w->ifd = w->ofd = -1; - else - w->ifd = -1; - } - - buffer_flush(w->response.data); - return 200; -} diff --git a/src/rrdpush.h b/src/rrdpush.h deleted file mode 100644 index bf3d9435c..000000000 --- a/src/rrdpush.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef NETDATA_RRDPUSH_H -#define NETDATA_RRDPUSH_H - -extern int default_rrdpush_enabled; -extern char *default_rrdpush_destination; -extern char *default_rrdpush_api_key; -extern unsigned int remote_clock_resync_iterations; - -extern int rrdpush_init(); -extern void rrdset_done_push(RRDSET *st); -extern void rrdset_push_chart_definition(RRDSET *st); -extern void *rrdpush_sender_thread(void *ptr); - -extern int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url); -extern void rrdpush_sender_thread_stop(RRDHOST *host); - -extern void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv); - -#endif //NETDATA_RRDPUSH_H diff --git a/src/rrdset.c b/src/rrdset.c deleted file mode 100644 index bbd0ae728..000000000 --- a/src/rrdset.c +++ /dev/null @@ -1,1544 +0,0 @@ -#define NETDATA_RRD_INTERNALS 1 -#include "common.h" - -void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line) { - debug(D_RRD_CALLS, "Checking read lock on chart '%s'", st->id); - - int ret = netdata_rwlock_trywrlock(&st->rrdset_rwlock); - if(ret == 0) - fatal("RRDSET '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file); -} - -void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line) { - debug(D_RRD_CALLS, "Checking write lock on chart '%s'", st->id); - - int ret = netdata_rwlock_tryrdlock(&st->rrdset_rwlock); - if(ret == 0) - fatal("RRDSET '%s' should be write-locked, but it is not, at function %s() at 
line %lu of file '%s'", st->id, function, line, file); -} - - -// ---------------------------------------------------------------------------- -// RRDSET index - -int rrdset_compare(void* a, void* b) { - if(((RRDSET *)a)->hash < ((RRDSET *)b)->hash) return -1; - else if(((RRDSET *)a)->hash > ((RRDSET *)b)->hash) return 1; - else return strcmp(((RRDSET *)a)->id, ((RRDSET *)b)->id); -} - -static RRDSET *rrdset_index_find(RRDHOST *host, const char *id, uint32_t hash) { - RRDSET tmp; - strncpyz(tmp.id, id, RRD_ID_LENGTH_MAX); - tmp.hash = (hash)?hash:simple_hash(tmp.id); - - return (RRDSET *)avl_search_lock(&(host->rrdset_root_index), (avl *) &tmp); -} - -// ---------------------------------------------------------------------------- -// RRDSET name index - -#define rrdset_from_avlname(avlname_ptr) ((RRDSET *)((avlname_ptr) - offsetof(RRDSET, avlname))) - -int rrdset_compare_name(void* a, void* b) { - RRDSET *A = rrdset_from_avlname(a); - RRDSET *B = rrdset_from_avlname(b); - - // fprintf(stderr, "COMPARING: %s with %s\n", A->name, B->name); - - if(A->hash_name < B->hash_name) return -1; - else if(A->hash_name > B->hash_name) return 1; - else return strcmp(A->name, B->name); -} - -RRDSET *rrdset_index_add_name(RRDHOST *host, RRDSET *st) { - void *result; - // fprintf(stderr, "ADDING: %s (name: %s)\n", st->id, st->name); - result = avl_insert_lock(&host->rrdset_root_index_name, (avl *) (&st->avlname)); - if(result) return rrdset_from_avlname(result); - return NULL; -} - -RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st) { - void *result; - // fprintf(stderr, "DELETING: %s (name: %s)\n", st->id, st->name); - result = (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index_name), (avl *)(&st->avlname)); - if(result) return rrdset_from_avlname(result); - return NULL; -} - - -// ---------------------------------------------------------------------------- -// RRDSET - find charts - -static inline RRDSET *rrdset_index_find_name(RRDHOST *host, const char *name, uint32_t hash) { - void *result = NULL; - RRDSET tmp; - tmp.name = name; - tmp.hash_name = (hash)?hash:simple_hash(tmp.name); - - // fprintf(stderr, "SEARCHING: %s\n", name); - result = avl_search_lock(&host->rrdset_root_index_name, (avl *) (&(tmp.avlname))); - if(result) { - RRDSET *st = rrdset_from_avlname(result); - if(strcmp(st->magic, RRDSET_MAGIC) != 0) - error("Search for RRDSET %s returned an invalid RRDSET %s (name %s)", name, st->id, st->name); - - // fprintf(stderr, "FOUND: %s\n", name); - return rrdset_from_avlname(result); - } - // fprintf(stderr, "NOT FOUND: %s\n", name); - return NULL; -} - -inline RRDSET *rrdset_find(RRDHOST *host, const char *id) { - debug(D_RRD_CALLS, "rrdset_find() for chart '%s' in host '%s'", id, host->hostname); - RRDSET *st = rrdset_index_find(host, id, 0); - return(st); -} - -inline RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id) { - debug(D_RRD_CALLS, "rrdset_find_bytype() for chart '%s.%s' in host '%s'", type, id, host->hostname); - - char buf[RRD_ID_LENGTH_MAX + 1]; - strncpyz(buf, type, RRD_ID_LENGTH_MAX - 1); - strcat(buf, "."); - int len = (int) strlen(buf); - strncpyz(&buf[len], id, (size_t) (RRD_ID_LENGTH_MAX - len)); - - return(rrdset_find(host, buf)); -} - -inline RRDSET *rrdset_find_byname(RRDHOST *host, const char *name) { - debug(D_RRD_CALLS, "rrdset_find_byname() for chart '%s' in host '%s'", name, host->hostname); - RRDSET *st = rrdset_index_find_name(host, name, 0); - return(st); -} - -// 
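For readers tracing the removed rrdset.c: the name index above embeds a second avl node (avlname) inside RRDSET and recovers the owning struct with offsetof(), the classic container-of pattern. Below is a minimal, self-contained sketch of the same idea, with identifiers invented for illustration (the netdata macro subtracts from a void pointer, a GCC extension; the portable cast through char * is used here):

#include <stddef.h>
#include <stdio.h>

typedef struct node { struct node *left, *right; } node_t;   /* stand-in for avl */

typedef struct chart {
    char id[32];
    node_t index_by_id;     /* primary index entry (by id)     */
    node_t index_by_name;   /* secondary index entry (by name) */
} chart_t;

/* recover the chart from a pointer to its embedded secondary node */
#define chart_from_name_node(p) \
    ((chart_t *)((char *)(p) - offsetof(chart_t, index_by_name)))

int main(void) {
    chart_t c = { .id = "system.cpu" };
    node_t *found = &c.index_by_name;   /* what an index search would return */
    printf("%s\n", chart_from_name_node(found)->id);   /* prints: system.cpu */
    return 0;
}

The payoff of this layout is that one allocation can live in two independent indexes at once, at the cost of the offsetof() bookkeeping seen in rrdset_compare_name() and friends.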
---------------------------------------------------------------------------- -// RRDSET - rename charts - -char *rrdset_strncpyz_name(char *to, const char *from, size_t length) { - char c, *p = to; - - while (length-- && (c = *from++)) { - if(c != '.' && !isalnum(c)) - c = '_'; - - *p++ = c; - } - - *p = '\0'; - - return to; -} - -int rrdset_set_name(RRDSET *st, const char *name) { - if(unlikely(st->name && !strcmp(st->name, name))) - return 1; - - RRDHOST *host = st->rrdhost; - - debug(D_RRD_CALLS, "rrdset_set_name() old: '%s', new: '%s'", st->name?st->name:"", name); - - char b[CONFIG_MAX_VALUE + 1]; - char n[RRD_ID_LENGTH_MAX + 1]; - - snprintfz(n, RRD_ID_LENGTH_MAX, "%s.%s", st->type, name); - rrdset_strncpyz_name(b, n, CONFIG_MAX_VALUE); - - if(rrdset_index_find_name(host, b, 0)) { - error("RRDSET: chart name '%s' on host '%s' already exists.", b, host->hostname); - return 0; - } - - if(st->name) { - rrdset_index_del_name(host, st); - st->name = config_set_default(st->config_section, "name", b); - st->hash_name = simple_hash(st->name); - rrdsetvar_rename_all(st); - } - else { - st->name = config_get(st->config_section, "name", b); - st->hash_name = simple_hash(st->name); - } - - rrdset_wrlock(st); - RRDDIM *rd; - rrddim_foreach_write(rd, st) - rrddimvar_rename_all(rd); - rrdset_unlock(st); - - if(unlikely(rrdset_index_add_name(host, st) != st)) - error("RRDSET: INTERNAL ERROR: attempted to index duplicate chart name '%s'", st->name); - - return 1; -} - -inline void rrdset_is_obsolete(RRDSET *st) { - RRDHOST *host = st->rrdhost; - - if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { - rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE); - rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM); - - // the chart will not get more updates (data collection) - // so, we have to push its definition now - if(unlikely(host->rrdpush_send_enabled)) - rrdset_push_chart_definition(st); - } -} - -inline void rrdset_isnot_obsolete(RRDSET *st) { - if(unlikely((rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { - rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); - rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM); - - // the chart will be pushed upstream automatically - // due to data collection - } -} - -inline void rrdset_update_heterogeneous_flag(RRDSET *st) { - RRDHOST *host = st->rrdhost; - RRDDIM *rd; - - rrdset_flag_clear(st, RRDSET_FLAG_HOMEGENEOUS_CHECK); - - RRD_ALGORITHM algorithm = st->dimensions->algorithm; - collected_number multiplier = abs(st->dimensions->multiplier); - collected_number divisor = abs(st->dimensions->divisor); - - rrddim_foreach_read(rd, st) { - if(algorithm != rd->algorithm || multiplier != abs(rd->multiplier) || divisor != abs(rd->divisor)) { - if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) { - #ifdef NETDATA_INTERNAL_CHECKS - info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").", - rd->name, - st->name, - host->hostname, - rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm), - rd->multiplier, multiplier, - rd->divisor, divisor - ); - #endif - rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS); - } - return; - } - } - - rrdset_flag_clear(st, RRDSET_FLAG_HETEROGENEOUS); -} - -// ---------------------------------------------------------------------------- -// RRDSET - reset a chart - -void rrdset_reset(RRDSET *st) { - 
debug(D_RRD_CALLS, "rrdset_reset() %s", st->name); - - st->last_collected_time.tv_sec = 0; - st->last_collected_time.tv_usec = 0; - st->last_updated.tv_sec = 0; - st->last_updated.tv_usec = 0; - st->current_entry = 0; - st->counter = 0; - st->counter_done = 0; - - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - rd->last_collected_time.tv_sec = 0; - rd->last_collected_time.tv_usec = 0; - rd->collections_counter = 0; - // memset(rd->values, 0, rd->entries * sizeof(storage_number)); - } -} - -// ---------------------------------------------------------------------------- -// RRDSET - helpers for rrdset_create() - -inline long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries) { - if(unlikely(entries < 5)) entries = 5; - if(unlikely(entries > RRD_HISTORY_ENTRIES_MAX)) entries = RRD_HISTORY_ENTRIES_MAX; - - if(unlikely(mode == RRD_MEMORY_MODE_NONE || mode == RRD_MEMORY_MODE_ALLOC)) - return entries; - - long page = (size_t)sysconf(_SC_PAGESIZE); - long size = sizeof(RRDDIM) + entries * sizeof(storage_number); - if(unlikely(size % page)) { - size -= (size % page); - size += page; - - long n = (size - sizeof(RRDDIM)) / sizeof(storage_number); - return n; - } - - return entries; -} - -static inline void last_collected_time_align(RRDSET *st) { - st->last_collected_time.tv_sec -= st->last_collected_time.tv_sec % st->update_every; - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))) - st->last_collected_time.tv_usec = 0; - else - st->last_collected_time.tv_usec = 500000; -} - -static inline void last_updated_time_align(RRDSET *st) { - st->last_updated.tv_sec -= st->last_updated.tv_sec % st->update_every; - st->last_updated.tv_usec = 0; -} - -// ---------------------------------------------------------------------------- -// RRDSET - free a chart - -void rrdset_free(RRDSET *st) { - if(unlikely(!st)) return; - - RRDHOST *host = st->rrdhost; - - rrdhost_check_wrlock(host); // make sure we have a write lock on the host - rrdset_wrlock(st); // lock this RRDSET - - // info("Removing chart '%s' ('%s')", st->id, st->name); - - // ------------------------------------------------------------------------ - // remove it from the indexes - - if(unlikely(rrdset_index_del(host, st) != st)) - error("RRDSET: INTERNAL ERROR: attempt to remove from index chart '%s', removed a different chart.", st->id); - - rrdset_index_del_name(host, st); - - // ------------------------------------------------------------------------ - // free its children structures - - while(st->variables) rrdsetvar_free(st->variables); - while(st->alarms) rrdsetcalc_unlink(st->alarms); - while(st->dimensions) rrddim_free(st, st->dimensions); - - rrdfamily_free(host, st->rrdfamily); - - debug(D_RRD_CALLS, "RRDSET: Cleaning up remaining chart variables for host '%s', chart '%s'", host->hostname, st->id); - rrdvar_free_remaining_variables(host, &st->rrdvar_root_index); - - // ------------------------------------------------------------------------ - // unlink it from the host - - if(st == host->rrdset_root) { - host->rrdset_root = st->next; - } - else { - // find the previous one - RRDSET *s; - for(s = host->rrdset_root; s && s->next != st ; s = s->next) ; - - // bypass it - if(s) s->next = st->next; - else error("Request to free RRDSET '%s': cannot find it under host '%s'", st->id, host->hostname); - } - - rrdset_unlock(st); - - // ------------------------------------------------------------------------ - // free it - - netdata_rwlock_destroy(&st->rrdset_rwlock); - - // free directly allocated members - freez(st->config_section); 
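The unlink step a few lines above walks the host's singly linked chart list (host->rrdset_root) to find the predecessor of the chart being freed, then bypasses it. A reduced, standalone sketch of that pattern, with invented names and a plain singly linked list:

#include <stdio.h>

typedef struct item { int id; struct item *next; } item_t;

static void list_unlink(item_t **root, item_t *victim) {
    if (*root == victim) {                  /* victim is the head */
        *root = victim->next;
        return;
    }
    item_t *prev = *root;
    while (prev && prev->next != victim)    /* find the predecessor */
        prev = prev->next;
    if (prev)
        prev->next = victim->next;          /* bypass the victim */
    /* else: not on this list -- rrdset_free() logs an error in this case */
}

int main(void) {
    item_t c = {3, NULL}, b = {2, &c}, a = {1, &b};
    item_t *root = &a;
    list_unlink(&root, &b);
    for (item_t *p = root; p; p = p->next) printf("%d ", p->id);
    printf("\n");   /* prints: 1 3 */
    return 0;
}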
- freez(st->plugin_name); - freez(st->module_name); - - switch(st->rrd_memory_mode) { - case RRD_MEMORY_MODE_SAVE: - case RRD_MEMORY_MODE_MAP: - case RRD_MEMORY_MODE_RAM: - debug(D_RRD_CALLS, "Unmapping stats '%s'.", st->name); - munmap(st, st->memsize); - break; - - case RRD_MEMORY_MODE_ALLOC: - case RRD_MEMORY_MODE_NONE: - freez(st); - break; - } -} - -void rrdset_save(RRDSET *st) { - rrdset_check_rdlock(st); - - // info("Saving chart '%s' ('%s')", st->id, st->name); - - if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) { - debug(D_RRD_STATS, "Saving stats '%s' to '%s'.", st->name, st->cache_filename); - memory_file_save(st->cache_filename, st, st->memsize); - } - - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE)) { - debug(D_RRD_STATS, "Saving dimension '%s' to '%s'.", rd->name, rd->cache_filename); - memory_file_save(rd->cache_filename, rd, rd->memsize); - } - } -} - -void rrdset_delete(RRDSET *st) { - RRDDIM *rd; - - rrdset_check_rdlock(st); - - info("Deleting chart '%s' ('%s') from disk...", st->id, st->name); - - if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) { - info("Deleting chart header file '%s'.", st->cache_filename); - if(unlikely(unlink(st->cache_filename) == -1)) - error("Cannot delete chart header file '%s'", st->cache_filename); - } - - rrddim_foreach_read(rd, st) { - if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) { - info("Deleting dimension file '%s'.", rd->cache_filename); - if(unlikely(unlink(rd->cache_filename) == -1)) - error("Cannot delete dimension file '%s'", rd->cache_filename); - } - } - - recursively_delete_dir(st->cache_dir, "left-over chart"); -} - -// ---------------------------------------------------------------------------- -// RRDSET - create a chart - -static inline RRDSET *rrdset_find_on_create(RRDHOST *host, const char *fullid) { - RRDSET *st = rrdset_find(host, fullid); - if(unlikely(st)) { - rrdset_isnot_obsolete(st); - debug(D_RRD_CALLS, "RRDSET '%s', already exists.", fullid); - return st; - } - - return NULL; -} - -RRDSET *rrdset_create_custom( - RRDHOST *host - , const char *type - , const char *id - , const char *name - , const char *family - , const char *context - , const char *title - , const char *units - , const char *plugin - , const char *module - , long priority - , int update_every - , RRDSET_TYPE chart_type - , RRD_MEMORY_MODE memory_mode - , long history_entries -) { - if(!type || !type[0]) { - fatal("Cannot create rrd stats without a type: id '%s', name '%s', family '%s', context '%s', title '%s', units '%s', plugin '%s', module '%s'." - , (id && *id)?id:"<unset>" - , (name && *name)?name:"<unset>" - , (family && *family)?family:"<unset>" - , (context && *context)?context:"<unset>" - , (title && *title)?title:"<unset>" - , (units && *units)?units:"<unset>" - , (plugin && *plugin)?plugin:"<unset>" - , (module && *module)?module:"<unset>" - ); - return NULL; - } - - if(!id || !id[0]) { - fatal("Cannot create rrd stats without an id: type '%s', name '%s', family '%s', context '%s', title '%s', units '%s', plugin '%s', module '%s'." 
- , type - , (name && *name)?name:"<unset>" - , (family && *family)?family:"<unset>" - , (context && *context)?context:"<unset>" - , (title && *title)?title:"<unset>" - , (units && *units)?units:"<unset>" - , (plugin && *plugin)?plugin:"<unset>" - , (module && *module)?module:"<unset>" - ); - return NULL; - } - - // ------------------------------------------------------------------------ - // check if it already exists - - char fullid[RRD_ID_LENGTH_MAX + 1]; - snprintfz(fullid, RRD_ID_LENGTH_MAX, "%s.%s", type, id); - - RRDSET *st = rrdset_find_on_create(host, fullid); - if(st) return st; - - rrdhost_wrlock(host); - - st = rrdset_find_on_create(host, fullid); - if(st) { - rrdhost_unlock(host); - return st; - } - - char fullfilename[FILENAME_MAX + 1]; - - // ------------------------------------------------------------------------ - // compose the config_section for this chart - - char config_section[RRD_ID_LENGTH_MAX + 1]; - if(host == localhost) - strcpy(config_section, fullid); - else - snprintfz(config_section, RRD_ID_LENGTH_MAX, "%s/%s", host->machine_guid, fullid); - - // ------------------------------------------------------------------------ - // get the options from the config, we need to create it - - long rentries = config_get_number(config_section, "history", history_entries); - long entries = align_entries_to_pagesize(memory_mode, rentries); - if(entries != rentries) entries = config_set_number(config_section, "history", entries); - - if(memory_mode == RRD_MEMORY_MODE_NONE && entries != rentries) - entries = config_set_number(config_section, "history", 10); - - int enabled = config_get_boolean(config_section, "enabled", 1); - if(!enabled) entries = 5; - - unsigned long size = sizeof(RRDSET); - char *cache_dir = rrdset_cache_dir(host, fullid, config_section); - - time_t now = now_realtime_sec(); - - // ------------------------------------------------------------------------ - // load it or allocate it - - debug(D_RRD_CALLS, "Creating RRD_STATS for '%s.%s'.", type, id); - - snprintfz(fullfilename, FILENAME_MAX, "%s/main.db", cache_dir); - if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP || memory_mode == RRD_MEMORY_MODE_RAM) { - st = (RRDSET *) mymmap( - (memory_mode == RRD_MEMORY_MODE_RAM)?NULL:fullfilename - , size - , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE) - , 0 - ); - - if(st) { - memset(&st->avl, 0, sizeof(avl)); - memset(&st->avlname, 0, sizeof(avl)); - memset(&st->rrdvar_root_index, 0, sizeof(avl_tree_lock)); - memset(&st->dimensions_index, 0, sizeof(avl_tree_lock)); - memset(&st->rrdset_rwlock, 0, sizeof(netdata_rwlock_t)); - - st->name = NULL; - st->config_section = NULL; - st->type = NULL; - st->family = NULL; - st->title = NULL; - st->units = NULL; - st->context = NULL; - st->cache_dir = NULL; - st->plugin_name = NULL; - st->module_name = NULL; - st->dimensions = NULL; - st->rrdfamily = NULL; - st->rrdhost = NULL; - st->next = NULL; - st->variables = NULL; - st->alarms = NULL; - st->flags = 0x00000000; - - if(memory_mode == RRD_MEMORY_MODE_RAM) { - memset(st, 0, size); - } - else { - if(strcmp(st->magic, RRDSET_MAGIC) != 0) { - info("Initializing file %s.", fullfilename); - memset(st, 0, size); - } - else if(strcmp(st->id, fullid) != 0) { - error("File %s contents are not for chart %s. Clearing it.", fullfilename, fullid); - // munmap(st, size); - // st = NULL; - memset(st, 0, size); - } - else if(st->memsize != size || st->entries != entries) { - error("File %s does not have the desired size. 
Clearing it.", fullfilename); - memset(st, 0, size); - } - else if(st->update_every != update_every) { - error("File %s does not have the desired update frequency. Clearing it.", fullfilename); - memset(st, 0, size); - } - else if((now - st->last_updated.tv_sec) > update_every * entries) { - error("File %s is too old. Clearing it.", fullfilename); - memset(st, 0, size); - } - else if(st->last_updated.tv_sec > now + update_every) { - error("File %s refers to the future. Clearing it.", fullfilename); - memset(st, 0, size); - } - - // make sure the database is aligned - if(st->last_updated.tv_sec) { - st->update_every = update_every; - last_updated_time_align(st); - } - } - - // make sure we have the right memory mode - // even if we cleared the memory - st->rrd_memory_mode = memory_mode; - } - } - - if(unlikely(!st)) { - st = callocz(1, size); - st->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC; - } - - st->plugin_name = plugin?strdup(plugin):NULL; - st->module_name = module?strdup(module):NULL; - - st->config_section = strdup(config_section); - st->rrdhost = host; - st->memsize = size; - st->entries = entries; - st->update_every = update_every; - - if(st->current_entry >= st->entries) st->current_entry = 0; - - strcpy(st->cache_filename, fullfilename); - strcpy(st->magic, RRDSET_MAGIC); - - strcpy(st->id, fullid); - st->hash = simple_hash(st->id); - - st->cache_dir = cache_dir; - - st->chart_type = rrdset_type_id(config_get(st->config_section, "chart type", rrdset_type_name(chart_type))); - st->type = config_get(st->config_section, "type", type); - - st->family = config_get(st->config_section, "family", family?family:st->type); - json_fix_string(st->family); - - st->units = config_get(st->config_section, "units", units?units:""); - json_fix_string(st->units); - - st->context = config_get(st->config_section, "context", context?context:st->id); - json_fix_string(st->context); - st->hash_context = simple_hash(st->context); - - st->priority = config_get_number(st->config_section, "priority", priority); - if(enabled) - rrdset_flag_set(st, RRDSET_FLAG_ENABLED); - else - rrdset_flag_clear(st, RRDSET_FLAG_ENABLED); - - rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); - rrdset_flag_clear(st, RRDSET_FLAG_DEBUG); - rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); - rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM); - - // if(!strcmp(st->id, "disk_util.dm-0")) { - // st->debug = 1; - // error("enabled debugging for '%s'", st->id); - // } - // else error("not enabled debugging for '%s'", st->id); - - st->green = NAN; - st->red = NAN; - - st->last_collected_time.tv_sec = 0; - st->last_collected_time.tv_usec = 0; - st->counter_done = 0; - - st->gap_when_lost_iterations_above = (int) (gap_when_lost_iterations_above + 2); - - st->last_accessed_time = 0; - st->upstream_resync_time = 0; - - avl_init_lock(&st->dimensions_index, rrddim_compare); - avl_init_lock(&st->rrdvar_root_index, rrdvar_compare); - - netdata_rwlock_init(&st->rrdset_rwlock); - - if(name && *name && rrdset_set_name(st, name)) - // we did set the name - ; - else - // could not use the name, use the id - rrdset_set_name(st, id); - - st->title = config_get(st->config_section, "title", title); - json_fix_string(st->title); - - st->rrdfamily = rrdfamily_create(host, st->family); - - st->next = host->rrdset_root; - host->rrdset_root = st; - - if(host->health_enabled) { - rrdsetvar_create(st, "last_collected_t", RRDVAR_TYPE_TIME_T, &st->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT); - 
rrdsetvar_create(st, "collected_total_raw", RRDVAR_TYPE_TOTAL, &st->last_collected_total, RRDVAR_OPTION_DEFAULT); - rrdsetvar_create(st, "green", RRDVAR_TYPE_CALCULATED, &st->green, RRDVAR_OPTION_DEFAULT); - rrdsetvar_create(st, "red", RRDVAR_TYPE_CALCULATED, &st->red, RRDVAR_OPTION_DEFAULT); - rrdsetvar_create(st, "update_every", RRDVAR_TYPE_INT, &st->update_every, RRDVAR_OPTION_DEFAULT); - } - - if(unlikely(rrdset_index_add(host, st) != st)) - error("RRDSET: INTERNAL ERROR: attempt to index duplicate chart '%s'", st->id); - - rrdsetcalc_link_matching(st); - rrdcalctemplate_link_matching(st); - - rrdhost_cleanup_obsolete_charts(host); - - rrdhost_unlock(host); - - return(st); -} - - -// ---------------------------------------------------------------------------- -// RRDSET - data collection iteration control - -inline void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds) { - if(unlikely(!st->last_collected_time.tv_sec || !microseconds)) { - // call the full next_usec() function - rrdset_next_usec(st, microseconds); - return; - } - - st->usec_since_last_update = microseconds; -} - -inline void rrdset_next_usec(RRDSET *st, usec_t microseconds) { - struct timeval now; - now_realtime_timeval(&now); - - if(unlikely(!st->last_collected_time.tv_sec)) { - // the first entry - microseconds = st->update_every * USEC_PER_SEC; - } - else if(unlikely(!microseconds)) { - // no dt given by the plugin - microseconds = dt_usec(&now, &st->last_collected_time); - } - else { - // microseconds has the time since the last collection - susec_t since_last_usec = dt_usec_signed(&now, &st->last_collected_time); - - if(unlikely(since_last_usec < 0)) { - // oops! the database is in the future - info("RRD database for chart '%s' on host '%s' is %0.5" LONG_DOUBLE_MODIFIER " secs in the future (counter #%zu, update #%zu). Adjusting it to current time.", st->id, st->rrdhost->hostname, (LONG_DOUBLE)-since_last_usec / USEC_PER_SEC, st->counter, st->counter_done); - - st->last_collected_time.tv_sec = now.tv_sec - st->update_every; - st->last_collected_time.tv_usec = now.tv_usec; - last_collected_time_align(st); - - st->last_updated.tv_sec = now.tv_sec - st->update_every; - st->last_updated.tv_usec = now.tv_usec; - last_updated_time_align(st); - - microseconds = st->update_every * USEC_PER_SEC; - } - else if(unlikely((usec_t)since_last_usec > (usec_t)(st->update_every * 10 * USEC_PER_SEC))) { - // oops! the database is too far behind - info("RRD database for chart '%s' on host '%s' is %0.5" LONG_DOUBLE_MODIFIER " secs in the past (counter #%zu, update #%zu). 
Adjusting it to current time.", st->id, st->rrdhost->hostname, (LONG_DOUBLE)since_last_usec / USEC_PER_SEC, st->counter, st->counter_done); - - microseconds = (usec_t)since_last_usec; - } - } - - #ifdef NETDATA_INTERNAL_CHECKS - debug(D_RRD_CALLS, "rrdset_next_usec() for chart %s with microseconds %llu", st->name, microseconds); - rrdset_debug(st, "NEXT: %llu microseconds", microseconds); - #endif - - st->usec_since_last_update = microseconds; -} - - -// ---------------------------------------------------------------------------- -// RRDSET - process the collected values for all dimensions of a chart - -static inline usec_t rrdset_init_last_collected_time(RRDSET *st) { - now_realtime_timeval(&st->last_collected_time); - last_collected_time_align(st); - - usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "initialized last collected time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_collect_ut / USEC_PER_SEC); - #endif - - return last_collect_ut; -} - -static inline usec_t rrdset_update_last_collected_time(RRDSET *st) { - usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; - usec_t ut = last_collect_ut + st->usec_since_last_update; - st->last_collected_time.tv_sec = (time_t) (ut / USEC_PER_SEC); - st->last_collected_time.tv_usec = (suseconds_t) (ut % USEC_PER_SEC); - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "updated last collected time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_collect_ut / USEC_PER_SEC); - #endif - - return last_collect_ut; -} - -static inline usec_t rrdset_init_last_updated_time(RRDSET *st) { - // copy the last collected time to last updated time - st->last_updated.tv_sec = st->last_collected_time.tv_sec; - st->last_updated.tv_usec = st->last_collected_time.tv_usec; - - if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)) - st->last_updated.tv_sec -= st->update_every; - - last_updated_time_align(st); - - usec_t last_updated_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "initialized last updated time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_updated_ut / USEC_PER_SEC); - #endif - - return last_updated_ut; -} - -static inline void rrdset_done_push_exclusive(RRDSET *st) { -// usec_t update_every_ut = st->update_every * USEC_PER_SEC; // st->update_every in microseconds -// -// if(unlikely(st->usec_since_last_update > update_every_ut * remote_clock_resync_iterations)) { -// error("Chart '%s' was last collected %llu usec before. 
Resetting it.", st->id, st->usec_since_last_update); -// rrdset_reset(st); -// st->usec_since_last_update = update_every_ut; -// } - - if(unlikely(!st->last_collected_time.tv_sec)) { - // it is the first entry - // set the last_collected_time to now - rrdset_init_last_collected_time(st); - } - else { - // it is not the first entry - // calculate the proper last_collected_time, using usec_since_last_update - rrdset_update_last_collected_time(st); - } - - st->counter_done++; - - rrdset_rdlock(st); - rrdset_done_push(st); - rrdset_unlock(st); -} - - -static inline size_t rrdset_done_interpolate( - RRDSET *st - , usec_t update_every_ut - , usec_t last_stored_ut - , usec_t next_store_ut - , usec_t last_collect_ut - , usec_t now_collect_ut - , char store_this_entry - , uint32_t storage_flags -) { - RRDDIM *rd; - - size_t stored_entries = 0; // the number of entries we have stored in the db, during this call to rrdset_done() - - usec_t first_ut = last_stored_ut, last_ut = 0; - ssize_t iterations = (ssize_t)((now_collect_ut - last_stored_ut) / (update_every_ut)); - if((now_collect_ut % (update_every_ut)) == 0) iterations++; - - size_t counter = st->counter; - long current_entry = st->current_entry; - - for( ; next_store_ut <= now_collect_ut ; last_collect_ut = next_store_ut, next_store_ut += update_every_ut, iterations-- ) { - - #ifdef NETDATA_INTERNAL_CHECKS - if(iterations < 0) { error("INTERNAL CHECK: %s: iterations calculation wrapped! first_ut = %llu, last_stored_ut = %llu, next_store_ut = %llu, now_collect_ut = %llu", st->name, first_ut, last_stored_ut, next_store_ut, now_collect_ut); } - rrdset_debug(st, "last_stored_ut = %0.3" LONG_DOUBLE_MODIFIER " (last updated time)", (LONG_DOUBLE)last_stored_ut/USEC_PER_SEC); - rrdset_debug(st, "next_store_ut = %0.3" LONG_DOUBLE_MODIFIER " (next interpolation point)", (LONG_DOUBLE)next_store_ut/USEC_PER_SEC); - #endif - - last_ut = next_store_ut; - - rrddim_foreach_read(rd, st) { - calculated_number new_value; - - switch(rd->algorithm) { - case RRD_ALGORITHM_INCREMENTAL: - new_value = (calculated_number) - ( rd->calculated_value - * (calculated_number)(next_store_ut - last_collect_ut) - / (calculated_number)(now_collect_ut - last_collect_ut) - ); - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC2 INC " - CALCULATED_NUMBER_FORMAT " = " - CALCULATED_NUMBER_FORMAT - " * (%llu - %llu)" - " / (%llu - %llu)" - , rd->name - , new_value - , rd->calculated_value - , next_store_ut, last_collect_ut - , now_collect_ut, last_collect_ut - ); - #endif - - rd->calculated_value -= new_value; - new_value += rd->last_calculated_value; - rd->last_calculated_value = 0; - new_value /= (calculated_number)st->update_every; - - if(unlikely(next_store_ut - last_stored_ut < update_every_ut)) { - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: COLLECTION POINT IS SHORT " CALCULATED_NUMBER_FORMAT " - EXTRAPOLATING", - rd->name - , (calculated_number)(next_store_ut - last_stored_ut) - ); - #endif - - new_value = new_value * (calculated_number)(st->update_every * USEC_PER_SEC) / (calculated_number)(next_store_ut - last_stored_ut); - } - break; - - case RRD_ALGORITHM_ABSOLUTE: - case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: - case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: - default: - if(iterations == 1) { - // this is the last iteration - // do not interpolate - // just show the calculated value - - new_value = rd->calculated_value; - } - else { - // we have missed an update - // interpolate in the middle values - - new_value = (calculated_number) - ( ( 
(rd->calculated_value - rd->last_calculated_value) - * (calculated_number)(next_store_ut - last_collect_ut) - / (calculated_number)(now_collect_ut - last_collect_ut) - ) - + rd->last_calculated_value - ); - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC2 DEF " - CALCULATED_NUMBER_FORMAT " = (((" - "(" CALCULATED_NUMBER_FORMAT " - " CALCULATED_NUMBER_FORMAT ")" - " * %llu" - " / %llu) + " CALCULATED_NUMBER_FORMAT - , rd->name - , new_value - , rd->calculated_value, rd->last_calculated_value - , (next_store_ut - first_ut) - , (now_collect_ut - first_ut), rd->last_calculated_value - ); - #endif - } - break; - } - - if(unlikely(!store_this_entry)) { - rd->values[current_entry] = SN_EMPTY_SLOT; //pack_storage_number(0, SN_NOT_EXISTS); - continue; - } - - if(likely(rd->updated && rd->collections_counter > 1 && iterations < st->gap_when_lost_iterations_above)) { - rd->values[current_entry] = pack_storage_number(new_value, storage_flags ); - rd->last_stored_value = new_value; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: STORE[%ld] " - CALCULATED_NUMBER_FORMAT " = " CALCULATED_NUMBER_FORMAT - , rd->name - , current_entry - , unpack_storage_number(rd->values[current_entry]), new_value - ); - #endif - - } - else { - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING " - , rd->name - , current_entry - ); - #endif - - rd->values[current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS); - rd->last_stored_value = NAN; - } - - stored_entries++; - - #ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) { - calculated_number t1 = new_value * (calculated_number)rd->multiplier / (calculated_number)rd->divisor; - calculated_number t2 = unpack_storage_number(rd->values[current_entry]); - - calculated_number accuracy = accuracy_loss(t1, t2); - debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " FLAGS=0x%08x (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)" - , st->id, rd->name - , current_entry - , t2 - , get_storage_number_flags(rd->values[current_entry]) - , t1 - , accuracy - , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : "" - ); - - rd->collected_volume += t1; - rd->stored_volume += t2; - - accuracy = accuracy_loss(rd->collected_volume, rd->stored_volume); - debug(D_RRD_STATS, "%s/%s: VOLUME[%ld] = " CALCULATED_NUMBER_FORMAT ", calculated = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s" - , st->id, rd->name - , current_entry - , rd->stored_volume - , rd->collected_volume - , accuracy - , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : "" - ); - } - #endif - } - // reset the storage flags for the next point, if any; - storage_flags = SN_EXISTS; - - counter++; - current_entry = ((current_entry + 1) >= st->entries) ? 
0 : current_entry + 1; - last_stored_ut = next_store_ut; - } - - st->counter = counter; - st->current_entry = current_entry; - - if(likely(last_ut)) { - st->last_updated.tv_sec = (time_t) (last_ut / USEC_PER_SEC); - st->last_updated.tv_usec = 0; - } - - return stored_entries; -} - -static inline void rrdset_done_fill_the_gap(RRDSET *st) { - usec_t update_every_ut = st->update_every * USEC_PER_SEC; - usec_t now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; - - long c = 0, entries = st->entries; - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - usec_t next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC; - long current_entry = st->current_entry; - - for(c = 0; c < entries && next_store_ut <= now_collect_ut ; next_store_ut += update_every_ut, c++) { - rd->values[current_entry] = SN_EMPTY_SLOT; - current_entry = ((current_entry + 1) >= entries) ? 0 : current_entry + 1; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING (FILLED THE GAP)", rd->name, current_entry); - #endif - } - } - - if(c > 0) { - c--; - st->last_updated.tv_sec += c * st->update_every; - - st->current_entry += c; - if(st->current_entry >= st->entries) - st->current_entry -= st->entries; - } -} - -void rrdset_done(RRDSET *st) { - if(unlikely(netdata_exit)) return; - - if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) { - if(unlikely(st->rrdhost->rrdpush_send_enabled)) - rrdset_done_push_exclusive(st); - - return; - } - - debug(D_RRD_CALLS, "rrdset_done() for chart %s", st->name); - - RRDDIM *rd; - - char - store_this_entry = 1, // boolean: 1 = store this entry, 0 = don't store this entry - first_entry = 0; // boolean: 1 = this is the first entry seen for this chart, 0 = all other entries - - usec_t - last_collect_ut, // the timestamp in microseconds, of the last collected value - now_collect_ut, // the timestamp in microseconds, of this collected value (this is NOW) - last_stored_ut, // the timestamp in microseconds, of the last stored entry in the db - next_store_ut, // the timestamp in microseconds, of the next entry to store in the db - update_every_ut = st->update_every * USEC_PER_SEC; // st->update_every in microseconds - - netdata_thread_disable_cancelability(); - - // a read lock is OK here - rrdset_rdlock(st); - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) { - error("Chart '%s' has the OBSOLETE flag set, but it is collected.", st->id); - rrdset_isnot_obsolete(st); - } - - // check if the chart has a long time to be updated - if(unlikely(st->usec_since_last_update > st->entries * update_every_ut)) { - info("host '%s', chart %s: took too long to be updated (counter #%zu, update #%zu, %0.3" LONG_DOUBLE_MODIFIER " secs). 
Resetting it.", st->rrdhost->hostname, st->name, st->counter, st->counter_done, (LONG_DOUBLE)st->usec_since_last_update / USEC_PER_SEC); - rrdset_reset(st); - st->usec_since_last_update = update_every_ut; - store_this_entry = 0; - first_entry = 1; - } - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "microseconds since last update: %llu", st->usec_since_last_update); - #endif - - // set last_collected_time - if(unlikely(!st->last_collected_time.tv_sec)) { - // it is the first entry - // set the last_collected_time to now - last_collect_ut = rrdset_init_last_collected_time(st) - update_every_ut; - - // the first entry should not be stored - store_this_entry = 0; - first_entry = 1; - } - else { - // it is not the first entry - // calculate the proper last_collected_time, using usec_since_last_update - last_collect_ut = rrdset_update_last_collected_time(st); - } - - // if this set has not been updated in the past - // we fake the last_update time to be = now - usec_since_last_update - if(unlikely(!st->last_updated.tv_sec)) { - // it has never been updated before - // set a fake last_updated, in the past using usec_since_last_update - rrdset_init_last_updated_time(st); - - // the first entry should not be stored - store_this_entry = 0; - first_entry = 1; - } - - // check if we will re-write the entire data set - if(unlikely(dt_usec(&st->last_collected_time, &st->last_updated) > st->entries * update_every_ut)) { - info("%s: too old data (last updated at %ld.%ld, last collected at %ld.%ld). Resetting it. Will not store the next entry.", st->name, st->last_updated.tv_sec, st->last_updated.tv_usec, st->last_collected_time.tv_sec, st->last_collected_time.tv_usec); - rrdset_reset(st); - rrdset_init_last_updated_time(st); - - st->usec_since_last_update = update_every_ut; - - // the first entry should not be stored - store_this_entry = 0; - first_entry = 1; - } - - // these are the 3 variables that will help us in interpolation - // last_stored_ut = the last time we added a value to the storage - // now_collect_ut = the time the current value has been collected - // next_store_ut = the time of the next interpolation point - now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec; - last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec; - next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC; - - if(unlikely(!st->counter_done)) { - // if we have not collected metrics this session (st->counter_done == 0) - // and we have collected metrics for this chart in the past (st->counter != 0) - // fill the gap (the chart has been just loaded from disk) - if(unlikely(st->counter)) { - rrdset_done_fill_the_gap(st); - last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec; - next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC; - } - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))) { - store_this_entry = 1; - last_collect_ut = next_store_ut - update_every_ut; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "Fixed first entry."); - #endif - } - else { - store_this_entry = 0; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "Will not store the next entry."); - #endif - } - } - st->counter_done++; - - if(unlikely(st->rrdhost->rrdpush_send_enabled)) - rrdset_done_push(st); - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "last_collect_ut = %0.3" LONG_DOUBLE_MODIFIER " (last collection time)", (LONG_DOUBLE)last_collect_ut/USEC_PER_SEC); - 
rrdset_debug(st, "now_collect_ut = %0.3" LONG_DOUBLE_MODIFIER " (current collection time)", (LONG_DOUBLE)now_collect_ut/USEC_PER_SEC); - rrdset_debug(st, "last_stored_ut = %0.3" LONG_DOUBLE_MODIFIER " (last updated time)", (LONG_DOUBLE)last_stored_ut/USEC_PER_SEC); - rrdset_debug(st, "next_store_ut = %0.3" LONG_DOUBLE_MODIFIER " (next interpolation point)", (LONG_DOUBLE)next_store_ut/USEC_PER_SEC); - #endif - - // calculate totals and count the dimensions - int dimensions = 0; - st->collected_total = 0; - rrddim_foreach_read(rd, st) { - dimensions++; - if(likely(rd->updated)) - st->collected_total += rd->collected_value; - } - - uint32_t storage_flags = SN_EXISTS; - - // process all dimensions to calculate their values - // based on the collected figures only - // at this stage we do not interpolate anything - rrddim_foreach_read(rd, st) { - - if(unlikely(!rd->updated)) { - rd->calculated_value = 0; - continue; - } - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: START " - " last_collected_value = " COLLECTED_NUMBER_FORMAT - " collected_value = " COLLECTED_NUMBER_FORMAT - " last_calculated_value = " CALCULATED_NUMBER_FORMAT - " calculated_value = " CALCULATED_NUMBER_FORMAT - , rd->name - , rd->last_collected_value - , rd->collected_value - , rd->last_calculated_value - , rd->calculated_value - ); - #endif - - switch(rd->algorithm) { - case RRD_ALGORITHM_ABSOLUTE: - rd->calculated_value = (calculated_number)rd->collected_value - * (calculated_number)rd->multiplier - / (calculated_number)rd->divisor; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC ABS/ABS-NO-IN " - CALCULATED_NUMBER_FORMAT " = " - COLLECTED_NUMBER_FORMAT - " * " CALCULATED_NUMBER_FORMAT - " / " CALCULATED_NUMBER_FORMAT - , rd->name - , rd->calculated_value - , rd->collected_value - , (calculated_number)rd->multiplier - , (calculated_number)rd->divisor - ); - #endif - - break; - - case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: - if(unlikely(!st->collected_total)) - rd->calculated_value = 0; - else - // the percentage of the current value - // over the total of all dimensions - rd->calculated_value = - (calculated_number)100 - * (calculated_number)rd->collected_value - / (calculated_number)st->collected_total; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC PCENT-ROW " - CALCULATED_NUMBER_FORMAT " = 100" - " * " COLLECTED_NUMBER_FORMAT - " / " COLLECTED_NUMBER_FORMAT - , rd->name - , rd->calculated_value - , rd->collected_value - , st->collected_total - ); - #endif - - break; - - case RRD_ALGORITHM_INCREMENTAL: - if(unlikely(rd->collections_counter <= 1)) { - rd->calculated_value = 0; - continue; - } - - // if the new is smaller than the old (an overflow, or reset), set the old equal to the new - // to reset the calculation (it will give zero as the calculation for this second) - if(unlikely(rd->last_collected_value > rd->collected_value)) { - debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. 
Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT - , st->name, rd->name - , rd->last_collected_value - , rd->collected_value); - - if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS))) - storage_flags = SN_EXISTS_RESET; - - rd->last_collected_value = rd->collected_value; - } - - rd->calculated_value += - (calculated_number)(rd->collected_value - rd->last_collected_value) - * (calculated_number)rd->multiplier - / (calculated_number)rd->divisor; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC INC PRE " - CALCULATED_NUMBER_FORMAT " = (" - COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT - ")" - " * " CALCULATED_NUMBER_FORMAT - " / " CALCULATED_NUMBER_FORMAT - , rd->name - , rd->calculated_value - , rd->collected_value, rd->last_collected_value - , (calculated_number)rd->multiplier - , (calculated_number)rd->divisor - ); - #endif - - break; - - case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: - if(unlikely(rd->collections_counter <= 1)) { - rd->calculated_value = 0; - continue; - } - - // if the new is smaller than the old (an overflow, or reset), set the old equal to the new - // to reset the calculation (it will give zero as the calculation for this second) - if(unlikely(rd->last_collected_value > rd->collected_value)) { - debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT - , st->name, rd->name - , rd->last_collected_value - , rd->collected_value - ); - - if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS))) - storage_flags = SN_EXISTS_RESET; - - rd->last_collected_value = rd->collected_value; - } - - // the percentage of the current increment - // over the increment of all dimensions together - if(unlikely(st->collected_total == st->last_collected_total)) - rd->calculated_value = 0; - else - rd->calculated_value = - (calculated_number)100 - * (calculated_number)(rd->collected_value - rd->last_collected_value) - / (calculated_number)(st->collected_total - st->last_collected_total); - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC PCENT-DIFF " - CALCULATED_NUMBER_FORMAT " = 100" - " * (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")" - " / (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")" - , rd->name - , rd->calculated_value - , rd->collected_value, rd->last_collected_value - , st->collected_total, st->last_collected_total - ); - #endif - - break; - - default: - // make the default zero, to make sure - // it gets noticed when we add new types - rd->calculated_value = 0; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: CALC " - CALCULATED_NUMBER_FORMAT " = 0" - , rd->name - , rd->calculated_value - ); - #endif - - break; - } - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: PHASE2 " - " last_collected_value = " COLLECTED_NUMBER_FORMAT - " collected_value = " COLLECTED_NUMBER_FORMAT - " last_calculated_value = " CALCULATED_NUMBER_FORMAT - " calculated_value = " CALCULATED_NUMBER_FORMAT - , rd->name - , rd->last_collected_value - , rd->collected_value - , rd->last_calculated_value - , rd->calculated_value - ); - #endif - - } - - // at this point we have all the calculated values ready - // it is now time to interpolate values on a second boundary - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(now_collect_ut < next_store_ut)) { - // this is collected in the same interpolation point - rrdset_debug(st, "THIS IS IN THE SAME INTERPOLATION POINT"); - info("INTERNAL 
CHECK: host '%s', chart '%s' is collected in the same interpolation point: short by %llu microseconds", st->rrdhost->hostname, st->name, next_store_ut - now_collect_ut); - } -#endif - - rrdset_done_interpolate(st - , update_every_ut - , last_stored_ut - , next_store_ut - , last_collect_ut - , now_collect_ut - , store_this_entry - , storage_flags - ); - - st->last_collected_total = st->collected_total; - - rrddim_foreach_read(rd, st) { - if(unlikely(!rd->updated)) - continue; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: setting last_collected_value (old: " COLLECTED_NUMBER_FORMAT ") to last_collected_value (new: " COLLECTED_NUMBER_FORMAT ")", rd->name, rd->last_collected_value, rd->collected_value); - #endif - - rd->last_collected_value = rd->collected_value; - - switch(rd->algorithm) { - case RRD_ALGORITHM_INCREMENTAL: - if(unlikely(!first_entry)) { - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value + rd->calculated_value, rd->calculated_value); - #endif - - rd->last_calculated_value += rd->calculated_value; - } - else { - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "THIS IS THE FIRST POINT"); - #endif - } - break; - - case RRD_ALGORITHM_ABSOLUTE: - case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL: - case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL: - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value, rd->calculated_value); - #endif - - rd->last_calculated_value = rd->calculated_value; - break; - } - - rd->calculated_value = 0; - rd->collected_value = 0; - rd->updated = 0; - - #ifdef NETDATA_INTERNAL_CHECKS - rrdset_debug(st, "%s: END " - " last_collected_value = " COLLECTED_NUMBER_FORMAT - " collected_value = " COLLECTED_NUMBER_FORMAT - " last_calculated_value = " CALCULATED_NUMBER_FORMAT - " calculated_value = " CALCULATED_NUMBER_FORMAT - , rd->name - , rd->last_collected_value - , rd->collected_value - , rd->last_calculated_value - , rd->calculated_value - ); - #endif - - } - - // ALL DONE ABOUT THE DATA UPDATE - // -------------------------------------------------------------------- - -/* - // find if there are any obsolete dimensions (not updated recently) - if(unlikely(rrd_delete_unupdated_dimensions)) { - - for( rd = st->dimensions; likely(rd) ; rd = rd->next ) - if((rd->last_collected_time.tv_sec + (rrd_delete_unupdated_dimensions * st->update_every)) < st->last_collected_time.tv_sec) - break; - - if(unlikely(rd)) { - RRDDIM *last; - // there is dimension to free - // upgrade our read lock to a write lock - rrdset_unlock(st); - rrdset_wrlock(st); - - for( rd = st->dimensions, last = NULL ; likely(rd) ; ) { - // remove it only it is not updated in rrd_delete_unupdated_dimensions seconds - - if(unlikely((rd->last_collected_time.tv_sec + (rrd_delete_unupdated_dimensions * st->update_every)) < st->last_collected_time.tv_sec)) { - info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rd->name, rd->id, st->name, st->id); - - if(unlikely(!last)) { - st->dimensions = rd->next; - rd->next = NULL; - rrddim_free(st, rd); - rd = st->dimensions; - continue; - } - else { - last->next = rd->next; - rd->next = NULL; - rrddim_free(st, rd); - rd = last->next; - continue; - } - } - - last = rd; - rd = rd->next; - } - - if(unlikely(!st->dimensions)) { - 
info("Disabling chart %s (%s) since it does not have any dimensions", st->name, st->id); - st->enabled = 0; - } - } - } -*/ - - rrdset_unlock(st); - - netdata_thread_enable_cancelability(); -} diff --git a/src/rrdsetvar.c b/src/rrdsetvar.c deleted file mode 100644 index aec57efa9..000000000 --- a/src/rrdsetvar.c +++ /dev/null @@ -1,183 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -// ---------------------------------------------------------------------------- -// RRDSETVAR management -// CHART VARIABLES - -static inline void rrdsetvar_free_variables(RRDSETVAR *rs) { - RRDSET *st = rs->rrdset; - RRDHOST *host = st->rrdhost; - - // ------------------------------------------------------------------------ - // CHART - rrdvar_free(host, &st->rrdvar_root_index, rs->var_local); - rs->var_local = NULL; - - // ------------------------------------------------------------------------ - // FAMILY - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family); - rs->var_family = NULL; - - rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_name); - rs->var_family_name = NULL; - - // ------------------------------------------------------------------------ - // HOST - rrdvar_free(host, &host->rrdvar_root_index, rs->var_host); - rs->var_host = NULL; - - rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_name); - rs->var_host_name = NULL; - - // ------------------------------------------------------------------------ - // KEYS - freez(rs->key_fullid); - rs->key_fullid = NULL; - - freez(rs->key_fullname); - rs->key_fullname = NULL; -} - -static inline void rrdsetvar_create_variables(RRDSETVAR *rs) { - RRDSET *st = rs->rrdset; - RRDHOST *host = st->rrdhost; - - // ------------------------------------------------------------------------ - // free the old ones (if any) - - rrdsetvar_free_variables(rs); - - // ------------------------------------------------------------------------ - // KEYS - - char buffer[RRDVAR_MAX_LENGTH + 1]; - snprintfz(buffer, RRDVAR_MAX_LENGTH, "%s.%s", st->id, rs->variable); - rs->key_fullid = strdupz(buffer); - - snprintfz(buffer, RRDVAR_MAX_LENGTH, "%s.%s", st->name, rs->variable); - rs->key_fullname = strdupz(buffer); - - // ------------------------------------------------------------------------ - // CHART - rs->var_local = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->variable, rs->type, rs->value); - - // ------------------------------------------------------------------------ - // FAMILY - rs->var_family = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_fullid, rs->type, rs->value); - rs->var_family_name = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_fullname, rs->type, rs->value); - - // ------------------------------------------------------------------------ - // HOST - rs->var_host = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullid, rs->type, rs->value); - rs->var_host_name = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullname, rs->type, rs->value); -} - -RRDSETVAR *rrdsetvar_create(RRDSET *st, const char *variable, RRDVAR_TYPE type, void *value, RRDVAR_OPTIONS options) { - debug(D_VARIABLES, "RRDVARSET create for chart id '%s' name '%s' with variable name '%s'", st->id, st->name, variable); - RRDSETVAR *rs = (RRDSETVAR *)callocz(1, sizeof(RRDSETVAR)); - - rs->variable = strdupz(variable); - rs->hash = simple_hash(rs->variable); - rs->type = type; - rs->value = value; - rs->options = options; - rs->rrdset 
= st; - - rs->next = st->variables; - st->variables = rs; - - rrdsetvar_create_variables(rs); - - return rs; -} - -void rrdsetvar_rename_all(RRDSET *st) { - debug(D_VARIABLES, "RRDSETVAR rename for chart id '%s' name '%s'", st->id, st->name); - - RRDSETVAR *rs; - for(rs = st->variables; rs ; rs = rs->next) - rrdsetvar_create_variables(rs); - - rrdsetcalc_link_matching(st); -} - -void rrdsetvar_free(RRDSETVAR *rs) { - RRDSET *st = rs->rrdset; - debug(D_VARIABLES, "RRDSETVAR free for chart id '%s' name '%s', variable '%s'", st->id, st->name, rs->variable); - - if(st->variables == rs) { - st->variables = rs->next; - } - else { - RRDSETVAR *t; - for (t = st->variables; t && t->next != rs; t = t->next); - if(!t) error("RRDSETVAR '%s' not found in chart '%s' variables linked list", rs->key_fullname, st->id); - else t->next = rs->next; - } - - rrdsetvar_free_variables(rs); - - freez(rs->variable); - - if(rs->options & RRDVAR_OPTION_ALLOCATED) - freez(rs->value); - - freez(rs); -} - -// -------------------------------------------------------------------------------------------------------------------- -// custom chart variables - -RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name) { - RRDHOST *host = st->rrdhost; - - char *n = strdupz(name); - rrdvar_fix_name(n); - uint32_t hash = simple_hash(n); - - rrdset_wrlock(st); - - // find it - RRDSETVAR *rs; - for(rs = st->variables; rs ; rs = rs->next) { - if(hash == rs->hash && strcmp(n, rs->variable) == 0) { - rrdset_unlock(st); - if(rs->options & RRDVAR_OPTION_ALLOCATED) { - free(n); - return rs; - } - else { - error("RRDSETVAR: custom variable '%s' on chart '%s' of host '%s', conflicts with an internal chart variable", n, st->id, host->hostname); - free(n); - return NULL; - } - } - } - - // not found, allocate one - - calculated_number *v = mallocz(sizeof(calculated_number)); - *v = NAN; - - rs = rrdsetvar_create(st, n, RRDVAR_TYPE_CALCULATED, v, RRDVAR_OPTION_ALLOCATED); - rrdset_unlock(st); - - free(n); - return rs; -} - -void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rs, calculated_number value) { - if(unlikely(!(rs->options & RRDVAR_OPTION_ALLOCATED))) { - error("RRDSETVAR: requested to set variable '%s' of chart '%s' on host '%s' to value " CALCULATED_NUMBER_FORMAT " but the variable is not a custom one.", rs->variable, rs->rrdset->id, rs->rrdset->rrdhost->hostname, value); - } - else { - calculated_number *v = rs->value; - if(*v != value) { - *v = value; - - // mark the chart to be sent upstream - rrdset_flag_clear(rs->rrdset, RRDSET_FLAG_EXPOSED_UPSTREAM); - } - } -} diff --git a/src/rrdvar.c b/src/rrdvar.c deleted file mode 100644 index 6936c36f1..000000000 --- a/src/rrdvar.c +++ /dev/null @@ -1,275 +0,0 @@ -#define NETDATA_HEALTH_INTERNALS -#include "common.h" - -// ---------------------------------------------------------------------------- -// RRDVAR management - -inline int rrdvar_fix_name(char *variable) { - int fixed = 0; - while(*variable) { - if (!isalnum(*variable) && *variable != '.' 
&& *variable != '_') { - *variable++ = '_'; - fixed++; - } - else - variable++; - } - - return fixed; -} - -int rrdvar_compare(void* a, void* b) { - if(((RRDVAR *)a)->hash < ((RRDVAR *)b)->hash) return -1; - else if(((RRDVAR *)a)->hash > ((RRDVAR *)b)->hash) return 1; - else return strcmp(((RRDVAR *)a)->name, ((RRDVAR *)b)->name); -} - -static inline RRDVAR *rrdvar_index_add(avl_tree_lock *tree, RRDVAR *rv) { - RRDVAR *ret = (RRDVAR *)avl_insert_lock(tree, (avl *)(rv)); - if(ret != rv) - debug(D_VARIABLES, "Request to insert RRDVAR '%s' into index failed. Already exists.", rv->name); - - return ret; -} - -static inline RRDVAR *rrdvar_index_del(avl_tree_lock *tree, RRDVAR *rv) { - RRDVAR *ret = (RRDVAR *)avl_remove_lock(tree, (avl *)(rv)); - if(!ret) - error("Request to remove RRDVAR '%s' from index failed. Not Found.", rv->name); - - return ret; -} - -static inline RRDVAR *rrdvar_index_find(avl_tree_lock *tree, const char *name, uint32_t hash) { - RRDVAR tmp; - tmp.name = (char *)name; - tmp.hash = (hash)?hash:simple_hash(tmp.name); - - return (RRDVAR *)avl_search_lock(tree, (avl *)&tmp); -} - -inline void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv) { - (void)host; - - if(!rv) return; - - if(tree) { - debug(D_VARIABLES, "Deleting variable '%s'", rv->name); - if(unlikely(!rrdvar_index_del(tree, rv))) - error("RRDVAR: Attempted to delete variable '%s' from host '%s', but it is not found.", rv->name, host->hostname); - } - - if(rv->type == RRDVAR_TYPE_CALCULATED_ALLOCATED) - freez(rv->value); - - freez(rv->name); - freez(rv); -} - -inline RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, void *value) { - char *variable = strdupz(name); - rrdvar_fix_name(variable); - uint32_t hash = simple_hash(variable); - - RRDVAR *rv = rrdvar_index_find(tree, variable, hash); - if(unlikely(!rv)) { - debug(D_VARIABLES, "Variable '%s' not found in scope '%s'. 
Creating a new one.", variable, scope); - - rv = callocz(1, sizeof(RRDVAR)); - rv->name = variable; - rv->hash = hash; - rv->type = type; - rv->value = value; - - RRDVAR *ret = rrdvar_index_add(tree, rv); - if(unlikely(ret != rv)) { - debug(D_VARIABLES, "Variable '%s' in scope '%s' already exists", variable, scope); - freez(rv); - freez(variable); - rv = NULL; - } - else - debug(D_VARIABLES, "Variable '%s' created in scope '%s'", variable, scope); - } - else { - debug(D_VARIABLES, "Variable '%s' is already found in scope '%s'.", variable, scope); - - // already exists - freez(variable); - - // this is important - // it must return NULL - not the existing variable - or double-free will happen - rv = NULL; - } - - return rv; -} - -void rrdvar_free_remaining_variables(RRDHOST *host, avl_tree_lock *tree_lock) { - // FIXME: this is not bullet proof - avl should support some means to destroy it - // with a callback for each item already in the index - - RRDVAR *rv, *last = NULL; - while((rv = (RRDVAR *)tree_lock->avl_tree.root)) { - if(unlikely(rv == last)) { - error("RRDVAR: INTERNAL ERROR: Cannot cleanup tree of RRDVARs"); - break; - } - last = rv; - rrdvar_free(host, tree_lock, rv); - } -} - -// ---------------------------------------------------------------------------- -// CUSTOM HOST VARIABLES - -inline int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback)(void *rrdvar, void *data), void *data) { - return avl_traverse_lock(&host->rrdvar_root_index, callback, data); -} - -static RRDVAR *rrdvar_custom_variable_create(const char *scope, avl_tree_lock *tree_lock, const char *name) { - calculated_number *v = callocz(1, sizeof(calculated_number)); - *v = NAN; - - RRDVAR *rv = rrdvar_create_and_index(scope, tree_lock, name, RRDVAR_TYPE_CALCULATED_ALLOCATED, v); - if(unlikely(!rv)) { - free(v); - debug(D_VARIABLES, "Requested variable '%s' already exists - possibly 2 plugins are updating it at the same time.", name); - - char *variable = strdupz(name); - rrdvar_fix_name(variable); - uint32_t hash = simple_hash(variable); - - rv = rrdvar_index_find(tree_lock, variable, hash); - - freez(variable); - } - - return rv; -} - -RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name) { - return rrdvar_custom_variable_create("host", &host->rrdvar_root_index, name); -} - -void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value) { - if(rv->type != RRDVAR_TYPE_CALCULATED_ALLOCATED) - error("requested to set variable '%s' to value " CALCULATED_NUMBER_FORMAT " but the variable is not a custom one.", rv->name, value); - else { - calculated_number *v = rv->value; - if(*v != value) { - *v = value; - - // if the host is streaming, send this variable upstream immediately - rrdpush_sender_send_this_host_variable_now(host, rv); - } - } -} - -// ---------------------------------------------------------------------------- -// RRDVAR lookup - -static calculated_number rrdvar2number(RRDVAR *rv) { - switch(rv->type) { - case RRDVAR_TYPE_CALCULATED_ALLOCATED: - case RRDVAR_TYPE_CALCULATED: { - calculated_number *n = (calculated_number *)rv->value; - return *n; - } - - case RRDVAR_TYPE_TIME_T: { - time_t *n = (time_t *)rv->value; - return *n; - } - - case RRDVAR_TYPE_COLLECTED: { - collected_number *n = (collected_number *)rv->value; - return *n; - } - - case RRDVAR_TYPE_TOTAL: { - total_number *n = (total_number *)rv->value; - return *n; - } - - case RRDVAR_TYPE_INT: { - int *n = (int *)rv->value; - return *n; - } - - default: - error("I don't know how 
to convert RRDVAR type %u to calculated_number", rv->type); - return NAN; - } -} - -int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) { - RRDSET *st = rc->rrdset; - if(!st) return 0; - - RRDHOST *host = st->rrdhost; - RRDVAR *rv; - - rv = rrdvar_index_find(&st->rrdvar_root_index, variable, hash); - if(rv) { - *result = rrdvar2number(rv); - return 1; - } - - rv = rrdvar_index_find(&st->rrdfamily->rrdvar_root_index, variable, hash); - if(rv) { - *result = rrdvar2number(rv); - return 1; - } - - rv = rrdvar_index_find(&host->rrdvar_root_index, variable, hash); - if(rv) { - *result = rrdvar2number(rv); - return 1; - } - - return 0; -} - -// ---------------------------------------------------------------------------- -// RRDVAR to JSON - -struct variable2json_helper { - BUFFER *buf; - size_t counter; -}; - -static int single_variable2json(void *entry, void *data) { - struct variable2json_helper *helper = (struct variable2json_helper *)data; - RRDVAR *rv = (RRDVAR *)entry; - calculated_number value = rrdvar2number(rv); - - if(unlikely(isnan(value) || isinf(value))) - buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": null", helper->counter?",":"", rv->name); - else - buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": %0.5" LONG_DOUBLE_MODIFIER, helper->counter?",":"", rv->name, (LONG_DOUBLE)value); - - helper->counter++; - - return 0; -} - -void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf) { - RRDHOST *host = st->rrdhost; - - struct variable2json_helper helper = { - .buf = buf, - .counter = 0 - }; - - buffer_sprintf(buf, "{\n\t\"chart\": \"%s\",\n\t\"chart_name\": \"%s\",\n\t\"chart_context\": \"%s\",\n\t\"chart_variables\": {", st->id, st->name, st->context); - avl_traverse_lock(&st->rrdvar_root_index, single_variable2json, (void *)&helper); - buffer_sprintf(buf, "\n\t},\n\t\"family\": \"%s\",\n\t\"family_variables\": {", st->family); - helper.counter = 0; - avl_traverse_lock(&st->rrdfamily->rrdvar_root_index, single_variable2json, (void *)&helper); - buffer_sprintf(buf, "\n\t},\n\t\"host\": \"%s\",\n\t\"host_variables\": {", host->hostname); - helper.counter = 0; - avl_traverse_lock(&host->rrdvar_root_index, single_variable2json, (void *)&helper); - buffer_strcat(buf, "\n\t}\n}\n"); -} - diff --git a/src/signals.c b/src/signals.c deleted file mode 100644 index 331e80358..000000000 --- a/src/signals.c +++ /dev/null @@ -1,168 +0,0 @@ -#include "common.h" - -typedef enum signal_action { - NETDATA_SIGNAL_END_OF_LIST, - NETDATA_SIGNAL_IGNORE, - NETDATA_SIGNAL_EXIT_CLEANLY, - NETDATA_SIGNAL_SAVE_DATABASE, - NETDATA_SIGNAL_LOG_ROTATE, - NETDATA_SIGNAL_RELOAD_HEALTH, - NETDATA_SIGNAL_FATAL, -} SIGNAL_ACTION; - -static struct { - int signo; // the signal - const char *name; // the name of the signal - size_t count; // the number of signals received - SIGNAL_ACTION action; // the action to take -} signals_waiting[] = { - { SIGPIPE, "SIGPIPE", 0, NETDATA_SIGNAL_IGNORE }, - { SIGINT , "SIGINT", 0, NETDATA_SIGNAL_EXIT_CLEANLY }, - { SIGQUIT, "SIGQUIT", 0, NETDATA_SIGNAL_EXIT_CLEANLY }, - { SIGTERM, "SIGTERM", 0, NETDATA_SIGNAL_EXIT_CLEANLY }, - { SIGHUP, "SIGHUP", 0, NETDATA_SIGNAL_LOG_ROTATE }, - { SIGUSR1, "SIGUSR1", 0, NETDATA_SIGNAL_SAVE_DATABASE }, - { SIGUSR2, "SIGUSR2", 0, NETDATA_SIGNAL_RELOAD_HEALTH }, - { SIGBUS, "SIGBUS", 0, NETDATA_SIGNAL_FATAL }, - - // terminator - { 0, "NONE", 0, NETDATA_SIGNAL_END_OF_LIST } -}; - -static void signal_handler(int signo) { - // find the entry in the list - int i; - for(i = 0; signals_waiting[i].action != 
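- // note: this handler is deliberately async-signal-safe - it only
- // increments a counter and, for fatal signals, emits a message with a
- // plain write(); async-signal-unsafe calls like fprintf() or malloc()
- // are avoided here.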
NETDATA_SIGNAL_END_OF_LIST ; i++) { - if(unlikely(signals_waiting[i].signo == signo)) { - signals_waiting[i].count++; - - if(signals_waiting[i].action == NETDATA_SIGNAL_FATAL) { - char buffer[200 + 1]; - snprintfz(buffer, 200, "\nSIGNAL HANDLER: received: %s. Oops! This is bad!\n", signals_waiting[i].name); - if(write(STDERR_FILENO, buffer, strlen(buffer)) == -1) { - // nothing to do - we cannot write but there is no way to complain about it - ; - } - } - - return; - } - } -} - -void signals_block(void) { - sigset_t sigset; - sigfillset(&sigset); - - if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1) - error("SIGNAL: Could not block signals for threads"); -} - -void signals_unblock(void) { - sigset_t sigset; - sigfillset(&sigset); - - if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) { - error("SIGNAL: Could not unblock signals for threads"); - } -} - -void signals_init(void) { - // Catch signals which we want to use - struct sigaction sa; - sa.sa_flags = 0; - - // ignore all signals while we run in a signal handler - sigfillset(&sa.sa_mask); - - int i; - for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) { - if(signals_waiting[i].action == NETDATA_SIGNAL_IGNORE) - sa.sa_handler = SIG_IGN; - else - sa.sa_handler = signal_handler; - - if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1) - error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name); - } -} - -void signals_reset(void) { - struct sigaction sa; - sigemptyset(&sa.sa_mask); - sa.sa_handler = SIG_DFL; - sa.sa_flags = 0; - - int i; - for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) { - if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1) - error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name); - } -} - -void signals_handle(void) { - while(1) { - - // pause() causes the calling process (or thread) to sleep until a signal - // is delivered that either terminates the process or causes the invocation - // of a signal-catching function. - if(pause() == -1 && errno == EINTR) { - - // loop once, but keep looping while signals are coming in - // this is needed because a few operations may take some time - // so we need to check for new signals before pausing again - int found = 1; - while(found) { - found = 0; - - // execute the actions of the signals - int i; - for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) { - if (signals_waiting[i].count) { - found = 1; - signals_waiting[i].count = 0; - const char *name = signals_waiting[i].name; - - switch (signals_waiting[i].action) { - case NETDATA_SIGNAL_RELOAD_HEALTH: - error_log_limit_unlimited(); - info("SIGNAL: Received %s. Reloading HEALTH configuration...", name); - health_reload(); - error_log_limit_reset(); - break; - - case NETDATA_SIGNAL_SAVE_DATABASE: - error_log_limit_unlimited(); - info("SIGNAL: Received %s. Saving databases...", name); - rrdhost_save_all(); - info("Databases saved."); - error_log_limit_reset(); - break; - - case NETDATA_SIGNAL_LOG_ROTATE: - error_log_limit_unlimited(); - info("SIGNAL: Received %s. Reopening all log files...", name); - reopen_all_log_files(); - error_log_limit_reset(); - break; - - case NETDATA_SIGNAL_EXIT_CLEANLY: - info("SIGNAL: Received %s. Cleaning up to exit...", name); - netdata_cleanup_and_exit(0); - exit(0); - - case NETDATA_SIGNAL_FATAL: - fatal("SIGNAL: Received %s. netdata now exits.", name); - - default: - info("SIGNAL: Received %s. No signal handler configured.
Ignoring it.", name); - break; - } - } - } - } - } - else - error("SIGNAL: pause() returned but it was not interrupted by a signal."); - } -} diff --git a/src/signals.h b/src/signals.h deleted file mode 100644 index 2fdd36552..000000000 --- a/src/signals.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef NETDATA_SIGNALS_H -#define NETDATA_SIGNALS_H - -extern void signals_init(void); -extern void signals_block(void); -extern void signals_unblock(void); -extern void signals_reset(void); -extern void signals_handle(void) NORETURN; - -#endif //NETDATA_SIGNALS_H diff --git a/src/simple_pattern.c b/src/simple_pattern.c deleted file mode 100644 index 747b5150a..000000000 --- a/src/simple_pattern.c +++ /dev/null @@ -1,260 +0,0 @@ -#include "common.h" - -struct simple_pattern { - const char *match; - size_t len; - - SIMPLE_PREFIX_MODE mode; - char negative; - - struct simple_pattern *child; - - struct simple_pattern *next; -}; - -static inline struct simple_pattern *parse_pattern(char *str, SIMPLE_PREFIX_MODE default_mode) { - // fprintf(stderr, "PARSING PATTERN: '%s'\n", str); - - SIMPLE_PREFIX_MODE mode; - struct simple_pattern *child = NULL; - - char *s = str, *c = str; - - // skip asterisks in front - while(*c == '*') c++; - - // find the next asterisk - while(*c && *c != '*') c++; - - // do we have an asterisk in the middle? - if(*c == '*' && c[1] != '\0') { - // yes, we have - child = parse_pattern(c, default_mode); - c[1] = '\0'; - } - - // check what this one matches - - size_t len = strlen(s); - if(len >= 2 && *s == '*' && s[len - 1] == '*') { - s[len - 1] = '\0'; - s++; - mode = SIMPLE_PATTERN_SUBSTRING; - } - else if(len >= 1 && *s == '*') { - s++; - mode = SIMPLE_PATTERN_SUFFIX; - } - else if(len >= 1 && s[len - 1] == '*') { - s[len - 1] = '\0'; - mode = SIMPLE_PATTERN_PREFIX; - } - else - mode = default_mode; - - // allocate the structure - struct simple_pattern *m = callocz(1, sizeof(struct simple_pattern)); - if(*s) { - m->match = strdupz(s); - m->len = strlen(m->match); - m->mode = mode; - } - else { - m->mode = SIMPLE_PATTERN_SUBSTRING; - } - - m->child = child; - - return m; -} - -SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode) { - struct simple_pattern *root = NULL, *last = NULL; - - if(unlikely(!list || !*list)) return root; - - int isseparator[256] = { - [' '] = 1 // space - , ['\t'] = 1 // tab - , ['\r'] = 1 // carriage return - , ['\n'] = 1 // new line - , ['\f'] = 1 // form feed - , ['\v'] = 1 // vertical tab - }; - - if (unlikely(separators && *separators)) { - memset(&isseparator[0], 0, sizeof(isseparator)); - while(*separators) isseparator[(unsigned char)*separators++] = 1; - } - - char *buf = mallocz(strlen(list) + 1); - const char *s = list; - - while(s && *s) { - buf[0] = '\0'; - char *c = buf; - - char negative = 0; - - // skip all spaces - while(isseparator[(unsigned char)*s]) - s++; - - if(*s == '!') { - negative = 1; - s++; - } - - // empty string - if(unlikely(!*s)) - break; - - // find the next space - char escape = 0; - while(*s) { - if(*s == '\\' && !escape) { - escape = 1; - s++; - } - else { - if (isseparator[(unsigned char)*s] && !escape) { - s++; - break; - } - - *c++ = *s++; - escape = 0; - } - } - - // terminate our string - *c = '\0'; - - // if we matched the empty string, skip it - if(unlikely(!*buf)) - continue; - - // fprintf(stderr, "FOUND PATTERN: '%s'\n", buf); - struct simple_pattern *m = parse_pattern(buf, default_mode); - m->negative = negative; - - // link it at the end - 
if(unlikely(!root)) - root = last = m; - else { - last->next = m; - last = m; - } - } - - freez(buf); - return (SIMPLE_PATTERN *)root; -} - -static inline char *add_wildcarded(const char *matched, size_t matched_size, char *wildcarded, size_t *wildcarded_size) { - //if(matched_size) { - // char buf[matched_size + 1]; - // strncpyz(buf, matched, matched_size); - // fprintf(stderr, "ADD WILDCARDED '%s' of length %zu\n", buf, matched_size); - //} - - if(unlikely(wildcarded && *wildcarded_size && matched && *matched && matched_size)) { - size_t wss = *wildcarded_size - 1; - size_t len = (matched_size < wss)?matched_size:wss; - if(likely(len)) { - strncpyz(wildcarded, matched, len); - - *wildcarded_size -= len; - return &wildcarded[len]; - } - } - - return wildcarded; -} - -static inline int match_pattern(struct simple_pattern *m, const char *str, size_t len, char *wildcarded, size_t *wildcarded_size) { - char *s; - - if(m->len <= len) { - switch(m->mode) { - case SIMPLE_PATTERN_SUBSTRING: - if(!m->len) return 1; - if((s = strstr(str, m->match))) { - wildcarded = add_wildcarded(str, s - str, wildcarded, wildcarded_size); - if(!m->child) { - wildcarded = add_wildcarded(&s[m->len], len - (&s[m->len] - str), wildcarded, wildcarded_size); - return 1; - } - return match_pattern(m->child, &s[m->len], len - (s - str) - m->len, wildcarded, wildcarded_size); - } - break; - - case SIMPLE_PATTERN_PREFIX: - if(unlikely(strncmp(str, m->match, m->len) == 0)) { - if(!m->child) { - wildcarded = add_wildcarded(&str[m->len], len - m->len, wildcarded, wildcarded_size); - return 1; - } - return match_pattern(m->child, &str[m->len], len - m->len, wildcarded, wildcarded_size); - } - break; - - case SIMPLE_PATTERN_SUFFIX: - if(unlikely(strcmp(&str[len - m->len], m->match) == 0)) { - wildcarded = add_wildcarded(str, len - m->len, wildcarded, wildcarded_size); - if(!m->child) return 1; - return 0; - } - break; - - case SIMPLE_PATTERN_EXACT: - default: - if(unlikely(strcmp(str, m->match) == 0)) { - if(!m->child) return 1; - return 0; - } - break; - } - } - - return 0; -} - -int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size) { - struct simple_pattern *m, *root = (struct simple_pattern *)list; - - if(unlikely(!root || !str || !*str)) return 0; - - size_t len = strlen(str); - for(m = root; m ; m = m->next) { - char *ws = wildcarded; - size_t wss = wildcarded_size; - if(unlikely(ws)) *ws = '\0'; - - if (match_pattern(m, str, len, ws, &wss)) { - - //if(ws && wss) - // fprintf(stderr, "FINAL WILDCARDED '%s' of length %zu\n", ws, strlen(ws)); - - if (m->negative) return 0; - return 1; - } - } - - return 0; -} - -static inline void free_pattern(struct simple_pattern *m) { - if(!m) return; - - free_pattern(m->child); - free_pattern(m->next); - freez((void *)m->match); - freez(m); -} - -void simple_pattern_free(SIMPLE_PATTERN *list) { - if(!list) return; - - free_pattern(((struct simple_pattern *)list)); -} diff --git a/src/simple_pattern.h b/src/simple_pattern.h deleted file mode 100644 index d0b75af7e..000000000 --- a/src/simple_pattern.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef NETDATA_SIMPLE_PATTERN_H -#define NETDATA_SIMPLE_PATTERN_H - -typedef enum { - SIMPLE_PATTERN_EXACT, - SIMPLE_PATTERN_PREFIX, - SIMPLE_PATTERN_SUFFIX, - SIMPLE_PATTERN_SUBSTRING -} SIMPLE_PREFIX_MODE; - -typedef void SIMPLE_PATTERN; - -// create a simple_pattern from the string given -// default_mode is used in cases where EXACT matches, without an asterisk, -// should be considered PREFIX 
matches. -extern SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode); - -// test if string str is matched from the pattern and fill 'wildcarded' with the parts matched by '*' -extern int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size); - -// test if string str is matched from the pattern -#define simple_pattern_matches(list, str) simple_pattern_matches_extract(list, str, NULL, 0) - -// free a simple_pattern that was created with simple_pattern_create() -// list can be NULL, in which case, this does nothing. -extern void simple_pattern_free(SIMPLE_PATTERN *list); - -#endif //NETDATA_SIMPLE_PATTERN_H diff --git a/src/socket.c b/src/socket.c deleted file mode 100644 index 8bede73fd..000000000 --- a/src/socket.c +++ /dev/null @@ -1,1518 +0,0 @@ -#include "common.h" - -// -------------------------------------------------------------------------------------------------------------------- -// various library calls - -#ifdef __gnu_linux__ -#define LARGE_SOCK_SIZE 33554431 // don't ask why - I found it at brubeck source - I guess it is just a large number -#else -#define LARGE_SOCK_SIZE 4096 -#endif - -int sock_setnonblock(int fd) { - int flags; - - flags = fcntl(fd, F_GETFL); - flags |= O_NONBLOCK; - - int ret = fcntl(fd, F_SETFL, flags); - if(ret < 0) - error("Failed to set O_NONBLOCK on socket %d", fd); - - return ret; -} - -int sock_delnonblock(int fd) { - int flags; - - flags = fcntl(fd, F_GETFL); - flags &= ~O_NONBLOCK; - - int ret = fcntl(fd, F_SETFL, flags); - if(ret < 0) - error("Failed to remove O_NONBLOCK on socket %d", fd); - - return ret; -} - -int sock_setreuse(int fd, int reuse) { - int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse)); - - if(ret == -1) - error("Failed to set SO_REUSEADDR on socket %d", fd); - - return ret; -} - -int sock_setreuse_port(int fd, int reuse) { - int ret = -1; - -#ifdef SO_REUSEPORT - ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse)); - if(ret == -1 && errno != ENOPROTOOPT) - error("failed to set SO_REUSEPORT on socket %d", fd); -#endif - - return ret; -} - -int sock_enlarge_in(int fd) { - int ret, bs = LARGE_SOCK_SIZE; - - ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs)); - - if(ret == -1) - error("Failed to set SO_RCVBUF on socket %d", fd); - - return ret; -} - -int sock_enlarge_out(int fd) { - int ret, bs = LARGE_SOCK_SIZE; - ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs)); - - if(ret == -1) - error("Failed to set SO_SNDBUF on socket %d", fd); - - return ret; -} - - -// -------------------------------------------------------------------------------------------------------------------- - -char *strdup_client_description(int family, const char *protocol, const char *ip, int port) { - char buffer[100 + 1]; - - switch(family) { - case AF_INET: - snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port); - break; - - case AF_INET6: - default: - snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port); - break; - - case AF_UNIX: - snprintfz(buffer, 100, "%s:%s", protocol, ip); - break; - } - - return strdupz(buffer); -} - -// -------------------------------------------------------------------------------------------------------------------- -// listening sockets - -int create_listen_socket_unix(const char *path, int listen_backlog) { - int sock; - - debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path); - - sock = socket(AF_UNIX, SOCK_STREAM, 
0); - if(sock < 0) { - error("LISTENER: UNIX socket() on path '%s' failed.", path); - return -1; - } - - sock_setnonblock(sock); - sock_enlarge_in(sock); - - struct sockaddr_un name; - memset(&name, 0, sizeof(struct sockaddr_un)); - name.sun_family = AF_UNIX; - strncpy(name.sun_path, path, sizeof(name.sun_path)-1); - - errno = 0; - if (unlink(path) == -1 && errno != ENOENT) - error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path); - - if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { - close(sock); - error("LISTENER: UNIX bind() on path '%s' failed.", path); - return -1; - } - - // we have to chmod this to 0777 so that the client will be able - // to read from and write to this socket. - if(chmod(path, 0777) == -1) - error("LISTENER: failed to chmod() socket file '%s'.", path); - - if(listen(sock, listen_backlog) < 0) { - close(sock); - error("LISTENER: UNIX listen() on path '%s' failed.", path); - return -1; - } - - debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path); - return sock; -} - -int create_listen_socket4(int socktype, const char *ip, int port, int listen_backlog) { - int sock; - - debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype); - - sock = socket(AF_INET, socktype, 0); - if(sock < 0) { - error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); - return -1; - } - - sock_setreuse(sock, 1); - sock_setreuse_port(sock, 1); - sock_setnonblock(sock); - sock_enlarge_in(sock); - - struct sockaddr_in name; - memset(&name, 0, sizeof(struct sockaddr_in)); - name.sin_family = AF_INET; - name.sin_port = htons (port); - - int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr); - if(ret != 1) { - error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip); - close(sock); - return -1; - } - - if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { - close(sock); - error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); - return -1; - } - - if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) { - close(sock); - error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); - return -1; - } - - debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype); - return sock; -} - -int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int port, int listen_backlog) { - int sock; - int ipv6only = 1; - - debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype); - - sock = socket(AF_INET6, socktype, 0); - if (sock < 0) { - error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype); - return -1; - } - - sock_setreuse(sock, 1); - sock_setreuse_port(sock, 1); - sock_setnonblock(sock); - sock_enlarge_in(sock); - - /* IPv6 only */ - if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0) - error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype); - - struct sockaddr_in6 name; - memset(&name, 0, sizeof(struct sockaddr_in6)); - name.sin6_family = AF_INET6; - name.sin6_port = htons ((uint16_t) port); - name.sin6_scope_id = scope_id; - - int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr); - if(ret != 1) { - error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip); - 
close(sock); - return -1; - } - - name.sin6_scope_id = scope_id; - - if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { - close(sock); - error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); - return -1; - } - - if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) { - close(sock); - error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); - return -1; - } - - debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype); - return sock; -} - -static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, int port) { - if(sockets->opened >= MAX_LISTEN_FDS) { - error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype); - close(fd); - return -1; - } - - sockets->fds[sockets->opened] = fd; - sockets->fds_types[sockets->opened] = socktype; - sockets->fds_families[sockets->opened] = family; - sockets->fds_names[sockets->opened] = strdup_client_description(family, protocol, ip, port); - - sockets->opened++; - return 0; -} - -int listen_sockets_check_is_member(LISTEN_SOCKETS *sockets, int fd) { - size_t i; - for(i = 0; i < sockets->opened ;i++) - if(sockets->fds[i] == fd) return 1; - - return 0; -} - -static inline void listen_sockets_init(LISTEN_SOCKETS *sockets) { - size_t i; - for(i = 0; i < MAX_LISTEN_FDS ;i++) { - sockets->fds[i] = -1; - sockets->fds_names[i] = NULL; - sockets->fds_types[i] = -1; - } - - sockets->opened = 0; - sockets->failed = 0; -} - -void listen_sockets_close(LISTEN_SOCKETS *sockets) { - size_t i; - for(i = 0; i < sockets->opened ;i++) { - close(sockets->fds[i]); - sockets->fds[i] = -1; - - freez(sockets->fds_names[i]); - sockets->fds_names[i] = NULL; - - sockets->fds_types[i] = -1; - } - - sockets->opened = 0; - sockets->failed = 0; -} - -static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, int default_port, int listen_backlog) { - int added = 0; - struct addrinfo hints; - struct addrinfo *result = NULL, *rp = NULL; - - char buffer[strlen(definition) + 1]; - strcpy(buffer, definition); - - char buffer2[10 + 1]; - snprintfz(buffer2, 10, "%d", default_port); - - char *ip = buffer, *port = buffer2, *interface = "";; - - int protocol = IPPROTO_TCP, socktype = SOCK_STREAM; - const char *protocol_str = "tcp"; - - if(strncmp(ip, "tcp:", 4) == 0) { - ip += 4; - protocol = IPPROTO_TCP; - socktype = SOCK_STREAM; - protocol_str = "tcp"; - } - else if(strncmp(ip, "udp:", 4) == 0) { - ip += 4; - protocol = IPPROTO_UDP; - socktype = SOCK_DGRAM; - protocol_str = "udp"; - } - else if(strncmp(ip, "unix:", 5) == 0) { - char *path = ip + 5; - socktype = SOCK_STREAM; - protocol_str = "unix"; - - int fd = create_listen_socket_unix(path, listen_backlog); - if (fd == -1) { - error("LISTENER: Cannot create unix socket '%s'", path); - sockets->failed++; - } - else { - listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0); - added++; - } - return added; - } - - char *e = ip; - if(*e == '[') { - e = ++ip; - while(*e && *e != ']') e++; - if(*e == ']') { - *e = '\0'; - e++; - } - } - else { - while(*e && *e != ':' && *e != '%') e++; - } - - if(*e == '%') { - *e = '\0'; - e++; - interface = e; - while(*e && *e != ':') e++; - } - - if(*e == ':') { - port = e + 1; - *e = '\0'; - } - - uint32_t scope_id = 0; - if(*interface) { - scope_id = 
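- // each "bind to" token has the form [PROTOCOL:]IP[%INTERFACE][:PORT];
- // as an illustration (hypothetical value), '[fe80::1]%eth0:19999' binds
- // the link-local address on eth0: the '%eth0' part is resolved right
- // below with if_nametoindex() into the IPv6 scope id that
- // create_listen_socket6() puts into sin6_scope_id.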
if_nametoindex(interface); - if(!scope_id) - error("LISTENER: Cannot find a network interface named '%s'. Continuing without limiting the network interface", interface); - } - - if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all")) - ip = NULL; - - if(!*port) - port = buffer2; - - memset(&hints, 0, sizeof(hints)); - hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ - hints.ai_socktype = socktype; - hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */ - hints.ai_protocol = protocol; - hints.ai_canonname = NULL; - hints.ai_addr = NULL; - hints.ai_next = NULL; - - int r = getaddrinfo(ip, port, &hints, &result); - if (r != 0) { - error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r)); - return -1; - } - - for (rp = result; rp != NULL; rp = rp->ai_next) { - int fd = -1; - int family = -1; - - char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID"; - int rport = default_port; - - family = rp->ai_addr->sa_family; - switch (family) { - case AF_INET: { - struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr; - inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN); - rport = ntohs(sin->sin_port); - // info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype); - fd = create_listen_socket4(socktype, rip, rport, listen_backlog); - break; - } - - case AF_INET6: { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr; - inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN); - rport = ntohs(sin6->sin6_port); - // info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype); - fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog); - break; - } - - default: - debug(D_LISTENER, "LISTENER: Unknown socket family %d", family); - break; - } - - if (fd == -1) { - error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport); - sockets->failed++; - } - else { - listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport); - added++; - } - } - - freeaddrinfo(result); - - return added; -} - -int listen_sockets_setup(LISTEN_SOCKETS *sockets) { - listen_sockets_init(sockets); - - sockets->backlog = (int) config_get_number(sockets->config_section, "listen backlog", sockets->backlog); - - int old_port = sockets->default_port; - sockets->default_port = (int) config_get_number(sockets->config_section, "default port", sockets->default_port); - if(sockets->default_port < 1 || sockets->default_port > 65535) { - error("LISTENER: Invalid listen port %d given. Defaulting to %d.", sockets->default_port, old_port); - sockets->default_port = (int) config_set_number(sockets->config_section, "default port", old_port); - } - debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port); - - char *s = config_get(sockets->config_section, "bind to", sockets->default_bind_to); - while(*s) { - char *e = s; - - // skip separators, moving both s(tart) and e(nd) - while(isspace(*e) || *e == ',') s = ++e; - - // move e(nd) to the first separator - while(*e && !isspace(*e) && *e != ',') e++; - - // is there anything?
- if(!*s || s == e) break; - - char buf[e - s + 1]; - strncpyz(buf, s, e - s); - bind_to_this(sockets, buf, sockets->default_port, sockets->backlog); - - s = e; - } - - if(sockets->failed) { - size_t i; - for(i = 0; i < sockets->opened ;i++) - info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]); - } - - return (int)sockets->opened; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// connect to another host/port - -// connect_to_this_unix() -// path the path of the unix socket -// timeout the timeout for establishing a connection - -static inline int connect_to_unix(const char *path, struct timeval *timeout) { - int fd = socket(AF_UNIX, SOCK_STREAM, 0); - if(fd == -1) { - error("Failed to create UNIX socket() for '%s'", path); - return -1; - } - - if(timeout) { - if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0) - error("Failed to set timeout on UNIX socket '%s'", path); - } - - struct sockaddr_un addr; - memset(&addr, 0, sizeof(addr)); - addr.sun_family = AF_UNIX; - strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1); - - if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) { - error("Cannot connect to UNIX socket on path '%s'.", path); - close(fd); - return -1; - } - - debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path); - - return fd; -} - -// connect_to_this_ip46() -// protocol IPPROTO_TCP, IPPROTO_UDP -// socktype SOCK_STREAM, SOCK_DGRAM -// host the destination hostname or IP address (IPv4 or IPv6) to connect to -// if it resolves to many IPs, all are tried (IPv4 and IPv6) -// scope_id the if_index id of the interface to use for connecting (0 = any) -// (used only under IPv6) -// service the service name or port to connect to -// timeout the timeout for establishing a connection - -static inline int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) { - struct addrinfo hints; - struct addrinfo *ai_head = NULL, *ai = NULL; - - memset(&hints, 0, sizeof(hints)); - hints.ai_family = PF_UNSPEC; /* Allow IPv4 or IPv6 */ - hints.ai_socktype = socktype; - hints.ai_protocol = protocol; - - int ai_err = getaddrinfo(host, service, &hints, &ai_head); - if (ai_err != 0) { - error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err)); - return -1; - } - - int fd = -1; - for (ai = ai_head; ai != NULL && fd == -1; ai = ai->ai_next) { - - if (ai->ai_family == PF_INET6) { - struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr; - if(pSadrIn6->sin6_scope_id == 0) { - pSadrIn6->sin6_scope_id = scope_id; - } - } - - char hostBfr[NI_MAXHOST + 1]; - char servBfr[NI_MAXSERV + 1]; - - getnameinfo(ai->ai_addr, - ai->ai_addrlen, - hostBfr, - sizeof(hostBfr), - servBfr, - sizeof(servBfr), - NI_NUMERICHOST | NI_NUMERICSERV); - - debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)", - hostBfr, - servBfr, - (unsigned int)ai->ai_flags, - ai->ai_family, - PF_INET, - PF_INET6, - ai->ai_socktype, - SOCK_STREAM, - SOCK_DGRAM, - ai->ai_protocol, - IPPROTO_TCP, - IPPROTO_UDP, - (unsigned long)ai->ai_addrlen, - (unsigned long)sizeof(struct sockaddr_in), - (unsigned long)sizeof(struct 
sockaddr_in6)); - - switch (ai->ai_addr->sa_family) { - case PF_INET: { - struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr; - debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'", - pSadrIn->sin_family, - AF_INET, - AF_INET6, - hostBfr, - servBfr); - break; - } - - case PF_INET6: { - struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr; - debug(D_CONNECT_TO,"ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u", - pSadrIn6->sin6_family, - AF_INET, - AF_INET6, - hostBfr, - servBfr, - pSadrIn6->sin6_flowinfo, - pSadrIn6->sin6_scope_id); - break; - } - - default: { - debug(D_CONNECT_TO, "Unknown protocol family %d.", ai->ai_family); - continue; - } - } - - fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); - if(fd != -1) { - if(timeout) { - if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0) - error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr); - } - - errno = 0; - if(connect(fd, ai->ai_addr, ai->ai_addrlen) < 0) { - if(errno == EALREADY || errno == EINPROGRESS) { - info("Waiting for connection to ip %s port %s to be established", hostBfr, servBfr); - - fd_set fds; - FD_ZERO(&fds); - FD_SET(fd, &fds); - int rc = select (fd + 1, NULL, &fds, NULL, timeout); - - if(rc > 0 && FD_ISSET(fd, &fds)) { - info("connect() to ip %s port %s completed successfully", hostBfr, servBfr); - } - else if(rc == -1) { - error("Failed to connect to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc); - close(fd); - fd = -1; - } - else { - error("Timed out while connecting to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc); - close(fd); - fd = -1; - } - } - else { - error("Failed to connect to '%s', port '%s'", hostBfr, servBfr); - close(fd); - fd = -1; - } - } - - if(fd != -1) - debug(D_CONNECT_TO, "Connected to '%s' on port '%s'.", hostBfr, servBfr); - } - } - - freeaddrinfo(ai_head); - - return fd; -} - -// connect_to_this() -// -// definition format: -// -// [PROTOCOL:]IP[%INTERFACE][:PORT] -// -// PROTOCOL = tcp or udp -// IP = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6) -// INTERFACE = for IPv6 only, the network interface to use -// PORT = port number or service name - -int connect_to_this(const char *definition, int default_port, struct timeval *timeout) { - char buffer[strlen(definition) + 1]; - strcpy(buffer, definition); - - char default_service[10 + 1]; - snprintfz(default_service, 10, "%d", default_port); - - char *host = buffer, *service = default_service, *interface = ""; - int protocol = IPPROTO_TCP, socktype = SOCK_STREAM; - uint32_t scope_id = 0; - - if(strncmp(host, "tcp:", 4) == 0) { - host += 4; - protocol = IPPROTO_TCP; - socktype = SOCK_STREAM; - } - else if(strncmp(host, "udp:", 4) == 0) { - host += 4; - protocol = IPPROTO_UDP; - socktype = SOCK_DGRAM; - } - else if(strncmp(host, "unix:", 5) == 0) { - char *path = host + 5; - return connect_to_unix(path, timeout); - } - - char *e = host; - if(*e == '[') { - e = ++host; - while(*e && *e != ']') e++; - if(*e == ']') { - *e = '\0'; - e++; - } - } - else { - while(*e && *e != ':' && *e != '%') e++; - } - - if(*e == '%') { - *e = '\0'; - e++; - interface = e; - while(*e && *e != ':') e++; - } - - if(*e == ':') { - *e = '\0'; - e++; - service = e; - } - - debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d
(tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP); - - if(!*host) { - error("Definition '%s' does not specify a host.", definition); - return -1; - } - - if(*interface) { - scope_id = if_nametoindex(interface); - if(!scope_id) - error("Cannot find a network interface named '%s'. Continuing without limiting the network interface", interface); - } - - if(!*service) - service = default_service; - - - return connect_to_this_ip46(protocol, socktype, host, scope_id, service, timeout); -} - -int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) { - int sock = -1; - - const char *s = destination; - while(*s) { - const char *e = s; - - // skip separators, moving both s(tart) and e(nd) - while(isspace(*e) || *e == ',') s = ++e; - - // move e(nd) to the first separator - while(*e && !isspace(*e) && *e != ',') e++; - - // is there anything? - if(!*s || s == e) break; - - char buf[e - s + 1]; - strncpyz(buf, s, e - s); - if(reconnects_counter) *reconnects_counter += 1; - sock = connect_to_this(buf, default_port, timeout); - if(sock != -1) { - if(connected_to && connected_to_size) { - strncpy(connected_to, buf, connected_to_size); - connected_to[connected_to_size - 1] = '\0'; - } - break; - } - s = e; - } - - return sock; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// helpers to send/receive data in one call, in blocking mode, with a timeout - -ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) { - for(;;) { - struct pollfd fd = { - .fd = sockfd, - .events = POLLIN, - .revents = 0 - }; - - errno = 0; - int retval = poll(&fd, 1, timeout * 1000); - - if(retval == -1) { - // failed - - if(errno == EINTR || errno == EAGAIN) - continue; - - return -1; - } - - if(!retval) { - // timeout - return 0; - } - - if(fd.revents & POLLIN) break; - } - - return recv(sockfd, buf, len, flags); -} - -ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) { - for(;;) { - struct pollfd fd = { - .fd = sockfd, - .events = POLLOUT, - .revents = 0 - }; - - errno = 0; - int retval = poll(&fd, 1, timeout * 1000); - - if(retval == -1) { - // failed - - if(errno == EINTR || errno == EAGAIN) - continue; - - return -1; - } - - if(!retval) { - // timeout - return 0; - } - - if(fd.revents & POLLOUT) break; - } - - return send(sockfd, buf, len, flags); -} - - -// -------------------------------------------------------------------------------------------------------------------- -// accept4() replacement for systems that do not have one - -#ifndef HAVE_ACCEPT4 -int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) { - int fd = accept(sock, addr, addrlen); - int newflags = 0; - - if (fd < 0) return fd; - - if (flags & SOCK_NONBLOCK) { - newflags |= O_NONBLOCK; - flags &= ~SOCK_NONBLOCK; - } - -#ifdef SOCK_CLOEXEC -#ifdef O_CLOEXEC - if (flags & SOCK_CLOEXEC) { - newflags |= O_CLOEXEC; - flags &= ~SOCK_CLOEXEC; - } -#endif -#endif - - if (flags) { - close(fd); - errno = EINVAL; - return -1; - } - - if (fcntl(fd, F_SETFL, newflags) < 0) { - int saved_errno = errno; - close(fd); - errno = saved_errno; - return -1; - } - - return fd; -} -#endif - - -// -------------------------------------------------------------------------------------------------------------------- -// accept_socket() - accept a socket and store client IP and port - -int
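- // note on the accept4() fallback above: it emulates the system call with
- // accept() followed by fcntl(), so O_NONBLOCK/O_CLOEXEC are applied in a
- // second, non-atomic step - unlike the native accept4().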
accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list) { - struct sockaddr_storage sadr; - socklen_t addrlen = sizeof(sadr); - - int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags); - if (likely(nfd >= 0)) { - if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { - error("LISTENER: cannot getnameinfo() on received client connection."); - strncpyz(client_ip, "UNKNOWN", ipsize - 1); - strncpyz(client_port, "UNKNOWN", portsize - 1); - } - - client_ip[ipsize - 1] = '\0'; - client_port[portsize - 1] = '\0'; - - switch (((struct sockaddr *)&sadr)->sa_family) { - case AF_UNIX: - debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd); - // set the port - certain versions of libc return garbage on unix sockets - strncpy(client_port, "UNIX", portsize); - client_port[portsize - 1] = '\0'; - break; - - case AF_INET: - debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd); - break; - - case AF_INET6: - if (strncmp(client_ip, "::ffff:", 7) == 0) { - memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1); - debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd); - } - else - debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd); - break; - - default: - debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd); - break; - } - - if(access_list) { - if(!strcmp(client_ip, "127.0.0.1") || !strcmp(client_ip, "::1")) { - strncpy(client_ip, "localhost", ipsize); - client_ip[ipsize - 1] = '\0'; - } - - if(unlikely(!simple_pattern_matches(access_list, client_ip))) { - errno = 0; - debug(D_LISTENER, "Permission denied for client '%s', port '%s'", client_ip, client_port); - error("DENIED ACCESS to client '%s'", client_ip); - close(nfd); - nfd = -1; - errno = EPERM; - } - } - } -#ifdef HAVE_ACCEPT4 - else if(errno == ENOSYS) - error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. 
Recompile netdata like this: ./configure --disable-accept4 ..."); -#endif - - return nfd; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// poll() based listener -// this should be the fastest possible listener for up to 100 sockets -// above 100, an epoll() interface is needed on Linux - -#define POLL_FDS_INCREASE_STEP 10 - -inline POLLINFO *poll_add_fd(POLLJOB *p - , int fd - , int socktype - , uint32_t flags - , const char *client_ip - , const char *client_port - , void *(*add_callback)(POLLINFO *pi, short int *events, void *data) - , void (*del_callback)(POLLINFO *pi) - , int (*rcv_callback)(POLLINFO *pi, short int *events) - , int (*snd_callback)(POLLINFO *pi, short int *events) - , void *data -) { - debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); - - if(unlikely(fd < 0)) return NULL; - - //if(p->limit && p->used >= p->limit) { - // info("Max sockets limit reached (%zu sockets), dropping connection", p->used); - // close(fd); - // return NULL; - //} - - if(unlikely(!p->first_free)) { - size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP; - debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max); - - p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots); - p->inf = reallocz(p->inf, sizeof(POLLINFO) * new_slots); - - // reset all the newly added slots - ssize_t i; - for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) { - debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i); - p->fds[i].fd = -1; - p->fds[i].events = 0; - p->fds[i].revents = 0; - - p->inf[i].p = p; - p->inf[i].slot = (size_t)i; - p->inf[i].flags = 0; - p->inf[i].socktype = -1; - p->inf[i].client_ip = NULL; - p->inf[i].client_port = NULL; - p->inf[i].del_callback = p->del_callback; - p->inf[i].rcv_callback = p->rcv_callback; - p->inf[i].snd_callback = p->snd_callback; - p->inf[i].data = NULL; - - // link them so that the first free will be earlier in the array - // (we loop decrementing i) - p->inf[i].next = p->first_free; - p->first_free = &p->inf[i]; - } - - p->slots = new_slots; - } - - POLLINFO *pi = p->first_free; - p->first_free = p->first_free->next; - - debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); - - struct pollfd *pf = &p->fds[pi->slot]; - pf->fd = fd; - pf->events = POLLIN; - pf->revents = 0; - - pi->fd = fd; - pi->p = p; - pi->socktype = socktype; - pi->flags = flags; - pi->next = NULL; - pi->client_ip = strdupz(client_ip); - pi->client_port = strdupz(client_port); - - pi->del_callback = del_callback; - pi->rcv_callback = rcv_callback; - pi->snd_callback = snd_callback; - - pi->connected_t = now_boottime_sec(); - pi->last_received_t = 0; - pi->last_sent_t = 0; - pi->last_sent_t = 0; - pi->recv_count = 0; - pi->send_count = 0; - - netdata_thread_disable_cancelability(); - p->used++; - if(unlikely(pi->slot > p->max)) - p->max = pi->slot; - - if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) { - pi->data = add_callback(pi, &pf->events, data); - } - - if(pi->flags & POLLINFO_FLAG_SERVER_SOCKET) { - p->min = pi->slot; - } - netdata_thread_enable_cancelability(); - - debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, 
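- // design note: free slots are chained through 'first_free', so adding a
- // client socket is O(1) and never has to scan the fds/inf arrays.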
p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); - - return pi; -} - -inline void poll_close_fd(POLLINFO *pi) { - POLLJOB *p = pi->p; - - struct pollfd *pf = &p->fds[pi->slot]; - debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); - - if(unlikely(pf->fd == -1)) return; - - netdata_thread_disable_cancelability(); - - if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) { - pi->del_callback(pi); - - if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) { - if(close(pf->fd) == -1) - error("Failed to close() poll_events() socket %d", pf->fd); - } - } - - pf->fd = -1; - pf->events = 0; - pf->revents = 0; - - pi->fd = -1; - pi->socktype = -1; - pi->flags = 0; - pi->data = NULL; - - pi->del_callback = NULL; - pi->rcv_callback = NULL; - pi->snd_callback = NULL; - - freez(pi->client_ip); - pi->client_ip = NULL; - - freez(pi->client_port); - pi->client_port = NULL; - - pi->next = p->first_free; - p->first_free = pi; - - p->used--; - if(unlikely(p->max == pi->slot)) { - p->max = p->min; - ssize_t i; - for(i = (ssize_t)pi->slot; i > (ssize_t)p->min ;i--) { - if (unlikely(p->fds[i].fd != -1)) { - p->max = (size_t)i; - break; - } - } - } - netdata_thread_enable_cancelability(); - - debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1); -} - -void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) { - (void)pi; - (void)events; - (void)data; - - // error("POLLFD: internal error: poll_default_add_callback() called"); - - return NULL; -} - -void poll_default_del_callback(POLLINFO *pi) { - if(pi->data) - error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak"); -} - -int poll_default_rcv_callback(POLLINFO *pi, short int *events) { - *events |= POLLIN; - - char buffer[1024 + 1]; - - ssize_t rc; - do { - rc = recv(pi->fd, buffer, 1024, MSG_DONTWAIT); - if (rc < 0) { - // read failed - if (errno != EWOULDBLOCK && errno != EAGAIN) { - error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc); - return -1; - } - } else if (rc) { - // data received - info("POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d", rc, pi->fd); - } - } while (rc != -1); - - return 0; -} - -int poll_default_snd_callback(POLLINFO *pi, short int *events) { - *events &= ~POLLOUT; - - info("POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d", pi->fd); - return 0; -} - -void poll_default_tmr_callback(void *timer_data) { - (void)timer_data; -} - -static void poll_events_cleanup(void *data) { - POLLJOB *p = (POLLJOB *)data; - - size_t i; - for(i = 0 ; i <= p->max ; i++) { - POLLINFO *pi = &p->inf[i]; - poll_close_fd(pi); - } - - freez(p->fds); - freez(p->inf); -} - -static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, short int revents, time_t now) { - short int events = pf->events; - int fd = pf->fd; - pf->revents = 0; - size_t i = pi->slot; - - if(unlikely(fd == -1)) { - debug(D_POLLFD, "POLLFD: LISTENER: ignoring slot %zu, it does not have an fd", i); - return; - } - - debug(D_POLLFD, "POLLFD: LISTENER: processing events for slot %zu (events = %d, revents = %d)", i, events, revents); - - if(revents & POLLIN || revents & POLLPRI) { - // receiving data - - pi->last_received_t = now; - 
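- // these timestamps and counters feed the periodic checks at the end of
- // poll_events(): clients that never complete a request, or that stay
- // idle beyond the configured timeouts, are closed there.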
pi->recv_count++; - - if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) { - // read data from client TCP socket - debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", i, fd); - - pf->events = 0; - if (pi->rcv_callback(pi, &pf->events) == -1) { - poll_close_fd(&p->inf[i]); - return; - } - pf = &p->fds[i]; - pi = &p->inf[i]; - -#ifdef NETDATA_INTERNAL_CHECKS - // this is common - it is used for web server file copies - if(unlikely(!(pf->events & (POLLIN|POLLOUT)))) { - error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>"); - //poll_close_fd(pi); - //return; - } -#endif - } - else if(likely(pi->flags & POLLINFO_FLAG_SERVER_SOCKET)) { - // new connection - // debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", i, fd); - - switch(pi->socktype) { - case SOCK_STREAM: { - // a TCP socket - // we accept the connection - - int nfd; - do { - char client_ip[NI_MAXHOST + 1]; - char client_port[NI_MAXSERV + 1]; - - debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", i, fd); - nfd = accept_socket(fd, SOCK_NONBLOCK, client_ip, NI_MAXHOST + 1, client_port, NI_MAXSERV + 1, p->access_list); - if (unlikely(nfd < 0)) { - // accept failed - - debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", i, fd); - - if(unlikely(errno == EMFILE)) { - error("POLLFD: LISTENER: too many open files - sleeping for 1ms - used by this thread %zu, max for this thread %zu", p->used, p->limit); - usleep(1000); // 1ms - } - else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN)) - error("POLLFD: LISTENER: accept() failed."); - - break; - } - else { - // accept ok - // info("POLLFD: LISTENER: client '[%s]:%s' connected to '%s' on fd %d", client_ip, client_port, sockets->fds_names[i], nfd); - poll_add_fd(p - , nfd - , SOCK_STREAM - , POLLINFO_FLAG_CLIENT_SOCKET - , client_ip - , client_port - , p->add_callback - , p->del_callback - , p->rcv_callback - , p->snd_callback - , NULL - ); - - // it may have reallocated them, so refresh our pointers - pf = &p->fds[i]; - pi = &p->inf[i]; - } - } while (nfd >= 0 && (!p->limit || p->used < p->limit)); - break; - } - - case SOCK_DGRAM: { - // a UDP socket - // we read data from the server socket - - debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", i, fd); - - // FIXME: access_list is not applied to UDP - - pf->events = 0; - pi->rcv_callback(pi, &pf->events); - break; - } - - default: { - error("POLLFD: LISTENER: Unknown socktype %d on slot %zu", pi->socktype, pi->slot); - break; - } - } - } - } - - if(unlikely(revents & POLLOUT)) { - // sending data - debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", i, fd); - - pi->last_sent_t = now; - pi->send_count++; - - pf->events = 0; - if (pi->snd_callback(pi, &pf->events) == -1) { - poll_close_fd(&p->inf[i]); - return; - } - pf = &p->fds[i]; - pi = &p->inf[i]; - -#ifdef NETDATA_INTERNAL_CHECKS - // this is common - it is used for streaming - if(unlikely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET && !(pf->events & (POLLIN|POLLOUT)))) { - error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output.
", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>"); - //poll_close_fd(pi); - //return; - } -#endif - } - - if(unlikely(revents & POLLERR)) { - error("POLLFD: LISTENER: processing POLLERR events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd); - pf->events = 0; - poll_close_fd(pi); - return; - } - - if(unlikely(revents & POLLHUP)) { - error("POLLFD: LISTENER: processing POLLHUP events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd); - pf->events = 0; - poll_close_fd(pi); - return; - } - - if(unlikely(revents & POLLNVAL)) { - error("POLLFD: LISTENER: processing POLLNVAL events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd); - pf->events = 0; - poll_close_fd(pi); - return; - } -} - -void poll_events(LISTEN_SOCKETS *sockets - , void *(*add_callback)(POLLINFO *pi, short int *events, void *data) - , void (*del_callback)(POLLINFO *pi) - , int (*rcv_callback)(POLLINFO *pi, short int *events) - , int (*snd_callback)(POLLINFO *pi, short int *events) - , void (*tmr_callback)(void *timer_data) - , SIMPLE_PATTERN *access_list - , void *data - , time_t tcp_request_timeout_seconds - , time_t tcp_idle_timeout_seconds - , time_t timer_milliseconds - , void *timer_data - , size_t max_tcp_sockets -) { - if(!sockets || !sockets->opened) { - error("POLLFD: internal error: no listening sockets are opened"); - return; - } - - if(timer_milliseconds <= 0) timer_milliseconds = 0; - - int retval; - - POLLJOB p = { - .slots = 0, - .used = 0, - .max = 0, - .limit = max_tcp_sockets, - .fds = NULL, - .inf = NULL, - .first_free = NULL, - - .complete_request_timeout = tcp_request_timeout_seconds, - .idle_timeout = tcp_idle_timeout_seconds, - .checks_every = (tcp_idle_timeout_seconds / 3) + 1, - - .access_list = access_list, - - .timer_milliseconds = timer_milliseconds, - .timer_data = timer_data, - - .add_callback = add_callback?add_callback:poll_default_add_callback, - .del_callback = del_callback?del_callback:poll_default_del_callback, - .rcv_callback = rcv_callback?rcv_callback:poll_default_rcv_callback, - .snd_callback = snd_callback?snd_callback:poll_default_snd_callback, - .tmr_callback = tmr_callback?tmr_callback:poll_default_tmr_callback - }; - - size_t i; - for(i = 0; i < sockets->opened ;i++) { - - POLLINFO *pi = poll_add_fd(&p - , sockets->fds[i] - , sockets->fds_types[i] - , POLLINFO_FLAG_SERVER_SOCKET - , (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN" - , "" - , p.add_callback - , p.del_callback - , p.rcv_callback - , p.snd_callback - , NULL - ); - - pi->data = data; - info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN"); - } - - int listen_sockets_active = 1; - - int timeout_ms = 1000; // in milliseconds - time_t last_check = now_boottime_sec(); - - usec_t timer_usec = timer_milliseconds * USEC_PER_MS; - usec_t now_usec = 0, next_timer_usec = 0, last_timer_usec = 0; - if(unlikely(timer_usec)) { - now_usec = now_boottime_usec(); - next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec; - } - - netdata_thread_cleanup_push(poll_events_cleanup, &p); - - while(!netdata_exit) { - if(unlikely(timer_usec)) { - now_usec = now_boottime_usec(); - - if(unlikely(timer_usec && now_usec >= next_timer_usec)) { - debug(D_POLLFD, "Calling timer callback after %zu usec", (size_t)(now_usec - last_timer_usec)); - last_timer_usec = now_usec; - p.tmr_callback(p.timer_data); - now_usec = now_boottime_usec(); - next_timer_usec = 
now_usec - (now_usec % timer_usec) + timer_usec; - } - - usec_t dt_usec = next_timer_usec - now_usec; - if(dt_usec > 1000 * USEC_PER_MS) - timeout_ms = 1000; - else - timeout_ms = (int)(dt_usec / USEC_PER_MS); - } - - // enable or disable the TCP listening sockets, based on the current number of sockets used and the limit set - if((listen_sockets_active && (p.limit && p.used >= p.limit)) || (!listen_sockets_active && (!p.limit || p.used < p.limit))) { - listen_sockets_active = !listen_sockets_active; - info("%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)", (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit); - for (i = 0; i <= p.max; i++) { - if(p.inf[i].flags & POLLINFO_FLAG_SERVER_SOCKET && p.inf[i].socktype == SOCK_STREAM) { - p.fds[i].events = (short int) ((listen_sockets_active) ? POLLIN : 0); - } - } - } - - debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets for %zu ms...", p.max + 1, (size_t)timeout_ms); - retval = poll(p.fds, p.max + 1, timeout_ms); - time_t now = now_boottime_sec(); - - if(unlikely(retval == -1)) { - error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1); - break; - } - else if(unlikely(!retval)) { - debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout."); - } - else { - for (i = 0; i <= p.max; i++) { - struct pollfd *pf = &p.fds[i]; - short int revents = pf->revents; - if (unlikely(revents)) - poll_events_process(&p, &p.inf[i], pf, revents, now); - } - } - - if(unlikely(p.checks_every > 0 && now - last_check > p.checks_every)) { - last_check = now; - - // security checks - for(i = 0; i <= p.max; i++) { - POLLINFO *pi = &p.inf[i]; - - if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) { - if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) { - info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' has not sent a complete request in %zu seconds - closing it. " - , i - , pi->fd - , pi->client_ip ? pi->client_ip : "<undefined-ip>" - , pi->client_port ? pi->client_port : "<undefined-port>" - , (size_t) p.complete_request_timeout - ); - poll_close_fd(pi); - } - else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) { - info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' is idle for more than %zu seconds - closing it. " - , i - , pi->fd - , pi->client_ip ? pi->client_ip : "<undefined-ip>" - , pi->client_port ? 
pi->client_port : "<undefined-port>" - , (size_t) p.idle_timeout - ); - poll_close_fd(pi); - } - } - } - } - } - - netdata_thread_cleanup_pop(1); - debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed"); -} diff --git a/src/socket.h b/src/socket.h deleted file mode 100644 index 7b3e726ec..000000000 --- a/src/socket.h +++ /dev/null @@ -1,162 +0,0 @@ -#ifndef NETDATA_SOCKET_H -#define NETDATA_SOCKET_H - -#ifndef MAX_LISTEN_FDS -#define MAX_LISTEN_FDS 50 -#endif - -typedef struct listen_sockets { - const char *config_section; // the netdata configuration section to read settings from - const char *default_bind_to; // the default bind to configuration string - int default_port; // the default port to use - int backlog; // the default listen backlog to use - - size_t opened; // the number of sockets opened - size_t failed; // the number of sockets attempted to open, but failed - int fds[MAX_LISTEN_FDS]; // the open sockets - char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets - int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM) - int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6) -} LISTEN_SOCKETS; - -extern char *strdup_client_description(int family, const char *protocol, const char *ip, int port); - -extern int listen_sockets_setup(LISTEN_SOCKETS *sockets); -extern void listen_sockets_close(LISTEN_SOCKETS *sockets); - -extern int connect_to_this(const char *definition, int default_port, struct timeval *timeout); -extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size); - -extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); -extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); - -extern int sock_setnonblock(int fd); -extern int sock_delnonblock(int fd); -extern int sock_setreuse(int fd, int reuse); -extern int sock_setreuse_port(int fd, int reuse); -extern int sock_enlarge_in(int fd); -extern int sock_enlarge_out(int fd); - -extern int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list); - -#ifndef HAVE_ACCEPT4 -extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags); - -#ifndef SOCK_NONBLOCK -#define SOCK_NONBLOCK 00004000 -#endif /* #ifndef SOCK_NONBLOCK */ - -#ifndef SOCK_CLOEXEC -#define SOCK_CLOEXEC 02000000 -#endif /* #ifndef SOCK_CLOEXEC */ - -#endif /* #ifndef HAVE_ACCEPT4 */ - - -// ---------------------------------------------------------------------------- -// poll() based listener - -#define POLLINFO_FLAG_SERVER_SOCKET 0x00000001 -#define POLLINFO_FLAG_CLIENT_SOCKET 0x00000002 -#define POLLINFO_FLAG_DONT_CLOSE 0x00000004 - -typedef struct poll POLLJOB; - -typedef struct pollinfo { - POLLJOB *p; // the parent - size_t slot; // the slot id - - int fd; // the file descriptor - int socktype; // the client socket type - char *client_ip; // the connected client IP - char *client_port; // the connected client port - - time_t connected_t; // the time the socket connected - time_t last_received_t; // the time the socket last received data - time_t last_sent_t; // the time the socket last sent data - - size_t recv_count; // the number of times the socket was ready for inbound traffic - size_t send_count; // the number of times the socket was ready for outbound traffic - - uint32_t flags; // internal 
flags - - // callbacks for this socket - void (*del_callback)(struct pollinfo *pi); - int (*rcv_callback)(struct pollinfo *pi, short int *events); - int (*snd_callback)(struct pollinfo *pi, short int *events); - - // the user data - void *data; - - // linking of free pollinfo structures - // for quickly finding the next available - // this is like a stack, it grows and shrinks - // (with gaps - lower empty slots are preferred) - struct pollinfo *next; -} POLLINFO; - -struct poll { - size_t slots; - size_t used; - size_t min; - size_t max; - - size_t limit; - - time_t complete_request_timeout; - time_t idle_timeout; - time_t checks_every; - - time_t timer_milliseconds; - void *timer_data; - - struct pollfd *fds; - struct pollinfo *inf; - struct pollinfo *first_free; - - SIMPLE_PATTERN *access_list; - - void *(*add_callback)(POLLINFO *pi, short int *events, void *data); - void (*del_callback)(POLLINFO *pi); - int (*rcv_callback)(POLLINFO *pi, short int *events); - int (*snd_callback)(POLLINFO *pi, short int *events); - void (*tmr_callback)(void *timer_data); -}; - -#define pollinfo_from_slot(p, slot) (&((p)->inf[(slot)])) - -extern int poll_default_snd_callback(POLLINFO *pi, short int *events); -extern int poll_default_rcv_callback(POLLINFO *pi, short int *events); -extern void poll_default_del_callback(POLLINFO *pi); -extern void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data); - -extern POLLINFO *poll_add_fd(POLLJOB *p - , int fd - , int socktype - , uint32_t flags - , const char *client_ip - , const char *client_port - , void *(*add_callback)(POLLINFO *pi, short int *events, void *data) - , void (*del_callback)(POLLINFO *pi) - , int (*rcv_callback)(POLLINFO *pi, short int *events) - , int (*snd_callback)(POLLINFO *pi, short int *events) - , void *data -); -extern void poll_close_fd(POLLINFO *pi); - -extern void poll_events(LISTEN_SOCKETS *sockets - , void *(*add_callback)(POLLINFO *pi, short int *events, void *data) - , void (*del_callback)(POLLINFO *pi) - , int (*rcv_callback)(POLLINFO *pi, short int *events) - , int (*snd_callback)(POLLINFO *pi, short int *events) - , void (*tmr_callback)(void *timer_data) - , SIMPLE_PATTERN *access_list - , void *data - , time_t tcp_request_timeout_seconds - , time_t tcp_idle_timeout_seconds - , time_t timer_milliseconds - , void *timer_data - , size_t max_tcp_sockets -); - -#endif //NETDATA_SOCKET_H diff --git a/src/statistical.c b/src/statistical.c deleted file mode 100644 index d4b33fd5a..000000000 --- a/src/statistical.c +++ /dev/null @@ -1,459 +0,0 @@ -#include "common.h" - -// -------------------------------------------------------------------------------------------------------------------- - -inline LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count) { - if(unlikely(entries == 0)) { - if(likely(count)) - *count = 0; - - return NAN; - } - - if(unlikely(entries == 1)) { - if(likely(count)) - *count = (isnan(series[0])?0:1); - - return series[0]; - } - - size_t i, c = 0; - LONG_DOUBLE sum = 0; - - for(i = 0; i < entries ; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - c++; - sum += value; - } - - if(likely(count)) - *count = c; - - if(unlikely(c == 0)) - return NAN; - - return sum; -} - -inline LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries) { - return sum_and_count(series, entries, NULL); -} - -inline LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries) { - size_t count = 0; - LONG_DOUBLE sum = sum_and_count(series, 
entries, &count); - - if(unlikely(count == 0)) - return NAN; - - return sum / (LONG_DOUBLE)count; -} - -// -------------------------------------------------------------------------------------------------------------------- - -LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period) { - if(unlikely(period == 0)) - return 0.0; - - size_t i, count; - LONG_DOUBLE sum = 0, avg = 0; - LONG_DOUBLE p[period]; - - for(count = 0; count < period ; count++) - p[count] = 0.0; - - for(i = 0, count = 0; i < entries; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - - if(unlikely(count < period)) { - sum += value; - avg = (count == period - 1) ? sum / (LONG_DOUBLE)period : 0; - } - else { - sum = sum - p[count % period] + value; - avg = sum / (LONG_DOUBLE)period; - } - - p[count % period] = value; - count++; - } - - return avg; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static int qsort_compare(const void *a, const void *b) { - LONG_DOUBLE *p1 = (LONG_DOUBLE *)a, *p2 = (LONG_DOUBLE *)b; - LONG_DOUBLE n1 = *p1, n2 = *p2; - - if(unlikely(isnan(n1) || isnan(n2))) { - if(isnan(n1) && !isnan(n2)) return -1; - if(!isnan(n1) && isnan(n2)) return 1; - return 0; - } - if(unlikely(isinf(n1) || isinf(n2))) { - if(!isinf(n1) && isinf(n2)) return -1; - if(isinf(n1) && !isinf(n2)) return 1; - return 0; - } - - if(unlikely(n1 < n2)) return -1; - if(unlikely(n1 > n2)) return 1; - return 0; -} - -inline void sort_series(LONG_DOUBLE *series, size_t entries) { - qsort(series, entries, sizeof(LONG_DOUBLE), qsort_compare); -} - -inline LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries) { - LONG_DOUBLE *copy = mallocz(sizeof(LONG_DOUBLE) * entries); - memcpy(copy, series, sizeof(LONG_DOUBLE) * entries); - return copy; -} - -LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries) { - if(unlikely(entries == 0)) - return NAN; - - if(unlikely(entries == 1)) - return series[0]; - - if(unlikely(entries == 2)) - return (series[0] + series[1]) / 2; - - LONG_DOUBLE avg; - if(entries % 2 == 0) { - size_t m = entries / 2; - avg = (series[m - 1] + series[m]) / 2; - } - else { - avg = series[entries / 2]; - } - - return avg; -} - -LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries) { - if(unlikely(entries == 0)) - return NAN; - - if(unlikely(entries == 1)) - return series[0]; - - if(unlikely(entries == 2)) - return (series[0] + series[1]) / 2; - - LONG_DOUBLE *copy = copy_series(series, entries); - sort_series(copy, entries); - - LONG_DOUBLE avg = median_on_sorted_series(copy, entries); - - freez(copy); - return avg; -} - -// -------------------------------------------------------------------------------------------------------------------- - -LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period) { - if(entries <= period) - return median(series, entries); - - LONG_DOUBLE *data = copy_series(series, entries); - - size_t i; - for(i = period; i < entries; i++) { - data[i - period] = median(&series[i - period], period); - } - - LONG_DOUBLE avg = median(data, entries - period); - freez(data); - return avg; -}
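Because median() sorts a private copy (via copy_series() and sort_series()), moving_median() pays for one copy and one sort per window. A minimal usage sketch with arbitrary sample values, assuming the declarations from common.h are in scope:

    LONG_DOUBLE series[] = { 1.0, 9.0, 2.0, 8.0, 3.0 };
    size_t entries = sizeof(series) / sizeof(series[0]);
    LONG_DOUBLE med = median(series, entries);           // sorted copy is {1,2,3,8,9}, so 3.0; input untouched
    LONG_DOUBLE mm  = moving_median(series, entries, 3); // median of the period-3 window medians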
 - -// -------------------------------------------------------------------------------------------------------------------- - -// http://stackoverflow.com/a/15150143/4525767 -LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) { - LONG_DOUBLE median = 0.0f; - LONG_DOUBLE average = 0.0f; - size_t i; - - for(i = 0; i < entries ; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - - average += ( value - average ) * 0.1f; // rough running average. - median += copysignl( average * 0.01, value - median ); - } - - return median; -} - -// -------------------------------------------------------------------------------------------------------------------- - -LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) { - if(unlikely(entries < 1)) - return NAN; - - if(unlikely(entries == 1)) - return series[0]; - - size_t i, count = 0; - LONG_DOUBLE sum = 0; - - for(i = 0; i < entries ; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - - count++; - sum += value; - } - - if(unlikely(count == 0)) - return NAN; - - if(unlikely(count == 1)) - return sum; - - LONG_DOUBLE average = sum / (LONG_DOUBLE)count; - - for(i = 0, count = 0, sum = 0; i < entries ; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - - count++; - sum += powl(value - average, 2); - } - - if(unlikely(count == 0)) - return NAN; - - if(unlikely(count == 1)) - return average; - - LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count - 1); // remove -1 to have a population stddev - - LONG_DOUBLE stddev = sqrtl(variance); - return stddev; -} - -// -------------------------------------------------------------------------------------------------------------------- - -LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) { - size_t i, count = 0; - LONG_DOUBLE level = 0, sum = 0; - - if(unlikely(isnan(alpha))) - alpha = 0.3; - - for(i = 0; i < entries ; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - count++; - - sum += value; - - LONG_DOUBLE last_level = level; - level = alpha * value + (1.0 - alpha) * last_level; - } - - return level; -} - -// -------------------------------------------------------------------------------------------------------------------- - -// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/ -LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast) { - size_t i, count = 0; - LONG_DOUBLE level = series[0], trend, sum; - - if(unlikely(isnan(alpha))) - alpha = 0.3; - - if(unlikely(isnan(beta))) - beta = 0.05; - - if(likely(entries > 1)) - trend = series[1] - series[0]; - else - trend = 0; - - sum = series[0]; - - for(i = 1; i < entries ; i++) { - LONG_DOUBLE value = series[i]; - if(unlikely(isnan(value) || isinf(value))) continue; - count++; - - sum += value; - - LONG_DOUBLE last_level = level; - - level = alpha * value + (1.0 - alpha) * (level + trend); - trend = beta * (level - last_level) + (1.0 - beta) * trend; - } - - if(forecast) - *forecast = level + trend; - - return level; -}
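As a worked example of the smoothing recurrence level = alpha * value + (1 - alpha) * level: single_exponential_smoothing() starts from level = 0, so with alpha = 0.3 and the series 10, 10, 20 the level becomes 0.3*10 = 3, then 0.3*10 + 0.7*3 = 5.1, then 0.3*20 + 0.7*5.1 = 9.57. The early output is therefore biased toward zero; double_exponential_smoothing() avoids part of that bias by seeding level = series[0] and trend = series[1] - series[0].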
 - -// -------------------------------------------------------------------------------------------------------------------- - -/* - * Based on the R implementation - * - * a: level component - * b: trend component - * s: seasonal component - * - * Additive: - * - *   Yhat[t+h] = a[t] + h * b[t] + s[t + 1 + (h - 1) mod p], - *   a[t] = α (Y[t] - s[t-p]) + (1-α) (a[t-1] + b[t-1]) - *   b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1] - *   s[t] = γ (Y[t] - a[t]) + (1-γ) s[t-p] - * - * Multiplicative: - * - *   Yhat[t+h] = (a[t] + h * b[t]) * s[t + 1 + (h - 1) mod p], - *   a[t] = α (Y[t] / s[t-p]) + (1-α) (a[t-1] + b[t-1]) - *   b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1] - *   s[t] = γ (Y[t] / a[t]) + (1-γ) s[t-p] - */ -static int __HoltWinters( - const LONG_DOUBLE *series, - int entries, // start_time + h - - LONG_DOUBLE alpha, // alpha parameter of Holt-Winters Filter. - LONG_DOUBLE beta, // beta parameter of Holt-Winters Filter. If set to 0, the function will do exponential smoothing. - LONG_DOUBLE gamma, // gamma parameter used for the seasonal component. If set to 0, a non-seasonal model is fitted. - - const int *seasonal, - const int *period, - const LONG_DOUBLE *a, // Start value for level (a[0]). - const LONG_DOUBLE *b, // Start value for trend (b[0]). - LONG_DOUBLE *s, // Vector of start values for the seasonal component (s_1[0] ... s_p[0]) - - /* return values */ - LONG_DOUBLE *SSE, // The final sum of squared errors achieved in optimizing - LONG_DOUBLE *level, // Estimated values for the level component (size entries - t + 2) - LONG_DOUBLE *trend, // Estimated values for the trend component (size entries - t + 2) - LONG_DOUBLE *season // Estimated values for the seasonal component (size entries - t + 2) -) -{ - if(unlikely(entries < 4)) - return 0; - - int start_time = 2; - - LONG_DOUBLE res = 0, xhat = 0, stmp = 0; - int i, i0, s0; - - /* copy start values to the beginning of the vectors */ - level[0] = *a; - if(beta > 0) trend[0] = *b; - if(gamma > 0) memcpy(season, s, *period * sizeof(LONG_DOUBLE)); - - for(i = start_time - 1; i < entries; i++) { - /* indices for period i */ - i0 = i - start_time + 2; - s0 = i0 + *period - 1; - - /* forecast *for* period i */ - xhat = level[i0 - 1] + (beta > 0 ? trend[i0 - 1] : 0); - stmp = gamma > 0 ? season[s0 - *period] : (*seasonal != 1); - if (*seasonal == 1) - xhat += stmp; - else - xhat *= stmp; - - /* Sum of Squared Errors */ - res = series[i] - xhat; - *SSE += res * res; - - /* estimate of level *in* period i */ - if (*seasonal == 1) - level[i0] = alpha * (series[i] - stmp) - + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]); - else - level[i0] = alpha * (series[i] / stmp) - + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]); - - /* estimate of trend *in* period i */ - if (beta > 0) - trend[i0] = beta * (level[i0] - level[i0 - 1]) - + (1 - beta) * trend[i0 - 1]; - - /* estimate of seasonal component *in* period i */ - if (gamma > 0) { - if (*seasonal == 1) - season[s0] = gamma * (series[i] - level[i0]) - + (1 - gamma) * stmp; - else - season[s0] = gamma * (series[i] / level[i0]) - + (1 - gamma) * stmp; - } - } - - return 1; -} - -LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast) { - if(unlikely(isnan(alpha))) - alpha = 0.3; - - if(unlikely(isnan(beta))) - beta = 0.05; - - if(unlikely(isnan(gamma))) - gamma = 0; - - int seasonal = 0; - int period = 0; - LONG_DOUBLE a0 = series[0]; - LONG_DOUBLE b0 = 0; - LONG_DOUBLE s[] = {}; - - LONG_DOUBLE errors = 0.0; - size_t nb_computations = entries; - LONG_DOUBLE *estimated_level = callocz(nb_computations, sizeof(LONG_DOUBLE)); - LONG_DOUBLE *estimated_trend = callocz(nb_computations, sizeof(LONG_DOUBLE)); - LONG_DOUBLE *estimated_season = callocz(nb_computations, sizeof(LONG_DOUBLE)); - - int ret = __HoltWinters( - series, - (int)entries, - alpha, - beta, - gamma, - &seasonal, - &period, - &a0, - &b0, - s, - &errors, - estimated_level, - estimated_trend, - estimated_season - ); - - LONG_DOUBLE value = estimated_level[nb_computations - 1]; - - if(forecast) - *forecast = 0.0; - - 
freez(estimated_level); - freez(estimated_trend); - freez(estimated_season); - - if(!ret) - return 0.0; - - return value; -} diff --git a/src/statistical.h b/src/statistical.h deleted file mode 100644 index 675389021..000000000 --- a/src/statistical.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef NETDATA_STATISTICAL_H -#define NETDATA_STATISTICAL_H - -extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries); -extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period); -extern LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries); -extern LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period); -extern LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries); -extern LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries); -extern LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha); -extern LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast); -extern LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast); -extern LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count); -extern LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries); -extern LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries); -extern LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries); -extern void sort_series(LONG_DOUBLE *series, size_t entries); - -#endif //NETDATA_STATISTICAL_H diff --git a/src/statsd.c b/src/statsd.c deleted file mode 100644 index 44ebd8894..000000000 --- a/src/statsd.c +++ /dev/null @@ -1,2499 +0,0 @@ -#include "common.h" - -#define STATSD_CHART_PREFIX "statsd" -#define STATSD_CHART_PRIORITY 90000 - -// -------------------------------------------------------------------------------------- - -// #define STATSD_MULTITHREADED 1 - -#ifdef STATSD_MULTITHREADED -// DO NOT ENABLE MULTITHREADING - IT IS NOT WELL TESTED -#define STATSD_AVL_TREE avl_tree_lock -#define STATSD_AVL_INSERT avl_insert_lock -#define STATSD_AVL_SEARCH avl_search_lock -#define STATSD_AVL_INDEX_INIT { .avl_tree = { NULL, statsd_metric_compare }, .rwlock = AVL_LOCK_INITIALIZER } -#define STATSD_FIRST_PTR_MUTEX netdata_mutex_t first_mutex -#define STATSD_FIRST_PTR_MUTEX_INIT .first_mutex = NETDATA_MUTEX_INITIALIZER -#define STATSD_FIRST_PTR_MUTEX_LOCK(index) netdata_mutex_lock(&((index)->first_mutex)) -#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) netdata_mutex_unlock(&((index)->first_mutex)) -#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_DEFAULT -#else -#define STATSD_AVL_TREE avl_tree -#define STATSD_AVL_INSERT avl_insert -#define STATSD_AVL_SEARCH avl_search -#define STATSD_AVL_INDEX_INIT { .root = NULL, .compar = statsd_metric_compare } -#define STATSD_FIRST_PTR_MUTEX -#define STATSD_FIRST_PTR_MUTEX_INIT -#define STATSD_FIRST_PTR_MUTEX_LOCK(index) -#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) -#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_SINGLE_THREADED -#endif - -#define STATSD_DECIMAL_DETAIL 1000 // floating point values get multiplied by this, with the same divisor - -// -------------------------------------------------------------------------------------------------------------------- -// data specific to each metric type - -typedef struct statsd_metric_gauge { - LONG_DOUBLE value; -} STATSD_METRIC_GAUGE; - 
-typedef struct statsd_metric_counter { // counter and meter - long long value; -} STATSD_METRIC_COUNTER; - -typedef struct statsd_histogram_extensions { - netdata_mutex_t mutex; - - // average is stored in metric->last - collected_number last_min; - collected_number last_max; - collected_number last_percentile; - collected_number last_median; - collected_number last_stddev; - collected_number last_sum; - - int zeroed; - - RRDDIM *rd_min; - RRDDIM *rd_max; - RRDDIM *rd_percentile; - RRDDIM *rd_median; - RRDDIM *rd_stddev; - RRDDIM *rd_sum; - - size_t size; - size_t used; - LONG_DOUBLE *values; // dynamic array of values collected -} STATSD_METRIC_HISTOGRAM_EXTENSIONS; - -typedef struct statsd_metric_histogram { // histogram and timer - STATSD_METRIC_HISTOGRAM_EXTENSIONS *ext; -} STATSD_METRIC_HISTOGRAM; - -typedef struct statsd_metric_set { - DICTIONARY *dict; - size_t unique; -} STATSD_METRIC_SET; - - -// -------------------------------------------------------------------------------------------------------------------- -// this is a metric - for all types of metrics - -typedef enum statsd_metric_options { - STATSD_METRIC_OPTION_NONE = 0x00000000, // no options set - STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED = 0x00000001, // do not update the chart dimension, when this metric is not collected - STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED = 0x00000002, // render a private chart for this metric - STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED = 0x00000004, // the metric has been checked if it should get private chart or not - STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT = 0x00000008, // show the count of events for this private chart - STATSD_METRIC_OPTION_CHECKED_IN_APPS = 0x00000010, // set when this metric has been checked against apps -} STATS_METRIC_OPTIONS; - -typedef enum statsd_metric_type { - STATSD_METRIC_TYPE_GAUGE, - STATSD_METRIC_TYPE_COUNTER, - STATSD_METRIC_TYPE_METER, - STATSD_METRIC_TYPE_TIMER, - STATSD_METRIC_TYPE_HISTOGRAM, - STATSD_METRIC_TYPE_SET -} STATSD_METRIC_TYPE; - - -typedef struct statsd_metric { - avl avl; // indexing - - const char *name; // the name of the metric - uint32_t hash; // hash of the name - - STATSD_METRIC_TYPE type; - - // metadata about data collection - collected_number events; // the number of times this metric has been collected (never resets) - size_t count; // the number of times this metric has been collected since the last flush - - // the actual collected data - union { - STATSD_METRIC_GAUGE gauge; - STATSD_METRIC_COUNTER counter; - STATSD_METRIC_HISTOGRAM histogram; - STATSD_METRIC_SET set; - }; - - // chart related members - STATS_METRIC_OPTIONS options; // STATSD_METRIC_OPTION_* (bitfield) - char reset; // set to 1 to reset this metric to zero - collected_number last; // the last value sent to netdata - RRDSET *st; // the chart of this metric - RRDDIM *rd_value; // the dimension of this metric value - RRDDIM *rd_count; // the dimension for the number of events received - - // linking, used for walking through all metrics - struct statsd_metric *next; -} STATSD_METRIC;
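Each sample arrives on the wire as a line of the form name:value|type[|@rate]; the type token selects both the index and the union member used above. Illustrative datagrams (invented for the example, not taken from the sources):

    cache.hits:12|c      -> counter, counter.value += 12
    cpu.temp:42.5|g      -> gauge, gauge.value = 42.5
    db.query:250|ms|@0.1 -> timer, 250 / 0.1 = 2500 appended to the histogram values
    user.ids:37|s        -> set, the string "37" is counted once in set.dict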
 - - -// -------------------------------------------------------------------------------------------------------------------- -// each type of metric has its own index - -typedef struct statsd_index { - char *name; // the name of the index of metrics - size_t events; // the number of events processed for this index - size_t metrics; // the number of metrics in this index - - STATSD_AVL_TREE index; // the AVL tree - - STATSD_METRIC *first; // the linked list of metrics (new metrics are added in front) - STATSD_FIRST_PTR_MUTEX; // when multi-threading is enabled, a lock to protect the linked list - - STATS_METRIC_OPTIONS default_options; // default options for all metrics in this index -} STATSD_INDEX; - -static int statsd_metric_compare(void* a, void* b); - -// -------------------------------------------------------------------------------------------------------------------- -// synthetic charts - -typedef enum statsd_app_chart_dimension_value_type { - STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS, - STATSD_APP_CHART_DIM_VALUE_TYPE_LAST, - STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE, - STATSD_APP_CHART_DIM_VALUE_TYPE_SUM, - STATSD_APP_CHART_DIM_VALUE_TYPE_MIN, - STATSD_APP_CHART_DIM_VALUE_TYPE_MAX, - STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE, - STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN, - STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV -} STATSD_APP_CHART_DIM_VALUE_TYPE; - -typedef struct statsd_app_chart_dimension { - const char *name; // the name of this dimension - const char *metric; // the source metric name of this dimension - uint32_t metric_hash; // hash for fast string comparisons - - SIMPLE_PATTERN *metric_pattern; // set when the 'metric' is a simple pattern - - collected_number multiplier; // the multiplier of the dimension - collected_number divisor; // the divisor of the dimension - RRDDIM_FLAGS flags; // the RRDDIM flags for this dimension - - STATSD_APP_CHART_DIM_VALUE_TYPE value_type; // which value to use of the source metric - - RRDDIM *rd; // a pointer to the RRDDIM that has been created for this dimension - collected_number *value_ptr; // a pointer to the source metric value - RRD_ALGORITHM algorithm; // the algorithm of this dimension - - struct statsd_app_chart_dimension *next; // the next dimension for this chart -} STATSD_APP_CHART_DIM; - -typedef struct statsd_app_chart { - const char *source; - const char *id; - const char *name; - const char *title; - const char *family; - const char *context; - const char *units; - long priority; - RRDSET_TYPE chart_type; - STATSD_APP_CHART_DIM *dimensions; - size_t dimensions_count; - size_t dimensions_linked_count; - - RRDSET *st; - struct statsd_app_chart *next; -} STATSD_APP_CHART; - -typedef struct statsd_app { - const char *name; - SIMPLE_PATTERN *metrics; - STATS_METRIC_OPTIONS default_options; - RRD_MEMORY_MODE rrd_memory_mode; - DICTIONARY *dict; - long rrd_history_entries; - - const char *source; - STATSD_APP_CHART *charts; - struct statsd_app *next; -} STATSD_APP; - -// -------------------------------------------------------------------------------------------------------------------- -// global statsd data - -struct collection_thread_status { - int status; - size_t max_sockets; - - netdata_thread_t thread; - struct rusage rusage; - RRDSET *st_cpu; - RRDDIM *rd_user; - RRDDIM *rd_system; -}; - -static struct statsd { - STATSD_INDEX gauges; - STATSD_INDEX counters; - STATSD_INDEX timers; - STATSD_INDEX histograms; - STATSD_INDEX meters; - STATSD_INDEX sets; - size_t unknown_types; - size_t socket_errors; - size_t tcp_socket_connects; - size_t tcp_socket_disconnects; - size_t tcp_socket_connected; - size_t tcp_socket_reads; - size_t tcp_packets_received; - size_t tcp_bytes_read; - size_t udp_socket_reads; - size_t udp_packets_received; - size_t udp_bytes_read; - - int enabled; - int update_every; - SIMPLE_PATTERN *charts_for; - - size_t tcp_idle_timeout; - size_t decimal_detail; - size_t private_charts; - size_t max_private_charts; - size_t max_private_charts_hard; - RRD_MEMORY_MODE private_charts_memory_mode;
- long private_charts_rrd_history_entries; - int private_charts_hidden; - - STATSD_APP *apps; - size_t recvmmsg_size; - size_t histogram_increase_step; - double histogram_percentile; - char *histogram_percentile_str; - - int threads; - struct collection_thread_status *collection_threads_status; - - LISTEN_SOCKETS sockets; -} statsd = { - .enabled = 1, - .max_private_charts = 200, - .max_private_charts_hard = 1000, - .private_charts_hidden = 0, - .recvmmsg_size = 10, - .decimal_detail = STATSD_DECIMAL_DETAIL, - - .gauges = { - .name = "gauge", - .events = 0, - .metrics = 0, - .index = STATSD_AVL_INDEX_INIT, - .default_options = STATSD_METRIC_OPTION_NONE, - .first = NULL, - STATSD_FIRST_PTR_MUTEX_INIT - }, - .counters = { - .name = "counter", - .events = 0, - .metrics = 0, - .index = STATSD_AVL_INDEX_INIT, - .default_options = STATSD_METRIC_OPTION_NONE, - .first = NULL, - STATSD_FIRST_PTR_MUTEX_INIT - }, - .timers = { - .name = "timer", - .events = 0, - .metrics = 0, - .index = STATSD_AVL_INDEX_INIT, - .default_options = STATSD_METRIC_OPTION_NONE, - .first = NULL, - STATSD_FIRST_PTR_MUTEX_INIT - }, - .histograms = { - .name = "histogram", - .events = 0, - .metrics = 0, - .index = STATSD_AVL_INDEX_INIT, - .default_options = STATSD_METRIC_OPTION_NONE, - .first = NULL, - STATSD_FIRST_PTR_MUTEX_INIT - }, - .meters = { - .name = "meter", - .events = 0, - .metrics = 0, - .index = STATSD_AVL_INDEX_INIT, - .default_options = STATSD_METRIC_OPTION_NONE, - .first = NULL, - STATSD_FIRST_PTR_MUTEX_INIT - }, - .sets = { - .name = "set", - .events = 0, - .metrics = 0, - .index = STATSD_AVL_INDEX_INIT, - .default_options = STATSD_METRIC_OPTION_NONE, - .first = NULL, - STATSD_FIRST_PTR_MUTEX_INIT - }, - - .tcp_idle_timeout = 600, - - .apps = NULL, - .histogram_percentile = 95.0, - .histogram_increase_step = 10, - .threads = 0, - .collection_threads_status = NULL, - .sockets = { - .config_section = CONFIG_SECTION_STATSD, - .default_bind_to = "udp:localhost tcp:localhost", - .default_port = STATSD_LISTEN_PORT, - .backlog = STATSD_LISTEN_BACKLOG - }, -}; - - -// -------------------------------------------------------------------------------------------------------------------- -// statsd index management - add/find metrics - -static int statsd_metric_compare(void* a, void* b) { - if(((STATSD_METRIC *)a)->hash < ((STATSD_METRIC *)b)->hash) return -1; - else if(((STATSD_METRIC *)a)->hash > ((STATSD_METRIC *)b)->hash) return 1; - else return strcmp(((STATSD_METRIC *)a)->name, ((STATSD_METRIC *)b)->name); -} - -static inline STATSD_METRIC *statsd_metric_index_find(STATSD_INDEX *index, const char *name, uint32_t hash) { - STATSD_METRIC tmp; - tmp.name = name; - tmp.hash = (hash)?hash:simple_hash(tmp.name); - - return (STATSD_METRIC *)STATSD_AVL_SEARCH(&index->index, (avl *)&tmp); -} - -static inline STATSD_METRIC *statsd_find_or_add_metric(STATSD_INDEX *index, const char *name, STATSD_METRIC_TYPE type) { - debug(D_STATSD, "searching for metric '%s' under '%s'", name, index->name); - - uint32_t hash = simple_hash(name); - - STATSD_METRIC *m = statsd_metric_index_find(index, name, hash); - if(unlikely(!m)) { - debug(D_STATSD, "Creating new %s metric '%s'", index->name, name); - - m = (STATSD_METRIC *)callocz(sizeof(STATSD_METRIC), 1); - m->name = strdupz(name); - m->hash = hash; - m->type = type; - m->options = index->default_options; - - if(type == STATSD_METRIC_TYPE_HISTOGRAM || type == STATSD_METRIC_TYPE_TIMER) { - m->histogram.ext = callocz(sizeof(STATSD_METRIC_HISTOGRAM_EXTENSIONS), 1); - 
netdata_mutex_init(&m->histogram.ext->mutex); - } - STATSD_METRIC *n = (STATSD_METRIC *)STATSD_AVL_INSERT(&index->index, (avl *)m); - if(unlikely(n != m)) { - freez((void *)m->histogram.ext); - freez((void *)m->name); - freez((void *)m); - m = n; - } - else { - STATSD_FIRST_PTR_MUTEX_LOCK(index); - index->metrics++; - m->next = index->first; - index->first = m; - STATSD_FIRST_PTR_MUTEX_UNLOCK(index); - } - } - - index->events++; - return m; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// statsd parsing numbers - -static inline LONG_DOUBLE statsd_parse_float(const char *v, LONG_DOUBLE def) { - LONG_DOUBLE value; - - if(likely(v && *v)) { - char *e = NULL; - value = str2ld(v, &e); - if(unlikely(e && *e)) - error("STATSD: excess data '%s' after value '%s'", e, v); - } - else - value = def; - - return value; -} - -static inline long long statsd_parse_int(const char *v, long long def) { - long long value; - - if(likely(v && *v)) { - char *e = NULL; - value = str2ll(v, &e); - if(unlikely(e && *e)) - error("STATSD: excess data '%s' after value '%s'", e, v); - } - else - value = def; - - return value; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// statsd processors per metric type - -static inline void statsd_reset_metric(STATSD_METRIC *m) { - m->reset = 0; - m->count = 0; -} - -static inline int value_is_zinit(const char *value) { - return (value && *value == 'z' && *++value == 'i' && *++value == 'n' && *++value == 'i' && *++value == 't' && *++value == '\0'); -} - -static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) { - if(unlikely(!value || !*value)) { - error("STATSD: metric '%s' of type gauge, with empty value is ignored.", m->name); - return; - } - - if(unlikely(m->reset)) { - // no need to reset anything specific for gauges - statsd_reset_metric(m); - } - - if(unlikely(value_is_zinit(value))) { - // magic loading of metric, without affecting anything - } - else { - if (unlikely(*value == '+' || *value == '-')) - m->gauge.value += statsd_parse_float(value, 1.0) / statsd_parse_float(sampling, 1.0); - else - m->gauge.value = statsd_parse_float(value, 1.0) / statsd_parse_float(sampling, 1.0); - - m->events++; - m->count++; - } -} - -static inline void statsd_process_counter(STATSD_METRIC *m, const char *value, const char *sampling) { - // we accept empty values for counters - - if(unlikely(m->reset)) statsd_reset_metric(m); - - if(unlikely(value_is_zinit(value))) { - // magic loading of metric, without affecting anything - } - else { - m->counter.value += llrintl((LONG_DOUBLE) statsd_parse_int(value, 1) / statsd_parse_float(sampling, 1.0)); - - m->events++; - m->count++; - } -} - -static inline void statsd_process_meter(STATSD_METRIC *m, const char *value, const char *sampling) { - // this is the same as the counter - statsd_process_counter(m, value, sampling); -}
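The sampling rate compensates for client-side downsampling: a counter received as requests:3|c|@0.25 is booked as 3 / 0.25 = 12 events, on the assumption that only a quarter of the real events were actually sent. A minimal sketch of the same arithmetic, reusing the two parsers above:

    // hypothetical fragment, using statsd_parse_int()/statsd_parse_float() as defined above
    long long value = statsd_parse_int("3", 1);             // 3
    LONG_DOUBLE rate = statsd_parse_float("0.25", 1.0);     // 0.25
    long long booked = llrintl((LONG_DOUBLE)value / rate);  // 12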
 - -static inline void statsd_process_histogram(STATSD_METRIC *m, const char *value, const char *sampling) { - if(unlikely(!value || !*value)) { - error("STATSD: metric '%s' of type histogram, with empty value is ignored.", m->name); - return; - } - - if(unlikely(m->reset)) { - m->histogram.ext->used = 0; - statsd_reset_metric(m); - } - - if(unlikely(value_is_zinit(value))) { - // magic loading of metric, without affecting anything - } - else { - if (unlikely(m->histogram.ext->used == m->histogram.ext->size)) { - netdata_mutex_lock(&m->histogram.ext->mutex); - m->histogram.ext->size += statsd.histogram_increase_step; - m->histogram.ext->values = reallocz(m->histogram.ext->values, sizeof(LONG_DOUBLE) * m->histogram.ext->size); - netdata_mutex_unlock(&m->histogram.ext->mutex); - } - - m->histogram.ext->values[m->histogram.ext->used++] = statsd_parse_float(value, 1.0) / statsd_parse_float(sampling, 1.0); - - m->events++; - m->count++; - } -} - -static inline void statsd_process_timer(STATSD_METRIC *m, const char *value, const char *sampling) { - if(unlikely(!value || !*value)) { - error("STATSD: metric '%s' of type timer, with empty value is ignored.", m->name); - return; - } - - // timers are a special case of histograms - statsd_process_histogram(m, value, sampling); -} - -static inline void statsd_process_set(STATSD_METRIC *m, const char *value) { - if(unlikely(!value || !*value)) { - error("STATSD: metric '%s' of type set, with empty value is ignored.", m->name); - return; - } - - if(unlikely(m->reset)) { - if(likely(m->set.dict)) { - dictionary_destroy(m->set.dict); - m->set.dict = NULL; - } - statsd_reset_metric(m); - } - - if (unlikely(!m->set.dict)) { - m->set.dict = dictionary_create(STATSD_DICTIONARY_OPTIONS | DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE); - m->set.unique = 0; - } - - if(unlikely(value_is_zinit(value))) { - // magic loading of metric, without affecting anything - } - else { - void *t = dictionary_get(m->set.dict, value); - if (unlikely(!t)) { - dictionary_set(m->set.dict, value, NULL, 1); - m->set.unique++; - } - - m->events++; - m->count++; - } -} - - -// -------------------------------------------------------------------------------------------------------------------- -// statsd parsing - -static void statsd_process_metric(const char *name, const char *value, const char *type, const char *sampling) { - debug(D_STATSD, "STATSD: raw metric '%s', value '%s', type '%s', rate '%s'", name?name:"(null)", value?value:"(null)", type?type:"(null)", sampling?sampling:"(null)"); - - if(unlikely(!name || !*name)) return; - if(unlikely(!type || !*type)) type = "m"; - - char t0 = type[0], t1 = type[1]; - - if(unlikely(t0 == 'g' && t1 == '\0')) { - statsd_process_gauge( - statsd_find_or_add_metric(&statsd.gauges, name, STATSD_METRIC_TYPE_GAUGE), - value, sampling); - } - else if(unlikely((t0 == 'c' || t0 == 'C') && t1 == '\0')) { - // etsy/statsd uses 'c' - // brubeck uses 'C' - statsd_process_counter( - statsd_find_or_add_metric(&statsd.counters, name, STATSD_METRIC_TYPE_COUNTER), - value, sampling); - } - else if(unlikely(t0 == 'm' && t1 == '\0')) { - statsd_process_meter( - statsd_find_or_add_metric(&statsd.meters, name, STATSD_METRIC_TYPE_METER), - value, sampling); - } - else if(unlikely(t0 == 'h' && t1 == '\0')) { - statsd_process_histogram( - statsd_find_or_add_metric(&statsd.histograms, name, STATSD_METRIC_TYPE_HISTOGRAM), - value, sampling); - } - else if(unlikely(t0 == 's' && t1 == '\0')) { - statsd_process_set( - statsd_find_or_add_metric(&statsd.sets, name, STATSD_METRIC_TYPE_SET), - value); - } - else if(unlikely(t0 == 'm' && t1 == 's' && type[2] == '\0')) { - statsd_process_timer( - statsd_find_or_add_metric(&statsd.timers, name, STATSD_METRIC_TYPE_TIMER), - value, sampling); - } - else { - statsd.unknown_types++; - error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type); - } -} - -static inline const char *statsd_parse_skip_up_to(const char *s, char d1, char d2) { - char c; - - for(c = *s; c && c != d1 && c != d2 && c != '\r' && c != '\n'; c = *++s) ; - - 
return s; -} - -const char *statsd_parse_skip_spaces(const char *s) { - char c; - - for(c = *s; c && ( c == ' ' || c == '\t' || c == '\r' || c == '\n' ); c = *++s) ; - - return s; -} - -static inline const char *statsd_parse_field_trim(const char *start, char *end) { - if(unlikely(!start)) { - start = end; - return start; - } - - while(start <= end && (*start == ' ' || *start == '\t')) - start++; - - *end = '\0'; - end--; - while(end >= start && (*end == ' ' || *end == '\t')) - *end-- = '\0'; - - return start; -} - -static inline size_t statsd_process(char *buffer, size_t size, int require_newlines) { - buffer[size] = '\0'; - debug(D_STATSD, "RECEIVED: %zu bytes: '%s'", size, buffer); - - const char *s = buffer; - while(*s) { - const char *name = NULL, *value = NULL, *type = NULL, *sampling = NULL; - char *name_end = NULL, *value_end = NULL, *type_end = NULL, *sampling_end = NULL; - - s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '|'); - if(name == name_end) { - s = statsd_parse_skip_spaces(s); - continue; - } - - if(likely(*s == ':')) - s = value_end = (char *) statsd_parse_skip_up_to(value = ++s, '|', '|'); - - if(likely(*s == '|')) - s = type_end = (char *) statsd_parse_skip_up_to(type = ++s, '|', '@'); - - if(likely(*s == '|' || *s == '@')) { - s = sampling_end = (char *) statsd_parse_skip_up_to(sampling = ++s, '\r', '\n'); - if(*sampling == '@') sampling++; - } - - // skip everything until the end of the line - while(*s && *s != '\n') s++; - - if(unlikely(require_newlines && *s != '\n' && s > buffer)) { - // move the remaining data to the beginning - size -= (name - buffer); - memmove(buffer, name, size); - return size; - } - else - s = statsd_parse_skip_spaces(s); - - statsd_process_metric( - statsd_parse_field_trim(name, name_end) - , statsd_parse_field_trim(value, value_end) - , statsd_parse_field_trim(type, type_end) - , statsd_parse_field_trim(sampling, sampling_end) - ); - } - - return 0; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// statsd pollfd interface - -#define STATSD_TCP_BUFFER_SIZE 65536 // minimize tcp reads -#define STATSD_UDP_BUFFER_SIZE 9000 // this should be up to MTU - -typedef enum { - STATSD_SOCKET_DATA_TYPE_TCP, - STATSD_SOCKET_DATA_TYPE_UDP -} STATSD_SOCKET_DATA_TYPE; - -struct statsd_tcp { - STATSD_SOCKET_DATA_TYPE type; - size_t size; - size_t len; - char buffer[]; -}; - -#ifdef HAVE_RECVMMSG -struct statsd_udp { - int *running; - STATSD_SOCKET_DATA_TYPE type; - size_t size; - struct iovec *iovecs; - struct mmsghdr *msgs; -}; -#else -struct statsd_udp { - int *running; - STATSD_SOCKET_DATA_TYPE type; - char buffer[STATSD_UDP_BUFFER_SIZE]; -}; -#endif - -// new TCP client connected -static void *statsd_add_callback(POLLINFO *pi, short int *events, void *data) { - (void)pi; - (void)data; - - *events = POLLIN; - - struct statsd_tcp *t = (struct statsd_tcp *)callocz(sizeof(struct statsd_tcp) + STATSD_TCP_BUFFER_SIZE, 1); - t->type = STATSD_SOCKET_DATA_TYPE_TCP; - t->size = STATSD_TCP_BUFFER_SIZE - 1; - statsd.tcp_socket_connects++; - statsd.tcp_socket_connected++; - - return t; -} - -// TCP client disconnected -static void statsd_del_callback(POLLINFO *pi) { - struct statsd_tcp *t = pi->data; - - if(likely(t)) { - if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) { - if(t->len != 0) { - statsd.socket_errors++; - error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. 
Trying to process it.", t->buffer); - statsd_process(t->buffer, t->len, 0); - } - statsd.tcp_socket_disconnects++; - statsd.tcp_socket_connected--; - } - else - error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP); - - freez(t); - } -} - -// Receive data -static int statsd_rcv_callback(POLLINFO *pi, short int *events) { - *events = POLLIN; - - int fd = pi->fd; - - switch(pi->socktype) { - case SOCK_STREAM: { - struct statsd_tcp *d = (struct statsd_tcp *)pi->data; - if(unlikely(!d)) { - error("STATSD: internal error: expected TCP data pointer is NULL"); - statsd.socket_errors++; - return -1; - } - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) { - error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type); - statsd.socket_errors++; - return -1; - } -#endif - - int ret = 0; - ssize_t rc; - do { - rc = recv(fd, &d->buffer[d->len], d->size - d->len, MSG_DONTWAIT); - if (rc < 0) { - // read failed - if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { - error("STATSD: recv() on TCP socket %d failed.", fd); - statsd.socket_errors++; - ret = -1; - } - } - else if (!rc) { - // connection closed - debug(D_STATSD, "STATSD: client disconnected."); - ret = -1; - } - else { - // data received - d->len += rc; - statsd.tcp_socket_reads++; - statsd.tcp_bytes_read += rc; - } - - if(likely(d->len > 0)) { - statsd.tcp_packets_received++; - d->len = statsd_process(d->buffer, d->len, 1); - } - - if(unlikely(ret == -1)) - return -1; - - } while (rc != -1); - break; - } - - case SOCK_DGRAM: { - struct statsd_udp *d = (struct statsd_udp *)pi->data; - if(unlikely(!d)) { - error("STATSD: internal error: expected UDP data pointer is NULL"); - statsd.socket_errors++; - return -1; - } - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) { - error("STATSD: internal error: socket data should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_UDP, (int)d->type); - statsd.socket_errors++; - return -1; - } -#endif - -#ifdef HAVE_RECVMMSG - ssize_t rc; - do { - rc = recvmmsg(fd, d->msgs, (unsigned int)d->size, MSG_DONTWAIT, NULL); - if (rc < 0) { - // read failed - if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { - error("STATSD: recvmmsg() on UDP socket %d failed.", fd); - statsd.socket_errors++; - return -1; - } - } else if (rc) { - // data received - statsd.udp_socket_reads++; - statsd.udp_packets_received += rc; - - size_t i; - for (i = 0; i < (size_t)rc; ++i) { - size_t len = (size_t)d->msgs[i].msg_len; - statsd.udp_bytes_read += len; - statsd_process(d->msgs[i].msg_hdr.msg_iov->iov_base, len, 0); - } - } - } while (rc != -1); - -#else // !HAVE_RECVMMSG - ssize_t rc; - do { - rc = recv(fd, d->buffer, STATSD_UDP_BUFFER_SIZE - 1, MSG_DONTWAIT); - if (rc < 0) { - // read failed - if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { - error("STATSD: recv() on UDP socket %d failed.", fd); - statsd.socket_errors++; - return -1; - } - } else if (rc) { - // data received - statsd.udp_socket_reads++; - statsd.udp_packets_received++; - statsd.udp_bytes_read += rc; - statsd_process(d->buffer, (size_t) rc, 0); - } - } while (rc != -1); -#endif - - break; - } - - default: { - error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd); - statsd.socket_errors++; - return -1; - } - } - - return 0; -} - -static int statsd_snd_callback(POLLINFO *pi, short
int *events) { - (void)pi; - (void)events; - - error("STATSD: snd_callback() called, but we never requested to send data to statsd clients."); - return -1; -} - -static void statsd_timer_callback(void *timer_data) { - struct collection_thread_status *status = timer_data; - getrusage(RUSAGE_THREAD, &status->rusage); -} - -// -------------------------------------------------------------------------------------------------------------------- -// statsd child thread to collect metrics from network - -void statsd_collector_thread_cleanup(void *data) { - struct statsd_udp *d = data; - *d->running = 0; - - info("cleaning up..."); - -#ifdef HAVE_RECVMMSG - size_t i; - for (i = 0; i < d->size; i++) - freez(d->iovecs[i].iov_base); - - freez(d->iovecs); - freez(d->msgs); -#endif - - freez(d); -} - -void *statsd_collector_thread(void *ptr) { - struct collection_thread_status *status = ptr; - status->status = 1; - - info("STATSD collector thread started with taskid %d", gettid()); - - struct statsd_udp *d = callocz(sizeof(struct statsd_udp), 1); - d->running = &status->status; - - netdata_thread_cleanup_push(statsd_collector_thread_cleanup, d); - -#ifdef HAVE_RECVMMSG - d->type = STATSD_SOCKET_DATA_TYPE_UDP; - d->size = statsd.recvmmsg_size; - d->iovecs = callocz(sizeof(struct iovec), d->size); - d->msgs = callocz(sizeof(struct mmsghdr), d->size); - - size_t i; - for (i = 0; i < d->size; i++) { - d->iovecs[i].iov_base = mallocz(STATSD_UDP_BUFFER_SIZE); - d->iovecs[i].iov_len = STATSD_UDP_BUFFER_SIZE - 1; - d->msgs[i].msg_hdr.msg_iov = &d->iovecs[i]; - d->msgs[i].msg_hdr.msg_iovlen = 1; - } -#endif - - poll_events(&statsd.sockets - , statsd_add_callback - , statsd_del_callback - , statsd_rcv_callback - , statsd_snd_callback - , statsd_timer_callback - , NULL - , (void *)d - , 0 // tcp request timeout, 0 = disabled - , statsd.tcp_idle_timeout // tcp idle timeout, 0 = disabled - , statsd.update_every * 1000 - , ptr // timer_data - , status->max_sockets - ); - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// -------------------------------------------------------------------------------------------------------------------- -// statsd applications configuration files parsing - -#define STATSD_CONF_LINE_MAX 8192 - -static STATSD_APP_CHART_DIM_VALUE_TYPE string2valuetype(const char *type, size_t line, const char *path, const char *filename) { - if(!type || !*type) type = "last"; - - if(!strcmp(type, "events")) return STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS; - else if(!strcmp(type, "last")) return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST; - else if(!strcmp(type, "min")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MIN; - else if(!strcmp(type, "max")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MAX; - else if(!strcmp(type, "sum")) return STATSD_APP_CHART_DIM_VALUE_TYPE_SUM; - else if(!strcmp(type, "average")) return STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE; - else if(!strcmp(type, "median")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN; - else if(!strcmp(type, "stddev")) return STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV; - else if(!strcmp(type, "percentile")) return STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE; - - error("STATSD: invalid type '%s' at line %zu of file '%s/%s'. 
Using 'last'.", type, line, path, filename); - return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST; -} - -static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) { - switch(type) { - case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: return "events"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: return "last"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: return "min"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: return "max"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: return "sum"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: return "average"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: return "median"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: return "stddev"; - case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: return "percentile"; - } - - return "unknown"; -} - -static STATSD_APP_CHART_DIM *add_dimension_to_app_chart( - STATSD_APP *app - , STATSD_APP_CHART *chart - , const char *metric_name - , const char *dim_name - , collected_number multiplier - , collected_number divisor - , RRDDIM_FLAGS flags - , STATSD_APP_CHART_DIM_VALUE_TYPE value_type -) { - STATSD_APP_CHART_DIM *dim = callocz(sizeof(STATSD_APP_CHART_DIM), 1); - - dim->metric = strdupz(metric_name); - dim->metric_hash = simple_hash(dim->metric); - - dim->name = strdupz((dim_name)?dim_name:""); - dim->multiplier = multiplier; - dim->divisor = divisor; - dim->value_type = value_type; - dim->flags = flags; - - if(!dim->multiplier) - dim->multiplier = 1; - - if(!dim->divisor) - dim->divisor = 1; - - // append it to the list of dimensions - STATSD_APP_CHART_DIM *tdim; - for(tdim = chart->dimensions; tdim && tdim->next ; tdim = tdim->next) ; - if(!tdim) { - dim->next = chart->dimensions; - chart->dimensions = dim; - } - else { - dim->next = tdim->next; - tdim->next = dim; - } - chart->dimensions_count++; - - debug(D_STATSD, "Added dimension '%s' to chart '%s' of app '%s', for metric '%s', with type %u, multiplier " COLLECTED_NUMBER_FORMAT ", divisor " COLLECTED_NUMBER_FORMAT, - dim->name, chart->id, app->name, dim->metric, dim->value_type, dim->multiplier, dim->divisor); - - return dim; -} - -static int statsd_readfile(const char *path, const char *filename, STATSD_APP *app, STATSD_APP_CHART *chart, DICTIONARY *dict) { - debug(D_STATSD, "STATSD configuration reading file '%s/%s'", path, filename); - - char *buffer = mallocz(STATSD_CONF_LINE_MAX + 1); - - if(filename[0] == '/') - strncpyz(buffer, filename, STATSD_CONF_LINE_MAX); - else - snprintfz(buffer, STATSD_CONF_LINE_MAX, "%s/%s", path, filename); - - FILE *fp = fopen(buffer, "r"); - if(!fp) { - error("STATSD: cannot open file '%s'.", buffer); - freez(buffer); - return -1; - } - - size_t line = 0; - char *s; - while(fgets(buffer, STATSD_CONF_LINE_MAX, fp) != NULL) { - buffer[STATSD_CONF_LINE_MAX] = '\0'; - line++; - - s = trim(buffer); - if (!s || *s == '#') { - debug(D_STATSD, "STATSD: ignoring line %zu of file '%s/%s', it is empty.", line, path, filename); - continue; - } - - debug(D_STATSD, "STATSD: processing line %zu of file '%s/%s': %s", line, path, filename, buffer); - - if(*s == 'i' && strncmp(s, "include", 7) == 0) { - s = trim(&s[7]); - if(s && *s) - statsd_readfile(path, s, app, chart, dict); - else - error("STATSD: ignoring line %zu of file '%s/%s', include filename is empty", line, path, filename); - - continue; - } - - int len = (int) strlen(s); - if (*s == '[' && s[len - 1] == ']') { - // new section - s[len - 1] = '\0'; - s++; - - if (!strcmp(s, "app")) { - // a new app - app = callocz(sizeof(STATSD_APP), 1); - app->name = strdupz("unnamed"); - 
app->rrd_memory_mode = localhost->rrd_memory_mode; - app->rrd_history_entries = localhost->rrd_history_entries; - - app->next = statsd.apps; - statsd.apps = app; - chart = NULL; - dict = NULL; - - { - char lineandfile[FILENAME_MAX + 1]; - snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename); - app->source = strdupz(lineandfile); - } - } - else if(app) { - if(!strcmp(s, "dictionary")) { - if(!app->dict) - app->dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED); - - dict = app->dict; - } - else { - dict = NULL; - - // a new chart - chart = callocz(sizeof(STATSD_APP_CHART), 1); - netdata_fix_chart_id(s); - chart->id = strdupz(s); - chart->name = strdupz(s); - chart->title = strdupz("Statsd chart"); - chart->context = strdupz(s); - chart->family = strdupz("overview"); - chart->units = strdupz("value"); - chart->priority = STATSD_CHART_PRIORITY; - chart->chart_type = RRDSET_TYPE_LINE; - - chart->next = app->charts; - app->charts = chart; - - { - char lineandfile[FILENAME_MAX + 1]; - snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename); - chart->source = strdupz(lineandfile); - } - } - } - else - error("STATSD: ignoring line %zu ('%s') of file '%s/%s', [app] is not defined.", line, s, path, filename); - - continue; - } - - if(!app) { - error("STATSD: ignoring line %zu ('%s') of file '%s/%s', it is outside all sections.", line, s, path, filename); - continue; - } - - char *name = s; - char *value = strchr(s, '='); - if(!value) { - error("STATSD: ignoring line %zu ('%s') of file '%s/%s', there is no = in it.", line, s, path, filename); - continue; - } - *value = '\0'; - value++; - - name = trim(name); - value = trim(value); - - if(!name || *name == '#') { - error("STATSD: ignoring line %zu of file '%s/%s', name is empty.", line, path, filename); - continue; - } - if(!value) { - debug(D_CONFIG, "STATSD: ignoring line %zu of file '%s/%s', value is empty.", line, path, filename); - continue; - } - - if(unlikely(dict)) { - // parse [dictionary] members - - dictionary_set(dict, name, value, strlen(value) + 1); - } - else if(!chart) { - // parse [app] members - - if(!strcmp(name, "name")) { - freez((void *)app->name); - netdata_fix_chart_name(value); - app->name = strdupz(value); - } - else if (!strcmp(name, "metrics")) { - simple_pattern_free(app->metrics); - app->metrics = simple_pattern_create(value, NULL, SIMPLE_PATTERN_EXACT); - } - else if (!strcmp(name, "private charts")) { - if (!strcmp(value, "yes") || !strcmp(value, "on")) - app->default_options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - else - app->default_options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - } - else if (!strcmp(name, "gaps when not collected")) { - if (!strcmp(value, "yes") || !strcmp(value, "on")) - app->default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - } - else if (!strcmp(name, "memory mode")) { - app->rrd_memory_mode = rrd_memory_mode_id(value); - } - else if (!strcmp(name, "history")) { - app->rrd_history_entries = atol(value); - if (app->rrd_history_entries < 5) - app->rrd_history_entries = 5; - } - else { - error("STATSD: ignoring line %zu ('%s') of file '%s/%s'. Unknown keyword for the [app] section.", line, name, path, filename); - continue; - } - } - else { - // parse [chart] members - - if(!strcmp(name, "name")) { - freez((void *)chart->name); - netdata_fix_chart_id(value); - chart->name = strdupz(value); - } - else if(!strcmp(name, "title")) { - freez((void *)chart->title); - chart->title = strdupz(value); - } - else if (!strcmp(name, "family")) { - freez((void *)chart->family); - chart->family = strdupz(value); - } - else if (!strcmp(name, "context")) { - freez((void *)chart->context); - netdata_fix_chart_id(value); - chart->context = strdupz(value); - } - else if (!strcmp(name, "units")) { - freez((void *)chart->units); - chart->units = strdupz(value); - } - else if (!strcmp(name, "priority")) { - chart->priority = atol(value); - } - else if (!strcmp(name, "type")) { - chart->chart_type = rrdset_type_id(value); - } - else if (!strcmp(name, "dimension")) { - // [pattern] metric [name [type [multiplier [divisor [options]]]]] - char *words[10]; - pluginsd_split_words(value, words, 10); - - int pattern = 0; - size_t i = 0; - char *metric_name = words[i++]; - - if(strcmp(metric_name, "pattern") == 0) { - metric_name = words[i++]; - pattern = 1; - } - - char *dim_name = words[i++]; - char *type = words[i++]; - char *multiplier = words[i++]; - char *divisor = words[i++]; - char *options = words[i++]; - - RRDDIM_FLAGS flags = RRDDIM_FLAG_NONE; - if(options && *options) { - if(strstr(options, "hidden") != NULL) flags |= RRDDIM_FLAG_HIDDEN; - if(strstr(options, "noreset") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS; - if(strstr(options, "nooverflow") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS; - } - - if(!pattern) { - if(app->dict) { - if(dim_name && *dim_name) { - char *n = dictionary_get(app->dict, dim_name); - if(n) dim_name = n; - } - else { - dim_name = dictionary_get(app->dict, metric_name); - } - } - - if(!dim_name || !*dim_name) - dim_name = metric_name; - } - - STATSD_APP_CHART_DIM *dim = add_dimension_to_app_chart( - app - , chart - , metric_name - , dim_name - , (multiplier && *multiplier)?str2l(multiplier):1 - , (divisor && *divisor)?str2l(divisor):1 - , flags - , string2valuetype(type, line, path, filename) - ); - - if(pattern) - dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT); - } - else { - error("STATSD: ignoring line %zu ('%s') of file '%s/%s'. Unknown keyword for the [%s] section.", line, name, path, filename, chart->id); - continue; - } - } - } - - freez(buffer); - fclose(fp); - return 0; -}
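The grammar accepted above maps one .conf file to one or more [app] sections, each followed by an optional [dictionary] and any number of chart sections. A minimal illustrative file (all names invented for the example):

    [app]
      name = myapp
      metrics = myapp.*
      private charts = no

    [dictionary]
      myapp.requests.ok = successful

    [requests]
      name = requests
      title = Requests served
      units = requests/s
      type = stacked
      dimension = myapp.requests.ok successful last 1 1
      dimension = pattern myapp.requests.error.* errors sum 1 1 hidden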
Unknown keyword for the [app] section.", line, name, path, filename); - continue; - } - } - else { - // parse [chart] members - - if(!strcmp(name, "name")) { - freez((void *)chart->name); - netdata_fix_chart_id(value); - chart->name = strdupz(value); - } - else if(!strcmp(name, "title")) { - freez((void *)chart->title); - chart->title = strdupz(value); - } - else if (!strcmp(name, "family")) { - freez((void *)chart->family); - chart->family = strdupz(value); - } - else if (!strcmp(name, "context")) { - freez((void *)chart->context); - netdata_fix_chart_id(value); - chart->context = strdupz(value); - } - else if (!strcmp(name, "units")) { - freez((void *)chart->units); - chart->units = strdupz(value); - } - else if (!strcmp(name, "priority")) { - chart->priority = atol(value); - } - else if (!strcmp(name, "type")) { - chart->chart_type = rrdset_type_id(value); - } - else if (!strcmp(name, "dimension")) { - // metric [name [type [multiplier [divisor [options]]]]] - char *words[10]; - pluginsd_split_words(value, words, 10); - - int pattern = 0; - size_t i = 0; - char *metric_name = words[i++]; - - if(strcmp(metric_name, "pattern") == 0) { - metric_name = words[i++]; - pattern = 1; - } - - char *dim_name = words[i++]; - char *type = words[i++]; - char *multiplier = words[i++]; - char *divisor = words[i++]; - char *options = words[i++]; - - RRDDIM_FLAGS flags = RRDDIM_FLAG_NONE; - if(options && *options) { - if(strstr(options, "hidden") != NULL) flags |= RRDDIM_FLAG_HIDDEN; - if(strstr(options, "noreset") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS; - if(strstr(options, "nooverflow") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS; - } - - if(!pattern) { - if(app->dict) { - if(dim_name && *dim_name) { - char *n = dictionary_get(app->dict, dim_name); - if(n) dim_name = n; - } - else { - dim_name = dictionary_get(app->dict, metric_name); - } - } - - if(!dim_name || !*dim_name) - dim_name = metric_name; - } - - STATSD_APP_CHART_DIM *dim = add_dimension_to_app_chart( - app - , chart - , metric_name - , dim_name - , (multiplier && *multiplier)?str2l(multiplier):1 - , (divisor && *divisor)?str2l(divisor):1 - , flags - , string2valuetype(type, line, path, filename) - ); - - if(pattern) - dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT); - } - else { - error("STATSD: ignoring line %zu ('%s') of file '%s/%s'. Unknown keyword for the [%s] section.", line, name, path, filename, chart->id); - continue; - } - } - } - - freez(buffer); - fclose(fp); - return 0; -} - -static void statsd_readdir(const char *path) { - size_t pathlen = strlen(path); - - debug(D_STATSD, "STATSD configuration reading directory '%s'", path); - - DIR *dir = opendir(path); - if (!dir) { - error("STATSD configuration cannot open directory '%s'.", path); - return; - } - - struct dirent *de = NULL; - while ((de = readdir(dir))) { - size_t len = strlen(de->d_name); - - if(de->d_type == DT_DIR - && ( - (de->d_name[0] == '.' && de->d_name[1] == '\0') - || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - )) { - debug(D_STATSD, "STATSD: ignoring directory '%s'", de->d_name); - continue; - } - - else if(de->d_type == DT_DIR) { - char *s = mallocz(pathlen + strlen(de->d_name) + 2); - strcpy(s, path); - strcat(s, "/"); - strcat(s, de->d_name); - statsd_readdir(s); - freez(s); - continue; - } - - else if((de->d_type == DT_LNK || de->d_type == DT_REG || de->d_type == DT_UNKNOWN) && - len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { - statsd_readfile(path, de->d_name, NULL, NULL, NULL); - } - - else debug(D_STATSD, "STATSD: ignoring file '%s'", de->d_name); - } - - closedir(dir); -} - -// -------------------------------------------------------------------------------------------------------------------- -// send metrics to netdata - in private charts - called from the main thread - -// extract chart type and chart id from metric name -static inline void statsd_get_metric_type_and_id(STATSD_METRIC *m, char *type, char *id, const char *defid, size_t len) { - char *s; - - snprintfz(type, len, "%s_%s_%s", STATSD_CHART_PREFIX, defid, m->name); - for(s = type; *s ;s++) - if(unlikely(*s == '.')) break; - - if(*s == '.') { - *s++ = '\0'; - strncpyz(id, s, len); - } - else { - strncpyz(id, defid, len); - } - - netdata_fix_chart_id(type); - netdata_fix_chart_id(id); -} - -static inline RRDSET *statsd_private_rrdset_create( - STATSD_METRIC *m - , const char *type - , const char *id - , const char *name - , const char *family - , const char *context - , const char *title - , const char *units - , long priority - , int update_every - , RRDSET_TYPE chart_type -) { - RRD_MEMORY_MODE memory_mode = statsd.private_charts_memory_mode; - long history = statsd.private_charts_rrd_history_entries; - - if(unlikely(statsd.private_charts >= statsd.max_private_charts)) { - debug(D_STATSD, "STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts has been reached.", m->name); - info("STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts (%zu) has been reached. 
Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts); - memory_mode = RRD_MEMORY_MODE_NONE; - history = 5; - } - - statsd.private_charts++; - RRDSET *st = rrdset_create_custom( - localhost // host - , type // type - , id // id - , name // name - , family // family - , context // context - , title // title - , units // units - , "statsd" // plugin - , "private_chart" // module - , priority // priority - , update_every // update every - , chart_type // chart type - , memory_mode // memory mode - , history // history - ); - rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST); - - if(statsd.private_charts_hidden) - rrdset_flag_set(st, RRDSET_FLAG_HIDDEN); - - // rrdset_flag_set(st, RRDSET_FLAG_DEBUG); - return st; -} - -static inline void statsd_private_chart_gauge(STATSD_METRIC *m) { - debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name); - - if(unlikely(!m->st)) { - char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; - statsd_get_metric_type_and_id(m, type, id, "gauge", RRD_ID_LENGTH_MAX); - - char context[RRD_ID_LENGTH_MAX + 1]; - snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_gauge.%s", m->name); - - char title[RRD_ID_LENGTH_MAX + 1]; - snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for gauge %s", m->name); - - m->st = statsd_private_rrdset_create( - m - , type - , id - , NULL // name - , "gauges" // family (submenu) - , context // context - , title // title - , "value" // units - , STATSD_CHART_PRIORITY - , statsd.update_every - , RRDSET_TYPE_LINE - ); - - m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - - if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) - m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(m->st); - - rrddim_set_by_pointer(m->st, m->rd_value, m->last); - - if(m->rd_count) - rrddim_set_by_pointer(m->st, m->rd_count, m->events); - - rrdset_done(m->st); -} - -static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) { - debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name); - - if(unlikely(!m->st)) { - char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; - statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX); - - char context[RRD_ID_LENGTH_MAX + 1]; - snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name); - - char title[RRD_ID_LENGTH_MAX + 1]; - snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name); - - m->st = statsd_private_rrdset_create( - m - , type - , id - , NULL // name - , family // family (submenu) - , context // context - , title // title - , "events/s" // units - , STATSD_CHART_PRIORITY - , statsd.update_every - , RRDSET_TYPE_AREA - ); - - m->rd_value = rrddim_add(m->st, dim, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) - m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(m->st); - - rrddim_set_by_pointer(m->st, m->rd_value, m->last); - - if(m->rd_count) - rrddim_set_by_pointer(m->st, m->rd_count, m->events); - - rrdset_done(m->st); -} - -static inline void statsd_private_chart_set(STATSD_METRIC *m) { - debug(D_STATSD, "updating private chart for set metric '%s'", m->name); - - if(unlikely(!m->st)) { - char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; - statsd_get_metric_type_and_id(m, type, id, "set", 
RRD_ID_LENGTH_MAX); - - char context[RRD_ID_LENGTH_MAX + 1]; - snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_set.%s", m->name); - - char title[RRD_ID_LENGTH_MAX + 1]; - snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for set %s", m->name); - - m->st = statsd_private_rrdset_create( - m - , type - , id - , NULL // name - , "sets" // family (submenu) - , context // context - , title // title - , "entries" // units - , STATSD_CHART_PRIORITY - , statsd.update_every - , RRDSET_TYPE_LINE - ); - - m->rd_value = rrddim_add(m->st, "set", "set size", 1, 1, RRD_ALGORITHM_ABSOLUTE); - - if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) - m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(m->st); - - rrddim_set_by_pointer(m->st, m->rd_value, m->last); - - if(m->rd_count) - rrddim_set_by_pointer(m->st, m->rd_count, m->events); - - rrdset_done(m->st); -} - -static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) { - debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name); - - if(unlikely(!m->st)) { - char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1]; - statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX); - - char context[RRD_ID_LENGTH_MAX + 1]; - snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name); - - char title[RRD_ID_LENGTH_MAX + 1]; - snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name); - - m->st = statsd_private_rrdset_create( - m - , type - , id - , NULL // name - , family // family (submenu) - , context // context - , title // title - , units // units - , STATSD_CHART_PRIORITY - , statsd.update_every - , RRDSET_TYPE_AREA - ); - - m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - m->rd_value = rrddim_add(m->st, "average", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE); - - if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT) - m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(m->st); - - rrddim_set_by_pointer(m->st, m->histogram.ext->rd_min, m->histogram.ext->last_min); - rrddim_set_by_pointer(m->st, m->histogram.ext->rd_max, m->histogram.ext->last_max); - rrddim_set_by_pointer(m->st, m->histogram.ext->rd_percentile, m->histogram.ext->last_percentile); - rrddim_set_by_pointer(m->st, m->histogram.ext->rd_median, m->histogram.ext->last_median); - rrddim_set_by_pointer(m->st, m->histogram.ext->rd_stddev, m->histogram.ext->last_stddev); - rrddim_set_by_pointer(m->st, m->histogram.ext->rd_sum, m->histogram.ext->last_sum); - rrddim_set_by_pointer(m->st, m->rd_value, m->last); - - if(m->rd_count) - rrddim_set_by_pointer(m->st, m->rd_count, m->events); - - rrdset_done(m->st); -} - -// 
-------------------------------------------------------------------------------------------------------------------- -// statsd flush metrics - -static inline void statsd_flush_gauge(STATSD_METRIC *m) { - debug(D_STATSD, "flushing gauge metric '%s'", m->name); - - int updated = 0; - if(m->count && !m->reset) { - m->last = (collected_number) (m->gauge.value * statsd.decimal_detail); - - m->reset = 1; - updated = 1; - } - - if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))) - statsd_private_chart_gauge(m); -} - -static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) { - debug(D_STATSD, "flushing %s metric '%s'", dim, m->name); - - int updated = 0; - if(m->count && !m->reset) { - m->last = m->counter.value; - - m->reset = 1; - updated = 1; - } - - if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))) - statsd_private_chart_counter_or_meter(m, dim, family); -} - -static inline void statsd_flush_counter(STATSD_METRIC *m) { - statsd_flush_counter_or_meter(m, "counter", "counters"); -} - -static inline void statsd_flush_meter(STATSD_METRIC *m) { - statsd_flush_counter_or_meter(m, "meter", "meters"); -} - -static inline void statsd_flush_set(STATSD_METRIC *m) { - debug(D_STATSD, "flushing set metric '%s'", m->name); - - int updated = 0; - if(m->count && !m->reset) { - m->last = (collected_number)m->set.unique; - - m->reset = 1; - updated = 1; - } - - if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))) - statsd_private_chart_set(m); -} - -static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) { - debug(D_STATSD, "flushing %s metric '%s'", dim, m->name); - - netdata_mutex_lock(&m->histogram.ext->mutex); - - if(unlikely(!m->histogram.ext->zeroed)) { - // reset the metrics - // if we collected anything, they will be updated below - // this ensures that we report zeros if nothing is collected - - m->histogram.ext->last_min = 0; - m->histogram.ext->last_max = 0; - m->last = 0; - m->histogram.ext->last_median = 0; - m->histogram.ext->last_stddev = 0; - m->histogram.ext->last_sum = 0; - m->histogram.ext->last_percentile = 0; - - m->histogram.ext->zeroed = 1; - } - - int updated = 0; - if(m->count && !m->reset && m->histogram.ext->used > 0) { - size_t len = m->histogram.ext->used; - LONG_DOUBLE *series = m->histogram.ext->values; - sort_series(series, len); - - m->histogram.ext->last_min = (collected_number)roundl(series[0] * statsd.decimal_detail); - m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * statsd.decimal_detail); - m->last = (collected_number)roundl(average(series, len) * statsd.decimal_detail); - m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * statsd.decimal_detail); - m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * statsd.decimal_detail); - m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * statsd.decimal_detail); - - size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0); - if(pct_len < 1) - m->histogram.ext->last_percentile = (collected_number)(series[0] * statsd.decimal_detail); - else - m->histogram.ext->last_percentile = 
(collected_number)roundl(series[pct_len - 1] * statsd.decimal_detail); - - debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " COLLECTED_NUMBER_FORMAT, - dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum); - - m->histogram.ext->zeroed = 0; - m->reset = 1; - updated = 1; - } - - if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))) - statsd_private_chart_timer_or_histogram(m, dim, family, units); - - netdata_mutex_unlock(&m->histogram.ext->mutex); -} - -static inline void statsd_flush_timer(STATSD_METRIC *m) { - statsd_flush_timer_or_histogram(m, "timer", "timers", "milliseconds"); -} - -static inline void statsd_flush_histogram(STATSD_METRIC *m) { - statsd_flush_timer_or_histogram(m, "histogram", "histograms", "value"); -} - -static inline RRD_ALGORITHM statsd_algorithm_for_metric(STATSD_METRIC *m) { - switch(m->type) { - default: - case STATSD_METRIC_TYPE_GAUGE: - case STATSD_METRIC_TYPE_SET: - case STATSD_METRIC_TYPE_TIMER: - case STATSD_METRIC_TYPE_HISTOGRAM: - return RRD_ALGORITHM_ABSOLUTE; - - case STATSD_METRIC_TYPE_METER: - case STATSD_METRIC_TYPE_COUNTER: - return RRD_ALGORITHM_INCREMENTAL; - } -} - -static inline void link_metric_to_app_dimension(STATSD_APP *app, STATSD_METRIC *m, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) { - if(dim->value_type == STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS) { - dim->value_ptr = &m->events; - dim->algorithm = RRD_ALGORITHM_INCREMENTAL; - } - else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) { - dim->algorithm = RRD_ALGORITHM_ABSOLUTE; - dim->divisor *= statsd.decimal_detail; - - switch(dim->value_type) { - case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: - // will never match - added to avoid warning - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: - case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: - dim->value_ptr = &m->last; - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: - dim->value_ptr = &m->histogram.ext->last_sum; - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: - dim->value_ptr = &m->histogram.ext->last_min; - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: - dim->value_ptr = &m->histogram.ext->last_max; - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: - dim->value_ptr = &m->histogram.ext->last_median; - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: - dim->value_ptr = &m->histogram.ext->last_percentile; - break; - - case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: - dim->value_ptr = &m->histogram.ext->last_stddev; - break; - } - } - else { - if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST) - error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name); - - dim->value_ptr = &m->last; - dim->algorithm = statsd_algorithm_for_metric(m); - - if(m->type == STATSD_METRIC_TYPE_GAUGE) - dim->divisor *= statsd.decimal_detail; - } - - if(unlikely(chart->st && dim->rd)) { - rrddim_set_algorithm(chart->st, dim->rd, dim->algorithm); - rrddim_set_multiplier(chart->st, dim->rd, dim->multiplier); - rrddim_set_divisor(chart->st, dim->rd, dim->divisor); - } 
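// the rrddim_set_*() calls above handle dimensions that were already
// created on a live chart before this metric was first seen: the
// algorithm, multiplier and divisor decided here are pushed to the
// existing RRDDIM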
- - chart->dimensions_linked_count++; - debug(D_STATSD, "metric '%s' of type %u linked with app '%s', chart '%s', dimension '%s', algorithm '%s'", m->name, m->type, app->name, chart->id, dim->name, rrd_algorithm_name(dim->algorithm)); -} - -static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC *m) { - (void)index; - - STATSD_APP *app; - for(app = statsd.apps; app ;app = app->next) { - if(unlikely(simple_pattern_matches(app->metrics, m->name))) { - debug(D_STATSD, "metric '%s' matches app '%s'", m->name, app->name); - - // the metric should get the options from the app - - if(app->default_options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED) - m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - else - m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - - if(app->default_options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED) - m->options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - else - m->options &= ~STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED; - - // check if there is a chart in this app, willing to get this metric - STATSD_APP_CHART *chart; - for(chart = app->charts; chart; chart = chart->next) { - - STATSD_APP_CHART_DIM *dim; - for(dim = chart->dimensions; dim ; dim = dim->next) { - if(unlikely(dim->metric_pattern)) { - size_t dim_name_len = strlen(dim->name); - size_t wildcarded_len = dim_name_len + strlen(m->name) + 1; - char wildcarded[wildcarded_len]; - - strcpy(wildcarded, dim->name); - char *ws = &wildcarded[dim_name_len]; - - if(simple_pattern_matches_extract(dim->metric_pattern, m->name, ws, wildcarded_len - dim_name_len)) { - - char *final_name = NULL; - - if(app->dict) { - if(likely(*wildcarded)) { - // use the name of the wildcarded string - final_name = dictionary_get(app->dict, wildcarded); - } - - if(unlikely(!final_name)) { - // use the name of the metric - final_name = dictionary_get(app->dict, m->name); - } - } - - if(unlikely(!final_name)) - final_name = wildcarded; - - add_dimension_to_app_chart( - app - , chart - , m->name - , final_name - , dim->multiplier - , dim->divisor - , dim->flags - , dim->value_type - ); - - // the new dimension is appended to the list - // so, it will be matched and linked later too - } - } - else if(!dim->value_ptr && dim->metric_hash == m->hash && !strcmp(dim->metric, m->name)) { - // we have a match - this metric should be linked to this dimension - link_metric_to_app_dimension(app, m, chart, dim); - } - } - - } - } - } -} - -static inline RRDDIM *statsd_add_dim_to_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) { - (void)app; - - // allow the same statsd metric to be added multiple times to the same chart - - STATSD_APP_CHART_DIM *tdim; - size_t count_same_metric = 0, count_same_metric_value_type = 0; - size_t pos_same_metric_value_type = 0; - - for (tdim = chart->dimensions; tdim && tdim->next; tdim = tdim->next) { - if (dim->metric_hash == tdim->metric_hash && !strcmp(dim->metric, tdim->metric)) { - count_same_metric++; - - if(dim->value_type == tdim->value_type) { - count_same_metric_value_type++; - if (tdim == dim) - pos_same_metric_value_type = count_same_metric_value_type; - } - } - } - - if(count_same_metric > 1) { - // the same metric is found multiple times - - size_t len = strlen(dim->metric) + 100; - char metric[ len + 1 ]; - - if(count_same_metric_value_type > 1) { - // the same metric, with the same value type, is added multiple times - snprintfz(metric, len, 
"%s_%s%zu", dim->metric, valuetype2string(dim->value_type), pos_same_metric_value_type); - } - else { - // the same metric, with different value type is added - snprintfz(metric, len, "%s_%s", dim->metric, valuetype2string(dim->value_type)); - } - - dim->rd = rrddim_add(chart->st, metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm); - if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags; - return dim->rd; - } - - dim->rd = rrddim_add(chart->st, dim->metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm); - if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags; - return dim->rd; -} - -static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart) { - debug(D_STATSD, "updating chart '%s' for app '%s'", chart->id, app->name); - - if(!chart->st) { - chart->st = rrdset_create_custom( - localhost // host - , app->name // type - , chart->id // id - , chart->name // name - , chart->family // family - , chart->context // context - , chart->title // title - , chart->units // units - , "statsd" // plugin - , chart->source // module - , chart->priority // priority - , statsd.update_every // update every - , chart->chart_type // chart type - , app->rrd_memory_mode // memory mode - , app->rrd_history_entries // history - ); - - rrdset_flag_set(chart->st, RRDSET_FLAG_STORE_FIRST); - // rrdset_flag_set(chart->st, RRDSET_FLAG_DEBUG); - } - else rrdset_next(chart->st); - - STATSD_APP_CHART_DIM *dim; - for(dim = chart->dimensions; dim ;dim = dim->next) { - if(likely(!dim->metric_pattern)) { - if (unlikely(!dim->rd)) - statsd_add_dim_to_app_chart(app, chart, dim); - - if (unlikely(dim->value_ptr)) { - debug(D_STATSD, "updating dimension '%s' (%s) of chart '%s' (%s) for app '%s' with value " COLLECTED_NUMBER_FORMAT, dim->name, dim->rd->id, chart->id, chart->st->id, app->name, *dim->value_ptr); - rrddim_set_by_pointer(chart->st, dim->rd, *dim->value_ptr); - } - } - } - - rrdset_done(chart->st); - debug(D_STATSD, "completed update of chart '%s' for app '%s'", chart->id, app->name); -} - -static inline void statsd_update_all_app_charts(void) { - // debug(D_STATSD, "updating app charts"); - - STATSD_APP *app; - for(app = statsd.apps; app ;app = app->next) { - // debug(D_STATSD, "updating charts for app '%s'", app->name); - - STATSD_APP_CHART *chart; - for(chart = app->charts; chart ;chart = chart->next) { - if(unlikely(chart->dimensions_linked_count)) { - statsd_update_app_chart(app, chart); - } - } - } - - // debug(D_STATSD, "completed update of app charts"); -} - -const char *statsd_metric_type_string(STATSD_METRIC_TYPE type) { - switch(type) { - case STATSD_METRIC_TYPE_COUNTER: return "counter"; - case STATSD_METRIC_TYPE_GAUGE: return "gauge"; - case STATSD_METRIC_TYPE_HISTOGRAM: return "histogram"; - case STATSD_METRIC_TYPE_METER: return "meter"; - case STATSD_METRIC_TYPE_SET: return "set"; - case STATSD_METRIC_TYPE_TIMER: return "timer"; - default: return "unknown"; - } -} - -static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_metric)(STATSD_METRIC *)) { - STATSD_METRIC *m; - for(m = index->first; m ; m = m->next) { - if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) { - log_access("NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name); - check_if_metric_is_for_app(index, m); - m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS; - } - - if(unlikely(!(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED))) { - if(statsd.private_charts >= statsd.max_private_charts_hard) { - 
debug(D_STATSD, "STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts has been reached.", m->name); - info("STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts); - m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - } - else { - if (simple_pattern_matches(statsd.charts_for, m->name)) { - debug(D_STATSD, "STATSD: metric '%s' will be charted.", m->name); - m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - } else { - debug(D_STATSD, "STATSD: metric '%s' will not be charted.", m->name); - m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED; - } - } - - m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED; - } - - flush_metric(m); - } -} - - -// -------------------------------------------------------------------------------------- -// statsd main thread - -static int statsd_listen_sockets_setup(void) { - return listen_sockets_setup(&statsd.sockets); -} - -static void statsd_main_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - info("cleaning up..."); - - if (statsd.collection_threads_status) { - int i; - for (i = 0; i < statsd.threads; i++) { - if(statsd.collection_threads_status[i].status) { - info("STATSD: stopping data collection thread %d...", i + 1); - netdata_thread_cancel(statsd.collection_threads_status[i].thread); - } - else { - info("STATSD: data collection thread %d found stopped.", i + 1); - } - } - } - - info("STATSD: closing sockets..."); - listen_sockets_close(&statsd.sockets); - - info("STATSD: cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *statsd_main(void *ptr) { - netdata_thread_cleanup_push(statsd_main_cleanup, ptr); - - // ---------------------------------------------------------------------------------------------------------------- - // statsd configuration - - statsd.enabled = config_get_boolean(CONFIG_SECTION_STATSD, "enabled", statsd.enabled); - - statsd.update_every = default_rrd_update_every; - statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); - if(statsd.update_every < default_rrd_update_every) { - error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. 
Using %d", statsd.update_every, default_rrd_update_every); - statsd.update_every = default_rrd_update_every; - } - -#ifdef HAVE_RECVMMSG - statsd.recvmmsg_size = (size_t)config_get_number(CONFIG_SECTION_STATSD, "udp messages to process at once", (long long)statsd.recvmmsg_size); -#endif - - statsd.charts_for = simple_pattern_create(config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT); - statsd.max_private_charts = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts allowed", (long long)statsd.max_private_charts); - statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts * 5); - statsd.private_charts_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_STATSD, "private charts memory mode", rrd_memory_mode_name(default_rrd_memory_mode))); - statsd.private_charts_rrd_history_entries = (int)config_get_number(CONFIG_SECTION_STATSD, "private charts history", default_rrd_history_entries); - statsd.decimal_detail = (size_t)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); - statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout); - statsd.private_charts_hidden = (int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden); - - statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile); - if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) { - error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile); - statsd.histogram_percentile = 95.0; - } - { - char buffer[100 + 1]; - snprintf(buffer, 100, "%0.1f%%", statsd.histogram_percentile); - statsd.histogram_percentile_str = strdupz(buffer); - } - - if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 1)) { - statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; - statsd.counters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; - statsd.meters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; - statsd.sets.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; - statsd.histograms.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; - statsd.timers.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; - } - - if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on gauges (deleteGauges)", 0)) - statsd.gauges.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on counters (deleteCounters)", 0)) - statsd.counters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on meters (deleteMeters)", 0)) - statsd.meters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on sets (deleteSets)", 0)) - statsd.sets.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on histograms (deleteHistograms)", 0)) - statsd.histograms.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - 
if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on timers (deleteTimers)", 0)) - statsd.timers.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED; - - size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_STATSD, "statsd server max TCP sockets", (long long int)(rlimit_nofile.rlim_cur / 4)); - -#ifdef STATSD_MULTITHREADED - statsd.threads = (int)config_get_number(CONFIG_SECTION_STATSD, "threads", processors); - if(statsd.threads < 1) { - error("STATSD: Invalid number of threads %d, using %d", statsd.threads, processors); - statsd.threads = processors; - config_set_number(CONFIG_SECTION_STATSD, "collector threads", statsd.threads); - } -#else - statsd.threads = 1; -#endif - - // read custom application definitions - { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/statsd.d", netdata_configured_config_dir); - statsd_readdir(filename); - } - - // ---------------------------------------------------------------------------------------------------------------- - // statsd setup - - if(!statsd.enabled) return NULL; - - statsd_listen_sockets_setup(); - if(!statsd.sockets.opened) { - error("STATSD: No statsd sockets to listen to. statsd will be disabled."); - goto cleanup; - } - - statsd.collection_threads_status = callocz((size_t)statsd.threads, sizeof(struct collection_thread_status)); - - int i; - for(i = 0; i < statsd.threads ;i++) { - statsd.collection_threads_status[i].max_sockets = max_sockets / statsd.threads; - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STATSD_COLLECTOR[%d]", i + 1); - netdata_thread_create(&statsd.collection_threads_status[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, statsd_collector_thread, &statsd.collection_threads_status[i]); - } - - // ---------------------------------------------------------------------------------------------------------------- - // statsd monitoring charts - - RRDSET *st_metrics = rrdset_create_localhost( - "netdata" - , "statsd_metrics" - , NULL - , "statsd" - , NULL - , "Metrics in the netdata statsd database" - , "metrics" - , "statsd" - , "stats" - , 132010 - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - RRDDIM *rd_metrics_gauge = rrddim_add(st_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - RRDDIM *rd_metrics_counter = rrddim_add(st_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - RRDDIM *rd_metrics_timer = rrddim_add(st_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - RRDDIM *rd_metrics_meter = rrddim_add(st_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - RRDDIM *rd_metrics_histogram = rrddim_add(st_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - RRDDIM *rd_metrics_set = rrddim_add(st_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - RRDSET *st_events = rrdset_create_localhost( - "netdata" - , "statsd_events" - , NULL - , "statsd" - , NULL - , "Events processed by the netdata statsd server" - , "events/s" - , "statsd" - , "stats" - , 132011 - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - RRDDIM *rd_events_gauge = rrddim_add(st_events, "gauges", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_counter = rrddim_add(st_events, "counters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_timer = rrddim_add(st_events, "timers", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_meter = rrddim_add(st_events, "meters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_histogram = rrddim_add(st_events, "histograms", NULL, 1, 1, 
RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_set = rrddim_add(st_events, "sets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_unknown = rrddim_add(st_events, "unknown", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_events_errors = rrddim_add(st_events, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - RRDSET *st_reads = rrdset_create_localhost( - "netdata" - , "statsd_reads" - , NULL - , "statsd" - , NULL - , "Read operations made by the netdata statsd server" - , "reads/s" - , "statsd" - , "stats" - , 132012 - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - RRDDIM *rd_reads_tcp = rrddim_add(st_reads, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_reads_udp = rrddim_add(st_reads, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - RRDSET *st_bytes = rrdset_create_localhost( - "netdata" - , "statsd_bytes" - , NULL - , "statsd" - , NULL - , "Bytes read by the netdata statsd server" - , "kilobits/s" - , "netdata" - , "stats" - , 132013 - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - - RRDSET *st_packets = rrdset_create_localhost( - "netdata" - , "statsd_packets" - , NULL - , "statsd" - , NULL - , "Network packets processed by the netdata statsd server" - , "packets/s" - , "netdata" - , "stats" - , 132014 - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - RRDDIM *rd_packets_tcp = rrddim_add(st_packets, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_packets_udp = rrddim_add(st_packets, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - RRDSET *st_tcp_connects = rrdset_create_localhost( - "netdata" - , "tcp_connects" - , NULL - , "statsd" - , NULL - , "statsd server TCP connects and disconnects" - , "events" - , "statsd" - , "stats" - , 132015 - , statsd.update_every - , RRDSET_TYPE_LINE - ); - RRDDIM *rd_tcp_connects = rrddim_add(st_tcp_connects, "connects", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_tcp_disconnects = rrddim_add(st_tcp_connects, "disconnects", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - RRDSET *st_tcp_connected = rrdset_create_localhost( - "netdata" - , "tcp_connected" - , NULL - , "statsd" - , NULL - , "statsd server TCP connected sockets" - , "connected" - , "statsd" - , "stats" - , 132016 - , statsd.update_every - , RRDSET_TYPE_LINE - ); - RRDDIM *rd_tcp_connected = rrddim_add(st_tcp_connected, "connected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - RRDSET *st_pcharts = rrdset_create_localhost( - "netdata" - , "private_charts" - , NULL - , "statsd" - , NULL - , "Private metric charts created by the netdata statsd server" - , "charts" - , "statsd" - , "stats" - , 132020 - , statsd.update_every - , RRDSET_TYPE_AREA - ); - RRDDIM *rd_pcharts = rrddim_add(st_pcharts, "charts", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - RRDSET *stcpu_thread = rrdset_create_localhost( - "netdata" - , "plugin_statsd_charting_cpu" - , NULL - , "statsd" - , "netdata.statsd_cpu" - , "NetData statsd charting thread CPU usage" - , "milliseconds/s" - , "statsd" - , "stats" - , 132001 - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - - RRDDIM *rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - RRDDIM *rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - struct rusage thread; - - for(i = 0; i < statsd.threads ;i++) { - char id[100 + 1]; 
- char title[100 + 1]; - - snprintfz(id, 100, "plugin_statsd_collector%d_cpu", i + 1); - snprintfz(title, 100, "NetData statsd collector thread No %d CPU usage", i + 1); - - statsd.collection_threads_status[i].st_cpu = rrdset_create_localhost( - "netdata" - , id - , NULL - , "statsd" - , "netdata.statsd_cpu" - , title - , "milliseconds/s" - , "statsd" - , "stats" - , 132002 + i - , statsd.update_every - , RRDSET_TYPE_STACKED - ); - - statsd.collection_threads_status[i].rd_user = rrddim_add(statsd.collection_threads_status[i].st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - statsd.collection_threads_status[i].rd_system = rrddim_add(statsd.collection_threads_status[i].st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - - // ---------------------------------------------------------------------------------------------------------------- - // statsd thread to turn metrics into charts - - usec_t step = statsd.update_every * USEC_PER_SEC; - heartbeat_t hb; - heartbeat_init(&hb); - while(!netdata_exit) { - usec_t hb_dt = heartbeat_next(&hb, step); - - statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge); - statsd_flush_index_metrics(&statsd.counters, statsd_flush_counter); - statsd_flush_index_metrics(&statsd.meters, statsd_flush_meter); - statsd_flush_index_metrics(&statsd.timers, statsd_flush_timer); - statsd_flush_index_metrics(&statsd.histograms, statsd_flush_histogram); - statsd_flush_index_metrics(&statsd.sets, statsd_flush_set); - - statsd_update_all_app_charts(); - - getrusage(RUSAGE_THREAD, &thread); - - if(unlikely(netdata_exit)) - break; - - if(likely(hb_dt)) { - rrdset_next(st_metrics); - rrdset_next(st_events); - rrdset_next(st_reads); - rrdset_next(st_bytes); - rrdset_next(st_packets); - rrdset_next(st_tcp_connects); - rrdset_next(st_tcp_connected); - rrdset_next(st_pcharts); - rrdset_next(stcpu_thread); - for(i = 0; i < statsd.threads ;i++) - rrdset_next(statsd.collection_threads_status[i].st_cpu); - } - - rrddim_set_by_pointer(st_metrics, rd_metrics_gauge, (collected_number)statsd.gauges.metrics); - rrddim_set_by_pointer(st_metrics, rd_metrics_counter, (collected_number)statsd.counters.metrics); - rrddim_set_by_pointer(st_metrics, rd_metrics_timer, (collected_number)statsd.timers.metrics); - rrddim_set_by_pointer(st_metrics, rd_metrics_meter, (collected_number)statsd.meters.metrics); - rrddim_set_by_pointer(st_metrics, rd_metrics_histogram, (collected_number)statsd.histograms.metrics); - rrddim_set_by_pointer(st_metrics, rd_metrics_set, (collected_number)statsd.sets.metrics); - rrdset_done(st_metrics); - - rrddim_set_by_pointer(st_events, rd_events_gauge, (collected_number)statsd.gauges.events); - rrddim_set_by_pointer(st_events, rd_events_counter, (collected_number)statsd.counters.events); - rrddim_set_by_pointer(st_events, rd_events_timer, (collected_number)statsd.timers.events); - rrddim_set_by_pointer(st_events, rd_events_meter, (collected_number)statsd.meters.events); - rrddim_set_by_pointer(st_events, rd_events_histogram, (collected_number)statsd.histograms.events); - rrddim_set_by_pointer(st_events, rd_events_set, (collected_number)statsd.sets.events); - rrddim_set_by_pointer(st_events, rd_events_unknown, (collected_number)statsd.unknown_types); - rrddim_set_by_pointer(st_events, rd_events_errors, (collected_number)statsd.socket_errors); - rrdset_done(st_events); - - rrddim_set_by_pointer(st_reads, rd_reads_tcp, (collected_number)statsd.tcp_socket_reads); - rrddim_set_by_pointer(st_reads, rd_reads_udp, 
(collected_number)statsd.udp_socket_reads); - rrdset_done(st_reads); - - rrddim_set_by_pointer(st_bytes, rd_bytes_tcp, (collected_number)statsd.tcp_bytes_read); - rrddim_set_by_pointer(st_bytes, rd_bytes_udp, (collected_number)statsd.udp_bytes_read); - rrdset_done(st_bytes); - - rrddim_set_by_pointer(st_packets, rd_packets_tcp, (collected_number)statsd.tcp_packets_received); - rrddim_set_by_pointer(st_packets, rd_packets_udp, (collected_number)statsd.udp_packets_received); - rrdset_done(st_packets); - - rrddim_set_by_pointer(st_tcp_connects, rd_tcp_connects, (collected_number)statsd.tcp_socket_connects); - rrddim_set_by_pointer(st_tcp_connects, rd_tcp_disconnects, (collected_number)statsd.tcp_socket_disconnects); - rrdset_done(st_tcp_connects); - - rrddim_set_by_pointer(st_tcp_connected, rd_tcp_connected, (collected_number)statsd.tcp_socket_connected); - rrdset_done(st_tcp_connected); - - rrddim_set_by_pointer(st_pcharts, rd_pcharts, (collected_number)statsd.private_charts); - rrdset_done(st_pcharts); - - rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); - rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); - rrdset_done(stcpu_thread); - - for(i = 0; i < statsd.threads ;i++) { - rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_user, statsd.collection_threads_status[i].rusage.ru_utime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_utime.tv_usec); - rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_system, statsd.collection_threads_status[i].rusage.ru_stime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_stime.tv_usec); - rrdset_done(statsd.collection_threads_status[i].st_cpu); - } - } - -cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/statsd.h b/src/statsd.h deleted file mode 100644 index 17af098e8..000000000 --- a/src/statsd.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef NETDATA_STATSD_H -#define NETDATA_STATSD_H - -#define STATSD_LISTEN_PORT 8125 -#define STATSD_LISTEN_BACKLOG 4096 - -extern void *statsd_main(void *ptr); - -#endif //NETDATA_STATSD_H diff --git a/src/storage_number.c b/src/storage_number.c deleted file mode 100644 index c7bbaa8d9..000000000 --- a/src/storage_number.c +++ /dev/null @@ -1,231 +0,0 @@ -#include "common.h" - -storage_number pack_storage_number(calculated_number value, uint32_t flags) -{ - // bit 32 = sign 0:positive, 1:negative - // bit 31 = 0:divide, 1:multiply - // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total) - // bit 27, 26, 25 flags - // bit 24 to bit 1 = the value - - storage_number r = get_storage_number_flags(flags); - if(!value) return r; - - int m = 0; - calculated_number n = value; - - // if the value is negative - // add the sign bit and make it positive - if(n < 0) { - r += (1 << 31); // the sign bit 32 - n = -n; - } - - // make its integer part fit in 0x00ffffff - // by dividing it by 10 up to 7 times - // and increasing the multiplier - while(m < 7 && n > (calculated_number)0x00ffffff) { - n /= 10; - m++; - } - - if(m) { - // the value was too big and we divided it - // so we add a multiplier to unpack it - r += (1 << 30) + (m << 27); // the multiplier m - - if(n > (calculated_number)0x00ffffff) { - #ifdef NETDATA_INTERNAL_CHECKS - error("Number " 
CALCULATED_NUMBER_FORMAT " is too big.", value); - #endif - r += 0x00ffffff; - return r; - } - } - else { - // 0x0019999e is the number that can be multiplied - // by 10 to give 0x00ffffff - // while the value is below 0x0019999e we can - // multiply it by 10, up to 7 times, increasing - // the multiplier - while(m < 7 && n < (calculated_number)0x0019999e) { - n *= 10; - m++; - } - - // the value was small enough and we multiplied it - // so we add a divider to unpack it - r += (0 << 30) + (m << 27); // the divider m - } - -#ifdef STORAGE_WITH_MATH - // without this there are rounding problems - // example: 0.9 becomes 0.89 - r += lrint((double) n); -#else - r += (storage_number)n; -#endif - - return r; -} - -calculated_number unpack_storage_number(storage_number value) -{ - if(!value) return 0; - - int sign = 0, exp = 0; - - value ^= get_storage_number_flags(value); - - if(value & (1 << 31)) { - sign = 1; - value ^= 1 << 31; - } - - if(value & (1 << 30)) { - exp = 1; - value ^= 1 << 30; - } - - int mul = value >> 27; - value ^= mul << 27; - - calculated_number n = value; - - // fprintf(stderr, "UNPACK: %08X, sign = %d, exp = %d, mul = %d, n = " CALCULATED_NUMBER_FORMAT "\n", value, sign, exp, mul, n); - - while(mul > 0) { - if(exp) n *= 10; - else n /= 10; - mul--; - } - - if(sign) n = -n; - return n; -} - -/* -int print_calculated_number(char *str, calculated_number value) -{ - char *wstr = str; - - int sign = (value < 0) ? 1 : 0; - if(sign) value = -value; - -#ifdef STORAGE_WITH_MATH - // without llrintl() there are rounding problems - // for example 0.9 becomes 0.89 - unsigned long long uvalue = (unsigned long long int) llrintl(value * (calculated_number)100000); -#else - unsigned long long uvalue = value * (calculated_number)100000; -#endif - - wstr = print_number_llu_r_smart(str, uvalue); - - // make sure we have 6 bytes at least - while((wstr - str) < 6) *wstr++ = '0'; - - // put the sign back - if(sign) *wstr++ = '-'; - - // reverse it - char *begin = str, *end = --wstr, aux; - while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux; - // wstr--; - // strreverse(str, wstr); - - // remove trailing zeros - int decimal = 5; - while(decimal > 0 && *wstr == '0') { - *wstr-- = '\0'; - decimal--; - } - - // terminate it, one position to the right - // to let space for a dot - wstr[2] = '\0'; - - // make space for the dot - int i; - for(i = 0; i < decimal ;i++) { - wstr[1] = wstr[0]; - wstr--; - } - - // put the dot - if(wstr[2] == '\0') { wstr[1] = '\0'; decimal--; } - else wstr[1] = '.'; - - // return the buffer length - return (int) ((wstr - str) + 2 + decimal ); -} -*/ - -int print_calculated_number(char *str, calculated_number value) { - // info("printing number " CALCULATED_NUMBER_FORMAT, value); - char integral_str[50], fractional_str[50]; - - char *wstr = str; - - if(unlikely(value < 0)) { - *wstr++ = '-'; - value = -value; - } - - calculated_number integral, fractional; - -#ifdef STORAGE_WITH_MATH - fractional = calculated_number_modf(value, &integral) * 10000000.0; -#else - fractional = ((unsigned long long)(value * 10000000ULL) % 10000000ULL); -#endif - - unsigned long long integral_int = (unsigned long long)integral; - unsigned long long fractional_int = (unsigned long long)calculated_number_llrint(fractional); - if(unlikely(fractional_int >= 10000000)) { - integral_int += 1; - fractional_int -= 10000000; - } - - // info("integral " CALCULATED_NUMBER_FORMAT " (%llu), fractional " CALCULATED_NUMBER_FORMAT " (%llu)", integral, integral_int, fractional, fractional_int); - 
- char *istre; - if(unlikely(integral_int == 0)) { - integral_str[0] = '0'; - istre = &integral_str[1]; - } - else - // convert the integral part to string (reversed) - istre = print_number_llu_r_smart(integral_str, integral_int); - - // copy reversed the integral string - istre--; - while( istre >= integral_str ) *wstr++ = *istre--; - - if(likely(fractional_int != 0)) { - // add a dot - *wstr++ = '.'; - - // convert the fractional part to string (reversed) - char *fstre = print_number_llu_r_smart(fractional_str, fractional_int); - - // prepend zeros to reach 7 digits length - int decimal = 7; - int len = (int)(fstre - fractional_str); - while(len < decimal) { - *wstr++ = '0'; - len++; - } - - char *begin = fractional_str; - while(begin < fstre && *begin == '0') begin++; - - // copy reversed the fractional string - fstre--; - while( fstre >= begin ) *wstr++ = *fstre--; - } - - *wstr = '\0'; - // info("printed number '%s'", str); - return (int)(wstr - str); -} diff --git a/src/storage_number.h b/src/storage_number.h deleted file mode 100644 index ef81863fd..000000000 --- a/src/storage_number.h +++ /dev/null @@ -1,88 +0,0 @@ -#ifndef NETDATA_STORAGE_NUMBER_H -#define NETDATA_STORAGE_NUMBER_H - -#ifdef NETDATA_WITHOUT_LONG_DOUBLE - -#define powl pow -#define modfl modf -#define llrintl llrint -#define roundl round -#define sqrtl sqrt -#define copysignl copysign -#define strtold strtod - -typedef double calculated_number; -#define CALCULATED_NUMBER_FORMAT "%0.7f" -#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0f" -#define CALCULATED_NUMBER_FORMAT_AUTO "%f" - -#define LONG_DOUBLE_MODIFIER "f" -typedef double LONG_DOUBLE; - -#else - -typedef long double calculated_number; -#define CALCULATED_NUMBER_FORMAT "%0.7Lf" -#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0Lf" -#define CALCULATED_NUMBER_FORMAT_AUTO "%Lf" - -#define LONG_DOUBLE_MODIFIER "Lf" -typedef long double LONG_DOUBLE; - -#endif - -//typedef long long calculated_number; -//#define CALCULATED_NUMBER_FORMAT "%lld" - -typedef long long collected_number; -#define COLLECTED_NUMBER_FORMAT "%lld" - -/* -typedef long double collected_number; -#define COLLECTED_NUMBER_FORMAT "%0.7Lf" -*/ - -#define calculated_number_modf(x, y) modfl(x, y) -#define calculated_number_llrint(x) llrintl(x) -#define calculated_number_round(x) roundl(x) -#define calculated_number_fabs(x) fabsl(x) -#define calculated_number_epsilon (calculated_number)0.0000001 - -#define calculated_number_equal(a, b) (calculated_number_fabs((a) - (b)) < calculated_number_epsilon) - -typedef uint32_t storage_number; -#define STORAGE_NUMBER_FORMAT "%u" - -#define SN_NOT_EXISTS (0x0 << 24) -#define SN_EXISTS (0x1 << 24) -#define SN_EXISTS_RESET (0x2 << 24) -#define SN_EXISTS_UNDEF1 (0x3 << 24) -#define SN_EXISTS_UNDEF2 (0x4 << 24) -#define SN_EXISTS_UNDEF3 (0x5 << 24) -#define SN_EXISTS_UNDEF4 (0x6 << 24) - -#define SN_FLAGS_MASK (~(0x6 << 24)) - -// extract the flags -#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (2 << 24)) | (((storage_number)(value)) & (4 << 24))) -#define SN_EMPTY_SLOT 0x00000000 - -// checks -#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0) -#define did_storage_number_reset(value) ((get_storage_number_flags(value) == SN_EXISTS_RESET)?1:0) - -storage_number pack_storage_number(calculated_number value, uint32_t flags); -calculated_number unpack_storage_number(storage_number value); - -int print_calculated_number(char *str, calculated_number value); - -#define 
STORAGE_NUMBER_POSITIVE_MAX (167772150000000.0) -#define STORAGE_NUMBER_POSITIVE_MIN (0.0000001) -#define STORAGE_NUMBER_NEGATIVE_MAX (-0.0000001) -#define STORAGE_NUMBER_NEGATIVE_MIN (-167772150000000.0) - -// accepted accuracy loss -#define ACCURACY_LOSS 0.0001 -#define accuracy_loss(t1, t2) (((t1) == (t2) || (t1) == 0.0 || (t2) == 0.0) ? 0.0 : (100.0 - (((t1) > (t2)) ? ((t2) * 100.0 / (t1) ) : ((t1) * 100.0 / (t2))))) - -#endif /* NETDATA_STORAGE_NUMBER_H */ diff --git a/src/sys_devices_system_edac_mc.c b/src/sys_devices_system_edac_mc.c deleted file mode 100644 index caa16192e..000000000 --- a/src/sys_devices_system_edac_mc.c +++ /dev/null @@ -1,204 +0,0 @@ -#include "common.h" - -struct mc { - char *name; - char ce_updated; - char ue_updated; - - char *ce_count_filename; - char *ue_count_filename; - - procfile *ce_ff; - procfile *ue_ff; - - collected_number ce_count; - collected_number ue_count; - - RRDDIM *ce_rd; - RRDDIM *ue_rd; - - struct mc *next; -}; -static struct mc *mc_root = NULL; - -static void find_all_mc() { - char name[FILENAME_MAX + 1]; - snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/edac/mc"); - char *dirname = config_get("plugin:proc:/sys/devices/system/edac/mc", "directory to monitor", name); - - DIR *dir = opendir(dirname); - if(unlikely(!dir)) { - error("Cannot read ECC memory errors directory '%s'", dirname); - return; - } - - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR && de->d_name[0] == 'm' && de->d_name[1] == 'c' && isdigit(de->d_name[2])) { - struct mc *m = callocz(1, sizeof(struct mc)); - m->name = strdupz(de->d_name); - - struct stat st; - - snprintfz(name, FILENAME_MAX, "%s/%s/ce_count", dirname, de->d_name); - if(stat(name, &st) != -1) - m->ce_count_filename = strdupz(name); - - snprintfz(name, FILENAME_MAX, "%s/%s/ue_count", dirname, de->d_name); - if(stat(name, &st) != -1) - m->ue_count_filename = strdupz(name); - - if(!m->ce_count_filename && !m->ue_count_filename) { - freez(m->name); - freez(m); - } - else { - m->next = mc_root; - mc_root = m; - } - } - } - - closedir(dir); -} - -int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) { - (void)dt; - - if(unlikely(mc_root == NULL)) { - find_all_mc(); - if(unlikely(mc_root == NULL)) - return 1; - } - - static int do_ce = -1, do_ue = -1; - calculated_number ce_sum = 0, ue_sum = 0; - struct mc *m; - - if(unlikely(do_ce == -1)) { - do_ce = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory correctable errors", CONFIG_BOOLEAN_AUTO); - do_ue = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory uncorrectable errors", CONFIG_BOOLEAN_AUTO); - } - - if(do_ce != CONFIG_BOOLEAN_NO) { - for(m = mc_root; m; m = m->next) { - if(m->ce_count_filename) { - m->ce_updated = 0; - - if(unlikely(!m->ce_ff)) { - m->ce_ff = procfile_open(m->ce_count_filename, " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!m->ce_ff)) - continue; - } - - m->ce_ff = procfile_readall(m->ce_ff); - if(unlikely(!m->ce_ff || procfile_lines(m->ce_ff) < 1 || procfile_linewords(m->ce_ff, 0) < 1)) - continue; - - m->ce_count = str2ull(procfile_lineword(m->ce_ff, 0, 0)); - ce_sum += m->ce_count; - m->ce_updated = 1; - } - } - } - - if(do_ue != CONFIG_BOOLEAN_NO) { - for(m = mc_root; m; m = m->next) { - if(m->ue_count_filename) { - m->ue_updated = 0; - - if(unlikely(!m->ue_ff)) { - m->ue_ff = procfile_open(m->ue_count_filename, " \t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!m->ue_ff)) - 
continue; - } - - m->ue_ff = procfile_readall(m->ue_ff); - if(unlikely(!m->ue_ff || procfile_lines(m->ue_ff) < 1 || procfile_linewords(m->ue_ff, 0) < 1)) - continue; - - m->ue_count = str2ull(procfile_lineword(m->ue_ff, 0, 0)); - ue_sum += m->ue_count; - m->ue_updated = 1; - } - } - } - - // -------------------------------------------------------------------- - - if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && ce_sum > 0)) { - do_ce = CONFIG_BOOLEAN_YES; - - static RRDSET *ce_st = NULL; - - if(unlikely(!ce_st)) { - ce_st = rrdset_create_localhost( - "mem" - , "ecc_ce" - , NULL - , "ecc" - , NULL - , "ECC Memory Correctable Errors" - , "errors" - , "proc" - , "/sys/devices/system/edac/mc" - , NETDATA_CHART_PRIO_MEM_HW + 50 - , update_every - , RRDSET_TYPE_LINE - ); - } - else - rrdset_next(ce_st); - - for(m = mc_root; m; m = m->next) { - if (m->ce_count_filename && m->ce_updated) { - if(unlikely(!m->ce_rd)) - m->ce_rd = rrddim_add(ce_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(ce_st, m->ce_rd, m->ce_count); - } - } - - rrdset_done(ce_st); - } - - // -------------------------------------------------------------------- - - if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && ue_sum > 0)) { - do_ue = CONFIG_BOOLEAN_YES; - - static RRDSET *ue_st = NULL; - - if(unlikely(!ue_st)) { - ue_st = rrdset_create_localhost( - "mem" - , "ecc_ue" - , NULL - , "ecc" - , NULL - , "ECC Memory Uncorrectable Errors" - , "errors" - , "proc" - , "/sys/devices/system/edac/mc" - , NETDATA_CHART_PRIO_MEM_HW + 60 - , update_every - , RRDSET_TYPE_LINE - ); - } - else - rrdset_next(ue_st); - - for(m = mc_root; m; m = m->next) { - if (m->ue_count_filename && m->ue_updated) { - if(unlikely(!m->ue_rd)) - m->ue_rd = rrddim_add(ue_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(ue_st, m->ue_rd, m->ue_count); - } - } - - rrdset_done(ue_st); - } - - return 0; -} diff --git a/src/sys_devices_system_node.c b/src/sys_devices_system_node.c deleted file mode 100644 index d04c8dc30..000000000 --- a/src/sys_devices_system_node.c +++ /dev/null @@ -1,161 +0,0 @@ -#include "common.h" - -struct node { - char *name; - char *numastat_filename; - procfile *numastat_ff; - RRDSET *numastat_st; - struct node *next; -}; -static struct node *numa_root = NULL; - -static int find_all_nodes() { - int numa_node_count = 0; - char name[FILENAME_MAX + 1]; - snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); - char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); - - DIR *dir = opendir(dirname); - if(!dir) { - error("Cannot read NUMA node directory '%s'", dirname); - return 0; - } - - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type != DT_DIR) - continue; - - if(strncmp(de->d_name, "node", 4) != 0) - continue; - - if(!isdigit(de->d_name[4])) - continue; - - numa_node_count++; - - struct node *m = callocz(1, sizeof(struct node)); - m->name = strdupz(de->d_name); - - struct stat st; - - snprintfz(name, FILENAME_MAX, "%s/%s/numastat", dirname, de->d_name); - if(stat(name, &st) == -1) { - freez(m->name); - freez(m); - continue; - } - - m->numastat_filename = strdupz(name); - - m->next = numa_root; - numa_root = m; - } - - closedir(dir); - - return numa_node_count; -} - -int do_proc_sys_devices_system_node(int update_every, usec_t dt) { - (void)dt; - - static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, 
hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0; - static int do_numastat = -1, numa_node_count = 0; - struct node *m; - - if(unlikely(numa_root == NULL)) { - numa_node_count = find_all_nodes(); - if(unlikely(numa_root == NULL)) - return 1; - } - - if(unlikely(do_numastat == -1)) { - do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO); - - hash_local_node = simple_hash("local_node"); - hash_numa_foreign = simple_hash("numa_foreign"); - hash_interleave_hit = simple_hash("interleave_hit"); - hash_other_node = simple_hash("other_node"); - hash_numa_hit = simple_hash("numa_hit"); - hash_numa_miss = simple_hash("numa_miss"); - } - - if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) { - for(m = numa_root; m; m = m->next) { - if(m->numastat_filename) { - - if(unlikely(!m->numastat_ff)) { - m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT); - - if(unlikely(!m->numastat_ff)) - continue; - } - - m->numastat_ff = procfile_readall(m->numastat_ff); - if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1)) - continue; - - if(unlikely(!m->numastat_st)) { - m->numastat_st = rrdset_create_localhost( - "mem" - , m->name - , NULL - , "numa" - , NULL - , "NUMA events" - , "events/s" - , "proc" - , "/sys/devices/system/node" - , NETDATA_CHART_PRIO_MEM_NUMA + 10 - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL); - - rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL); - - } - else rrdset_next(m->numastat_st); - - size_t lines = procfile_lines(m->numastat_ff), l; - for(l = 0; l < lines; l++) { - size_t words = procfile_linewords(m->numastat_ff, l); - - if(unlikely(words < 2)) { - if(unlikely(words)) - error("Cannot read %s numastat line %zu. 
Expected 2 params, read %zu.", m->name, l, words); - continue; - } - - char *name = procfile_lineword(m->numastat_ff, l, 0); - char *value = procfile_lineword(m->numastat_ff, l, 1); - - if (unlikely(!name || !*name || !value || !*value)) - continue; - - uint32_t hash = simple_hash(name); - if(likely( - (hash == hash_numa_hit && !strcmp(name, "numa_hit")) - || (hash == hash_numa_miss && !strcmp(name, "numa_miss")) - || (hash == hash_local_node && !strcmp(name, "local_node")) - || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign")) - || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit")) - || (hash == hash_other_node && !strcmp(name, "other_node")) - )) - rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value)); - } - - rrdset_done(m->numastat_st); - } - } - } - - return 0; -} diff --git a/src/sys_fs_btrfs.c b/src/sys_fs_btrfs.c deleted file mode 100644 index a8dfb5c91..000000000 --- a/src/sys_fs_btrfs.c +++ /dev/null @@ -1,714 +0,0 @@ -#include "common.h" - -typedef struct btrfs_disk { - char *name; - uint32_t hash; - int exists; - - char *size_filename; - char *hw_sector_size_filename; - unsigned long long size; - unsigned long long hw_sector_size; - - struct btrfs_disk *next; -} BTRFS_DISK; - -typedef struct btrfs_node { - int exists; - int logged_error; - - char *id; - uint32_t hash; - - char *label; - - // unsigned long long int sectorsize; - // unsigned long long int nodesize; - // unsigned long long int quota_override; - - #define declare_btrfs_allocation_section_field(SECTION, FIELD) \ - char *allocation_ ## SECTION ## _ ## FIELD ## _filename; \ - unsigned long long int allocation_ ## SECTION ## _ ## FIELD; - - #define declare_btrfs_allocation_field(FIELD) \ - char *allocation_ ## FIELD ## _filename; \ - unsigned long long int allocation_ ## FIELD; - - RRDSET *st_allocation_disks; - RRDDIM *rd_allocation_disks_unallocated; - RRDDIM *rd_allocation_disks_data_used; - RRDDIM *rd_allocation_disks_data_free; - RRDDIM *rd_allocation_disks_metadata_used; - RRDDIM *rd_allocation_disks_metadata_free; - RRDDIM *rd_allocation_disks_system_used; - RRDDIM *rd_allocation_disks_system_free; - unsigned long long all_disks_total; - - RRDSET *st_allocation_data; - RRDDIM *rd_allocation_data_free; - RRDDIM *rd_allocation_data_used; - declare_btrfs_allocation_section_field(data, total_bytes) - declare_btrfs_allocation_section_field(data, bytes_used) - declare_btrfs_allocation_section_field(data, disk_total) - declare_btrfs_allocation_section_field(data, disk_used) - - RRDSET *st_allocation_metadata; - RRDDIM *rd_allocation_metadata_free; - RRDDIM *rd_allocation_metadata_used; - RRDDIM *rd_allocation_metadata_reserved; - declare_btrfs_allocation_section_field(metadata, total_bytes) - declare_btrfs_allocation_section_field(metadata, bytes_used) - declare_btrfs_allocation_section_field(metadata, disk_total) - declare_btrfs_allocation_section_field(metadata, disk_used) - //declare_btrfs_allocation_field(global_rsv_reserved) - declare_btrfs_allocation_field(global_rsv_size) - - RRDSET *st_allocation_system; - RRDDIM *rd_allocation_system_free; - RRDDIM *rd_allocation_system_used; - declare_btrfs_allocation_section_field(system, total_bytes) - declare_btrfs_allocation_section_field(system, bytes_used) - declare_btrfs_allocation_section_field(system, disk_total) - declare_btrfs_allocation_section_field(system, disk_used) - - BTRFS_DISK *disks; - - struct btrfs_node *next; -} BTRFS_NODE; - -static BTRFS_NODE *nodes = NULL; - -static inline void 
btrfs_free_disk(BTRFS_DISK *d) { - freez(d->name); - freez(d->size_filename); - freez(d->hw_sector_size_filename); - freez(d); -} - -static inline void btrfs_free_node(BTRFS_NODE *node) { - // info("BTRFS: destroying '%s'", node->id); - - if(node->st_allocation_disks) - rrdset_is_obsolete(node->st_allocation_disks); - - if(node->st_allocation_data) - rrdset_is_obsolete(node->st_allocation_data); - - if(node->st_allocation_metadata) - rrdset_is_obsolete(node->st_allocation_metadata); - - if(node->st_allocation_system) - rrdset_is_obsolete(node->st_allocation_system); - - freez(node->allocation_data_bytes_used_filename); - freez(node->allocation_data_total_bytes_filename); - - freez(node->allocation_metadata_bytes_used_filename); - freez(node->allocation_metadata_total_bytes_filename); - - freez(node->allocation_system_bytes_used_filename); - freez(node->allocation_system_total_bytes_filename); - - while(node->disks) { - BTRFS_DISK *d = node->disks; - node->disks = node->disks->next; - btrfs_free_disk(d); - } - - freez(node->label); - freez(node->id); - freez(node); -} - -static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) { - char filename[FILENAME_MAX + 1]; - - node->all_disks_total = 0; - - BTRFS_DISK *d; - for(d = node->disks ; d ; d = d->next) - d->exists = 0; - - DIR *dir = opendir(path); - if (!dir) { - if(!node->logged_error) { - error("BTRFS: Cannot open directory '%s'.", path); - node->logged_error = 1; - } - return 1; - } - node->logged_error = 0; - - struct dirent *de = NULL; - while ((de = readdir(dir))) { - if (de->d_type != DT_LNK - || !strcmp(de->d_name, ".") - || !strcmp(de->d_name, "..") - ) { - // info("BTRFS: ignoring '%s'", de->d_name); - continue; - } - - uint32_t hash = simple_hash(de->d_name); - - // -------------------------------------------------------------------- - // search for it - - for(d = node->disks ; d ; d = d->next) { - if(hash == d->hash && !strcmp(de->d_name, d->name)) - break; - } - - // -------------------------------------------------------------------- - // did we find it? 
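The search loop just above is the lookup pattern this file uses everywhere: each list node caches a 32-bit simple_hash() of its name, so the scan compares integers first and only falls back to strcmp() on a hash match; the miss case that follows then allocates a new node. A minimal self-contained sketch of the idea — the djb2-style hash below is a stand-in for illustration, not netdata's actual simple_hash() implementation:

    #include <stdint.h>
    #include <string.h>

    struct item {
        char *name;
        uint32_t hash;            /* cached once, at insert time */
        struct item *next;
    };

    /* stand-in for simple_hash(); any stable 32-bit string hash works */
    static uint32_t simple_hash32(const char *s) {
        uint32_t h = 5381;
        while(*s) h = h * 33 + (uint8_t)*s++;
        return h;
    }

    static struct item *find(struct item *head, const char *name) {
        uint32_t hash = simple_hash32(name);
        struct item *i;
        for(i = head; i; i = i->next)
            if(hash == i->hash && !strcmp(name, i->name))   /* integer check first */
                return i;
        return NULL;
    }
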
- - if(!d) { - d = callocz(sizeof(BTRFS_DISK), 1); - - d->name = strdupz(de->d_name); - d->hash = simple_hash(d->name); - - snprintfz(filename, FILENAME_MAX, "%s/%s/size", path, de->d_name); - d->size_filename = strdupz(filename); - - // for disks - snprintfz(filename, FILENAME_MAX, "%s/%s/queue/hw_sector_size", path, de->d_name); - struct stat sb; - if(stat(filename, &sb) == -1) - // for partitions - snprintfz(filename, FILENAME_MAX, "%s/%s/../queue/hw_sector_size", path, de->d_name); - - d->hw_sector_size_filename = strdupz(filename); - - // link it - d->next = node->disks; - node->disks = d; - } - - d->exists = 1; - - - // -------------------------------------------------------------------- - // update the values - - if(read_single_number_file(d->size_filename, &d->size) != 0) { - error("BTRFS: failed to read '%s'", d->size_filename); - d->exists = 0; - continue; - } - - if(read_single_number_file(d->hw_sector_size_filename, &d->hw_sector_size) != 0) { - error("BTRFS: failed to read '%s'", d->hw_sector_size_filename); - d->exists = 0; - continue; - } - - node->all_disks_total += d->size * d->hw_sector_size; - } - closedir(dir); - - // ------------------------------------------------------------------------ - // cleanup - - BTRFS_DISK *last = NULL; - d = node->disks; - - while(d) { - if(unlikely(!d->exists)) { - if(unlikely(node->disks == d)) { - node->disks = d->next; - btrfs_free_disk(d); - d = node->disks; - last = NULL; - } - else { - last->next = d->next; - btrfs_free_disk(d); - d = last->next; - } - - continue; - } - - last = d; - d = d->next; - } - - return 0; -} - - -static inline int find_all_btrfs_pools(const char *path) { - static int logged_error = 0; - char filename[FILENAME_MAX + 1]; - - BTRFS_NODE *node; - for(node = nodes ; node ; node = node->next) - node->exists = 0; - - DIR *dir = opendir(path); - if (!dir) { - if(!logged_error) { - error("BTRFS: Cannot open directory '%s'.", path); - logged_error = 1; - } - return 1; - } - logged_error = 0; - - struct dirent *de = NULL; - while ((de = readdir(dir))) { - if(de->d_type != DT_DIR - || !strcmp(de->d_name, ".") - || !strcmp(de->d_name, "..") - || !strcmp(de->d_name, "features") - ) { - // info("BTRFS: ignoring '%s'", de->d_name); - continue; - } - - uint32_t hash = simple_hash(de->d_name); - - // search for it - for(node = nodes ; node ; node = node->next) { - if(hash == node->hash && !strcmp(de->d_name, node->id)) - break; - } - - // did we find it? 
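Both find_btrfs_disks() above and find_all_btrfs_pools() here keep their lists in sync with /sys the same way: clear every node's exists flag, rescan the directory marking survivors, then unlink and free whatever stayed unmarked. A condensed sketch of that sweep, with a generic entry type standing in for BTRFS_DISK/BTRFS_NODE and free_entry() for btrfs_free_disk()/btrfs_free_node():

    #include <stddef.h>

    struct entry {
        int exists;                 /* cleared before each rescan */
        struct entry *next;
    };

    static void sweep(struct entry **head, void (*free_entry)(struct entry *)) {
        struct entry *last = NULL, *e = *head;

        while(e) {
            if(!e->exists) {
                if(*head == e) {            /* unlink from the head */
                    *head = e->next;
                    free_entry(e);
                    e = *head;
                    last = NULL;
                }
                else {                      /* unlink mid-list */
                    last->next = e->next;
                    free_entry(e);
                    e = last->next;
                }
                continue;
            }
            last = e;
            e = e->next;
        }
    }
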
- if(node) { - // info("BTRFS: already exists '%s'", de->d_name); - node->exists = 1; - - // update the disk sizes - snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name); - find_btrfs_disks(node, filename); - - continue; - } - - // info("BTRFS: adding '%s'", de->d_name); - - // not found, create it - node = callocz(sizeof(BTRFS_NODE), 1); - - node->id = strdupz(de->d_name); - node->hash = simple_hash(node->id); - node->exists = 1; - - { - char label[FILENAME_MAX + 1] = ""; - - snprintfz(filename, FILENAME_MAX, "%s/%s/label", path, de->d_name); - read_file(filename, label, FILENAME_MAX); - - char *s = label; - if (s[0]) - s = trim(label); - - if(s && s[0]) - node->label = strdupz(s); - else - node->label = strdupz(node->id); - } - - //snprintfz(filename, FILENAME_MAX, "%s/%s/sectorsize", path, de->d_name); - //if(read_single_number_file(filename, &node->sectorsize) != 0) { - // error("BTRFS: failed to read '%s'", filename); - // btrfs_free_node(node); - // continue; - //} - - //snprintfz(filename, FILENAME_MAX, "%s/%s/nodesize", path, de->d_name); - //if(read_single_number_file(filename, &node->nodesize) != 0) { - // error("BTRFS: failed to read '%s'", filename); - // btrfs_free_node(node); - // continue; - //} - - //snprintfz(filename, FILENAME_MAX, "%s/%s/quota_override", path, de->d_name); - //if(read_single_number_file(filename, &node->quota_override) != 0) { - // error("BTRFS: failed to read '%s'", filename); - // btrfs_free_node(node); - // continue; - //} - - // -------------------------------------------------------------------- - // macros to simplify our life - - #define init_btrfs_allocation_field(FIELD) {\ - snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #FIELD, path, de->d_name); \ - if(read_single_number_file(filename, &node->allocation_ ## FIELD) != 0) {\ - error("BTRFS: failed to read '%s'", filename);\ - btrfs_free_node(node);\ - continue;\ - }\ - if(!node->allocation_ ## FIELD ## _filename)\ - node->allocation_ ## FIELD ## _filename = strdupz(filename);\ - } - - #define init_btrfs_allocation_section_field(SECTION, FIELD) {\ - snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #SECTION "/" #FIELD, path, de->d_name); \ - if(read_single_number_file(filename, &node->allocation_ ## SECTION ## _ ## FIELD) != 0) {\ - error("BTRFS: failed to read '%s'", filename);\ - btrfs_free_node(node);\ - continue;\ - }\ - if(!node->allocation_ ## SECTION ## _ ## FIELD ## _filename)\ - node->allocation_ ## SECTION ## _ ## FIELD ## _filename = strdupz(filename);\ - } - - // -------------------------------------------------------------------- - // allocation/data - - init_btrfs_allocation_section_field(data, total_bytes); - init_btrfs_allocation_section_field(data, bytes_used); - init_btrfs_allocation_section_field(data, disk_total); - init_btrfs_allocation_section_field(data, disk_used); - - - // -------------------------------------------------------------------- - // allocation/metadata - - init_btrfs_allocation_section_field(metadata, total_bytes); - init_btrfs_allocation_section_field(metadata, bytes_used); - init_btrfs_allocation_section_field(metadata, disk_total); - init_btrfs_allocation_section_field(metadata, disk_used); - - init_btrfs_allocation_field(global_rsv_size); - // init_btrfs_allocation_field(global_rsv_reserved); - - - // -------------------------------------------------------------------- - // allocation/system - - init_btrfs_allocation_section_field(system, total_bytes); - init_btrfs_allocation_section_field(system, bytes_used); - 
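For reference before the remaining allocation fields are wired up below: thanks to the ## token pasting in the macros defined above, a single call such as init_btrfs_allocation_section_field(system, bytes_used) expands to roughly the following (the continue targets the enclosing readdir() loop):

    {
        snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/system/bytes_used", path, de->d_name);
        if(read_single_number_file(filename, &node->allocation_system_bytes_used) != 0) {
            error("BTRFS: failed to read '%s'", filename);
            btrfs_free_node(node);
            continue;   /* skip this pool, keep scanning */
        }
        if(!node->allocation_system_bytes_used_filename)
            node->allocation_system_bytes_used_filename = strdupz(filename);
    }
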
init_btrfs_allocation_section_field(system, disk_total); - init_btrfs_allocation_section_field(system, disk_used); - - - // -------------------------------------------------------------------- - // find all disks related to this node - // and collect their sizes - - snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name); - find_btrfs_disks(node, filename); - - - // -------------------------------------------------------------------- - // link it - - // info("BTRFS: linking '%s'", node->id); - node->next = nodes; - nodes = node; - } - closedir(dir); - - - // ------------------------------------------------------------------------ - // cleanup - - BTRFS_NODE *last = NULL; - node = nodes; - - while(node) { - if(unlikely(!node->exists)) { - if(unlikely(nodes == node)) { - nodes = node->next; - btrfs_free_node(node); - node = nodes; - last = NULL; - } - else { - last->next = node->next; - btrfs_free_node(node); - node = last->next; - } - - continue; - } - - last = node; - node = node->next; - } - - return 0; -} - -int do_sys_fs_btrfs(int update_every, usec_t dt) { - static int initialized = 0 - , do_allocation_disks = CONFIG_BOOLEAN_AUTO - , do_allocation_system = CONFIG_BOOLEAN_AUTO - , do_allocation_data = CONFIG_BOOLEAN_AUTO - , do_allocation_metadata = CONFIG_BOOLEAN_AUTO; - - static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC; - static char *btrfs_path = NULL; - - (void)dt; - - if(unlikely(!initialized)) { - initialized = 1; - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs"); - btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename); - - refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC; - refresh_delta = refresh_every; - - do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks); - do_allocation_data = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "data allocation", do_allocation_data); - do_allocation_metadata = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "metadata allocation", do_allocation_metadata); - do_allocation_system = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "system allocation", do_allocation_system); - } - - refresh_delta += dt; - if(refresh_delta >= refresh_every) { - refresh_delta = 0; - find_all_btrfs_pools(btrfs_path); - } - - BTRFS_NODE *node; - for(node = nodes; node ; node = node->next) { - // -------------------------------------------------------------------- - // allocation/system - - #define collect_btrfs_allocation_field(FIELD) \ - read_single_number_file(node->allocation_ ## FIELD ## _filename, &node->allocation_ ## FIELD) - - #define collect_btrfs_allocation_section_field(SECTION, FIELD) \ - read_single_number_file(node->allocation_ ## SECTION ## _ ## FIELD ## _filename, &node->allocation_ ## SECTION ## _ ## FIELD) - - if(do_allocation_disks != CONFIG_BOOLEAN_NO) { - if( collect_btrfs_allocation_section_field(data, disk_total) != 0 - || collect_btrfs_allocation_section_field(data, disk_used) != 0 - || collect_btrfs_allocation_section_field(metadata, disk_total) != 0 - || collect_btrfs_allocation_section_field(metadata, disk_used) != 0 - || collect_btrfs_allocation_section_field(system, disk_total) != 0 - || collect_btrfs_allocation_section_field(system, disk_used) != 0) { - error("BTRFS: failed to collect physical disks allocation 
for '%s'", node->id); - // make it refresh btrfs at the next iteration - refresh_delta = refresh_every; - continue; - } - } - - if(do_allocation_data != CONFIG_BOOLEAN_NO) { - if (collect_btrfs_allocation_section_field(data, total_bytes) != 0 - || collect_btrfs_allocation_section_field(data, bytes_used) != 0) { - error("BTRFS: failed to collect allocation/data for '%s'", node->id); - // make it refresh btrfs at the next iteration - refresh_delta = refresh_every; - continue; - } - } - - if(do_allocation_metadata != CONFIG_BOOLEAN_NO) { - if (collect_btrfs_allocation_section_field(metadata, total_bytes) != 0 - || collect_btrfs_allocation_section_field(metadata, bytes_used) != 0 - || collect_btrfs_allocation_field(global_rsv_size) != 0 - ) { - error("BTRFS: failed to collect allocation/metadata for '%s'", node->id); - // make it refresh btrfs at the next iteration - refresh_delta = refresh_every; - continue; - } - } - - if(do_allocation_system != CONFIG_BOOLEAN_NO) { - if (collect_btrfs_allocation_section_field(system, total_bytes) != 0 - || collect_btrfs_allocation_section_field(system, bytes_used) != 0) { - error("BTRFS: failed to collect allocation/system for '%s'", node->id); - // make it refresh btrfs at the next iteration - refresh_delta = refresh_every; - continue; - } - } - - // -------------------------------------------------------------------- - // allocation/disks - - if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && node->all_disks_total && node->allocation_data_disk_total)) { - do_allocation_disks = CONFIG_BOOLEAN_YES; - - if(unlikely(!node->st_allocation_disks)) { - char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; - - snprintf(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id); - snprintf(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label); - snprintf(title, 200, "BTRFS Disk Allocation for %s", node->label); - - netdata_fix_chart_id(id); - netdata_fix_chart_name(name); - - node->st_allocation_disks = rrdset_create_localhost( - "btrfs" - , id - , name - , node->label - , "btrfs.disk" - , title - , "MB" - , "proc" - , "sys/fs/btrfs" - , 2300 - , update_every - , RRDSET_TYPE_STACKED - ); - - node->rd_allocation_disks_unallocated = rrddim_add(node->st_allocation_disks, "unallocated", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_disks_data_used = rrddim_add(node->st_allocation_disks, "data_used", "data used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_disks_data_free = rrddim_add(node->st_allocation_disks, "data_free", "data free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_disks_metadata_used = rrddim_add(node->st_allocation_disks, "meta_used", "meta used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_disks_metadata_free = rrddim_add(node->st_allocation_disks, "meta_free", "meta free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_disks_system_used = rrddim_add(node->st_allocation_disks, "sys_used", "sys used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_disks_system_free = rrddim_add(node->st_allocation_disks, "sys_free", "sys free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(node->st_allocation_disks); - - // unsigned long long disk_used = node->allocation_data_disk_used + node->allocation_metadata_disk_used + node->allocation_system_disk_used; - unsigned long long disk_total = node->allocation_data_disk_total + node->allocation_metadata_disk_total + 
node->allocation_system_disk_total; - unsigned long long disk_unallocated = node->all_disks_total - disk_total; - - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_unallocated, disk_unallocated); - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_used, node->allocation_data_disk_used); - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_free, node->allocation_data_disk_total - node->allocation_data_disk_used); - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_used, node->allocation_metadata_disk_used); - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_free, node->allocation_metadata_disk_total - node->allocation_metadata_disk_used); - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_used, node->allocation_system_disk_used); - rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_free, node->allocation_system_disk_total - node->allocation_system_disk_used); - rrdset_done(node->st_allocation_disks); - } - - - // -------------------------------------------------------------------- - // allocation/data - - if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && node->allocation_data_total_bytes)) { - do_allocation_data = CONFIG_BOOLEAN_YES; - - if(unlikely(!node->st_allocation_data)) { - char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; - - snprintf(id, RRD_ID_LENGTH_MAX, "data_%s", node->id); - snprintf(name, RRD_ID_LENGTH_MAX, "data_%s", node->label); - snprintf(title, 200, "BTRFS Data Allocation for %s", node->label); - - netdata_fix_chart_id(id); - netdata_fix_chart_name(name); - - node->st_allocation_data = rrdset_create_localhost( - "btrfs" - , id - , name - , node->label - , "btrfs.data" - , title - , "MB" - , "proc" - , "sys/fs/btrfs" - , 2301 - , update_every - , RRDSET_TYPE_STACKED - ); - - node->rd_allocation_data_free = rrddim_add(node->st_allocation_data, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_data_used = rrddim_add(node->st_allocation_data, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(node->st_allocation_data); - - rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_free, node->allocation_data_total_bytes - node->allocation_data_bytes_used); - rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_used, node->allocation_data_bytes_used); - rrdset_done(node->st_allocation_data); - } - - // -------------------------------------------------------------------- - // allocation/metadata - - if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && node->allocation_metadata_total_bytes)) { - do_allocation_metadata = CONFIG_BOOLEAN_YES; - - if(unlikely(!node->st_allocation_metadata)) { - char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; - - snprintf(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id); - snprintf(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label); - snprintf(title, 200, "BTRFS Metadata Allocation for %s", node->label); - - netdata_fix_chart_id(id); - netdata_fix_chart_name(name); - - node->st_allocation_metadata = rrdset_create_localhost( - "btrfs" - , id - , name - , node->label - , "btrfs.metadata" - , title - , "MB" - , "proc" - , "sys/fs/btrfs" - , 2302 - , update_every - , RRDSET_TYPE_STACKED - ); - - 
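The three dimensions added next carve metadata space up so that free + used + reserved equals total_bytes: the global reserve is charted separately and subtracted from free, and the 1/(1024*1024) divisor on every rrddim_add() turns raw byte counts into the chart's MB unit. A worked example with made-up values:

    /* hypothetical readings, for illustration only */
    unsigned long long total = 1073741824ULL;   /* allocation_metadata_total_bytes: 1 GiB   */
    unsigned long long used  =  335544320ULL;   /* allocation_metadata_bytes_used:  320 MiB */
    unsigned long long rsv   =   16777216ULL;   /* allocation_global_rsv_size:      16 MiB  */

    unsigned long long free_bytes = total - used - rsv;   /* 721420288 */

    /* with divisor 1024*1024 the chart shows:
       free = 688 MB, used = 320 MB, reserved = 16 MB */
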
node->rd_allocation_metadata_free = rrddim_add(node->st_allocation_metadata, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_metadata_used = rrddim_add(node->st_allocation_metadata, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_metadata_reserved = rrddim_add(node->st_allocation_metadata, "reserved", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(node->st_allocation_metadata); - - rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_free, node->allocation_metadata_total_bytes - node->allocation_metadata_bytes_used - node->allocation_global_rsv_size); - rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_used, node->allocation_metadata_bytes_used); - rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_reserved, node->allocation_global_rsv_size); - rrdset_done(node->st_allocation_metadata); - } - - // -------------------------------------------------------------------- - // allocation/system - - if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && node->allocation_system_total_bytes)) { - do_allocation_system = CONFIG_BOOLEAN_YES; - - if(unlikely(!node->st_allocation_system)) { - char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1]; - - snprintf(id, RRD_ID_LENGTH_MAX, "system_%s", node->id); - snprintf(name, RRD_ID_LENGTH_MAX, "system_%s", node->label); - snprintf(title, 200, "BTRFS System Allocation for %s", node->label); - - netdata_fix_chart_id(id); - netdata_fix_chart_name(name); - - node->st_allocation_system = rrdset_create_localhost( - "btrfs" - , id - , name - , node->label - , "btrfs.system" - , title - , "MB" - , "proc" - , "sys/fs/btrfs" - , 2303 - , update_every - , RRDSET_TYPE_STACKED - ); - - node->rd_allocation_system_free = rrddim_add(node->st_allocation_system, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - node->rd_allocation_system_used = rrddim_add(node->st_allocation_system, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(node->st_allocation_system); - - rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_free, node->allocation_system_total_bytes - node->allocation_system_bytes_used); - rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_used, node->allocation_system_bytes_used); - rrdset_done(node->st_allocation_system); - } - } - - return 0; -} - diff --git a/src/sys_fs_cgroup.c b/src/sys_fs_cgroup.c deleted file mode 100644 index f6e613c4b..000000000 --- a/src/sys_fs_cgroup.c +++ /dev/null @@ -1,2765 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// cgroup globals - -#define CHART_PRIORITY_SYSTEMD_SERVICES 19000 -#define CHART_PRIORITY_CONTAINERS 40000 - -static long system_page_size = 4096; // system will be queried via sysconf() in configuration() - -static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_throttle_io = 
CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO; -static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO; - -static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES; -static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO; -static int cgroup_used_memory_without_cache = CONFIG_BOOLEAN_YES; - -static int cgroup_search_in_devices = 1; - -static int cgroup_enable_new_cgroups_detected_at_runtime = 1; -static int cgroup_check_for_new_every = 10; -static int cgroup_update_every = 1; - -static int cgroup_recheck_zero_blkio_every_iterations = 10; -static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10; -static int cgroup_recheck_zero_mem_detailed_every_iterations = 10; - -static char *cgroup_cpuacct_base = NULL; -static char *cgroup_blkio_base = NULL; -static char *cgroup_memory_base = NULL; -static char *cgroup_devices_base = NULL; - -static int cgroup_root_count = 0; -static int cgroup_root_max = 1000; -static int cgroup_max_depth = 0; - -static SIMPLE_PATTERN *enabled_cgroup_patterns = NULL; -static SIMPLE_PATTERN *enabled_cgroup_paths = NULL; -static SIMPLE_PATTERN *enabled_cgroup_renames = NULL; -static SIMPLE_PATTERN *systemd_services_cgroups = NULL; - -static char *cgroups_rename_script = NULL; -static char *cgroups_network_interface_script = NULL; - -static int cgroups_check = 0; - -static uint32_t Read_hash = 0; -static uint32_t Write_hash = 0; -static uint32_t user_hash = 0; -static uint32_t system_hash = 0; - -void read_cgroup_plugin_configuration() { - system_page_size = sysconf(_SC_PAGESIZE); - - Read_hash = simple_hash("Read"); - Write_hash = simple_hash("Write"); - user_hash = simple_hash("user"); - system_hash = simple_hash("system"); - - cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every); - if(cgroup_update_every < localhost->rrd_update_every) - cgroup_update_every = localhost->rrd_update_every; - - cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every * cgroup_update_every); - if(cgroup_check_for_new_every < cgroup_update_every) - cgroup_check_for_new_every = cgroup_update_every; - - cgroup_enable_cpuacct_stat = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct stat (total CPU)", cgroup_enable_cpuacct_stat); - cgroup_enable_cpuacct_usage = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct usage (per core CPU)", cgroup_enable_cpuacct_usage); - - cgroup_enable_memory = config_get_boolean_ondemand("plugin:cgroups", "enable memory (used mem including cache)", cgroup_enable_memory); - cgroup_enable_detailed_memory = config_get_boolean_ondemand("plugin:cgroups", "enable detailed memory", cgroup_enable_detailed_memory); - cgroup_enable_memory_failcnt = config_get_boolean_ondemand("plugin:cgroups", "enable memory limits fail count", cgroup_enable_memory_failcnt); - cgroup_enable_swap = config_get_boolean_ondemand("plugin:cgroups", "enable swap memory", cgroup_enable_swap); - - cgroup_enable_blkio_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio bandwidth", cgroup_enable_blkio_io); - cgroup_enable_blkio_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio operations", cgroup_enable_blkio_ops); - cgroup_enable_blkio_throttle_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle bandwidth", cgroup_enable_blkio_throttle_io); - 
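These *_ondemand switches, including the remaining blkio ones below, are tri-state rather than plain booleans: no disables a metric family outright, yes always charts it, and auto (the default) defers the decision until data proves the family is live. A minimal sketch of the latch the collectors apply, with stand-in constants for CONFIG_BOOLEAN_*:

    enum { CFG_NO = 0, CFG_YES = 1, CFG_AUTO = 2 };   /* stand-ins for CONFIG_BOOLEAN_* */

    /* called after each successful read of a metric family */
    static void maybe_enable(int *enabled, unsigned long long observed) {
        if(*enabled == CFG_AUTO && observed)
            *enabled = CFG_YES;   /* latch on: the family charts from now on */
        /* CFG_NO families are never read; CFG_YES families chart even zeros */
    }
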
cgroup_enable_blkio_throttle_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle operations", cgroup_enable_blkio_throttle_ops); - cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops); - cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops); - - cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations); - cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations); - cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations); - - cgroup_enable_systemd_services = config_get_boolean("plugin:cgroups", "enable systemd services", cgroup_enable_systemd_services); - cgroup_enable_systemd_services_detailed_memory = config_get_boolean("plugin:cgroups", "enable systemd services detailed memory", cgroup_enable_systemd_services_detailed_memory); - cgroup_used_memory_without_cache = config_get_boolean("plugin:cgroups", "report used memory without cache", cgroup_used_memory_without_cache); - - char filename[FILENAME_MAX + 1], *s; - struct mountinfo *mi, *root = mountinfo_read(0); - - mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct"); - if(!mi) { - error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct"); - s = "/sys/fs/cgroup/cpuacct"; - } - else s = mi->mount_point; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); - cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename); - - mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio"); - if(!mi) { - error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio"); - s = "/sys/fs/cgroup/blkio"; - } - else s = mi->mount_point; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); - cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename); - - mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory"); - if(!mi) { - error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory"); - s = "/sys/fs/cgroup/memory"; - } - else s = mi->mount_point; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); - cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename); - - mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices"); - if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices"); - if(!mi) { - error("CGROUP: cannot find devices mountinfo. 
Assuming default: /sys/fs/cgroup/devices"); - s = "/sys/fs/cgroup/devices"; - } - else s = mi->mount_point; - snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s); - cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename); - - cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max); - cgroup_max_depth = (int)config_get_number("plugin:cgroups", "max cgroups depth to monitor", cgroup_max_depth); - - cgroup_enable_new_cgroups_detected_at_runtime = config_get_boolean("plugin:cgroups", "enable new cgroups detected at run time", cgroup_enable_new_cgroups_detected_at_runtime); - - enabled_cgroup_patterns = simple_pattern_create( - config_get("plugin:cgroups", "enable by default cgroups matching", - // ---------------------------------------------------------------- - - " !*/init.scope " // ignore init.scope - " !/system.slice/run-*.scope " // ignore system.slice/run-XXXX.scope - " *.scope " // we need all other *.scope for sure - - // ---------------------------------------------------------------- - - " /machine.slice/*.service " // #3367 systemd-nspawn - - // ---------------------------------------------------------------- - - " !*/vcpu* " // libvirtd adds these sub-cgroups - " !*/emulator " // libvirtd adds these sub-cgroups - " !*.mount " - " !*.partition " - " !*.service " - " !*.socket " - " !*.slice " - " !*.swap " - " !*.user " - " !/ " - " !/docker " - " !/libvirt " - " !/lxc " - " !/lxc/*/* " // #1397 #2649 - " !/machine " - " !/qemu " - " !/system " - " !/systemd " - " !/user " - " * " // enable anything else - ), NULL, SIMPLE_PATTERN_EXACT); - - enabled_cgroup_paths = simple_pattern_create( - config_get("plugin:cgroups", "search for cgroups in subpaths matching", - " !*/init.scope " // ignore init.scope - " !*-qemu " // #345 - " !*.libvirt-qemu " // #3010 - " !/init.scope " - " !/system " - " !/systemd " - " !/user " - " !/user.slice " - " !/lxc/*/* " // #2161 #2649 - " * " - ), NULL, SIMPLE_PATTERN_EXACT); - - snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_plugins_dir); - cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename); - - snprintfz(filename, FILENAME_MAX, "%s/cgroup-network", netdata_configured_plugins_dir); - cgroups_network_interface_script = config_get("plugin:cgroups", "script to get cgroup network interfaces", filename); - - enabled_cgroup_renames = simple_pattern_create( - config_get("plugin:cgroups", "run script to rename cgroups matching", - " !/ " - " !*.mount " - " !*.socket " - " !*.partition " - " /machine.slice/*.service " // #3367 systemd-nspawn - " !*.service " - " !*.slice " - " !*.swap " - " !*.user " - " !init.scope " - " !*.scope/vcpu* " // libvirtd adds these sub-cgroups - " !*.scope/emulator " // libvirtd adds these sub-cgroups - " *.scope " - " *docker* " - " *lxc* " - " *qemu* " - " *kubepods* " // #3396 kubernetes - " *.libvirt-qemu " // #3010 - " * " - ), NULL, SIMPLE_PATTERN_EXACT); - - if(cgroup_enable_systemd_services) { - systemd_services_cgroups = simple_pattern_create( - config_get("plugin:cgroups", "cgroups to match as systemd services", - " !/system.slice/*/*.service " - " /system.slice/*.service " - ), NULL, SIMPLE_PATTERN_EXACT); - } - - mountinfo_free_all(root); -} - -// ---------------------------------------------------------------------------- -// cgroup objects - -struct blkio { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int 
delay_counter; - - char *filename; - - unsigned long long Read; - unsigned long long Write; -/* - unsigned long long Sync; - unsigned long long Async; - unsigned long long Total; -*/ -}; - -// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt -struct memory { - ARL_BASE *arl_base; - ARL_ENTRY *arl_dirty; - ARL_ENTRY *arl_swap; - - int updated_detailed; - int updated_usage_in_bytes; - int updated_msw_usage_in_bytes; - int updated_failcnt; - - int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - int delay_counter_detailed; - int delay_counter_failcnt; - - char *filename_detailed; - char *filename_usage_in_bytes; - char *filename_msw_usage_in_bytes; - char *filename_failcnt; - - int detailed_has_dirty; - int detailed_has_swap; - - // detailed metrics - unsigned long long cache; - unsigned long long rss; - unsigned long long rss_huge; - unsigned long long mapped_file; - unsigned long long writeback; - unsigned long long dirty; - unsigned long long swap; - unsigned long long pgpgin; - unsigned long long pgpgout; - unsigned long long pgfault; - unsigned long long pgmajfault; -/* - unsigned long long inactive_anon; - unsigned long long active_anon; - unsigned long long inactive_file; - unsigned long long active_file; - unsigned long long unevictable; - unsigned long long hierarchical_memory_limit; - unsigned long long total_cache; - unsigned long long total_rss; - unsigned long long total_rss_huge; - unsigned long long total_mapped_file; - unsigned long long total_writeback; - unsigned long long total_dirty; - unsigned long long total_swap; - unsigned long long total_pgpgin; - unsigned long long total_pgpgout; - unsigned long long total_pgfault; - unsigned long long total_pgmajfault; - unsigned long long total_inactive_anon; - unsigned long long total_active_anon; - unsigned long long total_inactive_file; - unsigned long long total_active_file; - unsigned long long total_unevictable; -*/ - - // single file metrics - unsigned long long usage_in_bytes; - unsigned long long msw_usage_in_bytes; - unsigned long long failcnt; -}; - -// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt -struct cpuacct_stat { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - char *filename; - - unsigned long long user; - unsigned long long system; -}; - -// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt -struct cpuacct_usage { - int updated; - int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO - - char *filename; - - unsigned int cpus; - unsigned long long *cpu_percpu; -}; - -struct cgroup_network_interface { - const char *host_device; - const char *container_device; - struct cgroup_network_interface *next; -}; - -#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001 -#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002 - -struct cgroup { - uint32_t options; - - char available; // found in the filesystem - char enabled; // enabled in the config - - char *id; - uint32_t hash; - - char *chart_id; - uint32_t hash_chart; - - char *chart_title; - - struct cpuacct_stat cpuacct_stat; - struct cpuacct_usage cpuacct_usage; - - struct memory memory; - - struct blkio io_service_bytes; // bytes - struct blkio io_serviced; // operations - - struct blkio throttle_io_service_bytes; // bytes - struct blkio 
throttle_io_serviced; // operations - - struct blkio io_merged; // operations - struct blkio io_queued; // operations - - struct cgroup_network_interface *interfaces; - - // per cgroup charts - RRDSET *st_cpu; - RRDSET *st_cpu_per_core; - RRDSET *st_mem; - RRDSET *st_writeback; - RRDSET *st_mem_activity; - RRDSET *st_pgfaults; - RRDSET *st_mem_usage; - RRDSET *st_mem_failcnt; - RRDSET *st_io; - RRDSET *st_serviced_ops; - RRDSET *st_throttle_io; - RRDSET *st_throttle_serviced_ops; - RRDSET *st_queued_ops; - RRDSET *st_merged_ops; - - // services - RRDDIM *rd_cpu; - RRDDIM *rd_mem_usage; - RRDDIM *rd_mem_failcnt; - RRDDIM *rd_swap_usage; - - RRDDIM *rd_mem_detailed_cache; - RRDDIM *rd_mem_detailed_rss; - RRDDIM *rd_mem_detailed_mapped; - RRDDIM *rd_mem_detailed_writeback; - RRDDIM *rd_mem_detailed_pgpgin; - RRDDIM *rd_mem_detailed_pgpgout; - RRDDIM *rd_mem_detailed_pgfault; - RRDDIM *rd_mem_detailed_pgmajfault; - - RRDDIM *rd_io_service_bytes_read; - RRDDIM *rd_io_serviced_read; - RRDDIM *rd_throttle_io_read; - RRDDIM *rd_throttle_io_serviced_read; - RRDDIM *rd_io_queued_read; - RRDDIM *rd_io_merged_read; - - RRDDIM *rd_io_service_bytes_write; - RRDDIM *rd_io_serviced_write; - RRDDIM *rd_throttle_io_write; - RRDDIM *rd_throttle_io_serviced_write; - RRDDIM *rd_io_queued_write; - RRDDIM *rd_io_merged_write; - - struct cgroup *next; - -} *cgroup_root = NULL; - -// ---------------------------------------------------------------------------- -// read values from /sys - -static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) { - static procfile *ff = NULL; - - if(likely(cp->filename)) { - ff = procfile_reopen(ff, cp->filename, NULL, PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - cp->updated = 0; - cgroups_check = 1; - return; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - cp->updated = 0; - cgroups_check = 1; - return; - } - - unsigned long i, lines = procfile_lines(ff); - - if(unlikely(lines < 1)) { - error("CGROUP: file '%s' should have 1+ lines.", cp->filename); - cp->updated = 0; - return; - } - - for(i = 0; i < lines ; i++) { - char *s = procfile_lineword(ff, i, 0); - uint32_t hash = simple_hash(s); - - if(unlikely(hash == user_hash && !strcmp(s, "user"))) - cp->user = str2ull(procfile_lineword(ff, i, 1)); - - else if(unlikely(hash == system_hash && !strcmp(s, "system"))) - cp->system = str2ull(procfile_lineword(ff, i, 1)); - } - - cp->updated = 1; - - if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system))) - cp->enabled = CONFIG_BOOLEAN_YES; - } -} - -static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) { - static procfile *ff = NULL; - - if(likely(ca->filename)) { - ff = procfile_reopen(ff, ca->filename, NULL, PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - ca->updated = 0; - cgroups_check = 1; - return; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - ca->updated = 0; - cgroups_check = 1; - return; - } - - if(unlikely(procfile_lines(ff) < 1)) { - error("CGROUP: file '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff)); - ca->updated = 0; - return; - } - - unsigned long i = procfile_linewords(ff, 0); - if(unlikely(i == 0)) { - ca->updated = 0; - return; - } - - // we may have 1 more CPU reported - while(i > 0) { - char *s = procfile_lineword(ff, 0, i - 1); - if(!*s) i--; - else break; - } - - if(unlikely(i != ca->cpus)) { - freez(ca->cpu_percpu); - ca->cpu_percpu = mallocz(sizeof(unsigned long long) * i); - ca->cpus = (unsigned int)i; - } - - unsigned long long total = 0; - for(i = 0; i < 
ca->cpus ;i++) { - unsigned long long n = str2ull(procfile_lineword(ff, 0, i)); - ca->cpu_percpu[i] = n; - total += n; - } - - ca->updated = 1; - - if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && total)) - ca->enabled = CONFIG_BOOLEAN_YES; - } -} - -static inline void cgroup_read_blkio(struct blkio *io) { - if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) { - io->delay_counter--; - return; - } - - if(likely(io->filename)) { - static procfile *ff = NULL; - - ff = procfile_reopen(ff, io->filename, NULL, PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - io->updated = 0; - cgroups_check = 1; - return; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - io->updated = 0; - cgroups_check = 1; - return; - } - - unsigned long i, lines = procfile_lines(ff); - - if(unlikely(lines < 1)) { - error("CGROUP: file '%s' should have 1+ lines.", io->filename); - io->updated = 0; - return; - } - - io->Read = 0; - io->Write = 0; -/* - io->Sync = 0; - io->Async = 0; - io->Total = 0; -*/ - - for(i = 0; i < lines ; i++) { - char *s = procfile_lineword(ff, i, 1); - uint32_t hash = simple_hash(s); - - if(unlikely(hash == Read_hash && !strcmp(s, "Read"))) - io->Read += str2ull(procfile_lineword(ff, i, 2)); - - else if(unlikely(hash == Write_hash && !strcmp(s, "Write"))) - io->Write += str2ull(procfile_lineword(ff, i, 2)); - -/* - else if(unlikely(hash == Sync_hash && !strcmp(s, "Sync"))) - io->Sync += str2ull(procfile_lineword(ff, i, 2)); - - else if(unlikely(hash == Async_hash && !strcmp(s, "Async"))) - io->Async += str2ull(procfile_lineword(ff, i, 2)); - - else if(unlikely(hash == Total_hash && !strcmp(s, "Total"))) - io->Total += str2ull(procfile_lineword(ff, i, 2)); -*/ - } - - io->updated = 1; - - if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(io->Read || io->Write)) - io->enabled = CONFIG_BOOLEAN_YES; - else - io->delay_counter = cgroup_recheck_zero_blkio_every_iterations; - } - } -} - -static inline void cgroup_read_memory(struct memory *mem) { - static procfile *ff = NULL; - - // read detailed ram usage - if(likely(mem->filename_detailed)) { - if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO && mem->delay_counter_detailed > 0)) { - mem->delay_counter_detailed--; - goto memory_next; - } - - ff = procfile_reopen(ff, mem->filename_detailed, NULL, PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) { - mem->updated_detailed = 0; - cgroups_check = 1; - goto memory_next; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) { - mem->updated_detailed = 0; - cgroups_check = 1; - goto memory_next; - } - - unsigned long i, lines = procfile_lines(ff); - - if(unlikely(lines < 1)) { - error("CGROUP: file '%s' should have 1+ lines.", mem->filename_detailed); - mem->updated_detailed = 0; - goto memory_next; - } - - if(unlikely(!mem->arl_base)) { - mem->arl_base = arl_create("cgroup/memory", NULL, 60); - - arl_expect(mem->arl_base, "cache", &mem->cache); - arl_expect(mem->arl_base, "rss", &mem->rss); - arl_expect(mem->arl_base, "rss_huge", &mem->rss_huge); - arl_expect(mem->arl_base, "mapped_file", &mem->mapped_file); - arl_expect(mem->arl_base, "writeback", &mem->writeback); - mem->arl_dirty = arl_expect(mem->arl_base, "dirty", &mem->dirty); - mem->arl_swap = arl_expect(mem->arl_base, "swap", &mem->swap); - arl_expect(mem->arl_base, "pgpgin", &mem->pgpgin); - arl_expect(mem->arl_base, "pgpgout", &mem->pgpgout); - arl_expect(mem->arl_base, "pgfault", &mem->pgfault); - arl_expect(mem->arl_base, "pgmajfault", &mem->pgmajfault); - } - - arl_begin(mem->arl_base); - - for(i = 0; 
i < lines ; i++) { - if(arl_check(mem->arl_base, - procfile_lineword(ff, i, 0), - procfile_lineword(ff, i, 1))) break; - } - - if(unlikely(mem->arl_dirty->flags & ARL_ENTRY_FLAG_FOUND)) - mem->detailed_has_dirty = 1; - - if(unlikely(mem->arl_swap->flags & ARL_ENTRY_FLAG_FOUND)) - mem->detailed_has_swap = 1; - - // fprintf(stderr, "READ: '%s', cache: %llu, rss: %llu, rss_huge: %llu, mapped_file: %llu, writeback: %llu, dirty: %llu, swap: %llu, pgpgin: %llu, pgpgout: %llu, pgfault: %llu, pgmajfault: %llu, inactive_anon: %llu, active_anon: %llu, inactive_file: %llu, active_file: %llu, unevictable: %llu, hierarchical_memory_limit: %llu, total_cache: %llu, total_rss: %llu, total_rss_huge: %llu, total_mapped_file: %llu, total_writeback: %llu, total_dirty: %llu, total_swap: %llu, total_pgpgin: %llu, total_pgpgout: %llu, total_pgfault: %llu, total_pgmajfault: %llu, total_inactive_anon: %llu, total_active_anon: %llu, total_inactive_file: %llu, total_active_file: %llu, total_unevictable: %llu\n", mem->filename, mem->cache, mem->rss, mem->rss_huge, mem->mapped_file, mem->writeback, mem->dirty, mem->swap, mem->pgpgin, mem->pgpgout, mem->pgfault, mem->pgmajfault, mem->inactive_anon, mem->active_anon, mem->inactive_file, mem->active_file, mem->unevictable, mem->hierarchical_memory_limit, mem->total_cache, mem->total_rss, mem->total_rss_huge, mem->total_mapped_file, mem->total_writeback, mem->total_dirty, mem->total_swap, mem->total_pgpgin, mem->total_pgpgout, mem->total_pgfault, mem->total_pgmajfault, mem->total_inactive_anon, mem->total_active_anon, mem->total_inactive_file, mem->total_active_file, mem->total_unevictable); - - mem->updated_detailed = 1; - - if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO)) { - if(mem->cache || mem->dirty || mem->rss || mem->rss_huge || mem->mapped_file || mem->writeback || mem->swap || mem->pgpgin || mem->pgpgout || mem->pgfault || mem->pgmajfault) - mem->enabled_detailed = CONFIG_BOOLEAN_YES; - else - mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations; - } - } - -memory_next: - - // read usage_in_bytes - if(likely(mem->filename_usage_in_bytes)) { - mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes); - if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->usage_in_bytes)) - mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES; - } - - // read msw_usage_in_bytes - if(likely(mem->filename_msw_usage_in_bytes)) { - mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes); - if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->msw_usage_in_bytes)) - mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES; - } - - // read failcnt - if(likely(mem->filename_failcnt)) { - if(unlikely(mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO && mem->delay_counter_failcnt > 0)) { - mem->updated_failcnt = 0; - mem->delay_counter_failcnt--; - } - else { - mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt); - if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(!mem->failcnt)) - mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations; - else - mem->enabled_failcnt = CONFIG_BOOLEAN_YES; - } - } - } -} - -static inline void cgroup_read(struct cgroup *cg) { - debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id); - - 
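cgroup_read() simply fans out to the per-family readers; the blkio and memory readers above add one refinement worth calling out: while a family sits in AUTO and keeps reading zero, it is re-checked only every cgroup_recheck_zero_*_every_iterations passes instead of on every update. A condensed sketch of that backoff — field names follow struct blkio, CFG_* are stand-ins for CONFIG_BOOLEAN_*:

    enum { CFG_NO = 0, CFG_YES = 1, CFG_AUTO = 2 };   /* stand-ins */

    struct family {
        int enabled;
        int delay_counter;
        unsigned long long Read, Write;
    };

    static int should_skip(struct family *f) {
        if(f->enabled == CFG_AUTO && f->delay_counter > 0) {
            f->delay_counter--;                /* idle family: skip this pass */
            return 1;
        }
        return 0;
    }

    static void after_read(struct family *f, int recheck_every) {
        if(f->enabled != CFG_AUTO) return;
        if(f->Read || f->Write)
            f->enabled = CFG_YES;              /* activity seen: chart it */
        else
            f->delay_counter = recheck_every;  /* still zero: back off again */
    }
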
cgroup_read_cpuacct_stat(&cg->cpuacct_stat); - cgroup_read_cpuacct_usage(&cg->cpuacct_usage); - cgroup_read_memory(&cg->memory); - cgroup_read_blkio(&cg->io_service_bytes); - cgroup_read_blkio(&cg->io_serviced); - cgroup_read_blkio(&cg->throttle_io_service_bytes); - cgroup_read_blkio(&cg->throttle_io_serviced); - cgroup_read_blkio(&cg->io_merged); - cgroup_read_blkio(&cg->io_queued); -} - -static inline void read_all_cgroups(struct cgroup *root) { - debug(D_CGROUP, "reading metrics for all cgroups"); - - struct cgroup *cg; - - for(cg = root; cg ; cg = cg->next) - if(cg->enabled && cg->available) - cgroup_read(cg); -} - -// ---------------------------------------------------------------------------- -// cgroup network interfaces - -#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 -static inline void read_cgroup_network_interfaces(struct cgroup *cg) { - debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title); - - pid_t cgroup_pid; - char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; - - snprintfz(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec %s --cgroup '%s%s'", cgroups_network_interface_script, cgroup_cpuacct_base, cg->id); - - debug(D_CGROUP, "executing command '%s' for cgroup '%s'", buffer, cg->id); - FILE *fp = mypopen(buffer, &cgroup_pid); - if(fp) { - char *s; - while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) { - trim(s); - - if(*s && *s != '\n') { - char *t = s; - while(*t && *t != ' ') t++; - if(*t == ' ') { - *t = '\0'; - t++; - } - - if(!*s) { - error("CGROUP: empty host interface returned by script"); - continue; - } - - if(!*t) { - error("CGROUP: empty guest interface returned by script"); - continue; - } - - struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface)); - i->host_device = strdupz(s); - i->container_device = strdupz(t); - i->next = cg->interfaces; - cg->interfaces = i; - - info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device); - - // register a device rename to proc_net_dev.c - netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id); - } - } - - mypclose(fp, cgroup_pid); - // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id); - } - else - error("CGROUP: cannot popen(\"%s\", \"r\").", buffer); -} - -static inline void free_cgroup_network_interfaces(struct cgroup *cg) { - while(cg->interfaces) { - struct cgroup_network_interface *i = cg->interfaces; - cg->interfaces = i->next; - - // delete the registration of proc_net_dev rename - netdev_rename_device_del(i->host_device); - - freez((void *)i->host_device); - freez((void *)i->container_device); - freez((void *)i); - } -} - -// ---------------------------------------------------------------------------- -// add/remove/find cgroup objects - -#define CGROUP_CHARTID_LINE_MAX 1024 - -static inline char *cgroup_title_strdupz(const char *s) { - if(!s || !*s) s = "/"; - - if(*s == '/' && s[1] != '\0') s++; - - char *r = strdupz(s); - netdata_fix_chart_name(r); - - return r; -} - -static inline char *cgroup_chart_id_strdupz(const char *s) { - if(!s || !*s) s = "/"; - - if(*s == '/' && s[1] != '\0') s++; - - char *r = strdupz(s); - netdata_fix_chart_id(r); - - return r; -} - -static inline void cgroup_get_chart_name(struct cgroup *cg) { - debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title); - - pid_t cgroup_pid; - char 
buffer[CGROUP_CHARTID_LINE_MAX + 1]; - - snprintfz(buffer, CGROUP_CHARTID_LINE_MAX, "exec %s '%s' '%s'", cgroups_rename_script, cg->chart_id, cg->id); - - debug(D_CGROUP, "executing command \"%s\" for cgroup '%s'", buffer, cg->id); - FILE *fp = mypopen(buffer, &cgroup_pid); - if(fp) { - // debug(D_CGROUP, "reading from command '%s' for cgroup '%s'", buffer, cg->id); - char *s = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp); - // debug(D_CGROUP, "closing command for cgroup '%s'", cg->id); - mypclose(fp, cgroup_pid); - // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id); - - if(s && *s && *s != '\n') { - debug(D_CGROUP, "cgroup '%s' should be renamed to '%s'", cg->id, s); - - trim(s); - - freez(cg->chart_title); - cg->chart_title = cgroup_title_strdupz(s); - - freez(cg->chart_id); - cg->chart_id = cgroup_chart_id_strdupz(s); - cg->hash_chart = simple_hash(cg->chart_id); - } - } - else - error("CGROUP: cannot popen(\"%s\", \"r\").", buffer); -} - -static inline struct cgroup *cgroup_add(const char *id) { - if(!id || !*id) id = "/"; - debug(D_CGROUP, "adding to list, cgroup with id '%s'", id); - - if(cgroup_root_count >= cgroup_root_max) { - info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id); - return NULL; - } - - int def = simple_pattern_matches(enabled_cgroup_patterns, id)?cgroup_enable_new_cgroups_detected_at_runtime:0; - struct cgroup *cg = callocz(1, sizeof(struct cgroup)); - - cg->id = strdupz(id); - cg->hash = simple_hash(cg->id); - - cg->chart_title = cgroup_title_strdupz(id); - - cg->chart_id = cgroup_chart_id_strdupz(id); - cg->hash_chart = simple_hash(cg->chart_id); - - if(!cgroup_root) - cgroup_root = cg; - else { - // append it - struct cgroup *e; - for(e = cgroup_root; e->next ;e = e->next) ; - e->next = cg; - } - - cgroup_root_count++; - - // fix the chart_id and title by calling the external script - if(simple_pattern_matches(enabled_cgroup_renames, cg->id)) { - - cgroup_get_chart_name(cg); - - debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title); - } - else - debug(D_CGROUP, "cgroup '%s' will not be renamed - it matches the list of disabled cgroup renames (will be shown as '%s')", cg->id, cg->chart_id); - - int user_configurable = 1; - - // check if this cgroup should be a systemd service - if(cgroup_enable_systemd_services) { - if(simple_pattern_matches(systemd_services_cgroups, cg->id) || - simple_pattern_matches(systemd_services_cgroups, cg->chart_id)) { - debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') matches systemd services cgroups", cg->id, cg->chart_id, cg->chart_title); - - char buffer[CGROUP_CHARTID_LINE_MAX + 1]; - cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE; - - strncpy(buffer, cg->id, CGROUP_CHARTID_LINE_MAX); - char *s = buffer; - - //freez(cg->chart_id); - //cg->chart_id = cgroup_chart_id_strdupz(s); - //cg->hash_chart = simple_hash(cg->chart_id); - - // skip to the last slash - size_t len = strlen(s); - while(len--) if(unlikely(s[len] == '/')) break; - if(len) s = &s[len + 1]; - - // remove extension - len = strlen(s); - while(len--) if(unlikely(s[len] == '.')) break; - if(len) s[len] = '\0'; - - freez(cg->chart_title); - cg->chart_title = cgroup_title_strdupz(s); - - cg->enabled = 1; - user_configurable = 0; - - debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title); - } - else - debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') does not match systemd services groups", cg->id, 
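cgroup_add() stores simple_hash(cg->chart_id) alongside the string so the duplicate checks below can reject most candidates with a single integer comparison before paying for strcmp(). For illustration, a classic string hash of the same flavor (djb2; the actual simple_hash() implementation may differ):

    #include <stdint.h>

    /* djb2-style string hash -- shows what a hash precheck buys:
     * compare two 32-bit values before doing a full strcmp(). */
    static uint32_t hash_string(const char *s) {
        uint32_t hash = 5381;
        while(*s)
            hash = ((hash << 5) + hash) + (uint8_t)*s++;  /* hash * 33 + c */
        return hash;
    }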
cg->chart_id, cg->chart_title); - } - - if(user_configurable) { - // allow the user to enable/disable this individually - char option[FILENAME_MAX + 1]; - snprintfz(option, FILENAME_MAX, "enable cgroup %s", cg->chart_title); - cg->enabled = (char) config_get_boolean("plugin:cgroups", option, def); - } - - // detect duplicate cgroups - if(cg->enabled) { - struct cgroup *t; - for (t = cgroup_root; t; t = t->next) { - if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) { - if (!strncmp(t->chart_id, "/system.slice/", 14) && !strncmp(cg->chart_id, "/init.scope/system.slice/", 25)) { - error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.", - cg->chart_id, t->id, cg->id, t->id); - debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.", - cg->chart_id, t->id, cg->id, t->id); - t->enabled = 0; - t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE; - } - else { - error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.", - cg->chart_id, t->id, cg->id); - debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.", - cg->chart_id, t->id, cg->id); - cg->enabled = 0; - cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE; - } - - break; - } - } - } - - if(cg->enabled && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) - read_cgroup_network_interfaces(cg); - - debug(D_CGROUP, "ADDED CGROUP: '%s' with chart id '%s' and title '%s' as %s (default was %s)", cg->id, cg->chart_id, cg->chart_title, (cg->enabled)?"enabled":"disabled", (def)?"enabled":"disabled"); - - return cg; -} - -static inline void cgroup_free(struct cgroup *cg) { - debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available"); - - if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu); - if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core); - if(cg->st_mem) rrdset_is_obsolete(cg->st_mem); - if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback); - if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity); - if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults); - if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage); - if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt); - if(cg->st_io) rrdset_is_obsolete(cg->st_io); - if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops); - if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io); - if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops); - if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops); - if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops); - - free_cgroup_network_interfaces(cg); - - freez(cg->cpuacct_usage.cpu_percpu); - - freez(cg->cpuacct_stat.filename); - freez(cg->cpuacct_usage.filename); - - arl_free(cg->memory.arl_base); - freez(cg->memory.filename_detailed); - freez(cg->memory.filename_failcnt); - freez(cg->memory.filename_usage_in_bytes); - freez(cg->memory.filename_msw_usage_in_bytes); - - freez(cg->io_service_bytes.filename); - freez(cg->io_serviced.filename); - - freez(cg->throttle_io_service_bytes.filename); - freez(cg->throttle_io_serviced.filename); - -
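For cgroups classified as systemd services, the rename logic above derives the chart title from the unit name: skip to the character after the last slash, then cut the trailing extension. A compact sketch of that transformation (service_title() is a hypothetical helper, not part of the plugin):

    #include <stdio.h>
    #include <string.h>

    /* "/system.slice/nginx.service" -> "nginx", mirroring how
     * cgroup_add() trims systemd service ids for chart titles. */
    static void service_title(const char *id, char *out, size_t len) {
        const char *slash = strrchr(id, '/');
        snprintf(out, len, "%s", slash ? slash + 1 : id);

        char *dot = strrchr(out, '.');
        if(dot && dot != out) *dot = '\0';
    }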
freez(cg->io_merged.filename); - freez(cg->io_queued.filename); - - freez(cg->id); - freez(cg->chart_id); - freez(cg->chart_title); - - freez(cg); - - cgroup_root_count--; -} - -// find if a given cgroup exists -static inline struct cgroup *cgroup_find(const char *id) { - debug(D_CGROUP, "searching for cgroup '%s'", id); - - uint32_t hash = simple_hash(id); - - struct cgroup *cg; - for(cg = cgroup_root; cg ; cg = cg->next) { - if(hash == cg->hash && strcmp(id, cg->id) == 0) - break; - } - - debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found"); - return cg; -} - -// ---------------------------------------------------------------------------- -// detect running cgroups - -// callback for find_file_in_subdirs() -static inline void found_subdir_in_dir(const char *dir) { - debug(D_CGROUP, "examining cgroup dir '%s'", dir); - - struct cgroup *cg = cgroup_find(dir); - if(!cg) { - if(*dir && cgroup_max_depth > 0) { - int depth = 0; - const char *s; - - for(s = dir; *s ;s++) - if(unlikely(*s == '/')) - depth++; - - if(depth > cgroup_max_depth) { - info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth); - return; - } - } - // debug(D_CGROUP, "will add dir '%s' as cgroup", dir); - cg = cgroup_add(dir); - } - - if(cg) cg->available = 1; -} - -static inline int find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) { - if(!this) this = base; - debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base); - - size_t dirlen = strlen(this), baselen = strlen(base); - - int ret = -1; - int enabled = -1; - - const char *relative_path = &this[baselen]; - if(!*relative_path) relative_path = "/"; - - DIR *dir = opendir(this); - if(!dir) { - error("CGROUP: cannot read directory '%s'", base); - return ret; - } - ret = 1; - - callback(relative_path); - - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR - && ( - (de->d_name[0] == '.' && de->d_name[1] == '\0') - || (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - )) - continue; - - if(de->d_type == DT_DIR) { - if(enabled == -1) { - const char *r = relative_path; - if(*r == '\0') r = "/"; - - // do not descend into directories we are not interested in - int def = simple_pattern_matches(enabled_cgroup_paths, r); - - // we check for this option here - // so that the config will not have settings - // for leaf directories - char option[FILENAME_MAX + 1]; - snprintfz(option, FILENAME_MAX, "search for cgroups under %s", r); - option[FILENAME_MAX] = '\0'; - enabled = config_get_boolean("plugin:cgroups", option, def); - } - - if(enabled) { - char *s = mallocz(dirlen + strlen(de->d_name) + 2); - strcpy(s, this); - strcat(s, "/"); - strcat(s, de->d_name); - int ret2 = find_dir_in_subdirs(base, s, callback); - if(ret2 > 0) ret += ret2; - freez(s); - } - } - } - - closedir(dir); - return ret; -} - -static inline void mark_all_cgroups_as_not_available() { - debug(D_CGROUP, "marking all cgroups as not available"); - - struct cgroup *cg; - - // mark all as not available - for(cg = cgroup_root; cg ; cg = cg->next) { - cg->available = 0; - } -} - -static inline void cleanup_all_cgroups() { - struct cgroup *cg = cgroup_root, *last = NULL; - - for(; cg ;) { - if(!cg->available) { - // enable the first duplicate cgroup - { - struct cgroup *t; - for(t = cgroup_root; t ; t = t->next) { - if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) { - debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id); - t->enabled = 1; - t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE; - break; - } - } - } - - if(!last) - cgroup_root = cg->next; - else - last->next = cg->next; - - cgroup_free(cg); - - if(!last) - cg = cgroup_root; - else - cg = last->next; - } - else { - last = cg; - cg = cg->next; - } - } -} - -static inline void find_all_cgroups() { - debug(D_CGROUP, "searching for cgroups"); - - mark_all_cgroups_as_not_available(); - - if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) { - if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) { - cgroup_enable_cpuacct_stat = - cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO; - error("CGROUP: disabled cpu statistics."); - } - } - - if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) { - if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) { - cgroup_enable_blkio_io = - cgroup_enable_blkio_ops = - cgroup_enable_blkio_throttle_io = - cgroup_enable_blkio_throttle_ops = - cgroup_enable_blkio_merged_ops = - cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO; - error("CGROUP: disabled blkio statistics."); - } - } - - if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) { - if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) { - cgroup_enable_memory = - cgroup_enable_detailed_memory = - cgroup_enable_swap = - cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO; - error("CGROUP: disabled memory statistics."); - } - } - - if(cgroup_search_in_devices) { - if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) { - cgroup_search_in_devices = 0; - error("CGROUP: disabled devices statistics."); - } - } - - // remove any non-existing 
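find_dir_in_subdirs() is the discovery workhorse: it reports every directory under a controller mount as a path relative to the base, and consults a per-directory "search for cgroups under ..." option before recursing further. A trimmed-down sketch of the same walk, with the config and pattern checks left out:

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Visit every subdirectory below base, passing base-relative
     * paths to the callback, like find_dir_in_subdirs() does.
     * (Assumes a filesystem that fills in d_type.) */
    static int walk_dirs(const char *base, const char *dir,
                         void (*callback)(const char *relative)) {
        if(!dir) dir = base;

        DIR *d = opendir(dir);
        if(!d) return -1;

        const char *relative = dir + strlen(base);
        callback(*relative ? relative : "/");

        struct dirent *de;
        while((de = readdir(d))) {
            if(de->d_type != DT_DIR) continue;
            if(!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) continue;

            char *child = malloc(strlen(dir) + strlen(de->d_name) + 2);
            if(!child) break;
            sprintf(child, "%s/%s", dir, de->d_name);
            walk_dirs(base, child, callback);
            free(child);
        }

        closedir(d);
        return 0;
    }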
cgroups - cleanup_all_cgroups(); - - struct cgroup *cg; - struct stat buf; - for(cg = cgroup_root; cg ; cg = cg->next) { - // fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name); - - if(unlikely(!cg->available)) - continue; - - debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id); - - // check for newly added cgroups - // and update the filenames they read - char filename[FILENAME_MAX + 1]; - if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->cpuacct_stat.filename = strdupz(filename); - cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat; - debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename); - } - else - debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) { - snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->cpuacct_usage.filename = strdupz(filename); - cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage; - debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename); - } - else - debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory_without_cache) && !cg->memory.filename_detailed && (cgroup_used_memory_without_cache || cgroup_enable_systemd_services_detailed_memory || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_detailed = strdupz(filename); - cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO; - debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed); - } - else - debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_usage_in_bytes = strdupz(filename); - cg->memory.enabled_usage_in_bytes = cgroup_enable_memory; - debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes); - } - else - debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.msw_usage_in_bytes", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_msw_usage_in_bytes = strdupz(filename); - cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap; - debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes); - } - else - debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, 
filename); - } - - if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) { - snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->memory.filename_failcnt = strdupz(filename); - cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt; - debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt); - } - else - debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->io_service_bytes.filename = strdupz(filename); - cg->io_service_bytes.enabled = cgroup_enable_blkio_io; - debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename); - } - else - debug(D_CGROUP, "io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->io_serviced.filename = strdupz(filename); - cg->io_serviced.enabled = cgroup_enable_blkio_ops; - debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename); - } - else - debug(D_CGROUP, "io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->throttle_io_service_bytes.filename = strdupz(filename); - cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io; - debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename); - } - else - debug(D_CGROUP, "throttle_io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->throttle_io_serviced.filename = strdupz(filename); - cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops; - debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename); - } - else - debug(D_CGROUP, "throttle_io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id); - if(likely(stat(filename, &buf) != -1)) { - cg->io_merged.filename = strdupz(filename); - cg->io_merged.enabled = cgroup_enable_blkio_merged_ops; - debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename); - } - else - debug(D_CGROUP, "io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - - if(unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) { - snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id); - 
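Files such as memory.usage_in_bytes, memory.failcnt and the blkio counters above each hold a single decimal value, which is why detection boils down to stat() and collection to read_single_number_file() plus an "updated" flag. A sketch of such a helper, assuming the real one likewise returns 0 on success (callers negate that into the flag):

    #include <stdio.h>
    #include <stdlib.h>

    /* Read one unsigned decimal value from a sysfs/cgroup file;
     * return 0 on success so !read_...() yields an "updated" flag. */
    static int read_single_number(const char *filename, unsigned long long *result) {
        FILE *fp = fopen(filename, "r");
        if(!fp) return 1;

        char buf[64];
        if(!fgets(buf, sizeof(buf), fp)) { fclose(fp); return 1; }
        fclose(fp);

        *result = strtoull(buf, NULL, 10);
        return 0;
    }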
if(likely(stat(filename, &buf) != -1)) { - cg->io_queued.filename = strdupz(filename); - cg->io_queued.enabled = cgroup_enable_blkio_queued_ops; - debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename); - } - else - debug(D_CGROUP, "io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename); - } - } - - debug(D_CGROUP, "done searching for cgroups"); -} - -// ---------------------------------------------------------------------------- -// generate charts - -#define CHART_TITLE_MAX 300 - -void update_systemd_services_charts( - int update_every - , int do_cpu - , int do_mem_usage - , int do_mem_detailed - , int do_mem_failcnt - , int do_swap_usage - , int do_io - , int do_io_ops - , int do_throttle_io - , int do_throttle_ops - , int do_queued_ops - , int do_merged_ops -) { - static RRDSET - *st_cpu = NULL, - *st_mem_usage = NULL, - *st_mem_failcnt = NULL, - *st_swap_usage = NULL, - - *st_mem_detailed_cache = NULL, - *st_mem_detailed_rss = NULL, - *st_mem_detailed_mapped = NULL, - *st_mem_detailed_writeback = NULL, - *st_mem_detailed_pgfault = NULL, - *st_mem_detailed_pgmajfault = NULL, - *st_mem_detailed_pgpgin = NULL, - *st_mem_detailed_pgpgout = NULL, - - *st_io_read = NULL, - *st_io_serviced_read = NULL, - *st_throttle_io_read = NULL, - *st_throttle_ops_read = NULL, - *st_queued_ops_read = NULL, - *st_merged_ops_read = NULL, - - *st_io_write = NULL, - *st_io_serviced_write = NULL, - *st_throttle_io_write = NULL, - *st_throttle_ops_write = NULL, - *st_queued_ops_write = NULL, - *st_merged_ops_write = NULL; - - // create the charts - - if(likely(do_cpu)) { - if(unlikely(!st_cpu)) { - char title[CHART_TITLE_MAX + 1]; - snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (%d%% = %d core%s)", (processors * 100), processors, (processors > 1) ? "s" : ""); - - st_cpu = rrdset_create_localhost( - "services" - , "cpu" - , NULL - , "cpu" - , "services.cpu" - , title - , "%" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_cpu); - } - - if(likely(do_mem_usage)) { - if(unlikely(!st_mem_usage)) { - - st_mem_usage = rrdset_create_localhost( - "services" - , "mem_usage" - , NULL - , "mem" - , "services.mem_usage" - , (cgroup_used_memory_without_cache) ? 
"Systemd Services Used Memory without Cache" - : "Systemd Services Used Memory" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 10 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_usage); - } - - if(likely(do_mem_detailed)) { - if(unlikely(!st_mem_detailed_rss)) { - - st_mem_detailed_rss = rrdset_create_localhost( - "services" - , "mem_rss" - , NULL - , "mem" - , "services.mem_rss" - , "Systemd Services RSS Memory" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 20 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_rss); - - if(unlikely(!st_mem_detailed_mapped)) { - - st_mem_detailed_mapped = rrdset_create_localhost( - "services" - , "mem_mapped" - , NULL - , "mem" - , "services.mem_mapped" - , "Systemd Services Mapped Memory" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 30 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_mapped); - - if(unlikely(!st_mem_detailed_cache)) { - - st_mem_detailed_cache = rrdset_create_localhost( - "services" - , "mem_cache" - , NULL - , "mem" - , "services.mem_cache" - , "Systemd Services Cache Memory" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 40 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_cache); - - if(unlikely(!st_mem_detailed_writeback)) { - - st_mem_detailed_writeback = rrdset_create_localhost( - "services" - , "mem_writeback" - , NULL - , "mem" - , "services.mem_writeback" - , "Systemd Services Writeback Memory" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 50 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_writeback); - - if(unlikely(!st_mem_detailed_pgfault)) { - - st_mem_detailed_pgfault = rrdset_create_localhost( - "services" - , "mem_pgfault" - , NULL - , "mem" - , "services.mem_pgfault" - , "Systemd Services Memory Minor Page Faults" - , "MB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 60 - , update_every - , RRDSET_TYPE_STACKED - ); - } - else - rrdset_next(st_mem_detailed_pgfault); - - if(unlikely(!st_mem_detailed_pgmajfault)) { - - st_mem_detailed_pgmajfault = rrdset_create_localhost( - "services" - , "mem_pgmajfault" - , NULL - , "mem" - , "services.mem_pgmajfault" - , "Systemd Services Memory Major Page Faults" - , "MB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 70 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_pgmajfault); - - if(unlikely(!st_mem_detailed_pgpgin)) { - - st_mem_detailed_pgpgin = rrdset_create_localhost( - "services" - , "mem_pgpgin" - , NULL - , "mem" - , "services.mem_pgpgin" - , "Systemd Services Memory Charging Activity" - , "MB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 80 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_pgpgin); - - if(unlikely(!st_mem_detailed_pgpgout)) { - - st_mem_detailed_pgpgout = rrdset_create_localhost( - "services" - , "mem_pgpgout" - , NULL - , "mem" - , "services.mem_pgpgout" - , "Systemd Services Memory Uncharging Activity" - , "MB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 90 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_detailed_pgpgout); - } - - if(likely(do_mem_failcnt)) { - if(unlikely(!st_mem_failcnt)) { - - st_mem_failcnt = rrdset_create_localhost( 
- "services" - , "mem_failcnt" - , NULL - , "mem" - , "services.mem_failcnt" - , "Systemd Services Memory Limit Failures" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 110 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_failcnt); - } - - if(likely(do_swap_usage)) { - if(unlikely(!st_swap_usage)) { - - st_swap_usage = rrdset_create_localhost( - "services" - , "swap_usage" - , NULL - , "swap" - , "services.swap_usage" - , "Systemd Services Swap Memory Used" - , "MB" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 100 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_swap_usage); - } - - if(likely(do_io)) { - if(unlikely(!st_io_read)) { - - st_io_read = rrdset_create_localhost( - "services" - , "io_read" - , NULL - , "disk" - , "services.io_read" - , "Systemd Services Disk Read Bandwidth" - , "KB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 120 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_io_read); - - if(unlikely(!st_io_write)) { - - st_io_write = rrdset_create_localhost( - "services" - , "io_write" - , NULL - , "disk" - , "services.io_write" - , "Systemd Services Disk Write Bandwidth" - , "KB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 130 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_io_write); - } - - if(likely(do_io_ops)) { - if(unlikely(!st_io_serviced_read)) { - - st_io_serviced_read = rrdset_create_localhost( - "services" - , "io_ops_read" - , NULL - , "disk" - , "services.io_ops_read" - , "Systemd Services Disk Read Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 140 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_io_serviced_read); - - if(unlikely(!st_io_serviced_write)) { - - st_io_serviced_write = rrdset_create_localhost( - "services" - , "io_ops_write" - , NULL - , "disk" - , "services.io_ops_write" - , "Systemd Services Disk Write Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 150 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_io_serviced_write); - } - - if(likely(do_throttle_io)) { - if(unlikely(!st_throttle_io_read)) { - - st_throttle_io_read = rrdset_create_localhost( - "services" - , "throttle_io_read" - , NULL - , "disk" - , "services.throttle_io_read" - , "Systemd Services Throttle Disk Read Bandwidth" - , "KB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 160 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_throttle_io_read); - - if(unlikely(!st_throttle_io_write)) { - - st_throttle_io_write = rrdset_create_localhost( - "services" - , "throttle_io_write" - , NULL - , "disk" - , "services.throttle_io_write" - , "Systemd Services Throttle Disk Write Bandwidth" - , "KB/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 170 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_throttle_io_write); - } - - if(likely(do_throttle_ops)) { - if(unlikely(!st_throttle_ops_read)) { - - st_throttle_ops_read = rrdset_create_localhost( - "services" - , "throttle_io_ops_read" - , NULL - , "disk" - , "services.throttle_io_ops_read" - , "Systemd Services Throttle Disk Read Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 180 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - 
rrdset_next(st_throttle_ops_read); - - if(unlikely(!st_throttle_ops_write)) { - - st_throttle_ops_write = rrdset_create_localhost( - "services" - , "throttle_io_ops_write" - , NULL - , "disk" - , "services.throttle_io_ops_write" - , "Systemd Services Throttle Disk Write Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 190 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_throttle_ops_write); - } - - if(likely(do_queued_ops)) { - if(unlikely(!st_queued_ops_read)) { - - st_queued_ops_read = rrdset_create_localhost( - "services" - , "queued_io_ops_read" - , NULL - , "disk" - , "services.queued_io_ops_read" - , "Systemd Services Queued Disk Read Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 200 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_queued_ops_read); - - if(unlikely(!st_queued_ops_write)) { - - st_queued_ops_write = rrdset_create_localhost( - "services" - , "queued_io_ops_write" - , NULL - , "disk" - , "services.queued_io_ops_write" - , "Systemd Services Queued Disk Write Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 210 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_queued_ops_write); - } - - if(likely(do_merged_ops)) { - if(unlikely(!st_merged_ops_read)) { - - st_merged_ops_read = rrdset_create_localhost( - "services" - , "merged_io_ops_read" - , NULL - , "disk" - , "services.merged_io_ops_read" - , "Systemd Services Merged Disk Read Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 220 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_merged_ops_read); - - if(unlikely(!st_merged_ops_write)) { - - st_merged_ops_write = rrdset_create_localhost( - "services" - , "merged_io_ops_write" - , NULL - , "disk" - , "services.merged_io_ops_write" - , "Systemd Services Merged Disk Write Operations" - , "operations/s" - , "cgroup" - , "systemd" - , CHART_PRIORITY_SYSTEMD_SERVICES + 230 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_merged_ops_write); - } - - // update the values - struct cgroup *cg; - for(cg = cgroup_root; cg ; cg = cg->next) { - if(unlikely(!cg->available || !cg->enabled || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) - continue; - - if(likely(do_cpu && cg->cpuacct_stat.updated)) { - if(unlikely(!cg->rd_cpu)) - cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, hz, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_cpu, cg->rd_cpu, cg->cpuacct_stat.user + cg->cpuacct_stat.system); - } - - if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) { - if(unlikely(!cg->rd_mem_usage)) - cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0)); - } - - if(likely(do_mem_detailed && cg->memory.updated_detailed)) { - if(unlikely(!cg->rd_mem_detailed_rss)) - cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.rss + cg->memory.rss_huge); - - if(unlikely(!cg->rd_mem_detailed_mapped)) - cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, 
cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.mapped_file); - - if(unlikely(!cg->rd_mem_detailed_cache)) - cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.cache); - - if(unlikely(!cg->rd_mem_detailed_writeback)) - cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.writeback); - - if(unlikely(!cg->rd_mem_detailed_pgfault)) - cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.pgfault); - - if(unlikely(!cg->rd_mem_detailed_pgmajfault)) - cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.pgmajfault); - - if(unlikely(!cg->rd_mem_detailed_pgpgin)) - cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.pgpgin); - - if(unlikely(!cg->rd_mem_detailed_pgpgout)) - cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.pgpgout); - } - - if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) { - if(unlikely(!cg->rd_mem_failcnt)) - cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt); - } - - if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) { - if(unlikely(!cg->rd_swap_usage)) - cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes); - } - - if(likely(do_io && cg->io_service_bytes.updated)) { - if(unlikely(!cg->rd_io_service_bytes_read)) - cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read); - - if(unlikely(!cg->rd_io_service_bytes_write)) - cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write); - } - - if(likely(do_io_ops && cg->io_serviced.updated)) { - if(unlikely(!cg->rd_io_serviced_read)) - cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read); - - 
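Every service metric in this loop follows the same shape: the RRDDIM pointer lives on the cgroup struct, rrddim_add() runs only on the first pass that sees the cgroup, and afterwards each collection cycle costs one rrddim_set_by_pointer(). Reduced to its skeleton, with a generic type standing in for the RRD ones:

    typedef struct dim { long long value; } DIM;

    /* Lazy dimension creation: create once, cache the pointer,
     * then every later pass is a single pointer write. */
    static void set_metric(DIM **cached, DIM *(*add_dim)(const char *id),
                           const char *id, long long value) {
        if(!*cached)               /* first collection pass for this cgroup */
            *cached = add_dim(id); /* create and remember the dimension */
        (*cached)->value = value;  /* every pass: just store the value */
    }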
if(unlikely(!cg->rd_io_serviced_write)) - cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write); - } - - if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) { - if(unlikely(!cg->rd_throttle_io_read)) - cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read); - - if(unlikely(!cg->rd_throttle_io_write)) - cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write); - } - - if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) { - if(unlikely(!cg->rd_throttle_io_serviced_read)) - cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read); - - if(unlikely(!cg->rd_throttle_io_serviced_write)) - cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write); - } - - if(likely(do_queued_ops && cg->io_queued.updated)) { - if(unlikely(!cg->rd_io_queued_read)) - cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read); - - if(unlikely(!cg->rd_io_queued_write)) - cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write); - } - - if(likely(do_merged_ops && cg->io_merged.updated)) { - if(unlikely(!cg->rd_io_merged_read)) - cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read); - - if(unlikely(!cg->rd_io_merged_write)) - cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_merged_ops_write, cg->rd_io_merged_write, cg->io_merged.Write); - } - } - - // complete the iteration - if(likely(do_cpu)) - rrdset_done(st_cpu); - - if(likely(do_mem_usage)) - rrdset_done(st_mem_usage); - - if(unlikely(do_mem_detailed)) { - rrdset_done(st_mem_detailed_cache); - rrdset_done(st_mem_detailed_rss); - rrdset_done(st_mem_detailed_mapped); - rrdset_done(st_mem_detailed_writeback); - rrdset_done(st_mem_detailed_pgfault); - rrdset_done(st_mem_detailed_pgmajfault); - rrdset_done(st_mem_detailed_pgpgin); - rrdset_done(st_mem_detailed_pgpgout); - } - - if(likely(do_mem_failcnt)) - rrdset_done(st_mem_failcnt); - - if(likely(do_swap_usage)) - rrdset_done(st_swap_usage); - - if(likely(do_io)) { - rrdset_done(st_io_read); - rrdset_done(st_io_write); - } - - if(likely(do_io_ops)) { - rrdset_done(st_io_serviced_read); - rrdset_done(st_io_serviced_write); - } - - 
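None of these counters is converted by hand; each dimension carries a multiplier and a divisor and the chart engine scales at render time, so byte counters with 1 over 1024 * 1024 plot as MB and page counters with system_page_size over 1024 * 1024 plot as MB as well. The arithmetic amounts to:

    /* How a dimension's multiplier/divisor pair maps a raw collected
     * value to the plotted one, e.g. bytes -> MB with 1 and 1024*1024. */
    static double scaled_value(long long raw, long long mult, long long divisor) {
        return (double)raw * (double)mult / (double)divisor;
    }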
if(likely(do_throttle_io)) { - rrdset_done(st_throttle_io_read); - rrdset_done(st_throttle_io_write); - } - - if(likely(do_throttle_ops)) { - rrdset_done(st_throttle_ops_read); - rrdset_done(st_throttle_ops_write); - } - - if(likely(do_queued_ops)) { - rrdset_done(st_queued_ops_read); - rrdset_done(st_queued_ops_write); - } - - if(likely(do_merged_ops)) { - rrdset_done(st_merged_ops_read); - rrdset_done(st_merged_ops_write); - } -} - -static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) { - if(buffer[0]) return buffer; - - if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0')) - strncpy(buffer, "cgroup_root", len); - else - snprintfz(buffer, len, "cgroup_%s", id); - - netdata_fix_chart_id(buffer); - return buffer; -} - -void update_cgroup_charts(int update_every) { - debug(D_CGROUP, "updating cgroups charts"); - - char type[RRD_ID_LENGTH_MAX + 1]; - char title[CHART_TITLE_MAX + 1]; - - int services_do_cpu = 0, - services_do_mem_usage = 0, - services_do_mem_detailed = 0, - services_do_mem_failcnt = 0, - services_do_swap_usage = 0, - services_do_io = 0, - services_do_io_ops = 0, - services_do_throttle_io = 0, - services_do_throttle_ops = 0, - services_do_queued_ops = 0, - services_do_merged_ops = 0; - - struct cgroup *cg; - for(cg = cgroup_root; cg ; cg = cg->next) { - if(unlikely(!cg->available || !cg->enabled)) - continue; - - if(likely(cgroup_enable_systemd_services && cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) { - if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++; - - if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++; - if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++; - if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++; - if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++; - - if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++; - if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++; - if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++; - if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++; - if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++; - if(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++; - continue; - } - - type[0] = '\0'; - - if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_cpu)) { - snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) for cgroup %s", (processors * 100), processors, (processors > 1) ? 
"s" : "", cg->chart_title); - - cg->st_cpu = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu" - , NULL - , "cpu" - , "cgroup.cpu" - , title - , "%" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS - , update_every - , RRDSET_TYPE_STACKED - ); - - rrddim_add(cg->st_cpu, "user", NULL, 100, hz, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_cpu, "system", NULL, 100, hz, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_cpu); - - rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user); - rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system); - rrdset_done(cg->st_cpu); - } - - if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) { - char id[RRD_ID_LENGTH_MAX + 1]; - unsigned int i; - - if(unlikely(!cg->st_cpu_per_core)) { - snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) Per Core for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title); - - cg->st_cpu_per_core = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "cpu_per_core" - , NULL - , "cpu" - , "cgroup.cpu_per_core" - , title - , "%" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 100 - , update_every - , RRDSET_TYPE_STACKED - ); - - for(i = 0; i < cg->cpuacct_usage.cpus; i++) { - snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); - rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL); - } - } - else - rrdset_next(cg->st_cpu_per_core); - - for(i = 0; i < cg->cpuacct_usage.cpus ;i++) { - snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i); - rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]); - } - rrdset_done(cg->st_cpu_per_core); - } - - if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_mem)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Usage for cgroup %s", cg->chart_title); - - cg->st_mem = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem" - , NULL - , "mem" - , "cgroup.mem" - , title - , "MB" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 210 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - if(cg->memory.detailed_has_swap) - rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(cg->st_mem); - - rrddim_set(cg->st_mem, "cache", cg->memory.cache); - rrddim_set(cg->st_mem, "rss", cg->memory.rss); - - if(cg->memory.detailed_has_swap) - rrddim_set(cg->st_mem, "swap", cg->memory.swap); - - rrddim_set(cg->st_mem, "rss_huge", cg->memory.rss_huge); - rrddim_set(cg->st_mem, "mapped_file", cg->memory.mapped_file); - rrdset_done(cg->st_mem); - - if(unlikely(!cg->st_writeback)) { - snprintfz(title, CHART_TITLE_MAX, "Writeback Memory for cgroup %s", cg->chart_title); - - cg->st_writeback = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "writeback" - , NULL - , "mem" - , "cgroup.writeback" - , title - , "MB" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 300 - , update_every - , RRDSET_TYPE_AREA - ); - - if(cg->memory.detailed_has_dirty) - 
rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - - rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(cg->st_writeback); - - if(cg->memory.detailed_has_dirty) - rrddim_set(cg->st_writeback, "dirty", cg->memory.dirty); - - rrddim_set(cg->st_writeback, "writeback", cg->memory.writeback); - rrdset_done(cg->st_writeback); - - if(unlikely(!cg->st_mem_activity)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Activity for cgroup %s", cg->chart_title); - - cg->st_mem_activity = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_activity" - , NULL - , "mem" - , "cgroup.mem_activity" - , title - , "MB/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 400 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_mem_activity); - - rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.pgpgin); - rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.pgpgout); - rrdset_done(cg->st_mem_activity); - - if(unlikely(!cg->st_pgfaults)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults for cgroup %s", cg->chart_title); - - cg->st_pgfaults = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "pgfaults" - , NULL - , "mem" - , "cgroup.pgfaults" - , title - , "MB/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 500 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_pgfaults); - - rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.pgfault); - rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.pgmajfault); - rrdset_done(cg->st_pgfaults); - } - - if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_mem_usage)) { - snprintfz(title, CHART_TITLE_MAX, "Used Memory %sfor cgroup %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"", cg->chart_title); - - cg->st_mem_usage = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_usage" - , NULL - , "mem" - , "cgroup.mem_usage" - , title - , "MB" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 200 - , update_every - , RRDSET_TYPE_STACKED - ); - - rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(cg->st_mem_usage); - - rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0)); - rrddim_set(cg->st_mem_usage, "swap", (cg->memory.msw_usage_in_bytes > cg->memory.usage_in_bytes)?cg->memory.msw_usage_in_bytes - cg->memory.usage_in_bytes:0); - rrdset_done(cg->st_mem_usage); - } - - if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_mem_failcnt)) { - snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures for cgroup %s", cg->chart_title); - - cg->st_mem_failcnt = 
rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "mem_failcnt" - , NULL - , "mem" - , "cgroup.mem_failcnt" - , title - , "count" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 250 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_mem_failcnt); - - rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt); - rrdset_done(cg->st_mem_failcnt); - } - - if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_io)) { - snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks) for cgroup %s", cg->chart_title); - - cg->st_io = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "io" - , NULL - , "disk" - , "cgroup.io" - , title - , "KB/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 1200 - , update_every - , RRDSET_TYPE_AREA - ); - - rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_io); - - rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read); - rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write); - rrdset_done(cg->st_io); - } - - if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_serviced_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title); - - cg->st_serviced_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "serviced_ops" - , NULL - , "disk" - , "cgroup.serviced_ops" - , title - , "operations/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 1200 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_serviced_ops); - - rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read); - rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write); - rrdset_done(cg->st_serviced_ops); - } - - if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_throttle_io)) { - snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks) for cgroup %s", cg->chart_title); - - cg->st_throttle_io = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "throttle_io" - , NULL - , "disk" - , "cgroup.throttle_io" - , title - , "KB/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 1200 - , update_every - , RRDSET_TYPE_AREA - ); - - rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_throttle_io); - - rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read); - rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write); - rrdset_done(cg->st_throttle_io); - } - - if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_throttle_serviced_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title); - - 
cg->st_throttle_serviced_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "throttle_serviced_ops" - , NULL - , "disk" - , "cgroup.throttle_serviced_ops" - , title - , "operations/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 1200 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_throttle_serviced_ops); - - rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read); - rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write); - rrdset_done(cg->st_throttle_serviced_ops); - } - - if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_queued_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks) for cgroup %s", cg->chart_title); - - cg->st_queued_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "queued_ops" - , NULL - , "disk" - , "cgroup.queued_ops" - , title - , "operations" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 2000 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(cg->st_queued_ops); - - rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read); - rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write); - rrdset_done(cg->st_queued_ops); - } - - if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) { - if(unlikely(!cg->st_merged_ops)) { - snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks) for cgroup %s", cg->chart_title); - - cg->st_merged_ops = rrdset_create_localhost( - cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX) - , "merged_ops" - , NULL - , "disk" - , "cgroup.merged_ops" - , title - , "operations/s" - , "cgroup" - , "default" - , CHART_PRIORITY_CONTAINERS + 2100 - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(cg->st_merged_ops); - - rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read); - rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write); - rrdset_done(cg->st_merged_ops); - } - } - - if(likely(cgroup_enable_systemd_services)) - update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed - , services_do_mem_failcnt, services_do_swap_usage, services_do_io - , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops - , services_do_queued_ops, services_do_merged_ops - ); - - debug(D_CGROUP, "done updating cgroups charts"); -} - -// ---------------------------------------------------------------------------- -// cgroups main - -static void cgroup_main_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *cgroups_main(void *ptr) { - netdata_thread_cleanup_push(cgroup_main_cleanup, ptr); - - struct rusage thread; - - // when ZERO, attempt to do it 
- int vdo_cpu_netdata = config_get_boolean("plugin:cgroups", "cgroups plugin resource charts", 1); - - read_cgroup_plugin_configuration(); - - RRDSET *stcpu_thread = NULL; - - heartbeat_t hb; - heartbeat_init(&hb); - usec_t step = cgroup_update_every * USEC_PER_SEC; - usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0; - - while(!netdata_exit) { - usec_t hb_dt = heartbeat_next(&hb, step); - if(unlikely(netdata_exit)) break; - - // BEGIN -- the job to be done - - find_dt += hb_dt; - if(unlikely(find_dt >= find_every || cgroups_check)) { - find_all_cgroups(); - find_dt = 0; - cgroups_check = 0; - } - - read_all_cgroups(cgroup_root); - update_cgroup_charts(cgroup_update_every); - - // END -- the job is done - - // -------------------------------------------------------------------- - - if(vdo_cpu_netdata) { - getrusage(RUSAGE_THREAD, &thread); - - if(unlikely(!stcpu_thread)) { - - stcpu_thread = rrdset_create_localhost( - "netdata" - , "plugin_cgroups_cpu" - , NULL - , "cgroups" - , NULL - , "NetData CGroups Plugin CPU usage" - , "milliseconds/s" - , "cgroup" - , "stats" - , 132000 - , cgroup_update_every - , RRDSET_TYPE_STACKED - ); - - rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(stcpu_thread); - - rrddim_set(stcpu_thread, "user" , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec); - rrddim_set(stcpu_thread, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec); - rrdset_done(stcpu_thread); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/sys_kernel_mm_ksm.c b/src/sys_kernel_mm_ksm.c deleted file mode 100644 index 7ca1366b4..000000000 --- a/src/sys_kernel_mm_ksm.c +++ /dev/null @@ -1,197 +0,0 @@ -#include "common.h" - -typedef struct ksm_name_value { - char filename[FILENAME_MAX + 1]; - unsigned long long value; -} KSM_NAME_VALUE; - -#define PAGES_SHARED 0 -#define PAGES_SHARING 1 -#define PAGES_UNSHARED 2 -#define PAGES_VOLATILE 3 -#define PAGES_TO_SCAN 4 - -KSM_NAME_VALUE values[] = { - [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL }, - [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL }, - [PAGES_UNSHARED] = { "/sys/kernel/mm/ksm/pages_unshared", 0ULL }, - [PAGES_VOLATILE] = { "/sys/kernel/mm/ksm/pages_volatile", 0ULL }, - // [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL }, -}; - -int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { - (void)dt; - static procfile *ff_pages_shared = NULL, *ff_pages_sharing = NULL, *ff_pages_unshared = NULL, *ff_pages_volatile = NULL/*, *ff_pages_to_scan = NULL*/; - static unsigned long page_size = 0; - - if(unlikely(page_size == 0)) - page_size = (unsigned long)sysconf(_SC_PAGESIZE); - - if(unlikely(!ff_pages_shared)) { - snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_shared"); - snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_shared", values[PAGES_SHARED].filename)); - ff_pages_shared = procfile_open(values[PAGES_SHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT); - } - - if(unlikely(!ff_pages_sharing)) { - snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_sharing"); - snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", 
"/sys/kernel/mm/ksm/pages_sharing", values[PAGES_SHARING].filename)); - ff_pages_sharing = procfile_open(values[PAGES_SHARING].filename, " \t:", PROCFILE_FLAG_DEFAULT); - } - - if(unlikely(!ff_pages_unshared)) { - snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_unshared"); - snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_unshared", values[PAGES_UNSHARED].filename)); - ff_pages_unshared = procfile_open(values[PAGES_UNSHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT); - } - - if(unlikely(!ff_pages_volatile)) { - snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_volatile"); - snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_volatile", values[PAGES_VOLATILE].filename)); - ff_pages_volatile = procfile_open(values[PAGES_VOLATILE].filename, " \t:", PROCFILE_FLAG_DEFAULT); - } - - //if(unlikely(!ff_pages_to_scan)) { - // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_to_scan"); - // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_to_scan", values[PAGES_TO_SCAN].filename)); - // ff_pages_to_scan = procfile_open(values[PAGES_TO_SCAN].filename, " \t:", PROCFILE_FLAG_DEFAULT); - //} - - if(unlikely(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile /*|| !ff_pages_to_scan */)) - return 1; - - unsigned long long pages_shared = 0, pages_sharing = 0, pages_unshared = 0, pages_volatile = 0, /*pages_to_scan = 0,*/ offered = 0, saved = 0; - - ff_pages_shared = procfile_readall(ff_pages_shared); - if(unlikely(!ff_pages_shared)) return 0; // we return 0, so that we will retry to open it next time - pages_shared = str2ull(procfile_lineword(ff_pages_shared, 0, 0)); - - ff_pages_sharing = procfile_readall(ff_pages_sharing); - if(unlikely(!ff_pages_sharing)) return 0; // we return 0, so that we will retry to open it next time - pages_sharing = str2ull(procfile_lineword(ff_pages_sharing, 0, 0)); - - ff_pages_unshared = procfile_readall(ff_pages_unshared); - if(unlikely(!ff_pages_unshared)) return 0; // we return 0, so that we will retry to open it next time - pages_unshared = str2ull(procfile_lineword(ff_pages_unshared, 0, 0)); - - ff_pages_volatile = procfile_readall(ff_pages_volatile); - if(unlikely(!ff_pages_volatile)) return 0; // we return 0, so that we will retry to open it next time - pages_volatile = str2ull(procfile_lineword(ff_pages_volatile, 0, 0)); - - //ff_pages_to_scan = procfile_readall(ff_pages_to_scan); - //if(unlikely(!ff_pages_to_scan)) return 0; // we return 0, so that we will retry to open it next time - //pages_to_scan = str2ull(procfile_lineword(ff_pages_to_scan, 0, 0)); - - offered = pages_sharing + pages_shared + pages_unshared + pages_volatile; - saved = pages_sharing; - - if(unlikely(!offered /*|| !pages_to_scan*/)) return 0; - - // -------------------------------------------------------------------- - - { - static RRDSET *st_mem_ksm = NULL; - static RRDDIM *rd_shared = NULL, *rd_unshared = NULL, *rd_sharing = NULL, *rd_volatile = NULL/*, *rd_to_scan = NULL*/; - - if (unlikely(!st_mem_ksm)) { - st_mem_ksm = rrdset_create_localhost( - "mem" - , "ksm" - , NULL - , "ksm" - , NULL - , "Kernel 
Same Page Merging" - , "MB" - , "proc" - , "/sys/kernel/mm/ksm" - , NETDATA_CHART_PRIO_MEM_KSM - , update_every - , RRDSET_TYPE_AREA - ); - - rd_shared = rrddim_add(st_mem_ksm, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_unshared = rrddim_add(st_mem_ksm, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_sharing = rrddim_add(st_mem_ksm, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_volatile = rrddim_add(st_mem_ksm, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - //rd_to_scan = rrddim_add(st_mem_ksm, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_mem_ksm); - - rrddim_set_by_pointer(st_mem_ksm, rd_shared, pages_shared * page_size); - rrddim_set_by_pointer(st_mem_ksm, rd_unshared, pages_unshared * page_size); - rrddim_set_by_pointer(st_mem_ksm, rd_sharing, pages_sharing * page_size); - rrddim_set_by_pointer(st_mem_ksm, rd_volatile, pages_volatile * page_size); - //rrddim_set_by_pointer(st_mem_ksm, rd_to_scan, pages_to_scan * page_size); - - rrdset_done(st_mem_ksm); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_mem_ksm_savings = NULL; - static RRDDIM *rd_savings = NULL, *rd_offered = NULL; - - if (unlikely(!st_mem_ksm_savings)) { - st_mem_ksm_savings = rrdset_create_localhost( - "mem" - , "ksm_savings" - , NULL - , "ksm" - , NULL - , "Kernel Same Page Merging Savings" - , "MB" - , "proc" - , "/sys/kernel/mm/ksm" - , NETDATA_CHART_PRIO_MEM_KSM + 1 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_savings = rrddim_add(st_mem_ksm_savings, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_offered = rrddim_add(st_mem_ksm_savings, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_mem_ksm_savings); - - rrddim_set_by_pointer(st_mem_ksm_savings, rd_savings, saved * page_size); - rrddim_set_by_pointer(st_mem_ksm_savings, rd_offered, offered * page_size); - - rrdset_done(st_mem_ksm_savings); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_mem_ksm_ratios = NULL; - static RRDDIM *rd_savings = NULL; - - if (unlikely(!st_mem_ksm_ratios)) { - st_mem_ksm_ratios = rrdset_create_localhost( - "mem" - , "ksm_ratios" - , NULL - , "ksm" - , NULL - , "Kernel Same Page Merging Effectiveness" - , "percentage" - , "proc" - , "/sys/kernel/mm/ksm" - , NETDATA_CHART_PRIO_MEM_KSM + 2 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_savings = rrddim_add(st_mem_ksm_ratios, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_mem_ksm_ratios); - - rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, (saved * 1000000) / offered); - - rrdset_done(st_mem_ksm_ratios); - } - - return 0; -} diff --git a/src/threads.c b/src/threads.c deleted file mode 100644 index b9ca3c085..000000000 --- a/src/threads.c +++ /dev/null @@ -1,181 +0,0 @@ -#include "common.h" - -static size_t default_stacksize = 0, wanted_stacksize = 0; -static pthread_attr_t *attr = NULL; - -// ---------------------------------------------------------------------------- -// per thread data - -typedef struct { - void *arg; - pthread_t *thread; - const char *tag; - void *(*start_routine) (void *); - NETDATA_THREAD_OPTIONS options; -} NETDATA_THREAD; - -static __thread NETDATA_THREAD *netdata_thread = NULL; - -const char *netdata_thread_tag(void) { - return ((netdata_thread && netdata_thread->tag && *netdata_thread->tag)?netdata_thread->tag:"MAIN"); -} - -// 
---------------------------------------------------------------------------- -// compatibility library functions - -pid_t gettid(void) { -#ifdef __FreeBSD__ - - return (pid_t)pthread_getthreadid_np(); - -#elif defined(__APPLE__) - - #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060) - uint64_t curthreadid; - pthread_threadid_np(NULL, &curthreadid); - return (pid_t)curthreadid; - #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */ - return (pid_t)pthread_self; - #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED */ - -#else /* __APPLE__*/ - - return (pid_t)syscall(SYS_gettid); - -#endif /* __FreeBSD__, __APPLE__*/ -} - -// ---------------------------------------------------------------------------- -// early initialization - -size_t netdata_threads_init(void) { - int i; - - // -------------------------------------------------------------------- - // get the required stack size of the threads of netdata - - attr = callocz(1, sizeof(pthread_attr_t)); - i = pthread_attr_init(attr); - if(i != 0) - fatal("pthread_attr_init() failed with code %d.", i); - - i = pthread_attr_getstacksize(attr, &default_stacksize); - if(i != 0) - fatal("pthread_attr_getstacksize() failed with code %d.", i); - else - debug(D_OPTIONS, "initial pthread stack size is %zu bytes", default_stacksize); - - return default_stacksize; -} - -// ---------------------------------------------------------------------------- -// late initialization - -void netdata_threads_init_after_fork(size_t stacksize) { - wanted_stacksize = stacksize; - int i; - - // ------------------------------------------------------------------------ - // set default pthread stack size - - if(attr && default_stacksize < wanted_stacksize && wanted_stacksize > 0) { - i = pthread_attr_setstacksize(attr, wanted_stacksize); - if(i != 0) - fatal("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", wanted_stacksize, i); - else - debug(D_SYSTEM, "Successfully set pthread stacksize to %zu bytes", wanted_stacksize); - } -} - - -// ---------------------------------------------------------------------------- -// netdata_thread_create - -static void thread_cleanup(void *ptr) { - if(netdata_thread != ptr) { - NETDATA_THREAD *info = (NETDATA_THREAD *)ptr; - error("THREADS: internal error - thread local variable does not match the one passed to this function. 
Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag); - } - - if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP)) - info("thread with task id %d finished", gettid()); - - freez((void *)netdata_thread->tag); - netdata_thread->tag = NULL; - - freez(netdata_thread); - netdata_thread = NULL; -} - -static void *thread_start(void *ptr) { - netdata_thread = (NETDATA_THREAD *)ptr; - - if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_STARTUP)) - info("thread created with task id %d", gettid()); - - if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0) - error("cannot set pthread cancel type to DEFERRED."); - - if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0) - error("cannot set pthread cancel state to ENABLE."); - - void *ret = NULL; - pthread_cleanup_push(thread_cleanup, ptr); - ret = netdata_thread->start_routine(netdata_thread->arg); - pthread_cleanup_pop(1); - - return ret; -} - -int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg) { - NETDATA_THREAD *info = mallocz(sizeof(NETDATA_THREAD)); - info->arg = arg; - info->thread = thread; - info->tag = strdupz(tag); - info->start_routine = start_routine; - info->options = options; - - int ret = pthread_create(thread, attr, thread_start, info); - if(ret != 0) - error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret); - - else { - if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) { - int ret2 = pthread_detach(*thread); - if (ret2 != 0) - error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2); - } - } - - return ret; -} - -// ---------------------------------------------------------------------------- -// netdata_thread_cancel - -int netdata_thread_cancel(netdata_thread_t thread) { - int ret = pthread_cancel(thread); - if(ret != 0) - error("cannot cancel thread. pthread_cancel() failed with code %d.", ret); - - return ret; -} - -// ---------------------------------------------------------------------------- -// netdata_thread_join - -int netdata_thread_join(netdata_thread_t thread, void **retval) { - int ret = pthread_join(thread, retval); - if(ret != 0) - error("cannot join thread. pthread_join() failed with code %d.", ret); - - return ret; -} - -int netdata_thread_detach(pthread_t thread) { - int ret = pthread_detach(thread); - if(ret != 0) - error("cannot detach thread. 
pthread_detach() failed with code %d.", ret); - - return ret; -} diff --git a/src/threads.h b/src/threads.h deleted file mode 100644 index e2ed6a4ff..000000000 --- a/src/threads.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef NETDATA_THREADS_H -#define NETDATA_THREADS_H - -extern pid_t gettid(void); - -typedef enum { - NETDATA_THREAD_OPTION_DEFAULT = 0 << 0, - NETDATA_THREAD_OPTION_JOINABLE = 1 << 0, - NETDATA_THREAD_OPTION_DONT_LOG_STARTUP = 1 << 1, - NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP = 1 << 2, - NETDATA_THREAD_OPTION_DONT_LOG = NETDATA_THREAD_OPTION_DONT_LOG_STARTUP|NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP, -} NETDATA_THREAD_OPTIONS; - -#define netdata_thread_cleanup_push(func, arg) pthread_cleanup_push(func, arg) -#define netdata_thread_cleanup_pop(execute) pthread_cleanup_pop(execute) - -typedef pthread_t netdata_thread_t; - -#define NETDATA_THREAD_TAG_MAX 100 -extern const char *netdata_thread_tag(void); - -extern size_t netdata_threads_init(void); -extern void netdata_threads_init_after_fork(size_t stacksize); - -extern int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg); -extern int netdata_thread_cancel(netdata_thread_t thread); -extern int netdata_thread_join(netdata_thread_t thread, void **retval); -extern int netdata_thread_detach(pthread_t thread); - -#define netdata_thread_self pthread_self -#define netdata_thread_testcancel pthread_testcancel - -#endif //NETDATA_THREADS_H diff --git a/src/unit_test.c b/src/unit_test.c deleted file mode 100644 index e3eb146ad..000000000 --- a/src/unit_test.c +++ /dev/null @@ -1,1368 +0,0 @@ -#include "common.h" - -static int check_number_printing(void) { - struct { - calculated_number n; - const char *correct; - } values[] = { - { .n = 0, .correct = "0" }, - { .n = 0.0000001, .correct = "0.0000001" }, - { .n = 0.00000009, .correct = "0.0000001" }, - { .n = 0.000000001, .correct = "0" }, - { .n = 99.99999999999999999, .correct = "100" }, - { .n = -99.99999999999999999, .correct = "-100" }, - { .n = 123.4567890123456789, .correct = "123.456789" }, - { .n = 9999.9999999, .correct = "9999.9999999" }, - { .n = -9999.9999999, .correct = "-9999.9999999" }, - { .n = 0, .correct = NULL }, - }; - - char netdata[50], system[50]; - int i, failed = 0; - for(i = 0; values[i].correct ; i++) { - print_calculated_number(netdata, values[i].n); - snprintfz(system, 49, "%0.12" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)values[i].n); - - int ok = 1; - if(strcmp(netdata, values[i].correct) != 0) { - ok = 0; - failed++; - } - - fprintf(stderr, "'%s' (system) printed as '%s' (netdata): %s\n", system, netdata, ok?"OK":"FAILED"); - } - - if(failed) return 1; - return 0; -} - -static int check_rrdcalc_comparisons(void) { - RRDCALC_STATUS a, b; - - // make sure calloc() sets the status to UNINITIALIZED - memset(&a, 0, sizeof(RRDCALC_STATUS)); - if(a != RRDCALC_STATUS_UNINITIALIZED) { - fprintf(stderr, "%s is not zero.\n", rrdcalc_status2string(RRDCALC_STATUS_UNINITIALIZED)); - return 1; - } - - a = RRDCALC_STATUS_REMOVED; - b = RRDCALC_STATUS_UNDEFINED; - if(!(a < b)) { - fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); - return 1; - } - - a = RRDCALC_STATUS_UNDEFINED; - b = RRDCALC_STATUS_UNINITIALIZED; - if(!(a < b)) { - fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); - return 1; - } - - a = RRDCALC_STATUS_UNINITIALIZED; - b = RRDCALC_STATUS_CLEAR; - if(!(a < b)) { - fprintf(stderr, "%s is not 
less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); - return 1; - } - - a = RRDCALC_STATUS_CLEAR; - b = RRDCALC_STATUS_RAISED; - if(!(a < b)) { - fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); - return 1; - } - - a = RRDCALC_STATUS_RAISED; - b = RRDCALC_STATUS_WARNING; - if(!(a < b)) { - fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); - return 1; - } - - a = RRDCALC_STATUS_WARNING; - b = RRDCALC_STATUS_CRITICAL; - if(!(a < b)) { - fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b)); - return 1; - } - - fprintf(stderr, "RRDCALC_STATUSes are sortable.\n"); - - return 0; -} - -int check_storage_number(calculated_number n, int debug) { - char buffer[100]; - uint32_t flags = SN_EXISTS; - - storage_number s = pack_storage_number(n, flags); - calculated_number d = unpack_storage_number(s); - - if(!does_storage_number_exist(s)) { - fprintf(stderr, "Exists flags missing for number " CALCULATED_NUMBER_FORMAT "!\n", n); - return 5; - } - - calculated_number ddiff = d - n; - calculated_number dcdiff = ddiff * 100.0 / n; - - if(dcdiff < 0) dcdiff = -dcdiff; - - size_t len = (size_t)print_calculated_number(buffer, d); - calculated_number p = str2ld(buffer, NULL); - calculated_number pdiff = n - p; - calculated_number pcdiff = pdiff * 100.0 / n; - if(pcdiff < 0) pcdiff = -pcdiff; - - if(debug) { - fprintf(stderr, - CALCULATED_NUMBER_FORMAT " original\n" - CALCULATED_NUMBER_FORMAT " packed and unpacked, (stored as 0x%08X, diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n" - "%s printed after unpacked (%zu bytes)\n" - CALCULATED_NUMBER_FORMAT " re-parsed from printed (diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n\n", - n, - d, s, ddiff, dcdiff, - buffer, len, - p, pdiff, pcdiff - ); - if(len != strlen(buffer)) fprintf(stderr, "ERROR: printed number %s is reported to have length %zu but it has %zu\n", buffer, len, strlen(buffer)); - if(dcdiff > ACCURACY_LOSS) fprintf(stderr, "WARNING: packing number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, dcdiff); - if(pcdiff > ACCURACY_LOSS) fprintf(stderr, "WARNING: re-parsing the packed, unpacked and printed number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, pcdiff); - } - - if(len != strlen(buffer)) return 1; - if(dcdiff > ACCURACY_LOSS) return 3; - if(pcdiff > ACCURACY_LOSS) return 4; - return 0; -} - -calculated_number storage_number_min(calculated_number n) { - calculated_number r = 1, last; - - do { - last = n; - n /= 2.0; - storage_number t = pack_storage_number(n, SN_EXISTS); - r = unpack_storage_number(t); - } while(r != 0.0 && r != last); - - return last; -} - -void benchmark_storage_number(int loop, int multiplier) { - int i, j; - calculated_number n, d; - storage_number s; - unsigned long long user, system, total, mine, their; - - char buffer[100]; - - struct rusage now, last; - - fprintf(stderr, "\n\nBenchmarking %d numbers, please wait...\n\n", loop); - - // ------------------------------------------------------------------------ - - fprintf(stderr, "SYSTEM LONG DOUBLE SIZE: %zu bytes\n", sizeof(calculated_number)); - fprintf(stderr, "NETDATA FLOATING POINT SIZE: %zu bytes\n", sizeof(storage_number)); - - mine = (calculated_number)sizeof(storage_number) * (calculated_number)loop; - their = (calculated_number)sizeof(calculated_number) * (calculated_number)loop; - - if(mine 
> their) { - fprintf(stderr, "\nNETDATA NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES MORE MEMORY. Sorry!\n", (LONG_DOUBLE)(mine / their)); - } - else { - fprintf(stderr, "\nNETDATA INTERNAL FLOATING POINT ARITHMETICS NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES LESS MEMORY.\n", (LONG_DOUBLE)(their / mine)); - } - - fprintf(stderr, "\nNETDATA FLOATING POINT\n"); - fprintf(stderr, "MIN POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", storage_number_min(1)); - fprintf(stderr, "MAX POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_POSITIVE_MAX); - fprintf(stderr, "MIN NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_NEGATIVE_MIN); - fprintf(stderr, "MAX NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", -storage_number_min(1)); - fprintf(stderr, "Maximum accuracy loss: " CALCULATED_NUMBER_FORMAT "%%\n\n\n", (calculated_number)ACCURACY_LOSS); - - // ------------------------------------------------------------------------ - - fprintf(stderr, "INTERNAL LONG DOUBLE PRINTING: "); - getrusage(RUSAGE_SELF, &last); - - // do the job - for(j = 1; j < 11 ;j++) { - n = STORAGE_NUMBER_POSITIVE_MIN * j; - - for(i = 0; i < loop ;i++) { - n *= multiplier; - if(n > STORAGE_NUMBER_POSITIVE_MAX) n = STORAGE_NUMBER_POSITIVE_MIN; - - print_calculated_number(buffer, n); - } - } - - getrusage(RUSAGE_SELF, &now); - user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec; - system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec; - total = user + system; - mine = total; - - fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0)); - - // ------------------------------------------------------------------------ - - fprintf(stderr, "SYSTEM LONG DOUBLE PRINTING: "); - getrusage(RUSAGE_SELF, &last); - - // do the job - for(j = 1; j < 11 ;j++) { - n = STORAGE_NUMBER_POSITIVE_MIN * j; - - for(i = 0; i < loop ;i++) { - n *= multiplier; - if(n > STORAGE_NUMBER_POSITIVE_MAX) n = STORAGE_NUMBER_POSITIVE_MIN; - snprintfz(buffer, 100, CALCULATED_NUMBER_FORMAT, n); - } - } - - getrusage(RUSAGE_SELF, &now); - user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec; - system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec; - total = user + system; - their = total; - - fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0)); - - if(mine > total) { - fprintf(stderr, "NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0)); - } - else { - fprintf(stderr, "NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0)); - } - - // ------------------------------------------------------------------------ - - fprintf(stderr, "\nINTERNAL LONG DOUBLE PRINTING WITH PACK / UNPACK: "); - getrusage(RUSAGE_SELF, &last); - - // do the job - for(j = 1; j < 11 ;j++) { - n = STORAGE_NUMBER_POSITIVE_MIN * j; - - for(i = 0; i < loop ;i++) { - n *= multiplier; - if(n > STORAGE_NUMBER_POSITIVE_MAX) n = 
STORAGE_NUMBER_POSITIVE_MIN; - - s = pack_storage_number(n, 1); - d = unpack_storage_number(s); - print_calculated_number(buffer, d); - } - } - - getrusage(RUSAGE_SELF, &now); - user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec; - system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec; - total = user + system; - mine = total; - - fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0)); - - if(mine > their) { - fprintf(stderr, "WITH PACKING UNPACKING NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0)); - } - else { - fprintf(stderr, "EVEN WITH PACKING AND UNPACKING, NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0)); - } - - // ------------------------------------------------------------------------ - -} - -static int check_storage_number_exists() { - uint32_t flags = SN_EXISTS; - - - for(flags = 0; flags < 7 ; flags++) { - if(get_storage_number_flags(flags << 24) != flags << 24) { - fprintf(stderr, "Flag 0x%08x is not checked correctly. It became 0x%08x\n", flags << 24, get_storage_number_flags(flags << 24)); - return 1; - } - } - - flags = SN_EXISTS; - calculated_number n = 0.0; - - storage_number s = pack_storage_number(n, flags); - calculated_number d = unpack_storage_number(s); - if(get_storage_number_flags(s) != flags) { - fprintf(stderr, "Wrong flags. Given %08x, Got %08x!\n", flags, get_storage_number_flags(s)); - return 1; - } - if(n != d) { - fprintf(stderr, "Wrong number returned. 
Expected " CALCULATED_NUMBER_FORMAT ", returned " CALCULATED_NUMBER_FORMAT "!\n", n, d); - return 1; - } - - return 0; -} - -int unit_test_storage() -{ - if(check_storage_number_exists()) return 0; - - calculated_number c, a = 0; - int i, j, g, r = 0; - - for(g = -1; g <= 1 ; g++) { - a = 0; - - if(!g) continue; - - for(j = 0; j < 9 ;j++) { - a += 0.0000001; - c = a * g; - for(i = 0; i < 21 ;i++, c *= 10) { - if(c > 0 && c < STORAGE_NUMBER_POSITIVE_MIN) continue; - if(c < 0 && c > STORAGE_NUMBER_NEGATIVE_MAX) continue; - - if(check_storage_number(c, 1)) return 1; - } - } - } - - benchmark_storage_number(1000000, 2); - return r; -} - -int unit_test_str2ld() { - char *values[] = { - "1.2345678", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10", - "hello", "1wrong", "nan", "inf", NULL - }; - - int i; - for(i = 0; values[i] ; i++) { - char *e_mine = "hello", *e_sys = "world"; - LONG_DOUBLE mine = str2ld(values[i], &e_mine); - LONG_DOUBLE sys = strtold(values[i], &e_sys); - - if(isnan(mine)) { - if(!isnan(sys)) { - fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys); - return -1; - } - } - else if(isinf(mine)) { - if(!isinf(sys)) { - fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys); - return -1; - } - } - else if(mine != sys && abs(mine-sys) > 0.000001) { - fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ", delta %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys, sys-mine); - return -1; - } - - if(e_mine != e_sys) { - fprintf(stderr, "Value '%s' is parsed correctly, but endptr is not right\n", values[i]); - return -1; - } - - fprintf(stderr, "str2ld() parsed value '%s' exactly the same way with strtold(), returned %" LONG_DOUBLE_MODIFIER " vs %" LONG_DOUBLE_MODIFIER "\n", values[i], mine, sys); - } - - return 0; -} - -int unit_test_buffer() { - BUFFER *wb = buffer_create(1); - char string[2048 + 1]; - char final[9000 + 1]; - int i; - - for(i = 0; i < 2048; i++) - string[i] = (char)((i % 24) + 'a'); - string[2048] = '\0'; - - const char *fmt = "string1: %s\nstring2: %s\nstring3: %s\nstring4: %s"; - buffer_sprintf(wb, fmt, string, string, string, string); - snprintfz(final, 9000, fmt, string, string, string, string); - - const char *s = buffer_tostring(wb); - - if(buffer_strlen(wb) != strlen(final) || strcmp(s, final) != 0) { - fprintf(stderr, "\nbuffer_sprintf() is faulty.\n"); - fprintf(stderr, "\nstring : %s (length %zu)\n", string, strlen(string)); - fprintf(stderr, "\nbuffer : %s (length %zu)\n", s, buffer_strlen(wb)); - fprintf(stderr, "\nexpected: %s (length %zu)\n", final, strlen(final)); - buffer_free(wb); - return -1; - } - - fprintf(stderr, "buffer_sprintf() works as expected.\n"); - buffer_free(wb); - return 0; -} - -// -------------------------------------------------------------------------------------------------------------------- - -struct feed_values { - unsigned long long microseconds; - collected_number value; -}; - -struct test { - char name[100]; - char description[1024]; - - int update_every; - unsigned long long multiplier; - unsigned long long divisor; - RRD_ALGORITHM algorithm; - - unsigned long feed_entries; - unsigned long result_entries; - struct feed_values *feed; - calculated_number *results; - - collected_number *feed2; - calculated_number *results2; -}; - -// 
-------------------------------------------------------------------------------------------------------------------- -// test1 -// test absolute values stored - -struct feed_values test1_feed[] = { - { 0, 10 }, - { 1000000, 20 }, - { 1000000, 30 }, - { 1000000, 40 }, - { 1000000, 50 }, - { 1000000, 60 }, - { 1000000, 70 }, - { 1000000, 80 }, - { 1000000, 90 }, - { 1000000, 100 }, -}; - -calculated_number test1_results[] = { - 20, 30, 40, 50, 60, 70, 80, 90, 100 -}; - -struct test test1 = { - "test1", // name - "test absolute values stored at exactly second boundaries", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_ABSOLUTE, // algorithm - 10, // feed entries - 9, // result entries - test1_feed, // feed - test1_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test2 -// test absolute values stored in the middle of second boundaries - -struct feed_values test2_feed[] = { - { 500000, 10 }, - { 1000000, 20 }, - { 1000000, 30 }, - { 1000000, 40 }, - { 1000000, 50 }, - { 1000000, 60 }, - { 1000000, 70 }, - { 1000000, 80 }, - { 1000000, 90 }, - { 1000000, 100 }, -}; - -calculated_number test2_results[] = { - 20, 30, 40, 50, 60, 70, 80, 90, 100 -}; - -struct test test2 = { - "test2", // name - "test absolute values stored in the middle of second boundaries", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_ABSOLUTE, // algorithm - 10, // feed entries - 9, // result entries - test2_feed, // feed - test2_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test3 - -struct feed_values test3_feed[] = { - { 0, 10 }, - { 1000000, 20 }, - { 1000000, 30 }, - { 1000000, 40 }, - { 1000000, 50 }, - { 1000000, 60 }, - { 1000000, 70 }, - { 1000000, 80 }, - { 1000000, 90 }, - { 1000000, 100 }, -}; - -calculated_number test3_results[] = { - 10, 10, 10, 10, 10, 10, 10, 10, 10 -}; - -struct test test3 = { - "test3", // name - "test incremental values stored at exactly second boundaries", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 9, // result entries - test3_feed, // feed - test3_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test4 - -struct feed_values test4_feed[] = { - { 500000, 10 }, - { 1000000, 20 }, - { 1000000, 30 }, - { 1000000, 40 }, - { 1000000, 50 }, - { 1000000, 60 }, - { 1000000, 70 }, - { 1000000, 80 }, - { 1000000, 90 }, - { 1000000, 100 }, -}; - -calculated_number test4_results[] = { - 10, 10, 10, 10, 10, 10, 10, 10, 10 -}; - -struct test test4 = { - "test4", // name - "test incremental values stored in the middle of second boundaries", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 9, // result entries - test4_feed, // feed - test4_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test5 - -struct feed_values test5_feed[] = { - { 500000, 1000 }, - { 1000000, 2000 }, - { 1000000, 2000 }, - { 1000000, 2000 }, - { 1000000, 3000 }, - { 1000000, 2000 }, - { 1000000, 
2000 }, - { 1000000, 2000 }, - { 1000000, 2000 }, - { 1000000, 2000 }, -}; - -calculated_number test5_results[] = { - 1000, 500, 0, 500, 500, 0, 0, 0, 0 -}; - -struct test test5 = { - "test5", // name - "test incremental values ups and downs", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 9, // result entries - test5_feed, // feed - test5_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test6 - -struct feed_values test6_feed[] = { - { 250000, 1000 }, - { 250000, 2000 }, - { 250000, 3000 }, - { 250000, 4000 }, - { 250000, 5000 }, - { 250000, 6000 }, - { 250000, 7000 }, - { 250000, 8000 }, - { 250000, 9000 }, - { 250000, 10000 }, - { 250000, 11000 }, - { 250000, 12000 }, - { 250000, 13000 }, - { 250000, 14000 }, - { 250000, 15000 }, - { 250000, 16000 }, -}; - -calculated_number test6_results[] = { - 4000, 4000, 4000, 4000 -}; - -struct test test6 = { - "test6", // name - "test incremental values updated within the same second", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 16, // feed entries - 4, // result entries - test6_feed, // feed - test6_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test7 - -struct feed_values test7_feed[] = { - { 500000, 1000 }, - { 2000000, 2000 }, - { 2000000, 3000 }, - { 2000000, 4000 }, - { 2000000, 5000 }, - { 2000000, 6000 }, - { 2000000, 7000 }, - { 2000000, 8000 }, - { 2000000, 9000 }, - { 2000000, 10000 }, -}; - -calculated_number test7_results[] = { - 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500 -}; - -struct test test7 = { - "test7", // name - "test incremental values updated in long durations", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 18, // result entries - test7_feed, // feed - test7_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test8 - -struct feed_values test8_feed[] = { - { 500000, 1000 }, - { 2000000, 2000 }, - { 2000000, 3000 }, - { 2000000, 4000 }, - { 2000000, 5000 }, - { 2000000, 6000 }, -}; - -calculated_number test8_results[] = { - 1250, 2000, 2250, 3000, 3250, 4000, 4250, 5000, 5250, 6000 -}; - -struct test test8 = { - "test8", // name - "test absolute values updated in long durations", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_ABSOLUTE, // algorithm - 6, // feed entries - 10, // result entries - test8_feed, // feed - test8_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test9 - -struct feed_values test9_feed[] = { - { 250000, 1000 }, - { 250000, 2000 }, - { 250000, 3000 }, - { 250000, 4000 }, - { 250000, 5000 }, - { 250000, 6000 }, - { 250000, 7000 }, - { 250000, 8000 }, - { 250000, 9000 }, - { 250000, 10000 }, - { 250000, 11000 }, - { 250000, 12000 }, - { 250000, 13000 }, - { 250000, 14000 }, - { 250000, 15000 }, - { 250000, 16000 }, -}; - -calculated_number test9_results[] = { - 4000, 8000, 12000, 16000 -}; 
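The INCREMENTAL fixtures above (test3 through test7) all encode the same expectation: the stored value is the delta between consecutive collected values normalised to a per-second rate, so test7's +1000 steps collected every 2 seconds store as 500. A self-contained sketch of that normalisation (plain C; rate_per_second is an illustrative helper, not netdata's code):

#include <stdio.h>

/* illustrative helper: per-second rate of an incremental counter */
static double rate_per_second(long long prev, long long curr,
                              unsigned long long usec_since_last) {
    return (double)(curr - prev) * 1000000.0 / (double)usec_since_last;
}

int main(void) {
    /* test7-style feed: +1000 collected every 2 seconds */
    long long feed[] = { 1000, 2000, 3000, 4000 };
    int i;

    for (i = 1; i < 4; i++)
        printf("step %d -> %.0f/s\n", i,
               rate_per_second(feed[i - 1], feed[i], 2000000ULL));

    /* each step prints 500/s, matching test7_results above */
    return 0;
}

test6's 250000-microsecond feed drops out of the same formula as 4000/s, which is what test6_results records once the sub-second samples are folded into one-second entries.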
- -struct test test9 = { - "test9", // name - "test absolute values updated within the same second", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_ABSOLUTE, // algorithm - 16, // feed entries - 4, // result entries - test9_feed, // feed - test9_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test10 - -struct feed_values test10_feed[] = { - { 500000, 1000 }, - { 600000, 1000 + 600 }, - { 200000, 1600 + 200 }, - { 1000000, 1800 + 1000 }, - { 200000, 2800 + 200 }, - { 2000000, 3000 + 2000 }, - { 600000, 5000 + 600 }, - { 400000, 5600 + 400 }, - { 900000, 6000 + 900 }, - { 1000000, 6900 + 1000 }, -}; - -calculated_number test10_results[] = { - 1000, 1000, 1000, 1000, 1000, 1000, 1000 -}; - -struct test test10 = { - "test10", // name - "test incremental values updated in short and long durations", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 7, // result entries - test10_feed, // feed - test10_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test11 - -struct feed_values test11_feed[] = { - { 0, 10 }, - { 1000000, 20 }, - { 1000000, 30 }, - { 1000000, 40 }, - { 1000000, 50 }, - { 1000000, 60 }, - { 1000000, 70 }, - { 1000000, 80 }, - { 1000000, 90 }, - { 1000000, 100 }, -}; - -collected_number test11_feed2[] = { - 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 -}; - -calculated_number test11_results[] = { - 50, 50, 50, 50, 50, 50, 50, 50, 50 -}; - -calculated_number test11_results2[] = { - 50, 50, 50, 50, 50, 50, 50, 50, 50 -}; - -struct test test11 = { - "test11", // name - "test percentage-of-incremental-row with equal values", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm - 10, // feed entries - 9, // result entries - test11_feed, // feed - test11_results, // results - test11_feed2, // feed2 - test11_results2 // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test12 - -struct feed_values test12_feed[] = { - { 0, 10 }, - { 1000000, 20 }, - { 1000000, 30 }, - { 1000000, 40 }, - { 1000000, 50 }, - { 1000000, 60 }, - { 1000000, 70 }, - { 1000000, 80 }, - { 1000000, 90 }, - { 1000000, 100 }, -}; - -collected_number test12_feed2[] = { - 10*3, 20*3, 30*3, 40*3, 50*3, 60*3, 70*3, 80*3, 90*3, 100*3 -}; - -calculated_number test12_results[] = { - 25, 25, 25, 25, 25, 25, 25, 25, 25 -}; - -calculated_number test12_results2[] = { - 75, 75, 75, 75, 75, 75, 75, 75, 75 -}; - -struct test test12 = { - "test12", // name - "test percentage-of-incremental-row with equal values", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm - 10, // feed entries - 9, // result entries - test12_feed, // feed - test12_results, // results - test12_feed2, // feed2 - test12_results2 // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test13 - -struct feed_values test13_feed[] = { - { 500000, 1000 }, - { 600000, 1000 + 600 }, - { 200000, 1600 + 200 }, - { 1000000, 1800 + 1000 }, - { 200000, 2800 + 200 }, - { 2000000, 3000 + 2000 }, - { 600000, 5000 + 600 }, - { 
400000, 5600 + 400 }, - { 900000, 6000 + 900 }, - { 1000000, 6900 + 1000 }, -}; - -calculated_number test13_results[] = { - 83.3333300, 100, 100, 100, 100, 100, 100 -}; - -struct test test13 = { - "test13", // name - "test incremental values updated in short and long durations", - 1, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm - 10, // feed entries - 7, // result entries - test13_feed, // feed - test13_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test14 - -struct feed_values test14_feed[] = { - { 0, 0x015397dc42151c41ULL }, - { 13573000, 0x015397e612e3ff5dULL }, - { 29969000, 0x015397f905ecdaa8ULL }, - { 29958000, 0x0153980c2a6cb5e4ULL }, - { 30054000, 0x0153981f4032fb83ULL }, - { 34952000, 0x015398355efadaccULL }, - { 25046000, 0x01539845ba4b09f8ULL }, - { 29947000, 0x0153985948bf381dULL }, - { 30054000, 0x0153986c5b9c27e2ULL }, - { 29942000, 0x0153987f888982d0ULL }, -}; - -calculated_number test14_results[] = { - 23.1383300, 21.8515600, 21.8804600, 21.7788000, 22.0112200, 22.4386100, 22.0906100, 21.9150800 -}; - -struct test test14 = { - "test14", // name - "issue #981 with real data", - 30, // update_every - 8, // multiplier - 1000000000, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 8, // result entries - test14_feed, // feed - test14_results, // results - NULL, // feed2 - NULL // results2 -}; - -struct feed_values test14b_feed[] = { - { 0, 0 }, - { 13573000, 13573000 }, - { 29969000, 13573000 + 29969000 }, - { 29958000, 13573000 + 29969000 + 29958000 }, - { 30054000, 13573000 + 29969000 + 29958000 + 30054000 }, - { 34952000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 }, - { 25046000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 }, - { 29947000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 }, - { 30054000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 }, - { 29942000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 + 29942000 }, -}; - -calculated_number test14b_results[] = { - 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000 -}; - -struct test test14b = { - "test14b", // name - "issue #981 with dummy data", - 30, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 8, // result entries - test14b_feed, // feed - test14b_results, // results - NULL, // feed2 - NULL // results2 -}; - -struct feed_values test14c_feed[] = { - { 29000000, 29000000 }, - { 1000000, 29000000 + 1000000 }, - { 30000000, 29000000 + 1000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, - { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 }, -}; - -calculated_number test14c_results[] = { - 1000000, 1000000, 1000000, 1000000, 
1000000, 1000000, 1000000, 1000000, 1000000 -}; - -struct test test14c = { - "test14c", // name - "issue #981 with dummy data, checking for late start", - 30, // update_every - 1, // multiplier - 1, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 9, // result entries - test14c_feed, // feed - test14c_results, // results - NULL, // feed2 - NULL // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- -// test15 - -struct feed_values test15_feed[] = { - { 0, 1068066388 }, - { 1008752, 1068822698 }, - { 993809, 1069573072 }, - { 995911, 1070324135 }, - { 1014562, 1071078166 }, - { 994684, 1071831349 }, - { 993128, 1072235739 }, - { 1010332, 1072958871 }, - { 1003394, 1073707019 }, - { 995201, 1074460255 }, -}; - -collected_number test15_feed2[] = { - 178825286, 178825286, 178825286, 178825286, 178825498, 178825498, 179165652, 179202964, 179203282, 179204130 -}; - -calculated_number test15_results[] = { - 5857.4080000, 5898.4540000, 5891.6590000, 5806.3160000, 5914.2640000, 3202.2630000, 5589.6560000, 5822.5260000, 5911.7520000 -}; - -calculated_number test15_results2[] = { - 0.0000000, 0.0000000, 0.0024944, 1.6324779, 0.0212777, 2655.1890000, 290.5387000, 5.6733610, 6.5960220 -}; - -struct test test15 = { - "test15", // name - "test incremental with 2 dimensions", - 1, // update_every - 8, // multiplier - 1024, // divisor - RRD_ALGORITHM_INCREMENTAL, // algorithm - 10, // feed entries - 9, // result entries - test15_feed, // feed - test15_results, // results - test15_feed2, // feed2 - test15_results2 // results2 -}; - -// -------------------------------------------------------------------------------------------------------------------- - -int run_test(struct test *test) -{ - fprintf(stderr, "\nRunning test '%s':\n%s\n", test->name, test->description); - - default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; - default_rrd_update_every = test->update_every; - - char name[101]; - snprintfz(name, 100, "unittest-%s", test->name); - - // create the chart - RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1 - , test->update_every, RRDSET_TYPE_LINE); - RRDDIM *rd = rrddim_add(st, "dim1", NULL, test->multiplier, test->divisor, test->algorithm); - - RRDDIM *rd2 = NULL; - if(test->feed2) - rd2 = rrddim_add(st, "dim2", NULL, test->multiplier, test->divisor, test->algorithm); - - rrdset_flag_set(st, RRDSET_FLAG_DEBUG); - - // feed it with the test data - time_t time_now = 0, time_start = now_realtime_sec(); - unsigned long c; - collected_number last = 0; - for(c = 0; c < test->feed_entries; c++) { - if(debug_flags) fprintf(stderr, "\n\n"); - - if(c) { - time_now += test->feed[c].microseconds; - fprintf(stderr, " > %s: feeding position %lu, after %0.3f seconds (%0.3f seconds from start), delta " CALCULATED_NUMBER_FORMAT ", rate " CALCULATED_NUMBER_FORMAT "\n", - test->name, c+1, - (float)test->feed[c].microseconds / 1000000.0, - (float)time_now / 1000000.0, - ((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor, - (((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor) / (calculated_number)test->feed[c].microseconds * (calculated_number)1000000); - - // rrdset_next_usec_unfiltered(st, test->feed[c].microseconds); - st->usec_since_last_update = 
test->feed[c].microseconds; - } - else { - fprintf(stderr, " > %s: feeding position %lu\n", test->name, c+1); - } - - fprintf(stderr, " >> %s with value " COLLECTED_NUMBER_FORMAT "\n", rd->name, test->feed[c].value); - rrddim_set(st, "dim1", test->feed[c].value); - last = test->feed[c].value; - - if(rd2) { - fprintf(stderr, " >> %s with value " COLLECTED_NUMBER_FORMAT "\n", rd2->name, test->feed2[c]); - rrddim_set(st, "dim2", test->feed2[c]); - } - - rrdset_done(st); - - // align the first entry to second boundary - if(!c) { - fprintf(stderr, " > %s: fixing first collection time to be %llu microseconds to second boundary\n", test->name, test->feed[c].microseconds); - rd->last_collected_time.tv_usec = st->last_collected_time.tv_usec = st->last_updated.tv_usec = test->feed[c].microseconds; - // time_start = st->last_collected_time.tv_sec; - } - } - - // check the result - int errors = 0; - - if(st->counter != test->result_entries) { - fprintf(stderr, " %s stored %zu entries, but we were expecting %lu, ### E R R O R ###\n", test->name, st->counter, test->result_entries); - errors++; - } - - unsigned long max = (st->counter < test->result_entries)?st->counter:test->result_entries; - for(c = 0 ; c < max ; c++) { - calculated_number v = unpack_storage_number(rd->values[c]); - calculated_number n = test->results[c]; - int same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0; - fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n", - test->name, rd->name, c+1, - (rrdset_first_entry_t(st) + c * st->update_every) - time_start, - n, v, (same)?"OK":"### E R R O R ###"); - - if(!same) errors++; - - if(rd2) { - v = unpack_storage_number(rd2->values[c]); - n = test->results2[c]; - same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0; - fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n", - test->name, rd2->name, c+1, - (rrdset_first_entry_t(st) + c * st->update_every) - time_start, - n, v, (same)?"OK":"### E R R O R ###"); - if(!same) errors++; - } - } - - return errors; -} - -static int test_variable_renames(void) { - fprintf(stderr, "Creating chart\n"); - RRDSET *st = rrdset_create_localhost("chart", "ID", NULL, "family", "context", "Unit Testing", "a value", "unittest", NULL, 1, 1, RRDSET_TYPE_LINE); - fprintf(stderr, "Created chart with id '%s', name '%s'\n", st->id, st->name); - - fprintf(stderr, "Creating dimension DIM1\n"); - RRDDIM *rd1 = rrddim_add(st, "DIM1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - fprintf(stderr, "Created dimension with id '%s', name '%s'\n", rd1->id, rd1->name); - - fprintf(stderr, "Creating dimension DIM2\n"); - RRDDIM *rd2 = rrddim_add(st, "DIM2", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - fprintf(stderr, "Created dimension with id '%s', name '%s'\n", rd2->id, rd2->name); - - fprintf(stderr, "Renaming chart to CHARTNAME1\n"); - rrdset_set_name(st, "CHARTNAME1"); - fprintf(stderr, "Renamed chart with id '%s' to name '%s'\n", st->id, st->name); - - fprintf(stderr, "Renaming chart to CHARTNAME2\n"); - rrdset_set_name(st, "CHARTNAME2"); - fprintf(stderr, "Renamed chart with id '%s' to name '%s'\n", st->id, st->name); - - fprintf(stderr, "Renaming dimension DIM1 to DIM1NAME1\n"); - rrddim_set_name(st, rd1, "DIM1NAME1"); - fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd1->id, 
rd1->name); - - fprintf(stderr, "Renaming dimension DIM1 to DIM1NAME2\n"); - rrddim_set_name(st, rd1, "DIM1NAME2"); - fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd1->id, rd1->name); - - fprintf(stderr, "Renaming dimension DIM2 to DIM2NAME1\n"); - rrddim_set_name(st, rd2, "DIM2NAME1"); - fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd2->id, rd2->name); - - fprintf(stderr, "Renaming dimension DIM2 to DIM2NAME2\n"); - rrddim_set_name(st, rd2, "DIM2NAME2"); - fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd2->id, rd2->name); - - BUFFER *buf = buffer_create(1); - health_api_v1_chart_variables2json(st, buf); - fprintf(stderr, "%s", buffer_tostring(buf)); - buffer_free(buf); - return 1; -} - -int run_all_mockup_tests(void) -{ - if(check_number_printing()) - return 1; - - if(check_rrdcalc_comparisons()) - return 1; - - if(!test_variable_renames()) - return 1; - - if(run_test(&test1)) - return 1; - - if(run_test(&test2)) - return 1; - - if(run_test(&test3)) - return 1; - - if(run_test(&test4)) - return 1; - - if(run_test(&test5)) - return 1; - - if(run_test(&test6)) - return 1; - - if(run_test(&test7)) - return 1; - - if(run_test(&test8)) - return 1; - - if(run_test(&test9)) - return 1; - - if(run_test(&test10)) - return 1; - - if(run_test(&test11)) - return 1; - - if(run_test(&test12)) - return 1; - - if(run_test(&test13)) - return 1; - - if(run_test(&test14)) - return 1; - - if(run_test(&test14b)) - return 1; - - if(run_test(&test14c)) - return 1; - - if(run_test(&test15)) - return 1; - - - - return 0; -} - -int unit_test(long delay, long shift) -{ - static int repeat = 0; - repeat++; - - char name[101]; - snprintfz(name, 100, "unittest-%d-%ld-%ld", repeat, delay, shift); - - //debug_flags = 0xffffffff; - default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; - default_rrd_update_every = 1; - - int do_abs = 1; - int do_inc = 1; - int do_abst = 0; - int do_absi = 0; - - RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1, 1 - , RRDSET_TYPE_LINE); - rrdset_flag_set(st, RRDSET_FLAG_DEBUG); - - RRDDIM *rdabs = NULL; - RRDDIM *rdinc = NULL; - RRDDIM *rdabst = NULL; - RRDDIM *rdabsi = NULL; - - if(do_abs) rdabs = rrddim_add(st, "absolute", "absolute", 1, 1, RRD_ALGORITHM_ABSOLUTE); - if(do_inc) rdinc = rrddim_add(st, "incremental", "incremental", 1, 1, RRD_ALGORITHM_INCREMENTAL); - if(do_abst) rdabst = rrddim_add(st, "percentage-of-absolute-row", "percentage-of-absolute-row", 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL); - if(do_absi) rdabsi = rrddim_add(st, "percentage-of-incremental-row", "percentage-of-incremental-row", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - - long increment = 1000; - collected_number i = 0; - - unsigned long c, dimensions = 0; - RRDDIM *rd; - for(rd = st->dimensions ; rd ; rd = rd->next) dimensions++; - - for(c = 0; c < 20 ;c++) { - i += increment; - - fprintf(stderr, "\n\nLOOP = %lu, DELAY = %ld, VALUE = " COLLECTED_NUMBER_FORMAT "\n", c, delay, i); - if(c) { - // rrdset_next_usec_unfiltered(st, delay); - st->usec_since_last_update = delay; - } - if(do_abs) rrddim_set(st, "absolute", i); - if(do_inc) rrddim_set(st, "incremental", i); - if(do_abst) rrddim_set(st, "percentage-of-absolute-row", i); - if(do_absi) rrddim_set(st, "percentage-of-incremental-row", i); - - if(!c) { - now_realtime_timeval(&st->last_collected_time); - st->last_collected_time.tv_usec = shift; - } - - // prevent it from deleting the dimensions - for(rd = st->dimensions ; rd ; rd = 
rd->next) - rd->last_collected_time.tv_sec = st->last_collected_time.tv_sec; - - rrdset_done(st); - } - - unsigned long oincrement = increment; - increment = increment * st->update_every * 1000000 / delay; - fprintf(stderr, "\n\nORIGINAL INCREMENT: %lu, INCREMENT %ld, DELAY %ld, SHIFT %ld\n", oincrement * 10, increment * 10, delay, shift); - - int ret = 0; - storage_number sn; - calculated_number cn, v; - for(c = 0 ; c < st->counter ; c++) { - fprintf(stderr, "\nPOSITION: c = %lu, EXPECTED VALUE %lu\n", c, (oincrement + c * increment + increment * (1000000 - shift) / 1000000 )* 10); - - for(rd = st->dimensions ; rd ; rd = rd->next) { - sn = rd->values[c]; - cn = unpack_storage_number(sn); - fprintf(stderr, "\t %s " CALCULATED_NUMBER_FORMAT " (PACKED AS " STORAGE_NUMBER_FORMAT ") -> ", rd->id, cn, sn); - - if(rd == rdabs) v = - ( oincrement - // + (increment * (1000000 - shift) / 1000000) - + (c + 1) * increment - ); - - else if(rd == rdinc) v = (c?(increment):(increment * (1000000 - shift) / 1000000)); - else if(rd == rdabst) v = oincrement / dimensions / 10; - else if(rd == rdabsi) v = oincrement / dimensions / 10; - else v = 0; - - if(v == cn) fprintf(stderr, "passed.\n"); - else { - fprintf(stderr, "ERROR! (expected " CALCULATED_NUMBER_FORMAT ")\n", v); - ret = 1; - } - } - } - - if(ret) - fprintf(stderr, "\n\nUNIT TEST(%ld, %ld) FAILED\n\n", delay, shift); - - return ret; -} diff --git a/src/unit_test.h b/src/unit_test.h deleted file mode 100644 index 68ed61fcb..000000000 --- a/src/unit_test.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef NETDATA_UNIT_TEST_H -#define NETDATA_UNIT_TEST_H 1 - -extern int unit_test_storage(void); -extern int unit_test(long delay, long shift); -extern int run_all_mockup_tests(void); -extern int unit_test_str2ld(void); -extern int unit_test_buffer(void); - -#endif /* NETDATA_UNIT_TEST_H */ diff --git a/src/url.c b/src/url.c deleted file mode 100644 index 6be4d9648..000000000 --- a/src/url.c +++ /dev/null @@ -1,77 +0,0 @@ -#include "common.h" - -// ---------------------------------------------------------------------------- -// URL encode / decode -// code from: http://www.geekhideout.com/urlcode.shtml - -/* Converts a hex character to its integer value */ -char from_hex(char ch) { - return (char)(isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10); -} - -/* Converts an integer value to its hex character*/ -char to_hex(char code) { - static char hex[] = "0123456789abcdef"; - return hex[code & 15]; -} - -/* Returns a url-encoded version of str */ -/* IMPORTANT: be sure to free() the returned string after use */ -char *url_encode(char *str) { - char *buf, *pbuf; - - pbuf = buf = mallocz(strlen(str) * 3 + 1); - - while (*str) { - if (isalnum(*str) || *str == '-' || *str == '_' || *str == '.' 
|| *str == '~') - *pbuf++ = *str; - - else if (*str == ' ') - *pbuf++ = '+'; - - else - *pbuf++ = '%', *pbuf++ = to_hex(*str >> 4), *pbuf++ = to_hex(*str & 15); - - str++; - } - *pbuf = '\0'; - - pbuf = strdupz(buf); - freez(buf); - return pbuf; -} - -/* Returns a url-decoded version of str */ -/* IMPORTANT: be sure to free() the returned string after use */ -char *url_decode(char *str) { - size_t size = strlen(str) + 1; - - char *buf = mallocz(size); - return url_decode_r(buf, str, size); -} - -char *url_decode_r(char *to, char *url, size_t size) { - char *s = url, // source - *d = to, // destination - *e = &to[size - 1]; // destination end - - while(*s && d < e) { - if(unlikely(*s == '%')) { - if(likely(s[1] && s[2])) { - *d++ = from_hex(s[1]) << 4 | from_hex(s[2]); - s += 2; - } - } - else if(unlikely(*s == '+')) - *d++ = ' '; - - else - *d++ = *s; - - s++; - } - - *d = '\0'; - - return to; -} diff --git a/src/url.h b/src/url.h deleted file mode 100644 index fa44d49a8..000000000 --- a/src/url.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef NETDATA_URL_H -#define NETDATA_URL_H 1 - -// ---------------------------------------------------------------------------- -// URL encode / decode -// code from: http://www.geekhideout.com/urlcode.shtml - -/* Converts a hex character to its integer value */ -extern char from_hex(char ch); - -/* Converts an integer value to its hex character*/ -extern char to_hex(char code); - -/* Returns a url-encoded version of str */ -/* IMPORTANT: be sure to free() the returned string after use */ -extern char *url_encode(char *str); - -/* Returns a url-decoded version of str */ -/* IMPORTANT: be sure to free() the returned string after use */ -extern char *url_decode(char *str); - -extern char *url_decode_r(char *to, char *url, size_t size); - -#endif /* NETDATA_URL_H */ diff --git a/src/web_api_old.c b/src/web_api_old.c deleted file mode 100644 index 373e7e9f8..000000000 --- a/src/web_api_old.c +++ /dev/null @@ -1,237 +0,0 @@ -#include "common.h" - -int web_client_api_old_data_request(RRDHOST *host, struct web_client *w, char *url, int datasource_type) { - if(!url || !*url) { - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "Incomplete request."); - return 400; - } - - RRDSET *st = NULL; - - char *args = strchr(url, '?'); - if(args) { - *args='\0'; - args = &args[1]; - } - - // get the name of the data to show - char *tok = mystrsep(&url, "/"); - if(!tok) tok = ""; - - // do we have such a data set? - if(*tok) { - debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok); - st = rrdset_find_byname(host, tok); - if(!st) st = rrdset_find(host, tok); - } - - if(!st) { - // we don't have it - // try to send a file with that name - buffer_flush(w->response.data); - return(mysendfile(w, tok)); - } - - // we have it - debug(D_WEB_CLIENT, "%llu: Found RRD data with name '%s'.", w->id, tok); - - // how many entries does the client want? 
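// (editor's annotation, not part of the upstream source: the deleted "old"
// data API read its parameters as positional path segments, consumed one by
// one with mystrsep() below. A hypothetical request under that scheme, with
// the chart name and values chosen purely for illustration, would be:
//
//     /system.cpu/600/1/average/0/0/nonzero
//
// i.e. chart, lines, group count, group method (average|max|sum), after,
// before, and an optional literal "nonzero" flag, in that order.)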
- int lines = (int)st->entries; - int group_count = 1; - time_t after = 0, before = 0; - int group_method = GROUP_AVERAGE; - int nonzero = 0; - - if(url) { - // parse the lines required - tok = mystrsep(&url, "/"); - if(tok) lines = str2i(tok); - if(lines < 1) lines = 1; - } - if(url) { - // parse the group count required - tok = mystrsep(&url, "/"); - if(tok && *tok) group_count = str2i(tok); - if(group_count < 1) group_count = 1; - //if(group_count > save_history / 20) group_count = save_history / 20; - } - if(url) { - // parse the grouping method required - tok = mystrsep(&url, "/"); - if(tok && *tok) { - if(strcmp(tok, "max") == 0) group_method = GROUP_MAX; - else if(strcmp(tok, "average") == 0) group_method = GROUP_AVERAGE; - else if(strcmp(tok, "sum") == 0) group_method = GROUP_SUM; - else debug(D_WEB_CLIENT, "%llu: Unknown group method '%s'", w->id, tok); - } - } - if(url) { - // parse after time - tok = mystrsep(&url, "/"); - if(tok && *tok) after = str2ul(tok); - if(after < 0) after = 0; - } - if(url) { - // parse before time - tok = mystrsep(&url, "/"); - if(tok && *tok) before = str2ul(tok); - if(before < 0) before = 0; - } - if(url) { - // parse nonzero - tok = mystrsep(&url, "/"); - if(tok && *tok && strcmp(tok, "nonzero") == 0) nonzero = 1; - } - - w->response.data->contenttype = CT_APPLICATION_JSON; - buffer_flush(w->response.data); - - char *google_version = "0.6"; - char *google_reqId = "0"; - char *google_sig = "0"; - char *google_out = "json"; - char *google_responseHandler = "google.visualization.Query.setResponse"; - char *google_outFileName = NULL; - time_t last_timestamp_in_data = 0; - if(datasource_type == DATASOURCE_DATATABLE_JSON || datasource_type == DATASOURCE_DATATABLE_JSONP) { - - w->response.data->contenttype = CT_APPLICATION_X_JAVASCRIPT; - - while(args) { - tok = mystrsep(&args, "&"); - if(tok && *tok) { - char *name = mystrsep(&tok, "="); - if(name && *name && strcmp(name, "tqx") == 0) { - char *key = mystrsep(&tok, ":"); - char *value = mystrsep(&tok, ";"); - if(key && value && *key && *value) { - if(strcmp(key, "version") == 0) - google_version = value; - - else if(strcmp(key, "reqId") == 0) - google_reqId = value; - - else if(strcmp(key, "sig") == 0) - google_sig = value; - - else if(strcmp(key, "out") == 0) - google_out = value; - - else if(strcmp(key, "responseHandler") == 0) - google_responseHandler = value; - - else if(strcmp(key, "outFileName") == 0) - google_outFileName = value; - } - } - } - } - - debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", - w->id, google_version, google_reqId, google_sig, google_out, google_responseHandler, google_outFileName - ); - - if(datasource_type == DATASOURCE_DATATABLE_JSONP) { - last_timestamp_in_data = strtoul(google_sig, NULL, 0); - - // check the client wants json - if(strcmp(google_out, "json") != 0) { - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'invalid_query',message:'output format is not supported',detailed_message:'the format %s requested is not supported by netdata.'}]});", - google_responseHandler, google_version, google_reqId, google_out); - return 200; - } - } - } - - if(datasource_type == DATASOURCE_DATATABLE_JSONP) { - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'ok',sig:'%ld',table:", - google_responseHandler, google_version, google_reqId, st->last_updated.tv_sec); - } - - debug(D_WEB_CLIENT_ACCESS, "%llu: Sending RRD data 
'%s' (id %s, %d lines, %d group, %d group_method, %ld after, %ld before).", - w->id, st->name, st->id, lines, group_count, group_method, after, before); - - time_t timestamp_in_data = rrdset2json_api_old(datasource_type, st, w->response.data, lines, group_count - , group_method, (unsigned long) after, (unsigned long) before - , nonzero); - - if(datasource_type == DATASOURCE_DATATABLE_JSONP) { - if(timestamp_in_data > last_timestamp_in_data) - buffer_strcat(w->response.data, "});"); - - else { - // the client already has the latest data - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", - google_responseHandler, google_version, google_reqId); - } - } - - return 200; -} - -inline int web_client_api_old_data_request_json(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_old_data_request(host, w, url, DATASOURCE_JSON); -} - -inline int web_client_api_old_data_request_jsonp(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_old_data_request(host, w, url, DATASOURCE_DATATABLE_JSONP); -} - -inline int web_client_api_old_graph_request(RRDHOST *host, struct web_client *w, char *url) { - // get the name of the data to show - char *tok = mystrsep(&url, "/?&"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok); - - // do we have such a data set? - RRDSET *st = rrdset_find_byname(host, tok); - if(!st) st = rrdset_find(host, tok); - if(!st) { - // we don't have it - // try to send a file with that name - buffer_flush(w->response.data); - return mysendfile(w, tok); - } - st->last_accessed_time = now_realtime_sec(); - - debug(D_WEB_CLIENT_ACCESS, "%llu: Sending %s.json of RRD_STATS...", w->id, st->name); - w->response.data->contenttype = CT_APPLICATION_JSON; - buffer_flush(w->response.data); - rrd_graph2json_api_old(st, url, w->response.data); - return 200; - } - - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Graph name?\r\n"); - return 400; -} - -inline int web_client_api_old_list_request(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - - buffer_flush(w->response.data); - RRDSET *st; - - rrdhost_rdlock(host); - rrdset_foreach_read(st, host) { - if(rrdset_is_available_for_viewers(st)) - buffer_sprintf(w->response.data, "%s\n", st->name); - } - rrdhost_unlock(host); - - return 200; -} - -inline int web_client_api_old_all_json(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - - w->response.data->contenttype = CT_APPLICATION_JSON; - buffer_flush(w->response.data); - rrd_all2json_api_old(host, w->response.data); - return 200; -} diff --git a/src/web_api_old.h b/src/web_api_old.h deleted file mode 100644 index dff48c2f3..000000000 --- a/src/web_api_old.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef NETDATA_WEB_API_OLD_H -#define NETDATA_WEB_API_OLD_H - -#include "common.h" - -extern int web_client_api_old_data_request(RRDHOST *host, struct web_client *w, char *url, int datasource_type); -extern int web_client_api_old_data_request_json(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_old_data_request_jsonp(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_old_graph_request(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_old_list_request(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_old_all_json(RRDHOST *host, struct web_client *w, 
char *url); - -#endif //NETDATA_WEB_API_OLD_H diff --git a/src/web_api_v1.c b/src/web_api_v1.c deleted file mode 100644 index c32660c81..000000000 --- a/src/web_api_v1.c +++ /dev/null @@ -1,1017 +0,0 @@ -#include "common.h" - -static struct { - const char *name; - uint32_t hash; - int value; -} api_v1_data_groups[] = { - { "average" , 0 , GROUP_AVERAGE} - , {"min" , 0 , GROUP_MIN} - , {"max" , 0 , GROUP_MAX} - , {"sum" , 0 , GROUP_SUM} - , {"incremental_sum", 0 , GROUP_INCREMENTAL_SUM} - , {"incremental-sum", 0 , GROUP_INCREMENTAL_SUM} - , { NULL, 0, 0} -}; - -static struct { - const char *name; - uint32_t hash; - uint32_t value; -} api_v1_data_options[] = { - { "nonzero" , 0 , RRDR_OPTION_NONZERO} - , {"flip" , 0 , RRDR_OPTION_REVERSED} - , {"reversed" , 0 , RRDR_OPTION_REVERSED} - , {"reverse" , 0 , RRDR_OPTION_REVERSED} - , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP} - , {"min2max" , 0 , RRDR_OPTION_MIN2MAX} - , {"ms" , 0 , RRDR_OPTION_MILLISECONDS} - , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS} - , {"abs" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE} - , {"display_absolute", 0 , RRDR_OPTION_DISPLAY_ABS} - , {"display-absolute", 0 , RRDR_OPTION_DISPLAY_ABS} - , {"seconds" , 0 , RRDR_OPTION_SECONDS} - , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO} - , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS} - , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON} - , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON} - , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE} - , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED} - , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS} - , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS} - , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES} - , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} - , { NULL, 0, 0} -}; - -static struct { - const char *name; - uint32_t hash; - uint32_t value; -} api_v1_data_formats[] = { - { DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON} - , {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP} - , {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON} - , {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP} - , {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV} - , {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV} - , {DATASOURCE_FORMAT_TSV , 0 , DATASOURCE_TSV} - , {"tsv-excel" , 0 , DATASOURCE_TSV} - , {DATASOURCE_FORMAT_HTML , 0 , DATASOURCE_HTML} - , {DATASOURCE_FORMAT_JS_ARRAY , 0 , DATASOURCE_JS_ARRAY} - , {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA} - , {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY} - , { NULL, 0, 0} -}; - -static struct { - const char *name; - uint32_t hash; - uint32_t value; -} api_v1_data_google_formats[] = { - // this is not error - when google requests json, it expects javascript - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat - { "json" , 0 , DATASOURCE_DATATABLE_JSONP} - , {"html" , 0 , DATASOURCE_HTML} - , {"csv" , 0 , DATASOURCE_CSV} - , {"tsv-excel", 0 , DATASOURCE_TSV} - , { NULL, 0, 0} -}; - -void web_client_api_v1_init(void) { - int i; - - for(i = 0; api_v1_data_groups[i].name ; i++) - api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name); - - for(i = 0; api_v1_data_options[i].name ; i++) - api_v1_data_options[i].hash = simple_hash(api_v1_data_options[i].name); - - for(i = 0; api_v1_data_formats[i].name ; i++) - api_v1_data_formats[i].hash = simple_hash(api_v1_data_formats[i].name); - - for(i = 0; 
api_v1_data_google_formats[i].name ; i++) - api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name); -} - -inline int web_client_api_request_v1_data_group(char *name, int def) { - int i; - - uint32_t hash = simple_hash(name); - for(i = 0; api_v1_data_groups[i].name ; i++) - if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name))) - return api_v1_data_groups[i].value; - - return def; -} - -inline uint32_t web_client_api_request_v1_data_options(char *o) { - uint32_t ret = 0x00000000; - char *tok; - - while(o && *o && (tok = mystrsep(&o, ", |"))) { - if(!*tok) continue; - - uint32_t hash = simple_hash(tok); - int i; - for(i = 0; api_v1_data_options[i].name ; i++) { - if (unlikely(hash == api_v1_data_options[i].hash && !strcmp(tok, api_v1_data_options[i].name))) { - ret |= api_v1_data_options[i].value; - break; - } - } - } - - return ret; -} - -inline uint32_t web_client_api_request_v1_data_format(char *name) { - uint32_t hash = simple_hash(name); - int i; - - for(i = 0; api_v1_data_formats[i].name ; i++) { - if (unlikely(hash == api_v1_data_formats[i].hash && !strcmp(name, api_v1_data_formats[i].name))) { - return api_v1_data_formats[i].value; - } - } - - return DATASOURCE_JSON; -} - -inline uint32_t web_client_api_request_v1_data_google_format(char *name) { - uint32_t hash = simple_hash(name); - int i; - - for(i = 0; api_v1_data_google_formats[i].name ; i++) { - if (unlikely(hash == api_v1_data_google_formats[i].hash && !strcmp(name, api_v1_data_google_formats[i].name))) { - return api_v1_data_google_formats[i].value; - } - } - - return DATASOURCE_JSON; -} - - -inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url) { - int all = 0; - - while(url) { - char *value = mystrsep(&url, "?&"); - if (!value || !*value) continue; - - if(!strcmp(value, "all")) all = 1; - else if(!strcmp(value, "active")) all = 0; - } - - buffer_flush(w->response.data); - w->response.data->contenttype = CT_APPLICATION_JSON; - health_alarms2json(host, w->response.data, all); - return 200; -} - -inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) { - uint32_t after = 0; - - while(url) { - char *value = mystrsep(&url, "?&"); - if (!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if(!strcmp(name, "after")) after = (uint32_t)strtoul(value, NULL, 0); - } - - buffer_flush(w->response.data); - w->response.data->contenttype = CT_APPLICATION_JSON; - health_alarm_log2json(host, w->response.data, after); - return 200; -} - -inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) { - int ret = 400; - char *chart = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = mystrsep(&url, "?&"); - if(!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - //else { - /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name); - // goto cleanup; - //} - } - - if(!chart || !*chart) { - buffer_sprintf(w->response.data, "No chart id is given at the request."); - goto cleanup; - } - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - 
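// (editor's annotation: the chart id below comes straight from the request
// query string, so the 404 body echoes it through
// buffer_strcat_htmlescape(), presumably to keep user-controlled input from
// reaching the HTML response unescaped.)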
buffer_strcat(w->response.data, "Chart is not found: "); - buffer_strcat_htmlescape(w->response.data, chart); - ret = 404; - goto cleanup; - } - - w->response.data->contenttype = CT_APPLICATION_JSON; - st->last_accessed_time = now_realtime_sec(); - callback(st, w->response.data); - return 200; - - cleanup: - return ret; -} - -inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json); -} - -inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - - buffer_flush(w->response.data); - w->response.data->contenttype = CT_APPLICATION_JSON; - rrd_stats_api_v1_charts(host, w->response.data); - return 200; -} - -inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) { - int format = ALLMETRICS_SHELL; - int help = 0, types = 0, timestamps = 1, names = backend_send_names; // prometheus options - const char *prometheus_server = w->client_ip; - uint32_t prometheus_options = backend_options; - const char *prometheus_prefix = backend_prefix; - - while(url) { - char *value = mystrsep(&url, "?&"); - if (!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if(!strcmp(name, "format")) { - if(!strcmp(value, ALLMETRICS_FORMAT_SHELL)) - format = ALLMETRICS_SHELL; - else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS)) - format = ALLMETRICS_PROMETHEUS; - else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) - format = ALLMETRICS_PROMETHEUS_ALL_HOSTS; - else if(!strcmp(value, ALLMETRICS_FORMAT_JSON)) - format = ALLMETRICS_JSON; - else - format = 0; - } - else if(!strcmp(name, "help")) { - if(!strcmp(value, "yes")) - help = 1; - else - help = 0; - } - else if(!strcmp(name, "types")) { - if(!strcmp(value, "yes")) - types = 1; - else - types = 0; - } - else if(!strcmp(name, "names")) { - if(!strcmp(value, "yes")) - names = 1; - else - names = 0; - } - else if(!strcmp(name, "timestamps")) { - if(!strcmp(value, "yes")) - timestamps = 1; - else - timestamps = 0; - } - else if(!strcmp(name, "server")) { - prometheus_server = value; - } - else if(!strcmp(name, "prefix")) { - prometheus_prefix = value; - } - else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) { - prometheus_options = backend_parse_data_source(value, prometheus_options); - } - } - - buffer_flush(w->response.data); - buffer_no_cacheable(w->response.data); - - switch(format) { - case ALLMETRICS_JSON: - w->response.data->contenttype = CT_APPLICATION_JSON; - rrd_stats_api_v1_charts_allmetrics_json(host, w->response.data); - return 200; - - case ALLMETRICS_SHELL: - w->response.data->contenttype = CT_TEXT_PLAIN; - rrd_stats_api_v1_charts_allmetrics_shell(host, w->response.data); - return 200; - - case ALLMETRICS_PROMETHEUS: - w->response.data->contenttype = CT_PROMETHEUS; - rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(host, w->response.data, prometheus_server, prometheus_prefix, prometheus_options, help, types, names, timestamps); - return 200; - - case ALLMETRICS_PROMETHEUS_ALL_HOSTS: - w->response.data->contenttype = CT_PROMETHEUS; - rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(host, w->response.data, prometheus_server, prometheus_prefix, prometheus_options, help, types, names, 
timestamps); - return 200; - - default: - w->response.data->contenttype = CT_TEXT_PLAIN; - buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported."); - return 400; - } -} - -inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart); -} - -int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) { - int ret = 400; - buffer_flush(w->response.data); - - BUFFER *dimensions = NULL; - - const char *chart = NULL - , *before_str = NULL - , *after_str = NULL - , *points_str = NULL - , *multiply_str = NULL - , *divide_str = NULL - , *label = NULL - , *units = NULL - , *label_color = NULL - , *value_color = NULL - , *refresh_str = NULL - , *precision_str = NULL - , *scale_str = NULL - , *alarm = NULL; - - int group = GROUP_AVERAGE; - uint32_t options = 0x00000000; - - while(url) { - char *value = mystrsep(&url, "/?&"); - if(!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value); - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) - dimensions = buffer_create(100); - - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "group")) { - group = web_client_api_request_v1_data_group(value, GROUP_AVERAGE); - } - else if(!strcmp(name, "options")) { - options |= web_client_api_request_v1_data_options(value); - } - else if(!strcmp(name, "label")) label = value; - else if(!strcmp(name, "units")) units = value; - else if(!strcmp(name, "label_color")) label_color = value; - else if(!strcmp(name, "value_color")) value_color = value; - else if(!strcmp(name, "multiply")) multiply_str = value; - else if(!strcmp(name, "divide")) divide_str = value; - else if(!strcmp(name, "refresh")) refresh_str = value; - else if(!strcmp(name, "precision")) precision_str = value; - else if(!strcmp(name, "scale")) scale_str = value; - else if(!strcmp(name, "alarm")) alarm = value; - } - - if(!chart || !*chart) { - buffer_no_cacheable(w->response.data); - buffer_sprintf(w->response.data, "No chart id is given at the request."); - goto cleanup; - } - - int scale = (scale_str && *scale_str)?str2i(scale_str):100; - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_no_cacheable(w->response.data); - buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0); - ret = 200; - goto cleanup; - } - st->last_accessed_time = now_realtime_sec(); - - RRDCALC *rc = NULL; - if(alarm) { - rc = rrdcalc_find(st, alarm); - if (!rc) { - buffer_no_cacheable(w->response.data); - buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0); - ret = 200; - goto cleanup; - } - } - - long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1; - long 
long divide = (divide_str && *divide_str )?str2l(divide_str):1; - long long before = (before_str && *before_str )?str2l(before_str):0; - long long after = (after_str && *after_str )?str2l(after_str):-st->update_every; - int points = (points_str && *points_str )?str2i(points_str):1; - int precision = (precision_str && *precision_str)?str2i(precision_str):-1; - - if(!multiply) multiply = 1; - if(!divide) divide = 1; - - int refresh = 0; - if(refresh_str && *refresh_str) { - if(!strcmp(refresh_str, "auto")) { - if(rc) refresh = rc->update_every; - else if(options & RRDR_OPTION_NOT_ALIGNED) - refresh = st->update_every; - else { - refresh = (int)(before - after); - if(refresh < 0) refresh = -refresh; - } - } - else { - refresh = str2i(refresh_str); - if(refresh < 0) refresh = -refresh; - } - } - - if(!label) { - if(alarm) { - char *s = (char *)alarm; - while(*s) { - if(*s == '_') *s = ' '; - s++; - } - label = alarm; - } - else if(dimensions) { - const char *dim = buffer_tostring(dimensions); - if(*dim == '|') dim++; - label = dim; - } - else - label = st->name; - } - if(!units) { - if(alarm) { - if(rc->units) - units = rc->units; - else - units = ""; - } - else if(options & RRDR_OPTION_PERCENTAGE) - units = "%"; - else - units = st->units; - } - - debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'" - , w->id - , chart - , alarm?alarm:"" - , (dimensions)?buffer_tostring(dimensions):"" - , after - , before - , points - , group - , options - ); - - if(rc) { - if (refresh > 0) { - buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); - w->response.data->expires = now_realtime_sec() + refresh; - } - else buffer_no_cacheable(w->response.data); - - if(!value_color) { - switch(rc->status) { - case RRDCALC_STATUS_CRITICAL: - value_color = "red"; - break; - - case RRDCALC_STATUS_WARNING: - value_color = "orange"; - break; - - case RRDCALC_STATUS_CLEAR: - value_color = "brightgreen"; - break; - - case RRDCALC_STATUS_UNDEFINED: - value_color = "lightgrey"; - break; - - case RRDCALC_STATUS_UNINITIALIZED: - value_color = "#000"; - break; - - default: - value_color = "grey"; - break; - } - } - - buffer_svg(w->response.data, - label, - (isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide, - units, - label_color, - value_color, - precision, - scale, - options - ); - ret = 200; - } - else { - time_t latest_timestamp = 0; - int value_is_null = 1; - calculated_number n = NAN; - ret = 500; - - // if the collected value is too old, don't calculate its value - if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above))) - ret = rrdset2value_api_v1(st, w->response.data, &n, (dimensions) ? 
buffer_tostring(dimensions) : NULL - , points, after, before, group, 0, options, NULL, &latest_timestamp, &value_is_null); - - // if the value cannot be calculated, show empty badge - if (ret != 200) { - buffer_no_cacheable(w->response.data); - value_is_null = 1; - n = 0; - ret = 200; - } - else if (refresh > 0) { - buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); - w->response.data->expires = now_realtime_sec() + refresh; - } - else buffer_no_cacheable(w->response.data); - - // render the badge - buffer_svg(w->response.data, - label, - (value_is_null)?NAN:(n * multiply / divide), - units, - label_color, - value_color, - precision, - scale, - options - ); - } - - cleanup: - buffer_free(dimensions); - return ret; -} - -// returns the HTTP code -inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) { - debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url); - - int ret = 400; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - char *google_version = "0.6", - *google_reqId = "0", - *google_sig = "0", - *google_out = "json", - *responseHandler = NULL, - *outFileName = NULL; - - time_t last_timestamp_in_data = 0, google_timestamp = 0; - - char *chart = NULL - , *before_str = NULL - , *after_str = NULL - , *group_time_str = NULL - , *points_str = NULL; - - int group = GROUP_AVERAGE; - uint32_t format = DATASOURCE_JSON; - uint32_t options = 0x00000000; - - while(url) { - char *value = mystrsep(&url, "?&"); - if(!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value); - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "gtime")) group_time_str = value; - else if(!strcmp(name, "group")) { - group = web_client_api_request_v1_data_group(value, GROUP_AVERAGE); - } - else if(!strcmp(name, "format")) { - format = web_client_api_request_v1_data_format(value); - } - else if(!strcmp(name, "options")) { - options |= web_client_api_request_v1_data_options(value); - } - else if(!strcmp(name, "callback")) { - responseHandler = value; - } - else if(!strcmp(name, "filename")) { - outFileName = value; - } - else if(!strcmp(name, "tqx")) { - // parse Google Visualization API options - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source - char *tqx_name, *tqx_value; - - while(value) { - tqx_value = mystrsep(&value, ";"); - if(!tqx_value || !*tqx_value) continue; - - tqx_name = mystrsep(&tqx_value, ":"); - if(!tqx_name || !*tqx_name) continue; - if(!tqx_value || !*tqx_value) continue; - - if(!strcmp(tqx_name, "version")) - google_version = tqx_value; - else if(!strcmp(tqx_name, "reqId")) - google_reqId = tqx_value; - else if(!strcmp(tqx_name, "sig")) { - google_sig = tqx_value; - google_timestamp = strtoul(google_sig, NULL, 0); - } - else if(!strcmp(tqx_name, "out")) { - google_out = tqx_value; - format = 
web_client_api_request_v1_data_google_format(google_out); - } - else if(!strcmp(tqx_name, "responseHandler")) - responseHandler = tqx_value; - else if(!strcmp(tqx_name, "outFileName")) - outFileName = tqx_value; - } - } - } - - if(!chart || !*chart) { - buffer_sprintf(w->response.data, "No chart id is given at the request."); - goto cleanup; - } - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_strcat(w->response.data, "Chart is not found: "); - buffer_strcat_htmlescape(w->response.data, chart); - ret = 404; - goto cleanup; - } - st->last_accessed_time = now_realtime_sec(); - - long long before = (before_str && *before_str)?str2l(before_str):0; - long long after = (after_str && *after_str) ?str2l(after_str):0; - int points = (points_str && *points_str)?str2i(points_str):0; - long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0; - - debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', format '%u', options '0x%08x'" - , w->id - , chart - , (dimensions)?buffer_tostring(dimensions):"" - , after - , before - , points - , group - , format - , options - ); - - if(outFileName && *outFileName) { - buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); - debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); - } - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(responseHandler == NULL) - responseHandler = "google.visualization.Query.setResponse"; - - debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", - w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName - ); - - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'ok',sig:'%ld',table:", - responseHandler, google_version, google_reqId, st->last_updated.tv_sec); - } - else if(format == DATASOURCE_JSONP) { - if(responseHandler == NULL) - responseHandler = "callback"; - - buffer_strcat(w->response.data, responseHandler); - buffer_strcat(w->response.data, "("); - } - - ret = rrdset2anything_api_v1(st, w->response.data, dimensions, format, points, after, before, group, group_time - , options, &last_timestamp_in_data); - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(google_timestamp < last_timestamp_in_data) - buffer_strcat(w->response.data, "});"); - - else { - // the client already has the latest data - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", - responseHandler, google_version, google_reqId); - } - } - else if(format == DATASOURCE_JSONP) - buffer_strcat(w->response.data, ");"); - - cleanup: - buffer_free(dimensions); - return ret; -} - -inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) { - static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0, - hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0, - hash_to = 0 /*, hash_redirects = 0 */; - - if(unlikely(!hash_action)) { - hash_action = simple_hash("action"); - hash_access = simple_hash("access"); - hash_hello = simple_hash("hello"); - hash_delete = simple_hash("delete"); - hash_search = simple_hash("search"); - hash_switch = 
simple_hash("switch"); - hash_machine = simple_hash("machine"); - hash_url = simple_hash("url"); - hash_name = simple_hash("name"); - hash_delete_url = simple_hash("delete_url"); - hash_for = simple_hash("for"); - hash_to = simple_hash("to"); -/* - hash_redirects = simple_hash("redirects"); -*/ - } - - char person_guid[GUID_LEN + 1] = ""; - - debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url); - - // FIXME - // The browser may send multiple cookies with our id - - char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "="); - if(cookie) - strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], 36); - - char action = '\0'; - char *machine_guid = NULL, - *machine_url = NULL, - *url_name = NULL, - *search_machine_guid = NULL, - *delete_url = NULL, - *to_person_guid = NULL; -/* - int redirects = 0; -*/ - - while(url) { - char *value = mystrsep(&url, "?&"); - if (!value || !*value) continue; - - char *name = mystrsep(&value, "="); - if (!name || !*name) continue; - if (!value || !*value) continue; - - debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value); - - uint32_t hash = simple_hash(name); - - if(hash == hash_action && !strcmp(name, "action")) { - uint32_t vhash = simple_hash(value); - - if(vhash == hash_access && !strcmp(value, "access")) action = 'A'; - else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H'; - else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D'; - else if(vhash == hash_search && !strcmp(value, "search")) action = 'S'; - else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W'; -#ifdef NETDATA_INTERNAL_CHECKS - else error("unknown registry action '%s'", value); -#endif /* NETDATA_INTERNAL_CHECKS */ - } -/* - else if(hash == hash_redirects && !strcmp(name, "redirects")) - redirects = atoi(value); -*/ - else if(hash == hash_machine && !strcmp(name, "machine")) - machine_guid = value; - - else if(hash == hash_url && !strcmp(name, "url")) - machine_url = value; - - else if(action == 'A') { - if(hash == hash_name && !strcmp(name, "name")) - url_name = value; - } - else if(action == 'D') { - if(hash == hash_delete_url && !strcmp(name, "delete_url")) - delete_url = value; - } - else if(action == 'S') { - if(hash == hash_for && !strcmp(name, "for")) - search_machine_guid = value; - } - else if(action == 'W') { - if(hash == hash_to && !strcmp(name, "to")) - to_person_guid = value; - } -#ifdef NETDATA_INTERNAL_CHECKS - else error("unused registry URL parameter '%s' with value '%s'", name, value); -#endif /* NETDATA_INTERNAL_CHECKS */ - } - - if(unlikely(respect_web_browser_do_not_track_policy && web_client_has_donottrack(w))) { - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). The registry requires persistent cookies on your browser to work."); - return 400; - } - - if(unlikely(action == 'H')) { - // HELLO request, dashboard ACL - if(unlikely(!web_client_can_access_dashboard(w))) - return web_client_permission_denied(w); - } - else { - // everything else, registry ACL - if(unlikely(!web_client_can_access_registry(w))) - return web_client_permission_denied(w); - } - - switch(action) { - case 'A': - if(unlikely(!machine_guid || !machine_url || !url_name)) { - error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? 
url_name : "UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Access request."); - return 400; - } - - web_client_enable_tracking_required(w); - return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec()); - - case 'D': - if(unlikely(!machine_guid || !machine_url || !delete_url)) { - error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Delete request."); - return 400; - } - - web_client_enable_tracking_required(w); - return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec()); - - case 'S': - if(unlikely(!machine_guid || !machine_url || !search_machine_guid)) { - error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Search request."); - return 400; - } - - web_client_enable_tracking_required(w); - return registry_request_search_json(host, w, person_guid, machine_guid, machine_url, search_machine_guid, now_realtime_sec()); - - case 'W': - if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { - error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Switch request."); - return 400; - } - - web_client_enable_tracking_required(w); - return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec()); - - case 'H': - return registry_request_hello_json(host, w); - - default: - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search"); - return 400; - } -} - -static struct api_command { - const char *command; - uint32_t hash; - WEB_CLIENT_ACL acl; - int (*callback)(RRDHOST *host, struct web_client *w, char *url); -} api_commands[] = { - { "data", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_data }, - { "chart", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_chart }, - { "charts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_charts }, - - // registry checks the ACL by itself, so we allow everything - { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry }, - - // badges can be fetched with both dashboard and badge permissions - { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD|WEB_CLIENT_ACL_BADGE, web_client_api_request_v1_badge }, - - { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms }, - { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_log }, - { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables }, - { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics }, - - // terminator - { NULL, 0, WEB_CLIENT_ACL_NONE, NULL }, -}; - -inline int web_client_api_request_v1(RRDHOST *host, 
struct web_client *w, char *url) { - static int initialized = 0; - int i; - - if(unlikely(initialized == 0)) { - initialized = 1; - - for(i = 0; api_commands[i].command ; i++) - api_commands[i].hash = simple_hash(api_commands[i].command); - } - - // get the command - char *tok = mystrsep(&url, "/?&"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, tok); - uint32_t hash = simple_hash(tok); - - for(i = 0; api_commands[i].command ;i++) { - if(unlikely(hash == api_commands[i].hash && !strcmp(tok, api_commands[i].command))) { - if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl)) - return web_client_permission_denied(w); - - return api_commands[i].callback(host, w, url); - } - } - - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Unsupported v1 API command: "); - buffer_strcat_htmlescape(w->response.data, tok); - return 404; - } - else { - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "Which API v1 command?"); - return 400; - } -} diff --git a/src/web_api_v1.h b/src/web_api_v1.h deleted file mode 100644 index 6f4de1aba..000000000 --- a/src/web_api_v1.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef NETDATA_WEB_API_V1_H -#define NETDATA_WEB_API_V1_H - -extern int web_client_api_request_v1_data_group(char *name, int def); -extern uint32_t web_client_api_request_v1_data_options(char *o); -extern uint32_t web_client_api_request_v1_data_format(char *name); -extern uint32_t web_client_api_request_v1_data_google_format(char *name); - -extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)); -extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url); -extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url); - -extern void web_client_api_v1_init(void); - -#endif //NETDATA_WEB_API_V1_H diff --git a/src/web_buffer.c b/src/web_buffer.c deleted file mode 100644 index 50c76f6d6..000000000 --- a/src/web_buffer.c +++ /dev/null @@ -1,400 +0,0 @@ -#include "common.h" - -#define BUFFER_OVERFLOW_EOF "EOF" - -static inline void buffer_overflow_init(BUFFER *b) -{ - b->buffer[b->size] = '\0'; - strcpy(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF); -} - -#ifdef NETDATA_INTERNAL_CHECKS -#define buffer_overflow_check(b) _buffer_overflow_check(b, __FILE__, __FUNCTION__, __LINE__) -#else -#define buffer_overflow_check(b) -#endif - -static inline void _buffer_overflow_check(BUFFER *b, const char *file, const char *function, const unsigned long line) -{ - if(b->len > b->size) { - error("BUFFER: length %zu is above size %zu, at line %lu, at function %s() of file '%s'.", b->len, b->size, line, function, file); - 
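// (editor's annotation: recovery step. The recorded length is clamped back
// to the allocation size so later appends stay in bounds; the
// BUFFER_OVERFLOW_EOF canary that buffer_overflow_init() writes just past
// buffer[size] is then re-verified below to catch writes that already ran
// past the end.)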
b->len = b->size; - } - - if(b->buffer[b->size] != '\0' || strcmp(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF) != 0) { - error("BUFFER: detected overflow at line %lu, at function %s() of file '%s'.", line, function, file); - buffer_overflow_init(b); - } -} - - -void buffer_reset(BUFFER *wb) -{ - buffer_flush(wb); - - wb->contenttype = CT_TEXT_PLAIN; - wb->options = 0; - wb->date = 0; - wb->expires = 0; - - buffer_overflow_check(wb); -} - -const char *buffer_tostring(BUFFER *wb) -{ - buffer_need_bytes(wb, 1); - wb->buffer[wb->len] = '\0'; - - buffer_overflow_check(wb); - - return(wb->buffer); -} - -void buffer_char_replace(BUFFER *wb, char from, char to) -{ - char *s = wb->buffer, *end = &wb->buffer[wb->len]; - - while(s != end) { - if(*s == from) *s = to; - s++; - } - - buffer_overflow_check(wb); -} - -// This trick seems to give an 80% speed increase in 32bit systems -// print_calculated_number_llu_r() will just print the digits up to the -// point the remaining value fits in 32 bits, and then calls -// print_calculated_number_lu_r() to print the rest with 32 bit arithmetic. - -inline char *print_number_lu_r(char *str, unsigned long uvalue) { - char *wstr = str; - - // print each digit - do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10); - return wstr; -} - -inline char *print_number_llu_r(char *str, unsigned long long uvalue) { - char *wstr = str; - - // print each digit - do *wstr++ = (char)('0' + (uvalue % 10)); while((uvalue /= 10) && uvalue > (unsigned long long)0xffffffff); - if(uvalue) return print_number_lu_r(wstr, uvalue); - return wstr; -} - -inline char *print_number_llu_r_smart(char *str, unsigned long long uvalue) { -#ifdef ENVIRONMENT32 - if(uvalue > (unsigned long long)0xffffffff) - str = print_number_llu_r(str, uvalue); - else - str = print_number_lu_r(str, uvalue); -#else - do *str++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10); -#endif - - return str; -} - -void buffer_print_llu(BUFFER *wb, unsigned long long uvalue) -{ - buffer_need_bytes(wb, 50); - - char *str = &wb->buffer[wb->len]; - char *wstr = str; - -#ifdef ENVIRONMENT32 - if(uvalue > (unsigned long long)0xffffffff) - wstr = print_number_llu_r(wstr, uvalue); - else - wstr = print_number_lu_r(wstr, uvalue); -#else - do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10); -#endif - - // terminate it - *wstr = '\0'; - - // reverse it - char *begin = str, *end = wstr - 1, aux; - while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux; - - // return the buffer length - wb->len += wstr - str; -} - -void buffer_strcat(BUFFER *wb, const char *txt) -{ - // buffer_sprintf(wb, "%s", txt); - - if(unlikely(!txt || !*txt)) return; - - buffer_need_bytes(wb, 1); - - char *s = &wb->buffer[wb->len], *start, *end = &wb->buffer[wb->size]; - size_t len = wb->len; - - start = s; - while(*txt && s != end) - *s++ = *txt++; - - len += s - start; - - wb->len = len; - buffer_overflow_check(wb); - - if(*txt) { - debug(D_WEB_BUFFER, "strcat(): increasing web_buffer at position %zu, size = %zu\n", wb->len, wb->size); - len = strlen(txt); - buffer_increase(wb, len); - buffer_strcat(wb, txt); - } - else { - // terminate the string - // without increasing the length - buffer_need_bytes(wb, (size_t)1); - wb->buffer[wb->len] = '\0'; - } -} - -void buffer_strcat_htmlescape(BUFFER *wb, const char *txt) -{ - while(*txt) { - switch(*txt) { - case '&': buffer_strcat(wb, "&amp;"); break; - case '<': buffer_strcat(wb, "&lt;"); break; - case '>': buffer_strcat(wb, "&gt;"); break; - case '"': buffer_strcat(wb, "&quot;"); break; - case 
'/': buffer_strcat(wb, "&#x2F;"); break; - case '\'': buffer_strcat(wb, "&#x27;"); break; - default: { - buffer_need_bytes(wb, 1); - wb->buffer[wb->len++] = *txt; - } - } - txt++; - } - - buffer_overflow_check(wb); -} - -void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) -{ - if(unlikely(!fmt || !*fmt)) return; - - buffer_need_bytes(wb, len + 1); - - va_list args; - va_start(args, fmt); - wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args); - va_end(args); - - buffer_overflow_check(wb); - - // the buffer is \0 terminated by vsnprintfz -} - -void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args) -{ - if(unlikely(!fmt || !*fmt)) return; - - buffer_need_bytes(wb, 2); - - size_t len = wb->size - wb->len - 1; - - wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args); - - buffer_overflow_check(wb); - - // the buffer is \0 terminated by vsnprintfz -} - -void buffer_sprintf(BUFFER *wb, const char *fmt, ...) -{ - if(unlikely(!fmt || !*fmt)) return; - - va_list args; - size_t wrote = 0, need = 2, multiplier = 0, len; - - do { - need += wrote + multiplier * WEB_DATA_LENGTH_INCREASE_STEP; - multiplier++; - - debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote); - buffer_need_bytes(wb, need); - - len = wb->size - wb->len - 1; - - va_start(args, fmt); - wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args); - va_end(args); - - } while(wrote >= len); - - wb->len += wrote; - - // the buffer is \0 terminated by vsnprintf -} - - -void buffer_rrd_value(BUFFER *wb, calculated_number value) -{ - buffer_need_bytes(wb, 50); - - if(isnan(value) || isinf(value)) { - buffer_strcat(wb, "null"); - return; - } - else - wb->len += print_calculated_number(&wb->buffer[wb->len], value); - - // terminate it - buffer_need_bytes(wb, 1); - wb->buffer[wb->len] = '\0'; - - buffer_overflow_check(wb); -} - -// generate a javascript date, the fastest possible way... -void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds) -{ - // 10 20 30 = 35 - // 01234567890123456789012345678901234 - // Date(2014,04,01,03,28,20) - - buffer_need_bytes(wb, 30); - - char *b = &wb->buffer[wb->len], *p; - unsigned int *q = (unsigned int *)b; - - #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - *q++ = 0x65746144; // "Date" backwards. - #else - *q++ = 0x44617465; // "Date" - #endif - p = (char *)q; - - *p++ = '('; - *p++ = '0' + year / 1000; year %= 1000; - *p++ = '0' + year / 100; year %= 100; - *p++ = '0' + year / 10; - *p++ = '0' + year % 10; - *p++ = ','; - *p = '0' + month / 10; if (*p != '0') p++; - *p++ = '0' + month % 10; - *p++ = ','; - *p = '0' + day / 10; if (*p != '0') p++; - *p++ = '0' + day % 10; - *p++ = ','; - *p = '0' + hours / 10; if (*p != '0') p++; - *p++ = '0' + hours % 10; - *p++ = ','; - *p = '0' + minutes / 10; if (*p != '0') p++; - *p++ = '0' + minutes % 10; - *p++ = ','; - *p = '0' + seconds / 10; if (*p != '0') p++; - *p++ = '0' + seconds % 10; - - unsigned short *r = (unsigned short *)p; - -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - *r++ = 0x0029; // ")\0" backwards. - #else - *r++ = 0x2900; // ")\0" - #endif - - wb->len += (size_t)((char *)r - b - 1); - - // terminate it - wb->buffer[wb->len] = '\0'; - buffer_overflow_check(wb); -} - -// generate a date, the fastest possible way... 
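// (editor's annotation on buffer_jsdate() above: the byte-order #if stores
// the literal "Date" with a single 4-byte write, and the hand-rolled digit
// code drops the leading zero of every field except the year. So, despite
// the "Date(2014,04,01,03,28,20)" sketch in its comment, the text actually
// emitted for that input is:
//
//     Date(2014,4,1,3,28,20)
// )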
-void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds) -{ - // 10 20 30 = 35 - // 01234567890123456789012345678901234 - // 2014-04-01 03:28:20 - - buffer_need_bytes(wb, 36); - - char *b = &wb->buffer[wb->len]; - char *p = b; - - *p++ = '0' + year / 1000; year %= 1000; - *p++ = '0' + year / 100; year %= 100; - *p++ = '0' + year / 10; - *p++ = '0' + year % 10; - *p++ = '-'; - *p++ = '0' + month / 10; - *p++ = '0' + month % 10; - *p++ = '-'; - *p++ = '0' + day / 10; - *p++ = '0' + day % 10; - *p++ = ' '; - *p++ = '0' + hours / 10; - *p++ = '0' + hours % 10; - *p++ = ':'; - *p++ = '0' + minutes / 10; - *p++ = '0' + minutes % 10; - *p++ = ':'; - *p++ = '0' + seconds / 10; - *p++ = '0' + seconds % 10; - *p = '\0'; - - wb->len += (size_t)(p - b); - - // terminate it - wb->buffer[wb->len] = '\0'; - buffer_overflow_check(wb); -} - -BUFFER *buffer_create(size_t size) -{ - BUFFER *b; - - debug(D_WEB_BUFFER, "Creating new web buffer of size %zu.", size); - - b = callocz(1, sizeof(BUFFER)); - b->buffer = mallocz(size + sizeof(BUFFER_OVERFLOW_EOF) + 2); - b->buffer[0] = '\0'; - b->size = size; - b->contenttype = CT_TEXT_PLAIN; - buffer_overflow_init(b); - buffer_overflow_check(b); - - return(b); -} - -void buffer_free(BUFFER *b) { - if(unlikely(!b)) return; - - buffer_overflow_check(b); - - debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size); - - freez(b->buffer); - freez(b); -} - -void buffer_increase(BUFFER *b, size_t free_size_required) -{ - buffer_overflow_check(b); - - size_t left = b->size - b->len; - - if(left >= free_size_required) return; - - size_t increase = free_size_required - left; - if(increase < WEB_DATA_LENGTH_INCREASE_STEP) increase = WEB_DATA_LENGTH_INCREASE_STEP; - - debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + increase); - - b->buffer = reallocz(b->buffer, b->size + increase + sizeof(BUFFER_OVERFLOW_EOF) + 2); - b->size += increase; - - buffer_overflow_init(b); - buffer_overflow_check(b); -} diff --git a/src/web_buffer.h b/src/web_buffer.h deleted file mode 100644 index 694c9d4ce..000000000 --- a/src/web_buffer.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef NETDATA_WEB_BUFFER_H -#define NETDATA_WEB_BUFFER_H 1 - -#define WEB_DATA_LENGTH_INCREASE_STEP 1024 - -typedef struct web_buffer { - size_t size; // allocation size of buffer, in bytes - size_t len; // current data length in buffer, in bytes - char *buffer; // the buffer itself - uint8_t contenttype; // the content type of the data in the buffer - uint8_t options; // options related to the content - time_t date; // the timestamp this content has been generated - time_t expires; // the timestamp this content expires -} BUFFER; - -// options -#define WB_CONTENT_CACHEABLE 1 -#define WB_CONTENT_NO_CACHEABLE 2 - -// content-types -#define CT_APPLICATION_JSON 1 -#define CT_TEXT_PLAIN 2 -#define CT_TEXT_HTML 3 -#define CT_APPLICATION_X_JAVASCRIPT 4 -#define CT_TEXT_CSS 5 -#define CT_TEXT_XML 6 -#define CT_APPLICATION_XML 7 -#define CT_TEXT_XSL 8 -#define CT_APPLICATION_OCTET_STREAM 9 -#define CT_APPLICATION_X_FONT_TRUETYPE 10 -#define CT_APPLICATION_X_FONT_OPENTYPE 11 -#define CT_APPLICATION_FONT_WOFF 12 -#define CT_APPLICATION_FONT_WOFF2 13 -#define CT_APPLICATION_VND_MS_FONTOBJ 14 -#define CT_IMAGE_SVG_XML 15 -#define CT_IMAGE_PNG 16 -#define CT_IMAGE_JPG 17 -#define CT_IMAGE_GIF 18 -#define CT_IMAGE_XICON 19 -#define CT_IMAGE_ICNS 20 -#define CT_IMAGE_BMP 21 -#define CT_PROMETHEUS 22 - -#define buffer_cacheable(wb) do { (wb)->options |= 
WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0) -#define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0) - -#define buffer_strlen(wb) ((wb)->len) -extern const char *buffer_tostring(BUFFER *wb); - -#define buffer_flush(wb) wb->buffer[(wb)->len = 0] = '\0' -extern void buffer_reset(BUFFER *wb); - -extern void buffer_strcat(BUFFER *wb, const char *txt); -extern void buffer_rrd_value(BUFFER *wb, calculated_number value); - -extern void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds); -extern void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds); - -extern BUFFER *buffer_create(size_t size); -extern void buffer_free(BUFFER *b); -extern void buffer_increase(BUFFER *b, size_t free_size_required); - -extern void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) PRINTFLIKE(3, 4); -extern void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args); -extern void buffer_sprintf(BUFFER *wb, const char *fmt, ...) PRINTFLIKE(2,3); -extern void buffer_strcat_htmlescape(BUFFER *wb, const char *txt); - -extern void buffer_char_replace(BUFFER *wb, char from, char to); - -extern char *print_number_lu_r(char *str, unsigned long uvalue); -extern char *print_number_llu_r(char *str, unsigned long long uvalue); -extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue); - -extern void buffer_print_llu(BUFFER *wb, unsigned long long uvalue); - -static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) { - if(unlikely(buffer->size - buffer->len < needed_free_size)) - buffer_increase(buffer, needed_free_size); -} - -#endif /* NETDATA_WEB_BUFFER_H */ diff --git a/src/web_buffer_svg.c b/src/web_buffer_svg.c deleted file mode 100644 index c05e526ed..000000000 --- a/src/web_buffer_svg.c +++ /dev/null @@ -1,832 +0,0 @@ -#include "common.h" - -#define BADGE_HORIZONTAL_PADDING 4 -#define VERDANA_KERNING 0.2 -#define VERDANA_PADDING 1.0 - -/* - * verdana11_widths[] has been generated with this method: - * https://github.com/badges/shields/blob/master/measure-text.js -*/ - -double verdana11_widths[256] = { - [0] = 0.0, - [1] = 0.0, - [2] = 0.0, - [3] = 0.0, - [4] = 0.0, - [5] = 0.0, - [6] = 0.0, - [7] = 0.0, - [8] = 0.0, - [9] = 0.0, - [10] = 0.0, - [11] = 0.0, - [12] = 0.0, - [13] = 0.0, - [14] = 0.0, - [15] = 0.0, - [16] = 0.0, - [17] = 0.0, - [18] = 0.0, - [19] = 0.0, - [20] = 0.0, - [21] = 0.0, - [22] = 0.0, - [23] = 0.0, - [24] = 0.0, - [25] = 0.0, - [26] = 0.0, - [27] = 0.0, - [28] = 0.0, - [29] = 0.0, - [30] = 0.0, - [31] = 0.0, - [32] = 3.8671874999999996, // - [33] = 4.3291015625, // ! - [34] = 5.048828125, // " - [35] = 9.001953125, // # - [36] = 6.9931640625, // $ - [37] = 11.837890625, // % - [38] = 7.992187499999999, // & - [39] = 2.9541015625, // ' - [40] = 4.9951171875, // ( - [41] = 4.9951171875, // ) - [42] = 6.9931640625, // * - [43] = 9.001953125, // + - [44] = 4.00146484375, // , - [45] = 4.9951171875, // - - [46] = 4.00146484375, // . 
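// (editor's annotation: the entries above and below are per-glyph advance
// widths, in pixels, for 11px Verdana, produced with the shields.io
// measure-text method cited above. The badge renderer estimates a label's
// pixel width by summing the widths of its bytes plus kerning; a minimal
// sketch of that idea, with an illustrative function name rather than the
// one actually used later in this file, and an assumed use of the padding
// constant:
//
//     static double example_text_width(const char *s) {
//         double w = 0.0;
//         while(*s) w += verdana11_widths[(unsigned char)*s++] + VERDANA_KERNING;
//         return w + 2.0 * VERDANA_PADDING;   // padding use is an assumption
//     }
// )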
- [47] = 4.9951171875, // / - [48] = 6.9931640625, // 0 - [49] = 6.9931640625, // 1 - [50] = 6.9931640625, // 2 - [51] = 6.9931640625, // 3 - [52] = 6.9931640625, // 4 - [53] = 6.9931640625, // 5 - [54] = 6.9931640625, // 6 - [55] = 6.9931640625, // 7 - [56] = 6.9931640625, // 8 - [57] = 6.9931640625, // 9 - [58] = 4.9951171875, // : - [59] = 4.9951171875, // ; - [60] = 9.001953125, // < - [61] = 9.001953125, // = - [62] = 9.001953125, // > - [63] = 5.99951171875, // ? - [64] = 11.0, // @ - [65] = 7.51953125, // A - [66] = 7.541015625, // B - [67] = 7.680664062499999, // C - [68] = 8.4755859375, // D - [69] = 6.95556640625, // E - [70] = 6.32177734375, // F - [71] = 8.529296875, // G - [72] = 8.26611328125, // H - [73] = 4.6298828125, // I - [74] = 5.00048828125, // J - [75] = 7.62158203125, // K - [76] = 6.123046875, // L - [77] = 9.2705078125, // M - [78] = 8.228515625, // N - [79] = 8.658203125, // O - [80] = 6.63330078125, // P - [81] = 8.658203125, // Q - [82] = 7.6484375, // R - [83] = 7.51953125, // S - [84] = 6.7783203125, // T - [85] = 8.05126953125, // U - [86] = 7.51953125, // V - [87] = 10.87646484375, // W - [88] = 7.53564453125, // X - [89] = 6.767578125, // Y - [90] = 7.53564453125, // Z - [91] = 4.9951171875, // [ - [92] = 4.9951171875, // backslash - [93] = 4.9951171875, // ] - [94] = 9.001953125, // ^ - [95] = 6.9931640625, // _ - [96] = 6.9931640625, // ` - [97] = 6.6064453125, // a - [98] = 6.853515625, // b - [99] = 5.73095703125, // c - [100] = 6.853515625, // d - [101] = 6.552734375, // e - [102] = 3.8671874999999996, // f - [103] = 6.853515625, // g - [104] = 6.9609375, // h - [105] = 3.0185546875, // i - [106] = 3.78662109375, // j - [107] = 6.509765625, // k - [108] = 3.0185546875, // l - [109] = 10.69921875, // m - [110] = 6.9609375, // n - [111] = 6.67626953125, // o - [112] = 6.853515625, // p - [113] = 6.853515625, // q - [114] = 4.6943359375, // r - [115] = 5.73095703125, // s - [116] = 4.33447265625, // t - [117] = 6.9609375, // u - [118] = 6.509765625, // v - [119] = 9.001953125, // w - [120] = 6.509765625, // x - [121] = 6.509765625, // y - [122] = 5.779296875, // z - [123] = 6.982421875, // { - [124] = 4.9951171875, // | - [125] = 6.982421875, // } - [126] = 9.001953125, // ~ - [127] = 0.0, - [128] = 0.0, - [129] = 0.0, - [130] = 0.0, - [131] = 0.0, - [132] = 0.0, - [133] = 0.0, - [134] = 0.0, - [135] = 0.0, - [136] = 0.0, - [137] = 0.0, - [138] = 0.0, - [139] = 0.0, - [140] = 0.0, - [141] = 0.0, - [142] = 0.0, - [143] = 0.0, - [144] = 0.0, - [145] = 0.0, - [146] = 0.0, - [147] = 0.0, - [148] = 0.0, - [149] = 0.0, - [150] = 0.0, - [151] = 0.0, - [152] = 0.0, - [153] = 0.0, - [154] = 0.0, - [155] = 0.0, - [156] = 0.0, - [157] = 0.0, - [158] = 0.0, - [159] = 0.0, - [160] = 0.0, - [161] = 0.0, - [162] = 0.0, - [163] = 0.0, - [164] = 0.0, - [165] = 0.0, - [166] = 0.0, - [167] = 0.0, - [168] = 0.0, - [169] = 0.0, - [170] = 0.0, - [171] = 0.0, - [172] = 0.0, - [173] = 0.0, - [174] = 0.0, - [175] = 0.0, - [176] = 0.0, - [177] = 0.0, - [178] = 0.0, - [179] = 0.0, - [180] = 0.0, - [181] = 0.0, - [182] = 0.0, - [183] = 0.0, - [184] = 0.0, - [185] = 0.0, - [186] = 0.0, - [187] = 0.0, - [188] = 0.0, - [189] = 0.0, - [190] = 0.0, - [191] = 0.0, - [192] = 0.0, - [193] = 0.0, - [194] = 0.0, - [195] = 0.0, - [196] = 0.0, - [197] = 0.0, - [198] = 0.0, - [199] = 0.0, - [200] = 0.0, - [201] = 0.0, - [202] = 0.0, - [203] = 0.0, - [204] = 0.0, - [205] = 0.0, - [206] = 0.0, - [207] = 0.0, - [208] = 0.0, - [209] = 0.0, - [210] = 0.0, - [211] = 0.0, - [212] = 0.0, - [213] = 0.0, 
- [214] = 0.0, - [215] = 0.0, - [216] = 0.0, - [217] = 0.0, - [218] = 0.0, - [219] = 0.0, - [220] = 0.0, - [221] = 0.0, - [222] = 0.0, - [223] = 0.0, - [224] = 0.0, - [225] = 0.0, - [226] = 0.0, - [227] = 0.0, - [228] = 0.0, - [229] = 0.0, - [230] = 0.0, - [231] = 0.0, - [232] = 0.0, - [233] = 0.0, - [234] = 0.0, - [235] = 0.0, - [236] = 0.0, - [237] = 0.0, - [238] = 0.0, - [239] = 0.0, - [240] = 0.0, - [241] = 0.0, - [242] = 0.0, - [243] = 0.0, - [244] = 0.0, - [245] = 0.0, - [246] = 0.0, - [247] = 0.0, - [248] = 0.0, - [249] = 0.0, - [250] = 0.0, - [251] = 0.0, - [252] = 0.0, - [253] = 0.0, - [254] = 0.0, - [255] = 0.0 -}; - -// find the width of the string using the verdana 11points font -// re-write the string in place, skiping zero-length characters -static inline double verdana11_width(char *s) { - double w = 0.0; - char *d = s; - - while(*s) { - double t = verdana11_widths[(unsigned char)*s]; - if(t == 0.0) - s++; - else { - w += t + VERDANA_KERNING; - if(d != s) - *d++ = *s++; - else - d = ++s; - } - } - - *d = '\0'; - w -= VERDANA_KERNING; - w += VERDANA_PADDING; - return w; -} - -static inline size_t escape_xmlz(char *dst, const char *src, size_t len) { - size_t i = len; - - // required escapes from - // https://github.com/badges/shields/blob/master/badge.js - while(*src && i) { - switch(*src) { - case '\\': - *dst++ = '/'; - src++; - i--; - break; - - case '&': - if(i > 5) { - strcpy(dst, "&amp;"); - i -= 5; - dst += 5; - src++; - } - else goto cleanup; - break; - - case '<': - if(i > 4) { - strcpy(dst, "&lt;"); - i -= 4; - dst += 4; - src++; - } - else goto cleanup; - break; - - case '>': - if(i > 4) { - strcpy(dst, "&gt;"); - i -= 4; - dst += 4; - src++; - } - else goto cleanup; - break; - - case '"': - if(i > 6) { - strcpy(dst, "&quot;"); - i -= 6; - dst += 6; - src++; - } - else goto cleanup; - break; - - case '\'': - if(i > 6) { - strcpy(dst, "&#39;"); - i -= 6; - dst += 6; - src++; - } - else goto cleanup; - break; - - default: - i--; - *dst++ = *src++; - break; - } - } - -cleanup: - *dst = '\0'; - return len - i; -} - -static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) { - if(unlikely(isnan(value) || isinf(value))) - value = 0.0; - - char *separator = ""; - if(unlikely(isalnum(*units))) - separator = " "; - - if(precision < 0) { - int len, lstop = 0, trim_zeros = 1; - - calculated_number abs = value; - if(isless(value, 0)) { - lstop = 1; - abs = calculated_number_fabs(value); - } - - if(isgreaterequal(abs, 1000)) { - len = snprintfz(value_string, value_string_len, "%0.0" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - trim_zeros = 0; - } - else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - else len = snprintfz(value_string,
value_string_len, "%0.7" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value); - - if(unlikely(trim_zeros)) { - int l; - // remove trailing zeros from the decimal part - for(l = len - 1; l > lstop; l--) { - if(likely(value_string[l] == '0')) { - value_string[l] = '\0'; - len--; - } - - else if(unlikely(value_string[l] == '.')) { - value_string[l] = '\0'; - len--; - break; - } - - else - break; - } - } - - if(unlikely(len <= 0)) len = 1; - snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units); - } - else { - if(precision > 50) precision = 50; - snprintfz(value_string, value_string_len, "%0.*" LONG_DOUBLE_MODIFIER "%s%s", precision, (LONG_DOUBLE) value, separator, units); - } - - return value_string; -} - -inline char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) { - static uint32_t - hash_seconds = 0, - hash_seconds_ago = 0, - hash_minutes = 0, - hash_minutes_ago = 0, - hash_hours = 0, - hash_hours_ago = 0, - hash_onoff = 0, - hash_updown = 0, - hash_okerror = 0, - hash_okfailed = 0, - hash_empty = 0, - hash_null = 0, - hash_percentage = 0, - hash_percent = 0, - hash_pcent = 0; - - if(unlikely(!hash_seconds)) { - hash_seconds = simple_hash("seconds"); - hash_seconds_ago = simple_hash("seconds ago"); - hash_minutes = simple_hash("minutes"); - hash_minutes_ago = simple_hash("minutes ago"); - hash_hours = simple_hash("hours"); - hash_hours_ago = simple_hash("hours ago"); - hash_onoff = simple_hash("on/off"); - hash_updown = simple_hash("up/down"); - hash_okerror = simple_hash("ok/error"); - hash_okfailed = simple_hash("ok/failed"); - hash_empty = simple_hash("empty"); - hash_null = simple_hash("null"); - hash_percentage = simple_hash("percentage"); - hash_percent = simple_hash("percent"); - hash_pcent = simple_hash("pcent"); - } - - if(unlikely(!units)) units = ""; - - uint32_t hash_units = simple_hash(units); - - if(unlikely((hash_units == hash_seconds && !strcmp(units, "seconds")) || (hash_units == hash_seconds_ago && !strcmp(units, "seconds ago")))) { - if(value == 0.0) { - snprintfz(value_string, value_string_len, "%s", "now"); - return value_string; - } - else if(isnan(value) || isinf(value)) { - snprintfz(value_string, value_string_len, "%s", "never"); - return value_string; - } - - const char *suffix = (hash_units == hash_seconds_ago)?" ago":""; - - size_t s = (size_t)value; - size_t d = s / 86400; - s = s % 86400; - - size_t h = s / 3600; - s = s % 3600; - - size_t m = s / 60; - s = s % 60; - - if(d) - snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix); - else - snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix); - - return value_string; - } - - else if(unlikely((hash_units == hash_minutes && !strcmp(units, "minutes")) || (hash_units == hash_minutes_ago && !strcmp(units, "minutes ago")))) { - if(value == 0.0) { - snprintfz(value_string, value_string_len, "%s", "now"); - return value_string; - } - else if(isnan(value) || isinf(value)) { - snprintfz(value_string, value_string_len, "%s", "never"); - return value_string; - } - - const char *suffix = (hash_units == hash_minutes_ago)?" 
ago":""; - - size_t m = (size_t)value; - size_t d = m / (60 * 24); - m = m % (60 * 24); - - size_t h = m / 60; - m = m % 60; - - if(d) - snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix); - else - snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix); - - return value_string; - } - - else if(unlikely((hash_units == hash_hours && !strcmp(units, "hours")) || (hash_units == hash_hours_ago && !strcmp(units, "hours ago")))) { - if(value == 0.0) { - snprintfz(value_string, value_string_len, "%s", "now"); - return value_string; - } - else if(isnan(value) || isinf(value)) { - snprintfz(value_string, value_string_len, "%s", "never"); - return value_string; - } - - const char *suffix = (hash_units == hash_hours_ago)?" ago":""; - - size_t h = (size_t)value; - size_t d = h / 24; - h = h % 24; - - if(d) - snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix); - else - snprintfz(value_string, value_string_len, "%zuh%s", h, suffix); - - return value_string; - } - - else if(unlikely(hash_units == hash_onoff && !strcmp(units, "on/off"))) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off"); - return value_string; - } - - else if(unlikely(hash_units == hash_updown && !strcmp(units, "up/down"))) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down"); - return value_string; - } - - else if(unlikely(hash_units == hash_okerror && !strcmp(units, "ok/error"))) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error"); - return value_string; - } - - else if(unlikely(hash_units == hash_okfailed && !strcmp(units, "ok/failed"))) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed"); - return value_string; - } - - else if(unlikely(hash_units == hash_empty && !strcmp(units, "empty"))) - units = ""; - - else if(unlikely(hash_units == hash_null && !strcmp(units, "null"))) - units = ""; - - else if(unlikely(hash_units == hash_percentage && !strcmp(units, "percentage"))) - units = "%"; - - else if(unlikely(hash_units == hash_percent && !strcmp(units, "percent"))) - units = "%"; - - else if(unlikely(hash_units == hash_pcent && !strcmp(units, "pcent"))) - units = "%"; - - - if(unlikely(isnan(value) || isinf(value))) { - strcpy(value_string, "-"); - return value_string; - } - - return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision); -} - -static inline const char *color_map(const char *color) { - // colors from: - // https://github.com/badges/shields/blob/master/colorscheme.json - if(!strcmp(color, "brightgreen")) return "#4c1"; - else if(!strcmp(color, "green")) return "#97CA00"; - else if(!strcmp(color, "yellow")) return "#dfb317"; - else if(!strcmp(color, "yellowgreen")) return "#a4a61d"; - else if(!strcmp(color, "orange")) return "#fe7d37"; - else if(!strcmp(color, "red")) return "#e05d44"; - else if(!strcmp(color, "blue")) return "#007ec6"; - else if(!strcmp(color, "grey")) return "#555"; - else if(!strcmp(color, "gray")) return "#555"; - else if(!strcmp(color, "lightgrey")) return "#9f9f9f"; - else if(!strcmp(color, "lightgray")) return "#9f9f9f"; - return color; -} - -typedef enum color_comparison { - COLOR_COMPARE_EQUAL, - COLOR_COMPARE_NOTEQUAL, - COLOR_COMPARE_LESS, - COLOR_COMPARE_LESSEQUAL, - COLOR_COMPARE_GREATER, - COLOR_COMPARE_GREATEREQUAL, -} BADGE_COLOR_COMPARISON; - -static inline void calc_colorz(const char *color, char *final, size_t len, calculated_number value) { - if(isnan(value) || 
isinf(value)) - value = NAN; - - char color_buffer[256 + 1] = ""; - char value_buffer[256 + 1] = ""; - BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER; - - // example input: - // color<max|color>min|color:null... - - const char *c = color; - while(*c) { - char *dc = color_buffer, *dv = NULL; - size_t ci = 0, vi = 0; - - const char *t = c; - - while(*t && *t != '|') { - switch(*t) { - case '!': - if(t[1] == '=') t++; - comparison = COLOR_COMPARE_NOTEQUAL; - dv = value_buffer; - break; - - case '=': - case ':': - comparison = COLOR_COMPARE_EQUAL; - dv = value_buffer; - break; - - case '}': - case ')': - case '>': - if(t[1] == '=') { - comparison = COLOR_COMPARE_GREATEREQUAL; - t++; - } - else - comparison = COLOR_COMPARE_GREATER; - dv = value_buffer; - break; - - case '{': - case '(': - case '<': - if(t[1] == '=') { - comparison = COLOR_COMPARE_LESSEQUAL; - t++; - } - else if(t[1] == '>' || t[1] == ')' || t[1] == '}') { - comparison = COLOR_COMPARE_NOTEQUAL; - t++; - } - else - comparison = COLOR_COMPARE_LESS; - dv = value_buffer; - break; - - default: - if(dv) { - if(vi < 256) { - vi++; - *dv++ = *t; - } - } - else { - if(ci < 256) { - ci++; - *dc++ = *t; - } - } - break; - } - - t++; - } - - // prepare for next iteration - if(*t == '|') t++; - c = t; - - // do the math - *dc = '\0'; - if(dv) { - *dv = '\0'; - calculated_number v; - - if(!*value_buffer || !strcmp(value_buffer, "null")) { - v = NAN; - } - else { - v = str2l(value_buffer); - if(isnan(v) || isinf(v)) - v = NAN; - } - - if(unlikely(isnan(value) || isnan(v))) { - if(isnan(value) && isnan(v)) - break; - } - else { - if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break; - } - } - else - break; - } - - const char *b; - if(color_buffer[0]) - b = color_buffer; - else - b = color; - - strncpyz(final, b, len); -} - -// value + units -#define VALUE_STRING_SIZE 100 - -// label -#define LABEL_STRING_SIZE 200 - -// colors -#define COLOR_STRING_SIZE 100 - -void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options) { - char label_buffer[LABEL_STRING_SIZE + 1] - , value_color_buffer[COLOR_STRING_SIZE + 1] - , value_string[VALUE_STRING_SIZE + 1] - , label_escaped[LABEL_STRING_SIZE + 1] - , value_escaped[VALUE_STRING_SIZE + 1] - , label_color_escaped[COLOR_STRING_SIZE + 1] - , value_color_escaped[COLOR_STRING_SIZE + 1]; - - double label_width, value_width, total_width, height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0; - - if(scale < 100) scale = 100; - - if(unlikely(!label_color || !*label_color)) - label_color = "#555"; - - if(unlikely(!value_color || !*value_color)) - value_color = (isnan(value) || isinf(value))?"#999":"#4c1"; - - calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value); - format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision); - - // we need to copy the label, since verdana11_width may write 
to it - strncpyz(label_buffer, label, LABEL_STRING_SIZE); - - label_width = verdana11_width(label_buffer) + (BADGE_HORIZONTAL_PADDING * 2); - value_width = verdana11_width(value_string) + (BADGE_HORIZONTAL_PADDING * 2); - total_width = label_width + value_width; - - escape_xmlz(label_escaped, label_buffer, LABEL_STRING_SIZE); - escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE); - escape_xmlz(label_color_escaped, color_map(label_color), COLOR_STRING_SIZE); - escape_xmlz(value_color_escaped, color_map(value_color_buffer), COLOR_STRING_SIZE); - - wb->contenttype = CT_IMAGE_SVG_XML; - - total_width = total_width * scale / 100.0; - height = height * scale / 100.0; - font_size = font_size * scale / 100.0; - text_offset = text_offset * scale / 100.0; - label_width = label_width * scale / 100.0; - value_width = value_width * scale / 100.0; - round_corner = round_corner * scale / 100.0; - - // svg template from: - // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg - buffer_sprintf(wb, - "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%0.2f\" height=\"%0.2f\">" - "<linearGradient id=\"smooth\" x2=\"0\" y2=\"100%%\">" - "<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>" - "<stop offset=\"1\" stop-opacity=\".1\"/>" - "</linearGradient>" - "<mask id=\"round\">" - "<rect width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>" - "</mask>" - "<g mask=\"url(#round)\">" - "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" - "<rect x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" - "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>" - "</g>" - "<g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">" - "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" - "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>" - "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" - "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>" - "</g>" - "</svg>", - total_width, height, - total_width, height, round_corner, - label_width, height, label_color_escaped, - label_width, value_width, height, value_color_escaped, - total_width, height, - font_size, - label_width / 2, ceil(height - text_offset), label_escaped, - label_width / 2, ceil(height - text_offset - 1.0), label_escaped, - label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped, - label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped); -} diff --git a/src/web_buffer_svg.h b/src/web_buffer_svg.h deleted file mode 100644 index c23abf0dc..000000000 --- a/src/web_buffer_svg.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef NETDATA_WEB_BUFFER_SVG_H -#define NETDATA_WEB_BUFFER_SVG_H 1 - -extern void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options); -extern char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision); - -#endif /* NETDATA_WEB_BUFFER_SVG_H */ diff --git a/src/web_client.c b/src/web_client.c deleted file mode 100644 index 477fb3d57..000000000 --- a/src/web_client.c +++ /dev/null @@ -1,1706 +0,0 @@ -#include "common.h" - -// this is an async I/O implementation of the web server request parser -// it is used by all netdata web servers - -int respect_web_browser_do_not_track_policy = 0; -char 
*web_x_frame_options = NULL; - -#ifdef NETDATA_WITH_ZLIB -int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY; -#endif /* NETDATA_WITH_ZLIB */ - -inline int web_client_permission_denied(struct web_client *w) { - w->response.data->contenttype = CT_TEXT_PLAIN; - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "You are not allowed to access this resource."); - w->response.code = 403; - return 403; -} - -static inline int web_client_crock_socket(struct web_client *w) { -#ifdef TCP_CORK - if(likely(web_client_is_corkable(w) && !w->tcp_cork && w->ofd != -1)) { - w->tcp_cork = 1; - if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) { - error("%llu: failed to enable TCP_CORK on socket.", w->id); - - w->tcp_cork = 0; - return -1; - } - } -#else - (void)w; -#endif /* TCP_CORK */ - - return 0; -} - -static inline int web_client_uncrock_socket(struct web_client *w) { -#ifdef TCP_CORK - if(likely(w->tcp_cork && w->ofd != -1)) { - w->tcp_cork = 0; - if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) { - error("%llu: failed to disable TCP_CORK on socket.", w->id); - w->tcp_cork = 1; - return -1; - } - } -#else - (void)w; -#endif /* TCP_CORK */ - - return 0; -} - -void web_client_request_done(struct web_client *w) { - web_client_uncrock_socket(w); - - debug(D_WEB_CLIENT, "%llu: Resetting client.", w->id); - - if(likely(w->last_url[0])) { - struct timeval tv; - now_realtime_timeval(&tv); - - size_t size = (w->mode == WEB_CLIENT_MODE_FILECOPY)?w->response.rlen:w->response.data->len; - size_t sent = size; -#ifdef NETDATA_WITH_ZLIB - if(likely(w->response.zoutput)) sent = (size_t)w->response.zstream.total_out; -#endif - - // -------------------------------------------------------------------- - // global statistics - - finished_web_request_statistics(dt_usec(&tv, &w->tv_in), - w->stats_received_bytes, - w->stats_sent_bytes, - size, - sent); - - w->stats_received_bytes = 0; - w->stats_sent_bytes = 0; - - - // -------------------------------------------------------------------- - - const char *mode; - switch(w->mode) { - case WEB_CLIENT_MODE_FILECOPY: - mode = "FILECOPY"; - break; - - case WEB_CLIENT_MODE_OPTIONS: - mode = "OPTIONS"; - break; - - case WEB_CLIENT_MODE_STREAM: - mode = "STREAM"; - break; - - case WEB_CLIENT_MODE_NORMAL: - mode = "DATA"; - break; - - default: - mode = "UNKNOWN"; - break; - } - - // access log - log_access("%llu: %d '[%s]:%s' '%s' (sent/all = %zu/%zu bytes %0.0f%%, prep/sent/total = %0.2f/%0.2f/%0.2f ms) %d '%s'", - w->id - , gettid() - , w->client_ip - , w->client_port - , mode - , sent - , size - , -((size > 0) ? 
((size - sent) / (double) size * 100.0) : 0.0) - , dt_usec(&w->tv_ready, &w->tv_in) / 1000.0 - , dt_usec(&tv, &w->tv_ready) / 1000.0 - , dt_usec(&tv, &w->tv_in) / 1000.0 - , w->response.code - , w->last_url - ); - } - - if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) { - if(w->ifd != w->ofd) { - debug(D_WEB_CLIENT, "%llu: Closing filecopy input file descriptor %d.", w->id, w->ifd); - - if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) { - if (w->ifd != -1) close(w->ifd); - } - - w->ifd = w->ofd; - } - } - - w->last_url[0] = '\0'; - w->cookie1[0] = '\0'; - w->cookie2[0] = '\0'; - w->origin[0] = '*'; - w->origin[1] = '\0'; - - freez(w->user_agent); w->user_agent = NULL; - - w->mode = WEB_CLIENT_MODE_NORMAL; - - w->tcp_cork = 0; - web_client_disable_donottrack(w); - web_client_disable_tracking_required(w); - web_client_disable_keepalive(w); - w->decoded_url[0] = '\0'; - - buffer_reset(w->response.header_output); - buffer_reset(w->response.header); - buffer_reset(w->response.data); - w->response.rlen = 0; - w->response.sent = 0; - w->response.code = 0; - - w->header_parse_tries = 0; - w->header_parse_last_size = 0; - - web_client_enable_wait_receive(w); - web_client_disable_wait_send(w); - - w->response.zoutput = 0; - - // if we had enabled compression, release it -#ifdef NETDATA_WITH_ZLIB - if(w->response.zinitialized) { - debug(D_DEFLATE, "%llu: Freeing compression resources.", w->id); - deflateEnd(&w->response.zstream); - w->response.zsent = 0; - w->response.zhave = 0; - w->response.zstream.avail_in = 0; - w->response.zstream.avail_out = 0; - w->response.zstream.total_in = 0; - w->response.zstream.total_out = 0; - w->response.zinitialized = 0; - } -#endif // NETDATA_WITH_ZLIB -} - -uid_t web_files_uid(void) { - static char *web_owner = NULL; - static uid_t owner_uid = 0; - - if(unlikely(!web_owner)) { - // getpwuid() is not thread safe, - // but we have called this function once - // while single threaded - struct passwd *pw = getpwuid(geteuid()); - web_owner = config_get(CONFIG_SECTION_WEB, "web files owner", (pw)?(pw->pw_name?pw->pw_name:""):""); - if(!web_owner || !*web_owner) - owner_uid = geteuid(); - else { - // getpwnam() is not thread safe, - // but we have called this function once - // while single threaded - pw = getpwnam(web_owner); - if(!pw) { - error("User '%s' is not present. Ignoring option.", web_owner); - owner_uid = geteuid(); - } - else { - debug(D_WEB_CLIENT, "Web files owner set to %s.", web_owner); - owner_uid = pw->pw_uid; - } - } - } - - return(owner_uid); -} - -gid_t web_files_gid(void) { - static char *web_group = NULL; - static gid_t owner_gid = 0; - - if(unlikely(!web_group)) { - // getgrgid() is not thread safe, - // but we have called this function once - // while single threaded - struct group *gr = getgrgid(getegid()); - web_group = config_get(CONFIG_SECTION_WEB, "web files group", (gr)?(gr->gr_name?gr->gr_name:""):""); - if(!web_group || !*web_group) - owner_gid = getegid(); - else { - // getgrnam() is not thread safe, - // but we have called this function once - // while single threaded - gr = getgrnam(web_group); - if(!gr) { - error("Group '%s' is not present. 
Ignoring option.", web_group); - owner_gid = getegid(); - } - else { - debug(D_WEB_CLIENT, "Web files group set to %s.", web_group); - owner_gid = gr->gr_gid; - } - } - } - - return(owner_gid); -} - -static struct { - const char *extension; - uint32_t hash; - uint8_t contenttype; -} mime_types[] = { - { "html" , 0 , CT_TEXT_HTML} - , {"js" , 0 , CT_APPLICATION_X_JAVASCRIPT} - , {"css" , 0 , CT_TEXT_CSS} - , {"xml" , 0 , CT_TEXT_XML} - , {"xsl" , 0 , CT_TEXT_XSL} - , {"txt" , 0 , CT_TEXT_PLAIN} - , {"svg" , 0 , CT_IMAGE_SVG_XML} - , {"ttf" , 0 , CT_APPLICATION_X_FONT_TRUETYPE} - , {"otf" , 0 , CT_APPLICATION_X_FONT_OPENTYPE} - , {"woff2", 0 , CT_APPLICATION_FONT_WOFF2} - , {"woff" , 0 , CT_APPLICATION_FONT_WOFF} - , {"eot" , 0 , CT_APPLICATION_VND_MS_FONTOBJ} - , {"png" , 0 , CT_IMAGE_PNG} - , {"jpg" , 0 , CT_IMAGE_JPG} - , {"jpeg" , 0 , CT_IMAGE_JPG} - , {"gif" , 0 , CT_IMAGE_GIF} - , {"bmp" , 0 , CT_IMAGE_BMP} - , {"ico" , 0 , CT_IMAGE_XICON} - , {"icns" , 0 , CT_IMAGE_ICNS} - , { NULL, 0, 0} -}; - -static inline uint8_t contenttype_for_filename(const char *filename) { - // info("checking filename '%s'", filename); - - static int initialized = 0; - int i; - - if(unlikely(!initialized)) { - for (i = 0; mime_types[i].extension; i++) - mime_types[i].hash = simple_hash(mime_types[i].extension); - - initialized = 1; - } - - const char *s = filename, *last_dot = NULL; - - // find the last dot - while(*s) { - if(unlikely(*s == '.')) last_dot = s; - s++; - } - - if(unlikely(!last_dot || !*last_dot || !last_dot[1])) { - // info("no extension for filename '%s'", filename); - return CT_APPLICATION_OCTET_STREAM; - } - last_dot++; - - // info("extension for filename '%s' is '%s'", filename, last_dot); - - uint32_t hash = simple_hash(last_dot); - for(i = 0; mime_types[i].extension ; i++) { - if(unlikely(hash == mime_types[i].hash && !strcmp(last_dot, mime_types[i].extension))) { - // info("matched extension for filename '%s': '%s'", filename, last_dot); - return mime_types[i].contenttype; - } - } - - // info("not matched extension for filename '%s': '%s'", filename, last_dot); - return CT_APPLICATION_OCTET_STREAM; -} - -static inline int access_to_file_is_not_permitted(struct web_client *w, const char *filename) { - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "Access to file is not permitted: "); - buffer_strcat_htmlescape(w->response.data, filename); - return 403; -} - -int mysendfile(struct web_client *w, char *filename) { - debug(D_WEB_CLIENT, "%llu: Looking for file '%s/%s'", w->id, netdata_configured_web_dir, filename); - - if(!web_client_can_access_dashboard(w)) - return web_client_permission_denied(w); - - // skip leading slashes - while (*filename == '/') filename++; - - // if the filename contain known paths, skip them - if(strncmp(filename, WEB_PATH_FILE "/", strlen(WEB_PATH_FILE) + 1) == 0) - filename = &filename[strlen(WEB_PATH_FILE) + 1]; - - // if the filename contains "strange" characters, refuse to serve it - char *s; - for(s = filename; *s ;s++) { - if( !isalnum(*s) && *s != '/' && *s != '.' && *s != '-' && *s != '_') { - debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_sprintf(w->response.data, "Filename contains invalid characters: "); - buffer_strcat_htmlescape(w->response.data, filename); - return 400; - } - } - - // if the filename contains a .. 
refuse to serve it - if(strstr(filename, "..") != 0) { - debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "Relative filenames are not supported: "); - buffer_strcat_htmlescape(w->response.data, filename); - return 400; - } - - // find the physical file on disk - char webfilename[FILENAME_MAX + 1]; - snprintfz(webfilename, FILENAME_MAX, "%s/%s", netdata_configured_web_dir, filename); - - struct stat statbuf; - int done = 0; - while(!done) { - // check if the file exists - if (lstat(webfilename, &statbuf) != 0) { - debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not found.", w->id, webfilename); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "File does not exist, or is not accessible: "); - buffer_strcat_htmlescape(w->response.data, webfilename); - return 404; - } - - if ((statbuf.st_mode & S_IFMT) == S_IFDIR) { - snprintfz(webfilename, FILENAME_MAX, "%s/%s/index.html", netdata_configured_web_dir, filename); - continue; - } - - if ((statbuf.st_mode & S_IFMT) != S_IFREG) { - error("%llu: File '%s' is not a regular file. Access Denied.", w->id, webfilename); - return access_to_file_is_not_permitted(w, webfilename); - } - - // check if the file is owned by expected user - if (statbuf.st_uid != web_files_uid()) { - error("%llu: File '%s' is owned by user %u (expected user %u). Access Denied.", w->id, webfilename, statbuf.st_uid, web_files_uid()); - return access_to_file_is_not_permitted(w, webfilename); - } - - // check if the file is owned by expected group - if (statbuf.st_gid != web_files_gid()) { - error("%llu: File '%s' is owned by group %u (expected group %u). Access Denied.", w->id, webfilename, statbuf.st_gid, web_files_gid()); - return access_to_file_is_not_permitted(w, webfilename); - } - - done = 1; - } - - // open the file - w->ifd = open(webfilename, O_NONBLOCK, O_RDONLY); - if(w->ifd == -1) { - w->ifd = w->ofd; - - if(errno == EBUSY || errno == EAGAIN) { - error("%llu: File '%s' is busy, sending 307 Moved Temporarily to force retry.", w->id, webfilename); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_sprintf(w->response.header, "Location: /" WEB_PATH_FILE "/%s\r\n", filename); - buffer_strcat(w->response.data, "File is currently busy, please try again later: "); - buffer_strcat_htmlescape(w->response.data, webfilename); - return 307; - } - else { - error("%llu: Cannot open file '%s'.", w->id, webfilename); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "Cannot open file: "); - buffer_strcat_htmlescape(w->response.data, webfilename); - return 404; - } - } - - sock_setnonblock(w->ifd); - - w->response.data->contenttype = contenttype_for_filename(webfilename); - debug(D_WEB_CLIENT_ACCESS, "%llu: Sending file '%s' (%ld bytes, ifd %d, ofd %d).", w->id, webfilename, statbuf.st_size, w->ifd, w->ofd); - - w->mode = WEB_CLIENT_MODE_FILECOPY; - web_client_enable_wait_receive(w); - web_client_disable_wait_send(w); - buffer_flush(w->response.data); - buffer_need_bytes(w->response.data, (size_t)statbuf.st_size); - w->response.rlen = (size_t)statbuf.st_size; -#ifdef __APPLE__ - w->response.data->date = statbuf.st_mtimespec.tv_sec; -#else - w->response.data->date = statbuf.st_mtim.tv_sec; -#endif /* __APPLE__ */ - buffer_cacheable(w->response.data); - - return 200; -} - - -#ifdef NETDATA_WITH_ZLIB -void web_client_enable_deflate(struct web_client *w, int gzip) { - if(unlikely(w->response.zinitialized)) { 
- debug(D_DEFLATE, "%llu: Compression has already be initialized for this client.", w->id); - return; - } - - if(unlikely(w->response.sent)) { - error("%llu: Cannot enable compression in the middle of a conversation.", w->id); - return; - } - - w->response.zstream.zalloc = Z_NULL; - w->response.zstream.zfree = Z_NULL; - w->response.zstream.opaque = Z_NULL; - - w->response.zstream.next_in = (Bytef *)w->response.data->buffer; - w->response.zstream.avail_in = 0; - w->response.zstream.total_in = 0; - - w->response.zstream.next_out = w->response.zbuffer; - w->response.zstream.avail_out = 0; - w->response.zstream.total_out = 0; - - w->response.zstream.zalloc = Z_NULL; - w->response.zstream.zfree = Z_NULL; - w->response.zstream.opaque = Z_NULL; - -// if(deflateInit(&w->response.zstream, Z_DEFAULT_COMPRESSION) != Z_OK) { -// error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id); -// return; -// } - - // Select GZIP compression: windowbits = 15 + 16 = 31 - if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + ((gzip)?16:0), 8, web_gzip_strategy) != Z_OK) { - error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id); - return; - } - - w->response.zsent = 0; - w->response.zoutput = 1; - w->response.zinitialized = 1; - - debug(D_DEFLATE, "%llu: Initialized compression.", w->id); -} -#endif // NETDATA_WITH_ZLIB - -void buffer_data_options2string(BUFFER *wb, uint32_t options) { - int count = 0; - - if(options & RRDR_OPTION_NONZERO) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "nonzero"); - } - - if(options & RRDR_OPTION_REVERSED) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "flip"); - } - - if(options & RRDR_OPTION_JSON_WRAP) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "jsonwrap"); - } - - if(options & RRDR_OPTION_MIN2MAX) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "min2max"); - } - - if(options & RRDR_OPTION_MILLISECONDS) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "ms"); - } - - if(options & RRDR_OPTION_ABSOLUTE) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "absolute"); - } - - if(options & RRDR_OPTION_SECONDS) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "seconds"); - } - - if(options & RRDR_OPTION_NULL2ZERO) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "null2zero"); - } - - if(options & RRDR_OPTION_OBJECTSROWS) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "objectrows"); - } - - if(options & RRDR_OPTION_GOOGLE_JSON) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "google_json"); - } - - if(options & RRDR_OPTION_PERCENTAGE) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "percentage"); - } - - if(options & RRDR_OPTION_NOT_ALIGNED) { - if(count++) buffer_strcat(wb, " "); - buffer_strcat(wb, "unaligned"); - } -} - -const char *group_method2string(int group) { - switch(group) { - case GROUP_UNDEFINED: - return ""; - - case GROUP_AVERAGE: - return "average"; - - case GROUP_MIN: - return "min"; - - case GROUP_MAX: - return "max"; - - case GROUP_SUM: - return "sum"; - - case GROUP_INCREMENTAL_SUM: - return "incremental-sum"; - - default: - return "unknown-group-method"; - } -} - -static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) { - if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) { - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "This host does not maintain a 
database"); - return 400; - } - - return func(host, w, url); -} - -static inline int check_host_and_dashboard_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) { - if(!web_client_can_access_dashboard(w)) - return web_client_permission_denied(w); - - return check_host_and_call(host, w, url, func); -} - -int web_client_api_request(RRDHOST *host, struct web_client *w, char *url) -{ - // get the api version - char *tok = mystrsep(&url, "/?&"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for API version '%s'.", w->id, tok); - if(strcmp(tok, "v1") == 0) - return web_client_api_request_v1(host, w, url); - else { - buffer_flush(w->response.data); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "Unsupported API version: "); - buffer_strcat_htmlescape(w->response.data, tok); - return 404; - } - } - else { - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "Which API version?"); - return 400; - } -} - -const char *web_content_type_to_string(uint8_t contenttype) { - switch(contenttype) { - case CT_TEXT_HTML: - return "text/html; charset=utf-8"; - - case CT_APPLICATION_XML: - return "application/xml; charset=utf-8"; - - case CT_APPLICATION_JSON: - return "application/json; charset=utf-8"; - - case CT_APPLICATION_X_JAVASCRIPT: - return "application/x-javascript; charset=utf-8"; - - case CT_TEXT_CSS: - return "text/css; charset=utf-8"; - - case CT_TEXT_XML: - return "text/xml; charset=utf-8"; - - case CT_TEXT_XSL: - return "text/xsl; charset=utf-8"; - - case CT_APPLICATION_OCTET_STREAM: - return "application/octet-stream"; - - case CT_IMAGE_SVG_XML: - return "image/svg+xml"; - - case CT_APPLICATION_X_FONT_TRUETYPE: - return "application/x-font-truetype"; - - case CT_APPLICATION_X_FONT_OPENTYPE: - return "application/x-font-opentype"; - - case CT_APPLICATION_FONT_WOFF: - return "application/font-woff"; - - case CT_APPLICATION_FONT_WOFF2: - return "application/font-woff2"; - - case CT_APPLICATION_VND_MS_FONTOBJ: - return "application/vnd.ms-fontobject"; - - case CT_IMAGE_PNG: - return "image/png"; - - case CT_IMAGE_JPG: - return "image/jpeg"; - - case CT_IMAGE_GIF: - return "image/gif"; - - case CT_IMAGE_XICON: - return "image/x-icon"; - - case CT_IMAGE_BMP: - return "image/bmp"; - - case CT_IMAGE_ICNS: - return "image/icns"; - - case CT_PROMETHEUS: - return "text/plain; version=0.0.4"; - - default: - case CT_TEXT_PLAIN: - return "text/plain; charset=utf-8"; - } -} - - -const char *web_response_code_to_string(int code) { - switch(code) { - case 200: - return "OK"; - - case 307: - return "Temporary Redirect"; - - case 400: - return "Bad Request"; - - case 403: - return "Forbidden"; - - case 404: - return "Not Found"; - - case 412: - return "Preconditions Failed"; - - default: - if(code >= 100 && code < 200) - return "Informational"; - - if(code >= 200 && code < 300) - return "Successful"; - - if(code >= 300 && code < 400) - return "Redirection"; - - if(code >= 400 && code < 500) - return "Bad Request"; - - if(code >= 500 && code < 600) - return "Server Error"; - - return "Undefined Error"; - } -} - -static inline char *http_header_parse(struct web_client *w, char *s, int parse_useragent) { - static uint32_t hash_origin = 0, hash_connection = 0, hash_accept_encoding = 0, hash_donottrack = 0, hash_useragent = 0; - - if(unlikely(!hash_origin)) { - hash_origin = simple_uhash("Origin"); - hash_connection = simple_uhash("Connection"); - hash_accept_encoding = 
simple_uhash("Accept-Encoding"); - hash_donottrack = simple_uhash("DNT"); - hash_useragent = simple_uhash("User-Agent"); - } - - char *e = s; - - // find the : - while(*e && *e != ':') e++; - if(!*e) return e; - - // get the name - *e = '\0'; - - // find the value - char *v = e + 1, *ve; - - // skip leading spaces from value - while(*v == ' ') v++; - ve = v; - - // find the \r - while(*ve && *ve != '\r') ve++; - if(!*ve || ve[1] != '\n') { - *e = ':'; - return ve; - } - - // terminate the value - *ve = '\0'; - - // fprintf(stderr, "HEADER: '%s' = '%s'\n", s, v); - uint32_t hash = simple_uhash(s); - - if(hash == hash_origin && !strcasecmp(s, "Origin")) - strncpyz(w->origin, v, NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE); - - else if(hash == hash_connection && !strcasecmp(s, "Connection")) { - if(strcasestr(v, "keep-alive")) - web_client_enable_keepalive(w); - } - else if(respect_web_browser_do_not_track_policy && hash == hash_donottrack && !strcasecmp(s, "DNT")) { - if(*v == '0') web_client_disable_donottrack(w); - else if(*v == '1') web_client_enable_donottrack(w); - } - else if(parse_useragent && hash == hash_useragent && !strcasecmp(s, "User-Agent")) { - w->user_agent = strdupz(v); - } -#ifdef NETDATA_WITH_ZLIB - else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) { - if(web_enable_gzip) { - if(strcasestr(v, "gzip")) - web_client_enable_deflate(w, 1); - // - // does not seem to work - // else if(strcasestr(v, "deflate")) - // web_client_enable_deflate(w, 0); - } - } -#endif /* NETDATA_WITH_ZLIB */ - - *e = ':'; - *ve = '\r'; - return ve; -} - -// http_request_validate() -// returns: -// = 0 : all good, process the request -// > 0 : request is not supported -// < 0 : request is incomplete - wait for more data - -typedef enum { - HTTP_VALIDATION_OK, - HTTP_VALIDATION_NOT_SUPPORTED, - HTTP_VALIDATION_INCOMPLETE -} HTTP_VALIDATION; - -static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { - char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL; - - size_t last_pos = w->header_parse_last_size; - if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n - else last_pos = 0; - - w->header_parse_tries++; - w->header_parse_last_size = buffer_strlen(w->response.data); - - if(w->header_parse_tries > 1) { - if(w->header_parse_last_size < last_pos) - last_pos = 0; - - if(strstr(&s[last_pos], "\r\n\r\n") == NULL) { - if(w->header_parse_tries > 10) { - info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data)); - w->header_parse_tries = 0; - w->header_parse_last_size = 0; - web_client_disable_wait_receive(w); - return HTTP_VALIDATION_NOT_SUPPORTED; - } - - return HTTP_VALIDATION_INCOMPLETE; - } - } - - // is is a valid request? - if(!strncmp(s, "GET ", 4)) { - encoded_url = s = &s[4]; - w->mode = WEB_CLIENT_MODE_NORMAL; - } - else if(!strncmp(s, "OPTIONS ", 8)) { - encoded_url = s = &s[8]; - w->mode = WEB_CLIENT_MODE_OPTIONS; - } - else if(!strncmp(s, "STREAM ", 7)) { - encoded_url = s = &s[7]; - w->mode = WEB_CLIENT_MODE_STREAM; - } - else { - w->header_parse_tries = 0; - w->header_parse_last_size = 0; - web_client_disable_wait_receive(w); - return HTTP_VALIDATION_NOT_SUPPORTED; - } - - // find the SPACE + "HTTP/" - while(*s) { - // find the next space - while (*s && *s != ' ') s++; - - // is it SPACE + "HTTP/" ? 
- if(*s && !strncmp(s, " HTTP/", 6)) break; - else s++; - } - - // incomplete requests - if(unlikely(!*s)) { - web_client_enable_wait_receive(w); - return HTTP_VALIDATION_INCOMPLETE; - } - - // we have the end of encoded_url - remember it - char *ue = s; - - // make sure we have complete request - // complete requests contain: \r\n\r\n - while(*s) { - // find a line feed - while(*s && *s++ != '\r'); - - // did we reach the end? - if(unlikely(!*s)) break; - - // is it \r\n ? - if(likely(*s++ == '\n')) { - - // is it again \r\n ? (header end) - if(unlikely(*s == '\r' && s[1] == '\n')) { - // a valid complete HTTP request found - - *ue = '\0'; - url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1); - *ue = ' '; - - // copy the URL - we are going to overwrite parts of it - // FIXME -- we should avoid it - strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE); - - w->header_parse_tries = 0; - w->header_parse_last_size = 0; - web_client_disable_wait_receive(w); - return HTTP_VALIDATION_OK; - } - - // another header line - s = http_header_parse(w, s, - (w->mode == WEB_CLIENT_MODE_STREAM) // parse user agent - ); - } - } - - // incomplete request - web_client_enable_wait_receive(w); - return HTTP_VALIDATION_INCOMPLETE; -} - -static inline void web_client_send_http_header(struct web_client *w) { - if(unlikely(w->response.code != 200)) - buffer_no_cacheable(w->response.data); - - // set a proper expiration date, if not already set - if(unlikely(!w->response.data->expires)) { - if(w->response.data->options & WB_CONTENT_NO_CACHEABLE) - w->response.data->expires = w->tv_ready.tv_sec + localhost->rrd_update_every; - else - w->response.data->expires = w->tv_ready.tv_sec + 86400; - } - - // prepare the HTTP response header - debug(D_WEB_CLIENT, "%llu: Generating HTTP header with response %d.", w->id, w->response.code); - - const char *content_type_string = web_content_type_to_string(w->response.data->contenttype); - const char *code_msg = web_response_code_to_string(w->response.code); - - // prepare the last modified and expiration dates - char date[32], edate[32]; - { - struct tm tmbuf, *tm; - - tm = gmtime_r(&w->response.data->date, &tmbuf); - strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %Z", tm); - - tm = gmtime_r(&w->response.data->expires, &tmbuf); - strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", tm); - } - - buffer_sprintf(w->response.header_output, - "HTTP/1.1 %d %s\r\n" - "Connection: %s\r\n" - "Server: NetData Embedded HTTP Server v%s\r\n" - "Access-Control-Allow-Origin: %s\r\n" - "Access-Control-Allow-Credentials: true\r\n" - "Content-Type: %s\r\n" - "Date: %s\r\n" - , w->response.code, code_msg - , web_client_has_keepalive(w)?"keep-alive":"close" - , VERSION - , w->origin - , content_type_string - , date - ); - - if(unlikely(web_x_frame_options)) - buffer_sprintf(w->response.header_output, "X-Frame-Options: %s\r\n", web_x_frame_options); - - if(w->cookie1[0] || w->cookie2[0]) { - if(w->cookie1[0]) { - buffer_sprintf(w->response.header_output, - "Set-Cookie: %s\r\n", - w->cookie1); - } - - if(w->cookie2[0]) { - buffer_sprintf(w->response.header_output, - "Set-Cookie: %s\r\n", - w->cookie2); - } - - if(respect_web_browser_do_not_track_policy) - buffer_sprintf(w->response.header_output, - "Tk: T;cookies\r\n"); - } - else { - if(respect_web_browser_do_not_track_policy) { - if(web_client_has_tracking_required(w)) - buffer_sprintf(w->response.header_output, - "Tk: T;cookies\r\n"); - else - buffer_sprintf(w->response.header_output, - "Tk: 
N\r\n"); - } - } - - if(w->mode == WEB_CLIENT_MODE_OPTIONS) { - buffer_strcat(w->response.header_output, - "Access-Control-Allow-Methods: GET, OPTIONS\r\n" - "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control\r\n" - "Access-Control-Max-Age: 1209600\r\n" // 86400 * 14 - ); - } - else { - buffer_sprintf(w->response.header_output, - "Cache-Control: %s\r\n" - "Expires: %s\r\n", - (w->response.data->options & WB_CONTENT_NO_CACHEABLE)?"no-cache":"public", - edate); - } - - // copy a possibly available custom header - if(unlikely(buffer_strlen(w->response.header))) - buffer_strcat(w->response.header_output, buffer_tostring(w->response.header)); - - // headers related to the transfer method - if(likely(w->response.zoutput)) { - buffer_strcat(w->response.header_output, - "Content-Encoding: gzip\r\n" - "Transfer-Encoding: chunked\r\n" - ); - } - else { - if(likely((w->response.data->len || w->response.rlen))) { - // we know the content length, put it - buffer_sprintf(w->response.header_output, "Content-Length: %zu\r\n", w->response.data->len? w->response.data->len: w->response.rlen); - } - else { - // we don't know the content length, disable keep-alive - web_client_disable_keepalive(w); - } - } - - // end of HTTP header - buffer_strcat(w->response.header_output, "\r\n"); - - // sent the HTTP header - debug(D_WEB_DATA, "%llu: Sending response HTTP header of size %zu: '%s'" - , w->id - , buffer_strlen(w->response.header_output) - , buffer_tostring(w->response.header_output) - ); - - web_client_crock_socket(w); - - size_t count = 0; - ssize_t bytes; - while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) { - count++; - - if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) { - error("Cannot send HTTP headers to web client."); - break; - } - } - - if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) { - if(bytes > 0) - w->stats_sent_bytes += bytes; - - error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client." 
- , buffer_strlen(w->response.header_output) - , bytes); - - WEB_CLIENT_IS_DEAD(w); - return; - } - else - w->stats_sent_bytes += bytes; -} - -static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url); - -static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, char *url) { - static uint32_t hash_localhost = 0; - - if(unlikely(!hash_localhost)) { - hash_localhost = simple_hash("localhost"); - } - - if(host != localhost) { - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Nesting of hosts is not allowed."); - return 400; - } - - char *tok = mystrsep(&url, "/?&"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for host with name '%s'.", w->id, tok); - - // copy the URL, we need it to serve files - w->last_url[0] = '/'; - if(url && *url) strncpyz(&w->last_url[1], url, NETDATA_WEB_REQUEST_URL_SIZE - 1); - else w->last_url[1] = '\0'; - - uint32_t hash = simple_hash(tok); - - host = rrdhost_find_by_hostname(tok, hash); - if(!host) host = rrdhost_find_by_guid(tok, hash); - - if(host) return web_client_process_url(host, w, url); - } - - buffer_flush(w->response.data); - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "This netdata does not maintain a database for host: "); - buffer_strcat_htmlescape(w->response.data, tok?tok:""); - return 404; -} - -static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url) { - static uint32_t - hash_api = 0, - hash_netdata_conf = 0, - hash_data = 0, - hash_datasource = 0, - hash_graph = 0, - hash_list = 0, - hash_all_json = 0, - hash_host = 0; - -#ifdef NETDATA_INTERNAL_CHECKS - static uint32_t hash_exit = 0, hash_debug = 0, hash_mirror = 0; -#endif - - if(unlikely(!hash_api)) { - hash_api = simple_hash("api"); - hash_netdata_conf = simple_hash("netdata.conf"); - hash_data = simple_hash(WEB_PATH_DATA); - hash_datasource = simple_hash(WEB_PATH_DATASOURCE); - hash_graph = simple_hash(WEB_PATH_GRAPH); - hash_list = simple_hash("list"); - hash_all_json = simple_hash("all.json"); - hash_host = simple_hash("host"); -#ifdef NETDATA_INTERNAL_CHECKS - hash_exit = simple_hash("exit"); - hash_debug = simple_hash("debug"); - hash_mirror = simple_hash("mirror"); -#endif - } - - char *tok = mystrsep(&url, "/?"); - if(likely(tok && *tok)) { - uint32_t hash = simple_hash(tok); - debug(D_WEB_CLIENT, "%llu: Processing command '%s'.", w->id, tok); - - if(unlikely(hash == hash_api && strcmp(tok, "api") == 0)) { // current API - debug(D_WEB_CLIENT_ACCESS, "%llu: API request ...", w->id); - return check_host_and_call(host, w, url, web_client_api_request); - } - else if(unlikely(hash == hash_host && strcmp(tok, "host") == 0)) { // host switching - debug(D_WEB_CLIENT_ACCESS, "%llu: host switch request ...", w->id); - return web_client_switch_host(host, w, url); - } - else if(unlikely(hash == hash_data && strcmp(tok, WEB_PATH_DATA) == 0)) { // old API "data" - debug(D_WEB_CLIENT_ACCESS, "%llu: old API data request...", w->id); - return check_host_and_dashboard_acl_and_call(host, w, url, web_client_api_old_data_request_json); - } - else if(unlikely(hash == hash_datasource && strcmp(tok, WEB_PATH_DATASOURCE) == 0)) { // old API "datasource" - debug(D_WEB_CLIENT_ACCESS, "%llu: old API datasource request...", w->id); - return check_host_and_dashboard_acl_and_call(host, w, url, web_client_api_old_data_request_jsonp); - } - else if(unlikely(hash == hash_graph && strcmp(tok, WEB_PATH_GRAPH) == 0)) { // old API "graph" - debug(D_WEB_CLIENT_ACCESS, 
"%llu: old API graph request...", w->id); - return check_host_and_dashboard_acl_and_call(host, w, url, web_client_api_old_graph_request); - } - else if(unlikely(hash == hash_list && strcmp(tok, "list") == 0)) { // old API "list" - debug(D_WEB_CLIENT_ACCESS, "%llu: old API list request...", w->id); - return check_host_and_dashboard_acl_and_call(host, w, url, web_client_api_old_list_request); - } - else if(unlikely(hash == hash_all_json && strcmp(tok, "all.json") == 0)) { // old API "all.json" - debug(D_WEB_CLIENT_ACCESS, "%llu: old API all.json request...", w->id); - return check_host_and_dashboard_acl_and_call(host, w, url, web_client_api_old_all_json); - } - else if(unlikely(hash == hash_netdata_conf && strcmp(tok, "netdata.conf") == 0)) { // netdata.conf - if(unlikely(!web_client_can_access_netdataconf(w))) - return web_client_permission_denied(w); - - debug(D_WEB_CLIENT_ACCESS, "%llu: generating netdata.conf ...", w->id); - w->response.data->contenttype = CT_TEXT_PLAIN; - buffer_flush(w->response.data); - config_generate(w->response.data, 0); - return 200; - } -#ifdef NETDATA_INTERNAL_CHECKS - else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) { - if(unlikely(!web_client_can_access_netdataconf(w))) - return web_client_permission_denied(w); - - w->response.data->contenttype = CT_TEXT_PLAIN; - buffer_flush(w->response.data); - - if(!netdata_exit) - buffer_strcat(w->response.data, "ok, will do..."); - else - buffer_strcat(w->response.data, "I am doing it already"); - - error("web request to exit received."); - netdata_cleanup_and_exit(0); - return 200; - } - else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) { - if(unlikely(!web_client_can_access_netdataconf(w))) - return web_client_permission_denied(w); - - buffer_flush(w->response.data); - - // get the name of the data to show - tok = mystrsep(&url, "/?&"); - if(tok && *tok) { - debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok); - - // do we have such a data set? 
- RRDSET *st = rrdset_find_byname(host, tok); - if(!st) st = rrdset_find(host, tok); - if(!st) { - w->response.data->contenttype = CT_TEXT_HTML; - buffer_strcat(w->response.data, "Chart is not found: "); - buffer_strcat_htmlescape(w->response.data, tok); - debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok); - return 404; - } - - debug_flags |= D_RRD_STATS; - - if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)) - rrdset_flag_clear(st, RRDSET_FLAG_DEBUG); - else - rrdset_flag_set(st, RRDSET_FLAG_DEBUG); - - w->response.data->contenttype = CT_TEXT_HTML; - buffer_sprintf(w->response.data, "Chart has now debug %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled"); - buffer_strcat_htmlescape(w->response.data, tok); - debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled"); - return 200; - } - - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "debug which chart?\r\n"); - return 400; - } - else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) { - if(unlikely(!web_client_can_access_netdataconf(w))) - return web_client_permission_denied(w); - - debug(D_WEB_CLIENT_ACCESS, "%llu: Mirroring...", w->id); - - // replace the zero bytes with spaces - buffer_char_replace(w->response.data, '\0', ' '); - - // just leave the buffer as is - // it will be copied back to the client - - return 200; - } -#endif /* NETDATA_INTERNAL_CHECKS */ - } - - char filename[FILENAME_MAX+1]; - url = filename; - strncpyz(filename, w->last_url, FILENAME_MAX); - tok = mystrsep(&url, "?"); - buffer_flush(w->response.data); - return mysendfile(w, (tok && *tok)?tok:"/"); -} - -void web_client_process_request(struct web_client *w) { - - // start timing us - now_realtime_timeval(&w->tv_in); - - switch(http_request_validate(w)) { - case HTTP_VALIDATION_OK: - switch(w->mode) { - case WEB_CLIENT_MODE_STREAM: - if(unlikely(!web_client_can_access_stream(w))) { - web_client_permission_denied(w); - return; - } - - w->response.code = rrdpush_receiver_thread_spawn(localhost, w, w->decoded_url); - return; - - case WEB_CLIENT_MODE_OPTIONS: - if(unlikely(!web_client_can_access_dashboard(w) && !web_client_can_access_registry(w) && !web_client_can_access_badges(w))) { - web_client_permission_denied(w); - return; - } - - w->response.data->contenttype = CT_TEXT_PLAIN; - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "OK"); - w->response.code = 200; - break; - - case WEB_CLIENT_MODE_FILECOPY: - case WEB_CLIENT_MODE_NORMAL: - if(unlikely(!web_client_can_access_dashboard(w) && !web_client_can_access_registry(w) && !web_client_can_access_badges(w))) { - web_client_permission_denied(w); - return; - } - - w->response.code = web_client_process_url(localhost, w, w->decoded_url); - break; - } - break; - - case HTTP_VALIDATION_INCOMPLETE: - if(w->response.data->len > NETDATA_WEB_REQUEST_MAX_SIZE) { - strcpy(w->last_url, "too big request"); - - debug(D_WEB_CLIENT_ACCESS, "%llu: Received request is too big (%zu bytes).", w->id, w->response.data->len); - - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "Received request is too big (%zu bytes).\r\n", w->response.data->len); - w->response.code = 400; - } - else { - // wait for more data - return; - } - break; - - case HTTP_VALIDATION_NOT_SUPPORTED: - debug(D_WEB_CLIENT_ACCESS, "%llu: Cannot understand '%s'.", w->id, w->response.data->buffer); - - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "I don't understand you...\r\n"); - 
w->response.code = 400; - break; - } - - // keep track of the time we finished processing - now_realtime_timeval(&w->tv_ready); - - w->response.sent = 0; - - // set a proper last modified date - if(unlikely(!w->response.data->date)) - w->response.data->date = w->tv_ready.tv_sec; - - web_client_send_http_header(w); - - // enable sending immediately if we have data - if(w->response.data->len) web_client_enable_wait_send(w); - else web_client_disable_wait_send(w); - - switch(w->mode) { - case WEB_CLIENT_MODE_STREAM: - debug(D_WEB_CLIENT, "%llu: STREAM done.", w->id); - break; - - case WEB_CLIENT_MODE_OPTIONS: - debug(D_WEB_CLIENT, "%llu: Done preparing the OPTIONS response. Sending data (%zu bytes) to client.", w->id, w->response.data->len); - break; - - case WEB_CLIENT_MODE_NORMAL: - debug(D_WEB_CLIENT, "%llu: Done preparing the response. Sending data (%zu bytes) to client.", w->id, w->response.data->len); - break; - - case WEB_CLIENT_MODE_FILECOPY: - if(w->response.rlen) { - debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending data file of %zu bytes to client.", w->id, w->response.rlen); - web_client_enable_wait_receive(w); - - /* - // utilize the kernel sendfile() for copying the file to the socket. - // this block of code can be commented out without losing functionality. - // when it is commented out, the program will copy the data using async I/O. - { - long len = sendfile(w->ofd, w->ifd, NULL, w->response.data->rbytes); - if(len != w->response.data->rbytes) - error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len); - else - web_client_request_done(w); - } - */ - } - else - debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending an unknown amount of bytes to client.", w->id); - break; - - default: - fatal("%llu: Unknown client mode %u.", w->id, w->mode); - break; - } -} - -ssize_t web_client_send_chunk_header(struct web_client *w, size_t len) -{ - debug(D_DEFLATE, "%llu: OPEN CHUNK of %zu bytes (hex: %zx).", w->id, len, len); - char buf[24]; - sprintf(buf, "%zX\r\n", len); - - ssize_t bytes = send(w->ofd, buf, strlen(buf), 0); - if(bytes > 0) { - debug(D_DEFLATE, "%llu: Sent chunk header %zd bytes.", w->id, bytes); - w->stats_sent_bytes += bytes; - } - - else if(bytes == 0) { - debug(D_WEB_CLIENT, "%llu: Did not send chunk header to the client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - else { - debug(D_WEB_CLIENT, "%llu: Failed to send chunk header to client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return bytes; -} - -ssize_t web_client_send_chunk_close(struct web_client *w) -{ - //debug(D_DEFLATE, "%llu: CLOSE CHUNK.", w->id); - - ssize_t bytes = send(w->ofd, "\r\n", 2, 0); - if(bytes > 0) { - debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes); - w->stats_sent_bytes += bytes; - } - - else if(bytes == 0) { - debug(D_WEB_CLIENT, "%llu: Did not send chunk suffix to the client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - else { - debug(D_WEB_CLIENT, "%llu: Failed to send chunk suffix to client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return bytes; -} - -ssize_t web_client_send_chunk_finalize(struct web_client *w) -{ - //debug(D_DEFLATE, "%llu: FINALIZE CHUNK.", w->id); - - ssize_t bytes = send(w->ofd, "\r\n0\r\n\r\n", 7, 0); - if(bytes > 0) { - debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes); - w->stats_sent_bytes += bytes; - } - - else if(bytes == 0) { - debug(D_WEB_CLIENT, "%llu: Did not send chunk finalize suffix to the client.", w->id); - 
WEB_CLIENT_IS_DEAD(w); - } - else { - debug(D_WEB_CLIENT, "%llu: Failed to send chunk finalize suffix to client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return bytes; -} - -#ifdef NETDATA_WITH_ZLIB -ssize_t web_client_send_deflate(struct web_client *w) -{ - ssize_t len = 0, t = 0; - - // when using compression, - // w->response.sent is the amount of bytes passed through compression - - debug(D_DEFLATE, "%llu: web_client_send_deflate(): w->response.data->len = %zu, w->response.sent = %zu, w->response.zhave = %zu, w->response.zsent = %zu, w->response.zstream.avail_in = %u, w->response.zstream.avail_out = %u, w->response.zstream.total_in = %lu, w->response.zstream.total_out = %lu.", - w->id, w->response.data->len, w->response.sent, w->response.zhave, w->response.zsent, w->response.zstream.avail_in, w->response.zstream.avail_out, w->response.zstream.total_in, w->response.zstream.total_out); - - if(w->response.data->len - w->response.sent == 0 && w->response.zstream.avail_in == 0 && w->response.zhave == w->response.zsent && w->response.zstream.avail_out != 0) { - // there is nothing to send - - debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id); - - // finalize the chunk - if(w->response.sent != 0) { - t = web_client_send_chunk_finalize(w); - if(t < 0) return t; - } - - if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) { - // we have to wait, more data will come - debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id); - web_client_disable_wait_send(w); - return t; - } - - if(unlikely(!web_client_has_keepalive(w))) { - debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent); - WEB_CLIENT_IS_DEAD(w); - return t; - } - - // reset the client - web_client_request_done(w); - debug(D_WEB_CLIENT, "%llu: Done sending all data on socket.", w->id); - return t; - } - - if(w->response.zhave == w->response.zsent) { - // compress more input data - - // close the previous open chunk - if(w->response.sent != 0) { - t = web_client_send_chunk_close(w); - if(t < 0) return t; - } - - debug(D_DEFLATE, "%llu: Compressing %zu new bytes starting from %zu (and %u left behind).", w->id, (w->response.data->len - w->response.sent), w->response.sent, w->response.zstream.avail_in); - - // give the compressor all the data not passed through the compressor yet - if(w->response.data->len > w->response.sent) { - w->response.zstream.next_in = (Bytef *)&w->response.data->buffer[w->response.sent - w->response.zstream.avail_in]; - w->response.zstream.avail_in += (uInt) (w->response.data->len - w->response.sent); - } - - // reset the compressor output buffer - w->response.zstream.next_out = w->response.zbuffer; - w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE; - - // ask for FINISH if we have all the input - int flush = Z_SYNC_FLUSH; - if(w->mode == WEB_CLIENT_MODE_NORMAL - || (w->mode == WEB_CLIENT_MODE_FILECOPY && !web_client_has_wait_receive(w) && w->response.data->len == w->response.rlen)) { - flush = Z_FINISH; - debug(D_DEFLATE, "%llu: Requesting Z_FINISH, if possible.", w->id); - } - else { - debug(D_DEFLATE, "%llu: Requesting Z_SYNC_FLUSH.", w->id); - } - - // compress - if(deflate(&w->response.zstream, flush) == Z_STREAM_ERROR) { - error("%llu: Compression failed. 
Closing down client.", w->id); - web_client_request_done(w); - return(-1); - } - - w->response.zhave = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out; - w->response.zsent = 0; - - // keep track of the bytes passed through the compressor - w->response.sent = w->response.data->len; - - debug(D_DEFLATE, "%llu: Compression produced %zu bytes.", w->id, w->response.zhave); - - // open a new chunk - ssize_t t2 = web_client_send_chunk_header(w, w->response.zhave); - if(t2 < 0) return t2; - t += t2; - } - - debug(D_WEB_CLIENT, "%llu: Sending %zu bytes of data (+%zd of chunk header).", w->id, w->response.zhave - w->response.zsent, t); - - len = send(w->ofd, &w->response.zbuffer[w->response.zsent], (size_t) (w->response.zhave - w->response.zsent), MSG_DONTWAIT); - if(len > 0) { - w->stats_sent_bytes += len; - w->response.zsent += len; - len += t; - debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, len); - } - else if(len == 0) { - debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client (zhave = %zu, zsent = %zu, need to send = %zu).", - w->id, w->response.zhave, w->response.zsent, w->response.zhave - w->response.zsent); - - WEB_CLIENT_IS_DEAD(w); - } - else { - debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return(len); -} -#endif // NETDATA_WITH_ZLIB - -ssize_t web_client_send(struct web_client *w) { -#ifdef NETDATA_WITH_ZLIB - if(likely(w->response.zoutput)) return web_client_send_deflate(w); -#endif // NETDATA_WITH_ZLIB - - ssize_t bytes; - - if(unlikely(w->response.data->len - w->response.sent == 0)) { - // there is nothing to send - - debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id); - - // there can be two cases for this - // A. we have done everything - // B. we temporarily have nothing to send, waiting for the buffer to be filled by ifd - - if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) { - // we have to wait, more data will come - debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id); - web_client_disable_wait_send(w); - return 0; - } - - if(unlikely(!web_client_has_keepalive(w))) { - debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent); - WEB_CLIENT_IS_DEAD(w); - return 0; - } - - web_client_request_done(w); - debug(D_WEB_CLIENT, "%llu: Done sending all data on socket. 
Waiting for next request on the same socket.", w->id); - return 0; - } - - bytes = send(w->ofd, &w->response.data->buffer[w->response.sent], w->response.data->len - w->response.sent, MSG_DONTWAIT); - if(likely(bytes > 0)) { - w->stats_sent_bytes += bytes; - w->response.sent += bytes; - debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, bytes); - } - else if(likely(bytes == 0)) { - debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - else { - debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return(bytes); -} - -ssize_t web_client_read_file(struct web_client *w) -{ - if(unlikely(w->response.rlen > w->response.data->size)) - buffer_need_bytes(w->response.data, w->response.rlen - w->response.data->size); - - if(unlikely(w->response.rlen <= w->response.data->len)) - return 0; - - ssize_t left = w->response.rlen - w->response.data->len; - ssize_t bytes = read(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t)left); - if(likely(bytes > 0)) { - size_t old = w->response.data->len; - w->response.data->len += bytes; - w->response.data->buffer[w->response.data->len] = '\0'; - - debug(D_WEB_CLIENT, "%llu: Read %zd bytes.", w->id, bytes); - debug(D_WEB_DATA, "%llu: Read data: '%s'.", w->id, &w->response.data->buffer[old]); - - web_client_enable_wait_send(w); - - if(w->response.rlen && w->response.data->len >= w->response.rlen) - web_client_disable_wait_receive(w); - } - else if(likely(bytes == 0)) { - debug(D_WEB_CLIENT, "%llu: Out of input file data.", w->id); - - // if we cannot read, it means we have an error on input. - // if however, we are copying a file from ifd to ofd, we should not return an error. - // in this case, the error should be generated when the file has been sent to the client. - - // we are copying data from ifd to ofd - // let it finish copying... - web_client_disable_wait_receive(w); - - debug(D_WEB_CLIENT, "%llu: Read the whole file.", w->id); - - if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) { - if (w->ifd != w->ofd) close(w->ifd); - } - - w->ifd = w->ofd; - } - else { - debug(D_WEB_CLIENT, "%llu: read data failed.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return(bytes); -} - -ssize_t web_client_receive(struct web_client *w) -{ - if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) - return web_client_read_file(w); - - // do we have any space for more data? 
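/*
 * What follows sizes the receive: buffer_need_bytes() grows the response
 * buffer so that at least NETDATA_WEB_REQUEST_RECEIVE_SIZE bytes are free,
 * and recv() is asked for one byte less than the free space, so the buffer
 * can always be NUL-terminated right after the read.
 */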
- buffer_need_bytes(w->response.data, NETDATA_WEB_REQUEST_RECEIVE_SIZE); - - ssize_t left = w->response.data->size - w->response.data->len; - ssize_t bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT); - - if(likely(bytes > 0)) { - w->stats_received_bytes += bytes; - - size_t old = w->response.data->len; - w->response.data->len += bytes; - w->response.data->buffer[w->response.data->len] = '\0'; - - debug(D_WEB_CLIENT, "%llu: Received %zd bytes.", w->id, bytes); - debug(D_WEB_DATA, "%llu: Received data: '%s'.", w->id, &w->response.data->buffer[old]); - } - else { - debug(D_WEB_CLIENT, "%llu: receive data failed.", w->id); - WEB_CLIENT_IS_DEAD(w); - } - - return(bytes); -} diff --git a/src/web_client.h b/src/web_client.h deleted file mode 100644 index b495c37e1..000000000 --- a/src/web_client.h +++ /dev/null @@ -1,189 +0,0 @@ -#ifndef NETDATA_WEB_CLIENT_H -#define NETDATA_WEB_CLIENT_H 1 - -#ifdef NETDATA_WITH_ZLIB -extern int web_enable_gzip, - web_gzip_level, - web_gzip_strategy; -#endif /* NETDATA_WITH_ZLIB */ - -extern int respect_web_browser_do_not_track_policy; -extern char *web_x_frame_options; - -typedef enum web_client_mode { - WEB_CLIENT_MODE_NORMAL = 0, - WEB_CLIENT_MODE_FILECOPY = 1, - WEB_CLIENT_MODE_OPTIONS = 2, - WEB_CLIENT_MODE_STREAM = 3 -} WEB_CLIENT_MODE; - -typedef enum web_client_flags { - WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead - - WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used - - WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting more input data - WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client - - WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client - WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies - - WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket - WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8, // if set, the client is using a UNIX socket - - WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = 1 << 9, // don't close the socket when cleaning up (static-threaded web server) -} WEB_CLIENT_FLAGS; - -//#ifdef HAVE_C___ATOMIC -//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag) -//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST) -//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST) -//#else -#define web_client_flag_check(w, flag) ((w)->flags & (flag)) -#define web_client_flag_set(w, flag) (w)->flags |= flag -#define web_client_flag_clear(w, flag) (w)->flags &= ~flag -//#endif - -#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD) -#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD) - -#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE) -#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE) -#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE) - -#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK) -#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK) -#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK) - -#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED) -#define 
web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED) -#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED) - -#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE) -#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE) -#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE) - -#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND) -#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND) -#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND) - -#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT) -#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT) -#define web_client_check_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_UNIX_CLIENT) - -#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT) - -#define NETDATA_WEB_REQUEST_URL_SIZE 8192 -#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384 -#define NETDATA_WEB_RESPONSE_HEADER_SIZE 4096 -#define NETDATA_WEB_REQUEST_COOKIE_SIZE 1024 -#define NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE 1024 -#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 16384 -#define NETDATA_WEB_REQUEST_RECEIVE_SIZE 16384 -#define NETDATA_WEB_REQUEST_MAX_SIZE 16384 - -struct response { - BUFFER *header; // our response header - BUFFER *header_output; // internal use - BUFFER *data; // our response data buffer - - int code; // the HTTP response code - - size_t rlen; // if non-zero, the expected size of ifd (input of filecopy) - size_t sent; // current data length sent to output - - int zoutput; // if set to 1, web_client_send() will send compressed data -#ifdef NETDATA_WITH_ZLIB - z_stream zstream; // zlib stream for sending compressed output to client - Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output - size_t zsent; // the compressed bytes we have sent to the client - size_t zhave; // the compressed bytes that we have received from zlib - int zinitialized:1; -#endif /* NETDATA_WITH_ZLIB */ - -}; - -typedef enum web_client_acl { - WEB_CLIENT_ACL_NONE = 0, - WEB_CLIENT_ACL_NOCHECK = 0, - WEB_CLIENT_ACL_DASHBOARD = 1 << 0, - WEB_CLIENT_ACL_REGISTRY = 1 << 1, - WEB_CLIENT_ACL_BADGE = 1 << 2 -} WEB_CLIENT_ACL; - -#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD) -#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY) -#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE) - -#define web_client_can_access_stream(w) \ - (!web_allow_streaming_from || simple_pattern_matches(web_allow_streaming_from, (w)->client_ip)) - -#define web_client_can_access_netdataconf(w) \ - (!web_allow_netdataconf_from || simple_pattern_matches(web_allow_netdataconf_from, (w)->client_ip)) - -struct web_client { - unsigned long long id; - - WEB_CLIENT_FLAGS flags; // status flags for the client - WEB_CLIENT_MODE mode; // the operational mode of the client - WEB_CLIENT_ACL acl; // the access list of the client - - size_t header_parse_tries; - size_t header_parse_last_size; - - int tcp_cork; // 1 = we have a cork on the socket - - int ifd; - int ofd; - - char client_ip[NI_MAXHOST+1]; - char client_port[NI_MAXSERV+1]; - - char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode 
the URL in this buffer - char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1]; // we keep a copy of the decoded URL here - - struct timeval tv_in, tv_ready; - - char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE+1]; - char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE+1]; - char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE+1]; - char *user_agent; - - struct response response; - - size_t stats_received_bytes; - size_t stats_sent_bytes; - - // cache of web_client allocations - struct web_client *prev; // maintain a linked list of web clients - struct web_client *next; // for the web servers that need it - - // MULTI-THREADED WEB SERVER MEMBERS - netdata_thread_t thread; // the thread servicing this client - volatile int running; // 1 when the thread runs, 0 otherwise - - // STATIC-THREADED WEB SERVER MEMBERS - size_t pollinfo_slot; // POLLINFO slot of the web client - size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read -}; - -extern uid_t web_files_uid(void); -extern uid_t web_files_gid(void); - -extern int web_client_permission_denied(struct web_client *w); - -extern ssize_t web_client_send(struct web_client *w); -extern ssize_t web_client_receive(struct web_client *w); -extern ssize_t web_client_read_file(struct web_client *w); - -extern void web_client_process_request(struct web_client *w); -extern void web_client_request_done(struct web_client *w); - -extern int web_client_api_request_v1_data_group(char *name, int def); -extern const char *group_method2string(int group); - -extern void buffer_data_options2string(BUFFER *wb, uint32_t options); - -extern int mysendfile(struct web_client *w, char *filename); - -#endif diff --git a/src/web_server.c b/src/web_server.c deleted file mode 100644 index 31c546411..000000000 --- a/src/web_server.c +++ /dev/null @@ -1,1292 +0,0 @@ -#include "common.h" - -// this file includes 3 web servers: -// -// 1. single-threaded, based on select() -// 2. multi-threaded, based on poll(), spawning one thread per client to handle the requests (each thread also uses poll()) -// 3. static-threaded, based on poll() using a fixed number of threads (configured in netdata.conf) - -WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; - -// -------------------------------------------------------------------------------------- - -WEB_SERVER_MODE web_server_mode_id(const char *mode) { - if(!strcmp(mode, "none")) - return WEB_SERVER_MODE_NONE; - else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded")) - return WEB_SERVER_MODE_SINGLE_THREADED; - else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded")) - return WEB_SERVER_MODE_STATIC_THREADED; - else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded")) - return WEB_SERVER_MODE_MULTI_THREADED; -} - -const char *web_server_mode_name(WEB_SERVER_MODE id) { - switch(id) { - case WEB_SERVER_MODE_NONE: - return "none"; - - case WEB_SERVER_MODE_SINGLE_THREADED: - return "single-threaded"; - - case WEB_SERVER_MODE_STATIC_THREADED: - return "static-threaded"; - - default: - case WEB_SERVER_MODE_MULTI_THREADED: - return "multi-threaded"; - } -} - -// -------------------------------------------------------------------------------------- -// API sockets - -static LISTEN_SOCKETS api_sockets = { - .config_section = CONFIG_SECTION_WEB, - .default_bind_to = "*", - .default_port = API_LISTEN_PORT, - .backlog = API_LISTEN_BACKLOG -}; - -int api_listen_sockets_setup(void) { - int socks = listen_sockets_setup(&api_sockets); - - if(!socks) - fatal("LISTENER: Cannot listen on any API socket. 
Exiting..."); - - return socks; -} - - -// -------------------------------------------------------------------------------------- -// access lists - -SIMPLE_PATTERN *web_allow_connections_from = NULL; -SIMPLE_PATTERN *web_allow_streaming_from = NULL; -SIMPLE_PATTERN *web_allow_netdataconf_from = NULL; - -// WEB_CLIENT_ACL -SIMPLE_PATTERN *web_allow_dashboard_from = NULL; -SIMPLE_PATTERN *web_allow_registry_from = NULL; -SIMPLE_PATTERN *web_allow_badges_from = NULL; - -static void web_client_update_acl_matches(struct web_client *w) { - w->acl = WEB_CLIENT_ACL_NONE; - - if(!web_allow_dashboard_from || simple_pattern_matches(web_allow_dashboard_from, w->client_ip)) - w->acl |= WEB_CLIENT_ACL_DASHBOARD; - - if(!web_allow_registry_from || simple_pattern_matches(web_allow_registry_from, w->client_ip)) - w->acl |= WEB_CLIENT_ACL_REGISTRY; - - if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip)) - w->acl |= WEB_CLIENT_ACL_BADGE; -} - - -// -------------------------------------------------------------------------------------- - -static void log_connection(struct web_client *w, const char *msg) { - log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg); -} - -// ---------------------------------------------------------------------------- -// allocate and free web_clients - -static void web_client_zero(struct web_client *w) { - // zero everything about it - but keep the buffers - - // remember the pointers to the buffers - BUFFER *b1 = w->response.data; - BUFFER *b2 = w->response.header; - BUFFER *b3 = w->response.header_output; - - // empty the buffers - buffer_flush(b1); - buffer_flush(b2); - buffer_flush(b3); - - freez(w->user_agent); - - // zero everything - memset(w, 0, sizeof(struct web_client)); - - // restore the pointers of the buffers - w->response.data = b1; - w->response.header = b2; - w->response.header_output = b3; -} - -static void web_client_free(struct web_client *w) { - buffer_free(w->response.header_output); - buffer_free(w->response.header); - buffer_free(w->response.data); - freez(w->user_agent); - freez(w); -} - -static struct web_client *web_client_alloc(void) { - struct web_client *w = callocz(1, sizeof(struct web_client)); - w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); - w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE); - w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE); - return w; -} - -// ---------------------------------------------------------------------------- -// web clients caching - -// When clients connect and disconnect, avoid allocating and releasing memory. -// Instead, when new clients get connected, reuse any memory previously allocated -// for serving web clients that are now disconnected. - -// The size of the cache is adaptive. It caches the structures of 2x -// the number of currently connected clients. 
- -// Comments per server: -// SINGLE-THREADED : 1 cache is maintained -// MULTI-THREADED : 1 cache is maintained -// STATIC-THREADED : 1 cache for each thread of the web server - -struct clients_cache { - pid_t pid; - - struct web_client *used; // the structures of the currently connected clients - size_t used_count; // the count of the currently connected clients - - struct web_client *avail; // the cached structures, available for future clients - size_t avail_count; // the number of cached structures - - size_t reused; // the number of re-uses - size_t allocated; // the number of allocations -}; - -static __thread struct clients_cache web_clients_cache = { - .pid = 0, - .used = NULL, - .used_count = 0, - .avail = NULL, - .avail_count = 0, - .allocated = 0, - .reused = 0 -}; - -static inline void web_client_cache_verify(int force) { -#ifdef NETDATA_INTERNAL_CHECKS - static __thread size_t count = 0; - count++; - - if(unlikely(force || count > 1000)) { - count = 0; - - struct web_client *w; - size_t used = 0, avail = 0; - for(w = web_clients_cache.used; w ; w = w->next) used++; - for(w = web_clients_cache.avail; w ; w = w->next) avail++; - - info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)." - , used, web_clients_cache.used_count - , avail, web_clients_cache.avail_count - , web_clients_cache.allocated - , web_clients_cache.reused - , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0 - ); - } -#else - if(unlikely(force)) { - info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)." - , web_clients_cache.used_count - , web_clients_cache.avail_count - , web_clients_cache.allocated - , web_clients_cache.reused - , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0 - ); - } -#endif -} - -// destroy the cache and free all the memory it uses -static void web_client_cache_destroy(void) { -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) - error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); - - web_client_cache_verify(1); -#endif - - netdata_thread_disable_cancelability(); - - struct web_client *w, *t; - - w = web_clients_cache.used; - while(w) { - t = w; - w = w->next; - web_client_free(t); - } - web_clients_cache.used = NULL; - web_clients_cache.used_count = 0; - - w = web_clients_cache.avail; - while(w) { - t = w; - w = w->next; - web_client_free(t); - } - web_clients_cache.avail = NULL; - web_clients_cache.avail_count = 0; - - netdata_thread_enable_cancelability(); -} - -static struct web_client *web_client_get_from_cache_or_allocate() { - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(web_clients_cache.pid == 0)) - web_clients_cache.pid = gettid(); - - if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) - error("Oops! wrong thread accessing the cache. 
Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); -#endif - - netdata_thread_disable_cancelability(); - - struct web_client *w = web_clients_cache.avail; - - if(w) { - // get it from avail - if (w == web_clients_cache.avail) web_clients_cache.avail = w->next; - if(w->prev) w->prev->next = w->next; - if(w->next) w->next->prev = w->prev; - web_clients_cache.avail_count--; - web_client_zero(w); - web_clients_cache.reused++; - } - else { - // allocate it - w = web_client_alloc(); - web_clients_cache.allocated++; - } - - // link it to used web clients - if (web_clients_cache.used) web_clients_cache.used->prev = w; - w->next = web_clients_cache.used; - w->prev = NULL; - web_clients_cache.used = w; - web_clients_cache.used_count++; - - // initialize it - w->id = web_client_connected(); - w->mode = WEB_CLIENT_MODE_NORMAL; - - netdata_thread_enable_cancelability(); - - return w; -} - -static void web_client_release(struct web_client *w) { -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) - error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); - - if(unlikely(w->running)) - error("%llu: releasing web client from %s port %s, but it still running.", w->id, w->client_ip, w->client_port); -#endif - - debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port); - - log_connection(w, "DISCONNECTED"); - web_client_request_done(w); - web_client_disconnected(); - - netdata_thread_disable_cancelability(); - - if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) { - if (w->ifd != -1) close(w->ifd); - if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd); - w->ifd = w->ofd = -1; - } - - // unlink it from the used - if (w == web_clients_cache.used) web_clients_cache.used = w->next; - if(w->prev) w->prev->next = w->next; - if(w->next) w->next->prev = w->prev; - web_clients_cache.used_count--; - - if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) { - // we have too many of them - free it - web_client_free(w); - } - else { - // link it to the avail - if (web_clients_cache.avail) web_clients_cache.avail->prev = w; - w->next = web_clients_cache.avail; - w->prev = NULL; - web_clients_cache.avail = w; - web_clients_cache.avail_count++; - } - - netdata_thread_enable_cancelability(); -} - - -// ---------------------------------------------------------------------------- -// high level web clients connection management - -static void web_client_initialize_connection(struct web_client *w) { - int flag = 1; - if(setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0) - error("%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd); - - flag = 1; - if(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0) - error("%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd); - - web_client_update_acl_matches(w); - - w->origin[0] = '*'; w->origin[1] = '\0'; - w->cookie1[0] = '\0'; w->cookie2[0] = '\0'; - freez(w->user_agent); w->user_agent = NULL; - - web_client_enable_wait_receive(w); - - log_connection(w, "CONNECTED"); - - web_client_cache_verify(0); -} - -static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) { - struct web_client *w; - - w = web_client_get_from_cache_or_allocate(); - w->ifd = w->ofd = fd; - - strncpyz(w->client_ip, client_ip, sizeof(w->client_ip) - 1); - strncpyz(w->client_port, 
client_port, sizeof(w->client_port) - 1); - - if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); - if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); - - web_client_initialize_connection(w); - return(w); -} - -static struct web_client *web_client_create_on_listenfd(int listener) { - struct web_client *w; - - w = web_client_get_from_cache_or_allocate(); - w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from); - - if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); - if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); - - if (w->ifd == -1) { - if(errno == EPERM) - log_connection(w, "ACCESS DENIED"); - else { - log_connection(w, "CONNECTION FAILED"); - error("%llu: Failed to accept new incoming connection.", w->id); - } - - web_client_release(w); - return NULL; - } - - web_client_initialize_connection(w); - return(w); -} - - -// -------------------------------------------------------------------------------------- -// the thread of a single client - for the MULTI-THREADED web server - -// 1. waits for input and output, using async I/O -// 2. it processes HTTP requests -// 3. it generates HTTP responses -// 4. it copies data from input to output if mode is FILECOPY - -int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS; -int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST; - -static void multi_threaded_web_client_worker_main_cleanup(void *ptr) { - struct web_client *w = ptr; - WEB_CLIENT_IS_DEAD(w); - w->running = 0; -} - -static void *multi_threaded_web_client_worker_main(void *ptr) { - netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr); - - struct web_client *w = ptr; - w->running = 1; - - struct pollfd fds[2], *ifd, *ofd; - int retval, timeout_ms; - nfds_t fdmax = 0; - - while(!netdata_exit) { - if(unlikely(web_client_check_dead(w))) { - debug(D_WEB_CLIENT, "%llu: client is dead.", w->id); - break; - } - else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) { - debug(D_WEB_CLIENT, "%llu: client is set for neither receiving nor sending data.", w->id); - break; - } - - if(unlikely(w->ifd < 0 || w->ofd < 0)) { - error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd)", w->id, w->ifd, w->ofd); - break; - } - - if(w->ifd == w->ofd) { - fds[0].fd = w->ifd; - fds[0].events = 0; - fds[0].revents = 0; - - if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; - if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT; - - fds[1].fd = -1; - fds[1].events = 0; - fds[1].revents = 0; - - ifd = ofd = &fds[0]; - - fdmax = 1; - } - else { - fds[0].fd = w->ifd; - fds[0].events = 0; - fds[0].revents = 0; - if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; - ifd = &fds[0]; - - fds[1].fd = w->ofd; - fds[1].events = 0; - fds[1].revents = 0; - if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT; - ofd = &fds[1]; - - fdmax = 2; - } - - debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); - errno = 0; - timeout_ms = web_client_timeout * 1000; - retval = poll(fds, fdmax, timeout_ms); - - if(unlikely(netdata_exit)) break; - - if(unlikely(retval == -1)) { - if(errno == EAGAIN || errno == EINTR) { - debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id); - continue; - } - - debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = 
%d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd); - break; - } - else if(unlikely(!retval)) { - debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); - break; - } - - if(unlikely(netdata_exit)) break; - - int used = 0; - if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) { - used++; - if(web_client_send(w) < 0) { - debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); - break; - } - } - - if(unlikely(netdata_exit)) break; - - if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) { - used++; - if(web_client_receive(w) < 0) { - debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. Closing client.", w->id); - break; - } - - if(w->mode == WEB_CLIENT_MODE_NORMAL) { - debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id); - web_client_process_request(w); - - // if the sockets are closed, may have transferred this client - // to plugins.d - if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM)) - break; - } - } - - if(unlikely(!used)) { - debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id); - break; - } - } - - if(w->mode != WEB_CLIENT_MODE_STREAM) - log_connection(w, "DISCONNECTED"); - - web_client_request_done(w); - - debug(D_WEB_CLIENT, "%llu: done...", w->id); - - // close the sockets/files now - // to free file descriptors - if(w->ifd == w->ofd) { - if(w->ifd != -1) close(w->ifd); - } - else { - if(w->ifd != -1) close(w->ifd); - if(w->ofd != -1) close(w->ofd); - } - w->ifd = -1; - w->ofd = -1; - - netdata_thread_cleanup_pop(1); - return NULL; -} - -// -------------------------------------------------------------------------------------- -// the main socket listener - MULTI-THREADED - -// 1. it accepts new incoming requests on our port -// 2. creates a new web_client for each connection received -// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients) -// 4. cleans up old web_clients whose netdata_threads have exited - -static void web_client_multi_threaded_web_server_release_clients(void) { - struct web_client *w; - for(w = web_clients_cache.used; w ; ) { - if(unlikely(!w->running && web_client_check_dead(w))) { - struct web_client *t = w->next; - web_client_release(w); - w = t; - } - else - w = w->next; - } -} - -static void web_client_multi_threaded_web_server_stop_all_threads(void) { - struct web_client *w; - - int found = 1, max = 2 * USEC_PER_SEC, step = 50000; - for(w = web_clients_cache.used; w ; w = w->next) { - if(w->running) { - found++; - info("stopping web client %s, id %llu", w->client_ip, w->id); - netdata_thread_cancel(w->thread); - } - } - - while(found && max > 0) { - max -= step; - info("Waiting for %d web threads to finish...", found); - sleep_usec(step); - found = 0; - for(w = web_clients_cache.used; w ; w = w->next) - if(w->running) found++; - } - - if(found) - error("%d web threads are taking too long to finish. 
Giving up.", found); -} - -static struct pollfd *socket_listen_main_multi_threaded_fds = NULL; - -static void socket_listen_main_multi_threaded_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - info("releasing allocated memory..."); - freez(socket_listen_main_multi_threaded_fds); - - info("closing all sockets..."); - listen_sockets_close(&api_sockets); - - info("stopping all running web server threads..."); - web_client_multi_threaded_web_server_stop_all_threads(); - - info("freeing web clients cache..."); - web_client_cache_destroy(); - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -#define CLEANUP_EVERY_EVENTS 60 -void *socket_listen_main_multi_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr); - - web_server_mode = WEB_SERVER_MODE_MULTI_THREADED; - web_server_is_multithreaded = 1; - - struct web_client *w; - int retval, counter = 0; - - if(!api_sockets.opened) - fatal("LISTENER: No sockets to listen to."); - - socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened); - - size_t i; - for(i = 0; i < api_sockets.opened ;i++) { - socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i]; - socket_listen_main_multi_threaded_fds[i].events = POLLIN; - socket_listen_main_multi_threaded_fds[i].revents = 0; - - info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); - } - - int timeout_ms = 1 * 1000; - - while(!netdata_exit) { - - // debug(D_WEB_CLIENT, "LISTENER: Waiting..."); - retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms); - - if(unlikely(retval == -1)) { - error("LISTENER: poll() failed."); - continue; - } - else if(unlikely(!retval)) { - debug(D_WEB_CLIENT, "LISTENER: poll() timeout."); - counter++; - continue; - } - - for(i = 0 ; i < api_sockets.opened ; i++) { - short int revents = socket_listen_main_multi_threaded_fds[i].revents; - - // check for new incoming connections - if(revents & POLLIN || revents & POLLPRI) { - socket_listen_main_multi_threaded_fds[i].revents = 0; - - w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd); - if(unlikely(!w)) { - // no need for error log - web_client_create_on_listenfd already logged the error - continue; - } - - if(api_sockets.fds_families[i] == AF_UNIX) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port); - - w->running = 1; - if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) { - w->running = 0; - web_client_release(w); - } - } - } - - counter++; - if(counter > CLEANUP_EVERY_EVENTS) { - counter = 0; - web_client_multi_threaded_web_server_release_clients(); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// -------------------------------------------------------------------------------------- -// the main socket listener - SINGLE-THREADED - -struct web_client *single_threaded_clients[FD_SETSIZE]; - -static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) { - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { - return 
1; - } - - if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) { - error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d))", w->id, w->ifd, w->ofd, (int)FD_SETSIZE); - return 1; - } - - FD_SET(w->ifd, efds); - if(unlikely(*max < w->ifd)) *max = w->ifd; - - if(unlikely(w->ifd != w->ofd)) { - if(*max < w->ofd) *max = w->ofd; - FD_SET(w->ofd, efds); - } - - if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds); - if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds); - - single_threaded_clients[w->ifd] = w; - single_threaded_clients[w->ofd] = w; - - return 0; -} - -static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) { - FD_CLR(w->ifd, efds); - if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds); - - if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds); - if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds); - - single_threaded_clients[w->ifd] = NULL; - single_threaded_clients[w->ofd] = NULL; - - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { - return 1; - } - - return 0; -} - -static void socket_listen_main_single_threaded_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("closing all sockets..."); - listen_sockets_close(&api_sockets); - - info("freeing web clients cache..."); - web_client_cache_destroy(); - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *socket_listen_main_single_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr); - web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED; - web_server_is_multithreaded = 0; - - struct web_client *w; - - if(!api_sockets.opened) - fatal("LISTENER: no listen sockets available."); - - size_t i; - for(i = 0; i < (size_t)FD_SETSIZE ; i++) - single_threaded_clients[i] = NULL; - - fd_set ifds, ofds, efds, rifds, rofds, refds; - FD_ZERO (&ifds); - FD_ZERO (&ofds); - FD_ZERO (&efds); - int fdmax = 0; - - for(i = 0; i < api_sockets.opened ; i++) { - if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE) - fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]); - - info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); - - FD_SET(api_sockets.fds[i], &ifds); - FD_SET(api_sockets.fds[i], &efds); - if(fdmax < api_sockets.fds[i]) - fdmax = api_sockets.fds[i]; - } - - while(!netdata_exit) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", fdmax); - - struct timeval tv = { .tv_sec = 1, .tv_usec = 0 }; - rifds = ifds; - rofds = ofds; - refds = efds; - int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv); - - if(unlikely(retval == -1)) { - error("LISTENER: select() failed."); - continue; - } - else if(likely(retval)) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something."); - - for(i = 0; i < api_sockets.opened ; i++) { - if (FD_ISSET(api_sockets.fds[i], &rifds)) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection."); - w = web_client_create_on_listenfd(api_sockets.fds[i]); - if(unlikely(!w)) - continue; - - if(api_sockets.fds_families[i] == AF_UNIX) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - if (single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0) { - web_client_release(w); - } - } - 
} - - for(i = 0 ; i <= (size_t)fdmax ; i++) { - if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds))) - continue; - - w = single_threaded_clients[i]; - if(unlikely(!w)) { - // error("no client on slot %zu", i); - continue; - } - - if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) { - // error("failed to unlink client %zu", i); - web_client_release(w); - continue; - } - - if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) { - // error("no input on client %zu", i); - web_client_release(w); - continue; - } - - if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) { - if (unlikely(web_client_receive(w) < 0)) { - // error("cannot read from client %zu", i); - web_client_release(w); - continue; - } - - if (w->mode != WEB_CLIENT_MODE_FILECOPY) { - debug(D_WEB_CLIENT, "%llu: Processing received data.", w->id); - web_client_process_request(w); - } - } - - if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) { - if (unlikely(web_client_send(w) < 0)) { - // error("cannot send data to client %zu", i); - debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); - web_client_release(w); - continue; - } - } - - if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) { - // error("failed to link client %zu", i); - web_client_release(w); - } - } - } - else { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout."); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// -------------------------------------------------------------------------------------- -// the main socket listener - STATIC-THREADED - -struct web_server_static_threaded_worker { - netdata_thread_t thread; - - int id; - int running; - - size_t max_sockets; - - volatile size_t connected; - volatile size_t disconnected; - volatile size_t receptions; - volatile size_t sends; - volatile size_t max_concurrent; - - volatile size_t files_read; - volatile size_t file_reads; -}; - -static long long static_threaded_workers_count = 1; -static struct web_server_static_threaded_worker *static_workers_private_data = NULL; -static __thread struct web_server_static_threaded_worker *worker_private = NULL; - -// ---------------------------------------------------------------------------- - -static inline int web_server_check_client_status(struct web_client *w) { - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) - return -1; - - return 0; -} - -// ---------------------------------------------------------------------------- -// web server files - -static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) { - struct web_client *w = (struct web_client *)data; - - worker_private->files_read++; - - debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd); - *events = POLLIN; - pi->data = w; - return w; -} - -static void web_werver_file_del_callback(POLLINFO *pi) { - struct web_client *w = (struct web_client *)pi->data; - debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd); - - w->pollinfo_filecopy_slot = 0; - - if(unlikely(!w->pollinfo_slot)) { - debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd); - web_client_release(w); - } -} - -static int web_server_file_read_callback(POLLINFO *pi, short int *events) { - struct web_client *w = (struct web_client *)pi->data; - - // if there is no POLLINFO linked to this, it means the 
client disconnected - // stop the file reading too - if(unlikely(!w->pollinfo_slot)) { - debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd); - return -1; - } - - if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) { - debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd); - return -1; - } - - debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd); - - worker_private->file_reads++; - ssize_t ret = web_client_read_file(w); - - if(likely(web_client_has_wait_send(w))) { - POLLJOB *p = pi->p; // our POLLJOB - POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket - - debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd); - p->fds[wpi->slot].events |= POLLOUT; - } - - if(unlikely(ret <= 0 || w->ifd == w->ofd)) { - debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd); - return -1; - } - - *events = POLLIN; - return 0; -} - -static int web_server_file_write_callback(POLLINFO *pi, short int *events) { - (void)pi; - (void)events; - - error("Writing to web files is not supported!"); - - return -1; -} - -// ---------------------------------------------------------------------------- -// web server clients - -static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) { - (void)data; - - worker_private->connected++; - - size_t concurrent = worker_private->connected - worker_private->disconnected; - if(unlikely(concurrent > worker_private->max_concurrent)) - worker_private->max_concurrent = concurrent; - - *events = POLLIN; - - debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd); - struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port); - w->pollinfo_slot = pi->slot; - - if(unlikely(pi->socktype == AF_UNIX)) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd); - return w; -} - -// TCP client disconnected -static void web_server_del_callback(POLLINFO *pi) { - worker_private->disconnected++; - - struct web_client *w = (struct web_client *)pi->data; - - w->pollinfo_slot = 0; - if(unlikely(w->pollinfo_filecopy_slot)) { - POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the file read - debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FREED BY READING FILE JOB ON FD %d", w->id, fpi->fd); - } - else { - if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET)) - pi->flags |= POLLINFO_FLAG_DONT_CLOSE; - - debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd); - web_client_release(w); - } -} - -static int web_server_rcv_callback(POLLINFO *pi, short int *events) { - worker_private->receptions++; - - struct web_client *w = (struct web_client *)pi->data; - int fd = pi->fd; - - if(unlikely(web_client_receive(w) < 0)) - return -1; - - debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd); - web_client_process_request(w); - - if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) { - if(w->pollinfo_filecopy_slot == 0) { - debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd); - - if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) { - // add a new socket to poll_events, with the same - debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd); - - POLLINFO *fpi = poll_add_fd( - pi->p - , w->ifd - , 0 - , 
POLLINFO_FLAG_CLIENT_SOCKET - , "FILENAME" - , "" - , web_server_file_add_callback - , web_werver_file_del_callback - , web_server_file_read_callback - , web_server_file_write_callback - , (void *) w - ); - - if(fpi) - w->pollinfo_filecopy_slot = fpi->slot; - else { - error("Failed to add filecopy fd. Closing client."); - return -1; - } - } - } - } - else { - if(unlikely(w->ifd == fd && web_client_has_wait_receive(w))) - *events |= POLLIN; - } - - if(unlikely(w->ofd == fd && web_client_has_wait_send(w))) - *events |= POLLOUT; - - return web_server_check_client_status(w); -} - -static int web_server_snd_callback(POLLINFO *pi, short int *events) { - worker_private->sends++; - - struct web_client *w = (struct web_client *)pi->data; - int fd = pi->fd; - - debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd); - - if(unlikely(web_client_send(w) < 0)) - return -1; - - if(unlikely(w->ifd == fd && web_client_has_wait_receive(w))) - *events |= POLLIN; - - if(unlikely(w->ofd == fd && web_client_has_wait_send(w))) - *events |= POLLOUT; - - return web_server_check_client_status(w); -} - -static void web_server_tmr_callback(void *timer_data) { - worker_private = (struct web_server_static_threaded_worker *)timer_data; - - static __thread RRDSET *st = NULL; - static __thread RRDDIM *rd_user = NULL, *rd_system = NULL; - - if(unlikely(!st)) { - char id[100 + 1]; - char title[100 + 1]; - - snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1); - snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1); - - st = rrdset_create_localhost( - "netdata" - , id - , NULL - , "web" - , "netdata.web_cpu" - , title - , "milliseconds/s" - , "web" - , "stats" - , 132000 + worker_private->id - , default_rrd_update_every - , RRDSET_TYPE_STACKED - ); - - rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st); - - struct rusage rusage; - getrusage(RUSAGE_THREAD, &rusage); - rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec); - rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec); - rrdset_done(st); -} - -// ---------------------------------------------------------------------------- -// web server worker thread - -static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) { - worker_private = (struct web_server_static_threaded_worker *)ptr; - - info("freeing local web clients cache..."); - web_client_cache_destroy(); - - info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends", - worker_private->connected, - worker_private->disconnected, - worker_private->max_concurrent, - worker_private->receptions, - worker_private->sends - ); - - worker_private->running = 0; -} - -void *socket_listen_main_static_threaded_worker(void *ptr) { - worker_private = (struct web_server_static_threaded_worker *)ptr; - worker_private->running = 1; - - netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr); - - poll_events(&api_sockets - , web_server_add_callback - , web_server_del_callback - , web_server_rcv_callback - , web_server_snd_callback - , web_server_tmr_callback - , web_allow_connections_from - , NULL - , web_client_first_request_timeout - , web_client_timeout - , default_rrd_update_every * 1000 // timer_milliseconds - , ptr // timer_data - , worker_private->max_sockets - ); - - 
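/*
 * poll_events() above is the whole worker: it accepts new connections
 * through web_server_add_callback(), dispatches readable/writable sockets
 * to the rcv/snd callbacks, releases clients through the del callback, and
 * invokes web_server_tmr_callback() once per update interval to publish
 * the per-thread CPU chart; the thread only reaches the cleanup below when
 * it is being shut down.
 */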
netdata_thread_cleanup_pop(1); - return NULL; -} - - -// ---------------------------------------------------------------------------- -// web server main thread - also becomes a worker - -static void socket_listen_main_static_threaded_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - int i, found = 0, max = 2 * USEC_PER_SEC, step = 50000; - - // we start from 1, - 0 is self - for(i = 1; i < static_threaded_workers_count; i++) { - if(static_workers_private_data[i].running) { - found++; - info("stopping worker %d", i + 1); - netdata_thread_cancel(static_workers_private_data[i].thread); - } - else - info("found stopped worker %d", i + 1); - } - - while(found && max > 0) { - max -= step; - info("Waiting for %d static web threads to finish...", found); - sleep_usec(step); - found = 0; - - // we start from 1, - 0 is self - for(i = 1; i < static_threaded_workers_count; i++) { - if (static_workers_private_data[i].running) - found++; - } - } - - if(found) - error("%d static web threads are taking too long to finish. Giving up.", found); - - info("closing all web server sockets..."); - listen_sockets_close(&api_sockets); - - info("all static web threads stopped."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *socket_listen_main_static_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr); - web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; - - if(!api_sockets.opened) - fatal("LISTENER: no listen sockets available."); - - // 6 threads is the optimal value - // since browsers open up to 6 parallel connections per server - // so, on machines with more CPUs, avoid using resources unnecessarily - int def_thread_count = (processors > 6)?6:processors; - - static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count); - if(static_threaded_workers_count < 1) static_threaded_workers_count = 1; - - size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 2)); - - static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker)); - - web_server_is_multithreaded = (static_threaded_workers_count > 1); - - int i; - for(i = 1; i < static_threaded_workers_count; i++) { - static_workers_private_data[i].id = i; - static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count; - - char tag[50 + 1]; - snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1); - - info("starting worker %d", i+1); - netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]); - } - - // and the main one - static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count; - socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]); - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/src/web_server.h b/src/web_server.h deleted file mode 100644 index 7492547ef..000000000 --- a/src/web_server.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef NETDATA_WEB_SERVER_H -#define NETDATA_WEB_SERVER_H 1 - -#define WEB_PATH_FILE "file" -#define WEB_PATH_DATA "data" -#define WEB_PATH_DATASOURCE "datasource" -#define WEB_PATH_GRAPH "graph" - -#ifndef API_LISTEN_PORT -#define API_LISTEN_PORT 19999 -#endif - 
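A short sketch of how the pieces of this header fit together: the mode string from netdata.conf is parsed with web_server_mode_id() (defined in web_server.c above) and selects one of the three listener entry points declared just below. The dispatch function here is illustrative only, not netdata's own startup wiring; it merely shows the mapping:

    // illustrative only: map a configured mode string to a listener entry point
    static void *(*select_web_server(const char *mode))(void *) {
        switch(web_server_mode_id(mode)) {
            case WEB_SERVER_MODE_SINGLE_THREADED: return socket_listen_main_single_threaded;
            case WEB_SERVER_MODE_STATIC_THREADED: return socket_listen_main_static_threaded;
            case WEB_SERVER_MODE_MULTI_THREADED:  return socket_listen_main_multi_threaded;
            case WEB_SERVER_MODE_NONE:
            default:                              return NULL;  // web server disabled
        }
    }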
-#ifndef API_LISTEN_BACKLOG -#define API_LISTEN_BACKLOG 4096 -#endif - -typedef enum web_server_mode { - WEB_SERVER_MODE_SINGLE_THREADED, - WEB_SERVER_MODE_STATIC_THREADED, - WEB_SERVER_MODE_MULTI_THREADED, - WEB_SERVER_MODE_NONE -} WEB_SERVER_MODE; - -extern SIMPLE_PATTERN *web_allow_connections_from; -extern SIMPLE_PATTERN *web_allow_dashboard_from; -extern SIMPLE_PATTERN *web_allow_registry_from; -extern SIMPLE_PATTERN *web_allow_badges_from; -extern SIMPLE_PATTERN *web_allow_streaming_from; -extern SIMPLE_PATTERN *web_allow_netdataconf_from; - -extern WEB_SERVER_MODE web_server_mode; - -extern WEB_SERVER_MODE web_server_mode_id(const char *mode); -extern const char *web_server_mode_name(WEB_SERVER_MODE id); - -extern void *socket_listen_main_multi_threaded(void *ptr); -extern void *socket_listen_main_single_threaded(void *ptr); -extern void *socket_listen_main_static_threaded(void *ptr); -extern int api_listen_sockets_setup(void); - -#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60 -#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60 -extern int web_client_timeout; -extern int web_client_first_request_timeout; - -#endif /* NETDATA_WEB_SERVER_H */ diff --git a/src/zfs_common.c b/src/zfs_common.c deleted file mode 100644 index 05935dd0f..000000000 --- a/src/zfs_common.c +++ /dev/null @@ -1,713 +0,0 @@ -#include "common.h" -#include "zfs_common.h" - -struct arcstats arcstats = { 0 }; - -void generate_charts_arcstats(const char *plugin, int update_every) { - - // ARC reads - unsigned long long aread = arcstats.hits + arcstats.misses; - - // Demand reads - unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits; - unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses; - unsigned long long dread = dhit + dmiss; - - // Prefetch reads - unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits; - unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses; - unsigned long long pread = phit + pmiss; - - // Metadata reads - unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits; - unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses; - unsigned long long mread = mhit + mmiss; - - // l2 reads - unsigned long long l2hit = arcstats.l2_hits; - unsigned long long l2miss = arcstats.l2_misses; - unsigned long long l2read = l2hit + l2miss; - - // -------------------------------------------------------------------- - - { - static RRDSET *st_arc_size = NULL; - static RRDDIM *rd_arc_size = NULL; - static RRDDIM *rd_arc_target_size = NULL; - static RRDDIM *rd_arc_target_min_size = NULL; - static RRDDIM *rd_arc_target_max_size = NULL; - - if (unlikely(!st_arc_size)) { - st_arc_size = rrdset_create_localhost( - "zfs" - , "arc_size" - , NULL - , ZFS_FAMILY_SIZE - , NULL - , "ZFS ARC Size" - , "MB" - , plugin - , "zfs" - , 2500 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_arc_size = rrddim_add(st_arc_size, "size", "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_arc_target_size = rrddim_add(st_arc_size, "target", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_arc_target_min_size = rrddim_add(st_arc_size, "min", "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_arc_target_max_size = rrddim_add(st_arc_size, "max", "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_arc_size); - - rrddim_set_by_pointer(st_arc_size, rd_arc_size, 
arcstats.size); - rrddim_set_by_pointer(st_arc_size, rd_arc_target_size, arcstats.c); - rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min); - rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max); - rrdset_done(st_arc_size); - } - - // -------------------------------------------------------------------- - - if(likely(arcstats.l2exist)) { - static RRDSET *st_l2_size = NULL; - static RRDDIM *rd_l2_size = NULL; - static RRDDIM *rd_l2_asize = NULL; - - if (unlikely(!st_l2_size)) { - st_l2_size = rrdset_create_localhost( - "zfs" - , "l2_size" - , NULL - , ZFS_FAMILY_SIZE - , NULL - , "ZFS L2 ARC Size" - , "MB" - , plugin - , "zfs" - , 2500 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_l2_size = rrddim_add(st_l2_size, "size", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_l2_size); - - rrddim_set_by_pointer(st_l2_size, rd_l2_size, arcstats.l2_size); - rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize); - rrdset_done(st_l2_size); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_reads = NULL; - static RRDDIM *rd_aread = NULL; - static RRDDIM *rd_dread = NULL; - static RRDDIM *rd_pread = NULL; - static RRDDIM *rd_mread = NULL; - static RRDDIM *rd_l2read = NULL; - - if (unlikely(!st_reads)) { - st_reads = rrdset_create_localhost( - "zfs" - , "reads" - , NULL - , ZFS_FAMILY_ACCESSES - , NULL - , "ZFS Reads" - , "reads/s" - , plugin - , "zfs" - , 2510 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_aread = rrddim_add(st_reads, "areads", "arc", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_dread = rrddim_add(st_reads, "dreads", "demand", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_pread = rrddim_add(st_reads, "preads", "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_mread = rrddim_add(st_reads, "mreads", "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL); - - if(arcstats.l2exist) - rd_l2read = rrddim_add(st_reads, "l2reads", "l2", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_reads); - - rrddim_set_by_pointer(st_reads, rd_aread, aread); - rrddim_set_by_pointer(st_reads, rd_dread, dread); - rrddim_set_by_pointer(st_reads, rd_pread, pread); - rrddim_set_by_pointer(st_reads, rd_mread, mread); - - if(arcstats.l2exist) - rrddim_set_by_pointer(st_reads, rd_l2read, l2read); - - rrdset_done(st_reads); - } - - // -------------------------------------------------------------------- - - if(likely(arcstats.l2exist)) { - static RRDSET *st_l2bytes = NULL; - static RRDDIM *rd_l2_read_bytes = NULL; - static RRDDIM *rd_l2_write_bytes = NULL; - - if (unlikely(!st_l2bytes)) { - st_l2bytes = rrdset_create_localhost( - "zfs" - , "bytes" - , NULL - , ZFS_FAMILY_ACCESSES - , NULL - , "ZFS ARC L2 Read/Write Rate" - , "kilobytes/s" - , plugin - , "zfs" - , 2700 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_l2_read_bytes = rrddim_add(st_l2bytes, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_l2bytes); - - rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes); - rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes); - rrdset_done(st_l2bytes); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_ahits = NULL; - static RRDDIM *rd_ahits = NULL; - static RRDDIM 
*rd_amisses = NULL; - - if (unlikely(!st_ahits)) { - st_ahits = rrdset_create_localhost( - "zfs" - , "hits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS ARC Hits" - , "percentage" - , plugin - , "zfs" - , 2520 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_ahits = rrddim_add(st_ahits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_amisses = rrddim_add(st_ahits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_ahits); - - rrddim_set_by_pointer(st_ahits, rd_ahits, arcstats.hits); - rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses); - rrdset_done(st_ahits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_dhits = NULL; - static RRDDIM *rd_dhits = NULL; - static RRDDIM *rd_dmisses = NULL; - - if (unlikely(!st_dhits)) { - st_dhits = rrdset_create_localhost( - "zfs" - , "dhits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS Demand Hits" - , "percentage" - , plugin - , "zfs" - , 2530 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_dhits = rrddim_add(st_dhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_dhits); - - rrddim_set_by_pointer(st_dhits, rd_dhits, dhit); - rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss); - rrdset_done(st_dhits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_phits = NULL; - static RRDDIM *rd_phits = NULL; - static RRDDIM *rd_pmisses = NULL; - - if (unlikely(!st_phits)) { - st_phits = rrdset_create_localhost( - "zfs" - , "phits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS Prefetch Hits" - , "percentage" - , plugin - , "zfs" - , 2540 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_phits = rrddim_add(st_phits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_phits); - - rrddim_set_by_pointer(st_phits, rd_phits, phit); - rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss); - rrdset_done(st_phits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_mhits = NULL; - static RRDDIM *rd_mhits = NULL; - static RRDDIM *rd_mmisses = NULL; - - if (unlikely(!st_mhits)) { - st_mhits = rrdset_create_localhost( - "zfs" - , "mhits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS Metadata Hits" - , "percentage" - , plugin - , "zfs" - , 2550 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_mhits = rrddim_add(st_mhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_mhits); - - rrddim_set_by_pointer(st_mhits, rd_mhits, mhit); - rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss); - rrdset_done(st_mhits); - } - - // -------------------------------------------------------------------- - - if(likely(arcstats.l2exist)) { - static RRDSET *st_l2hits = NULL; - static RRDDIM *rd_l2hits = NULL; - static RRDDIM *rd_l2misses = NULL; - - if (unlikely(!st_l2hits)) { - st_l2hits = rrdset_create_localhost( - "zfs" - , "l2hits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS L2 Hits" - , "percentage" - , plugin - , "zfs" - , 2560 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_l2hits = rrddim_add(st_l2hits, "hits", NULL, 
1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_l2hits); - - rrddim_set_by_pointer(st_l2hits, rd_l2hits, l2hit); - rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss); - rrdset_done(st_l2hits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_list_hits = NULL; - static RRDDIM *rd_mfu = NULL; - static RRDDIM *rd_mru = NULL; - static RRDDIM *rd_mfug = NULL; - static RRDDIM *rd_mrug = NULL; - - if (unlikely(!st_list_hits)) { - st_list_hits = rrdset_create_localhost( - "zfs" - , "list_hits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS List Hits" - , "hits/s" - , plugin - , "zfs" - , 2600 - , update_every - , RRDSET_TYPE_AREA - ); - - rd_mfu = rrddim_add(st_list_hits, "mfu", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_mru = rrddim_add(st_list_hits, "mru", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_list_hits); - - rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits); - rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits); - rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits); - rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits); - rrdset_done(st_list_hits); - } -} - -void generate_charts_arc_summary(const char *plugin, int update_every) { - unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses; - unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits; - unsigned long long real_misses = arc_accesses_total - real_hits; - - //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits); - - unsigned long long arc_size = arcstats.size; - unsigned long long mru_size = arcstats.p; - //unsigned long long target_min_size = arcstats.c_min; - //unsigned long long target_max_size = arcstats.c_max; - unsigned long long target_size = arcstats.c; - //unsigned long long target_size_ratio = (target_max_size / target_min_size); - - unsigned long long mfu_size; - if(arc_size > target_size) - mfu_size = arc_size - mru_size; - else - mfu_size = target_size - mru_size; - - // -------------------------------------------------------------------- - - { - static RRDSET *st_arc_size_breakdown = NULL; - static RRDDIM *rd_most_recent = NULL; - static RRDDIM *rd_most_frequent = NULL; - - if (unlikely(!st_arc_size_breakdown)) { - st_arc_size_breakdown = rrdset_create_localhost( - "zfs" - , "arc_size_breakdown" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS ARC Size Breakdown" - , "percentage" - , plugin - , "zfs" - , 2520 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_most_recent = rrddim_add(st_arc_size_breakdown, "recent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL); - rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL); - } - else - rrdset_next(st_arc_size_breakdown); - - rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent, mru_size); - rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size); - rrdset_done(st_arc_size_breakdown); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_memory = NULL; -#ifndef 
__FreeBSD__ - static RRDDIM *rd_direct = NULL; -#endif - static RRDDIM *rd_throttled = NULL; -#ifndef __FreeBSD__ - static RRDDIM *rd_indirect = NULL; -#endif - - if (unlikely(!st_memory)) { - st_memory = rrdset_create_localhost( - "zfs" - , "memory_ops" - , NULL - , ZFS_FAMILY_OPERATIONS - , NULL - , "ZFS Memory Operations" - , "operations/s" - , plugin - , "zfs" - , 2523 - , update_every - , RRDSET_TYPE_LINE - ); - -#ifndef __FreeBSD__ - rd_direct = rrddim_add(st_memory, "direct", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); -#endif - rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); -#ifndef __FreeBSD__ - rd_indirect = rrddim_add(st_memory, "indirect", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); -#endif - } - else - rrdset_next(st_memory); - -#ifndef __FreeBSD__ - rrddim_set_by_pointer(st_memory, rd_direct, arcstats.memory_direct_count); -#endif - rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count); -#ifndef __FreeBSD__ - rrddim_set_by_pointer(st_memory, rd_indirect, arcstats.memory_indirect_count); -#endif - rrdset_done(st_memory); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_important_ops = NULL; - static RRDDIM *rd_deleted = NULL; - static RRDDIM *rd_mutex_misses = NULL; - static RRDDIM *rd_evict_skips = NULL; - static RRDDIM *rd_hash_collisions = NULL; - - if (unlikely(!st_important_ops)) { - st_important_ops = rrdset_create_localhost( - "zfs" - , "important_ops" - , NULL - , ZFS_FAMILY_OPERATIONS - , NULL - , "ZFS Important Operations" - , "operations/s" - , plugin - , "zfs" - , 2522 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_evict_skips = rrddim_add(st_important_ops, "eskip", "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_deleted = rrddim_add(st_important_ops, "deleted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_mutex_misses = rrddim_add(st_important_ops, "mtxmis", "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st_important_ops); - - rrddim_set_by_pointer(st_important_ops, rd_deleted, arcstats.deleted); - rrddim_set_by_pointer(st_important_ops, rd_evict_skips, arcstats.evict_skip); - rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss); - rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions); - rrdset_done(st_important_ops); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_actual_hits = NULL; - static RRDDIM *rd_actual_hits = NULL; - static RRDDIM *rd_actual_misses = NULL; - - if (unlikely(!st_actual_hits)) { - st_actual_hits = rrdset_create_localhost( - "zfs" - , "actual_hits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS Actual Cache Hits" - , "percentage" - , plugin - , "zfs" - , 2519 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_actual_hits = rrddim_add(st_actual_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_actual_hits); - - rrddim_set_by_pointer(st_actual_hits, rd_actual_hits, real_hits); - rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses); - rrdset_done(st_actual_hits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_demand_data_hits = NULL; 
- static RRDDIM *rd_demand_data_hits = NULL; - static RRDDIM *rd_demand_data_misses = NULL; - - if (unlikely(!st_demand_data_hits)) { - st_demand_data_hits = rrdset_create_localhost( - "zfs" - , "demand_data_hits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS Data Demand Efficiency" - , "percentage" - , plugin - , "zfs" - , 2531 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_demand_data_hits = rrddim_add(st_demand_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_demand_data_hits); - - rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits, arcstats.demand_data_hits); - rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses); - rrdset_done(st_demand_data_hits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_prefetch_data_hits = NULL; - static RRDDIM *rd_prefetch_data_hits = NULL; - static RRDDIM *rd_prefetch_data_misses = NULL; - - if (unlikely(!st_prefetch_data_hits)) { - st_prefetch_data_hits = rrdset_create_localhost( - "zfs" - , "prefetch_data_hits" - , NULL - , ZFS_FAMILY_EFFICIENCY - , NULL - , "ZFS Data Prefetch Efficiency" - , "percentage" - , plugin - , "zfs" - , 2532 - , update_every - , RRDSET_TYPE_STACKED - ); - - rd_prefetch_data_hits = rrddim_add(st_prefetch_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - } - else - rrdset_next(st_prefetch_data_hits); - - rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits, arcstats.prefetch_data_hits); - rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses); - rrdset_done(st_prefetch_data_hits); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_hash_elements = NULL; - static RRDDIM *rd_hash_elements_current = NULL; - static RRDDIM *rd_hash_elements_max = NULL; - - if (unlikely(!st_hash_elements)) { - st_hash_elements = rrdset_create_localhost( - "zfs" - , "hash_elements" - , NULL - , ZFS_FAMILY_HASH - , NULL - , "ZFS ARC Hash Elements" - , "elements" - , plugin - , "zfs" - , 2800 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_hash_elements_max = rrddim_add(st_hash_elements, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_hash_elements); - - rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements); - rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max); - rrdset_done(st_hash_elements); - } - - // -------------------------------------------------------------------- - - { - static RRDSET *st_hash_chains = NULL; - static RRDDIM *rd_hash_chains_current = NULL; - static RRDDIM *rd_hash_chains_max = NULL; - - if (unlikely(!st_hash_chains)) { - st_hash_chains = rrdset_create_localhost( - "zfs" - , "hash_chains" - , NULL - , ZFS_FAMILY_HASH - , NULL - , "ZFS ARC Hash Chains" - , "chains" - , plugin - , "zfs" - , 2810 - , update_every - , RRDSET_TYPE_LINE - ); - - rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_hash_chains_max = rrddim_add(st_hash_chains, 
"max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else - rrdset_next(st_hash_chains); - - rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains); - rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max); - rrdset_done(st_hash_chains); - } - - // -------------------------------------------------------------------- - -} \ No newline at end of file diff --git a/src/zfs_common.h b/src/zfs_common.h deleted file mode 100644 index 4494e70ca..000000000 --- a/src/zfs_common.h +++ /dev/null @@ -1,111 +0,0 @@ -#ifndef NETDATA_ZFS_COMMON_H -#define NETDATA_ZFS_COMMON_H - -#define ZFS_FAMILY_SIZE "size" -#define ZFS_FAMILY_EFFICIENCY "efficiency" -#define ZFS_FAMILY_ACCESSES "accesses" -#define ZFS_FAMILY_OPERATIONS "operations" -#define ZFS_FAMILY_HASH "hashes" - -struct arcstats { - // values - unsigned long long hits; - unsigned long long misses; - unsigned long long demand_data_hits; - unsigned long long demand_data_misses; - unsigned long long demand_metadata_hits; - unsigned long long demand_metadata_misses; - unsigned long long prefetch_data_hits; - unsigned long long prefetch_data_misses; - unsigned long long prefetch_metadata_hits; - unsigned long long prefetch_metadata_misses; - unsigned long long mru_hits; - unsigned long long mru_ghost_hits; - unsigned long long mfu_hits; - unsigned long long mfu_ghost_hits; - unsigned long long deleted; - unsigned long long mutex_miss; - unsigned long long evict_skip; - unsigned long long evict_not_enough; - unsigned long long evict_l2_cached; - unsigned long long evict_l2_eligible; - unsigned long long evict_l2_ineligible; - unsigned long long evict_l2_skip; - unsigned long long hash_elements; - unsigned long long hash_elements_max; - unsigned long long hash_collisions; - unsigned long long hash_chains; - unsigned long long hash_chain_max; - unsigned long long p; - unsigned long long c; - unsigned long long c_min; - unsigned long long c_max; - unsigned long long size; - unsigned long long hdr_size; - unsigned long long data_size; - unsigned long long metadata_size; - unsigned long long other_size; - unsigned long long anon_size; - unsigned long long anon_evictable_data; - unsigned long long anon_evictable_metadata; - unsigned long long mru_size; - unsigned long long mru_evictable_data; - unsigned long long mru_evictable_metadata; - unsigned long long mru_ghost_size; - unsigned long long mru_ghost_evictable_data; - unsigned long long mru_ghost_evictable_metadata; - unsigned long long mfu_size; - unsigned long long mfu_evictable_data; - unsigned long long mfu_evictable_metadata; - unsigned long long mfu_ghost_size; - unsigned long long mfu_ghost_evictable_data; - unsigned long long mfu_ghost_evictable_metadata; - unsigned long long l2_hits; - unsigned long long l2_misses; - unsigned long long l2_feeds; - unsigned long long l2_rw_clash; - unsigned long long l2_read_bytes; - unsigned long long l2_write_bytes; - unsigned long long l2_writes_sent; - unsigned long long l2_writes_done; - unsigned long long l2_writes_error; - unsigned long long l2_writes_lock_retry; - unsigned long long l2_evict_lock_retry; - unsigned long long l2_evict_reading; - unsigned long long l2_evict_l1cached; - unsigned long long l2_free_on_write; - unsigned long long l2_cdata_free_on_write; - unsigned long long l2_abort_lowmem; - unsigned long long l2_cksum_bad; - unsigned long long l2_io_error; - unsigned long long l2_size; - unsigned long long l2_asize; - unsigned long long l2_hdr_size; - unsigned long long l2_compress_successes; 
- unsigned long long l2_compress_zeros; - unsigned long long l2_compress_failures; - unsigned long long memory_throttle_count; - unsigned long long duplicate_buffers; - unsigned long long duplicate_buffers_size; - unsigned long long duplicate_reads; - unsigned long long memory_direct_count; - unsigned long long memory_indirect_count; - unsigned long long arc_no_grow; - unsigned long long arc_tempreserve; - unsigned long long arc_loaned_bytes; - unsigned long long arc_prune; - unsigned long long arc_meta_used; - unsigned long long arc_meta_limit; - unsigned long long arc_meta_max; - unsigned long long arc_meta_min; - unsigned long long arc_need_free; - unsigned long long arc_sys_free; - - // flags - int l2exist; -}; - -void generate_charts_arcstats(const char *plugin, int update_every); -void generate_charts_arc_summary(const char *plugin, int update_every); - -#endif //NETDATA_ZFS_COMMON_H diff --git a/streaming/Makefile.am b/streaming/Makefile.am new file mode 100644 index 000000000..84048948b --- /dev/null +++ b/streaming/Makefile.am @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_libconfig_DATA = \ + stream.conf \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/streaming/Makefile.in b/streaming/Makefile.in new file mode 100644 index 000000000..c3a5fdc74 --- /dev/null +++ b/streaming/Makefile.in @@ -0,0 +1,521 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = streaming +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_libconfig_DATA) $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed 
-e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libconfigdir)" +DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = 
@abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_libconfig_DATA = \ + stream.conf \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu streaming/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu streaming/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_libconfigDATA: $(dist_libconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ + done + +uninstall-dist_libconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(libconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_libconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_libconfigDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_libconfigDATA install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-dist_libconfigDATA + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/streaming/README.md b/streaming/README.md new file mode 100644 index 000000000..1f3bd7391 --- /dev/null +++ b/streaming/README.md @@ -0,0 +1,413 @@ +# Metrics streaming + +Each netdata is able to replicate/mirror its database to another netdata, by streaming collected +metrics, in real-time to it. 
This is quite different from [data archiving to third party time-series
+databases](../backends).
+
+When a netdata streams metrics to another netdata, the receiving one is able to perform everything
+a netdata performs:
+
+- visualize them with a dashboard
+- run health checks that trigger alarms and send alarm notifications
+- archive metrics to a backend time-series database
+
+The following configurations are supported:
+
+#### netdata without a database or web API (headless collector)
+
+Local netdata (`slave`), **without any database or alarms**, collects metrics and sends them to
+another netdata (`master`).
+
+The user gets the full functionality of the `slave` netdata at
+http://master.ip:19999/host/slave.hostname/. Alarms for the `slave` are served by the `master`.
+
+In this mode the `slave` is just a plain data collector.
+It runs with... **5MB** of RAM (yes, you read that correctly), spawns all external plugins, but instead
+of maintaining a local database and accepting dashboard requests, it streams all metrics to the
+`master`.
+
+The same `master` can collect data for any number of `slaves`.
+
+#### database replication
+
+Local netdata (`slave`), **with a local database (and possibly alarms)**, collects metrics and
+sends them to another netdata (`master`).
+
+The user can use all the functions **at both** http://slave.ip:19999/ and
+http://master.ip:19999/host/slave.hostname/.
+
+The `slave` and the `master` may have different data retention policies for the same metrics.
+
+Alarms for the `slave` are triggered by **both** the `slave` and the `master` (and actually
+each can have a different alarm configuration or have alarms disabled).
+
+#### netdata proxies
+
+Local netdata (`slave`), with or without a database, collects metrics and sends them to another
+netdata (`proxy`), which may or may not maintain a database, which forwards them to another
+netdata (`master`).
+
+Alarms for the slave can be triggered by any of the involved hosts that maintains a database.
+
+Any number of daisy-chained netdata servers is supported, each with or without a database and
+with or without alarms for the `slave` metrics.
+
+#### mix and match with backends
+
+All nodes that maintain a database can also send their data to a backend database.
+This allows quite complex setups.
+
+Example:
+
+1. netdata `A`, `B` do not maintain a database and stream metrics to netdata `C` (live streaming functionality, i.e. this PR)
+2. netdata `C` maintains a database for `A`, `B`, `C` and archives all metrics to `graphite` with 10 second detail (backends functionality)
+3. netdata `C` also streams data for `A`, `B`, `C` to netdata `D`, which also collects data from `E`, `F` and `G` from another DMZ (live streaming functionality, i.e. this PR)
+4. netdata `D` is just a proxy, without a database, that streams all data to a remote site at netdata `H`
+5. netdata `H` maintains a database for `A`, `B`, `C`, `D`, `E`, `F`, `G`, `H` and sends all data to `opentsdb` with 5 second detail (backends functionality)
+6. alarms are triggered by `H` for all hosts
+7. users can use all the netdata that maintain a database to view metrics (i.e. at `H` all hosts can be viewed).
+
+#### netdata.conf configuration
+
+These are the options that affect the operation of netdata in this area:
+
+```
+[global]
+    memory mode = none | ram | save | map
+```
+
+`[global].memory mode = none` disables the database at this host. This also disables health
+monitoring (there cannot be health monitoring without a database).
+
+```
+[web]
+    mode = none | static-threaded | single-threaded | multi-threaded
+```
+
+`[web].mode = none` disables the API (netdata will not listen to any ports).
+This also disables the registry (there cannot be a registry without an API).
+
+```
+[backend]
+    enabled = yes | no
+    type = graphite | opentsdb
+    destination = IP:PORT ...
+    update every = 10
+```
+
+`[backend]` configures data archiving to a backend (it archives all databases maintained on
+this host).
+
+#### streaming configuration
+
+A new file is introduced: [stream.conf](stream.conf) (to edit it on your system run
+`/etc/netdata/edit-config stream.conf`). This file holds the streaming configuration for both the
+sending and the receiving netdata.
+
+API keys are used to authorize the communication of a pair of sending-receiving netdata.
+Once the communication is authorized, the sending netdata can push metrics for any number of hosts.
+
+You can generate an API key with the command `uuidgen`. API keys are just random GUIDs.
+You can use the same API key on all your netdata, or use a different API key for any pair of
+sending-receiving netdata.
+
+##### options for the sending node
+
+This is the section for the sending netdata. On the receiving node, `[stream].enabled` can be `no`.
+If it is `yes`, the receiving node will also stream the metrics to another node (i.e. it will be
+a `proxy`).
+
+```
+[stream]
+    enabled = yes | no
+    destination = IP:PORT ...
+    api key = XXXXXXXXXXX
+```
+
+This is an overview of how these options can be combined:
+
+target | memory<br/>mode | web<br/>mode | stream<br/>enabled | backend | alarms | dashboard
+-------|:-----------:|:---:|:------:|:-------:|:---------:|:----:
+headless collector|`none`|`none`|`yes`|only for `data source = as collected`|not possible|no
+headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not possible|no
+proxy with db|not `none`|not `none`|`yes`|possible|possible|yes
+central netdata|not `none`|not `none`|`no`|possible|possible|yes
+
+##### options for the receiving node
+
+`stream.conf` looks like this:
+
+```sh
+# replace API_KEY with your uuidgen generated GUID
+[API_KEY]
+    enabled = yes
+    default history = 3600
+    default memory mode = save
+    health enabled by default = auto
+    allow from = *
+```
+
+You can add many such sections, one for each API key. The above are used as default values for
+all hosts pushed with this API key.
+
+You can also add sections like this:
+
+```sh
+# replace MACHINE_GUID with the slave /var/lib/netdata/registry/netdata.public.unique.id
+[MACHINE_GUID]
+    enabled = yes
+    history = 3600
+    memory mode = save
+    health enabled = yes
+    allow from = *
+```
+
+The above is the receiver configuration of a single host, at the receiver end. `MACHINE_GUID` is
+the unique id of the netdata generating the metrics (i.e. the netdata that originally collects
+them, found in `/var/lib/netdata/registry/netdata.public.unique.id`). So, metrics for netdata `A` that pass through
+any number of other netdata will have the same `MACHINE_GUID`.
+
+###### allow from
+
+`allow from` settings are [netdata simple patterns](../libnetdata/simple_pattern): string matches
+that use `*` as wildcard (any number of times) and a `!` prefix for a negative match.
+So: `allow from = !10.1.2.3 10.*` will allow all IPs in `10.*` except `10.1.2.3`. The order is
+important: left to right, the first positive or negative match is used.
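+
+As a sketch with hypothetical addresses, the first-match-wins ordering described above means:
+
+```
+allow from = !10.1.2.3 10.*   # all of 10.* is allowed, except 10.1.2.3
+allow from = 10.* !10.1.2.3   # 10.1.2.3 is allowed too - the positive 10.* matches it first
+```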
+
+`allow from` is available in netdata v1.9+
+
+#### tracing
+
+When a `slave` is trying to push metrics to a `master` or `proxy`, it logs entries like these:
+
+```
+2017-02-25 01:57:44: netdata: ERROR: Failed to connect to '10.11.12.1', port '19999' (errno 111, Connection refused)
+2017-02-25 01:57:44: netdata: ERROR: STREAM costa-pc [send to 10.11.12.1:19999]: failed to connect
+2017-02-25 01:58:04: netdata: INFO : STREAM costa-pc [send to 10.11.12.1:19999]: initializing communication...
+2017-02-25 01:58:04: netdata: INFO : STREAM costa-pc [send to 10.11.12.1:19999]: waiting response from remote netdata...
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [send to 10.11.12.1:19999]: established communication - sending metrics...
+2017-02-25 01:58:14: netdata: ERROR: STREAM costa-pc [send]: discarding 1900 bytes of metrics already in the buffer.
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [send]: ready - sending metrics...
+```
+
+The receiving end (`proxy` or `master`) logs entries like these:
+
+```
+2017-02-25 01:58:04: netdata: INFO : STREAM [receive from [10.11.12.11]:33554]: new client connection.
+2017-02-25 01:58:04: netdata: INFO : STREAM costa-pc [10.11.12.11]:33554: receive thread created (task id 7698)
+2017-02-25 01:58:14: netdata: INFO : Host 'costa-pc' with guid '12345678-b5a6-11e6-8a50-00508db7e9c9' initialized, os: linux, update every: 1, memory mode: ram, history entries: 3600, streaming: disabled, health: enabled, cache_dir: '/var/cache/netdata/12345678-b5a6-11e6-8a50-00508db7e9c9', varlib_dir: '/var/lib/netdata/12345678-b5a6-11e6-8a50-00508db7e9c9', health_log: '/var/lib/netdata/12345678-b5a6-11e6-8a50-00508db7e9c9/health/health-log.db', alarms default handler: '/usr/libexec/netdata/plugins.d/alarm-notify.sh', alarms default recipient: 'root'
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [receive from [10.11.12.11]:33554]: initializing communication...
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [receive from [10.11.12.11]:33554]: receiving metrics...
+```
+
+For netdata v1.9+, streaming can also be monitored via `access.log`.
+
+
+#### Viewing remote host dashboards, using mirrored databases
+
+On any receiving netdata that maintains remote databases and has its web server enabled, the
+`my-netdata` menu will include a list of the mirrored databases.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/24080824/24cd2d3c-0caf-11e7-909d-a8dd1dbb95d7.png)
+
+Select any of these and the server will offer a dashboard using the mirrored metrics.
+
+
+## Monitoring ephemeral nodes
+
+Auto-scaling is probably the most trendy service deployment strategy these days.
+
+Auto-scaling detects the need for additional resources and boots VMs on demand, based on a template. Soon after they start running the applications, a load balancer starts distributing traffic to them, allowing the service to grow horizontally to the scale needed to handle the load. When demand falls, auto-scaling starts shutting down VMs that are no longer needed.
+
+<p align="center">
+<img src="https://cloud.githubusercontent.com/assets/2662304/23627426/65a9074a-02b9-11e7-9664-cd8f258a00af.png"/>
+</p>
+
+What a fantastic feature for controlling infrastructure costs! Pay only for what you need for the time you need it!
+
+In auto-scaling, all servers are ephemeral; they live for just a few hours. Every VM is a brand new instance of the application, automatically created from a template.
+
+So, how can we monitor them?
How can we be sure that everything is working as expected on all of them?
+
+### The netdata way
+
+We recently made a significant improvement at the core of netdata to support monitoring such setups.
+
+Following the netdata way of monitoring, we wanted:
+
+1. **real-time performance monitoring**, collecting **_thousands of metrics per server per second_**, visualized in interactive, automatically created dashboards.
+2. **real-time alarms**, for all nodes.
+3. **zero configuration**, all ephemeral servers should have exactly the same configuration, and nothing should be configured at any system for each of the ephemeral nodes. We shouldn't care if 10 or 100 servers are spawned to handle the load.
+4. **self-cleanup**, so that nothing needs to be done for cleaning up the monitoring infrastructure from the hundreds of nodes that may have been monitored through time.
+
+#### How it works
+
+All monitoring solutions, including netdata, work like this:
+
+1. `collect metrics`, from the system and the running applications
+2. `store metrics`, in a time-series database
+3. `examine metrics` periodically, for triggering alarms and sending alarm notifications
+4. `visualize metrics`, so that users can see what exactly is happening
+
+netdata used to be self-contained, so that all these functions were handled entirely by each server. The changes we made allow each netdata to be configured independently for each function. So, each netdata can now act as:
+
+- a `self contained system`, much like it used to be.
+- a `data collector`, that collects metrics from a host and pushes them to another netdata (with or without a local database and alarms).
+- a `proxy`, that receives metrics from other hosts and pushes them immediately to other netdata servers. netdata proxies can also be `store and forward proxies`, meaning that they are able to maintain a local database for all metrics passing through them (with or without alarms).
+- a `time-series database` node, where data are kept, alarms are run and queries are served to visualize the metrics.
+
+### Configuring an auto-scaling setup
+
+<p align="center">
+<img src="https://cloud.githubusercontent.com/assets/2662304/23627468/96daf7ba-02b9-11e7-95ac-1f767dd8dab8.png"/>
+</p>
+
+You need a netdata `master`. This node should not be ephemeral. It will be the node to which all ephemeral nodes (let's call them `slaves`) will be sending their metrics.
+
+The master needs to authorize the slaves before accepting their metrics. This is done with an API key.
+
+#### API keys
+
+API keys are just random GUIDs. Use the Linux command `uuidgen` to generate one. You can use the same API key for all your `slaves`, or you can configure one API key for each of them. This is entirely your decision.
+
+We suggest using the same API key for each ephemeral node template you have, so that all replicas of the same ephemeral node will have exactly the same configuration.
+
+I will use this API_KEY: `11111111-2222-3333-4444-555555555555`. Replace it with your own.
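+
+For example, generating a key is a single command (the GUID printed below is illustrative; yours will differ):
+
+```
+uuidgen
+11111111-2222-3333-4444-555555555555
+```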
+
+#### Configuring the `master`
+
+On the master, edit `/etc/netdata/stream.conf` (to edit it on your system run `/etc/netdata/edit-config stream.conf`) and set these:
+
+```bash
+[11111111-2222-3333-4444-555555555555]
+    # enable/disable this API key
+    enabled = yes
+
+    # one hour of data for each of the slaves
+    default history = 3600
+
+    # do not save slave metrics on disk
+    default memory mode = ram
+
+    # alarm checks, only while the slave is connected
+    health enabled by default = auto
+```
+*`stream.conf` on master, to enable receiving metrics from slaves using the API key.*
+
+If you used many API keys, you can add one such section for each API key.
+
+When done, restart netdata on the `master` node. It is now ready to receive metrics.
+
+#### Configuring the `slaves`
+
+On each of the slaves, edit `/etc/netdata/stream.conf` (to edit it on your system run `/etc/netdata/edit-config stream.conf`) and set these:
+
+```bash
+[stream]
+    # stream metrics to another netdata
+    enabled = yes
+
+    # the IP and PORT of the master
+    destination = 10.11.12.13:19999
+
+    # the API key to use
+    api key = 11111111-2222-3333-4444-555555555555
+```
+*`stream.conf` on slaves, to enable pushing metrics to the master at `10.11.12.13:19999`.*
+
+Using just the above configuration, the `slaves` will be pushing their metrics to the `master` netdata, but they will still maintain a local database of the metrics and run health checks. To disable them, edit `/etc/netdata/netdata.conf` and set:
+
+```bash
+[global]
+    # disable the local database
+    memory mode = none
+
+[health]
+    # disable health checks
+    enabled = no
+```
+*`netdata.conf` configuration on slaves, to disable the local database and health checks.*
+
+Keep in mind that setting `memory mode = none` will also force `[health].enabled = no` (health checks require access to a local database). But you can keep the database and disable health checks if you need to. You are however sending all the metrics to the master server, which can handle the health checking (`[health].enabled = yes`).
+
+#### netdata unique id
+
+The file `/var/lib/netdata/registry/netdata.public.unique.id` contains a random GUID that **uniquely identifies each netdata**. This file is automatically generated by netdata the first time it is started and remains unaltered forever.
+
+> If you are building an image to be used for automated provisioning of autoscaled VMs, it is important to delete that file from the image, so that each instance of your image will generate its own.
+
+#### Troubleshooting metrics streaming
+
+Both the sender and the receiver of metrics log information at `/var/log/netdata/error.log`.
+
+On both master and slave, do this:
+
+```
+tail -f /var/log/netdata/error.log | grep STREAM
+```
+
+If the slave manages to connect to the master you will see something like this (on the master):
+
+```
+2017-03-09 09:38:52: netdata: INFO : STREAM [receive from [10.11.12.86]:38564]: new client connection.
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [10.11.12.86]:38564: receive thread created (task id 27721)
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: client willing to stream metrics for host 'xxx' with machine_guid '1234567-1976-11e6-ae19-7cdd9077342a': update every = 1, history = 3600, memory mode = ram, health auto
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: initializing communication...
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: receiving metrics...
+
+```
+
+and something like this on the slave:
+
+```
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: connecting...
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: initializing communication...
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: waiting response from remote netdata...
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: established communication - sending metrics...
+```
+
+### Archiving to a time-series database
+
+The `master` netdata node can also archive the metrics of all `slaves` to a time-series database. At the time of this writing, netdata supports:
+
+- graphite
+- opentsdb
+- prometheus
+- json document DBs
+- anything compatible with the above (e.g. kairosdb, influxdb, etc.)
+
+Check the netdata [backends documentation](../backends) to configure this.
+
+This is how such a solution works:
+
+<p align="center">
+<img src="https://cloud.githubusercontent.com/assets/2662304/23627295/e3569adc-02b8-11e7-9d55-4014bf98c1b3.png"/>
+</p>
+
+### An advanced setup
+
+netdata also supports `proxies` with and without a local database, and data retention can differ between nodes.
+
+This means a setup like the following is also possible:
+
+<p align="center">
+<img src="https://cloud.githubusercontent.com/assets/2662304/23629551/bb1fd9c2-02c0-11e7-90f5-cab5a3ed4c53.png"/>
+</p>
+
+
+## Proxies
+
+A proxy is a netdata instance that receives metrics from one netdata and streams them to another netdata.
+
+netdata proxies may or may not maintain a database for the metrics passing through them.
+When they maintain a database, they can also run health checks (alarms and notifications)
+for the remote host that is streaming the metrics.
+
+To configure a proxy, configure it as a receiving and a sending netdata at the same time,
+using [stream.conf](stream.conf).
+
+The sending side of a netdata proxy connects to and disconnects from the final destination of the
+metrics, following the same pattern as the receiving side.
+
+For a practical example, see [Monitoring ephemeral nodes](#monitoring-ephemeral-nodes).
+
diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c
new file mode 100644
index 000000000..df1a2177f
--- /dev/null
+++ b/streaming/rrdpush.c
@@ -0,0 +1,1279 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdpush.h"
+
+/*
+ * rrdpush
+ *
+ * 3 threads are involved for all stream operations
+ *
+ * 1. a random data collection thread, calling rrdset_done_push()
+ *    this is called for each chart.
+ *
+ *    the output of this work is kept in a BUFFER in RRDHOST
+ *    the sender thread is signalled via a pipe (also in RRDHOST)
+ *
+ * 2. a sender thread running at the sending netdata
+ *    this is spawned automatically on the first chart to be pushed
+ *
+ *    It tries to push the metrics to the remote netdata, as fast
+ *    as possible (i.e. immediately after they are collected).
+ *
+ * 3. a receiver thread, running at the receiving netdata
+ *    this is spawned automatically when the sender connects to
+ *    the receiver.
+ *
+ */
+
+#define START_STREAMING_PROMPT "Hit me baby, push them over..."
+ +typedef enum { + RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW, + RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW +} RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY; + +static struct config stream_config = { + .sections = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { + .avl_tree = { + .root = NULL, + .compar = appconfig_section_compare + }, + .rwlock = AVL_LOCK_INITIALIZER + } +}; + +unsigned int default_rrdpush_enabled = 0; +char *default_rrdpush_destination = NULL; +char *default_rrdpush_api_key = NULL; +char *default_rrdpush_send_charts_matching = NULL; + +static void load_stream_conf() { + errno = 0; + char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, "stream.conf"); + if(!appconfig_load(&stream_config, filename, 0)) { + info("CONFIG: cannot load user config '%s'. Will try stock config.", filename); + freez(filename); + + filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "stream.conf"); + if(!appconfig_load(&stream_config, filename, 0)) + info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename); + } + freez(filename); +} + +int rrdpush_init() { + // -------------------------------------------------------------------- + // load stream.conf + load_stream_conf(); + + default_rrdpush_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled); + default_rrdpush_destination = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); + default_rrdpush_api_key = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); + default_rrdpush_send_charts_matching = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", "*"); + rrdhost_free_orphan_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds", rrdhost_free_orphan_time); + + if(default_rrdpush_enabled && (!default_rrdpush_destination || !*default_rrdpush_destination || !default_rrdpush_api_key || !*default_rrdpush_api_key)) { + error("STREAM [send]: cannot enable sending thread - information is missing."); + default_rrdpush_enabled = 0; + } + + return default_rrdpush_enabled; +} + +#define CONNECTED_TO_SIZE 100 + +// data collection happens from multiple threads +// each of these threads calls rrdset_done() +// which in turn calls rrdset_done_push() +// which uses this pipe to notify the streaming thread +// that there are more data ready to be sent +#define PIPE_READ 0 +#define PIPE_WRITE 1 + +// to have the remote netdata re-sync the charts +// to its current clock, we send for this many +// iterations a BEGIN line without microseconds +// this is for the first iterations of each chart +unsigned int remote_clock_resync_iterations = 60; + +#define rrdpush_buffer_lock(host) netdata_mutex_lock(&((host)->rrdpush_sender_buffer_mutex)) +#define rrdpush_buffer_unlock(host) netdata_mutex_unlock(&((host)->rrdpush_sender_buffer_mutex)) + +static inline int should_send_chart_matching(RRDSET *st) { + if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_ENABLED))) { + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND); + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); + } + else if(!rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND|RRDSET_FLAG_UPSTREAM_IGNORE)) { + RRDHOST *host = st->rrdhost; + + if(simple_pattern_matches(host->rrdpush_send_charts_matching, st->id) || + simple_pattern_matches(host->rrdpush_send_charts_matching, st->name)) { + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE); + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND); + } + else { + 
rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND); + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); + } + } + + return(rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND)); +} + +// checks if the current chart definition has been sent +static inline int need_to_send_chart_definition(RRDSET *st) { + rrdset_check_rdlock(st); + + if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_EXPOSED)))) + return 1; + + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(unlikely(!rd->exposed)) { + #ifdef NETDATA_INTERNAL_CHECKS + info("host '%s', chart '%s', dimension '%s' flag 'exposed' triggered chart refresh to upstream", st->rrdhost->hostname, st->id, rd->id); + #endif + return 1; + } + } + + return 0; +} + +// sends the current chart definition +static inline void rrdpush_send_chart_definition_nolock(RRDSET *st) { + RRDHOST *host = st->rrdhost; + + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + + // properly set the name for the remote end to parse it + char *name = ""; + if(unlikely(strcmp(st->id, st->name))) { + // they differ + name = strchr(st->name, '.'); + if(name) + name++; + else + name = ""; + } + + // info("CHART '%s' '%s'", st->id, name); + + // send the chart + buffer_sprintf( + host->rrdpush_sender_buffer + , "CHART \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" %ld %d \"%s %s %s %s\" \"%s\" \"%s\"\n" + , st->id + , name + , st->title + , st->units + , st->family + , st->context + , rrdset_type_name(st->chart_type) + , st->priority + , st->update_every + , rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)?"obsolete":"" + , rrdset_flag_check(st, RRDSET_FLAG_DETAIL)?"detail":"" + , rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)?"store_first":"" + , rrdset_flag_check(st, RRDSET_FLAG_HIDDEN)?"hidden":"" + , (st->plugin_name)?st->plugin_name:"" + , (st->module_name)?st->module_name:"" + ); + + // send the dimensions + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + buffer_sprintf( + host->rrdpush_sender_buffer + , "DIMENSION \"%s\" \"%s\" \"%s\" " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " \"%s %s\"\n" + , rd->id + , rd->name + , rrd_algorithm_name(rd->algorithm) + , rd->multiplier + , rd->divisor + , rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)?"hidden":"" + , rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)?"noreset":"" + ); + rd->exposed = 1; + } + + // send the chart local custom variables + RRDSETVAR *rs; + for(rs = st->variables; rs ;rs = rs->next) { + if(unlikely(rs->type == RRDVAR_TYPE_CALCULATED && rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR)) { + calculated_number *value = (calculated_number *) rs->value; + + buffer_sprintf( + host->rrdpush_sender_buffer + , "VARIABLE CHART %s = " CALCULATED_NUMBER_FORMAT "\n" + , rs->variable + , *value + ); + } + } + + st->upstream_resync_time = st->last_collected_time.tv_sec + (remote_clock_resync_iterations * st->update_every); +} + +// sends the current chart dimensions +static inline void rrdpush_send_chart_metrics_nolock(RRDSET *st) { + RRDHOST *host = st->rrdhost; + buffer_sprintf(host->rrdpush_sender_buffer, "BEGIN \"%s\" %llu\n", st->id, (st->last_collected_time.tv_sec > st->upstream_resync_time)?st->usec_since_last_update:0); + + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rd->updated && rd->exposed) + buffer_sprintf(host->rrdpush_sender_buffer + , "SET \"%s\" = " COLLECTED_NUMBER_FORMAT "\n" + , rd->id + , rd->collected_value + ); + } + + buffer_strcat(host->rrdpush_sender_buffer, "END\n"); +} + +static void rrdpush_sender_thread_spawn(RRDHOST *host); + +void rrdset_push_chart_definition_now(RRDSET *st) 
{ + RRDHOST *host = st->rrdhost; + + if(unlikely(!host->rrdpush_send_enabled || !should_send_chart_matching(st))) + return; + + rrdset_rdlock(st); + rrdpush_buffer_lock(host); + rrdpush_send_chart_definition_nolock(st); + rrdpush_buffer_unlock(host); + rrdset_unlock(st); +} + +void rrdset_done_push(RRDSET *st) { + if(unlikely(!should_send_chart_matching(st))) + return; + + RRDHOST *host = st->rrdhost; + + rrdpush_buffer_lock(host); + + if(unlikely(host->rrdpush_send_enabled && !host->rrdpush_sender_spawn)) + rrdpush_sender_thread_spawn(host); + + if(unlikely(!host->rrdpush_sender_buffer || !host->rrdpush_sender_connected)) { + if(unlikely(!host->rrdpush_sender_error_shown)) + error("STREAM %s [send]: not ready - discarding collected metrics.", host->hostname); + + host->rrdpush_sender_error_shown = 1; + + rrdpush_buffer_unlock(host); + return; + } + else if(unlikely(host->rrdpush_sender_error_shown)) { + info("STREAM %s [send]: sending metrics...", host->hostname); + host->rrdpush_sender_error_shown = 0; + } + + if(need_to_send_chart_definition(st)) + rrdpush_send_chart_definition_nolock(st); + + rrdpush_send_chart_metrics_nolock(st); + + // signal the sender there are more data + if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1 && write(host->rrdpush_sender_pipe[PIPE_WRITE], " ", 1) == -1) + error("STREAM %s [send]: cannot write to internal pipe", host->hostname); + + rrdpush_buffer_unlock(host); +} + +// ---------------------------------------------------------------------------- +// rrdpush sender thread + +static inline void rrdpush_sender_add_host_variable_to_buffer_nolock(RRDHOST *host, RRDVAR *rv) { + calculated_number *value = (calculated_number *)rv->value; + + buffer_sprintf( + host->rrdpush_sender_buffer + , "VARIABLE HOST %s = " CALCULATED_NUMBER_FORMAT "\n" + , rv->name + , *value + ); + + debug(D_STREAM, "RRDVAR pushed HOST VARIABLE %s = " CALCULATED_NUMBER_FORMAT, rv->name, *value); +} + +void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv) { + if(host->rrdpush_send_enabled && host->rrdpush_sender_spawn && host->rrdpush_sender_connected) { + rrdpush_buffer_lock(host); + rrdpush_sender_add_host_variable_to_buffer_nolock(host, rv); + rrdpush_buffer_unlock(host); + } +} + +static int rrdpush_sender_thread_custom_host_variables_callback(void *rrdvar_ptr, void *host_ptr) { + RRDVAR *rv = (RRDVAR *)rrdvar_ptr; + RRDHOST *host = (RRDHOST *)host_ptr; + + if(unlikely(rv->options & RRDVAR_OPTION_CUSTOM_HOST_VAR && rv->type == RRDVAR_TYPE_CALCULATED)) { + rrdpush_sender_add_host_variable_to_buffer_nolock(host, rv); + + // return 1, so that the traversal will return the number of variables sent + return 1; + } + + // returning a negative number will break the traversal + return 0; +} + +static void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) { + int ret = rrdvar_callback_for_all_host_variables(host, rrdpush_sender_thread_custom_host_variables_callback, host); + (void)ret; + + debug(D_STREAM, "RRDVAR sent %d VARIABLES", ret); +} + +// resets all the chart, so that their definitions +// will be resent to the central netdata +static void rrdpush_sender_thread_reset_all_charts(RRDHOST *host) { + rrdhost_rdlock(host); + + RRDSET *st; + rrdset_foreach_read(st, host) { + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); + + st->upstream_resync_time = 0; + + rrdset_rdlock(st); + + RRDDIM *rd; + rrddim_foreach_read(rd, st) + rd->exposed = 0; + + rrdset_unlock(st); + } + + rrdhost_unlock(host); +} + +static inline void 
rrdpush_sender_thread_data_flush(RRDHOST *host) { + rrdpush_buffer_lock(host); + + if(buffer_strlen(host->rrdpush_sender_buffer)) + error("STREAM %s [send]: discarding %zu bytes of metrics already in the buffer.", host->hostname, buffer_strlen(host->rrdpush_sender_buffer)); + + buffer_flush(host->rrdpush_sender_buffer); + + rrdpush_sender_thread_reset_all_charts(host); + rrdpush_sender_thread_send_custom_host_variables(host); + + rrdpush_buffer_unlock(host); +} + +void rrdpush_sender_thread_stop(RRDHOST *host) { + rrdpush_buffer_lock(host); + rrdhost_wrlock(host); + + netdata_thread_t thr = 0; + + if(host->rrdpush_sender_spawn) { + info("STREAM %s [send]: signaling sending thread to stop...", host->hostname); + + // signal the thread that we want to join it + host->rrdpush_sender_join = 1; + + // copy the thread id, so that we will be waiting for the right one + // even if a new one has been spawn + thr = host->rrdpush_sender_thread; + + // signal it to cancel + netdata_thread_cancel(host->rrdpush_sender_thread); + } + + rrdhost_unlock(host); + rrdpush_buffer_unlock(host); + + if(thr != 0) { + info("STREAM %s [send]: waiting for the sending thread to stop...", host->hostname); + void *result; + netdata_thread_join(thr, &result); + info("STREAM %s [send]: sending thread has exited.", host->hostname); + } +} + +static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) { + host->rrdpush_sender_connected = 0; + + if(host->rrdpush_sender_socket != -1) { + close(host->rrdpush_sender_socket); + host->rrdpush_sender_socket = -1; + } +} + +static int rrdpush_sender_thread_connect_to_master(RRDHOST *host, int default_port, int timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) { + struct timeval tv = { + .tv_sec = timeout, + .tv_usec = 0 + }; + + // make sure the socket is closed + rrdpush_sender_thread_close_socket(host); + + debug(D_STREAM, "STREAM: Attempting to connect..."); + info("STREAM %s [send to %s]: connecting...", host->hostname, host->rrdpush_send_destination); + + host->rrdpush_sender_socket = connect_to_one_of( + host->rrdpush_send_destination + , default_port + , &tv + , reconnects_counter + , connected_to + , connected_to_size + ); + + if(unlikely(host->rrdpush_sender_socket == -1)) { + error("STREAM %s [send to %s]: failed to connect", host->hostname, host->rrdpush_send_destination); + return 0; + } + + info("STREAM %s [send to %s]: initializing communication...", host->hostname, connected_to); + + #define HTTP_HEADER_SIZE 8192 + char http[HTTP_HEADER_SIZE + 1]; + snprintfz(http, HTTP_HEADER_SIZE, + "STREAM key=%s&hostname=%s®istry_hostname=%s&machine_guid=%s&update_every=%d&os=%s&timezone=%s&tags=%s HTTP/1.1\r\n" + "User-Agent: %s/%s\r\n" + "Accept: */*\r\n\r\n" + , host->rrdpush_send_api_key + , host->hostname + , host->registry_hostname + , host->machine_guid + , default_rrd_update_every + , host->os + , host->timezone + , (host->tags)?host->tags:"" + , host->program_name + , host->program_version + ); + + if(send_timeout(host->rrdpush_sender_socket, http, strlen(http), 0, timeout) == -1) { + error("STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", host->hostname, connected_to); + rrdpush_sender_thread_close_socket(host); + return 0; + } + + info("STREAM %s [send to %s]: waiting response from remote netdata...", host->hostname, connected_to); + + if(recv_timeout(host->rrdpush_sender_socket, http, HTTP_HEADER_SIZE, 0, timeout) == -1) { + error("STREAM %s [send to %s]: remote netdata does not respond.", 
host->hostname, connected_to); + rrdpush_sender_thread_close_socket(host); + return 0; + } + + if(strncmp(http, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT)) != 0) { + error("STREAM %s [send to %s]: server is not replying properly (is it a netdata?).", host->hostname, connected_to); + rrdpush_sender_thread_close_socket(host); + return 0; + } + + info("STREAM %s [send to %s]: established communication - ready to send metrics...", host->hostname, connected_to); + + if(sock_setnonblock(host->rrdpush_sender_socket) < 0) + error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", host->hostname, connected_to); + + if(sock_enlarge_out(host->rrdpush_sender_socket) < 0) + error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", host->hostname, connected_to); + + debug(D_STREAM, "STREAM: Connected on fd %d...", host->rrdpush_sender_socket); + + return 1; +} + +static void rrdpush_sender_thread_cleanup_callback(void *ptr) { + RRDHOST *host = (RRDHOST *)ptr; + + rrdpush_buffer_lock(host); + rrdhost_wrlock(host); + + info("STREAM %s [send]: sending thread cleans up...", host->hostname); + + rrdpush_sender_thread_close_socket(host); + + // close the pipe + if(host->rrdpush_sender_pipe[PIPE_READ] != -1) { + close(host->rrdpush_sender_pipe[PIPE_READ]); + host->rrdpush_sender_pipe[PIPE_READ] = -1; + } + + if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1) { + close(host->rrdpush_sender_pipe[PIPE_WRITE]); + host->rrdpush_sender_pipe[PIPE_WRITE] = -1; + } + + buffer_free(host->rrdpush_sender_buffer); + host->rrdpush_sender_buffer = NULL; + + if(!host->rrdpush_sender_join) { + info("STREAM %s [send]: sending thread detaches itself.", host->hostname); + netdata_thread_detach(netdata_thread_self()); + } + + host->rrdpush_sender_spawn = 0; + + info("STREAM %s [send]: sending thread now exits.", host->hostname); + + rrdhost_unlock(host); + rrdpush_buffer_unlock(host); +} + +void *rrdpush_sender_thread(void *ptr) { + RRDHOST *host = (RRDHOST *)ptr; + + if(!host->rrdpush_send_enabled || !host->rrdpush_send_destination || !*host->rrdpush_send_destination || !host->rrdpush_send_api_key || !*host->rrdpush_send_api_key) { + error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.", host->hostname, gettid()); + return NULL; + } + + info("STREAM %s [send]: thread created (task id %d)", host->hostname, gettid()); + + int timeout = (int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "timeout seconds", 60); + int default_port = (int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "default port", 19999); + size_t max_size = (size_t)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "buffer size bytes", 1024 * 1024); + unsigned int reconnect_delay = (unsigned int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "reconnect delay seconds", 5); + remote_clock_resync_iterations = (unsigned int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "initial clock resync iterations", remote_clock_resync_iterations); + char connected_to[CONNECTED_TO_SIZE + 1] = ""; + + // initialize rrdpush globals + host->rrdpush_sender_buffer = buffer_create(1); + host->rrdpush_sender_connected = 0; + if(pipe(host->rrdpush_sender_pipe) == -1) fatal("STREAM %s [send]: cannot create required pipe.", host->hostname); + + // initialize local variables + size_t begin = 0; + size_t reconnects_counter = 0; + size_t sent_bytes = 0; + size_t sent_bytes_on_this_connection = 0; + size_t send_attempts = 0; + + + time_t last_sent_t = 0; + struct 
pollfd fds[2], *ifd, *ofd; + nfds_t fdmax; + + ifd = &fds[0]; + ofd = &fds[1]; + + size_t not_connected_loops = 0; + + netdata_thread_cleanup_push(rrdpush_sender_thread_cleanup_callback, host); + + for(; host->rrdpush_send_enabled && !netdata_exit ;) { + // check for outstanding cancellation requests + netdata_thread_testcancel(); + + // if we don't have socket open, lets wait a bit + if(unlikely(host->rrdpush_sender_socket == -1)) { + send_attempts = 0; + + if(not_connected_loops == 0 && sent_bytes_on_this_connection > 0) { + // fast re-connection on first disconnect + sleep_usec(USEC_PER_MS * 500); // milliseconds + } + else { + // slow re-connection on repeating errors + sleep_usec(USEC_PER_SEC * reconnect_delay); // seconds + } + + if(rrdpush_sender_thread_connect_to_master(host, default_port, timeout, &reconnects_counter, connected_to, CONNECTED_TO_SIZE)) { + last_sent_t = now_monotonic_sec(); + + // reset the buffer, to properly send charts and metrics + rrdpush_sender_thread_data_flush(host); + + // send from the beginning + begin = 0; + + // make sure the next reconnection will be immediate + not_connected_loops = 0; + + // reset the bytes we have sent for this session + sent_bytes_on_this_connection = 0; + + // let the data collection threads know we are ready + host->rrdpush_sender_connected = 1; + } + else { + // increase the failed connections counter + not_connected_loops++; + + // reset the number of bytes sent + sent_bytes_on_this_connection = 0; + } + + // loop through + continue; + } + else if(unlikely(now_monotonic_sec() - last_sent_t > timeout)) { + error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection via %zu send attempts.", host->hostname, connected_to, timeout, sent_bytes_on_this_connection, send_attempts); + rrdpush_sender_thread_close_socket(host); + } + + ifd->fd = host->rrdpush_sender_pipe[PIPE_READ]; + ifd->events = POLLIN; + ifd->revents = 0; + + ofd->fd = host->rrdpush_sender_socket; + ofd->revents = 0; + if(ofd->fd != -1 && begin < buffer_strlen(host->rrdpush_sender_buffer)) { + debug(D_STREAM, "STREAM: Requesting data output on streaming socket %d...", ofd->fd); + ofd->events = POLLOUT; + fdmax = 2; + send_attempts++; + } + else { + debug(D_STREAM, "STREAM: Not requesting data output on streaming socket %d (nothing to send now)...", ofd->fd); + ofd->events = 0; + fdmax = 1; + } + + debug(D_STREAM, "STREAM: Waiting for poll() events (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_sender_buffer)); + if(unlikely(netdata_exit)) break; + int retval = poll(fds, fdmax, 1000); + if(unlikely(netdata_exit)) break; + + if(unlikely(retval == -1)) { + debug(D_STREAM, "STREAM: poll() failed (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_sender_buffer)); + + if(errno == EAGAIN || errno == EINTR) { + debug(D_STREAM, "STREAM: poll() failed with EAGAIN or EINTR..."); + } + else { + error("STREAM %s [send to %s]: failed to poll(). 
Closing socket.", host->hostname, connected_to); + rrdpush_sender_thread_close_socket(host); + } + + continue; + } + else if(likely(retval)) { + if (ifd->revents & POLLIN || ifd->revents & POLLPRI) { + debug(D_STREAM, "STREAM: Data added to send buffer (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_sender_buffer)); + + char buffer[1000 + 1]; + if (read(host->rrdpush_sender_pipe[PIPE_READ], buffer, 1000) == -1) + error("STREAM %s [send to %s]: cannot read from internal pipe.", host->hostname, connected_to); + } + + if (ofd->revents & POLLOUT) { + if (begin < buffer_strlen(host->rrdpush_sender_buffer)) { + debug(D_STREAM, "STREAM: Sending data (current buffer length %zu bytes, begin = %zu)...", buffer_strlen(host->rrdpush_sender_buffer), begin); + + // BEGIN RRDPUSH LOCKED SESSION + + // during this session, data collectors + // will not be able to append data to our buffer + // but the socket is in non-blocking mode + // so, we will not block at send() + + netdata_thread_disable_cancelability(); + + debug(D_STREAM, "STREAM: Getting exclusive lock on host..."); + rrdpush_buffer_lock(host); + + debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_sender_buffer)); + ssize_t ret = send(host->rrdpush_sender_socket, &host->rrdpush_sender_buffer->buffer[begin], buffer_strlen(host->rrdpush_sender_buffer) - begin, MSG_DONTWAIT); + if (unlikely(ret == -1)) { + if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) { + debug(D_STREAM, "STREAM: Send failed - closing socket..."); + error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_bytes_on_this_connection); + rrdpush_sender_thread_close_socket(host); + } + else { + debug(D_STREAM, "STREAM: Send failed - will retry..."); + } + } + else if (likely(ret > 0)) { + // DEBUG - dump the string to see it + //char c = host->rrdpush_sender_buffer->buffer[begin + ret]; + //host->rrdpush_sender_buffer->buffer[begin + ret] = '\0'; + //debug(D_STREAM, "STREAM: sent from %zu to %zd:\n%s\n", begin, ret, &host->rrdpush_sender_buffer->buffer[begin]); + //host->rrdpush_sender_buffer->buffer[begin + ret] = c; + + sent_bytes_on_this_connection += ret; + sent_bytes += ret; + begin += ret; + + if (begin == buffer_strlen(host->rrdpush_sender_buffer)) { + // we send it all + + debug(D_STREAM, "STREAM: Sent %zd bytes (the whole buffer)...", ret); + buffer_flush(host->rrdpush_sender_buffer); + begin = 0; + } + else { + debug(D_STREAM, "STREAM: Sent %zd bytes (part of the data buffer)...", ret); + } + + last_sent_t = now_monotonic_sec(); + } + else { + debug(D_STREAM, "STREAM: send() returned %zd - closing the socket...", ret); + error("STREAM %s [send to %s]: failed to send metrics (send() returned %zd) - closing connection - we have sent %zu bytes on this connection.", + host->hostname, connected_to, ret, sent_bytes_on_this_connection); + rrdpush_sender_thread_close_socket(host); + } + + debug(D_STREAM, "STREAM: Releasing exclusive lock on host..."); + rrdpush_buffer_unlock(host); + + netdata_thread_enable_cancelability(); + + // END RRDPUSH LOCKED SESSION + } + else { + debug(D_STREAM, "STREAM: we have sent the entire buffer, but we received POLLOUT..."); + } + } + + if(host->rrdpush_sender_socket != -1) { + char *error = NULL; + + if (unlikely(ofd->revents & POLLERR)) + error = "socket reports errors (POLLERR)"; + + else if (unlikely(ofd->revents & POLLHUP)) + error = "connection closed by 
remote end (POLLHUP)"; + + else if (unlikely(ofd->revents & POLLNVAL)) + error = "connection is invalid (POLLNVAL)"; + + if(unlikely(error)) { + debug(D_STREAM, "STREAM: %s - closing socket...", error); + error("STREAM %s [send to %s]: %s - reopening socket - we have sent %zu bytes on this connection.", host->hostname, connected_to, error, sent_bytes_on_this_connection); + rrdpush_sender_thread_close_socket(host); + } + } + } + else { + debug(D_STREAM, "STREAM: poll() timed out."); + } + + // protection from overflow + if(buffer_strlen(host->rrdpush_sender_buffer) > max_size) { + debug(D_STREAM, "STREAM: Buffer is too big (%zu bytes), bigger than the max (%zu) - flushing it...", buffer_strlen(host->rrdpush_sender_buffer), max_size); + errno = 0; + error("STREAM %s [send to %s]: too many data pending - buffer is %zu bytes long, %zu unsent - we have sent %zu bytes in total, %zu on this connection. Closing connection to flush the data.", host->hostname, connected_to, host->rrdpush_sender_buffer->len, host->rrdpush_sender_buffer->len - begin, sent_bytes, sent_bytes_on_this_connection); + rrdpush_sender_thread_close_socket(host); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + + +// ---------------------------------------------------------------------------- +// rrdpush receiver thread + +static void log_stream_connection(const char *client_ip, const char *client_port, const char *api_key, const char *machine_guid, const char *host, const char *msg) { + log_access("STREAM: %d '[%s]:%s' '%s' host '%s' api key '%s' machine guid '%s'", gettid(), client_ip, client_port, msg, host, api_key, machine_guid); +} + +static RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY get_multiple_connections_strategy(struct config *c, const char *section, const char *name, RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY def) { + char *value; + switch(def) { + default: + case RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW: + value = "allow"; + break; + + case RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW: + value = "deny"; + break; + } + + value = appconfig_get(c, section, name, value); + + RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY ret = def; + + if(strcasecmp(value, "allow") == 0 || strcasecmp(value, "permit") == 0 || strcasecmp(value, "accept") == 0) + ret = RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW; + + else if(strcasecmp(value, "deny") == 0 || strcasecmp(value, "reject") == 0 || strcasecmp(value, "block") == 0) + ret = RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW; + + else + error("Invalid stream config value at section [%s], setting '%s', value '%s'", section, name, value); + + return ret; +} + +static int rrdpush_receive(int fd + , const char *key + , const char *hostname + , const char *registry_hostname + , const char *machine_guid + , const char *os + , const char *timezone + , const char *tags + , const char *program_name + , const char *program_version + , int update_every + , char *client_ip + , char *client_port +) { + RRDHOST *host; + int history = default_rrd_history_entries; + RRD_MEMORY_MODE mode = default_rrd_memory_mode; + int health_enabled = default_health_enabled; + int rrdpush_enabled = default_rrdpush_enabled; + char *rrdpush_destination = default_rrdpush_destination; + char *rrdpush_api_key = default_rrdpush_api_key; + char *rrdpush_send_charts_matching = default_rrdpush_send_charts_matching; + time_t alarms_delay = 60; + RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY rrdpush_multiple_connections_strategy = RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW; + + update_every = (int)appconfig_get_number(&stream_config, machine_guid, "update every", 
update_every); + if(update_every < 0) update_every = 1; + + history = (int)appconfig_get_number(&stream_config, key, "default history", history); + history = (int)appconfig_get_number(&stream_config, machine_guid, "history", history); + if(history < 5) history = 5; + + mode = rrd_memory_mode_id(appconfig_get(&stream_config, key, "default memory mode", rrd_memory_mode_name(mode))); + mode = rrd_memory_mode_id(appconfig_get(&stream_config, machine_guid, "memory mode", rrd_memory_mode_name(mode))); + + health_enabled = appconfig_get_boolean_ondemand(&stream_config, key, "health enabled by default", health_enabled); + health_enabled = appconfig_get_boolean_ondemand(&stream_config, machine_guid, "health enabled", health_enabled); + + alarms_delay = appconfig_get_number(&stream_config, key, "default postpone alarms on connect seconds", alarms_delay); + alarms_delay = appconfig_get_number(&stream_config, machine_guid, "postpone alarms on connect seconds", alarms_delay); + + rrdpush_enabled = appconfig_get_boolean(&stream_config, key, "default proxy enabled", rrdpush_enabled); + rrdpush_enabled = appconfig_get_boolean(&stream_config, machine_guid, "proxy enabled", rrdpush_enabled); + + rrdpush_destination = appconfig_get(&stream_config, key, "default proxy destination", rrdpush_destination); + rrdpush_destination = appconfig_get(&stream_config, machine_guid, "proxy destination", rrdpush_destination); + + rrdpush_api_key = appconfig_get(&stream_config, key, "default proxy api key", rrdpush_api_key); + rrdpush_api_key = appconfig_get(&stream_config, machine_guid, "proxy api key", rrdpush_api_key); + + rrdpush_multiple_connections_strategy = get_multiple_connections_strategy(&stream_config, key, "multiple connections", rrdpush_multiple_connections_strategy); + rrdpush_multiple_connections_strategy = get_multiple_connections_strategy(&stream_config, machine_guid, "multiple connections", rrdpush_multiple_connections_strategy); + + rrdpush_send_charts_matching = appconfig_get(&stream_config, key, "default proxy send charts matching", rrdpush_send_charts_matching); + rrdpush_send_charts_matching = appconfig_get(&stream_config, machine_guid, "proxy send charts matching", rrdpush_send_charts_matching); + + tags = appconfig_set_default(&stream_config, machine_guid, "host tags", (tags)?tags:""); + if(tags && !*tags) tags = NULL; + + if(!strcmp(machine_guid, "localhost")) + host = localhost; + else + host = rrdhost_find_or_create( + hostname + , registry_hostname + , machine_guid + , os + , timezone + , tags + , program_name + , program_version + , update_every + , history + , mode + , (unsigned int)(health_enabled != CONFIG_BOOLEAN_NO) + , (unsigned int)(rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key) + , rrdpush_destination + , rrdpush_api_key + , rrdpush_send_charts_matching + ); + + if(!host) { + close(fd); + log_stream_connection(client_ip, client_port, key, machine_guid, hostname, "FAILED - CANNOT ACQUIRE HOST"); + error("STREAM %s [receive from [%s]:%s]: failed to find/create host structure.", hostname, client_ip, client_port); + return 1; + } + +#ifdef NETDATA_INTERNAL_CHECKS + info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s, tags '%s'" + , hostname + , client_ip + , client_port + , host->hostname + , host->machine_guid + , host->rrd_update_every + , host->rrd_history_entries + , rrd_memory_mode_name(host->rrd_memory_mode) + 
, (health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto") + , host->tags?host->tags:"" + ); +#endif // NETDATA_INTERNAL_CHECKS + + struct plugind cd = { + .enabled = 1, + .update_every = default_rrd_update_every, + .pid = 0, + .serial_failures = 0, + .successful_collections = 0, + .obsolete = 0, + .started_t = now_realtime_sec(), + .next = NULL, + }; + + // put the client IP and port into the buffers used by plugins.d + snprintfz(cd.id, CONFIG_MAX_NAME, "%s:%s", client_ip, client_port); + snprintfz(cd.filename, FILENAME_MAX, "%s:%s", client_ip, client_port); + snprintfz(cd.fullfilename, FILENAME_MAX, "%s:%s", client_ip, client_port); + snprintfz(cd.cmd, PLUGINSD_CMD_MAX, "%s:%s", client_ip, client_port); + + info("STREAM %s [receive from [%s]:%s]: initializing communication...", host->hostname, client_ip, client_port); + if(send_timeout(fd, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT), 0, 60) != strlen(START_STREAMING_PROMPT)) { + log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "FAILED - CANNOT REPLY"); + error("STREAM %s [receive from [%s]:%s]: cannot send ready command.", host->hostname, client_ip, client_port); + close(fd); + return 0; + } + + // remove the non-blocking flag from the socket + if(sock_delnonblock(fd) < 0) + error("STREAM %s [receive from [%s]:%s]: cannot remove the non-blocking flag from socket %d", host->hostname, client_ip, client_port, fd); + + // convert the socket to a FILE * + FILE *fp = fdopen(fd, "r"); + if(!fp) { + log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "FAILED - SOCKET ERROR"); + error("STREAM %s [receive from [%s]:%s]: failed to get a FILE for FD %d.", host->hostname, client_ip, client_port, fd); + close(fd); + return 0; + } + + rrdhost_wrlock(host); + if(host->connected_senders > 0) { + switch(rrdpush_multiple_connections_strategy) { + case RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW: + info("STREAM %s [receive from [%s]:%s]: multiple streaming connections for the same host detected. If multiple netdata are pushing metrics for the same charts, at the same time, the result is unexpected.", host->hostname, client_ip, client_port); + break; + + case RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW: + rrdhost_unlock(host); + log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "REJECTED - ALREADY CONNECTED"); + info("STREAM %s [receive from [%s]:%s]: multiple streaming connections for the same host detected. Rejecting new connection.", host->hostname, client_ip, client_port); + fclose(fp); + return 0; + } + } + + rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN); + host->connected_senders++; + host->senders_disconnected_time = 0; + if(health_enabled != CONFIG_BOOLEAN_NO) { + if(alarms_delay > 0) { + host->health_delay_up_to = now_realtime_sec() + alarms_delay; + info("Postponing health checks for %ld seconds, on host '%s', because it was just connected." 
+ , alarms_delay + , host->hostname + ); + } + } + rrdhost_unlock(host); + + // call the plugins.d processor to receive the metrics + info("STREAM %s [receive from [%s]:%s]: receiving metrics...", host->hostname, client_ip, client_port); + log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "CONNECTED"); + + size_t count = pluginsd_process(host, &cd, fp, 1); + + log_stream_connection(client_ip, client_port, key, host->machine_guid, host->hostname, "DISCONNECTED"); + error("STREAM %s [receive from [%s]:%s]: disconnected (completed %zu updates).", host->hostname, client_ip, client_port, count); + + rrdhost_wrlock(host); + host->senders_disconnected_time = now_realtime_sec(); + host->connected_senders--; + if(!host->connected_senders) { + rrdhost_flag_set(host, RRDHOST_FLAG_ORPHAN); + if(health_enabled == CONFIG_BOOLEAN_AUTO) + host->health_enabled = 0; + } + rrdhost_unlock(host); + + if(host->connected_senders == 0) + rrdpush_sender_thread_stop(host); + + // cleanup + fclose(fp); + + return (int)count; +} + +struct rrdpush_thread { + int fd; + char *key; + char *hostname; + char *registry_hostname; + char *machine_guid; + char *os; + char *timezone; + char *tags; + char *client_ip; + char *client_port; + char *program_name; + char *program_version; + int update_every; +}; + +static void rrdpush_receiver_thread_cleanup(void *ptr) { + static __thread int executed = 0; + if(!executed) { + executed = 1; + struct rrdpush_thread *rpt = (struct rrdpush_thread *) ptr; + + info("STREAM %s [receive from [%s]:%s]: receive thread ended (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid()); + + freez(rpt->key); + freez(rpt->hostname); + freez(rpt->registry_hostname); + freez(rpt->machine_guid); + freez(rpt->os); + freez(rpt->timezone); + freez(rpt->tags); + freez(rpt->client_ip); + freez(rpt->client_port); + freez(rpt->program_name); + freez(rpt->program_version); + freez(rpt); + } +} + +static void *rrdpush_receiver_thread(void *ptr) { + netdata_thread_cleanup_push(rrdpush_receiver_thread_cleanup, ptr); + + struct rrdpush_thread *rpt = (struct rrdpush_thread *)ptr; + info("STREAM %s [%s]:%s: receive thread created (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid()); + + rrdpush_receive( + rpt->fd + , rpt->key + , rpt->hostname + , rpt->registry_hostname + , rpt->machine_guid + , rpt->os + , rpt->timezone + , rpt->tags + , rpt->program_name + , rpt->program_version + , rpt->update_every + , rpt->client_ip + , rpt->client_port + ); + + netdata_thread_cleanup_pop(1); + return NULL; +} + +static void rrdpush_sender_thread_spawn(RRDHOST *host) { + rrdhost_wrlock(host); + + if(!host->rrdpush_sender_spawn) { + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STREAM_SENDER[%s]", host->hostname); + + if(netdata_thread_create(&host->rrdpush_sender_thread, tag, NETDATA_THREAD_OPTION_JOINABLE, rrdpush_sender_thread, (void *) host)) + error("STREAM %s [send]: failed to create new thread for client.", host->hostname); + else + host->rrdpush_sender_spawn = 1; + } + + rrdhost_unlock(host); +} + +int rrdpush_receiver_permission_denied(struct web_client *w) { + // we always respond with the same message and error code + // to prevent an attacker from gaining info about the error + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, "You are not permitted to access this. 
Check the logs for more info."); + return 401; +} + +int rrdpush_receiver_too_busy_now(struct web_client *w) { + // we always respond with the same message and error code + // to prevent an attacker from gaining info about the error + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, "The server is too busy now to accept this request. Try later."); + return 503; +} + +int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url) { + (void)host; + + info("clients wants to STREAM metrics."); + + char *key = NULL, *hostname = NULL, *registry_hostname = NULL, *machine_guid = NULL, *os = "unknown", *timezone = "unknown", *tags = NULL; + int update_every = default_rrd_update_every; + char buf[GUID_LEN + 1]; + + while(url) { + char *value = mystrsep(&url, "?&"); + if(!value || !*value) continue; + + char *name = mystrsep(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if(!strcmp(name, "key")) + key = value; + else if(!strcmp(name, "hostname")) + hostname = value; + else if(!strcmp(name, "registry_hostname")) + registry_hostname = value; + else if(!strcmp(name, "machine_guid")) + machine_guid = value; + else if(!strcmp(name, "update_every")) + update_every = (int)strtoul(value, NULL, 0); + else if(!strcmp(name, "os")) + os = value; + else if(!strcmp(name, "timezone")) + timezone = value; + else if(!strcmp(name, "tags")) + tags = value; + else + info("STREAM [receive from [%s]:%s]: request has parameter '%s' = '%s', which is not used.", w->client_ip, w->client_port, key, value); + } + + if(!key || !*key) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO KEY"); + error("STREAM [receive from [%s]:%s]: request without an API key. Forbidding access.", w->client_ip, w->client_port); + return rrdpush_receiver_permission_denied(w); + } + + if(!hostname || !*hostname) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO HOSTNAME"); + error("STREAM [receive from [%s]:%s]: request without a hostname. Forbidding access.", w->client_ip, w->client_port); + return rrdpush_receiver_permission_denied(w); + } + + if(!machine_guid || !*machine_guid) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO MACHINE GUID"); + error("STREAM [receive from [%s]:%s]: request without a machine GUID. Forbidding access.", w->client_ip, w->client_port); + return rrdpush_receiver_permission_denied(w); + } + + if(regenerate_guid(key, buf) == -1) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - INVALID KEY"); + error("STREAM [receive from [%s]:%s]: API key '%s' is not valid GUID (use the command uuidgen to generate one). 
Forbidding access.", w->client_ip, w->client_port, key); + return rrdpush_receiver_permission_denied(w); + } + + if(regenerate_guid(machine_guid, buf) == -1) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - INVALID MACHINE GUID"); + error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not GUID. Forbidding access.", w->client_ip, w->client_port, machine_guid); + return rrdpush_receiver_permission_denied(w); + } + + if(!appconfig_get_boolean(&stream_config, key, "enabled", 0)) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - KEY NOT ENABLED"); + error("STREAM [receive from [%s]:%s]: API key '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, key); + return rrdpush_receiver_permission_denied(w); + } + + { + SIMPLE_PATTERN *key_allow_from = simple_pattern_create(appconfig_get(&stream_config, key, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT); + if(key_allow_from) { + if(!simple_pattern_matches(key_allow_from, w->client_ip)) { + simple_pattern_free(key_allow_from); + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname) ? hostname : "-", "ACCESS DENIED - KEY NOT ALLOWED FROM THIS IP"); + error("STREAM [receive from [%s]:%s]: API key '%s' is not permitted from this IP. Forbidding access.", w->client_ip, w->client_port, key); + return rrdpush_receiver_permission_denied(w); + } + simple_pattern_free(key_allow_from); + } + } + + if(!appconfig_get_boolean(&stream_config, machine_guid, "enabled", 1)) { + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - MACHINE GUID NOT ENABLED"); + error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, machine_guid); + return rrdpush_receiver_permission_denied(w); + } + + { + SIMPLE_PATTERN *machine_allow_from = simple_pattern_create(appconfig_get(&stream_config, machine_guid, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT); + if(machine_allow_from) { + if(!simple_pattern_matches(machine_allow_from, w->client_ip)) { + simple_pattern_free(machine_allow_from); + log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname) ? hostname : "-", "ACCESS DENIED - MACHINE GUID NOT ALLOWED FROM THIS IP"); + error("STREAM [receive from [%s]:%s]: Machine GUID '%s' is not permitted from this IP. Forbidding access.", w->client_ip, w->client_port, machine_guid); + return rrdpush_receiver_permission_denied(w); + } + simple_pattern_free(machine_allow_from); + } + } + + if(unlikely(web_client_streaming_rate_t > 0)) { + static netdata_mutex_t stream_rate_mutex = NETDATA_MUTEX_INITIALIZER; + static volatile time_t last_stream_accepted_t = 0; + + netdata_mutex_lock(&stream_rate_mutex); + time_t now = now_realtime_sec(); + + if(unlikely(last_stream_accepted_t == 0)) + last_stream_accepted_t = now; + + if(now - last_stream_accepted_t < web_client_streaming_rate_t) { + netdata_mutex_unlock(&stream_rate_mutex); + error("STREAM [receive from [%s]:%s]: too busy to accept new streaming request. 
Will be allowed in %ld secs.", w->client_ip, w->client_port, (long)(web_client_streaming_rate_t - (now - last_stream_accepted_t))); + return rrdpush_receiver_too_busy_now(w); + } + + last_stream_accepted_t = now; + netdata_mutex_unlock(&stream_rate_mutex); + } + + struct rrdpush_thread *rpt = callocz(1, sizeof(struct rrdpush_thread)); + rpt->fd = w->ifd; + rpt->key = strdupz(key); + rpt->hostname = strdupz(hostname); + rpt->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname); + rpt->machine_guid = strdupz(machine_guid); + rpt->os = strdupz(os); + rpt->timezone = strdupz(timezone); + rpt->tags = (tags)?strdupz(tags):NULL; + rpt->client_ip = strdupz(w->client_ip); + rpt->client_port = strdupz(w->client_port); + rpt->update_every = update_every; + + if(w->user_agent && w->user_agent[0]) { + char *t = strchr(w->user_agent, '/'); + if(t && *t) { + *t = '\0'; + t++; + } + + rpt->program_name = strdupz(w->user_agent); + if(t && *t) rpt->program_version = strdupz(t); + } + + netdata_thread_t thread; + + debug(D_SYSTEM, "starting STREAM receive thread."); + + char tag[FILENAME_MAX + 1]; + snprintfz(tag, FILENAME_MAX, "STREAM_RECEIVER[%s,[%s]:%s]", rpt->hostname, w->client_ip, w->client_port); + + if(netdata_thread_create(&thread, tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_receiver_thread, (void *)rpt)) + error("Failed to create new STREAM receive thread for client."); + + // prevent the caller from closing the streaming socket + if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) { + web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET); + } + else { + if(w->ifd == w->ofd) + w->ifd = w->ofd = -1; + else + w->ifd = -1; + } + + buffer_flush(w->response.data); + return 200; +} diff --git a/streaming/rrdpush.h b/streaming/rrdpush.h new file mode 100644 index 000000000..7bf3db93a --- /dev/null +++ b/streaming/rrdpush.h @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDPUSH_H +#define NETDATA_RRDPUSH_H 1 + +#include "web/server/web_client.h" +#include "daemon/common.h" + +extern unsigned int default_rrdpush_enabled; +extern char *default_rrdpush_destination; +extern char *default_rrdpush_api_key; +extern char *default_rrdpush_send_charts_matching; +extern unsigned int remote_clock_resync_iterations; + +extern int rrdpush_init(); +extern void rrdset_done_push(RRDSET *st); +extern void rrdset_push_chart_definition_now(RRDSET *st); +extern void *rrdpush_sender_thread(void *ptr); + +extern int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url); +extern void rrdpush_sender_thread_stop(RRDHOST *host); + +extern void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv); + +#endif //NETDATA_RRDPUSH_H diff --git a/streaming/stream.conf b/streaming/stream.conf new file mode 100644 index 000000000..493eba37c --- /dev/null +++ b/streaming/stream.conf @@ -0,0 +1,191 @@ +# netdata configuration for aggregating data from remote hosts +# +# API keys authorize a pair of sending-receiving netdata servers. +# Once their communication is authorized, they can exchange metrics for any +# number of hosts. +# +# You can generate API keys, with the linux command: uuidgen + + +# ----------------------------------------------------------------------------- +# 1. ON SLAVE NETDATA - THE ONE THAT WILL BE SENDING METRICS + +[stream] + # Enable this on slaves, to have them send metrics. + enabled = no + + # Where is the receiving netdata? 
+ # A space separated list of: + # + # [PROTOCOL:]HOST[%INTERFACE][:PORT] + # + # If many are given, the first available will get the metrics. + # + # PROTOCOL = tcp, udp, or unix (only tcp and unix are supported by masters) + # HOST = an IPv4, IPv6 IP, or a hostname, or a unix domain socket path. + # IPv6 IPs should be given with brackets [ip:address] + # INTERFACE = the network interface to use (only for IPv6) + # PORT = the port number or service name (/etc/services) + # + # This communication is not HTTP (it cannot be proxied by web proxies). + destination = + + # The API_KEY to use (as the sender) + api key = + + # The timeout to connect and send metrics + timeout seconds = 60 + + # If the destination line above does not specify a port, use this + default port = 19999 + + # filter the charts to be streamed + # netdata SIMPLE PATTERN: + # - space separated list of patterns (use \ to include spaces in patterns) + # - use * as wildcard, any number of times within each pattern + # - prefix a pattern with ! for a negative match (ie not stream the charts it matches) + # - the order of patterns is important (left to right) + # To send all except a few, use: !this !that * (ie append a wildcard pattern) + send charts matching = * + + # The buffer to use for sending metrics. + # 1MB is good for 10-20 seconds of data, so increase this if you expect latencies. + # The buffer is flushed on reconnects (this will not prevent gaps at the charts). + buffer size bytes = 1048576 + + # If the connection fails, or it disconnects, + # retry after that many seconds. + reconnect delay seconds = 5 + + # Sync the clock of the charts for that many iterations, when starting. + initial clock resync iterations = 60 + + +# ----------------------------------------------------------------------------- +# 2. ON MASTER NETDATA - THE ONE THAT WILL BE RECEIVING METRICS + +# You can have one API key per slave, +# or the same API key for all slaves. +# +# netdata searches for options in this order: +# +# a) master netdata settings (netdata.conf) +# b) [stream] section (above) +# c) [API_KEY] section (below, settings for the API key) +# d) [MACHINE_GUID] section (below, settings for each machine) +# +# You can combine the above (the more specific setting will be used). + +# API key authentication +# If the key is not listed here, it will not be able to push metrics. + +# [API_KEY] is [YOUR-API-KEY], i.e [11111111-2222-3333-4444-555555555555] +[API_KEY] + # Default settings for this API key + + # You can disable the API key, by setting this to: no + # The default (for unknown API keys) is: no + enabled = no + + # A list of simple patterns matching the IPs of the servers that + # will be pushing metrics using this API key. + # The metrics are received via the API port, so the same IPs + # should also be matched at netdata.conf [web].allow connections from + allow from = * + + # The default history in entries, for all hosts using this API key. + # You can also set it per host below. + # If you don't set it here, the history size of the central netdata + # will be used. + default history = 3600 + + # The default memory mode to be used for all hosts using this API key. + # You can also set it per host below. + # If you don't set it here, the memory mode of netdata.conf will be used. 
+ # Valid modes: + # save save on exit, load on start + # map like swap (continuously syncing to disks - you need SSD) + # ram keep it in RAM, don't touch the disk + # none no database at all (use this on headless proxies) + default memory mode = ram + + # Shall we enable health monitoring for the hosts using this API key? + # 3 possible values: + # yes enable alarms + # no do not enable alarms + # auto enable alarms, only when the sending netdata is connected + # You can also set it per host, below. + # The default is taken from [health].enabled of netdata.conf + health enabled by default = auto + + # postpone alarms for a short period after the sender is connected + default postpone alarms on connect seconds = 60 + + # allow or deny multiple connections for the same host? + # If you are sure all your netdata have their own machine GUID, + # set this to 'allow', since it allows faster reconnects. + # When set to 'deny', new connections for a host will not be + # accepted until an existing connection is cleared. + multiple connections = allow + + # need to route metrics differently? set these. + # the defaults are the ones at the [stream] section (above) + #default proxy enabled = yes | no + #default proxy destination = IP:PORT IP:PORT ... + #default proxy api key = API_KEY + #default proxy send charts matching = * + + +# ----------------------------------------------------------------------------- +# 3. PER SENDING HOST SETTINGS, ON MASTER NETDATA +# THIS IS OPTIONAL - YOU DON'T HAVE TO CONFIGURE IT + +# This section exists to give you finer control of the master settings for each +# slave host, when the same API key is used by many netdata slaves / proxies. +# +# Each netdata has a unique GUID - generated the first time netdata starts. +# You can find it at /var/lib/netdata/registry/netdata.public.unique.id +# (at the slave). +# +# The host sending data will have one. If the host is not ephemeral, +# you can give settings for each sending host here. + +[MACHINE_GUID] + # enable this host: yes | no + # When disabled, the master will not receive metrics for this host. + # THIS IS NOT A SECURITY MECHANISM - AN ATTACKER CAN SET ANY OTHER GUID. + # Use only the API key for security. + enabled = no + + # A list of simple patterns matching the IPs of the servers that + # will be pushing metrics using this MACHINE GUID. + # The metrics are received via the API port, so the same IPs + # should also be matched at netdata.conf [web].allow connections from + # and at stream.conf [API_KEY].allow from + allow from = * + + # The number of entries in the database + history = 3600 + + # The memory mode of the database: save | map | ram | none + memory mode = save + + # Health / alarms control: yes | no | auto + health enabled = yes + + # postpone alarms when the sender connects + postpone alarms on connect seconds = 60 + + # allow or deny multiple connections for the same host? + # If you are sure all your netdata have their own machine GUID, + # set this to 'allow', since it allows faster reconnects. + # When set to 'deny', new connections for a host will not be + # accepted until an existing connection is cleared. + multiple connections = allow + + # need to route metrics differently? + # the defaults are the ones at the [API KEY] section + #proxy enabled = yes | no + #proxy destination = IP:PORT IP:PORT ... 
+ #proxy api key = API_KEY + #proxy send charts matching = * diff --git a/system/Makefile.am b/system/Makefile.am index 255147bad..b085dca88 100644 --- a/system/Makefile.am +++ b/system/Makefile.am @@ -1,20 +1,26 @@ # # Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> +# SPDX-License-Identifier: GPL-3.0-or-later # MAINTAINERCLEANFILES= $(srcdir)/Makefile.in CLEANFILES = \ + edit-config \ netdata-openrc \ netdata.logrotate \ netdata.service \ netdata-init-d \ netdata-lsb \ netdata-freebsd \ + netdata.plist \ $(NULL) include $(top_srcdir)/build/subst.inc - SUFFIXES = .in +dist_config_SCRIPTS = \ + edit-config \ + $(NULL) + nodist_noinst_DATA = \ netdata-openrc \ netdata.logrotate \ @@ -22,14 +28,17 @@ nodist_noinst_DATA = \ netdata-init-d \ netdata-lsb \ netdata-freebsd \ + netdata.plist \ $(NULL) dist_noinst_DATA = \ + edit-config.in \ netdata-openrc.in \ netdata.logrotate.in \ netdata.service.in \ netdata-init-d.in \ netdata-lsb.in \ netdata-freebsd.in \ + netdata.plist.in \ netdata.conf \ $(NULL) diff --git a/system/Makefile.in b/system/Makefile.in index a9843de4f..3598ebde5 100644 --- a/system/Makefile.in +++ b/system/Makefile.in @@ -14,6 +14,7 @@ @SET_MAKE@ + VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ @@ -79,23 +80,55 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(dist_noinst_DATA) + $(srcdir)/Makefile.am $(dist_config_SCRIPTS) \ + $(dist_noinst_DATA) subdir = system ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) 
print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(configdir)" +SCRIPTS = $(dist_config_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false @@ -215,6 +248,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -236,6 +270,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ @@ -265,18 +300,25 @@ webdir = @webdir@ # # Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> +# SPDX-License-Identifier: GPL-3.0-or-later # MAINTAINERCLEANFILES = $(srcdir)/Makefile.in CLEANFILES = \ + edit-config \ netdata-openrc \ netdata.logrotate \ netdata.service \ netdata-init-d \ netdata-lsb \ netdata-freebsd \ + netdata.plist \ $(NULL) SUFFIXES = .in +dist_config_SCRIPTS = \ + edit-config \ + $(NULL) + nodist_noinst_DATA = \ netdata-openrc \ netdata.logrotate \ @@ -284,15 +326,18 @@ nodist_noinst_DATA = \ netdata-init-d \ netdata-lsb \ netdata-freebsd \ + netdata.plist \ $(NULL) dist_noinst_DATA = \ + edit-config.in \ netdata-openrc.in \ netdata.logrotate.in \ netdata.service.in \ netdata-init-d.in \ netdata-lsb.in \ netdata-freebsd.in \ + netdata.plist.in \ netdata.conf \ $(NULL) @@ -331,6 +376,41 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): +install-dist_configSCRIPTS: $(dist_config_SCRIPTS) + @$(NORMAL_INSTALL) + @list='$(dist_config_SCRIPTS)'; test -n "$(configdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(configdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(configdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n' \ + -e 'h;s|.*|.|' \ + -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) { files[d] = files[d] " " $$1; \ + if (++n[d] == $(am__install_max)) { \ + print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ + else { print "f", d "/" $$4, $$1 } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(configdir)$$dir'"; \ + $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(configdir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-dist_configSCRIPTS: + @$(NORMAL_UNINSTALL) + @list='$(dist_config_SCRIPTS)'; test -n "$(configdir)" || exit 0; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 's,.*/,,;$(transform)'`; \ + dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: @@ -370,8 +450,11 @@ distdir: $(DISTFILES) done check-am: all-am check: check-am 
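For readers wiring up the streaming options documented in stream.conf above, a minimal sketch of a slave/master pair follows. The host names, the network and the API key are illustrative placeholders, not values shipped by this patch, and the paths assume a stock /etc/netdata install:

    #!/bin/sh
    # Sketch: stream metrics from a slave to a master (hypothetical hosts and key).

    # On the slave, point [stream] at the master and pick the charts to send:
    cat >/etc/netdata/stream.conf <<'EOF'
    [stream]
        enabled = yes
        destination = tcp:master.example.com:19999
        api key = 11111111-2222-3333-4444-555555555555
        # simple pattern: everything except the example charts
        send charts matching = !example.* *
    EOF

    # On the master, accept that key from the slave's network:
    cat >/etc/netdata/stream.conf <<'EOF'
    [11111111-2222-3333-4444-555555555555]
        enabled = yes
        allow from = 10.0.0.*
        default memory mode = ram
    EOF

    # The slave's GUID, in case a per-host [MACHINE_GUID] section is wanted on the master:
    cat /var/lib/netdata/registry/netdata.public.unique.id

    # Restart netdata on both ends for the changes to take effect.

Restarting both daemons is needed because stream.conf is read at startup.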
-all-am: Makefile $(DATA) +all-am: Makefile $(SCRIPTS) $(DATA) installdirs: + for dir in "$(DESTDIR)$(configdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done install: install-am install-exec: install-exec-am install-data: install-data-am @@ -424,7 +507,7 @@ info: info-am info-am: -install-data-am: +install-data-am: install-dist_configSCRIPTS install-dvi: install-dvi-am @@ -468,20 +551,21 @@ ps: ps-am ps-am: -uninstall-am: +uninstall-am: uninstall-dist_configSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic cscopelist-am \ ctags-am distclean distclean-generic distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am + install-data-am install-dist_configSCRIPTS install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-dist_configSCRIPTS .in: if sed \ @@ -489,6 +573,9 @@ uninstall-am: -e 's#[@]sbindir_POST@#$(sbindir)#g' \ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ -e 's#[@]pythondir_POST@#$(pythondir)#g' \ + -e 's#[@]configdir_POST@#$(configdir)#g' \ + -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ + -e 's#[@]cachedir_POST@#$(cachedir)#g' \ $< > $@.tmp; then \ mv "$@.tmp" "$@"; \ else \ diff --git a/system/edit-config b/system/edit-config new file mode 100644 index 000000000..1696f9f34 --- /dev/null +++ b/system/edit-config @@ -0,0 +1,96 @@ +#!/usr/bin/env sh + +[ -f /etc/profile ] && . /etc/profile + +file="${1}" + +EDITOR="${EDITOR-vi}" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" + +if [ -z "${file}" ] +then + cat <<USAGE + +USAGE: + ${0} FILENAME + + Copy and edit the stock config file named: FILENAME + if FILENAME is already copied, it will be edited as-is. + + The EDITOR shell variable is used to define the editor to be used. + + Stock config files at: '${NETDATA_STOCK_CONFIG_DIR}' + User config files at: '${NETDATA_USER_CONFIG_DIR}' + + Available files in '${NETDATA_STOCK_CONFIG_DIR}' to copy and edit: + +USAGE + + cd "${NETDATA_STOCK_CONFIG_DIR}" || exit 1 + ls >&2 -R *.conf */*.conf + exit 1 + +fi + +file_is_in_path() { + local file path real + file="${1}" + path="${2}" + + real="$(readlink -f "${file}")" + + # we don't have working readlink + [ -z "${real}" ] && return 0 + + if [ ! -z "${real}" ] && [ -z "$(echo "${real}" | grep -E "^${path}.*$")" ] + then + echo >&2 "File '${file}' is physically at '${real}', which is not in '${path}'. Aborting." + exit 1 + fi + + return 0 +} + +edit() { + echo >&2 "Editing '${1}' ..." + + # check we can edit + file_is_in_path "${1}" "${NETDATA_USER_CONFIG_DIR}" || exit 1 + + "${EDITOR}" "${1}" + exit $? 
+} + +copy_and_edit() { + # check we can copy + file_is_in_path "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_STOCK_CONFIG_DIR}" || exit 1 + + if [ ! -f "${NETDATA_USER_CONFIG_DIR}/${1}" ] + then + echo >&2 "Copying '${NETDATA_STOCK_CONFIG_DIR}/${1}' to '${NETDATA_USER_CONFIG_DIR}/${1}' ... " + cp -p "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_USER_CONFIG_DIR}/${1}" || exit 1 + fi + + edit "${NETDATA_USER_CONFIG_DIR}/${1}" +} + +# make sure it is not absolute filename +c1="$(echo "${file}" | cut -b 1)" +if [ "${c1}" = "/" ] || [ "${c1}" = "." ] +then + echo >&2 "Please don't use filenames starting with '/' or '.'" + exit 1 +fi + +# already exists +if [ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ] +then + edit "${NETDATA_USER_CONFIG_DIR}/${file}" +fi + +[ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ] && edit "${NETDATA_USER_CONFIG_DIR}/${file}" +[ -f "${NETDATA_STOCK_CONFIG_DIR}/${file}" ] && copy_and_edit "${file}" + +echo >&2 "File '${file}' is not found in '${NETDATA_STOCK_CONFIG_DIR}'" +exit 1 diff --git a/system/edit-config.in b/system/edit-config.in new file mode 100755 index 000000000..1b86549fa --- /dev/null +++ b/system/edit-config.in @@ -0,0 +1,96 @@ +#!/usr/bin/env sh + +[ -f /etc/profile ] && . /etc/profile + +file="${1}" + +EDITOR="${EDITOR-vi}" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" + +if [ -z "${file}" ] +then + cat <<USAGE + +USAGE: + ${0} FILENAME + + Copy and edit the stock config file named: FILENAME + if FILENAME is already copied, it will be edited as-is. + + The EDITOR shell variable is used to define the editor to be used. + + Stock config files at: '${NETDATA_STOCK_CONFIG_DIR}' + User config files at: '${NETDATA_USER_CONFIG_DIR}' + + Available files in '${NETDATA_STOCK_CONFIG_DIR}' to copy and edit: + +USAGE + + cd "${NETDATA_STOCK_CONFIG_DIR}" || exit 1 + ls >&2 -R *.conf */*.conf + exit 1 + +fi + +file_is_in_path() { + local file path real + file="${1}" + path="${2}" + + real="$(readlink -f "${file}")" + + # we don't have working readlink + [ -z "${real}" ] && return 0 + + if [ ! -z "${real}" ] && [ -z "$(echo "${real}" | grep -E "^${path}.*$")" ] + then + echo >&2 "File '${file}' is physically at '${real}', which is not in '${path}'. Aborting." + exit 1 + fi + + return 0 +} + +edit() { + echo >&2 "Editing '${1}' ..." + + # check we can edit + file_is_in_path "${1}" "${NETDATA_USER_CONFIG_DIR}" || exit 1 + + "${EDITOR}" "${1}" + exit $? +} + +copy_and_edit() { + # check we can copy + file_is_in_path "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_STOCK_CONFIG_DIR}" || exit 1 + + if [ ! -f "${NETDATA_USER_CONFIG_DIR}/${1}" ] + then + echo >&2 "Copying '${NETDATA_STOCK_CONFIG_DIR}/${1}' to '${NETDATA_USER_CONFIG_DIR}/${1}' ... " + cp -p "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_USER_CONFIG_DIR}/${1}" || exit 1 + fi + + edit "${NETDATA_USER_CONFIG_DIR}/${1}" +} + +# make sure it is not absolute filename +c1="$(echo "${file}" | cut -b 1)" +if [ "${c1}" = "/" ] || [ "${c1}" = "." 
]
+then
+ echo >&2 "Please don't use filenames starting with '/' or '.'"
+ exit 1
+fi
+
+# already exists
+if [ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ]
+then
+ edit "${NETDATA_USER_CONFIG_DIR}/${file}"
+fi
+
+[ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ] && edit "${NETDATA_USER_CONFIG_DIR}/${file}"
+[ -f "${NETDATA_STOCK_CONFIG_DIR}/${file}" ] && copy_and_edit "${file}"
+
+echo >&2 "File '${file}' is not found in '${NETDATA_STOCK_CONFIG_DIR}'"
+exit 1
diff --git a/system/netdata-freebsd.in b/system/netdata-freebsd.in
index fac2e82c2..300ddc9ec 100644
--- a/system/netdata-freebsd.in
+++ b/system/netdata-freebsd.in
@@ -1,11 +1,13 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
. /etc/rc.subr
name=netdata
rcvar=netdata_enable
-pidfile="@localstatedir_POST@/run/netdata.pid"
+piddir="@localstatedir_POST@/run/netdata"
+pidfile="${piddir}/netdata.pid"
command="@sbindir_POST@/netdata"
command_args="-P ${pidfile}"
@@ -22,11 +24,13 @@ savedb_cmd="netdata_savedb"
netdata_prestart()
{
+ [ ! -d "${piddir}" ] && mkdir -p "${piddir}"
return 0
}
netdata_poststop()
{
+ [ -f "${pidfile}" ] && rm "${pidfile}"
return 0
}
diff --git a/system/netdata-init-d.in b/system/netdata-init-d.in
index 90a4b95be..9ac510196 100644
--- a/system/netdata-init-d.in
+++ b/system/netdata-init-d.in
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# netdata Real-time performance monitoring, done right
# chkconfig: 345 99 01
@@ -13,7 +14,8 @@
DAEMON="netdata"
DAEMON_PATH=@sbindir_POST@
-PIDFILE=@localstatedir_POST@/run/$DAEMON.pid
+PIDFILE_PATH=@localstatedir_POST@/run/netdata
+PIDFILE=$PIDFILE_PATH/$DAEMON.pid
DAEMONOPTS="-P $PIDFILE"
STOP_TIMEOUT="60"
@@ -24,6 +26,7 @@ LOCKFILE=/var/lock/subsys/$DAEMON
service_start()
{
[ -x $DAEMON_PATH ] || exit 5
+ [ ! -d $PIDFILE_PATH ] && mkdir -p $PIDFILE_PATH
echo -n "Starting $DAEMON..."
daemon $DAEMON_PATH/$DAEMON $DAEMONOPTS
RETVAL=$?
diff --git a/system/netdata-lsb.in b/system/netdata-lsb.in
index 3bbde1a49..e623f1e0c 100644
--- a/system/netdata-lsb.in
+++ b/system/netdata-lsb.in
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
#
### BEGIN INIT INFO
# Provides: netdata
@@ -19,7 +20,8 @@ ${DEBIAN_SCRIPT_DEBUG:+ set -v -x}
DAEMON="netdata"
DAEMON_PATH=@sbindir_POST@
-PIDFILE=@localstatedir_POST@/run/$DAEMON.pid
+PIDFILE_PATH=@localstatedir_POST@/run/netdata
+PIDFILE=$PIDFILE_PATH/$DAEMON.pid
DAEMONOPTS="-P $PIDFILE"
test -x $DAEMON_PATH/$DAEMON || exit 0
@@ -31,6 +33,10 @@ cd /
umask 022
service_start() {
+ if [ ! -d $PIDFILE_PATH ]; then
+ mkdir -p $PIDFILE_PATH
+ fi
+
log_daemon_msg "Starting real-time performance monitoring" "netdata"
start_daemon -p $PIDFILE $DAEMON_PATH/$DAEMON $DAEMONOPTS
RETVAL=$?
diff --git a/system/netdata-openrc.in b/system/netdata-openrc.in
index ce76f7e64..e0070526f 100644
--- a/system/netdata-openrc.in
+++ b/system/netdata-openrc.in
@@ -1,84 +1,58 @@
#!/sbin/openrc-run
+# SPDX-License-Identifier: GPL-3.0-or-later
# The user netdata is configured to run as.
# If you edit its configuration file to set a different
# user, set it here too, to have its files switch ownership
-: ${NETDATA_OWNER:=netdata:netdata}
-
-# The URL to download netdata config.
-: ${NETDATA_CONFIG_URL:=http://localhost:19999/netdata.conf}
+: "${NETDATA_OWNER:=netdata:netdata}"
# The timeout in seconds to wait for netdata
# to save its database on disk and exit.
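A quick usage sketch for the edit-config helper added above, assuming it was installed into the default user config directory /etc/netdata; the config file names are only examples of stock files shipped by netdata:

    # With no argument, edit-config prints its usage text and lists the stock
    # *.conf files available for copying:
    /etc/netdata/edit-config

    # Copy a stock file into the user config dir (if not already there) and
    # open it in $EDITOR (vi when EDITOR is unset):
    EDITOR=nano /etc/netdata/edit-config apps_groups.conf

    # A file that already exists in the user config dir is edited in place:
    /etc/netdata/edit-config health_alarm_notify.conf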
-: ${NETDATA_WAIT_EXIT_TIMEOUT:=60} +: "${NETDATA_WAIT_EXIT_TIMEOUT:=60}" # When set to 1, if netdata does not exit in # NETDATA_WAIT_EXIT_TIMEOUT, we will force it # to exit. -: ${NETDATA_FORCE_EXIT:=0} +: "${NETDATA_FORCE_EXIT:=0}" # Netdata will use these services, only if they # are enabled to start. -: ${NETDATA_START_AFTER_SERVICES:=apache2 squid nginx mysql named opensips upsd hostapd postfix lm_sensors} +: "${NETDATA_START_AFTER_SERVICES:=apache2 squid nginx mysql named opensips upsd hostapd postfix lm_sensors}" -extra_started_commands="getconf" -pidfile="/run/netdata.pid" +extra_started_commands="reload rotate save" +pidfile="@localstatedir_POST@/run/netdata/netdata.pid" command="@sbindir_POST@/netdata" -command_background="yes" command_args="-P ${pidfile} ${NETDATA_EXTRA_ARGS}" -# start_stop_daemon_args="-u ${NETDATA_OWNER}" -start_stop_daemon_args="" +start_stop_daemon_args="-u ${NETDATA_OWNER}" +required_files="/etc/netdata/netdata.conf" +if [ "${NETDATA_FORCE_EXIT}" -eq 1 ]; then + retry="TERM/${NETDATA_WAIT_EXIT_TIMEOUT}/KILL/1" +else + retry="TERM/${NETDATA_WAIT_EXIT_TIMEOUT}" +fi depend() { - use logger - need net - after ${NETDATA_START_AFTER_SERVICES} + use logger + need net + after ${NETDATA_START_AFTER_SERVICES} - checkpath -o ${NETDATA_OWNER} -d @localstatedir_POST@/cache/netdata /run/netdata + checkpath -o ${NETDATA_OWNER} -d @localstatedir_POST@/cache/netdata @localstatedir_POST@/run/netdata } -start_post() { - if [ ! -f @sysconfdir_POST@/netdata/netdata.conf ]; then - ebegin "Downloading default configuration to @sysconfdir_POST@/netdata/netdata.conf" - sleep 2 - curl -s -o @sysconfdir_POST@/netdata/netdata.conf.new "${NETDATA_CONFIG_URL}" - ret=$? - if [ $ret -eq 0 && -s @sysconfdir_POST@/netdata/netdata.conf.new ]; then - mv @sysconfdir_POST@/netdata/netdata.conf.new @sysconfdir_POST@/netdata/netdata.conf - else - ret=1 - rm @sysconfdir_POST@/netdata/netdata.conf.new 2>/dev/null - fi - eend $ret - fi +reload() { + ebegin "Reloading Netdata" + start-stop-daemon --signal SIGUSR2 --pidfile "${pidfile}" + eend $? "Failed to reload Netdata" } -stop_post() { - local result ret=0 count=0 sigkill=0 - - ebegin "Waiting for netdata to save its database" - while [ -f "${pidfile}" ]; do - if [ $count -gt ${NETDATA_WAIT_EXIT_TIMEOUT} ]; then - sigkill=1 - break - fi - - count=$((count + 1)) - kill -0 $(cat ${pidfile}) 2>/dev/null - ret=$? - test $ret -eq 0 && sleep 1 - done - eend $sigkill - - if [ $sigkill -eq 1 -a -f "${pidfile}" ]; then - ebegin "Netdata is taking too long to exit, forcing it to quit" - kill -SIGKILL $(cat ${pidfile}) 2>/dev/null - eend $? - fi +rotate() { + ebegin "Logrotating Netdata" + start-stop-daemon --signal SIGHUP --pidfile "${pidfile}" + eend $? "Failed to logrotate Netdata" } -getconf() { - ebegin "Downloading configuration from netdata to /tmp/netdata.conf" - curl -o /tmp/netdata.conf "${NETDATA_CONFIG_URL}" - eend $? +save() { + ebegin "Saving Netdata database" + start-stop-daemon --signal SIGUSR1 --pidfile "${pidfile}" + eend $? 
"Failed to save Netdata database" } diff --git a/system/netdata.logrotate.in b/system/netdata.logrotate.in index ae1ac50a7..a766b6cce 100644 --- a/system/netdata.logrotate.in +++ b/system/netdata.logrotate.in @@ -7,6 +7,6 @@ notifempty sharedscripts postrotate - /bin/kill -HUP `pidof netdata 2>/dev/null` 2>/dev/null || true + /bin/kill -HUP `cat @localstatedir_POST@/run/netdata/netdata.pid 2>/dev/null` 2>/dev/null || true endscript } diff --git a/system/netdata.plist.in b/system/netdata.plist.in new file mode 100644 index 000000000..a969b3177 --- /dev/null +++ b/system/netdata.plist.in @@ -0,0 +1,15 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<!-- SPDX-License-Identifier: GPL-3.0-or-later --> +<plist version="1.0"> +<dict> + <key>Label</key> + <string>com.github.netdata</string> + <key>ProgramArguments</key> + <array> + <string>@sbindir_POST@/netdata</string> + </array> + <key>RunAtLoad</key> + <true/> +</dict> +</plist> diff --git a/system/netdata.service.in b/system/netdata.service.in index 58cdff225..dd0ee3cca 100644 --- a/system/netdata.service.in +++ b/system/netdata.service.in @@ -1,5 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later [Unit] Description=Real time performance monitoring + +# append here other services you want netdata to wait for them to start After=network.target httpd.service squid.service nfs-server.service mysqld.service mysql.service named.service postfix.service chronyd.service [Service] @@ -8,9 +11,13 @@ User=netdata Group=netdata RuntimeDirectory=netdata RuntimeDirectoryMode=0775 -ExecStart=@sbindir_POST@/netdata -P /run/netdata/netdata.pid -D +PIDFile=@localstatedir_POST@/run/netdata/netdata.pid +ExecStart=@sbindir_POST@/netdata -P @localstatedir_POST@/run/netdata/netdata.pid -D -W set global 'process scheduling policy' 'keep' -W set global 'OOM score' 'keep' ExecStartPre=/bin/mkdir -p @localstatedir_POST@/cache/netdata ExecStartPre=/bin/chown -R netdata:netdata @localstatedir_POST@/cache/netdata +ExecStartPre=/bin/mkdir -p @localstatedir_POST@/run/netdata +ExecStartPre=/bin/chown -R netdata:netdata @localstatedir_POST@/run/netdata +#ExecStopPost=/bin/rm @localstatedir_POST@/run/netdata/netdata.pid PermissionsStartOnly=true # saving a big db on slow disks may need some time @@ -24,20 +31,17 @@ RestartSec=30 # netdata (via [global].OOM score in netdata.conf) can only increase the value set here. # To decrease it, set the minimum here and set the same or a higher value in netdata.conf. # Valid values: -1000 (never kill netdata) to 1000 (always kill netdata). -#OOMScoreAdjust=0 +OOMScoreAdjust=1000 -# By default netdata switches to scheduling policy idle, which makes it use CPU, only -# when there is spare available. # Valid policies: other (the system default) | batch | idle | fifo | rr -#CPUSchedulingPolicy=other +# To give netdata the max priority, set CPUSchedulingPolicy=rr and CPUSchedulingPriority=99 +CPUSchedulingPolicy=idle -# This sets the maximum scheduling priority netdata can set (for policies: rr and fifo). -# netdata (via [global].process scheduling priority in netdata.conf) can only lower this value. +# This sets the scheduling priority (for policies: rr and fifo). # Priority gets values 1 (lowest) to 99 (highest). #CPUSchedulingPriority=1 -# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata. -# netdata (via [global].process nice level in netdata.conf) can only increase the value set here. 
+# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata (-20 highest to 19 lowest). #Nice=0 [Install] diff --git a/tests/Makefile.am b/tests/Makefile.am index fe07653f1..722266d77 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,4 +1,6 @@ -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ README.md \ diff --git a/tests/Makefile.in b/tests/Makefile.in index ff5bd2f28..c9109dbdd 100644 --- a/tests/Makefile.in +++ b/tests/Makefile.in @@ -14,6 +14,8 @@ @SET_MAKE@ +# SPDX-License-Identifier: GPL-3.0-or-later + VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' @@ -83,14 +85,16 @@ subdir = tests DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d @@ -217,6 +221,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -238,6 +243,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ diff --git a/tests/node.d/fronius.chart.spec.js b/tests/node.d/fronius.chart.spec.js index 2d2b57cde..92e88d2ee 100644 --- a/tests/node.d/fronius.chart.spec.js +++ b/tests/node.d/fronius.chart.spec.js @@ -1,4 +1,5 @@ "use strict"; +// SPDX-License-Identifier: GPL-3.0-or-later var netdata = require("../../node.d/node_modules/netdata"); // remember: subject will be a singleton! @@ -158,4 +159,4 @@ describe("fronius chart creation", function () { expect(first).toBe(second); }); -}); \ No newline at end of file +}); diff --git a/tests/node.d/fronius.parse.spec.js b/tests/node.d/fronius.parse.spec.js index 0dbfa030d..e6f308fe3 100644 --- a/tests/node.d/fronius.parse.spec.js +++ b/tests/node.d/fronius.parse.spec.js @@ -1,4 +1,5 @@ "use strict"; +// SPDX-License-Identifier: GPL-3.0-or-later var netdata = require("../../node.d/node_modules/netdata"); // remember: subject will be a singleton! 
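Since the unit file above now ships with CPUSchedulingPolicy=idle and OOMScoreAdjust=1000, an administrator who prefers the opposite trade-off can override both without editing the installed unit. A sketch using a standard systemd drop-in; the values are illustrative, not recommendations from this patch:

    #!/bin/sh
    # Create a drop-in so local overrides survive upgrades of netdata.service:
    mkdir -p /etc/systemd/system/netdata.service.d
    cat >/etc/systemd/system/netdata.service.d/override.conf <<'EOF'
    [Service]
    # shield netdata from the OOM killer and schedule it like a normal process
    OOMScoreAdjust=0
    CPUSchedulingPolicy=other
    EOF
    systemctl daemon-reload
    systemctl restart netdata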
@@ -329,4 +330,4 @@ describe("fronius parsing for inverters", function () { expect(result.value).toBe(1000); }); -}); \ No newline at end of file +}); diff --git a/tests/node.d/fronius.process.spec.js b/tests/node.d/fronius.process.spec.js index daa84f390..141fa8a6a 100644 --- a/tests/node.d/fronius.process.spec.js +++ b/tests/node.d/fronius.process.spec.js @@ -1,4 +1,5 @@ "use strict"; +// SPDX-License-Identifier: GPL-3.0-or-later var netdata = require("../../node.d/node_modules/netdata"); // remember: subject will be a singleton! @@ -71,4 +72,4 @@ describe("fronius main processing", function () { }); -}); \ No newline at end of file +}); diff --git a/tests/node.d/fronius.validation.spec.js b/tests/node.d/fronius.validation.spec.js index 08b7e430f..b7938d52f 100644 --- a/tests/node.d/fronius.validation.spec.js +++ b/tests/node.d/fronius.validation.spec.js @@ -1,4 +1,5 @@ "use strict"; +// SPDX-License-Identifier: GPL-3.0-or-later var netdata = require("../../node.d/node_modules/netdata"); // remember: subject will be a singleton! @@ -151,4 +152,4 @@ describe("fronius configuration validation", function () { expect(subject.serviceExecute).toHaveBeenCalledWith(name2, "solar2.local/", 3); }); -}); \ No newline at end of file +}); diff --git a/tests/stress.sh b/tests/stress.sh index d09d69895..9c9393d3b 100755 --- a/tests/stress.sh +++ b/tests/stress.sh @@ -1,17 +1,19 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-3.0-or-later # set the host to connect to if [ ! -z "$1" ] - then +then host="$1" else host="http://127.0.0.1:19999" fi echo "using netdata server at: $host" +# shellcheck disable=SC2207 disable=SC1117 charts=($(curl "$host/netdata.conf" 2>/dev/null | grep "^\[" | cut -d '[' -f 2 | cut -d ']' -f 1 | grep -v ^global$ | grep -v "^plugin" | sort -u)) if [ "${#charts[@]}" -eq 0 ] - then +then echo "Cannot download charts from server: $host" exit 1 fi @@ -26,7 +28,7 @@ entries="$(curl "$host/netdata.conf" 2>/dev/null | grep "history = " | head -n 1 [ $entries -gt 3600 ] && entries=3600 if [ $entries -ne 3600 ] - then +then echo >&2 "You are running a test for a history of $entries entries." 
fi @@ -41,19 +43,20 @@ duration=$((now - first)) file="$(mktemp /tmp/netdata-stress-XXXXXXXX)" cleanup() { echo "cleanup" - [ -f $file ] && rm $file + [ -f "$file" ] && rm "$file" } trap cleanup EXIT -while [ 1 = 1 ] +while true do - echo "curl --compressed --keepalive-time 120 --header \"Connection: keep-alive\" \\" >$file + echo "curl --compressed --keepalive-time 120 --header \"Connection: keep-alive\" \\" >"$file" + # shellcheck disable=SC2034 for x in {1..100} do dt=$((RANDOM * duration / 32767)) st=$((RANDOM * duration / 32767)) et=$(( st + dt )) - [ $et -gt $now ] && st=$(( now - dt )) + [ $et -gt "$now" ] && st=$(( now - dt )) points=$((RANDOM * 2000 / 32767 + 2)) st=$((first + st)) @@ -69,6 +72,6 @@ do format="${formats[$format]}" echo "--url \"$host/api/v1/data?chart=$chart&mode=$mode&format=$format&options=$options&after=$st&before=$et&points=$points\" \\" - done >>$file - bash $file >/dev/null + done >>"$file" + bash "$file" >/dev/null done diff --git a/tests/web/easypiechart.chart.spec.js b/tests/web/easypiechart.chart.spec.js index 8f5e49631..23bf33d69 100644 --- a/tests/web/easypiechart.chart.spec.js +++ b/tests/web/easypiechart.chart.spec.js @@ -36,4 +36,4 @@ describe("creation of easy pie charts", function () { }; } -}); \ No newline at end of file +}); diff --git a/tests/web/easypiechart.percentage.spec.js b/tests/web/easypiechart.percentage.spec.js index e6168bdd7..976b33964 100644 --- a/tests/web/easypiechart.percentage.spec.js +++ b/tests/web/easypiechart.percentage.spec.js @@ -139,4 +139,4 @@ function createState(min, max) { easyPieChartMax: max } }; -} \ No newline at end of file +} diff --git a/web/Makefile.am b/web/Makefile.am index ac9228744..1ec8d586d 100644 --- a/web/Makefile.am +++ b/web/Makefile.am @@ -1,128 +1,14 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in +# SPDX-License-Identifier: GPL-3.0-or-later -dist_web_DATA = \ - demo.html \ - demo2.html \ - demosites.html \ - demosites2.html \ - dashboard.html \ - dashboard.js \ - dashboard_info.js \ - dashboard_info_custom_example.js \ - dashboard.css \ - dashboard.slate.css \ - favicon.ico \ - goto-host-from-alarm.html \ - index.html \ - infographic.html \ - netdata-swagger.yaml \ - netdata-swagger.json \ - robots.txt \ - refresh-badges.js \ - registry.html \ - sitemap.xml \ - tv.html \ - version.txt \ - $(NULL) - -webolddir=$(webdir)/old -dist_webold_DATA = \ - old/datasource.html \ - old/index.html \ - old/index.js \ - old/netdata.js \ - old/theme.css \ - $(NULL) - -weblibdir=$(webdir)/lib -dist_weblib_DATA = \ - lib/bootstrap-3.3.7.min.js \ - lib/bootstrap-slider-10.0.0.min.js \ - lib/bootstrap-table-1.11.0.min.js \ - lib/bootstrap-table-export-1.11.0.min.js \ - lib/bootstrap-toggle-2.2.2.min.js \ - lib/clipboard-polyfill-be05dad.js \ - lib/c3-0.4.18.min.js \ - lib/d3-4.12.2.min.js \ - lib/d3pie-0.2.1-netdata-3.js \ - lib/dygraph-c91c859.min.js \ - lib/dygraph-smooth-plotter-c91c859.js \ - lib/fontawesome-all-5.0.1.min.js \ - lib/gauge-1.3.2.min.js \ - lib/jquery-2.2.4.min.js \ - lib/jquery.easypiechart-97b5824.min.js \ - lib/jquery.peity-3.2.0.min.js \ - lib/jquery.sparkline-2.1.2.min.js \ - lib/lz-string-1.4.4.min.js \ - lib/morris-0.5.1.min.js \ - lib/pako-1.0.6.min.js \ - lib/perfect-scrollbar-0.6.15.min.js \ - lib/raphael-2.2.4-min.js \ - lib/tableExport-1.6.0.min.js \ - $(NULL) - -webcssdir=$(webdir)/css -dist_webcss_DATA = \ - css/morris-0.5.1.css \ - css/bootstrap-3.3.7.css \ - css/bootstrap-theme-3.3.7.min.css \ - 
css/bootstrap-slate-flat-3.3.7.css \ - css/bootstrap-slider-10.0.0.min.css \ - css/bootstrap-toggle-2.2.2.min.css \ - css/c3-0.4.18.min.css \ - $(NULL) +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -webfontsdir=$(webdir)/fonts -dist_webfonts_DATA = \ - fonts/glyphicons-halflings-regular.eot \ - fonts/glyphicons-halflings-regular.svg \ - fonts/glyphicons-halflings-regular.ttf \ - fonts/glyphicons-halflings-regular.woff \ - fonts/glyphicons-halflings-regular.woff2 \ - $(NULL) +SUBDIRS = \ + api \ + gui \ + server \ + $(NULL) -webimagesdir=$(webdir)/images -dist_webimages_DATA = \ - images/alert-128-orange.png \ - images/alert-128-red.png \ - images/alert-multi-size-orange.ico \ - images/alert-multi-size-red.ico \ - images/animated.gif \ - images/check-mark-2-128-green.png \ - images/check-mark-2-multi-size-green.ico \ - images/post.png \ - images/seo-performance-16.png \ - images/seo-performance-24.png \ - images/seo-performance-32.png \ - images/seo-performance-48.png \ - images/seo-performance-64.png \ - images/seo-performance-72.png \ - images/seo-performance-114.png \ - images/seo-performance-128.png \ - images/seo-performance-256.png \ - images/seo-performance-512.png \ - images/seo-performance-multi-size.ico \ - images/seo-performance-multi-size.icns \ +dist_noinst_DATA = \ + README.md \ $(NULL) - - -webwellknowndir=$(webdir)/.well-known -dist_webwellknown_DATA = \ - $(NULL) - -webdntdir=$(webdir)/.well-known/dnt -dist_webdnt_DATA = \ - .well-known/dnt/cookies \ - $(NULL) - -version.txt: - if test -d "$(top_srcdir)/.git"; then \ - git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ - fi > $@.tmp - test -s $@.tmp || echo 0 > $@.tmp - mv $@.tmp $@ - -.PHONY: version.txt diff --git a/web/Makefile.in b/web/Makefile.in index 78900cc7f..d598811bc 100644 --- a/web/Makefile.in +++ b/web/Makefile.in @@ -14,6 +14,8 @@ @SET_MAKE@ +# SPDX-License-Identifier: GPL-3.0-or-later + VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ @@ -80,19 +82,18 @@ build_triplet = @build@ host_triplet = @host@ subdir = web DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_web_DATA) $(dist_webcss_DATA) $(dist_webdnt_DATA) \ - $(dist_webfonts_DATA) $(dist_webimages_DATA) \ - $(dist_weblib_DATA) $(dist_webold_DATA) \ - $(dist_webwellknown_DATA) + $(dist_noinst_DATA) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d @@ -113,48 +114,74 @@ am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = 
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" \ - "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" \ - "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" \ - "$(DESTDIR)$(webolddir)" "$(DESTDIR)$(webwellknowndir)" -DATA = $(dist_web_DATA) $(dist_webcss_DATA) $(dist_webdnt_DATA) \ - $(dist_webfonts_DATA) $(dist_webimages_DATA) \ - $(dist_weblib_DATA) $(dist_webold_DATA) \ - $(dist_webwellknown_DATA) +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ @@ -252,6 +279,7 @@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ +build_target = @build_target@ build_vendor = @build_vendor@ builddir = @builddir@ cachedir = @cachedir@ @@ -273,6 +301,7 @@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ +libconfigdir = @libconfigdir@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ @@ -299,126 +328,19 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ varlibdir = @varlibdir@ webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# +AUTOMAKE_OPTIONS = subdir-objects MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_web_DATA = \ - demo.html \ - demo2.html \ - demosites.html \ - demosites2.html \ - dashboard.html \ - dashboard.js \ - dashboard_info.js \ - dashboard_info_custom_example.js \ - dashboard.css \ - dashboard.slate.css \ - favicon.ico \ - goto-host-from-alarm.html \ - index.html \ - infographic.html \ - netdata-swagger.yaml \ - netdata-swagger.json \ - robots.txt \ - refresh-badges.js \ - registry.html \ - sitemap.xml \ - tv.html \ - version.txt \ - $(NULL) - -webolddir = $(webdir)/old -dist_webold_DATA = \ - old/datasource.html \ - old/index.html \ - old/index.js \ - old/netdata.js \ - old/theme.css \ - $(NULL) - -weblibdir = $(webdir)/lib -dist_weblib_DATA = \ - lib/bootstrap-3.3.7.min.js \ - lib/bootstrap-slider-10.0.0.min.js \ - lib/bootstrap-table-1.11.0.min.js \ - lib/bootstrap-table-export-1.11.0.min.js \ - lib/bootstrap-toggle-2.2.2.min.js \ - lib/clipboard-polyfill-be05dad.js \ - lib/c3-0.4.18.min.js \ - lib/d3-4.12.2.min.js \ - lib/d3pie-0.2.1-netdata-3.js \ - lib/dygraph-c91c859.min.js \ - lib/dygraph-smooth-plotter-c91c859.js \ - lib/fontawesome-all-5.0.1.min.js \ - lib/gauge-1.3.2.min.js \ - lib/jquery-2.2.4.min.js \ - lib/jquery.easypiechart-97b5824.min.js \ - lib/jquery.peity-3.2.0.min.js \ - lib/jquery.sparkline-2.1.2.min.js \ - lib/lz-string-1.4.4.min.js \ - lib/morris-0.5.1.min.js \ - lib/pako-1.0.6.min.js \ - lib/perfect-scrollbar-0.6.15.min.js \ - lib/raphael-2.2.4-min.js \ - lib/tableExport-1.6.0.min.js \ - $(NULL) - -webcssdir = $(webdir)/css -dist_webcss_DATA = \ - css/morris-0.5.1.css \ - css/bootstrap-3.3.7.css \ - css/bootstrap-theme-3.3.7.min.css \ - css/bootstrap-slate-flat-3.3.7.css \ - css/bootstrap-slider-10.0.0.min.css \ - 
css/bootstrap-toggle-2.2.2.min.css \ - css/c3-0.4.18.min.css \ - $(NULL) - -webfontsdir = $(webdir)/fonts -dist_webfonts_DATA = \ - fonts/glyphicons-halflings-regular.eot \ - fonts/glyphicons-halflings-regular.svg \ - fonts/glyphicons-halflings-regular.ttf \ - fonts/glyphicons-halflings-regular.woff \ - fonts/glyphicons-halflings-regular.woff2 \ - $(NULL) - -webimagesdir = $(webdir)/images -dist_webimages_DATA = \ - images/alert-128-orange.png \ - images/alert-128-red.png \ - images/alert-multi-size-orange.ico \ - images/alert-multi-size-red.ico \ - images/animated.gif \ - images/check-mark-2-128-green.png \ - images/check-mark-2-multi-size-green.ico \ - images/post.png \ - images/seo-performance-16.png \ - images/seo-performance-24.png \ - images/seo-performance-32.png \ - images/seo-performance-48.png \ - images/seo-performance-64.png \ - images/seo-performance-72.png \ - images/seo-performance-114.png \ - images/seo-performance-128.png \ - images/seo-performance-256.png \ - images/seo-performance-512.png \ - images/seo-performance-multi-size.ico \ - images/seo-performance-multi-size.icns \ - $(NULL) - -webwellknowndir = $(webdir)/.well-known -dist_webwellknown_DATA = \ +SUBDIRS = \ + api \ + gui \ + server \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ $(NULL) -webdntdir = $(webdir)/.well-known/dnt -dist_webdnt_DATA = \ - .well-known/dnt/cookies \ - $(NULL) - -all: all-am +all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @@ -451,180 +373,105 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): -install-dist_webDATA: $(dist_web_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \ - done - -uninstall-dist_webDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir) -install-dist_webcssDATA: $(dist_webcss_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webcssdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webcssdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webcssdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webcssdir)" || exit $$?; \ - done - -uninstall-dist_webcssDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webcssdir)'; $(am__uninstall_files_from_dir) -install-dist_webdntDATA: $(dist_webdnt_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webdntdir)'"; 
\ - $(MKDIR_P) "$(DESTDIR)$(webdntdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdntdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webdntdir)" || exit $$?; \ - done - -uninstall-dist_webdntDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webdntdir)'; $(am__uninstall_files_from_dir) -install-dist_webfontsDATA: $(dist_webfonts_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webfontsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webfontsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webfontsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webfontsdir)" || exit $$?; \ - done - -uninstall-dist_webfontsDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webfontsdir)'; $(am__uninstall_files_from_dir) -install-dist_webimagesDATA: $(dist_webimages_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webimagesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webimagesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webimagesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webimagesdir)" || exit $$?; \ - done - -uninstall-dist_webimagesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webimagesdir)'; $(am__uninstall_files_from_dir) -install-dist_weblibDATA: $(dist_weblib_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(weblibdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(weblibdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(weblibdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(weblibdir)" || exit $$?; \ - done -uninstall-dist_weblibDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(weblibdir)'; $(am__uninstall_files_from_dir) -install-dist_weboldDATA: $(dist_webold_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webold_DATA)'; test -n "$(webolddir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webolddir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webolddir)" || exit 1; \ +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. 
+# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webolddir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webolddir)" || exit $$?; \ - done - -uninstall-dist_weboldDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webold_DATA)'; test -n "$(webolddir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webolddir)'; $(am__uninstall_files_from_dir) -install-dist_webwellknownDATA: $(dist_webwellknown_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webwellknowndir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webwellknowndir)" || exit 1; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webwellknowndir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webwellknowndir)" || exit $$?; \ - done - -uninstall-dist_webwellknownDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webwellknowndir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ @@ -656,22 +503,45 @@ distdir: $(DISTFILES) || exit 1; \ fi; \ done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done check-am: all-am -check: check-am +check: check-recursive all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" "$(DESTDIR)$(webolddir)" "$(DESTDIR)$(webwellknowndir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am -installcheck: installcheck-am +installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ @@ -694,108 +564,86 @@ maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am +clean: clean-recursive clean-am: clean-generic mostlyclean-am -distclean: distclean-am +distclean: distclean-recursive -rm -f Makefile -distclean-am: clean-am distclean-generic +distclean-am: clean-am distclean-generic distclean-tags -dvi: dvi-am +dvi: dvi-recursive dvi-am: -html: html-am +html: html-recursive html-am: -info: info-am +info: info-recursive info-am: -install-data-am: install-dist_webDATA install-dist_webcssDATA \ - install-dist_webdntDATA install-dist_webfontsDATA \ - install-dist_webimagesDATA install-dist_weblibDATA \ - install-dist_weboldDATA install-dist_webwellknownDATA +install-data-am: -install-dvi: install-dvi-am +install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: -install-html: install-html-am +install-html: install-html-recursive install-html-am: -install-info: install-info-am +install-info: install-info-recursive install-info-am: install-man: -install-pdf: install-pdf-am +install-pdf: install-pdf-recursive install-pdf-am: -install-ps: install-ps-am +install-ps: install-ps-recursive install-ps-am: installcheck-am: -maintainer-clean: maintainer-clean-am +maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic -mostlyclean: mostlyclean-am +mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic -pdf: pdf-am +pdf: pdf-recursive pdf-am: -ps: ps-am +ps: ps-recursive ps-am: -uninstall-am: uninstall-dist_webDATA uninstall-dist_webcssDATA \ - uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \ - uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \ - uninstall-dist_weboldDATA uninstall-dist_webwellknownDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_webDATA install-dist_webcssDATA \ - install-dist_webdntDATA install-dist_webfontsDATA \ - install-dist_webimagesDATA install-dist_weblibDATA \ - install-dist_weboldDATA install-dist_webwellknownDATA \ - install-dvi install-dvi-am install-exec install-exec-am \ - install-html install-html-am install-info install-info-am \ - install-man install-pdf install-pdf-am install-ps \ - install-ps-am install-strip installcheck installcheck-am \ - installdirs maintainer-clean maintainer-clean-generic \ - mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \ - uninstall uninstall-am uninstall-dist_webDATA \ - uninstall-dist_webcssDATA uninstall-dist_webdntDATA \ - uninstall-dist_webfontsDATA uninstall-dist_webimagesDATA \ - uninstall-dist_weblibDATA uninstall-dist_weboldDATA \ - uninstall-dist_webwellknownDATA - - -version.txt: - if test -d "$(top_srcdir)/.git"; then \ - git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ - fi > $@.tmp - test -s $@.tmp || echo 0 > $@.tmp - mv $@.tmp $@ - -.PHONY: version.txt +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + 
install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. diff --git a/web/README.md b/web/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/web/api/Makefile.am b/web/api/Makefile.am new file mode 100644 index 000000000..5755612c8 --- /dev/null +++ b/web/api/Makefile.am @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + badges \ + queries \ + exporters \ + formatters \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_web_DATA = \ + netdata-swagger.yaml \ + netdata-swagger.json \ + $(NULL) diff --git a/web/api/Makefile.in b/web/api/Makefile.in new file mode 100644 index 000000000..7bcd57e1a --- /dev/null +++ b/web/api/Makefile.in @@ -0,0 +1,709 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) $(dist_web_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 
2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(webdir)" +DATA = $(dist_noinst_DATA) $(dist_web_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = 
@abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + badges \ + queries \ + exporters \ + formatters \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_web_DATA = \ + netdata-swagger.yaml \ + netdata-swagger.json \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_webDATA: $(dist_web_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \ + done + +uninstall-dist_webDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(webdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-dist_webDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-dist_webDATA + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dist_webDATA install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \ + tags-am uninstall uninstall-am uninstall-dist_webDATA + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/README.md b/web/api/README.md new file mode 100644 index 000000000..973d1bb6b --- /dev/null +++ b/web/api/README.md @@ -0,0 +1,18 @@ +# netdata REST API + +The complete documentation of the netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**. + +If your prefer it over the Swagger Editor, you can also use **[Swagger UI](https://registry.my-netdata.io/swagger/#!/default/get_data)**. This however does not provide all the information available. + +## google charts + +netdata is a [Google Visualization API datatable and datasource provider](https://developers.google.com/chart/interactive/docs/reference), so it can directly be used with [Google Charts](https://developers.google.com/chart/interactive/docs/). 
+ +Check this [single chart, jsfiddle example](https://jsfiddle.net/ktsaou/ensu4uws/9/): + +![image](https://cloud.githubusercontent.com/assets/2662304/23824762/1e236b84-0685-11e7-89f4-06fdf67d873a.png) + +and this [multi chart, jsfiddle example](https://jsfiddle.net/ktsaou/L5y2eqp2/): + +![image](https://cloud.githubusercontent.com/assets/2662304/23824766/31a4a68c-0685-11e7-8429-8327cab64be2.png) + diff --git a/web/api/badges/Makefile.am b/web/api/badges/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/badges/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/badges/Makefile.in b/web/api/badges/Makefile.in new file mode 100644 index 000000000..79a3a17a2 --- /dev/null +++ b/web/api/badges/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/badges +DIST_COMMON 
= $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR 
= @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/badges/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/badges/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/badges/README.md b/web/api/badges/README.md new file mode 100644 index 000000000..cf0b22bea --- /dev/null +++ b/web/api/badges/README.md @@ -0,0 +1,324 @@ +# Netdata badges + +**Badges are cool!** + +Netdata can generate badges for any chart and any dimension at any time-frame. Badges come in `SVG` and can be added to any web page using an `<IMG>` HTML tag. + +**Netdata badges are powerful**! + +Given that netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful. + +For each metric/dimension and for arbitrary time-frames badges can show **min**, **max** or **average** value, but also **sum** or **incremental-sum** to have their **volume**. + +For example, there is [a chart in netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). 
Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc): + +<a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&value_color=grey:null%7Cblue&label=nginx%20active%20connections%20now&units=null&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&after=-3600&value_color=orange&label=last%20hour%20average&units=null&options=unaligned&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&group=max&after=-3600&value_color=red&label=last%20hour%20max&units=null&options=unaligned&precision=0"/></a> + +Similarly, there is [a chart that shows outbound bandwidth per class](http://london.my-netdata.io/#tc_eth0), using QoS data. So it shows `kilobits/s` per class. Using this chart we can show: + +<a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&value_color=green&label=web%20server%20sends%20now&units=kbps"/></a> <a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&after=-86400&options=unaligned&group=sum&divide=8388608&value_color=blue&label=web%20server%20sent%20today&units=GB"/></a> + +The right one is a **volume** calculation. Netdata calculated the total of the last 86,400 seconds (a day), which gives `kilobits`, then divided it by 8 to make it kilobytes (KB), then by 1024 to make it MB and then by 1024 again to make it GB. Calculations like this are quite accurate, since for every value collected, every second, netdata interpolates it to the second boundary using microsecond calculations. + +Let's see a few more badge examples (they come from the [netdata registry](https://github.com/netdata/netdata/wiki/mynetdata-menu-item)): + +- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL).
+ + <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img></a> <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&after=-3600&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20average%20cpu%20last%20hour&units=%25"></img></a> + +- **mysql queries per second** + + <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&label=mysql%20queries%20now&value_color=red&units=%5Cs"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-3600&options=unaligned&group=sum&label=mysql%20queries%20this%20hour&value_color=green&units=null"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-86400&options=unaligned&group=sum&label=mysql%20queries%20today&value_color=blue&units=null"></img></a> + + niche ones: **mysql SELECT statements with JOIN, which did full table scans**: + + <a href="https://registry.my-netdata.io/#mysql_local_issues"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.join_issues&dimensions=scan&after=-3600&label=full%20table%20scans%20the%20last%20hour&value_color=orange&group=sum&units=null"></img></a> + +--- + +> So, every single line on the charts of a [netdata dashboard](http://london.my-netdata.io/), can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value. + +--- + +## How to create badges + +The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`. + +Here is what you can put for `options` (these are standard netdata API options): + +- `chart=CHART.NAME` + + The chart to get the values from. + + **This is the only parameter required** and with just this parameter, netdata will return the sum of the latest values of all chart dimensions. + + Example: + +```html + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img> + </a> +``` + + Which produces this: + + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img> + </a> + +- `alarm=NAME` + + Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm. + + The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm. + + For alarm badges, **both `chart` and `alarm` parameters are required**. + +- `dimensions=DIMENSION1|DIMENSION2|...` + + The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them. + + Pipes in HTML have to escaped with `%7C`. 
+ + Example: + +```html + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img> + </a> +``` + + Which produces this: + + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img> + </a> + +- `before=SECONDS` and `after=SECONDS` + + The timeframe. These can be absolute unix timestamps, or numbers of seconds relative to now. By default `before=0` and `after=-1` (1 second in the past). + + To get the last minute, set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59). + + To get the max of the last hour, set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59). + + Example: + +```html + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img> + </a> +``` + + Which produces the average of the last complete minute (XX:XX:00 - XX:XX:59): + + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img> + </a> + + While this is the previous minute (one minute before the last one, again aligned XX:XX:00 - XX:XX:59): + +```html + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img> + </a> +``` + + It produces this: + + <a href="#"> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img> + </a> + +- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum` + + If netdata has to reduce (aggregate) the data to calculate the value, this selects the aggregation method to use. + + - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value. + + - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero. + + - `average` will calculate the average value for the timeframe. + + - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe by adding its values in that timeframe. + + - `incremental-sum` will sum the difference of each value to the next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this: "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1). + +- `options=opt1|opt2|opt3|...` + + These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges): + + - `percentage`, instead of returning the value, calculates the percentage of the sum of the selected dimensions versus the sum of all the dimensions of the chart. This also sets the units to `%`. + + - `absolute` or `abs`, turns all values positive and then sums them. + + - `display_absolute` or `display-absolute`, uses the signed value during color calculation, but displays the absolute value on the badge. + + - `min2max`, when multiple dimensions are given, does not sum them, but takes their `max - min`.
+ + - `unaligned`, when data are reduced/aggregated (e.g. the request is for the average of the last minute or hour), netdata by default aligns them so that the charts will have a constant shape (so the average per minute always covers XX:XX:00 - XX:XX:59). With the `unaligned` option, netdata aggregates data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data. + +These are options dedicated to badges: + +- `label=TEXT` + + The label of the badge. + +- `units=TEXT` + + The units of the badge. If you want to put a `/`, use a `\` instead. This is because netdata also allows badge parameters to be given as a path in the URL, instead of a query string. You can also use `null` or `empty` to show it without any units. + + The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and netdata will automatically change it to show a prettier duration. + +- `multiply=NUMBER` + + Multiply the value by this number. The default is `1`. + +- `divide=NUMBER` + + Divide the value by this number. The default is `1`. + +- `label_color=COLOR` + + The color of the label (the left part). You can use any HTML color, including `#NNN` and `#NNNNNN`. The following colors are defined in netdata (and you can use them by name): `green`, `brightgreen`, `yellow`, `yellowgreen`, `orange`, `red`, `blue`, `grey`, `gray`, `lightgrey`, `lightgray`. These are taken from https://github.com/badges/shields so they are compatible with standard badges. + +- `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...` + + You can add a pipe-delimited list of conditions to pick the color. The first matching (left to right) will be used. + + Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red` + + The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` setting in `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and so on, up to `red`, which will be used if no other condition matches. + + The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`) and `!=` (or `<>`). + +- `precision=NUMBER` + + The number of decimal digits of the value. By default netdata will add: + + - no decimal digits for values > 1000 + - 1 decimal digit for values > 100 + - 2 decimal digits for values > 1 + - 3 decimal digits for values > 0.1 + - 4 decimal digits for values <= 0.1 + + Using `precision=NUMBER` you can set your preference per badge. + +- `scale=XXX` + + This option scales the SVG image. It accepts values greater than or equal to 100 (100% is the default scale). For example, let's get a few different sizes: + + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=100"></img> original<br/> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=125"></img> `scale=125`<br/> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=150"></img> `scale=150`<br/> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=175"></img> `scale=175`<br/> + <img src="http://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=200"></img> `scale=200` + + +- `refresh=auto` or `refresh=SECONDS` + + This option enables auto-refreshing of images.
netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals. + + `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default). + + Auto-refreshing like this works only if you access the badge directly. So you may have to put it in an `embed` or `iframe` for it to be auto-refreshed. Use something like this: + +```html +<embed src="BADGE_URL" type="image/svg+xml" height="20" /> +``` + + Another way is to use javascript to auto-refresh all the netdata badges on a page. You have to add a class to all the netdata badges, like this: `<img class="netdata-badge" src="..."/>`. Then add this javascript code to your page (it requires jQuery): + +```html +<script> + var NETDATA_BADGES_AUTOREFRESH_SECONDS = 5; + function refreshNetdataBadges() { + var now = new Date().getTime().toString(); + $('.netdata-badge').each(function() { + this.src = this.src.replace(/\&_=\d*/, '') + '&_=' + now; + }); + setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000); + } + setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000); +</script> +``` + +A more advanced badge refresh method is to include `http://your.netdata.ip:19999/refresh-badges.js` in your page. For more information and a usage example, [check this](https://github.com/netdata/netdata/blob/master/web/gui/refresh-badges.js). + +--- + +## Escaping URLs + +Keep in mind that if you add badge URLs to your HTML pages you have to escape the special characters: + +character|name|escape sequence +:-------:|:--:|:-------------: +` `|space (in labels and units)|`%20` +` # `|hash (for colors)|`%23` +` % `|percent (in units)|`%25` +` < `|less than|`%3C` +` > `|greater than|`%3E` +` \ `|backslash (when you need a `/`)|`%5C` +` \| `|pipe (delimiting parameters)|`%7C` + +--- + +## Using the path instead of the query string + +The badges can also be generated using the URL path for passing parameters. The format is exactly the same. + +So instead of: + + `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...` + +you can write: + + `http://your.netdata:19999/api/v1/badge.svg/option1/option2/option3/...` + +You can also append anything else you like, like this: + + `http://your.netdata:19999/api/v1/badge.svg/option1/option2/option3/my-super-badge.svg` + +## FAQ + +#### Is it fast? +On modern hardware, netdata can generate about **2,000 badges per second per core** before any delays become noticeable. It generates a badge in about half a millisecond! + +Of course these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), timing will differ. netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge to a busy web site. Of course, you can cache such badges or have a cron job get them from netdata and save them on your web server at regular intervals. + + +#### Embedding badges in GitHub + +You have two options: a) SVG images with markdown, and b) SVG images with HTML (directly in `.md` files).
+ +For example, this is the cpu badge shown above: + +- Markdown example: + +```md +[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu) +``` + +- HTML example: + +```html +<a href="https://registry.my-netdata.io/#apps_cpu"> + <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img> +</a> +``` + +Both produce this: + +<a href="https://registry.my-netdata.io/#apps_cpu"> + <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img> +</a> + +#### Auto-refreshing badges in GitHub + +Unfortunately, it cannot be done. GitHub fetches all the images using a proxy and rewrites all the URLs to be served by the proxy. + +You can refresh them from your browser console though. Press F12 to open the web browser console (switch to the console too), paste the following and press Enter. They will refresh: + +```js +var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); }; +``` \ No newline at end of file diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c new file mode 100644 index 000000000..d0600359e --- /dev/null +++ b/web/api/badges/web_buffer_svg.c @@ -0,0 +1,1142 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web_buffer_svg.h" + +#define BADGE_HORIZONTAL_PADDING 4 +#define VERDANA_KERNING 0.2 +#define VERDANA_PADDING 1.0 + +/* + * verdana11_widths[] has been generated with this method: + * https://github.com/badges/shields/blob/master/measure-text.js +*/ + +static double verdana11_widths[256] = { + [0] = 0.0, + [1] = 0.0, + [2] = 0.0, + [3] = 0.0, + [4] = 0.0, + [5] = 0.0, + [6] = 0.0, + [7] = 0.0, + [8] = 0.0, + [9] = 0.0, + [10] = 0.0, + [11] = 0.0, + [12] = 0.0, + [13] = 0.0, + [14] = 0.0, + [15] = 0.0, + [16] = 0.0, + [17] = 0.0, + [18] = 0.0, + [19] = 0.0, + [20] = 0.0, + [21] = 0.0, + [22] = 0.0, + [23] = 0.0, + [24] = 0.0, + [25] = 0.0, + [26] = 0.0, + [27] = 0.0, + [28] = 0.0, + [29] = 0.0, + [30] = 0.0, + [31] = 0.0, + [32] = 3.8671874999999996, // + [33] = 4.3291015625, // ! + [34] = 5.048828125, // " + [35] = 9.001953125, // # + [36] = 6.9931640625, // $ + [37] = 11.837890625, // % + [38] = 7.992187499999999, // & + [39] = 2.9541015625, // ' + [40] = 4.9951171875, // ( + [41] = 4.9951171875, // ) + [42] = 6.9931640625, // * + [43] = 9.001953125, // + + [44] = 4.00146484375, // , + [45] = 4.9951171875, // - + [46] = 4.00146484375, // . + [47] = 4.9951171875, // / + [48] = 6.9931640625, // 0 + [49] = 6.9931640625, // 1 + [50] = 6.9931640625, // 2 + [51] = 6.9931640625, // 3 + [52] = 6.9931640625, // 4 + [53] = 6.9931640625, // 5 + [54] = 6.9931640625, // 6 + [55] = 6.9931640625, // 7 + [56] = 6.9931640625, // 8 + [57] = 6.9931640625, // 9 + [58] = 4.9951171875, // : + [59] = 4.9951171875, // ; + [60] = 9.001953125, // < + [61] = 9.001953125, // = + [62] = 9.001953125, // > + [63] = 5.99951171875, // ?
+ [64] = 11.0, // @ + [65] = 7.51953125, // A + [66] = 7.541015625, // B + [67] = 7.680664062499999, // C + [68] = 8.4755859375, // D + [69] = 6.95556640625, // E + [70] = 6.32177734375, // F + [71] = 8.529296875, // G + [72] = 8.26611328125, // H + [73] = 4.6298828125, // I + [74] = 5.00048828125, // J + [75] = 7.62158203125, // K + [76] = 6.123046875, // L + [77] = 9.2705078125, // M + [78] = 8.228515625, // N + [79] = 8.658203125, // O + [80] = 6.63330078125, // P + [81] = 8.658203125, // Q + [82] = 7.6484375, // R + [83] = 7.51953125, // S + [84] = 6.7783203125, // T + [85] = 8.05126953125, // U + [86] = 7.51953125, // V + [87] = 10.87646484375, // W + [88] = 7.53564453125, // X + [89] = 6.767578125, // Y + [90] = 7.53564453125, // Z + [91] = 4.9951171875, // [ + [92] = 4.9951171875, // backslash + [93] = 4.9951171875, // ] + [94] = 9.001953125, // ^ + [95] = 6.9931640625, // _ + [96] = 6.9931640625, // ` + [97] = 6.6064453125, // a + [98] = 6.853515625, // b + [99] = 5.73095703125, // c + [100] = 6.853515625, // d + [101] = 6.552734375, // e + [102] = 3.8671874999999996, // f + [103] = 6.853515625, // g + [104] = 6.9609375, // h + [105] = 3.0185546875, // i + [106] = 3.78662109375, // j + [107] = 6.509765625, // k + [108] = 3.0185546875, // l + [109] = 10.69921875, // m + [110] = 6.9609375, // n + [111] = 6.67626953125, // o + [112] = 6.853515625, // p + [113] = 6.853515625, // q + [114] = 4.6943359375, // r + [115] = 5.73095703125, // s + [116] = 4.33447265625, // t + [117] = 6.9609375, // u + [118] = 6.509765625, // v + [119] = 9.001953125, // w + [120] = 6.509765625, // x + [121] = 6.509765625, // y + [122] = 5.779296875, // z + [123] = 6.982421875, // { + [124] = 4.9951171875, // | + [125] = 6.982421875, // } + [126] = 9.001953125, // ~ + [127] = 0.0, + [128] = 0.0, + [129] = 0.0, + [130] = 0.0, + [131] = 0.0, + [132] = 0.0, + [133] = 0.0, + [134] = 0.0, + [135] = 0.0, + [136] = 0.0, + [137] = 0.0, + [138] = 0.0, + [139] = 0.0, + [140] = 0.0, + [141] = 0.0, + [142] = 0.0, + [143] = 0.0, + [144] = 0.0, + [145] = 0.0, + [146] = 0.0, + [147] = 0.0, + [148] = 0.0, + [149] = 0.0, + [150] = 0.0, + [151] = 0.0, + [152] = 0.0, + [153] = 0.0, + [154] = 0.0, + [155] = 0.0, + [156] = 0.0, + [157] = 0.0, + [158] = 0.0, + [159] = 0.0, + [160] = 0.0, + [161] = 0.0, + [162] = 0.0, + [163] = 0.0, + [164] = 0.0, + [165] = 0.0, + [166] = 0.0, + [167] = 0.0, + [168] = 0.0, + [169] = 0.0, + [170] = 0.0, + [171] = 0.0, + [172] = 0.0, + [173] = 0.0, + [174] = 0.0, + [175] = 0.0, + [176] = 0.0, + [177] = 0.0, + [178] = 0.0, + [179] = 0.0, + [180] = 0.0, + [181] = 0.0, + [182] = 0.0, + [183] = 0.0, + [184] = 0.0, + [185] = 0.0, + [186] = 0.0, + [187] = 0.0, + [188] = 0.0, + [189] = 0.0, + [190] = 0.0, + [191] = 0.0, + [192] = 0.0, + [193] = 0.0, + [194] = 0.0, + [195] = 0.0, + [196] = 0.0, + [197] = 0.0, + [198] = 0.0, + [199] = 0.0, + [200] = 0.0, + [201] = 0.0, + [202] = 0.0, + [203] = 0.0, + [204] = 0.0, + [205] = 0.0, + [206] = 0.0, + [207] = 0.0, + [208] = 0.0, + [209] = 0.0, + [210] = 0.0, + [211] = 0.0, + [212] = 0.0, + [213] = 0.0, + [214] = 0.0, + [215] = 0.0, + [216] = 0.0, + [217] = 0.0, + [218] = 0.0, + [219] = 0.0, + [220] = 0.0, + [221] = 0.0, + [222] = 0.0, + [223] = 0.0, + [224] = 0.0, + [225] = 0.0, + [226] = 0.0, + [227] = 0.0, + [228] = 0.0, + [229] = 0.0, + [230] = 0.0, + [231] = 0.0, + [232] = 0.0, + [233] = 0.0, + [234] = 0.0, + [235] = 0.0, + [236] = 0.0, + [237] = 0.0, + [238] = 0.0, + [239] = 0.0, + [240] = 0.0, + [241] = 0.0, + [242] = 0.0, + [243] = 0.0, + [244] = 0.0, + [245] 
= 0.0,
+    [246] = 0.0,
+    [247] = 0.0,
+    [248] = 0.0,
+    [249] = 0.0,
+    [250] = 0.0,
+    [251] = 0.0,
+    [252] = 0.0,
+    [253] = 0.0,
+    [254] = 0.0,
+    [255] = 0.0
+};
+
+// find the width of the string using the verdana 11 points font
+// re-write the string in place, skipping zero-length characters
+static inline double verdana11_width(char *s) {
+    double w = 0.0;
+    char *d = s;
+
+    while(*s) {
+        double t = verdana11_widths[(unsigned char)*s];
+        if(t == 0.0)
+            s++;
+        else {
+            w += t + VERDANA_KERNING;
+            if(d != s)
+                *d++ = *s++;
+            else
+                d = ++s;
+        }
+    }
+
+    *d = '\0';
+    w -= VERDANA_KERNING;
+    w += VERDANA_PADDING;
+    return w;
+}
+
+static inline size_t escape_xmlz(char *dst, const char *src, size_t len) {
+    size_t i = len;
+
+    // required escapes from
+    // https://github.com/badges/shields/blob/master/badge.js
+    while(*src && i) {
+        switch(*src) {
+            case '\\':
+                *dst++ = '/';
+                src++;
+                i--;
+                break;
+
+            case '&':
+                if(i > 5) {
+                    strcpy(dst, "&amp;");
+                    i -= 5;
+                    dst += 5;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '<':
+                if(i > 4) {
+                    strcpy(dst, "&lt;");
+                    i -= 4;
+                    dst += 4;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '>':
+                if(i > 4) {
+                    strcpy(dst, "&gt;");
+                    i -= 4;
+                    dst += 4;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '"':
+                if(i > 6) {
+                    strcpy(dst, "&quot;");
+                    i -= 6;
+                    dst += 6;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '\'':
+                if(i > 6) {
+                    strcpy(dst, "&apos;");
+                    i -= 6;
+                    dst += 6;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            default:
+                i--;
+                *dst++ = *src++;
+                break;
+        }
+    }
+
+cleanup:
+    *dst = '\0';
+    return len - i;
+}
+
+static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+    if(unlikely(isnan(value) || isinf(value)))
+        value = 0.0;
+
+    char *separator = "";
+    if(unlikely(isalnum(*units)))
+        separator = " ";
+
+    if(precision < 0) {
+        int len, lstop = 0, trim_zeros = 1;
+
+        calculated_number abs = value;
+        if(isless(value, 0)) {
+            lstop = 1;
+            abs = calculated_number_fabs(value);
+        }
+
+        if(isgreaterequal(abs, 1000)) {
+            len = snprintfz(value_string, value_string_len, "%0.0" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+            trim_zeros = 0;
+        }
+        else if(isgreaterequal(abs, 10))     len = snprintfz(value_string, value_string_len, "%0.1" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+        else if(isgreaterequal(abs, 1))      len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.1))    len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.01))   len = snprintfz(value_string, value_string_len, "%0.4" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.001))  len = snprintfz(value_string, value_string_len, "%0.5" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+        else                                 len = snprintfz(value_string, value_string_len, "%0.7" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+
+        if(unlikely(trim_zeros)) {
+            int l;
+            // remove trailing zeros from the decimal part
+            for(l = len - 1; l > lstop; l--) {
+                if(likely(value_string[l] == '0')) {
+                    value_string[l] = '\0';
+                    len--;
+                }
+
+                else if(unlikely(value_string[l] == '.')) {
+                    value_string[l] = '\0';
+                    len--;
+                    break;
+                }
+
+                else
+                    break;
+            }
+        }
+
+        if(unlikely(len <= 0)) len = 1;
+        snprintfz(&value_string[len], value_string_len - len,
"%s%s", separator, units); + } + else { + if(precision > 50) precision = 50; + snprintfz(value_string, value_string_len, "%0.*" LONG_DOUBLE_MODIFIER "%s%s", precision, (LONG_DOUBLE) value, separator, units); + } + + return value_string; +} + +typedef enum badge_units_format { + UNITS_FORMAT_NONE, + UNITS_FORMAT_SECONDS, + UNITS_FORMAT_SECONDS_AGO, + UNITS_FORMAT_MINUTES, + UNITS_FORMAT_MINUTES_AGO, + UNITS_FORMAT_HOURS, + UNITS_FORMAT_HOURS_AGO, + UNITS_FORMAT_ONOFF, + UNITS_FORMAT_UPDOWN, + UNITS_FORMAT_OKERROR, + UNITS_FORMAT_OKFAILED, + UNITS_FORMAT_EMPTY, + UNITS_FORMAT_PERCENT +} UNITS_FORMAT; + + +static struct units_formatter { + const char *units; + uint32_t hash; + UNITS_FORMAT format; +} badge_units_formatters[] = { + { "seconds", 0, UNITS_FORMAT_SECONDS }, + { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO }, + { "minutes", 0, UNITS_FORMAT_MINUTES }, + { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO }, + { "hours", 0, UNITS_FORMAT_HOURS }, + { "hours ago", 0, UNITS_FORMAT_HOURS_AGO }, + { "on/off", 0, UNITS_FORMAT_ONOFF }, + { "on-off", 0, UNITS_FORMAT_ONOFF }, + { "onoff", 0, UNITS_FORMAT_ONOFF }, + { "up/down", 0, UNITS_FORMAT_UPDOWN }, + { "up-down", 0, UNITS_FORMAT_UPDOWN }, + { "updown", 0, UNITS_FORMAT_UPDOWN }, + { "ok/error", 0, UNITS_FORMAT_OKERROR }, + { "ok-error", 0, UNITS_FORMAT_OKERROR }, + { "okerror", 0, UNITS_FORMAT_OKERROR }, + { "ok/failed", 0, UNITS_FORMAT_OKFAILED }, + { "ok-failed", 0, UNITS_FORMAT_OKFAILED }, + { "okfailed", 0, UNITS_FORMAT_OKFAILED }, + { "empty", 0, UNITS_FORMAT_EMPTY }, + { "null", 0, UNITS_FORMAT_EMPTY }, + { "percentage", 0, UNITS_FORMAT_PERCENT }, + { "percent", 0, UNITS_FORMAT_PERCENT }, + { "pcent", 0, UNITS_FORMAT_PERCENT }, + + // terminator + { NULL, 0, UNITS_FORMAT_NONE } +}; + +inline char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) { + static int max = -1; + int i; + + if(unlikely(max == -1)) { + for(i = 0; badge_units_formatters[i].units; i++) + badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units); + + max = i; + } + + if(unlikely(!units)) units = ""; + uint32_t hash_units = simple_hash(units); + + UNITS_FORMAT format = UNITS_FORMAT_NONE; + for(i = 0; i < max; i++) { + struct units_formatter *ptr = &badge_units_formatters[i]; + + if(hash_units == ptr->hash && !strcmp(units, ptr->units)) { + format = ptr->format; + break; + } + } + + if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) { + if(value == 0.0) { + snprintfz(value_string, value_string_len, "%s", "now"); + return value_string; + } + else if(isnan(value) || isinf(value)) { + snprintfz(value_string, value_string_len, "%s", "undefined"); + return value_string; + } + + const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" 
ago":""; + + size_t s = (size_t)value; + size_t d = s / 86400; + s = s % 86400; + + size_t h = s / 3600; + s = s % 3600; + + size_t m = s / 60; + s = s % 60; + + if(d) + snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix); + else + snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix); + + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) { + if(value == 0.0) { + snprintfz(value_string, value_string_len, "%s", "now"); + return value_string; + } + else if(isnan(value) || isinf(value)) { + snprintfz(value_string, value_string_len, "%s", "undefined"); + return value_string; + } + + const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" ago":""; + + size_t m = (size_t)value; + size_t d = m / (60 * 24); + m = m % (60 * 24); + + size_t h = m / 60; + m = m % 60; + + if(d) + snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix); + else + snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix); + + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) { + if(value == 0.0) { + snprintfz(value_string, value_string_len, "%s", "now"); + return value_string; + } + else if(isnan(value) || isinf(value)) { + snprintfz(value_string, value_string_len, "%s", "undefined"); + return value_string; + } + + const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":""; + + size_t h = (size_t)value; + size_t d = h / 24; + h = h % 24; + + if(d) + snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix); + else + snprintfz(value_string, value_string_len, "%zuh%s", h, suffix); + + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_ONOFF)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_UPDOWN)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_OKERROR)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_OKFAILED)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_EMPTY)) + units = ""; + + else if(unlikely(format == UNITS_FORMAT_PERCENT)) + units = "%"; + + if(unlikely(isnan(value) || isinf(value))) { + strcpy(value_string, "-"); + return value_string; + } + + return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision); +} + +static struct badge_color { + const char *name; + uint32_t hash; + const char *color; +} badge_colors[] = { + + // colors from: + // https://github.com/badges/shields/blob/master/colorscheme.json + + { "brightgreen", 0, "#4c1" }, + { "green", 0, "#97CA00" }, + { "yellow", 0, "#dfb317" }, + { "yellowgreen", 0, "#a4a61d" }, + { "orange", 0, "#fe7d37" }, + { "red", 0, "#e05d44" }, + { "blue", 0, "#007ec6" }, + { "grey", 0, "#555" }, + { "gray", 0, "#555" }, + { "lightgrey", 0, "#9f9f9f" }, + { "lightgray", 0, "#9f9f9f" }, + + // terminator + { NULL, 0, NULL } +}; + +static inline const char *color_map(const char *color) { + static int max = -1; + int i; + + if(unlikely(max == -1)) { + for(i = 0; badge_colors[i].name ;i++) + badge_colors[i].hash = 
simple_hash(badge_colors[i].name); + + max = i; + } + + uint32_t hash = simple_hash(color); + + for(i = 0; i < max; i++) { + struct badge_color *ptr = &badge_colors[i]; + + if(hash == ptr->hash && !strcmp(color, ptr->name)) + return ptr->color; + } + + return color; +} + +typedef enum color_comparison { + COLOR_COMPARE_EQUAL, + COLOR_COMPARE_NOTEQUAL, + COLOR_COMPARE_LESS, + COLOR_COMPARE_LESSEQUAL, + COLOR_COMPARE_GREATER, + COLOR_COMPARE_GREATEREQUAL, +} BADGE_COLOR_COMPARISON; + +static inline void calc_colorz(const char *color, char *final, size_t len, calculated_number value) { + if(isnan(value) || isinf(value)) + value = NAN; + + char color_buffer[256 + 1] = ""; + char value_buffer[256 + 1] = ""; + BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER; + + // example input: + // color<max|color>min|color:null... + + const char *c = color; + while(*c) { + char *dc = color_buffer, *dv = NULL; + size_t ci = 0, vi = 0; + + const char *t = c; + + while(*t && *t != '|') { + switch(*t) { + case '!': + if(t[1] == '=') t++; + comparison = COLOR_COMPARE_NOTEQUAL; + dv = value_buffer; + break; + + case '=': + case ':': + comparison = COLOR_COMPARE_EQUAL; + dv = value_buffer; + break; + + case '}': + case ')': + case '>': + if(t[1] == '=') { + comparison = COLOR_COMPARE_GREATEREQUAL; + t++; + } + else + comparison = COLOR_COMPARE_GREATER; + dv = value_buffer; + break; + + case '{': + case '(': + case '<': + if(t[1] == '=') { + comparison = COLOR_COMPARE_LESSEQUAL; + t++; + } + else if(t[1] == '>' || t[1] == ')' || t[1] == '}') { + comparison = COLOR_COMPARE_NOTEQUAL; + t++; + } + else + comparison = COLOR_COMPARE_LESS; + dv = value_buffer; + break; + + default: + if(dv) { + if(vi < 256) { + vi++; + *dv++ = *t; + } + } + else { + if(ci < 256) { + ci++; + *dc++ = *t; + } + } + break; + } + + t++; + } + + // prepare for next iteration + if(*t == '|') t++; + c = t; + + // do the math + *dc = '\0'; + if(dv) { + *dv = '\0'; + calculated_number v; + + if(!*value_buffer || !strcmp(value_buffer, "null")) { + v = NAN; + } + else { + v = str2l(value_buffer); + if(isnan(v) || isinf(v)) + v = NAN; + } + + if(unlikely(isnan(value) || isnan(v))) { + if(isnan(value) && isnan(v)) + break; + } + else { + if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break; + } + } + else + break; + } + + const char *b; + if(color_buffer[0]) + b = color_buffer; + else + b = color; + + strncpyz(final, b, len); +} + +// value + units +#define VALUE_STRING_SIZE 100 + +// label +#define LABEL_STRING_SIZE 200 + +// colors +#define COLOR_STRING_SIZE 100 + +void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options) { + char label_buffer[LABEL_STRING_SIZE + 1] + , value_color_buffer[COLOR_STRING_SIZE + 1] + , value_string[VALUE_STRING_SIZE + 1] + , label_escaped[LABEL_STRING_SIZE + 1] + , value_escaped[VALUE_STRING_SIZE + 1] + , label_color_escaped[COLOR_STRING_SIZE + 1] + , value_color_escaped[COLOR_STRING_SIZE + 1]; + + double 
label_width, value_width, total_width, height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0; + + if(scale < 100) scale = 100; + + if(unlikely(!label_color || !*label_color)) + label_color = "#555"; + + if(unlikely(!value_color || !*value_color)) + value_color = (isnan(value) || isinf(value))?"#999":"#4c1"; + + calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value); + format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision); + + // we need to copy the label, since verdana11_width may write to it + strncpyz(label_buffer, label, LABEL_STRING_SIZE); + + label_width = verdana11_width(label_buffer) + (BADGE_HORIZONTAL_PADDING * 2); + value_width = verdana11_width(value_string) + (BADGE_HORIZONTAL_PADDING * 2); + total_width = label_width + value_width; + + escape_xmlz(label_escaped, label_buffer, LABEL_STRING_SIZE); + escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE); + escape_xmlz(label_color_escaped, color_map(label_color), COLOR_STRING_SIZE); + escape_xmlz(value_color_escaped, color_map(value_color_buffer), COLOR_STRING_SIZE); + + wb->contenttype = CT_IMAGE_SVG_XML; + + total_width = total_width * scale / 100.0; + height = height * scale / 100.0; + font_size = font_size * scale / 100.0; + text_offset = text_offset * scale / 100.0; + label_width = label_width * scale / 100.0; + value_width = value_width * scale / 100.0; + round_corner = round_corner * scale / 100.0; + + // svg template from: + // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg + buffer_sprintf(wb, + "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%0.2f\" height=\"%0.2f\">" + "<linearGradient id=\"smooth\" x2=\"0\" y2=\"100%%\">" + "<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>" + "<stop offset=\"1\" stop-opacity=\".1\"/>" + "</linearGradient>" + "<mask id=\"round\">" + "<rect width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>" + "</mask>" + "<g mask=\"url(#round)\">" + "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" + "<rect x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>" + "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>" + "</g>" + "<g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">" + "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" + "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>" + "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>" + "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>" + "</g>" + "</svg>", + total_width, height, + total_width, height, round_corner, + label_width, height, label_color_escaped, + label_width, value_width, height, value_color_escaped, + total_width, height, + font_size, + label_width / 2, ceil(height - text_offset), label_escaped, + label_width / 2, ceil(height - text_offset - 1.0), label_escaped, + label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped, + label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped); +} + +int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) { + int ret = 400; + buffer_flush(w->response.data); + + BUFFER *dimensions = NULL; + + const char *chart = NULL + , *before_str = NULL + , *after_str = NULL + , *points_str = NULL + , *multiply_str = NULL + , *divide_str = NULL + , *label = NULL + , *units = NULL + , 
*label_color = NULL + , *value_color = NULL + , *refresh_str = NULL + , *precision_str = NULL + , *scale_str = NULL + , *alarm = NULL; + + int group = RRDR_GROUPING_AVERAGE; + uint32_t options = 0x00000000; + + while(url) { + char *value = mystrsep(&url, "/?&"); + if(!value || !*value) continue; + + char *name = mystrsep(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value); + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) + dimensions = buffer_create(100); + + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "group")) { + group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE); + } + else if(!strcmp(name, "options")) { + options |= web_client_api_request_v1_data_options(value); + } + else if(!strcmp(name, "label")) label = value; + else if(!strcmp(name, "units")) units = value; + else if(!strcmp(name, "label_color")) label_color = value; + else if(!strcmp(name, "value_color")) value_color = value; + else if(!strcmp(name, "multiply")) multiply_str = value; + else if(!strcmp(name, "divide")) divide_str = value; + else if(!strcmp(name, "refresh")) refresh_str = value; + else if(!strcmp(name, "precision")) precision_str = value; + else if(!strcmp(name, "scale")) scale_str = value; + else if(!strcmp(name, "alarm")) alarm = value; + } + + if(!chart || !*chart) { + buffer_no_cacheable(w->response.data); + buffer_sprintf(w->response.data, "No chart id is given at the request."); + goto cleanup; + } + + int scale = (scale_str && *scale_str)?str2i(scale_str):100; + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + buffer_no_cacheable(w->response.data); + buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0); + ret = 200; + goto cleanup; + } + st->last_accessed_time = now_realtime_sec(); + + RRDCALC *rc = NULL; + if(alarm) { + rc = rrdcalc_find(st, alarm); + if (!rc) { + buffer_no_cacheable(w->response.data); + buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0); + ret = 200; + goto cleanup; + } + } + + long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1; + long long divide = (divide_str && *divide_str )?str2l(divide_str):1; + long long before = (before_str && *before_str )?str2l(before_str):0; + long long after = (after_str && *after_str )?str2l(after_str):-st->update_every; + int points = (points_str && *points_str )?str2i(points_str):1; + int precision = (precision_str && *precision_str)?str2i(precision_str):-1; + + if(!multiply) multiply = 1; + if(!divide) divide = 1; + + int refresh = 0; + if(refresh_str && *refresh_str) { + if(!strcmp(refresh_str, "auto")) { + if(rc) refresh = rc->update_every; + else if(options & RRDR_OPTION_NOT_ALIGNED) + refresh = st->update_every; + else { + refresh = (int)(before - after); + if(refresh < 0) refresh = -refresh; + } + } + else { + refresh = str2i(refresh_str); + if(refresh < 0) refresh = -refresh; + } + } + + if(!label) { + if(alarm) { + char *s = (char 
*)alarm; + while(*s) { + if(*s == '_') *s = ' '; + s++; + } + label = alarm; + } + else if(dimensions) { + const char *dim = buffer_tostring(dimensions); + if(*dim == '|') dim++; + label = dim; + } + else + label = st->name; + } + if(!units) { + if(alarm) { + if(rc->units) + units = rc->units; + else + units = ""; + } + else if(options & RRDR_OPTION_PERCENTAGE) + units = "%"; + else + units = st->units; + } + + debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'" + , w->id + , chart + , alarm?alarm:"" + , (dimensions)?buffer_tostring(dimensions):"" + , after + , before + , points + , group + , options + ); + + if(rc) { + if (refresh > 0) { + buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); + w->response.data->expires = now_realtime_sec() + refresh; + } + else buffer_no_cacheable(w->response.data); + + if(!value_color) { + switch(rc->status) { + case RRDCALC_STATUS_CRITICAL: + value_color = "red"; + break; + + case RRDCALC_STATUS_WARNING: + value_color = "orange"; + break; + + case RRDCALC_STATUS_CLEAR: + value_color = "brightgreen"; + break; + + case RRDCALC_STATUS_UNDEFINED: + value_color = "lightgrey"; + break; + + case RRDCALC_STATUS_UNINITIALIZED: + value_color = "#000"; + break; + + default: + value_color = "grey"; + break; + } + } + + buffer_svg(w->response.data, + label, + (isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide, + units, + label_color, + value_color, + precision, + scale, + options + ); + ret = 200; + } + else { + time_t latest_timestamp = 0; + int value_is_null = 1; + calculated_number n = NAN; + ret = 500; + + // if the collected value is too old, don't calculate its value + if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above))) + ret = rrdset2value_api_v1(st, w->response.data, &n, (dimensions) ? 
buffer_tostring(dimensions) : NULL + , points, after, before, group, 0, options, NULL, &latest_timestamp, &value_is_null); + + // if the value cannot be calculated, show empty badge + if (ret != 200) { + buffer_no_cacheable(w->response.data); + value_is_null = 1; + n = 0; + ret = 200; + } + else if (refresh > 0) { + buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); + w->response.data->expires = now_realtime_sec() + refresh; + } + else buffer_no_cacheable(w->response.data); + + // render the badge + buffer_svg(w->response.data, + label, + (value_is_null)?NAN:(n * multiply / divide), + units, + label_color, + value_color, + precision, + scale, + options + ); + } + + cleanup: + buffer_free(dimensions); + return ret; +} diff --git a/web/api/badges/web_buffer_svg.h b/web/api/badges/web_buffer_svg.h new file mode 100644 index 000000000..f75677bf9 --- /dev/null +++ b/web/api/badges/web_buffer_svg.h @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_BUFFER_SVG_H +#define NETDATA_WEB_BUFFER_SVG_H 1 + +#include "libnetdata/libnetdata.h" +#include "web/server/web_client.h" + +extern void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options); +extern char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision); + +extern int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url); + +#include "web/api/web_api_v1.h" + +#endif /* NETDATA_WEB_BUFFER_SVG_H */ diff --git a/web/api/exporters/Makefile.am b/web/api/exporters/Makefile.am new file mode 100644 index 000000000..3dce98aa9 --- /dev/null +++ b/web/api/exporters/Makefile.am @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + shell \ + prometheus \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/exporters/Makefile.in b/web/api/exporters/Makefile.in new file mode 100644 index 000000000..50e4a6cda --- /dev/null +++ b/web/api/exporters/Makefile.in @@ -0,0 +1,649 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/exporters +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; 
\ + esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ 
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + shell \ + prometheus \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/exporters/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/exporters/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/web/api/exporters/allmetrics.c b/web/api/exporters/allmetrics.c new file mode 100644 index 000000000..a426db0cc --- /dev/null +++ b/web/api/exporters/allmetrics.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "allmetrics.h" + +struct prometheus_output_options { + char *name; + PROMETHEUS_OUTPUT_OPTIONS flag; +} prometheus_output_flags_root[] = { + { "help", PROMETHEUS_OUTPUT_HELP }, + { "types", PROMETHEUS_OUTPUT_TYPES }, + { "names", PROMETHEUS_OUTPUT_NAMES }, + { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS }, + { "variables", PROMETHEUS_OUTPUT_VARIABLES }, + + // terminator + { NULL, PROMETHEUS_OUTPUT_NONE }, +}; + +inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) { + int format = ALLMETRICS_SHELL; + const char *prometheus_server = w->client_ip; + uint32_t prometheus_backend_options = global_backend_options; + PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = PROMETHEUS_OUTPUT_TIMESTAMPS | ((global_backend_options & BACKEND_OPTION_SEND_NAMES)?PROMETHEUS_OUTPUT_NAMES:0); + const char *prometheus_prefix = global_backend_prefix; + + while(url) { + char *value = mystrsep(&url, "?&"); + if (!value || !*value) continue; + + char *name = mystrsep(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if(!strcmp(name, "format")) { + if(!strcmp(value, ALLMETRICS_FORMAT_SHELL)) + format = ALLMETRICS_SHELL; + else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS)) + format = ALLMETRICS_PROMETHEUS; + else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) + format = ALLMETRICS_PROMETHEUS_ALL_HOSTS; + else if(!strcmp(value, ALLMETRICS_FORMAT_JSON)) + format = ALLMETRICS_JSON; + else + format = 0; + } + else if(!strcmp(name, "server")) { + prometheus_server = value; + } + else if(!strcmp(name, "prefix")) { + prometheus_prefix = value; + } + else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) { + prometheus_backend_options = backend_parse_data_source(value, prometheus_backend_options); + } + else { + int i; + for(i = 0; prometheus_output_flags_root[i].name ; i++) { + if(!strcmp(name, prometheus_output_flags_root[i].name)) { + if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true")) + prometheus_output_options |= prometheus_output_flags_root[i].flag; + else + prometheus_output_options &= ~prometheus_output_flags_root[i].flag; + + break; + } + } + } + } + + buffer_flush(w->response.data); + buffer_no_cacheable(w->response.data); + + switch(format) { + case ALLMETRICS_JSON: + w->response.data->contenttype = CT_APPLICATION_JSON; + rrd_stats_api_v1_charts_allmetrics_json(host, w->response.data); + return 200; + + case ALLMETRICS_SHELL: + w->response.data->contenttype = CT_TEXT_PLAIN; + rrd_stats_api_v1_charts_allmetrics_shell(host, w->response.data); + return 200; + + case ALLMETRICS_PROMETHEUS: + w->response.data->contenttype = CT_PROMETHEUS; + rrd_stats_api_v1_charts_allmetrics_prometheus_single_host( + host + , w->response.data + , prometheus_server + , prometheus_prefix + , prometheus_backend_options + , prometheus_output_options + ); + return 200; + + case ALLMETRICS_PROMETHEUS_ALL_HOSTS: + w->response.data->contenttype = CT_PROMETHEUS; + rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts( + 
host + , w->response.data + , prometheus_server + , prometheus_prefix + , prometheus_backend_options + , prometheus_output_options + ); + return 200; + + default: + w->response.data->contenttype = CT_TEXT_PLAIN; + buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported."); + return 400; + } +} diff --git a/web/api/exporters/allmetrics.h b/web/api/exporters/allmetrics.h new file mode 100644 index 000000000..e8dedabf4 --- /dev/null +++ b/web/api/exporters/allmetrics.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_ALLMETRICS_H +#define NETDATA_API_ALLMETRICS_H + +#include "web/api/formatters/rrd2json.h" +#include "shell/allmetrics_shell.h" + +extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url); + +#endif //NETDATA_API_ALLMETRICS_H diff --git a/web/api/exporters/prometheus/Makefile.am b/web/api/exporters/prometheus/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/exporters/prometheus/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/exporters/prometheus/Makefile.in b/web/api/exporters/prometheus/Makefile.in new file mode 100644 index 000000000..202f19b3d --- /dev/null +++ b/web/api/exporters/prometheus/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/exporters/prometheus +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = 
@CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/exporters/prometheus/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/exporters/prometheus/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md new file mode 100644 index 000000000..437f90c53 --- /dev/null +++ b/web/api/exporters/prometheus/README.md @@ -0,0 +1,3 @@ +# prometheus exporter + +The prometheus exporter for netdata is located at the [backends section for prometheus](../../../../backends/prometheus). 
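+
+For a quick check that the endpoint is wired up, the same `allmetrics` API can
+emit the prometheus text format directly. A minimal sketch, assuming a netdata
+agent listening on the default `localhost:19999` (host and port are
+assumptions; the format names come from the allmetrics API):
+
+```sh
+# all metrics of this host, in the prometheus text format
+curl -s 'http://localhost:19999/api/v1/allmetrics?format=prometheus'
+
+# the prometheus_all_hosts variant also covers the other hosts
+# this netdata knows about (e.g. on a netdata master)
+curl -s 'http://localhost:19999/api/v1/allmetrics?format=prometheus_all_hosts'
+```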
diff --git a/web/api/exporters/shell/Makefile.am b/web/api/exporters/shell/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/exporters/shell/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/exporters/shell/Makefile.in b/web/api/exporters/shell/Makefile.in new file mode 100644 index 000000000..40a3c2b49 --- /dev/null +++ b/web/api/exporters/shell/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/exporters/shell +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ 
+VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/exporters/shell/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/exporters/shell/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md new file mode 100644 index 000000000..065afff5c --- /dev/null +++ b/web/api/exporters/shell/README.md @@ -0,0 +1,64 @@ +# shell exporter + +Shell scripts can now query netdata: + +```sh +eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')" +``` + +after this command, all the netdata metrics are exposed to shell. Check: + +```sh +# source the metrics +eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')" + +# let's see if there are variables exposed by netdata for system.cpu +set | grep "^NETDATA_SYSTEM_CPU" + +NETDATA_SYSTEM_CPU_GUEST=0 +NETDATA_SYSTEM_CPU_GUEST_NICE=0 +NETDATA_SYSTEM_CPU_IDLE=95 +NETDATA_SYSTEM_CPU_IOWAIT=0 +NETDATA_SYSTEM_CPU_IRQ=0 +NETDATA_SYSTEM_CPU_NICE=0 +NETDATA_SYSTEM_CPU_SOFTIRQ=0 +NETDATA_SYSTEM_CPU_STEAL=0 +NETDATA_SYSTEM_CPU_SYSTEM=1 +NETDATA_SYSTEM_CPU_USER=4 +NETDATA_SYSTEM_CPU_VISIBLETOTAL=5 + +# let's see the total cpu utilization of the system +echo ${NETDATA_SYSTEM_CPU_VISIBLETOTAL} +5 + +# what about alarms? +set | grep "^NETDATA_ALARM_SYSTEM_SWAP_" +NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_STATUS=CRITICAL +NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_VALUE=53 +NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS=CLEAR +NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_VALUE=51 + +# let's get the current status of the alarm 'ram in swap' +echo ${NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_STATUS} +CRITICAL + +# is it fast? +time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null + +real 0m0,070s +user 0m0,000s +sys 0m0,007s + +# it is... +# 0.07 seconds for curl to be loaded, connect to netdata and fetch the response back... +``` + +The `_VISIBLETOTAL` variable sums up all the dimensions of each chart. 
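+
+A minimal sketch of a script built on these variables, assuming an agent at
+the default `localhost:19999` and a `system.cpu` chart (chart and dimension
+names differ per system, so the variable name below is illustrative):
+
+```sh
+#!/bin/sh
+
+# source all metrics as shell variables
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+
+# values are integers (see the rounding note below), so plain test
+# arithmetic works; warn when visible CPU utilization exceeds 90%
+if [ "${NETDATA_SYSTEM_CPU_VISIBLETOTAL:-0}" -gt 90 ]; then
+    echo "high CPU utilization: ${NETDATA_SYSTEM_CPU_VISIBLETOTAL}%"
+fi
+```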
+ +The format of the variables is: + +```sh +NETDATA_${chart_id^^}_${dimension_id^^}="${value}" +``` + +The value is rounded to the closest integer, since shell script cannot process decimal numbers. diff --git a/web/api/exporters/shell/allmetrics_shell.c b/web/api/exporters/shell/allmetrics_shell.c new file mode 100644 index 000000000..e380deec4 --- /dev/null +++ b/web/api/exporters/shell/allmetrics_shell.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "allmetrics_shell.h" + +// ---------------------------------------------------------------------------- +// BASH +// /api/v1/allmetrics?format=bash + +static inline size_t shell_name_copy(char *d, const char *s, size_t usable) { + size_t n; + + for(n = 0; *s && n < usable ; d++, s++, n++) { + register char c = *s; + + if(unlikely(!isalnum(c))) *d = '_'; + else *d = (char)toupper(c); + } + *d = '\0'; + + return n; +} + +#define SHELL_ELEMENT_MAX 100 + +void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) { + rrdhost_rdlock(host); + + // for each chart + RRDSET *st; + rrdset_foreach_read(st, host) { + calculated_number total = 0.0; + char chart[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX); + + buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", st->id, st->name); + if(rrdset_is_available_for_viewers(st)) { + rrdset_rdlock(st); + + // for each dimension + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rd->collections_counter) { + char dimension[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX); + + calculated_number n = rd->last_stored_value; + + if(isnan(n) || isinf(n)) + buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units); + else { + if(rd->multiplier < 0 || rd->divisor < 0) n = -n; + n = calculated_number_round(n); + if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n; + buffer_sprintf(wb, "NETDATA_%s_%s=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units); + } + } + } + + total = calculated_number_round(total); + buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, total, st->units); + rrdset_unlock(st); + } + } + + buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n"); + + RRDCALC *rc; + for(rc = host->alarms; rc ;rc = rc->next) { + if(!rc->rrdset) continue; + + char chart[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(chart, rc->rrdset->name?rc->rrdset->name:rc->rrdset->id, SHELL_ELEMENT_MAX); + + char alarm[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX); + + calculated_number n = rc->value; + + if(isnan(n) || isinf(n)) + buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units); + else { + n = calculated_number_round(n); + buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units); + } + + buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status)); + } + + rrdhost_unlock(host); +} + +// ---------------------------------------------------------------------------- + +void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb) { + rrdhost_rdlock(host); + + buffer_strcat(wb, "{"); + + size_t chart_counter = 0; + size_t dimension_counter = 0; + + // for each chart + RRDSET *st; + rrdset_foreach_read(st, host) { + if(rrdset_is_available_for_viewers(st)) { + rrdset_rdlock(st); + + buffer_sprintf(wb, "%s\n" + "\t\"%s\": {\n" + 
"\t\t\"name\":\"%s\",\n" + "\t\t\"context\":\"%s\",\n" + "\t\t\"units\":\"%s\",\n" + "\t\t\"last_updated\": %ld,\n" + "\t\t\"dimensions\": {" + , chart_counter?",":"" + , st->id + , st->name + , st->context + , st->units + , rrdset_last_entry_t(st) + ); + + chart_counter++; + dimension_counter = 0; + + // for each dimension + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rd->collections_counter) { + + buffer_sprintf(wb, "%s\n" + "\t\t\t\"%s\": {\n" + "\t\t\t\t\"name\": \"%s\",\n" + "\t\t\t\t\"value\": " + , dimension_counter?",":"" + , rd->id + , rd->name + ); + + if(isnan(rd->last_stored_value)) + buffer_strcat(wb, "null"); + else + buffer_sprintf(wb, CALCULATED_NUMBER_FORMAT, rd->last_stored_value); + + buffer_strcat(wb, "\n\t\t\t}"); + + dimension_counter++; + } + } + + buffer_strcat(wb, "\n\t\t}\n\t}"); + rrdset_unlock(st); + } + } + + buffer_strcat(wb, "\n}"); + rrdhost_unlock(host); +} + diff --git a/web/api/exporters/shell/allmetrics_shell.h b/web/api/exporters/shell/allmetrics_shell.h new file mode 100644 index 000000000..1d7611a2d --- /dev/null +++ b/web/api/exporters/shell/allmetrics_shell.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_ALLMETRICS_SHELL_H +#define NETDATA_API_ALLMETRICS_SHELL_H + +#include "../allmetrics.h" + +#define ALLMETRICS_FORMAT_SHELL "shell" +#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus" +#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts" +#define ALLMETRICS_FORMAT_JSON "json" + +#define ALLMETRICS_SHELL 1 +#define ALLMETRICS_PROMETHEUS 2 +#define ALLMETRICS_JSON 3 +#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4 + +extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb); +extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb); + +#endif //NETDATA_API_ALLMETRICS_SHELL_H diff --git a/web/api/formatters/Makefile.am b/web/api/formatters/Makefile.am new file mode 100644 index 000000000..324575906 --- /dev/null +++ b/web/api/formatters/Makefile.am @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + csv \ + json \ + ssv \ + value \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/Makefile.in b/web/api/formatters/Makefile.in new file mode 100644 index 000000000..a2ea0e53c --- /dev/null +++ b/web/api/formatters/Makefile.in @@ -0,0 +1,651 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/formatters +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; 
\ + esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ 
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + csv \ + json \ + ssv \ + value \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/formatters/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md new file mode 100644 index 000000000..ad0df2d46 --- /dev/null +++ b/web/api/formatters/README.md @@ -0,0 +1,72 @@ +# Query formatting + +API data queries need to be formatted before returned to the caller. +Using API parameters, the caller may define the format he/she wishes to get back. + +The following formats are supported: + +format|module|content type|description +:---:|:---:|:---:|:----- +`array`|[ssv](ssv)|application/json|a JSON array +`csv`|[csv](csv)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines +`csvjsonarray`|[csv](csv)|application/json|a JSON array, with each row as another array (the first row has the dimension names) +`datasource`|[json](json)|application/json|a Google Visualization Provider `datasource` javascript callback +`datatable`|[json](json)|application/json|a Google `datatable` +`html`|[csv](csv)|text/html|an html table +`json`|[json](json)|application/json|a JSON object +`jsonp`|[json](json)|application/json|a JSONP javascript callback +`markdown`|[csv](csv)|text/plain|a markdown table +`ssv`|[ssv](ssv)|text/plain|a space separated list of values +`ssvcomma`|[ssv](ssv)|text/plain|a comma separated list of values +`tsv`|[csv](csv)|text/plain|a TAB delimited `csv` (MS Excel flavor) + +For examples of each format, check the relative module documentation. + +## Metadata with the `jsonwrap` option + +All data queries can be encapsulated to JSON object having metadata about the query and the results. 
+
+This is done by adding `options=jsonwrap` to the API URL (if other `options`
+are already present, append `,jsonwrap` to them).
+
+Here is an example of such a response:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero,jsonwrap'
+{
+   "api": 1,
+   "id": "system.cpu",
+   "name": "system.cpu",
+   "view_update_every": 600,
+   "update_every": 1,
+   "first_entry": 1540387074,
+   "last_entry": 1540647070,
+   "before": 1540647000,
+   "after": 1540644000,
+   "dimension_names": ["steal", "softirq", "user", "system", "iowait"],
+   "dimension_ids": ["steal", "softirq", "user", "system", "iowait"],
+   "latest_values": [0, 0.2493766, 1.745636, 0.4987531, 0],
+   "view_latest_values": [0.0158314, 0.0516506, 0.866549, 0.7196127, 0.0050002],
+   "dimensions": 5,
+   "points": 6,
+   "format": "csv",
+   "result": "time,steal,softirq,user,system,iowait\n2018-10-27 13:30:00,0.0158314,0.0516506,0.866549,0.7196127,0.0050002\n2018-10-27 13:20:00,0.0149856,0.0529183,0.8673155,0.7121144,0.0049979\n2018-10-27 13:10:00,0.0137501,0.053315,0.8578097,0.7197613,0.0054209\n2018-10-27 13:00:00,0.0154252,0.0554688,0.899432,0.7200638,0.0067252\n2018-10-27 12:50:00,0.0145866,0.0495922,0.8404341,0.7011141,0.0041688\n2018-10-27 12:40:00,0.0162366,0.0595954,0.8827475,0.7020573,0.0041636\n",
+   "min": 0,
+   "max": 0
+}
+```
+
+## Downloading data query result files
+
+Following the [Google Visualization Provider guidelines](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source),
+netdata supports parsing `tqx` options.
+
+Using these options, any netdata data query can instruct the web browser to download
+the result and save it under a given filename.
+
+For example, to download a CSV file with CPU utilization of the last hour,
+[click here](https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:system+cpu+utilization+of+the+last_hour.csv).
+
+This is done by appending `&tqx=outFileName:FILENAME` to any data query.
+The output will be in the format given with `&format=`.
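+
+The same download can be scripted. A short sketch, assuming the agent answers
+`tqx` requests with a `Content-Disposition` header (the mechanism browsers use
+to pick the filename; `-OJ` makes `curl` honor it):
+
+```sh
+# save the last hour of CPU utilization as a CSV file,
+# under the filename suggested by the tqx option
+curl -OJ 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:cpu.csv'
+```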
diff --git a/web/api/formatters/charts2json.c b/web/api/formatters/charts2json.c new file mode 100644 index 000000000..f60f7f540 --- /dev/null +++ b/web/api/formatters/charts2json.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "charts2json.h" + +// generate JSON for the /api/v1/charts API call + +void charts2json(RRDHOST *host, BUFFER *wb) { + static char *custom_dashboard_info_js_filename = NULL; + size_t c, dimensions = 0, memory = 0, alarms = 0; + RRDSET *st; + + time_t now = now_realtime_sec(); + + if(unlikely(!custom_dashboard_info_js_filename)) + custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", ""); + + buffer_sprintf(wb, "{\n" + "\t\"hostname\": \"%s\"" + ",\n\t\"version\": \"%s\"" + ",\n\t\"os\": \"%s\"" + ",\n\t\"timezone\": \"%s\"" + ",\n\t\"update_every\": %d" + ",\n\t\"history\": %ld" + ",\n\t\"custom_info\": \"%s\"" + ",\n\t\"charts\": {" + , host->hostname + , host->program_version + , host->os + , host->timezone + , host->rrd_update_every + , host->rrd_history_entries + , custom_dashboard_info_js_filename + ); + + c = 0; + rrdhost_rdlock(host); + rrdset_foreach_read(st, host) { + if(rrdset_is_available_for_viewers(st)) { + if(c) buffer_strcat(wb, ","); + buffer_strcat(wb, "\n\t\t\""); + buffer_strcat(wb, st->id); + buffer_strcat(wb, "\": "); + rrdset2json(st, wb, &dimensions, &memory); + + c++; + st->last_accessed_time = now; + } + } + + RRDCALC *rc; + for(rc = host->alarms; rc ; rc = rc->next) { + if(rc->rrdset) + alarms++; + } + rrdhost_unlock(host); + + buffer_sprintf(wb + , "\n\t}" + ",\n\t\"charts_count\": %zu" + ",\n\t\"dimensions_count\": %zu" + ",\n\t\"alarms_count\": %zu" + ",\n\t\"rrd_memory_bytes\": %zu" + ",\n\t\"hosts_count\": %zu" + ",\n\t\"hosts\": [" + , c + , dimensions + , alarms + , memory + , rrd_hosts_available + ); + + if(unlikely(rrd_hosts_available > 1)) { + rrd_rdlock(); + + size_t found = 0; + RRDHOST *h; + rrdhost_foreach_read(h) { + if(!rrdhost_should_be_removed(h, host, now)) { + buffer_sprintf(wb + , "%s\n\t\t{" + "\n\t\t\t\"hostname\": \"%s\"" + "\n\t\t}" + , (found > 0) ? "," : "" + , h->hostname + ); + + found++; + } + } + + rrd_unlock(); + } + else { + buffer_sprintf(wb + , "\n\t\t{" + "\n\t\t\t\"hostname\": \"%s\"" + "\n\t\t}" + , host->hostname + ); + } + + buffer_sprintf(wb, "\n\t]\n}\n"); +} diff --git a/web/api/formatters/charts2json.h b/web/api/formatters/charts2json.h new file mode 100644 index 000000000..5d6d80060 --- /dev/null +++ b/web/api/formatters/charts2json.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_CHARTS2JSON_H +#define NETDATA_API_FORMATTER_CHARTS2JSON_H + +#include "rrd2json.h" + +extern void charts2json(RRDHOST *host, BUFFER *wb); + +#endif //NETDATA_API_FORMATTER_CHARTS2JSON_H diff --git a/web/api/formatters/csv/Makefile.am b/web/api/formatters/csv/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/formatters/csv/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/csv/Makefile.in b/web/api/formatters/csv/Makefile.in new file mode 100644 index 000000000..9ba808443 --- /dev/null +++ b/web/api/formatters/csv/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. 
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/formatters/csv +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = 
$(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ 
+exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/csv/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/formatters/csv/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT:
diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md
new file mode 100644
index 000000000..4711dcbd1
--- /dev/null
+++ b/web/api/formatters/csv/README.md
@@ -0,0 +1,139 @@
+# CSV formatter
+
+The CSV formatter presents [results of database queries](../../queries) in the following formats:
+
+format|content type|description
+:---:|:---:|:-----
+`csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines
+`csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)
+`tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)
+`html`|text/html|an HTML table
+`markdown`|text/plain|a markdown table
+
+In all formats, the date and time are the first column.
+
+The CSV formatter respects the following API `&options=`:
+
+option|supported|description
+:---:|:---:|:---
+`nonzero`|yes|to return only the dimensions that have at least one non-zero value
+`flip`|yes|to return the rows from older to newer (the default is newer to older)
+`seconds`|yes|to return the date and time as a unix timestamp
+`ms`|yes|to return the date and time as a unix timestamp in milliseconds
+`percent`|yes|to replace all values with their percentage over the row total
+`abs`|yes|to turn all values positive
+`null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)
+
+
+## Examples
+
+Get the system total bandwidth for all physical network interfaces, over the last hour,
+in 6 rows (one for every 10 minutes), in `csv` format.
+
+Netdata always returns bandwidth in `kilobits`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.net&format=csv&after=-3600&group=sum&points=6&options=abs'
+time,received,sent
+2018-10-26 23:50:00,90214.67847,215137.79762
+2018-10-26 23:40:00,90126.32286,238587.57522
+2018-10-26 23:30:00,86061.22688,213389.23526
+2018-10-26 23:20:00,85590.75164,206129.01608
+2018-10-26 23:10:00,83163.30691,194311.77384
+2018-10-26 23:00:00,85167.29657,197538.07773
+```
+
+---
+
+Get the max RAM used by the SQL server and any cron jobs, over the last hour, in 2 rows (one for every 30
+minutes), in `tsv` format, with the date and time as a unix timestamp.
+
+Netdata always returns memory in `MB`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=apps.mem&format=tsv&after=-3600&group=max&points=2&options=nonzero,seconds&dimensions=sql,cron'
+time	sql	cron
+1540598400	61.95703	0.25
+1540596600	61.95703	0.25
+```
+
+---
+
+Get an HTML table of the last 4 values (4 seconds) of system CPU utilization.
+
+Netdata always returns CPU utilization as `%`.
+ +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=html&after=-4&options=nonzero' +<html> +<center> +<table border="0" cellpadding="5" cellspacing="5"> +<tr><td>time</td><td>softirq</td><td>user</td><td>system</td></tr> +<tr><td>2018-10-27 00:16:07</td><td>0.25</td><td>1</td><td>0.75</td></tr> +<tr><td>2018-10-27 00:16:06</td><td>0</td><td>1.0025063</td><td>0.5012531</td></tr> +<tr><td>2018-10-27 00:16:05</td><td>0</td><td>1</td><td>0.75</td></tr> +<tr><td>2018-10-27 00:16:04</td><td>0</td><td>1.0025063</td><td>0.7518797</td></tr> +</table> +</center> +</html> +``` + +This is how it looks when rendered by a web browser: + +![image](https://user-images.githubusercontent.com/2662304/47597887-bafbf480-d99c-11e8-864a-d880bb8d2e5b.png) + + +--- + +Get a JSON array with the average bandwidth rate of the mysql server, over the last hour, in 6 values +(one every 10 minutes), and return the date and time in milliseconds: + +Netdata always returns bandwidth rates in `kilobits/s`. + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=csvjsonarray&after=-3600&points=6&group=average&options=abs,ms' +[ +["time","in","out"], +[1540599600000,0.7499986,120.2810185], +[1540599000000,0.7500019,120.2815509], +[1540598400000,0.7499999,120.2812319], +[1540597800000,0.7500044,120.2819634], +[1540597200000,0.7499968,120.2807337], +[1540596600000,0.7499988,120.2810527] +] +``` + +--- + +Get the number of processes started per minute, for the last 10 minutes, in `markdown` format: + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.forks&format=markdown&after=-600&points=10&group=sum' +time|started +:---:|:---: +2018-10-27 03:52:00|245.1706149 +2018-10-27 03:51:00|152.6654636 +2018-10-27 03:50:00|163.1755789 +2018-10-27 03:49:00|176.1574766 +2018-10-27 03:48:00|178.0137076 +2018-10-27 03:47:00|183.8306543 +2018-10-27 03:46:00|264.1635621 +2018-10-27 03:45:00|205.001551 +2018-10-27 03:44:00|7026.9852167 +2018-10-27 03:43:00|205.9904794 +``` + +And this is how it looks when formatted: + +time|started +:---:|:---: +2018-10-27 03:52:00|245.1706149 +2018-10-27 03:51:00|152.6654636 +2018-10-27 03:50:00|163.1755789 +2018-10-27 03:49:00|176.1574766 +2018-10-27 03:48:00|178.0137076 +2018-10-27 03:47:00|183.8306543 +2018-10-27 03:46:00|264.1635621 +2018-10-27 03:45:00|205.001551 +2018-10-27 03:44:00|7026.9852167 +2018-10-27 03:43:00|205.9904794 diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c new file mode 100644 index 000000000..53bf29828 --- /dev/null +++ b/web/api/formatters/csv/csv.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" +#include "csv.h" + +void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines) { + rrdset_check_rdlock(r->st); + + //info("RRD2CSV(): %s: BEGIN", r->st->id); + long c, i; + RRDDIM *d; + + // print the csv header + for(c = 0, i = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(!i) { + buffer_strcat(wb, startline); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, "time"); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + } + buffer_strcat(wb, separator); + if(options & 
RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, d->name); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + i++; + } + buffer_strcat(wb, endline); + + if(format == DATASOURCE_CSV_MARKDOWN) { + // print the --- line after header + for(c = 0, i = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(!i) { + buffer_strcat(wb, startline); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, ":---:"); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + } + buffer_strcat(wb, separator); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, ":---:"); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + i++; + } + buffer_strcat(wb, endline); + } + + if(!i) { + // no dimensions present + return; + } + + long start = 0, end = rrdr_rows(r), step = 1; + if(!(options & RRDR_OPTION_REVERSED)) { + start = rrdr_rows(r) - 1; + end = -1; + step = -1; + } + + // for each line in the array + calculated_number total = 1; + for(i = start; i != end ;i += step) { + calculated_number *cn = &r->v[ i * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; + + buffer_strcat(wb, betweenlines); + buffer_strcat(wb, startline); + + time_t now = r->t[i]; + + if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) { + // print the timestamp of the line + buffer_rrd_value(wb, (calculated_number)now); + // in ms + if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000"); + } + else { + // generate the local date time + struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); + if(!tm) { error("localtime() failed."); continue; } + buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); + } + + int set_min_max = 0; + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { + calculated_number n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + set_min_max = 1; + } + + // for each dimension + for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + buffer_strcat(wb, separator); + + calculated_number n = cn[c]; + + if(co[c] & RRDR_VALUE_EMPTY) { + if(options & RRDR_OPTION_NULL2ZERO) + buffer_strcat(wb, "0"); + else + buffer_strcat(wb, "null"); + } + else { + if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + n = n * 100 / total; + + if(unlikely(set_min_max)) { + r->min = r->max = n; + set_min_max = 0; + } + + if(n < r->min) r->min = n; + if(n > r->max) r->max = n; + } + + buffer_rrd_value(wb, n); + } + } + + buffer_strcat(wb, endline); + } + //info("RRD2CSV(): %s: END", r->st->id); +} diff --git a/web/api/formatters/csv/csv.h b/web/api/formatters/csv/csv.h new file mode 100644 index 000000000..a89742d34 --- /dev/null +++ b/web/api/formatters/csv/csv.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_CSV_H +#define NETDATA_API_FORMATTER_CSV_H + +#include "web/api/queries/rrdr.h" + +extern void 
rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines); + +#include "../rrd2json.h" + +#endif //NETDATA_API_FORMATTER_CSV_H diff --git a/web/api/formatters/json/Makefile.am b/web/api/formatters/json/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/formatters/json/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/json/Makefile.in b/web/api/formatters/json/Makefile.in new file mode 100644 index 000000000..5289347d0 --- /dev/null +++ b/web/api/formatters/json/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/formatters/json +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = 
@PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/json/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/formatters/json/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags-am uninstall uninstall-am


+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md
new file mode 100644
index 000000000..4f0bad5ca
--- /dev/null
+++ b/web/api/formatters/json/README.md
@@ -0,0 +1,150 @@
+# JSON formatter
+
+The JSON formatter presents [results of database queries](../../queries) in the following formats:
+
+format|content type|description
+:---:|:---:|:-----
+`json`|application/json|return the query result as a JSON object
+`jsonp`|application/json|return the query result as a JSONP JavaScript callback
+`datatable`|application/json|return the query result as a Google `datatable`
+`datasource`|application/json|return the query result as a Google Visualization Provider `datasource` JavaScript callback
+
+The JSON formatter respects the following API `&options=`:
+
+option|supported|description
+:---:|:---:|:---
+`google_json`|yes|enable the Google flavor of JSON (using double quotes for strings and the `Date()` function for dates)
+`objectrows`|yes|return each row as an object, instead of an array
+`nonzero`|yes|to return only the dimensions that have at least one non-zero value
+`flip`|yes|to return the rows from older to newer (the default is newer to older)
+`seconds`|yes|to return the date and time as a unix timestamp
+`ms`|yes|to return the date and time as a unix timestamp in milliseconds
+`percent`|yes|to replace all values with their percentage over the row total
+`abs`|yes|to turn all values positive
+`null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)
+
+## Examples
+
+To show the differences between each format, the following examples query the same
+chart (having just one dimension called `active`), changing only the query `format` and its `options`.
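+
+Any of these formats can additionally be wrapped in a JSON metadata envelope by adding
+`jsonwrap` to `&options=`. The envelope is produced by `rrdr_json_wrapper_begin()` and
+`rrdr_json_wrapper_end()` (see `web/api/formatters/json_wrapper.c`) and carries the query
+metadata around the `result` key. A minimal sketch follows: the request is real, but the
+values shown are illustrative placeholders, not captured output:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=jsonwrap'
+{
+ "api": 1,
+ "id": "nginx_local.connections",
+ "name": "nginx_local.connections",
+ "view_update_every": 600,
+ "update_every": 1,
+ "first_entry": 1540640400,
+ "last_entry": 1540644000,
+ "before": 1540644000,
+ "after": 1540640400,
+ "dimension_names": ["active"],
+ "dimension_ids": ["active"],
+ "latest_values": [230],
+ "view_latest_values": [230],
+ "dimensions": 1,
+ "points": 6,
+ "format": "json",
+ "result": { ... the payload of the selected format, as in the examples below ... },
+ "min": 220,
+ "max": 250
+}
+```
+
+`view_update_every` is the step of the returned points (here one every 10 minutes), while
+`update_every` is the collection step of the chart; `min` and `max` cover the values in `result`.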
+
+> Using `format=json` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options='
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+      [ 1540644600, 224.2516667],
+      [ 1540644000, 229.29],
+      [ 1540643400, 222.41],
+      [ 1540642800, 226.6816667],
+      [ 1540642200, 246.4083333],
+      [ 1540641600, 241.0966667]
+  ]
+}
+```
+
+> Using `format=json` and `options=objectrows`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+      { "time": 1540644600, "active": 224.2516667},
+      { "time": 1540644000, "active": 229.29},
+      { "time": 1540643400, "active": 222.41},
+      { "time": 1540642800, "active": 226.6816667},
+      { "time": 1540642200, "active": 246.4083333},
+      { "time": 1540641600, "active": 241.0966667}
+  ]
+}
+```
+
+> Using `format=json` and `options=objectrows,google_json`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows,google_json'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+      { "time": new Date(2018,9,27,12,50,0), "active": 224.2516667},
+      { "time": new Date(2018,9,27,12,40,0), "active": 229.29},
+      { "time": new Date(2018,9,27,12,30,0), "active": 222.41},
+      { "time": new Date(2018,9,27,12,20,0), "active": 226.6816667},
+      { "time": new Date(2018,9,27,12,10,0), "active": 246.4083333},
+      { "time": new Date(2018,9,27,12,0,0), "active": 241.0966667}
+  ]
+}
+```
+
+> Using `format=jsonp` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=jsonp&options='
+callback({
+ "labels": ["time", "active"],
+ "data":
+ [
+      [ 1540645200, 235.885],
+      [ 1540644600, 224.2516667],
+      [ 1540644000, 229.29],
+      [ 1540643400, 222.41],
+      [ 1540642800, 226.6816667],
+      [ 1540642200, 246.4083333]
+  ]
+});
+```
+
+> Using `format=datatable` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datatable&options='
+{
+ "cols":
+ [
+    {"id":"","label":"time","pattern":"","type":"datetime"},
+    {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
+    {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
+    {"id":"","label":"active","pattern":"","type":"number"}
+ ],
+ "rows":
+ [
+    {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
+    {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
+    {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
+    {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
+    {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
+    {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
+ ]
+}
+```
+
+> Using `format=datasource` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datasource&options='
+google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig:'1540645368',table:{
+ "cols":
+ [
+    {"id":"","label":"time","pattern":"","type":"datetime"},
+    {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
{"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}}, + {"id":"","label":"active","pattern":"","type":"number"} + ], + "rows": + [ + {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]}, + {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]}, + {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]}, + {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]}, + {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]}, + {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]} + ] +}}); +``` + diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c new file mode 100644 index 000000000..66b3b9c83 --- /dev/null +++ b/web/api/formatters/json/json.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "json.h" + +#define JSON_DATES_JS 1 +#define JSON_DATES_TIMESTAMP 2 + +void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) { + rrdset_check_rdlock(r->st); + + //info("RRD2JSON(): %s: BEGIN", r->st->id); + int row_annotations = 0, dates, dates_with_new = 0; + char kq[2] = "", // key quote + sq[2] = "", // string quote + pre_label[101] = "", // before each label + post_label[101] = "", // after each label + pre_date[101] = "", // the beginning of line, to the date + post_date[101] = "", // closing the date + pre_value[101] = "", // before each value + post_value[101] = "", // after each value + post_line[101] = "", // at the end of each row + normal_annotation[201] = "", // default row annotation + overflow_annotation[201] = "", // overflow row annotation + data_begin[101] = "", // between labels and values + finish[101] = ""; // at the end of everything + + if(datatable) { + dates = JSON_DATES_JS; + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + row_annotations = 1; + snprintfz(pre_date, 100, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq); + snprintfz(post_date, 100, "%s}", sq); + snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq); + snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq); + snprintfz(pre_value, 100, ",{%sv%s:", kq, kq); + strcpy(post_value, "}"); + strcpy(post_line, "]}"); + snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq); + strcpy(finish, "\n ]\n}"); + + snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq); + snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq); + + buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq); + buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq); + buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); + buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); + + // remove the valueobjects flag + // google wants its own keys + if(options & RRDR_OPTION_OBJECTSROWS) + options &= ~RRDR_OPTION_OBJECTSROWS; + } + else { + kq[0] = '"'; + 
sq[0] = '"'; + if(options & RRDR_OPTION_GOOGLE_JSON) { + dates = JSON_DATES_JS; + dates_with_new = 1; + } + else { + dates = JSON_DATES_TIMESTAMP; + dates_with_new = 0; + } + if( options & RRDR_OPTION_OBJECTSROWS ) + strcpy(pre_date, " { "); + else + strcpy(pre_date, " [ "); + strcpy(pre_label, ", \""); + strcpy(post_label, "\""); + strcpy(pre_value, ", "); + if( options & RRDR_OPTION_OBJECTSROWS ) + strcpy(post_line, "}"); + else + strcpy(post_line, "]"); + snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq); + strcpy(finish, "\n ]\n}"); + + buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq); + buffer_sprintf(wb, "%stime%s", sq, sq); + } + + // ------------------------------------------------------------------------- + // print the JSON header + + long c, i; + RRDDIM *rd; + + // print the header lines + for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + buffer_strcat(wb, pre_label); + buffer_strcat(wb, rd->name); + buffer_strcat(wb, post_label); + i++; + } + if(!i) { + buffer_strcat(wb, pre_label); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, post_label); + } + + // print the begin of row data + buffer_strcat(wb, data_begin); + + // if all dimensions are hidden, print a null + if(!i) { + buffer_strcat(wb, finish); + return; + } + + long start = 0, end = rrdr_rows(r), step = 1; + if(!(options & RRDR_OPTION_REVERSED)) { + start = rrdr_rows(r) - 1; + end = -1; + step = -1; + } + + // for each line in the array + calculated_number total = 1; + for(i = start; i != end ;i += step) { + calculated_number *cn = &r->v[ i * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; + + time_t now = r->t[i]; + + if(dates == JSON_DATES_JS) { + // generate the local date time + struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); + if(!tm) { error("localtime_r() failed."); continue; } + + if(likely(i != start)) buffer_strcat(wb, ",\n"); + buffer_strcat(wb, pre_date); + + if( options & RRDR_OPTION_OBJECTSROWS ) + buffer_sprintf(wb, "%stime%s: ", kq, kq); + + if(dates_with_new) + buffer_strcat(wb, "new "); + + buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); + + buffer_strcat(wb, post_date); + + if(row_annotations) { + // google supports one annotation per row + int annotation_found = 0; + for(c = 0, rd = r->st->dimensions; rd ;c++, rd = rd->next) { + if(unlikely(!(r->od[c] & RRDR_DIMENSION_SELECTED))) continue; + + if(co[c] & RRDR_VALUE_RESET) { + buffer_strcat(wb, overflow_annotation); + annotation_found = 1; + break; + } + } + if(!annotation_found) + buffer_strcat(wb, normal_annotation); + } + } + else { + // print the timestamp of the line + if(likely(i != start)) buffer_strcat(wb, ",\n"); + buffer_strcat(wb, pre_date); + + if( options & RRDR_OPTION_OBJECTSROWS ) + buffer_sprintf(wb, "%stime%s: ", kq, kq); + + buffer_rrd_value(wb, (calculated_number)r->t[i]); + // in ms + if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000"); + + buffer_strcat(wb, post_date); + } + + int set_min_max = 0; + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + calculated_number n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + set_min_max = 1; + } + + // for each dimension + 
for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + calculated_number n = cn[c]; + + buffer_strcat(wb, pre_value); + + if( options & RRDR_OPTION_OBJECTSROWS ) + buffer_sprintf(wb, "%s%s%s: ", kq, rd->name, kq); + + if(co[c] & RRDR_VALUE_EMPTY) { + if(options & RRDR_OPTION_NULL2ZERO) + buffer_strcat(wb, "0"); + else + buffer_strcat(wb, "null"); + } + else { + if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + n = n * 100 / total; + + if(unlikely(set_min_max)) { + r->min = r->max = n; + set_min_max = 0; + } + + if(n < r->min) r->min = n; + if(n > r->max) r->max = n; + } + + buffer_rrd_value(wb, n); + } + + buffer_strcat(wb, post_value); + } + + buffer_strcat(wb, post_line); + } + + buffer_strcat(wb, finish); + //info("RRD2JSON(): %s: END", r->st->id); +} diff --git a/web/api/formatters/json/json.h b/web/api/formatters/json/json.h new file mode 100644 index 000000000..01363ce01 --- /dev/null +++ b/web/api/formatters/json/json.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_JSON_H +#define NETDATA_API_FORMATTER_JSON_H + +#include "../rrd2json.h" + +extern void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable); + +#endif //NETDATA_API_FORMATTER_JSON_H diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c new file mode 100644 index 000000000..253883568 --- /dev/null +++ b/web/api/formatters/json_wrapper.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "json_wrapper.h" + +void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value) { + rrdset_check_rdlock(r->st); + + long rows = rrdr_rows(r); + long c, i; + RRDDIM *rd; + + //info("JSONWRAPPER(): %s: BEGIN", r->st->id); + char kq[2] = "", // key quote + sq[2] = ""; // string quote + + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + + buffer_sprintf(wb, "{\n" + " %sapi%s: 1,\n" + " %sid%s: %s%s%s,\n" + " %sname%s: %s%s%s,\n" + " %sview_update_every%s: %d,\n" + " %supdate_every%s: %d,\n" + " %sfirst_entry%s: %u,\n" + " %slast_entry%s: %u,\n" + " %sbefore%s: %u,\n" + " %safter%s: %u,\n" + " %sdimension_names%s: [" + , kq, kq + , kq, kq, sq, r->st->id, sq + , kq, kq, sq, r->st->name, sq + , kq, kq, r->update_every + , kq, kq, r->st->update_every + , kq, kq, (uint32_t)rrdset_first_entry_t(r->st) + , kq, kq, (uint32_t)rrdset_last_entry_t(r->st) + , kq, kq, (uint32_t)r->before + , kq, kq, (uint32_t)r->after + , kq, kq); + + for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + buffer_strcat(wb, sq); + buffer_strcat(wb, rd->name); + buffer_strcat(wb, sq); + i++; + } + if(!i) { +#ifdef NETDATA_INTERNAL_CHECKS + error("RRDR is empty for %s (RRDR has %d dimensions, options is 0x%08x)", r->st->id, r->d, options); +#endif + rows = 0; + buffer_strcat(wb, sq); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, sq); + } + + buffer_sprintf(wb, "],\n" + " %sdimension_ids%s: [" + , kq, kq); + + for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd 
= rd->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + buffer_strcat(wb, sq); + buffer_strcat(wb, rd->id); + buffer_strcat(wb, sq); + i++; + } + if(!i) { + rows = 0; + buffer_strcat(wb, sq); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, sq); + } + + buffer_sprintf(wb, "],\n" + " %slatest_values%s: [" + , kq, kq); + + for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + i++; + + storage_number n = rd->values[rrdset_last_slot(r->st)]; + + if(!does_storage_number_exist(n)) + buffer_strcat(wb, "null"); + else + buffer_rrd_value(wb, unpack_storage_number(n)); + } + if(!i) { + rows = 0; + buffer_strcat(wb, "null"); + } + + buffer_sprintf(wb, "],\n" + " %sview_latest_values%s: [" + , kq, kq); + + i = 0; + if(rows) { + calculated_number total = 1; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + calculated_number *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ]; + calculated_number n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + } + + for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + i++; + + calculated_number *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ]; + calculated_number n = cn[c]; + + if(co[c] & RRDR_VALUE_EMPTY) { + if(options & RRDR_OPTION_NULL2ZERO) + buffer_strcat(wb, "0"); + else + buffer_strcat(wb, "null"); + } + else { + if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) + n = n * 100 / total; + + buffer_rrd_value(wb, n); + } + } + } + if(!i) { + rows = 0; + buffer_strcat(wb, "null"); + } + + buffer_sprintf(wb, "],\n" + " %sdimensions%s: %ld,\n" + " %spoints%s: %ld,\n" + " %sformat%s: %s" + , kq, kq, i + , kq, kq, rows + , kq, kq, sq + ); + + rrdr_buffer_print_format(wb, format); + + buffer_sprintf(wb, "%s,\n" + " %sresult%s: " + , sq + , kq, kq + ); + + if(string_value) buffer_strcat(wb, sq); + //info("JSONWRAPPER(): %s: END", r->st->id); +} + +void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) { + (void)format; + + char kq[2] = "", // key quote + sq[2] = ""; // string quote + + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + + if(string_value) buffer_strcat(wb, sq); + + buffer_sprintf(wb, ",\n %smin%s: ", kq, kq); + buffer_rrd_value(wb, r->min); + buffer_sprintf(wb, ",\n %smax%s: ", kq, kq); + buffer_rrd_value(wb, r->max); + buffer_strcat(wb, "\n}\n"); +} diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h new file mode 100644 index 000000000..7cb7d3453 --- /dev/null +++ b/web/api/formatters/json_wrapper.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_JSON_WRAPPER_H 
+#define NETDATA_API_FORMATTER_JSON_WRAPPER_H + +#include "rrd2json.h" + +extern void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value); +extern void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value); + +#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c new file mode 100644 index 000000000..5cdcc80ff --- /dev/null +++ b/web/api/formatters/rrd2json.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web/api/web_api_v1.h" + +void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) { + rrdset2json(st, wb, NULL, NULL); +} + +void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) { + switch(format) { + case DATASOURCE_JSON: + buffer_strcat(wb, DATASOURCE_FORMAT_JSON); + break; + + case DATASOURCE_DATATABLE_JSON: + buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSON); + break; + + case DATASOURCE_DATATABLE_JSONP: + buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSONP); + break; + + case DATASOURCE_JSONP: + buffer_strcat(wb, DATASOURCE_FORMAT_JSONP); + break; + + case DATASOURCE_SSV: + buffer_strcat(wb, DATASOURCE_FORMAT_SSV); + break; + + case DATASOURCE_CSV: + buffer_strcat(wb, DATASOURCE_FORMAT_CSV); + break; + + case DATASOURCE_TSV: + buffer_strcat(wb, DATASOURCE_FORMAT_TSV); + break; + + case DATASOURCE_HTML: + buffer_strcat(wb, DATASOURCE_FORMAT_HTML); + break; + + case DATASOURCE_JS_ARRAY: + buffer_strcat(wb, DATASOURCE_FORMAT_JS_ARRAY); + break; + + case DATASOURCE_SSV_COMMA: + buffer_strcat(wb, DATASOURCE_FORMAT_SSV_COMMA); + break; + + default: + buffer_strcat(wb, "unknown"); + break; + } +} + +int rrdset2value_api_v1( + RRDSET *st + , BUFFER *wb + , calculated_number *n + , const char *dimensions + , long points + , long long after + , long long before + , int group_method + , long group_time + , uint32_t options + , time_t *db_after + , time_t *db_before + , int *value_is_null +) { + RRDR *r = rrd2rrdr(st, points, after, before, group_method, group_time, options, dimensions); + if(!r) { + if(value_is_null) *value_is_null = 1; + return 500; + } + + if(rrdr_rows(r) == 0) { + rrdr_free(r); + + if(db_after) *db_after = 0; + if(db_before) *db_before = 0; + if(value_is_null) *value_is_null = 1; + + return 400; + } + + if(wb) { + if (r->result_options & RRDR_RESULT_OPTION_RELATIVE) + buffer_no_cacheable(wb); + else if (r->result_options & RRDR_RESULT_OPTION_ABSOLUTE) + buffer_cacheable(wb); + } + + if(db_after) *db_after = r->after; + if(db_before) *db_before = r->before; + + long i = (!(options & RRDR_OPTION_REVERSED))?rrdr_rows(r) - 1:0; + *n = rrdr2value(r, i, options, value_is_null); + + rrdr_free(r); + return 200; +} + +int rrdset2anything_api_v1( + RRDSET *st + , BUFFER *wb + , BUFFER *dimensions + , uint32_t format + , long points + , long long after + , long long before + , int group_method + , long group_time + , uint32_t options + , time_t *latest_timestamp +) { + st->last_accessed_time = now_realtime_sec(); + + RRDR *r = rrd2rrdr(st, points, after, before, group_method, group_time, options, dimensions?buffer_tostring(dimensions):NULL); + if(!r) { + buffer_strcat(wb, "Cannot generate output with these parameters on this chart."); + return 500; + } + + if(r->result_options & RRDR_RESULT_OPTION_RELATIVE) + buffer_no_cacheable(wb); + else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE) + buffer_cacheable(wb); + + if(latest_timestamp && rrdr_rows(r) > 0) + *latest_timestamp = r->before; + + 
switch(format) { + case DATASOURCE_SSV: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1); + rrdr2ssv(r, wb, options, "", " ", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2ssv(r, wb, options, "", " ", ""); + } + break; + + case DATASOURCE_SSV_COMMA: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1); + rrdr2ssv(r, wb, options, "", ",", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2ssv(r, wb, options, "", ",", ""); + } + break; + + case DATASOURCE_JS_ARRAY: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 0); + rrdr2ssv(r, wb, options, "[", ",", "]"); + rrdr_json_wrapper_end(r, wb, format, options, 0); + } + else { + wb->contenttype = CT_APPLICATION_JSON; + rrdr2ssv(r, wb, options, "[", ",", "]"); + } + break; + + case DATASOURCE_CSV: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1); + rrdr2csv(r, wb, format, options, "", ",", "\\n", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2csv(r, wb, format, options, "", ",", "\r\n", ""); + } + break; + + case DATASOURCE_CSV_MARKDOWN: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1); + rrdr2csv(r, wb, format, options, "", "|", "\\n", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2csv(r, wb, format, options, "", "|", "\r\n", ""); + } + break; + + case DATASOURCE_CSV_JSON_ARRAY: + wb->contenttype = CT_APPLICATION_JSON; + if(options & RRDR_OPTION_JSON_WRAP) { + rrdr_json_wrapper_begin(r, wb, format, options, 0); + buffer_strcat(wb, "[\n"); + rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); + buffer_strcat(wb, "\n]"); + rrdr_json_wrapper_end(r, wb, format, options, 0); + } + else { + wb->contenttype = CT_APPLICATION_JSON; + buffer_strcat(wb, "[\n"); + rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); + buffer_strcat(wb, "\n]"); + } + break; + + case DATASOURCE_TSV: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1); + rrdr2csv(r, wb, format, options, "", "\t", "\\n", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2csv(r, wb, format, options, "", "\t", "\r\n", ""); + } + break; + + case DATASOURCE_HTML: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1); + buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n"); + rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", ""); + buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n"); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_HTML; + buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n"); + rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", ""); + 
buffer_strcat(wb, "</table>\n</center>\n</html>\n"); + } + break; + + case DATASOURCE_DATATABLE_JSONP: + wb->contenttype = CT_APPLICATION_X_JAVASCRIPT; + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0); + + rrdr2json(r, wb, options, 1); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + + case DATASOURCE_DATATABLE_JSON: + wb->contenttype = CT_APPLICATION_JSON; + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0); + + rrdr2json(r, wb, options, 1); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + + case DATASOURCE_JSONP: + wb->contenttype = CT_APPLICATION_X_JAVASCRIPT; + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0); + + rrdr2json(r, wb, options, 0); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + + case DATASOURCE_JSON: + default: + wb->contenttype = CT_APPLICATION_JSON; + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0); + + rrdr2json(r, wb, options, 0); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + } + + rrdr_free(r); + return 200; +} diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h new file mode 100644 index 000000000..bac6130de --- /dev/null +++ b/web/api/formatters/rrd2json.h @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRD2JSON_H +#define NETDATA_RRD2JSON_H 1 + +#include "web/api/web_api_v1.h" +#include "web/api/exporters/allmetrics.h" +#include "web/api/queries/rrdr.h" + +#include "web/api/formatters/csv/csv.h" +#include "web/api/formatters/ssv/ssv.h" +#include "web/api/formatters/json/json.h" +#include "web/api/formatters/value/value.h" + +#include "web/api/formatters/rrdset2json.h" +#include "web/api/formatters/charts2json.h" +#include "web/api/formatters/json_wrapper.h" + +#define HOSTNAME_MAX 1024 + +#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) + +// type of JSON generations +#define DATASOURCE_INVALID (-1) +#define DATASOURCE_JSON 0 +#define DATASOURCE_DATATABLE_JSON 1 +#define DATASOURCE_DATATABLE_JSONP 2 +#define DATASOURCE_SSV 3 +#define DATASOURCE_CSV 4 +#define DATASOURCE_JSONP 5 +#define DATASOURCE_TSV 6 +#define DATASOURCE_HTML 7 +#define DATASOURCE_JS_ARRAY 8 +#define DATASOURCE_SSV_COMMA 9 +#define DATASOURCE_CSV_JSON_ARRAY 10 +#define DATASOURCE_CSV_MARKDOWN 11 + +#define DATASOURCE_FORMAT_JSON "json" +#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable" +#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource" +#define DATASOURCE_FORMAT_JSONP "jsonp" +#define DATASOURCE_FORMAT_SSV "ssv" +#define DATASOURCE_FORMAT_CSV "csv" +#define DATASOURCE_FORMAT_TSV "tsv" +#define DATASOURCE_FORMAT_HTML "html" +#define DATASOURCE_FORMAT_JS_ARRAY "array" +#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma" +#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray" +#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown" + +extern void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb); +extern void rrdr_buffer_print_format(BUFFER *wb, uint32_t format); + +extern int rrdset2anything_api_v1( + RRDSET *st + , BUFFER *wb + , BUFFER *dimensions + , uint32_t format + , long points + , long long after + , long long before + , int group_method + , long group_time + , uint32_t options + , time_t *latest_timestamp +); + +extern int rrdset2value_api_v1( + RRDSET *st + 
, BUFFER *wb + , calculated_number *n + , const char *dimensions + , long points + , long long after + , long long before + , int group_method + , long group_time + , uint32_t options + , time_t *db_after + , time_t *db_before + , int *value_is_null +); + +#endif /* NETDATA_RRD2JSON_H */ diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c new file mode 100644 index 000000000..6d57e34cf --- /dev/null +++ b/web/api/formatters/rrdset2json.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdset2json.h" + +// generate JSON for the /api/v1/chart API call + +void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used) { + rrdset_rdlock(st); + + buffer_sprintf(wb, + "\t\t{\n" + "\t\t\t\"id\": \"%s\",\n" + "\t\t\t\"name\": \"%s\",\n" + "\t\t\t\"type\": \"%s\",\n" + "\t\t\t\"family\": \"%s\",\n" + "\t\t\t\"context\": \"%s\",\n" + "\t\t\t\"title\": \"%s (%s)\",\n" + "\t\t\t\"priority\": %ld,\n" + "\t\t\t\"plugin\": \"%s\",\n" + "\t\t\t\"module\": \"%s\",\n" + "\t\t\t\"enabled\": %s,\n" + "\t\t\t\"units\": \"%s\",\n" + "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n" + "\t\t\t\"chart_type\": \"%s\",\n" + "\t\t\t\"duration\": %ld,\n" + "\t\t\t\"first_entry\": %ld,\n" + "\t\t\t\"last_entry\": %ld,\n" + "\t\t\t\"update_every\": %d,\n" + "\t\t\t\"dimensions\": {\n" + , st->id + , st->name + , st->type + , st->family + , st->context + , st->title, st->name + , st->priority + , st->plugin_name?st->plugin_name:"" + , st->module_name?st->module_name:"" + , rrdset_flag_check(st, RRDSET_FLAG_ENABLED)?"true":"false" + , st->units + , st->name + , rrdset_type_name(st->chart_type) + , st->entries * st->update_every + , rrdset_first_entry_t(st) + , rrdset_last_entry_t(st) + , st->update_every + ); + + unsigned long memory = st->memsize; + + size_t dimensions = 0; + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) continue; + + memory += rd->memsize; + + buffer_sprintf( + wb + , "%s" + "\t\t\t\t\"%s\": { \"name\": \"%s\" }" + , dimensions ? ",\n" : "" + , rd->id + , rd->name + ); + + dimensions++; + } + + if(dimensions_count) *dimensions_count += dimensions; + if(memory_used) *memory_used += memory; + + buffer_strcat(wb, "\n\t\t\t},\n\t\t\t\"green\": "); + buffer_rrd_value(wb, st->green); + buffer_strcat(wb, ",\n\t\t\t\"red\": "); + buffer_rrd_value(wb, st->red); + + buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n"); + size_t alarms = 0; + RRDCALC *rc; + for(rc = st->alarms; rc ; rc = rc->rrdset_next) { + + buffer_sprintf( + wb + , "%s" + "\t\t\t\t\"%s\": {\n" + "\t\t\t\t\t\"id\": %u,\n" + "\t\t\t\t\t\"status\": \"%s\",\n" + "\t\t\t\t\t\"units\": \"%s\",\n" + "\t\t\t\t\t\"update_every\": %d\n" + "\t\t\t\t}" + , (alarms) ? 
",\n" : "" + , rc->name + , rc->id + , rrdcalc_status2string(rc->status) + , rc->units + , rc->update_every + ); + + alarms++; + } + + buffer_sprintf(wb, + "\n\t\t\t}\n\t\t}" + ); + + rrdset_unlock(st); +} diff --git a/web/api/formatters/rrdset2json.h b/web/api/formatters/rrdset2json.h new file mode 100644 index 000000000..b2669ec0a --- /dev/null +++ b/web/api/formatters/rrdset2json.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_RRDSET2JSON_H +#define NETDATA_API_FORMATTER_RRDSET2JSON_H + +#include "rrd2json.h" + +extern void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used); + +#endif //NETDATA_API_FORMATTER_RRDSET2JSON_H diff --git a/web/api/formatters/ssv/Makefile.am b/web/api/formatters/ssv/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/formatters/ssv/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/ssv/Makefile.in b/web/api/formatters/ssv/Makefile.in new file mode 100644 index 000000000..c7c8c3fa9 --- /dev/null +++ b/web/api/formatters/ssv/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/formatters/ssv +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/ssv/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/formatters/ssv/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md new file mode 100644 index 000000000..74f0d3189 --- /dev/null +++ b/web/api/formatters/ssv/README.md @@ -0,0 +1,52 @@ +# SSV formatter + +The SSV formatter sums all dimensions in [results of database queries](../../queries) +to a single value and returns a list of such values showing how it changes through time. 
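+
+For intuition, here is a minimal standalone sketch of that walk (plain C with
+made-up helper names — an illustration, not netdata's internal API): values are
+joined with a prefix/separator/suffix triplet, and the `flip` option (described
+below) reverses the direction of the walk. The real implementation, `rrdr2ssv()`,
+appears later in this patch.
+
+```c
+#include <stdio.h>
+
+/* Hypothetical stand-in for the formatter's inner loop: join `rows` values
+ * with prefix/separator/suffix, walking newest-to-oldest by default and
+ * oldest-to-newest when `flip` is set (values are stored oldest first). */
+static void join_values(const double *v, long rows, int flip,
+                        const char *prefix, const char *sep, const char *suffix) {
+    long start = flip ? 0 : rows - 1;
+    long end   = flip ? rows : -1;
+    long step  = flip ? 1 : -1;
+
+    fputs(prefix, stdout);
+    for(long i = start; i != end; i += step) {
+        if(i != start) fputs(sep, stdout);
+        printf("%0.7f", v[i]);
+    }
+    fputs(suffix, stdout);
+    fputc('\n', stdout);
+}
+
+int main(void) {
+    double v[] = { 1.741352, 1.6800467, 1.769411 }; /* oldest first */
+    join_values(v, 3, 0, "", " ", "");              /* ssv:   newest to oldest */
+    join_values(v, 3, 1, "[", ",", "]");            /* array: oldest to newest */
+    return 0;
+}
+```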
+
+It supports the following formats:
+
+format|content type|description
+:---:|:---:|:-----
+`ssv`|text/plain|a space-separated list of values
+`ssvcomma`|text/plain|a comma-separated list of values
+`array`|application/json|a JSON array
+
+The SSV formatter respects the following API `&options=`:
+
+option|supported|description
+:---:|:---:|:---
+`nonzero`|yes|to return only the dimensions that have at least a non-zero value
+`flip`|yes|to return the numbers older to newer (the default is newer to older)
+`percent`|yes|to replace all values with their percentage over the row total
+`abs`|yes|to turn all values positive, before using them
+`min2max`|yes|to return the delta from the minimum value to the maximum value (across dimensions)
+
+## Examples
+
+Get the average system CPU utilization of the last hour, in 6 values (one every 10 minutes):
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=ssv&after=-3600&points=6&group=average'
+1.741352 1.6800467 1.769411 1.6761112 1.629862 1.6807968
+```
+
+---
+
+Get the total mysql bandwidth (in + out) for the last hour, in 6 values (one every 10 minutes):
+
+Netdata returns bandwidth in `kilobits`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=ssvcomma&after=-3600&points=6&group=sum&options=abs'
+72618.7936215,72618.778889,72618.788084,72618.9195918,72618.7760612,72618.6712421
+```
+
+---
+
+Get the web server max connections for the last hour, in 12 values (one every 5 minutes)
+in a JSON array:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&format=array&after=-3600&points=12&group=max'
+[278,258,268,239,259,260,243,266,278,318,264,258]
+```
diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c
new file mode 100644
index 000000000..eeba0283d
--- /dev/null
+++ b/web/api/formatters/ssv/ssv.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ssv.h"
+
+void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix) {
+    //info("RRD2SSV(): %s: BEGIN", r->st->id);
+    long i;
+
+    buffer_strcat(wb, prefix);
+    long start = 0, end = rrdr_rows(r), step = 1;
+    if(!(options & RRDR_OPTION_REVERSED)) {
+        start = rrdr_rows(r) - 1;
+        end = -1;
+        step = -1;
+    }
+
+    // for each line in the array
+    for(i = start; i != end ;i += step) {
+        int all_values_are_null = 0;
+        calculated_number v = rrdr2value(r, i, options, &all_values_are_null);
+
+        if(likely(i != start)) {
+            if(r->min > v) r->min = v;
+            if(r->max < v) r->max = v;
+        }
+        else {
+            r->min = v;
+            r->max = v;
+        }
+
+        if(likely(i != start))
+            buffer_strcat(wb, separator);
+
+        if(all_values_are_null) {
+            if(options & RRDR_OPTION_NULL2ZERO)
+                buffer_strcat(wb, "0");
+            else
+                buffer_strcat(wb, "null");
+        }
+        else
+            buffer_rrd_value(wb, v);
+    }
+    buffer_strcat(wb, suffix);
+    //info("RRD2SSV(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/ssv/ssv.h b/web/api/formatters/ssv/ssv.h
new file mode 100644
index 000000000..6963dcf6e
--- /dev/null
+++ b/web/api/formatters/ssv/ssv.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_SSV_H
+#define NETDATA_API_FORMATTER_SSV_H
+
+#include "../rrd2json.h"
+
+extern void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix);
+
+#endif //NETDATA_API_FORMATTER_SSV_H
diff --git a/web/api/formatters/value/Makefile.am
b/web/api/formatters/value/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/formatters/value/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/value/Makefile.in b/web/api/formatters/value/Makefile.in new file mode 100644 index 000000000..eb0b2bddf --- /dev/null +++ b/web/api/formatters/value/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/formatters/value +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + 
$(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ 
+ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/value/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/formatters/value/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md new file mode 100644 index 000000000..5024e74ee --- /dev/null +++ b/web/api/formatters/value/README.md @@ -0,0 +1,17 @@ +# Value formatter + +The Value formatter presents [results of database queries](../../queries) as a single value. + +To calculate the single value to be returned, it sums the values of all dimensions. + +The Value formatter respects the following API `&options=`: + +option|supported|description +:---:|:---:|:--- +`percent`|yes|to replace all values with their percentage over the row total +`abs`|yes|to turn all values positive, before using them +`min2max`|yes|to return the delta from the minimum value to the maximum value (across dimensions) + +The Value formatter is not exposed by the API by itself. +Instead it is used by the [`ssv`](../ssv) formatter +and [health monitoring queries](../../../../health). 
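+
+As a rough illustration, here is a standalone sketch of that reduction in plain C
+over a bare array of doubles (hypothetical names — the real `rrdr2value()` in
+`value.c` below additionally handles hidden dimensions, empty values and percentages):
+
+```c
+#include <stdio.h>
+
+/* Reduce one row of per-dimension values to a single number:
+ * the sum by default, or max-min when `min2max` is requested;
+ * `abs_values` mirrors the `abs` option. */
+static double reduce_row(const double *v, int n, int abs_values, int min2max) {
+    double sum = 0, min = 0, max = 0;
+    for(int c = 0; c < n; c++) {
+        double x = v[c];
+        if(abs_values && x < 0) x = -x;
+        if(c == 0) min = max = x;
+        if(x < min) min = x;
+        if(x > max) max = x;
+        sum += x;
+    }
+    return min2max ? (max - min) : sum;
+}
+
+int main(void) {
+    double row[] = { 3.5, -1.25, 0.75 };
+    printf("%g\n", reduce_row(row, 3, 0, 0)); /* sum:         3    */
+    printf("%g\n", reduce_row(row, 3, 1, 0)); /* abs sum:     5.5  */
+    printf("%g\n", reduce_row(row, 3, 1, 1)); /* abs min2max: 2.75 */
+    return 0;
+}
+```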
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c new file mode 100644 index 000000000..aea6c162b --- /dev/null +++ b/web/api/formatters/value/value.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "value.h" + + +inline calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null) { + rrdset_check_rdlock(r->st); + + long c; + RRDDIM *d; + + calculated_number *cn = &r->v[ i * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; + + calculated_number sum = 0, min = 0, max = 0, v; + int all_null = 1, init = 1; + + calculated_number total = 1; + int set_min_max = 0; + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { + calculated_number n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + set_min_max = 1; + } + + // for each dimension + for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + calculated_number n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + n = n * 100 / total; + + if(unlikely(set_min_max)) { + r->min = r->max = n; + set_min_max = 0; + } + + if(n < r->min) r->min = n; + if(n > r->max) r->max = n; + } + + if(unlikely(init)) { + if(n > 0) { + min = 0; + max = n; + } + else { + min = n; + max = 0; + } + init = 0; + } + + if(likely(!(co[c] & RRDR_VALUE_EMPTY))) { + all_null = 0; + sum += n; + } + + if(n < min) min = n; + if(n > max) max = n; + } + + if(unlikely(all_null)) { + if(likely(all_values_are_null)) + *all_values_are_null = 1; + return 0; + } + else { + if(likely(all_values_are_null)) + *all_values_are_null = 0; + } + + if(options & RRDR_OPTION_MIN2MAX) + v = max - min; + else + v = sum; + + return v; +} diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h new file mode 100644 index 000000000..d9e981f88 --- /dev/null +++ b/web/api/formatters/value/value.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_VALUE_H +#define NETDATA_API_FORMATTER_VALUE_H + +#include "../rrd2json.h" + +extern calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null); + +#endif //NETDATA_API_FORMATTER_VALUE_H diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json new file mode 100644 index 000000000..8ee1a8a72 --- /dev/null +++ b/web/api/netdata-swagger.json @@ -0,0 +1,777 @@ +{ + "swagger": "2.0", + "info": { + "title": "NetData API", + "description": "Real-time performance and health monitoring.", + "version": "1.9.11_rolling" + }, + "host": "registry.my-netdata.io", + "schemes": [ + "https", + "http" + ], + "basePath": "/api/v1", + "produces": [ + "application/json" + ], + "paths": { + "/charts": { + "get": { + "summary": "Get a list of all charts available at the server", + "description": "The charts endpoint returns a summary about all charts stored in the netdata server.", + "responses": { + "200": { + "description": "An array of charts", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/chart_summary" + } + } + } + } + } + }, + "/chart": { + "get": { + "summary": "Get info about a specific chart", + 
"description": "The Chart endpoint returns detailed information about a chart.", + "parameters": [ + { + "name": "chart", + "in": "query", + "description": "The id of the chart as returned by the /charts call.", + "required": true, + "type": "string", + "format": "as returned by /charts", + "default": "system.cpu" + } + ], + "responses": { + "200": { + "description": "A javascript object with detailed information about the chart.", + "schema": { + "$ref": "#/definitions/chart" + } + }, + "404": { + "description": "No chart with the given id is found." + } + } + } + }, + "/data": { + "get": { + "summary": "Get collected data for a specific chart", + "description": "The Data endpoint returns data stored in the round robin database of a chart.\n", + "parameters": [ + { + "name": "chart", + "in": "query", + "description": "The id of the chart as returned by the /charts call.", + "required": true, + "type": "string", + "format": "as returned by /charts", + "allowEmptyValue": false, + "default": "system.cpu" + }, + { + "name": "dimension", + "in": "query", + "description": "zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.", + "required": false, + "type": "array", + "items": { + "type": "string", + "collectionFormat": "pipes", + "format": "as returned by /charts" + }, + "allowEmptyValue": false + }, + { + "name": "after", + "in": "query", + "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative, relative to parameter: before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is the beginning of the round robin database (i.e. by default netdata will attempt to return data for the entire database).", + "required": true, + "type": "number", + "format": "integer", + "allowEmptyValue": false, + "default": -600 + }, + { + "name": "before", + "in": "query", + "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is zero (i.e. the timestamp of the last value collected).", + "required": false, + "type": "number", + "format": "integer", + "default": 0 + }, + { + "name": "points", + "in": "query", + "description": "The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the round robin database for this chart for the given duration, all the available collected values for the given duration will be returned.", + "required": true, + "type": "number", + "format": "integer", + "allowEmptyValue": false, + "default": 20 + }, + { + "name": "group", + "in": "query", + "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". 
\"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).", + "required": true, + "type": "string", + "enum": [ + "min", + "max", + "average", + "median", + "stddev", + "sum", + "incremental-sum" + ], + "default": "average", + "allowEmptyValue": false + }, + { + "name": "gtime", + "in": "query", + "description": "The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).", + "required": false, + "type": "number", + "format": "integer", + "allowEmptyValue": false, + "default": 0 + }, + { + "name": "format", + "in": "query", + "description": "The format of the data to be returned.", + "required": true, + "type": "string", + "enum": [ + "json", + "jsonp", + "csv", + "tsv", + "tsv-excel", + "ssv", + "ssvcomma", + "datatable", + "datasource", + "html", + "markdown", + "array", + "csvjsonarray" + ], + "default": "json", + "allowEmptyValue": false + }, + { + "name": "options", + "in": "query", + "description": "Options that affect data generation.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "nonzero", + "flip", + "jsonwrap", + "min2max", + "seconds", + "milliseconds", + "abs", + "absolute", + "absolute-sum", + "null2zero", + "objectrows", + "google_json", + "percentage", + "unaligned", + "match-ids", + "match-names" + ], + "collectionFormat": "pipes" + }, + "default": [ + "seconds", + "jsonwrap" + ], + "allowEmptyValue": false + }, + { + "name": "callback", + "in": "query", + "description": "For JSONP responses, the callback function name.", + "required": false, + "type": "string", + "allowEmptyValue": true + }, + { + "name": "filename", + "in": "query", + "description": "Add Content-Disposition: attachment; filename=<filename> header to the response, that will instruct the browser to save the response with the given filename.", + "required": false, + "type": "string", + "allowEmptyValue": true + }, + { + "name": "tqx", + "in": "query", + "description": "[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.", + "required": false, + "type": "string", + "allowEmptyValue": true + } + ], + "responses": { + "200": { + "description": "The call was successful. The response should include the data.", + "schema": { + "$ref": "#/definitions/chart" + } + }, + "400": { + "description": "Bad request - the body will include a message stating what is wrong." + }, + "404": { + "description": "No chart with the given id is found." + }, + "500": { + "description": "Internal server error. This usually means the server is out of memory." 
+ }
+ }
+ }
+ },
+ "/badge.svg": {
+ "get": {
+ "summary": "Generate an SVG image for a chart (or dimension)",
+ "description": "Successful responses are SVG images.\n",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "type": "string",
+ "format": "as returned by /charts",
+ "allowEmptyValue": false,
+ "default": "system.cpu"
+ },
+ {
+ "name": "alarm",
+ "in": "query",
+ "description": "The name of an alarm linked to the chart.",
+ "required": false,
+ "type": "string",
+ "format": "any text",
+ "allowEmptyValue": true
+ },
+ {
+ "name": "dimension",
+ "in": "query",
+ "description": "Zero, one or more dimension ids, as returned by the /chart call.",
+ "required": false,
+ "type": "array",
+ "items": {
+ "type": "string",
+ "collectionFormat": "pipes",
+ "format": "as returned by /charts"
+ },
+ "allowEmptyValue": false
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetic. Netdata will adapt this parameter to the boundaries of the round robin database.",
+ "required": true,
+ "type": "number",
+ "format": "integer",
+ "allowEmptyValue": false,
+ "default": -600
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetic. Netdata will adapt this parameter to the boundaries of the round robin database.",
+ "required": false,
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameter defines the method of grouping. The supported methods are \"min\", \"max\", \"average\", \"median\", \"stddev\", \"sum\" and \"incremental-sum\".
\"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).", + "required": true, + "type": "string", + "enum": [ + "min", + "max", + "average", + "median", + "stddev", + "sum", + "incremental-sum" + ], + "default": "average", + "allowEmptyValue": false + }, + { + "name": "options", + "in": "query", + "description": "Options that affect data generation.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "abs", + "absolute", + "display-absolute", + "absolute-sum", + "null2zero", + "percentage", + "unaligned" + ], + "collectionFormat": "pipes" + }, + "default": [ + "absolute" + ], + "allowEmptyValue": true + }, + { + "name": "label", + "in": "query", + "description": "a text to be used as the label", + "required": false, + "type": "string", + "format": "any text", + "allowEmptyValue": true + }, + { + "name": "units", + "in": "query", + "description": "a text to be used as the units", + "required": false, + "type": "string", + "format": "any text", + "allowEmptyValue": true + }, + { + "name": "label_color", + "in": "query", + "description": "a color to be used for the background of the label", + "required": false, + "type": "string", + "format": "any text", + "allowEmptyValue": true + }, + { + "name": "value_color", + "in": "query", + "description": "a color to be used for the background of the label. You can set multiple using a pipe with a condition each, like this: color<value|color>value|color:null The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists).", + "required": false, + "type": "string", + "format": "any text", + "allowEmptyValue": true + }, + { + "name": "multiply", + "in": "query", + "description": "multiply the value with this number for rendering it at the image (integer value required)", + "required": false, + "type": "number", + "format": "integer", + "allowEmptyValue": true + }, + { + "name": "divide", + "in": "query", + "description": "divide the value with this number for rendering it at the image (integer value required)", + "required": false, + "type": "number", + "format": "integer", + "allowEmptyValue": true + }, + { + "name": "scale", + "in": "query", + "description": "set the scale of the badge (greater or equal to 100)", + "required": false, + "type": "number", + "format": "integer", + "allowEmptyValue": true + } + ], + "responses": { + "200": { + "description": "The call was successful. The response should be an SVG image." + }, + "400": { + "description": "Bad request - the body will include a message stating what is wrong." + }, + "404": { + "description": "No chart with the given id is found." + }, + "500": { + "description": "Internal server error. This usually means the server is out of memory." 
+ }
+ }
+ }
+ },
+ "/allmetrics": {
+ "get": {
+ "summary": "Get the latest value of all the metrics maintained by netdata",
+ "description": "The allmetrics endpoint returns the latest value of all charts and dimensions stored in the netdata server.",
+ "parameters": [
+ {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the response to be returned",
+ "required": true,
+ "type": "string",
+ "enum": [
+ "shell",
+ "prometheus",
+ "prometheus_all_hosts",
+ "json"
+ ],
+ "default": "shell"
+ },
+ {
+ "name": "help",
+ "in": "query",
+ "description": "Enable or disable HELP lines in the prometheus output",
+ "required": false,
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ },
+ {
+ "name": "types",
+ "in": "query",
+ "description": "Enable or disable TYPE lines in the prometheus output",
+ "required": false,
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ },
+ {
+ "name": "timestamps",
+ "in": "query",
+ "description": "Enable or disable timestamps in the prometheus output",
+ "required": false,
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ },
+ {
+ "name": "names",
+ "in": "query",
+ "description": "When enabled, netdata will report dimension names. When disabled, netdata will report dimension IDs. The default is controlled in netdata.conf.",
+ "required": false,
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ },
+ {
+ "name": "server",
+ "in": "query",
+ "description": "Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.",
+ "required": false,
+ "type": "string",
+ "format": "any text"
+ },
+ {
+ "name": "prefix",
+ "in": "query",
+ "description": "Prefix all prometheus metrics with this string.",
+ "required": false,
+ "type": "string",
+ "format": "any text"
+ },
+ {
+ "name": "data",
+ "in": "query",
+ "description": "Select the prometheus response data source. The default is controlled in netdata.conf.",
+ "required": false,
+ "type": "string",
+ "enum": [
+ "as-collected",
+ "average",
+ "sum"
+ ],
+ "default": "average"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "All the metrics returned in the format requested"
+ },
+ "400": {
+ "description": "The format requested is not supported"
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "chart_summary": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "The hostname of the netdata server."
+ },
+ "version": {
+ "type": "string",
+ "description": "The netdata version of the server."
+ },
+ "os": {
+ "type": "string",
+ "description": "The netdata server host operating system.",
+ "enum": [
+ "macos",
+ "linux",
+ "freebsd"
+ ]
+ },
+ "history": {
+ "type": "number",
+ "description": "The duration, in seconds, of the round robin database maintained by netdata."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The default update frequency of the netdata server. All charts have an update frequency equal to or greater than this."
+ },
+ "charts": {
+ "type": "object",
+ "description": "An object containing all the chart objects available at the netdata server. This is used as an indexed array. The key of each chart object is the id of the chart.",
+ "properties": {
+ "key": {
+ "$ref": "#/definitions/chart"
+ }
+ }
+ },
+ "charts_count": {
+ "type": "number",
+ "description": "The number of charts."
+ },
+ "dimensions_count": {
+ "type": "number",
+ "description": "The total number of dimensions."
+ },
+ "alarms_count": {
+ "type": "number",
+ "description": "The number of alarms."
+ },
+ "rrd_memory_bytes": {
+ "type": "number",
+ "description": "The size of the round robin database in bytes."
+ }
+ }
+ },
+ "chart": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The unique id of the chart"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the chart"
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of the chart. Types are not handled by netdata. You can use this field for anything you like."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the chart. Families are not handled by netdata. You can use this field for anything you like."
+ },
+ "title": {
+ "type": "string",
+ "description": "The title of the chart."
+ },
+ "priority": {
+ "type": "string",
+ "description": "The relative priority of the chart. NetData does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family."
+ },
+ "enabled": {
+ "type": "boolean",
+ "description": "True when the chart is enabled. Disabled charts do not currently collect values, but they may have historical values available."
+ },
+ "units": {
+ "type": "string",
+ "description": "The unit of measurement for the values of all dimensions of the chart."
+ },
+ "data_url": {
+ "type": "string",
+ "description": "The absolute path to get data values for this chart. You are expected to use this path as the base when constructing the URL to fetch data values for this chart."
+ },
+ "chart_type": {
+ "type": "string",
+ "description": "The chart type.",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ]
+ },
+ "duration": {
+ "type": "number",
+ "description": "The duration, in seconds, of the round robin database maintained by netdata."
+ },
+ "first_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the round robin database."
+ },
+ "last_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the round robin database."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database."
+ },
+ "dimensions": {
+ "type": "object",
+ "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. The key of the object is the id of the dimension.",
+ "properties": {
+ "key": {
+ "$ref": "#/definitions/dimension"
+ }
+ }
+ },
+ "green": {
+ "type": "number",
+ "description": "Chart health green threshold"
+ },
+ "red": {
+ "type": "number",
+ "description": "Chart health red threshold"
+ }
+ }
+ },
+ "dimension": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the dimension"
+ }
+ }
+ },
+ "json_wrap": {
+ "type": "object",
+ "properties": {
+ "api": {
+ "type": "number",
+ "description": "The API version this conforms to, currently 1"
+ },
+ "id": {
+ "type": "string",
+ "description": "The unique id of the chart"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the chart"
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The update frequency of this chart, in seconds.
One value every this amount of time is kept in the round robin database (independently of the current view)."
+ },
+ "view_update_every": {
+ "type": "number",
+ "description": "The update frequency appropriate for the current view of this chart, in seconds. There is no point in requesting chart refreshes, using the same settings, more frequently than this."
+ },
+ "first_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the round robin database (independently of the current view)."
+ },
+ "last_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the round robin database (independently of the current view)."
+ },
+ "after": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) returned in this response."
+ },
+ "before": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry returned in this response."
+ },
+ "min": {
+ "type": "number",
+ "description": "The minimum value returned in the current view. This can be used to size the y-series of the chart."
+ },
+ "max": {
+ "type": "number",
+ "description": "The maximum value returned in the current view. This can be used to size the y-series of the chart."
+ },
+ "dimension_names": {
+ "description": "The dimension names of the chart as returned in the current view.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "dimension_ids": {
+ "description": "The dimension IDs of the chart as returned in the current view.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "latest_values": {
+ "description": "The latest values collected for the chart (independently of the current view).",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "view_latest_values": {
+ "description": "The latest values returned with this response.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "dimensions": {
+ "type": "number",
+ "description": "The number of dimensions returned."
+ },
+ "points": {
+ "type": "number",
+ "description": "The number of rows / points returned."
+ },
+ "format": {
+ "type": "string",
+ "description": "The format of the result returned."
+ },
+ "result": {
+ "description": "The result requested, in the format requested."
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
new file mode 100644
index 000000000..58f19198f
--- /dev/null
+++ b/web/api/netdata-swagger.yaml
@@ -0,0 +1,512 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+swagger: '2.0'
+info:
+ title: NetData API
+ description: 'Real-time performance and health monitoring.'
+ version: 1.11.0_rolling
+host: registry.my-netdata.io
+schemes:
+ - https
+ - http
+basePath: /api/v1
+produces:
+ - application/json
+paths:
+ /charts:
+ get:
+ summary: 'Get a list of all charts available at the server'
+ description: 'The charts endpoint returns a summary about all charts stored in the netdata server.'
+ responses:
+ '200':
+ description: 'An array of charts'
+ schema:
+ type: array
+ items:
+ $ref: '#/definitions/chart_summary'
+ /chart:
+ get:
+ summary: 'Get info about a specific chart'
+ description: 'The Chart endpoint returns detailed information about a chart.'
+ parameters:
+ - name: chart
+ in: query
+ description: 'The id of the chart as returned by the /charts call.'
+ required: true
+ type: string
+ format: 'as returned by /charts'
+ default: 'system.cpu'
+ responses:
+ '200':
+ description: 'A JavaScript object with detailed information about the chart.'
+ schema:
+ $ref: '#/definitions/chart'
+ '404':
+ description: 'No chart with the given id is found.'
+ /data:
+ get:
+ summary: 'Get collected data for a specific chart'
+ description: |
+ The Data endpoint returns data stored in the round robin database of a chart.
+ parameters:
+ - name: chart
+ in: query
+ description: 'The id of the chart as returned by the /charts call.'
+ required: true
+ type: string
+ format: 'as returned by /charts'
+ allowEmptyValue: false
+ default: system.cpu
+ - name: dimension
+ in: query
+ description: 'Zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.'
+ required: false
+ type: array
+ items:
+ type: string
+ collectionFormat: pipes
+ format: 'as returned by /charts'
+ allowEmptyValue: false
+ - name: after
+ in: query
+ description: 'This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative, relative to parameter: before). Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is the beginning of the round robin database (i.e. by default netdata will attempt to return data for the entire database).'
+ required: true
+ type: number
+ format: integer
+ allowEmptyValue: false
+ default: -600
+ - name: before
+ in: query
+ description: 'This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is zero (i.e. the timestamp of the last value collected).'
+ required: false
+ type: number
+ format: integer
+ default: 0
+ - name: points
+ in: query
+ description: 'The number of points to be returned. If not given, or it is <= 0, or it is bigger than the number of points stored in the round robin database for this chart for the given duration, all the available collected values for the given duration will be returned.'
+ required: true
+ type: number
+ format: integer
+ allowEmptyValue: false
+ default: 20
+ - name: group
+ in: query
+ description: 'The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameter defines the method of grouping. The supported methods are "min", "max", "average", "median", "stddev", "sum" and "incremental-sum". "max" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions, to return the most extreme value in either direction).'
+ required: true
+ type: string
+ enum: [ 'min', 'max', 'average', 'median', 'stddev', 'sum', 'incremental-sum' ]
+ default: 'average'
+ allowEmptyValue: false
+ - name: gtime
+ in: query
+ description: 'The grouping number of seconds. This is used in conjunction with group=average to change the units of the metrics (i.e. when the data is per-second, setting gtime=60 will turn them into per-minute).'
+ required: false
+ type: number
+ format: integer
+ allowEmptyValue: false
+ default: 0
+ - name: format
+ in: query
+ description: 'The format of the data to be returned.'
+ required: true
+ type: string
+ enum: [ 'json', 'jsonp', 'csv', 'tsv', 'tsv-excel', 'ssv', 'ssvcomma', 'datatable', 'datasource', 'html', 'markdown', 'array', 'csvjsonarray' ]
+ default: json
+ allowEmptyValue: false
+ - name: options
+ in: query
+ description: 'Options that affect data generation.'
+ required: false
+ type: array
+ items:
+ type: string
+ enum: [ 'nonzero', 'flip', 'jsonwrap', 'min2max', 'seconds', 'milliseconds', 'abs', 'absolute', 'absolute-sum', 'null2zero', 'objectrows', 'google_json', 'percentage', 'unaligned', 'match-ids', 'match-names' ]
+ collectionFormat: pipes
+ default: [seconds, jsonwrap]
+ allowEmptyValue: false
+ - name: callback
+ in: query
+ description: 'For JSONP responses, the callback function name.'
+ required: false
+ type: string
+ allowEmptyValue: true
+ - name: filename
+ in: query
+ description: 'Add a Content-Disposition: attachment; filename=<filename> header to the response, which will instruct the browser to save the response with the given filename.'
+ required: false
+ type: string
+ allowEmptyValue: true
+ - name: tqx
+ in: query
+ description: '[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.'
+ required: false
+ type: string
+ allowEmptyValue: true
+ responses:
+ '200':
+ description: 'The call was successful. The response should include the data.'
+ schema:
+ $ref: '#/definitions/chart'
+ '400':
+ description: 'Bad request - the body will include a message stating what is wrong.'
+ '404':
+ description: 'No chart with the given id is found.'
+ '500':
+ description: 'Internal server error. This usually means the server is out of memory.'
+ /badge.svg:
+ get:
+ summary: 'Generate an SVG image for a chart (or dimension)'
+ description: |
+ Successful responses are SVG images.
+ parameters:
+ - name: chart
+ in: query
+ description: 'The id of the chart as returned by the /charts call.'
+ required: true
+ type: string
+ format: 'as returned by /charts'
+ allowEmptyValue: false
+ default: system.cpu
+ - name: alarm
+ in: query
+ description: 'The name of an alarm linked to the chart.'
+ required: false
+ type: string
+ format: 'any text'
+ allowEmptyValue: true
+ - name: dimension
+ in: query
+ description: 'Zero, one or more dimension ids, as returned by the /chart call.'
+ required: false
+ type: array
+ items:
+ type: string
+ collectionFormat: pipes
+ format: 'as returned by /charts'
+ allowEmptyValue: false
+ - name: after
+ in: query
+ description: 'This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetic. Netdata will adapt this parameter to the boundaries of the round robin database.'
+ required: true
+ type: number
+ format: integer
+ allowEmptyValue: false
+ default: -600
+ - name: before
+ in: query
+ description: 'This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp.
Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetic. Netdata will adapt this parameter to the boundaries of the round robin database.'
+ required: false
+ type: number
+ format: integer
+ default: 0
+ - name: group
+ in: query
+ description: 'The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameter defines the method of grouping. The supported methods are "min", "max", "average", "median", "stddev", "sum" and "incremental-sum". "max" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions, to return the most extreme value in either direction).'
+ required: true
+ type: string
+ enum: [ 'min', 'max', 'average', 'median', 'stddev', 'sum', 'incremental-sum' ]
+ default: 'average'
+ allowEmptyValue: false
+ - name: options
+ in: query
+ description: 'Options that affect data generation.'
+ required: false
+ type: array
+ items:
+ type: string
+ enum: [ 'abs', 'absolute', 'display-absolute', 'absolute-sum', 'null2zero', 'percentage', 'unaligned' ]
+ collectionFormat: pipes
+ default: ['absolute']
+ allowEmptyValue: true
+ - name: label
+ in: query
+ description: 'A text to be used as the label.'
+ required: false
+ type: string
+ format: 'any text'
+ allowEmptyValue: true
+ - name: units
+ in: query
+ description: 'A text to be used as the units.'
+ required: false
+ type: string
+ format: 'any text'
+ allowEmptyValue: true
+ - name: label_color
+ in: query
+ description: 'A color to be used for the background of the label.'
+ required: false
+ type: string
+ format: 'any text'
+ allowEmptyValue: true
+ - name: value_color
+ in: query
+ description: 'A color to be used for the background of the value. You can set multiple colors, each with a condition, separated by pipes, like this: color<value|color>value|color:null. The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists).'
+ required: false
+ type: string
+ format: 'any text'
+ allowEmptyValue: true
+ - name: multiply
+ in: query
+ description: 'Multiply the value by this number when rendering it on the image (integer value required).'
+ required: false
+ type: number
+ format: integer
+ allowEmptyValue: true
+ - name: divide
+ in: query
+ description: 'Divide the value by this number when rendering it on the image (integer value required).'
+ required: false
+ type: number
+ format: integer
+ allowEmptyValue: true
+ - name: scale
+ in: query
+ description: 'Set the scale of the badge (greater than or equal to 100).'
+ required: false
+ type: number
+ format: integer
+ allowEmptyValue: true
+ responses:
+ '200':
+ description: 'The call was successful. The response should be an SVG image.'
+ '400':
+ description: 'Bad request - the body will include a message stating what is wrong.'
+ '404':
+ description: 'No chart with the given id is found.'
+ '500':
+ description: 'Internal server error. This usually means the server is out of memory.'
+ /allmetrics:
+ get:
+ summary: 'Get the latest value of all the metrics maintained by netdata'
+ description: 'The allmetrics endpoint returns the latest value of all charts and dimensions stored in the netdata server.'
+ parameters:
+ - name: format
+ in: query
+ description: 'The format of the response to be returned.'
+ required: true
+ type: string
+ enum: [ 'shell', 'prometheus', 'prometheus_all_hosts', 'json' ]
+ default: 'shell'
+ - name: help
+ in: query
+ description: 'Enable or disable HELP lines in the prometheus output.'
+ required: false
+ type: string
+ enum: [ 'yes', 'no' ]
+ default: 'no'
+ - name: types
+ in: query
+ description: 'Enable or disable TYPE lines in the prometheus output.'
+ required: false
+ type: string
+ enum: [ 'yes', 'no' ]
+ default: 'no'
+ - name: timestamps
+ in: query
+ description: 'Enable or disable timestamps in the prometheus output.'
+ required: false
+ type: string
+ enum: [ 'yes', 'no' ]
+ default: 'yes'
+ - name: names
+ in: query
+ description: 'When enabled, netdata will report dimension names. When disabled, netdata will report dimension IDs. The default is controlled in netdata.conf.'
+ required: false
+ type: string
+ enum: [ 'yes', 'no' ]
+ default: 'yes'
+ - name: server
+ in: query
+ description: 'Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.'
+ required: false
+ type: string
+ format: 'any text'
+ - name: prefix
+ in: query
+ description: 'Prefix all prometheus metrics with this string.'
+ required: false
+ type: string
+ format: 'any text'
+ - name: data
+ in: query
+ description: 'Select the prometheus response data source. The default is controlled in netdata.conf.'
+ required: false
+ type: string
+ enum: [ 'as-collected', 'average', 'sum' ]
+ default: 'average'
+ responses:
+ '200':
+ description: 'All the metrics returned in the format requested'
+ '400':
+ description: 'The format requested is not supported'
+definitions:
+ chart_summary:
+ type: object
+ properties:
+ hostname:
+ type: string
+ description: 'The hostname of the netdata server.'
+ version:
+ type: string
+ description: 'The netdata version of the server.'
+ os:
+ type: string
+ description: 'The netdata server host operating system.'
+ enum: [ 'macos', 'linux', 'freebsd' ]
+ history:
+ type: number
+ description: 'The duration, in seconds, of the round robin database maintained by netdata.'
+ update_every:
+ type: number
+ description: 'The default update frequency of the netdata server. All charts have an update frequency equal to or greater than this.'
+ charts:
+ type: object
+ description: 'An object containing all the chart objects available at the netdata server. This is used as an indexed array. The key of each chart object is the id of the chart.'
+ properties:
+ key:
+ $ref: '#/definitions/chart'
+ charts_count:
+ type: number
+ description: 'The number of charts.'
+ dimensions_count:
+ type: number
+ description: 'The total number of dimensions.'
+ alarms_count:
+ type: number
+ description: 'The number of alarms.'
+ rrd_memory_bytes:
+ type: number
+ description: 'The size of the round robin database in bytes.'
+ chart:
+ type: object
+ properties:
+ id:
+ type: string
+ description: 'The unique id of the chart'
+ name:
+ type: string
+ description: 'The name of the chart'
+ type:
+ type: string
+ description: 'The type of the chart. Types are not handled by netdata. You can use this field for anything you like.'
+ family:
+ type: string
+ description: 'The family of the chart. Families are not handled by netdata. You can use this field for anything you like.'
+ title:
+ type: string
+ description: 'The title of the chart.'
+ priority:
+ type: string
+ description: 'The relative priority of the chart.
NetData does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family.'
+ enabled:
+ type: boolean
+ description: 'True when the chart is enabled. Disabled charts do not currently collect values, but they may have historical values available.'
+ units:
+ type: string
+ description: 'The unit of measurement for the values of all dimensions of the chart.'
+ data_url:
+ type: string
+ description: 'The absolute path to get data values for this chart. You are expected to use this path as the base when constructing the URL to fetch data values for this chart.'
+ chart_type:
+ type: string
+ description: 'The chart type.'
+ enum: [ 'line', 'area', 'stacked' ]
+ duration:
+ type: number
+ description: 'The duration, in seconds, of the round robin database maintained by netdata.'
+ first_entry:
+ type: number
+ description: 'The UNIX timestamp of the first entry (the oldest) in the round robin database.'
+ last_entry:
+ type: number
+ description: 'The UNIX timestamp of the latest entry in the round robin database.'
+ update_every:
+ type: number
+ description: 'The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database.'
+ dimensions:
+ type: object
+ description: 'An object containing all the chart dimensions available for the chart. This is used as an indexed array. The key of the object is the id of the dimension.'
+ properties:
+ key:
+ $ref: '#/definitions/dimension'
+ green:
+ type: number
+ description: 'Chart health green threshold'
+ red:
+ type: number
+ description: 'Chart health red threshold'
+ dimension:
+ type: object
+ properties:
+ name:
+ type: string
+ description: 'The name of the dimension'
+
+ json_wrap:
+ type: object
+ properties:
+ api:
+ type: number
+ description: 'The API version this conforms to, currently 1'
+ id:
+ type: string
+ description: 'The unique id of the chart'
+ name:
+ type: string
+ description: 'The name of the chart'
+ update_every:
+ type: number
+ description: 'The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database (independently of the current view).'
+ view_update_every:
+ type: number
+ description: 'The update frequency appropriate for the current view of this chart, in seconds. There is no point in requesting chart refreshes, using the same settings, more frequently than this.'
+ first_entry:
+ type: number
+ description: 'The UNIX timestamp of the first entry (the oldest) in the round robin database (independently of the current view).'
+ last_entry:
+ type: number
+ description: 'The UNIX timestamp of the latest entry in the round robin database (independently of the current view).'
+ after:
+ type: number
+ description: 'The UNIX timestamp of the first entry (the oldest) returned in this response.'
+ before:
+ type: number
+ description: 'The UNIX timestamp of the latest entry returned in this response.'
+ min:
+ type: number
+ description: 'The minimum value returned in the current view. This can be used to size the y-series of the chart.'
+ max:
+ type: number
+ description: 'The maximum value returned in the current view. This can be used to size the y-series of the chart.'
+ dimension_names:
+ description: 'The dimension names of the chart as returned in the current view.'
+ type: array
+ items:
+ type: string
+ dimension_ids:
+ description: 'The dimension IDs of the chart as returned in the current view.'
+ type: array
+ items:
+ type: string
+ latest_values:
+ description: 'The latest values collected for the chart (independently of the current view).'
+ type: array
+ items:
+ type: string
+ view_latest_values:
+ description: 'The latest values returned with this response.'
+ type: array
+ items:
+ type: string
+ dimensions:
+ type: number
+ description: 'The number of dimensions returned.'
+ points:
+ type: number
+ description: 'The number of rows / points returned.'
+ format:
+ type: string
+ description: 'The format of the result returned.'
+ result:
+ description: 'The result requested, in the format requested.'
diff --git a/web/api/queries/Makefile.am b/web/api/queries/Makefile.am
new file mode 100644
index 000000000..008dbfeb8
--- /dev/null
+++ b/web/api/queries/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ average \
+ des \
+ incremental_sum \
+ max \
+ min \
+ sum \
+ median \
+ ses \
+ stddev \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/Makefile.in b/web/api/queries/Makefile.in
new file mode 100644
index 000000000..295870d2c
--- /dev/null
+++ b/web/api/queries/Makefile.in
@@ -0,0 +1,656 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?)
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ 
+ esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS 
= @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + average \ + des \ + incremental_sum \ + max \ + min \ + sum \ + median \ + ses \ + stddev \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/README.md b/web/api/queries/README.md new file mode 100644 index 000000000..7a517d26e --- /dev/null +++ b/web/api/queries/README.md @@ -0,0 +1,128 @@ +# Database Queries + +Netdata database can be queried with `/api/v1/data` and `/api/v1/badge.svg` REST API methods. + +Every data query accepts the following parameters: + +name|required|description +:----:|:----:|:--- +`chart`|yes|The chart to be queried. +`points`|no|The number of points to be returned. Netdata can reduce number of points by applying query grouping methods. If not given, the result will have the same granularity as the database (although this relates to `gtime`). +`before`|no|The absolute timestamp or the relative (to now) time the query should finish evaluating data. If not given, it defaults to the timestamp of the latest point in the database. +`after`|no|The absolute timestamp or the relative (to `before`) time the query should start evaluating data. if not given, it defaults to the timestamp of the oldest point in the database. +`group`|no|The grouping method to use when reducing the points the database has. If not given, it defaults to `average`. +`gtime`|no|A resampling period to change the units of the metrics (i.e. setting this to `60` will convert `per second` metrics to `per minute`. If not given it defaults to granularity of the database. +`options`|no|A bitmap of options that can affect the operation of the query. Only 2 options are used by the query engine: `unaligned` and `percentage`. All the other options are used by the output formatters. The default is to return aligned data. 
+`dimensions`|no|A simple pattern to filter the dimensions to be queried. The default is to return all the dimensions of the chart.
+
+## Operation
+
+The query engine works as follows (in this order):
+
+#### Time-frame
+
+`after` and `before` define a time-frame, accepting:
+
+- **absolute timestamps** (unix timestamps, i.e. seconds since epoch).
+
+- **relative timestamps**:
+
+ `before` is relative to now and `after` is relative to `before`.
+
+ Example: `before=-60&after=-60` evaluates to the time-frame from -120 up to -60 seconds in
+ the past, relative to the latest entry of the database of the chart.
+
+The engine verifies that the time-frame requested is available at the database:
+
+- If the requested time-frame overlaps with the database, the excess requested
+ will be truncated.
+
+- If the requested time-frame does not overlap with the database, the engine will
+ return an empty data set.
+
+At the end of this operation, `after` and `before` are absolute timestamps.
+
+#### Data grouping
+
+Database points grouping is applied when the caller requests a time-frame to be
+expressed with fewer points, compared to what is available at the database.
+
+There are two use cases that enable this feature:
+
+- The caller requests a specific number of `points` to be returned.
+
+ For example, for a time-frame of 10 minutes, the database has 600 points (1/sec),
+ while the caller requested these 10 minutes to be expressed in 200 points.
+
+ This feature is used by netdata dashboards when you zoom out on the charts.
+ The dashboard requests the number of points the user's screen has.
+ This saves bandwidth and speeds up the browser (fewer points to evaluate for drawing the charts).
+
+- The caller requests a **re-sampling** of the database, by setting `gtime` to any value
+ above the granularity of the chart.
+
+ For example, the chart's units are `requests/sec` and the caller wants `requests/min`.
+
+Using `points` and `gtime` the query engine tries to find the best fit for **database-points**
+vs **result-points** (we call this ratio `group points`). It always tries to keep `group points`
+an integer. Keep in mind the query engine may shift `after` if required.
+
+#### Time-frame Alignment
+
+Alignment is a very important aspect of netdata queries. Without it, the animated
+charts on the dashboards would constantly change shape during incremental updates.
+
+To provide consistent grouping through time, the query engine (by default) aligns
+`after` and `before` to be a multiple of `group points`.
+
+For example, if `group points` is 60 and alignment is enabled, the engine will return
+each point with durations XX:XX:00 - XX:XX:59, matching whole minutes.
+
+To disable alignment, pass `&options=unaligned` to the query.
+
+#### Query Execution
+
+To execute the query, the engine evaluates all dimensions of the chart, one after another.
+
+The engine does not evaluate dimensions that do not match the [simple pattern](../../../libnetdata/simple_pattern)
+given at the `dimensions` parameter, except when `options=percentage` is given (this option
+requires all the dimensions to be evaluated, to find the percentage of each dimension vs the chart
+total).
+
+For each dimension, it starts evaluating values at `after` (not inclusive), towards
+`before` (inclusive).
+
+For each value it calls the **grouping method** given with the `&group=` query parameter
+(the default is `average`).
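+
+As an illustration of the above, here is a hypothetical query, sketched with `curl` (it assumes
+a netdata server listening on `localhost:19999` and a chart id of `system.cpu`; adjust both
+to your setup):
+
+```sh
+# Last 10 minutes (600 seconds) of system.cpu, reduced to 20 points.
+# If one value is collected per second, the time-frame holds 600 database points,
+# so the engine will use "group points" = 600 / 20 = 30: every returned point
+# is the average of 30 collected values.
+curl 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-600&points=20&group=average'
+
+# The same query, resampled to per-minute units and with alignment disabled:
+curl 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-600&points=20&group=average&gtime=60&options=unaligned'
+```
+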
+## Grouping methods
+
+The following grouping methods are supported. Each of them is given all the values of the
+time-frame and groups them every `group points` values:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=blue) finds the minimum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow) finds the average value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=sum&units=requests&value_color=orange) adds all the values and returns the sum
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=ses&value_color=brown) finds the exponentially weighted moving average of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference between the last and the first value
+
+The examples shown above are live information from the `successful` web requests of the global netdata registry.
+
+## Further processing
+
+The result of the query engine is always a structure that has dimensions and values
+for each dimension.
+
+Formatting modules are then used to convert this result into many different formats and return it
+to the caller.
+
+## Performance
+
+The query engine is highly optimized for speed. Most of its modules implement "online"
+versions of the algorithms, requiring just one pass over the database values to produce
+the result.
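+All grouping modules share the same "online" structure mentioned above: the engine feeds
+them one value at a time and asks for a result every `group points` values. Below is a
+simplified standalone sketch of that contract, using `max` as the method (see `average.c`
+later in this patch for a real implementation; the real modules operate on an `RRDR`
+result object and netdata's `calculated_number` type, not a plain `double`).
+
+```
+#include <stdio.h>
+#include <math.h>
+
+// simplified "online" grouping: constant memory, one pass over the values
+struct grouping_max {
+    double max;
+    size_t count;
+};
+
+static void grouping_reset(struct grouping_max *g) {
+    g->max = 0.0;
+    g->count = 0;
+}
+
+// called for every database value in the time-frame
+static void grouping_add(struct grouping_max *g, double value) {
+    if(isnan(value)) return;                    // gaps are skipped
+    if(!g->count || value > g->max) g->max = value;
+    g->count++;
+}
+
+// called every `group points` values, to emit one result point
+static double grouping_flush(struct grouping_max *g) {
+    double value = g->count ? g->max : NAN;     // NAN marks an empty point
+    grouping_reset(g);
+    return value;
+}
+
+int main(void) {
+    double db[] = { 3, 9, 4, 1, 5, 8 };         // six database points
+    size_t group_points = 3;                    // so, two result points
+
+    struct grouping_max g;
+    grouping_reset(&g);
+
+    for(size_t i = 0; i < sizeof(db) / sizeof(db[0]); i++) {
+        grouping_add(&g, db[i]);
+        if((i + 1) % group_points == 0)
+            printf("%g\n", grouping_flush(&g)); // prints 9, then 8
+    }
+    return 0;
+}
+```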
diff --git a/web/api/queries/average/Makefile.am b/web/api/queries/average/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/average/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/average/Makefile.in b/web/api/queries/average/Makefile.in new file mode 100644 index 000000000..a5db03648 --- /dev/null +++ b/web/api/queries/average/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/average +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + 
$(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ 
+VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/average/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/average/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md new file mode 100644 index 000000000..3be434f68 --- /dev/null +++ b/web/api/queries/average/README.md @@ -0,0 +1,39 @@ +# Average or Mean + +> This query is available as `average` and `mean`. + +An average is a single number taken as representative of a list of numbers. + +It is calculated as: + +``` +average = sum(numbers) / count(numbers) +``` + +## how to use + +Use it in alarms like this: + +``` + alarm: my_alarm + on: my_chart +lookup: average -1m unaligned of my_dimension + warn: $this > 1000 +``` + +`average` does not change the units. For example, if the chart units is `requests/sec`, the result +will be again expressed in the same units. + +It can also be used in APIs and badges as `&group=average` in the URL. + +## Examples + +Examining last 1 minute `successful` web server responses: + +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=orange) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max) + +## References + +- [https://en.wikipedia.org/wiki/Average](https://en.wikipedia.org/wiki/Average). 
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c new file mode 100644 index 000000000..c871b8778 --- /dev/null +++ b/web/api/queries/average/average.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "average.h" + +// ---------------------------------------------------------------------------- +// average + +struct grouping_average { + calculated_number sum; + size_t count; +}; + +void *grouping_create_average(RRDR *r) { + (void)r; + return callocz(1, sizeof(struct grouping_average)); +} + +// resets when switches dimensions +// so, clear everything to restart +void grouping_reset_average(RRDR *r) { + struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data; + g->sum = 0; + g->count = 0; +} + +void grouping_free_average(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_average(RRDR *r, calculated_number value) { + if(!isnan(value)) { + struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data; + g->sum += value; + g->count++; + } +} + +calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->count)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else { + if(unlikely(r->internal.resampling_group != 1)) + value = g->sum / r->internal.resampling_divisor; + else + value = g->sum / g->count; + } + + g->sum = 0.0; + g->count = 0; + + return value; +} diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h new file mode 100644 index 000000000..9fb7de21a --- /dev/null +++ b/web/api/queries/average/average.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERY_AVERAGE_H +#define NETDATA_API_QUERY_AVERAGE_H + +#include "../query.h" +#include "../rrdr.h" + +extern void *grouping_create_average(RRDR *r); +extern void grouping_reset_average(RRDR *r); +extern void grouping_free_average(RRDR *r); +extern void grouping_add_average(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERY_AVERAGE_H diff --git a/web/api/queries/des/Makefile.am b/web/api/queries/des/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/des/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/des/Makefile.in b/web/api/queries/des/Makefile.in new file mode 100644 index 000000000..d3f05587b --- /dev/null +++ b/web/api/queries/des/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/des +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ 
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = 
@localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/des/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/des/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md new file mode 100644 index 000000000..fa546a338 --- /dev/null +++ b/web/api/queries/des/README.md @@ -0,0 +1,66 @@ +# double exponential smoothing + +Exponential smoothing is one of many window functions commonly applied to smooth data in signal +processing, acting as low-pass filters to remove high frequency noise. + +Simple exponential smoothing does not do well when there is a trend in the data. 
+In such situations, several methods were devised under the names "double exponential smoothing"
+or "second-order exponential smoothing": the recursive application of an exponential filter
+twice, which is what the name "double exponential smoothing" refers to.
+
+In simple terms, this is like an average value, but more recent values are given more weight,
+and the trend of the values significantly influences the result.
+
+> **IMPORTANT**
+>
+> It is common for `des` to provide "average" values that are far beyond the minimum or the maximum
+> values found in the time-series.
+> `des` estimates these values because it takes the trend into account.
+
+This module implements "Holt-Winters double exponential smoothing".
+
+Netdata automatically adjusts the weight (`alpha`) and the trend (`beta`) based on the number
+of values processed, using the formula:
+
+```
+window = min(number of values, 15)
+alpha  = 2 / (window + 1)
+beta   = 2 / (window + 1)
+```
+
+You can change the fixed value `15` by setting in `netdata.conf`:
+
+```
+[web]
+    des max window = 15
+```
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+    on: my_chart
+lookup: des -1m unaligned of my_dimension
+  warn: $this > 1000
+```
+
+`des` does not change the units. For example, if the chart's units are `requests/sec`,
+the result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=des` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=double+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- [https://en.wikipedia.org/wiki/Exponential_smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing).
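+For reference, here is a standalone sketch of the update equations, following `des.c` in this
+patch. The initialization below is the textbook one (`des.c` folds the first two values in
+slightly differently), and the fixed window of `15` mirrors the default `des max window`:
+
+```
+#include <stdio.h>
+
+int main(void) {
+    double series[] = { 10, 12, 14, 13, 17, 20, 21, 25 };
+    int n = sizeof(series) / sizeof(series[0]);
+
+    int window = (n < 15) ? n : 15;        // window = min(number of values, 15)
+    double alpha = 2.0 / (window + 1.0);   // weight of the new value
+    double beta  = 2.0 / (window + 1.0);   // weight of the new trend
+
+    double level = series[0];              // the first value seeds the level...
+    double trend = series[1] - series[0];  // ...and the first difference the trend
+
+    for(int i = 1; i < n; i++) {
+        double last_level = level;
+        level = alpha * series[i] + (1.0 - alpha) * (level + trend);
+        trend = beta * (level - last_level) + (1.0 - beta) * trend;
+    }
+
+    // with an upward trend, the estimate may overshoot the raw min/max
+    printf("level: %f, trend: %f\n", level, trend);
+    return 0;
+}
+```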
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
new file mode 100644
index 000000000..93d724723
--- /dev/null
+++ b/web/api/queries/des/des.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <web/api/queries/rrdr.h>
+#include "des.h"
+
+
+// ----------------------------------------------------------------------------
+// double exponential smoothing
+
+struct grouping_des {
+    calculated_number alpha;
+    calculated_number alpha_other;
+    calculated_number beta;
+    calculated_number beta_other;
+
+    calculated_number level;
+    calculated_number trend;
+
+    size_t count;
+};
+
+static size_t max_window_size = 15;
+
+void grouping_init_des(void) {
+    long long ret = config_get_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
+    if(ret <= 1) {
+        config_set_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
+    }
+    else {
+        max_window_size = (size_t) ret;
+    }
+}
+
+static inline calculated_number window(RRDR *r, struct grouping_des *g) {
+    (void)g;
+
+    calculated_number points;
+    if(r->group == 1) {
+        // provide a running DES
+        points = r->internal.points_wanted;
+    }
+    else {
+        // provide a SES with flush points
+        points = r->group;
+    }
+
+    // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+    // A commonly used value for alpha is 2 / (N + 1)
+    return (points > max_window_size) ? max_window_size : points;
+}
+
+static inline void set_alpha(RRDR *r, struct grouping_des *g) {
+    // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+    // A commonly used value for alpha is 2 / (N + 1)
+
+    g->alpha = 2.0 / (window(r, g) + 1.0);
+    g->alpha_other = 1.0 - g->alpha;
+
+    //info("alpha for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->alpha);
+}
+
+static inline void set_beta(RRDR *r, struct grouping_des *g) {
+    // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+    // A commonly used value for beta is 2 / (N + 1)
+
+    g->beta = 2.0 / (window(r, g) + 1.0);
+    g->beta_other = 1.0 - g->beta;
+
+    //info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
+}
+
+void *grouping_create_des(RRDR *r) {
+    struct grouping_des *g = (struct grouping_des *)malloc(sizeof(struct grouping_des));
+    set_alpha(r, g);
+    set_beta(r, g);
+    g->level = 0.0;
+    g->trend = 0.0;
+    g->count = 0;
+    return g;
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_des(RRDR *r) {
+    struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+    g->level = 0.0;
+    g->trend = 0.0;
+    g->count = 0;
+
+    // fprintf(stderr, "\nDES: ");
+
+}
+
+void grouping_free_des(RRDR *r) {
+    freez(r->internal.grouping_data);
+    r->internal.grouping_data = NULL;
+}
+
+void grouping_add_des(RRDR *r, calculated_number value) {
+    struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+
+    if(isnormal(value)) {
+        if(likely(g->count > 0)) {
+            // we have at least a number so far
+
+            if(unlikely(g->count == 1)) {
+                // the second value we got
+                g->trend = value - g->trend;
+                g->level = value;
+            }
+
+            // for the values, except the first
+            calculated_number last_level = g->level;
+            g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
+            g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
+        }
+        else {
+            // the first value we got
+            g->level = g->trend = value;
+        }
+
+        g->count++;
+    }
+
+    //fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT
", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend); +} + +calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data; + + if(unlikely(!g->count || !isnormal(g->level))) { + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + return 0.0; + } + + //fprintf(stderr, " RESULT for %zu values = " CALCULATED_NUMBER_FORMAT " \n", g->count, g->level); + + return g->level; +} diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h new file mode 100644 index 000000000..360513e9c --- /dev/null +++ b/web/api/queries/des/des.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERIES_DES_H +#define NETDATA_API_QUERIES_DES_H + +#include "../query.h" +#include "../rrdr.h" + +extern void grouping_init_des(void); + +extern void *grouping_create_des(RRDR *r); +extern void grouping_reset_des(RRDR *r); +extern void grouping_free_des(RRDR *r); +extern void grouping_add_des(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERIES_DES_H diff --git a/web/api/queries/incremental_sum/Makefile.am b/web/api/queries/incremental_sum/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/incremental_sum/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/incremental_sum/Makefile.in b/web/api/queries/incremental_sum/Makefile.in new file mode 100644 index 000000000..1863466fe --- /dev/null +++ b/web/api/queries/incremental_sum/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/incremental_sum +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = 
@CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/incremental_sum/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/incremental_sum/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \
+	  fi; \
+	  cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	else \
+	  test -f "$(distdir)/$$file" \
+	  || cp -p $$d/$$file "$(distdir)/$$file" \
+	  || exit 1; \
+	fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+	-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+	ctags-am distclean distclean-generic distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+	pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md
new file mode 100644
index 000000000..ef86d4d41
--- /dev/null
+++ b/web/api/queries/incremental_sum/README.md
@@ -0,0 +1,34 @@
+# Incremental Sum (`incremental_sum`)
+
+This module finds the incremental sum of a period, which is `last value - first value`.
+
+The result may be positive (rising) or negative (falling) depending on the first and last values.
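+A minimal standalone sketch of the computation (the real implementation in
+`incremental_sum.c`, right after this README, tracks the first and last values in constant
+memory as the engine feeds them in one by one):
+
+```
+#include <stdio.h>
+#include <math.h>
+
+// incremental sum = last value - first value, skipping NaN gaps
+int main(void) {
+    double values[] = { 100, NAN, 104, 103, 110 };  // one gap in the middle
+    int n = sizeof(values) / sizeof(values[0]);
+
+    double first = NAN, last = NAN;
+    for(int i = 0; i < n; i++) {
+        if(isnan(values[i])) continue;              // gaps do not participate
+        if(isnan(first)) first = values[i];
+        else last = values[i];
+    }
+
+    if(isnan(first) || isnan(last))
+        printf("not enough values\n");
+    else
+        printf("incremental sum: %f\n", last - first); // 110 - 100 = 10 (rising)
+    return 0;
+}
+```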
+ +## how to use + +Use it in alarms like this: + +``` + alarm: my_alarm + on: my_chart +lookup: incremental_sum -1m unaligned of my_dimension + warn: $this > 1000 +``` + +`incremental_sum` does not change the units. For example, if the chart's units are `requests/sec`, the result +will again be expressed in the same units. + +It can also be used in APIs and badges as `&group=incremental_sum` in the URL. + +## Examples + +Examining the last 1 minute of `successful` web server responses: + +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental+sum&value_color=orange) + +## References + +- none diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c new file mode 100644 index 000000000..131d85d78 --- /dev/null +++ b/web/api/queries/incremental_sum/incremental_sum.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "incremental_sum.h" + +// ---------------------------------------------------------------------------- +// incremental sum + +struct grouping_incremental_sum { + calculated_number first; + calculated_number last; + size_t count; +}; + +void *grouping_create_incremental_sum(RRDR *r) { + (void)r; + return callocz(1, sizeof(struct grouping_incremental_sum)); +} + +// resets when the query switches dimensions +// so, clear everything to restart +void grouping_reset_incremental_sum(RRDR *r) { + struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data; + g->first = 0; + g->last = 0; + g->count = 0; +} + +void grouping_free_incremental_sum(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_incremental_sum(RRDR *r, calculated_number value) { + if(!isnan(value)) { + struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data; + + if(unlikely(!g->count)) { + g->first = value; + g->count++; + } + else { + g->last = value; + g->count++; + } + } +} + +calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->count)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else if(unlikely(g->count == 1)) { + value = 0.0; + } + else { + value = g->last - g->first; + } + + g->first = 0.0; + g->last = 0.0; + g->count = 0; + + return value; +} diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h new file mode 100644 index 000000000..990a2ac4a --- /dev/null +++ b/web/api/queries/incremental_sum/incremental_sum.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERY_INCREMENTAL_SUM_H +#define NETDATA_API_QUERY_INCREMENTAL_SUM_H + +#include "../query.h" +#include "../rrdr.h" + +extern void
*grouping_create_incremental_sum(RRDR *r); +extern void grouping_reset_incremental_sum(RRDR *r); +extern void grouping_free_incremental_sum(RRDR *r); +extern void grouping_add_incremental_sum(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H diff --git a/web/api/queries/max/Makefile.am b/web/api/queries/max/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/max/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/max/Makefile.in b/web/api/queries/max/Makefile.in new file mode 100644 index 000000000..f8475ad62 --- /dev/null +++ b/web/api/queries/max/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = 
web/api/queries/max +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = 
@PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/max/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/max/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md new file mode 100644 index 000000000..2ea14bf0b --- /dev/null +++ b/web/api/queries/max/README.md @@ -0,0 +1,31 @@ +# Max + +This module finds the max value in the time-frame given. + +## how to use + +Use it in alarms like this: + +``` + alarm: my_alarm + on: my_chart +lookup: max -1m unaligned of my_dimension + warn: $this > 1000 +``` + +`max` does not change the units. For example, if the chart's units are `requests/sec`, the result +will again be expressed in the same units. + +It can also be used in APIs and badges as `&group=max` in the URL. + +## Examples + +Examining the last 1 minute of `successful` web server responses: + +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=orange) + +## References + +- [https://en.wikipedia.org/wiki/Sample_maximum_and_minimum](https://en.wikipedia.org/wiki/Sample_maximum_and_minimum).
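+
+One detail worth noting from this patch's `max.c`: the comparison is done on absolute values, so
+on dimensions that can go negative the module keeps the sample with the largest magnitude. A
+minimal standalone sketch of that selection (a hypothetical helper mirroring the module's logic,
+not its actual API):
+
+```
+#include <math.h>
+#include <stddef.h>
+
+/* sketch: keep the sample with the largest magnitude, as grouping_add_max() does */
+double select_max(const double *samples, size_t n) {
+    double max = 0.0;
+    size_t count = 0;
+    for (size_t i = 0; i < n; i++) {
+        if (isnan(samples[i])) continue;              /* NaN samples are skipped */
+        if (!count || fabs(samples[i]) > fabs(max)) {
+            max = samples[i];
+            count++;
+        }
+    }
+    return max;  /* 0.0 when no usable samples; the module also flags such points empty */
+}
+```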
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c new file mode 100644 index 000000000..a4be36add --- /dev/null +++ b/web/api/queries/max/max.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "max.h" + +// ---------------------------------------------------------------------------- +// max + +struct grouping_max { + calculated_number max; + size_t count; +}; + +void *grouping_create_max(RRDR *r) { + (void)r; + return callocz(1, sizeof(struct grouping_max)); +} + +// resets when the query switches dimensions +// so, clear everything to restart +void grouping_reset_max(RRDR *r) { + struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data; + g->max = 0; + g->count = 0; +} + +void grouping_free_max(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_max(RRDR *r, calculated_number value) { + if(!isnan(value)) { + struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data; + + if(!g->count || calculated_number_fabs(value) > calculated_number_fabs(g->max)) { + g->max = value; + g->count++; + } + } +} + +calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->count)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else { + value = g->max; + } + + g->max = 0.0; + g->count = 0; + + return value; +} + diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h new file mode 100644 index 000000000..d839fe3f9 --- /dev/null +++ b/web/api/queries/max/max.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERY_MAX_H +#define NETDATA_API_QUERY_MAX_H + +#include "../query.h" +#include "../rrdr.h" + +extern void *grouping_create_max(RRDR *r); +extern void grouping_reset_max(RRDR *r); +extern void grouping_free_max(RRDR *r); +extern void grouping_add_max(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERY_MAX_H diff --git a/web/api/queries/median/Makefile.am b/web/api/queries/median/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/median/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/median/Makefile.in b/web/api/queries/median/Makefile.in new file mode 100644 index 000000000..f6d471bdf --- /dev/null +++ b/web/api/queries/median/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE.
+ +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/median +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ 
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = 
@localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/median/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/median/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md new file mode 100644 index 000000000..eb2dbaa96 --- /dev/null +++ b/web/api/queries/median/README.md @@ -0,0 +1,37 @@ +# Median + +The median is the value separating the higher half from the lower half of a data sample +(a population or a probability distribution). For a data set, it may be thought of as the +"middle" value. + +`median` is not an accurate average. 
However, it eliminates spikes by sorting +all the values in a period and selecting the one in the middle of the sorted array (so, for five +hypothetical samples `1, 2, 100, 2, 3`, it reports `2` and the spike is ignored). + +## how to use + +Use it in alarms like this: + +``` + alarm: my_alarm + on: my_chart +lookup: median -1m unaligned of my_dimension + warn: $this > 1000 +``` + +`median` does not change the units. For example, if the chart's units are `requests/sec`, the result +will again be expressed in the same units. + +It can also be used in APIs and badges as `&group=median` in the URL. + +## Examples + +Examining the last 1 minute of `successful` web server responses: + +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=orange) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max) + +## References + +- [https://en.wikipedia.org/wiki/Median](https://en.wikipedia.org/wiki/Median). diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c new file mode 100644 index 000000000..5a13b2e4d --- /dev/null +++ b/web/api/queries/median/median.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "median.h" + + +// ---------------------------------------------------------------------------- +// median + +struct grouping_median { + size_t series_size; + size_t next_pos; + + LONG_DOUBLE series[]; +}; + +void *grouping_create_median(RRDR *r) { + long entries = r->group; + if(entries < 0) entries = 0; + + struct grouping_median *g = (struct grouping_median *)callocz(1, sizeof(struct grouping_median) + entries * sizeof(LONG_DOUBLE)); + g->series_size = (size_t)entries; + + return g; +} + +// resets when the query switches dimensions +// so, clear everything to restart +void grouping_reset_median(RRDR *r) { + struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data; + g->next_pos = 0; +} + +void grouping_free_median(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_median(RRDR *r, calculated_number value) { + struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data; + + if(unlikely(g->next_pos >= g->series_size)) { + error("INTERNAL ERROR: median buffer overflow on chart '%s' - next_pos = %zu, series_size = %zu, r->group = %ld.", r->st->name, g->next_pos, g->series_size, r->group); + } + else { + if(isnormal(value)) + g->series[g->next_pos++] = (LONG_DOUBLE)value; + } +} + +calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->next_pos)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else { + if(g->next_pos > 1) { + sort_series(g->series, g->next_pos); + value = (calculated_number)median_on_sorted_series(g->series, g->next_pos); + } + else + value = (calculated_number)g->series[0]; + + if(!isnormal(value)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + + //log_series_to_stderr(g->series,
g->next_pos, value, "median"); + } + + g->next_pos = 0; + + return value; +} + diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h new file mode 100644 index 000000000..dd2c1ffc5 --- /dev/null +++ b/web/api/queries/median/median.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERIES_MEDIAN_H +#define NETDATA_API_QUERIES_MEDIAN_H + +#include "../query.h" +#include "../rrdr.h" + +extern void *grouping_create_median(RRDR *r); +extern void grouping_reset_median(RRDR *r); +extern void grouping_free_median(RRDR *r); +extern void grouping_add_median(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERIES_MEDIAN_H diff --git a/web/api/queries/min/Makefile.am b/web/api/queries/min/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/min/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/min/Makefile.in b/web/api/queries/min/Makefile.in new file mode 100644 index 000000000..11fa615f6 --- /dev/null +++ b/web/api/queries/min/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/min +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/min/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/min/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md new file mode 100644 index 000000000..3fe13106a --- /dev/null +++ b/web/api/queries/min/README.md @@ -0,0 +1,31 @@ +# Min + +This module finds the min value in the time-frame given. + +## how to use + +Use it in alarms like this: + +``` + alarm: my_alarm + on: my_chart +lookup: min -1m unaligned of my_dimension + warn: $this > 1000 +``` + +`min` does not change the units. For example, if the chart's units are `requests/sec`, the result +will again be expressed in the same units.
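+
+As a worked illustration grounded in this patch's `min.c` (which, like `max.c`, compares
+absolute values): for hypothetical samples `3, -7, 5` in a period, the module reports `3`,
+the sample of smallest magnitude, rather than the arithmetically smallest value `-7`.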
+ +It can also be used in APIs and badges as `&group=min` in the URL. + +## Examples + +Examining the last 1 minute of `successful` web server responses: + +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=orange) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average) +- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max) + +## References + +- [https://en.wikipedia.org/wiki/Sample_maximum_and_minimum](https://en.wikipedia.org/wiki/Sample_maximum_and_minimum). diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c new file mode 100644 index 000000000..9bd7460e0 --- /dev/null +++ b/web/api/queries/min/min.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "min.h" + +// ---------------------------------------------------------------------------- +// min + +struct grouping_min { + calculated_number min; + size_t count; +}; + +void *grouping_create_min(RRDR *r) { + (void)r; + return callocz(1, sizeof(struct grouping_min)); +} + +// resets when the query switches dimensions +// so, clear everything to restart +void grouping_reset_min(RRDR *r) { + struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data; + g->min = 0; + g->count = 0; +} + +void grouping_free_min(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_min(RRDR *r, calculated_number value) { + if(!isnan(value)) { + struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data; + + if(!g->count || calculated_number_fabs(value) < calculated_number_fabs(g->min)) { + g->min = value; + g->count++; + } + } +} + +calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->count)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else { + value = g->min; + } + + g->min = 0.0; + g->count = 0; + + return value; +} + diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h new file mode 100644 index 000000000..74703605c --- /dev/null +++ b/web/api/queries/min/min.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERY_MIN_H +#define NETDATA_API_QUERY_MIN_H + +#include "../query.h" +#include "../rrdr.h" + +extern void *grouping_create_min(RRDR *r); +extern void grouping_reset_min(RRDR *r); +extern void grouping_free_min(RRDR *r); +extern void grouping_add_min(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERY_MIN_H diff --git a/web/api/queries/query.c b/web/api/queries/query.c new file mode 100644 index 000000000..d03b43d3c --- /dev/null +++ b/web/api/queries/query.c @@ -0,0 +1,974 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "query.h" +#include "web/api/formatters/rrd2json.h" +#include "rrdr.h" + +#include "average/average.h" +#include "incremental_sum/incremental_sum.h" +#include "max/max.h" +#include "median/median.h" +#include "min/min.h" +#include "sum/sum.h" +#include "stddev/stddev.h" +#include "ses/ses.h" +#include "des/des.h" + +// 
+// ----------------------------------------------------------------------------
+
+static struct {
+    const char *name;
+    uint32_t hash;
+    RRDR_GROUPING value;
+
+    // One-time initialization for the module.
+    // This is called once, when netdata starts.
+    void (*init)(void);
+
+    // Allocate all required structures for a query.
+    // This is called once for each netdata query.
+    void *(*create)(struct rrdresult *r);
+
+    // Clean up collected values, but don't destroy the structures.
+    // This is called when the query engine switches dimensions,
+    // as part of the same query (so same chart, switching metric).
+    void (*reset)(struct rrdresult *r);
+
+    // Free all resources allocated for the query.
+    void (*free)(struct rrdresult *r);
+
+    // Add a single value into the calculation.
+    // The module may decide to cache it, or use it on the fly.
+    void (*add)(struct rrdresult *r, calculated_number value);
+
+    // Generate a single result for the values added so far.
+    // More values and points may be requested later.
+    // It is up to the module to reset its internal structures
+    // when flushing (so for a few modules it may be better to
+    // continue after a flush as if nothing changed, for others a
+    // cleanup of the internal structures may be required).
+    calculated_number (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+} api_v1_data_groups[] = {
+        {.name = "average",
+         .hash = 0,
+         .value = RRDR_GROUPING_AVERAGE,
+         .init = NULL,
+         .create= grouping_create_average,
+         .reset = grouping_reset_average,
+         .free = grouping_free_average,
+         .add = grouping_add_average,
+         .flush = grouping_flush_average
+        },
+        {.name = "mean", // alias of 'average'
+         .hash = 0,
+         .value = RRDR_GROUPING_AVERAGE,
+         .init = NULL,
+         .create= grouping_create_average,
+         .reset = grouping_reset_average,
+         .free = grouping_free_average,
+         .add = grouping_add_average,
+         .flush = grouping_flush_average
+        },
+        {.name = "incremental_sum",
+         .hash = 0,
+         .value = RRDR_GROUPING_INCREMENTAL_SUM,
+         .init = NULL,
+         .create= grouping_create_incremental_sum,
+         .reset = grouping_reset_incremental_sum,
+         .free = grouping_free_incremental_sum,
+         .add = grouping_add_incremental_sum,
+         .flush = grouping_flush_incremental_sum
+        },
+        {.name = "incremental-sum",
+         .hash = 0,
+         .value = RRDR_GROUPING_INCREMENTAL_SUM,
+         .init = NULL,
+         .create= grouping_create_incremental_sum,
+         .reset = grouping_reset_incremental_sum,
+         .free = grouping_free_incremental_sum,
+         .add = grouping_add_incremental_sum,
+         .flush = grouping_flush_incremental_sum
+        },
+        {.name = "median",
+         .hash = 0,
+         .value = RRDR_GROUPING_MEDIAN,
+         .init = NULL,
+         .create= grouping_create_median,
+         .reset = grouping_reset_median,
+         .free = grouping_free_median,
+         .add = grouping_add_median,
+         .flush = grouping_flush_median
+        },
+        {.name = "min",
+         .hash = 0,
+         .value = RRDR_GROUPING_MIN,
+         .init = NULL,
+         .create= grouping_create_min,
+         .reset = grouping_reset_min,
+         .free = grouping_free_min,
+         .add = grouping_add_min,
+         .flush = grouping_flush_min
+        },
+        {.name = "max",
+         .hash = 0,
+         .value = RRDR_GROUPING_MAX,
+         .init = NULL,
+         .create= grouping_create_max,
+         .reset = grouping_reset_max,
+         .free = grouping_free_max,
+         .add = grouping_add_max,
+         .flush = grouping_flush_max
+        },
+        {.name = "sum",
+         .hash = 0,
+         .value = RRDR_GROUPING_SUM,
+         .init = NULL,
+         .create= grouping_create_sum,
+         .reset = grouping_reset_sum,
+         .free = grouping_free_sum,
+         .add = grouping_add_sum,
+         .flush = grouping_flush_sum
+        },
+
+        // standard deviation
+        {.name = "stddev",
+         .hash
= 0, + .value = RRDR_GROUPING_STDDEV, + .init = NULL, + .create= grouping_create_stddev, + .reset = grouping_reset_stddev, + .free = grouping_free_stddev, + .add = grouping_add_stddev, + .flush = grouping_flush_stddev + }, + {.name = "cv", // coefficient variation is calculated by stddev + .hash = 0, + .value = RRDR_GROUPING_CV, + .init = NULL, + .create= grouping_create_stddev, // not an error, stddev calculates this too + .reset = grouping_reset_stddev, // not an error, stddev calculates this too + .free = grouping_free_stddev, // not an error, stddev calculates this too + .add = grouping_add_stddev, // not an error, stddev calculates this too + .flush = grouping_flush_coefficient_of_variation + }, + {.name = "rsd", // alias of 'cv' + .hash = 0, + .value = RRDR_GROUPING_CV, + .init = NULL, + .create= grouping_create_stddev, // not an error, stddev calculates this too + .reset = grouping_reset_stddev, // not an error, stddev calculates this too + .free = grouping_free_stddev, // not an error, stddev calculates this too + .add = grouping_add_stddev, // not an error, stddev calculates this too + .flush = grouping_flush_coefficient_of_variation + }, + + /* + {.name = "mean", // same as average, no need to define it again + .hash = 0, + .value = RRDR_GROUPING_MEAN, + .setup = NULL, + .create= grouping_create_stddev, + .reset = grouping_reset_stddev, + .free = grouping_free_stddev, + .add = grouping_add_stddev, + .flush = grouping_flush_mean + }, + */ + + /* + {.name = "variance", // meaningless to offer + .hash = 0, + .value = RRDR_GROUPING_VARIANCE, + .setup = NULL, + .create= grouping_create_stddev, + .reset = grouping_reset_stddev, + .free = grouping_free_stddev, + .add = grouping_add_stddev, + .flush = grouping_flush_variance + }, + */ + + // single exponential smoothing + {.name = "ses", + .hash = 0, + .value = RRDR_GROUPING_SES, + .init = grouping_init_ses, + .create= grouping_create_ses, + .reset = grouping_reset_ses, + .free = grouping_free_ses, + .add = grouping_add_ses, + .flush = grouping_flush_ses + }, + {.name = "ema", // alias for 'ses' + .hash = 0, + .value = RRDR_GROUPING_SES, + .init = NULL, + .create= grouping_create_ses, + .reset = grouping_reset_ses, + .free = grouping_free_ses, + .add = grouping_add_ses, + .flush = grouping_flush_ses + }, + {.name = "ewma", // alias for ses + .hash = 0, + .value = RRDR_GROUPING_SES, + .init = NULL, + .create= grouping_create_ses, + .reset = grouping_reset_ses, + .free = grouping_free_ses, + .add = grouping_add_ses, + .flush = grouping_flush_ses + }, + + // double exponential smoothing + {.name = "des", + .hash = 0, + .value = RRDR_GROUPING_DES, + .init = grouping_init_des, + .create= grouping_create_des, + .reset = grouping_reset_des, + .free = grouping_free_des, + .add = grouping_add_des, + .flush = grouping_flush_des + }, + + // terminator + {.name = NULL, + .hash = 0, + .value = RRDR_GROUPING_UNDEFINED, + .init = NULL, + .create= grouping_create_average, + .reset = grouping_reset_average, + .free = grouping_free_average, + .add = grouping_add_average, + .flush = grouping_flush_average + } +}; + +void web_client_api_v1_init_grouping(void) { + int i; + + for(i = 0; api_v1_data_groups[i].name ; i++) { + api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name); + + if(api_v1_data_groups[i].init) + api_v1_data_groups[i].init(); + } +} + +const char *group_method2string(RRDR_GROUPING group) { + int i; + + for(i = 0; api_v1_data_groups[i].name ; i++) { + if(api_v1_data_groups[i].value == group) { + return 
api_v1_data_groups[i].name; + } + } + + return "unknown-group-method"; +} + +RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) { + int i; + + uint32_t hash = simple_hash(name); + for(i = 0; api_v1_data_groups[i].name ; i++) + if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name))) + return api_v1_data_groups[i].value; + + return def; +} + +// ---------------------------------------------------------------------------- + +static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims) { + rrdset_check_rdlock(r->st); + + if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return; + + int match_ids = 0, match_names = 0; + + if(unlikely(options & RRDR_OPTION_MATCH_IDS)) + match_ids = 1; + if(unlikely(options & RRDR_OPTION_MATCH_NAMES)) + match_names = 1; + + if(likely(!match_ids && !match_names)) + match_ids = match_names = 1; + + SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT); + + RRDDIM *d; + long c, dims_selected = 0, dims_not_hidden_not_zero = 0; + for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) { + if( (match_ids && simple_pattern_matches(pattern, d->id)) + || (match_names && simple_pattern_matches(pattern, d->name)) + ) { + r->od[c] |= RRDR_DIMENSION_SELECTED; + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) r->od[c] &= ~RRDR_DIMENSION_HIDDEN; + dims_selected++; + + // since the user needs this dimension + // make it appear as NONZERO, to return it + // even if the dimension has only zeros + // unless option non_zero is set + if(unlikely(!(options & RRDR_OPTION_NONZERO))) + r->od[c] |= RRDR_DIMENSION_NONZERO; + + // count the visible dimensions + if(likely(r->od[c] & RRDR_DIMENSION_NONZERO)) + dims_not_hidden_not_zero++; + } + else { + r->od[c] |= RRDR_DIMENSION_HIDDEN; + if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED; + } + } + simple_pattern_free(pattern); + + // check if all dimensions are hidden + if(unlikely(!dims_not_hidden_not_zero && dims_selected)) { + // there are a few selected dimensions + // but they are all zero + // enable the selected ones + // to avoid returning an empty chart + for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) + if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) + r->od[c] |= RRDR_DIMENSION_NONZERO; + } +} + +// ---------------------------------------------------------------------------- +// helpers to find our way in RRDR + +static inline RRDR_VALUE_FLAGS *rrdr_line_options(RRDR *r, long rrdr_line) { + return &r->o[ rrdr_line * r->d ]; +} + +static inline calculated_number *rrdr_line_values(RRDR *r, long rrdr_line) { + return &r->v[ rrdr_line * r->d ]; +} + +static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) { + rrdr_line++; + + #ifdef NETDATA_INTERNAL_CHECKS + + if(unlikely(rrdr_line >= r->n)) + error("INTERNAL ERROR: requested to step above RRDR size for chart '%s'", r->st->name); + + if(unlikely(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t)) + error("INTERNAL ERROR: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'", (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name); + + #endif + + // save the time + r->t[rrdr_line] = t; + + return rrdr_line; +} + +static inline void rrdr_done(RRDR *r, long rrdr_line) { + r->rows = rrdr_line + 1; +} + + +// ---------------------------------------------------------------------------- +// fill RRDR for a single dimension + +static inline void 
do_dimension( + RRDR *r + , long points_wanted + , RRDDIM *rd + , long dim_id_in_rrdr + , long after_slot + , long before_slot + , time_t after_wanted + , time_t before_wanted +){ + (void) before_slot; + + RRDSET *st = r->st; + + time_t + now = after_wanted, + dt = st->update_every, + max_date = 0, + min_date = 0; + + long + slot = after_slot, + group_size = r->group, + points_added = 0, + values_in_group = 0, + values_in_group_non_zero = 0, + rrdr_line = -1, + entries = st->entries; + + RRDR_VALUE_FLAGS + group_value_flags = RRDR_VALUE_NOTHING; + + calculated_number min = r->min, max = r->max; + size_t db_points_read = 0; + for( ; points_added < points_wanted ; now += dt, slot++ ) { + if(unlikely(slot >= entries)) slot = 0; + + // make sure we return data in the proper time range + if(unlikely(now > before_wanted)) { + #ifdef NETDATA_INTERNAL_CHECKS + r->internal.log = "stopped, because attempted to access the db after 'wanted before'"; + #endif + break; + } + if(unlikely(now < after_wanted)) { + #ifdef NETDATA_INTERNAL_CHECKS + r->internal.log = "skipped, because attempted to access the db before 'wanted after'"; + #endif + continue; + } + + // read the value from the database + storage_number n = rd->values[slot]; + calculated_number value = NAN; + if(likely(does_storage_number_exist(n))) { + + value = unpack_storage_number(n); + if(likely(value != 0.0)) + values_in_group_non_zero++; + + if(unlikely(did_storage_number_reset(n))) + group_value_flags |= RRDR_VALUE_RESET; + + } + + // add this value for grouping + r->internal.grouping_add(r, value); + values_in_group++; + db_points_read++; + + if(unlikely(values_in_group == group_size)) { + rrdr_line = rrdr_line_init(r, now, rrdr_line); + + if(unlikely(!min_date)) min_date = now; + max_date = now; + + // find the place to store our values + RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr]; + + // update the dimension options + if(likely(values_in_group_non_zero)) + r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO; + + // store the specific point options + *rrdr_value_options_ptr = group_value_flags; + + // store the value + calculated_number value = r->internal.grouping_flush(r, rrdr_value_options_ptr); + r->v[rrdr_line * r->d + dim_id_in_rrdr] = value; + + if(likely(points_added || dim_id_in_rrdr)) { + // find the min/max across all dimensions + + if(unlikely(value < min)) min = value; + if(unlikely(value > max)) max = value; + + } + else { + // runs only when dim_id_in_rrdr == 0 && points_added == 0 + // so, on the first point added for the query. 
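+                // seed the query-wide min/max with the very first value flushed,
+                // so the zero-initialized r->min / r->max from rrdr_create()
+                // never act as artificial bounds for the result.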
+ min = max = value; + } + + points_added++; + values_in_group = 0; + group_value_flags = RRDR_VALUE_NOTHING; + values_in_group_non_zero = 0; + } + } + + r->internal.db_points_read += db_points_read; + r->internal.result_points_generated += points_added; + + r->min = min; + r->max = max; + r->before = max_date; + r->after = min_date; + rrdr_done(r, rrdr_line); + + #ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(r->rows != points_added)) + error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows); + #endif +} + +// ---------------------------------------------------------------------------- +// fill RRDR for the whole chart + +#ifdef NETDATA_INTERNAL_CHECKS +static void rrd2rrdr_log_request_response_metdata(RRDR *r + , RRDR_GROUPING group_method + , int aligned + , long group + , long resampling_time + , long resampling_group + , time_t after_wanted + , time_t after_requested + , time_t before_wanted + , time_t before_requested + , long points_requested + , long points_wanted + , size_t after_slot + , size_t before_slot + , const char *msg + ) { + info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), " + "after (got: %zu, want: %zu, req: %zu, db: %zu), " + "before (got: %zu, want: %zu, req: %zu, db: %zu), " + "duration (got: %zu, want: %zu, req: %zu, db: %zu), " + "slot (after: %zu, before: %zu, delta: %zu), " + "points (got: %ld, want: %ld, req: %ld, db: %ld), " + "%s" + , r->st->name + , r->st->update_every + + // grouping + , (aligned) ? "aligned" : "unaligned" + , group_method2string(group_method) + , group + , resampling_time + , resampling_group + + // after + , (size_t)r->after - (group - 1) * r->st->update_every + , (size_t)after_wanted + , (size_t)after_requested + , (size_t)rrdset_first_entry_t(r->st) + + // before + , (size_t)r->before + , (size_t)before_wanted + , (size_t)before_requested + , (size_t)rrdset_last_entry_t(r->st) + + // duration + , (size_t)(r->before - r->after + r->st->update_every) + , (size_t)(before_wanted - after_wanted + r->st->update_every) + , (size_t)(before_requested - after_requested) + , (size_t)((rrdset_last_entry_t(r->st) - rrdset_first_entry_t(r->st)) + r->st->update_every) + + // slot + , after_slot + , before_slot + , (after_slot > before_slot) ? 
(r->st->entries - after_slot + before_slot) : (before_slot - after_slot) + + // points + , r->rows + , points_wanted + , points_requested + , r->st->entries + + // message + , msg + ); +} +#endif // NETDATA_INTERNAL_CHECKS + +RRDR *rrd2rrdr( + RRDSET *st + , long points_requested + , long long after_requested + , long long before_requested + , RRDR_GROUPING group_method + , long resampling_time_requested + , RRDR_OPTIONS options + , const char *dimensions +) { + int aligned = !(options & RRDR_OPTION_NOT_ALIGNED); + + int absolute_period_requested = -1; + + time_t first_entry_t = rrdset_first_entry_t(st); + time_t last_entry_t = rrdset_last_entry_t(st); + + if(before_requested == 0 && after_requested == 0) { + // dump the all the data + before_requested = last_entry_t; + after_requested = first_entry_t; + absolute_period_requested = 0; + } + + // allow relative for before (smaller than API_RELATIVE_TIME_MAX) + if(((before_requested < 0)?-before_requested:before_requested) <= API_RELATIVE_TIME_MAX) { + if(abs(before_requested) % st->update_every) { + // make sure it is multiple of st->update_every + if(before_requested < 0) before_requested = before_requested - st->update_every - before_requested % st->update_every; + else before_requested = before_requested + st->update_every - before_requested % st->update_every; + } + if(before_requested > 0) before_requested = first_entry_t + before_requested; + else before_requested = last_entry_t + before_requested; + absolute_period_requested = 0; + } + + // allow relative for after (smaller than API_RELATIVE_TIME_MAX) + if(((after_requested < 0)?-after_requested:after_requested) <= API_RELATIVE_TIME_MAX) { + if(after_requested == 0) after_requested = -st->update_every; + if(abs(after_requested) % st->update_every) { + // make sure it is multiple of st->update_every + if(after_requested < 0) after_requested = after_requested - st->update_every - after_requested % st->update_every; + else after_requested = after_requested + st->update_every - after_requested % st->update_every; + } + after_requested = before_requested + after_requested; + absolute_period_requested = 0; + } + + if(absolute_period_requested == -1) + absolute_period_requested = 1; + + // make sure they are within our timeframe + if(before_requested > last_entry_t) before_requested = last_entry_t; + if(before_requested < first_entry_t) before_requested = first_entry_t; + + if(after_requested > last_entry_t) after_requested = last_entry_t; + if(after_requested < first_entry_t) after_requested = first_entry_t; + + // check if they are reversed + if(after_requested > before_requested) { + time_t tmp = before_requested; + before_requested = after_requested; + after_requested = tmp; + } + + // the duration of the chart + time_t duration = before_requested - after_requested; + long available_points = duration / st->update_every; + + if(duration <= 0 || available_points <= 0) + return rrdr_create(st, 1); + + // check the number of wanted points in the result + if(unlikely(points_requested < 0)) points_requested = -points_requested; + if(unlikely(points_requested > available_points)) points_requested = available_points; + if(unlikely(points_requested == 0)) points_requested = available_points; + + // calculate the desired grouping of source data points + long group = available_points / points_requested; + if(unlikely(group <= 0)) group = 1; + if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer + + // resampling_time_requested 
enforces a certain grouping multiple
+    calculated_number resampling_divisor = 1.0;
+    long resampling_group = 1;
+    if(unlikely(resampling_time_requested > st->update_every)) {
+        if (unlikely(resampling_time_requested > duration)) {
+            // group_time is above the available duration
+
+            #ifdef NETDATA_INTERNAL_CHECKS
+            info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
+            #endif
+
+            group = available_points; // use all the points
+        }
+        else {
+            // the points we should group to satisfy gtime
+            resampling_group = resampling_time_requested / st->update_every;
+            if(unlikely(resampling_time_requested % st->update_every)) {
+                #ifdef NETDATA_INTERNAL_CHECKS
+                info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, st->update_every);
+                #endif
+
+                resampling_group++;
+            }
+
+            // adapt group according to resampling_group
+            if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
+            if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+
+            //resampling_divisor = group / resampling_group;
+            resampling_divisor = (calculated_number)(group * st->update_every) / (calculated_number)resampling_time_requested;
+        }
+    }
+
+    // now that we have group,
+    // align the requested timeframe to fit it.
+
+    if(aligned) {
+        // alignment has been requested, so align the values
+        before_requested -= (before_requested % group);
+        after_requested  -= (after_requested  % group);
+    }
+
+    // we align the request on requested_before
+    time_t before_wanted = before_requested;
+    if(likely(before_wanted > last_entry_t)) {
+        #ifdef NETDATA_INTERNAL_CHECKS
+        error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
+        #endif
+
+        before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * st->update_every ));
+    }
+    size_t before_slot = rrdset_time2slot(st, before_wanted);
+
+    // we need to estimate the number of points, for having
+    // an integer number of values per point
+    long points_wanted = (before_wanted - after_requested) / st->update_every / group;
+
+    time_t after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every;
+    if(unlikely(after_wanted < first_entry_t)) {
+        // hm...
we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning + points_wanted = (before_wanted - first_entry_t) / group; + + // recalculate after wanted with the new number of points + after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every; + + if(unlikely(after_wanted < first_entry_t)) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name); + #endif + + after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * st->update_every )) + ( ((aligned)?group:1) * st->update_every ); + } + } + size_t after_slot = rrdset_time2slot(st, after_wanted); + + // check if they are reversed + if(unlikely(after_wanted > before_wanted)) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name); + #endif + time_t tmp = before_wanted; + before_wanted = after_wanted; + after_wanted = tmp; + } + + // recalculate points_wanted using the final time-frame + points_wanted = (before_wanted - after_wanted) / st->update_every / group + 1; + if(unlikely(points_wanted < 0)) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted); + #endif + points_wanted = 0; + } + +#ifdef NETDATA_INTERNAL_CHECKS + duration = before_wanted - after_wanted; + + if(after_wanted < first_entry_t) + error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t); + + if(after_wanted > last_entry_t) + error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t); + + if(before_wanted < first_entry_t) + error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t); + + if(before_wanted > last_entry_t) + error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t); + + if(before_slot >= (size_t)st->entries) + error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1); + + if(after_slot >= (size_t)st->entries) + error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1); + + if(points_wanted > (before_wanted - after_wanted) / group / st->update_every + 1) + error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / st->update_every + 1); + + if(group < resampling_group) + error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group); + + if(group > resampling_group && group % resampling_group) + error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group); +#endif + + // ------------------------------------------------------------------------- + // initialize our result set + // this also locks the chart for us + + RRDR *r = rrdr_create(st, points_wanted); + if(unlikely(!r)) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted); + #endif + return NULL; + } + + if(unlikely(!r->d || !points_wanted)) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", 
st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted); + #endif + return r; + } + + if(unlikely(absolute_period_requested == 1)) + r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE; + else + r->result_options |= RRDR_RESULT_OPTION_RELATIVE; + + // find how many dimensions we have + long dimensions_count = r->d; + + // ------------------------------------------------------------------------- + // initialize RRDR + + r->group = group; + r->update_every = (int)group * st->update_every; + r->before = before_wanted; + r->after = after_wanted; + r->internal.points_wanted = points_wanted; + r->internal.resampling_group = resampling_group; + r->internal.resampling_divisor = resampling_divisor; + + + // ------------------------------------------------------------------------- + // assign the processor functions + + { + int i, found = 0; + for(i = 0; !found && api_v1_data_groups[i].name ;i++) { + if(api_v1_data_groups[i].value == group_method) { + r->internal.grouping_create= api_v1_data_groups[i].create; + r->internal.grouping_reset = api_v1_data_groups[i].reset; + r->internal.grouping_free = api_v1_data_groups[i].free; + r->internal.grouping_add = api_v1_data_groups[i].add; + r->internal.grouping_flush = api_v1_data_groups[i].flush; + found = 1; + } + } + if(!found) { + errno = 0; + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name); + #endif + r->internal.grouping_create= grouping_create_average; + r->internal.grouping_reset = grouping_reset_average; + r->internal.grouping_free = grouping_free_average; + r->internal.grouping_add = grouping_add_average; + r->internal.grouping_flush = grouping_flush_average; + } + } + + // allocate any memory required by the grouping method + r->internal.grouping_data = r->internal.grouping_create(r); + + + // ------------------------------------------------------------------------- + // disable the not-wanted dimensions + + rrdset_check_rdlock(st); + + if(dimensions) + rrdr_disable_not_selected_dimensions(r, options, dimensions); + + + // ------------------------------------------------------------------------- + // do the work for each dimension + + time_t max_after = 0, min_before = 0; + long max_rows = 0; + + RRDDIM *rd; + long c, dimensions_used = 0, dimensions_nonzero = 0; + for(rd = st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) { + + // if we need a percentage, we need to calculate all dimensions + if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) { + if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED; + continue; + } + r->od[c] |= RRDR_DIMENSION_SELECTED; + + // reset the grouping for the new dimension + r->internal.grouping_reset(r); + + do_dimension( + r + , points_wanted + , rd + , c + , after_slot + , before_slot + , after_wanted + , before_wanted + ); + + if(r->od[c] & RRDR_DIMENSION_NONZERO) + dimensions_nonzero++; + + // verify all dimensions are aligned + if(unlikely(!dimensions_used)) { + min_before = r->before; + max_after = r->after; + max_rows = r->rows; + } + else { + if(r->after != max_after) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu", + st->name, (size_t)max_after, rd->name, (size_t)r->after); + #endif + r->after = (r->after > max_after) ? 
r->after : max_after; + } + + if(r->before != min_before) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu", + st->name, (size_t)min_before, rd->name, (size_t)r->before); + #endif + r->before = (r->before < min_before) ? r->before : min_before; + } + + if(r->rows != max_rows) { + #ifdef NETDATA_INTERNAL_CHECKS + error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu", + st->name, (size_t)max_rows, rd->name, (size_t)r->rows); + #endif + r->rows = (r->rows > max_rows) ? r->rows : max_rows; + } + } + + dimensions_used++; + } + + #ifdef NETDATA_INTERNAL_CHECKS + + if(r->internal.log) + rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, r->internal.log); + + if(r->rows != points_wanted) + rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'points' is not wanted 'points'"); + + if(aligned && (r->before % group) != 0) + rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'before' is not aligned but alignment is required"); + + // 'after' should not be aligned, since we start inside the first group + //if(aligned && (r->after % group) != 0) + // rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required"); + + if(r->before != before_requested) + rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "chart is not aligned to requested 'before'"); + + if(r->before != before_wanted) + rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'before' is not wanted 'before'"); + + // reported 'after' varies, depending on group + if((r->after - (group - 1) * r->st->update_every) != after_wanted) + rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'after' is not wanted 'after'"); + + #endif + + // free all resources used by the grouping method + r->internal.grouping_free(r); + + // when all the dimensions are zero, we should return all of them + if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero)) { + // all the dimensions are zero + // mark them as NONZERO to send them all + for(rd = st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) 
continue; + r->od[c] |= RRDR_DIMENSION_NONZERO; + } + } + + rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated); + return r; +} diff --git a/web/api/queries/query.h b/web/api/queries/query.h new file mode 100644 index 000000000..6b8a51c58 --- /dev/null +++ b/web/api/queries/query.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_DATA_QUERY_H +#define NETDATA_API_DATA_QUERY_H + +typedef enum rrdr_grouping { + RRDR_GROUPING_UNDEFINED = 0, + RRDR_GROUPING_AVERAGE, + RRDR_GROUPING_MIN, + RRDR_GROUPING_MAX, + RRDR_GROUPING_SUM, + RRDR_GROUPING_INCREMENTAL_SUM, + RRDR_GROUPING_MEDIAN, + RRDR_GROUPING_STDDEV, + RRDR_GROUPING_CV, + RRDR_GROUPING_SES, + RRDR_GROUPING_DES, +} RRDR_GROUPING; + +extern const char *group_method2string(RRDR_GROUPING group); +extern void web_client_api_v1_init_grouping(void); +extern RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def); + +#endif //NETDATA_API_DATA_QUERY_H diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c new file mode 100644 index 000000000..e727e607e --- /dev/null +++ b/web/api/queries/rrdr.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdr.h" + +/* +static void rrdr_dump(RRDR *r) +{ + long c, i; + RRDDIM *d; + + fprintf(stderr, "\nCHART %s (%s)\n", r->st->id, r->st->name); + + for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) { + fprintf(stderr, "DIMENSION %s (%s), %s%s%s%s\n" + , d->id + , d->name + , (r->od[c] & RRDR_EMPTY)?"EMPTY ":"" + , (r->od[c] & RRDR_RESET)?"RESET ":"" + , (r->od[c] & RRDR_DIMENSION_HIDDEN)?"HIDDEN ":"" + , (r->od[c] & RRDR_DIMENSION_NONZERO)?"NONZERO ":"" + ); + } + + if(r->rows <= 0) { + fprintf(stderr, "RRDR does not have any values in it.\n"); + return; + } + + fprintf(stderr, "RRDR includes %d values in it:\n", r->rows); + + // for each line in the array + for(i = 0; i < r->rows ;i++) { + calculated_number *cn = &r->v[ i * r->d ]; + RRDR_DIMENSION_FLAGS *co = &r->o[ i * r->d ]; + + // print the id and the timestamp of the line + fprintf(stderr, "%ld %ld ", i + 1, r->t[i]); + + // for each dimension + for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely(!(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(co[c] & RRDR_EMPTY) + fprintf(stderr, "null "); + else + fprintf(stderr, CALCULATED_NUMBER_FORMAT " %s%s%s%s " + , cn[c] + , (co[c] & RRDR_EMPTY)?"E":" " + , (co[c] & RRDR_RESET)?"R":" " + , (co[c] & RRDR_DIMENSION_HIDDEN)?"H":" " + , (co[c] & RRDR_DIMENSION_NONZERO)?"N":" " + ); + } + + fprintf(stderr, "\n"); + } +} +*/ + + + + +inline static void rrdr_lock_rrdset(RRDR *r) { + if(unlikely(!r)) { + error("NULL value given!"); + return; + } + + rrdset_rdlock(r->st); + r->has_st_lock = 1; +} + +inline static void rrdr_unlock_rrdset(RRDR *r) { + if(unlikely(!r)) { + error("NULL value given!"); + return; + } + + if(likely(r->has_st_lock)) { + rrdset_unlock(r->st); + r->has_st_lock = 0; + } +} + +inline void rrdr_free(RRDR *r) +{ + if(unlikely(!r)) { + error("NULL value given!"); + return; + } + + rrdr_unlock_rrdset(r); + freez(r->t); + freez(r->v); + freez(r->o); + freez(r->od); + freez(r); +} + +RRDR *rrdr_create(RRDSET *st, long n) +{ + if(unlikely(!st)) { + error("NULL value given!"); + return NULL; + } + + RRDR *r = callocz(1, sizeof(RRDR)); + r->st = st; + + rrdr_lock_rrdset(r); + + RRDDIM *rd; + rrddim_foreach_read(rd, st) r->d++; + + r->n = n; + + r->t = callocz((size_t)n, 
sizeof(time_t));
+    r->v = mallocz(n * r->d * sizeof(calculated_number));
+    r->o = mallocz(n * r->d * sizeof(RRDR_VALUE_FLAGS));
+    r->od = mallocz(r->d * sizeof(RRDR_DIMENSION_FLAGS));
+
+    // set the hidden flag on hidden dimensions
+    int c;
+    for(c = 0, rd = st->dimensions ; rd ; c++, rd = rd->next) {
+        if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)))
+            r->od[c] = RRDR_DIMENSION_HIDDEN;
+        else
+            r->od[c] = RRDR_DIMENSION_DEFAULT;
+    }
+
+    r->group = 1;
+    r->update_every = 1;
+
+    return r;
+}
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
new file mode 100644
index 000000000..4f6350389
--- /dev/null
+++ b/web/api/queries/rrdr.h
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_QUERIES_RRDR_H
+#define NETDATA_QUERIES_RRDR_H
+
+#include "libnetdata/libnetdata.h"
+
+typedef enum rrdr_options {
+    RRDR_OPTION_NONZERO      = 0x00000001, // don't output dimensions with just zero values
+    RRDR_OPTION_REVERSED     = 0x00000002, // output the rows in reverse order (oldest to newest)
+    RRDR_OPTION_ABSOLUTE     = 0x00000004, // values positive, for DATASOURCE_SSV before summing
+    RRDR_OPTION_MIN2MAX      = 0x00000008, // when adding dimensions, use max - min, instead of sum
+    RRDR_OPTION_SECONDS      = 0x00000010, // output seconds, instead of dates
+    RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
+    RRDR_OPTION_NULL2ZERO    = 0x00000040, // do not show nulls, convert them to zeros
+    RRDR_OPTION_OBJECTSROWS  = 0x00000080, // each row of values should be an object, not an array
+    RRDR_OPTION_GOOGLE_JSON  = 0x00000100, // comply with google JSON/JSONP specs
+    RRDR_OPTION_JSON_WRAP    = 0x00000200, // wrap the response in a JSON header with info about the result
+    RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
+    RRDR_OPTION_PERCENTAGE   = 0x00000800, // give values as percentage of total
+    RRDR_OPTION_NOT_ALIGNED  = 0x00001000, // do not align charts for persistent timeframes
+    RRDR_OPTION_DISPLAY_ABS  = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
+    RRDR_OPTION_MATCH_IDS    = 0x00004000, // when filtering dimensions, match only IDs
+    RRDR_OPTION_MATCH_NAMES  = 0x00008000, // when filtering dimensions, match only names
+} RRDR_OPTIONS;
+
+typedef enum rrdr_value_flag {
+    RRDR_VALUE_NOTHING = 0x00, // no flag set (a good default)
+    RRDR_VALUE_EMPTY   = 0x01, // the database value is empty
+    RRDR_VALUE_RESET   = 0x02, // the database value is marked as reset (overflown)
+} RRDR_VALUE_FLAGS;
+
+typedef enum rrdr_dimension_flag {
+    RRDR_DIMENSION_DEFAULT  = 0x00,
+    RRDR_DIMENSION_HIDDEN   = 0x04, // the dimension is hidden (not to be presented to callers)
+    RRDR_DIMENSION_NONZERO  = 0x08, // the dimension is non zero (contains non-zero values)
+    RRDR_DIMENSION_SELECTED = 0x10, // the dimension is selected for evaluation in this RRDR
+} RRDR_DIMENSION_FLAGS;
+
+// RRDR result options
+typedef enum rrdr_result_flags {
+    RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001, // the query uses absolute time-frames (can be cached by browsers and proxies)
+    RRDR_RESULT_OPTION_RELATIVE = 0x00000002, // the query uses relative time-frames (should not be cached by browsers and proxies)
+} RRDR_RESULT_FLAGS;
+
+typedef struct rrdresult {
+    struct rrdset *st;         // the chart this result refers to
+
+    RRDR_RESULT_FLAGS result_options; // RRDR_RESULT_OPTION_*
+
+    int d;                    // the number of dimensions
+    long n;                   // the number of values in the arrays
+    long rows;                // the number of rows used
+
+    RRDR_DIMENSION_FLAGS
*od; // the options for the dimensions + + time_t *t; // array of n timestamps + calculated_number *v; // array n x d values + RRDR_VALUE_FLAGS *o; // array n x d options for each value returned + + long group; // how many collected values were grouped for each row + int update_every; // what is the suggested update frequency in seconds + + calculated_number min; + calculated_number max; + + time_t before; + time_t after; + + int has_st_lock; // if st is read locked by us + + // internal rrd2rrdr() members below this point + struct { + long points_wanted; + long resampling_group; + calculated_number resampling_divisor; + + void *(*grouping_create)(struct rrdresult *r); + void (*grouping_reset)(struct rrdresult *r); + void (*grouping_free)(struct rrdresult *r); + void (*grouping_add)(struct rrdresult *r, calculated_number value); + calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + void *grouping_data; + + #ifdef NETDATA_INTERNAL_CHECKS + const char *log; + #endif + + size_t db_points_read; + size_t result_points_generated; + } internal; +} RRDR; + +#define rrdr_rows(r) ((r)->rows) + +extern void rrdr_free(RRDR *r); +extern RRDR *rrdr_create(struct rrdset *st, long n); + +#include "../web_api_v1.h" +#include "web/api/queries/query.h" + +extern RRDR *rrd2rrdr(RRDSET *st, long points_requested, long long after_requested, long long before_requested, RRDR_GROUPING group_method, long resampling_time_requested, RRDR_OPTIONS options, const char *dimensions); + +#include "query.h" + +#endif //NETDATA_QUERIES_RRDR_H diff --git a/web/api/queries/ses/Makefile.am b/web/api/queries/ses/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/ses/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/ses/Makefile.in b/web/api/queries/ses/Makefile.in new file mode 100644 index 000000000..bb538fac3 --- /dev/null +++ b/web/api/queries/ses/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/ses +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/ses/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/ses/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md new file mode 100644 index 000000000..b5dacea83 --- /dev/null +++ b/web/api/queries/ses/README.md @@ -0,0 +1,54 @@ +# Single (or Simple) Exponential Smoothing (`ses`) + +> This query is also available as `ema` and `ewma`. + +An exponential moving average (`ema`), also known as an exponentially weighted moving average (`ewma`) +is a first-order infinite impulse response filter that applies weighting factors which decrease +exponentially. 
The weighting for each older datum decreases exponentially, never reaching zero.
+
+In simple terms, this is like an average value, but more recent values are given more weight.
+
+Netdata automatically adjusts the weight (`alpha`) based on the number of values processed,
+using the formula:
+
+```
+window = min(number of values, 15)
+alpha  = 2 / (window + 1)
+```
+
+For example, a query that groups 4 collected values into each point uses window = 4, so
+alpha = 2 / 5 = 0.4; longer groups are capped at window = 15, giving alpha = 0.125.
+
+You can change the fixed value `15` by setting this in `netdata.conf`:
+
+```
+[web]
+    ses max window = 15
+```
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+    on: my_chart
+lookup: ses -1m unaligned of my_dimension
+  warn: $this > 1000
+```
+
+`ses` does not change the units. For example, if the chart's units are `requests/sec`, the exponential
+moving average will again be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=ses` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- [https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average](https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average)
+- [https://en.wikipedia.org/wiki/Exponential_smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing).
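To make the recurrence above concrete, here is a small self-contained C sketch of single
exponential smoothing as the README describes it (an illustrative model using plain
`double`; the real implementation in `ses.c` below uses netdata's `calculated_number`
type and the query engine's create/add/flush hooks):

```c
#include <stdio.h>
#include <stddef.h>

// level = alpha * value + (1 - alpha) * level, seeded with the first value.
static double ses_group(const double *values, size_t n, size_t max_window) {
    size_t window = (n > max_window) ? max_window : n;  // window = min(n, max_window)
    double alpha = 2.0 / ((double)window + 1.0);
    double level = 0.0;
    for(size_t i = 0; i < n; i++) {
        if(i == 0) level = values[i];                   // seed, as ses.c does
        level = alpha * values[i] + (1.0 - alpha) * level;
    }
    return level;
}

int main(void) {
    double v[] = { 10, 12, 11, 30 };
    // window = 4, alpha = 0.4: the recent burst dominates,
    // giving ~18.53 instead of the plain average 15.75.
    printf("ses = %f\n", ses_group(v, 4, 15));
    return 0;
}
```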
max_window_size : points; +} + +static inline void set_alpha(RRDR *r, struct grouping_ses *g) { + // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average + // A commonly used value for alpha is 2 / (N + 1) + g->alpha = 2.0 / (window(r, g) + 1.0); + g->alpha_other = 1.0 - g->alpha; +} + +void *grouping_create_ses(RRDR *r) { + struct grouping_ses *g = (struct grouping_ses *)callocz(1, sizeof(struct grouping_ses)); + set_alpha(r, g); + g->level = 0.0; + return g; +} + +// resets when switches dimensions +// so, clear everything to restart +void grouping_reset_ses(RRDR *r) { + struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data; + g->level = 0.0; + g->count = 0; +} + +void grouping_free_ses(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_ses(RRDR *r, calculated_number value) { + struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data; + + if(isnormal(value)) { + if(unlikely(!g->count)) + g->level = value; + + g->level = g->alpha * value + g->alpha_other * g->level; + g->count++; + } +} + +calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data; + + if(unlikely(!g->count || !isnormal(g->level))) { + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + return 0.0; + } + + return g->level; +} diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h new file mode 100644 index 000000000..603fdb57c --- /dev/null +++ b/web/api/queries/ses/ses.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERIES_SES_H +#define NETDATA_API_QUERIES_SES_H + +#include "../query.h" +#include "../rrdr.h" + +extern void grouping_init_ses(void); + +extern void *grouping_create_ses(RRDR *r); +extern void grouping_reset_ses(RRDR *r); +extern void grouping_free_ses(RRDR *r); +extern void grouping_add_ses(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERIES_SES_H diff --git a/web/api/queries/stddev/Makefile.am b/web/api/queries/stddev/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/stddev/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/stddev/Makefile.in b/web/api/queries/stddev/Makefile.in new file mode 100644 index 000000000..b7ccdfdca --- /dev/null +++ b/web/api/queries/stddev/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/stddev +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ 
+CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/stddev/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/stddev/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md new file mode 100644 index 000000000..3436ff834 --- /dev/null +++ b/web/api/queries/stddev/README.md @@ -0,0 +1,87 @@ + +# standard deviation (`stddev`) + +The standard deviation is a measure that is used to quantify the amount of variation or dispersion +of a set of data values. 
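+
+For a sample of `n` values it can be computed as follows (this is the textbook sample formula
+with the `n - 1` denominator; the implementation in this module produces the same result through
+a numerically stable single-pass recurrence):
+
+```
+mean     = sum(x) / n
+variance = sum((x - mean)^2) / (n - 1)
+stddev   = sqrt(variance)
+```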
+
+A low standard deviation indicates that the data points tend to be close to the mean (also called the
+expected value) of the set, while a high standard deviation indicates that the data points are spread
+out over a wider range of values.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+    on: my_chart
+lookup: stddev -1m unaligned of my_dimension
+  warn: $this > 1000
+```
+
+`stddev` does not change the units. For example, if the chart's units are `requests/sec`, the standard
+deviation will also be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=stddev` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=stddev&after=-60&label=standard+deviation&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+Check [https://en.wikipedia.org/wiki/Standard_deviation](https://en.wikipedia.org/wiki/Standard_deviation).
+
+---
+
+# Coefficient of variation (`cv`)
+
+> This query is also available as `rsd`.
+
+The coefficient of variation (`cv`), also known as relative standard deviation (`rsd`),
+is a standardized measure of dispersion of a probability distribution or frequency distribution.
+
+It is defined as the ratio of the **standard deviation** to the **mean**.
+
+In simple terms, it expresses the standard deviation as a percentage of the mean. So, if the average
+value of a metric is 1000 and its standard deviation is 100 (meaning that it typically varies between
+900 and 1100), then `cv` is 10%.
+
+This is an easy way to check the percentage of variation, without using absolute values.
+
+For example, you may trigger an alarm if your web server requests/sec `cv` is above 20 (`%`)
+over the last minute. So if your web server was serving 1000 reqs/sec over the last minute,
+it will trigger the alarm if it had spikes below 800/sec or above 1200/sec.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+    on: my_chart
+lookup: cv -1m unaligned of my_dimension
+ units: %
+  warn: $this > 20
+```
+
+The units reported by `cv` are always `%`.
+
+It can also be used in APIs and badges as `&group=cv` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=cv&after=-60&label=coefficient+of+variation&value_color=orange&units=pcent)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+Check [https://en.wikipedia.org/wiki/Coefficient_of_variation](https://en.wikipedia.org/wiki/Coefficient_of_variation).
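+
+## Implementation note
+
+Internally, the module does not keep all the values of the window; it updates a running mean and a
+running sum of squared differences in a single pass (the Knuth/Welford recurrence referenced in the
+source). Below is a minimal, self-contained C sketch of that update; the sample values and the
+`main()` wrapper are illustrative only and not part of netdata:
+
+```
+// welford.c - standalone sketch of the single-pass stddev/cv update
+// compile with: cc welford.c -o welford -lm
+#include <stdio.h>
+#include <math.h>
+
+int main(void) {
+    double samples[] = { 900, 1100, 1000, 950, 1050 }; // illustrative data
+    size_t n = sizeof(samples) / sizeof(samples[0]);
+
+    double oldM = 0, newM = 0, oldS = 0, newS = 0;
+    for(size_t i = 0; i < n; i++) {
+        double x = samples[i];
+        if(i == 0) {
+            oldM = newM = x;    // the first value initializes the mean
+            oldS = 0.0;
+        }
+        else {
+            newM = oldM + (x - oldM) / (double)(i + 1);
+            newS = oldS + (x - oldM) * (x - newM);
+            oldM = newM;        // set up for the next iteration
+            oldS = newS;
+        }
+    }
+
+    double mean   = newM;
+    double stddev = (n > 1) ? sqrt(newS / (double)(n - 1)) : 0.0;
+    double cv     = (mean != 0.0) ? 100.0 * stddev / fabs(mean) : 0.0;
+
+    printf("mean=%.2f stddev=%.2f cv=%.2f%%\n", mean, stddev, cv);
+    return 0; // prints: mean=1000.00 stddev=79.06 cv=7.91%
+}
+```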
diff --git a/web/api/queries/stddev/stddev.c b/web/api/queries/stddev/stddev.c new file mode 100644 index 000000000..3858003d9 --- /dev/null +++ b/web/api/queries/stddev/stddev.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "stddev.h" + + +// ---------------------------------------------------------------------------- +// stddev + +// this implementation comes from: +// https://www.johndcook.com/blog/standard_deviation/ + +struct grouping_stddev { + long count; + calculated_number m_oldM, m_newM, m_oldS, m_newS; +}; + +void *grouping_create_stddev(RRDR *r) { + long entries = r->group; + if(entries < 0) entries = 0; + + return callocz(1, sizeof(struct grouping_stddev) + entries * sizeof(LONG_DOUBLE)); +} + +// resets when switches dimensions +// so, clear everything to restart +void grouping_reset_stddev(RRDR *r) { + struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data; + g->count = 0; +} + +void grouping_free_stddev(RRDR *r) { + freez(r->internal.grouping_data); + r->internal.grouping_data = NULL; +} + +void grouping_add_stddev(RRDR *r, calculated_number value) { + struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data; + + if(isnormal(value)) { + g->count++; + + // See Knuth TAOCP vol 2, 3rd edition, page 232 + if (g->count == 1) { + g->m_oldM = g->m_newM = value; + g->m_oldS = 0.0; + } + else { + g->m_newM = g->m_oldM + (value - g->m_oldM) / g->count; + g->m_newS = g->m_oldS + (value - g->m_oldM) * (value - g->m_newM); + + // set up for next iteration + g->m_oldM = g->m_newM; + g->m_oldS = g->m_newS; + } + } +} + +static inline calculated_number mean(struct grouping_stddev *g) { + return (g->count > 0) ? g->m_newM : 0.0; +} + +static inline calculated_number variance(struct grouping_stddev *g) { + return ( (g->count > 1) ? g->m_newS/(g->count - 1) : 0.0 ); +} +static inline calculated_number stddev(struct grouping_stddev *g) { + return sqrtl(variance(g)); +} + +calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data; + + calculated_number value; + + if(likely(g->count > 1)) { + value = stddev(g); + + if(!isnormal(value)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + } + else if(g->count == 1) { + value = 0.0; + } + else { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + + grouping_reset_stddev(r); + + return value; +} + +// https://en.wikipedia.org/wiki/Coefficient_of_variation +calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data; + + calculated_number value; + + if(likely(g->count > 1)) { + calculated_number m = mean(g); + value = 100.0 * stddev(g) / ((m < 0)? 
-m : m); + + if(unlikely(!isnormal(value))) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + } + else if(g->count == 1) { + // one value collected + value = 0.0; + } + else { + // no values collected + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + + grouping_reset_stddev(r); + + return value; +} + + +/* + * Mean = average + * +calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->count)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else { + value = mean(g); + + if(!isnormal(value)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + } + + grouping_reset_stddev(r); + + return value; +} + */ + +/* + * It is not advised to use this version of variance directly + * +calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) { + struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data; + + calculated_number value; + + if(unlikely(!g->count)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + else { + value = variance(g); + + if(!isnormal(value)) { + value = 0.0; + *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY; + } + } + + grouping_reset_stddev(r); + + return value; +} +*/ \ No newline at end of file diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h new file mode 100644 index 000000000..7a4697572 --- /dev/null +++ b/web/api/queries/stddev/stddev.h @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_QUERIES_STDDEV_H +#define NETDATA_API_QUERIES_STDDEV_H + +#include "../query.h" +#include "../rrdr.h" + +extern void *grouping_create_stddev(RRDR *r); +extern void grouping_reset_stddev(RRDR *r); +extern void grouping_free_stddev(RRDR *r); +extern void grouping_add_stddev(RRDR *r, calculated_number value); +extern calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); +extern calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); +// extern calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); +// extern calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr); + +#endif //NETDATA_API_QUERIES_STDDEV_H diff --git a/web/api/queries/sum/Makefile.am b/web/api/queries/sum/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/queries/sum/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/queries/sum/Makefile.in b/web/api/queries/sum/Makefile.in new file mode 100644 index 000000000..56dfb2d75 --- /dev/null +++ b/web/api/queries/sum/Makefile.in @@ -0,0 +1,464 @@ +# Makefile.in generated by automake 1.14.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/queries/sum +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) 
>/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ 
+htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/sum/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/queries/sum/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md new file mode 100644 index 000000000..be6596080 --- /dev/null +++ b/web/api/queries/sum/README.md @@ -0,0 +1,34 @@ +# Sum + +This module sums all the values in the time-frame requested. + +You can use `sum` to find the volume of something over a period. + +## how to use + +Use it in alarms like this: + +``` + alarm: my_alarm + on: my_chart +lookup: sum -1m unaligned of my_dimension + warn: $this > 1000 +``` + +`sum` does not change the units. 
For example, if the chart's units are `requests/sec`, the result
+will also be expressed in the same units (so one minute of a steady 10 `requests/sec` sums to
+roughly 600 requests).
+
+It can also be used in APIs and badges as `&group=sum` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=1m+sum&value_color=orange&units=requests)
+
+## References
+
+- [https://en.wikipedia.org/wiki/Summation](https://en.wikipedia.org/wiki/Summation).
diff --git a/web/api/queries/sum/sum.c b/web/api/queries/sum/sum.c
new file mode 100644
index 000000000..0da9937a3
--- /dev/null
+++ b/web/api/queries/sum/sum.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sum.h"
+
+// ----------------------------------------------------------------------------
+// sum
+
+struct grouping_sum {
+    calculated_number sum;
+    size_t count;
+};
+
+void *grouping_create_sum(RRDR *r) {
+    (void)r;
+    return callocz(1, sizeof(struct grouping_sum));
+}
+
+// resets when the query switches dimensions
+// so, clear everything to restart
+void grouping_reset_sum(RRDR *r) {
+    struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+    g->sum = 0;
+    g->count = 0;
+}
+
+void grouping_free_sum(RRDR *r) {
+    freez(r->internal.grouping_data);
+    r->internal.grouping_data = NULL;
+}
+
+void grouping_add_sum(RRDR *r, calculated_number value) {
+    if(!isnan(value)) {
+        struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+        g->sum += value;
+        g->count++;
+    }
+}
+
+calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+    struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+
+    calculated_number value;
+
+    if(unlikely(!g->count)) {
+        value = 0.0;
+        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+    }
+    else {
+        value = g->sum;
+    }
+
+    g->sum = 0.0;
+    g->count = 0;
+
+    return value;
+}
+
+
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
new file mode 100644
index 000000000..9dc8d209c
--- /dev/null
+++ b/web/api/queries/sum/sum.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_SUM_H
+#define NETDATA_API_QUERY_SUM_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_sum(RRDR *r);
+extern void grouping_reset_sum(RRDR *r);
+extern void grouping_free_sum(RRDR *r);
+extern void grouping_add_sum(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_SUM_H
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
new file mode 100644
index 000000000..1e03828e4
--- /dev/null
+++ b/web/api/web_api_v1.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_api_v1.h"
+
+static struct {
+    const char *name;
+    uint32_t hash;
+    RRDR_OPTIONS value;
+} api_v1_data_options[] = {
+        { "nonzero"          , 0    , RRDR_OPTION_NONZERO}
+        , {"flip"            , 0    ,
RRDR_OPTION_REVERSED} + , {"reversed" , 0 , RRDR_OPTION_REVERSED} + , {"reverse" , 0 , RRDR_OPTION_REVERSED} + , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP} + , {"min2max" , 0 , RRDR_OPTION_MIN2MAX} + , {"ms" , 0 , RRDR_OPTION_MILLISECONDS} + , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS} + , {"abs" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE} + , {"display_absolute", 0 , RRDR_OPTION_DISPLAY_ABS} + , {"display-absolute", 0 , RRDR_OPTION_DISPLAY_ABS} + , {"seconds" , 0 , RRDR_OPTION_SECONDS} + , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO} + , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS} + , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON} + , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON} + , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE} + , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED} + , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS} + , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS} + , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES} + , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} + , { NULL, 0, 0} +}; + +static struct { + const char *name; + uint32_t hash; + uint32_t value; +} api_v1_data_formats[] = { + { DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON} + , {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP} + , {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON} + , {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP} + , {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV} + , {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV} + , {DATASOURCE_FORMAT_TSV , 0 , DATASOURCE_TSV} + , {"tsv-excel" , 0 , DATASOURCE_TSV} + , {DATASOURCE_FORMAT_HTML , 0 , DATASOURCE_HTML} + , {DATASOURCE_FORMAT_JS_ARRAY , 0 , DATASOURCE_JS_ARRAY} + , {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA} + , {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY} + , {DATASOURCE_FORMAT_CSV_MARKDOWN , 0 , DATASOURCE_CSV_MARKDOWN} + , { NULL, 0, 0} +}; + +static struct { + const char *name; + uint32_t hash; + uint32_t value; +} api_v1_data_google_formats[] = { + // this is not error - when google requests json, it expects javascript + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat + { "json" , 0 , DATASOURCE_DATATABLE_JSONP} + , {"html" , 0 , DATASOURCE_HTML} + , {"csv" , 0 , DATASOURCE_CSV} + , {"tsv-excel", 0 , DATASOURCE_TSV} + , { NULL, 0, 0} +}; + +void web_client_api_v1_init(void) { + int i; + + for(i = 0; api_v1_data_options[i].name ; i++) + api_v1_data_options[i].hash = simple_hash(api_v1_data_options[i].name); + + for(i = 0; api_v1_data_formats[i].name ; i++) + api_v1_data_formats[i].hash = simple_hash(api_v1_data_formats[i].name); + + for(i = 0; api_v1_data_google_formats[i].name ; i++) + api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name); + + web_client_api_v1_init_grouping(); +} + +inline uint32_t web_client_api_request_v1_data_options(char *o) { + uint32_t ret = 0x00000000; + char *tok; + + while(o && *o && (tok = mystrsep(&o, ", |"))) { + if(!*tok) continue; + + uint32_t hash = simple_hash(tok); + int i; + for(i = 0; api_v1_data_options[i].name ; i++) { + if (unlikely(hash == api_v1_data_options[i].hash && !strcmp(tok, api_v1_data_options[i].name))) { + ret |= api_v1_data_options[i].value; + break; + } + } + } + + return ret; +} + +inline uint32_t web_client_api_request_v1_data_format(char *name) { + uint32_t hash = simple_hash(name); + int i; + + for(i = 0; api_v1_data_formats[i].name ; 
i++) { + if (unlikely(hash == api_v1_data_formats[i].hash && !strcmp(name, api_v1_data_formats[i].name))) { + return api_v1_data_formats[i].value; + } + } + + return DATASOURCE_JSON; +} + +inline uint32_t web_client_api_request_v1_data_google_format(char *name) { + uint32_t hash = simple_hash(name); + int i; + + for(i = 0; api_v1_data_google_formats[i].name ; i++) { + if (unlikely(hash == api_v1_data_google_formats[i].hash && !strcmp(name, api_v1_data_google_formats[i].name))) { + return api_v1_data_google_formats[i].value; + } + } + + return DATASOURCE_JSON; +} + + +inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url) { + int all = 0; + + while(url) { + char *value = mystrsep(&url, "?&"); + if (!value || !*value) continue; + + if(!strcmp(value, "all")) all = 1; + else if(!strcmp(value, "active")) all = 0; + } + + buffer_flush(w->response.data); + w->response.data->contenttype = CT_APPLICATION_JSON; + health_alarms2json(host, w->response.data, all); + return 200; +} + +inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) { + uint32_t after = 0; + + while(url) { + char *value = mystrsep(&url, "?&"); + if (!value || !*value) continue; + + char *name = mystrsep(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if(!strcmp(name, "after")) after = (uint32_t)strtoul(value, NULL, 0); + } + + buffer_flush(w->response.data); + w->response.data->contenttype = CT_APPLICATION_JSON; + health_alarm_log2json(host, w->response.data, after); + return 200; +} + +inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) { + int ret = 400; + char *chart = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = mystrsep(&url, "?&"); + if(!value || !*value) continue; + + char *name = mystrsep(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + //else { + /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name); + // goto cleanup; + //} + } + + if(!chart || !*chart) { + buffer_sprintf(w->response.data, "No chart id is given at the request."); + goto cleanup; + } + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + buffer_strcat(w->response.data, "Chart is not found: "); + buffer_strcat_htmlescape(w->response.data, chart); + ret = 404; + goto cleanup; + } + + w->response.data->contenttype = CT_APPLICATION_JSON; + st->last_accessed_time = now_realtime_sec(); + callback(st, w->response.data); + return 200; + + cleanup: + return ret; +} + +inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) { + return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json); +} + +inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) { + (void)url; + + buffer_flush(w->response.data); + w->response.data->contenttype = CT_APPLICATION_JSON; + charts2json(host, w->response.data); + return 200; +} + +inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) { + return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart); +} + +void fix_google_param(char *s) { + if(unlikely(!s)) return; + + for( ; *s ;s++) { + if(!isalnum(*s) && *s != 
'.' && *s != '_' && *s != '-') + *s = '_'; + } +} + +// returns the HTTP code +inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) { + debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url); + + int ret = 400; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + char *google_version = "0.6", + *google_reqId = "0", + *google_sig = "0", + *google_out = "json", + *responseHandler = NULL, + *outFileName = NULL; + + time_t last_timestamp_in_data = 0, google_timestamp = 0; + + char *chart = NULL + , *before_str = NULL + , *after_str = NULL + , *group_time_str = NULL + , *points_str = NULL; + + int group = RRDR_GROUPING_AVERAGE; + uint32_t format = DATASOURCE_JSON; + uint32_t options = 0x00000000; + + while(url) { + char *value = mystrsep(&url, "?&"); + if(!value || !*value) continue; + + char *name = mystrsep(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value); + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100); + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "gtime")) group_time_str = value; + else if(!strcmp(name, "group")) { + group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE); + } + else if(!strcmp(name, "format")) { + format = web_client_api_request_v1_data_format(value); + } + else if(!strcmp(name, "options")) { + options |= web_client_api_request_v1_data_options(value); + } + else if(!strcmp(name, "callback")) { + responseHandler = value; + } + else if(!strcmp(name, "filename")) { + outFileName = value; + } + else if(!strcmp(name, "tqx")) { + // parse Google Visualization API options + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source + char *tqx_name, *tqx_value; + + while(value) { + tqx_value = mystrsep(&value, ";"); + if(!tqx_value || !*tqx_value) continue; + + tqx_name = mystrsep(&tqx_value, ":"); + if(!tqx_name || !*tqx_name) continue; + if(!tqx_value || !*tqx_value) continue; + + if(!strcmp(tqx_name, "version")) + google_version = tqx_value; + else if(!strcmp(tqx_name, "reqId")) + google_reqId = tqx_value; + else if(!strcmp(tqx_name, "sig")) { + google_sig = tqx_value; + google_timestamp = strtoul(google_sig, NULL, 0); + } + else if(!strcmp(tqx_name, "out")) { + google_out = tqx_value; + format = web_client_api_request_v1_data_google_format(google_out); + } + else if(!strcmp(tqx_name, "responseHandler")) + responseHandler = tqx_value; + else if(!strcmp(tqx_name, "outFileName")) + outFileName = tqx_value; + } + } + } + + // validate the google parameters given + fix_google_param(google_out); + fix_google_param(google_sig); + fix_google_param(google_reqId); + fix_google_param(google_version); + fix_google_param(responseHandler); + fix_google_param(outFileName); + + if(!chart || !*chart) { + buffer_sprintf(w->response.data, "No chart id is given at the request."); + goto cleanup; + } + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + 
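+        // the chart was found neither by id nor by name;
+        // echo the chart id back HTML-escaped (it is user input) and respond with 404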
buffer_strcat(w->response.data, "Chart is not found: "); + buffer_strcat_htmlescape(w->response.data, chart); + ret = 404; + goto cleanup; + } + st->last_accessed_time = now_realtime_sec(); + + long long before = (before_str && *before_str)?str2l(before_str):0; + long long after = (after_str && *after_str) ?str2l(after_str):0; + int points = (points_str && *points_str)?str2i(points_str):0; + long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0; + + debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', format '%u', options '0x%08x'" + , w->id + , chart + , (dimensions)?buffer_tostring(dimensions):"" + , after + , before + , points + , group + , format + , options + ); + + if(outFileName && *outFileName) { + buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); + debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); + } + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(responseHandler == NULL) + responseHandler = "google.visualization.Query.setResponse"; + + debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", + w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName + ); + + buffer_sprintf(w->response.data, + "%s({version:'%s',reqId:'%s',status:'ok',sig:'%ld',table:", + responseHandler, google_version, google_reqId, st->last_updated.tv_sec); + } + else if(format == DATASOURCE_JSONP) { + if(responseHandler == NULL) + responseHandler = "callback"; + + buffer_strcat(w->response.data, responseHandler); + buffer_strcat(w->response.data, "("); + } + + ret = rrdset2anything_api_v1(st, w->response.data, dimensions, format, points, after, before, group, group_time + , options, &last_timestamp_in_data); + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(google_timestamp < last_timestamp_in_data) + buffer_strcat(w->response.data, "});"); + + else { + // the client already has the latest data + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, + "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", + responseHandler, google_version, google_reqId); + } + } + else if(format == DATASOURCE_JSONP) + buffer_strcat(w->response.data, ");"); + + cleanup: + buffer_free(dimensions); + return ret; +} + +inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) { + static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0, + hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0, + hash_to = 0 /*, hash_redirects = 0 */; + + if(unlikely(!hash_action)) { + hash_action = simple_hash("action"); + hash_access = simple_hash("access"); + hash_hello = simple_hash("hello"); + hash_delete = simple_hash("delete"); + hash_search = simple_hash("search"); + hash_switch = simple_hash("switch"); + hash_machine = simple_hash("machine"); + hash_url = simple_hash("url"); + hash_name = simple_hash("name"); + hash_delete_url = simple_hash("delete_url"); + hash_for = simple_hash("for"); + hash_to = simple_hash("to"); +/* + hash_redirects = simple_hash("redirects"); +*/ + } + + char person_guid[GUID_LEN + 1] = ""; + + debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url); + + // TODO + // The browser may send 
+
+    char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "=");
+    if(cookie)
+        strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], 36);
+
+    char action = '\0';
+    char *machine_guid = NULL,
+            *machine_url = NULL,
+            *url_name = NULL,
+            *search_machine_guid = NULL,
+            *delete_url = NULL,
+            *to_person_guid = NULL;
+/*
+    int redirects = 0;
+*/
+
+    while(url) {
+        char *value = mystrsep(&url, "?&");
+        if (!value || !*value) continue;
+
+        char *name = mystrsep(&value, "=");
+        if (!name || !*name) continue;
+        if (!value || !*value) continue;
+
+        debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value);
+
+        uint32_t hash = simple_hash(name);
+
+        if(hash == hash_action && !strcmp(name, "action")) {
+            uint32_t vhash = simple_hash(value);
+
+            if(vhash == hash_access && !strcmp(value, "access")) action = 'A';
+            else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H';
+            else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D';
+            else if(vhash == hash_search && !strcmp(value, "search")) action = 'S';
+            else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W';
+#ifdef NETDATA_INTERNAL_CHECKS
+            else error("unknown registry action '%s'", value);
+#endif /* NETDATA_INTERNAL_CHECKS */
+        }
+/*
+        else if(hash == hash_redirects && !strcmp(name, "redirects"))
+            redirects = atoi(value);
+*/
+        else if(hash == hash_machine && !strcmp(name, "machine"))
+            machine_guid = value;
+
+        else if(hash == hash_url && !strcmp(name, "url"))
+            machine_url = value;
+
+        else if(action == 'A') {
+            if(hash == hash_name && !strcmp(name, "name"))
+                url_name = value;
+        }
+        else if(action == 'D') {
+            if(hash == hash_delete_url && !strcmp(name, "delete_url"))
+                delete_url = value;
+        }
+        else if(action == 'S') {
+            if(hash == hash_for && !strcmp(name, "for"))
+                search_machine_guid = value;
+        }
+        else if(action == 'W') {
+            if(hash == hash_to && !strcmp(name, "to"))
+                to_person_guid = value;
+        }
+#ifdef NETDATA_INTERNAL_CHECKS
+        else error("unused registry URL parameter '%s' with value '%s'", name, value);
+#endif /* NETDATA_INTERNAL_CHECKS */
+    }
+
+    if(unlikely(respect_web_browser_do_not_track_policy && web_client_has_donottrack(w))) {
+        buffer_flush(w->response.data);
+        buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). The registry requires persistent cookies on your browser to work.");
+        return 400;
+    }
+
+    if(unlikely(action == 'H')) {
+        // HELLO request, dashboard ACL
+        if(unlikely(!web_client_can_access_dashboard(w)))
+            return web_client_permission_denied(w);
+    }
+    else {
+        // everything else, registry ACL
+        if(unlikely(!web_client_can_access_registry(w)))
+            return web_client_permission_denied(w);
+    }
+
+    switch(action) {
+        case 'A':
+            if(unlikely(!machine_guid || !machine_url || !url_name)) {
+                error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET");
+                buffer_flush(w->response.data);
+                buffer_strcat(w->response.data, "Invalid registry Access request.");
+                return 400;
+            }
+
+            web_client_enable_tracking_required(w);
+            return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec());
+
+        case 'D':
+            if(unlikely(!machine_guid || !machine_url || !delete_url)) {
+                error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET");
+                buffer_flush(w->response.data);
+                buffer_strcat(w->response.data, "Invalid registry Delete request.");
+                return 400;
+            }
+
+            web_client_enable_tracking_required(w);
+            return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec());
+
+        case 'S':
+            if(unlikely(!machine_guid || !machine_url || !search_machine_guid)) {
+                error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET");
+                buffer_flush(w->response.data);
+                buffer_strcat(w->response.data, "Invalid registry Search request.");
+                return 400;
+            }
+
+            web_client_enable_tracking_required(w);
+            return registry_request_search_json(host, w, person_guid, machine_guid, machine_url, search_machine_guid, now_realtime_sec());
+
+        case 'W':
+            if(unlikely(!machine_guid || !machine_url || !to_person_guid)) {
+                error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET");
+                buffer_flush(w->response.data);
+                buffer_strcat(w->response.data, "Invalid registry Switch request.");
+                return 400;
+            }
+
+            web_client_enable_tracking_required(w);
+            return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec());
+
+        case 'H':
+            return registry_request_hello_json(host, w);
+
+        default:
+            buffer_flush(w->response.data);
+            buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search");
+            return 400;
+    }
+}
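Note the double test on every parameter above: the uint32_t hashes of all known names are computed once into function-local statics, and strcmp() only runs after a hash hit, so the hash acts as a cheap first-pass filter while the strcmp() keeps collisions harmless. A reduced sketch of the idiom; demo_hash() (FNV-1a) is an illustrative stand-in for netdata's simple_hash():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* any decent string hash works for this pattern; FNV-1a as a stand-in */
    static uint32_t demo_hash(const char *s) {
        uint32_t h = 2166136261u;
        while(*s) { h ^= (uint8_t)*s++; h *= 16777619u; }
        return h;
    }

    int main(void) {
        static uint32_t hash_access = 0, hash_hello = 0;
        if(!hash_access) {                    /* hashed once, cached in statics */
            hash_access = demo_hash("access");
            hash_hello  = demo_hash("hello");
        }

        const char *value = "access";
        uint32_t vhash = demo_hash(value);

        /* cheap hash filter first, collision-proof strcmp() second */
        if(vhash == hash_access && !strcmp(value, "access"))
            puts("action = 'A'");
        else if(vhash == hash_hello && !strcmp(value, "hello"))
            puts("action = 'H'");
        return 0;
    }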
url_name : "UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Access request."); + return 400; + } + + web_client_enable_tracking_required(w); + return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec()); + + case 'D': + if(unlikely(!machine_guid || !machine_url || !delete_url)) { + error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Delete request."); + return 400; + } + + web_client_enable_tracking_required(w); + return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec()); + + case 'S': + if(unlikely(!machine_guid || !machine_url || !search_machine_guid)) { + error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Search request."); + return 400; + } + + web_client_enable_tracking_required(w); + return registry_request_search_json(host, w, person_guid, machine_guid, machine_url, search_machine_guid, now_realtime_sec()); + + case 'W': + if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { + error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Switch request."); + return 400; + } + + web_client_enable_tracking_required(w); + return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec()); + + case 'H': + return registry_request_hello_json(host, w); + + default: + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search"); + return 400; + } +} + +static struct api_command { + const char *command; + uint32_t hash; + WEB_CLIENT_ACL acl; + int (*callback)(RRDHOST *host, struct web_client *w, char *url); +} api_commands[] = { + { "data", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_data }, + { "chart", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_chart }, + { "charts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_charts }, + + // registry checks the ACL by itself, so we allow everything + { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry }, + + // badges can be fetched with both dashboard and badge permissions + { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD|WEB_CLIENT_ACL_BADGE, web_client_api_request_v1_badge }, + + { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms }, + { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_log }, + { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables }, + { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics }, + + // terminator + { NULL, 0, WEB_CLIENT_ACL_NONE, NULL }, +}; + +inline int web_client_api_request_v1(RRDHOST *host, 
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
new file mode 100644
index 000000000..b6f315dca
--- /dev/null
+++ b/web/api/web_api_v1.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_API_V1_H
+#define NETDATA_WEB_API_V1_H 1
+
+#include "daemon/common.h"
+#include "web/api/badges/web_buffer_svg.h"
+#include "web/api/formatters/rrd2json.h"
+
+extern uint32_t web_client_api_request_v1_data_options(char *o);
+extern uint32_t web_client_api_request_v1_data_format(char *name);
+extern uint32_t web_client_api_request_v1_data_google_format(char *name);
+
+extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
+extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
+
+extern void web_client_api_v1_init(void);
+
+#endif //NETDATA_WEB_API_V1_H
diff --git a/web/css/bootstrap-3.3.7.css b/web/css/bootstrap-3.3.7.css
deleted file mode 100644
index 6167622ce..000000000
--- a/web/css/bootstrap-3.3.7.css
+++ /dev/null
@@ -1,6757 +0,0 @@
-/*!
- * Bootstrap v3.3.7 (http://getbootstrap.com)
- * Copyright 2011-2016 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- */
-/*!
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ -html { - font-family: sans-serif; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100%; -} -body { - margin: 0; -} -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -main, -menu, -nav, -section, -summary { - display: block; -} -audio, -canvas, -progress, -video { - display: inline-block; - vertical-align: baseline; -} -audio:not([controls]) { - display: none; - height: 0; -} -[hidden], -template { - display: none; -} -a { - background-color: transparent; -} -a:active, -a:hover { - outline: 0; -} -abbr[title] { - border-bottom: 1px dotted; -} -b, -strong { - font-weight: bold; -} -dfn { - font-style: italic; -} -h1 { - margin: .67em 0; - font-size: 2em; -} -mark { - color: #000; - background: #ff0; -} -small { - font-size: 80%; -} -sub, -sup { - position: relative; - font-size: 75%; - line-height: 0; - vertical-align: baseline; -} -sup { - top: -.5em; -} -sub { - bottom: -.25em; -} -img { - border: 0; -} -svg:not(:root) { - overflow: hidden; -} -figure { - margin: 1em 40px; -} -hr { - height: 0; - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; -} -pre { - overflow: auto; -} -code, -kbd, -pre, -samp { - font-family: monospace, monospace; - font-size: 1em; -} -button, -input, -optgroup, -select, -textarea { - margin: 0; - font: inherit; - color: inherit; -} -button { - overflow: visible; -} -button, -select { - text-transform: none; -} -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - -webkit-appearance: button; - cursor: pointer; -} -button[disabled], -html input[disabled] { - cursor: default; -} -button::-moz-focus-inner, -input::-moz-focus-inner { - padding: 0; - border: 0; -} -input { - line-height: normal; -} -input[type="checkbox"], -input[type="radio"] { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - padding: 0; -} -input[type="number"]::-webkit-inner-spin-button, -input[type="number"]::-webkit-outer-spin-button { - height: auto; -} -input[type="search"] { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - -webkit-appearance: textfield; -} -input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} -fieldset { - padding: .35em .625em .75em; - margin: 0 2px; - border: 1px solid #c0c0c0; -} -legend { - padding: 0; - border: 0; -} -textarea { - overflow: auto; -} -optgroup { - font-weight: bold; -} -table { - border-spacing: 0; - border-collapse: collapse; -} -td, -th { - padding: 0; -} -/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */ -@media print { - *, - *:before, - *:after { - color: #000 !important; - text-shadow: none !important; - background: transparent !important; - -webkit-box-shadow: none !important; - box-shadow: none !important; - } - a, - a:visited { - text-decoration: underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - a[href^="#"]:after, - a[href^="javascript:"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } - .navbar { - display: none; - } - .btn > .caret, - .dropup > .btn > .caret { - border-top-color: #000 !important; - } - .label { - border: 1px solid #000; - } - .table { - border-collapse: collapse !important; - } - .table td, - .table th { - background-color: #fff !important; - } - .table-bordered th, - .table-bordered td { - border: 1px solid #ddd !important; - } -} -@font-face { - font-family: 'Glyphicons Halflings'; - - src: url('../fonts/glyphicons-halflings-regular.eot'); - src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); -} -.glyphicon { - position: relative; - top: 1px; - display: inline-block; - font-family: 'Glyphicons Halflings'; - font-style: normal; - font-weight: normal; - line-height: 1; - - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} -.glyphicon-asterisk:before { - content: "\002a"; -} -.glyphicon-plus:before { - content: "\002b"; -} -.glyphicon-euro:before, -.glyphicon-eur:before { - content: "\20ac"; -} -.glyphicon-minus:before { - content: "\2212"; -} -.glyphicon-cloud:before { - content: "\2601"; -} -.glyphicon-envelope:before { - content: "\2709"; -} -.glyphicon-pencil:before { - content: "\270f"; -} -.glyphicon-glass:before { - content: "\e001"; -} -.glyphicon-music:before { - content: "\e002"; -} -.glyphicon-search:before { - content: "\e003"; -} -.glyphicon-heart:before { - content: "\e005"; -} -.glyphicon-star:before { - content: "\e006"; -} -.glyphicon-star-empty:before { - content: "\e007"; -} -.glyphicon-user:before { - content: "\e008"; -} -.glyphicon-film:before { - content: "\e009"; -} -.glyphicon-th-large:before { - content: "\e010"; -} -.glyphicon-th:before { - content: "\e011"; -} -.glyphicon-th-list:before { - content: "\e012"; -} -.glyphicon-ok:before { - content: "\e013"; -} -.glyphicon-remove:before { - content: "\e014"; -} -.glyphicon-zoom-in:before { - content: "\e015"; -} -.glyphicon-zoom-out:before { - content: "\e016"; -} -.glyphicon-off:before { - content: "\e017"; -} -.glyphicon-signal:before { - content: "\e018"; -} -.glyphicon-cog:before { - content: "\e019"; -} -.glyphicon-trash:before { - content: "\e020"; -} -.glyphicon-home:before { - content: "\e021"; -} -.glyphicon-file:before { - content: "\e022"; -} -.glyphicon-time:before { - content: "\e023"; -} -.glyphicon-road:before { - content: "\e024"; -} -.glyphicon-download-alt:before { - content: 
"\e025"; -} -.glyphicon-download:before { - content: "\e026"; -} -.glyphicon-upload:before { - content: "\e027"; -} -.glyphicon-inbox:before { - content: "\e028"; -} -.glyphicon-play-circle:before { - content: "\e029"; -} -.glyphicon-repeat:before { - content: "\e030"; -} -.glyphicon-refresh:before { - content: "\e031"; -} -.glyphicon-list-alt:before { - content: "\e032"; -} -.glyphicon-lock:before { - content: "\e033"; -} -.glyphicon-flag:before { - content: "\e034"; -} -.glyphicon-headphones:before { - content: "\e035"; -} -.glyphicon-volume-off:before { - content: "\e036"; -} -.glyphicon-volume-down:before { - content: "\e037"; -} -.glyphicon-volume-up:before { - content: "\e038"; -} -.glyphicon-qrcode:before { - content: "\e039"; -} -.glyphicon-barcode:before { - content: "\e040"; -} -.glyphicon-tag:before { - content: "\e041"; -} -.glyphicon-tags:before { - content: "\e042"; -} -.glyphicon-book:before { - content: "\e043"; -} -.glyphicon-bookmark:before { - content: "\e044"; -} -.glyphicon-print:before { - content: "\e045"; -} -.glyphicon-camera:before { - content: "\e046"; -} -.glyphicon-font:before { - content: "\e047"; -} -.glyphicon-bold:before { - content: "\e048"; -} -.glyphicon-italic:before { - content: "\e049"; -} -.glyphicon-text-height:before { - content: "\e050"; -} -.glyphicon-text-width:before { - content: "\e051"; -} -.glyphicon-align-left:before { - content: "\e052"; -} -.glyphicon-align-center:before { - content: "\e053"; -} -.glyphicon-align-right:before { - content: "\e054"; -} -.glyphicon-align-justify:before { - content: "\e055"; -} -.glyphicon-list:before { - content: "\e056"; -} -.glyphicon-indent-left:before { - content: "\e057"; -} -.glyphicon-indent-right:before { - content: "\e058"; -} -.glyphicon-facetime-video:before { - content: "\e059"; -} -.glyphicon-picture:before { - content: "\e060"; -} -.glyphicon-map-marker:before { - content: "\e062"; -} -.glyphicon-adjust:before { - content: "\e063"; -} -.glyphicon-tint:before { - content: "\e064"; -} -.glyphicon-edit:before { - content: "\e065"; -} -.glyphicon-share:before { - content: "\e066"; -} -.glyphicon-check:before { - content: "\e067"; -} -.glyphicon-move:before { - content: "\e068"; -} -.glyphicon-step-backward:before { - content: "\e069"; -} -.glyphicon-fast-backward:before { - content: "\e070"; -} -.glyphicon-backward:before { - content: "\e071"; -} -.glyphicon-play:before { - content: "\e072"; -} -.glyphicon-pause:before { - content: "\e073"; -} -.glyphicon-stop:before { - content: "\e074"; -} -.glyphicon-forward:before { - content: "\e075"; -} -.glyphicon-fast-forward:before { - content: "\e076"; -} -.glyphicon-step-forward:before { - content: "\e077"; -} -.glyphicon-eject:before { - content: "\e078"; -} -.glyphicon-chevron-left:before { - content: "\e079"; -} -.glyphicon-chevron-right:before { - content: "\e080"; -} -.glyphicon-plus-sign:before { - content: "\e081"; -} -.glyphicon-minus-sign:before { - content: "\e082"; -} -.glyphicon-remove-sign:before { - content: "\e083"; -} -.glyphicon-ok-sign:before { - content: "\e084"; -} -.glyphicon-question-sign:before { - content: "\e085"; -} -.glyphicon-info-sign:before { - content: "\e086"; -} -.glyphicon-screenshot:before { - content: "\e087"; -} -.glyphicon-remove-circle:before { - content: "\e088"; -} -.glyphicon-ok-circle:before { - content: "\e089"; -} -.glyphicon-ban-circle:before { - content: "\e090"; -} -.glyphicon-arrow-left:before { - content: "\e091"; -} -.glyphicon-arrow-right:before { - content: "\e092"; -} -.glyphicon-arrow-up:before { - 
content: "\e093"; -} -.glyphicon-arrow-down:before { - content: "\e094"; -} -.glyphicon-share-alt:before { - content: "\e095"; -} -.glyphicon-resize-full:before { - content: "\e096"; -} -.glyphicon-resize-small:before { - content: "\e097"; -} -.glyphicon-exclamation-sign:before { - content: "\e101"; -} -.glyphicon-gift:before { - content: "\e102"; -} -.glyphicon-leaf:before { - content: "\e103"; -} -.glyphicon-fire:before { - content: "\e104"; -} -.glyphicon-eye-open:before { - content: "\e105"; -} -.glyphicon-eye-close:before { - content: "\e106"; -} -.glyphicon-warning-sign:before { - content: "\e107"; -} -.glyphicon-plane:before { - content: "\e108"; -} -.glyphicon-calendar:before { - content: "\e109"; -} -.glyphicon-random:before { - content: "\e110"; -} -.glyphicon-comment:before { - content: "\e111"; -} -.glyphicon-magnet:before { - content: "\e112"; -} -.glyphicon-chevron-up:before { - content: "\e113"; -} -.glyphicon-chevron-down:before { - content: "\e114"; -} -.glyphicon-retweet:before { - content: "\e115"; -} -.glyphicon-shopping-cart:before { - content: "\e116"; -} -.glyphicon-folder-close:before { - content: "\e117"; -} -.glyphicon-folder-open:before { - content: "\e118"; -} -.glyphicon-resize-vertical:before { - content: "\e119"; -} -.glyphicon-resize-horizontal:before { - content: "\e120"; -} -.glyphicon-hdd:before { - content: "\e121"; -} -.glyphicon-bullhorn:before { - content: "\e122"; -} -.glyphicon-bell:before { - content: "\e123"; -} -.glyphicon-certificate:before { - content: "\e124"; -} -.glyphicon-thumbs-up:before { - content: "\e125"; -} -.glyphicon-thumbs-down:before { - content: "\e126"; -} -.glyphicon-hand-right:before { - content: "\e127"; -} -.glyphicon-hand-left:before { - content: "\e128"; -} -.glyphicon-hand-up:before { - content: "\e129"; -} -.glyphicon-hand-down:before { - content: "\e130"; -} -.glyphicon-circle-arrow-right:before { - content: "\e131"; -} -.glyphicon-circle-arrow-left:before { - content: "\e132"; -} -.glyphicon-circle-arrow-up:before { - content: "\e133"; -} -.glyphicon-circle-arrow-down:before { - content: "\e134"; -} -.glyphicon-globe:before { - content: "\e135"; -} -.glyphicon-wrench:before { - content: "\e136"; -} -.glyphicon-tasks:before { - content: "\e137"; -} -.glyphicon-filter:before { - content: "\e138"; -} -.glyphicon-briefcase:before { - content: "\e139"; -} -.glyphicon-fullscreen:before { - content: "\e140"; -} -.glyphicon-dashboard:before { - content: "\e141"; -} -.glyphicon-paperclip:before { - content: "\e142"; -} -.glyphicon-heart-empty:before { - content: "\e143"; -} -.glyphicon-link:before { - content: "\e144"; -} -.glyphicon-phone:before { - content: "\e145"; -} -.glyphicon-pushpin:before { - content: "\e146"; -} -.glyphicon-usd:before { - content: "\e148"; -} -.glyphicon-gbp:before { - content: "\e149"; -} -.glyphicon-sort:before { - content: "\e150"; -} -.glyphicon-sort-by-alphabet:before { - content: "\e151"; -} -.glyphicon-sort-by-alphabet-alt:before { - content: "\e152"; -} -.glyphicon-sort-by-order:before { - content: "\e153"; -} -.glyphicon-sort-by-order-alt:before { - content: "\e154"; -} -.glyphicon-sort-by-attributes:before { - content: "\e155"; -} -.glyphicon-sort-by-attributes-alt:before { - content: "\e156"; -} -.glyphicon-unchecked:before { - content: "\e157"; -} -.glyphicon-expand:before { - content: "\e158"; -} -.glyphicon-collapse-down:before { - content: "\e159"; -} -.glyphicon-collapse-up:before { - content: "\e160"; -} -.glyphicon-log-in:before { - content: "\e161"; -} -.glyphicon-flash:before { - 
content: "\e162"; -} -.glyphicon-log-out:before { - content: "\e163"; -} -.glyphicon-new-window:before { - content: "\e164"; -} -.glyphicon-record:before { - content: "\e165"; -} -.glyphicon-save:before { - content: "\e166"; -} -.glyphicon-open:before { - content: "\e167"; -} -.glyphicon-saved:before { - content: "\e168"; -} -.glyphicon-import:before { - content: "\e169"; -} -.glyphicon-export:before { - content: "\e170"; -} -.glyphicon-send:before { - content: "\e171"; -} -.glyphicon-floppy-disk:before { - content: "\e172"; -} -.glyphicon-floppy-saved:before { - content: "\e173"; -} -.glyphicon-floppy-remove:before { - content: "\e174"; -} -.glyphicon-floppy-save:before { - content: "\e175"; -} -.glyphicon-floppy-open:before { - content: "\e176"; -} -.glyphicon-credit-card:before { - content: "\e177"; -} -.glyphicon-transfer:before { - content: "\e178"; -} -.glyphicon-cutlery:before { - content: "\e179"; -} -.glyphicon-header:before { - content: "\e180"; -} -.glyphicon-compressed:before { - content: "\e181"; -} -.glyphicon-earphone:before { - content: "\e182"; -} -.glyphicon-phone-alt:before { - content: "\e183"; -} -.glyphicon-tower:before { - content: "\e184"; -} -.glyphicon-stats:before { - content: "\e185"; -} -.glyphicon-sd-video:before { - content: "\e186"; -} -.glyphicon-hd-video:before { - content: "\e187"; -} -.glyphicon-subtitles:before { - content: "\e188"; -} -.glyphicon-sound-stereo:before { - content: "\e189"; -} -.glyphicon-sound-dolby:before { - content: "\e190"; -} -.glyphicon-sound-5-1:before { - content: "\e191"; -} -.glyphicon-sound-6-1:before { - content: "\e192"; -} -.glyphicon-sound-7-1:before { - content: "\e193"; -} -.glyphicon-copyright-mark:before { - content: "\e194"; -} -.glyphicon-registration-mark:before { - content: "\e195"; -} -.glyphicon-cloud-download:before { - content: "\e197"; -} -.glyphicon-cloud-upload:before { - content: "\e198"; -} -.glyphicon-tree-conifer:before { - content: "\e199"; -} -.glyphicon-tree-deciduous:before { - content: "\e200"; -} -.glyphicon-cd:before { - content: "\e201"; -} -.glyphicon-save-file:before { - content: "\e202"; -} -.glyphicon-open-file:before { - content: "\e203"; -} -.glyphicon-level-up:before { - content: "\e204"; -} -.glyphicon-copy:before { - content: "\e205"; -} -.glyphicon-paste:before { - content: "\e206"; -} -.glyphicon-alert:before { - content: "\e209"; -} -.glyphicon-equalizer:before { - content: "\e210"; -} -.glyphicon-king:before { - content: "\e211"; -} -.glyphicon-queen:before { - content: "\e212"; -} -.glyphicon-pawn:before { - content: "\e213"; -} -.glyphicon-bishop:before { - content: "\e214"; -} -.glyphicon-knight:before { - content: "\e215"; -} -.glyphicon-baby-formula:before { - content: "\e216"; -} -.glyphicon-tent:before { - content: "\26fa"; -} -.glyphicon-blackboard:before { - content: "\e218"; -} -.glyphicon-bed:before { - content: "\e219"; -} -.glyphicon-apple:before { - content: "\f8ff"; -} -.glyphicon-erase:before { - content: "\e221"; -} -.glyphicon-hourglass:before { - content: "\231b"; -} -.glyphicon-lamp:before { - content: "\e223"; -} -.glyphicon-duplicate:before { - content: "\e224"; -} -.glyphicon-piggy-bank:before { - content: "\e225"; -} -.glyphicon-scissors:before { - content: "\e226"; -} -.glyphicon-bitcoin:before { - content: "\e227"; -} -.glyphicon-btc:before { - content: "\e227"; -} -.glyphicon-xbt:before { - content: "\e227"; -} -.glyphicon-yen:before { - content: "\00a5"; -} -.glyphicon-jpy:before { - content: "\00a5"; -} -.glyphicon-ruble:before { - content: "\20bd"; -} 
-.glyphicon-rub:before { - content: "\20bd"; -} -.glyphicon-scale:before { - content: "\e230"; -} -.glyphicon-ice-lolly:before { - content: "\e231"; -} -.glyphicon-ice-lolly-tasted:before { - content: "\e232"; -} -.glyphicon-education:before { - content: "\e233"; -} -.glyphicon-option-horizontal:before { - content: "\e234"; -} -.glyphicon-option-vertical:before { - content: "\e235"; -} -.glyphicon-menu-hamburger:before { - content: "\e236"; -} -.glyphicon-modal-window:before { - content: "\e237"; -} -.glyphicon-oil:before { - content: "\e238"; -} -.glyphicon-grain:before { - content: "\e239"; -} -.glyphicon-sunglasses:before { - content: "\e240"; -} -.glyphicon-text-size:before { - content: "\e241"; -} -.glyphicon-text-color:before { - content: "\e242"; -} -.glyphicon-text-background:before { - content: "\e243"; -} -.glyphicon-object-align-top:before { - content: "\e244"; -} -.glyphicon-object-align-bottom:before { - content: "\e245"; -} -.glyphicon-object-align-horizontal:before { - content: "\e246"; -} -.glyphicon-object-align-left:before { - content: "\e247"; -} -.glyphicon-object-align-vertical:before { - content: "\e248"; -} -.glyphicon-object-align-right:before { - content: "\e249"; -} -.glyphicon-triangle-right:before { - content: "\e250"; -} -.glyphicon-triangle-left:before { - content: "\e251"; -} -.glyphicon-triangle-bottom:before { - content: "\e252"; -} -.glyphicon-triangle-top:before { - content: "\e253"; -} -.glyphicon-console:before { - content: "\e254"; -} -.glyphicon-superscript:before { - content: "\e255"; -} -.glyphicon-subscript:before { - content: "\e256"; -} -.glyphicon-menu-left:before { - content: "\e257"; -} -.glyphicon-menu-right:before { - content: "\e258"; -} -.glyphicon-menu-down:before { - content: "\e259"; -} -.glyphicon-menu-up:before { - content: "\e260"; -} -* { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -*:before, -*:after { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -html { - font-size: 10px; - - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -} -body { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 1.42857143; - color: #333; - background-color: #fff; -} -input, -button, -select, -textarea { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} -a { - color: #337ab7; - text-decoration: none; -} -a:hover, -a:focus { - color: #23527c; - text-decoration: underline; -} -a:focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -figure { - margin: 0; -} -img { - vertical-align: middle; -} -.img-responsive, -.thumbnail > img, -.thumbnail a > img, -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - max-width: 100%; - height: auto; -} -.img-rounded { - border-radius: 6px; -} -.img-thumbnail { - display: inline-block; - max-width: 100%; - height: auto; - padding: 4px; - line-height: 1.42857143; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 4px; - -webkit-transition: all .2s ease-in-out; - -o-transition: all .2s ease-in-out; - transition: all .2s ease-in-out; -} -.img-circle { - border-radius: 50%; -} -hr { - margin-top: 20px; - margin-bottom: 20px; - border: 0; - border-top: 1px solid #eee; -} -.sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - border: 0; -} -.sr-only-focusable:active, -.sr-only-focusable:focus { - position: 
static; - width: auto; - height: auto; - margin: 0; - overflow: visible; - clip: auto; -} -[role="button"] { - cursor: pointer; -} -h1, -h2, -h3, -h4, -h5, -h6, -.h1, -.h2, -.h3, -.h4, -.h5, -.h6 { - font-family: inherit; - font-weight: 500; - line-height: 1.1; - color: inherit; -} -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small, -.h1 small, -.h2 small, -.h3 small, -.h4 small, -.h5 small, -.h6 small, -h1 .small, -h2 .small, -h3 .small, -h4 .small, -h5 .small, -h6 .small, -.h1 .small, -.h2 .small, -.h3 .small, -.h4 .small, -.h5 .small, -.h6 .small { - font-weight: normal; - line-height: 1; - color: #777; -} -h1, -.h1, -h2, -.h2, -h3, -.h3 { - margin-top: 20px; - margin-bottom: 10px; -} -h1 small, -.h1 small, -h2 small, -.h2 small, -h3 small, -.h3 small, -h1 .small, -.h1 .small, -h2 .small, -.h2 .small, -h3 .small, -.h3 .small { - font-size: 65%; -} -h4, -.h4, -h5, -.h5, -h6, -.h6 { - margin-top: 10px; - margin-bottom: 10px; -} -h4 small, -.h4 small, -h5 small, -.h5 small, -h6 small, -.h6 small, -h4 .small, -.h4 .small, -h5 .small, -.h5 .small, -h6 .small, -.h6 .small { - font-size: 75%; -} -h1, -.h1 { - font-size: 36px; -} -h2, -.h2 { - font-size: 30px; -} -h3, -.h3 { - font-size: 24px; -} -h4, -.h4 { - font-size: 18px; -} -h5, -.h5 { - font-size: 14px; -} -h6, -.h6 { - font-size: 12px; -} -p { - margin: 0 0 10px; -} -.lead { - margin-bottom: 20px; - font-size: 16px; - font-weight: 300; - line-height: 1.4; -} -@media (min-width: 768px) { - .lead { - font-size: 21px; - } -} -small, -.small { - font-size: 85%; -} -mark, -.mark { - padding: .2em; - background-color: #fcf8e3; -} -.text-left { - text-align: left; -} -.text-right { - text-align: right; -} -.text-center { - text-align: center; -} -.text-justify { - text-align: justify; -} -.text-nowrap { - white-space: nowrap; -} -.text-lowercase { - text-transform: lowercase; -} -.text-uppercase { - text-transform: uppercase; -} -.text-capitalize { - text-transform: capitalize; -} -.text-muted { - color: #777; -} -.text-primary { - color: #337ab7; -} -a.text-primary:hover, -a.text-primary:focus { - color: #286090; -} -.text-success { - color: #3c763d; -} -a.text-success:hover, -a.text-success:focus { - color: #2b542c; -} -.text-info { - color: #31708f; -} -a.text-info:hover, -a.text-info:focus { - color: #245269; -} -.text-warning { - color: #8a6d3b; -} -a.text-warning:hover, -a.text-warning:focus { - color: #66512c; -} -.text-danger { - color: #a94442; -} -a.text-danger:hover, -a.text-danger:focus { - color: #843534; -} -.bg-primary { - color: #fff; - background-color: #337ab7; -} -a.bg-primary:hover, -a.bg-primary:focus { - background-color: #286090; -} -.bg-success { - background-color: #dff0d8; -} -a.bg-success:hover, -a.bg-success:focus { - background-color: #c1e2b3; -} -.bg-info { - background-color: #d9edf7; -} -a.bg-info:hover, -a.bg-info:focus { - background-color: #afd9ee; -} -.bg-warning { - background-color: #fcf8e3; -} -a.bg-warning:hover, -a.bg-warning:focus { - background-color: #f7ecb5; -} -.bg-danger { - background-color: #f2dede; -} -a.bg-danger:hover, -a.bg-danger:focus { - background-color: #e4b9b9; -} -.page-header { - padding-bottom: 9px; - margin: 40px 0 20px; - border-bottom: 1px solid #eee; -} -ul, -ol { - margin-top: 0; - margin-bottom: 10px; -} -ul ul, -ol ul, -ul ol, -ol ol { - margin-bottom: 0; -} -.list-unstyled { - padding-left: 0; - list-style: none; -} -.list-inline { - padding-left: 0; - margin-left: -5px; - list-style: none; -} -.list-inline > li { - display: inline-block; - padding-right: 5px; 
- padding-left: 5px; -} -dl { - margin-top: 0; - margin-bottom: 20px; -} -dt, -dd { - line-height: 1.42857143; -} -dt { - font-weight: bold; -} -dd { - margin-left: 0; -} -@media (min-width: 768px) { - .dl-horizontal dt { - float: left; - width: 160px; - overflow: hidden; - clear: left; - text-align: right; - text-overflow: ellipsis; - white-space: nowrap; - } - .dl-horizontal dd { - margin-left: 180px; - } -} -abbr[title], -abbr[data-original-title] { - cursor: help; - border-bottom: 1px dotted #777; -} -.initialism { - font-size: 90%; - text-transform: uppercase; -} -blockquote { - padding: 10px 20px; - margin: 0 0 20px; - font-size: 17.5px; - border-left: 5px solid #eee; -} -blockquote p:last-child, -blockquote ul:last-child, -blockquote ol:last-child { - margin-bottom: 0; -} -blockquote footer, -blockquote small, -blockquote .small { - display: block; - font-size: 80%; - line-height: 1.42857143; - color: #777; -} -blockquote footer:before, -blockquote small:before, -blockquote .small:before { - content: '\2014 \00A0'; -} -.blockquote-reverse, -blockquote.pull-right { - padding-right: 15px; - padding-left: 0; - text-align: right; - border-right: 5px solid #eee; - border-left: 0; -} -.blockquote-reverse footer:before, -blockquote.pull-right footer:before, -.blockquote-reverse small:before, -blockquote.pull-right small:before, -.blockquote-reverse .small:before, -blockquote.pull-right .small:before { - content: ''; -} -.blockquote-reverse footer:after, -blockquote.pull-right footer:after, -.blockquote-reverse small:after, -blockquote.pull-right small:after, -.blockquote-reverse .small:after, -blockquote.pull-right .small:after { - content: '\00A0 \2014'; -} -address { - margin-bottom: 20px; - font-style: normal; - line-height: 1.42857143; -} -code, -kbd, -pre, -samp { - font-family: Menlo, Monaco, Consolas, "Courier New", monospace; -} -code { - padding: 2px 4px; - font-size: 90%; - color: #c7254e; - background-color: #f9f2f4; - border-radius: 4px; -} -kbd { - padding: 2px 4px; - font-size: 90%; - color: #fff; - background-color: #333; - border-radius: 3px; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25); -} -kbd kbd { - padding: 0; - font-size: 100%; - font-weight: bold; - -webkit-box-shadow: none; - box-shadow: none; -} -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 1.42857143; - color: #333; - word-break: break-all; - word-wrap: break-word; - background-color: #f5f5f5; - border: 1px solid #ccc; - border-radius: 4px; -} -pre code { - padding: 0; - font-size: inherit; - color: inherit; - white-space: pre-wrap; - background-color: transparent; - border-radius: 0; -} -.pre-scrollable { - max-height: 340px; - overflow-y: scroll; -} -.container { - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; -} -@media (min-width: 768px) { - .container { - width: 750px; - } -} -@media (min-width: 992px) { - .container { - width: 970px; - } -} -@media (min-width: 1200px) { - .container { - width: 1170px; - } -} -.container-fluid { - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; -} -.row { - margin-right: -15px; - margin-left: -15px; -} -.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, 
.col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 { - position: relative; - min-height: 1px; - padding-right: 15px; - padding-left: 15px; -} -.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 { - float: left; -} -.col-xs-12 { - width: 100%; -} -.col-xs-11 { - width: 91.66666667%; -} -.col-xs-10 { - width: 83.33333333%; -} -.col-xs-9 { - width: 75%; -} -.col-xs-8 { - width: 66.66666667%; -} -.col-xs-7 { - width: 58.33333333%; -} -.col-xs-6 { - width: 50%; -} -.col-xs-5 { - width: 41.66666667%; -} -.col-xs-4 { - width: 33.33333333%; -} -.col-xs-3 { - width: 25%; -} -.col-xs-2 { - width: 16.66666667%; -} -.col-xs-1 { - width: 8.33333333%; -} -.col-xs-pull-12 { - right: 100%; -} -.col-xs-pull-11 { - right: 91.66666667%; -} -.col-xs-pull-10 { - right: 83.33333333%; -} -.col-xs-pull-9 { - right: 75%; -} -.col-xs-pull-8 { - right: 66.66666667%; -} -.col-xs-pull-7 { - right: 58.33333333%; -} -.col-xs-pull-6 { - right: 50%; -} -.col-xs-pull-5 { - right: 41.66666667%; -} -.col-xs-pull-4 { - right: 33.33333333%; -} -.col-xs-pull-3 { - right: 25%; -} -.col-xs-pull-2 { - right: 16.66666667%; -} -.col-xs-pull-1 { - right: 8.33333333%; -} -.col-xs-pull-0 { - right: auto; -} -.col-xs-push-12 { - left: 100%; -} -.col-xs-push-11 { - left: 91.66666667%; -} -.col-xs-push-10 { - left: 83.33333333%; -} -.col-xs-push-9 { - left: 75%; -} -.col-xs-push-8 { - left: 66.66666667%; -} -.col-xs-push-7 { - left: 58.33333333%; -} -.col-xs-push-6 { - left: 50%; -} -.col-xs-push-5 { - left: 41.66666667%; -} -.col-xs-push-4 { - left: 33.33333333%; -} -.col-xs-push-3 { - left: 25%; -} -.col-xs-push-2 { - left: 16.66666667%; -} -.col-xs-push-1 { - left: 8.33333333%; -} -.col-xs-push-0 { - left: auto; -} -.col-xs-offset-12 { - margin-left: 100%; -} -.col-xs-offset-11 { - margin-left: 91.66666667%; -} -.col-xs-offset-10 { - margin-left: 83.33333333%; -} -.col-xs-offset-9 { - margin-left: 75%; -} -.col-xs-offset-8 { - margin-left: 66.66666667%; -} -.col-xs-offset-7 { - margin-left: 58.33333333%; -} -.col-xs-offset-6 { - margin-left: 50%; -} -.col-xs-offset-5 { - margin-left: 41.66666667%; -} -.col-xs-offset-4 { - margin-left: 33.33333333%; -} -.col-xs-offset-3 { - margin-left: 25%; -} -.col-xs-offset-2 { - margin-left: 16.66666667%; -} -.col-xs-offset-1 { - margin-left: 8.33333333%; -} -.col-xs-offset-0 { - margin-left: 0; -} -@media (min-width: 768px) { - .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 { - float: left; - } - .col-sm-12 { - width: 100%; - } - .col-sm-11 { - width: 91.66666667%; - } - .col-sm-10 { - width: 83.33333333%; - } - .col-sm-9 { - width: 75%; - } - .col-sm-8 { - width: 66.66666667%; - } - .col-sm-7 { - width: 58.33333333%; - } - .col-sm-6 { - width: 50%; - } - .col-sm-5 { - width: 41.66666667%; - } - .col-sm-4 { - width: 33.33333333%; - } - .col-sm-3 { - width: 25%; - } - .col-sm-2 { - width: 16.66666667%; - } - .col-sm-1 { - width: 8.33333333%; - } - .col-sm-pull-12 { - right: 100%; - } - .col-sm-pull-11 { - right: 91.66666667%; - } - .col-sm-pull-10 { - right: 83.33333333%; - } - .col-sm-pull-9 { - right: 75%; - } - .col-sm-pull-8 { - right: 66.66666667%; - } - .col-sm-pull-7 { - right: 58.33333333%; - } - .col-sm-pull-6 { - right: 50%; 
- } - .col-sm-pull-5 { - right: 41.66666667%; - } - .col-sm-pull-4 { - right: 33.33333333%; - } - .col-sm-pull-3 { - right: 25%; - } - .col-sm-pull-2 { - right: 16.66666667%; - } - .col-sm-pull-1 { - right: 8.33333333%; - } - .col-sm-pull-0 { - right: auto; - } - .col-sm-push-12 { - left: 100%; - } - .col-sm-push-11 { - left: 91.66666667%; - } - .col-sm-push-10 { - left: 83.33333333%; - } - .col-sm-push-9 { - left: 75%; - } - .col-sm-push-8 { - left: 66.66666667%; - } - .col-sm-push-7 { - left: 58.33333333%; - } - .col-sm-push-6 { - left: 50%; - } - .col-sm-push-5 { - left: 41.66666667%; - } - .col-sm-push-4 { - left: 33.33333333%; - } - .col-sm-push-3 { - left: 25%; - } - .col-sm-push-2 { - left: 16.66666667%; - } - .col-sm-push-1 { - left: 8.33333333%; - } - .col-sm-push-0 { - left: auto; - } - .col-sm-offset-12 { - margin-left: 100%; - } - .col-sm-offset-11 { - margin-left: 91.66666667%; - } - .col-sm-offset-10 { - margin-left: 83.33333333%; - } - .col-sm-offset-9 { - margin-left: 75%; - } - .col-sm-offset-8 { - margin-left: 66.66666667%; - } - .col-sm-offset-7 { - margin-left: 58.33333333%; - } - .col-sm-offset-6 { - margin-left: 50%; - } - .col-sm-offset-5 { - margin-left: 41.66666667%; - } - .col-sm-offset-4 { - margin-left: 33.33333333%; - } - .col-sm-offset-3 { - margin-left: 25%; - } - .col-sm-offset-2 { - margin-left: 16.66666667%; - } - .col-sm-offset-1 { - margin-left: 8.33333333%; - } - .col-sm-offset-0 { - margin-left: 0; - } -} -@media (min-width: 992px) { - .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 { - float: left; - } - .col-md-12 { - width: 100%; - } - .col-md-11 { - width: 91.66666667%; - } - .col-md-10 { - width: 83.33333333%; - } - .col-md-9 { - width: 75%; - } - .col-md-8 { - width: 66.66666667%; - } - .col-md-7 { - width: 58.33333333%; - } - .col-md-6 { - width: 50%; - } - .col-md-5 { - width: 41.66666667%; - } - .col-md-4 { - width: 33.33333333%; - } - .col-md-3 { - width: 25%; - } - .col-md-2 { - width: 16.66666667%; - } - .col-md-1 { - width: 8.33333333%; - } - .col-md-pull-12 { - right: 100%; - } - .col-md-pull-11 { - right: 91.66666667%; - } - .col-md-pull-10 { - right: 83.33333333%; - } - .col-md-pull-9 { - right: 75%; - } - .col-md-pull-8 { - right: 66.66666667%; - } - .col-md-pull-7 { - right: 58.33333333%; - } - .col-md-pull-6 { - right: 50%; - } - .col-md-pull-5 { - right: 41.66666667%; - } - .col-md-pull-4 { - right: 33.33333333%; - } - .col-md-pull-3 { - right: 25%; - } - .col-md-pull-2 { - right: 16.66666667%; - } - .col-md-pull-1 { - right: 8.33333333%; - } - .col-md-pull-0 { - right: auto; - } - .col-md-push-12 { - left: 100%; - } - .col-md-push-11 { - left: 91.66666667%; - } - .col-md-push-10 { - left: 83.33333333%; - } - .col-md-push-9 { - left: 75%; - } - .col-md-push-8 { - left: 66.66666667%; - } - .col-md-push-7 { - left: 58.33333333%; - } - .col-md-push-6 { - left: 50%; - } - .col-md-push-5 { - left: 41.66666667%; - } - .col-md-push-4 { - left: 33.33333333%; - } - .col-md-push-3 { - left: 25%; - } - .col-md-push-2 { - left: 16.66666667%; - } - .col-md-push-1 { - left: 8.33333333%; - } - .col-md-push-0 { - left: auto; - } - .col-md-offset-12 { - margin-left: 100%; - } - .col-md-offset-11 { - margin-left: 91.66666667%; - } - .col-md-offset-10 { - margin-left: 83.33333333%; - } - .col-md-offset-9 { - margin-left: 75%; - } - .col-md-offset-8 { - margin-left: 66.66666667%; - } - .col-md-offset-7 { - margin-left: 58.33333333%; - } - .col-md-offset-6 { - 
margin-left: 50%; - } - .col-md-offset-5 { - margin-left: 41.66666667%; - } - .col-md-offset-4 { - margin-left: 33.33333333%; - } - .col-md-offset-3 { - margin-left: 25%; - } - .col-md-offset-2 { - margin-left: 16.66666667%; - } - .col-md-offset-1 { - margin-left: 8.33333333%; - } - .col-md-offset-0 { - margin-left: 0; - } -} -@media (min-width: 1200px) { - .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 { - float: left; - } - .col-lg-12 { - width: 100%; - } - .col-lg-11 { - width: 91.66666667%; - } - .col-lg-10 { - width: 83.33333333%; - } - .col-lg-9 { - width: 75%; - } - .col-lg-8 { - width: 66.66666667%; - } - .col-lg-7 { - width: 58.33333333%; - } - .col-lg-6 { - width: 50%; - } - .col-lg-5 { - width: 41.66666667%; - } - .col-lg-4 { - width: 33.33333333%; - } - .col-lg-3 { - width: 25%; - } - .col-lg-2 { - width: 16.66666667%; - } - .col-lg-1 { - width: 8.33333333%; - } - .col-lg-pull-12 { - right: 100%; - } - .col-lg-pull-11 { - right: 91.66666667%; - } - .col-lg-pull-10 { - right: 83.33333333%; - } - .col-lg-pull-9 { - right: 75%; - } - .col-lg-pull-8 { - right: 66.66666667%; - } - .col-lg-pull-7 { - right: 58.33333333%; - } - .col-lg-pull-6 { - right: 50%; - } - .col-lg-pull-5 { - right: 41.66666667%; - } - .col-lg-pull-4 { - right: 33.33333333%; - } - .col-lg-pull-3 { - right: 25%; - } - .col-lg-pull-2 { - right: 16.66666667%; - } - .col-lg-pull-1 { - right: 8.33333333%; - } - .col-lg-pull-0 { - right: auto; - } - .col-lg-push-12 { - left: 100%; - } - .col-lg-push-11 { - left: 91.66666667%; - } - .col-lg-push-10 { - left: 83.33333333%; - } - .col-lg-push-9 { - left: 75%; - } - .col-lg-push-8 { - left: 66.66666667%; - } - .col-lg-push-7 { - left: 58.33333333%; - } - .col-lg-push-6 { - left: 50%; - } - .col-lg-push-5 { - left: 41.66666667%; - } - .col-lg-push-4 { - left: 33.33333333%; - } - .col-lg-push-3 { - left: 25%; - } - .col-lg-push-2 { - left: 16.66666667%; - } - .col-lg-push-1 { - left: 8.33333333%; - } - .col-lg-push-0 { - left: auto; - } - .col-lg-offset-12 { - margin-left: 100%; - } - .col-lg-offset-11 { - margin-left: 91.66666667%; - } - .col-lg-offset-10 { - margin-left: 83.33333333%; - } - .col-lg-offset-9 { - margin-left: 75%; - } - .col-lg-offset-8 { - margin-left: 66.66666667%; - } - .col-lg-offset-7 { - margin-left: 58.33333333%; - } - .col-lg-offset-6 { - margin-left: 50%; - } - .col-lg-offset-5 { - margin-left: 41.66666667%; - } - .col-lg-offset-4 { - margin-left: 33.33333333%; - } - .col-lg-offset-3 { - margin-left: 25%; - } - .col-lg-offset-2 { - margin-left: 16.66666667%; - } - .col-lg-offset-1 { - margin-left: 8.33333333%; - } - .col-lg-offset-0 { - margin-left: 0; - } -} -table { - background-color: transparent; -} -caption { - padding-top: 8px; - padding-bottom: 8px; - color: #777; - text-align: left; -} -th { - text-align: left; -} -.table { - width: 100%; - max-width: 100%; - margin-bottom: 20px; -} -.table > thead > tr > th, -.table > tbody > tr > th, -.table > tfoot > tr > th, -.table > thead > tr > td, -.table > tbody > tr > td, -.table > tfoot > tr > td { - padding: 8px; - line-height: 1.42857143; - vertical-align: top; - border-top: 1px solid #ddd; -} -.table > thead > tr > th { - vertical-align: bottom; - border-bottom: 2px solid #ddd; -} -.table > caption + thead > tr:first-child > th, -.table > colgroup + thead > tr:first-child > th, -.table > thead:first-child > tr:first-child > th, -.table > caption + thead > tr:first-child > td, -.table > colgroup + thead > 
tr:first-child > td, -.table > thead:first-child > tr:first-child > td { - border-top: 0; -} -.table > tbody + tbody { - border-top: 2px solid #ddd; -} -.table .table { - background-color: #fff; -} -.table-condensed > thead > tr > th, -.table-condensed > tbody > tr > th, -.table-condensed > tfoot > tr > th, -.table-condensed > thead > tr > td, -.table-condensed > tbody > tr > td, -.table-condensed > tfoot > tr > td { - padding: 5px; -} -.table-bordered { - border: 1px solid #ddd; -} -.table-bordered > thead > tr > th, -.table-bordered > tbody > tr > th, -.table-bordered > tfoot > tr > th, -.table-bordered > thead > tr > td, -.table-bordered > tbody > tr > td, -.table-bordered > tfoot > tr > td { - border: 1px solid #ddd; -} -.table-bordered > thead > tr > th, -.table-bordered > thead > tr > td { - border-bottom-width: 2px; -} -.table-striped > tbody > tr:nth-of-type(odd) { - background-color: #f9f9f9; -} -.table-hover > tbody > tr:hover { - background-color: #f5f5f5; -} -table col[class*="col-"] { - position: static; - display: table-column; - float: none; -} -table td[class*="col-"], -table th[class*="col-"] { - position: static; - display: table-cell; - float: none; -} -.table > thead > tr > td.active, -.table > tbody > tr > td.active, -.table > tfoot > tr > td.active, -.table > thead > tr > th.active, -.table > tbody > tr > th.active, -.table > tfoot > tr > th.active, -.table > thead > tr.active > td, -.table > tbody > tr.active > td, -.table > tfoot > tr.active > td, -.table > thead > tr.active > th, -.table > tbody > tr.active > th, -.table > tfoot > tr.active > th { - background-color: #f5f5f5; -} -.table-hover > tbody > tr > td.active:hover, -.table-hover > tbody > tr > th.active:hover, -.table-hover > tbody > tr.active:hover > td, -.table-hover > tbody > tr:hover > .active, -.table-hover > tbody > tr.active:hover > th { - background-color: #e8e8e8; -} -.table > thead > tr > td.success, -.table > tbody > tr > td.success, -.table > tfoot > tr > td.success, -.table > thead > tr > th.success, -.table > tbody > tr > th.success, -.table > tfoot > tr > th.success, -.table > thead > tr.success > td, -.table > tbody > tr.success > td, -.table > tfoot > tr.success > td, -.table > thead > tr.success > th, -.table > tbody > tr.success > th, -.table > tfoot > tr.success > th { - background-color: #dff0d8; -} -.table-hover > tbody > tr > td.success:hover, -.table-hover > tbody > tr > th.success:hover, -.table-hover > tbody > tr.success:hover > td, -.table-hover > tbody > tr:hover > .success, -.table-hover > tbody > tr.success:hover > th { - background-color: #d0e9c6; -} -.table > thead > tr > td.info, -.table > tbody > tr > td.info, -.table > tfoot > tr > td.info, -.table > thead > tr > th.info, -.table > tbody > tr > th.info, -.table > tfoot > tr > th.info, -.table > thead > tr.info > td, -.table > tbody > tr.info > td, -.table > tfoot > tr.info > td, -.table > thead > tr.info > th, -.table > tbody > tr.info > th, -.table > tfoot > tr.info > th { - background-color: #d9edf7; -} -.table-hover > tbody > tr > td.info:hover, -.table-hover > tbody > tr > th.info:hover, -.table-hover > tbody > tr.info:hover > td, -.table-hover > tbody > tr:hover > .info, -.table-hover > tbody > tr.info:hover > th { - background-color: #c4e3f3; -} -.table > thead > tr > td.warning, -.table > tbody > tr > td.warning, -.table > tfoot > tr > td.warning, -.table > thead > tr > th.warning, -.table > tbody > tr > th.warning, -.table > tfoot > tr > th.warning, -.table > thead > tr.warning > td, -.table > tbody > tr.warning > 
td, -.table > tfoot > tr.warning > td, -.table > thead > tr.warning > th, -.table > tbody > tr.warning > th, -.table > tfoot > tr.warning > th { - background-color: #fcf8e3; -} -.table-hover > tbody > tr > td.warning:hover, -.table-hover > tbody > tr > th.warning:hover, -.table-hover > tbody > tr.warning:hover > td, -.table-hover > tbody > tr:hover > .warning, -.table-hover > tbody > tr.warning:hover > th { - background-color: #faf2cc; -} -.table > thead > tr > td.danger, -.table > tbody > tr > td.danger, -.table > tfoot > tr > td.danger, -.table > thead > tr > th.danger, -.table > tbody > tr > th.danger, -.table > tfoot > tr > th.danger, -.table > thead > tr.danger > td, -.table > tbody > tr.danger > td, -.table > tfoot > tr.danger > td, -.table > thead > tr.danger > th, -.table > tbody > tr.danger > th, -.table > tfoot > tr.danger > th { - background-color: #f2dede; -} -.table-hover > tbody > tr > td.danger:hover, -.table-hover > tbody > tr > th.danger:hover, -.table-hover > tbody > tr.danger:hover > td, -.table-hover > tbody > tr:hover > .danger, -.table-hover > tbody > tr.danger:hover > th { - background-color: #ebcccc; -} -.table-responsive { - min-height: .01%; - overflow-x: auto; -} -@media screen and (max-width: 767px) { - .table-responsive { - width: 100%; - margin-bottom: 15px; - overflow-y: hidden; - -ms-overflow-style: -ms-autohiding-scrollbar; - border: 1px solid #ddd; - } - .table-responsive > .table { - margin-bottom: 0; - } - .table-responsive > .table > thead > tr > th, - .table-responsive > .table > tbody > tr > th, - .table-responsive > .table > tfoot > tr > th, - .table-responsive > .table > thead > tr > td, - .table-responsive > .table > tbody > tr > td, - .table-responsive > .table > tfoot > tr > td { - white-space: nowrap; - } - .table-responsive > .table-bordered { - border: 0; - } - .table-responsive > .table-bordered > thead > tr > th:first-child, - .table-responsive > .table-bordered > tbody > tr > th:first-child, - .table-responsive > .table-bordered > tfoot > tr > th:first-child, - .table-responsive > .table-bordered > thead > tr > td:first-child, - .table-responsive > .table-bordered > tbody > tr > td:first-child, - .table-responsive > .table-bordered > tfoot > tr > td:first-child { - border-left: 0; - } - .table-responsive > .table-bordered > thead > tr > th:last-child, - .table-responsive > .table-bordered > tbody > tr > th:last-child, - .table-responsive > .table-bordered > tfoot > tr > th:last-child, - .table-responsive > .table-bordered > thead > tr > td:last-child, - .table-responsive > .table-bordered > tbody > tr > td:last-child, - .table-responsive > .table-bordered > tfoot > tr > td:last-child { - border-right: 0; - } - .table-responsive > .table-bordered > tbody > tr:last-child > th, - .table-responsive > .table-bordered > tfoot > tr:last-child > th, - .table-responsive > .table-bordered > tbody > tr:last-child > td, - .table-responsive > .table-bordered > tfoot > tr:last-child > td { - border-bottom: 0; - } -} -fieldset { - min-width: 0; - padding: 0; - margin: 0; - border: 0; -} -legend { - display: block; - width: 100%; - padding: 0; - margin-bottom: 20px; - font-size: 21px; - line-height: inherit; - color: #333; - border: 0; - border-bottom: 1px solid #e5e5e5; -} -label { - display: inline-block; - max-width: 100%; - margin-bottom: 5px; - font-weight: bold; -} -input[type="search"] { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -input[type="radio"], -input[type="checkbox"] { - margin: 4px 0 0; - 
margin-top: 1px \9; - line-height: normal; -} -input[type="file"] { - display: block; -} -input[type="range"] { - display: block; - width: 100%; -} -select[multiple], -select[size] { - height: auto; -} -input[type="file"]:focus, -input[type="radio"]:focus, -input[type="checkbox"]:focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -output { - display: block; - padding-top: 7px; - font-size: 14px; - line-height: 1.42857143; - color: #555; -} -.form-control { - display: block; - width: 100%; - height: 34px; - padding: 6px 12px; - font-size: 14px; - line-height: 1.42857143; - color: #555; - background-color: #fff; - background-image: none; - border: 1px solid #ccc; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); - -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; - -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; - transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; -} -.form-control:focus { - border-color: #66afe9; - outline: 0; - -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); - box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); -} -.form-control::-moz-placeholder { - color: #999; - opacity: 1; -} -.form-control:-ms-input-placeholder { - color: #999; -} -.form-control::-webkit-input-placeholder { - color: #999; -} -.form-control::-ms-expand { - background-color: transparent; - border: 0; -} -.form-control[disabled], -.form-control[readonly], -fieldset[disabled] .form-control { - background-color: #eee; - opacity: 1; -} -.form-control[disabled], -fieldset[disabled] .form-control { - cursor: not-allowed; -} -textarea.form-control { - height: auto; -} -input[type="search"] { - -webkit-appearance: none; -} -@media screen and (-webkit-min-device-pixel-ratio: 0) { - input[type="date"].form-control, - input[type="time"].form-control, - input[type="datetime-local"].form-control, - input[type="month"].form-control { - line-height: 34px; - } - input[type="date"].input-sm, - input[type="time"].input-sm, - input[type="datetime-local"].input-sm, - input[type="month"].input-sm, - .input-group-sm input[type="date"], - .input-group-sm input[type="time"], - .input-group-sm input[type="datetime-local"], - .input-group-sm input[type="month"] { - line-height: 30px; - } - input[type="date"].input-lg, - input[type="time"].input-lg, - input[type="datetime-local"].input-lg, - input[type="month"].input-lg, - .input-group-lg input[type="date"], - .input-group-lg input[type="time"], - .input-group-lg input[type="datetime-local"], - .input-group-lg input[type="month"] { - line-height: 46px; - } -} -.form-group { - margin-bottom: 15px; -} -.radio, -.checkbox { - position: relative; - display: block; - margin-top: 10px; - margin-bottom: 10px; -} -.radio label, -.checkbox label { - min-height: 20px; - padding-left: 20px; - margin-bottom: 0; - font-weight: normal; - cursor: pointer; -} -.radio input[type="radio"], -.radio-inline input[type="radio"], -.checkbox input[type="checkbox"], -.checkbox-inline input[type="checkbox"] { - position: absolute; - margin-top: 4px \9; - margin-left: -20px; -} -.radio + .radio, -.checkbox + .checkbox { - margin-top: -5px; -} -.radio-inline, -.checkbox-inline { - position: relative; - display: inline-block; - padding-left: 20px; - margin-bottom: 0; - font-weight: normal; - vertical-align: middle; - cursor: pointer; -} 
-.radio-inline + .radio-inline, -.checkbox-inline + .checkbox-inline { - margin-top: 0; - margin-left: 10px; -} -input[type="radio"][disabled], -input[type="checkbox"][disabled], -input[type="radio"].disabled, -input[type="checkbox"].disabled, -fieldset[disabled] input[type="radio"], -fieldset[disabled] input[type="checkbox"] { - cursor: not-allowed; -} -.radio-inline.disabled, -.checkbox-inline.disabled, -fieldset[disabled] .radio-inline, -fieldset[disabled] .checkbox-inline { - cursor: not-allowed; -} -.radio.disabled label, -.checkbox.disabled label, -fieldset[disabled] .radio label, -fieldset[disabled] .checkbox label { - cursor: not-allowed; -} -.form-control-static { - min-height: 34px; - padding-top: 7px; - padding-bottom: 7px; - margin-bottom: 0; -} -.form-control-static.input-lg, -.form-control-static.input-sm { - padding-right: 0; - padding-left: 0; -} -.input-sm { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -select.input-sm { - height: 30px; - line-height: 30px; -} -textarea.input-sm, -select[multiple].input-sm { - height: auto; -} -.form-group-sm .form-control { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.form-group-sm select.form-control { - height: 30px; - line-height: 30px; -} -.form-group-sm textarea.form-control, -.form-group-sm select[multiple].form-control { - height: auto; -} -.form-group-sm .form-control-static { - height: 30px; - min-height: 32px; - padding: 6px 10px; - font-size: 12px; - line-height: 1.5; -} -.input-lg { - height: 46px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -select.input-lg { - height: 46px; - line-height: 46px; -} -textarea.input-lg, -select[multiple].input-lg { - height: auto; -} -.form-group-lg .form-control { - height: 46px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -.form-group-lg select.form-control { - height: 46px; - line-height: 46px; -} -.form-group-lg textarea.form-control, -.form-group-lg select[multiple].form-control { - height: auto; -} -.form-group-lg .form-control-static { - height: 46px; - min-height: 38px; - padding: 11px 16px; - font-size: 18px; - line-height: 1.3333333; -} -.has-feedback { - position: relative; -} -.has-feedback .form-control { - padding-right: 42.5px; -} -.form-control-feedback { - position: absolute; - top: 0; - right: 0; - z-index: 2; - display: block; - width: 34px; - height: 34px; - line-height: 34px; - text-align: center; - pointer-events: none; -} -.input-lg + .form-control-feedback, -.input-group-lg + .form-control-feedback, -.form-group-lg .form-control + .form-control-feedback { - width: 46px; - height: 46px; - line-height: 46px; -} -.input-sm + .form-control-feedback, -.input-group-sm + .form-control-feedback, -.form-group-sm .form-control + .form-control-feedback { - width: 30px; - height: 30px; - line-height: 30px; -} -.has-success .help-block, -.has-success .control-label, -.has-success .radio, -.has-success .checkbox, -.has-success .radio-inline, -.has-success .checkbox-inline, -.has-success.radio label, -.has-success.checkbox label, -.has-success.radio-inline label, -.has-success.checkbox-inline label { - color: #3c763d; -} -.has-success .form-control { - border-color: #3c763d; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); -} -.has-success .form-control:focus { - border-color: #2b542c; - -webkit-box-shadow: inset 0 1px 
1px rgba(0, 0, 0, .075), 0 0 6px #67b168; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168; -} -.has-success .input-group-addon { - color: #3c763d; - background-color: #dff0d8; - border-color: #3c763d; -} -.has-success .form-control-feedback { - color: #3c763d; -} -.has-warning .help-block, -.has-warning .control-label, -.has-warning .radio, -.has-warning .checkbox, -.has-warning .radio-inline, -.has-warning .checkbox-inline, -.has-warning.radio label, -.has-warning.checkbox label, -.has-warning.radio-inline label, -.has-warning.checkbox-inline label { - color: #8a6d3b; -} -.has-warning .form-control { - border-color: #8a6d3b; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); -} -.has-warning .form-control:focus { - border-color: #66512c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; -} -.has-warning .input-group-addon { - color: #8a6d3b; - background-color: #fcf8e3; - border-color: #8a6d3b; -} -.has-warning .form-control-feedback { - color: #8a6d3b; -} -.has-error .help-block, -.has-error .control-label, -.has-error .radio, -.has-error .checkbox, -.has-error .radio-inline, -.has-error .checkbox-inline, -.has-error.radio label, -.has-error.checkbox label, -.has-error.radio-inline label, -.has-error.checkbox-inline label { - color: #a94442; -} -.has-error .form-control { - border-color: #a94442; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); -} -.has-error .form-control:focus { - border-color: #843534; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; -} -.has-error .input-group-addon { - color: #a94442; - background-color: #f2dede; - border-color: #a94442; -} -.has-error .form-control-feedback { - color: #a94442; -} -.has-feedback label ~ .form-control-feedback { - top: 25px; -} -.has-feedback label.sr-only ~ .form-control-feedback { - top: 0; -} -.help-block { - display: block; - margin-top: 5px; - margin-bottom: 10px; - color: #737373; -} -@media (min-width: 768px) { - .form-inline .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .form-control { - display: inline-block; - width: auto; - vertical-align: middle; - } - .form-inline .form-control-static { - display: inline-block; - } - .form-inline .input-group { - display: inline-table; - vertical-align: middle; - } - .form-inline .input-group .input-group-addon, - .form-inline .input-group .input-group-btn, - .form-inline .input-group .form-control { - width: auto; - } - .form-inline .input-group > .form-control { - width: 100%; - } - .form-inline .control-label { - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .radio, - .form-inline .checkbox { - display: inline-block; - margin-top: 0; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .radio label, - .form-inline .checkbox label { - padding-left: 0; - } - .form-inline .radio input[type="radio"], - .form-inline .checkbox input[type="checkbox"] { - position: relative; - margin-left: 0; - } - .form-inline .has-feedback .form-control-feedback { - top: 0; - } -} -.form-horizontal .radio, -.form-horizontal .checkbox, -.form-horizontal .radio-inline, -.form-horizontal .checkbox-inline { - padding-top: 7px; - margin-top: 0; - margin-bottom: 0; -} -.form-horizontal .radio, 
-.form-horizontal .checkbox { - min-height: 27px; -} -.form-horizontal .form-group { - margin-right: -15px; - margin-left: -15px; -} -@media (min-width: 768px) { - .form-horizontal .control-label { - padding-top: 7px; - margin-bottom: 0; - text-align: right; - } -} -.form-horizontal .has-feedback .form-control-feedback { - right: 15px; -} -@media (min-width: 768px) { - .form-horizontal .form-group-lg .control-label { - padding-top: 11px; - font-size: 18px; - } -} -@media (min-width: 768px) { - .form-horizontal .form-group-sm .control-label { - padding-top: 6px; - font-size: 12px; - } -} -.btn { - display: inline-block; - padding: 6px 12px; - margin-bottom: 0; - font-size: 14px; - font-weight: normal; - line-height: 1.42857143; - text-align: center; - white-space: nowrap; - vertical-align: middle; - -ms-touch-action: manipulation; - touch-action: manipulation; - cursor: pointer; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; - background-image: none; - border: 1px solid transparent; - border-radius: 4px; -} -.btn:focus, -.btn:active:focus, -.btn.active:focus, -.btn.focus, -.btn:active.focus, -.btn.active.focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -.btn:hover, -.btn:focus, -.btn.focus { - color: #333; - text-decoration: none; -} -.btn:active, -.btn.active { - background-image: none; - outline: 0; - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); -} -.btn.disabled, -.btn[disabled], -fieldset[disabled] .btn { - cursor: not-allowed; - filter: alpha(opacity=65); - -webkit-box-shadow: none; - box-shadow: none; - opacity: .65; -} -a.btn.disabled, -fieldset[disabled] a.btn { - pointer-events: none; -} -.btn-default { - color: #333; - background-color: #fff; - border-color: #ccc; -} -.btn-default:focus, -.btn-default.focus { - color: #333; - background-color: #e6e6e6; - border-color: #8c8c8c; -} -.btn-default:hover { - color: #333; - background-color: #e6e6e6; - border-color: #adadad; -} -.btn-default:active, -.btn-default.active, -.open > .dropdown-toggle.btn-default { - color: #333; - background-color: #e6e6e6; - border-color: #adadad; -} -.btn-default:active:hover, -.btn-default.active:hover, -.open > .dropdown-toggle.btn-default:hover, -.btn-default:active:focus, -.btn-default.active:focus, -.open > .dropdown-toggle.btn-default:focus, -.btn-default:active.focus, -.btn-default.active.focus, -.open > .dropdown-toggle.btn-default.focus { - color: #333; - background-color: #d4d4d4; - border-color: #8c8c8c; -} -.btn-default:active, -.btn-default.active, -.open > .dropdown-toggle.btn-default { - background-image: none; -} -.btn-default.disabled:hover, -.btn-default[disabled]:hover, -fieldset[disabled] .btn-default:hover, -.btn-default.disabled:focus, -.btn-default[disabled]:focus, -fieldset[disabled] .btn-default:focus, -.btn-default.disabled.focus, -.btn-default[disabled].focus, -fieldset[disabled] .btn-default.focus { - background-color: #fff; - border-color: #ccc; -} -.btn-default .badge { - color: #fff; - background-color: #333; -} -.btn-primary { - color: #fff; - background-color: #337ab7; - border-color: #2e6da4; -} -.btn-primary:focus, -.btn-primary.focus { - color: #fff; - background-color: #286090; - border-color: #122b40; -} -.btn-primary:hover { - color: #fff; - background-color: #286090; - border-color: #204d74; -} -.btn-primary:active, -.btn-primary.active, -.open > .dropdown-toggle.btn-primary { - color: #fff; - background-color: #286090; 
- border-color: #204d74; -} -.btn-primary:active:hover, -.btn-primary.active:hover, -.open > .dropdown-toggle.btn-primary:hover, -.btn-primary:active:focus, -.btn-primary.active:focus, -.open > .dropdown-toggle.btn-primary:focus, -.btn-primary:active.focus, -.btn-primary.active.focus, -.open > .dropdown-toggle.btn-primary.focus { - color: #fff; - background-color: #204d74; - border-color: #122b40; -} -.btn-primary:active, -.btn-primary.active, -.open > .dropdown-toggle.btn-primary { - background-image: none; -} -.btn-primary.disabled:hover, -.btn-primary[disabled]:hover, -fieldset[disabled] .btn-primary:hover, -.btn-primary.disabled:focus, -.btn-primary[disabled]:focus, -fieldset[disabled] .btn-primary:focus, -.btn-primary.disabled.focus, -.btn-primary[disabled].focus, -fieldset[disabled] .btn-primary.focus { - background-color: #337ab7; - border-color: #2e6da4; -} -.btn-primary .badge { - color: #337ab7; - background-color: #fff; -} -.btn-success { - color: #fff; - background-color: #5cb85c; - border-color: #4cae4c; -} -.btn-success:focus, -.btn-success.focus { - color: #fff; - background-color: #449d44; - border-color: #255625; -} -.btn-success:hover { - color: #fff; - background-color: #449d44; - border-color: #398439; -} -.btn-success:active, -.btn-success.active, -.open > .dropdown-toggle.btn-success { - color: #fff; - background-color: #449d44; - border-color: #398439; -} -.btn-success:active:hover, -.btn-success.active:hover, -.open > .dropdown-toggle.btn-success:hover, -.btn-success:active:focus, -.btn-success.active:focus, -.open > .dropdown-toggle.btn-success:focus, -.btn-success:active.focus, -.btn-success.active.focus, -.open > .dropdown-toggle.btn-success.focus { - color: #fff; - background-color: #398439; - border-color: #255625; -} -.btn-success:active, -.btn-success.active, -.open > .dropdown-toggle.btn-success { - background-image: none; -} -.btn-success.disabled:hover, -.btn-success[disabled]:hover, -fieldset[disabled] .btn-success:hover, -.btn-success.disabled:focus, -.btn-success[disabled]:focus, -fieldset[disabled] .btn-success:focus, -.btn-success.disabled.focus, -.btn-success[disabled].focus, -fieldset[disabled] .btn-success.focus { - background-color: #5cb85c; - border-color: #4cae4c; -} -.btn-success .badge { - color: #5cb85c; - background-color: #fff; -} -.btn-info { - color: #fff; - background-color: #5bc0de; - border-color: #46b8da; -} -.btn-info:focus, -.btn-info.focus { - color: #fff; - background-color: #31b0d5; - border-color: #1b6d85; -} -.btn-info:hover { - color: #fff; - background-color: #31b0d5; - border-color: #269abc; -} -.btn-info:active, -.btn-info.active, -.open > .dropdown-toggle.btn-info { - color: #fff; - background-color: #31b0d5; - border-color: #269abc; -} -.btn-info:active:hover, -.btn-info.active:hover, -.open > .dropdown-toggle.btn-info:hover, -.btn-info:active:focus, -.btn-info.active:focus, -.open > .dropdown-toggle.btn-info:focus, -.btn-info:active.focus, -.btn-info.active.focus, -.open > .dropdown-toggle.btn-info.focus { - color: #fff; - background-color: #269abc; - border-color: #1b6d85; -} -.btn-info:active, -.btn-info.active, -.open > .dropdown-toggle.btn-info { - background-image: none; -} -.btn-info.disabled:hover, -.btn-info[disabled]:hover, -fieldset[disabled] .btn-info:hover, -.btn-info.disabled:focus, -.btn-info[disabled]:focus, -fieldset[disabled] .btn-info:focus, -.btn-info.disabled.focus, -.btn-info[disabled].focus, -fieldset[disabled] .btn-info.focus { - background-color: #5bc0de; - border-color: #46b8da; -} -.btn-info 
.badge { - color: #5bc0de; - background-color: #fff; -} -.btn-warning { - color: #fff; - background-color: #f0ad4e; - border-color: #eea236; -} -.btn-warning:focus, -.btn-warning.focus { - color: #fff; - background-color: #ec971f; - border-color: #985f0d; -} -.btn-warning:hover { - color: #fff; - background-color: #ec971f; - border-color: #d58512; -} -.btn-warning:active, -.btn-warning.active, -.open > .dropdown-toggle.btn-warning { - color: #fff; - background-color: #ec971f; - border-color: #d58512; -} -.btn-warning:active:hover, -.btn-warning.active:hover, -.open > .dropdown-toggle.btn-warning:hover, -.btn-warning:active:focus, -.btn-warning.active:focus, -.open > .dropdown-toggle.btn-warning:focus, -.btn-warning:active.focus, -.btn-warning.active.focus, -.open > .dropdown-toggle.btn-warning.focus { - color: #fff; - background-color: #d58512; - border-color: #985f0d; -} -.btn-warning:active, -.btn-warning.active, -.open > .dropdown-toggle.btn-warning { - background-image: none; -} -.btn-warning.disabled:hover, -.btn-warning[disabled]:hover, -fieldset[disabled] .btn-warning:hover, -.btn-warning.disabled:focus, -.btn-warning[disabled]:focus, -fieldset[disabled] .btn-warning:focus, -.btn-warning.disabled.focus, -.btn-warning[disabled].focus, -fieldset[disabled] .btn-warning.focus { - background-color: #f0ad4e; - border-color: #eea236; -} -.btn-warning .badge { - color: #f0ad4e; - background-color: #fff; -} -.btn-danger { - color: #fff; - background-color: #d9534f; - border-color: #d43f3a; -} -.btn-danger:focus, -.btn-danger.focus { - color: #fff; - background-color: #c9302c; - border-color: #761c19; -} -.btn-danger:hover { - color: #fff; - background-color: #c9302c; - border-color: #ac2925; -} -.btn-danger:active, -.btn-danger.active, -.open > .dropdown-toggle.btn-danger { - color: #fff; - background-color: #c9302c; - border-color: #ac2925; -} -.btn-danger:active:hover, -.btn-danger.active:hover, -.open > .dropdown-toggle.btn-danger:hover, -.btn-danger:active:focus, -.btn-danger.active:focus, -.open > .dropdown-toggle.btn-danger:focus, -.btn-danger:active.focus, -.btn-danger.active.focus, -.open > .dropdown-toggle.btn-danger.focus { - color: #fff; - background-color: #ac2925; - border-color: #761c19; -} -.btn-danger:active, -.btn-danger.active, -.open > .dropdown-toggle.btn-danger { - background-image: none; -} -.btn-danger.disabled:hover, -.btn-danger[disabled]:hover, -fieldset[disabled] .btn-danger:hover, -.btn-danger.disabled:focus, -.btn-danger[disabled]:focus, -fieldset[disabled] .btn-danger:focus, -.btn-danger.disabled.focus, -.btn-danger[disabled].focus, -fieldset[disabled] .btn-danger.focus { - background-color: #d9534f; - border-color: #d43f3a; -} -.btn-danger .badge { - color: #d9534f; - background-color: #fff; -} -.btn-link { - font-weight: normal; - color: #337ab7; - border-radius: 0; -} -.btn-link, -.btn-link:active, -.btn-link.active, -.btn-link[disabled], -fieldset[disabled] .btn-link { - background-color: transparent; - -webkit-box-shadow: none; - box-shadow: none; -} -.btn-link, -.btn-link:hover, -.btn-link:focus, -.btn-link:active { - border-color: transparent; -} -.btn-link:hover, -.btn-link:focus { - color: #23527c; - text-decoration: underline; - background-color: transparent; -} -.btn-link[disabled]:hover, -fieldset[disabled] .btn-link:hover, -.btn-link[disabled]:focus, -fieldset[disabled] .btn-link:focus { - color: #777; - text-decoration: none; -} -.btn-lg, -.btn-group-lg > .btn { - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 
6px; -} -.btn-sm, -.btn-group-sm > .btn { - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn-xs, -.btn-group-xs > .btn { - padding: 1px 5px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn-block { - display: block; - width: 100%; -} -.btn-block + .btn-block { - margin-top: 5px; -} -input[type="submit"].btn-block, -input[type="reset"].btn-block, -input[type="button"].btn-block { - width: 100%; -} -.fade { - opacity: 0; - -webkit-transition: opacity .15s linear; - -o-transition: opacity .15s linear; - transition: opacity .15s linear; -} -.fade.in { - opacity: 1; -} -.collapse { - display: none; -} -.collapse.in { - display: block; -} -tr.collapse.in { - display: table-row; -} -tbody.collapse.in { - display: table-row-group; -} -.collapsing { - position: relative; - height: 0; - overflow: hidden; - -webkit-transition-timing-function: ease; - -o-transition-timing-function: ease; - transition-timing-function: ease; - -webkit-transition-duration: .35s; - -o-transition-duration: .35s; - transition-duration: .35s; - -webkit-transition-property: height, visibility; - -o-transition-property: height, visibility; - transition-property: height, visibility; -} -.caret { - display: inline-block; - width: 0; - height: 0; - margin-left: 2px; - vertical-align: middle; - border-top: 4px dashed; - border-top: 4px solid \9; - border-right: 4px solid transparent; - border-left: 4px solid transparent; -} -.dropup, -.dropdown { - position: relative; -} -.dropdown-toggle:focus { - outline: 0; -} -.dropdown-menu { - position: absolute; - top: 100%; - left: 0; - z-index: 1000; - display: none; - float: left; - min-width: 160px; - padding: 5px 0; - margin: 2px 0 0; - font-size: 14px; - text-align: left; - list-style: none; - background-color: #fff; - -webkit-background-clip: padding-box; - background-clip: padding-box; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, .15); - border-radius: 4px; - -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175); - box-shadow: 0 6px 12px rgba(0, 0, 0, .175); -} -.dropdown-menu.pull-right { - right: 0; - left: auto; -} -.dropdown-menu .divider { - height: 1px; - margin: 9px 0; - overflow: hidden; - background-color: #e5e5e5; -} -.dropdown-menu > li > a { - display: block; - padding: 3px 20px; - clear: both; - font-weight: normal; - line-height: 1.42857143; - color: #333; - white-space: nowrap; -} -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus { - color: #262626; - text-decoration: none; - background-color: #f5f5f5; -} -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - color: #fff; - text-decoration: none; - background-color: #337ab7; - outline: 0; -} -.dropdown-menu > .disabled > a, -.dropdown-menu > .disabled > a:hover, -.dropdown-menu > .disabled > a:focus { - color: #777; -} -.dropdown-menu > .disabled > a:hover, -.dropdown-menu > .disabled > a:focus { - text-decoration: none; - cursor: not-allowed; - background-color: transparent; - background-image: none; - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); -} -.open > .dropdown-menu { - display: block; -} -.open > a { - outline: 0; -} -.dropdown-menu-right { - right: 0; - left: auto; -} -.dropdown-menu-left { - right: auto; - left: 0; -} -.dropdown-header { - display: block; - padding: 3px 20px; - font-size: 12px; - line-height: 1.42857143; - color: #777; - white-space: nowrap; -} -.dropdown-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; 
- z-index: 990; -} -.pull-right > .dropdown-menu { - right: 0; - left: auto; -} -.dropup .caret, -.navbar-fixed-bottom .dropdown .caret { - content: ""; - border-top: 0; - border-bottom: 4px dashed; - border-bottom: 4px solid \9; -} -.dropup .dropdown-menu, -.navbar-fixed-bottom .dropdown .dropdown-menu { - top: auto; - bottom: 100%; - margin-bottom: 2px; -} -@media (min-width: 768px) { - .navbar-right .dropdown-menu { - right: 0; - left: auto; - } - .navbar-right .dropdown-menu-left { - right: auto; - left: 0; - } -} -.btn-group, -.btn-group-vertical { - position: relative; - display: inline-block; - vertical-align: middle; -} -.btn-group > .btn, -.btn-group-vertical > .btn { - position: relative; - float: left; -} -.btn-group > .btn:hover, -.btn-group-vertical > .btn:hover, -.btn-group > .btn:focus, -.btn-group-vertical > .btn:focus, -.btn-group > .btn:active, -.btn-group-vertical > .btn:active, -.btn-group > .btn.active, -.btn-group-vertical > .btn.active { - z-index: 2; -} -.btn-group .btn + .btn, -.btn-group .btn + .btn-group, -.btn-group .btn-group + .btn, -.btn-group .btn-group + .btn-group { - margin-left: -1px; -} -.btn-toolbar { - margin-left: -5px; -} -.btn-toolbar .btn, -.btn-toolbar .btn-group, -.btn-toolbar .input-group { - float: left; -} -.btn-toolbar > .btn, -.btn-toolbar > .btn-group, -.btn-toolbar > .input-group { - margin-left: 5px; -} -.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { - border-radius: 0; -} -.btn-group > .btn:first-child { - margin-left: 0; -} -.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} -.btn-group > .btn:last-child:not(:first-child), -.btn-group > .dropdown-toggle:not(:first-child) { - border-top-left-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group > .btn-group { - float: left; -} -.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { - border-radius: 0; -} -.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child, -.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle { - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} -.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child { - border-top-left-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group .dropdown-toggle:active, -.btn-group.open .dropdown-toggle { - outline: 0; -} -.btn-group > .btn + .dropdown-toggle { - padding-right: 8px; - padding-left: 8px; -} -.btn-group > .btn-lg + .dropdown-toggle { - padding-right: 12px; - padding-left: 12px; -} -.btn-group.open .dropdown-toggle { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); -} -.btn-group.open .dropdown-toggle.btn-link { - -webkit-box-shadow: none; - box-shadow: none; -} -.btn .caret { - margin-left: 0; -} -.btn-lg .caret { - border-width: 5px 5px 0; - border-bottom-width: 0; -} -.dropup .btn-lg .caret { - border-width: 0 5px 5px; -} -.btn-group-vertical > .btn, -.btn-group-vertical > .btn-group, -.btn-group-vertical > .btn-group > .btn { - display: block; - float: none; - width: 100%; - max-width: 100%; -} -.btn-group-vertical > .btn-group > .btn { - float: none; -} -.btn-group-vertical > .btn + .btn, -.btn-group-vertical > .btn + .btn-group, -.btn-group-vertical > .btn-group + .btn, -.btn-group-vertical > .btn-group + .btn-group { - margin-top: -1px; - margin-left: 0; -} -.btn-group-vertical > .btn:not(:first-child):not(:last-child) { - border-radius: 0; -} 
-.btn-group-vertical > .btn:first-child:not(:last-child) { - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group-vertical > .btn:last-child:not(:first-child) { - border-top-left-radius: 0; - border-top-right-radius: 0; - border-bottom-right-radius: 4px; - border-bottom-left-radius: 4px; -} -.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn { - border-radius: 0; -} -.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child, -.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle { - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child { - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.btn-group-justified { - display: table; - width: 100%; - table-layout: fixed; - border-collapse: separate; -} -.btn-group-justified > .btn, -.btn-group-justified > .btn-group { - display: table-cell; - float: none; - width: 1%; -} -.btn-group-justified > .btn-group .btn { - width: 100%; -} -.btn-group-justified > .btn-group .dropdown-menu { - left: auto; -} -[data-toggle="buttons"] > .btn input[type="radio"], -[data-toggle="buttons"] > .btn-group > .btn input[type="radio"], -[data-toggle="buttons"] > .btn input[type="checkbox"], -[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] { - position: absolute; - clip: rect(0, 0, 0, 0); - pointer-events: none; -} -.input-group { - position: relative; - display: table; - border-collapse: separate; -} -.input-group[class*="col-"] { - float: none; - padding-right: 0; - padding-left: 0; -} -.input-group .form-control { - position: relative; - z-index: 2; - float: left; - width: 100%; - margin-bottom: 0; -} -.input-group .form-control:focus { - z-index: 3; -} -.input-group-lg > .form-control, -.input-group-lg > .input-group-addon, -.input-group-lg > .input-group-btn > .btn { - height: 46px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -select.input-group-lg > .form-control, -select.input-group-lg > .input-group-addon, -select.input-group-lg > .input-group-btn > .btn { - height: 46px; - line-height: 46px; -} -textarea.input-group-lg > .form-control, -textarea.input-group-lg > .input-group-addon, -textarea.input-group-lg > .input-group-btn > .btn, -select[multiple].input-group-lg > .form-control, -select[multiple].input-group-lg > .input-group-addon, -select[multiple].input-group-lg > .input-group-btn > .btn { - height: auto; -} -.input-group-sm > .form-control, -.input-group-sm > .input-group-addon, -.input-group-sm > .input-group-btn > .btn { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -select.input-group-sm > .form-control, -select.input-group-sm > .input-group-addon, -select.input-group-sm > .input-group-btn > .btn { - height: 30px; - line-height: 30px; -} -textarea.input-group-sm > .form-control, -textarea.input-group-sm > .input-group-addon, -textarea.input-group-sm > .input-group-btn > .btn, -select[multiple].input-group-sm > .form-control, -select[multiple].input-group-sm > .input-group-addon, -select[multiple].input-group-sm > .input-group-btn > .btn { - height: auto; -} -.input-group-addon, -.input-group-btn, -.input-group .form-control { - display: table-cell; -} -.input-group-addon:not(:first-child):not(:last-child), -.input-group-btn:not(:first-child):not(:last-child), 
-.input-group .form-control:not(:first-child):not(:last-child) { - border-radius: 0; -} -.input-group-addon, -.input-group-btn { - width: 1%; - white-space: nowrap; - vertical-align: middle; -} -.input-group-addon { - padding: 6px 12px; - font-size: 14px; - font-weight: normal; - line-height: 1; - color: #555; - text-align: center; - background-color: #eee; - border: 1px solid #ccc; - border-radius: 4px; -} -.input-group-addon.input-sm { - padding: 5px 10px; - font-size: 12px; - border-radius: 3px; -} -.input-group-addon.input-lg { - padding: 10px 16px; - font-size: 18px; - border-radius: 6px; -} -.input-group-addon input[type="radio"], -.input-group-addon input[type="checkbox"] { - margin-top: 0; -} -.input-group .form-control:first-child, -.input-group-addon:first-child, -.input-group-btn:first-child > .btn, -.input-group-btn:first-child > .btn-group > .btn, -.input-group-btn:first-child > .dropdown-toggle, -.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle), -.input-group-btn:last-child > .btn-group:not(:last-child) > .btn { - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} -.input-group-addon:first-child { - border-right: 0; -} -.input-group .form-control:last-child, -.input-group-addon:last-child, -.input-group-btn:last-child > .btn, -.input-group-btn:last-child > .btn-group > .btn, -.input-group-btn:last-child > .dropdown-toggle, -.input-group-btn:first-child > .btn:not(:first-child), -.input-group-btn:first-child > .btn-group:not(:first-child) > .btn { - border-top-left-radius: 0; - border-bottom-left-radius: 0; -} -.input-group-addon:last-child { - border-left: 0; -} -.input-group-btn { - position: relative; - font-size: 0; - white-space: nowrap; -} -.input-group-btn > .btn { - position: relative; -} -.input-group-btn > .btn + .btn { - margin-left: -1px; -} -.input-group-btn > .btn:hover, -.input-group-btn > .btn:focus, -.input-group-btn > .btn:active { - z-index: 2; -} -.input-group-btn:first-child > .btn, -.input-group-btn:first-child > .btn-group { - margin-right: -1px; -} -.input-group-btn:last-child > .btn, -.input-group-btn:last-child > .btn-group { - z-index: 2; - margin-left: -1px; -} -.nav { - padding-left: 0; - margin-bottom: 0; - list-style: none; -} -.nav > li { - position: relative; - display: block; -} -.nav > li > a { - position: relative; - display: block; - padding: 10px 15px; -} -.nav > li > a:hover, -.nav > li > a:focus { - text-decoration: none; - background-color: #eee; -} -.nav > li.disabled > a { - color: #777; -} -.nav > li.disabled > a:hover, -.nav > li.disabled > a:focus { - color: #777; - text-decoration: none; - cursor: not-allowed; - background-color: transparent; -} -.nav .open > a, -.nav .open > a:hover, -.nav .open > a:focus { - background-color: #eee; - border-color: #337ab7; -} -.nav .nav-divider { - height: 1px; - margin: 9px 0; - overflow: hidden; - background-color: #e5e5e5; -} -.nav > li > a > img { - max-width: none; -} -.nav-tabs { - border-bottom: 1px solid #ddd; -} -.nav-tabs > li { - float: left; - margin-bottom: -1px; -} -.nav-tabs > li > a { - margin-right: 2px; - line-height: 1.42857143; - border: 1px solid transparent; - border-radius: 4px 4px 0 0; -} -.nav-tabs > li > a:hover { - border-color: #eee #eee #ddd; -} -.nav-tabs > li.active > a, -.nav-tabs > li.active > a:hover, -.nav-tabs > li.active > a:focus { - color: #555; - cursor: default; - background-color: #fff; - border: 1px solid #ddd; - border-bottom-color: transparent; -} -.nav-tabs.nav-justified { - width: 100%; - border-bottom: 0; -} 
-.nav-tabs.nav-justified > li { - float: none; -} -.nav-tabs.nav-justified > li > a { - margin-bottom: 5px; - text-align: center; -} -.nav-tabs.nav-justified > .dropdown .dropdown-menu { - top: auto; - left: auto; -} -@media (min-width: 768px) { - .nav-tabs.nav-justified > li { - display: table-cell; - width: 1%; - } - .nav-tabs.nav-justified > li > a { - margin-bottom: 0; - } -} -.nav-tabs.nav-justified > li > a { - margin-right: 0; - border-radius: 4px; -} -.nav-tabs.nav-justified > .active > a, -.nav-tabs.nav-justified > .active > a:hover, -.nav-tabs.nav-justified > .active > a:focus { - border: 1px solid #ddd; -} -@media (min-width: 768px) { - .nav-tabs.nav-justified > li > a { - border-bottom: 1px solid #ddd; - border-radius: 4px 4px 0 0; - } - .nav-tabs.nav-justified > .active > a, - .nav-tabs.nav-justified > .active > a:hover, - .nav-tabs.nav-justified > .active > a:focus { - border-bottom-color: #fff; - } -} -.nav-pills > li { - float: left; -} -.nav-pills > li > a { - border-radius: 4px; -} -.nav-pills > li + li { - margin-left: 2px; -} -.nav-pills > li.active > a, -.nav-pills > li.active > a:hover, -.nav-pills > li.active > a:focus { - color: #fff; - background-color: #337ab7; -} -.nav-stacked > li { - float: none; -} -.nav-stacked > li + li { - margin-top: 2px; - margin-left: 0; -} -.nav-justified { - width: 100%; -} -.nav-justified > li { - float: none; -} -.nav-justified > li > a { - margin-bottom: 5px; - text-align: center; -} -.nav-justified > .dropdown .dropdown-menu { - top: auto; - left: auto; -} -@media (min-width: 768px) { - .nav-justified > li { - display: table-cell; - width: 1%; - } - .nav-justified > li > a { - margin-bottom: 0; - } -} -.nav-tabs-justified { - border-bottom: 0; -} -.nav-tabs-justified > li > a { - margin-right: 0; - border-radius: 4px; -} -.nav-tabs-justified > .active > a, -.nav-tabs-justified > .active > a:hover, -.nav-tabs-justified > .active > a:focus { - border: 1px solid #ddd; -} -@media (min-width: 768px) { - .nav-tabs-justified > li > a { - border-bottom: 1px solid #ddd; - border-radius: 4px 4px 0 0; - } - .nav-tabs-justified > .active > a, - .nav-tabs-justified > .active > a:hover, - .nav-tabs-justified > .active > a:focus { - border-bottom-color: #fff; - } -} -.tab-content > .tab-pane { - display: none; -} -.tab-content > .active { - display: block; -} -.nav-tabs .dropdown-menu { - margin-top: -1px; - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.navbar { - position: relative; - min-height: 50px; - margin-bottom: 20px; - border: 1px solid transparent; -} -@media (min-width: 768px) { - .navbar { - border-radius: 4px; - } -} -@media (min-width: 768px) { - .navbar-header { - float: left; - } -} -.navbar-collapse { - padding-right: 15px; - padding-left: 15px; - overflow-x: visible; - -webkit-overflow-scrolling: touch; - border-top: 1px solid transparent; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); -} -.navbar-collapse.in { - overflow-y: auto; -} -@media (min-width: 768px) { - .navbar-collapse { - width: auto; - border-top: 0; - -webkit-box-shadow: none; - box-shadow: none; - } - .navbar-collapse.collapse { - display: block !important; - height: auto !important; - padding-bottom: 0; - overflow: visible !important; - } - .navbar-collapse.in { - overflow-y: visible; - } - .navbar-fixed-top .navbar-collapse, - .navbar-static-top .navbar-collapse, - .navbar-fixed-bottom .navbar-collapse { - padding-right: 0; - padding-left: 0; - } -} -.navbar-fixed-top 
.navbar-collapse, -.navbar-fixed-bottom .navbar-collapse { - max-height: 340px; -} -@media (max-device-width: 480px) and (orientation: landscape) { - .navbar-fixed-top .navbar-collapse, - .navbar-fixed-bottom .navbar-collapse { - max-height: 200px; - } -} -.container > .navbar-header, -.container-fluid > .navbar-header, -.container > .navbar-collapse, -.container-fluid > .navbar-collapse { - margin-right: -15px; - margin-left: -15px; -} -@media (min-width: 768px) { - .container > .navbar-header, - .container-fluid > .navbar-header, - .container > .navbar-collapse, - .container-fluid > .navbar-collapse { - margin-right: 0; - margin-left: 0; - } -} -.navbar-static-top { - z-index: 1000; - border-width: 0 0 1px; -} -@media (min-width: 768px) { - .navbar-static-top { - border-radius: 0; - } -} -.navbar-fixed-top, -.navbar-fixed-bottom { - position: fixed; - right: 0; - left: 0; - z-index: 1030; -} -@media (min-width: 768px) { - .navbar-fixed-top, - .navbar-fixed-bottom { - border-radius: 0; - } -} -.navbar-fixed-top { - top: 0; - border-width: 0 0 1px; -} -.navbar-fixed-bottom { - bottom: 0; - margin-bottom: 0; - border-width: 1px 0 0; -} -.navbar-brand { - float: left; - height: 50px; - padding: 15px 15px; - font-size: 18px; - line-height: 20px; -} -.navbar-brand:hover, -.navbar-brand:focus { - text-decoration: none; -} -.navbar-brand > img { - display: block; -} -@media (min-width: 768px) { - .navbar > .container .navbar-brand, - .navbar > .container-fluid .navbar-brand { - margin-left: -15px; - } -} -.navbar-toggle { - position: relative; - float: right; - padding: 9px 10px; - margin-top: 8px; - margin-right: 15px; - margin-bottom: 8px; - background-color: transparent; - background-image: none; - border: 1px solid transparent; - border-radius: 4px; -} -.navbar-toggle:focus { - outline: 0; -} -.navbar-toggle .icon-bar { - display: block; - width: 22px; - height: 2px; - border-radius: 1px; -} -.navbar-toggle .icon-bar + .icon-bar { - margin-top: 4px; -} -@media (min-width: 768px) { - .navbar-toggle { - display: none; - } -} -.navbar-nav { - margin: 7.5px -15px; -} -.navbar-nav > li > a { - padding-top: 10px; - padding-bottom: 10px; - line-height: 20px; -} -@media (max-width: 767px) { - .navbar-nav .open .dropdown-menu { - position: static; - float: none; - width: auto; - margin-top: 0; - background-color: transparent; - border: 0; - -webkit-box-shadow: none; - box-shadow: none; - } - .navbar-nav .open .dropdown-menu > li > a, - .navbar-nav .open .dropdown-menu .dropdown-header { - padding: 5px 15px 5px 25px; - } - .navbar-nav .open .dropdown-menu > li > a { - line-height: 20px; - } - .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-nav .open .dropdown-menu > li > a:focus { - background-image: none; - } -} -@media (min-width: 768px) { - .navbar-nav { - float: left; - margin: 0; - } - .navbar-nav > li { - float: left; - } - .navbar-nav > li > a { - padding-top: 15px; - padding-bottom: 15px; - } -} -.navbar-form { - padding: 10px 15px; - margin-top: 8px; - margin-right: -15px; - margin-bottom: 8px; - margin-left: -15px; - border-top: 1px solid transparent; - border-bottom: 1px solid transparent; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); -} -@media (min-width: 768px) { - .navbar-form .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .form-control { - display: inline-block; - width: auto; - 
vertical-align: middle; - } - .navbar-form .form-control-static { - display: inline-block; - } - .navbar-form .input-group { - display: inline-table; - vertical-align: middle; - } - .navbar-form .input-group .input-group-addon, - .navbar-form .input-group .input-group-btn, - .navbar-form .input-group .form-control { - width: auto; - } - .navbar-form .input-group > .form-control { - width: 100%; - } - .navbar-form .control-label { - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .radio, - .navbar-form .checkbox { - display: inline-block; - margin-top: 0; - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .radio label, - .navbar-form .checkbox label { - padding-left: 0; - } - .navbar-form .radio input[type="radio"], - .navbar-form .checkbox input[type="checkbox"] { - position: relative; - margin-left: 0; - } - .navbar-form .has-feedback .form-control-feedback { - top: 0; - } -} -@media (max-width: 767px) { - .navbar-form .form-group { - margin-bottom: 5px; - } - .navbar-form .form-group:last-child { - margin-bottom: 0; - } -} -@media (min-width: 768px) { - .navbar-form { - width: auto; - padding-top: 0; - padding-bottom: 0; - margin-right: 0; - margin-left: 0; - border: 0; - -webkit-box-shadow: none; - box-shadow: none; - } -} -.navbar-nav > li > .dropdown-menu { - margin-top: 0; - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { - margin-bottom: 0; - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} -.navbar-btn { - margin-top: 8px; - margin-bottom: 8px; -} -.navbar-btn.btn-sm { - margin-top: 10px; - margin-bottom: 10px; -} -.navbar-btn.btn-xs { - margin-top: 14px; - margin-bottom: 14px; -} -.navbar-text { - margin-top: 15px; - margin-bottom: 15px; -} -@media (min-width: 768px) { - .navbar-text { - float: left; - margin-right: 15px; - margin-left: 15px; - } -} -@media (min-width: 768px) { - .navbar-left { - float: left !important; - } - .navbar-right { - float: right !important; - margin-right: -15px; - } - .navbar-right ~ .navbar-right { - margin-right: 0; - } -} -.navbar-default { - background-color: #f8f8f8; - border-color: #e7e7e7; -} -.navbar-default .navbar-brand { - color: #777; -} -.navbar-default .navbar-brand:hover, -.navbar-default .navbar-brand:focus { - color: #5e5e5e; - background-color: transparent; -} -.navbar-default .navbar-text { - color: #777; -} -.navbar-default .navbar-nav > li > a { - color: #777; -} -.navbar-default .navbar-nav > li > a:hover, -.navbar-default .navbar-nav > li > a:focus { - color: #333; - background-color: transparent; -} -.navbar-default .navbar-nav > .active > a, -.navbar-default .navbar-nav > .active > a:hover, -.navbar-default .navbar-nav > .active > a:focus { - color: #555; - background-color: #e7e7e7; -} -.navbar-default .navbar-nav > .disabled > a, -.navbar-default .navbar-nav > .disabled > a:hover, -.navbar-default .navbar-nav > .disabled > a:focus { - color: #ccc; - background-color: transparent; -} -.navbar-default .navbar-toggle { - border-color: #ddd; -} -.navbar-default .navbar-toggle:hover, -.navbar-default .navbar-toggle:focus { - background-color: #ddd; -} -.navbar-default .navbar-toggle .icon-bar { - background-color: #888; -} -.navbar-default .navbar-collapse, -.navbar-default .navbar-form { - border-color: #e7e7e7; -} -.navbar-default .navbar-nav > .open > a, -.navbar-default .navbar-nav > .open > a:hover, -.navbar-default .navbar-nav > .open > a:focus { - 
color: #555; - background-color: #e7e7e7; -} -@media (max-width: 767px) { - .navbar-default .navbar-nav .open .dropdown-menu > li > a { - color: #777; - } - .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { - color: #333; - background-color: transparent; - } - .navbar-default .navbar-nav .open .dropdown-menu > .active > a, - .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #555; - background-color: #e7e7e7; - } - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { - color: #ccc; - background-color: transparent; - } -} -.navbar-default .navbar-link { - color: #777; -} -.navbar-default .navbar-link:hover { - color: #333; -} -.navbar-default .btn-link { - color: #777; -} -.navbar-default .btn-link:hover, -.navbar-default .btn-link:focus { - color: #333; -} -.navbar-default .btn-link[disabled]:hover, -fieldset[disabled] .navbar-default .btn-link:hover, -.navbar-default .btn-link[disabled]:focus, -fieldset[disabled] .navbar-default .btn-link:focus { - color: #ccc; -} -.navbar-inverse { - background-color: #222; - border-color: #080808; -} -.navbar-inverse .navbar-brand { - color: #9d9d9d; -} -.navbar-inverse .navbar-brand:hover, -.navbar-inverse .navbar-brand:focus { - color: #fff; - background-color: transparent; -} -.navbar-inverse .navbar-text { - color: #9d9d9d; -} -.navbar-inverse .navbar-nav > li > a { - color: #9d9d9d; -} -.navbar-inverse .navbar-nav > li > a:hover, -.navbar-inverse .navbar-nav > li > a:focus { - color: #fff; - background-color: transparent; -} -.navbar-inverse .navbar-nav > .active > a, -.navbar-inverse .navbar-nav > .active > a:hover, -.navbar-inverse .navbar-nav > .active > a:focus { - color: #fff; - background-color: #080808; -} -.navbar-inverse .navbar-nav > .disabled > a, -.navbar-inverse .navbar-nav > .disabled > a:hover, -.navbar-inverse .navbar-nav > .disabled > a:focus { - color: #444; - background-color: transparent; -} -.navbar-inverse .navbar-toggle { - border-color: #333; -} -.navbar-inverse .navbar-toggle:hover, -.navbar-inverse .navbar-toggle:focus { - background-color: #333; -} -.navbar-inverse .navbar-toggle .icon-bar { - background-color: #fff; -} -.navbar-inverse .navbar-collapse, -.navbar-inverse .navbar-form { - border-color: #101010; -} -.navbar-inverse .navbar-nav > .open > a, -.navbar-inverse .navbar-nav > .open > a:hover, -.navbar-inverse .navbar-nav > .open > a:focus { - color: #fff; - background-color: #080808; -} -@media (max-width: 767px) { - .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { - border-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu .divider { - background-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { - color: #9d9d9d; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { - color: #fff; - background-color: transparent; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #fff; - background-color: #080808; - } - .navbar-inverse .navbar-nav .open 
.dropdown-menu > .disabled > a, - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { - color: #444; - background-color: transparent; - } -} -.navbar-inverse .navbar-link { - color: #9d9d9d; -} -.navbar-inverse .navbar-link:hover { - color: #fff; -} -.navbar-inverse .btn-link { - color: #9d9d9d; -} -.navbar-inverse .btn-link:hover, -.navbar-inverse .btn-link:focus { - color: #fff; -} -.navbar-inverse .btn-link[disabled]:hover, -fieldset[disabled] .navbar-inverse .btn-link:hover, -.navbar-inverse .btn-link[disabled]:focus, -fieldset[disabled] .navbar-inverse .btn-link:focus { - color: #444; -} -.breadcrumb { - padding: 8px 15px; - margin-bottom: 20px; - list-style: none; - background-color: #f5f5f5; - border-radius: 4px; -} -.breadcrumb > li { - display: inline-block; -} -.breadcrumb > li + li:before { - padding: 0 5px; - color: #ccc; - content: "/\00a0"; -} -.breadcrumb > .active { - color: #777; -} -.pagination { - display: inline-block; - padding-left: 0; - margin: 20px 0; - border-radius: 4px; -} -.pagination > li { - display: inline; -} -.pagination > li > a, -.pagination > li > span { - position: relative; - float: left; - padding: 6px 12px; - margin-left: -1px; - line-height: 1.42857143; - color: #337ab7; - text-decoration: none; - background-color: #fff; - border: 1px solid #ddd; -} -.pagination > li:first-child > a, -.pagination > li:first-child > span { - margin-left: 0; - border-top-left-radius: 4px; - border-bottom-left-radius: 4px; -} -.pagination > li:last-child > a, -.pagination > li:last-child > span { - border-top-right-radius: 4px; - border-bottom-right-radius: 4px; -} -.pagination > li > a:hover, -.pagination > li > span:hover, -.pagination > li > a:focus, -.pagination > li > span:focus { - z-index: 2; - color: #23527c; - background-color: #eee; - border-color: #ddd; -} -.pagination > .active > a, -.pagination > .active > span, -.pagination > .active > a:hover, -.pagination > .active > span:hover, -.pagination > .active > a:focus, -.pagination > .active > span:focus { - z-index: 3; - color: #fff; - cursor: default; - background-color: #337ab7; - border-color: #337ab7; -} -.pagination > .disabled > span, -.pagination > .disabled > span:hover, -.pagination > .disabled > span:focus, -.pagination > .disabled > a, -.pagination > .disabled > a:hover, -.pagination > .disabled > a:focus { - color: #777; - cursor: not-allowed; - background-color: #fff; - border-color: #ddd; -} -.pagination-lg > li > a, -.pagination-lg > li > span { - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; -} -.pagination-lg > li:first-child > a, -.pagination-lg > li:first-child > span { - border-top-left-radius: 6px; - border-bottom-left-radius: 6px; -} -.pagination-lg > li:last-child > a, -.pagination-lg > li:last-child > span { - border-top-right-radius: 6px; - border-bottom-right-radius: 6px; -} -.pagination-sm > li > a, -.pagination-sm > li > span { - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; -} -.pagination-sm > li:first-child > a, -.pagination-sm > li:first-child > span { - border-top-left-radius: 3px; - border-bottom-left-radius: 3px; -} -.pagination-sm > li:last-child > a, -.pagination-sm > li:last-child > span { - border-top-right-radius: 3px; - border-bottom-right-radius: 3px; -} -.pager { - padding-left: 0; - margin: 20px 0; - text-align: center; - list-style: none; -} -.pager li { - display: inline; -} -.pager li > a, -.pager li > span { - display: inline-block; - 
padding: 5px 14px; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 15px; -} -.pager li > a:hover, -.pager li > a:focus { - text-decoration: none; - background-color: #eee; -} -.pager .next > a, -.pager .next > span { - float: right; -} -.pager .previous > a, -.pager .previous > span { - float: left; -} -.pager .disabled > a, -.pager .disabled > a:hover, -.pager .disabled > a:focus, -.pager .disabled > span { - color: #777; - cursor: not-allowed; - background-color: #fff; -} -.label { - display: inline; - padding: .2em .6em .3em; - font-size: 75%; - font-weight: bold; - line-height: 1; - color: #fff; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: .25em; -} -a.label:hover, -a.label:focus { - color: #fff; - text-decoration: none; - cursor: pointer; -} -.label:empty { - display: none; -} -.btn .label { - position: relative; - top: -1px; -} -.label-default { - background-color: #777; -} -.label-default[href]:hover, -.label-default[href]:focus { - background-color: #5e5e5e; -} -.label-primary { - background-color: #337ab7; -} -.label-primary[href]:hover, -.label-primary[href]:focus { - background-color: #286090; -} -.label-success { - background-color: #5cb85c; -} -.label-success[href]:hover, -.label-success[href]:focus { - background-color: #449d44; -} -.label-info { - background-color: #5bc0de; -} -.label-info[href]:hover, -.label-info[href]:focus { - background-color: #31b0d5; -} -.label-warning { - background-color: #f0ad4e; -} -.label-warning[href]:hover, -.label-warning[href]:focus { - background-color: #ec971f; -} -.label-danger { - background-color: #d9534f; -} -.label-danger[href]:hover, -.label-danger[href]:focus { - background-color: #c9302c; -} -.badge { - display: inline-block; - min-width: 10px; - padding: 3px 7px; - font-size: 12px; - font-weight: bold; - line-height: 1; - color: #fff; - text-align: center; - white-space: nowrap; - vertical-align: middle; - background-color: #777; - border-radius: 10px; -} -.badge:empty { - display: none; -} -.btn .badge { - position: relative; - top: -1px; -} -.btn-xs .badge, -.btn-group-xs > .btn .badge { - top: 0; - padding: 1px 5px; -} -a.badge:hover, -a.badge:focus { - color: #fff; - text-decoration: none; - cursor: pointer; -} -.list-group-item.active > .badge, -.nav-pills > .active > a > .badge { - color: #337ab7; - background-color: #fff; -} -.list-group-item > .badge { - float: right; -} -.list-group-item > .badge + .badge { - margin-right: 5px; -} -.nav-pills > li > a > .badge { - margin-left: 3px; -} -.jumbotron { - padding-top: 30px; - padding-bottom: 30px; - margin-bottom: 30px; - color: inherit; - background-color: #eee; -} -.jumbotron h1, -.jumbotron .h1 { - color: inherit; -} -.jumbotron p { - margin-bottom: 15px; - font-size: 21px; - font-weight: 200; -} -.jumbotron > hr { - border-top-color: #d5d5d5; -} -.container .jumbotron, -.container-fluid .jumbotron { - padding-right: 15px; - padding-left: 15px; - border-radius: 6px; -} -.jumbotron .container { - max-width: 100%; -} -@media screen and (min-width: 768px) { - .jumbotron { - padding-top: 48px; - padding-bottom: 48px; - } - .container .jumbotron, - .container-fluid .jumbotron { - padding-right: 60px; - padding-left: 60px; - } - .jumbotron h1, - .jumbotron .h1 { - font-size: 63px; - } -} -.thumbnail { - display: block; - padding: 4px; - margin-bottom: 20px; - line-height: 1.42857143; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 4px; - -webkit-transition: border .2s ease-in-out; - 
-o-transition: border .2s ease-in-out; - transition: border .2s ease-in-out; -} -.thumbnail > img, -.thumbnail a > img { - margin-right: auto; - margin-left: auto; -} -a.thumbnail:hover, -a.thumbnail:focus, -a.thumbnail.active { - border-color: #337ab7; -} -.thumbnail .caption { - padding: 9px; - color: #333; -} -.alert { - padding: 15px; - margin-bottom: 20px; - border: 1px solid transparent; - border-radius: 4px; -} -.alert h4 { - margin-top: 0; - color: inherit; -} -.alert .alert-link { - font-weight: bold; -} -.alert > p, -.alert > ul { - margin-bottom: 0; -} -.alert > p + p { - margin-top: 5px; -} -.alert-dismissable, -.alert-dismissible { - padding-right: 35px; -} -.alert-dismissable .close, -.alert-dismissible .close { - position: relative; - top: -2px; - right: -21px; - color: inherit; -} -.alert-success { - color: #3c763d; - background-color: #dff0d8; - border-color: #d6e9c6; -} -.alert-success hr { - border-top-color: #c9e2b3; -} -.alert-success .alert-link { - color: #2b542c; -} -.alert-info { - color: #31708f; - background-color: #d9edf7; - border-color: #bce8f1; -} -.alert-info hr { - border-top-color: #a6e1ec; -} -.alert-info .alert-link { - color: #245269; -} -.alert-warning { - color: #8a6d3b; - background-color: #fcf8e3; - border-color: #faebcc; -} -.alert-warning hr { - border-top-color: #f7e1b5; -} -.alert-warning .alert-link { - color: #66512c; -} -.alert-danger { - color: #a94442; - background-color: #f2dede; - border-color: #ebccd1; -} -.alert-danger hr { - border-top-color: #e4b9c0; -} -.alert-danger .alert-link { - color: #843534; -} -@-webkit-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} -@-o-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} -@keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} -.progress { - height: 20px; - margin-bottom: 20px; - overflow: hidden; - background-color: #f5f5f5; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); -} -.progress-bar { - float: left; - width: 0; - height: 100%; - font-size: 12px; - line-height: 20px; - color: #fff; - text-align: center; - background-color: #337ab7; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); - -webkit-transition: width .6s ease; - -o-transition: width .6s ease; - transition: width .6s ease; -} -.progress-striped .progress-bar, -.progress-bar-striped { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - -webkit-background-size: 40px 40px; - background-size: 40px 40px; -} -.progress.active .progress-bar, -.progress-bar.active { - -webkit-animation: progress-bar-stripes 2s linear infinite; - -o-animation: progress-bar-stripes 2s linear infinite; - animation: progress-bar-stripes 2s linear infinite; -} 
-.progress-bar-success { - background-color: #5cb85c; -} -.progress-striped .progress-bar-success { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); -} -.progress-bar-info { - background-color: #5bc0de; -} -.progress-striped .progress-bar-info { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); -} -.progress-bar-warning { - background-color: #f0ad4e; -} -.progress-striped .progress-bar-warning { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); -} -.progress-bar-danger { - background-color: #d9534f; -} -.progress-striped .progress-bar-danger { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); -} -.media { - margin-top: 15px; -} -.media:first-child { - margin-top: 0; -} -.media, -.media-body { - overflow: hidden; - zoom: 1; -} -.media-body { - width: 10000px; -} -.media-object { - display: block; -} -.media-object.img-thumbnail { - max-width: none; -} -.media-right, -.media > .pull-right { - padding-left: 10px; -} -.media-left, -.media > .pull-left { - padding-right: 10px; -} -.media-left, -.media-right, -.media-body { - display: table-cell; - vertical-align: top; -} -.media-middle { - vertical-align: middle; -} -.media-bottom { - vertical-align: bottom; -} -.media-heading { - margin-top: 0; - margin-bottom: 5px; -} -.media-list { - padding-left: 0; - list-style: none; -} -.list-group { - padding-left: 0; - 
margin-bottom: 20px; -} -.list-group-item { - position: relative; - display: block; - padding: 10px 15px; - margin-bottom: -1px; - background-color: #fff; - border: 1px solid #ddd; -} -.list-group-item:first-child { - border-top-left-radius: 4px; - border-top-right-radius: 4px; -} -.list-group-item:last-child { - margin-bottom: 0; - border-bottom-right-radius: 4px; - border-bottom-left-radius: 4px; -} -a.list-group-item, -button.list-group-item { - color: #555; -} -a.list-group-item .list-group-item-heading, -button.list-group-item .list-group-item-heading { - color: #333; -} -a.list-group-item:hover, -button.list-group-item:hover, -a.list-group-item:focus, -button.list-group-item:focus { - color: #555; - text-decoration: none; - background-color: #f5f5f5; -} -button.list-group-item { - width: 100%; - text-align: left; -} -.list-group-item.disabled, -.list-group-item.disabled:hover, -.list-group-item.disabled:focus { - color: #777; - cursor: not-allowed; - background-color: #eee; -} -.list-group-item.disabled .list-group-item-heading, -.list-group-item.disabled:hover .list-group-item-heading, -.list-group-item.disabled:focus .list-group-item-heading { - color: inherit; -} -.list-group-item.disabled .list-group-item-text, -.list-group-item.disabled:hover .list-group-item-text, -.list-group-item.disabled:focus .list-group-item-text { - color: #777; -} -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - z-index: 2; - color: #fff; - background-color: #337ab7; - border-color: #337ab7; -} -.list-group-item.active .list-group-item-heading, -.list-group-item.active:hover .list-group-item-heading, -.list-group-item.active:focus .list-group-item-heading, -.list-group-item.active .list-group-item-heading > small, -.list-group-item.active:hover .list-group-item-heading > small, -.list-group-item.active:focus .list-group-item-heading > small, -.list-group-item.active .list-group-item-heading > .small, -.list-group-item.active:hover .list-group-item-heading > .small, -.list-group-item.active:focus .list-group-item-heading > .small { - color: inherit; -} -.list-group-item.active .list-group-item-text, -.list-group-item.active:hover .list-group-item-text, -.list-group-item.active:focus .list-group-item-text { - color: #c7ddef; -} -.list-group-item-success { - color: #3c763d; - background-color: #dff0d8; -} -a.list-group-item-success, -button.list-group-item-success { - color: #3c763d; -} -a.list-group-item-success .list-group-item-heading, -button.list-group-item-success .list-group-item-heading { - color: inherit; -} -a.list-group-item-success:hover, -button.list-group-item-success:hover, -a.list-group-item-success:focus, -button.list-group-item-success:focus { - color: #3c763d; - background-color: #d0e9c6; -} -a.list-group-item-success.active, -button.list-group-item-success.active, -a.list-group-item-success.active:hover, -button.list-group-item-success.active:hover, -a.list-group-item-success.active:focus, -button.list-group-item-success.active:focus { - color: #fff; - background-color: #3c763d; - border-color: #3c763d; -} -.list-group-item-info { - color: #31708f; - background-color: #d9edf7; -} -a.list-group-item-info, -button.list-group-item-info { - color: #31708f; -} -a.list-group-item-info .list-group-item-heading, -button.list-group-item-info .list-group-item-heading { - color: inherit; -} -a.list-group-item-info:hover, -button.list-group-item-info:hover, -a.list-group-item-info:focus, -button.list-group-item-info:focus { - color: #31708f; - 
background-color: #c4e3f3; -} -a.list-group-item-info.active, -button.list-group-item-info.active, -a.list-group-item-info.active:hover, -button.list-group-item-info.active:hover, -a.list-group-item-info.active:focus, -button.list-group-item-info.active:focus { - color: #fff; - background-color: #31708f; - border-color: #31708f; -} -.list-group-item-warning { - color: #8a6d3b; - background-color: #fcf8e3; -} -a.list-group-item-warning, -button.list-group-item-warning { - color: #8a6d3b; -} -a.list-group-item-warning .list-group-item-heading, -button.list-group-item-warning .list-group-item-heading { - color: inherit; -} -a.list-group-item-warning:hover, -button.list-group-item-warning:hover, -a.list-group-item-warning:focus, -button.list-group-item-warning:focus { - color: #8a6d3b; - background-color: #faf2cc; -} -a.list-group-item-warning.active, -button.list-group-item-warning.active, -a.list-group-item-warning.active:hover, -button.list-group-item-warning.active:hover, -a.list-group-item-warning.active:focus, -button.list-group-item-warning.active:focus { - color: #fff; - background-color: #8a6d3b; - border-color: #8a6d3b; -} -.list-group-item-danger { - color: #a94442; - background-color: #f2dede; -} -a.list-group-item-danger, -button.list-group-item-danger { - color: #a94442; -} -a.list-group-item-danger .list-group-item-heading, -button.list-group-item-danger .list-group-item-heading { - color: inherit; -} -a.list-group-item-danger:hover, -button.list-group-item-danger:hover, -a.list-group-item-danger:focus, -button.list-group-item-danger:focus { - color: #a94442; - background-color: #ebcccc; -} -a.list-group-item-danger.active, -button.list-group-item-danger.active, -a.list-group-item-danger.active:hover, -button.list-group-item-danger.active:hover, -a.list-group-item-danger.active:focus, -button.list-group-item-danger.active:focus { - color: #fff; - background-color: #a94442; - border-color: #a94442; -} -.list-group-item-heading { - margin-top: 0; - margin-bottom: 5px; -} -.list-group-item-text { - margin-bottom: 0; - line-height: 1.3; -} -.panel { - margin-bottom: 20px; - background-color: #fff; - border: 1px solid transparent; - border-radius: 4px; - -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05); - box-shadow: 0 1px 1px rgba(0, 0, 0, .05); -} -.panel-body { - padding: 15px; -} -.panel-heading { - padding: 10px 15px; - border-bottom: 1px solid transparent; - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel-heading > .dropdown .dropdown-toggle { - color: inherit; -} -.panel-title { - margin-top: 0; - margin-bottom: 0; - font-size: 16px; - color: inherit; -} -.panel-title > a, -.panel-title > small, -.panel-title > .small, -.panel-title > small > a, -.panel-title > .small > a { - color: inherit; -} -.panel-footer { - padding: 10px 15px; - background-color: #f5f5f5; - border-top: 1px solid #ddd; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .list-group, -.panel > .panel-collapse > .list-group { - margin-bottom: 0; -} -.panel > .list-group .list-group-item, -.panel > .panel-collapse > .list-group .list-group-item { - border-width: 1px 0; - border-radius: 0; -} -.panel > .list-group:first-child .list-group-item:first-child, -.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child { - border-top: 0; - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel > .list-group:last-child .list-group-item:last-child, -.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child 
{ - border-bottom: 0; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child { - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.panel-heading + .list-group .list-group-item:first-child { - border-top-width: 0; -} -.list-group + .panel-footer { - border-top-width: 0; -} -.panel > .table, -.panel > .table-responsive > .table, -.panel > .panel-collapse > .table { - margin-bottom: 0; -} -.panel > .table caption, -.panel > .table-responsive > .table caption, -.panel > .panel-collapse > .table caption { - padding-right: 15px; - padding-left: 15px; -} -.panel > .table:first-child, -.panel > .table-responsive:first-child > .table:first-child { - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel > .table:first-child > thead:first-child > tr:first-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child, -.panel > .table:first-child > tbody:first-child > tr:first-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child { - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel > .table:first-child > thead:first-child > tr:first-child td:first-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child, -.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child, -.panel > .table:first-child > thead:first-child > tr:first-child th:first-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child, -.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child { - border-top-left-radius: 3px; -} -.panel > .table:first-child > thead:first-child > tr:first-child td:last-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child, -.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child, -.panel > .table:first-child > thead:first-child > tr:first-child th:last-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child, -.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child { - border-top-right-radius: 3px; -} -.panel > .table:last-child, -.panel > .table-responsive:last-child > .table:last-child { - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .table:last-child > tbody:last-child > tr:last-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child { - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child, -.panel > .table-responsive:last-child > .table:last-child > 
tbody:last-child > tr:last-child td:first-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child, -.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child { - border-bottom-left-radius: 3px; -} -.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child, -.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child { - border-bottom-right-radius: 3px; -} -.panel > .panel-body + .table, -.panel > .panel-body + .table-responsive, -.panel > .table + .panel-body, -.panel > .table-responsive + .panel-body { - border-top: 1px solid #ddd; -} -.panel > .table > tbody:first-child > tr:first-child th, -.panel > .table > tbody:first-child > tr:first-child td { - border-top: 0; -} -.panel > .table-bordered, -.panel > .table-responsive > .table-bordered { - border: 0; -} -.panel > .table-bordered > thead > tr > th:first-child, -.panel > .table-responsive > .table-bordered > thead > tr > th:first-child, -.panel > .table-bordered > tbody > tr > th:first-child, -.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child, -.panel > .table-bordered > tfoot > tr > th:first-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child, -.panel > .table-bordered > thead > tr > td:first-child, -.panel > .table-responsive > .table-bordered > thead > tr > td:first-child, -.panel > .table-bordered > tbody > tr > td:first-child, -.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child, -.panel > .table-bordered > tfoot > tr > td:first-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child { - border-left: 0; -} -.panel > .table-bordered > thead > tr > th:last-child, -.panel > .table-responsive > .table-bordered > thead > tr > th:last-child, -.panel > .table-bordered > tbody > tr > th:last-child, -.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child, -.panel > .table-bordered > tfoot > tr > th:last-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child, -.panel > .table-bordered > thead > tr > td:last-child, -.panel > .table-responsive > .table-bordered > thead > tr > td:last-child, -.panel > .table-bordered > tbody > tr > td:last-child, -.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child, -.panel > .table-bordered > tfoot > tr > td:last-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child { - border-right: 0; -} -.panel > .table-bordered > thead > tr:first-child > td, 
-.panel > .table-responsive > .table-bordered > thead > tr:first-child > td, -.panel > .table-bordered > tbody > tr:first-child > td, -.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td, -.panel > .table-bordered > thead > tr:first-child > th, -.panel > .table-responsive > .table-bordered > thead > tr:first-child > th, -.panel > .table-bordered > tbody > tr:first-child > th, -.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th { - border-bottom: 0; -} -.panel > .table-bordered > tbody > tr:last-child > td, -.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td, -.panel > .table-bordered > tfoot > tr:last-child > td, -.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td, -.panel > .table-bordered > tbody > tr:last-child > th, -.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th, -.panel > .table-bordered > tfoot > tr:last-child > th, -.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th { - border-bottom: 0; -} -.panel > .table-responsive { - margin-bottom: 0; - border: 0; -} -.panel-group { - margin-bottom: 20px; -} -.panel-group .panel { - margin-bottom: 0; - border-radius: 4px; -} -.panel-group .panel + .panel { - margin-top: 5px; -} -.panel-group .panel-heading { - border-bottom: 0; -} -.panel-group .panel-heading + .panel-collapse > .panel-body, -.panel-group .panel-heading + .panel-collapse > .list-group { - border-top: 1px solid #ddd; -} -.panel-group .panel-footer { - border-top: 0; -} -.panel-group .panel-footer + .panel-collapse .panel-body { - border-bottom: 1px solid #ddd; -} -.panel-default { - border-color: #ddd; -} -.panel-default > .panel-heading { - color: #333; - background-color: #f5f5f5; - border-color: #ddd; -} -.panel-default > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #ddd; -} -.panel-default > .panel-heading .badge { - color: #f5f5f5; - background-color: #333; -} -.panel-default > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #ddd; -} -.panel-primary { - border-color: #337ab7; -} -.panel-primary > .panel-heading { - color: #fff; - background-color: #337ab7; - border-color: #337ab7; -} -.panel-primary > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #337ab7; -} -.panel-primary > .panel-heading .badge { - color: #337ab7; - background-color: #fff; -} -.panel-primary > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #337ab7; -} -.panel-success { - border-color: #d6e9c6; -} -.panel-success > .panel-heading { - color: #3c763d; - background-color: #dff0d8; - border-color: #d6e9c6; -} -.panel-success > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #d6e9c6; -} -.panel-success > .panel-heading .badge { - color: #dff0d8; - background-color: #3c763d; -} -.panel-success > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #d6e9c6; -} -.panel-info { - border-color: #bce8f1; -} -.panel-info > .panel-heading { - color: #31708f; - background-color: #d9edf7; - border-color: #bce8f1; -} -.panel-info > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #bce8f1; -} -.panel-info > .panel-heading .badge { - color: #d9edf7; - background-color: #31708f; -} -.panel-info > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #bce8f1; -} -.panel-warning { - border-color: #faebcc; -} -.panel-warning > .panel-heading { - color: #8a6d3b; - background-color: #fcf8e3; - border-color: #faebcc; -} 
-.panel-warning > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #faebcc; -} -.panel-warning > .panel-heading .badge { - color: #fcf8e3; - background-color: #8a6d3b; -} -.panel-warning > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #faebcc; -} -.panel-danger { - border-color: #ebccd1; -} -.panel-danger > .panel-heading { - color: #a94442; - background-color: #f2dede; - border-color: #ebccd1; -} -.panel-danger > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #ebccd1; -} -.panel-danger > .panel-heading .badge { - color: #f2dede; - background-color: #a94442; -} -.panel-danger > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #ebccd1; -} -.embed-responsive { - position: relative; - display: block; - height: 0; - padding: 0; - overflow: hidden; -} -.embed-responsive .embed-responsive-item, -.embed-responsive iframe, -.embed-responsive embed, -.embed-responsive object, -.embed-responsive video { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; -} -.embed-responsive-16by9 { - padding-bottom: 56.25%; -} -.embed-responsive-4by3 { - padding-bottom: 75%; -} -.well { - min-height: 20px; - padding: 19px; - margin-bottom: 20px; - background-color: #f5f5f5; - border: 1px solid #e3e3e3; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); -} -.well blockquote { - border-color: #ddd; - border-color: rgba(0, 0, 0, .15); -} -.well-lg { - padding: 24px; - border-radius: 6px; -} -.well-sm { - padding: 9px; - border-radius: 3px; -} -.close { - float: right; - font-size: 21px; - font-weight: bold; - line-height: 1; - color: #000; - text-shadow: 0 1px 0 #fff; - filter: alpha(opacity=20); - opacity: .2; -} -.close:hover, -.close:focus { - color: #000; - text-decoration: none; - cursor: pointer; - filter: alpha(opacity=50); - opacity: .5; -} -button.close { - -webkit-appearance: none; - padding: 0; - cursor: pointer; - background: transparent; - border: 0; -} -.modal-open { - overflow: hidden; -} -.modal { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1050; - display: none; - overflow: hidden; - -webkit-overflow-scrolling: touch; - outline: 0; -} -.modal.fade .modal-dialog { - -webkit-transition: -webkit-transform .3s ease-out; - -o-transition: -o-transform .3s ease-out; - transition: transform .3s ease-out; - -webkit-transform: translate(0, -25%); - -ms-transform: translate(0, -25%); - -o-transform: translate(0, -25%); - transform: translate(0, -25%); -} -.modal.in .modal-dialog { - -webkit-transform: translate(0, 0); - -ms-transform: translate(0, 0); - -o-transform: translate(0, 0); - transform: translate(0, 0); -} -.modal-open .modal { - overflow-x: hidden; - overflow-y: auto; -} -.modal-dialog { - position: relative; - width: auto; - margin: 10px; -} -.modal-content { - position: relative; - background-color: #fff; - -webkit-background-clip: padding-box; - background-clip: padding-box; - border: 1px solid #999; - border: 1px solid rgba(0, 0, 0, .2); - border-radius: 6px; - outline: 0; - -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5); - box-shadow: 0 3px 9px rgba(0, 0, 0, .5); -} -.modal-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1040; - background-color: #000; -} -.modal-backdrop.fade { - filter: alpha(opacity=0); - opacity: 0; -} -.modal-backdrop.in { - filter: alpha(opacity=50); - opacity: .5; -} -.modal-header { - 
padding: 15px; - border-bottom: 1px solid #e5e5e5; -} -.modal-header .close { - margin-top: -2px; -} -.modal-title { - margin: 0; - line-height: 1.42857143; -} -.modal-body { - position: relative; - padding: 15px; -} -.modal-footer { - padding: 15px; - text-align: right; - border-top: 1px solid #e5e5e5; -} -.modal-footer .btn + .btn { - margin-bottom: 0; - margin-left: 5px; -} -.modal-footer .btn-group .btn + .btn { - margin-left: -1px; -} -.modal-footer .btn-block + .btn-block { - margin-left: 0; -} -.modal-scrollbar-measure { - position: absolute; - top: -9999px; - width: 50px; - height: 50px; - overflow: scroll; -} -@media (min-width: 768px) { - .modal-dialog { - width: 600px; - margin: 30px auto; - } - .modal-content { - -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5); - box-shadow: 0 5px 15px rgba(0, 0, 0, .5); - } - .modal-sm { - width: 300px; - } -} -@media (min-width: 992px) { - .modal-lg { - width: 900px; - } -} -.tooltip { - position: absolute; - z-index: 1070; - display: block; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 12px; - font-style: normal; - font-weight: normal; - line-height: 1.42857143; - text-align: left; - text-align: start; - text-decoration: none; - text-shadow: none; - text-transform: none; - letter-spacing: normal; - word-break: normal; - word-spacing: normal; - word-wrap: normal; - white-space: normal; - filter: alpha(opacity=0); - opacity: 0; - - line-break: auto; -} -.tooltip.in { - filter: alpha(opacity=90); - opacity: .9; -} -.tooltip.top { - padding: 5px 0; - margin-top: -3px; -} -.tooltip.right { - padding: 0 5px; - margin-left: 3px; -} -.tooltip.bottom { - padding: 5px 0; - margin-top: 3px; -} -.tooltip.left { - padding: 0 5px; - margin-left: -3px; -} -.tooltip-inner { - max-width: 200px; - padding: 3px 8px; - color: #fff; - text-align: center; - background-color: #000; - border-radius: 4px; -} -.tooltip-arrow { - position: absolute; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} -.tooltip.top .tooltip-arrow { - bottom: 0; - left: 50%; - margin-left: -5px; - border-width: 5px 5px 0; - border-top-color: #000; -} -.tooltip.top-left .tooltip-arrow { - right: 5px; - bottom: 0; - margin-bottom: -5px; - border-width: 5px 5px 0; - border-top-color: #000; -} -.tooltip.top-right .tooltip-arrow { - bottom: 0; - left: 5px; - margin-bottom: -5px; - border-width: 5px 5px 0; - border-top-color: #000; -} -.tooltip.right .tooltip-arrow { - top: 50%; - left: 0; - margin-top: -5px; - border-width: 5px 5px 5px 0; - border-right-color: #000; -} -.tooltip.left .tooltip-arrow { - top: 50%; - right: 0; - margin-top: -5px; - border-width: 5px 0 5px 5px; - border-left-color: #000; -} -.tooltip.bottom .tooltip-arrow { - top: 0; - left: 50%; - margin-left: -5px; - border-width: 0 5px 5px; - border-bottom-color: #000; -} -.tooltip.bottom-left .tooltip-arrow { - top: 0; - right: 5px; - margin-top: -5px; - border-width: 0 5px 5px; - border-bottom-color: #000; -} -.tooltip.bottom-right .tooltip-arrow { - top: 0; - left: 5px; - margin-top: -5px; - border-width: 0 5px 5px; - border-bottom-color: #000; -} -.popover { - position: absolute; - top: 0; - left: 0; - z-index: 1060; - display: none; - max-width: 276px; - padding: 1px; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - font-style: normal; - font-weight: normal; - line-height: 1.42857143; - text-align: left; - text-align: start; - text-decoration: none; - text-shadow: none; - text-transform: none; - letter-spacing: normal; - 
word-break: normal; - word-spacing: normal; - word-wrap: normal; - white-space: normal; - background-color: #fff; - -webkit-background-clip: padding-box; - background-clip: padding-box; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, .2); - border-radius: 6px; - -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2); - box-shadow: 0 5px 10px rgba(0, 0, 0, .2); - - line-break: auto; -} -.popover.top { - margin-top: -10px; -} -.popover.right { - margin-left: 10px; -} -.popover.bottom { - margin-top: 10px; -} -.popover.left { - margin-left: -10px; -} -.popover-title { - padding: 8px 14px; - margin: 0; - font-size: 14px; - background-color: #f7f7f7; - border-bottom: 1px solid #ebebeb; - border-radius: 5px 5px 0 0; -} -.popover-content { - padding: 9px 14px; -} -.popover > .arrow, -.popover > .arrow:after { - position: absolute; - display: block; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} -.popover > .arrow { - border-width: 11px; -} -.popover > .arrow:after { - content: ""; - border-width: 10px; -} -.popover.top > .arrow { - bottom: -11px; - left: 50%; - margin-left: -11px; - border-top-color: #999; - border-top-color: rgba(0, 0, 0, .25); - border-bottom-width: 0; -} -.popover.top > .arrow:after { - bottom: 1px; - margin-left: -10px; - content: " "; - border-top-color: #fff; - border-bottom-width: 0; -} -.popover.right > .arrow { - top: 50%; - left: -11px; - margin-top: -11px; - border-right-color: #999; - border-right-color: rgba(0, 0, 0, .25); - border-left-width: 0; -} -.popover.right > .arrow:after { - bottom: -10px; - left: 1px; - content: " "; - border-right-color: #fff; - border-left-width: 0; -} -.popover.bottom > .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-top-width: 0; - border-bottom-color: #999; - border-bottom-color: rgba(0, 0, 0, .25); -} -.popover.bottom > .arrow:after { - top: 1px; - margin-left: -10px; - content: " "; - border-top-width: 0; - border-bottom-color: #fff; -} -.popover.left > .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-right-width: 0; - border-left-color: #999; - border-left-color: rgba(0, 0, 0, .25); -} -.popover.left > .arrow:after { - right: 1px; - bottom: -10px; - content: " "; - border-right-width: 0; - border-left-color: #fff; -} -.carousel { - position: relative; -} -.carousel-inner { - position: relative; - width: 100%; - overflow: hidden; -} -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: .6s ease-in-out left; - -o-transition: .6s ease-in-out left; - transition: .6s ease-in-out left; -} -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - line-height: 1; -} -@media all and (transform-3d), (-webkit-transform-3d) { - .carousel-inner > .item { - -webkit-transition: -webkit-transform .6s ease-in-out; - -o-transition: -o-transform .6s ease-in-out; - transition: transform .6s ease-in-out; - - -webkit-backface-visibility: hidden; - backface-visibility: hidden; - -webkit-perspective: 1000px; - perspective: 1000px; - } - .carousel-inner > .item.next, - .carousel-inner > .item.active.right { - left: 0; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } - .carousel-inner > .item.prev, - .carousel-inner > .item.active.left { - left: 0; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } - .carousel-inner > .item.next.left, - .carousel-inner > .item.prev.right, - .carousel-inner > .item.active { - left: 0; - -webkit-transform: translate3d(0, 0, 0); - 
transform: translate3d(0, 0, 0); - } -} -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} -.carousel-inner > .active { - left: 0; -} -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} -.carousel-inner > .next { - left: 100%; -} -.carousel-inner > .prev { - left: -100%; -} -.carousel-inner > .next.left, -.carousel-inner > .prev.right { - left: 0; -} -.carousel-inner > .active.left { - left: -100%; -} -.carousel-inner > .active.right { - left: 100%; -} -.carousel-control { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 15%; - font-size: 20px; - color: #fff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, .6); - background-color: rgba(0, 0, 0, 0); - filter: alpha(opacity=50); - opacity: .5; -} -.carousel-control.left { - background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); - background-image: -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); - background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001))); - background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); - background-repeat: repeat-x; -} -.carousel-control.right { - right: 0; - left: auto; - background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); - background-image: -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); - background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5))); - background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); - background-repeat: repeat-x; -} -.carousel-control:hover, -.carousel-control:focus { - color: #fff; - text-decoration: none; - filter: alpha(opacity=90); - outline: 0; - opacity: .9; -} -.carousel-control .icon-prev, -.carousel-control .icon-next, -.carousel-control .glyphicon-chevron-left, -.carousel-control .glyphicon-chevron-right { - position: absolute; - top: 50%; - z-index: 5; - display: inline-block; - margin-top: -10px; -} -.carousel-control .icon-prev, -.carousel-control .glyphicon-chevron-left { - left: 50%; - margin-left: -10px; -} -.carousel-control .icon-next, -.carousel-control .glyphicon-chevron-right { - right: 50%; - margin-right: -10px; -} -.carousel-control .icon-prev, -.carousel-control .icon-next { - width: 20px; - height: 20px; - font-family: serif; - line-height: 1; -} -.carousel-control .icon-prev:before { - content: '\2039'; -} -.carousel-control .icon-next:before { - content: '\203a'; -} -.carousel-indicators { - position: absolute; - bottom: 10px; - left: 50%; - z-index: 15; - width: 60%; - padding-left: 0; - margin-left: -30%; - text-align: center; - list-style: none; -} -.carousel-indicators li { - display: inline-block; - width: 10px; - height: 10px; - margin: 1px; - text-indent: -999px; - cursor: pointer; - background-color: #000 \9; - background-color: rgba(0, 0, 0, 0); - border: 1px solid #fff; - border-radius: 10px; -} -.carousel-indicators .active { - width: 12px; - height: 12px; - margin: 0; - background-color: #fff; -} -.carousel-caption { - position: absolute; - right: 15%; - 
bottom: 20px; - left: 15%; - z-index: 10; - padding-top: 20px; - padding-bottom: 20px; - color: #fff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, .6); -} -.carousel-caption .btn { - text-shadow: none; -} -@media screen and (min-width: 768px) { - .carousel-control .glyphicon-chevron-left, - .carousel-control .glyphicon-chevron-right, - .carousel-control .icon-prev, - .carousel-control .icon-next { - width: 30px; - height: 30px; - margin-top: -10px; - font-size: 30px; - } - .carousel-control .glyphicon-chevron-left, - .carousel-control .icon-prev { - margin-left: -10px; - } - .carousel-control .glyphicon-chevron-right, - .carousel-control .icon-next { - margin-right: -10px; - } - .carousel-caption { - right: 20%; - left: 20%; - padding-bottom: 30px; - } - .carousel-indicators { - bottom: 20px; - } -} -.clearfix:before, -.clearfix:after, -.dl-horizontal dd:before, -.dl-horizontal dd:after, -.container:before, -.container:after, -.container-fluid:before, -.container-fluid:after, -.row:before, -.row:after, -.form-horizontal .form-group:before, -.form-horizontal .form-group:after, -.btn-toolbar:before, -.btn-toolbar:after, -.btn-group-vertical > .btn-group:before, -.btn-group-vertical > .btn-group:after, -.nav:before, -.nav:after, -.navbar:before, -.navbar:after, -.navbar-header:before, -.navbar-header:after, -.navbar-collapse:before, -.navbar-collapse:after, -.pager:before, -.pager:after, -.panel-body:before, -.panel-body:after, -.modal-header:before, -.modal-header:after, -.modal-footer:before, -.modal-footer:after { - display: table; - content: " "; -} -.clearfix:after, -.dl-horizontal dd:after, -.container:after, -.container-fluid:after, -.row:after, -.form-horizontal .form-group:after, -.btn-toolbar:after, -.btn-group-vertical > .btn-group:after, -.nav:after, -.navbar:after, -.navbar-header:after, -.navbar-collapse:after, -.pager:after, -.panel-body:after, -.modal-header:after, -.modal-footer:after { - clear: both; -} -.center-block { - display: block; - margin-right: auto; - margin-left: auto; -} -.pull-right { - float: right !important; -} -.pull-left { - float: left !important; -} -.hide { - display: none !important; -} -.show { - display: block !important; -} -.invisible { - visibility: hidden; -} -.text-hide { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} -.hidden { - display: none !important; -} -.affix { - position: fixed; -} -@-ms-viewport { - width: device-width; -} -.visible-xs, -.visible-sm, -.visible-md, -.visible-lg { - display: none !important; -} -.visible-xs-block, -.visible-xs-inline, -.visible-xs-inline-block, -.visible-sm-block, -.visible-sm-inline, -.visible-sm-inline-block, -.visible-md-block, -.visible-md-inline, -.visible-md-inline-block, -.visible-lg-block, -.visible-lg-inline, -.visible-lg-inline-block { - display: none !important; -} -@media (max-width: 767px) { - .visible-xs { - display: block !important; - } - table.visible-xs { - display: table !important; - } - tr.visible-xs { - display: table-row !important; - } - th.visible-xs, - td.visible-xs { - display: table-cell !important; - } -} -@media (max-width: 767px) { - .visible-xs-block { - display: block !important; - } -} -@media (max-width: 767px) { - .visible-xs-inline { - display: inline !important; - } -} -@media (max-width: 767px) { - .visible-xs-inline-block { - display: inline-block !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm { - display: block !important; - } - table.visible-sm { - 
display: table !important; - } - tr.visible-sm { - display: table-row !important; - } - th.visible-sm, - td.visible-sm { - display: table-cell !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm-block { - display: block !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm-inline { - display: inline !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm-inline-block { - display: inline-block !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md { - display: block !important; - } - table.visible-md { - display: table !important; - } - tr.visible-md { - display: table-row !important; - } - th.visible-md, - td.visible-md { - display: table-cell !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md-block { - display: block !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md-inline { - display: inline !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md-inline-block { - display: inline-block !important; - } -} -@media (min-width: 1200px) { - .visible-lg { - display: block !important; - } - table.visible-lg { - display: table !important; - } - tr.visible-lg { - display: table-row !important; - } - th.visible-lg, - td.visible-lg { - display: table-cell !important; - } -} -@media (min-width: 1200px) { - .visible-lg-block { - display: block !important; - } -} -@media (min-width: 1200px) { - .visible-lg-inline { - display: inline !important; - } -} -@media (min-width: 1200px) { - .visible-lg-inline-block { - display: inline-block !important; - } -} -@media (max-width: 767px) { - .hidden-xs { - display: none !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .hidden-sm { - display: none !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-md { - display: none !important; - } -} -@media (min-width: 1200px) { - .hidden-lg { - display: none !important; - } -} -.visible-print { - display: none !important; -} -@media print { - .visible-print { - display: block !important; - } - table.visible-print { - display: table !important; - } - tr.visible-print { - display: table-row !important; - } - th.visible-print, - td.visible-print { - display: table-cell !important; - } -} -.visible-print-block { - display: none !important; -} -@media print { - .visible-print-block { - display: block !important; - } -} -.visible-print-inline { - display: none !important; -} -@media print { - .visible-print-inline { - display: inline !important; - } -} -.visible-print-inline-block { - display: none !important; -} -@media print { - .visible-print-inline-block { - display: inline-block !important; - } -} -@media print { - .hidden-print { - display: none !important; - } -} -/*# sourceMappingURL=bootstrap.css.map */ diff --git a/web/css/bootstrap-slate-flat-3.3.7.css b/web/css/bootstrap-slate-flat-3.3.7.css deleted file mode 100644 index 98a8c9fcc..000000000 --- a/web/css/bootstrap-slate-flat-3.3.7.css +++ /dev/null @@ -1,7100 +0,0 @@ -/*! - * bootswatch v3.3.7 - * Homepage: http://bootswatch.com - * Copyright 2012-2016 Thomas Park - * Licensed under MIT - * Based on Bootstrap -*/ -/*! - * Bootstrap v3.3.7 (http://getbootstrap.com) - * Copyright 2011-2016 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */ -/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ -html { - font-family: sans-serif; - -ms-text-size-adjust: 100%; - -webkit-text-size-adjust: 100%; -} -body { - margin: 0; -} -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -main, -menu, -nav, -section, -summary { - display: block; -} -audio, -canvas, -progress, -video { - display: inline-block; - vertical-align: baseline; -} -audio:not([controls]) { - display: none; - height: 0; -} -[hidden], -template { - display: none; -} -a { - background-color: transparent; -} -a:active, -a:hover { - outline: 0; -} -abbr[title] { - border-bottom: 1px dotted; -} -b, -strong { - font-weight: bold; -} -dfn { - font-style: italic; -} -h1 { - font-size: 2em; - margin: 0.67em 0; -} -mark { - background: #ff0; - color: #000; -} -small { - font-size: 80%; -} -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -sup { - top: -0.5em; -} -sub { - bottom: -0.25em; -} -img { - border: 0; -} -svg:not(:root) { - overflow: hidden; -} -figure { - margin: 1em 40px; -} -hr { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - height: 0; -} -pre { - overflow: auto; -} -code, -kbd, -pre, -samp { - font-family: monospace, monospace; - font-size: 1em; -} -button, -input, -optgroup, -select, -textarea { - color: inherit; - font: inherit; - margin: 0; -} -button { - overflow: visible; -} -button, -select { - text-transform: none; -} -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - -webkit-appearance: button; - cursor: pointer; -} -button[disabled], -html input[disabled] { - cursor: default; -} -button::-moz-focus-inner, -input::-moz-focus-inner { - border: 0; - padding: 0; -} -input { - line-height: normal; -} -input[type="checkbox"], -input[type="radio"] { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - padding: 0; -} -input[type="number"]::-webkit-inner-spin-button, -input[type="number"]::-webkit-outer-spin-button { - height: auto; -} -input[type="search"] { - -webkit-appearance: textfield; - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; -} -input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} -fieldset { - border: 1px solid #c0c0c0; - margin: 0 2px; - padding: 0.35em 0.625em 0.75em; -} -legend { - border: 0; - padding: 0; -} -textarea { - overflow: auto; -} -optgroup { - font-weight: bold; -} -table { - border-collapse: collapse; - border-spacing: 0; -} -td, -th { - padding: 0; -} -/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */ -@media print { - *, - *:before, - *:after { - background: transparent !important; - color: #000 !important; - -webkit-box-shadow: none !important; - box-shadow: none !important; - text-shadow: none !important; - } - a, - a:visited { - text-decoration: underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - a[href^="#"]:after, - a[href^="javascript:"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } - .navbar { - display: none; - } - .btn > .caret, - .dropup > .btn > .caret { - border-top-color: #000 !important; - } - .label { - border: 1px solid #000; - } - .table { - border-collapse: collapse !important; - } - .table td, - .table th { - background-color: #fff !important; - } - .table-bordered th, - .table-bordered td { - border: 1px solid #ddd !important; - } -} -@font-face { - font-family: 'Glyphicons Halflings'; - src: url('../fonts/glyphicons-halflings-regular.eot'); - src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); -} -.glyphicon { - position: relative; - top: 1px; - display: inline-block; - font-family: 'Glyphicons Halflings'; - font-style: normal; - font-weight: normal; - line-height: 1; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} -.glyphicon-asterisk:before { - content: "\002a"; -} -.glyphicon-plus:before { - content: "\002b"; -} -.glyphicon-euro:before, -.glyphicon-eur:before { - content: "\20ac"; -} -.glyphicon-minus:before { - content: "\2212"; -} -.glyphicon-cloud:before { - content: "\2601"; -} -.glyphicon-envelope:before { - content: "\2709"; -} -.glyphicon-pencil:before { - content: "\270f"; -} -.glyphicon-glass:before { - content: "\e001"; -} -.glyphicon-music:before { - content: "\e002"; -} -.glyphicon-search:before { - content: "\e003"; -} -.glyphicon-heart:before { - content: "\e005"; -} -.glyphicon-star:before { - content: "\e006"; -} -.glyphicon-star-empty:before { - content: "\e007"; -} -.glyphicon-user:before { - content: "\e008"; -} -.glyphicon-film:before { - content: "\e009"; -} -.glyphicon-th-large:before { - content: "\e010"; -} -.glyphicon-th:before { - content: "\e011"; -} -.glyphicon-th-list:before { - content: "\e012"; -} -.glyphicon-ok:before { - content: "\e013"; -} -.glyphicon-remove:before { - content: "\e014"; -} -.glyphicon-zoom-in:before { - content: "\e015"; -} -.glyphicon-zoom-out:before { - content: "\e016"; -} -.glyphicon-off:before { - content: "\e017"; -} -.glyphicon-signal:before { - content: "\e018"; -} -.glyphicon-cog:before { - content: "\e019"; -} -.glyphicon-trash:before { - content: "\e020"; -} -.glyphicon-home:before { - content: "\e021"; -} -.glyphicon-file:before { - content: "\e022"; -} -.glyphicon-time:before { - content: "\e023"; -} -.glyphicon-road:before { - content: "\e024"; -} -.glyphicon-download-alt:before { - content: "\e025"; 
-} -.glyphicon-download:before { - content: "\e026"; -} -.glyphicon-upload:before { - content: "\e027"; -} -.glyphicon-inbox:before { - content: "\e028"; -} -.glyphicon-play-circle:before { - content: "\e029"; -} -.glyphicon-repeat:before { - content: "\e030"; -} -.glyphicon-refresh:before { - content: "\e031"; -} -.glyphicon-list-alt:before { - content: "\e032"; -} -.glyphicon-lock:before { - content: "\e033"; -} -.glyphicon-flag:before { - content: "\e034"; -} -.glyphicon-headphones:before { - content: "\e035"; -} -.glyphicon-volume-off:before { - content: "\e036"; -} -.glyphicon-volume-down:before { - content: "\e037"; -} -.glyphicon-volume-up:before { - content: "\e038"; -} -.glyphicon-qrcode:before { - content: "\e039"; -} -.glyphicon-barcode:before { - content: "\e040"; -} -.glyphicon-tag:before { - content: "\e041"; -} -.glyphicon-tags:before { - content: "\e042"; -} -.glyphicon-book:before { - content: "\e043"; -} -.glyphicon-bookmark:before { - content: "\e044"; -} -.glyphicon-print:before { - content: "\e045"; -} -.glyphicon-camera:before { - content: "\e046"; -} -.glyphicon-font:before { - content: "\e047"; -} -.glyphicon-bold:before { - content: "\e048"; -} -.glyphicon-italic:before { - content: "\e049"; -} -.glyphicon-text-height:before { - content: "\e050"; -} -.glyphicon-text-width:before { - content: "\e051"; -} -.glyphicon-align-left:before { - content: "\e052"; -} -.glyphicon-align-center:before { - content: "\e053"; -} -.glyphicon-align-right:before { - content: "\e054"; -} -.glyphicon-align-justify:before { - content: "\e055"; -} -.glyphicon-list:before { - content: "\e056"; -} -.glyphicon-indent-left:before { - content: "\e057"; -} -.glyphicon-indent-right:before { - content: "\e058"; -} -.glyphicon-facetime-video:before { - content: "\e059"; -} -.glyphicon-picture:before { - content: "\e060"; -} -.glyphicon-map-marker:before { - content: "\e062"; -} -.glyphicon-adjust:before { - content: "\e063"; -} -.glyphicon-tint:before { - content: "\e064"; -} -.glyphicon-edit:before { - content: "\e065"; -} -.glyphicon-share:before { - content: "\e066"; -} -.glyphicon-check:before { - content: "\e067"; -} -.glyphicon-move:before { - content: "\e068"; -} -.glyphicon-step-backward:before { - content: "\e069"; -} -.glyphicon-fast-backward:before { - content: "\e070"; -} -.glyphicon-backward:before { - content: "\e071"; -} -.glyphicon-play:before { - content: "\e072"; -} -.glyphicon-pause:before { - content: "\e073"; -} -.glyphicon-stop:before { - content: "\e074"; -} -.glyphicon-forward:before { - content: "\e075"; -} -.glyphicon-fast-forward:before { - content: "\e076"; -} -.glyphicon-step-forward:before { - content: "\e077"; -} -.glyphicon-eject:before { - content: "\e078"; -} -.glyphicon-chevron-left:before { - content: "\e079"; -} -.glyphicon-chevron-right:before { - content: "\e080"; -} -.glyphicon-plus-sign:before { - content: "\e081"; -} -.glyphicon-minus-sign:before { - content: "\e082"; -} -.glyphicon-remove-sign:before { - content: "\e083"; -} -.glyphicon-ok-sign:before { - content: "\e084"; -} -.glyphicon-question-sign:before { - content: "\e085"; -} -.glyphicon-info-sign:before { - content: "\e086"; -} -.glyphicon-screenshot:before { - content: "\e087"; -} -.glyphicon-remove-circle:before { - content: "\e088"; -} -.glyphicon-ok-circle:before { - content: "\e089"; -} -.glyphicon-ban-circle:before { - content: "\e090"; -} -.glyphicon-arrow-left:before { - content: "\e091"; -} -.glyphicon-arrow-right:before { - content: "\e092"; -} -.glyphicon-arrow-up:before { - content: 
"\e093"; -} -.glyphicon-arrow-down:before { - content: "\e094"; -} -.glyphicon-share-alt:before { - content: "\e095"; -} -.glyphicon-resize-full:before { - content: "\e096"; -} -.glyphicon-resize-small:before { - content: "\e097"; -} -.glyphicon-exclamation-sign:before { - content: "\e101"; -} -.glyphicon-gift:before { - content: "\e102"; -} -.glyphicon-leaf:before { - content: "\e103"; -} -.glyphicon-fire:before { - content: "\e104"; -} -.glyphicon-eye-open:before { - content: "\e105"; -} -.glyphicon-eye-close:before { - content: "\e106"; -} -.glyphicon-warning-sign:before { - content: "\e107"; -} -.glyphicon-plane:before { - content: "\e108"; -} -.glyphicon-calendar:before { - content: "\e109"; -} -.glyphicon-random:before { - content: "\e110"; -} -.glyphicon-comment:before { - content: "\e111"; -} -.glyphicon-magnet:before { - content: "\e112"; -} -.glyphicon-chevron-up:before { - content: "\e113"; -} -.glyphicon-chevron-down:before { - content: "\e114"; -} -.glyphicon-retweet:before { - content: "\e115"; -} -.glyphicon-shopping-cart:before { - content: "\e116"; -} -.glyphicon-folder-close:before { - content: "\e117"; -} -.glyphicon-folder-open:before { - content: "\e118"; -} -.glyphicon-resize-vertical:before { - content: "\e119"; -} -.glyphicon-resize-horizontal:before { - content: "\e120"; -} -.glyphicon-hdd:before { - content: "\e121"; -} -.glyphicon-bullhorn:before { - content: "\e122"; -} -.glyphicon-bell:before { - content: "\e123"; -} -.glyphicon-certificate:before { - content: "\e124"; -} -.glyphicon-thumbs-up:before { - content: "\e125"; -} -.glyphicon-thumbs-down:before { - content: "\e126"; -} -.glyphicon-hand-right:before { - content: "\e127"; -} -.glyphicon-hand-left:before { - content: "\e128"; -} -.glyphicon-hand-up:before { - content: "\e129"; -} -.glyphicon-hand-down:before { - content: "\e130"; -} -.glyphicon-circle-arrow-right:before { - content: "\e131"; -} -.glyphicon-circle-arrow-left:before { - content: "\e132"; -} -.glyphicon-circle-arrow-up:before { - content: "\e133"; -} -.glyphicon-circle-arrow-down:before { - content: "\e134"; -} -.glyphicon-globe:before { - content: "\e135"; -} -.glyphicon-wrench:before { - content: "\e136"; -} -.glyphicon-tasks:before { - content: "\e137"; -} -.glyphicon-filter:before { - content: "\e138"; -} -.glyphicon-briefcase:before { - content: "\e139"; -} -.glyphicon-fullscreen:before { - content: "\e140"; -} -.glyphicon-dashboard:before { - content: "\e141"; -} -.glyphicon-paperclip:before { - content: "\e142"; -} -.glyphicon-heart-empty:before { - content: "\e143"; -} -.glyphicon-link:before { - content: "\e144"; -} -.glyphicon-phone:before { - content: "\e145"; -} -.glyphicon-pushpin:before { - content: "\e146"; -} -.glyphicon-usd:before { - content: "\e148"; -} -.glyphicon-gbp:before { - content: "\e149"; -} -.glyphicon-sort:before { - content: "\e150"; -} -.glyphicon-sort-by-alphabet:before { - content: "\e151"; -} -.glyphicon-sort-by-alphabet-alt:before { - content: "\e152"; -} -.glyphicon-sort-by-order:before { - content: "\e153"; -} -.glyphicon-sort-by-order-alt:before { - content: "\e154"; -} -.glyphicon-sort-by-attributes:before { - content: "\e155"; -} -.glyphicon-sort-by-attributes-alt:before { - content: "\e156"; -} -.glyphicon-unchecked:before { - content: "\e157"; -} -.glyphicon-expand:before { - content: "\e158"; -} -.glyphicon-collapse-down:before { - content: "\e159"; -} -.glyphicon-collapse-up:before { - content: "\e160"; -} -.glyphicon-log-in:before { - content: "\e161"; -} -.glyphicon-flash:before { - content: 
"\e162"; -} -.glyphicon-log-out:before { - content: "\e163"; -} -.glyphicon-new-window:before { - content: "\e164"; -} -.glyphicon-record:before { - content: "\e165"; -} -.glyphicon-save:before { - content: "\e166"; -} -.glyphicon-open:before { - content: "\e167"; -} -.glyphicon-saved:before { - content: "\e168"; -} -.glyphicon-import:before { - content: "\e169"; -} -.glyphicon-export:before { - content: "\e170"; -} -.glyphicon-send:before { - content: "\e171"; -} -.glyphicon-floppy-disk:before { - content: "\e172"; -} -.glyphicon-floppy-saved:before { - content: "\e173"; -} -.glyphicon-floppy-remove:before { - content: "\e174"; -} -.glyphicon-floppy-save:before { - content: "\e175"; -} -.glyphicon-floppy-open:before { - content: "\e176"; -} -.glyphicon-credit-card:before { - content: "\e177"; -} -.glyphicon-transfer:before { - content: "\e178"; -} -.glyphicon-cutlery:before { - content: "\e179"; -} -.glyphicon-header:before { - content: "\e180"; -} -.glyphicon-compressed:before { - content: "\e181"; -} -.glyphicon-earphone:before { - content: "\e182"; -} -.glyphicon-phone-alt:before { - content: "\e183"; -} -.glyphicon-tower:before { - content: "\e184"; -} -.glyphicon-stats:before { - content: "\e185"; -} -.glyphicon-sd-video:before { - content: "\e186"; -} -.glyphicon-hd-video:before { - content: "\e187"; -} -.glyphicon-subtitles:before { - content: "\e188"; -} -.glyphicon-sound-stereo:before { - content: "\e189"; -} -.glyphicon-sound-dolby:before { - content: "\e190"; -} -.glyphicon-sound-5-1:before { - content: "\e191"; -} -.glyphicon-sound-6-1:before { - content: "\e192"; -} -.glyphicon-sound-7-1:before { - content: "\e193"; -} -.glyphicon-copyright-mark:before { - content: "\e194"; -} -.glyphicon-registration-mark:before { - content: "\e195"; -} -.glyphicon-cloud-download:before { - content: "\e197"; -} -.glyphicon-cloud-upload:before { - content: "\e198"; -} -.glyphicon-tree-conifer:before { - content: "\e199"; -} -.glyphicon-tree-deciduous:before { - content: "\e200"; -} -.glyphicon-cd:before { - content: "\e201"; -} -.glyphicon-save-file:before { - content: "\e202"; -} -.glyphicon-open-file:before { - content: "\e203"; -} -.glyphicon-level-up:before { - content: "\e204"; -} -.glyphicon-copy:before { - content: "\e205"; -} -.glyphicon-paste:before { - content: "\e206"; -} -.glyphicon-alert:before { - content: "\e209"; -} -.glyphicon-equalizer:before { - content: "\e210"; -} -.glyphicon-king:before { - content: "\e211"; -} -.glyphicon-queen:before { - content: "\e212"; -} -.glyphicon-pawn:before { - content: "\e213"; -} -.glyphicon-bishop:before { - content: "\e214"; -} -.glyphicon-knight:before { - content: "\e215"; -} -.glyphicon-baby-formula:before { - content: "\e216"; -} -.glyphicon-tent:before { - content: "\26fa"; -} -.glyphicon-blackboard:before { - content: "\e218"; -} -.glyphicon-bed:before { - content: "\e219"; -} -.glyphicon-apple:before { - content: "\f8ff"; -} -.glyphicon-erase:before { - content: "\e221"; -} -.glyphicon-hourglass:before { - content: "\231b"; -} -.glyphicon-lamp:before { - content: "\e223"; -} -.glyphicon-duplicate:before { - content: "\e224"; -} -.glyphicon-piggy-bank:before { - content: "\e225"; -} -.glyphicon-scissors:before { - content: "\e226"; -} -.glyphicon-bitcoin:before { - content: "\e227"; -} -.glyphicon-btc:before { - content: "\e227"; -} -.glyphicon-xbt:before { - content: "\e227"; -} -.glyphicon-yen:before { - content: "\00a5"; -} -.glyphicon-jpy:before { - content: "\00a5"; -} -.glyphicon-ruble:before { - content: "\20bd"; -} 
-.glyphicon-rub:before { - content: "\20bd"; -} -.glyphicon-scale:before { - content: "\e230"; -} -.glyphicon-ice-lolly:before { - content: "\e231"; -} -.glyphicon-ice-lolly-tasted:before { - content: "\e232"; -} -.glyphicon-education:before { - content: "\e233"; -} -.glyphicon-option-horizontal:before { - content: "\e234"; -} -.glyphicon-option-vertical:before { - content: "\e235"; -} -.glyphicon-menu-hamburger:before { - content: "\e236"; -} -.glyphicon-modal-window:before { - content: "\e237"; -} -.glyphicon-oil:before { - content: "\e238"; -} -.glyphicon-grain:before { - content: "\e239"; -} -.glyphicon-sunglasses:before { - content: "\e240"; -} -.glyphicon-text-size:before { - content: "\e241"; -} -.glyphicon-text-color:before { - content: "\e242"; -} -.glyphicon-text-background:before { - content: "\e243"; -} -.glyphicon-object-align-top:before { - content: "\e244"; -} -.glyphicon-object-align-bottom:before { - content: "\e245"; -} -.glyphicon-object-align-horizontal:before { - content: "\e246"; -} -.glyphicon-object-align-left:before { - content: "\e247"; -} -.glyphicon-object-align-vertical:before { - content: "\e248"; -} -.glyphicon-object-align-right:before { - content: "\e249"; -} -.glyphicon-triangle-right:before { - content: "\e250"; -} -.glyphicon-triangle-left:before { - content: "\e251"; -} -.glyphicon-triangle-bottom:before { - content: "\e252"; -} -.glyphicon-triangle-top:before { - content: "\e253"; -} -.glyphicon-console:before { - content: "\e254"; -} -.glyphicon-superscript:before { - content: "\e255"; -} -.glyphicon-subscript:before { - content: "\e256"; -} -.glyphicon-menu-left:before { - content: "\e257"; -} -.glyphicon-menu-right:before { - content: "\e258"; -} -.glyphicon-menu-down:before { - content: "\e259"; -} -.glyphicon-menu-up:before { - content: "\e260"; -} -* { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -*:before, -*:after { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -html { - font-size: 10px; - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -} -body { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 1.42857143; - color: #c8c8c8; - background-color: #272b30; -} -input, -button, -select, -textarea { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} -a { - color: #ffffff; - text-decoration: none; -} -a:hover, -a:focus { - color: #ffffff; - text-decoration: underline; -} -a:focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -figure { - margin: 0; -} -img { - vertical-align: middle; -} -.img-responsive, -.thumbnail > img, -.thumbnail a > img, -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - max-width: 100%; - height: auto; -} -.img-rounded { - border-radius: 6px; -} -.img-thumbnail { - padding: 4px; - line-height: 1.42857143; - background-color: #1c1e22; - border: 1px solid #0c0d0e; - border-radius: 4px; - -webkit-transition: all 0.2s ease-in-out; - -o-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; - display: inline-block; - max-width: 100%; - height: auto; -} -.img-circle { - border-radius: 50%; -} -hr { - margin-top: 20px; - margin-bottom: 20px; - border: 0; - border-top: 1px solid #1c1e22; -} -.sr-only { - position: absolute; - width: 1px; - height: 1px; - margin: -1px; - padding: 0; - overflow: hidden; - clip: rect(0, 0, 0, 0); - border: 0; -} -.sr-only-focusable:active, 
-.sr-only-focusable:focus { - position: static; - width: auto; - height: auto; - margin: 0; - overflow: visible; - clip: auto; -} -[role="button"] { - cursor: pointer; -} -h1, -h2, -h3, -h4, -h5, -h6, -.h1, -.h2, -.h3, -.h4, -.h5, -.h6 { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-weight: 500; - line-height: 1.1; - color: inherit; -} -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small, -.h1 small, -.h2 small, -.h3 small, -.h4 small, -.h5 small, -.h6 small, -h1 .small, -h2 .small, -h3 .small, -h4 .small, -h5 .small, -h6 .small, -.h1 .small, -.h2 .small, -.h3 .small, -.h4 .small, -.h5 .small, -.h6 .small { - font-weight: normal; - line-height: 1; - color: #7a8288; -} -h1, -.h1, -h2, -.h2, -h3, -.h3 { - margin-top: 20px; - margin-bottom: 10px; -} -h1 small, -.h1 small, -h2 small, -.h2 small, -h3 small, -.h3 small, -h1 .small, -.h1 .small, -h2 .small, -.h2 .small, -h3 .small, -.h3 .small { - font-size: 65%; -} -h4, -.h4, -h5, -.h5, -h6, -.h6 { - margin-top: 10px; - margin-bottom: 10px; -} -h4 small, -.h4 small, -h5 small, -.h5 small, -h6 small, -.h6 small, -h4 .small, -.h4 .small, -h5 .small, -.h5 .small, -h6 .small, -.h6 .small { - font-size: 75%; -} -h1, -.h1 { - font-size: 36px; -} -h2, -.h2 { - font-size: 30px; -} -h3, -.h3 { - font-size: 24px; -} -h4, -.h4 { - font-size: 18px; -} -h5, -.h5 { - font-size: 14px; -} -h6, -.h6 { - font-size: 12px; -} -p { - margin: 0 0 10px; -} -.lead { - margin-bottom: 20px; - font-size: 16px; - font-weight: 300; - line-height: 1.4; -} -@media (min-width: 768px) { - .lead { - font-size: 21px; - } -} -small, -.small { - font-size: 85%; -} -mark, -.mark { - background-color: #f89406; - padding: .2em; -} -.text-left { - text-align: left; -} -.text-right { - text-align: right; -} -.text-center { - text-align: center; -} -.text-justify { - text-align: justify; -} -.text-nowrap { - white-space: nowrap; -} -.text-lowercase { - text-transform: lowercase; -} -.text-uppercase { - text-transform: uppercase; -} -.text-capitalize { - text-transform: capitalize; -} -.text-muted { - color: #7a8288; -} -.text-primary { - color: #7a8288; -} -a.text-primary:hover, -a.text-primary:focus { - color: #62686d; -} -.text-success { - color: #ffffff; -} -a.text-success:hover, -a.text-success:focus { - color: #e6e6e6; -} -.text-info { - color: #ffffff; -} -a.text-info:hover, -a.text-info:focus { - color: #e6e6e6; -} -.text-warning { - color: #ffffff; -} -a.text-warning:hover, -a.text-warning:focus { - color: #e6e6e6; -} -.text-danger { - color: #ffffff; -} -a.text-danger:hover, -a.text-danger:focus { - color: #e6e6e6; -} -.bg-primary { - color: #fff; - background-color: #7a8288; -} -a.bg-primary:hover, -a.bg-primary:focus { - background-color: #62686d; -} -.bg-success { - background-color: #62c462; -} -a.bg-success:hover, -a.bg-success:focus { - background-color: #42b142; -} -.bg-info { - background-color: #5bc0de; -} -a.bg-info:hover, -a.bg-info:focus { - background-color: #31b0d5; -} -.bg-warning { - background-color: #f89406; -} -a.bg-warning:hover, -a.bg-warning:focus { - background-color: #c67605; -} -.bg-danger { - background-color: #ee5f5b; -} -a.bg-danger:hover, -a.bg-danger:focus { - background-color: #e9322d; -} -.page-header { - padding-bottom: 9px; - margin: 40px 0 20px; - border-bottom: 1px solid #1c1e22; -} -ul, -ol { - margin-top: 0; - margin-bottom: 10px; -} -ul ul, -ol ul, -ul ol, -ol ol { - margin-bottom: 0; -} -.list-unstyled { - padding-left: 0; - list-style: none; -} -.list-inline { - padding-left: 0; - list-style: none; - 
margin-left: -5px; -} -.list-inline > li { - display: inline-block; - padding-left: 5px; - padding-right: 5px; -} -dl { - margin-top: 0; - margin-bottom: 20px; -} -dt, -dd { - line-height: 1.42857143; -} -dt { - font-weight: bold; -} -dd { - margin-left: 0; -} -@media (min-width: 768px) { - .dl-horizontal dt { - float: left; - width: 160px; - clear: left; - text-align: right; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; - } - .dl-horizontal dd { - margin-left: 180px; - } -} -abbr[title], -abbr[data-original-title] { - cursor: help; - border-bottom: 1px dotted #7a8288; -} -.initialism { - font-size: 90%; - text-transform: uppercase; -} -blockquote { - padding: 10px 20px; - margin: 0 0 20px; - font-size: 17.5px; - border-left: 5px solid #7a8288; -} -blockquote p:last-child, -blockquote ul:last-child, -blockquote ol:last-child { - margin-bottom: 0; -} -blockquote footer, -blockquote small, -blockquote .small { - display: block; - font-size: 80%; - line-height: 1.42857143; - color: #7a8288; -} -blockquote footer:before, -blockquote small:before, -blockquote .small:before { - content: '\2014 \00A0'; -} -.blockquote-reverse, -blockquote.pull-right { - padding-right: 15px; - padding-left: 0; - border-right: 5px solid #7a8288; - border-left: 0; - text-align: right; -} -.blockquote-reverse footer:before, -blockquote.pull-right footer:before, -.blockquote-reverse small:before, -blockquote.pull-right small:before, -.blockquote-reverse .small:before, -blockquote.pull-right .small:before { - content: ''; -} -.blockquote-reverse footer:after, -blockquote.pull-right footer:after, -.blockquote-reverse small:after, -blockquote.pull-right small:after, -.blockquote-reverse .small:after, -blockquote.pull-right .small:after { - content: '\00A0 \2014'; -} -address { - margin-bottom: 20px; - font-style: normal; - line-height: 1.42857143; -} -code, -kbd, -pre, -samp { - font-family: Menlo, Monaco, Consolas, "Courier New", monospace; -} -code { - padding: 2px 4px; - font-size: 90%; - color: #c7254e; - background-color: #f9f2f4; - border-radius: 4px; -} -kbd { - padding: 2px 4px; - font-size: 90%; - color: #ffffff; - background-color: #333333; - border-radius: 3px; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25); -} -kbd kbd { - padding: 0; - font-size: 100%; - font-weight: bold; - -webkit-box-shadow: none; - box-shadow: none; -} -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 1.42857143; - word-break: break-all; - word-wrap: break-word; - color: #3a3f44; - background-color: #f5f5f5; - border: 1px solid #cccccc; - border-radius: 4px; -} -pre code { - padding: 0; - font-size: inherit; - color: inherit; - white-space: pre-wrap; - background-color: transparent; - border-radius: 0; -} -.pre-scrollable { - max-height: 340px; - overflow-y: scroll; -} -.container { - margin-right: auto; - margin-left: auto; - padding-left: 15px; - padding-right: 15px; -} -@media (min-width: 768px) { - .container { - width: 750px; - } -} -@media (min-width: 992px) { - .container { - width: 970px; - } -} -@media (min-width: 1200px) { - .container { - width: 1170px; - } -} -.container-fluid { - margin-right: auto; - margin-left: auto; - padding-left: 15px; - padding-right: 15px; -} -.row { - margin-left: -15px; - margin-right: -15px; -} -.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, 
.col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 { - position: relative; - min-height: 1px; - padding-left: 15px; - padding-right: 15px; -} -.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 { - float: left; -} -.col-xs-12 { - width: 100%; -} -.col-xs-11 { - width: 91.66666667%; -} -.col-xs-10 { - width: 83.33333333%; -} -.col-xs-9 { - width: 75%; -} -.col-xs-8 { - width: 66.66666667%; -} -.col-xs-7 { - width: 58.33333333%; -} -.col-xs-6 { - width: 50%; -} -.col-xs-5 { - width: 41.66666667%; -} -.col-xs-4 { - width: 33.33333333%; -} -.col-xs-3 { - width: 25%; -} -.col-xs-2 { - width: 16.66666667%; -} -.col-xs-1 { - width: 8.33333333%; -} -.col-xs-pull-12 { - right: 100%; -} -.col-xs-pull-11 { - right: 91.66666667%; -} -.col-xs-pull-10 { - right: 83.33333333%; -} -.col-xs-pull-9 { - right: 75%; -} -.col-xs-pull-8 { - right: 66.66666667%; -} -.col-xs-pull-7 { - right: 58.33333333%; -} -.col-xs-pull-6 { - right: 50%; -} -.col-xs-pull-5 { - right: 41.66666667%; -} -.col-xs-pull-4 { - right: 33.33333333%; -} -.col-xs-pull-3 { - right: 25%; -} -.col-xs-pull-2 { - right: 16.66666667%; -} -.col-xs-pull-1 { - right: 8.33333333%; -} -.col-xs-pull-0 { - right: auto; -} -.col-xs-push-12 { - left: 100%; -} -.col-xs-push-11 { - left: 91.66666667%; -} -.col-xs-push-10 { - left: 83.33333333%; -} -.col-xs-push-9 { - left: 75%; -} -.col-xs-push-8 { - left: 66.66666667%; -} -.col-xs-push-7 { - left: 58.33333333%; -} -.col-xs-push-6 { - left: 50%; -} -.col-xs-push-5 { - left: 41.66666667%; -} -.col-xs-push-4 { - left: 33.33333333%; -} -.col-xs-push-3 { - left: 25%; -} -.col-xs-push-2 { - left: 16.66666667%; -} -.col-xs-push-1 { - left: 8.33333333%; -} -.col-xs-push-0 { - left: auto; -} -.col-xs-offset-12 { - margin-left: 100%; -} -.col-xs-offset-11 { - margin-left: 91.66666667%; -} -.col-xs-offset-10 { - margin-left: 83.33333333%; -} -.col-xs-offset-9 { - margin-left: 75%; -} -.col-xs-offset-8 { - margin-left: 66.66666667%; -} -.col-xs-offset-7 { - margin-left: 58.33333333%; -} -.col-xs-offset-6 { - margin-left: 50%; -} -.col-xs-offset-5 { - margin-left: 41.66666667%; -} -.col-xs-offset-4 { - margin-left: 33.33333333%; -} -.col-xs-offset-3 { - margin-left: 25%; -} -.col-xs-offset-2 { - margin-left: 16.66666667%; -} -.col-xs-offset-1 { - margin-left: 8.33333333%; -} -.col-xs-offset-0 { - margin-left: 0%; -} -@media (min-width: 768px) { - .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 { - float: left; - } - .col-sm-12 { - width: 100%; - } - .col-sm-11 { - width: 91.66666667%; - } - .col-sm-10 { - width: 83.33333333%; - } - .col-sm-9 { - width: 75%; - } - .col-sm-8 { - width: 66.66666667%; - } - .col-sm-7 { - width: 58.33333333%; - } - .col-sm-6 { - width: 50%; - } - .col-sm-5 { - width: 41.66666667%; - } - .col-sm-4 { - width: 33.33333333%; - } - .col-sm-3 { - width: 25%; - } - .col-sm-2 { - width: 16.66666667%; - } - .col-sm-1 { - width: 8.33333333%; - } - .col-sm-pull-12 { - right: 100%; - } - .col-sm-pull-11 { - right: 91.66666667%; - } - .col-sm-pull-10 { - right: 83.33333333%; - } - .col-sm-pull-9 { - right: 75%; - } - 
.col-sm-pull-8 { - right: 66.66666667%; - } - .col-sm-pull-7 { - right: 58.33333333%; - } - .col-sm-pull-6 { - right: 50%; - } - .col-sm-pull-5 { - right: 41.66666667%; - } - .col-sm-pull-4 { - right: 33.33333333%; - } - .col-sm-pull-3 { - right: 25%; - } - .col-sm-pull-2 { - right: 16.66666667%; - } - .col-sm-pull-1 { - right: 8.33333333%; - } - .col-sm-pull-0 { - right: auto; - } - .col-sm-push-12 { - left: 100%; - } - .col-sm-push-11 { - left: 91.66666667%; - } - .col-sm-push-10 { - left: 83.33333333%; - } - .col-sm-push-9 { - left: 75%; - } - .col-sm-push-8 { - left: 66.66666667%; - } - .col-sm-push-7 { - left: 58.33333333%; - } - .col-sm-push-6 { - left: 50%; - } - .col-sm-push-5 { - left: 41.66666667%; - } - .col-sm-push-4 { - left: 33.33333333%; - } - .col-sm-push-3 { - left: 25%; - } - .col-sm-push-2 { - left: 16.66666667%; - } - .col-sm-push-1 { - left: 8.33333333%; - } - .col-sm-push-0 { - left: auto; - } - .col-sm-offset-12 { - margin-left: 100%; - } - .col-sm-offset-11 { - margin-left: 91.66666667%; - } - .col-sm-offset-10 { - margin-left: 83.33333333%; - } - .col-sm-offset-9 { - margin-left: 75%; - } - .col-sm-offset-8 { - margin-left: 66.66666667%; - } - .col-sm-offset-7 { - margin-left: 58.33333333%; - } - .col-sm-offset-6 { - margin-left: 50%; - } - .col-sm-offset-5 { - margin-left: 41.66666667%; - } - .col-sm-offset-4 { - margin-left: 33.33333333%; - } - .col-sm-offset-3 { - margin-left: 25%; - } - .col-sm-offset-2 { - margin-left: 16.66666667%; - } - .col-sm-offset-1 { - margin-left: 8.33333333%; - } - .col-sm-offset-0 { - margin-left: 0%; - } -} -@media (min-width: 992px) { - .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 { - float: left; - } - .col-md-12 { - width: 100%; - } - .col-md-11 { - width: 91.66666667%; - } - .col-md-10 { - width: 83.33333333%; - } - .col-md-9 { - width: 75%; - } - .col-md-8 { - width: 66.66666667%; - } - .col-md-7 { - width: 58.33333333%; - } - .col-md-6 { - width: 50%; - } - .col-md-5 { - width: 41.66666667%; - } - .col-md-4 { - width: 33.33333333%; - } - .col-md-3 { - width: 25%; - } - .col-md-2 { - width: 16.66666667%; - } - .col-md-1 { - width: 8.33333333%; - } - .col-md-pull-12 { - right: 100%; - } - .col-md-pull-11 { - right: 91.66666667%; - } - .col-md-pull-10 { - right: 83.33333333%; - } - .col-md-pull-9 { - right: 75%; - } - .col-md-pull-8 { - right: 66.66666667%; - } - .col-md-pull-7 { - right: 58.33333333%; - } - .col-md-pull-6 { - right: 50%; - } - .col-md-pull-5 { - right: 41.66666667%; - } - .col-md-pull-4 { - right: 33.33333333%; - } - .col-md-pull-3 { - right: 25%; - } - .col-md-pull-2 { - right: 16.66666667%; - } - .col-md-pull-1 { - right: 8.33333333%; - } - .col-md-pull-0 { - right: auto; - } - .col-md-push-12 { - left: 100%; - } - .col-md-push-11 { - left: 91.66666667%; - } - .col-md-push-10 { - left: 83.33333333%; - } - .col-md-push-9 { - left: 75%; - } - .col-md-push-8 { - left: 66.66666667%; - } - .col-md-push-7 { - left: 58.33333333%; - } - .col-md-push-6 { - left: 50%; - } - .col-md-push-5 { - left: 41.66666667%; - } - .col-md-push-4 { - left: 33.33333333%; - } - .col-md-push-3 { - left: 25%; - } - .col-md-push-2 { - left: 16.66666667%; - } - .col-md-push-1 { - left: 8.33333333%; - } - .col-md-push-0 { - left: auto; - } - .col-md-offset-12 { - margin-left: 100%; - } - .col-md-offset-11 { - margin-left: 91.66666667%; - } - .col-md-offset-10 { - margin-left: 83.33333333%; - } - .col-md-offset-9 { - margin-left: 75%; - } - 
.col-md-offset-8 { - margin-left: 66.66666667%; - } - .col-md-offset-7 { - margin-left: 58.33333333%; - } - .col-md-offset-6 { - margin-left: 50%; - } - .col-md-offset-5 { - margin-left: 41.66666667%; - } - .col-md-offset-4 { - margin-left: 33.33333333%; - } - .col-md-offset-3 { - margin-left: 25%; - } - .col-md-offset-2 { - margin-left: 16.66666667%; - } - .col-md-offset-1 { - margin-left: 8.33333333%; - } - .col-md-offset-0 { - margin-left: 0%; - } -} -@media (min-width: 1200px) { - .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 { - float: left; - } - .col-lg-12 { - width: 100%; - } - .col-lg-11 { - width: 91.66666667%; - } - .col-lg-10 { - width: 83.33333333%; - } - .col-lg-9 { - width: 75%; - } - .col-lg-8 { - width: 66.66666667%; - } - .col-lg-7 { - width: 58.33333333%; - } - .col-lg-6 { - width: 50%; - } - .col-lg-5 { - width: 41.66666667%; - } - .col-lg-4 { - width: 33.33333333%; - } - .col-lg-3 { - width: 25%; - } - .col-lg-2 { - width: 16.66666667%; - } - .col-lg-1 { - width: 8.33333333%; - } - .col-lg-pull-12 { - right: 100%; - } - .col-lg-pull-11 { - right: 91.66666667%; - } - .col-lg-pull-10 { - right: 83.33333333%; - } - .col-lg-pull-9 { - right: 75%; - } - .col-lg-pull-8 { - right: 66.66666667%; - } - .col-lg-pull-7 { - right: 58.33333333%; - } - .col-lg-pull-6 { - right: 50%; - } - .col-lg-pull-5 { - right: 41.66666667%; - } - .col-lg-pull-4 { - right: 33.33333333%; - } - .col-lg-pull-3 { - right: 25%; - } - .col-lg-pull-2 { - right: 16.66666667%; - } - .col-lg-pull-1 { - right: 8.33333333%; - } - .col-lg-pull-0 { - right: auto; - } - .col-lg-push-12 { - left: 100%; - } - .col-lg-push-11 { - left: 91.66666667%; - } - .col-lg-push-10 { - left: 83.33333333%; - } - .col-lg-push-9 { - left: 75%; - } - .col-lg-push-8 { - left: 66.66666667%; - } - .col-lg-push-7 { - left: 58.33333333%; - } - .col-lg-push-6 { - left: 50%; - } - .col-lg-push-5 { - left: 41.66666667%; - } - .col-lg-push-4 { - left: 33.33333333%; - } - .col-lg-push-3 { - left: 25%; - } - .col-lg-push-2 { - left: 16.66666667%; - } - .col-lg-push-1 { - left: 8.33333333%; - } - .col-lg-push-0 { - left: auto; - } - .col-lg-offset-12 { - margin-left: 100%; - } - .col-lg-offset-11 { - margin-left: 91.66666667%; - } - .col-lg-offset-10 { - margin-left: 83.33333333%; - } - .col-lg-offset-9 { - margin-left: 75%; - } - .col-lg-offset-8 { - margin-left: 66.66666667%; - } - .col-lg-offset-7 { - margin-left: 58.33333333%; - } - .col-lg-offset-6 { - margin-left: 50%; - } - .col-lg-offset-5 { - margin-left: 41.66666667%; - } - .col-lg-offset-4 { - margin-left: 33.33333333%; - } - .col-lg-offset-3 { - margin-left: 25%; - } - .col-lg-offset-2 { - margin-left: 16.66666667%; - } - .col-lg-offset-1 { - margin-left: 8.33333333%; - } - .col-lg-offset-0 { - margin-left: 0%; - } -} -table { - background-color: #2e3338; -} -caption { - padding-top: 8px; - padding-bottom: 8px; - color: #7a8288; - text-align: left; -} -th { - text-align: left; -} -.table { - width: 100%; - max-width: 100%; - margin-bottom: 20px; -} -.table > thead > tr > th, -.table > tbody > tr > th, -.table > tfoot > tr > th, -.table > thead > tr > td, -.table > tbody > tr > td, -.table > tfoot > tr > td { - padding: 8px; - line-height: 1.42857143; - vertical-align: top; - border-top: 1px solid #1c1e22; -} -.table > thead > tr > th { - vertical-align: bottom; - border-bottom: 2px solid #1c1e22; -} -.table > caption + thead > tr:first-child > th, -.table > colgroup + thead > tr:first-child > 
th, -.table > thead:first-child > tr:first-child > th, -.table > caption + thead > tr:first-child > td, -.table > colgroup + thead > tr:first-child > td, -.table > thead:first-child > tr:first-child > td { - border-top: 0; -} -.table > tbody + tbody { - border-top: 2px solid #1c1e22; -} -.table .table { - background-color: #272b30; -} -.table-condensed > thead > tr > th, -.table-condensed > tbody > tr > th, -.table-condensed > tfoot > tr > th, -.table-condensed > thead > tr > td, -.table-condensed > tbody > tr > td, -.table-condensed > tfoot > tr > td { - padding: 5px; -} -.table-bordered { - border: 1px solid #1c1e22; -} -.table-bordered > thead > tr > th, -.table-bordered > tbody > tr > th, -.table-bordered > tfoot > tr > th, -.table-bordered > thead > tr > td, -.table-bordered > tbody > tr > td, -.table-bordered > tfoot > tr > td { - border: 1px solid #1c1e22; -} -.table-bordered > thead > tr > th, -.table-bordered > thead > tr > td { - border-bottom-width: 2px; -} -.table-striped > tbody > tr:nth-of-type(odd) { - background-color: #353a41; -} -.table-hover > tbody > tr:hover { - background-color: #49515a; -} -table col[class*="col-"] { - position: static; - float: none; - display: table-column; -} -table td[class*="col-"], -table th[class*="col-"] { - position: static; - float: none; - display: table-cell; -} -.table > thead > tr > td.active, -.table > tbody > tr > td.active, -.table > tfoot > tr > td.active, -.table > thead > tr > th.active, -.table > tbody > tr > th.active, -.table > tfoot > tr > th.active, -.table > thead > tr.active > td, -.table > tbody > tr.active > td, -.table > tfoot > tr.active > td, -.table > thead > tr.active > th, -.table > tbody > tr.active > th, -.table > tfoot > tr.active > th { - background-color: #49515a; -} -.table-hover > tbody > tr > td.active:hover, -.table-hover > tbody > tr > th.active:hover, -.table-hover > tbody > tr.active:hover > td, -.table-hover > tbody > tr:hover > .active, -.table-hover > tbody > tr.active:hover > th { - background-color: #3e444c; -} -.table > thead > tr > td.success, -.table > tbody > tr > td.success, -.table > tfoot > tr > td.success, -.table > thead > tr > th.success, -.table > tbody > tr > th.success, -.table > tfoot > tr > th.success, -.table > thead > tr.success > td, -.table > tbody > tr.success > td, -.table > tfoot > tr.success > td, -.table > thead > tr.success > th, -.table > tbody > tr.success > th, -.table > tfoot > tr.success > th { - background-color: #62c462; -} -.table-hover > tbody > tr > td.success:hover, -.table-hover > tbody > tr > th.success:hover, -.table-hover > tbody > tr.success:hover > td, -.table-hover > tbody > tr:hover > .success, -.table-hover > tbody > tr.success:hover > th { - background-color: #4fbd4f; -} -.table > thead > tr > td.info, -.table > tbody > tr > td.info, -.table > tfoot > tr > td.info, -.table > thead > tr > th.info, -.table > tbody > tr > th.info, -.table > tfoot > tr > th.info, -.table > thead > tr.info > td, -.table > tbody > tr.info > td, -.table > tfoot > tr.info > td, -.table > thead > tr.info > th, -.table > tbody > tr.info > th, -.table > tfoot > tr.info > th { - background-color: #5bc0de; -} -.table-hover > tbody > tr > td.info:hover, -.table-hover > tbody > tr > th.info:hover, -.table-hover > tbody > tr.info:hover > td, -.table-hover > tbody > tr:hover > .info, -.table-hover > tbody > tr.info:hover > th { - background-color: #46b8da; -} -.table > thead > tr > td.warning, -.table > tbody > tr > td.warning, -.table > tfoot > tr > td.warning, -.table > thead > tr > 
th.warning, -.table > tbody > tr > th.warning, -.table > tfoot > tr > th.warning, -.table > thead > tr.warning > td, -.table > tbody > tr.warning > td, -.table > tfoot > tr.warning > td, -.table > thead > tr.warning > th, -.table > tbody > tr.warning > th, -.table > tfoot > tr.warning > th { - background-color: #f89406; -} -.table-hover > tbody > tr > td.warning:hover, -.table-hover > tbody > tr > th.warning:hover, -.table-hover > tbody > tr.warning:hover > td, -.table-hover > tbody > tr:hover > .warning, -.table-hover > tbody > tr.warning:hover > th { - background-color: #df8505; -} -.table > thead > tr > td.danger, -.table > tbody > tr > td.danger, -.table > tfoot > tr > td.danger, -.table > thead > tr > th.danger, -.table > tbody > tr > th.danger, -.table > tfoot > tr > th.danger, -.table > thead > tr.danger > td, -.table > tbody > tr.danger > td, -.table > tfoot > tr.danger > td, -.table > thead > tr.danger > th, -.table > tbody > tr.danger > th, -.table > tfoot > tr.danger > th { - background-color: #ee5f5b; -} -.table-hover > tbody > tr > td.danger:hover, -.table-hover > tbody > tr > th.danger:hover, -.table-hover > tbody > tr.danger:hover > td, -.table-hover > tbody > tr:hover > .danger, -.table-hover > tbody > tr.danger:hover > th { - background-color: #ec4844; -} -.table-responsive { - overflow-x: auto; - min-height: 0.01%; -} -@media screen and (max-width: 767px) { - .table-responsive { - width: 100%; - margin-bottom: 15px; - overflow-y: hidden; - -ms-overflow-style: -ms-autohiding-scrollbar; - border: 1px solid #1c1e22; - } - .table-responsive > .table { - margin-bottom: 0; - } - .table-responsive > .table > thead > tr > th, - .table-responsive > .table > tbody > tr > th, - .table-responsive > .table > tfoot > tr > th, - .table-responsive > .table > thead > tr > td, - .table-responsive > .table > tbody > tr > td, - .table-responsive > .table > tfoot > tr > td { - white-space: nowrap; - } - .table-responsive > .table-bordered { - border: 0; - } - .table-responsive > .table-bordered > thead > tr > th:first-child, - .table-responsive > .table-bordered > tbody > tr > th:first-child, - .table-responsive > .table-bordered > tfoot > tr > th:first-child, - .table-responsive > .table-bordered > thead > tr > td:first-child, - .table-responsive > .table-bordered > tbody > tr > td:first-child, - .table-responsive > .table-bordered > tfoot > tr > td:first-child { - border-left: 0; - } - .table-responsive > .table-bordered > thead > tr > th:last-child, - .table-responsive > .table-bordered > tbody > tr > th:last-child, - .table-responsive > .table-bordered > tfoot > tr > th:last-child, - .table-responsive > .table-bordered > thead > tr > td:last-child, - .table-responsive > .table-bordered > tbody > tr > td:last-child, - .table-responsive > .table-bordered > tfoot > tr > td:last-child { - border-right: 0; - } - .table-responsive > .table-bordered > tbody > tr:last-child > th, - .table-responsive > .table-bordered > tfoot > tr:last-child > th, - .table-responsive > .table-bordered > tbody > tr:last-child > td, - .table-responsive > .table-bordered > tfoot > tr:last-child > td { - border-bottom: 0; - } -} -fieldset { - padding: 0; - margin: 0; - border: 0; - min-width: 0; -} -legend { - display: block; - width: 100%; - padding: 0; - margin-bottom: 20px; - font-size: 21px; - line-height: inherit; - color: #c8c8c8; - border: 0; - border-bottom: 1px solid #1c1e22; -} -label { - display: inline-block; - max-width: 100%; - margin-bottom: 5px; - font-weight: bold; -} -input[type="search"] { - 
-webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -input[type="radio"], -input[type="checkbox"] { - margin: 4px 0 0; - margin-top: 1px \9; - line-height: normal; -} -input[type="file"] { - display: block; -} -input[type="range"] { - display: block; - width: 100%; -} -select[multiple], -select[size] { - height: auto; -} -input[type="file"]:focus, -input[type="radio"]:focus, -input[type="checkbox"]:focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -output { - display: block; - padding-top: 9px; - font-size: 14px; - line-height: 1.42857143; - color: #272b30; -} -.form-control { - display: block; - width: 100%; - height: 38px; - padding: 8px 12px; - font-size: 14px; - line-height: 1.42857143; - color: #272b30; - background-color: #ffffff; - background-image: none; - border: 1px solid #000000; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; - -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; - transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; -} -.form-control:focus { - border-color: #66afe9; - outline: 0; - -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6); - box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6); -} -.form-control::-moz-placeholder { - color: #7a8288; - opacity: 1; -} -.form-control:-ms-input-placeholder { - color: #7a8288; -} -.form-control::-webkit-input-placeholder { - color: #7a8288; -} -.form-control::-ms-expand { - border: 0; - background-color: transparent; -} -.form-control[disabled], -.form-control[readonly], -fieldset[disabled] .form-control { - background-color: #999999; - opacity: 1; -} -.form-control[disabled], -fieldset[disabled] .form-control { - cursor: not-allowed; -} -textarea.form-control { - height: auto; -} -input[type="search"] { - -webkit-appearance: none; -} -@media screen and (-webkit-min-device-pixel-ratio: 0) { - input[type="date"].form-control, - input[type="time"].form-control, - input[type="datetime-local"].form-control, - input[type="month"].form-control { - line-height: 38px; - } - input[type="date"].input-sm, - input[type="time"].input-sm, - input[type="datetime-local"].input-sm, - input[type="month"].input-sm, - .input-group-sm input[type="date"], - .input-group-sm input[type="time"], - .input-group-sm input[type="datetime-local"], - .input-group-sm input[type="month"] { - line-height: 30px; - } - input[type="date"].input-lg, - input[type="time"].input-lg, - input[type="datetime-local"].input-lg, - input[type="month"].input-lg, - .input-group-lg input[type="date"], - .input-group-lg input[type="time"], - .input-group-lg input[type="datetime-local"], - .input-group-lg input[type="month"] { - line-height: 54px; - } -} -.form-group { - margin-bottom: 15px; -} -.radio, -.checkbox { - position: relative; - display: block; - margin-top: 10px; - margin-bottom: 10px; -} -.radio label, -.checkbox label { - min-height: 20px; - padding-left: 20px; - margin-bottom: 0; - font-weight: normal; - cursor: pointer; -} -.radio input[type="radio"], -.radio-inline input[type="radio"], -.checkbox input[type="checkbox"], -.checkbox-inline input[type="checkbox"] { - position: absolute; - margin-left: -20px; - margin-top: 4px \9; -} -.radio + .radio, -.checkbox + .checkbox { - margin-top: -5px; -} -.radio-inline, 
-.checkbox-inline { - position: relative; - display: inline-block; - padding-left: 20px; - margin-bottom: 0; - vertical-align: middle; - font-weight: normal; - cursor: pointer; -} -.radio-inline + .radio-inline, -.checkbox-inline + .checkbox-inline { - margin-top: 0; - margin-left: 10px; -} -input[type="radio"][disabled], -input[type="checkbox"][disabled], -input[type="radio"].disabled, -input[type="checkbox"].disabled, -fieldset[disabled] input[type="radio"], -fieldset[disabled] input[type="checkbox"] { - cursor: not-allowed; -} -.radio-inline.disabled, -.checkbox-inline.disabled, -fieldset[disabled] .radio-inline, -fieldset[disabled] .checkbox-inline { - cursor: not-allowed; -} -.radio.disabled label, -.checkbox.disabled label, -fieldset[disabled] .radio label, -fieldset[disabled] .checkbox label { - cursor: not-allowed; -} -.form-control-static { - padding-top: 9px; - padding-bottom: 9px; - margin-bottom: 0; - min-height: 34px; -} -.form-control-static.input-lg, -.form-control-static.input-sm { - padding-left: 0; - padding-right: 0; -} -.input-sm { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -select.input-sm { - height: 30px; - line-height: 30px; -} -textarea.input-sm, -select[multiple].input-sm { - height: auto; -} -.form-group-sm .form-control { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.form-group-sm select.form-control { - height: 30px; - line-height: 30px; -} -.form-group-sm textarea.form-control, -.form-group-sm select[multiple].form-control { - height: auto; -} -.form-group-sm .form-control-static { - height: 30px; - min-height: 32px; - padding: 6px 10px; - font-size: 12px; - line-height: 1.5; -} -.input-lg { - height: 54px; - padding: 14px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -select.input-lg { - height: 54px; - line-height: 54px; -} -textarea.input-lg, -select[multiple].input-lg { - height: auto; -} -.form-group-lg .form-control { - height: 54px; - padding: 14px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -.form-group-lg select.form-control { - height: 54px; - line-height: 54px; -} -.form-group-lg textarea.form-control, -.form-group-lg select[multiple].form-control { - height: auto; -} -.form-group-lg .form-control-static { - height: 54px; - min-height: 38px; - padding: 15px 16px; - font-size: 18px; - line-height: 1.3333333; -} -.has-feedback { - position: relative; -} -.has-feedback .form-control { - padding-right: 47.5px; -} -.form-control-feedback { - position: absolute; - top: 0; - right: 0; - z-index: 2; - display: block; - width: 38px; - height: 38px; - line-height: 38px; - text-align: center; - pointer-events: none; -} -.input-lg + .form-control-feedback, -.input-group-lg + .form-control-feedback, -.form-group-lg .form-control + .form-control-feedback { - width: 54px; - height: 54px; - line-height: 54px; -} -.input-sm + .form-control-feedback, -.input-group-sm + .form-control-feedback, -.form-group-sm .form-control + .form-control-feedback { - width: 30px; - height: 30px; - line-height: 30px; -} -.has-success .help-block, -.has-success .control-label, -.has-success .radio, -.has-success .checkbox, -.has-success .radio-inline, -.has-success .checkbox-inline, -.has-success.radio label, -.has-success.checkbox label, -.has-success.radio-inline label, -.has-success.checkbox-inline label { - color: #ffffff; -} -.has-success .form-control { - border-color: #ffffff; - -webkit-box-shadow: inset 
0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} -.has-success .form-control:focus { - border-color: #e6e6e6; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; -} -.has-success .input-group-addon { - color: #ffffff; - border-color: #ffffff; - background-color: #62c462; -} -.has-success .form-control-feedback { - color: #ffffff; -} -.has-warning .help-block, -.has-warning .control-label, -.has-warning .radio, -.has-warning .checkbox, -.has-warning .radio-inline, -.has-warning .checkbox-inline, -.has-warning.radio label, -.has-warning.checkbox label, -.has-warning.radio-inline label, -.has-warning.checkbox-inline label { - color: #ffffff; -} -.has-warning .form-control { - border-color: #ffffff; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} -.has-warning .form-control:focus { - border-color: #e6e6e6; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; -} -.has-warning .input-group-addon { - color: #ffffff; - border-color: #ffffff; - background-color: #f89406; -} -.has-warning .form-control-feedback { - color: #ffffff; -} -.has-error .help-block, -.has-error .control-label, -.has-error .radio, -.has-error .checkbox, -.has-error .radio-inline, -.has-error .checkbox-inline, -.has-error.radio label, -.has-error.checkbox label, -.has-error.radio-inline label, -.has-error.checkbox-inline label { - color: #ffffff; -} -.has-error .form-control { - border-color: #ffffff; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} -.has-error .form-control:focus { - border-color: #e6e6e6; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; -} -.has-error .input-group-addon { - color: #ffffff; - border-color: #ffffff; - background-color: #ee5f5b; -} -.has-error .form-control-feedback { - color: #ffffff; -} -.has-feedback label ~ .form-control-feedback { - top: 25px; -} -.has-feedback label.sr-only ~ .form-control-feedback { - top: 0; -} -.help-block { - display: block; - margin-top: 5px; - margin-bottom: 10px; - color: #ffffff; -} -@media (min-width: 768px) { - .form-inline .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .form-control { - display: inline-block; - width: auto; - vertical-align: middle; - } - .form-inline .form-control-static { - display: inline-block; - } - .form-inline .input-group { - display: inline-table; - vertical-align: middle; - } - .form-inline .input-group .input-group-addon, - .form-inline .input-group .input-group-btn, - .form-inline .input-group .form-control { - width: auto; - } - .form-inline .input-group > .form-control { - width: 100%; - } - .form-inline .control-label { - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .radio, - .form-inline .checkbox { - display: inline-block; - margin-top: 0; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .radio label, - .form-inline .checkbox label { - padding-left: 0; - } - .form-inline .radio input[type="radio"], - .form-inline .checkbox input[type="checkbox"] { - position: relative; - margin-left: 0; - } - .form-inline .has-feedback .form-control-feedback { - top: 0; - } -} -.form-horizontal .radio, 
-.form-horizontal .checkbox, -.form-horizontal .radio-inline, -.form-horizontal .checkbox-inline { - margin-top: 0; - margin-bottom: 0; - padding-top: 9px; -} -.form-horizontal .radio, -.form-horizontal .checkbox { - min-height: 29px; -} -.form-horizontal .form-group { - margin-left: -15px; - margin-right: -15px; -} -@media (min-width: 768px) { - .form-horizontal .control-label { - text-align: right; - margin-bottom: 0; - padding-top: 9px; - } -} -.form-horizontal .has-feedback .form-control-feedback { - right: 15px; -} -@media (min-width: 768px) { - .form-horizontal .form-group-lg .control-label { - padding-top: 15px; - font-size: 18px; - } -} -@media (min-width: 768px) { - .form-horizontal .form-group-sm .control-label { - padding-top: 6px; - font-size: 12px; - } -} -.btn { - display: inline-block; - margin-bottom: 0; - font-weight: normal; - text-align: center; - vertical-align: middle; - -ms-touch-action: manipulation; - touch-action: manipulation; - cursor: pointer; - background-image: none; - border: 0px solid transparent; - white-space: nowrap; - padding: 8px 12px; - font-size: 14px; - line-height: 1.42857143; - border-radius: 4px; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} -.btn:focus, -.btn:active:focus, -.btn.active:focus, -.btn.focus, -.btn:active.focus, -.btn.active.focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -.btn:hover, -.btn:focus, -.btn.focus { - color: #ffffff; - text-decoration: none; -} -.btn:active, -.btn.active { - outline: 0; - background-image: none; - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} -.btn.disabled, -.btn[disabled], -fieldset[disabled] .btn { - cursor: not-allowed; - opacity: 0.65; - filter: alpha(opacity=65); - -webkit-box-shadow: none; - box-shadow: none; -} -a.btn.disabled, -fieldset[disabled] a.btn { - pointer-events: none; -} -.btn-default { - color: #ffffff; - background-color: #3a3f44; - border-color: #3a3f44; -} -.btn-default:focus, -.btn-default.focus { - color: #ffffff; - background-color: #232628; - border-color: #000000; -} -.btn-default:hover { - color: #ffffff; - background-color: #232628; - border-color: #1e2023; -} -.btn-default:active, -.btn-default.active, -.open > .dropdown-toggle.btn-default { - color: #ffffff; - background-color: #232628; - border-color: #1e2023; -} -.btn-default:active:hover, -.btn-default.active:hover, -.open > .dropdown-toggle.btn-default:hover, -.btn-default:active:focus, -.btn-default.active:focus, -.open > .dropdown-toggle.btn-default:focus, -.btn-default:active.focus, -.btn-default.active.focus, -.open > .dropdown-toggle.btn-default.focus { - color: #ffffff; - background-color: #121415; - border-color: #000000; -} -.btn-default:active, -.btn-default.active, -.open > .dropdown-toggle.btn-default { - background-image: none; -} -.btn-default.disabled:hover, -.btn-default[disabled]:hover, -fieldset[disabled] .btn-default:hover, -.btn-default.disabled:focus, -.btn-default[disabled]:focus, -fieldset[disabled] .btn-default:focus, -.btn-default.disabled.focus, -.btn-default[disabled].focus, -fieldset[disabled] .btn-default.focus { - background-color: #3a3f44; - border-color: #3a3f44; -} -.btn-default .badge { - color: #3a3f44; - background-color: #ffffff; -} -.btn-primary { - color: #ffffff; - background-color: #7a8288; - border-color: #7a8288; -} -.btn-primary:focus, -.btn-primary.focus { - color: #ffffff; - background-color: #62686d; - border-color: 
#3e4245; -} -.btn-primary:hover { - color: #ffffff; - background-color: #62686d; - border-color: #5d6368; -} -.btn-primary:active, -.btn-primary.active, -.open > .dropdown-toggle.btn-primary { - color: #ffffff; - background-color: #62686d; - border-color: #5d6368; -} -.btn-primary:active:hover, -.btn-primary.active:hover, -.open > .dropdown-toggle.btn-primary:hover, -.btn-primary:active:focus, -.btn-primary.active:focus, -.open > .dropdown-toggle.btn-primary:focus, -.btn-primary:active.focus, -.btn-primary.active.focus, -.open > .dropdown-toggle.btn-primary.focus { - color: #ffffff; - background-color: #51565a; - border-color: #3e4245; -} -.btn-primary:active, -.btn-primary.active, -.open > .dropdown-toggle.btn-primary { - background-image: none; -} -.btn-primary.disabled:hover, -.btn-primary[disabled]:hover, -fieldset[disabled] .btn-primary:hover, -.btn-primary.disabled:focus, -.btn-primary[disabled]:focus, -fieldset[disabled] .btn-primary:focus, -.btn-primary.disabled.focus, -.btn-primary[disabled].focus, -fieldset[disabled] .btn-primary.focus { - background-color: #7a8288; - border-color: #7a8288; -} -.btn-primary .badge { - color: #7a8288; - background-color: #ffffff; -} -.btn-success { - color: #ffffff; - background-color: #62c462; - border-color: #62c462; -} -.btn-success:focus, -.btn-success.focus { - color: #ffffff; - background-color: #42b142; - border-color: #2d792d; -} -.btn-success:hover { - color: #ffffff; - background-color: #42b142; - border-color: #40a940; -} -.btn-success:active, -.btn-success.active, -.open > .dropdown-toggle.btn-success { - color: #ffffff; - background-color: #42b142; - border-color: #40a940; -} -.btn-success:active:hover, -.btn-success.active:hover, -.open > .dropdown-toggle.btn-success:hover, -.btn-success:active:focus, -.btn-success.active:focus, -.open > .dropdown-toggle.btn-success:focus, -.btn-success:active.focus, -.btn-success.active.focus, -.open > .dropdown-toggle.btn-success.focus { - color: #ffffff; - background-color: #399739; - border-color: #2d792d; -} -.btn-success:active, -.btn-success.active, -.open > .dropdown-toggle.btn-success { - background-image: none; -} -.btn-success.disabled:hover, -.btn-success[disabled]:hover, -fieldset[disabled] .btn-success:hover, -.btn-success.disabled:focus, -.btn-success[disabled]:focus, -fieldset[disabled] .btn-success:focus, -.btn-success.disabled.focus, -.btn-success[disabled].focus, -fieldset[disabled] .btn-success.focus { - background-color: #62c462; - border-color: #62c462; -} -.btn-success .badge { - color: #62c462; - background-color: #ffffff; -} -.btn-info { - color: #ffffff; - background-color: #5bc0de; - border-color: #5bc0de; -} -.btn-info:focus, -.btn-info.focus { - color: #ffffff; - background-color: #31b0d5; - border-color: #1f7e9a; -} -.btn-info:hover { - color: #ffffff; - background-color: #31b0d5; - border-color: #2aabd2; -} -.btn-info:active, -.btn-info.active, -.open > .dropdown-toggle.btn-info { - color: #ffffff; - background-color: #31b0d5; - border-color: #2aabd2; -} -.btn-info:active:hover, -.btn-info.active:hover, -.open > .dropdown-toggle.btn-info:hover, -.btn-info:active:focus, -.btn-info.active:focus, -.open > .dropdown-toggle.btn-info:focus, -.btn-info:active.focus, -.btn-info.active.focus, -.open > .dropdown-toggle.btn-info.focus { - color: #ffffff; - background-color: #269abc; - border-color: #1f7e9a; -} -.btn-info:active, -.btn-info.active, -.open > .dropdown-toggle.btn-info { - background-image: none; -} -.btn-info.disabled:hover, -.btn-info[disabled]:hover, 
-fieldset[disabled] .btn-info:hover, -.btn-info.disabled:focus, -.btn-info[disabled]:focus, -fieldset[disabled] .btn-info:focus, -.btn-info.disabled.focus, -.btn-info[disabled].focus, -fieldset[disabled] .btn-info.focus { - background-color: #5bc0de; - border-color: #5bc0de; -} -.btn-info .badge { - color: #5bc0de; - background-color: #ffffff; -} -.btn-warning { - color: #ffffff; - background-color: #f89406; - border-color: #f89406; -} -.btn-warning:focus, -.btn-warning.focus { - color: #ffffff; - background-color: #c67605; - border-color: #7c4a03; -} -.btn-warning:hover { - color: #ffffff; - background-color: #c67605; - border-color: #bc7005; -} -.btn-warning:active, -.btn-warning.active, -.open > .dropdown-toggle.btn-warning { - color: #ffffff; - background-color: #c67605; - border-color: #bc7005; -} -.btn-warning:active:hover, -.btn-warning.active:hover, -.open > .dropdown-toggle.btn-warning:hover, -.btn-warning:active:focus, -.btn-warning.active:focus, -.open > .dropdown-toggle.btn-warning:focus, -.btn-warning:active.focus, -.btn-warning.active.focus, -.open > .dropdown-toggle.btn-warning.focus { - color: #ffffff; - background-color: #a36104; - border-color: #7c4a03; -} -.btn-warning:active, -.btn-warning.active, -.open > .dropdown-toggle.btn-warning { - background-image: none; -} -.btn-warning.disabled:hover, -.btn-warning[disabled]:hover, -fieldset[disabled] .btn-warning:hover, -.btn-warning.disabled:focus, -.btn-warning[disabled]:focus, -fieldset[disabled] .btn-warning:focus, -.btn-warning.disabled.focus, -.btn-warning[disabled].focus, -fieldset[disabled] .btn-warning.focus { - background-color: #f89406; - border-color: #f89406; -} -.btn-warning .badge { - color: #f89406; - background-color: #ffffff; -} -.btn-danger { - color: #ffffff; - background-color: #ee5f5b; - border-color: #ee5f5b; -} -.btn-danger:focus, -.btn-danger.focus { - color: #ffffff; - background-color: #e9322d; - border-color: #b71713; -} -.btn-danger:hover { - color: #ffffff; - background-color: #e9322d; - border-color: #e82924; -} -.btn-danger:active, -.btn-danger.active, -.open > .dropdown-toggle.btn-danger { - color: #ffffff; - background-color: #e9322d; - border-color: #e82924; -} -.btn-danger:active:hover, -.btn-danger.active:hover, -.open > .dropdown-toggle.btn-danger:hover, -.btn-danger:active:focus, -.btn-danger.active:focus, -.open > .dropdown-toggle.btn-danger:focus, -.btn-danger:active.focus, -.btn-danger.active.focus, -.open > .dropdown-toggle.btn-danger.focus { - color: #ffffff; - background-color: #dc1c17; - border-color: #b71713; -} -.btn-danger:active, -.btn-danger.active, -.open > .dropdown-toggle.btn-danger { - background-image: none; -} -.btn-danger.disabled:hover, -.btn-danger[disabled]:hover, -fieldset[disabled] .btn-danger:hover, -.btn-danger.disabled:focus, -.btn-danger[disabled]:focus, -fieldset[disabled] .btn-danger:focus, -.btn-danger.disabled.focus, -.btn-danger[disabled].focus, -fieldset[disabled] .btn-danger.focus { - background-color: #ee5f5b; - border-color: #ee5f5b; -} -.btn-danger .badge { - color: #ee5f5b; - background-color: #ffffff; -} -.btn-link { - color: #ffffff; - font-weight: normal; - border-radius: 0; -} -.btn-link, -.btn-link:active, -.btn-link.active, -.btn-link[disabled], -fieldset[disabled] .btn-link { - background-color: transparent; - -webkit-box-shadow: none; - box-shadow: none; -} -.btn-link, -.btn-link:hover, -.btn-link:focus, -.btn-link:active { - border-color: transparent; -} -.btn-link:hover, -.btn-link:focus { - color: #ffffff; - text-decoration: underline; - 
background-color: transparent; -} -.btn-link[disabled]:hover, -fieldset[disabled] .btn-link:hover, -.btn-link[disabled]:focus, -fieldset[disabled] .btn-link:focus { - color: #7a8288; - text-decoration: none; -} -.btn-lg, -.btn-group-lg > .btn { - padding: 14px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -.btn-sm, -.btn-group-sm > .btn { - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn-xs, -.btn-group-xs > .btn { - padding: 1px 5px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn-block { - display: block; - width: 100%; -} -.btn-block + .btn-block { - margin-top: 5px; -} -input[type="submit"].btn-block, -input[type="reset"].btn-block, -input[type="button"].btn-block { - width: 100%; -} -.fade { - opacity: 0; - -webkit-transition: opacity 0.15s linear; - -o-transition: opacity 0.15s linear; - transition: opacity 0.15s linear; -} -.fade.in { - opacity: 1; -} -.collapse { - display: none; -} -.collapse.in { - display: block; -} -tr.collapse.in { - display: table-row; -} -tbody.collapse.in { - display: table-row-group; -} -.collapsing { - position: relative; - height: 0; - overflow: hidden; - -webkit-transition-property: height, visibility; - -o-transition-property: height, visibility; - transition-property: height, visibility; - -webkit-transition-duration: 0.35s; - -o-transition-duration: 0.35s; - transition-duration: 0.35s; - -webkit-transition-timing-function: ease; - -o-transition-timing-function: ease; - transition-timing-function: ease; -} -.caret { - display: inline-block; - width: 0; - height: 0; - margin-left: 2px; - vertical-align: middle; - border-top: 4px dashed; - border-top: 4px solid \9; - border-right: 4px solid transparent; - border-left: 4px solid transparent; -} -.dropup, -.dropdown { - position: relative; -} -.dropdown-toggle:focus { - outline: 0; -} -.dropdown-menu { - position: absolute; - top: 100%; - left: 0; - z-index: 1000; - display: none; - float: left; - min-width: 160px; - padding: 5px 0; - margin: 2px 0 0; - list-style: none; - font-size: 14px; - text-align: left; - background-color: #3a3f44; - border: 1px solid #272b30; - border: 1px solid rgba(0, 0, 0, 0.15); - border-radius: 4px; - -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175); - box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175); - -webkit-background-clip: padding-box; - background-clip: padding-box; -} -.dropdown-menu.pull-right { - right: 0; - left: auto; -} -.dropdown-menu .divider { - height: 1px; - margin: 9px 0; - overflow: hidden; - background-color: #272b30; -} -.dropdown-menu > li > a { - display: block; - padding: 3px 20px; - clear: both; - font-weight: normal; - line-height: 1.42857143; - color: #c8c8c8; - white-space: nowrap; -} -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus { - text-decoration: none; - color: #ffffff; - background-color: #272b30; -} -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - color: #ffffff; - text-decoration: none; - outline: 0; - background-color: #272b30; -} -.dropdown-menu > .disabled > a, -.dropdown-menu > .disabled > a:hover, -.dropdown-menu > .disabled > a:focus { - color: #7a8288; -} -.dropdown-menu > .disabled > a:hover, -.dropdown-menu > .disabled > a:focus { - text-decoration: none; - background-color: transparent; - background-image: none; - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - cursor: not-allowed; -} -.open > .dropdown-menu { - display: block; -} -.open > a { - 
outline: 0; -} -.dropdown-menu-right { - left: auto; - right: 0; -} -.dropdown-menu-left { - left: 0; - right: auto; -} -.dropdown-header { - display: block; - padding: 3px 20px; - font-size: 12px; - line-height: 1.42857143; - color: #7a8288; - white-space: nowrap; -} -.dropdown-backdrop { - position: fixed; - left: 0; - right: 0; - bottom: 0; - top: 0; - z-index: 990; -} -.pull-right > .dropdown-menu { - right: 0; - left: auto; -} -.dropup .caret, -.navbar-fixed-bottom .dropdown .caret { - border-top: 0; - border-bottom: 4px dashed; - border-bottom: 4px solid \9; - content: ""; -} -.dropup .dropdown-menu, -.navbar-fixed-bottom .dropdown .dropdown-menu { - top: auto; - bottom: 100%; - margin-bottom: 2px; -} -@media (min-width: 768px) { - .navbar-right .dropdown-menu { - left: auto; - right: 0; - } - .navbar-right .dropdown-menu-left { - left: 0; - right: auto; - } -} -.btn-group, -.btn-group-vertical { - position: relative; - display: inline-block; - vertical-align: middle; -} -.btn-group > .btn, -.btn-group-vertical > .btn { - position: relative; - float: left; -} -.btn-group > .btn:hover, -.btn-group-vertical > .btn:hover, -.btn-group > .btn:focus, -.btn-group-vertical > .btn:focus, -.btn-group > .btn:active, -.btn-group-vertical > .btn:active, -.btn-group > .btn.active, -.btn-group-vertical > .btn.active { - z-index: 2; -} -.btn-group .btn + .btn, -.btn-group .btn + .btn-group, -.btn-group .btn-group + .btn, -.btn-group .btn-group + .btn-group { - margin-left: -1px; -} -.btn-toolbar { - margin-left: -5px; -} -.btn-toolbar .btn, -.btn-toolbar .btn-group, -.btn-toolbar .input-group { - float: left; -} -.btn-toolbar > .btn, -.btn-toolbar > .btn-group, -.btn-toolbar > .input-group { - margin-left: 5px; -} -.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { - border-radius: 0; -} -.btn-group > .btn:first-child { - margin-left: 0; -} -.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { - border-bottom-right-radius: 0; - border-top-right-radius: 0; -} -.btn-group > .btn:last-child:not(:first-child), -.btn-group > .dropdown-toggle:not(:first-child) { - border-bottom-left-radius: 0; - border-top-left-radius: 0; -} -.btn-group > .btn-group { - float: left; -} -.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { - border-radius: 0; -} -.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child, -.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle { - border-bottom-right-radius: 0; - border-top-right-radius: 0; -} -.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child { - border-bottom-left-radius: 0; - border-top-left-radius: 0; -} -.btn-group .dropdown-toggle:active, -.btn-group.open .dropdown-toggle { - outline: 0; -} -.btn-group > .btn + .dropdown-toggle { - padding-left: 8px; - padding-right: 8px; -} -.btn-group > .btn-lg + .dropdown-toggle { - padding-left: 12px; - padding-right: 12px; -} -.btn-group.open .dropdown-toggle { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} -.btn-group.open .dropdown-toggle.btn-link { - -webkit-box-shadow: none; - box-shadow: none; -} -.btn .caret { - margin-left: 0; -} -.btn-lg .caret { - border-width: 5px 5px 0; - border-bottom-width: 0; -} -.dropup .btn-lg .caret { - border-width: 0 5px 5px; -} -.btn-group-vertical > .btn, -.btn-group-vertical > .btn-group, -.btn-group-vertical > .btn-group > .btn { - display: block; - float: none; - width: 100%; - max-width: 100%; -} 
[Deletion hunk, flattened during extraction: removes the vendored Bootstrap 3 "Slate" theme stylesheet. The span covered here deletes the theme's component rules for button groups (vertical and justified), input groups and add-ons, navs (tabs, pills, justified), the navbar (default and inverse variants), breadcrumbs, pagination and pager, labels, badges, the jumbotron, thumbnails, alerts, progress-bar stripes and animations, media objects, list groups, panels and panel tables, responsive embeds, wells, the close button, modals, tooltips, popovers, the carousel, clearfix helpers, and the responsive visibility utilities; the span cuts off mid-rule inside the @media print block at th.visible-print/td.visible-print.]
display: table-cell !important; - } -} -.visible-print-block { - display: none !important; -} -@media print { - .visible-print-block { - display: block !important; - } -} -.visible-print-inline { - display: none !important; -} -@media print { - .visible-print-inline { - display: inline !important; - } -} -.visible-print-inline-block { - display: none !important; -} -@media print { - .visible-print-inline-block { - display: inline-block !important; - } -} -@media print { - .hidden-print { - display: none !important; - } -} -.navbar-default, -.navbar-inverse { - border: 1px solid rgba(0, 0, 0, 0.6); - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); -} -@media (min-width: 768px) { - .navbar-default .navbar-nav > li > a, - .navbar-inverse .navbar-nav > li > a { - border-right: 1px solid rgba(0, 0, 0, 0.2); - border-left: 1px solid rgba(255, 255, 255, 0.1); - } - .navbar-default .navbar-nav > li > a:hover, - .navbar-inverse .navbar-nav > li > a:hover { - border-left-color: transparent; - } - .navbar-default .nav .open > a, - .navbar-inverse .nav .open > a { - border-color: transparent; - } - .navbar-default .navbar-nav > li.active > a, - .navbar-inverse .navbar-nav > li.active > a { - border-left-color: transparent; - } - .navbar-default .navbar-form, - .navbar-inverse .navbar-form { - margin-left: 5px; - margin-right: 5px; - } -} -.navbar-default { - -webkit-filter: none; - filter: none; -} -.navbar-default .navbar-nav > li > a:hover { - -webkit-filter: none; - filter: none; -} -.navbar-inverse { - -webkit-filter: none; - filter: none; -} -.navbar-inverse .badge { - background-color: #5d6368; -} -.navbar-inverse .navbar-nav > li > a:hover { - -webkit-filter: none; - filter: none; -} -.btn, -.btn:hover { - border-color: rgba(0, 0, 0, 0.6); - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); -} -.btn-default { - -webkit-filter: none; - filter: none; -} -.btn-default:hover { - -webkit-filter: none; - filter: none; -} -.btn-primary { - -webkit-filter: none; - filter: none; -} -.btn-primary:hover { - -webkit-filter: none; - filter: none; -} -.btn-success { - -webkit-filter: none; - filter: none; -} -.btn-success:hover { - -webkit-filter: none; - filter: none; -} -.btn-info { - -webkit-filter: none; - filter: none; -} -.btn-info:hover { - -webkit-filter: none; - filter: none; -} -.btn-warning { - -webkit-filter: none; - filter: none; -} -.btn-warning:hover { - -webkit-filter: none; - filter: none; -} -.btn-danger { - -webkit-filter: none; - filter: none; -} -.btn-danger:hover { - -webkit-filter: none; - filter: none; -} -.btn-link, -.btn-link:hover { - border-color: transparent; -} -h1, -h2, -h3, -h4, -h5, -h6 { - text-shadow: -1px -1px 0 rgba(0, 0, 0, 0.3); -} -.text-primary, -.text-primary:hover { - color: #7a8288; -} -.text-success, -.text-success:hover { - color: #62c462; -} -.text-danger, -.text-danger:hover { - color: #ee5f5b; -} -.text-warning, -.text-warning:hover { - color: #f89406; -} -.text-info, -.text-info:hover { - color: #5bc0de; -} -.table .success, -.table .warning, -.table .danger, -.table .info { - color: #fff; -} -.table-bordered tbody tr.success td, -.table-bordered tbody tr.warning td, -.table-bordered tbody tr.danger td, -.table-bordered tbody tr.success:hover td, -.table-bordered tbody tr.warning:hover td, -.table-bordered tbody tr.danger:hover td { - border-color: #1c1e22; -} -.table-responsive > .table { - background-color: #2e3338; -} -input, -textarea { - color: #272b30; -} -.has-warning .help-block, -.has-warning .control-label, -.has-warning .radio, -.has-warning .checkbox, 
-.has-warning .radio-inline, -.has-warning .checkbox-inline, -.has-warning.radio label, -.has-warning.checkbox label, -.has-warning.radio-inline label, -.has-warning.checkbox-inline label, -.has-warning .form-control-feedback { - color: #f89406; -} -.has-warning .form-control, -.has-warning .form-control:focus { - border-color: #f89406; -} -.has-warning .input-group-addon { - border-color: rgba(0, 0, 0, 0.6); -} -.has-error .help-block, -.has-error .control-label, -.has-error .radio, -.has-error .checkbox, -.has-error .radio-inline, -.has-error .checkbox-inline, -.has-error.radio label, -.has-error.checkbox label, -.has-error.radio-inline label, -.has-error.checkbox-inline label, -.has-error .form-control-feedback { - color: #ee5f5b; -} -.has-error .form-control, -.has-error .form-control:focus { - border-color: #ee5f5b; -} -.has-error .input-group-addon { - border-color: rgba(0, 0, 0, 0.6); -} -.has-success .help-block, -.has-success .control-label, -.has-success .radio, -.has-success .checkbox, -.has-success .radio-inline, -.has-success .checkbox-inline, -.has-success.radio label, -.has-success.checkbox label, -.has-success.radio-inline label, -.has-success.checkbox-inline label, -.has-success .form-control-feedback { - color: #62c462; -} -.has-success .form-control, -.has-success .form-control:focus { - border-color: #62c462; -} -.has-success .input-group-addon { - border-color: rgba(0, 0, 0, 0.6); -} -legend { - color: #fff; -} -.input-group-addon { - -webkit-filter: none; - filter: none; - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); - color: #ffffff; -} -.nav .open > a, -.nav .open > a:hover, -.nav .open > a:focus { - border-color: rgba(0, 0, 0, 0.6); -} -.nav-pills > li > a { - -webkit-filter: none; - filter: none; - border: 1px solid rgba(0, 0, 0, 0.6); - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); -} -.nav-pills > li > a:hover { - -webkit-filter: none; - filter: none; - border: 1px solid rgba(0, 0, 0, 0.6); -} -.nav-pills > li.active > a, -.nav-pills > li.active > a:hover { - background-color: none; - -webkit-filter: none; - filter: none; - border: 1px solid rgba(0, 0, 0, 0.6); -} -.nav-pills > li.disabled > a, -.nav-pills > li.disabled > a:hover { - -webkit-filter: none; - filter: none; -} -.pagination > li > a, -.pagination > li > span { - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); - -webkit-filter: none; - filter: none; -} -.pagination > li > a:hover, -.pagination > li > span:hover { - -webkit-filter: none; - filter: none; -} -.pagination > li.active > a, -.pagination > li.active > span { - -webkit-filter: none; - filter: none; -} -.pagination > li.disabled > a, -.pagination > li.disabled > a:hover, -.pagination > li.disabled > span, -.pagination > li.disabled > span:hover { - -webkit-filter: none; - filter: none; -} -.pager > li > a { - -webkit-filter: none; - filter: none; - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); -} -.pager > li > a:hover { - -webkit-filter: none; - filter: none; -} -.pager > li.disabled > a, -.pager > li.disabled > a:hover { - -webkit-filter: none; - filter: none; -} -.breadcrumb { - border: 1px solid rgba(0, 0, 0, 0.6); - text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); - -webkit-filter: none; - filter: none; -} -.alert .alert-link, -.alert a { - color: #fff; - text-decoration: underline; -} -.alert .close { - color: #000000; - text-decoration: none; -} -a.thumbnail:hover, -a.thumbnail:focus, -a.thumbnail.active { - border-color: #0c0d0e; -} -a.list-group-item.active, -a.list-group-item.active:hover, -a.list-group-item.active:focus { - border-color: 
rgba(0, 0, 0, 0.6); -} -a.list-group-item-success.active { - background-color: #62c462; -} -a.list-group-item-success.active:hover, -a.list-group-item-success.active:focus { - background-color: #4fbd4f; -} -a.list-group-item-warning.active { - background-color: #f89406; -} -a.list-group-item-warning.active:hover, -a.list-group-item-warning.active:focus { - background-color: #df8505; -} -a.list-group-item-danger.active { - background-color: #ee5f5b; -} -a.list-group-item-danger.active:hover, -a.list-group-item-danger.active:focus { - background-color: #ec4844; -} -.jumbotron { - border: 1px solid rgba(0, 0, 0, 0.6); -} -.panel-primary .panel-heading, -.panel-success .panel-heading, -.panel-danger .panel-heading, -.panel-warning .panel-heading, -.panel-info .panel-heading { - border-color: #000; -} diff --git a/web/css/bootstrap-slider-10.0.0.min.css b/web/css/bootstrap-slider-10.0.0.min.css deleted file mode 100644 index 1cf68b5e7..000000000 --- a/web/css/bootstrap-slider-10.0.0.min.css +++ /dev/null @@ -1,41 +0,0 @@ -/*! ======================================================= - VERSION 10.0.0 -========================================================= */ -/*! ========================================================= - * bootstrap-slider.js - * - * Maintainers: - * Kyle Kemp - * - Twitter: @seiyria - * - Github: seiyria - * Rohit Kalkur - * - Twitter: @Rovolutionary - * - Github: rovolution - * - * ========================================================= - * - * bootstrap-slider is released under the MIT License - * Copyright (c) 2017 Kyle Kemp, Rohit Kalkur, and contributors - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * ========================================================= */.slider{display:inline-block;vertical-align:middle;position:relative}.slider.slider-horizontal{width:210px;height:20px}.slider.slider-horizontal .slider-track{height:10px;width:100%;margin-top:-5px;top:50%;left:0}.slider.slider-horizontal .slider-selection,.slider.slider-horizontal .slider-track-low,.slider.slider-horizontal .slider-track-high{height:100%;top:0;bottom:0}.slider.slider-horizontal .slider-tick,.slider.slider-horizontal .slider-handle{margin-left:-10px}.slider.slider-horizontal .slider-tick.triangle,.slider.slider-horizontal .slider-handle.triangle{position:relative;top:50%;-ms-transform:translateY(-50%);transform:translateY(-50%);border-width:0 10px 10px 10px;width:0;height:0;border-bottom-color:#2e6da4;margin-top:0}.slider.slider-horizontal .slider-tick-container{white-space:nowrap;position:absolute;top:0;left:0;width:100%}.slider.slider-horizontal .slider-tick-label-container{white-space:nowrap;margin-top:20px}.slider.slider-horizontal .slider-tick-label-container .slider-tick-label{padding-top:4px;display:inline-block;text-align:center}.slider.slider-horizontal .tooltip{-ms-transform:translateX(-50%);transform:translateX(-50%)}.slider.slider-horizontal.slider-rtl .slider-track{left:initial;right:0}.slider.slider-horizontal.slider-rtl .slider-tick,.slider.slider-horizontal.slider-rtl .slider-handle{margin-left:initial;margin-right:-10px}.slider.slider-horizontal.slider-rtl .slider-tick-container{left:initial;right:0}.slider.slider-horizontal.slider-rtl .tooltip{-ms-transform:translateX(50%);transform:translateX(50%)}.slider.slider-vertical{height:210px;width:20px}.slider.slider-vertical .slider-track{width:10px;height:100%;left:25%;top:0}.slider.slider-vertical .slider-selection{width:100%;left:0;top:0;bottom:0}.slider.slider-vertical .slider-track-low,.slider.slider-vertical .slider-track-high{width:100%;left:0;right:0}.slider.slider-vertical .slider-tick,.slider.slider-vertical .slider-handle{margin-top:-10px}.slider.slider-vertical .slider-tick.triangle,.slider.slider-vertical .slider-handle.triangle{border-width:10px 0 10px 10px;width:1px;height:1px;border-left-color:#2e6da4;border-right-color:#2e6da4;margin-left:0;margin-right:0}.slider.slider-vertical .slider-tick-label-container{white-space:nowrap}.slider.slider-vertical .slider-tick-label-container .slider-tick-label{padding-left:4px}.slider.slider-vertical .tooltip{-ms-transform:translateY(-50%);transform:translateY(-50%)}.slider.slider-vertical.slider-rtl .slider-track{left:initial;right:25%}.slider.slider-vertical.slider-rtl .slider-selection{left:initial;right:0}.slider.slider-vertical.slider-rtl .slider-tick.triangle,.slider.slider-vertical.slider-rtl .slider-handle.triangle{border-width:10px 10px 10px 0}.slider.slider-vertical.slider-rtl .slider-tick-label-container .slider-tick-label{padding-left:initial;padding-right:4px}.slider.slider-disabled .slider-handle{background-image:-webkit-linear-gradient(top,#dfdfdf 0,#bebebe 100%);background-image:-o-linear-gradient(top,#dfdfdf 0,#bebebe 100%);background-image:linear-gradient(to bottom,#dfdfdf 0,#bebebe 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdfdfdf',endColorstr='#ffbebebe',GradientType=0)}.slider.slider-disabled .slider-track{background-image:-webkit-linear-gradient(top,#e5e5e5 0,#e9e9e9 100%);background-image:-o-linear-gradient(top,#e5e5e5 0,#e9e9e9 100%);background-image:linear-gradient(to bottom,#e5e5e5 0,#e9e9e9 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe5e5e5',endColorstr='#ffe9e9e9',GradientType=0);cursor:not-allowed}.slider input{display:none}.slider .tooltip.top{margin-top:-36px}.slider .tooltip-inner{white-space:nowrap;max-width:none}.slider .hide{display:none}.slider-track{position:absolute;cursor:pointer;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#f9f9f9 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#f9f9f9 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#f9f9f9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);border-radius:4px}.slider-selection{position:absolute;background-image:-webkit-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#f9f9f9 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;border-radius:4px}.slider-selection.tick-slider-selection{background-image:-webkit-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:-o-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:linear-gradient(to bottom,#8ac1ef 0,#82b3de 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff8ac1ef',endColorstr='#ff82b3de',GradientType=0)}.slider-track-low,.slider-track-high{position:absolute;background:transparent;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;border-radius:4px}.slider-handle{position:absolute;top:0;width:20px;height:20px;background-color:#337ab7;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7',endColorstr='#ff2e6da4',GradientType=0);filter:none;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05);border:0 solid transparent}.slider-handle.round{border-radius:50%}.slider-handle.triangle{background:transparent none}.slider-handle.custom{background:transparent none}.slider-handle.custom::before{line-height:20px;font-size:20px;content:'\2605';color:#726204}.slider-tick{position:absolute;width:20px;height:20px;background-image:-webkit-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#f9f9f9 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;filter:none;opacity:.8;border:0 solid transparent}.slider-tick.round{border-radius:50%}.slider-tick.triangle{background:transparent none}.slider-tick.custom{background:transparent 
none}.slider-tick.custom::before{line-height:20px;font-size:20px;content:'\2605';color:#726204}.slider-tick.in-selection{background-image:-webkit-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:-o-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:linear-gradient(to bottom,#8ac1ef 0,#82b3de 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff8ac1ef',endColorstr='#ff82b3de',GradientType=0);opacity:1} \ No newline at end of file diff --git a/web/css/bootstrap-theme-3.3.7.min.css b/web/css/bootstrap-theme-3.3.7.min.css deleted file mode 100644 index 5e3940195..000000000 --- a/web/css/bootstrap-theme-3.3.7.min.css +++ /dev/null @@ -1,6 +0,0 @@ -/*! - * Bootstrap v3.3.7 (http://getbootstrap.com) - * Copyright 2011-2016 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] 
.btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 -15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 
-15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 
0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar 
.navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', 
GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left 
bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} -/*# sourceMappingURL=bootstrap-theme.min.css.map */ \ No newline at end of file diff --git a/web/css/bootstrap-toggle-2.2.2.min.css b/web/css/bootstrap-toggle-2.2.2.min.css deleted file mode 100644 index 0d42ed09c..000000000 --- a/web/css/bootstrap-toggle-2.2.2.min.css +++ /dev/null @@ -1,28 +0,0 @@ -/*! 
======================================================================== - * Bootstrap Toggle: bootstrap-toggle.css v2.2.0 - * http://www.bootstraptoggle.com - * ======================================================================== - * Copyright 2014 Min Hur, The New York Times Company - * Licensed under MIT - * ======================================================================== */ -.checkbox label .toggle,.checkbox-inline .toggle{margin-left:-20px;margin-right:5px} -.toggle{position:relative;overflow:hidden} -.toggle input[type=checkbox]{display:none} -.toggle-group{position:absolute;width:200%;top:0;bottom:0;left:0;transition:left .35s;-webkit-transition:left .35s;-moz-user-select:none;-webkit-user-select:none} -.toggle.off .toggle-group{left:-100%} -.toggle-on{position:absolute;top:0;bottom:0;left:0;right:50%;margin:0;border:0;border-radius:0} -.toggle-off{position:absolute;top:0;bottom:0;left:50%;right:0;margin:0;border:0;border-radius:0} -.toggle-handle{position:relative;margin:0 auto;padding-top:0;padding-bottom:0;height:100%;width:0;border-width:0 1px} -.toggle.btn{min-width:59px;min-height:34px} -.toggle-on.btn{padding-right:24px} -.toggle-off.btn{padding-left:24px} -.toggle.btn-lg{min-width:79px;min-height:45px} -.toggle-on.btn-lg{padding-right:31px} -.toggle-off.btn-lg{padding-left:31px} -.toggle-handle.btn-lg{width:40px} -.toggle.btn-sm{min-width:50px;min-height:30px} -.toggle-on.btn-sm{padding-right:20px} -.toggle-off.btn-sm{padding-left:20px} -.toggle.btn-xs{min-width:35px;min-height:22px} -.toggle-on.btn-xs{padding-right:12px} -.toggle-off.btn-xs{padding-left:12px} \ No newline at end of file diff --git a/web/css/c3-0.4.18.min.css b/web/css/c3-0.4.18.min.css deleted file mode 100644 index 61ae63a03..000000000 --- a/web/css/c3-0.4.18.min.css +++ /dev/null @@ -1 +0,0 @@ -.c3 svg{font:10px sans-serif;-webkit-tap-highlight-color:transparent}.c3 line,.c3 path{fill:none;stroke:#000}.c3 text{-webkit-user-select:none;-moz-user-select:none;user-select:none}.c3-bars path,.c3-event-rect,.c3-legend-item-tile,.c3-xgrid-focus,.c3-ygrid{shape-rendering:crispEdges}.c3-chart-arc path{stroke:#fff}.c3-chart-arc text{fill:#fff;font-size:13px}.c3-grid line{stroke:#aaa}.c3-grid text{fill:#aaa}.c3-xgrid,.c3-ygrid{stroke-dasharray:3 3}.c3-text.c3-empty{fill:grey;font-size:2em}.c3-line{stroke-width:1px}.c3-circle._expanded_{stroke-width:1px;stroke:#fff}.c3-selected-circle{fill:#fff;stroke-width:2px}.c3-bar{stroke-width:0}.c3-bar._expanded_{fill-opacity:1;fill-opacity:.75}.c3-target.c3-focused{opacity:1}.c3-target.c3-focused path.c3-line,.c3-target.c3-focused path.c3-step{stroke-width:2px}.c3-target.c3-defocused{opacity:.3!important}.c3-region{fill:#4682b4;fill-opacity:.1}.c3-brush .extent{fill-opacity:.1}.c3-legend-item{font-size:12px}.c3-legend-item-hidden{opacity:.15}.c3-legend-background{opacity:.75;fill:#fff;stroke:#d3d3d3;stroke-width:1}.c3-title{font:14px sans-serif}.c3-tooltip-container{z-index:10}.c3-tooltip{border-collapse:collapse;border-spacing:0;background-color:#fff;empty-cells:show;-webkit-box-shadow:7px 7px 12px -9px #777;-moz-box-shadow:7px 7px 12px -9px #777;box-shadow:7px 7px 12px -9px #777;opacity:.9}.c3-tooltip tr{border:1px solid #ccc}.c3-tooltip th{background-color:#aaa;font-size:14px;padding:2px 5px;text-align:left;color:#fff}.c3-tooltip td{font-size:13px;padding:3px 6px;background-color:#fff;border-left:1px dotted #999}.c3-tooltip td>span{display:inline-block;width:10px;height:10px;margin-right:6px}.c3-tooltip 
td.value{text-align:right}.c3-area{stroke-width:0;opacity:.2}.c3-chart-arcs-title{dominant-baseline:middle;font-size:1.3em}.c3-chart-arcs .c3-chart-arcs-background{fill:#e0e0e0;stroke:none}.c3-chart-arcs .c3-chart-arcs-gauge-unit{fill:#000;font-size:16px}.c3-chart-arcs .c3-chart-arcs-gauge-max{fill:#777}.c3-chart-arcs .c3-chart-arcs-gauge-min{fill:#777}.c3-chart-arc .c3-gauge-value{fill:#000}.c3-chart-arc.c3-target g path{opacity:1}.c3-chart-arc.c3-target.c3-focused g path{opacity:1} \ No newline at end of file diff --git a/web/css/morris-0.5.1.css b/web/css/morris-0.5.1.css deleted file mode 100644 index 209f09156..000000000 --- a/web/css/morris-0.5.1.css +++ /dev/null @@ -1,2 +0,0 @@ -.morris-hover{position:absolute;z-index:1000}.morris-hover.morris-default-style{border-radius:10px;padding:6px;color:#666;background:rgba(255,255,255,0.8);border:solid 2px rgba(230,230,230,0.8);font-family:sans-serif;font-size:12px;text-align:center}.morris-hover.morris-default-style .morris-hover-row-label{font-weight:bold;margin:0.25em 0} -.morris-hover.morris-default-style .morris-hover-point{white-space:nowrap;margin:0.1em 0} diff --git a/web/dashboard.css b/web/dashboard.css deleted file mode 100644 index 80062a272..000000000 --- a/web/dashboard.css +++ /dev/null @@ -1,738 +0,0 @@ -html, -body { - /*font-family: Calibri,"Segoe UI","Helvetica Neue",Helvetica,Arial,sans-serif;*/ - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-style: normal; - font-variant: normal; -} - -.morelink { - color: #765d9c; - text-decoration: none; -} - -.morelink:hover { - color: #563d7c; - text-decoration: none; -} - -.morelink:focus { - color: #765d9c; - text-decoration: none; -} - -.netdata-chart-alignment { - margin-left: 55px; -} - -.netdata-chart-row { - width: 100%; - text-align: center; - display: flex; - display: -webkit-flex; - display: -moz-flex; - align-items: baseline; - -moz-align-items: baseline; - -webkit-align-items: baseline; - justify-content: center; - -webkit-justify-content: center; - -moz-justify-content: center; - padding-top: 10px; -} - -.netdata-container { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-container-gauge { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-container-gauge:after { - padding-top: 60%; - display: block; - content: ''; -} - -.netdata-container-easypiechart { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-container-easypiechart:after { - padding-top: 100%; - display: block; - content: ''; -} - -.netdata-aspect { - position: relative; - width: 100%; - padding: 0px; - margin: 0px; -} - -.netdata-container-with-legend { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* fix minimum scrollbar issue in firefox */ - min-height: 99px; - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - 
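The two :after rules above use the percentage-padding trick to give the gauge and easy-pie containers a fixed aspect ratio: percentage vertical padding resolves against the element's width, so a generated child with padding-top: 60% (or 100%) and no height of its own props the container open to a 10:6 (or square) box, while position: relative on the container lets the chart canvas sit absolutely on top of the spacer, per the stylesheet's own "required for child elements to have absolute position" comment. A minimal standalone sketch of the technique; the .aspect-box class name is illustrative and not part of dashboard.css:

    /* fixed 10:6 width-to-height box; real content is positioned
       absolutely over the padding spacer */
    .aspect-box {
        position: relative;
    }
    .aspect-box:after {
        content: '';
        display: block;
        padding-top: 60%; /* resolves against the box's own width */
    }

Swapping the padding-top value is all it takes to change the ratio, which is why the gauge (60%) and easy-pie (100%) containers differ only in that one declaration.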
-.netdata-legend-resize-handler { - display: block; - position: absolute; - bottom: 0px; - right: 0px; - height: 15px; - width: 20px; - background-color: White; - font-size: 15px; - vertical-align: middle; - line-height: 15px; - cursor: ns-resize; - color: #DDDDDD; - text-align: center; - overflow: hidden; - z-index: 20; - padding: 0px; - margin: 0px; -} - -.netdata-legend-toolbox { - display: block; - position: absolute; - bottom: 0px; - right: 30px; - height: 15px; - width: 110px; - background-color: White; - font-size: 12px; - vertical-align: middle; - line-height: 15px; - color: #DDDDDD; - text-align: center; - overflow: hidden; - z-index: 20; - padding: 0px; - margin: 0px; - - /* prevent text selection after double click */ - -webkit-user-select: none; /* webkit (safari, chrome) browsers */ - -moz-user-select: none; /* mozilla browsers */ - -khtml-user-select: none; /* webkit (konqueror) browsers */ - -ms-user-select: none; /* IE10+ */ -} - -.netdata-legend-toolbox-button { - display: inline-block; - position: relative; - height: 15px; - width: 18px; - background-color: White; - font-size: 12px; - vertical-align: middle; - line-height: 15px; - color: #CDCDCD; - text-align: center; - overflow: hidden; - z-index: 21; - padding: 0px; - margin: 0px; - cursor: pointer; - - /* prevent text selection after double click */ - -webkit-user-select: none; /* webkit (safari, chrome) browsers */ - -moz-user-select: none; /* mozilla browsers */ - -khtml-user-select: none; /* webkit (konqueror) browsers */ - -ms-user-select: none; /* IE10+ */ -} - -.netdata-message { - display: inline-block; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - text-align: left; - vertical-align: top; - font-weight: bold; - font-size: x-small; - overflow: hidden; - background: inherit; - z-index: 0; -} - -.netdata-message.hidden { - display: none; -} - -.netdata-message.icon { - color: #F8F8F8; - text-align: center; - vertical-align: middle; -} - -.netdata-chart-legend { - position: absolute; /* within .netdata-container */ - top: 0; - right: 0; - overflow: hidden; - text-overflow: ellipsis; - line-height: 14px; - display: block; - width: 140px; /* --legend-width */ - height: calc(100% - 15px); /* 10px for the resize handler and 5px for the top margin */ - font-size: 10px; - margin-top: 5px; - text-align: left; - /* width and height is calculated (depends on the appearance of the legend) */ -} - -.netdata-legend-title-date { - font-size: 10px; - font-weight: normal; - margin-top: 0px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.netdata-legend-title-time { - font-size: 11px; - font-weight: bold; - margin-top: 0px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.netdata-legend-title-units { - position: absolute; - right: 10px; - float: right; - font-size: 11px; - vertical-align: top; - font-weight: normal; - margin-top: 0px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.netdata-legend-series { - position: absolute; - width: 140px; /* legend-width */ - height: calc(100% - 50px); - overflow: hidden; - text-overflow: ellipsis; - line-height: 14.5px; /* line spacing at the legend */ - display: block; - font-size: 10px; - margin-top: 0px; -} - -.netdata-legend-name-table-line { - display: inline-block; - width: 13px; - height: 4px; - border-width: 0px; - border-bottom-width: 2px; - border-bottom-style: solid; - border-bottom-color: white; -} - -.netdata-legend-name-table-area { - display: inline-block; - width: 13px; 
- height: 5px; - border-width: 1px; - border-top-width: 1px; - border-top-style: solid; - border-top-color: inherit; -} - -.netdata-legend-name-table-stacked { - display: inline-block; - width: 13px; - height: 5px; - border-width: 1px; - border-top-width: 1px; - border-top-style: solid; - border-top-color: inherit; -} - -.netdata-legend-name-tr { -} - -.netdata-legend-name-td { -} - -.netdata-legend-name { - text-align: left; - font-size: 11px; /* legend: dimension name size */ - font-weight: bold; - vertical-align: bottom; - margin-top: 0px; - z-index: 9; - padding: 0px; - width: 80px !important; - max-width: 80px !important; - text-overflow: ellipsis; - overflow: hidden; - white-space: nowrap; - display: inline-block; - cursor: pointer; - -webkit-print-color-adjust: exact; -} - -.netdata-legend-value { - /*margin-left: 14px;*/ - position: absolute; - right: 10px; - float: right; - text-align: right; - font-size: 11px; /* legend: dimension value size */ - font-weight: bold; - vertical-align: bottom; - background-color: White; - margin-top: 0px; - z-index: 10; - padding: 0px; - padding-left: 15px; - cursor: pointer; - /* -webkit-font-smoothing: none; */ -} - -.netdata-legend-name.not-selected { - font-weight: normal; - opacity: 0.3; -} - -.netdata-chart { - position: absolute; /* within .netdata-container */ - top: 0; /* within .netdata-container */ - left: 0; /* within .netdata-container */ - display: inline-block; - overflow: hidden; - width: 100%; - height: 100%; - z-index: 5; - - /* width and height is calculated (depends on the appearance of the legend) */ -} - -.netdata-chart-with-legend-right { - position: absolute; /* within .netdata-container */ - top: 0; /* within .netdata-container */ - left: 0; /* within .netdata-container */ - display: block; - overflow: hidden; - margin-right: 140px; /* --legend-width */ - width: calc(100% - 140px); /* --legend-width */ - height: 100%; - z-index: 5; - flex-grow: 1; - - /* width and height is calculated (depends on the appearance of the legend) */ -} - -.netdata-peity-chart { - -} - -.netdata-sparkline-chart { - -} - -.netdata-dygraph-chart { - -} - -.netdata-morris-chart { - -} - -.netdata-google-chart { - -} - -.dygraph-ylabel { -} - -.dygraph-axis-label-x { - overflow-x: hidden; -} - -.dygraph-label-rotate-left { - text-align: center; - /* See http://caniuse.com/#feat=transforms2d */ - transform: rotate(90deg); - -webkit-transform: rotate(90deg); - -moz-transform: rotate(90deg); - -o-transform: rotate(90deg); - -ms-transform: rotate(90deg); -} - -/* For y2-axis label */ -.dygraph-label-rotate-right { - text-align: center; - /* See http://caniuse.com/#feat=transforms2d */ - transform: rotate(-90deg); - -webkit-transform: rotate(-90deg); - -moz-transform: rotate(-90deg); - -o-transform: rotate(-90deg); - -ms-transform: rotate(-90deg); -} - -.dygraph-title { - text-indent: 56px; - text-align: left; - position: absolute; - left: 0px; - top: 4px; - font-size: 11px; - font-weight: bold; - text-overflow: ellipsis; - overflow: hidden; - white-space: nowrap; -} - -/* fix for sparkline tooltip under bootstrap */ -.jqstooltip { - width: auto !important; - height: auto !important; -} - -.easyPieChart { - position: relative; - text-align: center; -} - -.easyPieChart canvas { - position: absolute; - top: 0; - left: 0; -} - -.easyPieChartLabel { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 100%; - text-align: center; - color: #666; - font-weight: normal; - text-shadow: #BBB 0px 0px 1px; - /* -webkit-font-smoothing: 
none; */ -} - -.easyPieChartTitle { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 64%; - margin-left: 18% !important; - text-align: center; - color: #999999; - font-weight: bold; -} - -.easyPieChartUnits { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 60%; - margin-left: 20% !important; - text-align: center; - color: #999999; - font-weight: normal; -} - -.gaugeChart { - position: relative; - text-align: center; -} - -.gaugeChart canvas { - position: absolute; - top: 0; - left: 0; - bottom: 0; - right: 0; - z-index: 0; -} - -.gaugeChartLabel { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 100%; - text-align: center; - color: #FFFFFF; - font-weight: bold; - z-index: 1; - text-shadow: #777 0px 0px 1px; - /* text-shadow: #CCC 1px 1px 0px, #CCC -1px -1px 0px, #CCC 1px -1px 0px, #CCC -1px 1px 0px; */ - /* -webkit-text-stroke: 1px #777; */ - /* -webkit-font-smoothing: none; */ -} - -.gaugeChartTitle { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 100%; - text-align: center; - color: #999999; - font-weight: bold; -} - -.gaugeChartUnits { - display: inline-block; - position: absolute; - float: left; - left: 0; - bottom: 0; - width: 100%; - text-align: left; - margin-left: 5%; - color: #999999; - font-weight: normal; -} - -.gaugeChartMin { - display: inline-block; - position: absolute; - float: left; - left: 0; - bottom: 8%; - width: 92%; - margin-left: 8%; - text-align: left; - color: #999999; - font-weight: normal; -} - -.gaugeChartMax { - display: inline-block; - position: absolute; - float: left; - left: 0; - bottom: 8%; - width: 95%; - margin-right: 5%; - text-align: right; - color: #999999; - font-weight: normal; -} - -.popover-title { - font-weight: bold; - font-size: 12px; -} - -.popover-content { - font-size: 11px; -} - -/* ---------------------------------------------------------------------------- - perfect-scrollbar settings - */ - -.ps-container { - -ms-touch-action: auto; - touch-action: auto; - overflow: hidden !important; - -ms-overflow-style: none; -} - -@supports (-ms-overflow-style: none) { - .ps-container { - overflow: auto !important; - } -} - -@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { - .ps-container { - overflow: auto !important; - } -} - -.ps-container.ps-active-x > .ps-scrollbar-x-rail, -.ps-container.ps-active-y > .ps-scrollbar-y-rail { - display: block; - background-color: transparent; -} - -.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { - background-color: transparent; /* background color when dragged away */ - opacity: 0.9; -} - -.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { - background-color: #aaa; /* scrollbar color when dragged away */ - height: 5px; -} - -.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { - background-color: transparent; /* background color when dragged away */ - opacity: 0.9; -} - -.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { - background-color: #aaa; /* scrollbar color when dragged away */ - width: 5px; -} - -.ps-container > .ps-scrollbar-x-rail { - display: none; - position: absolute; - /* please don't change 'position' */ - opacity: 0.2; /* the opacity when not on hover of the content */ - -webkit-transition: background-color .2s linear, opacity .2s linear; - -o-transition: background-color .2s linear, opacity .2s linear; - -moz-transition: background-color .2s linear, opacity .2s 
linear; - transition: background-color .2s linear, opacity .2s linear; - bottom: 0px; - /* there must be 'bottom' for ps-scrollbar-x-rail */ - height: 15px; -} - -.ps-container > .ps-scrollbar-x-rail > .ps-scrollbar-x { - position: absolute; - /* please don't change 'position' */ - background-color: #666; /* #aaa; the color on content hover */ - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - bottom: 2px; - /* there must be 'bottom' for ps-scrollbar-x */ - height: 5px; /* the width of the scrollbar */ -} - -.ps-container > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x, .ps-container > .ps-scrollbar-x-rail:active > .ps-scrollbar-x { - height: 4px; -} - -.ps-container > .ps-scrollbar-y-rail { - display: none; - position: absolute; - /* please don't change 'position' */ - opacity: 0.2; /* the opacity when not on hover of the content */ - -webkit-transition: background-color .2s linear, opacity .2s linear; - -o-transition: background-color .2s linear, opacity .2s linear; - -moz-transition: background-color .2s linear, opacity .2s linear; - transition: background-color .2s linear, opacity .2s linear; - right: 0; - /* there must be 'right' for ps-scrollbar-y-rail */ - width: 15px; -} - -.ps-container > .ps-scrollbar-y-rail > .ps-scrollbar-y { - position: absolute; - /* please don't change 'position' */ - background-color: #666; /* #aaa; the color on content hover */ - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - right: 2px; - /* there must be 'right' for ps-scrollbar-y */ - width: 5px; /* the width of the scrollbar */ -} - -.ps-container > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y, .ps-container > .ps-scrollbar-y-rail:active > .ps-scrollbar-y { - width: 5px; -} - -.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { - background-color: transparent; /* background color 
when dragged */ - opacity: 0.9; -} - -.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { - background-color: #bbb; /* scrollbar color when dragged */ - height: 5px; -} - -.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { - background-color: transparent; /* background color when dragged */ - opacity: 0.9; -} - -.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { - background-color: #bbb; /* scrollbar color when dragged */ - width: 5px; -} - -.ps-container:hover > .ps-scrollbar-x-rail, -.ps-container:hover > .ps-scrollbar-y-rail { - opacity: 0.6; -} - -.ps-container:hover > .ps-scrollbar-x-rail:hover { - background-color: transparent; /* the background color on hover of the scrollbar */ - opacity: 0.9; -} - -.ps-container:hover > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x { - background-color: #999; /* scrollbar color on hover */ -} - -.ps-container:hover > .ps-scrollbar-y-rail:hover { - background-color: transparent; /* the background color on hover of the scrollbar */ - opacity: 0.9; -} - -.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y { - background-color: #999; /* scrollbar color on hover */ -} diff --git a/web/dashboard.html b/web/dashboard.html deleted file mode 100644 index 152178947..000000000 --- a/web/dashboard.html +++ /dev/null @@ -1,700 +0,0 @@ -<!DOCTYPE html> -<html lang="en"> -<head> - <title>NetData Dashboard - - - - - - - - - - - - - - - - - - - - -
- -

NetData Custom Dashboard

- -This is a template for building custom dashboards. To build a dashboard you just do this: - -
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <meta name="apple-mobile-web-app-capable" content="yes">
-    <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
-</head>
-<body>
-    <div data-netdata="system.processes"
-        data-chart-library="dygraph"
-        data-width="600"
-        data-height="200"
-        data-after="-600"
-        ></div>
-</body>
-<script type="text/javascript" src="http://netdata.server:19999/dashboard.js"></script>
-</html>
-
- -
    -
  • You can host your dashboard anywhere.
  • You can add as many charts as you like.
  • You can have charts from many different netdata servers (add data-host="http://another.netdata.server:19999/" to each chart) - see the sketch after this list.
  • You can use different chart libraries on the same page: peity, sparkline, dygraph, google, morris.
  • You can customize each chart to your preferences. For each chart library, most of its attributes can be given as data- attributes.
  • Each chart can have its own duration - it is controlled with the data-after attribute, which asks for that many seconds of data.
  • Depending on the width of the chart and the data-after attribute, netdata will automatically refresh the chart as often as it needs to be updated. For example, a 600-pixel-wide chart showing the last 600 seconds of data, with a chart library that needs 3 pixels per point, draws 200 points - each covering 3 seconds - so the chart is updated once every 3 seconds.
-

Sparkline Charts

-Sparkline charts support 'NULL' values, so the charts can indicate that values are missing. -Sparkline charts stretch the values to show the variations between values in more detail. -They also have mouse-hover support. -
-Sparklines are fantastic. You can inline charts in text. For example this -
is my current cpu usage (last 30 seconds), - while this -
is the bandwidth my netdata server is currently transmitting (last minute) - and this -
is the requests/sec it serves (last 3 minutes). - -
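Each of those inline charts is just a small div on a page that loads dashboard.js as shown in the template above. A minimal sketch (the chart id and timings are illustrative; the inline style keeps the chart in the text flow):

<div data-netdata="netdata.server_cpu"
    data-chart-library="sparkline"
    data-width="100"
    data-height="20"
    data-after="-30"
    style="display: inline-block;"></div>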
-

Peity Charts

-Peity charts do not support 'NULL' values, so they cannot indicate that values are missing. -Peity charts also cannot show multiple dimensions - so netdata uses 'min2max' to show the total of all dimensions. -
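A peity embed looks like any other chart div; only the library changes (a sketch, with an illustrative chart id):

<div data-netdata="system.processes"
    data-chart-library="peity"
    data-width="100"
    data-height="20"
    data-after="-60"
    ></div>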
-

Dygraph Charts

-The fastest charting engine that can draw complete charts (not just sparklines). -The charts are zoomable (drag their contents to pan, SHIFT + mouse wheel to zoom in or out, double-click to reset). -Netdata magic! Realtime charts on your web page! -
-Sparklines using dygraphs -
- are also possible! This -
- is an area chart, while this -
is a stacked area chart! - - -
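For the sparkline-style dygraphs mentioned above, netdata's own pages use a dygraph theme attribute; assuming that attribute, a sketch:

<div data-netdata="system.io"
    data-chart-library="dygraph"
    data-dygraph-theme="sparkline"
    data-width="200"
    data-height="30"
    data-after="-120"
    ></div>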
-

EasyPieChart
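A minimal easypiechart embed (a sketch; the chart id is illustrative):

<div data-netdata="system.cpu"
    data-chart-library="easypiechart"
    data-width="150"
    data-height="150"
    data-after="-60"
    ></div>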

-

Gauge.js
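A minimal gauge.js embed (a sketch; the chart id is illustrative):

<div data-netdata="system.cpu"
    data-chart-library="gauge"
    data-width="200"
    data-height="160"
    data-after="-60"
    ></div>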

-

Google Charts

-NetData was originally developed with Google Charts. -NetData is a complete Google Visualization API provider. -
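A google chart embed is the same markup again (a sketch):

<div data-netdata="system.processes"
    data-chart-library="google"
    data-width="49%"
    data-height="200"
    data-after="-300"
    ></div>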
-

Morris Charts

-Unfortunately, Morris Charts are very slow. Here we force them to lower their detail to get acceptable results. -
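Lowering the detail means asking for fewer points - assuming the data-points attribute (the points option of the data API), a sketch:

<div data-netdata="system.cpu"
    data-chart-library="morris"
    data-points="15"
    data-width="49%"
    data-height="200"
    data-after="-300"
    ></div>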
-

C3 Charts

-C3 charts are not usable at large scale. They suffer from the following issues: -
    -
  • extreme use of transitions (implemented with D3 instead of CSS, meaning they are rendered by javascript) that cannot be disabled - even opacity is hardcoded in the javascript library
  • rendering is done with SVG instead of canvas, so they use DOM elements for every point, becoming useless if more than 500 points are drawn
  • lack of a raw data format, so every time a chart is updated, data conversion in javascript is required
  • lack of stacked charts support
-So, to avoid flashing the charts, we destroy and re-create the charts on each update. Also, since they manipulate the data with javascript, we were forced to lower the detail they render to get acceptable speeds.
-

d3pie Charts
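A d3pie embed (a sketch; the chart id is illustrative):

<div data-netdata="system.cpu"
    data-chart-library="d3pie"
    data-width="300"
    data-height="300"
    data-after="-60"
    ></div>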

- - - - - - - - - - - diff --git a/web/dashboard.js b/web/dashboard.js deleted file mode 100644 index 607c9d1ac..000000000 --- a/web/dashboard.js +++ /dev/null @@ -1,9457 +0,0 @@ -// ---------------------------------------------------------------------------- -// You can set the following variables before loading this script: - -/*global netdataNoDygraphs *//* boolean, disable dygraph charts - * (default: false) */ -/*global netdataNoSparklines *//* boolean, disable sparkline charts - * (default: false) */ -/*global netdataNoPeitys *//* boolean, disable peity charts - * (default: false) */ -/*global netdataNoGoogleCharts *//* boolean, disable google charts - * (default: false) */ -/*global netdataNoMorris *//* boolean, disable morris charts - * (default: false) */ -/*global netdataNoEasyPieChart *//* boolean, disable easypiechart charts - * (default: false) */ -/*global netdataNoGauge *//* boolean, disable gauge.js charts - * (default: false) */ -/*global netdataNoD3 *//* boolean, disable d3 charts - * (default: false) */ -/*global netdataNoC3 *//* boolean, disable c3 charts - * (default: false) */ -/*global netdataNoD3pie *//* boolean, disable d3pie charts - * (default: false) */ -/*global netdataNoBootstrap *//* boolean, disable bootstrap - disables help too - * (default: false) */ -/*global netdataDontStart *//* boolean, do not start the thread to process the charts - * (default: false) */ -/*global netdataErrorCallback *//* function, callback to be called when the dashboard encounters an error - * (default: null) */ -/*global netdataRegistry:true *//* boolean, use the netdata registry - * (default: false) */ -/*global netdataNoRegistry *//* boolean, included only for compatibility with existing custom dashboards - * (obsolete - do not use this any more) */ -/*global netdataRegistryCallback *//* function, callback that will be invoked with one param: the URLs from the registry - * (default: null) */ -/*global netdataShowHelp:true *//* boolean, disable charts help - * (default: true) */ -/*global netdataShowAlarms:true *//* boolean, enable alarms checks and notifications - * (default: false) */ -/*global netdataRegistryAfterMs:true *//* ms, delay registry use after startup - * (default: 1500) */ -/*global netdataCallback *//* function, callback to be called when netdata is ready to start - * (default: null) - * netdata will be running while this is called - * (call NETDATA.pause to stop it) */ -/*global netdataPrepCallback *//* function, callback to be called before netdata does anything else - * (default: null) */ -/*global netdataServer *//* string, the URL of the netdata server to use - * (default: the URL the page is hosted at) */ -/*global netdataServerStatic *//* string, the URL of the netdata server to use for static files - * (default: netdataServer) */ -/*global netdataSnapshotData *//* object, a netdata snapshot loaded - * (default: null) */ -/*global netdataAlarmsRecipients *//* array, an array of alarm recipients to show notifications for - * (default: null) */ -/*global netdataAlarmsRemember *//* boolean, keep our position in the alarm log at browser local storage - * (default: true) */ -/*global netdataAlarmsActiveCallback *//* function, a hook for the alarm logs - * (default: undefined) */ -/*global netdataAlarmsNotifCallback *//* function, a hook for alarm notifications - * (default: undefined) */ -/*global netdataIntersectionObserver *//* boolean, enable or disable the use of intersection observer - * (default: true) */ -/*global netdataCheckXSS *//* boolean, enable or disable
checking for XSS issues - * (default: false) */ - -// ---------------------------------------------------------------------------- -// global namespace - -var NETDATA = window.NETDATA || {}; - -(function(window, document, $, undefined) { - // ------------------------------------------------------------------------ - // compatibility fixes - - // fix IE issue with console - if(!window.console) { window.console = { log: function(){} }; } - - // if string.endsWith is not defined, define it - if(typeof String.prototype.endsWith !== 'function') { - String.prototype.endsWith = function(s) { - if(s.length > this.length) return false; - return this.slice(-s.length) === s; - }; - } - - // if string.startsWith is not defined, define it - if(typeof String.prototype.startsWith !== 'function') { - String.prototype.startsWith = function(s) { - if(s.length > this.length) return false; - return this.slice(0, s.length) === s; - }; - } - - NETDATA.name2id = function(s) { - return s - .replace(/ /g, '_') - .replace(/\(/g, '_') - .replace(/\)/g, '_') - .replace(/\./g, '_') - .replace(/\//g, '_'); - }; - - // ---------------------------------------------------------------------------------------------------------------- - // XSS checks - - NETDATA.xss = { - enabled: (typeof netdataCheckXSS === 'undefined')?false:netdataCheckXSS, - enabled_for_data: (typeof netdataCheckXSS === 'undefined')?false:netdataCheckXSS, - - string: function (s) { - return s.toString() - .replace(/</g, '&lt;') - .replace(/>/g, '&gt;') - .replace(/"/g, '&quot;') - .replace(/'/g, '&#39;'); - }, - - object: function(name, obj, ignore_regex) { - if(typeof ignore_regex !== 'undefined' && ignore_regex.test(name) === true) { - // console.log('XSS: ignoring "' + name + '"'); - return obj; - } - - switch (typeof(obj)) { - case 'string': - var ret = this.string(obj); - if(ret !== obj) console.log('XSS protection changed string ' + name + ' from "' + obj + '" to "' + ret + '"'); - return ret; - - case 'object': - if(obj === null) return obj; - - if(Array.isArray(obj) === true) { - // console.log('checking array "' + name + '"'); - - var len = obj.length; - while(len--) - obj[len] = this.object(name + '[' + len + ']', obj[len], ignore_regex); - } - else { - // console.log('checking object "' + name + '"'); - - for(var i in obj) { - if(obj.hasOwnProperty(i) === false) continue; - if(this.string(i) !== i) { - console.log('XSS protection removed invalid object member "' + name + '.' + i + '"'); - delete obj[i]; - } - else - obj[i] = this.object(name + '.'
+ i, obj[i], ignore_regex); - } - } - return obj; - - default: - return obj; - } - }, - - checkOptional: function(name, obj, ignore_regex) { - if(this.enabled === true) { - //console.log('XSS: checking optional "' + name + '"...'); - return this.object(name, obj, ignore_regex); - } - return obj; - }, - - checkAlways: function(name, obj, ignore_regex) { - //console.log('XSS: checking always "' + name + '"...'); - return this.object(name, obj, ignore_regex); - }, - - checkData: function(name, obj, ignore_regex) { - if(this.enabled_for_data === true) { - //console.log('XSS: checking data "' + name + '"...'); - return this.object(name, obj, ignore_regex); - } - return obj; - } - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Detect the netdata server - - // http://stackoverflow.com/questions/984510/what-is-my-script-src-url - // http://stackoverflow.com/questions/6941533/get-protocol-domain-and-port-from-url - NETDATA._scriptSource = function() { - var script = null; - - if(typeof document.currentScript !== 'undefined') { - script = document.currentScript; - } - else { - var all_scripts = document.getElementsByTagName('script'); - script = all_scripts[all_scripts.length - 1]; - } - - if (typeof script.getAttribute.length !== 'undefined') - script = script.src; - else - script = script.getAttribute('src', -1); - - return script; - }; - - if(typeof netdataServer !== 'undefined') - NETDATA.serverDefault = netdataServer; - else { - var s = NETDATA._scriptSource(); - if(s) NETDATA.serverDefault = s.replace(/\/dashboard.js(\?.*)*$/g, ""); - else { - console.log('WARNING: Cannot detect the URL of the netdata server.'); - NETDATA.serverDefault = null; - } - } - - if(NETDATA.serverDefault === null) - NETDATA.serverDefault = ''; - else if(NETDATA.serverDefault.slice(-1) !== '/') - NETDATA.serverDefault += '/'; - - if(typeof netdataServerStatic !== 'undefined' && netdataServerStatic !== null && netdataServerStatic !== '') { - NETDATA.serverStatic = netdataServerStatic; - if(NETDATA.serverStatic.slice(-1) !== '/') - NETDATA.serverStatic += '/'; - } - else { - NETDATA.serverStatic = NETDATA.serverDefault; - } - - - // default URLs for all the external files we need - // make them RELATIVE so that the whole thing can also be - // installed under a web server - NETDATA.jQuery = NETDATA.serverStatic + 'lib/jquery-2.2.4.min.js'; - NETDATA.peity_js = NETDATA.serverStatic + 'lib/jquery.peity-3.2.0.min.js'; - NETDATA.sparkline_js = NETDATA.serverStatic + 'lib/jquery.sparkline-2.1.2.min.js'; - NETDATA.easypiechart_js = NETDATA.serverStatic + 'lib/jquery.easypiechart-97b5824.min.js'; - NETDATA.gauge_js = NETDATA.serverStatic + 'lib/gauge-1.3.2.min.js'; - NETDATA.dygraph_js = NETDATA.serverStatic + 'lib/dygraph-c91c859.min.js'; - NETDATA.dygraph_smooth_js = NETDATA.serverStatic + 'lib/dygraph-smooth-plotter-c91c859.js'; - NETDATA.raphael_js = NETDATA.serverStatic + 'lib/raphael-2.2.4-min.js'; - NETDATA.c3_js = NETDATA.serverStatic + 'lib/c3-0.4.18.min.js'; - NETDATA.c3_css = NETDATA.serverStatic + 'css/c3-0.4.18.min.css'; - NETDATA.d3pie_js = NETDATA.serverStatic + 'lib/d3pie-0.2.1-netdata-3.js'; - NETDATA.d3_js = NETDATA.serverStatic + 'lib/d3-4.12.2.min.js'; - NETDATA.morris_js = NETDATA.serverStatic + 'lib/morris-0.5.1.min.js'; - NETDATA.morris_css = NETDATA.serverStatic + 'css/morris-0.5.1.css'; - NETDATA.google_js = 'https://www.google.com/jsapi'; - - NETDATA.themes = { - white: { - bootstrap_css: NETDATA.serverStatic + 
'css/bootstrap-3.3.7.css', - dashboard_css: NETDATA.serverStatic + 'dashboard.css?v20180210-1', - background: '#FFFFFF', - foreground: '#000000', - grid: '#F0F0F0', - axis: '#F0F0F0', - highlight: '#F5F5F5', - colors: [ '#3366CC', '#DC3912', '#109618', '#FF9900', '#990099', '#DD4477', - '#3B3EAC', '#66AA00', '#0099C6', '#B82E2E', '#AAAA11', '#5574A6', - '#994499', '#22AA99', '#6633CC', '#E67300', '#316395', '#8B0707', - '#329262', '#3B3EAC' ], - easypiechart_track: '#f0f0f0', - easypiechart_scale: '#dfe0e0', - gauge_pointer: '#C0C0C0', - gauge_stroke: '#F0F0F0', - gauge_gradient: false, - d3pie: { - title: '#333333', - subtitle: '#666666', - footer: '#888888', - other: '#aaaaaa', - mainlabel: '#333333', - percentage: '#dddddd', - value: '#aaaa22', - tooltip_bg: '#000000', - tooltip_fg: '#efefef', - segment_stroke: "#ffffff", - gradient_color: '#000000' - } - }, - slate: { - bootstrap_css: NETDATA.serverStatic + 'css/bootstrap-slate-flat-3.3.7.css?v20161229-1', - dashboard_css: NETDATA.serverStatic + 'dashboard.slate.css?v20180210-1', - background: '#272b30', - foreground: '#C8C8C8', - grid: '#283236', - axis: '#283236', - highlight: '#383838', -/* colors: [ '#55bb33', '#ff2222', '#0099C6', '#faa11b', '#adbce0', '#DDDD00', - '#4178ba', '#f58122', '#a5cc39', '#f58667', '#f5ef89', '#cf93c0', - '#a5d18a', '#b8539d', '#3954a3', '#c8a9cf', '#c7de8a', '#fad20a', - '#a6a479', '#a66da8' ], -*/ - colors: [ '#66AA00', '#FE3912', '#3366CC', '#D66300', '#0099C6', '#DDDD00', - '#5054e6', '#EE9911', '#BB44CC', '#e45757', '#ef0aef', '#CC7700', - '#22AA99', '#109618', '#905bfd', '#f54882', '#4381bf', '#ff3737', - '#329262', '#3B3EFF' ], - easypiechart_track: '#373b40', - easypiechart_scale: '#373b40', - gauge_pointer: '#474b50', - gauge_stroke: '#373b40', - gauge_gradient: false, - d3pie: { - title: '#C8C8C8', - subtitle: '#283236', - footer: '#283236', - other: '#283236', - mainlabel: '#C8C8C8', - percentage: '#dddddd', - value: '#cccc44', - tooltip_bg: '#272b30', - tooltip_fg: '#C8C8C8', - segment_stroke: "#283236", - gradient_color: '#000000' - } - } - }; - - if(typeof netdataTheme !== 'undefined' && typeof NETDATA.themes[netdataTheme] !== 'undefined') - NETDATA.themes.current = NETDATA.themes[netdataTheme]; - else - NETDATA.themes.current = NETDATA.themes.white; - - NETDATA.colors = NETDATA.themes.current.colors; - - // these are the colors Google Charts are using - // we have them here to attempt emulate their look and feel on the other chart libraries - // http://there4.io/2012/05/02/google-chart-color-list/ - //NETDATA.colors = [ '#3366CC', '#DC3912', '#FF9900', '#109618', '#990099', '#3B3EAC', '#0099C6', - // '#DD4477', '#66AA00', '#B82E2E', '#316395', '#994499', '#22AA99', '#AAAA11', - // '#6633CC', '#E67300', '#8B0707', '#329262', '#5574A6', '#3B3EAC' ]; - - // an alternative set - // http://www.mulinblog.com/a-color-palette-optimized-for-data-visualization/ - // (blue) (red) (orange) (green) (pink) (brown) (purple) (yellow) (gray) - //NETDATA.colors = [ '#5DA5DA', '#F15854', '#FAA43A', '#60BD68', '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#4D4D4D' ]; - - if(typeof netdataSnapshotData === 'undefined') - netdataSnapshotData = null; - - if(typeof netdataShowHelp === 'undefined') - netdataShowHelp = true; - - if(typeof netdataShowAlarms === 'undefined') - netdataShowAlarms = false; - - if(typeof netdataRegistryAfterMs !== 'number' || netdataRegistryAfterMs < 0) - netdataRegistryAfterMs = 1500; - - if(typeof netdataRegistry === 'undefined') { - // backward compatibility - netdataRegistry = (typeof 
netdataNoRegistry !== 'undefined' && netdataNoRegistry === false); - } - if(netdataRegistry === false && typeof netdataRegistryCallback === 'function') - netdataRegistry = true; - - - // ---------------------------------------------------------------------------------------------------------------- - // detect if this is probably a slow device - - var isSlowDeviceResult = undefined; - var isSlowDevice = function() { - if(isSlowDeviceResult !== undefined) - return isSlowDeviceResult; - - try { - var ua = navigator.userAgent.toLowerCase(); - - var iOS = /ipad|iphone|ipod/.test(ua) && !window.MSStream; - var android = /android/.test(ua) && !window.MSStream; - isSlowDeviceResult = (iOS === true || android === true); - } - catch (e) { - isSlowDeviceResult = false; - } - - return isSlowDeviceResult; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // the defaults for all charts - - // if the user does not specify any of these, the following will be used - - NETDATA.chartDefaults = { - width: '100%', // the chart width - can be null - height: '100%', // the chart height - can be null - min_width: null, // the chart minimum width - can be null - library: 'dygraph', // the graphing library to use - method: 'average', // the grouping method - before: 0, // panning - after: -600, // panning - pixels_per_point: 1, // the detail of the chart - fill_luminance: 0.8 // luminance of colors in solid areas - }; - - // ---------------------------------------------------------------------------------------------------------------- - // global options - - NETDATA.options = { - pauseCallback: null, // a callback when we are really paused - - pause: false, // when enabled we don't auto-refresh the charts - - targets: [], // an array of all the state objects that are - // currently active (independently of their - // viewport visibility) - - updated_dom: true, // when true, the DOM has been updated with - // new elements we have to check. - - auto_refresher_fast_weight: 0, // this is the current time in ms, spent - // rendering charts continuously. - // used with .current.fast_render_timeframe - - page_is_visible: true, // when true, this page is visible - - auto_refresher_stop_until: 0, // timestamp in ms - used internally, to stop the - // auto-refresher for some time (when a chart is - // performing pan or zoom, we need to stop refreshing - // all other charts, to have the maximum speed for - // rendering the chart that is panned or zoomed). - // Used with .current.global_pan_sync_time - - on_scroll_refresher_stop_until: 0, // timestamp in ms - used to stop evaluating - // charts for some time, after a page scroll - - last_page_resize: Date.now(), // the timestamp of the last resize request - - last_page_scroll: 0, // the timestamp the last time the page was scrolled - - browser_timezone: 'unknown', // timezone detected by javascript - server_timezone: 'unknown', // timezone reported by the server - - force_data_points: 0, // force the number of points to be returned for charts - fake_chart_rendering: false, // when set to true, the dashboard will download data but will not render the charts - - passive_events: null, // true if the browser supports passive events - - // the current profile - // we may have many... - current: { - units: 'auto', // can be 'auto' or 'original' - temperature: 'celsius', // can be 'celsius' or 'fahrenheit' - seconds_as_time: true, // show seconds as DDd:HH:MM:SS ? 
- timezone: 'default', // the timezone to use, or 'default' - user_set_server_timezone: 'default', // as set by the user on the dashboard - - legend_toolbox: true, // show the legend toolbox on charts - resize_charts: true, // show the resize handler on charts - - pixels_per_point: isSlowDevice()?5:1, // the minimum pixels per point for all charts - // increase this to speed javascript up - // each chart library has its own limit too - // the max of this and the chart library is used - // the final is calculated every time, so a change - // here will have immediate effect on the next chart - // update - - idle_between_charts: 100, // ms - how much time to wait between chart updates - - fast_render_timeframe: 200, // ms - render continuously until this time of continuous - // rendering has been reached - // this setting is used to make it render e.g. 10 - // charts at once, sleep idle_between_charts time - // and continue for another 10 charts. - - idle_between_loops: 500, // ms - if all charts have been updated, wait this - // time before starting again. - - idle_parallel_loops: 100, // ms - the time between parallel refresher updates - - idle_lost_focus: 500, // ms - when the window does not have focus, check - // if focus has been regained, every this time - - global_pan_sync_time: 300, // ms - when you pan or zoom a chart, the background - // auto-refreshing of charts is paused for this amount - // of time - - sync_selection_delay: 400, // ms - when you pan or zoom a chart, wait this amount - // of time before setting up synchronized selections - // on hover. - - sync_selection: true, // enable or disable selection sync - - pan_and_zoom_delay: 50, // when panning or zooming, how often to update the chart - - sync_pan_and_zoom: true, // enable or disable pan and zoom sync - - pan_and_zoom_data_padding: true, // fetch more data for the master chart when panning or zooming - - update_only_visible: true, // enable or disable visibility management / used for printing - - parallel_refresher: (isSlowDevice() === false), // enable parallel refresh of charts - - concurrent_refreshes: true, // when parallel_refresher is enabled, sync also the charts - - destroy_on_hide: (isSlowDevice() === true), // destroy charts when they are not visible - - show_help: netdataShowHelp, // when enabled the charts will show some help - show_help_delay_show_ms: 500, - show_help_delay_hide_ms: 0, - - eliminate_zero_dimensions: true, // do not show dimensions with just zeros - - stop_updates_when_focus_is_lost: true, // boolean - shall we stop auto-refreshes when document does not have user focus - stop_updates_while_resizing: 1000, // ms - time to stop auto-refreshes while resizing the charts - - double_click_speed: 500, // ms - time between clicks / taps to detect double click/tap - - smooth_plot: (isSlowDevice() === false), // enable smooth plot, where possible - - color_fill_opacity_line: 1.0, - color_fill_opacity_area: 0.2, - color_fill_opacity_stacked: 0.8, - - pan_and_zoom_factor: 0.25, // the increment when panning and zooming with the toolbox - pan_and_zoom_factor_multiplier_control: 2.0, - pan_and_zoom_factor_multiplier_shift: 3.0, - pan_and_zoom_factor_multiplier_alt: 4.0, - - abort_ajax_on_scroll: false, // kill pending ajax requests on page scroll - async_on_scroll: false, // sync/async onscroll handler - onscroll_worker_duration_threshold: 30, // time in ms, for async scroll handler - - retries_on_data_failures: 3, // how many retries to make if we can't fetch chart data from the server - - setOptionCallback:
function() { } - }, - - debug: { - show_boxes: false, - main_loop: false, - focus: false, - visibility: false, - chart_data_url: false, - chart_errors: true, // FIXME: remember to set it to false before merging - chart_timing: false, - chart_calls: false, - libraries: false, - dygraph: false, - globalSelectionSync:false, - globalPanAndZoom: false - } - }; - - NETDATA.statistics = { - refreshes_total: 0, - refreshes_active: 0, - refreshes_active_max: 0 - }; - - - // ---------------------------------------------------------------------------------------------------------------- - - NETDATA.timeout = { - // by default, these are just wrappers to setTimeout() / clearTimeout() - - step: function(callback) { - return window.setTimeout(callback, 1000 / 60); - }, - - set: function(callback, delay) { - return window.setTimeout(callback, delay); - }, - - clear: function(id) { - return window.clearTimeout(id); - }, - - init: function() { - var custom = true; - - if(window.requestAnimationFrame) { - this.step = function(callback) { - return window.requestAnimationFrame(callback); - }; - - this.clear = function(handle) { - return window.cancelAnimationFrame(handle.value); - }; - } - else if(window.webkitRequestAnimationFrame) { - this.step = function(callback) { - return window.webkitRequestAnimationFrame(callback); - }; - - if(window.webkitCancelAnimationFrame) { - this.clear = function (handle) { - return window.webkitCancelAnimationFrame(handle.value); - }; - } - else if(window.webkitCancelRequestAnimationFrame) { - this.clear = function (handle) { - return window.webkitCancelRequestAnimationFrame(handle.value); - }; - } - } - else if(window.mozRequestAnimationFrame) { - this.step = function(callback) { - return window.mozRequestAnimationFrame(callback); - }; - - this.clear = function(handle) { - return window.mozCancelRequestAnimationFrame(handle.value); - }; - } - else if(window.oRequestAnimationFrame) { - this.step = function(callback) { - return window.oRequestAnimationFrame(callback); - }; - - this.clear = function(handle) { - return window.oCancelRequestAnimationFrame(handle.value); - }; - } - else if(window.msRequestAnimationFrame) { - this.step = function(callback) { - return window.msRequestAnimationFrame(callback); - }; - - this.clear = function(handle) { - return window.msCancelRequestAnimationFrame(handle.value); - }; - } - else - custom = false; - - - if(custom === true) { - // we have installed custom .step() / .clear() functions - // overwrite the .set() too - - this.set = function(callback, delay) { - var that = this; - - var start = Date.now(), - handle = new Object(); - - function loop() { - var current = Date.now(), - delta = current - start; - - if(delta >= delay) { - callback.call(); - } - else { - handle.value = that.step(loop); - } - } - - handle.value = that.step(loop); - return handle; - }; - } - } - }; - - NETDATA.timeout.init(); - - - // ---------------------------------------------------------------------------------------------------------------- - // local storage options - - NETDATA.localStorage = { - default: {}, - current: {}, - callback: {} // only used for resetting back to defaults - }; - - NETDATA.localStorageTested = -1; - NETDATA.localStorageTest = function() { - if(NETDATA.localStorageTested !== -1) - return NETDATA.localStorageTested; - - if(typeof Storage !== "undefined" && typeof localStorage === 'object') { - var test = 'test'; - try { - localStorage.setItem(test, test); - localStorage.removeItem(test); - NETDATA.localStorageTested = true; - } - catch (e) 
{ - NETDATA.localStorageTested = false; - } - } - else - NETDATA.localStorageTested = false; - - return NETDATA.localStorageTested; - }; - - NETDATA.localStorageGet = function(key, def, callback) { - var ret = def; - - if(typeof NETDATA.localStorage.default[key.toString()] === 'undefined') { - NETDATA.localStorage.default[key.toString()] = def; - NETDATA.localStorage.callback[key.toString()] = callback; - } - - if(NETDATA.localStorageTest() === true) { - try { - // console.log('localStorage: loading "' + key.toString() + '"'); - ret = localStorage.getItem(key.toString()); - // console.log('netdata loaded: ' + key.toString() + ' = ' + ret.toString()); - if(ret === null || ret === 'undefined') { - // console.log('localStorage: cannot load it, saving "' + key.toString() + '" with value "' + JSON.stringify(def) + '"'); - localStorage.setItem(key.toString(), JSON.stringify(def)); - ret = def; - } - else { - // console.log('localStorage: got "' + key.toString() + '" with value "' + ret + '"'); - ret = JSON.parse(ret); - // console.log('localStorage: loaded "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret)); - } - } - catch(error) { - console.log('localStorage: failed to read "' + key.toString() + '", using default: "' + def.toString() + '"'); - ret = def; - } - } - - if(typeof ret === 'undefined' || ret === 'undefined') { - console.log('localStorage: LOADED UNDEFINED "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret)); - ret = def; - } - - NETDATA.localStorage.current[key.toString()] = ret; - return ret; - }; - - NETDATA.localStorageSet = function(key, value, callback) { - if(typeof value === 'undefined' || value === 'undefined') { - console.log('localStorage: ATTEMPT TO SET UNDEFINED "' + key.toString() + '" as value ' + value + ' of type ' + typeof(value)); - } - - if(typeof NETDATA.localStorage.default[key.toString()] === 'undefined') { - NETDATA.localStorage.default[key.toString()] = value; - NETDATA.localStorage.current[key.toString()] = value; - NETDATA.localStorage.callback[key.toString()] = callback; - } - - if(NETDATA.localStorageTest() === true) { - // console.log('localStorage: saving "' + key.toString() + '" with value "' + JSON.stringify(value) + '"'); - try { - localStorage.setItem(key.toString(), JSON.stringify(value)); - } - catch(e) { - console.log('localStorage: failed to save "' + key.toString() + '" with value: "' + value.toString() + '"'); - } - } - - NETDATA.localStorage.current[key.toString()] = value; - return value; - }; - - NETDATA.localStorageGetRecursive = function(obj, prefix, callback) { - var keys = Object.keys(obj); - var len = keys.length; - while(len--) { - var i = keys[len]; - - if(typeof obj[i] === 'object') { - //console.log('object ' + prefix + '.' + i.toString()); - NETDATA.localStorageGetRecursive(obj[i], prefix + '.' + i.toString(), callback); - continue; - } - - obj[i] = NETDATA.localStorageGet(prefix + '.' + i.toString(), obj[i], callback); - } - }; - - NETDATA.setOption = function(key, value) { - if(key.toString() === 'setOptionCallback') { - if(typeof NETDATA.options.current.setOptionCallback === 'function') { - NETDATA.options.current[key.toString()] = value; - NETDATA.options.current.setOptionCallback(); - } - } - else if(NETDATA.options.current[key.toString()] !== value) { - var name = 'options.' 
+ key.toString(); - - if(typeof NETDATA.localStorage.default[name.toString()] === 'undefined') - console.log('localStorage: setOption() on unsaved option: "' + name.toString() + '", value: ' + value); - - //console.log(NETDATA.localStorage); - //console.log('setOption: setting "' + key.toString() + '" to "' + value + '" of type ' + typeof(value) + ' original type ' + typeof(NETDATA.options.current[key.toString()])); - //console.log(NETDATA.options); - NETDATA.options.current[key.toString()] = NETDATA.localStorageSet(name.toString(), value, null); - - if(typeof NETDATA.options.current.setOptionCallback === 'function') - NETDATA.options.current.setOptionCallback(); - } - - return true; - }; - - NETDATA.getOption = function(key) { - return NETDATA.options.current[key.toString()]; - }; - - // read settings from local storage - NETDATA.localStorageGetRecursive(NETDATA.options.current, 'options', null); - - // always start with this option enabled. - NETDATA.setOption('stop_updates_when_focus_is_lost', true); - - NETDATA.resetOptions = function() { - var keys = Object.keys(NETDATA.localStorage.default); - var len = keys.length; - while(len--) { - var i = keys[len]; - var a = i.split('.'); - - if(a[0] === 'options') { - if(a[1] === 'setOptionCallback') continue; - if(typeof NETDATA.localStorage.default[i] === 'undefined') continue; - if(NETDATA.options.current[i] === NETDATA.localStorage.default[i]) continue; - - NETDATA.setOption(a[1], NETDATA.localStorage.default[i]); - } - else if(a[0] === 'chart_heights') { - if(typeof NETDATA.localStorage.callback[i] === 'function' && typeof NETDATA.localStorage.default[i] !== 'undefined') { - NETDATA.localStorage.callback[i](NETDATA.localStorage.default[i]); - } - } - } - - NETDATA.dateTime.init(NETDATA.options.current.timezone); - }; - - // ---------------------------------------------------------------------------------------------------------------- - - if(NETDATA.options.debug.main_loop === true) - console.log('welcome to NETDATA'); - - NETDATA.onresizeCallback = null; - NETDATA.onresize = function() { - NETDATA.options.last_page_resize = Date.now(); - NETDATA.onscroll(); - - if(typeof NETDATA.onresizeCallback === 'function') - NETDATA.onresizeCallback(); - }; - - NETDATA.abort_all_refreshes = function() { - var targets = NETDATA.options.targets; - var len = targets.length; - - while (len--) { - if (targets[len].fetching_data === true) { - if (typeof targets[len].xhr !== 'undefined') { - targets[len].xhr.abort(); - targets[len].running = false; - targets[len].fetching_data = false; - } - } - } - }; - - NETDATA.onscroll_start_delay = function() { - NETDATA.options.last_page_scroll = Date.now(); - - NETDATA.options.on_scroll_refresher_stop_until = - NETDATA.options.last_page_scroll - + ((NETDATA.options.current.async_on_scroll === true) ? 1000 : 0); - }; - - NETDATA.onscroll_end_delay = function() { - NETDATA.options.on_scroll_refresher_stop_until = - Date.now() - + ((NETDATA.options.current.async_on_scroll === true) ? 
NETDATA.options.current.onscroll_worker_duration_threshold : 0); - }; - - NETDATA.onscroll_updater_timeout_id = undefined; - NETDATA.onscroll_updater = function() { - NETDATA.globalSelectionSync.stop(); - - if(NETDATA.options.abort_ajax_on_scroll === true) - NETDATA.abort_all_refreshes(); - - // when the user scrolls he sees that we have - // hidden all the not-visible charts - // using this little function we try to switch - // the charts back to visible quickly - - if(NETDATA.intersectionObserver.enabled() === false) { - if (NETDATA.options.current.parallel_refresher === false) { - var targets = NETDATA.options.targets; - var len = targets.length; - - while (len--) - if (targets[len].running === false) - targets[len].isVisible(); - } - } - - NETDATA.onscroll_end_delay(); - }; - - NETDATA.scrollUp = false; - NETDATA.scrollY = window.scrollY; - NETDATA.onscroll = function() { - //console.log('onscroll() begin'); - - NETDATA.onscroll_start_delay(); - NETDATA.chartRefresherReschedule(); - - NETDATA.scrollUp = (window.scrollY > NETDATA.scrollY); - NETDATA.scrollY = window.scrollY; - - if(NETDATA.onscroll_updater_timeout_id) - NETDATA.timeout.clear(NETDATA.onscroll_updater_timeout_id); - - NETDATA.onscroll_updater_timeout_id = NETDATA.timeout.set(NETDATA.onscroll_updater, 0); - //console.log('onscroll() end'); - }; - - NETDATA.supportsPassiveEvents = function() { - if(NETDATA.options.passive_events === null) { - var supportsPassive = false; - try { - var opts = Object.defineProperty({}, 'passive', { - get: function () { - supportsPassive = true; - } - }); - window.addEventListener("test", null, opts); - } catch (e) { - console.log('browser does not support passive events'); - } - - NETDATA.options.passive_events = supportsPassive; - } - - // console.log('passive ' + NETDATA.options.passive_events); - return NETDATA.options.passive_events; - }; - - window.addEventListener('resize', NETDATA.onresize, NETDATA.supportsPassiveEvents() ? { passive: true } : false); - window.addEventListener('scroll', NETDATA.onscroll, NETDATA.supportsPassiveEvents() ? 
{ passive: true } : false); - // window.onresize = NETDATA.onresize; - // window.onscroll = NETDATA.onscroll; - - // ---------------------------------------------------------------------------------------------------------------- - // Error Handling - - NETDATA.errorCodes = { - 100: { message: "Cannot load chart library", alert: true }, - 101: { message: "Cannot load jQuery", alert: true }, - 402: { message: "Chart library not found", alert: false }, - 403: { message: "Chart library not enabled or failed", alert: false }, - 404: { message: "Chart not found", alert: false }, - 405: { message: "Cannot download charts index from server", alert: true }, - 406: { message: "Invalid charts index downloaded from server", alert: true }, - 407: { message: "Cannot HELLO netdata server", alert: false }, - 408: { message: "Netdata server sent invalid response to HELLO", alert: false }, - 409: { message: "Cannot ACCESS netdata registry", alert: false }, - 410: { message: "Netdata registry ACCESS failed", alert: false }, - 411: { message: "Netdata registry server sent invalid response to DELETE ", alert: false }, - 412: { message: "Netdata registry DELETE failed", alert: false }, - 413: { message: "Netdata registry server sent invalid response to SWITCH ", alert: false }, - 414: { message: "Netdata registry SWITCH failed", alert: false }, - 415: { message: "Netdata alarms download failed", alert: false }, - 416: { message: "Netdata alarms log download failed", alert: false }, - 417: { message: "Netdata registry server sent invalid response to SEARCH ", alert: false }, - 418: { message: "Netdata registry SEARCH failed", alert: false } - }; - NETDATA.errorLast = { - code: 0, - message: "", - datetime: 0 - }; - - NETDATA.error = function(code, msg) { - NETDATA.errorLast.code = code; - NETDATA.errorLast.message = msg; - NETDATA.errorLast.datetime = Date.now(); - - console.log("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg); - - var ret = true; - if(typeof netdataErrorCallback === 'function') { - ret = netdataErrorCallback('system', code, msg); - } - - if(ret && NETDATA.errorCodes[code].alert) - alert("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg); - }; - - NETDATA.errorReset = function() { - NETDATA.errorLast.code = 0; - NETDATA.errorLast.message = "You are doing fine!"; - NETDATA.errorLast.datetime = 0; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // fast numbers formatting - - NETDATA.fastNumberFormat = { - formatters_fixed: [], - formatters_zero_based: [], - - // this is the fastest and the preferred - getIntlNumberFormat: function(min, max) { - var key = max; - if(min === max) { - if(typeof this.formatters_fixed[key] === 'undefined') - this.formatters_fixed[key] = new Intl.NumberFormat(undefined, { - // style: 'decimal', - // minimumIntegerDigits: 1, - // minimumSignificantDigits: 1, - // maximumSignificantDigits: 1, - useGrouping: true, - minimumFractionDigits: min, - maximumFractionDigits: max - }); - - return this.formatters_fixed[key]; - } - else if(min === 0) { - if(typeof this.formatters_zero_based[key] === 'undefined') - this.formatters_zero_based[key] = new Intl.NumberFormat(undefined, { - // style: 'decimal', - // minimumIntegerDigits: 1, - // minimumSignificantDigits: 1, - // maximumSignificantDigits: 1, - useGrouping: true, - minimumFractionDigits: min, - maximumFractionDigits: max - }); - - return this.formatters_zero_based[key]; - } - else { - // this is never
used - // it is added just for completeness - return new Intl.NumberFormat(undefined, { - // style: 'decimal', - // minimumIntegerDigits: 1, - // minimumSignificantDigits: 1, - // maximumSignificantDigits: 1, - useGrouping: true, - minimumFractionDigits: min, - maximumFractionDigits: max - }); - } - }, - - // this respects locale - getLocaleString: function(min, max) { - var key = max; - if(min === max) { - if(typeof this.formatters_fixed[key] === 'undefined') - this.formatters_fixed[key] = { - format: function (value) { - return value.toLocaleString(undefined, { - // style: 'decimal', - // minimumIntegerDigits: 1, - // minimumSignificantDigits: 1, - // maximumSignificantDigits: 1, - useGrouping: true, - minimumFractionDigits: min, - maximumFractionDigits: max - }); - } - }; - - return this.formatters_fixed[key]; - } - else if(min === 0) { - if(typeof this.formatters_zero_based[key] === 'undefined') - this.formatters_zero_based[key] = { - format: function (value) { - return value.toLocaleString(undefined, { - // style: 'decimal', - // minimumIntegerDigits: 1, - // minimumSignificantDigits: 1, - // maximumSignificantDigits: 1, - useGrouping: true, - minimumFractionDigits: min, - maximumFractionDigits: max - }); - } - }; - - return this.formatters_zero_based[key]; - } - else { - return { - format: function (value) { - return value.toLocaleString(undefined, { - // style: 'decimal', - // minimumIntegerDigits: 1, - // minimumSignificantDigits: 1, - // maximumSignificantDigits: 1, - useGrouping: true, - minimumFractionDigits: min, - maximumFractionDigits: max - }); - } - }; - } - }, - - // the fallback - getFixed: function(min, max) { - var key = max; - if(min === max) { - if(typeof this.formatters_fixed[key] === 'undefined') - this.formatters_fixed[key] = { - format: function (value) { - if(value === 0) return "0"; - return value.toFixed(max); - } - }; - - return this.formatters_fixed[key]; - } - else if(min === 0) { - if(typeof this.formatters_zero_based[key] === 'undefined') - this.formatters_zero_based[key] = { - format: function (value) { - if(value === 0) return "0"; - return value.toFixed(max); - } - }; - - return this.formatters_zero_based[key]; - } - else { - return { - format: function (value) { - if(value === 0) return "0"; - return value.toFixed(max); - } - }; - } - }, - - testIntlNumberFormat: function() { - var value = 1.12345; - var e1 = "1.12", e2 = "1,12"; - var s = ""; - - try { - var x = new Intl.NumberFormat(undefined, { - useGrouping: true, - minimumFractionDigits: 2, - maximumFractionDigits: 2 - }); - - s = x.format(value); - } - catch(e) { - s = ""; - } - - // console.log('NumberFormat: ', s); - return (s === e1 || s === e2); - }, - - testLocaleString: function() { - var value = 1.12345; - var e1 = "1.12", e2 = "1,12"; - var s = ""; - - try { - s = value.toLocaleString(undefined, { - useGrouping: true, - minimumFractionDigits: 2, - maximumFractionDigits: 2 - }); - } - catch(e) { - s = ""; - } - - // console.log('localeString: ', s); - return (s === e1 || s === e2); - }, - - // on first run we decide which formatter to use - get: function(min, max) { - if(this.testIntlNumberFormat()) { - // console.log('numberformat'); - this.get = this.getIntlNumberFormat; - } - else if(this.testLocaleString()) { - // console.log('localestring'); - this.get = this.getLocaleString; - } - else { - // console.log('fixed'); - this.get = this.getFixed; - } - return this.get(min, max); - } - }; - - // 
---------------------------------------------------------------------------------------------------------------- - // element data attributes - - NETDATA.dataAttribute = function(element, attribute, def) { - var key = 'data-' + attribute.toString(); - if(element.hasAttribute(key) === true) { - var data = element.getAttribute(key); - - if(data === 'true') return true; - if(data === 'false') return false; - if(data === 'null') return null; - - // Only convert to a number if it doesn't change the string - if(data === +data + '') return +data; - - if(/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/.test(data)) - return JSON.parse(data); - - return data; - } - else return def; - }; - - NETDATA.dataAttributeBoolean = function(element, attribute, def) { - var value = NETDATA.dataAttribute(element, attribute, def); - - if(value === true || value === false) - return value; - - if(typeof(value) === 'string') { - if(value === 'yes' || value === 'on') - return true; - - if(value === '' || value === 'no' || value === 'off' || value === 'null') - return false; - - return def; - } - - if(typeof(value) === 'number') - return value !== 0; - - return def; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // commonMin & commonMax - - NETDATA.commonMin = { - keys: {}, - latest: {}, - - globalReset: function() { - this.keys = {}; - this.latest = {}; - }, - - get: function(state) { - if(typeof state.tmp.__commonMin === 'undefined') { - // get the commonMin setting - state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null); - } - - var min = state.data.min; - var name = state.tmp.__commonMin; - - if(name === null) { - // we don't need commonMin - //state.log('no need for commonMin'); - return min; - } - - var t = this.keys[name]; - if(typeof t === 'undefined') { - // add our commonMin - this.keys[name] = {}; - t = this.keys[name]; - } - - var uuid = state.uuid; - if(typeof t[uuid] !== 'undefined') { - if(t[uuid] === min) { - //state.log('commonMin ' + state.tmp.__commonMin + ' not changed: ' + this.latest[name]); - return this.latest[name]; - } - else if(min < this.latest[name]) { - //state.log('commonMin ' + state.tmp.__commonMin + ' increased: ' + min); - t[uuid] = min; - this.latest[name] = min; - return min; - } - } - - // add our min - t[uuid] = min; - - // find the common min - var m = min; - for(var i in t) - if(t.hasOwnProperty(i) && t[i] < m) m = t[i]; - - //state.log('commonMin ' + state.tmp.__commonMin + ' updated: ' + m); - this.latest[name] = m; - return m; - } - }; - - NETDATA.commonMax = { - keys: {}, - latest: {}, - - globalReset: function() { - this.keys = {}; - this.latest = {}; - }, - - get: function(state) { - if(typeof state.tmp.__commonMax === 'undefined') { - // get the commonMax setting - state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null); - } - - var max = state.data.max; - var name = state.tmp.__commonMax; - - if(name === null) { - // we don't need commonMax - //state.log('no need for commonMax'); - return max; - } - - var t = this.keys[name]; - if(typeof t === 'undefined') { - // add our commonMax - this.keys[name] = {}; - t = this.keys[name]; - } - - var uuid = state.uuid; - if(typeof t[uuid] !== 'undefined') { - if(t[uuid] === max) { - //state.log('commonMax ' + state.tmp.__commonMax + ' not changed: ' + this.latest[name]); - return this.latest[name]; - } - else if(max > this.latest[name]) { - //state.log('commonMax ' + state.tmp.__commonMax + ' increased: ' + max); - 
-    NETDATA.commonMax = {
-        keys: {},
-        latest: {},
-
-        globalReset: function() {
-            this.keys = {};
-            this.latest = {};
-        },
-
-        get: function(state) {
-            if(typeof state.tmp.__commonMax === 'undefined') {
-                // get the commonMax setting
-                state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
-            }
-
-            var max = state.data.max;
-            var name = state.tmp.__commonMax;
-
-            if(name === null) {
-                // we don't need commonMax
-                //state.log('no need for commonMax');
-                return max;
-            }
-
-            var t = this.keys[name];
-            if(typeof t === 'undefined') {
-                // add our commonMax
-                this.keys[name] = {};
-                t = this.keys[name];
-            }
-
-            var uuid = state.uuid;
-            if(typeof t[uuid] !== 'undefined') {
-                if(t[uuid] === max) {
-                    //state.log('commonMax ' + state.tmp.__commonMax + ' not changed: ' + this.latest[name]);
-                    return this.latest[name];
-                }
-                else if(max > this.latest[name]) {
-                    //state.log('commonMax ' + state.tmp.__commonMax + ' increased: ' + max);
-                    t[uuid] = max;
-                    this.latest[name] = max;
-                    return max;
-                }
-            }
-
-            // add our max
-            t[uuid] = max;
-
-            // find the common max
-            var m = max;
-            for(var i in t)
-                if(t.hasOwnProperty(i) && t[i] > m) m = t[i];
-
-            //state.log('commonMax ' + state.tmp.__commonMax + ' updated: ' + m);
-            this.latest[name] = m;
-            return m;
-        }
-    };
-
-    NETDATA.commonColors = {
-        keys: {},
-
-        globalReset: function() {
-            this.keys = {};
-        },
-
-        get: function(state, label) {
-            var ret = this.refill(state);
-
-            if(typeof ret.assigned[label] === 'undefined')
-                ret.assigned[label] = ret.available.shift();
-
-            return ret.assigned[label];
-        },
-
-        refill: function(state) {
-            var ret, len;
-
-            if(typeof state.tmp.__commonColors === 'undefined')
-                ret = this.prepare(state);
-            else {
-                ret = this.keys[state.tmp.__commonColors];
-                if(typeof ret === 'undefined')
-                    ret = this.prepare(state);
-            }
-
-            if(ret.available.length === 0) {
-                if(ret.copy_theme === true || ret.custom.length === 0) {
-                    // copy the theme colors
-                    len = NETDATA.themes.current.colors.length;
-                    while (len--)
-                        ret.available.unshift(NETDATA.themes.current.colors[len]);
-                }
-
-                // copy the custom colors
-                len = ret.custom.length;
-                while (len--)
-                    ret.available.unshift(ret.custom[len]);
-            }
-
-            state.colors_assigned = ret.assigned;
-            state.colors_available = ret.available;
-            state.colors_custom = ret.custom;
-
-            return ret;
-        },
-
-        __read_custom_colors: function(state, ret) {
-            // add the user supplied colors
-            var c = NETDATA.dataAttribute(state.element, 'colors', undefined);
-            if (typeof c === 'string' && c.length > 0) {
-                c = c.split(' ');
-                var len = c.length;
-
-                if (len > 0 && c[len - 1] === 'ONLY') {
-                    len--;
-                    ret.copy_theme = false;
-                }
-
-                while (len--)
-                    ret.custom.unshift(c[len]);
-            }
-        },
-
-        prepare: function(state) {
-            var has_custom_colors = false;
-
-            if(typeof state.tmp.__commonColors === 'undefined') {
-                var defname = state.chart.context;
-
-                // if this chart has data-colors=""
-                // we should use the chart uuid as the default key (private palette)
-                // (data-common-colors="NAME" will be used anyways)
-                var c = NETDATA.dataAttribute(state.element, 'colors', undefined);
-                if (typeof c === 'string' && c.length > 0) {
-                    defname = state.uuid;
-                    has_custom_colors = true;
-                }
-
-                // get the commonColors setting
-                state.tmp.__commonColors = NETDATA.dataAttribute(state.element, 'common-colors', defname);
-            }
-
-            var name = state.tmp.__commonColors;
-            var ret = this.keys[name];
-
-            if(typeof ret === 'undefined') {
-                // add our commonColors
-                this.keys[name] = {
-                    assigned: {},   // name-value of dimensions and their colors
-                    available: [],  // an array of colors available to be used
-                    custom: [],     // the array of colors defined by the user
-                    charts: {},     // the charts linked to this
-                    copy_theme: true
-                };
-                ret = this.keys[name];
-            }
-
-            if(typeof ret.charts[state.uuid] === 'undefined') {
-                ret.charts[state.uuid] = state;
-
-                if(has_custom_colors === true)
-                    this.__read_custom_colors(state, ret);
-            }
-
-            return ret;
-        }
-    };
-
-    // ----------------------------------------------------------------------------------------------------------------
-    // Chart Registry
-
-    // When multiple chart elements need the same chart definition, we avoid
-    // downloading it multiple times (and having it in browser memory multiple
-    // times) by using this registry.
-
-    // Every time we download a chart definition, we save it here with .add().
-    // Then we try to get it back with .get(). If that fails, we download it.
-
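-    // [editorial sketch, not in the original file] the intended flow, using
-    // the registry API defined below (host URL and chart id illustrative):
-    //
-    //     NETDATA.chartRegistry.downloadAll('http://localhost:19999', function(data) {
-    //         // all chart definitions of that host are now cached
-    //         var chart = NETDATA.chartRegistry.get('http://localhost:19999', 'system.cpu');
-    //         if(chart === null) { /* not known to this server */ }
-    //     });
-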
-    NETDATA.fixHost = function(host) {
-        while(host.slice(-1) === '/')
-            host = host.substring(0, host.length - 1);
-
-        return host;
-    };
-
-    NETDATA.chartRegistry = {
-        charts: {},
-
-        globalReset: function() {
-            this.charts = {};
-        },
-
-        add: function(host, id, data) {
-            if(typeof this.charts[host] === 'undefined')
-                this.charts[host] = {};
-
-            //console.log('added ' + host + '/' + id);
-            this.charts[host][id] = data;
-        },
-
-        get: function(host, id) {
-            if(typeof this.charts[host] === 'undefined')
-                return null;
-
-            if(typeof this.charts[host][id] === 'undefined')
-                return null;
-
-            //console.log('cached ' + host + '/' + id);
-            return this.charts[host][id];
-        },
-
-        downloadAll: function(host, callback) {
-            host = NETDATA.fixHost(host);
-
-            var self = this;
-
-            function got_data(h, data, callback) {
-                if(data !== null) {
-                    self.charts[h] = data.charts;
-
-                    // update the server timezone in our options
-                    if(typeof data.timezone === 'string')
-                        NETDATA.options.server_timezone = data.timezone;
-                }
-                else NETDATA.error(406, h + '/api/v1/charts');
-
-                if(typeof callback === 'function')
-                    callback(data);
-            }
-
-            if(netdataSnapshotData !== null) {
-                got_data(host, netdataSnapshotData.charts, callback);
-            }
-            else {
-                $.ajax({
-                    url: host + '/api/v1/charts',
-                    async: true,
-                    cache: false,
-                    xhrFields: {withCredentials: true} // required for the cookie
-                })
-                .done(function (data) {
-                    data = NETDATA.xss.checkOptional('/api/v1/charts', data);
-                    got_data(host, data, callback);
-                })
-                .fail(function () {
-                    NETDATA.error(405, host + '/api/v1/charts');
-
-                    if (typeof callback === 'function')
-                        callback(null);
-                });
-            }
-        }
-    };
-
-    // ----------------------------------------------------------------------------------------------------------------
-    // Global Pan and Zoom on charts
-
-    // This structure is used to synchronize all the charts, so that
-    // when you pan or zoom one, all the others are automatically refreshed
-    // to the same timespan.
-
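-    // [editorial sketch, not in the original file] how a chart library is
-    // expected to drive the object below while the user drags a chart:
-    //
-    //     // on pan/zoom (chartState and timestamps come from the library event)
-    //     NETDATA.globalPanAndZoom.setMaster(chartState, after_ms, before_ms);
-    //
-    //     // the auto-refresher then asks, for every other chart:
-    //     if(NETDATA.globalPanAndZoom.shouldBeAutoRefreshed(otherState)) {
-    //         /* re-fetch using force_after_ms / force_before_ms */
-    //     }
-    //
-    //     // on reset (double click, toolbox reset):
-    //     NETDATA.globalPanAndZoom.clearMaster();
-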
-    NETDATA.globalPanAndZoom = {
-        seq: 0,                 // timestamp ms
-                                // every time a chart is panned or zoomed
-                                // we set the timestamp here
-                                // then we use it as a sequence number
-                                // to find if other charts are synchronized
-                                // to this time-range
-
-        master: null,           // the master chart (state), to which all others
-                                // are synchronized
-
-        force_before_ms: null,  // the timespan to sync all other charts
-        force_after_ms: null,
-
-        callback: null,
-
-        globalReset: function() {
-            this.clearMaster();
-            this.seq = 0;
-            this.master = null;
-            this.force_after_ms = null;
-            this.force_before_ms = null;
-            this.callback = null;
-        },
-
-        delay: function() {
-            if(NETDATA.options.debug.globalPanAndZoom === true)
-                console.log('globalPanAndZoom.delay()');
-
-            NETDATA.options.auto_refresher_stop_until = Date.now() + NETDATA.options.current.global_pan_sync_time;
-        },
-
-        // set a new master
-        setMaster: function(state, after, before) {
-            this.delay();
-
-            if(NETDATA.options.current.sync_pan_and_zoom === false)
-                return;
-
-            if(this.master === null) {
-                if(NETDATA.options.debug.globalPanAndZoom === true)
-                    console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') SET MASTER');
-            }
-            else if(this.master !== state) {
-                if(NETDATA.options.debug.globalPanAndZoom === true)
-                    console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') CHANGED MASTER');
-
-                this.master.resetChart(true, true);
-            }
-
-            var now = Date.now();
-            this.master = state;
-            this.seq = now;
-            this.force_after_ms = after;
-            this.force_before_ms = before;
-
-            if(typeof this.callback === 'function')
-                this.callback(true, after, before);
-        },
-
-        // clear the master
-        clearMaster: function() {
-            if(NETDATA.options.debug.globalPanAndZoom === true)
-                console.log('globalPanAndZoom.clearMaster()');
-
-            if(this.master !== null) {
-                var st = this.master;
-                this.master = null;
-                st.resetChart();
-            }
-
-            this.master = null;
-            this.seq = 0;
-            this.force_after_ms = null;
-            this.force_before_ms = null;
-            NETDATA.options.auto_refresher_stop_until = 0;
-
-            if(typeof this.callback === 'function')
-                this.callback(false, 0, 0);
-        },
-
-        // is the given state the master of the global
-        // pan and zoom sync?
-        isMaster: function(state) {
-            return (this.master === state);
-        },
-
-        // do we currently have a global pan and zoom sync?
-        isActive: function() {
-            return (this.master !== null && this.force_before_ms !== null && this.force_after_ms !== null && this.seq !== 0);
-        },
-
-        // check if a chart, other than the master,
-        // needs to be refreshed, due to the global pan and zoom
-        shouldBeAutoRefreshed: function(state) {
-            if(this.master === null || this.seq === 0)
-                return false;
-
-            //if(state.needsRecreation())
-            //    return true;
-
-            return (state.tm.pan_and_zoom_seq !== this.seq);
-        }
-    };
-
-    // ----------------------------------------------------------------------------------------------------------------
-    // global chart underlay (time-frame highlighting)
-
-    NETDATA.globalChartUnderlay = {
-        callback: null,         // what to call when a highlighted range is set up
-        after: null,            // highlight after this time
-        before: null,           // highlight before this time
-        view_after: null,       // the charts after_ms viewport when the highlight was set up
-        view_before: null,      // the charts before_ms viewport when the highlight was set up
-        state: null,            // the chart on which the highlight was set up
-
-        isActive: function() {
-            return (this.after !== null && this.before !== null);
-        },
-
-        hasViewport: function() {
-            return (this.state !== null && this.view_after !== null && this.view_before !== null);
-        },
-
-        init: function(state, after, before, view_after, view_before) {
-            this.state = (typeof state !== 'undefined') ? state : null;
-            this.after = (typeof after !== 'undefined' && after !== null && after > 0) ? after : null;
-            this.before = (typeof before !== 'undefined' && before !== null && before > 0) ? before : null;
-            this.view_after = (typeof view_after !== 'undefined' && view_after !== null && view_after > 0) ? view_after : null;
-            this.view_before = (typeof view_before !== 'undefined' && view_before !== null && view_before > 0) ? view_before : null;
-        },
-
-        setup: function() {
-            if(this.isActive() === true) {
-                if (this.state === null)
-                    this.state = NETDATA.options.targets[0];
-
-                if (typeof this.callback === 'function')
-                    this.callback(true, this.after, this.before);
-            }
-            else {
-                if (typeof this.callback === 'function')
-                    this.callback(false, 0, 0);
-            }
-        },
-
-        set: function(state, after, before, view_after, view_before) {
-            if(after > before) {
-                var t = after;
-                after = before;
-                before = t;
-            }
-
-            this.init(state, after, before, view_after, view_before);
-
-            if (this.hasViewport() === true)
-                NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before);
-
-            this.setup();
-        },
-
-        clear: function() {
-            this.after = null;
-            this.before = null;
-            this.state = null;
-            this.view_after = null;
-            this.view_before = null;
-
-            if(typeof this.callback === 'function')
-                this.callback(false, 0, 0);
-        },
-
-        focus: function() {
-            if(this.isActive() === true && this.hasViewport() === true) {
-                if(this.state === null)
-                    this.state = NETDATA.options.targets[0];
-
-                if(NETDATA.globalPanAndZoom.isMaster(this.state) === true)
-                    NETDATA.globalPanAndZoom.clearMaster();
-
-                NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before, true);
-            }
-        }
-    };
-
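-    // [editorial sketch, not in the original file] the highlight workflow, as
-    // driven by the dashboard (timestamps in ms; t_* names are placeholders):
-    //
-    //     // highlight t_h1..t_h2 while the charts show t_v1..t_v2
-    //     NETDATA.globalChartUnderlay.set(state, t_h1, t_h2, t_v1, t_v2);
-    //
-    //     NETDATA.globalChartUnderlay.focus();  // zoom all charts to the stored viewport
-    //     NETDATA.globalChartUnderlay.clear();  // drop the highlight
-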
-    // ----------------------------------------------------------------------------------------------------------------
-    // dimensions selection
-
-    // FIXME
-    // move color assignment to dimensions, here
-
-    var dimensionStatus = function(parent, label, name_div, value_div, color) {
-        this.enabled = false;
-        this.parent = parent;
-        this.label = label;
-        this.name_div = null;
-        this.value_div = null;
-        this.color = NETDATA.themes.current.foreground;
-        this.selected = (parent.unselected_count === 0);
-
-        this.setOptions(name_div, value_div, color);
-    };
-
-    dimensionStatus.prototype.invalidate = function() {
-        this.name_div = null;
-        this.value_div = null;
-        this.enabled = false;
-    };
-
-    dimensionStatus.prototype.setOptions = function(name_div, value_div, color) {
-        this.color = color;
-
-        if(this.name_div !== name_div) {
-            this.name_div = name_div;
-            this.name_div.title = this.label;
-            this.name_div.style.setProperty('color', this.color, 'important');
-            if(this.selected === false)
-                this.name_div.className = 'netdata-legend-name not-selected';
-            else
-                this.name_div.className = 'netdata-legend-name selected';
-        }
-
-        if(this.value_div !== value_div) {
-            this.value_div = value_div;
-            this.value_div.title = this.label;
-            this.value_div.style.setProperty('color', this.color, 'important');
-            if(this.selected === false)
-                this.value_div.className = 'netdata-legend-value not-selected';
-            else
-                this.value_div.className = 'netdata-legend-value selected';
-        }
-
-        this.enabled = true;
-        this.setHandler();
-    };
-
-    dimensionStatus.prototype.setHandler = function() {
-        if(this.enabled === false) return;
-
-        var ds = this;
-
-        // this.name_div.onmousedown = this.value_div.onmousedown = function(e) {
-        this.name_div.onclick = this.value_div.onclick = function(e) {
-            e.preventDefault();
-            if(ds.isSelected()) {
-                // this is selected
-                if(e.shiftKey === true || e.ctrlKey === true) {
-                    // control or shift key is pressed -> unselect this (unless none would remain selected, in which case select all)
-                    ds.unselect();
-
-                    if(ds.parent.countSelected() === 0)
-                        ds.parent.selectAll();
-                }
-                else {
-                    // no key is pressed -> select only this (unless it is the only one selected already, in which case select all)
-                    if(ds.parent.countSelected() === 1) {
-                        ds.parent.selectAll();
-                    }
-                    else {
-                        ds.parent.selectNone();
-                        ds.select();
-                    }
-                }
-            }
-            else {
-                // this is not selected
-                if(e.shiftKey === true || e.ctrlKey === true) {
-                    // control or shift key is pressed -> select this too
-                    ds.select();
-                }
-                else {
-                    // no key is pressed -> select only this
-                    ds.parent.selectNone();
-                    ds.select();
-                }
-            }
-
-            ds.parent.state.redrawChart();
-        };
-    };
-
-    dimensionStatus.prototype.select = function() {
-        if(this.enabled === false) return;
-
-        this.name_div.className = 'netdata-legend-name selected';
-        this.value_div.className = 'netdata-legend-value selected';
-        this.selected = true;
-    };
-
-    dimensionStatus.prototype.unselect = function() {
-        if(this.enabled === false) return;
-
-        this.name_div.className = 'netdata-legend-name not-selected';
-        this.value_div.className = 'netdata-legend-value hidden';
-        this.selected = false;
-    };
-
-    dimensionStatus.prototype.isSelected = function() {
-        return (this.enabled === true && this.selected === true);
-    };
-
-    // ----------------------------------------------------------------------------------------------------------------
-
-    var dimensionsVisibility = function(state) {
-        this.state = state;
-        this.len = 0;
-        this.dimensions = {};
-        this.selected_count = 0;
-        this.unselected_count = 0;
-    };
-
-    dimensionsVisibility.prototype.dimensionAdd = function(label, name_div, value_div, color) {
-        if(typeof this.dimensions[label] === 'undefined') {
-            this.len++;
-            this.dimensions[label] = new dimensionStatus(this, label, name_div, value_div, color);
-        }
-        else
-            this.dimensions[label].setOptions(name_div, value_div, color);
-
-        return this.dimensions[label];
-    };
-
-    dimensionsVisibility.prototype.dimensionGet = function(label) {
-        return this.dimensions[label];
-    };
-
-    dimensionsVisibility.prototype.invalidateAll = function() {
-        var keys = Object.keys(this.dimensions);
-        var len = keys.length;
-        while(len--)
-            this.dimensions[keys[len]].invalidate();
-    };
-
-    dimensionsVisibility.prototype.selectAll = function() {
-        var keys = Object.keys(this.dimensions);
-        var len = keys.length;
-        while(len--)
-            this.dimensions[keys[len]].select();
-    };
-
-    dimensionsVisibility.prototype.countSelected = function() {
-        var selected = 0;
-        var keys = Object.keys(this.dimensions);
-        var len = keys.length;
-        while(len--)
-            if(this.dimensions[keys[len]].isSelected()) selected++;
-
-        return selected;
-    };
-
-    dimensionsVisibility.prototype.selectNone = function() {
-        var keys = Object.keys(this.dimensions);
-        var len = keys.length;
-        while(len--)
-            this.dimensions[keys[len]].unselect();
-    };
-
-    dimensionsVisibility.prototype.selected2BooleanArray = function(array) {
-        var ret = [];
-        this.selected_count = 0;
-        this.unselected_count = 0;
-
-        var len = array.length;
-        while(len--) {
-            var ds = this.dimensions[array[len]];
-            if(typeof ds === 'undefined') {
-                // console.log(array[i] + ' is not found');
-                ret.unshift(false);
-            }
-            else if(ds.isSelected()) {
-                ret.unshift(true);
-                this.selected_count++;
-            }
-            else {
-                ret.unshift(false);
-                this.unselected_count++;
-            }
-        }
-
-        if(this.selected_count === 0 && this.unselected_count !== 0) {
-            this.selectAll();
-            return this.selected2BooleanArray(array);
-        }
-
-        return ret;
-    };
-
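-    // [editorial sketch, not in the original file] selected2BooleanArray maps
-    // the chart's dimension names to a visibility mask, in the same order;
-    // e.g. with dimensions ['in', 'out'] and only 'in' selected:
-    //
-    //     state.dimensions_visibility.selected2BooleanArray(['in', 'out']);  // -> [true, false]
-    //
-    // if nothing would remain selected, it silently re-selects everything first.
-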
-    // ----------------------------------------------------------------------------------------------------------------
-    // date/time conversion
-
-    NETDATA.dateTime = {
-        using_timezone: false,
-
-        // these are the old netdata functions
-        // we fall back to these, if the new ones fail
-
-        localeDateStringNative: function(d) {
-            return d.toLocaleDateString();
-        },
-
-        localeTimeStringNative: function(d) {
-            return d.toLocaleTimeString();
-        },
-
-        xAxisTimeStringNative: function(d) {
-            return NETDATA.zeropad(d.getHours()) + ":"
-                + NETDATA.zeropad(d.getMinutes()) + ":"
-                + NETDATA.zeropad(d.getSeconds());
-        },
-
-        // initialize the new date/time conversion
-        // functions.
-        // if this fails, we fall back to the above
-        init: function(timezone) {
-            //console.log('init with timezone: ' + timezone);
-
-            // detect browser timezone
-            try {
-                NETDATA.options.browser_timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
-            }
-            catch(e) {
-                console.log('failed to detect browser timezone: ' + e.toString());
-                NETDATA.options.browser_timezone = 'cannot-detect-it';
-            }
-
-            var ret = false;
-
-            try {
-                var dateOptions = {
-                    localeMatcher: 'best fit',
-                    formatMatcher: 'best fit',
-                    weekday: 'short',
-                    year: 'numeric',
-                    month: 'short',
-                    day: '2-digit'
-                };
-
-                var timeOptions = {
-                    localeMatcher: 'best fit',
-                    hour12: false,
-                    formatMatcher: 'best fit',
-                    hour: '2-digit',
-                    minute: '2-digit',
-                    second: '2-digit'
-                };
-
-                var xAxisOptions = {
-                    localeMatcher: 'best fit',
-                    hour12: false,
-                    formatMatcher: 'best fit',
-                    hour: '2-digit',
-                    minute: '2-digit',
-                    second: '2-digit'
-                };
-
-                if(typeof timezone === 'string' && timezone !== '' && timezone !== 'default') {
-                    dateOptions.timeZone = timezone;
-                    timeOptions.timeZone = timezone;
-                    timeOptions.timeZoneName = 'short';
-                    xAxisOptions.timeZone = timezone;
-                    this.using_timezone = true;
-                }
-                else {
-                    timezone = 'default';
-                    this.using_timezone = false;
-                }
-
-                this.dateFormat = new Intl.DateTimeFormat(navigator.language, dateOptions);
-                this.timeFormat = new Intl.DateTimeFormat(navigator.language, timeOptions);
-                this.xAxisFormat = new Intl.DateTimeFormat(navigator.language, xAxisOptions);
-
-                this.localeDateString = function(d) {
-                    return this.dateFormat.format(d);
-                };
-
-                this.localeTimeString = function(d) {
-                    return this.timeFormat.format(d);
-                };
-
-                this.xAxisTimeString = function(d) {
-                    return this.xAxisFormat.format(d);
-                };
-
-                // exercise the formatters once, so that a broken timezone
-                // throws here and triggers the fallback below
-                var d = new Date();
-                var t = this.dateFormat.format(d) + ' ' + this.timeFormat.format(d) + ' ' + this.xAxisFormat.format(d);
-
-                ret = true;
-            }
-            catch(e) {
-                console.log('Cannot setup Date/Time formatting: ' + e.toString());
-
-                timezone = 'default';
-                this.localeDateString = this.localeDateStringNative;
-                this.localeTimeString = this.localeTimeStringNative;
-                this.xAxisTimeString = this.xAxisTimeStringNative;
-                this.using_timezone = false;
-
-                ret = false;
-            }
-
-            // save it
-            //console.log('init setOption timezone: ' + timezone);
-            NETDATA.setOption('timezone', timezone);
-
-            return ret;
-        }
-    };
-    NETDATA.dateTime.init(NETDATA.options.current.timezone);
-
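-    // [editorial sketch, not in the original file] the effect of init() with
-    // an explicit IANA timezone (output is locale dependent, values below
-    // are illustrative):
-    //
-    //     NETDATA.dateTime.init('Europe/Athens');
-    //     NETDATA.dateTime.localeTimeString(new Date());  // e.g. "15:04:05 EEST"
-    //     NETDATA.dateTime.xAxisTimeString(new Date());   // e.g. "15:04:05"
-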
-    // ----------------------------------------------------------------------------------------------------------------
-    // units conversion
-
-    NETDATA.unitsConversion = {
-        keys: {},       // keys for data-common-units
-        latest: {},     // latest selected units for data-common-units
-
-        globalReset: function() {
-            this.keys = {};
-            this.latest = {};
-        },
-
-        scalableUnits: {
-            'kilobits/s': {
-                'bits/s': 1 / 1000,
-                'kilobits/s': 1,
-                'megabits/s': 1000,
-                'gigabits/s': 1000000,
-                'terabits/s': 1000000000
-            },
-            'kilobytes/s': {
-                'bytes/s': 1 / 1024,
-                'kilobytes/s': 1,
-                'megabytes/s': 1024,
-                'gigabytes/s': 1024 * 1024,
-                'terabytes/s': 1024 * 1024 * 1024
-            },
-            'KB/s': {
-                'B/s': 1 / 1024,
-                'KB/s': 1,
-                'MB/s': 1024,
-                'GB/s': 1024 * 1024,
-                'TB/s': 1024 * 1024 * 1024
-            },
-            'KB': {
-                'B': 1 / 1024,
-                'KB': 1,
-                'MB': 1024,
-                'GB': 1024 * 1024,
-                'TB': 1024 * 1024 * 1024
-            },
-            'MB': {
-                'B': 1 / (1024 * 1024),
-                'KB': 1 / 1024,
-                'MB': 1,
-                'GB': 1024,
-                'TB': 1024 * 1024,
-                'PB': 1024 * 1024 * 1024
-            },
-            'GB': {
-                'B': 1 / (1024 * 1024 * 1024),
-                'KB': 1 / (1024 * 1024),
-                'MB': 1 / 1024,
-                'GB': 1,
-                'TB': 1024,
-                'PB': 1024 * 1024,
-                'EB': 1024 * 1024 * 1024
-            }
-            /*
-            'milliseconds': {
-                'seconds': 1000
-            },
-            'seconds': {
-                'milliseconds': 0.001,
-                'seconds': 1,
-                'minutes': 60,
-                'hours': 3600,
-                'days': 86400
-            }
-            */
-        },
-
-        convertibleUnits: {
-            'Celsius': {
-                'Fahrenheit': {
-                    check: function(max) { void(max); return NETDATA.options.current.temperature === 'fahrenheit'; },
-                    convert: function(value) { return value * 9 / 5 + 32; }
-                }
-            },
-            'celsius': {
-                'fahrenheit': {
-                    check: function(max) { void(max); return NETDATA.options.current.temperature === 'fahrenheit'; },
-                    convert: function(value) { return value * 9 / 5 + 32; }
-                }
-            },
-            'seconds': {
-                'time': {
-                    check: function (max) { void(max); return NETDATA.options.current.seconds_as_time; },
-                    convert: function(seconds) { return NETDATA.unitsConversion.seconds2time(seconds); }
-                }
-            },
-            'milliseconds': {
-                'milliseconds': {
-                    check: function (max) { return NETDATA.options.current.seconds_as_time && max < 1000; },
-                    convert: function(milliseconds) {
-                        // 'var' was missing here in the original, leaking
-                        // 'tms' as an implicit global
-                        var tms = Math.round(milliseconds * 10);
-                        milliseconds = Math.floor(tms / 10);
-
-                        tms -= milliseconds * 10;
-
-                        return (milliseconds).toString() + '.' + tms.toString();
-                    }
-                },
-                'seconds': {
-                    check: function (max) { return NETDATA.options.current.seconds_as_time && max >= 1000 && max < 60000; },
-                    convert: function(milliseconds) {
-                        milliseconds = Math.round(milliseconds);
-
-                        var seconds = Math.floor(milliseconds / 1000);
-                        milliseconds -= seconds * 1000;
-
-                        milliseconds = Math.round(milliseconds / 10);
-
-                        return seconds.toString() + '.'
-                            + NETDATA.zeropad(milliseconds);
-                    }
-                },
-                'M:SS.ms': {
-                    check: function (max) { return NETDATA.options.current.seconds_as_time && max >= 60000; },
-                    convert: function(milliseconds) {
-                        milliseconds = Math.round(milliseconds);
-
-                        var minutes = Math.floor(milliseconds / 60000);
-                        milliseconds -= minutes * 60000;
-
-                        var seconds = Math.floor(milliseconds / 1000);
-                        milliseconds -= seconds * 1000;
-
-                        milliseconds = Math.round(milliseconds / 10);
-
-                        return minutes.toString() + ':'
-                            + NETDATA.zeropad(seconds) + '.'
-                            + NETDATA.zeropad(milliseconds);
-                    }
-                }
-            }
-        },
-
-        seconds2time: function(seconds) {
-            seconds = Math.abs(seconds);
-
-            var days = Math.floor(seconds / 86400);
-            seconds -= days * 86400;
-
-            var hours = Math.floor(seconds / 3600);
-            seconds -= hours * 3600;
-
-            var minutes = Math.floor(seconds / 60);
-            seconds -= minutes * 60;
-
-            seconds = Math.round(seconds);
-
-            var ms_txt = '';
-            /*
-            var ms = seconds - Math.floor(seconds);
-            seconds -= ms;
-            ms = Math.round(ms * 1000);
-
-            if(ms > 1) {
-                if(ms < 10)
-                    ms_txt = '.00' + ms.toString();
-                else if(ms < 100)
-                    ms_txt = '.0' + ms.toString();
-                else
-                    ms_txt = '.' + ms.toString();
-            }
-            */
-
-            return ((days > 0)?days.toString() + 'd:':'').toString()
-                + NETDATA.zeropad(hours) + ':'
-                + NETDATA.zeropad(minutes) + ':'
-                + NETDATA.zeropad(seconds)
-                + ms_txt;
-        },
-
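-        // [editorial note, not in the original file] examples of the
-        // formatter above:
-        //
-        //     NETDATA.unitsConversion.seconds2time(75);     // "00:01:15"
-        //     NETDATA.unitsConversion.seconds2time(90061);  // "1d:01:01:01"
-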
-        // get a function that converts the units
-        // + every time units are switched call the callback
-        get: function(uuid, min, max, units, desired_units, common_units_name, switch_units_callback) {
-            // validate the parameters
-            if(typeof units === 'undefined')
-                units = 'undefined';
-
-            // check if we support units conversion
-            if(typeof this.scalableUnits[units] === 'undefined' && typeof this.convertibleUnits[units] === 'undefined') {
-                // we can't convert these units
-                //console.log('DEBUG: ' + uuid.toString() + ' can\'t convert units: ' + units.toString());
-                return function(value) { return value; };
-            }
-
-            // check if the caller wants the original units
-            if(typeof desired_units === 'undefined' || desired_units === null || desired_units === 'original' || desired_units === units) {
-                //console.log('DEBUG: ' + uuid.toString() + ' original units wanted');
-                switch_units_callback(units);
-                return function(value) { return value; };
-            }
-
-            // now we know we can convert the units
-            // and the caller wants some kind of conversion
-
-            var tunits = null;
-            var tdivider = 0;
-            var x;
-
-            if(typeof this.scalableUnits[units] !== 'undefined') {
-                // units that can be scaled
-                // we decide a divider
-
-                // console.log('NETDATA.unitsConversion.get(' + units.toString() + ', ' + desired_units.toString() + ', function()) decide divider with min = ' + min.toString() + ', max = ' + max.toString());
-
-                if (desired_units === 'auto') {
-                    // the caller wants to auto-scale the units
-
-                    // find the absolute maximum value that is rendered on the chart
-                    // based on this we decide the scale
-                    min = Math.abs(min);
-                    max = Math.abs(max);
-                    if (min > max) max = min;
-
-                    // find the smallest scale that provides integers
-                    for (x in this.scalableUnits[units]) {
-                        if (this.scalableUnits[units].hasOwnProperty(x)) {
-                            var m = this.scalableUnits[units][x];
-                            if (m <= max && m > tdivider) {
-                                tunits = x;
-                                tdivider = m;
-                            }
-                        }
-                    }
-
-                    if(tunits === null || tdivider <= 0) {
-                        // we couldn't find one
-                        //console.log('DEBUG: ' + uuid.toString() + ' cannot find an auto-scaling candidate for units: ' + units.toString() + ' (max: ' + max.toString() + ')');
-                        switch_units_callback(units);
-                        return function(value) { return value; };
-                    }
-
-                    if(typeof common_units_name === 'string' && typeof uuid === 'string') {
-                        // the caller wants several charts to have the same units
-                        // data-common-units
-
-                        var common_units_key = common_units_name + '-' + units;
-
-                        // add our divider into the list of keys
-                        var t = this.keys[common_units_key];
-                        if(typeof t === 'undefined') {
-                            this.keys[common_units_key] = {};
-                            t = this.keys[common_units_key];
-                        }
-                        t[uuid] = {
-                            units: tunits,
-                            divider: tdivider
-                        };
-
-                        // find the max divider of all charts
-                        var common_units = t[uuid];
-                        for(x in t) {
-                            if (t.hasOwnProperty(x) && t[x].divider > common_units.divider)
-                                common_units = t[x];
-                        }
-
-                        // save the common selection to the latest keys
-                        var latest = this.latest[common_units_key];
-                        if(typeof latest === 'undefined') {
-                            this.latest[common_units_key] = {};
-                            latest = this.latest[common_units_key];
-                        }
-                        latest.units = common_units.units;
-                        latest.divider = common_units.divider;
-
-                        tunits = latest.units;
-                        tdivider = latest.divider;
-
-                        //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', common-units=' + common_units_name.toString() + ((t[uuid].divider !== tdivider)?' USED COMMON, mine was ' + t[uuid].units:' set common').toString());
-
-                        // apply it to this chart
-                        switch_units_callback(tunits);
-                        return function(value) {
-                            if(tdivider !== latest.divider) {
-                                // another chart switched our common units
-                                // we should switch them too
-                                //console.log('DEBUG: ' + uuid + ' switching units due to a common-units change, from ' + tunits.toString() + ' to ' + latest.units.toString());
-                                tunits = latest.units;
-                                tdivider = latest.divider;
-                                switch_units_callback(tunits);
-                            }
-
-                            return value / tdivider;
-                        };
-                    }
-                    else {
-                        // the caller did not give data-common-units
-                        // this chart auto-scales independently of all others
-                        //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', autonomously');
-
-                        switch_units_callback(tunits);
-                        return function (value) { return value / tdivider; };
-                    }
-                }
-                else {
-                    // the caller wants specific units
-
-                    if(typeof this.scalableUnits[units][desired_units] !== 'undefined') {
-                        // all good, set the new units
-                        tdivider = this.scalableUnits[units][desired_units];
-                        // console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + desired_units.toString() + ' with divider ' + tdivider.toString() + ', by reference');
-                        switch_units_callback(desired_units);
-                        return function (value) { return value / tdivider; };
-                    }
-                    else {
-                        // oops! switch back to original units
-                        console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.');
-                        switch_units_callback(units);
-                        return function (value) { return value; };
-                    }
-                }
-            }
-            else if(typeof this.convertibleUnits[units] !== 'undefined') {
-                // units that can be converted
-                if(desired_units === 'auto') {
-                    for(x in this.convertibleUnits[units]) {
-                        if (this.convertibleUnits[units].hasOwnProperty(x)) {
-                            if (this.convertibleUnits[units][x].check(max)) {
-                                //console.log('DEBUG: ' + uuid.toString() + ' converting ' + units.toString() + ' to: ' + x.toString());
-                                switch_units_callback(x);
-                                return this.convertibleUnits[units][x].convert;
-                            }
-                        }
-                    }
-
-                    // none checked ok
-                    //console.log('DEBUG: ' + uuid.toString() + ' no conversion available for ' + units.toString() + ' to: ' + desired_units.toString());
-                    switch_units_callback(units);
-                    return function (value) { return value; };
-                }
-                else if(typeof this.convertibleUnits[units][desired_units] !== 'undefined') {
-                    switch_units_callback(desired_units);
-                    return this.convertibleUnits[units][desired_units].convert;
-                }
-                else {
-                    console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.');
-                    switch_units_callback(units);
-                    return function (value) { return value; };
-                }
-            }
-            else {
-                // hm... did we forget to implement the new type?
-                console.log('Unmatched unit conversion method for units ' + units.toString());
-                switch_units_callback(units);
-                return function (value) { return value; };
-            }
-        }
-    };
-
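-    // [editorial sketch, not in the original file] how a chart library is
-    // expected to use the converter above; 'KB/s' and the value range are
-    // illustrative:
-    //
-    //     var convert = NETDATA.unitsConversion.get(state.uuid, 0, 3145728, 'KB/s', 'auto', null,
-    //         function(units) { state.units_current = units; });  // called with 'GB/s'
-    //
-    //     convert(3145728);  // 3145728 / (1024 * 1024) -> 3 (GB/s)
-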
-    // ----------------------------------------------------------------------------------------------------------------
-    // global selection sync
-
-    NETDATA.globalSelectionSync = {
-        state: null,
-        dont_sync_before: 0,
-        last_t: 0,
-        slaves: [],
-        timeout_id: undefined,
-
-        globalReset: function() {
-            this.stop();
-            this.state = null;
-            this.dont_sync_before = 0;
-            this.last_t = 0;
-            this.slaves = [];
-            this.timeout_id = undefined;
-        },
-
-        active: function() {
-            return (this.state !== null);
-        },
-
-        // return true if global selection sync can be enabled now
-        enabled: function() {
-            // console.log('enabled()');
-            // can we globally apply selection sync?
-            if(NETDATA.options.current.sync_selection === false)
-                return false;
-
-            return (this.dont_sync_before <= Date.now());
-        },
-
-        // set the global selection sync master
-        setMaster: function(state) {
-            if(this.enabled() === false) {
-                this.stop();
-                return;
-            }
-
-            if(this.state === state)
-                return;
-
-            if(this.state !== null)
-                this.stop();
-
-            if(NETDATA.options.debug.globalSelectionSync === true)
-                console.log('globalSelectionSync.setMaster(' + state.id + ')');
-
-            state.selected = true;
-            this.state = state;
-            this.last_t = 0;
-
-            // find all slaves
-            var targets = NETDATA.intersectionObserver.targets();
-            this.slaves = [];
-            var len = targets.length;
-            while(len--) {
-                var st = targets[len];
-                if (this.state !== st && st.globalSelectionSyncIsEligible() === true)
-                    this.slaves.push(st);
-            }
-
-            // this.delay(100);
-        },
-
-        // stop global selection sync
-        stop: function() {
-            if(this.state !== null) {
-                if(NETDATA.options.debug.globalSelectionSync === true)
-                    console.log('globalSelectionSync.stop()');
-
-                var len = this.slaves.length;
-                while (len--)
-                    this.slaves[len].clearSelection();
-
-                this.state.clearSelection();
-
-                this.last_t = 0;
-                this.slaves = [];
-                this.state = null;
-            }
-        },
-
-        // delay global selection sync for some time
-        delay: function(ms) {
-            if(NETDATA.options.current.sync_selection === true) {
-                if(NETDATA.options.debug.globalSelectionSync === true)
-                    console.log('globalSelectionSync.delay()');
-
-                if(typeof ms === 'number')
-                    this.dont_sync_before = Date.now() + ms;
-                else
-                    this.dont_sync_before = Date.now() + NETDATA.options.current.sync_selection_delay;
-            }
-        },
-
-        __syncSlaves: function() {
-            if(NETDATA.globalSelectionSync.enabled() === true) {
-                if(NETDATA.options.debug.globalSelectionSync === true)
-                    console.log('globalSelectionSync.__syncSlaves()');
-
-                var t = NETDATA.globalSelectionSync.last_t;
-                var len = NETDATA.globalSelectionSync.slaves.length;
-                while (len--)
-                    NETDATA.globalSelectionSync.slaves[len].setSelection(t);
-
-                // this function runs as a timeout callback, so 'this' is not
-                // globalSelectionSync here - use the full path (the original
-                // assigned to 'this.timeout_id')
-                NETDATA.globalSelectionSync.timeout_id = undefined;
-            }
-        },
-
-        // sync all the visible charts to the given time
-        // this is to be called from the chart libraries
-        sync: function(state, t) {
-            if(NETDATA.options.current.sync_selection === true) {
-                if(NETDATA.options.debug.globalSelectionSync === true)
-                    console.log('globalSelectionSync.sync(' + state.id + ', ' + t.toString() + ')');
-
-                this.setMaster(state);
-
-                if(t === this.last_t)
-                    return;
-
-                this.last_t = t;
-
-                if (state.foreign_element_selection !== null)
-                    state.foreign_element_selection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t);
-
-                if (this.timeout_id)
-                    NETDATA.timeout.clear(this.timeout_id);
-
-                this.timeout_id = NETDATA.timeout.set(this.__syncSlaves, 0);
-            }
-        }
-    };
-
-    NETDATA.intersectionObserver = {
-        observer: null,
-        visible_targets: [],
-
-        options: {
-            root: null,
-            rootMargin: "0px",
-            threshold: null
-        },
-
-        enabled: function() {
-            return this.observer !== null;
-        },
-
-        globalReset: function() {
-            if(this.observer !== null) {
-                this.visible_targets = [];
-                this.observer.disconnect();
-                this.init();
-            }
-        },
-
-        targets: function() {
-            if(this.enabled() === true && this.visible_targets.length > 0)
-                return this.visible_targets;
-            else
-                return NETDATA.options.targets;
-        },
-
-        switchChartVisibility: function() {
-            var old = this.__visibilityRatioOld;
-
-            if(old !== this.__visibilityRatio) {
-                if (old === 0 && this.__visibilityRatio > 0)
-                    this.unhideChart();
-                else if (old > 0 && this.__visibilityRatio === 0)
-                    this.hideChart();
-
-                this.__visibilityRatioOld = this.__visibilityRatio;
-            }
-        },
-
-        handler: function(entries, observer) {
-            entries.forEach(function(entry) {
-                var state = NETDATA.chartState(entry.target);
-
-                var idx;
-                if(entry.intersectionRatio > 0) {
-                    idx = NETDATA.intersectionObserver.visible_targets.indexOf(state);
-                    if(idx === -1) {
-                        if(NETDATA.scrollUp === true)
-                            NETDATA.intersectionObserver.visible_targets.push(state);
-                        else
-                            NETDATA.intersectionObserver.visible_targets.unshift(state);
-                    }
-                    else if(state.__visibilityRatio === 0)
-                        state.log("was not visible until now, but was already in visible_targets");
-                }
-                else {
-                    idx = NETDATA.intersectionObserver.visible_targets.indexOf(state);
-                    if(idx !== -1)
-                        NETDATA.intersectionObserver.visible_targets.splice(idx, 1);
-                    else if(state.__visibilityRatio > 0)
-                        state.log("was visible, but not found in visible_targets");
-                }
-
-                state.__visibilityRatio = entry.intersectionRatio;
-
-                if(NETDATA.options.current.async_on_scroll === false) {
-                    if(window.requestIdleCallback)
-                        window.requestIdleCallback(function() {
-                            NETDATA.intersectionObserver.switchChartVisibility.call(state);
-                        }, {timeout: 100});
-                    else
-                        NETDATA.intersectionObserver.switchChartVisibility.call(state);
-                }
-            });
-        },
-
-        observe: function(state) {
-            if(this.enabled() === true) {
-                state.__visibilityRatioOld = 0;
-                state.__visibilityRatio = 0;
-                this.observer.observe(state.element);
-
-                state.isVisible = function() {
-                    if(NETDATA.options.current.update_only_visible === false)
-                        return true;
-
-                    NETDATA.intersectionObserver.switchChartVisibility.call(this);
-
-                    return this.__visibilityRatio > 0;
-                };
-            }
-        },
-
-        init: function() {
-            if(typeof netdataIntersectionObserver === 'undefined' || netdataIntersectionObserver === true) {
-                try {
-                    this.observer = new IntersectionObserver(this.handler, this.options);
-                }
-                catch (e) {
-                    console.log("IntersectionObserver is not supported on this browser");
-                    this.observer = null;
-                }
-            }
-            //else {
-            //    console.log("IntersectionObserver is disabled");
-            //}
-        }
-    };
-    NETDATA.intersectionObserver.init();
-
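-    // [editorial note, not in the original file] with the observer active,
-    // targets() narrows the refresh loop to on-screen charts only; when the
-    // browser lacks IntersectionObserver it degrades to all known charts:
-    //
-    //     NETDATA.intersectionObserver.targets();  // visible charts, or NETDATA.options.targets
-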
-    // ----------------------------------------------------------------------------------------------------------------
-    // Our state object, where all per-chart values are stored
-
-    var chartState = function(element) {
-        this.element = element;
-
-        // IMPORTANT:
-        // all private functions should use 'that', instead of 'this'
-        var that = this;
-
-        // ============================================================================================================
-        // ERROR HANDLING
-
-        /* error() - private
-         * show an error instead of the chart
-         */
-        var error = function(msg) {
-            var ret = true;
-
-            if(typeof netdataErrorCallback === 'function') {
-                ret = netdataErrorCallback('chart', that.id, msg);
-            }
-
-            if(ret) {
-                that.element.innerHTML = that.id + ': ' + msg;
-                that.enabled = false;
-                that.current = that.pan;
-            }
-        };
-
-        // console logging
-        this.log = function(msg) {
-            console.log(this.id + ' (' + this.library_name + ' ' + this.uuid + '): ' + msg);
-        };
-
-        // ============================================================================================================
-        // EARLY INITIALIZATION
-
-        // These are variables that should exist even if the chart is never to be rendered.
-        // Be careful what you add here - there may be thousands of charts on the page.
-
-        // GUID - a unique identifier for the chart
-        this.uuid = NETDATA.guid();
-
-        // string - the name of the chart
-        this.id = NETDATA.dataAttribute(this.element, 'netdata', undefined);
-        if(typeof this.id === 'undefined') {
-            error("netdata elements need data-netdata");
-            return;
-        }
-
-        // string - the key for localStorage settings
-        this.settings_id = NETDATA.dataAttribute(this.element, 'id', null);
-
-        // the user given dimensions of the element
-        this.width = NETDATA.dataAttribute(this.element, 'width', NETDATA.chartDefaults.width);
-        this.height = NETDATA.dataAttribute(this.element, 'height', NETDATA.chartDefaults.height);
-        this.height_original = this.height;
-
-        if(this.settings_id !== null) {
-            this.height = NETDATA.localStorageGet('chart_heights.' + this.settings_id, this.height, function(height) {
-                // this is the callback that will be called
-                // if and when the user resets all localStorage variables
-                // to their defaults
-
-                resizeChartToHeight(height);
-            });
-        }
-
-        // the chart library requested by the user
-        this.library_name = NETDATA.dataAttribute(this.element, 'chart-library', NETDATA.chartDefaults.library);
-
-        // check that the requested library is available
-        // we don't initialize it here - it will be initialized when
-        // this chart is first used
-        if(typeof NETDATA.chartLibraries[this.library_name] === 'undefined') {
-            NETDATA.error(402, this.library_name);
-            error('chart library "' + this.library_name + '" is not found');
-            this.enabled = false;
-        }
-        else if(NETDATA.chartLibraries[this.library_name].enabled === false) {
-            NETDATA.error(403, this.library_name);
-            error('chart library "' + this.library_name + '" is not enabled');
-            this.enabled = false;
-        }
-        else
-            this.library = NETDATA.chartLibraries[this.library_name];
-
-        this.auto = {
-            name: 'auto',
-            autorefresh: true,
-            force_update_at: 0, // the timestamp to force the update at
-            force_before_ms: null,
-            force_after_ms: null
-        };
-        this.pan = {
-            name: 'pan',
-            autorefresh: false,
-            force_update_at: 0, // the timestamp to force the update at
-            force_before_ms: null,
-            force_after_ms: null
-        };
-        this.zoom = {
-            name: 'zoom',
-            autorefresh: false,
-            force_update_at: 0, // the timestamp to force the update at
-            force_before_ms: null,
-            force_after_ms: null
-        };
-
-        // this is a pointer to one of the sub-modes above:
-        // auto, pan, zoom
-        this.current = this.auto;
-
-        this.running = false;               // boolean - true when the chart is being refreshed now
-        this.enabled = true;                // boolean - is the chart enabled for refresh?
-
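-        // [editorial sketch, not in the original file] pages can intercept
-        // chart errors (see error() above) by defining a global callback;
-        // returning false suppresses the default inline error message:
-        //
-        //     window.netdataErrorCallback = function(kind, chart_id, msg) {
-        //         console.error(kind, chart_id, msg);
-        //         return false;
-        //     };
-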
-        this.force_update_every = null;     // number - overwrite the visualization update frequency of the chart
-
-        this.tmp = {};
-
-        this.foreign_element_before = null;
-        this.foreign_element_after = null;
-        this.foreign_element_duration = null;
-        this.foreign_element_update_every = null;
-        this.foreign_element_selection = null;
-
-        // ============================================================================================================
-        // PRIVATE FUNCTIONS
-
-        // reset the runtime status variables to their defaults
-        var runtimeInit = function() {
-            that.paused = false;                    // boolean - is the chart paused for any reason?
-            that.selected = false;                  // boolean - is the chart showing a selection?
-
-            that.chart_created = false;             // boolean - has library.create() been called?
-            that.dom_created = false;               // boolean - has the chart DOM been created?
-            that.fetching_data = false;             // boolean - true while we fetch data via ajax
-
-            that.updates_counter = 0;               // numeric - the number of refreshes made so far
-            that.updates_since_last_unhide = 0;     // numeric - the number of refreshes made since the last time the chart was unhidden
-            that.updates_since_last_creation = 0;   // numeric - the number of refreshes made since the last time the chart was created
-
-            that.tm = {
-                last_initialized: 0,        // milliseconds - the timestamp it was last initialized
-                last_dom_created: 0,        // milliseconds - the timestamp its DOM was last created
-                last_mode_switch: 0,        // milliseconds - the timestamp it last switched modes
-
-                last_info_downloaded: 0,    // milliseconds - the timestamp we downloaded the chart
-                last_updated: 0,            // the timestamp the chart was last updated with data
-                pan_and_zoom_seq: 0,        // the sequence number of the global synchronization
-                                            // between charts.
-                                            // Used with NETDATA.globalPanAndZoom.seq
-                last_visible_check: 0,      // the time we last checked if it is visible
-                last_resized: 0,            // the time the chart was resized
-                last_hidden: 0,             // the time the chart was hidden
-                last_unhidden: 0,           // the time the chart was unhidden
-                last_autorefreshed: 0       // the time the chart was last refreshed
-            };
-
-            that.data = null;               // the last data as downloaded from the netdata server
-            that.data_url = 'invalid://';   // string - the last url used to update the chart
-            that.data_points = 0;           // number - the number of points returned from netdata
-            that.data_after = 0;            // milliseconds - the first timestamp of the data
-            that.data_before = 0;           // milliseconds - the last timestamp of the data
-            that.data_update_every = 0;     // milliseconds - the frequency to update the data
-
-            that.tmp = {};                  // members that can be destroyed to save memory
-        };
-
-        // initialize all the variables that are required for the chart to be rendered
-        var lateInitialization = function() {
-            if(typeof that.host !== 'undefined')
-                return;
-
-            // string - the netdata server URL, without any path
-            that.host = NETDATA.dataAttribute(that.element, 'host', NETDATA.serverDefault);
-
-            // make sure the host does not end with /
-            // all netdata API requests use absolute paths
-            while(that.host.slice(-1) === '/')
-                that.host = that.host.substring(0, that.host.length - 1);
-
-            // string - the grouping method requested by the user
-            that.method = NETDATA.dataAttribute(that.element, 'method', NETDATA.chartDefaults.method);
-            that.gtime = NETDATA.dataAttribute(that.element, 'gtime', 0);
-
-            // the time-range requested by the user
-            that.after = NETDATA.dataAttribute(that.element, 'after', NETDATA.chartDefaults.after);
-            that.before = NETDATA.dataAttribute(that.element, 'before', NETDATA.chartDefaults.before);
-
-            // the pixels per point requested by the user
-            that.pixels_per_point = NETDATA.dataAttribute(that.element, 'pixels-per-point', 1);
-            that.points = NETDATA.dataAttribute(that.element, 'points', null);
-
-            // the forced update_every
-            that.force_update_every = NETDATA.dataAttribute(that.element, 'update-every', null);
-            if(typeof that.force_update_every !== 'number' || that.force_update_every <= 1) {
-                if(that.force_update_every !== null)
-                    that.log('ignoring invalid value of property data-update-every');
-
-                that.force_update_every = null;
-            }
-            else
-                that.force_update_every *= 1000;
-
-            // the dimensions requested by the user
-            that.dimensions = NETDATA.dataAttribute(that.element, 'dimensions', null);
-
-            that.title = NETDATA.dataAttribute(that.element, 'title', null);    // the title of the chart
-            that.units = NETDATA.dataAttribute(that.element, 'units', null);    // the units of the chart dimensions
-            that.units_desired = NETDATA.dataAttribute(that.element, 'desired-units', NETDATA.options.current.units); // the units the user wants the chart rendered in
-            that.units_current = that.units;
-            that.units_common = NETDATA.dataAttribute(that.element, 'common-units', null);
-
-            that.append_options = NETDATA.dataAttribute(that.element, 'append-options', null);      // additional options to pass to netdata
-            that.override_options = NETDATA.dataAttribute(that.element, 'override-options', null);  // override options to pass to netdata
-
-            that.debug = NETDATA.dataAttributeBoolean(that.element, 'debug', false);
-
-            that.value_decimal_detail = -1;
-            var d = NETDATA.dataAttribute(that.element, 'decimal-digits', -1);
-            if(typeof d === 'number')
-                that.value_decimal_detail = d;
-            else if(typeof d !== 'undefined')
-                that.log('ignoring decimal-digits value: ' + d.toString());
-
-            // if we need to report the rendering speed
-            // find the element that needs to be updated
-            var refresh_dt_element_name = NETDATA.dataAttribute(that.element, 'dt-element-name', null); // string - the element to print refresh_dt_ms
-
-            if(refresh_dt_element_name !== null) {
-                that.refresh_dt_element = document.getElementById(refresh_dt_element_name) || null;
-            }
-            else
-                that.refresh_dt_element = null;
-
-            that.dimensions_visibility = new dimensionsVisibility(that);
-
-            that.netdata_first = 0;             // milliseconds - the first timestamp in netdata
-            that.netdata_last = 0;              // milliseconds - the last timestamp in netdata
-            that.requested_after = null;        // milliseconds - the timestamp of the request after param
-            that.requested_before = null;       // milliseconds - the timestamp of the request before param
-            that.requested_padding = null;
-            that.view_after = 0;
-            that.view_before = 0;
-
-            that.refresh_dt_ms = 0;             // milliseconds - the time the last refresh took
-
-            // how many retries we have made to load chart data from the server
-            that.retries_on_data_failures = 0;
-
-            // color management
-            that.colors = null;
-            that.colors_assigned = null;
-            that.colors_available = null;
-            that.colors_custom = null;
-
-            that.element_message = null;    // the element already created by the user
-            that.element_chart = null;      // the element with the chart
-            that.element_legend = null;     // the element with the legend of the chart (if created by us)
-            that.element_legend_childs = {
-                content: null,
-                hidden: null,
-                title_date: null,
-                title_time: null,
-                title_units: null,
-                perfect_scroller: null,     // the container to apply perfect scroller to
-                series: null
-            };
-
-            that.chart_url = null;          // string - the url to download chart info
-            that.chart = null;              // object - the chart as downloaded from the server
-
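-            // [editorial sketch, not in the original file] the attributes read
-            // above map one-to-one to markup; chart id and host below are
-            // illustrative:
-            //
-            //     <div data-netdata="system.cpu"
-            //          data-host="http://localhost:19999"
-            //          data-after="-600" data-before="0"
-            //          data-chart-library="dygraph"
-            //          data-width="100%" data-height="200px"></div>
-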
-            function get_foreign_element_by_id(opt) {
-                var id = NETDATA.dataAttribute(that.element, opt, null);
-                if(id === null) {
-                    //that.log('option "' + opt + '" is undefined');
-                    return null;
-                }
-
-                var el = document.getElementById(id);
-                // getElementById() returns null, never undefined - the
-                // original checked 'typeof el === undefined' and could
-                // never log this message
-                if(el === null) {
-                    that.log('cannot find an element with name "' + id.toString() + '"');
-                    return null;
-                }
-
-                return el;
-            }
-
-            that.foreign_element_before = get_foreign_element_by_id('show-before-at');
-            that.foreign_element_after = get_foreign_element_by_id('show-after-at');
-            that.foreign_element_duration = get_foreign_element_by_id('show-duration-at');
-            that.foreign_element_update_every = get_foreign_element_by_id('show-update-every-at');
-            that.foreign_element_selection = get_foreign_element_by_id('show-selection-at');
-        };
-
-        var destroyDOM = function() {
-            if(that.enabled === false) return;
-
-            if(that.debug === true)
-                that.log('destroyDOM()');
-
-            // that.element.className = 'netdata-message icon';
-            // that.element.innerHTML = ' netdata';
-            that.element.innerHTML = '';
-            that.element_message = null;
-            that.element_legend = null;
-            that.element_chart = null;
-            that.element_legend_childs.series = null;
-
-            that.chart_created = false;
-            that.dom_created = false;
-
-            that.tm.last_resized = 0;
-            that.tm.last_dom_created = 0;
-        };
-
-        var createDOM = function() {
-            if(that.enabled === false) return;
-            lateInitialization();
-
-            destroyDOM();
-
-            if(that.debug === true)
-                that.log('createDOM()');
-
-            that.element_message = document.createElement('div');
-            that.element_message.className = 'netdata-message icon hidden';
-            that.element.appendChild(that.element_message);
-
-            that.dom_created = true;
-            that.chart_created = false;
-
-            that.tm.last_dom_created =
-                that.tm.last_resized = Date.now();
-
-            showLoading();
-        };
-
-        var initDOM = function() {
-            that.element.className = that.library.container_class(that);
-
-            if(typeof(that.width) === 'string')
-                that.element.style.width = that.width;
-            else if(typeof(that.width) === 'number')
-                that.element.style.width = that.width.toString() + 'px';
-
-            if(typeof(that.library.aspect_ratio) === 'undefined') {
-                if(typeof(that.height) === 'string')
-                    that.element.style.height = that.height;
-                else if(typeof(that.height) === 'number')
-                    that.element.style.height = that.height.toString() + 'px';
-            }
-
-            if(NETDATA.chartDefaults.min_width !== null)
-                that.element.style.min_width = NETDATA.chartDefaults.min_width;
-        };
-
-        var invisibleSearchableText = function() {
-            return '' + that.id + '';
-        };
-
-        /* init() - private
-         * initialize state variables
-         * destroy all (possibly) created state elements
-         * create the basic DOM for a chart
-         */
-        var init = function(opt) {
-            if(that.enabled === false) return;
-
-            runtimeInit();
-            that.element.innerHTML = invisibleSearchableText();
-
-            that.tm.last_initialized = Date.now();
-            that.setMode('auto');
-
-            if(opt !== 'fast') {
-                if (that.isVisible(true) || opt === 'force')
-                    createDOM();
-            }
-        };
-
-        var maxMessageFontSize = function() {
-            var screenHeight = screen.height;
-            var el = that.element;
-
-            // normally we want a font size, as tall as the element
-            var h = el.clientHeight;
-
-            // but give it some air, 20% let's say, or 5 pixels min
-            var lost = Math.max(h * 0.2, 5);
-            h -= lost;
-
-            // center the text, vertically
-            var paddingTop = (lost - 5) / 2;
-
-            // but check the width too
-            // it should fit 10 characters in it
-            var w = el.clientWidth / 10;
-            if(h > w) {
-                paddingTop += (h - w) / 2;
-                h = w;
-            }
-
-            // and don't make it too huge
-            // 5% of the screen size is good
-            if(h > screenHeight / 20) {
-                paddingTop += (h - (screenHeight / 20)) / 2;
-                h = screenHeight / 20;
-            }
-
-            // set it
-            that.element_message.style.fontSize = h.toString() + 'px';
-            that.element_message.style.paddingTop = paddingTop.toString() + 'px';
-        };
-
-        var showMessageIcon = function(icon) {
-            that.element_message.innerHTML = icon;
-            maxMessageFontSize();
-            $(that.element_message).removeClass('hidden');
-            that.tmp.___messageHidden___ = undefined;
-        };
-
-        var hideMessage = function() {
-            if(typeof that.tmp.___messageHidden___ === 'undefined') {
-                that.tmp.___messageHidden___ = true;
-                $(that.element_message).addClass('hidden');
-            }
-        };
-
-        var showRendering = function() {
-            var icon;
-            if(that.chart !== null) {
-                if(that.chart.chart_type === 'line')
-                    icon = '';
-                else
-                    icon = '';
-            }
-            else
-                icon = '';
-
-            showMessageIcon(icon + ' netdata' + invisibleSearchableText());
-        };
-
-        var showLoading = function() {
-            if(that.chart_created === false) {
-                showMessageIcon(' netdata');
-                return true;
-            }
-            return false;
-        };
-
-        var isHidden = function() {
-            return (typeof that.tmp.___chartIsHidden___ !== 'undefined');
-        };
-
-        // hide the chart, when it is not visible - called from isVisible()
-        this.hideChart = function() {
-            // hide it, if it is not already hidden
-            if(isHidden() === true) return;
-
-            if(this.chart_created === true) {
-                if(NETDATA.options.current.show_help === true) {
-                    if(this.element_legend_childs.toolbox !== null) {
-                        if(this.debug === true)
-                            this.log('hideChart(): hiding legend popovers');
-
-                        $(this.element_legend_childs.toolbox_left).popover('hide');
-                        $(this.element_legend_childs.toolbox_reset).popover('hide');
-                        $(this.element_legend_childs.toolbox_right).popover('hide');
-                        $(this.element_legend_childs.toolbox_zoomin).popover('hide');
-                        $(this.element_legend_childs.toolbox_zoomout).popover('hide');
-                    }
-
-                    if(this.element_legend_childs.resize_handler !== null)
-                        $(this.element_legend_childs.resize_handler).popover('hide');
-
-                    if(this.element_legend_childs.content !== null)
-                        $(this.element_legend_childs.content).popover('hide');
-                }
-
-                if(NETDATA.options.current.destroy_on_hide === true) {
-                    if(this.debug === true)
-                        this.log('hideChart(): initializing chart');
-
-                    // we should destroy it
-                    init('force');
-                }
-                else {
-                    if(this.debug === true)
-                        this.log('hideChart(): hiding chart');
-
-                    showRendering();
-                    this.element_chart.style.display = 'none';
-                    this.element.style.willChange = 'auto';
-                    if(this.element_legend !== null) this.element_legend.style.display = 'none';
-                    if(this.element_legend_childs.toolbox !== null) this.element_legend_childs.toolbox.style.display = 'none';
-                    if(this.element_legend_childs.resize_handler !== null) this.element_legend_childs.resize_handler.style.display = 'none';
-
-                    this.tm.last_hidden = Date.now();
-
-                    // de-allocate data
-                    // This works, but I am not sure there are no corner cases somewhere,
-                    // so it is commented out - if the user has memory issues he can
-                    // set "Destroy on Hide" for all charts
-                    // this.data = null;
-                }
-            }
-
-            this.tmp.___chartIsHidden___ = true;
-        };
-
-        // unhide the chart, when it is visible - called from isVisible()
-        this.unhideChart = function() {
-            if(isHidden() === false) return;
-
-            this.tmp.___chartIsHidden___ = undefined;
-            this.updates_since_last_unhide = 0;
-
-            if(this.chart_created === false) {
-                if(this.debug === true)
-                    this.log('unhideChart(): initializing chart');
-
-                // we need to re-initialize it, to show our background
-                // logo in bootstrap tabs, until the chart loads
-                init('force');
-            }
-            else {
-                if(this.debug === true)
-                    this.log('unhideChart(): unhiding chart');
-
-                this.element.style.willChange = 'transform';
-                this.tm.last_unhidden = Date.now();
-                this.element_chart.style.display = '';
-                if(this.element_legend !== null) this.element_legend.style.display = '';
-                if(this.element_legend_childs.toolbox !== null) this.element_legend_childs.toolbox.style.display = '';
-                if(this.element_legend_childs.resize_handler !== null) this.element_legend_childs.resize_handler.style.display = '';
-                resizeChart();
-                hideMessage();
-            }
-
-            if(this.__redraw_on_unhide === true) {
-                if(this.debug === true)
-                    this.log("redrawing chart on unhide");
-
-                this.__redraw_on_unhide = undefined;
-                this.redrawChart();
-            }
-        };
-
-        var canBeRendered = function(uncached_visibility) {
-            if(that.debug === true)
-                that.log('canBeRendered() called');
-
-            if(NETDATA.options.current.update_only_visible === false)
-                return true;
-
-            var ret = (
-                (
-                    NETDATA.options.page_is_visible === true ||
-                    NETDATA.options.current.stop_updates_when_focus_is_lost === false ||
-                    that.updates_since_last_unhide === 0
-                )
-                && isHidden() === false && that.isVisible(uncached_visibility) === true
-            );
-
-            if(that.debug === true)
-                that.log('canBeRendered(): ' + ret);
-
-            return ret;
-        };
-
-        // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers
-        var callChartLibraryUpdateSafely = function(data) {
-            var status;
-
-            // we should not do this here
-            // if we prevent rendering the chart then:
-            // 1. globalSelectionSync will be wrong
-            // 2. globalPanAndZoom will be wrong
-            //if(canBeRendered(true) === false)
-            //    return false;
-
-            if(NETDATA.options.fake_chart_rendering === true)
-                return true;
-
-            that.updates_counter++;
-            that.updates_since_last_unhide++;
-            that.updates_since_last_creation++;
-
-            // the try/catch block is isolated in this helper because functions
-            // containing try/catch are not optimized by some JS engines
-            // (see the link above)
-            if(NETDATA.options.debug.chart_errors === true)
-                status = that.library.update(that, data);
-            else {
-                try {
-                    status = that.library.update(that, data);
-                }
-                catch(err) {
-                    status = false;
-                }
-            }
-
-            if(status === false) {
-                error('chart failed to be updated as ' + that.library_name);
-                return false;
-            }
-
-            return true;
-        };
-
globalPanAndZoom will be wrong - //if(canBeRendered(true) === false) - // return false; - - if(NETDATA.options.fake_chart_rendering === true) - return true; - - that.updates_counter++; - that.updates_since_last_unhide++; - that.updates_since_last_creation++; - - if(NETDATA.options.debug.chart_errors === true) - status = that.library.create(that, data); - else { - try { - status = that.library.create(that, data); - } - catch(err) { - status = false; - } - } - - if(status === false) { - error('chart failed to be created as ' + that.library_name); - return false; - } - - that.chart_created = true; - that.updates_since_last_creation = 0; - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Chart Resize - - // resizeChart() - private - // to be called just before the chart library to make sure that - // a properly sized dom is available - var resizeChart = function() { - if(that.tm.last_resized < NETDATA.options.last_page_resize) { - if(that.chart_created === false) return; - - if(that.needsRecreation()) { - if(that.debug === true) - that.log('resizeChart(): initializing chart'); - - init('force'); - } - else if(typeof that.library.resize === 'function') { - if(that.debug === true) - that.log('resizeChart(): resizing chart'); - - that.library.resize(that); - - if(that.element_legend_childs.perfect_scroller !== null) - Ps.update(that.element_legend_childs.perfect_scroller); - - maxMessageFontSize(); - } - - that.tm.last_resized = Date.now(); - } - }; - - // this is the actual chart resize algorithm - // it will: - // - resize the entire container - // - update the internal states - // - resize the chart as the div changes height - // - update the scrollbar of the legend - var resizeChartToHeight = function(h) { - // console.log(h); - that.element.style.height = h; - - if(that.settings_id !== null) - NETDATA.localStorageSet('chart_heights.' 
+ that.settings_id, h);
-
-        var now = Date.now();
-        NETDATA.options.last_page_scroll = now;
-        NETDATA.options.auto_refresher_stop_until = now + NETDATA.options.current.stop_updates_while_resizing;
-
-        // force a resize
-        that.tm.last_resized = 0;
-        resizeChart();
-    };
-
-    this.resizeForPrint = function() {
-        if(typeof this.element_legend_childs !== 'undefined' && this.element_legend_childs.perfect_scroller !== null) {
-            var current = this.element.clientHeight;
-            var optimal = current
-                + this.element_legend_childs.perfect_scroller.scrollHeight
-                - this.element_legend_childs.perfect_scroller.clientHeight;
-
-            if(optimal > current) {
-                // this.log('resized');
-                this.element.style.height = optimal + 'px';
-                this.library.resize(this);
-            }
-        }
-    };
-
-    this.resizeHandler = function(e) {
-        e.preventDefault();
-
-        if(typeof this.event_resize === 'undefined'
-            || typeof this.event_resize.chart_original_w === 'undefined'
-            || typeof this.event_resize.chart_original_h === 'undefined')
-            this.event_resize = {
-                chart_original_w: this.element.clientWidth,
-                chart_original_h: this.element.clientHeight,
-                last: 0
-            };
-
-        if(e.type === 'touchstart') {
-            this.event_resize.mouse_start_x = e.touches.item(0).pageX;
-            this.event_resize.mouse_start_y = e.touches.item(0).pageY;
-        }
-        else {
-            this.event_resize.mouse_start_x = e.clientX;
-            this.event_resize.mouse_start_y = e.clientY;
-        }
-
-        this.event_resize.chart_start_w = this.element.clientWidth;
-        this.event_resize.chart_start_h = this.element.clientHeight;
-        this.event_resize.chart_last_w = this.element.clientWidth;
-        this.event_resize.chart_last_h = this.element.clientHeight;
-
-        var now = Date.now();
-        if(now - this.event_resize.last <= NETDATA.options.current.double_click_speed && this.element_legend_childs.perfect_scroller !== null) {
-            // double click / double tap event
-
-            // console.dir(this.element_legend_childs.content);
-            // console.dir(this.element_legend_childs.perfect_scroller);
-
-            // the optimal height of the chart
-            // showing the entire legend
-            var optimal = this.event_resize.chart_last_h
-                + this.element_legend_childs.perfect_scroller.scrollHeight
-                - this.element_legend_childs.perfect_scroller.clientHeight;
-
-            // if we are not optimal, be optimal
-            if(this.event_resize.chart_last_h !== optimal) {
-                // this.log('resize to optimal, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
-                resizeChartToHeight(optimal.toString() + 'px');
-            }
-
-            // else if the current height is not the original/saved height
-            // reset to the original/saved height
-            else if(this.event_resize.chart_last_h !== this.event_resize.chart_original_h) {
-                // this.log('resize to original, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
-                resizeChartToHeight(this.event_resize.chart_original_h.toString() + 'px');
-            }
-
-            // else if the current height is not the internal default height
-            // reset to the internal default height
-            else if((this.event_resize.chart_last_h.toString() + 'px') !== this.height_original) {
-                // this.log('resize to internal default, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
-                resizeChartToHeight(this.height_original.toString());
-            }
-
-            // else if the current height is not the firstChild's clientHeight
-            // resize to it
-            else if(typeof this.element_legend_childs.perfect_scroller.firstChild !== 'undefined') {
-                var parent_rect = this.element.getBoundingClientRect();
-                var content_rect = this.element_legend_childs.perfect_scroller.firstElementChild.getBoundingClientRect();
-                var wanted = content_rect.top - parent_rect.top + this.element_legend_childs.perfect_scroller.firstChild.clientHeight + 18; // 18 = 15 (toolbox) + 3 (space)
-
-                // console.log(parent_rect);
-                // console.log(content_rect);
-                // console.log(wanted);
-
-                // this.log('resize to firstChild, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString() + 'px, firstChild = ' + wanted.toString() + 'px' );
-                if(this.event_resize.chart_last_h !== wanted)
-                    resizeChartToHeight(wanted.toString() + 'px');
-            }
-        }
-        else {
-            this.event_resize.last = now;
-
-            // process movement event
-            document.onmousemove =
-                document.ontouchmove =
-                this.element_legend_childs.resize_handler.onmousemove =
-                this.element_legend_childs.resize_handler.ontouchmove =
-                    function(e) {
-                        var y = null;
-
-                        switch(e.type) {
-                            case 'mousemove': y = e.clientY; break;
-                            case 'touchmove': y = e.touches.item(e.touches.length - 1).pageY; break;
-                        }
-
-                        if(y !== null) {
-                            var newH = that.event_resize.chart_start_h + y - that.event_resize.mouse_start_y;
-
-                            if(newH >= 70 && newH !== that.event_resize.chart_last_h) {
-                                resizeChartToHeight(newH.toString() + 'px');
-                                that.event_resize.chart_last_h = newH;
-                            }
-                        }
-                    };
-
-            // process end event
-            document.onmouseup =
-                document.ontouchend =
-                this.element_legend_childs.resize_handler.onmouseup =
-                this.element_legend_childs.resize_handler.ontouchend =
-                    function(e) {
-                        void(e);
-
-                        // remove all the hooks
-                        document.onmouseup =
-                            document.onmousemove =
-                            document.ontouchmove =
-                            document.ontouchend =
-                            that.element_legend_childs.resize_handler.onmousemove =
-                            that.element_legend_childs.resize_handler.ontouchmove =
-                            that.element_legend_childs.resize_handler.onmouseout =
-                            that.element_legend_childs.resize_handler.onmouseup =
-                            that.element_legend_childs.resize_handler.ontouchend =
-                                null;
-
-                        // allow auto-refreshes
-                        NETDATA.options.auto_refresher_stop_until = 0;
-                    };
-        }
-    };
-
-
-    var noDataToShow = function() {
-        showMessageIcon(' empty');
-        that.legendUpdateDOM();
-        that.tm.last_autorefreshed = Date.now();
-        // that.data_update_every = 30 * 1000;
-        //that.element_chart.style.display = 'none';
-        //if(that.element_legend !== null) that.element_legend.style.display = 'none';
-        //that.tmp.___chartIsHidden___ = true;
-    };
-
-    // ============================================================================================================
-    // PUBLIC FUNCTIONS
-
-    this.error = function(msg) {
-        error(msg);
-    };
-
-    this.setMode = function(m) {
-        if(this.current !== null && this.current.name === m) return;
-
-        if(m === 'auto')
-            this.current = this.auto;
-        else if(m === 'pan')
-            this.current = this.pan;
-        else if(m === 'zoom')
-            this.current = this.zoom;
-        else
-            this.current = this.auto;
-
-        this.current.force_update_at = 0;
-        this.current.force_before_ms = null;
-        this.current.force_after_ms = null;
-
-        this.tm.last_mode_switch = Date.now();
-    };
-
-    //
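The "optimal height" logic in resizeForPrint() and in the double-click branch of resizeHandler() above reduces to one line of arithmetic: grow the chart by exactly the legend overflow that is hidden behind the scrollbar. A minimal standalone sketch of that computation (the element names here are illustrative stand-ins, not part of the dashboard API):

```js
// Sketch: how tall must the chart container be so the legend
// no longer needs to scroll? scrollHeight - clientHeight is the
// legend overflow currently hidden behind the scrollbar.
function optimalChartHeight(chartElement, legendScroller) {
    return chartElement.clientHeight
        + legendScroller.scrollHeight
        - legendScroller.clientHeight;
}

// usage (illustrative): grow only, as resizeForPrint() does
var optimal = optimalChartHeight(chart, scroller);
if (optimal > chart.clientHeight)
    chart.style.height = optimal + 'px';
```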
---------------------------------------------------------------------------------------------------------------- - // global selection sync for slaves - - // can the chart participate to the global selection sync as a slave? - this.globalSelectionSyncIsEligible = function() { - return (this.enabled === true - && this.library !== null - && typeof this.library.setSelection === 'function' - && this.isVisible() === true - && this.chart_created === true); - }; - - this.setSelection = function(t) { - if(typeof this.library.setSelection === 'function') - this.selected = (this.library.setSelection(this, t) === true); - else - this.selected = true; - - if(this.selected === true && this.debug === true) - this.log('selection set to ' + t.toString()); - - if (this.foreign_element_selection !== null) - this.foreign_element_selection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t); - - return this.selected; - }; - - this.clearSelection = function() { - if(this.selected === true) { - if(typeof this.library.clearSelection === 'function') - this.selected = (this.library.clearSelection(this) !== true); - else - this.selected = false; - - if(this.selected === false && this.debug === true) - this.log('selection cleared'); - - if (this.foreign_element_selection !== null) - this.foreign_element_selection.innerText = ''; - - this.legendReset(); - } - - return this.selected; - }; - - // ---------------------------------------------------------------------------------------------------------------- - - // find if a timestamp (ms) is shown in the current chart - this.timeIsVisible = function(t) { - return (t >= this.data_after && t <= this.data_before); - }; - - this.calculateRowForTime = function(t) { - if(this.timeIsVisible(t) === false) return -1; - return Math.floor((t - this.data_after) / this.data_update_every); - }; - - // ---------------------------------------------------------------------------------------------------------------- - - this.pauseChart = function() { - if(this.paused === false) { - if(this.debug === true) - this.log('pauseChart()'); - - this.paused = true; - } - }; - - this.unpauseChart = function() { - if(this.paused === true) { - if(this.debug === true) - this.log('unpauseChart()'); - - this.paused = false; - } - }; - - this.resetChart = function(dont_clear_master, dont_update) { - if(this.debug === true) - this.log('resetChart(' + dont_clear_master + ', ' + dont_update + ') called'); - - if(typeof dont_clear_master === 'undefined') - dont_clear_master = false; - - if(typeof dont_update === 'undefined') - dont_update = false; - - if(dont_clear_master !== true && NETDATA.globalPanAndZoom.isMaster(this) === true) { - if(this.debug === true) - this.log('resetChart() diverting to clearMaster().'); - // this will call us back with master === true - NETDATA.globalPanAndZoom.clearMaster(); - return; - } - - this.clearSelection(); - - this.tm.pan_and_zoom_seq = 0; - - this.setMode('auto'); - this.current.force_update_at = 0; - this.current.force_before_ms = null; - this.current.force_after_ms = null; - this.tm.last_autorefreshed = 0; - this.paused = false; - this.selected = false; - this.enabled = true; - // this.debug = false; - - // do not update the chart here - // or the chart will flip-flop when it is the master - // of a selection sync and another chart becomes - // the new master - - if(dont_update !== true && this.isVisible() === true) { - this.updateChart(); - } - }; - - this.updateChartPanOrZoom = function(after, before, callback) { - var 
logme = 'updateChartPanOrZoom(' + after + ', ' + before + '): '; - var ret = true; - - NETDATA.globalPanAndZoom.delay(); - NETDATA.globalSelectionSync.delay(); - - if(this.debug === true) - this.log(logme); - - if(before < after) { - if(this.debug === true) - this.log(logme + 'flipped parameters, rejecting it.'); - - return false; - } - - if(typeof this.fixed_min_duration === 'undefined') - this.fixed_min_duration = Math.round((this.chartWidth() / 30) * this.chart.update_every * 1000); - - var min_duration = this.fixed_min_duration; - var current_duration = Math.round(this.view_before - this.view_after); - - // round the numbers - after = Math.round(after); - before = Math.round(before); - - // align them to update_every - // stretching them further away - after -= after % this.data_update_every; - before += this.data_update_every - (before % this.data_update_every); - - // the final wanted duration - var wanted_duration = before - after; - - // to allow panning, accept just a point below our minimum - if((current_duration - this.data_update_every) < min_duration) - min_duration = current_duration - this.data_update_every; - - // we do it, but we adjust to minimum size and return false - // when the wanted size is below the current and the minimum - // and we zoom - if(wanted_duration < current_duration && wanted_duration < min_duration) { - if(this.debug === true) - this.log(logme + 'too small: min_duration: ' + (min_duration / 1000).toString() + ', wanted: ' + (wanted_duration / 1000).toString()); - - min_duration = this.fixed_min_duration; - - var dt = (min_duration - wanted_duration) / 2; - before += dt; - after -= dt; - wanted_duration = before - after; - ret = false; - } - - var tolerance = this.data_update_every * 2; - var movement = Math.abs(before - this.view_before); - - if(Math.abs(current_duration - wanted_duration) <= tolerance && movement <= tolerance && ret === true) { - if(this.debug === true) - this.log(logme + 'REJECTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + false); - return false; - } - - if(this.current.name === 'auto') { - this.log(logme + 'caller called me with mode: ' + this.current.name); - this.setMode('pan'); - } - - if(this.debug === true) - this.log(logme + 'ACCEPTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + ret); - - this.current.force_update_at = Date.now() + NETDATA.options.current.pan_and_zoom_delay; - this.current.force_after_ms = after; - this.current.force_before_ms = before; - NETDATA.globalPanAndZoom.setMaster(this, after, before); - - if(ret === true && typeof callback === 'function') - callback(); - - return ret; - }; - - this.updateChartPanOrZoomAsyncTimeOutId = undefined; - this.updateChartPanOrZoomAsync = function(after, before, callback) { - NETDATA.globalPanAndZoom.delay(); - NETDATA.globalSelectionSync.delay(); - - 
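A note on the window arithmetic in updateChartPanOrZoom() above: the requested edges are first rounded, then stretched outwards to whole multiples of the collection interval, so the server is always asked for complete points. Condensed to its core (a sketch; all values are in milliseconds):

```js
// Sketch of the edge alignment done by updateChartPanOrZoom():
// round the requested window, then stretch both edges outwards
// to multiples of the collection interval.
function alignWindow(after, before, updateEvery) {
    after = Math.round(after);
    before = Math.round(before);

    after -= after % updateEvery;                    // stretch left
    before += updateEvery - (before % updateEvery);  // stretch right

    return { after: after, before: before };
}

// e.g. with updateEvery = 1000:
// alignWindow(10000300, 10059700, 1000)
// -> { after: 10000000, before: 10060000 }
```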
if(NETDATA.globalPanAndZoom.isMaster(this) === false) { - this.pauseChart(); - NETDATA.globalPanAndZoom.setMaster(this, after, before); - // NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.setMaster(this); - } - - if(this.updateChartPanOrZoomAsyncTimeOutId) - NETDATA.timeout.clear(this.updateChartPanOrZoomAsyncTimeOutId); - - NETDATA.timeout.set(function() { - that.updateChartPanOrZoomAsyncTimeOutId = undefined; - that.updateChartPanOrZoom(after, before, callback); - }, 0); - }; - - var __unitsConversionLastUnits = undefined; - var __unitsConversionLastUnitsDesired = undefined; - var __unitsConversionLastMin = undefined; - var __unitsConversionLastMax = undefined; - var __unitsConversion = function(value) { return value; }; - this.unitsConversionSetup = function(min, max) { - if(this.units !== __unitsConversionLastUnits - || this.units_desired !== __unitsConversionLastUnitsDesired - || min !== __unitsConversionLastMin - || max !== __unitsConversionLastMax) { - - __unitsConversionLastUnits = this.units; - __unitsConversionLastUnitsDesired = this.units_desired; - __unitsConversionLastMin = min; - __unitsConversionLastMax = max; - - __unitsConversion = NETDATA.unitsConversion.get(this.uuid, min, max, this.units, this.units_desired, this.units_common, function (units) { - // console.log('switching units from ' + that.units.toString() + ' to ' + units.toString()); - that.units_current = units; - that.legendSetUnitsString(that.units_current); - }); - } - }; - - var __legendFormatValueChartDecimalsLastMin = undefined; - var __legendFormatValueChartDecimalsLastMax = undefined; - var __legendFormatValueChartDecimals = -1; - var __intlNumberFormat = null; - this.legendFormatValueDecimalsFromMinMax = function(min, max) { - if(min === __legendFormatValueChartDecimalsLastMin && max === __legendFormatValueChartDecimalsLastMax) - return; - - this.unitsConversionSetup(min, max); - if(__unitsConversion !== null) { - min = __unitsConversion(min); - max = __unitsConversion(max); - - if(typeof min !== 'number' || typeof max !== 'number') - return; - } - - __legendFormatValueChartDecimalsLastMin = min; - __legendFormatValueChartDecimalsLastMax = max; - - var old = __legendFormatValueChartDecimals; - - if(this.data !== null && this.data.min === this.data.max) - // it is a fixed number, let the visualizer decide based on the value - __legendFormatValueChartDecimals = -1; - - else if(this.value_decimal_detail !== -1) - // there is an override - __legendFormatValueChartDecimals = this.value_decimal_detail; - - else { - // ok, let's calculate the proper number of decimal points - var delta; - - if (min === max) - delta = Math.abs(min); - else - delta = Math.abs(max - min); - - if (delta > 1000) __legendFormatValueChartDecimals = 0; - else if (delta > 10) __legendFormatValueChartDecimals = 1; - else if (delta > 1) __legendFormatValueChartDecimals = 2; - else if (delta > 0.1) __legendFormatValueChartDecimals = 2; - else if (delta > 0.01) __legendFormatValueChartDecimals = 4; - else if (delta > 0.001) __legendFormatValueChartDecimals = 5; - else if (delta > 0.0001) __legendFormatValueChartDecimals = 6; - else __legendFormatValueChartDecimals = 7; - } - - if(__legendFormatValueChartDecimals !== old) { - if(__legendFormatValueChartDecimals < 0) - __intlNumberFormat = null; - else - __intlNumberFormat = NETDATA.fastNumberFormat.get( - __legendFormatValueChartDecimals, - __legendFormatValueChartDecimals - ); - } - }; - - this.legendFormatValue = function(value) { - if(typeof value !== 'number') - return 
'-';
-
-        value = __unitsConversion(value);
-
-        if(typeof value !== 'number')
-            return value;
-
-        if(__intlNumberFormat !== null)
-            return __intlNumberFormat.format(value);
-
-        var dmin, dmax;
-        if(this.value_decimal_detail !== -1) {
-            dmin = dmax = this.value_decimal_detail;
-        }
-        else {
-            dmin = 0;
-            var abs = (value < 0) ? -value : value;
-            if (abs > 1000) dmax = 0;
-            else if (abs > 10) dmax = 1;
-            else if (abs > 1) dmax = 2;
-            else if (abs > 0.1) dmax = 2;
-            else if (abs > 0.01) dmax = 4;
-            else if (abs > 0.001) dmax = 5;
-            else if (abs > 0.0001) dmax = 6;
-            else dmax = 7;
-        }
-
-        return NETDATA.fastNumberFormat.get(dmin, dmax).format(value);
-    };
-
-    this.legendSetLabelValue = function(label, value) {
-        var series = this.element_legend_childs.series[label];
-        if(typeof series === 'undefined') return;
-        if(series.value === null && series.user === null) return;
-
-        /*
-        // this slows down firefox and edge significantly
-        // since it requires using innerHTML() instead of innerText()
-
-        // if the value has not changed, skip DOM update
-        //if(series.last === value) return;
-
-        var s, r;
-        if(typeof value === 'number') {
-            var v = Math.abs(value);
-            s = r = this.legendFormatValue(value);
-
-            if(typeof series.last === 'number') {
-                if(v > series.last) s += '';
-                else if(v < series.last) s += '';
-                else s += '';
-            }
-            else s += '';
-
-            series.last = v;
-        }
-        else {
-            if(value === null)
-                s = r = '';
-            else
-                s = r = value;
-
-            series.last = value;
-        }
-        */
-
-        var s = this.legendFormatValue(value);
-
-        // caching: do not update the DOM to show the same value again
-        if(s === series.last_shown_value) return;
-        series.last_shown_value = s;
-
-        if(series.value !== null) series.value.innerText = s;
-        if(series.user !== null) series.user.innerText = s;
-    };
-
-    this.legendSetDateString = function(date) {
-        if(this.element_legend_childs.title_date !== null && date !== this.tmp.__last_shown_legend_date) {
-            this.element_legend_childs.title_date.innerText = date;
-            this.tmp.__last_shown_legend_date = date;
-        }
-    };
-
-    this.legendSetTimeString = function(time) {
-        if(this.element_legend_childs.title_time !== null && time !== this.tmp.__last_shown_legend_time) {
-            this.element_legend_childs.title_time.innerText = time;
-            this.tmp.__last_shown_legend_time = time;
-        }
-    };
-
-    this.legendSetUnitsString = function(units) {
-        if(this.element_legend_childs.title_units !== null && units !== this.tmp.__last_shown_legend_units) {
-            this.element_legend_childs.title_units.innerText = units;
-            this.tmp.__last_shown_legend_units = units;
-        }
-    };
-
-    this.legendSetDateLast = {
-        ms: 0,
-        date: undefined,
-        time: undefined
-    };
-
-    this.legendSetDate = function(ms) {
-        if(typeof ms !== 'number') {
-            this.legendShowUndefined();
-            return;
-        }
-
-        if(this.legendSetDateLast.ms !== ms) {
-            var d = new Date(ms);
-            this.legendSetDateLast.ms = ms;
-            this.legendSetDateLast.date = NETDATA.dateTime.localeDateString(d);
-            this.legendSetDateLast.time = NETDATA.dateTime.localeTimeString(d);
-        }
-
-        this.legendSetDateString(this.legendSetDateLast.date);
-        this.legendSetTimeString(this.legendSetDateLast.time);
-        this.legendSetUnitsString(this.units_current);
-    };
-
-    this.legendShowUndefined = function() {
-        this.legendSetDateString(this.legendPluginModuleString(false));
-        this.legendSetTimeString(this.chart.context.toString());
-        // this.legendSetUnitsString(' ');
-
-        if(this.data && this.element_legend_childs.series !== null) {
-            var labels = this.data.dimension_names;
-            var i = labels.length;
-            while(i--) {
-                var label =
labels[i]; - - if(typeof label === 'undefined' || typeof this.element_legend_childs.series[label] === 'undefined') continue; - this.legendSetLabelValue(label, null); - } - } - }; - - this.legendShowLatestValues = function() { - if(this.chart === null) return; - if(this.selected) return; - - if(this.data === null || this.element_legend_childs.series === null) { - this.legendShowUndefined(); - return; - } - - var show_undefined = true; - if(Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) - show_undefined = false; - - if(show_undefined) { - this.legendShowUndefined(); - return; - } - - this.legendSetDate(this.view_before); - - var labels = this.data.dimension_names; - var i = labels.length; - while(i--) { - var label = labels[i]; - - if(typeof label === 'undefined') continue; - if(typeof this.element_legend_childs.series[label] === 'undefined') continue; - - if(show_undefined) - this.legendSetLabelValue(label, null); - else - this.legendSetLabelValue(label, this.data.view_latest_values[i]); - } - }; - - this.legendReset = function() { - this.legendShowLatestValues(); - }; - - // this should be called just ONCE per dimension per chart - this.__chartDimensionColor = function(label) { - var c = NETDATA.commonColors.get(this, label); - - // it is important to maintain a list of colors - // for this chart only, since the chart library - // uses this to assign colors to dimensions in the same - // order the dimension are given to it - this.colors.push(c); - - return c; - }; - - this.chartPrepareColorPalette = function() { - NETDATA.commonColors.refill(this); - }; - - // get the ordered list of chart colors - // this includes user defined colors - this.chartCustomColors = function() { - this.chartPrepareColorPalette(); - - var colors; - if(this.colors_custom.length) - colors = this.colors_custom; - else - colors = this.colors; - - if(this.debug === true) { - this.log("chartCustomColors() returns:"); - this.log(colors); - } - - return colors; - }; - - // get the ordered list of chart ASSIGNED colors - // (this returns only the colors that have been - // assigned to dimensions, prepended with any - // custom colors defined) - this.chartColors = function() { - this.chartPrepareColorPalette(); - - if(this.debug === true) { - this.log("chartColors() returns:"); - this.log(this.colors); - } - - return this.colors; - }; - - this.legendPluginModuleString = function(withContext) { - var str = ' '; - var context = ''; - - if(typeof this.chart !== 'undefined') { - if(withContext && typeof this.chart.context === 'string') - context = this.chart.context; - - if (typeof this.chart.plugin === 'string' && this.chart.plugin !== '') { - str = this.chart.plugin; - if (typeof this.chart.module === 'string' && this.chart.module !== '') { - str += '/' + this.chart.module; - } - - if (withContext && context !== '') - str += ', ' + context; - } - else if (withContext && context !== '') - str = context; - } - - return str; - }; - - this.legendResolutionTooltip = function () { - if(!this.chart) return ''; - - var collected = this.chart.update_every; - var viewed = (this.data)?this.data.view_update_every:collected; - - if(collected === viewed) - return "resolution " + NETDATA.seconds4human(collected); - - return "resolution " + NETDATA.seconds4human(viewed) + ", collected every " + NETDATA.seconds4human(collected); - }; - - this.legendUpdateDOM = function() { - var needed = false, dim, keys, len, i; - - // check that the legend DOM is up to date for the downloaded dimensions - if(typeof 
this.element_legend_childs.series !== 'object' || this.element_legend_childs.series === null) { - // this.log('the legend does not have any series - requesting legend update'); - needed = true; - } - else if(this.data === null) { - // this.log('the chart does not have any data - requesting legend update'); - needed = true; - } - else if(typeof this.element_legend_childs.series.labels_key === 'undefined') { - needed = true; - } - else { - var labels = this.data.dimension_names.toString(); - if(labels !== this.element_legend_childs.series.labels_key) { - needed = true; - - if(this.debug === true) - this.log('NEW LABELS: "' + labels + '" NOT EQUAL OLD LABELS: "' + this.element_legend_childs.series.labels_key + '"'); - } - } - - if(needed === false) { - // make sure colors available - this.chartPrepareColorPalette(); - - // do we have to update the current values? - // we do this, only when the visible chart is current - if(Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) { - if(this.debug === true) - this.log('chart is in latest position... updating values on legend...'); - - //var labels = this.data.dimension_names; - //var i = labels.length; - //while(i--) - // this.legendSetLabelValue(labels[i], this.data.view_latest_values[i]); - } - return; - } - - if(this.colors === null) { - // this is the first time we update the chart - // let's assign colors to all dimensions - if(this.library.track_colors() === true) { - this.colors = []; - keys = Object.keys(this.chart.dimensions); - len = keys.length; - for(i = 0; i < len ;i++) - NETDATA.commonColors.get(this, this.chart.dimensions[keys[i]].name); - } - } - - // we will re-generate the colors for the chart - // based on the dimensions this result has data for - this.colors = []; - - if(this.debug === true) - this.log('updating Legend DOM'); - - // mark all dimensions as invalid - this.dimensions_visibility.invalidateAll(); - - var genLabel = function(state, parent, dim, name, count) { - var color = state.__chartDimensionColor(name); - - var user_element = null; - var user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + name.toLowerCase() + '-at', null); - if(user_id === null) - user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + dim.toLowerCase() + '-at', null); - if(user_id !== null) { - user_element = document.getElementById(user_id) || null; - if (user_element === null) - state.log('Cannot find element with id: ' + user_id); - } - - state.element_legend_childs.series[name] = { - name: document.createElement('span'), - value: document.createElement('span'), - user: user_element, - last: null, - last_shown_value: null - }; - - var label = state.element_legend_childs.series[name]; - - // create the dimension visibility tracking for this label - state.dimensions_visibility.dimensionAdd(name, label.name, label.value, color); - - var rgb = NETDATA.colorHex2Rgb(color); - label.name.innerHTML = '
'; - - var text = document.createTextNode(' ' + name); - label.name.appendChild(text); - - if(count > 0) - parent.appendChild(document.createElement('br')); - - parent.appendChild(label.name); - parent.appendChild(label.value); - }; - - var content = document.createElement('div'); - - if(this.element_chart === null) { - this.element_chart = document.createElement('div'); - this.element_chart.id = this.library_name + '-' + this.uuid + '-chart'; - this.element.appendChild(this.element_chart); - - if(this.hasLegend() === true) - this.element_chart.className = 'netdata-chart-with-legend-right netdata-' + this.library_name + '-chart-with-legend-right'; - else - this.element_chart.className = ' netdata-chart netdata-' + this.library_name + '-chart'; - } - - if(this.hasLegend() === true) { - if(this.element_legend === null) { - this.element_legend = document.createElement('div'); - this.element_legend.className = 'netdata-chart-legend netdata-' + this.library_name + '-legend'; - this.element.appendChild(this.element_legend); - } - else - this.element_legend.innerHTML = ''; - - this.element_legend_childs = { - content: content, - resize_handler: null, - toolbox: null, - toolbox_left: null, - toolbox_right: null, - toolbox_reset: null, - toolbox_zoomin: null, - toolbox_zoomout: null, - toolbox_volume: null, - title_date: document.createElement('span'), - title_time: document.createElement('span'), - title_units: document.createElement('span'), - perfect_scroller: document.createElement('div'), - series: {} - }; - - if(NETDATA.options.current.legend_toolbox === true && this.library.toolboxPanAndZoom !== null) { - this.element_legend_childs.toolbox = document.createElement('div'); - this.element_legend_childs.toolbox_left = document.createElement('div'); - this.element_legend_childs.toolbox_right = document.createElement('div'); - this.element_legend_childs.toolbox_reset = document.createElement('div'); - this.element_legend_childs.toolbox_zoomin = document.createElement('div'); - this.element_legend_childs.toolbox_zoomout = document.createElement('div'); - this.element_legend_childs.toolbox_volume = document.createElement('div'); - - var get_pan_and_zoom_step = function(event) { - if (event.ctrlKey) - return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_control; - - else if (event.shiftKey) - return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_shift; - - else if (event.altKey) - return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_alt; - - else - return NETDATA.options.current.pan_and_zoom_factor; - }; - - this.element_legend_childs.toolbox.className += ' netdata-legend-toolbox'; - this.element.appendChild(this.element_legend_childs.toolbox); - - this.element_legend_childs.toolbox_left.className += ' netdata-legend-toolbox-button'; - this.element_legend_childs.toolbox_left.innerHTML = ''; - this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_left); - this.element_legend_childs.toolbox_left.onclick = function(e) { - e.preventDefault(); - - var step = (that.view_before - that.view_after) * get_pan_and_zoom_step(e); - var before = that.view_before - step; - var after = that.view_after - step; - if(after >= that.netdata_first) - that.library.toolboxPanAndZoom(that, after, before); - }; - if(NETDATA.options.current.show_help === true) - $(this.element_legend_childs.toolbox_left).popover({ - container: "body", - 
animation: false, - html: true, - trigger: 'hover', - placement: 'bottom', - delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, - title: 'Pan Left', - content: 'Pan the chart to the left. You can also drag it with your mouse or your finger (on touch devices).
Help can be disabled from the settings.' - }); - - - this.element_legend_childs.toolbox_reset.className += ' netdata-legend-toolbox-button'; - this.element_legend_childs.toolbox_reset.innerHTML = ''; - this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_reset); - this.element_legend_childs.toolbox_reset.onclick = function(e) { - e.preventDefault(); - NETDATA.resetAllCharts(that); - }; - if(NETDATA.options.current.show_help === true) - $(this.element_legend_childs.toolbox_reset).popover({ - container: "body", - animation: false, - html: true, - trigger: 'hover', - placement: 'bottom', - delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, - title: 'Chart Reset', - content: 'Reset all the charts to their default auto-refreshing state. You can also double click the chart contents with your mouse or your finger (on touch devices).
Help can be disabled from the settings.' - }); - - this.element_legend_childs.toolbox_right.className += ' netdata-legend-toolbox-button'; - this.element_legend_childs.toolbox_right.innerHTML = ''; - this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_right); - this.element_legend_childs.toolbox_right.onclick = function(e) { - e.preventDefault(); - var step = (that.view_before - that.view_after) * get_pan_and_zoom_step(e); - var before = that.view_before + step; - var after = that.view_after + step; - if(before <= that.netdata_last) - that.library.toolboxPanAndZoom(that, after, before); - }; - if(NETDATA.options.current.show_help === true) - $(this.element_legend_childs.toolbox_right).popover({ - container: "body", - animation: false, - html: true, - trigger: 'hover', - placement: 'bottom', - delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, - title: 'Pan Right', - content: 'Pan the chart to the right. You can also drag it with your mouse or your finger (on touch devices).
Help can be disabled from the settings.'
-                    });
-
-
-                this.element_legend_childs.toolbox_zoomin.className += ' netdata-legend-toolbox-button';
-                this.element_legend_childs.toolbox_zoomin.innerHTML = '';
-                this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomin);
-                this.element_legend_childs.toolbox_zoomin.onclick = function(e) {
-                    e.preventDefault();
-                    var dt = ((that.view_before - that.view_after) * (get_pan_and_zoom_step(e) * 0.8) / 2);
-                    var before = that.view_before - dt;
-                    var after = that.view_after + dt;
-                    that.library.toolboxPanAndZoom(that, after, before);
-                };
-                if(NETDATA.options.current.show_help === true)
-                    $(this.element_legend_childs.toolbox_zoomin).popover({
-                        container: "body",
-                        animation: false,
-                        html: true,
-                        trigger: 'hover',
-                        placement: 'bottom',
-                        delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms },
-                        title: 'Chart Zoom In',
-                        content: 'Zoom in the chart. You can also press SHIFT and select an area of the chart, or press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out. Help can be disabled from the settings.'
-                    });
-
-                this.element_legend_childs.toolbox_zoomout.className += ' netdata-legend-toolbox-button';
-                this.element_legend_childs.toolbox_zoomout.innerHTML = '';
-                this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomout);
-                this.element_legend_childs.toolbox_zoomout.onclick = function(e) {
-                    e.preventDefault();
-                    var dt = (((that.view_before - that.view_after) / (1.0 - (get_pan_and_zoom_step(e) * 0.8)) - (that.view_before - that.view_after)) / 2);
-                    var before = that.view_before + dt;
-                    var after = that.view_after - dt;
-
-                    that.library.toolboxPanAndZoom(that, after, before);
-                };
-                if(NETDATA.options.current.show_help === true)
-                    $(this.element_legend_childs.toolbox_zoomout).popover({
-                        container: "body",
-                        animation: false,
-                        html: true,
-                        trigger: 'hover',
-                        placement: 'bottom',
-                        delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms },
-                        title: 'Chart Zoom Out',
-                        content: 'Zoom out the chart. You can also press SHIFT or ALT and use the mouse wheel, or 2-finger touchpad scroll to zoom in or out. Help can be disabled from the settings.'
-                    });
-
-                //this.element_legend_childs.toolbox_volume.className += ' netdata-legend-toolbox-button';
-                //this.element_legend_childs.toolbox_volume.innerHTML = '';
-                //this.element_legend_childs.toolbox_volume.title = 'Visible Volume';
-                //this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_volume);
-                //this.element_legend_childs.toolbox_volume.onclick = function(e) {
-                    //e.preventDefault();
-                    //alert('clicked toolbox_volume on ' + that.id);
-                //}
-            }
-
-            if(NETDATA.options.current.resize_charts === true) {
-                this.element_legend_childs.resize_handler = document.createElement('div');
-
-                this.element_legend_childs.resize_handler.className += " netdata-legend-resize-handler";
-                this.element_legend_childs.resize_handler.innerHTML = '';
-                this.element.appendChild(this.element_legend_childs.resize_handler);
-                if (NETDATA.options.current.show_help === true)
-                    $(this.element_legend_childs.resize_handler).popover({
-                        container: "body",
-                        animation: false,
-                        html: true,
-                        trigger: 'hover',
-                        placement: 'bottom',
-                        delay: {
-                            show: NETDATA.options.current.show_help_delay_show_ms,
-                            hide: NETDATA.options.current.show_help_delay_hide_ms
-                        },
-                        title: 'Chart Resize',
-                        content: 'Drag this point with your mouse or your finger (on touch devices) to resize the chart vertically. You can also double click it or double tap it to reset between 2 states: the default and the one that fits all the values. Help can be disabled from the settings.'
-                    });
-
-                // mousedown event
-                this.element_legend_childs.resize_handler.onmousedown =
-                    function (e) {
-                        that.resizeHandler(e);
-                    };
-
-                // touchstart event
-                this.element_legend_childs.resize_handler.addEventListener('touchstart', function (e) {
-                    that.resizeHandler(e);
-                }, false);
-            }
-
-            if(this.chart) {
-                this.element_legend_childs.title_date.title = this.legendPluginModuleString(true);
-                this.element_legend_childs.title_time.title = this.legendResolutionTooltip();
-            }
-
-            this.element_legend_childs.title_date.className += " netdata-legend-title-date";
-            this.element_legend.appendChild(this.element_legend_childs.title_date);
-            this.tmp.__last_shown_legend_date = undefined;
-
-            this.element_legend.appendChild(document.createElement('br'));
-
-            this.element_legend_childs.title_time.className += " netdata-legend-title-time";
-            this.element_legend.appendChild(this.element_legend_childs.title_time);
-            this.tmp.__last_shown_legend_time = undefined;
-
-            this.element_legend.appendChild(document.createElement('br'));
-
-            this.element_legend_childs.title_units.className += " netdata-legend-title-units";
-            this.element_legend_childs.title_units.innerText = this.units_current;
-            this.element_legend.appendChild(this.element_legend_childs.title_units);
-            this.tmp.__last_shown_legend_units = undefined;
-
-            this.element_legend.appendChild(document.createElement('br'));
-
-            this.element_legend_childs.perfect_scroller.className = 'netdata-legend-series';
-            this.element_legend.appendChild(this.element_legend_childs.perfect_scroller);
-
-            content.className = 'netdata-legend-series-content';
-            this.element_legend_childs.perfect_scroller.appendChild(content);
-
-            this.element_legend_childs.content = content;
-
-            if(NETDATA.options.current.show_help === true)
-                $(content).popover({
-                    container: "body",
-                    animation: false,
-                    html: true,
-                    trigger: 'hover',
-                    placement: 'bottom',
-                    title: 'Chart Legend',
-                    delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms },
-                    content: 'You can click or tap on the values or the labels to select dimensions. By pressing SHIFT or CONTROL, you can enable or disable multiple dimensions. Help can be disabled from the settings.'
-                });
-        }
-        else {
-            this.element_legend_childs = {
-                content: content,
-                resize_handler: null,
-                toolbox: null,
-                toolbox_left: null,
-                toolbox_right: null,
-                toolbox_reset: null,
-                toolbox_zoomin: null,
-                toolbox_zoomout: null,
-                toolbox_volume: null,
-                title_date: null,
-                title_time: null,
-                title_units: null,
-                perfect_scroller: null,
-                series: {}
-            };
-        }
-
-        if(this.data) {
-            this.element_legend_childs.series.labels_key = this.data.dimension_names.toString();
-            if(this.debug === true)
-                this.log('labels from data: "' + this.element_legend_childs.series.labels_key + '"');
-
-            for(i = 0, len = this.data.dimension_names.length; i < len ;i++) {
-                genLabel(this, content, this.data.dimension_ids[i], this.data.dimension_names[i], i);
-            }
-        }
-        else {
-            var tmp = [];
-            keys = Object.keys(this.chart.dimensions);
-            for(i = 0, len = keys.length; i < len ;i++) {
-                dim = keys[i];
-                tmp.push(this.chart.dimensions[dim].name);
-                genLabel(this, content, dim, this.chart.dimensions[dim].name, i);
-            }
-            this.element_legend_childs.series.labels_key = tmp.toString();
-            if(this.debug === true)
-                this.log('labels from chart: "' + this.element_legend_childs.series.labels_key + '"');
-        }
-
-        // create a hidden div to be used for hiding
-        // the original legend of the chart library
-        var el = document.createElement('div');
-        if(this.element_legend !== null)
-            this.element_legend.appendChild(el);
-        el.style.display = 'none';
-
-        this.element_legend_childs.hidden = document.createElement('div');
-        el.appendChild(this.element_legend_childs.hidden);
-
-        if(this.element_legend_childs.perfect_scroller !== null) {
-            Ps.initialize(this.element_legend_childs.perfect_scroller, {
-                wheelSpeed: 0.2,
-                wheelPropagation: true,
-                swipePropagation: true,
-                minScrollbarLength: null,
-                maxScrollbarLength: null,
-                useBothWheelAxes: false,
-                suppressScrollX: true,
-                suppressScrollY: false,
-                scrollXMarginOffset: 0,
-                scrollYMarginOffset: 0,
-                theme: 'default'
-            });
-            Ps.update(this.element_legend_childs.perfect_scroller);
-        }
-
-        this.legendShowLatestValues();
-    };
-
-    this.hasLegend = function() {
-        if(typeof this.tmp.___hasLegendCache___ !== 'undefined')
-            return this.tmp.___hasLegendCache___;
-
-        var leg = false;
-        if(this.library && this.library.legend(this) === 'right-side')
-            leg = true;
-
-        this.tmp.___hasLegendCache___ = leg;
-        return leg;
-    };
-
-    this.legendWidth = function() {
-        return (this.hasLegend())?140:0;
-    };
-
-    this.legendHeight = function() {
-        return $(this.element).height();
-    };
-
-    this.chartWidth = function() {
-        return $(this.element).width() - this.legendWidth();
-    };
-
-    this.chartHeight = function() {
-        return $(this.element).height();
-    };
-
-    this.chartPixelsPerPoint = function() {
-        // force an options provided detail
-        var px = this.pixels_per_point;
-
-        if(this.library && px < this.library.pixels_per_point(this))
-            px = this.library.pixels_per_point(this);
-
-        if(px < NETDATA.options.current.pixels_per_point)
-            px = NETDATA.options.current.pixels_per_point;
-
-        return px;
-    };
-
-    this.needsRecreation = function() {
-        var ret = (
-            this.chart_created === true
-            && this.library
-            && this.library.autoresize() === false
-            && this.tm.last_resized < NETDATA.options.last_page_resize
-        );
-
-        if(this.debug === true)
-            this.log('needsRecreation(): ' + ret.toString() + ', chart_created = ' + this.chart_created.toString());
-
-        return ret;
-    };
-
-    this.chartDataUniqueID = function() {
-        return this.id + ',' + this.library_name + ',' + this.dimensions + ',' +
this.chartURLOptions(); - }; - - this.chartURLOptions = function() { - var ret = ''; - - if(this.override_options !== null) - ret = this.override_options.toString(); - else - ret = this.library.options(this); - - if(this.append_options !== null) - ret += '|' + this.append_options.toString(); - - ret += '|jsonwrap'; - - if(NETDATA.options.current.eliminate_zero_dimensions === true) - ret += '|nonzero'; - - return ret; - }; - - this.chartURL = function() { - var after, before, points_multiplier = 1; - if(NETDATA.globalPanAndZoom.isActive()) { - if(this.current.force_before_ms !== null && this.current.force_after_ms !== null) { - this.tm.pan_and_zoom_seq = 0; - - before = Math.round(this.current.force_before_ms / 1000); - after = Math.round(this.current.force_after_ms / 1000); - this.view_after = after * 1000; - this.view_before = before * 1000; - - if(NETDATA.options.current.pan_and_zoom_data_padding === true) { - this.requested_padding = Math.round((before - after) / 2); - after -= this.requested_padding; - before += this.requested_padding; - this.requested_padding *= 1000; - points_multiplier = 2; - } - - this.current.force_before_ms = null; - this.current.force_after_ms = null; - } - else { - this.tm.pan_and_zoom_seq = NETDATA.globalPanAndZoom.seq; - - after = Math.round(NETDATA.globalPanAndZoom.force_after_ms / 1000); - before = Math.round(NETDATA.globalPanAndZoom.force_before_ms / 1000); - this.view_after = after * 1000; - this.view_before = before * 1000; - - this.requested_padding = null; - points_multiplier = 1; - } - } - else { - this.tm.pan_and_zoom_seq = 0; - - before = this.before; - after = this.after; - this.view_after = after * 1000; - this.view_before = before * 1000; - - this.requested_padding = null; - points_multiplier = 1; - } - - this.requested_after = after * 1000; - this.requested_before = before * 1000; - - var data_points; - if(NETDATA.options.force_data_points !== 0) { - data_points = NETDATA.options.force_data_points; - this.data_points = data_points; - } - else { - this.data_points = this.points || Math.round(this.chartWidth() / this.chartPixelsPerPoint()); - data_points = this.data_points * points_multiplier; - } - - // build the data URL - this.data_url = this.host + this.chart.data_url; - this.data_url += "&format=" + this.library.format(); - this.data_url += "&points=" + (data_points).toString(); - this.data_url += "&group=" + this.method; - this.data_url += ">ime=" + this.gtime; - this.data_url += "&options=" + this.chartURLOptions(); - - if(after) - this.data_url += "&after=" + after.toString(); - - if(before) - this.data_url += "&before=" + before.toString(); - - if(this.dimensions) - this.data_url += "&dimensions=" + this.dimensions; - - if(NETDATA.options.debug.chart_data_url === true || this.debug === true) - this.log('chartURL(): ' + this.data_url + ' WxH:' + this.chartWidth() + 'x' + this.chartHeight() + ' points: ' + data_points.toString() + ' library: ' + this.library_name); - }; - - this.redrawChart = function() { - if(this.data !== null) - this.updateChartWithData(this.data); - }; - - this.updateChartWithData = function(data) { - if(this.debug === true) - this.log('updateChartWithData() called.'); - - // this may force the chart to be re-created - resizeChart(); - - this.data = data; - - var started = Date.now(); - var view_update_every = data.view_update_every * 1000; - - - if(this.data_update_every !== view_update_every) { - if(this.element_legend_childs.title_time) - this.element_legend_childs.title_time.title = this.legendResolutionTooltip(); - 
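For reference, this is the kind of URL chartURL() above assembles; the host, chart and values are illustrative, but the parameters are exactly the ones appended above:

```js
// Illustrative result of chartURL() (a single line in practice):
//
//   http://localhost:19999/api/v1/data?chart=system.cpu
//       &format=json&points=300&group=average&gtime=0
//       &options=ms|flip|jsonwrap|nonzero
//       &after=-600&before=0&dimensions=user,system
//
// '|jsonwrap' is always appended to the options, '|nonzero' only
// when eliminate_zero_dimensions is enabled, and after/before are
// sent as seconds, not milliseconds.
```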
} - - // if the result is JSON, find the latest update-every - this.data_update_every = view_update_every; - this.data_after = data.after * 1000; - this.data_before = data.before * 1000; - this.netdata_first = data.first_entry * 1000; - this.netdata_last = data.last_entry * 1000; - this.data_points = data.points; - - data.state = this; - - if(NETDATA.options.current.pan_and_zoom_data_padding === true && this.requested_padding !== null) { - if(this.view_after < this.data_after) { - // console.log('adjusting view_after from ' + this.view_after + ' to ' + this.data_after); - this.view_after = this.data_after; - } - - if(this.view_before > this.data_before) { - // console.log('adjusting view_before from ' + this.view_before + ' to ' + this.data_before); - this.view_before = this.data_before; - } - } - else { - this.view_after = this.data_after; - this.view_before = this.data_before; - } - - if(this.debug === true) { - this.log('UPDATE No ' + this.updates_counter + ' COMPLETED'); - - if(this.current.force_after_ms) - this.log('STATUS: forced : ' + (this.current.force_after_ms / 1000).toString() + ' - ' + (this.current.force_before_ms / 1000).toString()); - else - this.log('STATUS: forced : unset'); - - this.log('STATUS: requested : ' + (this.requested_after / 1000).toString() + ' - ' + (this.requested_before / 1000).toString()); - this.log('STATUS: downloaded: ' + (this.data_after / 1000).toString() + ' - ' + (this.data_before / 1000).toString()); - this.log('STATUS: rendered : ' + (this.view_after / 1000).toString() + ' - ' + (this.view_before / 1000).toString()); - this.log('STATUS: points : ' + (this.data_points).toString()); - } - - if(this.data_points === 0) { - noDataToShow(); - return; - } - - if(this.updates_since_last_creation >= this.library.max_updates_to_recreate()) { - if(this.debug === true) - this.log('max updates of ' + this.updates_since_last_creation.toString() + ' reached. 
Forcing re-generation.'); - - init('force'); - return; - } - - // check and update the legend - this.legendUpdateDOM(); - - if(this.chart_created === true - && typeof this.library.update === 'function') { - - if(this.debug === true) - this.log('updating chart...'); - - if(callChartLibraryUpdateSafely(data) === false) - return; - } - else { - if(this.debug === true) - this.log('creating chart...'); - - if(callChartLibraryCreateSafely(data) === false) - return; - } - if(this.isVisible() === true) { - hideMessage(); - this.legendShowLatestValues(); - } - else { - this.__redraw_on_unhide = true; - - if(this.debug === true) - this.log("drawn while not visible") - } - - if(this.selected === true) - NETDATA.globalSelectionSync.stop(); - - // update the performance counters - var now = Date.now(); - this.tm.last_updated = now; - - // don't update last_autorefreshed if this chart is - // forced to be updated with global PanAndZoom - if(NETDATA.globalPanAndZoom.isActive()) - this.tm.last_autorefreshed = 0; - else { - if(NETDATA.options.current.parallel_refresher === true && NETDATA.options.current.concurrent_refreshes === true && typeof this.force_update_every !== 'number') - this.tm.last_autorefreshed = now - (now % this.data_update_every); - else - this.tm.last_autorefreshed = now; - } - - this.refresh_dt_ms = now - started; - NETDATA.options.auto_refresher_fast_weight += this.refresh_dt_ms; - - if(this.refresh_dt_element !== null) - this.refresh_dt_element.innerText = this.refresh_dt_ms.toString(); - - if(this.foreign_element_before !== null) - this.foreign_element_before.innerText = NETDATA.dateTime.localeDateString(this.view_before) + ' ' + NETDATA.dateTime.localeTimeString(this.view_before); - - if(this.foreign_element_after !== null) - this.foreign_element_after.innerText = NETDATA.dateTime.localeDateString(this.view_after) + ' ' + NETDATA.dateTime.localeTimeString(this.view_after); - - if(this.foreign_element_duration !== null) - this.foreign_element_duration.innerText = NETDATA.seconds4human(Math.floor((this.view_before - this.view_after) / 1000) + 1); - - if(this.foreign_element_update_every !== null) - this.foreign_element_update_every.innerText = NETDATA.seconds4human(Math.floor(this.data_update_every / 1000)); - }; - - this.getSnapshotData = function(key) { - if(this.debug === true) - this.log('updating from snapshot: ' + key); - - if(typeof netdataSnapshotData.data[key] === 'undefined') { - this.log('snapshot does not include data for key "' + key + '"'); - return null; - } - - if(typeof netdataSnapshotData.data[key] !== 'string') { - this.log('snapshot data for key "' + key + '" is not string'); - return null; - } - - var uncompressed; - try { - uncompressed = netdataSnapshotData.uncompress(netdataSnapshotData.data[key]); - - if(uncompressed === null) { - this.log('uncompressed snapshot data for key ' + key + ' is null'); - return null; - } - - if(typeof uncompressed === 'undefined') { - this.log('uncompressed snapshot data for key ' + key + ' is undefined'); - return null; - } - } - catch(e) { - this.log('decompression of snapshot data for key ' + key + ' failed'); - console.log(e); - uncompressed = null; - } - - if(typeof uncompressed !== 'string') { - this.log('uncompressed snapshot data for key ' + key + ' is not string'); - return null; - } - - var data; - try { - data = JSON.parse(uncompressed); - } - catch(e) { - this.log('parsing snapshot data for key ' + key + ' failed'); - console.log(e); - data = null; - } - - return data; - }; - - this.updateChart = function(callback) { - 
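The last_autorefreshed bookkeeping above is what keeps parallel refreshers in phase: instead of stamping the actual time, the timestamp is snapped down to the start of the current collection interval, so every chart with the same interval becomes due again at the same wall-clock boundary. A minimal sketch of the rounding:

```js
// Sketch: align "last refreshed" to the collection interval, as
// updateChartWithData() does when concurrent refreshes are enabled.
function alignedLastRefresh(now, updateEveryMs) {
    return now - (now % updateEveryMs); // start of the current tick
}

// e.g. updateEveryMs = 1000, now = 1541594400421
// -> 1541594400000, so every 1s chart becomes due together
//    at 1541594401000
```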
if (this.debug === true) - this.log('updateChart()'); - - if (this.fetching_data === true) { - if (this.debug === true) - this.log('updateChart(): I am already updating...'); - - if (typeof callback === 'function') - return callback(false, 'already running'); - - return; - } - - // due to late initialization of charts and libraries - // we need to check this too - if (this.enabled === false) { - if (this.debug === true) - this.log('updateChart(): I am not enabled'); - - if (typeof callback === 'function') - return callback(false, 'not enabled'); - - return; - } - - if (canBeRendered() === false) { - if (this.debug === true) - this.log('updateChart(): cannot be rendered'); - - if (typeof callback === 'function') - return callback(false, 'cannot be rendered'); - - return; - } - - if (that.dom_created !== true) { - if (this.debug === true) - this.log('updateChart(): creating DOM'); - - createDOM(); - } - - if (this.chart === null) { - if (this.debug === true) - this.log('updateChart(): getting chart'); - - return this.getChart(function () { - return that.updateChart(callback); - }); - } - - if(this.library.initialized === false) { - if(this.library.enabled === true) { - if(this.debug === true) - this.log('updateChart(): initializing chart library'); - - return this.library.initialize(function () { - return that.updateChart(callback); - }); - } - else { - error('chart library "' + this.library_name + '" is not available.'); - - if(typeof callback === 'function') - return callback(false, 'library not available'); - - return; - } - } - - this.clearSelection(); - this.chartURL(); - - NETDATA.statistics.refreshes_total++; - NETDATA.statistics.refreshes_active++; - - if(NETDATA.statistics.refreshes_active > NETDATA.statistics.refreshes_active_max) - NETDATA.statistics.refreshes_active_max = NETDATA.statistics.refreshes_active; - - var ok = false; - this.fetching_data = true; - - if(netdataSnapshotData !== null) { - var key = this.chartDataUniqueID(); - var data = this.getSnapshotData(key); - if (data !== null) { - ok = true; - data = NETDATA.xss.checkData('/api/v1/data', data, this.library.xssRegexIgnore); - this.updateChartWithData(data); - } - else { - ok = false; - error('cannot get data from snapshot for key: "' + key + '"'); - that.tm.last_autorefreshed = Date.now(); - } - - NETDATA.statistics.refreshes_active--; - this.fetching_data = false; - - if(typeof callback === 'function') - callback(ok, 'snapshot'); - - return; - } - - if(this.debug === true) - this.log('updating from ' + this.data_url); - - this.xhr = $.ajax( { - url: this.data_url, - cache: false, - async: true, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkData('/api/v1/data', data, that.library.xssRegexIgnore); - - that.xhr = undefined; - that.retries_on_data_failures = 0; - ok = true; - - if(that.debug === true) - that.log('data received. 
updating chart.'); - - that.updateChartWithData(data); - }) - .fail(function(msg) { - that.xhr = undefined; - - if(msg.statusText !== 'abort') { - that.retries_on_data_failures++; - if(that.retries_on_data_failures > NETDATA.options.current.retries_on_data_failures) { - // that.log('failed ' + that.retries_on_data_failures.toString() + ' times - giving up'); - that.retries_on_data_failures = 0; - error('data download failed for url: ' + that.data_url); - } - else { - that.tm.last_autorefreshed = Date.now(); - // that.log('failed ' + that.retries_on_data_failures.toString() + ' times, but I will retry'); - } - } - }) - .always(function() { - that.xhr = undefined; - - NETDATA.statistics.refreshes_active--; - that.fetching_data = false; - - if(typeof callback === 'function') - return callback(ok, 'download'); - }); - }; - - var __isVisible = function() { - var ret = true; - - if(NETDATA.options.current.update_only_visible !== false) { - // tolerance is the number of pixels a chart can be off-screen - // to consider it as visible and refresh it as if was visible - var tolerance = 0; - - that.tm.last_visible_check = Date.now(); - - var rect = that.element.getBoundingClientRect(); - - var screenTop = window.scrollY; - var screenBottom = screenTop + window.innerHeight; - - var chartTop = rect.top + screenTop; - var chartBottom = chartTop + rect.height; - - ret = !(rect.width === 0 || rect.height === 0 || chartBottom + tolerance < screenTop || chartTop - tolerance > screenBottom); - } - - if(that.debug === true) - that.log('__isVisible(): ' + ret); - - return ret; - }; - - this.isVisible = function(nocache) { - // this.log('last_visible_check: ' + this.tm.last_visible_check + ', last_page_scroll: ' + NETDATA.options.last_page_scroll); - - // caching - we do not evaluate the charts visibility - // if the page has not been scrolled since the last check - if((typeof nocache !== 'undefined' && nocache === true) - || typeof this.tmp.___isVisible___ === 'undefined' - || this.tm.last_visible_check <= NETDATA.options.last_page_scroll) { - this.tmp.___isVisible___ = __isVisible(); - if (this.tmp.___isVisible___ === true) this.unhideChart(); - else this.hideChart(); - } - - if(this.debug === true) - this.log('isVisible(' + nocache + '): ' + this.tmp.___isVisible___); - - return this.tmp.___isVisible___; - }; - - this.isAutoRefreshable = function() { - return (this.current.autorefresh); - }; - - this.canBeAutoRefreshed = function() { - if(this.enabled === false) { - if(this.debug === true) - this.log('canBeAutoRefreshed() -> not enabled'); - - return false; - } - - if(this.running === true) { - if(this.debug === true) - this.log('canBeAutoRefreshed() -> already running'); - - return false; - } - - if(this.library === null || this.library.enabled === false) { - error('charting library "' + this.library_name + '" is not available'); - if(this.debug === true) - this.log('canBeAutoRefreshed() -> chart library ' + this.library_name + ' is not available'); - - return false; - } - - if(this.isVisible() === false) { - if(NETDATA.options.debug.visibility === true || this.debug === true) - this.log('canBeAutoRefreshed() -> not visible'); - - return false; - } - - var now = Date.now(); - - if(this.current.force_update_at !== 0 && this.current.force_update_at < now) { - if(this.debug === true) - this.log('canBeAutoRefreshed() -> timed force update - allowing this update'); - - this.current.force_update_at = 0; - return true; - } - - if(this.isAutoRefreshable() === false) { - if(this.debug === true) - 
this.log('canBeAutoRefreshed() -> not auto-refreshable'); - - return false; - } - - // allow the first update, even if the page is not visible - if(NETDATA.options.page_is_visible === false && this.updates_counter && this.updates_since_last_unhide) { - if(NETDATA.options.debug.focus === true || this.debug === true) - this.log('canBeAutoRefreshed() -> not the first update, and page does not have focus'); - - return false; - } - - if(this.needsRecreation() === true) { - if(this.debug === true) - this.log('canBeAutoRefreshed() -> needs re-creation.'); - - return true; - } - - if(NETDATA.options.auto_refresher_stop_until >= now) { - if(this.debug === true) - this.log('canBeAutoRefreshed() -> stopped until is in future.'); - - return false; - } - - // options valid only for autoRefresh() - if(NETDATA.globalPanAndZoom.isActive()) { - if(NETDATA.globalPanAndZoom.shouldBeAutoRefreshed(this)) { - if(this.debug === true) - this.log('canBeAutoRefreshed(): global panning: I need an update.'); - - return true; - } - else { - if(this.debug === true) - this.log('canBeAutoRefreshed(): global panning: I am already up to date.'); - - return false; - } - } - - if(this.selected === true) { - if(this.debug === true) - this.log('canBeAutoRefreshed(): I have a selection in place.'); - - return false; - } - - if(this.paused === true) { - if(this.debug === true) - this.log('canBeAutoRefreshed(): I am paused.'); - - return false; - } - - var data_update_every = this.data_update_every; - if(typeof this.force_update_every === 'number') - data_update_every = this.force_update_every; - - if(now - this.tm.last_autorefreshed >= data_update_every) { - if(this.debug === true) - this.log('canBeAutoRefreshed(): It is time to update me. Now: ' + now.toString() + ', last_autorefreshed: ' + this.tm.last_autorefreshed + ', data_update_every: ' + data_update_every + ', delta: ' + (now - this.tm.last_autorefreshed).toString()); - - return true; - } - - return false; - }; - - this.autoRefresh = function(callback) { - var state = that; - - if(state.canBeAutoRefreshed() === true && state.running === false) { - - state.running = true; - state.updateChart(function() { - state.running = false; - - if(typeof callback === 'function') - return callback(); - }); - } - else { - if(typeof callback === 'function') - return callback(); - } - }; - - this.__defaultsFromDownloadedChart = function(chart) { - this.chart = chart; - this.chart_url = chart.url; - this.data_update_every = chart.update_every * 1000; - this.data_points = Math.round(this.chartWidth() / this.chartPixelsPerPoint()); - this.tm.last_info_downloaded = Date.now(); - - if(this.title === null) - this.title = chart.title; - - if(this.units === null) { - this.units = chart.units; - this.units_current = this.units; - } - }; - - // fetch the chart description from the netdata server - this.getChart = function(callback) { - this.chart = NETDATA.chartRegistry.get(this.host, this.id); - if(this.chart) { - this.__defaultsFromDownloadedChart(this.chart); - - if(typeof callback === 'function') - return callback(); - } - else if(netdataSnapshotData !== null) { - // console.log(this); - // console.log(NETDATA.chartRegistry); - NETDATA.error(404, 'host: ' + this.host + ', chart: ' + this.id); - error('chart not found in snapshot'); - - if(typeof callback === 'function') - return callback(); - } - else { - this.chart_url = "/api/v1/chart?chart=" + this.id; - - if(this.debug === true) - this.log('downloading ' + this.chart_url); - - $.ajax( { - url: this.host + this.chart_url, - cache: false, - 
async: true, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(chart) { - chart = NETDATA.xss.checkOptional('/api/v1/chart', chart); - - chart.url = that.chart_url; - that.__defaultsFromDownloadedChart(chart); - NETDATA.chartRegistry.add(that.host, that.id, chart); - }) - .fail(function() { - NETDATA.error(404, that.chart_url); - error('chart not found on url "' + that.chart_url + '"'); - }) - .always(function() { - if(typeof callback === 'function') - return callback(); - }); - } - }; - - // ============================================================================================================ - // INITIALIZATION - - initDOM(); - init('fast'); - }; - - NETDATA.resetAllCharts = function(state) { - // first clear the global selection sync - // to make sure no chart is in selected state - NETDATA.globalSelectionSync.stop(); - - // there are 2 possibilities here - // a. state is the global Pan and Zoom master - // b. state is not the global Pan and Zoom master - var master = true; - if(NETDATA.globalPanAndZoom.isMaster(state) === false) - master = false; - - // clear the global Pan and Zoom - // this will also refresh the master - // and unblock any charts currently mirroring the master - NETDATA.globalPanAndZoom.clearMaster(); - - // if we were not the master, reset our status too - // this is required because most probably the mouse - // is over this chart, blocking it from auto-refreshing - if(master === false && (state.paused === true || state.selected === true)) - state.resetChart(); - }; - - // get or create a chart state, given a DOM element - NETDATA.chartState = function(element) { - var self = $(element); - - var state = self.data('netdata-state-object') || null; - if(state === null) { - state = new chartState(element); - self.data('netdata-state-object', state); - } - return state; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Library functions - - // Load a script without jquery - // This is used to load jquery - after it is loaded, we use jquery - NETDATA._loadjQuery = function(callback) { - if(typeof jQuery === 'undefined') { - if(NETDATA.options.debug.main_loop === true) - console.log('loading ' + NETDATA.jQuery); - - var script = document.createElement('script'); - script.type = 'text/javascript'; - script.async = true; - script.src = NETDATA.jQuery; - - // script.onabort = onError; - script.onerror = function() { NETDATA.error(101, NETDATA.jQuery); }; - if(typeof callback === "function") { - script.onload = function () { - $ = jQuery; - return callback(); - }; - } - - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(script, s); - } - else if(typeof callback === "function") { - $ = jQuery; - return callback(); - } - }; - - NETDATA._loadCSS = function(filename) { - // don't use jQuery here - // styles are loaded before jQuery - // to eliminate showing an unstyled page to the user - - var fileref = document.createElement("link"); - fileref.setAttribute("rel", "stylesheet"); - fileref.setAttribute("type", "text/css"); - fileref.setAttribute("href", filename); - - if (typeof fileref !== 'undefined') - document.getElementsByTagName("head")[0].appendChild(fileref); - }; - - NETDATA.colorHex2Rgb = function(hex) { - // Expand shorthand form (e.g. "03F") to full form (e.g. 
"0033FF") - var shorthandRegex = /^#?([a-f\d])([a-f\d])([a-f\d])$/i; - hex = hex.replace(shorthandRegex, function(m, r, g, b) { - return r + r + g + g + b + b; - }); - - var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex); - return result ? { - r: parseInt(result[1], 16), - g: parseInt(result[2], 16), - b: parseInt(result[3], 16) - } : null; - }; - - NETDATA.colorLuminance = function(hex, lum) { - // validate hex string - hex = String(hex).replace(/[^0-9a-f]/gi, ''); - if (hex.length < 6) - hex = hex[0]+hex[0]+hex[1]+hex[1]+hex[2]+hex[2]; - - lum = lum || 0; - - // convert to decimal and change luminosity - var rgb = "#", c, i; - for (i = 0; i < 3; i++) { - c = parseInt(hex.substr(i*2,2), 16); - c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16); - rgb += ("00"+c).substr(c.length); - } - - return rgb; - }; - - NETDATA.guid = function() { - function s4() { - return Math.floor((1 + Math.random()) * 0x10000) - .toString(16) - .substring(1); - } - - return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4(); - }; - - NETDATA.zeropad = function(x) { - if(x > -10 && x < 10) return '0' + x.toString(); - else return x.toString(); - }; - - // user function to signal us the DOM has been - // updated. - NETDATA.updatedDom = function() { - NETDATA.options.updated_dom = true; - }; - - NETDATA.ready = function(callback) { - NETDATA.options.pauseCallback = callback; - }; - - NETDATA.pause = function(callback) { - if(typeof callback === 'function') { - if (NETDATA.options.pause === true) - return callback(); - else - NETDATA.options.pauseCallback = callback; - } - }; - - NETDATA.unpause = function() { - NETDATA.options.pauseCallback = null; - NETDATA.options.updated_dom = true; - NETDATA.options.pause = false; - }; - - NETDATA.seconds4human = function (seconds, options) { - var default_options = { - now: 'now', - space: ' ', - negative_suffix: 'ago', - day: 'day', - days: 'days', - hour: 'hour', - hours: 'hours', - minute: 'min', - minutes: 'mins', - second: 'sec', - seconds: 'secs', - and: 'and' - }; - - if(typeof options !== 'object') - options = default_options; - else { - var x; - for(x in default_options) { - if(typeof options[x] !== 'string') - options[x] = default_options[x]; - } - } - - if(typeof seconds === 'string') - seconds = parseInt(seconds, 10); - - if(seconds === 0) - return options.now; - - var suffix = ''; - if(seconds < 0) { - seconds = -seconds; - if(options.negative_suffix !== '') suffix = options.space + options.negative_suffix; - } - - var days = Math.floor(seconds / 86400); - seconds -= (days * 86400); - - var hours = Math.floor(seconds / 3600); - seconds -= (hours * 3600); - - var minutes = Math.floor(seconds / 60); - seconds -= (minutes * 60); - - var strings = []; - - if(days > 1) strings.push(days.toString() + options.space + options.days); - else if(days === 1) strings.push(days.toString() + options.space + options.day); - - if(hours > 1) strings.push(hours.toString() + options.space + options.hours); - else if(hours === 1) strings.push(hours.toString() + options.space + options.hour); - - if(minutes > 1) strings.push(minutes.toString() + options.space + options.minutes); - else if(minutes === 1) strings.push(minutes.toString() + options.space + options.minute); - - if(seconds > 1) strings.push(Math.floor(seconds).toString() + options.space + options.seconds); - else if(seconds === 1) strings.push(Math.floor(seconds).toString() + options.space + options.second); - - if(strings.length === 1) - return strings.pop() + 
suffix; - - var last = strings.pop(); - return strings.join(", ") + " " + options.and + " " + last + suffix; - }; - - // ---------------------------------------------------------------------------------------------------------------- - - // this is purely sequential charts refresher - // it is meant to be autonomous - NETDATA.chartRefresherNoParallel = function(index, callback) { - var targets = NETDATA.intersectionObserver.targets(); - - if(NETDATA.options.debug.main_loop === true) - console.log('NETDATA.chartRefresherNoParallel(' + index + ')'); - - if(NETDATA.options.updated_dom === true) { - // the dom has been updated - // get the dom parts again - NETDATA.parseDom(callback); - return; - } - if(index >= targets.length) { - if(NETDATA.options.debug.main_loop === true) - console.log('waiting to restart main loop...'); - - NETDATA.options.auto_refresher_fast_weight = 0; - callback(); - } - else { - var state = targets[index]; - - if(NETDATA.options.auto_refresher_fast_weight < NETDATA.options.current.fast_render_timeframe) { - if(NETDATA.options.debug.main_loop === true) - console.log('fast rendering...'); - - if(state.isVisible() === true) - NETDATA.timeout.set(function() { - state.autoRefresh(function () { - NETDATA.chartRefresherNoParallel(++index, callback); - }); - }, 0); - else - NETDATA.chartRefresherNoParallel(++index, callback); - } - else { - if(NETDATA.options.debug.main_loop === true) console.log('waiting for next refresh...'); - NETDATA.options.auto_refresher_fast_weight = 0; - - NETDATA.timeout.set(function() { - state.autoRefresh(function() { - NETDATA.chartRefresherNoParallel(++index, callback); - }); - }, NETDATA.options.current.idle_between_charts); - } - } - }; - - NETDATA.chartRefresherWaitTime = function() { - return NETDATA.options.current.idle_parallel_loops; - }; - - // the default refresher - NETDATA.chartRefresherLastRun = 0; - NETDATA.chartRefresherRunsAfterParseDom = 0; - NETDATA.chartRefresherTimeoutId = undefined; - - NETDATA.chartRefresherReschedule = function() { - if(NETDATA.options.current.async_on_scroll === true) { - if(NETDATA.chartRefresherTimeoutId) - NETDATA.timeout.clear(NETDATA.chartRefresherTimeoutId); - NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(NETDATA.chartRefresher, NETDATA.options.current.onscroll_worker_duration_threshold); - //console.log('chartRefresherReschedule()'); - } - }; - - NETDATA.chartRefresher = function() { - // console.log('chartRefresher() begin ' + (Date.now() - NETDATA.chartRefresherLastRun).toString() + ' ms since last run'); - - if(NETDATA.options.page_is_visible === false - && NETDATA.options.current.stop_updates_when_focus_is_lost === true - && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_resize - && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_scroll - && NETDATA.chartRefresherRunsAfterParseDom > 10 - ) { - setTimeout( - NETDATA.chartRefresher, - NETDATA.options.current.idle_lost_focus - ); - - // console.log('chartRefresher() page without focus, will run in ' + NETDATA.options.current.idle_lost_focus.toString() + ' ms, ' + NETDATA.chartRefresherRunsAfterParseDom.toString()); - return; - } - NETDATA.chartRefresherRunsAfterParseDom++; - - var now = Date.now(); - NETDATA.chartRefresherLastRun = now; - - if( now < NETDATA.options.on_scroll_refresher_stop_until ) { - NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( - NETDATA.chartRefresher, - NETDATA.chartRefresherWaitTime() - ); - - // console.log('chartRefresher() end1 will run in ' + NETDATA.chartRefresherWaitTime().toString() 
+ ' ms'); - return; - } - - if( now < NETDATA.options.auto_refresher_stop_until ) { - NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( - NETDATA.chartRefresher, - NETDATA.chartRefresherWaitTime() - ); - - // console.log('chartRefresher() end2 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms'); - return; - } - - if(NETDATA.options.pause === true) { - // console.log('auto-refresher is paused'); - NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( - NETDATA.chartRefresher, - NETDATA.chartRefresherWaitTime() - ); - - // console.log('chartRefresher() end3 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms'); - return; - } - - if(typeof NETDATA.options.pauseCallback === 'function') { - // console.log('auto-refresher is calling pauseCallback'); - - NETDATA.options.pause = true; - NETDATA.options.pauseCallback(); - NETDATA.chartRefresher(); - - // console.log('chartRefresher() end4 (nested)'); - return; - } - - if(NETDATA.options.current.parallel_refresher === false) { - // console.log('auto-refresher is calling chartRefresherNoParallel(0)'); - NETDATA.chartRefresherNoParallel(0, function() { - NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( - NETDATA.chartRefresher, - NETDATA.options.current.idle_between_loops - ); - }); - // console.log('chartRefresher() end5 (no parallel, nested)'); - return; - } - - if(NETDATA.options.updated_dom === true) { - // the dom has been updated - // get the dom parts again - // console.log('auto-refresher is calling parseDom()'); - NETDATA.parseDom(NETDATA.chartRefresher); - // console.log('chartRefresher() end6 (parseDom)'); - return; - } - - if(NETDATA.globalSelectionSync.active() === false) { - var parallel = []; - var targets = NETDATA.intersectionObserver.targets(); - var len = targets.length; - var state; - while(len--) { - state = targets[len]; - if(state.running === true || state.isVisible() === false) - continue; - - if(state.library.initialized === false) { - if(state.library.enabled === true) { - state.library.initialize(NETDATA.chartRefresher); - //console.log('chartRefresher() end6 (library init)'); - return; - } - else { - state.error('chart library "' + state.library_name + '" is not enabled.'); - } - } - - if(NETDATA.scrollUp === true) - parallel.unshift(state); - else - parallel.push(state); - } - - len = parallel.length; - while (len--) { - state = parallel[len]; - // console.log('auto-refresher executing in parallel for ' + parallel.length.toString() + ' charts'); - // this will execute the jobs in parallel - - if (state.running === false) - NETDATA.timeout.set(state.autoRefresh, 0); - } - //else { - // console.log('auto-refresher nothing to do'); - //} - } - - // run the next refresh iteration - NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( - NETDATA.chartRefresher, - NETDATA.chartRefresherWaitTime() - ); - - //console.log('chartRefresher() completed in ' + (Date.now() - now).toString() + ' ms'); - }; - - NETDATA.parseDom = function(callback) { - //console.log('parseDom()'); - - NETDATA.options.last_page_scroll = Date.now(); - NETDATA.options.updated_dom = false; - NETDATA.chartRefresherRunsAfterParseDom = 0; - - var targets = $('div[data-netdata]'); //.filter(':visible'); - - if(NETDATA.options.debug.main_loop === true) - console.log('DOM updated - there are ' + targets.length + ' charts on page.'); - - NETDATA.intersectionObserver.globalReset(); - NETDATA.options.targets = []; - var len = targets.length; - while(len--) { - // the initialization will take care of sizing - // and the 
"loading..." message - var state = NETDATA.chartState(targets[len]); - NETDATA.options.targets.push(state); - NETDATA.intersectionObserver.observe(state); - } - - if(NETDATA.globalChartUnderlay.isActive() === true) - NETDATA.globalChartUnderlay.setup(); - else - NETDATA.globalChartUnderlay.clear(); - - if(typeof callback === 'function') - return callback(); - }; - - // this is the main function - where everything starts - NETDATA.started = false; - NETDATA.start = function() { - // this should be called only once - - if(NETDATA.started === true) { - console.log('netdata is already started'); - return; - } - - NETDATA.started = true; - NETDATA.options.page_is_visible = true; - - $(window).blur(function() { - if(NETDATA.options.current.stop_updates_when_focus_is_lost === true) { - NETDATA.options.page_is_visible = false; - if(NETDATA.options.debug.focus === true) - console.log('Lost Focus!'); - } - }); - - $(window).focus(function() { - if(NETDATA.options.current.stop_updates_when_focus_is_lost === true) { - NETDATA.options.page_is_visible = true; - if(NETDATA.options.debug.focus === true) - console.log('Focus restored!'); - } - }); - - if(typeof document.hasFocus === 'function' && !document.hasFocus()) { - if(NETDATA.options.current.stop_updates_when_focus_is_lost === true) { - NETDATA.options.page_is_visible = false; - if(NETDATA.options.debug.focus === true) - console.log('Document has no focus!'); - } - } - - // bootstrap tab switching - $('a[data-toggle="tab"]').on('shown.bs.tab', NETDATA.onscroll); - - // bootstrap modal switching - var $modal = $('.modal'); - $modal.on('hidden.bs.modal', NETDATA.onscroll); - $modal.on('shown.bs.modal', NETDATA.onscroll); - - // bootstrap collapse switching - var $collapse = $('.collapse'); - $collapse.on('hidden.bs.collapse', NETDATA.onscroll); - $collapse.on('shown.bs.collapse', NETDATA.onscroll); - - NETDATA.parseDom(NETDATA.chartRefresher); - - // Alarms initialization - setTimeout(NETDATA.alarms.init, 1000); - - // Registry initialization - setTimeout(NETDATA.registry.init, netdataRegistryAfterMs); - - if(typeof netdataCallback === 'function') - netdataCallback(); - }; - - NETDATA.globalReset = function() { - NETDATA.intersectionObserver.globalReset(); - NETDATA.globalSelectionSync.globalReset(); - NETDATA.globalPanAndZoom.globalReset(); - NETDATA.chartRegistry.globalReset(); - NETDATA.commonMin.globalReset(); - NETDATA.commonMax.globalReset(); - NETDATA.commonColors.globalReset(); - NETDATA.unitsConversion.globalReset(); - NETDATA.options.targets = []; - NETDATA.parseDom(); - NETDATA.unpause(); - }; - - // ---------------------------------------------------------------------------------------------------------------- - // peity - - NETDATA.peityInitialize = function(callback) { - if(typeof netdataNoPeitys === 'undefined' || !netdataNoPeitys) { - $.ajax({ - url: NETDATA.peity_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('peity', NETDATA.peity_js); - }) - .fail(function() { - NETDATA.chartLibraries.peity.enabled = false; - NETDATA.error(100, NETDATA.peity_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - else { - NETDATA.chartLibraries.peity.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.peityChartUpdate = function(state, data) { - state.peity_instance.innerHTML = data.result; - - if(state.peity_options.stroke !== 
state.chartCustomColors()[0]) { - state.peity_options.stroke = state.chartCustomColors()[0]; - if(state.chart.chart_type === 'line') - state.peity_options.fill = NETDATA.themes.current.background; - else - state.peity_options.fill = NETDATA.colorLuminance(state.chartCustomColors()[0], NETDATA.chartDefaults.fill_luminance); - } - - $(state.peity_instance).peity('line', state.peity_options); - return true; - }; - - NETDATA.peityChartCreate = function(state, data) { - state.peity_instance = document.createElement('div'); - state.element_chart.appendChild(state.peity_instance); - - state.peity_options = { - stroke: NETDATA.themes.current.foreground, - strokeWidth: NETDATA.dataAttribute(state.element, 'peity-strokewidth', 1), - width: state.chartWidth(), - height: state.chartHeight(), - fill: NETDATA.themes.current.foreground - }; - - NETDATA.peityChartUpdate(state, data); - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // sparkline - - NETDATA.sparklineInitialize = function(callback) { - if(typeof netdataNoSparklines === 'undefined' || !netdataNoSparklines) { - $.ajax({ - url: NETDATA.sparkline_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('sparkline', NETDATA.sparkline_js); - }) - .fail(function() { - NETDATA.chartLibraries.sparkline.enabled = false; - NETDATA.error(100, NETDATA.sparkline_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - else { - NETDATA.chartLibraries.sparkline.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.sparklineChartUpdate = function(state, data) { - state.sparkline_options.width = state.chartWidth(); - state.sparkline_options.height = state.chartHeight(); - - $(state.element_chart).sparkline(data.result, state.sparkline_options); - return true; - }; - - NETDATA.sparklineChartCreate = function(state, data) { - var type = NETDATA.dataAttribute(state.element, 'sparkline-type', 'line'); - var lineColor = NETDATA.dataAttribute(state.element, 'sparkline-linecolor', state.chartCustomColors()[0]); - var fillColor = NETDATA.dataAttribute(state.element, 'sparkline-fillcolor', ((state.chart.chart_type === 'line')?NETDATA.themes.current.background:NETDATA.colorLuminance(lineColor, NETDATA.chartDefaults.fill_luminance))); - var chartRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemin', undefined); - var chartRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemax', undefined); - var composite = NETDATA.dataAttribute(state.element, 'sparkline-composite', undefined); - var enableTagOptions = NETDATA.dataAttribute(state.element, 'sparkline-enabletagoptions', undefined); - var tagOptionPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tagoptionprefix', undefined); - var tagValuesAttribute = NETDATA.dataAttribute(state.element, 'sparkline-tagvaluesattribute', undefined); - var disableHiddenCheck = NETDATA.dataAttribute(state.element, 'sparkline-disablehiddencheck', undefined); - var defaultPixelsPerValue = NETDATA.dataAttribute(state.element, 'sparkline-defaultpixelspervalue', undefined); - var spotColor = NETDATA.dataAttribute(state.element, 'sparkline-spotcolor', undefined); - var minSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-minspotcolor', undefined); - var maxSpotColor = NETDATA.dataAttribute(state.element, 
'sparkline-maxspotcolor', undefined); - var spotRadius = NETDATA.dataAttribute(state.element, 'sparkline-spotradius', undefined); - var valueSpots = NETDATA.dataAttribute(state.element, 'sparkline-valuespots', undefined); - var highlightSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightspotcolor', undefined); - var highlightLineColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightlinecolor', undefined); - var lineWidth = NETDATA.dataAttribute(state.element, 'sparkline-linewidth', undefined); - var normalRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemin', undefined); - var normalRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemax', undefined); - var drawNormalOnTop = NETDATA.dataAttribute(state.element, 'sparkline-drawnormalontop', undefined); - var xvalues = NETDATA.dataAttribute(state.element, 'sparkline-xvalues', undefined); - var chartRangeClip = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeclip', undefined); - var chartRangeMinX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeminx', undefined); - var chartRangeMaxX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemaxx', undefined); - var disableInteraction = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disableinteraction', false); - var disableTooltips = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disabletooltips', false); - var disableHighlight = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disablehighlight', false); - var highlightLighten = NETDATA.dataAttribute(state.element, 'sparkline-highlightlighten', 1.4); - var highlightColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightcolor', undefined); - var tooltipContainer = NETDATA.dataAttribute(state.element, 'sparkline-tooltipcontainer', undefined); - var tooltipClassname = NETDATA.dataAttribute(state.element, 'sparkline-tooltipclassname', undefined); - var tooltipFormat = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformat', undefined); - var tooltipPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipprefix', undefined); - var tooltipSuffix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipsuffix', ' ' + state.units_current); - var tooltipSkipNull = NETDATA.dataAttributeBoolean(state.element, 'sparkline-tooltipskipnull', true); - var tooltipValueLookups = NETDATA.dataAttribute(state.element, 'sparkline-tooltipvaluelookups', undefined); - var tooltipFormatFieldlist = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlist', undefined); - var tooltipFormatFieldlistKey = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlistkey', undefined); - var numberFormatter = NETDATA.dataAttribute(state.element, 'sparkline-numberformatter', function(n){ return n.toFixed(2); }); - var numberDigitGroupSep = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupsep', undefined); - var numberDecimalMark = NETDATA.dataAttribute(state.element, 'sparkline-numberdecimalmark', undefined); - var numberDigitGroupCount = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupcount', undefined); - var animatedZooms = NETDATA.dataAttributeBoolean(state.element, 'sparkline-animatedzooms', false); - - if(spotColor === 'disable') spotColor=''; - if(minSpotColor === 'disable') minSpotColor=''; - if(maxSpotColor === 'disable') maxSpotColor=''; - - // state.log('sparkline type ' + type + ', lineColor: ' + lineColor + ', fillColor: ' + fillColor); - - state.sparkline_options = { - 
type: type, - lineColor: lineColor, - fillColor: fillColor, - chartRangeMin: chartRangeMin, - chartRangeMax: chartRangeMax, - composite: composite, - enableTagOptions: enableTagOptions, - tagOptionPrefix: tagOptionPrefix, - tagValuesAttribute: tagValuesAttribute, - disableHiddenCheck: disableHiddenCheck, - defaultPixelsPerValue: defaultPixelsPerValue, - spotColor: spotColor, - minSpotColor: minSpotColor, - maxSpotColor: maxSpotColor, - spotRadius: spotRadius, - valueSpots: valueSpots, - highlightSpotColor: highlightSpotColor, - highlightLineColor: highlightLineColor, - lineWidth: lineWidth, - normalRangeMin: normalRangeMin, - normalRangeMax: normalRangeMax, - drawNormalOnTop: drawNormalOnTop, - xvalues: xvalues, - chartRangeClip: chartRangeClip, - chartRangeMinX: chartRangeMinX, - chartRangeMaxX: chartRangeMaxX, - disableInteraction: disableInteraction, - disableTooltips: disableTooltips, - disableHighlight: disableHighlight, - highlightLighten: highlightLighten, - highlightColor: highlightColor, - tooltipContainer: tooltipContainer, - tooltipClassname: tooltipClassname, - tooltipChartTitle: state.title, - tooltipFormat: tooltipFormat, - tooltipPrefix: tooltipPrefix, - tooltipSuffix: tooltipSuffix, - tooltipSkipNull: tooltipSkipNull, - tooltipValueLookups: tooltipValueLookups, - tooltipFormatFieldlist: tooltipFormatFieldlist, - tooltipFormatFieldlistKey: tooltipFormatFieldlistKey, - numberFormatter: numberFormatter, - numberDigitGroupSep: numberDigitGroupSep, - numberDecimalMark: numberDecimalMark, - numberDigitGroupCount: numberDigitGroupCount, - animatedZooms: animatedZooms, - width: state.chartWidth(), - height: state.chartHeight() - }; - - $(state.element_chart).sparkline(data.result, state.sparkline_options); - - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // dygraph - - NETDATA.dygraph = { - smooth: false - }; - - NETDATA.dygraphToolboxPanAndZoom = function(state, after, before) { - if(after < state.netdata_first) - after = state.netdata_first; - - if(before > state.netdata_last) - before = state.netdata_last; - - state.setMode('zoom'); - NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.delay(); - state.tmp.dygraph_user_action = true; - state.tmp.dygraph_force_zoom = true; - // state.log('toolboxPanAndZoom'); - state.updateChartPanOrZoom(after, before); - NETDATA.globalPanAndZoom.setMaster(state, after, before); - }; - - NETDATA.dygraphSetSelection = function(state, t) { - if(typeof state.tmp.dygraph_instance !== 'undefined') { - var r = state.calculateRowForTime(t); - if(r !== -1) { - state.tmp.dygraph_instance.setSelection(r); - return true; - } - else { - state.tmp.dygraph_instance.clearSelection(); - state.legendShowUndefined(); - } - } - - return false; - }; - - NETDATA.dygraphClearSelection = function(state) { - if(typeof state.tmp.dygraph_instance !== 'undefined') { - state.tmp.dygraph_instance.clearSelection(); - } - return true; - }; - - NETDATA.dygraphSmoothInitialize = function(callback) { - $.ajax({ - url: NETDATA.dygraph_smooth_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.dygraph.smooth = true; - smoothPlotter.smoothing = 0.3; - }) - .fail(function() { - NETDATA.dygraph.smooth = false; - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - }; - - NETDATA.dygraphInitialize = function(callback) { - if(typeof netdataNoDygraphs 
=== 'undefined' || !netdataNoDygraphs) { - $.ajax({ - url: NETDATA.dygraph_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('dygraph', NETDATA.dygraph_js); - }) - .fail(function() { - NETDATA.chartLibraries.dygraph.enabled = false; - NETDATA.error(100, NETDATA.dygraph_js); - }) - .always(function() { - if(NETDATA.chartLibraries.dygraph.enabled === true && NETDATA.options.current.smooth_plot === true) - NETDATA.dygraphSmoothInitialize(callback); - else if(typeof callback === "function") - return callback(); - }); - } - else { - NETDATA.chartLibraries.dygraph.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.dygraphChartUpdate = function(state, data) { - var dygraph = state.tmp.dygraph_instance; - - if(typeof dygraph === 'undefined') - return NETDATA.dygraphChartCreate(state, data); - - // when the chart is not visible, and hidden - // if there is a window resize, dygraph detects - // its element size as 0x0. - // this will make it re-appear properly - - if(state.tm.last_unhidden > state.tmp.dygraph_last_rendered) - dygraph.resize(); - - var options = { - file: data.result.data, - colors: state.chartColors(), - labels: data.result.labels, - //labelsDivWidth: state.chartWidth() - 70, - includeZero: state.tmp.dygraph_include_zero, - visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names) - }; - - if(state.tmp.dygraph_chart_type === 'stacked') { - if(options.includeZero === true && state.dimensions_visibility.countSelected() < options.visibility.length) - options.includeZero = 0; - } - - if(!NETDATA.chartLibraries.dygraph.isSparkline(state)) { - options.ylabel = state.units_current; // (state.units_desired === 'auto')?"":state.units_current; - } - - if(state.tmp.dygraph_force_zoom === true) { - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('dygraphChartUpdate() forced zoom update'); - - options.dateWindow = (state.requested_padding !== null)?[ state.view_after, state.view_before ]:null; - //options.isZoomedIgnoreProgrammaticZoom = true; - state.tmp.dygraph_force_zoom = false; - } - else if(state.current.name !== 'auto') { - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('dygraphChartUpdate() loose update'); - } - else { - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('dygraphChartUpdate() strict update'); - - options.dateWindow = (state.requested_padding !== null)?[ state.view_after, state.view_before ]:null; - //options.isZoomedIgnoreProgrammaticZoom = true; - } - - options.valueRange = state.tmp.dygraph_options.valueRange; - - var oldMax = null, oldMin = null; - if (state.tmp.__commonMin !== null) { - state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0]; - oldMin = options.valueRange[0] = NETDATA.commonMin.get(state); - } - if (state.tmp.__commonMax !== null) { - state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1]; - oldMax = options.valueRange[1] = NETDATA.commonMax.get(state); - } - - if(state.tmp.dygraph_smooth_eligible === true) { - if((NETDATA.options.current.smooth_plot === true && state.tmp.dygraph_options.plotter !== smoothPlotter) - || (NETDATA.options.current.smooth_plot === false && state.tmp.dygraph_options.plotter === smoothPlotter)) { - NETDATA.dygraphChartCreate(state, data); - return; - } - } - - if(netdataSnapshotData !== null && 
NETDATA.globalPanAndZoom.isActive() === true && NETDATA.globalPanAndZoom.isMaster(state) === false) { - // pan and zoom on snapshots - options.dateWindow = [ NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms ]; - //options.isZoomedIgnoreProgrammaticZoom = true; - } - - if(NETDATA.chartLibraries.dygraph.isLogScale(state) === true) { - if(Array.isArray(options.valueRange) && options.valueRange[0] <= 0) - options.valueRange[0] = null; - } - - dygraph.updateOptions(options); - - var redraw = false; - if(oldMin !== null && oldMin > state.tmp.dygraph_instance.axes_[0].extremeRange[0]) { - state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0]; - options.valueRange[0] = NETDATA.commonMin.get(state); - redraw = true; - } - if(oldMax !== null && oldMax < state.tmp.dygraph_instance.axes_[0].extremeRange[1]) { - state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1]; - options.valueRange[1] = NETDATA.commonMax.get(state); - redraw = true; - } - - if(redraw === true) { - // state.log('forcing redraw to adapt to common- min/max'); - dygraph.updateOptions(options); - } - - state.tmp.dygraph_last_rendered = Date.now(); - return true; - }; - - NETDATA.dygraphChartCreate = function(state, data) { - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('dygraphChartCreate()'); - - state.tmp.dygraph_chart_type = NETDATA.dataAttribute(state.element, 'dygraph-type', state.chart.chart_type); - if(state.tmp.dygraph_chart_type === 'stacked' && data.dimensions === 1) state.tmp.dygraph_chart_type = 'area'; - if(state.tmp.dygraph_chart_type === 'stacked' && NETDATA.chartLibraries.dygraph.isLogScale(state) === true) state.tmp.dygraph_chart_type = 'area'; - - var highlightCircleSize = (NETDATA.chartLibraries.dygraph.isSparkline(state) === true)?3:4; - - var smooth = (NETDATA.dygraph.smooth === true) - ?(NETDATA.dataAttributeBoolean(state.element, 'dygraph-smooth', (state.tmp.dygraph_chart_type === 'line' && NETDATA.chartLibraries.dygraph.isSparkline(state) === false))) - :false; - - state.tmp.dygraph_include_zero = NETDATA.dataAttribute(state.element, 'dygraph-includezero', (state.tmp.dygraph_chart_type === 'stacked')); - - state.tmp.dygraph_options = { - colors: NETDATA.dataAttribute(state.element, 'dygraph-colors', state.chartColors()), - - // leave a few pixels empty on the right of the chart - rightGap: NETDATA.dataAttribute(state.element, 'dygraph-rightgap', 5), - showRangeSelector: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showrangeselector', false), - showRoller: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showroller', false), - title: NETDATA.dataAttribute(state.element, 'dygraph-title', state.title), - titleHeight: NETDATA.dataAttribute(state.element, 'dygraph-titleheight', 19), - legend: NETDATA.dataAttribute(state.element, 'dygraph-legend', 'always'), // we need this to get selection events - labels: data.result.labels, - labelsDiv: NETDATA.dataAttribute(state.element, 'dygraph-labelsdiv', state.element_legend_childs.hidden), - //labelsDivStyles: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivstyles', { 'fontSize':'1px' }), - //labelsDivWidth: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivwidth', state.chartWidth() - 70), - labelsSeparateLines: NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsseparatelines', true), - labelsShowZeroValues: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?false:NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsshowzerovalues', 
true), - labelsKMB: false, - labelsKMG2: false, - showLabelsOnHighlight: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showlabelsonhighlight', true), - hideOverlayOnMouseOut: NETDATA.dataAttributeBoolean(state.element, 'dygraph-hideoverlayonmouseout', true), - includeZero: state.tmp.dygraph_include_zero, - xRangePad: NETDATA.dataAttribute(state.element, 'dygraph-xrangepad', 0), - yRangePad: NETDATA.dataAttribute(state.element, 'dygraph-yrangepad', 1), - valueRange: NETDATA.dataAttribute(state.element, 'dygraph-valuerange', [ null, null ]), - ylabel: state.units_current, // (state.units_desired === 'auto')?"":state.units_current, - yLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-ylabelwidth', 12), - - // the function to plot the chart - plotter: null, - - // The width of the lines connecting data points. - // This can be used to increase the contrast or some graphs. - strokeWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokewidth', ((state.tmp.dygraph_chart_type === 'stacked')?0.1:((smooth === true)?1.5:0.7))), - strokePattern: NETDATA.dataAttribute(state.element, 'dygraph-strokepattern', undefined), - - // The size of the dot to draw on each point in pixels (see drawPoints). - // A dot is always drawn when a point is "isolated", - // i.e. there is a missing point on either side of it. - // This also controls the size of those dots. - drawPoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawpoints', false), - - // Draw points at the edges of gaps in the data. - // This improves visibility of small data segments or other data irregularities. - drawGapEdgePoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgapedgepoints', true), - connectSeparatedPoints: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?false:NETDATA.dataAttributeBoolean(state.element, 'dygraph-connectseparatedpoints', false), - pointSize: NETDATA.dataAttribute(state.element, 'dygraph-pointsize', 1), - - // enabling this makes the chart with little square lines - stepPlot: NETDATA.dataAttributeBoolean(state.element, 'dygraph-stepplot', false), - - // Draw a border around graph lines to make crossing lines more easily - // distinguishable. Useful for graphs with many lines. 
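The strokeWidth default above and the fillGraph/fillAlpha defaults just below all hinge on the resolved chart type: stacked charts get a hairline stroke and are drawn almost entirely by their fills, while plain line charts get thicker strokes, thicker still when smoothing is on. Condensed into a sketch (assuming chartType is one of 'line', 'area', 'stacked' and smooth is the flag computed above):

function strokeAndFillDefaults(chartType, smooth) {
    return {
        // stacked charts rely on fills, so the stroke is a hairline
        strokeWidth: (chartType === 'stacked') ? 0.1 : ((smooth === true) ? 1.5 : 0.7),
        // 'area' and 'stacked' fill under the lines, plain 'line' does not
        fillGraph: (chartType === 'area' || chartType === 'stacked')
    };
}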
- strokeBorderColor: NETDATA.dataAttribute(state.element, 'dygraph-strokebordercolor', NETDATA.themes.current.background), - strokeBorderWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokeborderwidth', (state.tmp.dygraph_chart_type === 'stacked')?0.0:0.0), - fillGraph: NETDATA.dataAttribute(state.element, 'dygraph-fillgraph', (state.tmp.dygraph_chart_type === 'area' || state.tmp.dygraph_chart_type === 'stacked')), - fillAlpha: NETDATA.dataAttribute(state.element, 'dygraph-fillalpha', - ((state.tmp.dygraph_chart_type === 'stacked') - ?NETDATA.options.current.color_fill_opacity_stacked - :NETDATA.options.current.color_fill_opacity_area) - ), - stackedGraph: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraph', (state.tmp.dygraph_chart_type === 'stacked')), - stackedGraphNaNFill: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraphnanfill', 'none'), - drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawaxis', true), - axisLabelFontSize: NETDATA.dataAttribute(state.element, 'dygraph-axislabelfontsize', 10), - axisLineColor: NETDATA.dataAttribute(state.element, 'dygraph-axislinecolor', NETDATA.themes.current.axis), - axisLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-axislinewidth', 1.0), - drawGrid: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgrid', true), - gridLinePattern: NETDATA.dataAttribute(state.element, 'dygraph-gridlinepattern', null), - gridLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-gridlinewidth', 1.0), - gridLineColor: NETDATA.dataAttribute(state.element, 'dygraph-gridlinecolor', NETDATA.themes.current.grid), - maxNumberWidth: NETDATA.dataAttribute(state.element, 'dygraph-maxnumberwidth', 8), - sigFigs: NETDATA.dataAttribute(state.element, 'dygraph-sigfigs', null), - digitsAfterDecimal: NETDATA.dataAttribute(state.element, 'dygraph-digitsafterdecimal', 2), - valueFormatter: NETDATA.dataAttribute(state.element, 'dygraph-valueformatter', undefined), - highlightCircleSize: NETDATA.dataAttribute(state.element, 'dygraph-highlightcirclesize', highlightCircleSize), - highlightSeriesOpts: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesopts', null), // TOO SLOW: { strokeWidth: 1.5 }, - highlightSeriesBackgroundAlpha: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesbackgroundalpha', null), // TOO SLOW: (state.tmp.dygraph_chart_type === 'stacked')?0.7:0.5, - pointClickCallback: NETDATA.dataAttribute(state.element, 'dygraph-pointclickcallback', undefined), - visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names), - logscale: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?'y':undefined, - - axes: { - x: { - pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-xpixelsperlabel', 50), - ticker: Dygraph.dateTicker, - axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-xaxislabelwidth', 60), - axisLabelFormatter: function (d, gran) { - void(gran); - return NETDATA.dateTime.xAxisTimeString(d); - } - }, - y: { - logscale: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?true:undefined, - pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-ypixelsperlabel', 15), - axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-yaxislabelwidth', 50), - axisLabelFormatter: function (y) { - - // unfortunately, we have to call this every single time - state.legendFormatValueDecimalsFromMinMax( - this.axes_[0].extremeRange[0], - this.axes_[0].extremeRange[1] - ); - - var old_units = this.user_attrs_.ylabel; - var v = 
state.legendFormatValue(y); - var new_units = state.units_current; - - if(state.units_desired === 'auto' && typeof old_units !== 'undefined' && new_units !== old_units && !NETDATA.chartLibraries.dygraph.isSparkline(state)) { - // console.log(this); - // state.log('units discrepancy: old = ' + old_units + ', new = ' + new_units); - var len = this.plugins_.length; - while(len--) { - // console.log(this.plugins_[len]); - if(typeof this.plugins_[len].plugin.ylabel_div_ !== 'undefined' - && this.plugins_[len].plugin.ylabel_div_ !== null - && typeof this.plugins_[len].plugin.ylabel_div_.children !== 'undefined' - && this.plugins_[len].plugin.ylabel_div_.children !== null - && typeof this.plugins_[len].plugin.ylabel_div_.children[0].children !== 'undefined' - && this.plugins_[len].plugin.ylabel_div_.children[0].children !== null - ) { - this.plugins_[len].plugin.ylabel_div_.children[0].children[0].innerHTML = new_units; - this.user_attrs_.ylabel = new_units; - break; - } - } - - if(len < 0) - state.log('units discrepancy, but cannot find dygraphs div to change: old = ' + old_units + ', new = ' + new_units); - } - - return v; - } - } - }, - legendFormatter: function(data) { - if(state.tmp.dygraph_mouse_down === true) - return; - - var elements = state.element_legend_childs; - - // if the hidden div is not there - // we are not managing the legend - if(elements.hidden === null) return; - - if (typeof data.x !== 'undefined') { - state.legendSetDate(data.x); - var i = data.series.length; - while(i--) { - var series = data.series[i]; - if(series.isVisible === true) - state.legendSetLabelValue(series.label, series.y); - else - state.legendSetLabelValue(series.label, null); - } - } - - return ''; - }, - drawCallback: function(dygraph, is_initial) { - - // the user has panned the chart and this is called to re-draw the chart - // 1. refresh this chart by adding data to it - // 2. notify all the other charts about the update they need - - // to prevent an infinite loop (feedback), we use - // state.tmp.dygraph_user_action - // - when true, this is initiated by a user - // - when false, this is feedback - - if(state.current.name !== 'auto' && state.tmp.dygraph_user_action === true) { - state.tmp.dygraph_user_action = false; - - var x_range = dygraph.xAxisRange(); - var after = Math.round(x_range[0]); - var before = Math.round(x_range[1]); - - if(NETDATA.options.debug.dygraph === true) - state.log('dygraphDrawCallback(dygraph, ' + is_initial + '): mode ' + state.current.name + ' ' + (after / 1000).toString() + ' - ' + (before / 1000).toString()); - //console.log(state); - - if(before <= state.netdata_last && after >= state.netdata_first) - // update only when we are within the data limits - state.updateChartPanOrZoom(after, before); - } - }, - zoomCallback: function(minDate, maxDate, yRanges) { - - // the user has selected a range on the chart - // 1. refresh this chart by adding data to it - // 2. 
notify all the other charts about the update they need - - void(yRanges); - - if(NETDATA.options.debug.dygraph === true) - state.log('dygraphZoomCallback(): ' + state.current.name); - - NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.delay(); - state.setMode('zoom'); - - // refresh it to the greatest possible zoom level - state.tmp.dygraph_user_action = true; - state.tmp.dygraph_force_zoom = true; - state.updateChartPanOrZoom(minDate, maxDate); - }, - highlightCallback: function(event, x, points, row, seriesName) { - void(seriesName); - - state.pauseChart(); - - // there is a bug in dygraph when the chart is zoomed enough - // the time it thinks is selected is wrong - // here we calculate the time t based on the row number selected - // which is ok - // var t = state.data_after + row * state.data_update_every; - // console.log('row = ' + row + ', x = ' + x + ', t = ' + t + ' ' + ((t === x)?'SAME':(Math.abs(x-t)<=state.data_update_every)?'SIMILAR':'DIFFERENT') + ', rows in db: ' + state.data_points + ' visible(x) = ' + state.timeIsVisible(x) + ' visible(t) = ' + state.timeIsVisible(t) + ' r(x) = ' + state.calculateRowForTime(x) + ' r(t) = ' + state.calculateRowForTime(t) + ' range: ' + state.data_after + ' - ' + state.data_before + ' real: ' + state.data.after + ' - ' + state.data.before + ' every: ' + state.data_update_every); - - if(state.tmp.dygraph_mouse_down !== true) - NETDATA.globalSelectionSync.sync(state, x); - - // fix legend zIndex using the internal structures of dygraph legend module - // this works, but it is a hack! - // state.tmp.dygraph_instance.plugins_[0].plugin.legend_div_.style.zIndex = 10000; - }, - unhighlightCallback: function(event) { - void(event); - - if(state.tmp.dygraph_mouse_down === true) - return; - - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('dygraphUnhighlightCallback()'); - - state.unpauseChart(); - NETDATA.globalSelectionSync.stop(); - }, - underlayCallback: function(canvas, area, g) { - - // the chart is about to be drawn - // this function renders global highlighted time-frame - - if(NETDATA.globalChartUnderlay.isActive()) { - var after = NETDATA.globalChartUnderlay.after; - var before = NETDATA.globalChartUnderlay.before; - - if(after < state.view_after) - after = state.view_after; - - if(before > state.view_before) - before = state.view_before; - - if(after < before) { - var bottom_left = g.toDomCoords(after, -20); - var top_right = g.toDomCoords(before, +20); - - var left = bottom_left[0]; - var right = top_right[0]; - - canvas.fillStyle = NETDATA.themes.current.highlight; - canvas.fillRect(left, area.y, right - left, area.h); - } - } - }, - interactionModel : { - mousedown: function(event, dygraph, context) { - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('interactionModel.mousedown()'); - - state.tmp.dygraph_user_action = true; - - if(NETDATA.options.debug.dygraph === true) - state.log('dygraphMouseDown()'); - - // Right-click should not initiate anything. 
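The mousedown handler below is effectively a small dispatch table: the pressed button picks the default gesture and the modifier keys override it, symmetrically for the left and middle buttons. Summarized as data (a sketch, not code from the original file):

var mouseGestures = {
    // button 0 (left) and button 1 (middle); button 2 (right) is ignored
    left:   { plain: 'pan',  shift: 'zoom', altCtrlMeta: 'highlight' },
    middle: { plain: 'zoom', shift: 'pan',  altCtrlMeta: 'highlight' }
};

function gestureFor(event) {
    var row = (event.button === 1) ? mouseGestures.middle : mouseGestures.left;
    if (event.shiftKey) return row.shift;
    if (event.altKey || event.ctrlKey || event.metaKey) return row.altCtrlMeta;
    return row.plain;
}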
- if(event.button && event.button === 2) return; - - NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.delay(); - - state.tmp.dygraph_mouse_down = true; - context.initializeMouseDown(event, dygraph, context); - - //console.log(event); - if(event.button && event.button === 1) { - if (event.shiftKey) { - //console.log('middle mouse button dragging (PAN)'); - - state.setMode('pan'); - // NETDATA.globalSelectionSync.delay(); - state.tmp.dygraph_highlight_after = null; - Dygraph.startPan(event, dygraph, context); - } - else if(event.altKey || event.ctrlKey || event.metaKey) { - //console.log('middle mouse button highlight'); - - if (!(event.offsetX && event.offsetY)) { - event.offsetX = event.layerX - event.target.offsetLeft; - event.offsetY = event.layerY - event.target.offsetTop; - } - state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX); - Dygraph.startZoom(event, dygraph, context); - } - else { - //console.log('middle mouse button selection for zoom (ZOOM)'); - - state.setMode('zoom'); - // NETDATA.globalSelectionSync.delay(); - state.tmp.dygraph_highlight_after = null; - Dygraph.startZoom(event, dygraph, context); - } - } - else { - if (event.shiftKey) { - //console.log('left mouse button selection for zoom (ZOOM)'); - - state.setMode('zoom'); - // NETDATA.globalSelectionSync.delay(); - state.tmp.dygraph_highlight_after = null; - Dygraph.startZoom(event, dygraph, context); - } - else if(event.altKey || event.ctrlKey || event.metaKey) { - //console.log('left mouse button highlight'); - - if (!(event.offsetX && event.offsetY)) { - event.offsetX = event.layerX - event.target.offsetLeft; - event.offsetY = event.layerY - event.target.offsetTop; - } - state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX); - Dygraph.startZoom(event, dygraph, context); - } - else { - //console.log('left mouse button dragging (PAN)'); - - state.setMode('pan'); - // NETDATA.globalSelectionSync.delay(); - state.tmp.dygraph_highlight_after = null; - Dygraph.startPan(event, dygraph, context); - } - } - }, - mousemove: function(event, dygraph, context) { - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('interactionModel.mousemove()'); - - if(state.tmp.dygraph_highlight_after !== null) { - //console.log('highlight selection...'); - - NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.delay(); - - state.tmp.dygraph_user_action = true; - Dygraph.moveZoom(event, dygraph, context); - event.preventDefault(); - } - else if(context.isPanning) { - //console.log('panning...'); - - NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.delay(); - - state.tmp.dygraph_user_action = true; - //NETDATA.globalSelectionSync.stop(); - //NETDATA.globalSelectionSync.delay(); - state.setMode('pan'); - context.is2DPan = false; - Dygraph.movePan(event, dygraph, context); - } - else if(context.isZooming) { - //console.log('zooming...'); - - NETDATA.globalSelectionSync.stop(); - NETDATA.globalSelectionSync.delay(); - - state.tmp.dygraph_user_action = true; - //NETDATA.globalSelectionSync.stop(); - //NETDATA.globalSelectionSync.delay(); - state.setMode('zoom'); - Dygraph.moveZoom(event, dygraph, context); - } - }, - mouseup: function(event, dygraph, context) { - state.tmp.dygraph_mouse_down = false; - - if(NETDATA.options.debug.dygraph === true || state.debug === true) - state.log('interactionModel.mouseup()'); - - if(state.tmp.dygraph_highlight_after !== null) { - //console.log('done highlight selection'); - - 
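The event.offsetX fallback used twice above, and again just below, papers over older Firefox releases that only exposed layerX/layerY. Factored out, the idiom is roughly:

function ensureOffsetXY(event) {
    // derive offsetX/offsetY relative to the event target when the
    // browser does not supply them natively
    if (!(event.offsetX && event.offsetY)) {
        event.offsetX = event.layerX - event.target.offsetLeft;
        event.offsetY = event.layerY - event.target.offsetTop;
    }
    return event;
}

Note that the truthiness test also fires when the pointer sits exactly at offset 0; a typeof event.offsetX === 'number' check would be stricter.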
NETDATA.globalSelectionSync.stop();
-                 NETDATA.globalSelectionSync.delay();
- 
-                 if (!(event.offsetX && event.offsetY)){
-                     event.offsetX = event.layerX - event.target.offsetLeft;
-                     event.offsetY = event.layerY - event.target.offsetTop;
-                 }
- 
-                 NETDATA.globalChartUnderlay.set(state
-                     , state.tmp.dygraph_highlight_after
-                     , dygraph.toDataXCoord(event.offsetX)
-                     , state.view_after
-                     , state.view_before
-                 );
- 
-                 state.tmp.dygraph_highlight_after = null;
- 
-                 context.isZooming = false;
-                 dygraph.clearZoomRect_();
-                 dygraph.drawGraph_(false);
- 
-                 // refresh all the charts immediately
-                 NETDATA.options.auto_refresher_stop_until = 0;
-             }
-             else if (context.isPanning) {
-                 //console.log('done panning');
- 
-                 NETDATA.globalSelectionSync.stop();
-                 NETDATA.globalSelectionSync.delay();
- 
-                 state.tmp.dygraph_user_action = true;
-                 Dygraph.endPan(event, dygraph, context);
- 
-                 // refresh all the charts immediately
-                 NETDATA.options.auto_refresher_stop_until = 0;
-             }
-             else if (context.isZooming) {
-                 //console.log('done zooming');
- 
-                 NETDATA.globalSelectionSync.stop();
-                 NETDATA.globalSelectionSync.delay();
- 
-                 state.tmp.dygraph_user_action = true;
-                 Dygraph.endZoom(event, dygraph, context);
- 
-                 // refresh all the charts immediately
-                 NETDATA.options.auto_refresher_stop_until = 0;
-             }
-         },
-         click: function(event, dygraph, context) {
-             void(dygraph);
-             void(context);
- 
-             if(NETDATA.options.debug.dygraph === true || state.debug === true)
-                 state.log('interactionModel.click()');
- 
-             event.preventDefault();
-         },
-         dblclick: function(event, dygraph, context) {
-             void(event);
-             void(dygraph);
-             void(context);
- 
-             if(NETDATA.options.debug.dygraph === true || state.debug === true)
-                 state.log('interactionModel.dblclick()');
-             NETDATA.resetAllCharts(state);
-         },
-         wheel: function(event, dygraph, context) {
-             void(context);
- 
-             if(NETDATA.options.debug.dygraph === true || state.debug === true)
-                 state.log('interactionModel.wheel()');
- 
-             // Take the offset of a mouse event on the dygraph canvas and
-             // convert it to a pair of percentages from the bottom left.
-             // (Not top left, bottom is where the lower value is.)
-             function offsetToPercentage(g, offsetX, offsetY) {
-                 // This is calculating the pixel offset of the leftmost date.
-                 var xOffset = g.toDomCoords(g.xAxisRange()[0], null)[0];
-                 var yar0 = g.yAxisRange(0);
- 
-                 // This is calculating the pixel of the highest value. (Top pixel)
-                 var yOffset = g.toDomCoords(null, yar0[1])[1];
- 
-                 // x y w and h are relative to the corner of the drawing area,
-                 // so that the upper corner of the drawing area is (0, 0).
-                 var x = offsetX - xOffset;
-                 var y = offsetY - yOffset;
- 
-                 // This is computing the rightmost pixel, effectively defining the
-                 // width.
-                 var w = g.toDomCoords(g.xAxisRange()[1], null)[0] - xOffset;
- 
-                 // This is computing the lowest pixel, effectively defining the height.
-                 var h = g.toDomCoords(null, yar0[0])[1] - yOffset;
- 
-                 // Percentage from the left.
-                 var xPct = w === 0 ? 0 : (x / w);
-                 // Percentage from the top.
-                 var yPct = h === 0 ? 0 : (y / h);
- 
-                 // The (1-) part below changes it from "% distance down from the top"
-                 // to "% distance up from the bottom".
-                 return [xPct, (1-yPct)];
-             }
- 
-             // Adjusts [x, y] toward each other by zoomInPercentage%
-             // Split it so the left/bottom axis gets xBias/yBias of that change and
-             // right/top gets (1-xBias)/(1-yBias) of that change.
-             //
-             // If a bias is missing it splits it down the middle.
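-             // e.g. for an axis [0, 100], zoomInPercentage 0.25 and bias 0.5,
-             // adjustAxis() below computes delta = 100, increment = 25 -> [12.5, 87.5]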
-             function zoomRange(g, zoomInPercentage, xBias, yBias) {
-                 xBias = xBias || 0.5;
-                 yBias = yBias || 0.5;
- 
-                 function adjustAxis(axis, zoomInPercentage, bias) {
-                     var delta = axis[1] - axis[0];
-                     var increment = delta * zoomInPercentage;
-                     var foo = [increment * bias, increment * (1-bias)];
- 
-                     return [ axis[0] + foo[0], axis[1] - foo[1] ];
-                 }
- 
-                 var yAxes = g.yAxisRanges();
-                 var newYAxes = [];
-                 for (var i = 0; i < yAxes.length; i++) {
-                     newYAxes[i] = adjustAxis(yAxes[i], zoomInPercentage, yBias);
-                 }
- 
-                 return adjustAxis(g.xAxisRange(), zoomInPercentage, xBias);
-             }
- 
-             if(event.altKey || event.shiftKey) {
-                 state.tmp.dygraph_user_action = true;
- 
-                 NETDATA.globalSelectionSync.stop();
-                 NETDATA.globalSelectionSync.delay();
- 
-                 // http://dygraphs.com/gallery/interaction-api.js
-                 var normal_def;
-                 if(typeof event.wheelDelta === 'number' && !isNaN(event.wheelDelta))
-                     // chrome
-                     normal_def = event.wheelDelta / 40;
-                 else
-                     // firefox
-                     normal_def = event.deltaY * -1.2;
- 
-                 var normal = (event.detail) ? event.detail * -1 : normal_def;
-                 var percentage = normal / 50;
- 
-                 if (!(event.offsetX && event.offsetY)){
-                     event.offsetX = event.layerX - event.target.offsetLeft;
-                     event.offsetY = event.layerY - event.target.offsetTop;
-                 }
- 
-                 var percentages = offsetToPercentage(dygraph, event.offsetX, event.offsetY);
-                 var xPct = percentages[0];
-                 var yPct = percentages[1];
- 
-                 var new_x_range = zoomRange(dygraph, percentage, xPct, yPct);
-                 var after = new_x_range[0];
-                 var before = new_x_range[1];
- 
-                 var first = state.netdata_first + state.data_update_every;
-                 var last = state.netdata_last + state.data_update_every;
- 
-                 if(before > last) {
-                     after -= (before - last);
-                     before = last;
-                 }
-                 if(after < first) {
-                     after = first;
-                 }
- 
-                 state.setMode('zoom');
-                 state.updateChartPanOrZoom(after, before, function() {
-                     dygraph.updateOptions({ dateWindow: [ after, before ] });
-                 });
- 
-                 event.preventDefault();
-             }
-         },
-         touchstart: function(event, dygraph, context) {
-             state.tmp.dygraph_mouse_down = true;
- 
-             if(NETDATA.options.debug.dygraph === true || state.debug === true)
-                 state.log('interactionModel.touchstart()');
- 
-             state.tmp.dygraph_user_action = true;
-             state.setMode('zoom');
-             state.pauseChart();
- 
-             NETDATA.globalSelectionSync.stop();
-             NETDATA.globalSelectionSync.delay();
- 
-             Dygraph.defaultInteractionModel.touchstart(event, dygraph, context);
- 
-             // we overwrite the touch directions at the end,
-             // to override dygraph's internal default
-             context.touchDirections = { x: true, y: false };
- 
-             state.dygraph_last_touch_start = Date.now();
-             state.dygraph_last_touch_move = 0;
- 
-             if(typeof event.touches[0].pageX === 'number')
-                 state.dygraph_last_touch_page_x = event.touches[0].pageX;
-             else
-                 state.dygraph_last_touch_page_x = 0;
-         },
-         touchmove: function(event, dygraph, context) {
-             if(NETDATA.options.debug.dygraph === true || state.debug === true)
-                 state.log('interactionModel.touchmove()');
- 
-             NETDATA.globalSelectionSync.stop();
-             NETDATA.globalSelectionSync.delay();
- 
-             state.tmp.dygraph_user_action = true;
-             Dygraph.defaultInteractionModel.touchmove(event, dygraph, context);
- 
-             state.dygraph_last_touch_move = Date.now();
-         },
-         touchend: function(event, dygraph, context) {
-             state.tmp.dygraph_mouse_down = false;
- 
-             if(NETDATA.options.debug.dygraph === true || state.debug === true)
-                 state.log('interactionModel.touchend()');
- 
-             NETDATA.globalSelectionSync.stop();
-             NETDATA.globalSelectionSync.delay();
- 
-             state.tmp.dygraph_user_action = true;
- 
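-             // let dygraph finish the default touch handling first;
-             // tap (selection) and double-tap (reset) are detected below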
Dygraph.defaultInteractionModel.touchend(event, dygraph, context);
- 
-             // if it didn't move, it is a selection
-             if(state.dygraph_last_touch_move === 0 && state.dygraph_last_touch_page_x !== 0) {
-                 NETDATA.globalSelectionSync.dont_sync_before = 0;
-                 NETDATA.globalSelectionSync.setMaster(state);
- 
-                 // internal API of dygraph
-                 var pct = (state.dygraph_last_touch_page_x - (dygraph.plotter_.area.x + state.element.getBoundingClientRect().left)) / dygraph.plotter_.area.w;
-                 // console.log('pct: ' + pct.toString());
- 
-                 var t = Math.round(state.view_after + (state.view_before - state.view_after) * pct);
-                 if(NETDATA.dygraphSetSelection(state, t) === true) {
-                     NETDATA.globalSelectionSync.sync(state, t);
-                 }
-             }
- 
-             // if it was a double tap within the double-click time, reset the charts
-             var now = Date.now();
-             if(typeof state.dygraph_last_touch_end !== 'undefined') {
-                 if(state.dygraph_last_touch_move === 0) {
-                     var dt = now - state.dygraph_last_touch_end;
-                     if(dt <= NETDATA.options.current.double_click_speed)
-                         NETDATA.resetAllCharts(state);
-                 }
-             }
- 
-             // remember the timestamp of the last touch end
-             state.dygraph_last_touch_end = now;
- 
-             // refresh all the charts immediately
-             NETDATA.options.auto_refresher_stop_until = 0;
-         }
-     }
- };
- 
- if(NETDATA.chartLibraries.dygraph.isLogScale(state) === true) {
-     if(Array.isArray(state.tmp.dygraph_options.valueRange) && state.tmp.dygraph_options.valueRange[0] <= 0)
-         state.tmp.dygraph_options.valueRange[0] = null;
- }
- 
- if(NETDATA.chartLibraries.dygraph.isSparkline(state) === true) {
-     state.tmp.dygraph_options.drawGrid = false;
-     state.tmp.dygraph_options.drawAxis = false;
-     state.tmp.dygraph_options.title = undefined;
-     state.tmp.dygraph_options.ylabel = undefined;
-     state.tmp.dygraph_options.yLabelWidth = 0;
-     //state.tmp.dygraph_options.labelsDivWidth = 120;
-     //state.tmp.dygraph_options.labelsDivStyles.width = '120px';
-     state.tmp.dygraph_options.labelsSeparateLines = true;
-     state.tmp.dygraph_options.rightGap = 0;
-     state.tmp.dygraph_options.yRangePad = 1;
- }
- 
- if(smooth === true) {
-     state.tmp.dygraph_smooth_eligible = true;
- 
-     if(NETDATA.options.current.smooth_plot === true)
-         state.tmp.dygraph_options.plotter = smoothPlotter;
- }
- else state.tmp.dygraph_smooth_eligible = false;
- 
- if(netdataSnapshotData !== null && NETDATA.globalPanAndZoom.isActive() === true && NETDATA.globalPanAndZoom.isMaster(state) === false) {
-     // pan and zoom on snapshots
-     state.tmp.dygraph_options.dateWindow = [ NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms ];
-     //state.tmp.dygraph_options.isZoomedIgnoreProgrammaticZoom = true;
- }
- 
- state.tmp.dygraph_instance = new Dygraph(state.element_chart,
-     data.result.data, state.tmp.dygraph_options);
- 
- state.tmp.dygraph_force_zoom = false;
- state.tmp.dygraph_user_action = false;
- state.tmp.dygraph_last_rendered = Date.now();
- state.tmp.dygraph_highlight_after = null;
- 
- if(state.tmp.dygraph_options.valueRange[0] === null && state.tmp.dygraph_options.valueRange[1] === null) {
-     if (typeof state.tmp.dygraph_instance.axes_[0].extremeRange !== 'undefined') {
-         state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
-         state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
-     }
-     else {
-         state.log('incompatible version of Dygraph detected');
-         state.tmp.__commonMin = null;
-         state.tmp.__commonMax = null;
-     }
- }
- else {
-     // if the user gave a valueRange, respect it
-     state.tmp.__commonMin = null;
-     state.tmp.__commonMax = null;
- }
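- // __commonMin / __commonMax hold the group names from data-common-min / data-common-max,
- // used by NETDATA.commonMin / NETDATA.commonMax to let several charts share a y-range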
- - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // morris - - NETDATA.morrisInitialize = function(callback) { - if(typeof netdataNoMorris === 'undefined' || !netdataNoMorris) { - - // morris requires raphael - if(!NETDATA.chartLibraries.raphael.initialized) { - if(NETDATA.chartLibraries.raphael.enabled) { - NETDATA.raphaelInitialize(function() { - NETDATA.morrisInitialize(callback); - }); - } - else { - NETDATA.chartLibraries.morris.enabled = false; - if(typeof callback === "function") - return callback(); - } - } - else { - NETDATA._loadCSS(NETDATA.morris_css); - - $.ajax({ - url: NETDATA.morris_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('morris', NETDATA.morris_js); - }) - .fail(function() { - NETDATA.chartLibraries.morris.enabled = false; - NETDATA.error(100, NETDATA.morris_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - } - else { - NETDATA.chartLibraries.morris.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.morrisChartUpdate = function(state, data) { - state.morris_instance.setData(data.result.data); - return true; - }; - - NETDATA.morrisChartCreate = function(state, data) { - - state.morris_options = { - element: state.element_chart.id, - data: data.result.data, - xkey: 'time', - ykeys: data.dimension_names, - labels: data.dimension_names, - lineWidth: 2, - pointSize: 3, - smooth: true, - hideHover: 'auto', - parseTime: true, - continuousLine: false, - behaveLikeLine: false - }; - - if(state.chart.chart_type === 'line') - state.morris_instance = new Morris.Line(state.morris_options); - - else if(state.chart.chart_type === 'area') { - state.morris_options.behaveLikeLine = true; - state.morris_instance = new Morris.Area(state.morris_options); - } - else // stacked - state.morris_instance = new Morris.Area(state.morris_options); - - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // raphael - - NETDATA.raphaelInitialize = function(callback) { - if(typeof netdataStopRaphael === 'undefined' || !netdataStopRaphael) { - $.ajax({ - url: NETDATA.raphael_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('raphael', NETDATA.raphael_js); - }) - .fail(function() { - NETDATA.chartLibraries.raphael.enabled = false; - NETDATA.error(100, NETDATA.raphael_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - else { - NETDATA.chartLibraries.raphael.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.raphaelChartUpdate = function(state, data) { - $(state.element_chart).raphael(data.result, { - width: state.chartWidth(), - height: state.chartHeight() - }); - - return false; - }; - - NETDATA.raphaelChartCreate = function(state, data) { - $(state.element_chart).raphael(data.result, { - width: state.chartWidth(), - height: state.chartHeight() - }); - - return false; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // C3 - - NETDATA.c3Initialize = function(callback) { - if(typeof netdataNoC3 === 'undefined' || !netdataNoC3) { 
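-         // as with netdataNoMorris above, a page may define a global
-         // netdataNoC3 = true to keep this chart library from loading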
- - // C3 requires D3 - if(!NETDATA.chartLibraries.d3.initialized) { - if(NETDATA.chartLibraries.d3.enabled) { - NETDATA.d3Initialize(function() { - NETDATA.c3Initialize(callback); - }); - } - else { - NETDATA.chartLibraries.c3.enabled = false; - if(typeof callback === "function") - return callback(); - } - } - else { - NETDATA._loadCSS(NETDATA.c3_css); - - $.ajax({ - url: NETDATA.c3_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('c3', NETDATA.c3_js); - }) - .fail(function() { - NETDATA.chartLibraries.c3.enabled = false; - NETDATA.error(100, NETDATA.c3_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - } - else { - NETDATA.chartLibraries.c3.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.c3ChartUpdate = function(state, data) { - state.c3_instance.destroy(); - return NETDATA.c3ChartCreate(state, data); - - //state.c3_instance.load({ - // rows: data.result, - // unload: true - //}); - - //return true; - }; - - NETDATA.c3ChartCreate = function(state, data) { - - state.element_chart.id = 'c3-' + state.uuid; - // console.log('id = ' + state.element_chart.id); - - state.c3_instance = c3.generate({ - bindto: '#' + state.element_chart.id, - size: { - width: state.chartWidth(), - height: state.chartHeight() - }, - color: { - pattern: state.chartColors() - }, - data: { - x: 'time', - rows: data.result, - type: (state.chart.chart_type === 'line')?'spline':'area-spline' - }, - axis: { - x: { - type: 'timeseries', - tick: { - format: function(x) { - return NETDATA.dateTime.xAxisTimeString(x); - } - } - } - }, - grid: { - x: { - show: true - }, - y: { - show: true - } - }, - point: { - show: false - }, - line: { - connectNull: false - }, - transition: { - duration: 0 - }, - interaction: { - enabled: true - } - }); - - // console.log(state.c3_instance); - - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // d3pie - - NETDATA.d3pieInitialize = function(callback) { - if(typeof netdataNoD3pie === 'undefined' || !netdataNoD3pie) { - - // d3pie requires D3 - if(!NETDATA.chartLibraries.d3.initialized) { - if(NETDATA.chartLibraries.d3.enabled) { - NETDATA.d3Initialize(function() { - NETDATA.d3pieInitialize(callback); - }); - } - else { - NETDATA.chartLibraries.d3pie.enabled = false; - if(typeof callback === "function") - return callback(); - } - } - else { - $.ajax({ - url: NETDATA.d3pie_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('d3pie', NETDATA.d3pie_js); - }) - .fail(function() { - NETDATA.chartLibraries.d3pie.enabled = false; - NETDATA.error(100, NETDATA.d3pie_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - } - else { - NETDATA.chartLibraries.d3pie.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.d3pieSetContent = function(state, data, index) { - state.legendFormatValueDecimalsFromMinMax( - data.min, - data.max - ); - - var content = []; - var colors = state.chartColors(); - var len = data.result.labels.length; - for(var i = 1; i < len ; i++) { - var label = data.result.labels[i]; - var value = data.result.data[index][label]; - var color = colors[i - 1]; - - if(value !== null && value 
> 0) {
-             content.push({
-                 label: label,
-                 value: value,
-                 color: color
-             });
-         }
-     }
- 
-     if(content.length === 0)
-         content.push({
-             label: 'no data',
-             value: 100,
-             color: '#666666'
-         });
- 
-     state.tmp.d3pie_last_slot = index;
-     return content;
- };
- 
- NETDATA.d3pieDateRange = function(state, data, index) {
-     var dt = Math.round((data.before - data.after + 1) / data.points);
-     var dt_str = NETDATA.seconds4human(dt);
- 
-     var before = data.result.data[index].time;
-     var after = before - (dt * 1000);
- 
-     var d1 = NETDATA.dateTime.localeDateString(after);
-     var t1 = NETDATA.dateTime.localeTimeString(after);
-     var d2 = NETDATA.dateTime.localeDateString(before);
-     var t2 = NETDATA.dateTime.localeTimeString(before);
- 
-     if(d1 === d2)
-         return d1 + ' ' + t1 + ' to ' + t2 + ', ' + dt_str;
- 
-     return d1 + ' ' + t1 + ' to ' + d2 + ' ' + t2 + ', ' + dt_str;
- };
- 
- NETDATA.d3pieSetSelection = function(state, t) {
-     if(state.timeIsVisible(t) !== true)
-         return NETDATA.d3pieClearSelection(state, true);
- 
-     var slot = state.calculateRowForTime(t);
-     slot = state.data.result.data.length - slot - 1;
- 
-     if(slot < 0 || slot >= state.data.result.data.length)
-         return NETDATA.d3pieClearSelection(state, true);
- 
-     if(state.tmp.d3pie_last_slot === slot) {
-         // we already show this slot; don't do anything
-         return true;
-     }
- 
-     if(state.tmp.d3pie_timer === undefined) {
-         state.tmp.d3pie_timer = NETDATA.timeout.set(function() {
-             state.tmp.d3pie_timer = undefined;
-             NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, state.data, slot), NETDATA.d3pieDateRange(state, state.data, slot));
-         }, 0);
-     }
- 
-     return true;
- };
- 
- NETDATA.d3pieClearSelection = function(state, force) {
-     if(typeof state.tmp.d3pie_timer !== 'undefined') {
-         NETDATA.timeout.clear(state.tmp.d3pie_timer);
-         state.tmp.d3pie_timer = undefined;
-     }
- 
-     if(state.isAutoRefreshable() === true && state.data !== null && force !== true) {
-         NETDATA.d3pieChartUpdate(state, state.data);
-     }
-     else {
-         if(state.tmp.d3pie_last_slot !== -1) {
-             state.tmp.d3pie_last_slot = -1;
-             NETDATA.d3pieChange(state, [{label: 'no data', value: 1, color: '#666666'}], 'no data available');
-         }
-     }
- 
-     return true;
- };
- 
- NETDATA.d3pieChange = function(state, content, footer) {
-     if(state.d3pie_forced_subtitle === null) {
-         //state.d3pie_instance.updateProp("header.subtitle.text", state.units_current);
-         state.d3pie_instance.options.header.subtitle.text = state.units_current;
-     }
- 
-     if(state.d3pie_forced_footer === null) {
-         //state.d3pie_instance.updateProp("footer.text", footer);
-         state.d3pie_instance.options.footer.text = footer;
-     }
- 
-     //state.d3pie_instance.updateProp("data.content", content);
-     state.d3pie_instance.options.data.content = content;
-     state.d3pie_instance.destroy();
-     state.d3pie_instance.recreate();
-     return true;
- };
- 
- NETDATA.d3pieChartUpdate = function(state, data) {
-     return NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, data, 0), NETDATA.d3pieDateRange(state, data, 0));
- };
- 
- NETDATA.d3pieChartCreate = function(state, data) {
- 
-     state.element_chart.id = 'd3pie-' + state.uuid;
-     // console.log('id = ' + state.element_chart.id);
- 
-     var content = NETDATA.d3pieSetContent(state, data, 0);
- 
-     state.d3pie_forced_title = NETDATA.dataAttribute(state.element, 'd3pie-title', null);
-     state.d3pie_forced_subtitle = NETDATA.dataAttribute(state.element, 'd3pie-subtitle', null);
-     state.d3pie_forced_footer = NETDATA.dataAttribute(state.element, 'd3pie-footer', null);
- 
-     state.d3pie_options = {
-         header: {
-             title: {
-                 text:
(state.d3pie_forced_title !== null) ? state.d3pie_forced_title : state.title, - color: NETDATA.dataAttribute(state.element, 'd3pie-title-color', NETDATA.themes.current.d3pie.title), - fontSize: NETDATA.dataAttribute(state.element, 'd3pie-title-fontsize', 12), - fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-title-fontweight', "bold"), - font: NETDATA.dataAttribute(state.element, 'd3pie-title-font', "arial") - }, - subtitle: { - text: (state.d3pie_forced_subtitle !== null) ? state.d3pie_forced_subtitle : state.units_current, - color: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-color', NETDATA.themes.current.d3pie.subtitle), - fontSize: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontsize', 10), - fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontweight', "normal"), - font: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-font', "arial") - }, - titleSubtitlePadding: 1 - }, - footer: { - text: (state.d3pie_forced_footer !== null) ? state.d3pie_forced_footer : NETDATA.d3pieDateRange(state, data, 0), - color: NETDATA.dataAttribute(state.element, 'd3pie-footer-color', NETDATA.themes.current.d3pie.footer), - fontSize: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontsize', 9), - fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontweight', "bold"), - font: NETDATA.dataAttribute(state.element, 'd3pie-footer-font', "arial"), - location: NETDATA.dataAttribute(state.element, 'd3pie-footer-location', "bottom-center") // bottom-left, bottom-center, bottom-right - }, - size: { - canvasHeight: state.chartHeight(), - canvasWidth: state.chartWidth(), - pieInnerRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieinnerradius', "45%"), - pieOuterRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieouterradius', "80%") - }, - data: { - // none, random, value-asc, value-desc, label-asc, label-desc - sortOrder: NETDATA.dataAttribute(state.element, 'd3pie-sortorder', "value-desc"), - smallSegmentGrouping: { - enabled: NETDATA.dataAttributeBoolean(state.element, "d3pie-smallsegmentgrouping-enabled", false), - value: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-value', 1), - // percentage, value - valueType: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-valuetype', "percentage"), - label: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-label', "other"), - color: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-color', NETDATA.themes.current.d3pie.other) - }, - - // REQUIRED! 
This is where you enter your pie data; it needs to be an array of objects - // of this form: { label: "label", value: 1.5, color: "#000000" } - color is optional - content: content - }, - labels: { - outer: { - // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2 - format: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-format', "label-value1"), - hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-hidewhenlessthanpercentage', null), - pieDistance: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-piedistance', 15) - }, - inner: { - // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2 - format: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-format', "percentage"), - hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-hidewhenlessthanpercentage', 2) - }, - mainLabel: { - color: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-color', NETDATA.themes.current.d3pie.mainlabel), // or 'segment' for dynamic color - font: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-font', "arial"), - fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontsize', 10), - fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontweight', "normal") - }, - percentage: { - color: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-color', NETDATA.themes.current.d3pie.percentage), - font: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-font', "arial"), - fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontsize', 10), - fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontweight', "bold"), - decimalPlaces: 0 - }, - value: { - color: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-color', NETDATA.themes.current.d3pie.value), - font: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-font', "arial"), - fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontsize', 10), - fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontweight', "bold") - }, - lines: { - enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-lines-enabled', true), - style: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-style', "curved"), - color: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-color', "segment") // "segment" or a hex color - }, - truncation: { - enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-truncation-enabled', false), - truncateLength: NETDATA.dataAttribute(state.element, 'd3pie-labels-truncation-truncatelength', 30) - }, - formatter: function(context) { - // console.log(context); - if(context.part === 'value') - return state.legendFormatValue(context.value); - if(context.part === 'percentage') - return context.label + '%'; - - return context.label; - } - }, - effects: { - load: { - effect: "none", // none / default - speed: 0 // commented in the d3pie code to speed it up - }, - pullOutSegmentOnClick: { - effect: "bounce", // none / linear / bounce / elastic / back - speed: 400, - size: 5 - }, - highlightSegmentOnMouseover: true, - highlightLuminosity: -0.2 - }, - tooltips: { - enabled: false, - type: "placeholder", // caption|placeholder - string: "", - placeholderParser: null, // function - styles: { - fadeInSpeed: 250, - backgroundColor: NETDATA.themes.current.d3pie.tooltip_bg, - backgroundOpacity: 0.5, - color: 
NETDATA.themes.current.d3pie.tooltip_fg, - borderRadius: 2, - font: "arial", - fontSize: 12, - padding: 4 - } - }, - misc: { - colors: { - background: 'transparent', // transparent or color # - // segments: state.chartColors(), - segmentStroke: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-segmentstroke', NETDATA.themes.current.d3pie.segment_stroke) - }, - gradient: { - enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-misc-gradient-enabled', false), - percentage: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-percentage', 95), - color: NETDATA.dataAttribute(state.element, 'd3pie-misc-gradient-color', NETDATA.themes.current.d3pie.gradient_color) - }, - canvasPadding: { - top: 5, - right: 5, - bottom: 5, - left: 5 - }, - pieCenterOffset: { - x: 0, - y: 0 - }, - cssPrefix: NETDATA.dataAttribute(state.element, 'd3pie-cssprefix', null) - }, - callbacks: { - onload: null, - onMouseoverSegment: null, - onMouseoutSegment: null, - onClickSegment: null - } - }; - - state.d3pie_instance = new d3pie(state.element_chart, state.d3pie_options); - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // D3 - - NETDATA.d3Initialize = function(callback) { - if(typeof netdataStopD3 === 'undefined' || !netdataStopD3) { - $.ajax({ - url: NETDATA.d3_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('d3', NETDATA.d3_js); - }) - .fail(function() { - NETDATA.chartLibraries.d3.enabled = false; - NETDATA.error(100, NETDATA.d3_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }); - } - else { - NETDATA.chartLibraries.d3.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.d3ChartUpdate = function(state, data) { - void(state); - void(data); - - return false; - }; - - NETDATA.d3ChartCreate = function(state, data) { - void(state); - void(data); - - return false; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // google charts - - NETDATA.googleInitialize = function(callback) { - if(typeof netdataNoGoogleCharts === 'undefined' || !netdataNoGoogleCharts) { - $.ajax({ - url: NETDATA.google_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('google', NETDATA.google_js); - google.load('visualization', '1.1', { - 'packages': ['corechart', 'controls'], - 'callback': callback - }); - }) - .fail(function() { - NETDATA.chartLibraries.google.enabled = false; - NETDATA.error(100, NETDATA.google_js); - if(typeof callback === "function") - return callback(); - }); - } - else { - NETDATA.chartLibraries.google.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.googleChartUpdate = function(state, data) { - var datatable = new google.visualization.DataTable(data.result); - state.google_instance.draw(datatable, state.google_options); - return true; - }; - - NETDATA.googleChartCreate = function(state, data) { - var datatable = new google.visualization.DataTable(data.result); - - state.google_options = { - colors: state.chartColors(), - - // do not set width, height - the chart resizes itself - //width: state.chartWidth(), - //height: state.chartHeight(), - lineWidth: 1, - title: state.title, - 
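-         // the options below follow the Google Charts corechart configuration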
fontSize: 11, - hAxis: { - // title: "Time of Day", - // format:'HH:mm:ss', - viewWindowMode: 'maximized', - slantedText: false, - format:'HH:mm:ss', - textStyle: { - fontSize: 9 - }, - gridlines: { - color: '#EEE' - } - }, - vAxis: { - title: state.units_current, - viewWindowMode: 'pretty', - minValue: -0.1, - maxValue: 0.1, - direction: 1, - textStyle: { - fontSize: 9 - }, - gridlines: { - color: '#EEE' - } - }, - chartArea: { - width: '65%', - height: '80%' - }, - focusTarget: 'category', - annotation: { - '1': { - style: 'line' - } - }, - pointsVisible: 0, - titlePosition: 'out', - titleTextStyle: { - fontSize: 11 - }, - tooltip: { - isHtml: false, - ignoreBounds: true, - textStyle: { - fontSize: 9 - } - }, - curveType: 'function', - areaOpacity: 0.3, - isStacked: false - }; - - switch(state.chart.chart_type) { - case "area": - state.google_options.vAxis.viewWindowMode = 'maximized'; - state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_area; - state.google_instance = new google.visualization.AreaChart(state.element_chart); - break; - - case "stacked": - state.google_options.isStacked = true; - state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_stacked; - state.google_options.vAxis.viewWindowMode = 'maximized'; - state.google_options.vAxis.minValue = null; - state.google_options.vAxis.maxValue = null; - state.google_instance = new google.visualization.AreaChart(state.element_chart); - break; - - default: - case "line": - state.google_options.lineWidth = 2; - state.google_instance = new google.visualization.LineChart(state.element_chart); - break; - } - - state.google_instance.draw(datatable, state.google_options); - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - - NETDATA.easypiechartPercentFromValueMinMax = function(state, value, min, max) { - if(typeof value !== 'number') value = 0; - if(typeof min !== 'number') min = 0; - if(typeof max !== 'number') max = 0; - - if(min > max) { - var t = min; - min = max; - max = t; - } - - if(min > value) min = value; - if(max < value) max = value; - - state.legendFormatValueDecimalsFromMinMax(min, max); - - if(state.tmp.easyPieChartMin === null && min > 0) min = 0; - if(state.tmp.easyPieChartMax === null && max < 0) max = 0; - - var pcent; - - if(min < 0 && max > 0) { - // it is both positive and negative - // zero at the top center of the chart - max = (-min > max)? 
-min : max; - pcent = Math.round(value * 100 / max); - } - else if(value >= 0 && min >= 0 && max >= 0) { - // clockwise - pcent = Math.round((value - min) * 100 / (max - min)); - if(pcent === 0) pcent = 0.1; - } - else { - // counter clockwise - pcent = Math.round((value - max) * 100 / (max - min)); - if(pcent === 0) pcent = -0.1; - } - - return pcent; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // easy-pie-chart - - NETDATA.easypiechartInitialize = function(callback) { - if(typeof netdataNoEasyPieChart === 'undefined' || !netdataNoEasyPieChart) { - $.ajax({ - url: NETDATA.easypiechart_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('easypiechart', NETDATA.easypiechart_js); - }) - .fail(function() { - NETDATA.chartLibraries.easypiechart.enabled = false; - NETDATA.error(100, NETDATA.easypiechart_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }) - } - else { - NETDATA.chartLibraries.easypiechart.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.easypiechartClearSelection = function(state, force) { - if(typeof state.tmp.easyPieChartEvent !== 'undefined' && typeof state.tmp.easyPieChartEvent.timer !== 'undefined') { - NETDATA.timeout.clear(state.tmp.easyPieChartEvent.timer); - state.tmp.easyPieChartEvent.timer = undefined; - } - - if(state.isAutoRefreshable() === true && state.data !== null && force !== true) { - NETDATA.easypiechartChartUpdate(state, state.data); - } - else { - state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(null); - state.tmp.easyPieChart_instance.update(0); - } - state.tmp.easyPieChart_instance.enableAnimation(); - - return true; - }; - - NETDATA.easypiechartSetSelection = function(state, t) { - if(state.timeIsVisible(t) !== true) - return NETDATA.easypiechartClearSelection(state, true); - - var slot = state.calculateRowForTime(t); - if(slot < 0 || slot >= state.data.result.length) - return NETDATA.easypiechartClearSelection(state, true); - - if(typeof state.tmp.easyPieChartEvent === 'undefined') { - state.tmp.easyPieChartEvent = { - timer: undefined, - value: 0, - pcent: 0 - }; - } - - var value = state.data.result[state.data.result.length - 1 - slot]; - var min = (state.tmp.easyPieChartMin === null)?NETDATA.commonMin.get(state):state.tmp.easyPieChartMin; - var max = (state.tmp.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.tmp.easyPieChartMax; - var pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max); - - state.tmp.easyPieChartEvent.value = value; - state.tmp.easyPieChartEvent.pcent = pcent; - state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value); - - if(state.tmp.easyPieChartEvent.timer === undefined) { - state.tmp.easyPieChart_instance.disableAnimation(); - - state.tmp.easyPieChartEvent.timer = NETDATA.timeout.set(function() { - state.tmp.easyPieChartEvent.timer = undefined; - state.tmp.easyPieChart_instance.update(state.tmp.easyPieChartEvent.pcent); - }, 0); - } - - return true; - }; - - NETDATA.easypiechartChartUpdate = function(state, data) { - var value, min, max, pcent; - - if(NETDATA.globalPanAndZoom.isActive() === true || state.isAutoRefreshable() === false) { - value = null; - pcent = 0; - } - else { - value = data.result[0]; - min = (state.tmp.easyPieChartMin === 
null)?NETDATA.commonMin.get(state):state.tmp.easyPieChartMin; - max = (state.tmp.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.tmp.easyPieChartMax; - pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max); - } - - state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value); - state.tmp.easyPieChart_instance.update(pcent); - return true; - }; - - NETDATA.easypiechartChartCreate = function(state, data) { - var chart = $(state.element_chart); - - var value = data.result[0]; - var min = NETDATA.dataAttribute(state.element, 'easypiechart-min-value', null); - var max = NETDATA.dataAttribute(state.element, 'easypiechart-max-value', null); - - if(min === null) { - min = NETDATA.commonMin.get(state); - state.tmp.easyPieChartMin = null; - } - else - state.tmp.easyPieChartMin = min; - - if(max === null) { - max = NETDATA.commonMax.get(state); - state.tmp.easyPieChartMax = null; - } - else - state.tmp.easyPieChartMax = max; - - var size = state.chartWidth(); - var stroke = Math.floor(size / 22); - if(stroke < 3) stroke = 2; - - var valuefontsize = Math.floor((size * 2 / 3) / 5); - var valuetop = Math.round((size - valuefontsize - (size / 40)) / 2); - state.tmp.easyPieChartLabel = document.createElement('span'); - state.tmp.easyPieChartLabel.className = 'easyPieChartLabel'; - state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value); - state.tmp.easyPieChartLabel.style.fontSize = valuefontsize + 'px'; - state.tmp.easyPieChartLabel.style.top = valuetop.toString() + 'px'; - state.element_chart.appendChild(state.tmp.easyPieChartLabel); - - var titlefontsize = Math.round(valuefontsize * 1.6 / 3); - var titletop = Math.round(valuetop - (titlefontsize * 2) - (size / 40)); - state.tmp.easyPieChartTitle = document.createElement('span'); - state.tmp.easyPieChartTitle.className = 'easyPieChartTitle'; - state.tmp.easyPieChartTitle.innerText = state.title; - state.tmp.easyPieChartTitle.style.fontSize = titlefontsize + 'px'; - state.tmp.easyPieChartTitle.style.lineHeight = titlefontsize + 'px'; - state.tmp.easyPieChartTitle.style.top = titletop.toString() + 'px'; - state.element_chart.appendChild(state.tmp.easyPieChartTitle); - - var unitfontsize = Math.round(titlefontsize * 0.9); - var unittop = Math.round(valuetop + (valuefontsize + unitfontsize) + (size / 40)); - state.tmp.easyPieChartUnits = document.createElement('span'); - state.tmp.easyPieChartUnits.className = 'easyPieChartUnits'; - state.tmp.easyPieChartUnits.innerText = state.units_current; - state.tmp.easyPieChartUnits.style.fontSize = unitfontsize + 'px'; - state.tmp.easyPieChartUnits.style.top = unittop.toString() + 'px'; - state.element_chart.appendChild(state.tmp.easyPieChartUnits); - - var barColor = NETDATA.dataAttribute(state.element, 'easypiechart-barcolor', undefined); - if(typeof barColor === 'undefined' || barColor === null) - barColor = state.chartCustomColors()[0]; - else { - //
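-         // the attribute value may be a JS expression; eval() resolves it and,
-         // if it yields a function, it is used as a dynamic bar color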
- var tmp = eval(barColor); - if(typeof tmp === 'function') - barColor = tmp; - } - - var pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max); - chart.data('data-percent', pcent); - - chart.easyPieChart({ - barColor: barColor, - trackColor: NETDATA.dataAttribute(state.element, 'easypiechart-trackcolor', NETDATA.themes.current.easypiechart_track), - scaleColor: NETDATA.dataAttribute(state.element, 'easypiechart-scalecolor', NETDATA.themes.current.easypiechart_scale), - scaleLength: NETDATA.dataAttribute(state.element, 'easypiechart-scalelength', 5), - lineCap: NETDATA.dataAttribute(state.element, 'easypiechart-linecap', 'round'), - lineWidth: NETDATA.dataAttribute(state.element, 'easypiechart-linewidth', stroke), - trackWidth: NETDATA.dataAttribute(state.element, 'easypiechart-trackwidth', undefined), - size: NETDATA.dataAttribute(state.element, 'easypiechart-size', size), - rotate: NETDATA.dataAttribute(state.element, 'easypiechart-rotate', 0), - animate: NETDATA.dataAttribute(state.element, 'easypiechart-animate', {duration: 500, enabled: true}), - easing: NETDATA.dataAttribute(state.element, 'easypiechart-easing', undefined) - }); - - // when we just re-create the chart - // do not animate the first update - var animate = true; - if(typeof state.tmp.easyPieChart_instance !== 'undefined') - animate = false; - - state.tmp.easyPieChart_instance = chart.data('easyPieChart'); - if(animate === false) state.tmp.easyPieChart_instance.disableAnimation(); - state.tmp.easyPieChart_instance.update(pcent); - if(animate === false) state.tmp.easyPieChart_instance.enableAnimation(); - - state.legendSetUnitsString = function(units) { - if(typeof state.tmp.easyPieChartUnits !== 'undefined' && state.tmp.units !== units) { - state.tmp.easyPieChartUnits.innerText = units; - state.tmp.units = units; - } - }; - state.legendShowUndefined = function() { - if(typeof state.tmp.easyPieChart_instance !== 'undefined') - NETDATA.easypiechartClearSelection(state); - }; - - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // gauge.js - - NETDATA.gaugeInitialize = function(callback) { - if(typeof netdataNoGauge === 'undefined' || !netdataNoGauge) { - $.ajax({ - url: NETDATA.gauge_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('gauge', NETDATA.gauge_js); - }) - .fail(function() { - NETDATA.chartLibraries.gauge.enabled = false; - NETDATA.error(100, NETDATA.gauge_js); - }) - .always(function() { - if(typeof callback === "function") - return callback(); - }) - } - else { - NETDATA.chartLibraries.gauge.enabled = false; - if(typeof callback === "function") - return callback(); - } - }; - - NETDATA.gaugeAnimation = function(state, status) { - var speed = 32; - - if(typeof status === 'boolean' && status === false) - speed = 1000000000; - else if(typeof status === 'number') - speed = status; - - // console.log('gauge speed ' + speed); - state.tmp.gauge_instance.animationSpeed = speed; - state.tmp.___gaugeOld__.speed = speed; - }; - - NETDATA.gaugeSet = function(state, value, min, max) { - if(typeof value !== 'number') value = 0; - if(typeof min !== 'number') min = 0; - if(typeof max !== 'number') max = 0; - if(value > max) max = value; - if(value < min) min = value; - if(min > max) { - var t = min; - min = max; - max = t; - } - else if(min === max) - max = min + 1; - - 
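-     // e.g. value 5, min 0, max 10 -> the needle is fed (5 - 0) * 100 / (10 - 0) = 50 (see below)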
state.legendFormatValueDecimalsFromMinMax(min, max); - - // gauge.js has an issue if the needle - // is smaller than min or larger than max - // when we set the new values - // the needle will go crazy - - // to prevent it, we always feed it - // with a percentage, so that the needle - // is always between min and max - var pcent = (value - min) * 100 / (max - min); - - // bug fix for gauge.js 1.3.1 - // if the value is the absolute min or max, the chart is broken - if(pcent < 0.001) pcent = 0.001; - if(pcent > 99.999) pcent = 99.999; - - state.tmp.gauge_instance.set(pcent); - // console.log('gauge set ' + pcent + ', value ' + value + ', min ' + min + ', max ' + max); - - state.tmp.___gaugeOld__.value = value; - state.tmp.___gaugeOld__.min = min; - state.tmp.___gaugeOld__.max = max; - }; - - NETDATA.gaugeSetLabels = function(state, value, min, max) { - if(state.tmp.___gaugeOld__.valueLabel !== value) { - state.tmp.___gaugeOld__.valueLabel = value; - state.tmp.gaugeChartLabel.innerText = state.legendFormatValue(value); - } - if(state.tmp.___gaugeOld__.minLabel !== min) { - state.tmp.___gaugeOld__.minLabel = min; - state.tmp.gaugeChartMin.innerText = state.legendFormatValue(min); - } - if(state.tmp.___gaugeOld__.maxLabel !== max) { - state.tmp.___gaugeOld__.maxLabel = max; - state.tmp.gaugeChartMax.innerText = state.legendFormatValue(max); - } - }; - - NETDATA.gaugeClearSelection = function(state, force) { - if(typeof state.tmp.gaugeEvent !== 'undefined' && typeof state.tmp.gaugeEvent.timer !== 'undefined') { - NETDATA.timeout.clear(state.tmp.gaugeEvent.timer); - state.tmp.gaugeEvent.timer = undefined; - } - - if(state.isAutoRefreshable() === true && state.data !== null && force !== true) { - NETDATA.gaugeChartUpdate(state, state.data); - } - else { - NETDATA.gaugeAnimation(state, false); - NETDATA.gaugeSetLabels(state, null, null, null); - NETDATA.gaugeSet(state, null, null, null); - } - - NETDATA.gaugeAnimation(state, true); - return true; - }; - - NETDATA.gaugeSetSelection = function(state, t) { - if(state.timeIsVisible(t) !== true) - return NETDATA.gaugeClearSelection(state, true); - - var slot = state.calculateRowForTime(t); - if(slot < 0 || slot >= state.data.result.length) - return NETDATA.gaugeClearSelection(state, true); - - if(typeof state.tmp.gaugeEvent === 'undefined') { - state.tmp.gaugeEvent = { - timer: undefined, - value: 0, - min: 0, - max: 0 - }; - } - - var value = state.data.result[state.data.result.length - 1 - slot]; - var min = (state.tmp.gaugeMin === null)?NETDATA.commonMin.get(state):state.tmp.gaugeMin; - var max = (state.tmp.gaugeMax === null)?NETDATA.commonMax.get(state):state.tmp.gaugeMax; - - // make sure it is zero based - // but only if it has not been set by the user - if(state.tmp.gaugeMin === null && min > 0) min = 0; - if(state.tmp.gaugeMax === null && max < 0) max = 0; - - state.tmp.gaugeEvent.value = value; - state.tmp.gaugeEvent.min = min; - state.tmp.gaugeEvent.max = max; - NETDATA.gaugeSetLabels(state, value, min, max); - - if(state.tmp.gaugeEvent.timer === undefined) { - NETDATA.gaugeAnimation(state, false); - - state.tmp.gaugeEvent.timer = NETDATA.timeout.set(function() { - state.tmp.gaugeEvent.timer = undefined; - NETDATA.gaugeSet(state, state.tmp.gaugeEvent.value, state.tmp.gaugeEvent.min, state.tmp.gaugeEvent.max); - }, 0); - } - - return true; - }; - - NETDATA.gaugeChartUpdate = function(state, data) { - var value, min, max; - - if(NETDATA.globalPanAndZoom.isActive() === true || state.isAutoRefreshable() === false) { - 
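-         // while the user pans/zooms (or auto-refresh is off), the latest
-         // value is not on screen, so blank the gauge instead of a stale number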
NETDATA.gaugeSetLabels(state, null, null, null);
-         state.tmp.gauge_instance.set(0);
-     }
-     else {
-         value = data.result[0];
-         min = (state.tmp.gaugeMin === null)?NETDATA.commonMin.get(state):state.tmp.gaugeMin;
-         max = (state.tmp.gaugeMax === null)?NETDATA.commonMax.get(state):state.tmp.gaugeMax;
-         if(value < min) min = value;
-         if(value > max) max = value;
- 
-         // make sure it is zero based
-         // but only if it has not been set by the user
-         if(state.tmp.gaugeMin === null && min > 0) min = 0;
-         if(state.tmp.gaugeMax === null && max < 0) max = 0;
- 
-         NETDATA.gaugeSet(state, value, min, max);
-         NETDATA.gaugeSetLabels(state, value, min, max);
-     }
- 
-     return true;
- };
- 
- NETDATA.gaugeChartCreate = function(state, data) {
-     // var chart = $(state.element_chart);
- 
-     var value = data.result[0];
-     var min = NETDATA.dataAttribute(state.element, 'gauge-min-value', null);
-     var max = NETDATA.dataAttribute(state.element, 'gauge-max-value', null);
-     // var adjust = NETDATA.dataAttribute(state.element, 'gauge-adjust', null);
-     var pointerColor = NETDATA.dataAttribute(state.element, 'gauge-pointer-color', NETDATA.themes.current.gauge_pointer);
-     var strokeColor = NETDATA.dataAttribute(state.element, 'gauge-stroke-color', NETDATA.themes.current.gauge_stroke);
-     var startColor = NETDATA.dataAttribute(state.element, 'gauge-start-color', state.chartCustomColors()[0]);
-     var stopColor = NETDATA.dataAttribute(state.element, 'gauge-stop-color', void 0);
-     var generateGradient = NETDATA.dataAttribute(state.element, 'gauge-generate-gradient', false);
- 
-     if(min === null) {
-         min = NETDATA.commonMin.get(state);
-         state.tmp.gaugeMin = null;
-     }
-     else
-         state.tmp.gaugeMin = min;
- 
-     if(max === null) {
-         max = NETDATA.commonMax.get(state);
-         state.tmp.gaugeMax = null;
-     }
-     else
-         state.tmp.gaugeMax = max;
- 
-     // make sure it is zero based
-     // but only if it has not been set by the user
-     if(state.tmp.gaugeMin === null && min > 0) min = 0;
-     if(state.tmp.gaugeMax === null && max < 0) max = 0;
- 
-     var width = state.chartWidth(), height = state.chartHeight(); //, ratio = 1.5;
-     // console.log('gauge width: ' + width.toString() + ', height: ' + height.toString());
-     //switch(adjust) {
-     //    case 'width': width = height * ratio; break;
-     //    case 'height':
-     //    default: height = width / ratio; break;
-     //}
-     //state.element.style.width = width.toString() + 'px';
-     //state.element.style.height = height.toString() + 'px';
- 
-     var lum_d = 0.05;
- 
-     var options = {
-         lines: 12,                  // The number of lines to draw
-         angle: 0.14,                // The span of the gauge arc
-         lineWidth: 0.57,            // The line thickness
-         radiusScale: 1.0,           // Relative radius
-         pointer: {
-             length: 0.85,           // The pointer length, relative to the gauge radius
-             strokeWidth: 0.045,     // The pointer thickness
-             color: pointerColor     // Fill color
-         },
-         limitMax: true,             // If false, the max value of the gauge will be updated if the value surpasses max
-         limitMin: true,             // If true, the min value of the gauge will be fixed unless you set it manually
-         colorStart: startColor,     // The gradient start color
-         colorStop: stopColor,       // The gradient stop color
-         strokeColor: strokeColor,   // The color of the gauge track
-         generateGradient: (generateGradient === true),
-         gradientType: 0,
-         highDpiSupport: true        // High resolution support
-     };
- 
-     if (generateGradient.constructor === Array) {
-         // example options:
-         // data-gauge-generate-gradient="[0, 50, 100]"
-         // data-gauge-gradient-percent-color-0="#FFFFFF"
-         // data-gauge-gradient-percent-color-50="#999900"
-         // data-gauge-gradient-percent-color-100="#000000"
- 
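-         // gauge.js expects percentColors as [[fraction, color], ...] with fraction in 0..1;
-         // the loop below converts each percent from the data attributes into that form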
options.percentColors = []; - var len = generateGradient.length; - while(len--) { - var pcent = generateGradient[len]; - var color = NETDATA.dataAttribute(state.element, 'gauge-gradient-percent-color-' + pcent.toString(), false); - if(color !== false) { - var a = []; - a[0] = pcent / 100; - a[1] = color; - options.percentColors.unshift(a); - } - } - if(options.percentColors.length === 0) - delete options.percentColors; - } - else if(generateGradient === false && NETDATA.themes.current.gauge_gradient === true) { - //noinspection PointlessArithmeticExpressionJS - options.percentColors = [ - [0.0, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 0))], - [0.1, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 1))], - [0.2, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 2))], - [0.3, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 3))], - [0.4, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 4))], - [0.5, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 5))], - [0.6, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 6))], - [0.7, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 7))], - [0.8, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 8))], - [0.9, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 9))], - [1.0, NETDATA.colorLuminance(startColor, 0.0)]]; - } - - state.tmp.gauge_canvas = document.createElement('canvas'); - state.tmp.gauge_canvas.id = 'gauge-' + state.uuid + '-canvas'; - state.tmp.gauge_canvas.className = 'gaugeChart'; - state.tmp.gauge_canvas.width = width; - state.tmp.gauge_canvas.height = height; - state.element_chart.appendChild(state.tmp.gauge_canvas); - - var valuefontsize = Math.floor(height / 5); - var valuetop = Math.round((height - valuefontsize) / 3.2); - state.tmp.gaugeChartLabel = document.createElement('span'); - state.tmp.gaugeChartLabel.className = 'gaugeChartLabel'; - state.tmp.gaugeChartLabel.style.fontSize = valuefontsize + 'px'; - state.tmp.gaugeChartLabel.style.top = valuetop.toString() + 'px'; - state.element_chart.appendChild(state.tmp.gaugeChartLabel); - - var titlefontsize = Math.round(valuefontsize / 2.1); - var titletop = 0; - state.tmp.gaugeChartTitle = document.createElement('span'); - state.tmp.gaugeChartTitle.className = 'gaugeChartTitle'; - state.tmp.gaugeChartTitle.innerText = state.title; - state.tmp.gaugeChartTitle.style.fontSize = titlefontsize + 'px'; - state.tmp.gaugeChartTitle.style.lineHeight = titlefontsize + 'px'; - state.tmp.gaugeChartTitle.style.top = titletop.toString() + 'px'; - state.element_chart.appendChild(state.tmp.gaugeChartTitle); - - var unitfontsize = Math.round(titlefontsize * 0.9); - state.tmp.gaugeChartUnits = document.createElement('span'); - state.tmp.gaugeChartUnits.className = 'gaugeChartUnits'; - state.tmp.gaugeChartUnits.innerText = state.units_current; - state.tmp.gaugeChartUnits.style.fontSize = unitfontsize + 'px'; - state.element_chart.appendChild(state.tmp.gaugeChartUnits); - - state.tmp.gaugeChartMin = document.createElement('span'); - state.tmp.gaugeChartMin.className = 'gaugeChartMin'; - state.tmp.gaugeChartMin.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px'; - state.element_chart.appendChild(state.tmp.gaugeChartMin); - - state.tmp.gaugeChartMax = document.createElement('span'); - state.tmp.gaugeChartMax.className = 'gaugeChartMax'; - state.tmp.gaugeChartMax.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px'; - state.element_chart.appendChild(state.tmp.gaugeChartMax); - 
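-     // the value/min/max labels above are filled in by NETDATA.gaugeSetLabels() on each update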
- // when we just re-create the chart - // do not animate the first update - var animate = true; - if(typeof state.tmp.gauge_instance !== 'undefined') - animate = false; - - state.tmp.gauge_instance = new Gauge(state.tmp.gauge_canvas).setOptions(options); // create sexy gauge! - - state.tmp.___gaugeOld__ = { - value: value, - min: min, - max: max, - valueLabel: null, - minLabel: null, - maxLabel: null - }; - - // we will always feed a percentage - state.tmp.gauge_instance.minValue = 0; - state.tmp.gauge_instance.maxValue = 100; - - NETDATA.gaugeAnimation(state, animate); - NETDATA.gaugeSet(state, value, min, max); - NETDATA.gaugeSetLabels(state, value, min, max); - NETDATA.gaugeAnimation(state, true); - - state.legendSetUnitsString = function(units) { - if(typeof state.tmp.gaugeChartUnits !== 'undefined' && state.tmp.units !== units) { - state.tmp.gaugeChartUnits.innerText = units; - state.tmp.___gaugeOld__.valueLabel = null; - state.tmp.___gaugeOld__.minLabel = null; - state.tmp.___gaugeOld__.maxLabel = null; - state.tmp.units = units; - } - }; - state.legendShowUndefined = function() { - if(typeof state.tmp.gauge_instance !== 'undefined') - NETDATA.gaugeClearSelection(state); - }; - - return true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Charts Libraries Registration - - NETDATA.chartLibraries = { - "dygraph": { - initialize: NETDATA.dygraphInitialize, - create: NETDATA.dygraphChartCreate, - update: NETDATA.dygraphChartUpdate, - resize: function(state) { - if(typeof state.tmp.dygraph_instance !== 'undefined' && typeof state.tmp.dygraph_instance.resize === 'function') - state.tmp.dygraph_instance.resize(); - }, - setSelection: NETDATA.dygraphSetSelection, - clearSelection: NETDATA.dygraphClearSelection, - toolboxPanAndZoom: NETDATA.dygraphToolboxPanAndZoom, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), - format: function(state) { void(state); return 'json'; }, - options: function(state) { return 'ms|flip' + (this.isLogScale(state)?'|abs':'').toString(); }, - legend: function(state) { - return (this.isSparkline(state) === false && NETDATA.dataAttributeBoolean(state.element, 'legend', true) === true) ? 
'right-side' : null; - }, - autoresize: function(state) { void(state); return true; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return true; }, - pixels_per_point: function(state) { - return (this.isSparkline(state) === false)?3:2; - }, - isSparkline: function(state) { - if(typeof state.tmp.dygraph_sparkline === 'undefined') { - state.tmp.dygraph_sparkline = (this.theme(state) === 'sparkline'); - } - return state.tmp.dygraph_sparkline; - }, - isLogScale: function(state) { - if(typeof state.tmp.dygraph_logscale === 'undefined') { - state.tmp.dygraph_logscale = (this.theme(state) === 'logscale'); - } - return state.tmp.dygraph_logscale; - }, - theme: function(state) { - if(typeof state.tmp.dygraph_theme === 'undefined') - state.tmp.dygraph_theme = NETDATA.dataAttribute(state.element, 'dygraph-theme', 'default'); - return state.tmp.dygraph_theme; - }, - container_class: function(state) { - if(this.legend(state) !== null) - return 'netdata-container-with-legend'; - return 'netdata-container'; - } - }, - "sparkline": { - initialize: NETDATA.sparklineInitialize, - create: NETDATA.sparklineChartCreate, - update: NETDATA.sparklineChartUpdate, - resize: null, - setSelection: undefined, // function(state, t) { void(state); return true; }, - clearSelection: undefined, // function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), - format: function(state) { void(state); return 'array'; }, - options: function(state) { void(state); return 'flip|abs'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 3; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "peity": { - initialize: NETDATA.peityInitialize, - create: NETDATA.peityChartCreate, - update: NETDATA.peityChartUpdate, - resize: null, - setSelection: undefined, // function(state, t) { void(state); return true; }, - clearSelection: undefined, // function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), - format: function(state) { void(state); return 'ssvcomma'; }, - options: function(state) { void(state); return 'null2zero|flip|abs'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 3; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "morris": { - initialize: NETDATA.morrisInitialize, - create: NETDATA.morrisChartCreate, - update: NETDATA.morrisChartUpdate, - resize: null, - setSelection: undefined, // function(state, t) { void(state); return true; }, - clearSelection: undefined, // function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), - format: function(state) { void(state); return 'json'; }, - options: function(state) { void(state); 
return 'objectrows|ms'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 50; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 15; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "google": { - initialize: NETDATA.googleInitialize, - create: NETDATA.googleChartCreate, - update: NETDATA.googleChartUpdate, - resize: null, - setSelection: undefined, //function(state, t) { void(state); return true; }, - clearSelection: undefined, //function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.rows$'), - format: function(state) { void(state); return 'datatable'; }, - options: function(state) { void(state); return ''; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 300; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 4; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "raphael": { - initialize: NETDATA.raphaelInitialize, - create: NETDATA.raphaelChartCreate, - update: NETDATA.raphaelChartUpdate, - resize: null, - setSelection: undefined, // function(state, t) { void(state); return true; }, - clearSelection: undefined, // function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'), - format: function(state) { void(state); return 'json'; }, - options: function(state) { void(state); return ''; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 3; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "c3": { - initialize: NETDATA.c3Initialize, - create: NETDATA.c3ChartCreate, - update: NETDATA.c3ChartUpdate, - resize: null, - setSelection: undefined, // function(state, t) { void(state); return true; }, - clearSelection: undefined, // function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'), - format: function(state) { void(state); return 'csvjsonarray'; }, - options: function(state) { void(state); return 'milliseconds'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 15; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "d3pie": { - initialize: NETDATA.d3pieInitialize, - create: NETDATA.d3pieChartCreate, - update: NETDATA.d3pieChartUpdate, - resize: null, - setSelection: NETDATA.d3pieSetSelection, - clearSelection: NETDATA.d3pieClearSelection, - toolboxPanAndZoom: null, -
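Every descriptor in this table implements the same accessor interface (format, options, legend, pixels_per_point, ...), which keeps the refresh loop library-agnostic. A sketch of how a caller could derive its data-request hints from any descriptor; libraryRequestHints and its width argument are illustrative, and the sketch assumes the chart state exposes its library name as state.library_name:

    function libraryRequestHints(state, chartWidthPx) {
        var lib = NETDATA.chartLibraries[state.library_name];
        return {
            format: lib.format(state),     // e.g. 'json', 'array', 'csvjsonarray'
            options: lib.options(state),   // e.g. 'flip|abs', 'objectrows|ms'
            // denser libraries ask for fewer points: one per pixels_per_point() pixels
            points: Math.round(chartWidthPx / lib.pixels_per_point(state))
        };
    }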
initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'), - format: function(state) { void(state); return 'json'; }, - options: function(state) { void(state); return 'objectrows|ms'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 15; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "d3": { - initialize: NETDATA.d3Initialize, - create: NETDATA.d3ChartCreate, - update: NETDATA.d3ChartUpdate, - resize: null, - setSelection: undefined, // function(state, t) { void(state); return true; }, - clearSelection: undefined, // function(state) { void(state); return true; }, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'), - format: function(state) { void(state); return 'json'; }, - options: function(state) { void(state); return ''; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return false; }, - pixels_per_point: function(state) { void(state); return 3; }, - container_class: function(state) { void(state); return 'netdata-container'; } - }, - "easypiechart": { - initialize: NETDATA.easypiechartInitialize, - create: NETDATA.easypiechartChartCreate, - update: NETDATA.easypiechartChartUpdate, - resize: null, - setSelection: NETDATA.easypiechartSetSelection, - clearSelection: NETDATA.easypiechartClearSelection, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'), - format: function(state) { void(state); return 'array'; }, - options: function(state) { void(state); return 'absolute'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return true; }, - pixels_per_point: function(state) { void(state); return 3; }, - aspect_ratio: 100, - container_class: function(state) { void(state); return 'netdata-container-easypiechart'; } - }, - "gauge": { - initialize: NETDATA.gaugeInitialize, - create: NETDATA.gaugeChartCreate, - update: NETDATA.gaugeChartUpdate, - resize: null, - setSelection: NETDATA.gaugeSetSelection, - clearSelection: NETDATA.gaugeClearSelection, - toolboxPanAndZoom: null, - initialized: false, - enabled: true, - xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'), - format: function(state) { void(state); return 'array'; }, - options: function(state) { void(state); return 'absolute'; }, - legend: function(state) { void(state); return null; }, - autoresize: function(state) { void(state); return false; }, - max_updates_to_recreate: function(state) { void(state); return 5000; }, - track_colors: function(state) { void(state); return true; }, - pixels_per_point: function(state) { void(state); return 3; }, - aspect_ratio: 60, - container_class: function(state) { void(state); return 'netdata-container-gauge'; } - } - }; - - NETDATA.registerChartLibrary = function(library, url) { - if(NETDATA.options.debug.libraries === true) -
console.log("registering chart library: " + library); - - NETDATA.chartLibraries[library].url = url; - NETDATA.chartLibraries[library].initialized = true; - NETDATA.chartLibraries[library].enabled = true; - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Load required JS libraries and CSS - - NETDATA.requiredJs = [ - { - url: NETDATA.serverStatic + 'lib/bootstrap-3.3.7.min.js', - async: false, - isAlreadyLoaded: function() { - // check if bootstrap is loaded - if(typeof $().emulateTransitionEnd === 'function') - return true; - else { - return (typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap === true); - } - } - }, - { - url: NETDATA.serverStatic + 'lib/fontawesome-all-5.0.1.min.js', - async: true, - isAlreadyLoaded: function() { return false; } - }, - { - url: NETDATA.serverStatic + 'lib/perfect-scrollbar-0.6.15.min.js', - isAlreadyLoaded: function() { return false; } - } - ]; - - NETDATA.requiredCSS = [ - { - url: NETDATA.themes.current.bootstrap_css, - isAlreadyLoaded: function() { - return (typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap === true); - } - }, - { - url: NETDATA.themes.current.dashboard_css, - isAlreadyLoaded: function() { return false; } - } - ]; - - NETDATA.loadedRequiredJs = 0; - NETDATA.loadRequiredJs = function(index, callback) { - if(index >= NETDATA.requiredJs.length) { - if(typeof callback === 'function') - return callback(); - return; - } - - if(NETDATA.requiredJs[index].isAlreadyLoaded()) { - NETDATA.loadedRequiredJs++; - NETDATA.loadRequiredJs(++index, callback); - return; - } - - if(NETDATA.options.debug.main_loop === true) - console.log('loading ' + NETDATA.requiredJs[index].url); - - var async = true; - if(typeof NETDATA.requiredJs[index].async !== 'undefined' && NETDATA.requiredJs[index].async === false) - async = false; - - $.ajax({ - url: NETDATA.requiredJs[index].url, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - if(NETDATA.options.debug.main_loop === true) - console.log('loaded ' + NETDATA.requiredJs[index].url); - }) - .fail(function() { - alert('Cannot load required JS library: ' + NETDATA.requiredJs[index].url); - }) - .always(function() { - NETDATA.loadedRequiredJs++; - - if(async === false) - NETDATA.loadRequiredJs(++index, callback); - }); - - if(async === true) - NETDATA.loadRequiredJs(++index, callback); - }; - - NETDATA.loadRequiredCSS = function(index) { - if(index >= NETDATA.requiredCSS.length) - return; - - if(NETDATA.requiredCSS[index].isAlreadyLoaded()) { - NETDATA.loadRequiredCSS(++index); - return; - } - - if(NETDATA.options.debug.main_loop === true) - console.log('loading ' + NETDATA.requiredCSS[index].url); - - NETDATA._loadCSS(NETDATA.requiredCSS[index].url); - NETDATA.loadRequiredCSS(++index); - }; - - - // ---------------------------------------------------------------------------------------------------------------- - // Registry of netdata hosts - - NETDATA.alarms = { - onclick: null, // the callback to handle the click - it will be called with the alarm log entry - chart_div_offset: -50, // give that space above the chart when scrolling to it - chart_div_id_prefix: 'chart_', // the chart DIV IDs have this prefix (they should be NETDATA.name2id(chart.id)) - chart_div_animation_duration: 0,// the duration of the animation while scrolling to a chart - - ms_penalty: 0, // the time penalty of the next alarm - ms_between_notifications: 500, // 
firefox moves the alarms off-screen (above, outside the top of the screen) - // if alarms are shown faster than: one per 500ms - - update_every: 10000, // the time in ms between alarm checks - - notifications: false, // when true, the browser supports notifications (may not be granted though) - last_notification_id: 0, // the id of the last alarm_log we have raised an alarm for - first_notification_id: 0, // the id of the first alarm_log entry for this session - // this is used to prevent CLEAR notifications for past events - // notifications_shown: [], - - server: null, // the server to connect to for fetching alarms - current: null, // the list of raised alarms - updated in the background - - // a callback function to call every time the list of raised alarms is refreshed - callback: (typeof netdataAlarmsActiveCallback === 'function')?netdataAlarmsActiveCallback:null, - - // a callback function to call every time a notification is shown - // the return value is used to decide if the notification will be shown - notificationCallback: (typeof netdataAlarmsNotifCallback === 'function')?netdataAlarmsNotifCallback:null, - - recipients: null, // the list (array) of recipients to show alarms for, or null - - recipientMatches: function(to_string, wanted_array) { - if(typeof wanted_array === 'undefined' || wanted_array === null || Array.isArray(wanted_array) === false) - return true; - - var r = ' ' + to_string.toString() + ' '; - var len = wanted_array.length; - while(len--) { - if(r.indexOf(' ' + wanted_array[len] + ' ') >= 0) - return true; - } - - return false; - }, - - activeForRecipients: function() { - var active = {}; - var data = NETDATA.alarms.current; - - if(typeof data === 'undefined' || data === null) - return active; - - for(var x in data.alarms) { - if(!data.alarms.hasOwnProperty(x)) continue; - - var alarm = data.alarms[x]; - if((alarm.status === 'WARNING' || alarm.status === 'CRITICAL') && NETDATA.alarms.recipientMatches(alarm.recipient, NETDATA.alarms.recipients)) - active[x] = alarm; - } - - return active; - }, - - notify: function(entry) { - // console.log('alarm ' + entry.unique_id); - - if(entry.updated === true) { - // console.log('alarm ' + entry.unique_id + ' has been updated by another alarm'); - return; - } - - var value_string = entry.value_string; - - if(NETDATA.alarms.current !== null) { - // get the current value_string - var t = NETDATA.alarms.current.alarms[entry.chart + '.' + entry.name]; - if(typeof t !== 'undefined' && entry.status === t.status && typeof t.value_string !== 'undefined') - value_string = t.value_string; - } - - var name = entry.name.replace(/_/g, ' '); - var status = entry.status.toLowerCase(); - var title = name + ' = ' + value_string.toString(); - var tag = entry.alarm_id; - var icon = 'images/seo-performance-128.png'; - var interaction = false; - var data = entry; - var show = true; - - // console.log('alarm ' + entry.unique_id + ' ' + entry.chart + '.' 
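recipientMatches() above treats the alarm's recipient field as a space-separated list and applies no filtering at all when the wanted array is missing. For example (values are illustrative):

    NETDATA.alarms.recipientMatches('sysadmin webmaster', ['webmaster']); // true
    NETDATA.alarms.recipientMatches('sysadmin', ['webmaster']);           // false
    NETDATA.alarms.recipientMatches('sysadmin', null);                    // true - no filter, everything matches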
+ entry.name + ' is ' + entry.status); - - switch(entry.status) { - case 'REMOVED': - show = false; - break; - - case 'UNDEFINED': - return; - - case 'UNINITIALIZED': - return; - - case 'CLEAR': - if(entry.unique_id < NETDATA.alarms.first_notification_id) { - // console.log('alarm ' + entry.unique_id + ' is not current'); - return; - } - if(entry.old_status === 'UNINITIALIZED' || entry.old_status === 'UNDEFINED') { - // console.log('alarm' + entry.unique_id + ' switch to CLEAR from ' + entry.old_status); - return; - } - if(entry.no_clear_notification === true) { - // console.log('alarm' + entry.unique_id + ' is CLEAR but has no_clear_notification flag'); - return; - } - title = name + ' back to normal (' + value_string.toString() + ')'; - icon = 'images/check-mark-2-128-green.png'; - interaction = false; - break; - - case 'WARNING': - if(entry.old_status === 'CRITICAL') - status = 'demoted to ' + entry.status.toLowerCase(); - - icon = 'images/alert-128-orange.png'; - interaction = false; - break; - - case 'CRITICAL': - if(entry.old_status === 'WARNING') - status = 'escalated to ' + entry.status.toLowerCase(); - - icon = 'images/alert-128-red.png'; - interaction = true; - break; - - default: - console.log('invalid alarm status ' + entry.status); - return; - } - - // filter recipients - if(show === true) - show = NETDATA.alarms.recipientMatches(entry.recipient, NETDATA.alarms.recipients); - - /* - // cleanup old notifications with the same alarm_id as this one - // FIXME: it does not seem to work on any web browser! - var len = NETDATA.alarms.notifications_shown.length; - while(len--) { - var n = NETDATA.alarms.notifications_shown[len]; - if(n.data.alarm_id === entry.alarm_id) { - console.log('removing old alarm ' + n.data.unique_id); - - // close the notification - n.close.bind(n); - - // remove it from the array - NETDATA.alarms.notifications_shown.splice(len, 1); - len = NETDATA.alarms.notifications_shown.length; - } - } - */ - - if(show === true) { - if(typeof NETDATA.alarms.notificationCallback === 'function') - show = NETDATA.alarms.notificationCallback(entry); - - if(show === true) { - setTimeout(function() { - // show this notification - // console.log('new notification: ' + title); - var n = new Notification(title, { - body: entry.hostname + ' - ' + entry.chart + ' (' + entry.family + ') - ' + status + ': ' + entry.info, - tag: tag, - requireInteraction: interaction, - icon: NETDATA.serverStatic + icon, - data: data - }); - - n.onclick = function(event) { - event.preventDefault(); - NETDATA.alarms.onclick(event.target.data); - }; - - // console.log(n); - // NETDATA.alarms.notifications_shown.push(n); - // console.log(entry); - }, NETDATA.alarms.ms_penalty); - - NETDATA.alarms.ms_penalty += NETDATA.alarms.ms_between_notifications; - } - } - }, - - scrollToChart: function(chart_id) { - if(typeof chart_id === 'string') { - var offset = $('#' + NETDATA.alarms.chart_div_id_prefix + NETDATA.name2id(chart_id)).offset(); - if(typeof offset !== 'undefined') { - $('html, body').animate({ scrollTop: offset.top + NETDATA.alarms.chart_div_offset }, NETDATA.alarms.chart_div_animation_duration); - return true; - } - } - return false; - }, - - scrollToAlarm: function(alarm) { - if(typeof alarm === 'object') { - var ret = NETDATA.alarms.scrollToChart(alarm.chart); - - if(ret === true && NETDATA.options.page_is_visible === false) - window.focus(); - // alert('netdata dashboard will now scroll to chart: ' + alarm.chart + '\n\nThis alarm opened to bring the browser window in front of the screen. 
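Just before a notification is raised, notify() consults NETDATA.alarms.notificationCallback, so a page can veto individual entries. A minimal page-level hook, to be defined before dashboard.js loads (the policy shown is illustrative):

    // picked up as NETDATA.alarms.notificationCallback (see the alarms settings above);
    // returning false suppresses the browser notification for that entry
    var netdataAlarmsNotifCallback = function(entry) {
        return entry.status === 'CRITICAL';   // notify only for criticals
    };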
Click on the dashboard to prevent it from appearing again.'); - } - - }, - - notifyAll: function() { - // console.log('FETCHING ALARM LOG'); - NETDATA.alarms.get_log(NETDATA.alarms.last_notification_id, function(data) { - // console.log('ALARM LOG FETCHED'); - - if(data === null || typeof data !== 'object') { - console.log('invalid alarms log response'); - return; - } - - if(data.length === 0) { - console.log('received empty alarm log'); - return; - } - - // console.log('received alarm log of ' + data.length + ' entries, from ' + data[data.length - 1].unique_id.toString() + ' to ' + data[0].unique_id.toString()); - - data.sort(function(a, b) { - if(a.unique_id > b.unique_id) return -1; - if(a.unique_id < b.unique_id) return 1; - return 0; - }); - - NETDATA.alarms.ms_penalty = 0; - - var len = data.length; - while(len--) { - if(data[len].unique_id > NETDATA.alarms.last_notification_id) { - NETDATA.alarms.notify(data[len]); - } - //else - // console.log('ignoring alarm (older) with id ' + data[len].unique_id.toString()); - } - - NETDATA.alarms.last_notification_id = data[0].unique_id; - - if(typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember === true) - NETDATA.localStorageSet('last_notification_id', NETDATA.alarms.last_notification_id, null); - // console.log('last notification id = ' + NETDATA.alarms.last_notification_id); - }) - }, - - check_notifications: function() { - // returns true if we should fire 1+ notifications - - if(NETDATA.alarms.notifications !== true) { - // console.log('web notifications are not available'); - return false; - } - - if(Notification.permission !== 'granted') { - // console.log('web notifications are not granted'); - return false; - } - - if(typeof NETDATA.alarms.current !== 'undefined' && typeof NETDATA.alarms.current.alarms === 'object') { - // console.log('can do alarms: old id = ' + NETDATA.alarms.last_notification_id + ' new id = ' + NETDATA.alarms.current.latest_alarm_log_unique_id); - - if(NETDATA.alarms.current.latest_alarm_log_unique_id > NETDATA.alarms.last_notification_id) { - // console.log('new alarms detected'); - return true; - } - //else console.log('no new alarms'); - } - // else console.log('cannot process alarms'); - - return false; - }, - - get: function(what, callback) { - $.ajax({ - url: NETDATA.alarms.server + '/api/v1/alarms?' 
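Putting the pieces together, page code can list the currently raised alarms for a given recipient with the get() helper whose definition begins just below (a usage sketch; the recipient name is illustrative):

    NETDATA.alarms.recipients = ['sysadmin'];        // only alarms routed to 'sysadmin'
    NETDATA.alarms.get('active', function(data) {
        if(data === null) return;                    // request failed
        NETDATA.alarms.current = data;
        var raised = NETDATA.alarms.activeForRecipients();
        for(var name in raised) {
            if(!raised.hasOwnProperty(name)) continue;
            console.log(name + ' is ' + raised[name].status);
        }
    });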
+ what.toString(), - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkOptional('/api/v1/alarms', data /*, '.*\.(calc|calc_parsed|warn|warn_parsed|crit|crit_parsed)$' */); - - if(NETDATA.alarms.first_notification_id === 0 && typeof data.latest_alarm_log_unique_id === 'number') - NETDATA.alarms.first_notification_id = data.latest_alarm_log_unique_id; - - if(typeof callback === 'function') - return callback(data); - }) - .fail(function() { - NETDATA.error(415, NETDATA.alarms.server); - - if(typeof callback === 'function') - return callback(null); - }); - }, - - update_forever: function() { - if(netdataShowAlarms !== true || netdataSnapshotData !== null) - return; - - NETDATA.alarms.get('active', function(data) { - if(data !== null) { - NETDATA.alarms.current = data; - - if(NETDATA.alarms.check_notifications() === true) { - NETDATA.alarms.notifyAll(); - } - - if (typeof NETDATA.alarms.callback === 'function') { - NETDATA.alarms.callback(data); - } - - // Health monitoring is disabled on this netdata - if(data.status === false) return; - } - - setTimeout(NETDATA.alarms.update_forever, NETDATA.alarms.update_every); - }); - }, - - get_log: function(last_id, callback) { - // console.log('fetching all log after ' + last_id.toString()); - $.ajax({ - url: NETDATA.alarms.server + '/api/v1/alarm_log?after=' + last_id.toString(), - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkOptional('/api/v1/alarm_log', data); - - if(typeof callback === 'function') - return callback(data); - }) - .fail(function() { - NETDATA.error(416, NETDATA.alarms.server); - - if(typeof callback === 'function') - return callback(null); - }); - }, - - init: function() { - NETDATA.alarms.server = NETDATA.fixHost(NETDATA.serverDefault); - - if(typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember === true) { - NETDATA.alarms.last_notification_id = - NETDATA.localStorageGet('last_notification_id', NETDATA.alarms.last_notification_id, null); - } - - if(NETDATA.alarms.onclick === null) - NETDATA.alarms.onclick = NETDATA.alarms.scrollToAlarm; - - if(typeof netdataAlarmsRecipients !== 'undefined' && Array.isArray(netdataAlarmsRecipients)) - NETDATA.alarms.recipients = netdataAlarmsRecipients; - - if(netdataShowAlarms === true) { - NETDATA.alarms.update_forever(); - - if('Notification' in window) { - // console.log('notifications available'); - NETDATA.alarms.notifications = true; - - if(Notification.permission === 'default') - Notification.requestPermission(); - } - } - } - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Registry of netdata hosts - - NETDATA.registry = { - server: null, // the netdata registry server - person_guid: null, // the unique ID of this browser / user - machine_guid: null, // the unique ID the netdata server that served dashboard.js - hostname: 'unknown', // the hostname of the netdata server that served dashboard.js - machines: null, // the user's other URLs - machines_array: null, // the user's other URLs in an array - person_urls: null, - - parsePersonUrls: function(person_urls) { - // console.log(person_urls); - NETDATA.registry.person_urls = 
person_urls; - - if(person_urls) { - NETDATA.registry.machines = {}; - NETDATA.registry.machines_array = []; - - var apu = person_urls; - var i = apu.length; - while(i--) { - if(typeof NETDATA.registry.machines[apu[i][0]] === 'undefined') { - // console.log('adding: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString()); - - var obj = { - guid: apu[i][0], - url: apu[i][1], - last_t: apu[i][2], - accesses: apu[i][3], - name: apu[i][4], - alternate_urls: [] - }; - obj.alternate_urls.push(apu[i][1]); - - NETDATA.registry.machines[apu[i][0]] = obj; - NETDATA.registry.machines_array.push(obj); - } - else { - // console.log('appending: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString()); - - var pu = NETDATA.registry.machines[apu[i][0]]; - if(pu.last_t < apu[i][2]) { - pu.url = apu[i][1]; - pu.last_t = apu[i][2]; - pu.name = apu[i][4]; - } - pu.accesses += apu[i][3]; - pu.alternate_urls.push(apu[i][1]); - } - } - } - - if(typeof netdataRegistryCallback === 'function') - netdataRegistryCallback(NETDATA.registry.machines_array); - }, - - init: function() { - if(netdataRegistry !== true) return; - - NETDATA.registry.hello(NETDATA.serverDefault, function(data) { - if(data) { - NETDATA.registry.server = data.registry; - NETDATA.registry.machine_guid = data.machine_guid; - NETDATA.registry.hostname = data.hostname; - - NETDATA.registry.access(2, function (person_urls) { - NETDATA.registry.parsePersonUrls(person_urls); - - }); - } - }); - }, - - hello: function(host, callback) { - host = NETDATA.fixHost(host); - - // send HELLO to a netdata server: - // 1. verifies the server is reachable - // 2. responds with the registry URL, the machine GUID of this netdata server and its hostname - $.ajax({ - url: host + '/api/v1/registry?action=hello', - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkOptional('/api/v1/registry?action=hello', data); - - if(typeof data.status !== 'string' || data.status !== 'ok') { - NETDATA.error(408, host + ' response: ' + JSON.stringify(data)); - data = null; - } - - if(typeof callback === 'function') - return callback(data); - }) - .fail(function() { - NETDATA.error(407, host); - - if(typeof callback === 'function') - return callback(null); - }); - }, - - access: function(max_redirects, callback) { - // send ACCESS to a netdata registry: - // 1. it lets it know we are accessing a netdata server (its machine GUID and its URL) - // 2. 
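parsePersonUrls() above folds the registry's person_urls array into one object per machine and then hands machines_array to an optional page hook. A hypothetical hook, defined before dashboard.js loads (the field names are exactly those built above):

    var netdataRegistryCallback = function(machines_array) {
        if(machines_array === null) return;           // registry disabled or empty
        machines_array.forEach(function(m) {
            // each entry carries guid, url, last_t, accesses, name, alternate_urls
            console.log(m.name + ' -> ' + m.url + ' (' + m.accesses + ' accesses)');
        });
    };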
it responds with a list of netdata servers we know - // the registry identifies us using a cookie it sets the first time we access it - // the registry may respond with a redirect URL to send us to another registry - $.ajax({ - url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault), // + '&visible_url=' + encodeURIComponent(document.location), - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkAlways('/api/v1/registry?action=access', data); - - var redirect = null; - if(typeof data.registry === 'string') - redirect = data.registry; - - if(typeof data.status !== 'string' || data.status !== 'ok') { - NETDATA.error(409, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); - data = null; - } - - if(data === null) { - if(redirect !== null && max_redirects > 0) { - NETDATA.registry.server = redirect; - NETDATA.registry.access(max_redirects - 1, callback); - } - else { - if(typeof callback === 'function') - return callback(null); - } - } - else { - if(typeof data.person_guid === 'string') - NETDATA.registry.person_guid = data.person_guid; - - if(typeof callback === 'function') - return callback(data.urls); - } - }) - .fail(function() { - NETDATA.error(410, NETDATA.registry.server); - - if(typeof callback === 'function') - return callback(null); - }); - }, - - delete: function(delete_url, callback) { - // send DELETE to a netdata registry: - $.ajax({ - url: NETDATA.registry.server + '/api/v1/registry?action=delete&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&delete_url=' + encodeURIComponent(delete_url), - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkAlways('/api/v1/registry?action=delete', data); - - if(typeof data.status !== 'string' || data.status !== 'ok') { - NETDATA.error(411, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); - data = null; - } - - if(typeof callback === 'function') - return callback(data); - }) - .fail(function() { - NETDATA.error(412, NETDATA.registry.server); - - if(typeof callback === 'function') - return callback(null); - }); - }, - - search: function(machine_guid, callback) { - // SEARCH for the URLs of a machine: - $.ajax({ - url: NETDATA.registry.server + '/api/v1/registry?action=search&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&for=' + machine_guid, - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkAlways('/api/v1/registry?action=search', data); - - if(typeof data.status !== 'string' || data.status !== 'ok') { - NETDATA.error(417, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); - data = null; - } - - if(typeof callback === 'function') - return callback(data); - }) - 
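The boot sequence at the end of this file (below, after the remaining registry helpers) honors a few optional page-level globals. A hypothetical configuration; all flags appear in the code, the values are illustrative:

    var netdataNoBootstrap = true;    // the page ships its own Bootstrap; skip loading it
    var netdataDontStart = true;      // load everything, but defer the refresh loop
    // ... later, once the page has finished its own setup:
    // NETDATA.start();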
.fail(function() { - NETDATA.error(418, NETDATA.registry.server); - - if(typeof callback === 'function') - return callback(null); - }); - }, - - switch: function(new_person_guid, callback) { - // impersonate - $.ajax({ - url: NETDATA.registry.server + '/api/v1/registry?action=switch&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&to=' + new_person_guid, - async: true, - cache: false, - headers: { - 'Cache-Control': 'no-cache, no-store', - 'Pragma': 'no-cache' - }, - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function(data) { - data = NETDATA.xss.checkAlways('/api/v1/registry?action=switch', data); - - if(typeof data.status !== 'string' || data.status !== 'ok') { - NETDATA.error(413, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); - data = null; - } - - if(typeof callback === 'function') - return callback(data); - }) - .fail(function() { - NETDATA.error(414, NETDATA.registry.server); - - if(typeof callback === 'function') - return callback(null); - }); - } - }; - - // ---------------------------------------------------------------------------------------------------------------- - // Boot it! - - if(typeof netdataPrepCallback === 'function') - netdataPrepCallback(); - - NETDATA.errorReset(); - NETDATA.loadRequiredCSS(0); - - NETDATA._loadjQuery(function() { - NETDATA.loadRequiredJs(0, function() { - if(typeof $().emulateTransitionEnd !== 'function') { - // bootstrap is not available - NETDATA.options.current.show_help = false; - } - - if(typeof netdataDontStart === 'undefined' || !netdataDontStart) { - if(NETDATA.options.debug.main_loop === true) - console.log('starting chart refresh thread'); - - NETDATA.start(); - } - }); - }); -})(window, document, (typeof jQuery === 'function')?jQuery:undefined); diff --git a/web/dashboard.slate.css b/web/dashboard.slate.css deleted file mode 100644 index 9b1d50cd5..000000000 --- a/web/dashboard.slate.css +++ /dev/null @@ -1,756 +0,0 @@ -html, -body { - /*font-family: Calibri,"Segoe UI","Helvetica Neue",Helvetica,Arial,sans-serif;*/ - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-style: normal; - font-variant: normal; - color: #878b90; -} - -/* fixes for default slate theme */ -code { - color: #bbb; /*#c7254e;*/ - background-color: #555; /* #f9f2f4; */ -} - -.dashboard-sidebar .nav > .active > a, -.dashboard-sidebar .nav > .active:hover > a, -.dashboard-sidebar .nav > .active:focus > a { - color: #765d9c; - border-left: 2px solid #765d9c; -} - -.morelink { - color: #765d9c; - text-decoration: none; -} - -.morelink:hover { - color: #563d7c; - text-decoration: none; -} - -.morelink:focus { - color: #765d9c; - text-decoration: none; -} - -.netdata-chart-alignment { - margin-left: 55px; -} - -.netdata-chart-row { - width: 100%; - text-align: center; - display: flex; - display: -webkit-flex; - display: -moz-flex; - align-items: flex-end; - -moz-align-items: flex-end; - -webkit-align-items: flex-end; - justify-content: center; - -webkit-justify-content: center; - -moz-justify-content: center; - padding-top: 10px; -} - -.netdata-container { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-container-gauge { - display: inline-block; - overflow: hidden; - -
transform: translate3d(0,0,0); - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-container-gauge:after { - padding-top: 60%; - display: block; - content: ''; -} - -.netdata-container-easypiechart { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-container-easypiechart:after { - padding-top: 100%; - display: block; - content: ''; -} - -.netdata-aspect { - position: relative; - width: 100%; - padding: 0px; - margin: 0px; -} - -.netdata-container-with-legend { - display: inline-block; - overflow: hidden; - - transform: translate3d(0,0,0); - - /* fix minimum scrollbar issue in firefox */ - min-height: 99px; - - /* required for child elements to have absolute position */ - position: relative; - - /* width and height is given per chart with data-width and data-height */ -} - -.netdata-legend-resize-handler { - display: block; - position: absolute; - bottom: 0px; - right: 0px; - height: 15px; - width: 20px; - background-color: #272b30; - font-size: 15px; - vertical-align: middle; - line-height: 15px; - cursor: ns-resize; - color: #373b40; - text-align: center; - overflow: hidden; - z-index: 20; - padding: 0px; - margin: 0px; -} - -.netdata-legend-toolbox { - display: block; - position: absolute; - bottom: 0px; - right: 30px; - height: 15px; - width: 110px; - background-color: #272b30; - font-size: 12px; - vertical-align: middle; - line-height: 15px; - color: #373b40; - text-align: center; - overflow: hidden; - z-index: 20; - padding: 0px; - margin: 0px; - - /* prevent text selection after double click */ - -webkit-user-select: none; /* webkit (safari, chrome) browsers */ - -moz-user-select: none; /* mozilla browsers */ - -khtml-user-select: none; /* webkit (konqueror) browsers */ - -ms-user-select: none; /* IE10+ */ -} - -.netdata-legend-toolbox-button { - display: inline-block; - position: relative; - height: 15px; - width: 18px; - background-color: #272b30; - font-size: 12px; - vertical-align: middle; - line-height: 15px; - color: #474b50; - text-align: center; - overflow: hidden; - z-index: 21; - padding: 0px; - margin: 0px; - cursor: pointer; - - /* prevent text selection after double click */ - -webkit-user-select: none; /* webkit (safari, chrome) browsers */ - -moz-user-select: none; /* mozilla browsers */ - -khtml-user-select: none; /* webkit (konqueror) browsers */ - -ms-user-select: none; /* IE10+ */ -} - -.netdata-message { - display: inline-block; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - text-align: left; - vertical-align: top; - font-weight: bold; - font-size: x-small; - overflow: hidden; - background: inherit; - z-index: 0; -} - -.netdata-message.hidden { - display: none; -} - -.netdata-message.icon { - color: #2f3338; - text-align: center; - vertical-align: middle; -} - -.netdata-chart-legend { - position: absolute; /* within .netdata-container */ - top: 0; - right: 0; - overflow: hidden; - text-overflow: ellipsis; - line-height: 14px; - display: block; - width: 140px; /* --legend-width */ - height: calc(100% - 15px); /* 10px for the resize handler and 5px for the top margin */ - font-size: 10px; - margin-top: 5px; - text-align: left; - /* width and height is calculated (depends on the appearance of the legend) */ -} - 
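The container classes above are applied by dashboard.js around each chart element, with width and height taken from the chart's data attributes. A minimal embedding sketch in JavaScript, assuming the data-netdata attribute convention that dashboard.js scans for (the chart id is illustrative):

    var div = document.createElement('div');
    div.setAttribute('data-netdata', 'system.cpu');      // chart to render
    div.setAttribute('data-chart-library', 'dygraph');   // one of NETDATA.chartLibraries
    div.setAttribute('data-width', '100%');
    div.setAttribute('data-height', '200px');
    document.body.appendChild(div);                      // picked up on the next DOM scan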
-.netdata-legend-title-date { - font-size: 10px; - font-weight: normal; - margin-top: 0px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.netdata-legend-title-time { - font-size: 11px; - font-weight: bold; - margin-top: 0px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.netdata-legend-title-units { - position: absolute; - right: 10px; - float: right; - font-size: 11px; - vertical-align: top; - font-weight: normal; - margin-top: 0px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.netdata-legend-series { - position: absolute; - width: 140px; /* legend-width */ - height: calc(100% - 50px); - overflow: hidden; - text-overflow: ellipsis; - line-height: 14.5px; /* line spacing at the legend */ - display: block; - font-size: 10px; - margin-top: 0px; -} - -.netdata-legend-name-table-line { - display: inline-block; - width: 13px; - height: 4px; - border-width: 0px; - border-bottom-width: 2px; - border-bottom-style: solid; - border-bottom-color: #272b30; -} - -.netdata-legend-name-table-area { - display: inline-block; - width: 13px; - height: 5px; - border-width: 1px; - border-top-width: 1px; - border-top-style: solid; - border-top-color: inherit; -} - -.netdata-legend-name-table-stacked { - display: inline-block; - width: 13px; - height: 5px; - border-width: 1px; - border-top-width: 1px; - border-top-style: solid; - border-top-color: inherit; -} - -.netdata-legend-name-tr { -} - -.netdata-legend-name-td { -} - -.netdata-legend-name { - text-align: left; - font-size: 11px; /* legend: dimension name size */ - font-weight: bold; - vertical-align: bottom; - margin-top: 0px; - z-index: 9; - padding: 0px; - width: 80px !important; - max-width: 80px !important; - text-overflow: ellipsis; - overflow: hidden; - white-space: nowrap; - display: inline-block; - cursor: pointer; - -webkit-print-color-adjust: exact; -} - -.netdata-legend-value { - /*margin-left: 14px;*/ - position: absolute; - right: 10px; - float: right; - text-align: right; - font-size: 11px; /* legend: dimension value size */ - font-weight: bold; - vertical-align: bottom; - background-color: #272b30; - margin-top: 0px; - z-index: 10; - padding: 0px; - padding-left: 15px; - cursor: pointer; - /* -webkit-font-smoothing: none; */ -} - -.netdata-legend-name.not-selected { - font-weight: normal; - opacity: 0.3; -} - -.netdata-chart { - position: absolute; /* within .netdata-container */ - top: 0; /* within .netdata-container */ - left: 0; /* within .netdata-container */ - display: inline-block; - overflow: hidden; - width: 100%; - height: 100%; - z-index: 5; - - /* width and height is calculated (depends on the appearance of the legend) */ -} - -.netdata-chart-with-legend-right { - position: absolute; /* within .netdata-container */ - top: 0; /* within .netdata-container */ - left: 0; /* within .netdata-container */ - display: block; - overflow: hidden; - margin-right: 140px; /* --legend-width */ - width: calc(100% - 140px); /* --legend-width */ - height: 100%; - z-index: 5; - flex-grow: 1; - - /* width and height is calculated (depends on the appearance of the legend) */ -} - -.netdata-peity-chart { - -} - -.netdata-sparkline-chart { - -} - -.netdata-dygraph-chart { - -} - -.netdata-morris-chart { - -} - -.netdata-google-chart { - -} - -.dygraph-ylabel { -} - -.dygraph-axis-label-x { - overflow-x: hidden; -} - -.dygraph-axis-label { - color: #6c7075; -} - -.dygraph-label-rotate-left { - text-align: center; - /* See http://caniuse.com/#feat=transforms2d */ - 
transform: rotate(90deg); - -webkit-transform: rotate(90deg); - -moz-transform: rotate(90deg); - -o-transform: rotate(90deg); - -ms-transform: rotate(90deg); -} - -/* For y2-axis label */ -.dygraph-label-rotate-right { - text-align: center; - /* See http://caniuse.com/#feat=transforms2d */ - transform: rotate(-90deg); - -webkit-transform: rotate(-90deg); - -moz-transform: rotate(-90deg); - -o-transform: rotate(-90deg); - -ms-transform: rotate(-90deg); -} - -.dygraph-title { - text-indent: 56px; - text-align: left; - position: absolute; - left: 0px; - top: 4px; - font-size: 11px; - font-weight: bold; - text-overflow: ellipsis; - overflow: hidden; - white-space: nowrap; -} - -/* fix for sparkline tooltip under bootstrap */ -.jqstooltip { - width: auto !important; - height: auto !important; -} - -.easyPieChart { - position: relative; - text-align: center; -} - -.easyPieChart canvas { - position: absolute; - top: 0; - left: 0; -} - -.easyPieChartLabel { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 100%; - text-align: center; - color: #BBB; - font-weight: normal; - text-shadow: #272b30 0px 0px 1px; - /* -webkit-font-smoothing: none; */ -} - -.easyPieChartTitle { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 64%; - margin-left: 18% !important; - text-align: center; - color: #676b70; - font-weight: bold; -} - -.easyPieChartUnits { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 60%; - margin-left: 20% !important; - text-align: center; - color: #676b70; - font-weight: normal; -} - -.gaugeChart { - position: relative; - text-align: center; -} - -.gaugeChart canvas { - position: absolute; - top: 0; - left: 0; - bottom: 0; - right: 0; - z-index: 0; -} - -.gaugeChartLabel { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 100%; - text-align: center; - color: #BBB; - font-weight: bold; - z-index: 1; - text-shadow: #272b30 0px 0px 1px; - /* text-shadow: #CCC 1px 1px 0px, #CCC -1px -1px 0px, #CCC 1px -1px 0px, #CCC -1px 1px 0px; */ - /* -webkit-text-stroke: 1px #777; */ - /* -webkit-font-smoothing: none; */ -} - -.gaugeChartTitle { - display: inline-block; - position: absolute; - float: left; - left: 0; - width: 100%; - text-align: center; - color: #676b70; - font-weight: bold; -} - -.gaugeChartUnits { - display: inline-block; - position: absolute; - float: left; - left: 0; - bottom: 0; - width: 100%; - text-align: left; - margin-left: 5%; - color: #676b70; - font-weight: normal; -} - -.gaugeChartMin { - display: inline-block; - position: absolute; - float: left; - left: 0; - bottom: 8%; - width: 92%; - margin-left: 8%; - text-align: left; - color: #676b70; - font-weight: normal; -} - -.gaugeChartMax { - display: inline-block; - position: absolute; - float: left; - left: 0; - bottom: 8%; - width: 95%; - margin-right: 5%; - text-align: right; - color: #676b70; - font-weight: normal; -} - -.popover-title { - font-weight: bold; - font-size: 12px; -} - -.popover-content { - font-size: 11px; -} - -/* ---------------------------------------------------------------------------- - perfect-scrollbar settings - */ - -.ps-container { - -ms-touch-action: auto; - touch-action: auto; - overflow: hidden !important; - -ms-overflow-style: none; -} - -@supports (-ms-overflow-style: none) { - .ps-container { - overflow: auto !important; - } -} - -@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { - .ps-container { - overflow: auto !important; - } -} - 
-.ps-container.ps-active-x > .ps-scrollbar-x-rail, -.ps-container.ps-active-y > .ps-scrollbar-y-rail { - display: block; - background-color: transparent; -} - -.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { - background-color: transparent; /* background color when dragged away */ - opacity: 0.9; -} - -.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { - background-color: #aaa; /* scrollbar color when dragged away */ - height: 5px; -} - -.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { - background-color: transparent; /* background color when dragged away */ - opacity: 0.9; -} - -.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { - background-color: #aaa; /* scrollbar color when dragged away */ - width: 5px; -} - -.ps-container > .ps-scrollbar-x-rail { - display: none; - position: absolute; - /* please don't change 'position' */ - opacity: 0.2; /* the opacity when not on hover of the content */ - -webkit-transition: background-color .2s linear, opacity .2s linear; - -o-transition: background-color .2s linear, opacity .2s linear; - -moz-transition: background-color .2s linear, opacity .2s linear; - transition: background-color .2s linear, opacity .2s linear; - bottom: 0px; - /* there must be 'bottom' for ps-scrollbar-x-rail */ - height: 15px; -} - -.ps-container > .ps-scrollbar-x-rail > .ps-scrollbar-x { - position: absolute; - /* please don't change 'position' */ - background-color: #666; /* #aaa; the color on content hover */ - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - bottom: 2px; - /* there must be 'bottom' for ps-scrollbar-x */ - height: 5px; /* the width of the scrollbar */ -} - -.ps-container > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x, .ps-container > .ps-scrollbar-x-rail:active > .ps-scrollbar-x { - height: 5px; -} - -.ps-container > .ps-scrollbar-y-rail { - display: none; - position: absolute; - /* please don't change 'position' */ - opacity: 0.2; /* the opacity when not on hover of the content */ - -webkit-transition: background-color .2s linear, opacity .2s linear; - -o-transition: background-color .2s linear, opacity .2s linear; - -moz-transition: background-color .2s linear, opacity .2s linear; - transition: background-color .2s linear, opacity .2s linear; - right: 0; - /* there must be 'right' for ps-scrollbar-y-rail */ - width: 15px; -} - -.ps-container > .ps-scrollbar-y-rail > .ps-scrollbar-y { - position: absolute; - /* please don't change 'position' */ - background-color: #666; /* #aaa; the color on content hover */ - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - -webkit-transition: background-color 
.2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; - -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; - transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; - right: 2px; - /* there must be 'right' for ps-scrollbar-y */ - width: 5px; /* the width of the scrollbar */ -} - -.ps-container > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y, .ps-container > .ps-scrollbar-y-rail:active > .ps-scrollbar-y { - width: 5px; -} - -.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { - background-color: transparent; /* background color when dragged */ - opacity: 0.9; -} - -.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { - background-color: #bbb; /* scrollbar color when dragged */ - height: 5px; -} - -.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { - background-color: transparent; /* background color when dragged */ - opacity: 0.9; -} - -.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { - background-color: #bbb; /* scrollbar color when dragged */ - width: 5px; -} - -.ps-container:hover > .ps-scrollbar-x-rail, -.ps-container:hover > .ps-scrollbar-y-rail { - opacity: 0.6; -} - -.ps-container:hover > .ps-scrollbar-x-rail:hover { - background-color: transparent; /* the background color on hover of the scrollbar */ - opacity: 0.9; -} - -.ps-container:hover > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x { - background-color: #999; /* scrollbar color on hover */ -} - -.ps-container:hover > .ps-scrollbar-y-rail:hover { - background-color: transparent; /* the background color on hover of the scrollbar */ - opacity: 0.9; -} - -.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y { - background-color: #999; /* scrollbar color on hover */ -} diff --git a/web/dashboard_info.js b/web/dashboard_info.js deleted file mode 100644 index 55d454e03..000000000 --- a/web/dashboard_info.js +++ /dev/null @@ -1,2070 +0,0 @@ - -var netdataDashboard = window.netdataDashboard || {}; - -// ---------------------------------------------------------------------------- -// menus - -// information about the main menus - -netdataDashboard.menu = { - 'system': { - title: 'System Overview', - icon: '', - info: 'Overview of the key system metrics.' - }, - - 'services': { - title: 'systemd Services', - icon: '', - info: 'Resource utilization of systemd services. netdata monitors all systemd services via cgroups (the resource accounting used by containers).' - }, - - 'ap': { - title: 'Access Points', - icon: '', - info: 'Performance metrics for the access points (i.e. wireless interfaces in AP mode) found on the system.' - }, - - 'tc': { - title: 'Quality of Service', - icon: '', - info: 'Netdata collects and visualizes tc class utilization using its tc-helper plugin. If you also use FireQOS for setting up QoS, netdata automatically collects interface and class names.
If your QoS configuration includes overheads calculation, the values shown here will include these overheads (the total bandwidth for the same interface, as reported in the Network Interfaces section, will be lower than the total bandwidth reported here). QoS data collection may have a slight time difference compared to the interface (QoS data collection uses a BASH script, so a shift of a few milliseconds in data collection is to be expected).' - }, - - 'net': { - title: 'Network Interfaces', - icon: '', - info: 'Performance metrics for network interfaces.' - }, - - 'ipv4': { - title: 'IPv4 Networking', - icon: '', - info: 'Metrics for the IPv4 stack of the system. Internet Protocol version 4 (IPv4) is the fourth version of the Internet Protocol (IP). It is one of the core protocols of standards-based internetworking methods in the Internet. IPv4 is a connectionless protocol for use on packet-switched networks. It operates on a best-effort delivery model, in that it does not guarantee delivery, nor does it assure proper sequencing or avoidance of duplicate delivery. These aspects, including data integrity, are addressed by an upper layer transport protocol, such as the Transmission Control Protocol (TCP).' - }, - - 'ipv6': { - title: 'IPv6 Networking', - icon: '', - info: 'Metrics for the IPv6 stack of the system. Internet Protocol version 6 (IPv6) is the most recent version of the Internet Protocol (IP), the communications protocol that provides an identification and location system for computers on networks and routes traffic across the Internet. IPv6 was developed by the Internet Engineering Task Force (IETF) to deal with the long-anticipated problem of IPv4 address exhaustion. IPv6 is intended to replace IPv4.' - }, - - 'ipvs': { - title: 'IP Virtual Server', - icon: '', - info: 'IPVS (IP Virtual Server) implements transport-layer load balancing inside the Linux kernel, so-called Layer-4 switching. IPVS running on a host acts as a load balancer at the front of a cluster of real servers; it can direct requests for TCP/UDP-based services to the real servers, and makes the services of the real servers appear as a virtual service on a single IP address.' - }, - - 'netfilter': { - title: 'Firewall (netfilter)', - icon: '', - info: 'Performance metrics of the netfilter components.' - }, - - 'ipfw': { - title: 'Firewall (ipfw)', - icon: '', - info: 'Counters and memory usage for the ipfw rules.' - }, - - 'cpu': { - title: 'CPUs', - icon: '', - info: 'Detailed information for each CPU of the system. A summary of the system for all CPUs can be found at the System Overview section.' - }, - - 'mem': { - title: 'Memory', - icon: '', - info: 'Detailed information about the memory management of the system.' - }, - - 'disk': { - title: 'Disks', - icon: '', - info: 'Charts with performance information for all the system disks. Special care has been given to present disk performance metrics in a way compatible with iostat -x. netdata by default prevents rendering performance charts for individual partitions and unmounted virtual disks. Disabled charts can still be enabled by configuring the relevant settings in the netdata configuration file.' - }, - - 'sensors': { - title: 'Sensors', - icon: '', - info: 'Readings of the configured system sensors.'
- }, - 'ipmi': { - title: 'IPMI', - icon: '', - info: 'The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system\'s CPU, firmware (BIOS or UEFI) and operating system.' - }, - - 'samba': { - title: 'Samba', - icon: '', - info: 'Performance metrics of the Samba file share operations of this system. Samba is an implementation of Windows services, including Windows SMB protocol file shares.' - }, - - 'nfsd': { - title: 'NFS Server', - icon: '', - info: 'Performance metrics of the Network File Server. NFS is a distributed file system protocol, allowing a user on a client computer to access files over a network, much like local storage is accessed. NFS, like many other protocols, builds on the Open Network Computing Remote Procedure Call (ONC RPC) system. NFS is an open standard defined in Request for Comments (RFC).' - }, - - 'nfs': { - title: 'NFS Client', - icon: '', - info: 'Performance metrics of the NFS operations of this system, acting as an NFS client.' - }, - - 'zfs': { - title: 'ZFS filesystem', - icon: '', - info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by arcstat.py and arc_summary.py.' - }, - - 'btrfs': { - title: 'BTRFS filesystem', - icon: '', - info: 'Disk space metrics for the BTRFS filesystem.' - }, - - 'apps': { - title: 'Applications', - icon: '', - info: 'Per application statistics are collected using netdata\'s apps.plugin. This plugin walks through all processes and aggregates statistics for applications of interest, defined in /etc/netdata/apps_groups.conf (the default is here). The plugin internally builds a process tree (much like ps fax does), and groups processes together (evaluating both child and parent processes) so that the result is always a chart with a predefined set of dimensions (of course, only application groups found running are reported). The reported values are compatible with top, although the netdata plugin also counts the resources of exited children (unlike top which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.', - height: 1.5 - }, - - 'users': { - title: 'Users', - icon: '', - info: 'Per user statistics are collected using netdata\'s apps.plugin. This plugin walks through all processes and aggregates statistics per user. The reported values are compatible with top, although the netdata plugin also counts the resources of exited children (unlike top which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.', - height: 1.5 - }, - - 'groups': { - title: 'User Groups', - icon: '', - info: 'Per user group statistics are collected using netdata\'s apps.plugin. This plugin walks through all processes and aggregates statistics per user group. The reported values are compatible with top, although the netdata plugin also counts the resources of exited children (unlike top which shows only the resources of the currently running processes).
So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.', - height: 1.5 - }, - - 'netdata': { - title: 'Netdata Monitoring', - icon: '', - info: 'Performance metrics for the operation of netdata itself and its plugins.' - }, - - 'example': { - title: 'Example Charts', - info: 'Example charts, demonstrating the external plugin architecture.' - }, - - 'cgroup': { - title: '', - icon: '', - info: 'Container resource utilization metrics. Netdata reads this information from cgroups (abbreviated from control groups), a Linux kernel feature that limits and accounts for the resource usage (CPU, memory, disk I/O, network, etc.) of a collection of processes. cgroups together with namespaces (that offer isolation between processes) provide what we usually call containers.' - }, - - 'cgqemu': { - title: '', - icon: '', - info: 'QEMU virtual machine resource utilization metrics. QEMU (short for Quick Emulator) is a free and open-source hosted hypervisor that performs hardware virtualization.' - }, - - 'fping': { - title: 'fping', - icon: '', - info: 'Network latency statistics, via fping. fping is a program to send ICMP echo probes to network hosts, similar to ping, but much better performing when pinging multiple hosts. fping versions after 3.15 can be directly used as netdata plugins.' - }, - - 'httpcheck': { - title: 'HTTP Check', - icon: '', - info: 'Web service availability and latency monitoring using HTTP checks. This plugin is a specialized version of the port check plugin.' - }, - - 'memcached': { - title: 'memcached', - icon: '', - info: 'Performance metrics for memcached. Memcached is a general-purpose distributed memory caching system. It is often used to speed up dynamic database-driven websites by caching data and objects in RAM to reduce the number of times an external data source (such as a database or API) must be read.' - }, - - 'mysql': { - title: 'MySQL', - icon: '', - info: 'Performance metrics for mysql, the open-source relational database management system (RDBMS).' - }, - - 'postgres': { - title: 'Postgres', - icon: '', - info: 'Performance metrics for PostgreSQL, the object-relational database (ORDBMS).' - }, - - 'redis': { - title: 'Redis', - icon: '', - info: 'Performance metrics for redis. Redis (REmote DIctionary Server) is a software project that implements data structure servers. It is open-source, networked, in-memory, and stores keys with optional durability.' - }, - - 'retroshare': { - title: 'RetroShare', - icon: '', - info: 'Performance metrics for RetroShare. RetroShare is open-source software for encrypted filesharing, serverless email, instant messaging, online chat, and BBS, based on a friend-to-friend network built on GNU Privacy Guard (GPG).' - }, - - 'ipfs': { - title: 'IPFS', - icon: '', - info: 'Performance metrics for the InterPlanetary File System (IPFS), a content-addressable, peer-to-peer hypermedia distribution protocol.' - }, - - 'phpfpm': { - title: 'PHP-FPM', - icon: '', - info: 'Performance metrics for PHP-FPM, an alternative FastCGI implementation for PHP.' - }, - - 'portcheck': { - title: 'Port Check', - icon: '', - info: 'Service availability and latency monitoring using port checks.'
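(Aside, a hedged sketch: besides title, icon and info, an entry may also carry presentation hints; the apps, users and groups entries above use height to scale their charts to 1.5x the default. The 'myservice' key and its strings below are hypothetical.)

// Hypothetical sketch only -- mixing descriptive fields with a presentation hint:
netdataDashboard.menu['myservice'] = {
    title: 'My Service',
    icon: '',
    info: 'Per-process statistics for the hypothetical myservice group.',
    height: 1.5   // render this section's charts 50% taller than the default
};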
- }, - - 'postfix': { - title: 'postfix', - icon: '', - info: undefined - }, - - 'dovecot': { - title: 'Dovecot', - icon: '', - info: undefined - }, - - 'hddtemp': { - title: 'HDD Temp', - icon: '', - info: undefined - }, - - 'nginx': { - title: 'nginx', - icon: '', - info: undefined - }, - - 'apache': { - title: 'Apache', - icon: '', - info: undefined - }, - - 'lighttpd': { - title: 'Lighttpd', - icon: '', - info: undefined - }, - - 'web_log': { - title: undefined, - icon: '', - info: 'Information extracted from a server log file. web_log plugin incrementally parses the server log file to provide, in real-time, a breakdown of key server performance metrics. For web servers, an extended log file format may optionally be used (for nginx and apache) offering timing information and bandwidth for both requests and responses. web_log plugin may also be configured to provide a breakdown of requests per URL pattern (check /etc/netdata/python.d/web_log.conf).' - }, - - 'named': { - title: 'named', - icon: '', - info: undefined - }, - - 'squid': { - title: 'squid', - icon: '', - info: undefined - }, - - 'nut': { - title: 'UPS', - icon: '', - info: undefined - }, - - 'apcupsd': { - title: 'UPS', - icon: '', - info: undefined - }, - - 'smawebbox': { - title: 'Solar Power', - icon: '', - info: undefined - }, - - 'fronius': { - title: 'Fronius', - icon: '', - info: undefined - }, - - 'stiebeleltron': { - title: 'Stiebel Eltron', - icon: '', - info: undefined - }, - - 'snmp': { - title: 'SNMP', - icon: '', - info: undefined - }, - - 'go_expvar': { - title: 'Go - expvars', - icon: '', - info: 'Statistics about running Go applications exposed by the expvar package.' - }, - - 'chrony': { - icon: '', - info: 'chronyd parameters about the system’s clock performance.' - }, - - 'couchdb': { - icon: '', - info: 'Performance metrics for CouchDB, the open-source, JSON document-based database with an HTTP API and multi-master replication.' - }, - - 'beanstalk': { - title: 'Beanstalkd', - icon: '', - info: 'Provides statistics on the beanstalkd server and any tubes available on that server using data pulled from beanstalkc.' - }, - - 'rabbitmq': { - title: 'RabbitMQ', - icon: '', - info: 'Performance data for the RabbitMQ open-source message broker.' - }, - - 'ceph': { - title: 'Ceph', - icon: '', - info: 'Provides statistics on the ceph cluster, the open-source distributed storage system.' - }, - - 'ntpd': { - title: 'ntpd', - icon: '', - info: 'Provides statistics for the internal variables of the Network Time Protocol daemon ntpd, optionally including the configured peers (if enabled in the module configuration). The module presents the performance metrics as shown by ntpq (the standard NTP query program) using NTP mode 6 UDP packets to communicate with the NTP server.' - } -}; - - - -// ---------------------------------------------------------------------------- -// submenus - -// information to be shown, just below each submenu - -// information about the submenus -netdataDashboard.submenu = { - 'web_log.squid_bandwidth': { - title: 'bandwidth', - info: 'Bandwidth of responses (sent) by squid. This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. FireQOS) for accurate accounting of the server bandwidth.' - }, - - 'web_log.squid_responses': { - title: 'responses', - info: 'Information related to the responses sent by squid.'
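(Aside, a hedged sketch of the key convention used in this object: submenu keys join the section name and the submenu name with a dot, and each entry may set a title and an info line. The key and text below are hypothetical.)

// Hypothetical sketch only -- a '<section>.<submenu>' key with title and info:
netdataDashboard.submenu['web_log.example_submenu'] = {
    title: 'example',
    info: 'Text rendered just below this submenu heading.'
};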
- }, - - 'web_log.squid_requests': { - title: 'requests', - info: 'Information related to the requests squid has received.' - }, - - 'web_log.squid_hierarchy': { - title: 'hierarchy', - info: 'Performance metrics for the squid hierarchy used to serve the requests.' - }, - - 'web_log.squid_squid_transport': { - title: 'transport' - }, - - 'web_log.squid_squid_cache': { - title: 'cache', - info: 'Performance metrics for the squid cache.' - }, - - 'web_log.squid_timings': { - title: 'timings', - info: 'Duration of squid requests. Unrealistic spikes may be reported, since squid logs the total time of the requests when they complete. Especially for HTTPS, the clients get a tunnel from the proxy and exchange requests directly with the upstream servers, so squid cannot evaluate the individual requests and reports the total time the tunnel was open.' - }, - - 'web_log.squid_clients': { - title: 'clients' - }, - - 'web_log.bandwidth': { - info: 'Bandwidth of requests (received) and responses (sent). received requires an extended log format (without it, the web server log does not have this information). This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the web server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. FireQOS) for accurate accounting of the web server bandwidth.' - }, - - 'web_log.urls': { - info: 'Number of requests for each URL pattern defined in /etc/netdata/python.d/web_log.conf. This chart counts all requests matching the URL patterns defined, independently of the web server response codes (i.e. both successful and unsuccessful).' - }, - - 'web_log.clients': { - info: 'Charts showing the number of unique client IPs accessing the web server.' - }, - - 'web_log.timings': { - info: 'Web server response timings - the time the web server needed to prepare and respond to requests. This requires an extended log format and its meaning is web server specific. For most web servers this accounts for the time from the reception of a complete request to the dispatch of the last byte of the response. So, it includes the network delays of responses, but it does not include the network delays of requests.' - }, - - 'mem.ksm': { - title: 'deduper (ksm)', - info: 'Kernel Same-page Merging (KSM) performance monitoring, read from several files in /sys/kernel/mm/ksm/. KSM is a memory-saving de-duplication feature in the Linux kernel (since version 2.6.32). The KSM daemon ksmd periodically scans those areas of user memory which have been registered with it, looking for pages of identical content which can be replaced by a single write-protected page (which is automatically copied if a process later wants to update its content). KSM was originally developed for use with KVM (where it was known as Kernel Shared Memory), to fit more virtual machines into physical memory, by sharing the data common between them. But it can be useful to any application which generates many instances of the same data.' - }, - - 'mem.hugepages': { - info: 'Hugepages is a feature that allows the kernel to utilize the multiple page size capabilities of modern hardware architectures. The kernel creates multiple pages of virtual memory, mapped from both physical RAM and swap. There is a mechanism in the CPU architecture called "Translation Lookaside Buffers" (TLB) to manage the mapping of virtual memory pages to actual physical memory addresses.
The TLB is a limited hardware resource, so utilizing a large amount of physical memory with the default page size consumes the TLB and adds processing overhead. By utilizing Huge Pages, the kernel is able to create pages of much larger sizes, each page consuming a single resource in the TLB. Huge Pages are pinned to physical RAM and cannot be swapped/paged out.' - }, - - 'mem.numa': { - info: 'Non-Uniform Memory Access (NUMA) is a hierarchical memory design in which memory access time depends on locality. Under NUMA, a processor can access its own local memory faster than non-local memory (memory local to another processor or memory shared between processors). The individual metrics are described in the Linux kernel documentation.' - }, - - 'ipv4.ecn': { - info: 'Explicit Congestion Notification (ECN) is a TCP extension that allows end-to-end notification of network congestion without dropping packets. ECN is an optional feature that may be used between two ECN-enabled endpoints when the underlying network infrastructure also supports it.' - }, - - 'netfilter.conntrack': { - title: 'connection tracker', - info: 'Netfilter Connection Tracker performance metrics. The connection tracker keeps track of all connections of the machine, inbound and outbound. It works by keeping a database with all open connections, tracking network address translation and connection expectations.' - }, - - 'netfilter.nfacct': { - title: 'bandwidth accounting', - info: 'The following information is read using the nfacct.plugin.' - }, - - 'netfilter.synproxy': { - title: 'DDoS protection', - info: 'DDoS protection performance metrics. SYNPROXY is a TCP SYN packets proxy. It is used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks. It is a netfilter module, in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections. It can be used for any kind of TCP traffic (even encrypted), since it does not interfere with the content itself.' - }, - - 'ipfw.dynamic_rules': { - title: 'dynamic rules', - info: 'Number of dynamic rules, created by the corresponding stateful firewall rules.' - }, - - 'system.softnet_stat': { - title: 'softnet', - info: function(os) { - if(os === 'linux') - return 'Statistics for CPU SoftIRQs related to network receive work. A breakdown per CPU core can be found at CPU / softnet statistics. processed states the number of packets processed, dropped is the number of packets dropped because the network device backlog was full (to fix them on Linux use sysctl to increase net.core.netdev_max_backlog), squeezed is the number of packets dropped because the network device budget ran out (to fix them on Linux use sysctl to increase net.core.netdev_budget). More information about identifying and troubleshooting network driver related issues can be found at Red Hat Enterprise Linux Network Performance Tuning Guide.'; - else - return 'Statistics for CPU SoftIRQs related to network receive work.'; - } - }, - - 'cpu.softnet_stat': { - title: 'softnet', - info: function(os) { - if(os === 'linux') - return 'Per-CPU-core statistics for SoftIRQs related to network receive work. The total for all CPU cores can be found at System / softnet statistics.
processed states the number of packets processed, dropped is the number of packets dropped because the network device backlog was full (to fix them on Linux use sysctl to increase net.core.netdev_max_backlog), squeezed is the number of packets dropped because the network device budget ran out (to fix them on Linux use sysctl to increase net.core.netdev_budget). More information about identifying and troubleshooting network driver related issues can be found at Red Hat Enterprise Linux Network Performance Tuning Guide.'; - else - return 'Per-CPU-core statistics for SoftIRQs related to network receive work. The total for all CPU cores can be found at System / softnet statistics.'; - } - }, - - 'go_expvar.memstats': { - title: 'memory statistics', - info: 'Go runtime memory statistics. See runtime.MemStats documentation for more info about each chart and the values.' - }, - - 'couchdb.dbactivity': { - title: 'db activity', - info: 'Overall database reads and writes for the entire server. This includes any external HTTP traffic, as well as internal replication traffic performed in a cluster to ensure node consistency.' - }, - - 'couchdb.httptraffic': { - title: 'http traffic breakdown', - info: 'All HTTP traffic, broken down by type of request (GET, PUT, POST, etc.) and response status code (200, 201, 4xx, etc.)
Any 5xx errors here indicate a likely CouchDB bug; check the logfile for further information.' - }, - - 'couchdb.ops': { - title: 'server operations' - }, - - 'couchdb.perdbstats': { - title: 'per db statistics', - info: 'Statistics per database. This includes 3 size graphs per database: active (the size of live data in the database), external (the uncompressed size of the database contents), and file (the size of the file on disk, exclusive of any views and indexes). It also includes the number of documents and number of deleted documents per database.' - }, - - 'couchdb.erlang': { - title: 'erlang statistics', - info: 'Detailed information about the status of the Erlang VM that hosts CouchDB. These are intended for advanced users only. High values of the peak message queue (>10e6) generally indicate an overload condition.' - }, - - 'ntpd.system': { - title: 'system', - info: 'Statistics of the system variables as shown by the readlist billboard ntpq -c rl. System variables are assigned an association ID of zero and can also be shown in the readvar billboard ntpq -c "rv 0". These variables are used in the Clock Discipline Algorithm to calculate the lowest and most stable offset.' - }, - - 'ntpd.peers': { - title: 'peers', - info: 'Statistics of the peer variables for each peer configured in /etc/ntp.conf as shown by the readvar billboard ntpq -c "rv <association>", while each peer is assigned a nonzero association ID as shown by ntpq -c "apeers". The module periodically scans for new/changed peers (default: every 60s). ntpd selects the best possible peer from the available peers to synchronize the clock. At least 3 peers are required to properly identify the best possible peer.' - } -}; - - - -// ---------------------------------------------------------------------------- -// chart - -// information works on the context of a chart -// Its purpose is to set: -// -// info: the text above the charts -// heads: the representation of the chart at the top of the subsection (second level menu) -// mainheads: the representation of the chart at the top of the section (first level menu) -// colors: the dimension colors of the chart (the default colors are appended) -// height: the ratio of the chart height relative to the default -// -netdataDashboard.context = { - 'system.cpu': { - info: function(os) { - void(os); - return 'Total CPU utilization (all cores). 100% here means there is no CPU idle time at all. You can get per core usage at the CPUs section and per application usage at the Applications Monitoring section.' - + netdataDashboard.sparkline('
Keep an eye on iowait ', 'system.cpu', 'iowait', '%', '. If it is constantly high, your disks are a bottleneck and they slow your system down.') - + netdataDashboard.sparkline('
An important metric worth monitoring is softirq ', 'system.cpu', 'softirq', '%', '. A constantly high percentage of softirq may indicate network driver issues.'); - }, - valueRange: "[0, 100]" - }, - - 'system.load': { - info: 'Current system load, i.e. the number of processes using CPU or waiting for system resources (usually CPU and disk). The 3 metrics refer to 1, 5 and 15 minute averages. The system calculates this once every 5 seconds. For more information check this Wikipedia article', - height: 0.7 - }, - - 'system.io': { - info: function(os) { - var s = 'Total Disk I/O, for all physical disks. You can get detailed information about each disk at the Disks section and per application Disk usage at the Applications Monitoring section.'; - - if(os === 'linux') - return s + ' Physical are all the disks that are listed in /sys/block, but do not exist in /sys/devices/virtual/block.'; - else - return s; - } - }, - - 'system.pgpgio': { - info: 'Memory paged from/to disk. This is usually the total disk I/O of the system.' - }, - - 'system.swapio': { - info: 'Total Swap I/O. (netdata measures both in and out. If either of them is not shown in the chart, it is because it is zero - you can change the page settings to always render all the available dimensions on all charts).' - }, - - 'system.pgfaults': { - info: 'Total page faults. Major page faults indicate that the system is using its swap. You can find which applications use the swap at the Applications Monitoring section.' - }, - - 'system.entropy': { - colors: '#CC22AA', - info: 'Entropy is a pool of random numbers (/dev/random) that is mainly used in cryptography. If the pool of entropy gets empty, processes requiring random numbers may run a lot slower (it depends on the interface each program uses), waiting for the pool to be replenished. Ideally a system with high entropy demands should have a hardware device for that purpose (TPM is one such device). There are also several software-only options you may install, like haveged, although these are generally useful only in servers.' - }, - - 'system.forks': { - colors: '#5555DD', - info: 'Number of new processes created.' - }, - - 'system.intr': { - colors: '#DD5555', - info: 'Total number of CPU interrupts. Check system.interrupts that gives more detail about each interrupt and also the CPUs section where interrupts are analyzed per CPU core.' - }, - - 'system.interrupts': { - info: 'CPU interrupts in detail. At the CPUs section, interrupts are analyzed per CPU core.' - }, - - 'system.softirqs': { - info: 'CPU softirqs in detail. At the CPUs section, softirqs are analyzed per CPU core.' - }, - - 'system.processes': { - info: 'System processes. Running are the processes in the CPU. Blocked are processes that are willing to enter the CPU, but they cannot, e.g. because they are waiting for disk activity.' - }, - - 'system.active_processes': { - info: 'All system processes.' - }, - - 'system.ctxt': { - info: 'Context switching is the switching of the CPU from one process, task or thread to another. If there are many processes or threads willing to execute and very few CPU cores available to handle them, the system performs more context switches to balance the CPU resources among them. The whole process is computationally intensive. The more context switches, the slower the system gets.' - }, - - 'system.idlejitter': { - info: 'Idle jitter is calculated by netdata. A thread is spawned that requests to sleep for a few microseconds.
When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. This number is useful in real-time environments, where CPU jitter can affect the quality of the service (like VoIP media gateways).' - }, - - 'system.net': { - info: function(os) { - var s = 'Total bandwidth of all physical network interfaces. This does not include lo, VPNs, network bridges, IFB devices, bond interfaces, etc. Only the bandwidth of physical network interfaces is aggregated.'; - - if(os === 'linux') - return s + ' Physical are all the network interfaces that are listed in /proc/net/dev, but do not exist in /sys/devices/virtual/net.'; - else - return s; - } - }, - - 'system.ipv4': { - info: 'Total IPv4 Traffic.' - }, - - 'system.ipv6': { - info: 'Total IPv6 Traffic.' - }, - - 'system.ram': { - info: 'System Random Access Memory (i.e. physical memory) usage.' - }, - - 'system.swap': { - info: 'System swap memory usage. Swap space is used when the amount of physical memory (RAM) is full. When the system needs more memory resources and the RAM is full, inactive pages in memory are moved to the swap space (usually a disk, a disk partition or a file).' - }, - - // ------------------------------------------------------------------------ - // CPU charts - - 'cpu.cpu': { - commonMin: true, - commonMax: true, - valueRange: "[0, 100]" - }, - - 'cpu.interrupts': { - commonMin: true, - commonMax: true - }, - - 'cpu.softirqs': { - commonMin: true, - commonMax: true - }, - - 'cpu.softnet_stat': { - commonMin: true, - commonMax: true - }, - - // ------------------------------------------------------------------------ - // MEMORY - - 'mem.ksm_savings': { - heads: [ - netdataDashboard.gaugeChart('Saved', '12%', 'savings', '#0099CC') - ] - }, - - 'mem.ksm_ratios': { - heads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'mem.pgfaults': { - info: 'A page fault is a type of interrupt, called trap, raised by computer hardware when a running program accesses a memory page that is mapped into the virtual address space, but not actually loaded into main memory. If the page is loaded in memory at the time the fault is generated, but is not marked in the memory management unit as being loaded in memory, then it is called a minor or soft page fault. A major page fault is generated when the system needs to load the memory page from disk or swap memory.' - }, - - 'mem.committed': { - colors: NETDATA.colors[3], - info: 'Committed Memory is the sum of all memory which has been allocated by processes.' - }, - - 'mem.available': { - info: 'Available Memory is estimated by the kernel as the amount of RAM that can be used by userspace processes, without causing swapping.' - }, - - 'mem.writeback': { - info: 'Dirty is the amount of memory waiting to be written to disk. Writeback is how much memory is actively being written to disk.' - }, - - 'mem.kernel': { - info: 'The total amount of memory being used by the kernel. Slab is the amount of memory used by the kernel to cache data structures for its own use. KernelStack is the amount of memory allocated for each task done by the kernel. PageTables is the amount of memory dedicated to the lowest level of page tables (A page table is used to turn a virtual address into a physical memory address). VmallocUsed is the amount of memory being used as virtual address space.' - }, - - 'mem.slab': { - info: 'Reclaimable is the amount of memory which the kernel can reuse. Unreclaimable cannot be reused even when the kernel is lacking memory.' - }, - - 'mem.hugepages': { - info: 'Dedicated (or Direct) HugePages is memory reserved for applications configured to utilize huge pages. Hugepages are used memory, even if there are free hugepages available.' - }, - - 'mem.transparent_hugepages': { - info: 'Transparent HugePages (THP) is backing virtual memory with huge pages, supporting automatic promotion and demotion of page sizes. It works for all applications for anonymous memory mappings and tmpfs/shmem.' - }, - - // ------------------------------------------------------------------------ - // network interfaces - - 'net.drops': { - info: 'Packets that have been dropped at the network interface level. These are the same counters reported by ifconfig as RX dropped (inbound) and TX dropped (outbound). inbound packets can be dropped at the network interface level due to softnet backlog overflow, bad / unintended VLAN tags, unknown or unregistered protocols, IPv6 frames when the server is not configured for IPv6. Check this document for more information.' - }, - - // ------------------------------------------------------------------------ - // IPv4 - - 'ipv4.tcpmemorypressures': { - info: 'Number of times a socket was put in memory pressure due to a non-fatal memory allocation failure (the kernel attempts to work around this situation by reducing the send buffers, etc).' - }, - - 'ipv4.tcpconnaborts': { - info: 'TCP connection aborts. baddata (TCPAbortOnData) happens while the connection is on FIN_WAIT1 and the kernel receives a packet with a sequence number beyond the last one for this connection - the kernel responds with RST (closes the connection). userclosed (TCPAbortOnClose) happens when the kernel receives data on an already closed connection and responds with RST.
nomemory (TCPAbortOnMemory) happens when there are too many orphaned sockets (not attached to an fd) and the kernel has to drop a connection - sometimes it will send an RST, sometimes it won\'t. timeout (TCPAbortOnTimeout) happens when a connection times out. linger (TCPAbortOnLinger) happens when the kernel killed a socket that was already closed by the application and lingered around for long enough. failed (TCPAbortFailed) happens when the kernel attempted to send an RST but failed because there was no memory available.' - }, - - 'ipv4.tcpsock': { - info: 'The number of established TCP connections (known as CurrEstab). This is a snapshot of the established connections at the time of measurement (i.e. a connection established and a connection disconnected within the same iteration will not affect this metric).' - }, - - 'ipv4.tcpopens': { - info: 'active or ActiveOpens is the number of outgoing TCP connections attempted by this host.' - + ' passive or PassiveOpens is the number of incoming TCP connections accepted by this host.' - }, - - 'ipv4.tcperrors': { - info: 'InErrs is the number of TCP segments received in error (including header too small, checksum errors, sequence errors, bad packets - for both IPv4 and IPv6).' - + ' InCsumErrors is the number of TCP segments received with checksum errors (for both IPv4 and IPv6).' - + ' RetransSegs is the number of TCP segments retransmitted.' - }, - - 'ipv4.tcphandshake': { - info: 'EstabResets is the number of established connection resets (i.e. connections that made a direct transition from ESTABLISHED or CLOSE_WAIT to CLOSED).' - + ' OutRsts is the number of TCP segments sent, with the RST flag set (for both IPv4 and IPv6).' - + ' AttemptFails is the number of times TCP connections made a direct transition from either SYN_SENT or SYN_RECV to CLOSED, plus the number of times TCP connections made a direct transition from SYN_RECV to LISTEN.' - + ' TCPSynRetrans shows retries for new outbound TCP connections, which can indicate general connectivity issues or backlog on the remote host.' - }, - - 'ipv4.tcplistenissues': { - info: 'overflows (or ListenOverflows) is the number of incoming connections that could not be handled because the receive queue of the application was full (for both IPv4 and IPv6).' - + ' drops (or ListenDrops) is the number of incoming connections that could not be handled, including SYN floods, overflows, out of memory, security issues, no route to destination, reception of related ICMP messages, socket is broadcast or multicast (for both IPv4 and IPv6).' - }, - - // ------------------------------------------------------------------------ - // APPS - - 'apps.cpu': { - height: 2.0 - }, - - 'apps.mem': { - info: 'Real memory (RAM) used by applications. This does not include shared memory.' - }, - - 'apps.vmem': { - info: 'Virtual memory allocated by applications. Please check this article for more information.' - }, - - 'apps.preads': { - height: 2.0 - }, - - 'apps.pwrites': { - height: 2.0 - }, - - // ------------------------------------------------------------------------ - // USERS - - 'users.cpu': { - height: 2.0 - }, - - 'users.mem': { - info: 'Real memory (RAM) used per user. This does not include shared memory.' - }, - - 'users.vmem': { - info: 'Virtual memory allocated per user. Please check this article for more information.'
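(Aside, a hedged sketch: several context entries above, such as system.io and system.net, compute their info text from the host OS instead of using a fixed string. The 'myapp.example' key below is hypothetical.)

// Hypothetical sketch only -- info as a function of the OS name:
netdataDashboard.context['myapp.example'] = {
    info: function (os) {
        var s = 'Text shown on every platform.';
        if (os === 'linux')
            return s + ' Extra detail that only applies to Linux.';
        return s;
    },
    height: 0.7   // optional presentation hint, as in system.load above
};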
- }, - - 'users.preads': { - height: 2.0 - }, - - 'users.pwrites': { - height: 2.0 - }, - - // ------------------------------------------------------------------------ - // GROUPS - - 'groups.cpu': { - height: 2.0 - }, - - 'groups.mem': { - info: 'Real memory (RAM) used per user group. This does not include shared memory.' - }, - - 'groups.vmem': { - info: 'Virtual memory allocated per user group. Please check this article for more information.' - }, - - 'groups.preads': { - height: 2.0 - }, - - 'groups.pwrites': { - height: 2.0 - }, - - // ------------------------------------------------------------------------ - // NETWORK QoS - - 'tc.qos': { - heads: [ - function(os, id) { - void(os); - - if(id.match(/.*-ifb$/)) - return netdataDashboard.gaugeChart('Inbound', '12%', '', '#5555AA'); - else - return netdataDashboard.gaugeChart('Outbound', '12%', '', '#AA9900'); - } - ] - }, - - // ------------------------------------------------------------------------ - // NETWORK INTERFACES - - 'net.net': { - mainheads: [ - function(os, id) { - void(os); - if(id.match(/^cgroup_.*/)) { - var iface; - try { - iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length); - } - catch (e) { - iface = ''; - } - return netdataDashboard.gaugeChart('Received' + iface, '12%', 'received'); - } - else - return ''; - }, - function(os, id) { - void(os); - if(id.match(/^cgroup_.*/)) { - var iface; - try { - iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length); - } - catch (e) { - iface = ''; - } - return netdataDashboard.gaugeChart('Sent' + iface, '12%', 'sent'); - } - else - return ''; - } - ], - heads: [ - function(os, id) { - void(os); - if(!id.match(/^cgroup_.*/)) - return netdataDashboard.gaugeChart('Received', '12%', 'received'); - else - return ''; - }, - function(os, id) { - void(os); - if(!id.match(/^cgroup_.*/)) - return netdataDashboard.gaugeChart('Sent', '12%', 'sent'); - else - return ''; - } - ] - }, - - // ------------------------------------------------------------------------ - // NETFILTER - - 'netfilter.sockets': { - colors: '#88AA00', - heads: [ - netdataDashboard.gaugeChart('Active Connections', '12%', '', '#88AA00') - ] - }, - - 'netfilter.new': { - heads: [ - netdataDashboard.gaugeChart('New Connections', '12%', 'new', '#5555AA') - ] - }, - - // ------------------------------------------------------------------------ - // DISKS - - 'disk.util': { - colors: '#FF5588', - heads: [ - netdataDashboard.gaugeChart('Utilization', '12%', '', '#FF5588') - ], - info: 'Disk Utilization measures the amount of time the disk was busy with something. This is not related to its performance. 100% means that the system always had an outstanding operation on the disk. Keep in mind that depending on the underlying technology of the disk, 100% here may or may not be an indication of congestion.' - }, - - 'disk.backlog': { - colors: '#0099CC', - info: 'Backlog is an indication of the duration of pending disk operations. On every I/O event the system is multiplying the time spent doing I/O since the last update of this field with the number of pending operations. While not accurate, this metric can provide an indication of the expected completion time of the operations in progress.' - }, - - 'disk.io': { - heads: [ - netdataDashboard.gaugeChart('Read', '12%', 'reads'), - netdataDashboard.gaugeChart('Write', '12%', 'writes') - ], - info: 'Amount of data transferred to and from disk.' - }, - - 'disk.ops': { - info: 'Completed disk I/O operations. 
Keep in mind the number of operations requested might be higher, since the system is able to merge operations adjacent to each other (see the merged operations chart).' - }, - - 'disk.qops': { - info: 'I/O operations currently in progress. This metric is a snapshot - it is not an average over the last interval.' - }, - - 'disk.iotime': { - height: 0.5, - info: 'The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute I/O operations in parallel.' - }, - 'disk.mops': { - height: 0.5, - info: 'The number of merged disk operations. The system is able to merge adjacent I/O operations, for example two 4KB reads can become one 8KB read before being given to the disk.' - }, - 'disk.svctm': { - height: 0.5, - info: 'The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations, the reported average service time will be misleading.' - }, - 'disk.avgsz': { - height: 0.5, - info: 'The average I/O operation size.' - }, - 'disk.await': { - height: 0.5, - info: 'The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.' - }, - - 'disk.space': { - info: 'Disk space utilization. reserved for root is automatically reserved by the system to prevent the root user from getting out of space.' - }, - 'disk.inodes': { - info: 'inodes (or index nodes) are filesystem objects (e.g. files and directories). On many types of file system implementations, the maximum number of inodes is fixed at filesystem creation, limiting the maximum number of files the filesystem can hold. It is possible for a device to run out of inodes. When this happens, new files cannot be created on the device, even though there may be free space available.' - }, - - 'mysql.net': { - info: 'The amount of data sent to mysql clients (out) and received from mysql clients (in).' - }, - - // ------------------------------------------------------------------------ - // MYSQL - - 'mysql.queries': { - info: 'The number of statements executed by the server.
  • queries counts the statements executed within stored SQL programs.
  • questions counts the statements sent to the mysql server by mysql clients.
  • slow queries counts the number of statements that took more than long_query_time seconds to be executed. For more information about slow queries check the mysql slow query log.
' - }, - - 'mysql.handlers': { - info: 'Usage of the internal handlers of mysql. This chart provides very good insights into what the mysql server is actually doing.' + - ' (if the chart is not showing all these dimensions, it is because they are zero - set Which dimensions to show? to All from the dashboard settings, to render even the zero values)
  • commit, the number of internal COMMIT statements.
  • delete, the number of times that rows have been deleted from tables.
  • prepare, a counter for the prepare phase of two-phase commit operations.
  • read first, the number of times the first entry in an index was read. A high value suggests that the server is doing a lot of full index scans; e.g. SELECT col1 FROM foo, with col1 indexed.
  • read key, the number of requests to read a row based on a key. If this value is high, it is a good indication that your tables are properly indexed for your queries.
  • read next, the number of requests to read the next row in key order. This value is incremented if you are querying an index column with a range constraint or if you are doing an index scan.
  • read prev, the number of requests to read the previous row in key order. This read method is mainly used to optimize ORDER BY ... DESC.
  • read rnd, the number of requests to read a row based on a fixed position. A high value indicates you are doing a lot of queries that require sorting of the result. You probably have a lot of queries that require MySQL to scan entire tables, or you have joins that do not use keys properly.
  • read rnd next, the number of requests to read the next row in the data file. This value is high if you are doing a lot of table scans. Generally this suggests that your tables are not properly indexed or that your queries are not written to take advantage of the indexes you have.
  • rollback, the number of requests for a storage engine to perform a rollback operation.
  • savepoint, the number of requests for a storage engine to place a savepoint.
  • savepoint rollback, the number of requests for a storage engine to roll back to a savepoint.
  • update, the number of requests to update a row in a table.
  • write, the number of requests to insert a row in a table.
' - }, - - 'mysql.table_locks': { - info: 'MySQL table locks counters:
  • immediate, the number of times that a request for a table lock could be granted immediately.
  • waited, the number of times that a request for a table lock could not be granted immediately and a wait was needed. If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.
' - }, - - // ------------------------------------------------------------------------ - // POSTGRESQL - - - 'postgres.db_stat_blks': { - info: 'Block reads from disk or cache.
  • blks_read: number of disk blocks read in this database.
  • blks_hit: number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache).
' - }, - 'postgres.db_stat_tuple_write': { - info: '
  • Number of rows inserted/updated/deleted.
  • conflicts: number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)
' - }, - 'postgres.db_stat_temp_bytes': { - info: 'Temporary files can be created on disk for sorts, hashes, and temporary query results.' - }, - 'postgres.db_stat_temp_files': { - info: '
  • files: number of temporary files created by queries. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing).
' - }, - 'postgres.archive_wal': { - info: 'WAL archiving.' - }, - 'postgres.checkpointer': { - info: 'Number of checkpoints.
  • scheduled: when checkpoint_timeout is reached.
  • requested: when max_wal_size is reached.
' + - 'For more information see WAL Configuration.' - }, - 'postgres.autovacuum': { - info: 'PostgreSQL databases require periodic maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum daemon. ' + - 'For more information see The Autovacuum Daemon.' - }, - 'postgres.standby_delta': { - info: 'Streaming replication delta.
  • sent_delta: replication delta sent to standby.
  • write_delta: replication delta written to disk by this standby.
  • flush_delta: replication delta flushed to disk by this standby server.
  • replay_delta: replication delta replayed into the database on this standby server.
' + - 'For more information see Synchronous Replication.' - }, - 'postgres.replication_slot': { - info: 'Replication slot files.
  • wal_keeped: WAL files retained by each replication slot.
  • pg_replslot_files: files present in pg_replslot.
' + - 'For more information see Replication Slots.' - }, - - - // ------------------------------------------------------------------------ - // APACHE - - 'apache.connections': { - colors: NETDATA.colors[4], - mainheads: [ - netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4]) - ] - }, - - 'apache.requests': { - colors: NETDATA.colors[0], - mainheads: [ - netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0]) - ] - }, - - 'apache.net': { - colors: NETDATA.colors[3], - mainheads: [ - netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3]) - ] - }, - - 'apache.workers': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'apache.bytesperreq': { - colors: NETDATA.colors[3], - height: 0.5 - }, - - 'apache.reqpersec': { - colors: NETDATA.colors[4], - height: 0.5 - }, - - 'apache.bytespersec': { - colors: NETDATA.colors[6], - height: 0.5 - }, - - - // ------------------------------------------------------------------------ - // LIGHTTPD - - 'lighttpd.connections': { - colors: NETDATA.colors[4], - mainheads: [ - netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4]) - ] - }, - - 'lighttpd.requests': { - colors: NETDATA.colors[0], - mainheads: [ - netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0]) - ] - }, - - 'lighttpd.net': { - colors: NETDATA.colors[3], - mainheads: [ - netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3]) - ] - }, - - 'lighttpd.workers': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'lighttpd.bytesperreq': { - colors: NETDATA.colors[3], - height: 0.5 - }, - - 'lighttpd.reqpersec': { - colors: NETDATA.colors[4], - height: 0.5 - }, - - 'lighttpd.bytespersec': { - colors: NETDATA.colors[6], - height: 0.5 - }, - - // ------------------------------------------------------------------------ - // NGINX - - 'nginx.connections': { - colors: NETDATA.colors[4], - mainheads: [ - netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4]) - ] - }, - - 'nginx.requests': { - colors: NETDATA.colors[0], - mainheads: [ - netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0]) - ] - }, - - // ------------------------------------------------------------------------ - // HTTP check - - 'httpcheck.responsetime': { - info: 'The response time describes the time passed between request and response. ' + - 'Currently, the accuracy of the response time is low, so it should be used as a reference only.' - }, - - 'httpcheck.responselength': { - info: 'The response length counts the number of characters in the response body. For static pages, this should be mostly constant.' - }, - - 'httpcheck.status': { - valueRange: "[0, 1]", - info: 'This chart verifies the response of the webserver. Each status dimension will have a value of 1 if triggered. ' + - 'Dimension success is 1 only if all constraints are satisfied. ' + - 'This chart is most useful for alarms or third-party apps.' - }, - - // ------------------------------------------------------------------------ - // NETDATA - - 'netdata.response_time': { - info: 'The netdata API response time measures the time netdata needed to serve requests. This time includes everything, from the reception of the first byte of a request, to the dispatch of the last byte of its reply, therefore it includes all network latencies involved (i.e. a client over a slow network will influence these metrics).' - }, - - // ------------------------------------------------------------------------ - // RETROSHARE - - 'retroshare.bandwidth': { - info: 'RetroShare inbound and outbound traffic.', - mainheads: [ - netdataDashboard.gaugeChart('Received', '12%', 'bandwidth_down_kb'), - netdataDashboard.gaugeChart('Sent', '12%', 'bandwidth_up_kb') - ] - }, - - 'retroshare.peers': { - info: 'Number of (connected) RetroShare friends.', - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'retroshare.dht': { - info: 'Statistics about RetroShare\'s DHT. These values are estimated!' - }, - - // ------------------------------------------------------------------------ - // fping - - 'fping.quality': { - colors: NETDATA.colors[10], - height: 0.5 - }, - - 'fping.packets': { - height: 0.5 - }, - - - // ------------------------------------------------------------------------ - // containers - - 'cgroup.cpu': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'cgroup.mem_usage': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'cgroup.throttle_io': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - }, - function(os, id) { - void(os); - return '
'; - } - ] - }, - - // ------------------------------------------------------------------------ - // beanstalkd - // system charts - 'beanstalk.cpu_usage': { - info: 'Amount of user and system CPU time used by beanstalkd.' - }, - - // This is also a per-tube stat - 'beanstalk.jobs_rate': { - info: 'The rate of jobs processed by the beanstalkd server.' - }, - - 'beanstalk.connections_rate': { - info: 'The rate of connections opened to beanstalkd.' - }, - - 'beanstalk.commands_rate': { - info: 'The rate of commands received by beanstalkd.' - }, - - 'beanstalk.current_tubes': { - info: 'Total number of current tubes on the server including the default tube (which always exists).' - }, - - 'beanstalk.current_jobs': { - info: 'Current number of jobs in all tubes grouped by status: urgent, ready, reserved, delayed and buried.' - }, - - 'beanstalk.current_connections': { - info: 'Current number of connections grouped by connection type: written, producers, workers, waiting.' - }, - - 'beanstalk.binlog': { - info: 'The rate of records written to binlog and migrated as part of compaction.' - }, - - 'beanstalk.uptime': { - info: 'Total time the beanstalkd server has been up.' - }, - - // tube charts - 'beanstalk.jobs': { - info: 'Number of jobs currently in the tube grouped by status: urgent, ready, reserved, delayed and buried.' - }, - - 'beanstalk.connections': { - info: 'The current number of connections to this tube grouped by connection type: using, waiting and watching.' - }, - - 'beanstalk.commands': { - info: 'The rate of delete and pause commands executed by beanstalkd.' - }, - - 'beanstalk.pause': { - info: 'Shows how long the tube has been paused, and how long remains on the pause.' - }, - - // ------------------------------------------------------------------------ - // ceph - - 'ceph.general_usage': { - info: 'The used and available space in the ceph cluster.' - }, - - 'ceph.general_objects': { - info: 'Total number of objects stored on the ceph cluster.' - }, - - 'ceph.general_bytes': { - info: 'Cluster read and write data per second.' - }, - - 'ceph.general_operations': { - info: 'Number of read and write operations per second.' - }, - - 'ceph.general_latency': { - info: 'Total of apply and commit latency in all OSDs. The apply latency is the total time taken to flush an update to disk. The commit latency is the total time taken to commit an operation to the journal.' - }, - - 'ceph.pool_usage': { - info: 'The used space in each pool.' - }, - - 'ceph.pool_objects': { - info: 'Number of objects present in each pool.' - }, - - 'ceph.pool_read_bytes': { - info: 'The rate of read data per second in each pool.' - }, - - 'ceph.pool_write_bytes': { - info: 'The rate of write data per second in each pool.' - }, - - 'ceph.pool_read_objects': { - info: 'Number of read objects per second in each pool.' - }, - - 'ceph.pool_write_objects': { - info: 'Number of write objects per second in each pool.' - }, - - 'ceph.osd_usage': { - info: 'The used space in each OSD.' - }, - - 'ceph.apply_latency': { - info: 'Time taken to flush an update in each OSD.' - }, - - 'ceph.commit_latency': { - info: 'Time taken to commit an operation to the journal in each OSD.' - }, - - // ------------------------------------------------------------------------ - // web_log - - 'web_log.response_statuses': { - info: 'Web server responses by type.
success includes 1xx, 2xx and 304, error includes 5xx, redirect includes 3xx except 304, bad includes 4xx, other are all the other responses.', - mainheads: [ - function(os, id) { - void(os); - return '
'; - }, - - function(os, id) { - void(os); - return '
'; - }, - - function(os, id) { - void(os); - return '
'; - }, - - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'web_log.response_codes': { - info: 'Web server responses by code family. ' + - 'According to the standards 1xx are informational responses, ' + - '2xx are successful responses, ' + - '3xx are redirects (although they include 304 which is used as "not modified"), ' + - '4xx are bad requests, ' + - '5xx are internal server errors, ' + - 'other are non-standard responses, ' + - 'unmatched counts the lines in the log file that are not matched by the plugin (let us know if you have any unmatched).' - }, - - 'web_log.response_time': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'web_log.detailed_response_codes': { - info: 'Number of responses for each response code individually.' - }, - - 'web_log.requests_per_ipproto': { - info: 'Web server requests received per IP protocol version.' - }, - - 'web_log.clients': { - info: 'Unique client IPs accessing the web server within each data collection iteration. If data collection is per second, this chart shows unique client IPs per second.' - }, - - 'web_log.clients_all': { - info: 'Unique client IPs accessing the web server since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the web server. On very busy web servers (several million unique IPs) you may want to disable this chart (check /etc/netdata/python.d/web_log.conf).' - }, - - // ------------------------------------------------------------------------ - // web_log for squid - - 'web_log.squid_response_statuses': { - info: 'Squid responses by type. ' + - 'success includes 1xx, 2xx, 000, 304, ' + - 'error includes 5xx and 6xx, ' + - 'redirect includes 3xx except 304, ' + - 'bad includes 4xx, ' + - 'other are all the other responses.', - mainheads: [ - function(os, id) { - void(os); - return '
'; - }, - - function(os, id) { - void(os); - return '
'; - }, - - function(os, id) { - void(os); - return '
'; - }, - - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'web_log.squid_response_codes': { - info: 'Web server responses by code family. ' + - 'According to HTTP standards 1xx are informational responses, ' + - '2xx are successful responses, ' + - '3xx are redirects (although they include 304 which is used as "not modified"), ' + - '4xx are bad requests, ' + - '5xx are internal server errors. ' + - 'Squid also defines 000 mostly for UDP requests, and ' + - '6xx for broken upstream servers sending wrong headers. ' + - 'Finally, other are non-standard responses, and ' + - 'unmatched counts the lines in the log file that are not matched by the plugin (let us know if you have any unmatched).' - }, - - 'web_log.squid_duration': { - mainheads: [ - function(os, id) { - void(os); - return '
'; - } - ] - }, - - 'web_log.squid_detailed_response_codes': { - info: 'Number of responses for each response code individually.' - }, - - 'web_log.squid_clients': { - info: 'Unique client IPs accessing squid within each data collection iteration. If data collection is per second, this chart shows unique client IPs per second.' - }, - - 'web_log.squid_clients_all': { - info: 'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several million unique IPs) you may want to disable this chart (check /etc/netdata/python.d/web_log.conf).' - }, - - 'web_log.squid_transport_methods': { - info: 'Breakdown per delivery method: TCP are requests on the HTTP port (usually 3128), ' + - 'UDP are requests on the ICP port (usually 3130), or HTCP port (usually 4128). ' + - 'If ICP logging was disabled using the log_icp_queries option, no ICP replies will be logged. ' + - 'NONE is used to state that squid delivered an unusual response or no response at all. ' + - 'Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. ' + - 'Also seen with responses to CONNECT requests.' - }, - - 'web_log.squid_code': { - info: 'These are combined squid result status codes. A breakdown per component is given in the following charts. ' + - 'Check the squid documentation about them.' - }, - - 'web_log.squid_handling_opts': { - info: 'These tags are optional and describe why the particular handling was performed or where the request came from. ' + - 'CLIENT means that the client request placed limits affecting the response. Usually seen when the client issued a no-cache or analogous cache control command along with the request. Thus, the cache has to validate the object. ' + - 'IMS states that the client sent a revalidation (conditional) request. ' + - 'ASYNC is used when the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from stale-while-revalidate cache controls, or ESI sub-objects being loaded. ' + - 'SWAPFAIL is assigned when the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server. ' + - 'REFRESH when a revalidation (conditional) request was sent to the server. ' + - 'SHARED when this request was combined with an existing transaction by collapsed forwarding. NOTE: the existing request is not marked as SHARED. ' + - 'REPLY when particular handling was requested in the HTTP reply from server or peer. Usually seen on DENIED due to http_reply_access ACLs preventing delivery of the server\'s response object to the client.' - }, - - 'web_log.squid_object_types': { - info: 'These tags are optional and describe what type of object was produced. ' + - 'NEGATIVE is only seen on HIT responses, indicating the response was a cached error response, e.g. 404 not found. ' + - 'STALE means the object was cached and served stale. This is usually caused by stale-while-revalidate or stale-if-error cache controls. ' + - 'OFFLINE when the requested object was retrieved from the cache during offline_mode. The offline mode never validates any object. ' + - 'INVALID when an invalid request was received. An error response was delivered indicating what the problem was. ' + - 'FAIL is only seen on REFRESH to indicate the revalidation request failed.
The response object may be the server-provided network error or the stale object which was being revalidated depending on stale-if-error cache control. ' + - 'MODIFIED is only seen on REFRESH responses to indicate revalidation produced a new modified object. ' + - 'UNMODIFIED is only seen on REFRESH responses to indicate revalidation produced a 304 (Not Modified) status, which was relayed to the client. ' + - 'REDIRECT when squid generated an HTTP redirect response to this request.' - }, - - 'web_log.squid_cache_events': { - info: 'These tags are optional and describe whether the response was loaded from cache, network, or otherwise. ' + - 'HIT when the response object delivered was the local cache object. ' + - 'MEM when the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses. ' + - 'MISS when the response object delivered was the network response object. ' + - 'DENIED when the request was denied by access controls. ' + - 'NOFETCH, an ICP-specific type, indicating service is alive, but not to be used for this request (sent during "-Y" startup, or during frequent failures, a cache in hit only mode will return either UDP_HIT or UDP_MISS_NOFETCH. Neighbours will thus only fetch hits). ' + - 'TUNNEL when a binary tunnel was established for this transaction.' - }, - - 'web_log.squid_transport_errors': { - info: 'These tags are optional and describe some error conditions which occurred during response delivery (if any). ' + - 'ABORTED when the response was not completed due to the connection being aborted (usually by the client). ' + - 'TIMEOUT when the response was not completed due to a connection timeout.' - }, - - // ------------------------------------------------------------------------ - // Fronius Solar Power - - 'fronius.power': { - info: 'Positive Grid values mean that power is coming from the grid. Negative values are excess power that is going back into the grid, possibly selling it. ' + - 'Photovoltaics is the power generated from the solar panels. ' + - 'Accumulator is the stored power in the accumulator, if one is present.' - }, - - 'fronius.autonomy': { - commonMin: true, - commonMax: true, - valueRange: "[0, 100]", - info: 'The Autonomy is the percentage of how autonomous the installation is. An autonomy of 100% means that the installation is producing more energy than is needed. ' + - 'The Self consumption indicates the ratio between the current power generated and the current load. When it reaches 100%, the Autonomy declines, since the solar panels cannot produce enough energy and need support from the grid.' - }, - - 'fronius.energy.today': { - commonMin: true, - commonMax: true, - valueRange: "[0, null]" - }, - - // ------------------------------------------------------------------------ - // Stiebel Eltron Heat pump installation - - 'stiebeleltron.system.roomtemp': { - commonMin: true, - commonMax: true, - valueRange: "[0, null]" - }, - - // ------------------------------------------------------------------------ - // Port check - - 'portcheck.latency': { - info: 'The latency describes the time spent connecting to a TCP port. No data is sent or received. ' + - 'Currently, the accuracy of the latency is low, so it should be used as a reference only.' - }, - - 'portcheck.status': { - valueRange: "[0, 1]", - info: 'The status chart verifies the availability of the service. ' + - 'Each status dimension will have a value of 1 if triggered. Dimension success is 1 only if connection could be established.'
- - // ------------------------------------------------------------------------ - - 'chrony.system': { - info: 'In normal operation, chronyd never steps the system clock, because any jump in the timescale can have adverse consequences for certain application programs. Instead, any error in the system clock is corrected by slightly speeding up or slowing down the system clock until the error has been removed, and then returning to the system clock\'s normal speed. A consequence of this is that there will be a period when the system clock (as read by other programs using the gettimeofday() system call, or by the date command in the shell) will be different from chronyd\'s estimate of the current true time (which it reports to NTP clients when it is operating in server mode). The value reported on this line is the difference due to this effect.', - colors: NETDATA.colors[3] - }, - - 'chrony.offsets': { - info: 'last offset is the estimated local offset on the last clock update. RMS offset is a long-term average of the offset value.', - height: 0.5 - }, - - 'chrony.stratum': { - info: 'The stratum indicates how many hops away we are from a computer with an attached reference clock. Such a computer is a stratum-1 computer.', - decimalDigits: 0, - height: 0.5 - }, - - 'chrony.root': { - info: 'Estimated delays against the root time server this system is synchronized with. delay is the total of the network path delays to the stratum-1 computer from which the computer is ultimately synchronised. dispersion is the total dispersion accumulated through all the computers back to the stratum-1 computer from which the computer is ultimately synchronised. Dispersion is due to system clock resolution, statistical measurement variations, etc.' - }, - - 'chrony.frequency': { - info: 'The frequency is the rate by which the system\'s clock would be wrong if chronyd was not correcting it. It is expressed in ppm (parts per million). For example, a value of 1 ppm would mean that when the system\'s clock thinks it has advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.', - colors: NETDATA.colors[0] - }, - - 'chrony.residualfreq': { - info: 'This shows the residual frequency for the currently selected reference source. It reflects any difference between what the measurements from the reference source indicate the frequency should be and the frequency currently being used. ' + - 'The reason this is not always zero is that a smoothing procedure is applied to the frequency. Each time a measurement from the reference source is obtained and a new residual frequency computed, the estimated accuracy of this residual is compared with the estimated accuracy (see skew) of the existing frequency value. A weighted average is computed for the new frequency, with weights depending on these accuracies. If the measurements from the reference source follow a consistent trend, the residual will be driven to zero over time.', - height: 0.5, - colors: NETDATA.colors[3] - }, - - 'chrony.skew': { - info: 'The estimated error bound on the frequency.', - height: 0.5, - colors: NETDATA.colors[5] - },
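// Worked example for the ppm figures in chrony.frequency above: a frequency
// error of f ppm drifts the clock by f microseconds per elapsed second. A
// sketch only; the function name is an assumption, not chrony code.
function clockDriftSeconds(ppm, elapsedSeconds) {
    return ppm * 1e-6 * elapsedSeconds;
}
// clockDriftSeconds(1, 86400) === 0.0864, i.e. roughly 86 ms of drift per day at 1 ppm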
- - 'couchdb.active_tasks': { - info: 'Active tasks running on this CouchDB cluster. Four types of tasks currently exist: indexer (view building), replication, database compaction and view compaction.' - }, - - 'couchdb.replicator_jobs': { - info: 'Detailed breakdown of any replication jobs in progress on this node. For more information, see the replicator documentation.' - }, - - 'couchdb.open_files': { - info: 'Count of all files held open by CouchDB. If this value seems pegged at 1024 or 4096, your server process is probably hitting the open file handle limit and the limit needs to be increased.' - }, - - 'btrfs.disk': { - info: 'Physical disk usage of BTRFS. The disk space reported here is the raw physical disk space assigned to the BTRFS volume (i.e. before any RAID levels). BTRFS uses a two-stage allocator, first allocating large regions of disk space for one type of block (data, metadata, or system), and then using a regular block allocator inside those regions. unallocated is the physical disk space that is not allocated yet and is available to become data, metadata or system on demand. When unallocated is zero, all available disk space has been allocated to a specific function. Healthy volumes should ideally have at least five percent of their total space unallocated. You can keep your volume healthy by running the btrfs balance command on it regularly (check man btrfs-balance for more info).' - }, - - 'btrfs.data': { - info: 'Logical disk usage for BTRFS data. Data chunks are used to store the actual file data (file contents). The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running btrfs balance can help here.' - }, - - 'btrfs.metadata': { - info: 'Logical disk usage for BTRFS metadata. Metadata chunks store most of the filesystem internal structures, as well as information like directory structure and file names. The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running btrfs balance can help here.' - }, - - 'btrfs.system': { - info: 'Logical disk usage for BTRFS system. System chunks store information about the allocation of other chunks. The disk space reported here is the usable allocation (i.e. after any striping or replication). The values reported here should be relatively small compared to Data and Metadata, and will scale with the volume size and overall space usage.' - }, - - // ------------------------------------------------------------------------ - // RabbitMQ - - // info: the text above the charts - // heads: the representation of the chart at the top of the subsection (second level menu) - // mainheads: the representation of the chart at the top of the section (first level menu) - // colors: the dimension colors of the chart (the default colors are appended) - // height: the ratio of the chart height relative to the default - - 'rabbitmq.queued_messages': { - info: 'Overall total of ready and unacknowledged queued messages. Messages that are delivered immediately are not counted here.' - }, - - 'rabbitmq.message_rates': { - info: 'Overall messaging rates including acknowledgements, deliveries, redeliveries, and publishes.' - }, - - 'rabbitmq.global_counts': { - info: 'Overall totals for channels, consumers, connections, queues and exchanges.' - }, - - 'rabbitmq.file_descriptors': { - info: 'Total number of used file descriptors. See Open File Limits for further details.', - colors: NETDATA.colors[3] - },
- - 'rabbitmq.sockets': { - info: 'Total number of used socket descriptors. Each used socket also counts as a used file descriptor. See Open File Limits for further details.', - colors: NETDATA.colors[3] - }, - - 'rabbitmq.processes': { - info: 'Total number of processes running within the Erlang VM. This is not the same as the number of processes running on the host.', - colors: NETDATA.colors[3] - }, - - 'rabbitmq.erlang_run_queue': { - info: 'Number of Erlang processes the Erlang schedulers have queued to run.', - colors: NETDATA.colors[3] - }, - - 'rabbitmq.memory': { - info: 'Total amount of memory used by RabbitMQ. This is a complex statistic that can be further analyzed in the management UI. See Memory for further details.', - colors: NETDATA.colors[3] - }, - - 'rabbitmq.disk_space': { - info: 'Total amount of disk space consumed by the message store(s). See Disk Space Limits for further details.', - colors: NETDATA.colors[3] - }, - - // ------------------------------------------------------------------------ - // ntpd - - 'ntpd.sys_offset': { - info: 'For hosts without any time-critical services an offset of < 100 ms should be acceptable even with high network latencies. For hosts with time-critical services an offset of about 0.01 ms or less can be achieved by using peers with low delays and configuring optimal poll exponent values.', - colors: NETDATA.colors[4] - }, - - 'ntpd.sys_jitter': { - info: 'The jitter statistics are exponentially-weighted RMS averages. The system jitter is defined in the NTPv4 specification; the clock jitter statistic is computed by the clock discipline module.' - }, - - 'ntpd.sys_frequency': { - info: 'The frequency offset is shown in ppm (parts per million) relative to the frequency of the system. The frequency correction needed for the clock can vary significantly between boots and also due to external influences like temperature or radiation.', - colors: NETDATA.colors[2], - height: 0.6 - }, - - 'ntpd.sys_wander': { - info: 'The wander statistics are exponentially-weighted RMS averages.', - colors: NETDATA.colors[3], - height: 0.6 - }, - - 'ntpd.sys_rootdelay': { - info: 'The rootdelay is the round-trip delay to the primary reference clock, similar to the delay shown by the ping command. A lower delay should result in a lower clock offset.', - colors: NETDATA.colors[1] - }, - - 'ntpd.sys_stratum': { - info: 'The distance in "hops" to the primary reference clock.', - colors: NETDATA.colors[5], - height: 0.3 - }, - - 'ntpd.sys_tc': { - info: 'Time constants and poll intervals are expressed as exponents of 2. The default poll exponent of 6 corresponds to a poll interval of 64 s. For typical Internet paths, the optimum poll interval is about 64 s. For fast LANs with modern computers, a poll exponent of 4 (16 s) is appropriate. The poll process sends NTP packets at intervals determined by the clock discipline algorithm.', - height: 0.5 - }, - - 'ntpd.sys_precision': { - colors: NETDATA.colors[6], - height: 0.2 - }, - - 'ntpd.peer_offset': { - info: 'The offset of the peer clock relative to the system clock in milliseconds. Smaller values here weight peers more heavily for selection after the initial synchronization of the local clock. For a system providing time service to other systems, these should be as low as possible.' - }, - - 'ntpd.peer_delay': { - info: 'The round-trip time (RTT) for communication with the peer, similar to the delay shown by the ping command. Not as critical as either the offset or jitter, but still factored into the selection algorithm (because as a general rule, lower delay means more accurate time). In most cases, it should be below 100 ms.' - },
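// The poll values in ntpd.sys_tc above (and peer_hpoll/peer_ppoll below) are
// log2 exponents; converting one to a poll interval in seconds is a single
// power of two. A sketch with an assumed function name, not ntpd code.
function pollIntervalSeconds(pollExponent) {
    return Math.pow(2, pollExponent);
}
// pollIntervalSeconds(6) === 64, pollIntervalSeconds(4) === 16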
- - 'ntpd.peer_dispersion': { - info: 'This is a measure of the estimated error between the peer and the local system. Lower values here are better.' - }, - - 'ntpd.peer_jitter': { - info: 'This is essentially a remote estimate of the peer\'s system_jitter value. Lower values here weight highly in favor of peer selection, and this is a good indicator of the overall quality of a given time server (good servers will have values not exceeding single-digit milliseconds here, with high-quality stratum-1 servers regularly having sub-millisecond jitter).' - }, - - 'ntpd.peer_xleave': { - info: 'This variable is used in interleaved mode (used only in NTP symmetric and broadcast modes). See NTP Interleaved Modes.' - }, - - 'ntpd.peer_rootdelay': { - info: 'For a stratum-1 server, this is the access latency for the reference clock. For lower-stratum servers, it is the sum of the peer_delay and peer_rootdelay of the system they are synchronized to. Similarly to peer_delay, lower values here are technically better, but have limited influence in peer selection.' - }, - - 'ntpd.peer_rootdisp': { - info: 'This is the same as peer_rootdelay, but measures accumulated peer_dispersion instead of accumulated peer_delay.' - }, - - 'ntpd.peer_hmode': { - info: 'The peer_hmode and peer_pmode variables indicate the NTP mode of the packets being sent to and received from a given peer. Mode 1 is symmetric active (both the local system and the remote peer have each other declared as peers in /etc/ntp.conf), Mode 2 is symmetric passive (only one side has the other declared as a peer), Mode 3 is client, Mode 4 is server, and Mode 5 is broadcast (also used for multicast and manycast operation).', - height: 0.2 - }, - - 'ntpd.peer_pmode': { - height: 0.2 - }, - - 'ntpd.peer_hpoll': { - info: 'The peer_hpoll and peer_ppoll variables are log2 representations of the polling interval in seconds.', - height: 0.5 - }, - - 'ntpd.peer_ppoll': { - height: 0.5 - }, - - 'ntpd.peer_precision': { - height: 0.2 - } - - // ------------------------------------------------------------------------ -};
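// The mode numbers listed under ntpd.peer_hmode above map to names as below;
// this lookup table is an illustrative aside, not part of the deleted file.
var NTP_PACKET_MODES = {
    1: 'symmetric active',
    2: 'symmetric passive',
    3: 'client',
    4: 'server',
    5: 'broadcast'   // also used for multicast and manycast operation
};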
diff --git a/web/dashboard_info_custom_example.js b/web/dashboard_info_custom_example.js deleted file mode 100644 index f9e255d77..000000000 --- a/web/dashboard_info_custom_example.js +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Custom netdata information file - * ------------------------------- - * - * Use this file to add custom information on netdata dashboards: - * - * 1. Copy it to a new filename (so that it will not be overwritten with netdata updates) - * 2. Edit it to fit your needs - * 3. Set the following option in /etc/netdata/netdata.conf : - * - * [web] - * custom dashboard_info.js = your_filename.js - * - * Using this file you can: - * - * 1. Overwrite or add messages to menus, submenus and charts. - * Use dashboard_info.js to find out what you can define. - * - * 2. Inject javascript code into the default netdata dashboard. - * - */ - -// ---------------------------------------------------------------------------- -// MENU -// -// - title the menu title to be rendered at the charts menu -// - icon html fragment of the icon to display -// - info html fragment for the description above all the menu charts - -customDashboard.menu = { - -}; - - -// ---------------------------------------------------------------------------- -// SUBMENU -// -// - title the submenu title to be rendered at the charts menu -// - info html fragment for the description above all the submenu charts - -customDashboard.submenu = { - -}; - - -// ---------------------------------------------------------------------------- -// CONTEXT (the template each chart is based on) -// -// - info html fragment for the description above the chart -// - height a ratio to the default as a decimal number: 1.0 = 100% -// - colors a single color or an array of colors to use for the dimensions -// - valuerange the y-range of the chart as an array [min, max] -// - heads an array of gauge charts to render above the submenu section -// - mainheads an array of gauge charts to render at the menu section - -customDashboard.context = { - -};
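// A hedged example of what a customized copy of the file above could contain:
// it overrides the description and height of a single chart context. The
// 'system.cpu' context name and the values are illustrative assumptions.
customDashboard.context = {
    'system.cpu': {
        info: 'Total CPU utilization of this host.',
        height: 1.5
    }
};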
diff --git a/web/favicon.ico b/web/favicon.ico deleted file mode 100644 index 821f7c402..000000000 Binary files a/web/favicon.ico and /dev/null differ diff --git a/web/goto-host-from-alarm.html b/web/goto-host-from-alarm.html deleted file mode 100644 index 40592134b..000000000 --- a/web/goto-host-from-alarm.html +++ /dev/null @@ -1,209 +0,0 @@ - [deleted HTML page; only its visible text is recoverable here: title "Goto a host you know...", status "Please wait...", and the note "This page can only find netdata URLs you have already visited and are linked to your account on this netdata registry."]
- - diff --git a/web/gui/Makefile.am b/web/gui/Makefile.am new file mode 100644 index 000000000..314ca3773 --- /dev/null +++ b/web/gui/Makefile.am @@ -0,0 +1,123 @@ +# +# Copyright (C) 2015 Alon Bar-Lev +# SPDX-License-Identifier: GPL-3.0-or-later +# +MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_web_DATA = \ + demo.html \ + demo2.html \ + demosites.html \ + demosites2.html \ + dashboard.html \ + dashboard.js \ + dashboard_info.js \ + dashboard_info_custom_example.js \ + dashboard.css \ + dashboard.slate.css \ + favicon.ico \ + goto-host-from-alarm.html \ + index.html \ + infographic.html \ + robots.txt \ + refresh-badges.js \ + registry.html \ + sitemap.xml \ + tv.html \ + version.txt \ + $(NULL) + +weblibdir=$(webdir)/lib +dist_weblib_DATA = \ + lib/bootstrap-3.3.7.min.js \ + lib/bootstrap-slider-10.0.0.min.js \ + lib/bootstrap-table-1.11.0.min.js \ + lib/bootstrap-table-export-1.11.0.min.js \ + lib/bootstrap-toggle-2.2.2.min.js \ + lib/clipboard-polyfill-be05dad.js \ + lib/c3-0.4.18.min.js \ + lib/d3-4.12.2.min.js \ + lib/d3pie-0.2.1-netdata-3.js \ + lib/dygraph-c91c859.min.js \ + lib/dygraph-smooth-plotter-c91c859.js \ + lib/fontawesome-all-5.0.1.min.js \ + lib/gauge-1.3.2.min.js \ + lib/jquery-2.2.4.min.js \ + lib/jquery.easypiechart-97b5824.min.js \ + lib/jquery.peity-3.2.0.min.js \ + lib/jquery.sparkline-2.1.2.min.js \ + lib/lz-string-1.4.4.min.js \ + lib/morris-0.5.1.min.js \ + lib/pako-1.0.6.min.js \ + lib/perfect-scrollbar-0.6.15.min.js \ + lib/raphael-2.2.4-min.js \ + lib/tableExport-1.6.0.min.js \ + $(NULL) + +webcssdir=$(webdir)/css +dist_webcss_DATA = \ + css/morris-0.5.1.css \ + css/bootstrap-3.3.7.css \ + css/bootstrap-theme-3.3.7.min.css \ + css/bootstrap-slate-flat-3.3.7.css \ + css/bootstrap-slider-10.0.0.min.css \ + css/bootstrap-toggle-2.2.2.min.css \ + css/c3-0.4.18.min.css \ + $(NULL) + +webfontsdir=$(webdir)/fonts +dist_webfonts_DATA = \ + fonts/glyphicons-halflings-regular.eot \ + fonts/glyphicons-halflings-regular.svg \ + fonts/glyphicons-halflings-regular.ttf \ + fonts/glyphicons-halflings-regular.woff \ + fonts/glyphicons-halflings-regular.woff2 \ + $(NULL) + +webimagesdir=$(webdir)/images +dist_webimages_DATA = \ + images/alert-128-orange.png \ + images/alert-128-red.png \ + images/alert-multi-size-orange.ico \ + images/alert-multi-size-red.ico \ + images/animated.gif \ + images/check-mark-2-128-green.png \ + images/check-mark-2-multi-size-green.ico \ + images/netdata.svg \ + images/post.png \ + images/seo-performance-16.png \ + images/seo-performance-24.png \ + images/seo-performance-32.png \ + images/seo-performance-48.png \ + images/seo-performance-64.png \ + images/seo-performance-72.png \ + images/seo-performance-114.png \ + images/seo-performance-128.png \ + images/seo-performance-256.png \ + images/seo-performance-512.png \ + images/seo-performance-multi-size.ico \ + images/seo-performance-multi-size.icns \ + $(NULL) + + +webwellknowndir=$(webdir)/.well-known +dist_webwellknown_DATA = \ + $(NULL) + +webdntdir=$(webdir)/.well-known/dnt +dist_webdnt_DATA = \ + .well-known/dnt/cookies \ + $(NULL) + +version.txt: + if test -d "$(top_srcdir)/.git"; then \ + git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ + fi > $@.tmp + test -s $@.tmp || echo 0 > $@.tmp + mv $@.tmp $@ + +.PHONY: version.txt diff --git a/web/gui/Makefile.in b/web/gui/Makefile.in new file mode 100644 index 000000000..2f79809ef --- /dev/null +++ b/web/gui/Makefile.in @@ -0,0 +1,779 @@ +# Makefile.in generated by automake 1.14.1 
from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/gui +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(dist_noinst_DATA) $(dist_web_DATA) $(dist_webcss_DATA) \ + $(dist_webdnt_DATA) $(dist_webfonts_DATA) \ + $(dist_webimages_DATA) $(dist_weblib_DATA) \ + $(dist_webwellknown_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) 
+am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" \ + "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" \ + "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" \ + "$(DESTDIR)$(webwellknowndir)" +DATA = $(dist_noinst_DATA) $(dist_web_DATA) $(dist_webcss_DATA) \ + $(dist_webdnt_DATA) $(dist_webfonts_DATA) \ + $(dist_webimages_DATA) $(dist_weblib_DATA) \ + $(dist_webwellknown_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ +OPTIONAL_MATH_LIBS = 
@OPTIONAL_MATH_LIBS@ +OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ + +# +# Copyright (C) 2015 Alon Bar-Lev +# SPDX-License-Identifier: GPL-3.0-or-later +# +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_web_DATA = \ + demo.html \ + demo2.html \ + demosites.html \ + demosites2.html \ + dashboard.html \ + dashboard.js \ + dashboard_info.js \ + dashboard_info_custom_example.js \ + dashboard.css \ + dashboard.slate.css \ + favicon.ico \ + goto-host-from-alarm.html \ + index.html \ + infographic.html \ + robots.txt \ + refresh-badges.js \ + registry.html \ + sitemap.xml \ + tv.html \ + version.txt \ + $(NULL) + +weblibdir = $(webdir)/lib +dist_weblib_DATA = \ + lib/bootstrap-3.3.7.min.js \ + lib/bootstrap-slider-10.0.0.min.js \ + lib/bootstrap-table-1.11.0.min.js \ + 
lib/bootstrap-table-export-1.11.0.min.js \ + lib/bootstrap-toggle-2.2.2.min.js \ + lib/clipboard-polyfill-be05dad.js \ + lib/c3-0.4.18.min.js \ + lib/d3-4.12.2.min.js \ + lib/d3pie-0.2.1-netdata-3.js \ + lib/dygraph-c91c859.min.js \ + lib/dygraph-smooth-plotter-c91c859.js \ + lib/fontawesome-all-5.0.1.min.js \ + lib/gauge-1.3.2.min.js \ + lib/jquery-2.2.4.min.js \ + lib/jquery.easypiechart-97b5824.min.js \ + lib/jquery.peity-3.2.0.min.js \ + lib/jquery.sparkline-2.1.2.min.js \ + lib/lz-string-1.4.4.min.js \ + lib/morris-0.5.1.min.js \ + lib/pako-1.0.6.min.js \ + lib/perfect-scrollbar-0.6.15.min.js \ + lib/raphael-2.2.4-min.js \ + lib/tableExport-1.6.0.min.js \ + $(NULL) + +webcssdir = $(webdir)/css +dist_webcss_DATA = \ + css/morris-0.5.1.css \ + css/bootstrap-3.3.7.css \ + css/bootstrap-theme-3.3.7.min.css \ + css/bootstrap-slate-flat-3.3.7.css \ + css/bootstrap-slider-10.0.0.min.css \ + css/bootstrap-toggle-2.2.2.min.css \ + css/c3-0.4.18.min.css \ + $(NULL) + +webfontsdir = $(webdir)/fonts +dist_webfonts_DATA = \ + fonts/glyphicons-halflings-regular.eot \ + fonts/glyphicons-halflings-regular.svg \ + fonts/glyphicons-halflings-regular.ttf \ + fonts/glyphicons-halflings-regular.woff \ + fonts/glyphicons-halflings-regular.woff2 \ + $(NULL) + +webimagesdir = $(webdir)/images +dist_webimages_DATA = \ + images/alert-128-orange.png \ + images/alert-128-red.png \ + images/alert-multi-size-orange.ico \ + images/alert-multi-size-red.ico \ + images/animated.gif \ + images/check-mark-2-128-green.png \ + images/check-mark-2-multi-size-green.ico \ + images/netdata.svg \ + images/post.png \ + images/seo-performance-16.png \ + images/seo-performance-24.png \ + images/seo-performance-32.png \ + images/seo-performance-48.png \ + images/seo-performance-64.png \ + images/seo-performance-72.png \ + images/seo-performance-114.png \ + images/seo-performance-128.png \ + images/seo-performance-256.png \ + images/seo-performance-512.png \ + images/seo-performance-multi-size.ico \ + images/seo-performance-multi-size.icns \ + $(NULL) + +webwellknowndir = $(webdir)/.well-known +dist_webwellknown_DATA = \ + $(NULL) + +webdntdir = $(webdir)/.well-known/dnt +dist_webdnt_DATA = \ + .well-known/dnt/cookies \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/gui/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/gui/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_webDATA: $(dist_web_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \ + done + +uninstall-dist_webDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir) +install-dist_webcssDATA: $(dist_webcss_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webcssdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webcssdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webcssdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webcssdir)" || exit $$?; \ + done + +uninstall-dist_webcssDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webcssdir)'; $(am__uninstall_files_from_dir) +install-dist_webdntDATA: $(dist_webdnt_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webdntdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webdntdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdntdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webdntdir)" || exit $$?; \ + done + +uninstall-dist_webdntDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webdntdir)'; $(am__uninstall_files_from_dir) +install-dist_webfontsDATA: $(dist_webfonts_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webfontsdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webfontsdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + 
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webfontsdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webfontsdir)" || exit $$?; \ + done + +uninstall-dist_webfontsDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webfontsdir)'; $(am__uninstall_files_from_dir) +install-dist_webimagesDATA: $(dist_webimages_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webimagesdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webimagesdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webimagesdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webimagesdir)" || exit $$?; \ + done + +uninstall-dist_webimagesDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webimagesdir)'; $(am__uninstall_files_from_dir) +install-dist_weblibDATA: $(dist_weblib_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(weblibdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(weblibdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(weblibdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(weblibdir)" || exit $$?; \ + done + +uninstall-dist_weblibDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(weblibdir)'; $(am__uninstall_files_from_dir) +install-dist_webwellknownDATA: $(dist_webwellknown_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webwellknowndir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webwellknowndir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webwellknowndir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webwellknowndir)" || exit $$?; \ + done + +uninstall-dist_webwellknownDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webwellknowndir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else 
d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" "$(DESTDIR)$(webwellknowndir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-dist_webDATA install-dist_webcssDATA \ + install-dist_webdntDATA install-dist_webfontsDATA \ + install-dist_webimagesDATA install-dist_weblibDATA \ + install-dist_webwellknownDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_webDATA uninstall-dist_webcssDATA \ + uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \ + uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \ + uninstall-dist_webwellknownDATA + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dist_webDATA install-dist_webcssDATA \ + install-dist_webdntDATA install-dist_webfontsDATA \ + install-dist_webimagesDATA install-dist_weblibDATA \ + install-dist_webwellknownDATA install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dist_webDATA uninstall-dist_webcssDATA \ + uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \ + uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \ + uninstall-dist_webwellknownDATA + + +version.txt: + if test -d "$(top_srcdir)/.git"; then \ + git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ + fi > $@.tmp + test -s $@.tmp || echo 0 > $@.tmp + mv $@.tmp $@ + +.PHONY: version.txt + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/gui/README.md b/web/gui/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/web/gui/css/bootstrap-3.3.7.css b/web/gui/css/bootstrap-3.3.7.css new file mode 100644 index 000000000..8c4db1f33 --- /dev/null +++ b/web/gui/css/bootstrap-3.3.7.css @@ -0,0 +1,6758 @@ +/*! + * Bootstrap v3.3.7 (http://getbootstrap.com) + * Copyright 2011-2016 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + * SPDX-License-Identifier: MIT + */ +/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ +html { + font-family: sans-serif; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; +} +body { + margin: 0; +} +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block; +} +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline; +} +audio:not([controls]) { + display: none; + height: 0; +} +[hidden], +template { + display: none; +} +a { + background-color: transparent; +} +a:active, +a:hover { + outline: 0; +} +abbr[title] { + border-bottom: 1px dotted; +} +b, +strong { + font-weight: bold; +} +dfn { + font-style: italic; +} +h1 { + margin: .67em 0; + font-size: 2em; +} +mark { + color: #000; + background: #ff0; +} +small { + font-size: 80%; +} +sub, +sup { + position: relative; + font-size: 75%; + line-height: 0; + vertical-align: baseline; +} +sup { + top: -.5em; +} +sub { + bottom: -.25em; +} +img { + border: 0; +} +svg:not(:root) { + overflow: hidden; +} +figure { + margin: 1em 40px; +} +hr { + height: 0; + -webkit-box-sizing: content-box; + -moz-box-sizing: content-box; + box-sizing: content-box; +} +pre { + overflow: auto; +} +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em; +} +button, +input, +optgroup, +select, +textarea { + margin: 0; + font: inherit; + color: inherit; +} +button { + overflow: visible; +} +button, +select { + text-transform: none; +} +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; +} +button[disabled], +html input[disabled] { + cursor: default; +} +button::-moz-focus-inner, +input::-moz-focus-inner { + padding: 0; + border: 0; +} +input { + line-height: normal; +} +input[type="checkbox"], +input[type="radio"] { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + padding: 0; +} +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto; +} +input[type="search"] { + -webkit-box-sizing: content-box; + -moz-box-sizing: content-box; + box-sizing: content-box; + -webkit-appearance: textfield; +} +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} +fieldset { + padding: .35em .625em .75em; + margin: 0 2px; + border: 1px solid #c0c0c0; +} +legend { + padding: 0; + border: 0; +} +textarea { + overflow: auto; +} +optgroup { + font-weight: bold; +} +table { + border-spacing: 0; + border-collapse: collapse; +} +td, +th { + padding: 0; +} +/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */ +@media print { + *, + *:before, + *:after { + color: #000 !important; + text-shadow: none !important; + background: transparent !important; + -webkit-box-shadow: none !important; + box-shadow: none !important; + } + a, + a:visited { + text-decoration: underline; + } + a[href]:after { + content: " (" attr(href) ")"; + } + abbr[title]:after { + content: " (" attr(title) ")"; + } + a[href^="#"]:after, + a[href^="javascript:"]:after { + content: ""; + } + pre, + blockquote { + border: 1px solid #999; + + page-break-inside: avoid; + } + thead { + display: table-header-group; + } + tr, + img { + page-break-inside: avoid; + } + img { + max-width: 100% !important; + } + p, + h2, + h3 { + orphans: 3; + widows: 3; + } + h2, + h3 { + page-break-after: avoid; + } + .navbar { + display: none; + } + .btn > .caret, + .dropup > .btn > .caret { + border-top-color: #000 !important; + } + .label { + border: 1px solid #000; + } + .table { + border-collapse: collapse !important; + } + .table td, + .table th { + background-color: #fff !important; + } + .table-bordered th, + .table-bordered td { + border: 1px solid #ddd !important; + } +} +@font-face { + font-family: 'Glyphicons Halflings'; + + src: url('../fonts/glyphicons-halflings-regular.eot'); + src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); +} +.glyphicon { + position: relative; + top: 1px; + display: inline-block; + font-family: 'Glyphicons Halflings'; + font-style: normal; + font-weight: normal; + line-height: 1; + + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} +.glyphicon-asterisk:before { + content: "\002a"; +} +.glyphicon-plus:before { + content: "\002b"; +} +.glyphicon-euro:before, +.glyphicon-eur:before { + content: "\20ac"; +} +.glyphicon-minus:before { + content: "\2212"; +} +.glyphicon-cloud:before { + content: "\2601"; +} +.glyphicon-envelope:before { + content: "\2709"; +} +.glyphicon-pencil:before { + content: "\270f"; +} +.glyphicon-glass:before { + content: "\e001"; +} +.glyphicon-music:before { + content: "\e002"; +} +.glyphicon-search:before { + content: "\e003"; +} +.glyphicon-heart:before { + content: "\e005"; +} +.glyphicon-star:before { + content: "\e006"; +} +.glyphicon-star-empty:before { + content: "\e007"; +} +.glyphicon-user:before { + content: "\e008"; +} +.glyphicon-film:before { + content: "\e009"; +} +.glyphicon-th-large:before { + content: "\e010"; +} +.glyphicon-th:before { + content: "\e011"; +} +.glyphicon-th-list:before { + content: "\e012"; +} +.glyphicon-ok:before { + content: "\e013"; +} +.glyphicon-remove:before { + content: "\e014"; +} +.glyphicon-zoom-in:before { + content: "\e015"; +} +.glyphicon-zoom-out:before { + content: "\e016"; +} +.glyphicon-off:before { + content: "\e017"; +} +.glyphicon-signal:before { + content: "\e018"; +} +.glyphicon-cog:before { + content: "\e019"; +} +.glyphicon-trash:before { + content: "\e020"; +} +.glyphicon-home:before { + content: "\e021"; +} +.glyphicon-file:before { + content: "\e022"; +} +.glyphicon-time:before { + content: "\e023"; +} +.glyphicon-road:before { + content: "\e024"; +} +.glyphicon-download-alt:before { + content: 
"\e025"; +} +.glyphicon-download:before { + content: "\e026"; +} +.glyphicon-upload:before { + content: "\e027"; +} +.glyphicon-inbox:before { + content: "\e028"; +} +.glyphicon-play-circle:before { + content: "\e029"; +} +.glyphicon-repeat:before { + content: "\e030"; +} +.glyphicon-refresh:before { + content: "\e031"; +} +.glyphicon-list-alt:before { + content: "\e032"; +} +.glyphicon-lock:before { + content: "\e033"; +} +.glyphicon-flag:before { + content: "\e034"; +} +.glyphicon-headphones:before { + content: "\e035"; +} +.glyphicon-volume-off:before { + content: "\e036"; +} +.glyphicon-volume-down:before { + content: "\e037"; +} +.glyphicon-volume-up:before { + content: "\e038"; +} +.glyphicon-qrcode:before { + content: "\e039"; +} +.glyphicon-barcode:before { + content: "\e040"; +} +.glyphicon-tag:before { + content: "\e041"; +} +.glyphicon-tags:before { + content: "\e042"; +} +.glyphicon-book:before { + content: "\e043"; +} +.glyphicon-bookmark:before { + content: "\e044"; +} +.glyphicon-print:before { + content: "\e045"; +} +.glyphicon-camera:before { + content: "\e046"; +} +.glyphicon-font:before { + content: "\e047"; +} +.glyphicon-bold:before { + content: "\e048"; +} +.glyphicon-italic:before { + content: "\e049"; +} +.glyphicon-text-height:before { + content: "\e050"; +} +.glyphicon-text-width:before { + content: "\e051"; +} +.glyphicon-align-left:before { + content: "\e052"; +} +.glyphicon-align-center:before { + content: "\e053"; +} +.glyphicon-align-right:before { + content: "\e054"; +} +.glyphicon-align-justify:before { + content: "\e055"; +} +.glyphicon-list:before { + content: "\e056"; +} +.glyphicon-indent-left:before { + content: "\e057"; +} +.glyphicon-indent-right:before { + content: "\e058"; +} +.glyphicon-facetime-video:before { + content: "\e059"; +} +.glyphicon-picture:before { + content: "\e060"; +} +.glyphicon-map-marker:before { + content: "\e062"; +} +.glyphicon-adjust:before { + content: "\e063"; +} +.glyphicon-tint:before { + content: "\e064"; +} +.glyphicon-edit:before { + content: "\e065"; +} +.glyphicon-share:before { + content: "\e066"; +} +.glyphicon-check:before { + content: "\e067"; +} +.glyphicon-move:before { + content: "\e068"; +} +.glyphicon-step-backward:before { + content: "\e069"; +} +.glyphicon-fast-backward:before { + content: "\e070"; +} +.glyphicon-backward:before { + content: "\e071"; +} +.glyphicon-play:before { + content: "\e072"; +} +.glyphicon-pause:before { + content: "\e073"; +} +.glyphicon-stop:before { + content: "\e074"; +} +.glyphicon-forward:before { + content: "\e075"; +} +.glyphicon-fast-forward:before { + content: "\e076"; +} +.glyphicon-step-forward:before { + content: "\e077"; +} +.glyphicon-eject:before { + content: "\e078"; +} +.glyphicon-chevron-left:before { + content: "\e079"; +} +.glyphicon-chevron-right:before { + content: "\e080"; +} +.glyphicon-plus-sign:before { + content: "\e081"; +} +.glyphicon-minus-sign:before { + content: "\e082"; +} +.glyphicon-remove-sign:before { + content: "\e083"; +} +.glyphicon-ok-sign:before { + content: "\e084"; +} +.glyphicon-question-sign:before { + content: "\e085"; +} +.glyphicon-info-sign:before { + content: "\e086"; +} +.glyphicon-screenshot:before { + content: "\e087"; +} +.glyphicon-remove-circle:before { + content: "\e088"; +} +.glyphicon-ok-circle:before { + content: "\e089"; +} +.glyphicon-ban-circle:before { + content: "\e090"; +} +.glyphicon-arrow-left:before { + content: "\e091"; +} +.glyphicon-arrow-right:before { + content: "\e092"; +} +.glyphicon-arrow-up:before { + 
content: "\e093"; +} +.glyphicon-arrow-down:before { + content: "\e094"; +} +.glyphicon-share-alt:before { + content: "\e095"; +} +.glyphicon-resize-full:before { + content: "\e096"; +} +.glyphicon-resize-small:before { + content: "\e097"; +} +.glyphicon-exclamation-sign:before { + content: "\e101"; +} +.glyphicon-gift:before { + content: "\e102"; +} +.glyphicon-leaf:before { + content: "\e103"; +} +.glyphicon-fire:before { + content: "\e104"; +} +.glyphicon-eye-open:before { + content: "\e105"; +} +.glyphicon-eye-close:before { + content: "\e106"; +} +.glyphicon-warning-sign:before { + content: "\e107"; +} +.glyphicon-plane:before { + content: "\e108"; +} +.glyphicon-calendar:before { + content: "\e109"; +} +.glyphicon-random:before { + content: "\e110"; +} +.glyphicon-comment:before { + content: "\e111"; +} +.glyphicon-magnet:before { + content: "\e112"; +} +.glyphicon-chevron-up:before { + content: "\e113"; +} +.glyphicon-chevron-down:before { + content: "\e114"; +} +.glyphicon-retweet:before { + content: "\e115"; +} +.glyphicon-shopping-cart:before { + content: "\e116"; +} +.glyphicon-folder-close:before { + content: "\e117"; +} +.glyphicon-folder-open:before { + content: "\e118"; +} +.glyphicon-resize-vertical:before { + content: "\e119"; +} +.glyphicon-resize-horizontal:before { + content: "\e120"; +} +.glyphicon-hdd:before { + content: "\e121"; +} +.glyphicon-bullhorn:before { + content: "\e122"; +} +.glyphicon-bell:before { + content: "\e123"; +} +.glyphicon-certificate:before { + content: "\e124"; +} +.glyphicon-thumbs-up:before { + content: "\e125"; +} +.glyphicon-thumbs-down:before { + content: "\e126"; +} +.glyphicon-hand-right:before { + content: "\e127"; +} +.glyphicon-hand-left:before { + content: "\e128"; +} +.glyphicon-hand-up:before { + content: "\e129"; +} +.glyphicon-hand-down:before { + content: "\e130"; +} +.glyphicon-circle-arrow-right:before { + content: "\e131"; +} +.glyphicon-circle-arrow-left:before { + content: "\e132"; +} +.glyphicon-circle-arrow-up:before { + content: "\e133"; +} +.glyphicon-circle-arrow-down:before { + content: "\e134"; +} +.glyphicon-globe:before { + content: "\e135"; +} +.glyphicon-wrench:before { + content: "\e136"; +} +.glyphicon-tasks:before { + content: "\e137"; +} +.glyphicon-filter:before { + content: "\e138"; +} +.glyphicon-briefcase:before { + content: "\e139"; +} +.glyphicon-fullscreen:before { + content: "\e140"; +} +.glyphicon-dashboard:before { + content: "\e141"; +} +.glyphicon-paperclip:before { + content: "\e142"; +} +.glyphicon-heart-empty:before { + content: "\e143"; +} +.glyphicon-link:before { + content: "\e144"; +} +.glyphicon-phone:before { + content: "\e145"; +} +.glyphicon-pushpin:before { + content: "\e146"; +} +.glyphicon-usd:before { + content: "\e148"; +} +.glyphicon-gbp:before { + content: "\e149"; +} +.glyphicon-sort:before { + content: "\e150"; +} +.glyphicon-sort-by-alphabet:before { + content: "\e151"; +} +.glyphicon-sort-by-alphabet-alt:before { + content: "\e152"; +} +.glyphicon-sort-by-order:before { + content: "\e153"; +} +.glyphicon-sort-by-order-alt:before { + content: "\e154"; +} +.glyphicon-sort-by-attributes:before { + content: "\e155"; +} +.glyphicon-sort-by-attributes-alt:before { + content: "\e156"; +} +.glyphicon-unchecked:before { + content: "\e157"; +} +.glyphicon-expand:before { + content: "\e158"; +} +.glyphicon-collapse-down:before { + content: "\e159"; +} +.glyphicon-collapse-up:before { + content: "\e160"; +} +.glyphicon-log-in:before { + content: "\e161"; +} +.glyphicon-flash:before { + 
content: "\e162"; +} +.glyphicon-log-out:before { + content: "\e163"; +} +.glyphicon-new-window:before { + content: "\e164"; +} +.glyphicon-record:before { + content: "\e165"; +} +.glyphicon-save:before { + content: "\e166"; +} +.glyphicon-open:before { + content: "\e167"; +} +.glyphicon-saved:before { + content: "\e168"; +} +.glyphicon-import:before { + content: "\e169"; +} +.glyphicon-export:before { + content: "\e170"; +} +.glyphicon-send:before { + content: "\e171"; +} +.glyphicon-floppy-disk:before { + content: "\e172"; +} +.glyphicon-floppy-saved:before { + content: "\e173"; +} +.glyphicon-floppy-remove:before { + content: "\e174"; +} +.glyphicon-floppy-save:before { + content: "\e175"; +} +.glyphicon-floppy-open:before { + content: "\e176"; +} +.glyphicon-credit-card:before { + content: "\e177"; +} +.glyphicon-transfer:before { + content: "\e178"; +} +.glyphicon-cutlery:before { + content: "\e179"; +} +.glyphicon-header:before { + content: "\e180"; +} +.glyphicon-compressed:before { + content: "\e181"; +} +.glyphicon-earphone:before { + content: "\e182"; +} +.glyphicon-phone-alt:before { + content: "\e183"; +} +.glyphicon-tower:before { + content: "\e184"; +} +.glyphicon-stats:before { + content: "\e185"; +} +.glyphicon-sd-video:before { + content: "\e186"; +} +.glyphicon-hd-video:before { + content: "\e187"; +} +.glyphicon-subtitles:before { + content: "\e188"; +} +.glyphicon-sound-stereo:before { + content: "\e189"; +} +.glyphicon-sound-dolby:before { + content: "\e190"; +} +.glyphicon-sound-5-1:before { + content: "\e191"; +} +.glyphicon-sound-6-1:before { + content: "\e192"; +} +.glyphicon-sound-7-1:before { + content: "\e193"; +} +.glyphicon-copyright-mark:before { + content: "\e194"; +} +.glyphicon-registration-mark:before { + content: "\e195"; +} +.glyphicon-cloud-download:before { + content: "\e197"; +} +.glyphicon-cloud-upload:before { + content: "\e198"; +} +.glyphicon-tree-conifer:before { + content: "\e199"; +} +.glyphicon-tree-deciduous:before { + content: "\e200"; +} +.glyphicon-cd:before { + content: "\e201"; +} +.glyphicon-save-file:before { + content: "\e202"; +} +.glyphicon-open-file:before { + content: "\e203"; +} +.glyphicon-level-up:before { + content: "\e204"; +} +.glyphicon-copy:before { + content: "\e205"; +} +.glyphicon-paste:before { + content: "\e206"; +} +.glyphicon-alert:before { + content: "\e209"; +} +.glyphicon-equalizer:before { + content: "\e210"; +} +.glyphicon-king:before { + content: "\e211"; +} +.glyphicon-queen:before { + content: "\e212"; +} +.glyphicon-pawn:before { + content: "\e213"; +} +.glyphicon-bishop:before { + content: "\e214"; +} +.glyphicon-knight:before { + content: "\e215"; +} +.glyphicon-baby-formula:before { + content: "\e216"; +} +.glyphicon-tent:before { + content: "\26fa"; +} +.glyphicon-blackboard:before { + content: "\e218"; +} +.glyphicon-bed:before { + content: "\e219"; +} +.glyphicon-apple:before { + content: "\f8ff"; +} +.glyphicon-erase:before { + content: "\e221"; +} +.glyphicon-hourglass:before { + content: "\231b"; +} +.glyphicon-lamp:before { + content: "\e223"; +} +.glyphicon-duplicate:before { + content: "\e224"; +} +.glyphicon-piggy-bank:before { + content: "\e225"; +} +.glyphicon-scissors:before { + content: "\e226"; +} +.glyphicon-bitcoin:before { + content: "\e227"; +} +.glyphicon-btc:before { + content: "\e227"; +} +.glyphicon-xbt:before { + content: "\e227"; +} +.glyphicon-yen:before { + content: "\00a5"; +} +.glyphicon-jpy:before { + content: "\00a5"; +} +.glyphicon-ruble:before { + content: "\20bd"; +} 
+.glyphicon-rub:before { + content: "\20bd"; +} +.glyphicon-scale:before { + content: "\e230"; +} +.glyphicon-ice-lolly:before { + content: "\e231"; +} +.glyphicon-ice-lolly-tasted:before { + content: "\e232"; +} +.glyphicon-education:before { + content: "\e233"; +} +.glyphicon-option-horizontal:before { + content: "\e234"; +} +.glyphicon-option-vertical:before { + content: "\e235"; +} +.glyphicon-menu-hamburger:before { + content: "\e236"; +} +.glyphicon-modal-window:before { + content: "\e237"; +} +.glyphicon-oil:before { + content: "\e238"; +} +.glyphicon-grain:before { + content: "\e239"; +} +.glyphicon-sunglasses:before { + content: "\e240"; +} +.glyphicon-text-size:before { + content: "\e241"; +} +.glyphicon-text-color:before { + content: "\e242"; +} +.glyphicon-text-background:before { + content: "\e243"; +} +.glyphicon-object-align-top:before { + content: "\e244"; +} +.glyphicon-object-align-bottom:before { + content: "\e245"; +} +.glyphicon-object-align-horizontal:before { + content: "\e246"; +} +.glyphicon-object-align-left:before { + content: "\e247"; +} +.glyphicon-object-align-vertical:before { + content: "\e248"; +} +.glyphicon-object-align-right:before { + content: "\e249"; +} +.glyphicon-triangle-right:before { + content: "\e250"; +} +.glyphicon-triangle-left:before { + content: "\e251"; +} +.glyphicon-triangle-bottom:before { + content: "\e252"; +} +.glyphicon-triangle-top:before { + content: "\e253"; +} +.glyphicon-console:before { + content: "\e254"; +} +.glyphicon-superscript:before { + content: "\e255"; +} +.glyphicon-subscript:before { + content: "\e256"; +} +.glyphicon-menu-left:before { + content: "\e257"; +} +.glyphicon-menu-right:before { + content: "\e258"; +} +.glyphicon-menu-down:before { + content: "\e259"; +} +.glyphicon-menu-up:before { + content: "\e260"; +} +* { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +*:before, +*:after { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +html { + font-size: 10px; + + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); +} +body { + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-size: 14px; + line-height: 1.42857143; + color: #333; + background-color: #fff; +} +input, +button, +select, +textarea { + font-family: inherit; + font-size: inherit; + line-height: inherit; +} +a { + color: #337ab7; + text-decoration: none; +} +a:hover, +a:focus { + color: #23527c; + text-decoration: underline; +} +a:focus { + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +figure { + margin: 0; +} +img { + vertical-align: middle; +} +.img-responsive, +.thumbnail > img, +.thumbnail a > img, +.carousel-inner > .item > img, +.carousel-inner > .item > a > img { + display: block; + max-width: 100%; + height: auto; +} +.img-rounded { + border-radius: 6px; +} +.img-thumbnail { + display: inline-block; + max-width: 100%; + height: auto; + padding: 4px; + line-height: 1.42857143; + background-color: #fff; + border: 1px solid #ddd; + border-radius: 4px; + -webkit-transition: all .2s ease-in-out; + -o-transition: all .2s ease-in-out; + transition: all .2s ease-in-out; +} +.img-circle { + border-radius: 50%; +} +hr { + margin-top: 20px; + margin-bottom: 20px; + border: 0; + border-top: 1px solid #eee; +} +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + border: 0; +} +.sr-only-focusable:active, +.sr-only-focusable:focus { + position: 
static; + width: auto; + height: auto; + margin: 0; + overflow: visible; + clip: auto; +} +[role="button"] { + cursor: pointer; +} +h1, +h2, +h3, +h4, +h5, +h6, +.h1, +.h2, +.h3, +.h4, +.h5, +.h6 { + font-family: inherit; + font-weight: 500; + line-height: 1.1; + color: inherit; +} +h1 small, +h2 small, +h3 small, +h4 small, +h5 small, +h6 small, +.h1 small, +.h2 small, +.h3 small, +.h4 small, +.h5 small, +.h6 small, +h1 .small, +h2 .small, +h3 .small, +h4 .small, +h5 .small, +h6 .small, +.h1 .small, +.h2 .small, +.h3 .small, +.h4 .small, +.h5 .small, +.h6 .small { + font-weight: normal; + line-height: 1; + color: #777; +} +h1, +.h1, +h2, +.h2, +h3, +.h3 { + margin-top: 20px; + margin-bottom: 10px; +} +h1 small, +.h1 small, +h2 small, +.h2 small, +h3 small, +.h3 small, +h1 .small, +.h1 .small, +h2 .small, +.h2 .small, +h3 .small, +.h3 .small { + font-size: 65%; +} +h4, +.h4, +h5, +.h5, +h6, +.h6 { + margin-top: 10px; + margin-bottom: 10px; +} +h4 small, +.h4 small, +h5 small, +.h5 small, +h6 small, +.h6 small, +h4 .small, +.h4 .small, +h5 .small, +.h5 .small, +h6 .small, +.h6 .small { + font-size: 75%; +} +h1, +.h1 { + font-size: 36px; +} +h2, +.h2 { + font-size: 30px; +} +h3, +.h3 { + font-size: 24px; +} +h4, +.h4 { + font-size: 18px; +} +h5, +.h5 { + font-size: 14px; +} +h6, +.h6 { + font-size: 12px; +} +p { + margin: 0 0 10px; +} +.lead { + margin-bottom: 20px; + font-size: 16px; + font-weight: 300; + line-height: 1.4; +} +@media (min-width: 768px) { + .lead { + font-size: 21px; + } +} +small, +.small { + font-size: 85%; +} +mark, +.mark { + padding: .2em; + background-color: #fcf8e3; +} +.text-left { + text-align: left; +} +.text-right { + text-align: right; +} +.text-center { + text-align: center; +} +.text-justify { + text-align: justify; +} +.text-nowrap { + white-space: nowrap; +} +.text-lowercase { + text-transform: lowercase; +} +.text-uppercase { + text-transform: uppercase; +} +.text-capitalize { + text-transform: capitalize; +} +.text-muted { + color: #777; +} +.text-primary { + color: #337ab7; +} +a.text-primary:hover, +a.text-primary:focus { + color: #286090; +} +.text-success { + color: #3c763d; +} +a.text-success:hover, +a.text-success:focus { + color: #2b542c; +} +.text-info { + color: #31708f; +} +a.text-info:hover, +a.text-info:focus { + color: #245269; +} +.text-warning { + color: #8a6d3b; +} +a.text-warning:hover, +a.text-warning:focus { + color: #66512c; +} +.text-danger { + color: #a94442; +} +a.text-danger:hover, +a.text-danger:focus { + color: #843534; +} +.bg-primary { + color: #fff; + background-color: #337ab7; +} +a.bg-primary:hover, +a.bg-primary:focus { + background-color: #286090; +} +.bg-success { + background-color: #dff0d8; +} +a.bg-success:hover, +a.bg-success:focus { + background-color: #c1e2b3; +} +.bg-info { + background-color: #d9edf7; +} +a.bg-info:hover, +a.bg-info:focus { + background-color: #afd9ee; +} +.bg-warning { + background-color: #fcf8e3; +} +a.bg-warning:hover, +a.bg-warning:focus { + background-color: #f7ecb5; +} +.bg-danger { + background-color: #f2dede; +} +a.bg-danger:hover, +a.bg-danger:focus { + background-color: #e4b9b9; +} +.page-header { + padding-bottom: 9px; + margin: 40px 0 20px; + border-bottom: 1px solid #eee; +} +ul, +ol { + margin-top: 0; + margin-bottom: 10px; +} +ul ul, +ol ul, +ul ol, +ol ol { + margin-bottom: 0; +} +.list-unstyled { + padding-left: 0; + list-style: none; +} +.list-inline { + padding-left: 0; + margin-left: -5px; + list-style: none; +} +.list-inline > li { + display: inline-block; + padding-right: 5px; 
+ padding-left: 5px; +} +dl { + margin-top: 0; + margin-bottom: 20px; +} +dt, +dd { + line-height: 1.42857143; +} +dt { + font-weight: bold; +} +dd { + margin-left: 0; +} +@media (min-width: 768px) { + .dl-horizontal dt { + float: left; + width: 160px; + overflow: hidden; + clear: left; + text-align: right; + text-overflow: ellipsis; + white-space: nowrap; + } + .dl-horizontal dd { + margin-left: 180px; + } +} +abbr[title], +abbr[data-original-title] { + cursor: help; + border-bottom: 1px dotted #777; +} +.initialism { + font-size: 90%; + text-transform: uppercase; +} +blockquote { + padding: 10px 20px; + margin: 0 0 20px; + font-size: 17.5px; + border-left: 5px solid #eee; +} +blockquote p:last-child, +blockquote ul:last-child, +blockquote ol:last-child { + margin-bottom: 0; +} +blockquote footer, +blockquote small, +blockquote .small { + display: block; + font-size: 80%; + line-height: 1.42857143; + color: #777; +} +blockquote footer:before, +blockquote small:before, +blockquote .small:before { + content: '\2014 \00A0'; +} +.blockquote-reverse, +blockquote.pull-right { + padding-right: 15px; + padding-left: 0; + text-align: right; + border-right: 5px solid #eee; + border-left: 0; +} +.blockquote-reverse footer:before, +blockquote.pull-right footer:before, +.blockquote-reverse small:before, +blockquote.pull-right small:before, +.blockquote-reverse .small:before, +blockquote.pull-right .small:before { + content: ''; +} +.blockquote-reverse footer:after, +blockquote.pull-right footer:after, +.blockquote-reverse small:after, +blockquote.pull-right small:after, +.blockquote-reverse .small:after, +blockquote.pull-right .small:after { + content: '\00A0 \2014'; +} +address { + margin-bottom: 20px; + font-style: normal; + line-height: 1.42857143; +} +code, +kbd, +pre, +samp { + font-family: Menlo, Monaco, Consolas, "Courier New", monospace; +} +code { + padding: 2px 4px; + font-size: 90%; + color: #c7254e; + background-color: #f9f2f4; + border-radius: 4px; +} +kbd { + padding: 2px 4px; + font-size: 90%; + color: #fff; + background-color: #333; + border-radius: 3px; + -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25); + box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25); +} +kbd kbd { + padding: 0; + font-size: 100%; + font-weight: bold; + -webkit-box-shadow: none; + box-shadow: none; +} +pre { + display: block; + padding: 9.5px; + margin: 0 0 10px; + font-size: 13px; + line-height: 1.42857143; + color: #333; + word-break: break-all; + word-wrap: break-word; + background-color: #f5f5f5; + border: 1px solid #ccc; + border-radius: 4px; +} +pre code { + padding: 0; + font-size: inherit; + color: inherit; + white-space: pre-wrap; + background-color: transparent; + border-radius: 0; +} +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; +} +.container { + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} +@media (min-width: 768px) { + .container { + width: 750px; + } +} +@media (min-width: 992px) { + .container { + width: 970px; + } +} +@media (min-width: 1200px) { + .container { + width: 1170px; + } +} +.container-fluid { + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} +.row { + margin-right: -15px; + margin-left: -15px; +} +.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, 
.col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 { + position: relative; + min-height: 1px; + padding-right: 15px; + padding-left: 15px; +} +.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 { + float: left; +} +.col-xs-12 { + width: 100%; +} +.col-xs-11 { + width: 91.66666667%; +} +.col-xs-10 { + width: 83.33333333%; +} +.col-xs-9 { + width: 75%; +} +.col-xs-8 { + width: 66.66666667%; +} +.col-xs-7 { + width: 58.33333333%; +} +.col-xs-6 { + width: 50%; +} +.col-xs-5 { + width: 41.66666667%; +} +.col-xs-4 { + width: 33.33333333%; +} +.col-xs-3 { + width: 25%; +} +.col-xs-2 { + width: 16.66666667%; +} +.col-xs-1 { + width: 8.33333333%; +} +.col-xs-pull-12 { + right: 100%; +} +.col-xs-pull-11 { + right: 91.66666667%; +} +.col-xs-pull-10 { + right: 83.33333333%; +} +.col-xs-pull-9 { + right: 75%; +} +.col-xs-pull-8 { + right: 66.66666667%; +} +.col-xs-pull-7 { + right: 58.33333333%; +} +.col-xs-pull-6 { + right: 50%; +} +.col-xs-pull-5 { + right: 41.66666667%; +} +.col-xs-pull-4 { + right: 33.33333333%; +} +.col-xs-pull-3 { + right: 25%; +} +.col-xs-pull-2 { + right: 16.66666667%; +} +.col-xs-pull-1 { + right: 8.33333333%; +} +.col-xs-pull-0 { + right: auto; +} +.col-xs-push-12 { + left: 100%; +} +.col-xs-push-11 { + left: 91.66666667%; +} +.col-xs-push-10 { + left: 83.33333333%; +} +.col-xs-push-9 { + left: 75%; +} +.col-xs-push-8 { + left: 66.66666667%; +} +.col-xs-push-7 { + left: 58.33333333%; +} +.col-xs-push-6 { + left: 50%; +} +.col-xs-push-5 { + left: 41.66666667%; +} +.col-xs-push-4 { + left: 33.33333333%; +} +.col-xs-push-3 { + left: 25%; +} +.col-xs-push-2 { + left: 16.66666667%; +} +.col-xs-push-1 { + left: 8.33333333%; +} +.col-xs-push-0 { + left: auto; +} +.col-xs-offset-12 { + margin-left: 100%; +} +.col-xs-offset-11 { + margin-left: 91.66666667%; +} +.col-xs-offset-10 { + margin-left: 83.33333333%; +} +.col-xs-offset-9 { + margin-left: 75%; +} +.col-xs-offset-8 { + margin-left: 66.66666667%; +} +.col-xs-offset-7 { + margin-left: 58.33333333%; +} +.col-xs-offset-6 { + margin-left: 50%; +} +.col-xs-offset-5 { + margin-left: 41.66666667%; +} +.col-xs-offset-4 { + margin-left: 33.33333333%; +} +.col-xs-offset-3 { + margin-left: 25%; +} +.col-xs-offset-2 { + margin-left: 16.66666667%; +} +.col-xs-offset-1 { + margin-left: 8.33333333%; +} +.col-xs-offset-0 { + margin-left: 0; +} +@media (min-width: 768px) { + .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 { + float: left; + } + .col-sm-12 { + width: 100%; + } + .col-sm-11 { + width: 91.66666667%; + } + .col-sm-10 { + width: 83.33333333%; + } + .col-sm-9 { + width: 75%; + } + .col-sm-8 { + width: 66.66666667%; + } + .col-sm-7 { + width: 58.33333333%; + } + .col-sm-6 { + width: 50%; + } + .col-sm-5 { + width: 41.66666667%; + } + .col-sm-4 { + width: 33.33333333%; + } + .col-sm-3 { + width: 25%; + } + .col-sm-2 { + width: 16.66666667%; + } + .col-sm-1 { + width: 8.33333333%; + } + .col-sm-pull-12 { + right: 100%; + } + .col-sm-pull-11 { + right: 91.66666667%; + } + .col-sm-pull-10 { + right: 83.33333333%; + } + .col-sm-pull-9 { + right: 75%; + } + .col-sm-pull-8 { + right: 66.66666667%; + } + .col-sm-pull-7 { + right: 58.33333333%; + } + .col-sm-pull-6 { + right: 50%; 
+ } + .col-sm-pull-5 { + right: 41.66666667%; + } + .col-sm-pull-4 { + right: 33.33333333%; + } + .col-sm-pull-3 { + right: 25%; + } + .col-sm-pull-2 { + right: 16.66666667%; + } + .col-sm-pull-1 { + right: 8.33333333%; + } + .col-sm-pull-0 { + right: auto; + } + .col-sm-push-12 { + left: 100%; + } + .col-sm-push-11 { + left: 91.66666667%; + } + .col-sm-push-10 { + left: 83.33333333%; + } + .col-sm-push-9 { + left: 75%; + } + .col-sm-push-8 { + left: 66.66666667%; + } + .col-sm-push-7 { + left: 58.33333333%; + } + .col-sm-push-6 { + left: 50%; + } + .col-sm-push-5 { + left: 41.66666667%; + } + .col-sm-push-4 { + left: 33.33333333%; + } + .col-sm-push-3 { + left: 25%; + } + .col-sm-push-2 { + left: 16.66666667%; + } + .col-sm-push-1 { + left: 8.33333333%; + } + .col-sm-push-0 { + left: auto; + } + .col-sm-offset-12 { + margin-left: 100%; + } + .col-sm-offset-11 { + margin-left: 91.66666667%; + } + .col-sm-offset-10 { + margin-left: 83.33333333%; + } + .col-sm-offset-9 { + margin-left: 75%; + } + .col-sm-offset-8 { + margin-left: 66.66666667%; + } + .col-sm-offset-7 { + margin-left: 58.33333333%; + } + .col-sm-offset-6 { + margin-left: 50%; + } + .col-sm-offset-5 { + margin-left: 41.66666667%; + } + .col-sm-offset-4 { + margin-left: 33.33333333%; + } + .col-sm-offset-3 { + margin-left: 25%; + } + .col-sm-offset-2 { + margin-left: 16.66666667%; + } + .col-sm-offset-1 { + margin-left: 8.33333333%; + } + .col-sm-offset-0 { + margin-left: 0; + } +} +@media (min-width: 992px) { + .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 { + float: left; + } + .col-md-12 { + width: 100%; + } + .col-md-11 { + width: 91.66666667%; + } + .col-md-10 { + width: 83.33333333%; + } + .col-md-9 { + width: 75%; + } + .col-md-8 { + width: 66.66666667%; + } + .col-md-7 { + width: 58.33333333%; + } + .col-md-6 { + width: 50%; + } + .col-md-5 { + width: 41.66666667%; + } + .col-md-4 { + width: 33.33333333%; + } + .col-md-3 { + width: 25%; + } + .col-md-2 { + width: 16.66666667%; + } + .col-md-1 { + width: 8.33333333%; + } + .col-md-pull-12 { + right: 100%; + } + .col-md-pull-11 { + right: 91.66666667%; + } + .col-md-pull-10 { + right: 83.33333333%; + } + .col-md-pull-9 { + right: 75%; + } + .col-md-pull-8 { + right: 66.66666667%; + } + .col-md-pull-7 { + right: 58.33333333%; + } + .col-md-pull-6 { + right: 50%; + } + .col-md-pull-5 { + right: 41.66666667%; + } + .col-md-pull-4 { + right: 33.33333333%; + } + .col-md-pull-3 { + right: 25%; + } + .col-md-pull-2 { + right: 16.66666667%; + } + .col-md-pull-1 { + right: 8.33333333%; + } + .col-md-pull-0 { + right: auto; + } + .col-md-push-12 { + left: 100%; + } + .col-md-push-11 { + left: 91.66666667%; + } + .col-md-push-10 { + left: 83.33333333%; + } + .col-md-push-9 { + left: 75%; + } + .col-md-push-8 { + left: 66.66666667%; + } + .col-md-push-7 { + left: 58.33333333%; + } + .col-md-push-6 { + left: 50%; + } + .col-md-push-5 { + left: 41.66666667%; + } + .col-md-push-4 { + left: 33.33333333%; + } + .col-md-push-3 { + left: 25%; + } + .col-md-push-2 { + left: 16.66666667%; + } + .col-md-push-1 { + left: 8.33333333%; + } + .col-md-push-0 { + left: auto; + } + .col-md-offset-12 { + margin-left: 100%; + } + .col-md-offset-11 { + margin-left: 91.66666667%; + } + .col-md-offset-10 { + margin-left: 83.33333333%; + } + .col-md-offset-9 { + margin-left: 75%; + } + .col-md-offset-8 { + margin-left: 66.66666667%; + } + .col-md-offset-7 { + margin-left: 58.33333333%; + } + .col-md-offset-6 { + 
margin-left: 50%; + } + .col-md-offset-5 { + margin-left: 41.66666667%; + } + .col-md-offset-4 { + margin-left: 33.33333333%; + } + .col-md-offset-3 { + margin-left: 25%; + } + .col-md-offset-2 { + margin-left: 16.66666667%; + } + .col-md-offset-1 { + margin-left: 8.33333333%; + } + .col-md-offset-0 { + margin-left: 0; + } +} +@media (min-width: 1200px) { + .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 { + float: left; + } + .col-lg-12 { + width: 100%; + } + .col-lg-11 { + width: 91.66666667%; + } + .col-lg-10 { + width: 83.33333333%; + } + .col-lg-9 { + width: 75%; + } + .col-lg-8 { + width: 66.66666667%; + } + .col-lg-7 { + width: 58.33333333%; + } + .col-lg-6 { + width: 50%; + } + .col-lg-5 { + width: 41.66666667%; + } + .col-lg-4 { + width: 33.33333333%; + } + .col-lg-3 { + width: 25%; + } + .col-lg-2 { + width: 16.66666667%; + } + .col-lg-1 { + width: 8.33333333%; + } + .col-lg-pull-12 { + right: 100%; + } + .col-lg-pull-11 { + right: 91.66666667%; + } + .col-lg-pull-10 { + right: 83.33333333%; + } + .col-lg-pull-9 { + right: 75%; + } + .col-lg-pull-8 { + right: 66.66666667%; + } + .col-lg-pull-7 { + right: 58.33333333%; + } + .col-lg-pull-6 { + right: 50%; + } + .col-lg-pull-5 { + right: 41.66666667%; + } + .col-lg-pull-4 { + right: 33.33333333%; + } + .col-lg-pull-3 { + right: 25%; + } + .col-lg-pull-2 { + right: 16.66666667%; + } + .col-lg-pull-1 { + right: 8.33333333%; + } + .col-lg-pull-0 { + right: auto; + } + .col-lg-push-12 { + left: 100%; + } + .col-lg-push-11 { + left: 91.66666667%; + } + .col-lg-push-10 { + left: 83.33333333%; + } + .col-lg-push-9 { + left: 75%; + } + .col-lg-push-8 { + left: 66.66666667%; + } + .col-lg-push-7 { + left: 58.33333333%; + } + .col-lg-push-6 { + left: 50%; + } + .col-lg-push-5 { + left: 41.66666667%; + } + .col-lg-push-4 { + left: 33.33333333%; + } + .col-lg-push-3 { + left: 25%; + } + .col-lg-push-2 { + left: 16.66666667%; + } + .col-lg-push-1 { + left: 8.33333333%; + } + .col-lg-push-0 { + left: auto; + } + .col-lg-offset-12 { + margin-left: 100%; + } + .col-lg-offset-11 { + margin-left: 91.66666667%; + } + .col-lg-offset-10 { + margin-left: 83.33333333%; + } + .col-lg-offset-9 { + margin-left: 75%; + } + .col-lg-offset-8 { + margin-left: 66.66666667%; + } + .col-lg-offset-7 { + margin-left: 58.33333333%; + } + .col-lg-offset-6 { + margin-left: 50%; + } + .col-lg-offset-5 { + margin-left: 41.66666667%; + } + .col-lg-offset-4 { + margin-left: 33.33333333%; + } + .col-lg-offset-3 { + margin-left: 25%; + } + .col-lg-offset-2 { + margin-left: 16.66666667%; + } + .col-lg-offset-1 { + margin-left: 8.33333333%; + } + .col-lg-offset-0 { + margin-left: 0; + } +} +table { + background-color: transparent; +} +caption { + padding-top: 8px; + padding-bottom: 8px; + color: #777; + text-align: left; +} +th { + text-align: left; +} +.table { + width: 100%; + max-width: 100%; + margin-bottom: 20px; +} +.table > thead > tr > th, +.table > tbody > tr > th, +.table > tfoot > tr > th, +.table > thead > tr > td, +.table > tbody > tr > td, +.table > tfoot > tr > td { + padding: 8px; + line-height: 1.42857143; + vertical-align: top; + border-top: 1px solid #ddd; +} +.table > thead > tr > th { + vertical-align: bottom; + border-bottom: 2px solid #ddd; +} +.table > caption + thead > tr:first-child > th, +.table > colgroup + thead > tr:first-child > th, +.table > thead:first-child > tr:first-child > th, +.table > caption + thead > tr:first-child > td, +.table > colgroup + thead > 
tr:first-child > td, +.table > thead:first-child > tr:first-child > td { + border-top: 0; +} +.table > tbody + tbody { + border-top: 2px solid #ddd; +} +.table .table { + background-color: #fff; +} +.table-condensed > thead > tr > th, +.table-condensed > tbody > tr > th, +.table-condensed > tfoot > tr > th, +.table-condensed > thead > tr > td, +.table-condensed > tbody > tr > td, +.table-condensed > tfoot > tr > td { + padding: 5px; +} +.table-bordered { + border: 1px solid #ddd; +} +.table-bordered > thead > tr > th, +.table-bordered > tbody > tr > th, +.table-bordered > tfoot > tr > th, +.table-bordered > thead > tr > td, +.table-bordered > tbody > tr > td, +.table-bordered > tfoot > tr > td { + border: 1px solid #ddd; +} +.table-bordered > thead > tr > th, +.table-bordered > thead > tr > td { + border-bottom-width: 2px; +} +.table-striped > tbody > tr:nth-of-type(odd) { + background-color: #f9f9f9; +} +.table-hover > tbody > tr:hover { + background-color: #f5f5f5; +} +table col[class*="col-"] { + position: static; + display: table-column; + float: none; +} +table td[class*="col-"], +table th[class*="col-"] { + position: static; + display: table-cell; + float: none; +} +.table > thead > tr > td.active, +.table > tbody > tr > td.active, +.table > tfoot > tr > td.active, +.table > thead > tr > th.active, +.table > tbody > tr > th.active, +.table > tfoot > tr > th.active, +.table > thead > tr.active > td, +.table > tbody > tr.active > td, +.table > tfoot > tr.active > td, +.table > thead > tr.active > th, +.table > tbody > tr.active > th, +.table > tfoot > tr.active > th { + background-color: #f5f5f5; +} +.table-hover > tbody > tr > td.active:hover, +.table-hover > tbody > tr > th.active:hover, +.table-hover > tbody > tr.active:hover > td, +.table-hover > tbody > tr:hover > .active, +.table-hover > tbody > tr.active:hover > th { + background-color: #e8e8e8; +} +.table > thead > tr > td.success, +.table > tbody > tr > td.success, +.table > tfoot > tr > td.success, +.table > thead > tr > th.success, +.table > tbody > tr > th.success, +.table > tfoot > tr > th.success, +.table > thead > tr.success > td, +.table > tbody > tr.success > td, +.table > tfoot > tr.success > td, +.table > thead > tr.success > th, +.table > tbody > tr.success > th, +.table > tfoot > tr.success > th { + background-color: #dff0d8; +} +.table-hover > tbody > tr > td.success:hover, +.table-hover > tbody > tr > th.success:hover, +.table-hover > tbody > tr.success:hover > td, +.table-hover > tbody > tr:hover > .success, +.table-hover > tbody > tr.success:hover > th { + background-color: #d0e9c6; +} +.table > thead > tr > td.info, +.table > tbody > tr > td.info, +.table > tfoot > tr > td.info, +.table > thead > tr > th.info, +.table > tbody > tr > th.info, +.table > tfoot > tr > th.info, +.table > thead > tr.info > td, +.table > tbody > tr.info > td, +.table > tfoot > tr.info > td, +.table > thead > tr.info > th, +.table > tbody > tr.info > th, +.table > tfoot > tr.info > th { + background-color: #d9edf7; +} +.table-hover > tbody > tr > td.info:hover, +.table-hover > tbody > tr > th.info:hover, +.table-hover > tbody > tr.info:hover > td, +.table-hover > tbody > tr:hover > .info, +.table-hover > tbody > tr.info:hover > th { + background-color: #c4e3f3; +} +.table > thead > tr > td.warning, +.table > tbody > tr > td.warning, +.table > tfoot > tr > td.warning, +.table > thead > tr > th.warning, +.table > tbody > tr > th.warning, +.table > tfoot > tr > th.warning, +.table > thead > tr.warning > td, +.table > tbody > tr.warning > 
td, +.table > tfoot > tr.warning > td, +.table > thead > tr.warning > th, +.table > tbody > tr.warning > th, +.table > tfoot > tr.warning > th { + background-color: #fcf8e3; +} +.table-hover > tbody > tr > td.warning:hover, +.table-hover > tbody > tr > th.warning:hover, +.table-hover > tbody > tr.warning:hover > td, +.table-hover > tbody > tr:hover > .warning, +.table-hover > tbody > tr.warning:hover > th { + background-color: #faf2cc; +} +.table > thead > tr > td.danger, +.table > tbody > tr > td.danger, +.table > tfoot > tr > td.danger, +.table > thead > tr > th.danger, +.table > tbody > tr > th.danger, +.table > tfoot > tr > th.danger, +.table > thead > tr.danger > td, +.table > tbody > tr.danger > td, +.table > tfoot > tr.danger > td, +.table > thead > tr.danger > th, +.table > tbody > tr.danger > th, +.table > tfoot > tr.danger > th { + background-color: #f2dede; +} +.table-hover > tbody > tr > td.danger:hover, +.table-hover > tbody > tr > th.danger:hover, +.table-hover > tbody > tr.danger:hover > td, +.table-hover > tbody > tr:hover > .danger, +.table-hover > tbody > tr.danger:hover > th { + background-color: #ebcccc; +} +.table-responsive { + min-height: .01%; + overflow-x: auto; +} +@media screen and (max-width: 767px) { + .table-responsive { + width: 100%; + margin-bottom: 15px; + overflow-y: hidden; + -ms-overflow-style: -ms-autohiding-scrollbar; + border: 1px solid #ddd; + } + .table-responsive > .table { + margin-bottom: 0; + } + .table-responsive > .table > thead > tr > th, + .table-responsive > .table > tbody > tr > th, + .table-responsive > .table > tfoot > tr > th, + .table-responsive > .table > thead > tr > td, + .table-responsive > .table > tbody > tr > td, + .table-responsive > .table > tfoot > tr > td { + white-space: nowrap; + } + .table-responsive > .table-bordered { + border: 0; + } + .table-responsive > .table-bordered > thead > tr > th:first-child, + .table-responsive > .table-bordered > tbody > tr > th:first-child, + .table-responsive > .table-bordered > tfoot > tr > th:first-child, + .table-responsive > .table-bordered > thead > tr > td:first-child, + .table-responsive > .table-bordered > tbody > tr > td:first-child, + .table-responsive > .table-bordered > tfoot > tr > td:first-child { + border-left: 0; + } + .table-responsive > .table-bordered > thead > tr > th:last-child, + .table-responsive > .table-bordered > tbody > tr > th:last-child, + .table-responsive > .table-bordered > tfoot > tr > th:last-child, + .table-responsive > .table-bordered > thead > tr > td:last-child, + .table-responsive > .table-bordered > tbody > tr > td:last-child, + .table-responsive > .table-bordered > tfoot > tr > td:last-child { + border-right: 0; + } + .table-responsive > .table-bordered > tbody > tr:last-child > th, + .table-responsive > .table-bordered > tfoot > tr:last-child > th, + .table-responsive > .table-bordered > tbody > tr:last-child > td, + .table-responsive > .table-bordered > tfoot > tr:last-child > td { + border-bottom: 0; + } +} +fieldset { + min-width: 0; + padding: 0; + margin: 0; + border: 0; +} +legend { + display: block; + width: 100%; + padding: 0; + margin-bottom: 20px; + font-size: 21px; + line-height: inherit; + color: #333; + border: 0; + border-bottom: 1px solid #e5e5e5; +} +label { + display: inline-block; + max-width: 100%; + margin-bottom: 5px; + font-weight: bold; +} +input[type="search"] { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +input[type="radio"], +input[type="checkbox"] { + margin: 4px 0 0; + 
margin-top: 1px \9; + line-height: normal; +} +input[type="file"] { + display: block; +} +input[type="range"] { + display: block; + width: 100%; +} +select[multiple], +select[size] { + height: auto; +} +input[type="file"]:focus, +input[type="radio"]:focus, +input[type="checkbox"]:focus { + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +output { + display: block; + padding-top: 7px; + font-size: 14px; + line-height: 1.42857143; + color: #555; +} +.form-control { + display: block; + width: 100%; + height: 34px; + padding: 6px 12px; + font-size: 14px; + line-height: 1.42857143; + color: #555; + background-color: #fff; + background-image: none; + border: 1px solid #ccc; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); + -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; + -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; + transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; +} +.form-control:focus { + border-color: #66afe9; + outline: 0; + -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); + box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); +} +.form-control::-moz-placeholder { + color: #999; + opacity: 1; +} +.form-control:-ms-input-placeholder { + color: #999; +} +.form-control::-webkit-input-placeholder { + color: #999; +} +.form-control::-ms-expand { + background-color: transparent; + border: 0; +} +.form-control[disabled], +.form-control[readonly], +fieldset[disabled] .form-control { + background-color: #eee; + opacity: 1; +} +.form-control[disabled], +fieldset[disabled] .form-control { + cursor: not-allowed; +} +textarea.form-control { + height: auto; +} +input[type="search"] { + -webkit-appearance: none; +} +@media screen and (-webkit-min-device-pixel-ratio: 0) { + input[type="date"].form-control, + input[type="time"].form-control, + input[type="datetime-local"].form-control, + input[type="month"].form-control { + line-height: 34px; + } + input[type="date"].input-sm, + input[type="time"].input-sm, + input[type="datetime-local"].input-sm, + input[type="month"].input-sm, + .input-group-sm input[type="date"], + .input-group-sm input[type="time"], + .input-group-sm input[type="datetime-local"], + .input-group-sm input[type="month"] { + line-height: 30px; + } + input[type="date"].input-lg, + input[type="time"].input-lg, + input[type="datetime-local"].input-lg, + input[type="month"].input-lg, + .input-group-lg input[type="date"], + .input-group-lg input[type="time"], + .input-group-lg input[type="datetime-local"], + .input-group-lg input[type="month"] { + line-height: 46px; + } +} +.form-group { + margin-bottom: 15px; +} +.radio, +.checkbox { + position: relative; + display: block; + margin-top: 10px; + margin-bottom: 10px; +} +.radio label, +.checkbox label { + min-height: 20px; + padding-left: 20px; + margin-bottom: 0; + font-weight: normal; + cursor: pointer; +} +.radio input[type="radio"], +.radio-inline input[type="radio"], +.checkbox input[type="checkbox"], +.checkbox-inline input[type="checkbox"] { + position: absolute; + margin-top: 4px \9; + margin-left: -20px; +} +.radio + .radio, +.checkbox + .checkbox { + margin-top: -5px; +} +.radio-inline, +.checkbox-inline { + position: relative; + display: inline-block; + padding-left: 20px; + margin-bottom: 0; + font-weight: normal; + vertical-align: middle; + cursor: pointer; +} 
+.radio-inline + .radio-inline, +.checkbox-inline + .checkbox-inline { + margin-top: 0; + margin-left: 10px; +} +input[type="radio"][disabled], +input[type="checkbox"][disabled], +input[type="radio"].disabled, +input[type="checkbox"].disabled, +fieldset[disabled] input[type="radio"], +fieldset[disabled] input[type="checkbox"] { + cursor: not-allowed; +} +.radio-inline.disabled, +.checkbox-inline.disabled, +fieldset[disabled] .radio-inline, +fieldset[disabled] .checkbox-inline { + cursor: not-allowed; +} +.radio.disabled label, +.checkbox.disabled label, +fieldset[disabled] .radio label, +fieldset[disabled] .checkbox label { + cursor: not-allowed; +} +.form-control-static { + min-height: 34px; + padding-top: 7px; + padding-bottom: 7px; + margin-bottom: 0; +} +.form-control-static.input-lg, +.form-control-static.input-sm { + padding-right: 0; + padding-left: 0; +} +.input-sm { + height: 30px; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +select.input-sm { + height: 30px; + line-height: 30px; +} +textarea.input-sm, +select[multiple].input-sm { + height: auto; +} +.form-group-sm .form-control { + height: 30px; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +.form-group-sm select.form-control { + height: 30px; + line-height: 30px; +} +.form-group-sm textarea.form-control, +.form-group-sm select[multiple].form-control { + height: auto; +} +.form-group-sm .form-control-static { + height: 30px; + min-height: 32px; + padding: 6px 10px; + font-size: 12px; + line-height: 1.5; +} +.input-lg { + height: 46px; + padding: 10px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +select.input-lg { + height: 46px; + line-height: 46px; +} +textarea.input-lg, +select[multiple].input-lg { + height: auto; +} +.form-group-lg .form-control { + height: 46px; + padding: 10px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +.form-group-lg select.form-control { + height: 46px; + line-height: 46px; +} +.form-group-lg textarea.form-control, +.form-group-lg select[multiple].form-control { + height: auto; +} +.form-group-lg .form-control-static { + height: 46px; + min-height: 38px; + padding: 11px 16px; + font-size: 18px; + line-height: 1.3333333; +} +.has-feedback { + position: relative; +} +.has-feedback .form-control { + padding-right: 42.5px; +} +.form-control-feedback { + position: absolute; + top: 0; + right: 0; + z-index: 2; + display: block; + width: 34px; + height: 34px; + line-height: 34px; + text-align: center; + pointer-events: none; +} +.input-lg + .form-control-feedback, +.input-group-lg + .form-control-feedback, +.form-group-lg .form-control + .form-control-feedback { + width: 46px; + height: 46px; + line-height: 46px; +} +.input-sm + .form-control-feedback, +.input-group-sm + .form-control-feedback, +.form-group-sm .form-control + .form-control-feedback { + width: 30px; + height: 30px; + line-height: 30px; +} +.has-success .help-block, +.has-success .control-label, +.has-success .radio, +.has-success .checkbox, +.has-success .radio-inline, +.has-success .checkbox-inline, +.has-success.radio label, +.has-success.checkbox label, +.has-success.radio-inline label, +.has-success.checkbox-inline label { + color: #3c763d; +} +.has-success .form-control { + border-color: #3c763d; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); +} +.has-success .form-control:focus { + border-color: #2b542c; + -webkit-box-shadow: inset 0 1px 
1px rgba(0, 0, 0, .075), 0 0 6px #67b168; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168; +} +.has-success .input-group-addon { + color: #3c763d; + background-color: #dff0d8; + border-color: #3c763d; +} +.has-success .form-control-feedback { + color: #3c763d; +} +.has-warning .help-block, +.has-warning .control-label, +.has-warning .radio, +.has-warning .checkbox, +.has-warning .radio-inline, +.has-warning .checkbox-inline, +.has-warning.radio label, +.has-warning.checkbox label, +.has-warning.radio-inline label, +.has-warning.checkbox-inline label { + color: #8a6d3b; +} +.has-warning .form-control { + border-color: #8a6d3b; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); +} +.has-warning .form-control:focus { + border-color: #66512c; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; +} +.has-warning .input-group-addon { + color: #8a6d3b; + background-color: #fcf8e3; + border-color: #8a6d3b; +} +.has-warning .form-control-feedback { + color: #8a6d3b; +} +.has-error .help-block, +.has-error .control-label, +.has-error .radio, +.has-error .checkbox, +.has-error .radio-inline, +.has-error .checkbox-inline, +.has-error.radio label, +.has-error.checkbox label, +.has-error.radio-inline label, +.has-error.checkbox-inline label { + color: #a94442; +} +.has-error .form-control { + border-color: #a94442; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); +} +.has-error .form-control:focus { + border-color: #843534; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; +} +.has-error .input-group-addon { + color: #a94442; + background-color: #f2dede; + border-color: #a94442; +} +.has-error .form-control-feedback { + color: #a94442; +} +.has-feedback label ~ .form-control-feedback { + top: 25px; +} +.has-feedback label.sr-only ~ .form-control-feedback { + top: 0; +} +.help-block { + display: block; + margin-top: 5px; + margin-bottom: 10px; + color: #737373; +} +@media (min-width: 768px) { + .form-inline .form-group { + display: inline-block; + margin-bottom: 0; + vertical-align: middle; + } + .form-inline .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .form-inline .form-control-static { + display: inline-block; + } + .form-inline .input-group { + display: inline-table; + vertical-align: middle; + } + .form-inline .input-group .input-group-addon, + .form-inline .input-group .input-group-btn, + .form-inline .input-group .form-control { + width: auto; + } + .form-inline .input-group > .form-control { + width: 100%; + } + .form-inline .control-label { + margin-bottom: 0; + vertical-align: middle; + } + .form-inline .radio, + .form-inline .checkbox { + display: inline-block; + margin-top: 0; + margin-bottom: 0; + vertical-align: middle; + } + .form-inline .radio label, + .form-inline .checkbox label { + padding-left: 0; + } + .form-inline .radio input[type="radio"], + .form-inline .checkbox input[type="checkbox"] { + position: relative; + margin-left: 0; + } + .form-inline .has-feedback .form-control-feedback { + top: 0; + } +} +.form-horizontal .radio, +.form-horizontal .checkbox, +.form-horizontal .radio-inline, +.form-horizontal .checkbox-inline { + padding-top: 7px; + margin-top: 0; + margin-bottom: 0; +} +.form-horizontal .radio, 
+.form-horizontal .checkbox { + min-height: 27px; +} +.form-horizontal .form-group { + margin-right: -15px; + margin-left: -15px; +} +@media (min-width: 768px) { + .form-horizontal .control-label { + padding-top: 7px; + margin-bottom: 0; + text-align: right; + } +} +.form-horizontal .has-feedback .form-control-feedback { + right: 15px; +} +@media (min-width: 768px) { + .form-horizontal .form-group-lg .control-label { + padding-top: 11px; + font-size: 18px; + } +} +@media (min-width: 768px) { + .form-horizontal .form-group-sm .control-label { + padding-top: 6px; + font-size: 12px; + } +} +.btn { + display: inline-block; + padding: 6px 12px; + margin-bottom: 0; + font-size: 14px; + font-weight: normal; + line-height: 1.42857143; + text-align: center; + white-space: nowrap; + vertical-align: middle; + -ms-touch-action: manipulation; + touch-action: manipulation; + cursor: pointer; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + background-image: none; + border: 1px solid transparent; + border-radius: 4px; +} +.btn:focus, +.btn:active:focus, +.btn.active:focus, +.btn.focus, +.btn:active.focus, +.btn.active.focus { + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +.btn:hover, +.btn:focus, +.btn.focus { + color: #333; + text-decoration: none; +} +.btn:active, +.btn.active { + background-image: none; + outline: 0; + -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); + box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); +} +.btn.disabled, +.btn[disabled], +fieldset[disabled] .btn { + cursor: not-allowed; + filter: alpha(opacity=65); + -webkit-box-shadow: none; + box-shadow: none; + opacity: .65; +} +a.btn.disabled, +fieldset[disabled] a.btn { + pointer-events: none; +} +.btn-default { + color: #333; + background-color: #fff; + border-color: #ccc; +} +.btn-default:focus, +.btn-default.focus { + color: #333; + background-color: #e6e6e6; + border-color: #8c8c8c; +} +.btn-default:hover { + color: #333; + background-color: #e6e6e6; + border-color: #adadad; +} +.btn-default:active, +.btn-default.active, +.open > .dropdown-toggle.btn-default { + color: #333; + background-color: #e6e6e6; + border-color: #adadad; +} +.btn-default:active:hover, +.btn-default.active:hover, +.open > .dropdown-toggle.btn-default:hover, +.btn-default:active:focus, +.btn-default.active:focus, +.open > .dropdown-toggle.btn-default:focus, +.btn-default:active.focus, +.btn-default.active.focus, +.open > .dropdown-toggle.btn-default.focus { + color: #333; + background-color: #d4d4d4; + border-color: #8c8c8c; +} +.btn-default:active, +.btn-default.active, +.open > .dropdown-toggle.btn-default { + background-image: none; +} +.btn-default.disabled:hover, +.btn-default[disabled]:hover, +fieldset[disabled] .btn-default:hover, +.btn-default.disabled:focus, +.btn-default[disabled]:focus, +fieldset[disabled] .btn-default:focus, +.btn-default.disabled.focus, +.btn-default[disabled].focus, +fieldset[disabled] .btn-default.focus { + background-color: #fff; + border-color: #ccc; +} +.btn-default .badge { + color: #fff; + background-color: #333; +} +.btn-primary { + color: #fff; + background-color: #337ab7; + border-color: #2e6da4; +} +.btn-primary:focus, +.btn-primary.focus { + color: #fff; + background-color: #286090; + border-color: #122b40; +} +.btn-primary:hover { + color: #fff; + background-color: #286090; + border-color: #204d74; +} +.btn-primary:active, +.btn-primary.active, +.open > .dropdown-toggle.btn-primary { + color: #fff; + background-color: #286090; 
+ border-color: #204d74; +} +.btn-primary:active:hover, +.btn-primary.active:hover, +.open > .dropdown-toggle.btn-primary:hover, +.btn-primary:active:focus, +.btn-primary.active:focus, +.open > .dropdown-toggle.btn-primary:focus, +.btn-primary:active.focus, +.btn-primary.active.focus, +.open > .dropdown-toggle.btn-primary.focus { + color: #fff; + background-color: #204d74; + border-color: #122b40; +} +.btn-primary:active, +.btn-primary.active, +.open > .dropdown-toggle.btn-primary { + background-image: none; +} +.btn-primary.disabled:hover, +.btn-primary[disabled]:hover, +fieldset[disabled] .btn-primary:hover, +.btn-primary.disabled:focus, +.btn-primary[disabled]:focus, +fieldset[disabled] .btn-primary:focus, +.btn-primary.disabled.focus, +.btn-primary[disabled].focus, +fieldset[disabled] .btn-primary.focus { + background-color: #337ab7; + border-color: #2e6da4; +} +.btn-primary .badge { + color: #337ab7; + background-color: #fff; +} +.btn-success { + color: #fff; + background-color: #5cb85c; + border-color: #4cae4c; +} +.btn-success:focus, +.btn-success.focus { + color: #fff; + background-color: #449d44; + border-color: #255625; +} +.btn-success:hover { + color: #fff; + background-color: #449d44; + border-color: #398439; +} +.btn-success:active, +.btn-success.active, +.open > .dropdown-toggle.btn-success { + color: #fff; + background-color: #449d44; + border-color: #398439; +} +.btn-success:active:hover, +.btn-success.active:hover, +.open > .dropdown-toggle.btn-success:hover, +.btn-success:active:focus, +.btn-success.active:focus, +.open > .dropdown-toggle.btn-success:focus, +.btn-success:active.focus, +.btn-success.active.focus, +.open > .dropdown-toggle.btn-success.focus { + color: #fff; + background-color: #398439; + border-color: #255625; +} +.btn-success:active, +.btn-success.active, +.open > .dropdown-toggle.btn-success { + background-image: none; +} +.btn-success.disabled:hover, +.btn-success[disabled]:hover, +fieldset[disabled] .btn-success:hover, +.btn-success.disabled:focus, +.btn-success[disabled]:focus, +fieldset[disabled] .btn-success:focus, +.btn-success.disabled.focus, +.btn-success[disabled].focus, +fieldset[disabled] .btn-success.focus { + background-color: #5cb85c; + border-color: #4cae4c; +} +.btn-success .badge { + color: #5cb85c; + background-color: #fff; +} +.btn-info { + color: #fff; + background-color: #5bc0de; + border-color: #46b8da; +} +.btn-info:focus, +.btn-info.focus { + color: #fff; + background-color: #31b0d5; + border-color: #1b6d85; +} +.btn-info:hover { + color: #fff; + background-color: #31b0d5; + border-color: #269abc; +} +.btn-info:active, +.btn-info.active, +.open > .dropdown-toggle.btn-info { + color: #fff; + background-color: #31b0d5; + border-color: #269abc; +} +.btn-info:active:hover, +.btn-info.active:hover, +.open > .dropdown-toggle.btn-info:hover, +.btn-info:active:focus, +.btn-info.active:focus, +.open > .dropdown-toggle.btn-info:focus, +.btn-info:active.focus, +.btn-info.active.focus, +.open > .dropdown-toggle.btn-info.focus { + color: #fff; + background-color: #269abc; + border-color: #1b6d85; +} +.btn-info:active, +.btn-info.active, +.open > .dropdown-toggle.btn-info { + background-image: none; +} +.btn-info.disabled:hover, +.btn-info[disabled]:hover, +fieldset[disabled] .btn-info:hover, +.btn-info.disabled:focus, +.btn-info[disabled]:focus, +fieldset[disabled] .btn-info:focus, +.btn-info.disabled.focus, +.btn-info[disabled].focus, +fieldset[disabled] .btn-info.focus { + background-color: #5bc0de; + border-color: #46b8da; +} +.btn-info 
.badge { + color: #5bc0de; + background-color: #fff; +} +.btn-warning { + color: #fff; + background-color: #f0ad4e; + border-color: #eea236; +} +.btn-warning:focus, +.btn-warning.focus { + color: #fff; + background-color: #ec971f; + border-color: #985f0d; +} +.btn-warning:hover { + color: #fff; + background-color: #ec971f; + border-color: #d58512; +} +.btn-warning:active, +.btn-warning.active, +.open > .dropdown-toggle.btn-warning { + color: #fff; + background-color: #ec971f; + border-color: #d58512; +} +.btn-warning:active:hover, +.btn-warning.active:hover, +.open > .dropdown-toggle.btn-warning:hover, +.btn-warning:active:focus, +.btn-warning.active:focus, +.open > .dropdown-toggle.btn-warning:focus, +.btn-warning:active.focus, +.btn-warning.active.focus, +.open > .dropdown-toggle.btn-warning.focus { + color: #fff; + background-color: #d58512; + border-color: #985f0d; +} +.btn-warning:active, +.btn-warning.active, +.open > .dropdown-toggle.btn-warning { + background-image: none; +} +.btn-warning.disabled:hover, +.btn-warning[disabled]:hover, +fieldset[disabled] .btn-warning:hover, +.btn-warning.disabled:focus, +.btn-warning[disabled]:focus, +fieldset[disabled] .btn-warning:focus, +.btn-warning.disabled.focus, +.btn-warning[disabled].focus, +fieldset[disabled] .btn-warning.focus { + background-color: #f0ad4e; + border-color: #eea236; +} +.btn-warning .badge { + color: #f0ad4e; + background-color: #fff; +} +.btn-danger { + color: #fff; + background-color: #d9534f; + border-color: #d43f3a; +} +.btn-danger:focus, +.btn-danger.focus { + color: #fff; + background-color: #c9302c; + border-color: #761c19; +} +.btn-danger:hover { + color: #fff; + background-color: #c9302c; + border-color: #ac2925; +} +.btn-danger:active, +.btn-danger.active, +.open > .dropdown-toggle.btn-danger { + color: #fff; + background-color: #c9302c; + border-color: #ac2925; +} +.btn-danger:active:hover, +.btn-danger.active:hover, +.open > .dropdown-toggle.btn-danger:hover, +.btn-danger:active:focus, +.btn-danger.active:focus, +.open > .dropdown-toggle.btn-danger:focus, +.btn-danger:active.focus, +.btn-danger.active.focus, +.open > .dropdown-toggle.btn-danger.focus { + color: #fff; + background-color: #ac2925; + border-color: #761c19; +} +.btn-danger:active, +.btn-danger.active, +.open > .dropdown-toggle.btn-danger { + background-image: none; +} +.btn-danger.disabled:hover, +.btn-danger[disabled]:hover, +fieldset[disabled] .btn-danger:hover, +.btn-danger.disabled:focus, +.btn-danger[disabled]:focus, +fieldset[disabled] .btn-danger:focus, +.btn-danger.disabled.focus, +.btn-danger[disabled].focus, +fieldset[disabled] .btn-danger.focus { + background-color: #d9534f; + border-color: #d43f3a; +} +.btn-danger .badge { + color: #d9534f; + background-color: #fff; +} +.btn-link { + font-weight: normal; + color: #337ab7; + border-radius: 0; +} +.btn-link, +.btn-link:active, +.btn-link.active, +.btn-link[disabled], +fieldset[disabled] .btn-link { + background-color: transparent; + -webkit-box-shadow: none; + box-shadow: none; +} +.btn-link, +.btn-link:hover, +.btn-link:focus, +.btn-link:active { + border-color: transparent; +} +.btn-link:hover, +.btn-link:focus { + color: #23527c; + text-decoration: underline; + background-color: transparent; +} +.btn-link[disabled]:hover, +fieldset[disabled] .btn-link:hover, +.btn-link[disabled]:focus, +fieldset[disabled] .btn-link:focus { + color: #777; + text-decoration: none; +} +.btn-lg, +.btn-group-lg > .btn { + padding: 10px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 
6px; +} +.btn-sm, +.btn-group-sm > .btn { + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +.btn-xs, +.btn-group-xs > .btn { + padding: 1px 5px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +.btn-block { + display: block; + width: 100%; +} +.btn-block + .btn-block { + margin-top: 5px; +} +input[type="submit"].btn-block, +input[type="reset"].btn-block, +input[type="button"].btn-block { + width: 100%; +} +.fade { + opacity: 0; + -webkit-transition: opacity .15s linear; + -o-transition: opacity .15s linear; + transition: opacity .15s linear; +} +.fade.in { + opacity: 1; +} +.collapse { + display: none; +} +.collapse.in { + display: block; +} +tr.collapse.in { + display: table-row; +} +tbody.collapse.in { + display: table-row-group; +} +.collapsing { + position: relative; + height: 0; + overflow: hidden; + -webkit-transition-timing-function: ease; + -o-transition-timing-function: ease; + transition-timing-function: ease; + -webkit-transition-duration: .35s; + -o-transition-duration: .35s; + transition-duration: .35s; + -webkit-transition-property: height, visibility; + -o-transition-property: height, visibility; + transition-property: height, visibility; +} +.caret { + display: inline-block; + width: 0; + height: 0; + margin-left: 2px; + vertical-align: middle; + border-top: 4px dashed; + border-top: 4px solid \9; + border-right: 4px solid transparent; + border-left: 4px solid transparent; +} +.dropup, +.dropdown { + position: relative; +} +.dropdown-toggle:focus { + outline: 0; +} +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 160px; + padding: 5px 0; + margin: 2px 0 0; + font-size: 14px; + text-align: left; + list-style: none; + background-color: #fff; + -webkit-background-clip: padding-box; + background-clip: padding-box; + border: 1px solid #ccc; + border: 1px solid rgba(0, 0, 0, .15); + border-radius: 4px; + -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175); + box-shadow: 0 6px 12px rgba(0, 0, 0, .175); +} +.dropdown-menu.pull-right { + right: 0; + left: auto; +} +.dropdown-menu .divider { + height: 1px; + margin: 9px 0; + overflow: hidden; + background-color: #e5e5e5; +} +.dropdown-menu > li > a { + display: block; + padding: 3px 20px; + clear: both; + font-weight: normal; + line-height: 1.42857143; + color: #333; + white-space: nowrap; +} +.dropdown-menu > li > a:hover, +.dropdown-menu > li > a:focus { + color: #262626; + text-decoration: none; + background-color: #f5f5f5; +} +.dropdown-menu > .active > a, +.dropdown-menu > .active > a:hover, +.dropdown-menu > .active > a:focus { + color: #fff; + text-decoration: none; + background-color: #337ab7; + outline: 0; +} +.dropdown-menu > .disabled > a, +.dropdown-menu > .disabled > a:hover, +.dropdown-menu > .disabled > a:focus { + color: #777; +} +.dropdown-menu > .disabled > a:hover, +.dropdown-menu > .disabled > a:focus { + text-decoration: none; + cursor: not-allowed; + background-color: transparent; + background-image: none; + filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); +} +.open > .dropdown-menu { + display: block; +} +.open > a { + outline: 0; +} +.dropdown-menu-right { + right: 0; + left: auto; +} +.dropdown-menu-left { + right: auto; + left: 0; +} +.dropdown-header { + display: block; + padding: 3px 20px; + font-size: 12px; + line-height: 1.42857143; + color: #777; + white-space: nowrap; +} +.dropdown-backdrop { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; 
+ z-index: 990; +} +.pull-right > .dropdown-menu { + right: 0; + left: auto; +} +.dropup .caret, +.navbar-fixed-bottom .dropdown .caret { + content: ""; + border-top: 0; + border-bottom: 4px dashed; + border-bottom: 4px solid \9; +} +.dropup .dropdown-menu, +.navbar-fixed-bottom .dropdown .dropdown-menu { + top: auto; + bottom: 100%; + margin-bottom: 2px; +} +@media (min-width: 768px) { + .navbar-right .dropdown-menu { + right: 0; + left: auto; + } + .navbar-right .dropdown-menu-left { + right: auto; + left: 0; + } +} +.btn-group, +.btn-group-vertical { + position: relative; + display: inline-block; + vertical-align: middle; +} +.btn-group > .btn, +.btn-group-vertical > .btn { + position: relative; + float: left; +} +.btn-group > .btn:hover, +.btn-group-vertical > .btn:hover, +.btn-group > .btn:focus, +.btn-group-vertical > .btn:focus, +.btn-group > .btn:active, +.btn-group-vertical > .btn:active, +.btn-group > .btn.active, +.btn-group-vertical > .btn.active { + z-index: 2; +} +.btn-group .btn + .btn, +.btn-group .btn + .btn-group, +.btn-group .btn-group + .btn, +.btn-group .btn-group + .btn-group { + margin-left: -1px; +} +.btn-toolbar { + margin-left: -5px; +} +.btn-toolbar .btn, +.btn-toolbar .btn-group, +.btn-toolbar .input-group { + float: left; +} +.btn-toolbar > .btn, +.btn-toolbar > .btn-group, +.btn-toolbar > .input-group { + margin-left: 5px; +} +.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { + border-radius: 0; +} +.btn-group > .btn:first-child { + margin-left: 0; +} +.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.btn-group > .btn:last-child:not(:first-child), +.btn-group > .dropdown-toggle:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group > .btn-group { + float: left; +} +.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { + border-radius: 0; +} +.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child, +.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group .dropdown-toggle:active, +.btn-group.open .dropdown-toggle { + outline: 0; +} +.btn-group > .btn + .dropdown-toggle { + padding-right: 8px; + padding-left: 8px; +} +.btn-group > .btn-lg + .dropdown-toggle { + padding-right: 12px; + padding-left: 12px; +} +.btn-group.open .dropdown-toggle { + -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); + box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); +} +.btn-group.open .dropdown-toggle.btn-link { + -webkit-box-shadow: none; + box-shadow: none; +} +.btn .caret { + margin-left: 0; +} +.btn-lg .caret { + border-width: 5px 5px 0; + border-bottom-width: 0; +} +.dropup .btn-lg .caret { + border-width: 0 5px 5px; +} +.btn-group-vertical > .btn, +.btn-group-vertical > .btn-group, +.btn-group-vertical > .btn-group > .btn { + display: block; + float: none; + width: 100%; + max-width: 100%; +} +.btn-group-vertical > .btn-group > .btn { + float: none; +} +.btn-group-vertical > .btn + .btn, +.btn-group-vertical > .btn + .btn-group, +.btn-group-vertical > .btn-group + .btn, +.btn-group-vertical > .btn-group + .btn-group { + margin-top: -1px; + margin-left: 0; +} +.btn-group-vertical > .btn:not(:first-child):not(:last-child) { + border-radius: 0; +} 
+.btn-group-vertical > .btn:first-child:not(:last-child) { + border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn:last-child:not(:first-child) { + border-top-left-radius: 0; + border-top-right-radius: 0; + border-bottom-right-radius: 4px; + border-bottom-left-radius: 4px; +} +.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn { + border-radius: 0; +} +.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child, +.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child { + border-top-left-radius: 0; + border-top-right-radius: 0; +} +.btn-group-justified { + display: table; + width: 100%; + table-layout: fixed; + border-collapse: separate; +} +.btn-group-justified > .btn, +.btn-group-justified > .btn-group { + display: table-cell; + float: none; + width: 1%; +} +.btn-group-justified > .btn-group .btn { + width: 100%; +} +.btn-group-justified > .btn-group .dropdown-menu { + left: auto; +} +[data-toggle="buttons"] > .btn input[type="radio"], +[data-toggle="buttons"] > .btn-group > .btn input[type="radio"], +[data-toggle="buttons"] > .btn input[type="checkbox"], +[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] { + position: absolute; + clip: rect(0, 0, 0, 0); + pointer-events: none; +} +.input-group { + position: relative; + display: table; + border-collapse: separate; +} +.input-group[class*="col-"] { + float: none; + padding-right: 0; + padding-left: 0; +} +.input-group .form-control { + position: relative; + z-index: 2; + float: left; + width: 100%; + margin-bottom: 0; +} +.input-group .form-control:focus { + z-index: 3; +} +.input-group-lg > .form-control, +.input-group-lg > .input-group-addon, +.input-group-lg > .input-group-btn > .btn { + height: 46px; + padding: 10px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +select.input-group-lg > .form-control, +select.input-group-lg > .input-group-addon, +select.input-group-lg > .input-group-btn > .btn { + height: 46px; + line-height: 46px; +} +textarea.input-group-lg > .form-control, +textarea.input-group-lg > .input-group-addon, +textarea.input-group-lg > .input-group-btn > .btn, +select[multiple].input-group-lg > .form-control, +select[multiple].input-group-lg > .input-group-addon, +select[multiple].input-group-lg > .input-group-btn > .btn { + height: auto; +} +.input-group-sm > .form-control, +.input-group-sm > .input-group-addon, +.input-group-sm > .input-group-btn > .btn { + height: 30px; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +select.input-group-sm > .form-control, +select.input-group-sm > .input-group-addon, +select.input-group-sm > .input-group-btn > .btn { + height: 30px; + line-height: 30px; +} +textarea.input-group-sm > .form-control, +textarea.input-group-sm > .input-group-addon, +textarea.input-group-sm > .input-group-btn > .btn, +select[multiple].input-group-sm > .form-control, +select[multiple].input-group-sm > .input-group-addon, +select[multiple].input-group-sm > .input-group-btn > .btn { + height: auto; +} +.input-group-addon, +.input-group-btn, +.input-group .form-control { + display: table-cell; +} +.input-group-addon:not(:first-child):not(:last-child), +.input-group-btn:not(:first-child):not(:last-child), 
+.input-group .form-control:not(:first-child):not(:last-child) { + border-radius: 0; +} +.input-group-addon, +.input-group-btn { + width: 1%; + white-space: nowrap; + vertical-align: middle; +} +.input-group-addon { + padding: 6px 12px; + font-size: 14px; + font-weight: normal; + line-height: 1; + color: #555; + text-align: center; + background-color: #eee; + border: 1px solid #ccc; + border-radius: 4px; +} +.input-group-addon.input-sm { + padding: 5px 10px; + font-size: 12px; + border-radius: 3px; +} +.input-group-addon.input-lg { + padding: 10px 16px; + font-size: 18px; + border-radius: 6px; +} +.input-group-addon input[type="radio"], +.input-group-addon input[type="checkbox"] { + margin-top: 0; +} +.input-group .form-control:first-child, +.input-group-addon:first-child, +.input-group-btn:first-child > .btn, +.input-group-btn:first-child > .btn-group > .btn, +.input-group-btn:first-child > .dropdown-toggle, +.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle), +.input-group-btn:last-child > .btn-group:not(:last-child) > .btn { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group-addon:first-child { + border-right: 0; +} +.input-group .form-control:last-child, +.input-group-addon:last-child, +.input-group-btn:last-child > .btn, +.input-group-btn:last-child > .btn-group > .btn, +.input-group-btn:last-child > .dropdown-toggle, +.input-group-btn:first-child > .btn:not(:first-child), +.input-group-btn:first-child > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} +.input-group-addon:last-child { + border-left: 0; +} +.input-group-btn { + position: relative; + font-size: 0; + white-space: nowrap; +} +.input-group-btn > .btn { + position: relative; +} +.input-group-btn > .btn + .btn { + margin-left: -1px; +} +.input-group-btn > .btn:hover, +.input-group-btn > .btn:focus, +.input-group-btn > .btn:active { + z-index: 2; +} +.input-group-btn:first-child > .btn, +.input-group-btn:first-child > .btn-group { + margin-right: -1px; +} +.input-group-btn:last-child > .btn, +.input-group-btn:last-child > .btn-group { + z-index: 2; + margin-left: -1px; +} +.nav { + padding-left: 0; + margin-bottom: 0; + list-style: none; +} +.nav > li { + position: relative; + display: block; +} +.nav > li > a { + position: relative; + display: block; + padding: 10px 15px; +} +.nav > li > a:hover, +.nav > li > a:focus { + text-decoration: none; + background-color: #eee; +} +.nav > li.disabled > a { + color: #777; +} +.nav > li.disabled > a:hover, +.nav > li.disabled > a:focus { + color: #777; + text-decoration: none; + cursor: not-allowed; + background-color: transparent; +} +.nav .open > a, +.nav .open > a:hover, +.nav .open > a:focus { + background-color: #eee; + border-color: #337ab7; +} +.nav .nav-divider { + height: 1px; + margin: 9px 0; + overflow: hidden; + background-color: #e5e5e5; +} +.nav > li > a > img { + max-width: none; +} +.nav-tabs { + border-bottom: 1px solid #ddd; +} +.nav-tabs > li { + float: left; + margin-bottom: -1px; +} +.nav-tabs > li > a { + margin-right: 2px; + line-height: 1.42857143; + border: 1px solid transparent; + border-radius: 4px 4px 0 0; +} +.nav-tabs > li > a:hover { + border-color: #eee #eee #ddd; +} +.nav-tabs > li.active > a, +.nav-tabs > li.active > a:hover, +.nav-tabs > li.active > a:focus { + color: #555; + cursor: default; + background-color: #fff; + border: 1px solid #ddd; + border-bottom-color: transparent; +} +.nav-tabs.nav-justified { + width: 100%; + border-bottom: 0; +} 
+.nav-tabs.nav-justified > li { + float: none; +} +.nav-tabs.nav-justified > li > a { + margin-bottom: 5px; + text-align: center; +} +.nav-tabs.nav-justified > .dropdown .dropdown-menu { + top: auto; + left: auto; +} +@media (min-width: 768px) { + .nav-tabs.nav-justified > li { + display: table-cell; + width: 1%; + } + .nav-tabs.nav-justified > li > a { + margin-bottom: 0; + } +} +.nav-tabs.nav-justified > li > a { + margin-right: 0; + border-radius: 4px; +} +.nav-tabs.nav-justified > .active > a, +.nav-tabs.nav-justified > .active > a:hover, +.nav-tabs.nav-justified > .active > a:focus { + border: 1px solid #ddd; +} +@media (min-width: 768px) { + .nav-tabs.nav-justified > li > a { + border-bottom: 1px solid #ddd; + border-radius: 4px 4px 0 0; + } + .nav-tabs.nav-justified > .active > a, + .nav-tabs.nav-justified > .active > a:hover, + .nav-tabs.nav-justified > .active > a:focus { + border-bottom-color: #fff; + } +} +.nav-pills > li { + float: left; +} +.nav-pills > li > a { + border-radius: 4px; +} +.nav-pills > li + li { + margin-left: 2px; +} +.nav-pills > li.active > a, +.nav-pills > li.active > a:hover, +.nav-pills > li.active > a:focus { + color: #fff; + background-color: #337ab7; +} +.nav-stacked > li { + float: none; +} +.nav-stacked > li + li { + margin-top: 2px; + margin-left: 0; +} +.nav-justified { + width: 100%; +} +.nav-justified > li { + float: none; +} +.nav-justified > li > a { + margin-bottom: 5px; + text-align: center; +} +.nav-justified > .dropdown .dropdown-menu { + top: auto; + left: auto; +} +@media (min-width: 768px) { + .nav-justified > li { + display: table-cell; + width: 1%; + } + .nav-justified > li > a { + margin-bottom: 0; + } +} +.nav-tabs-justified { + border-bottom: 0; +} +.nav-tabs-justified > li > a { + margin-right: 0; + border-radius: 4px; +} +.nav-tabs-justified > .active > a, +.nav-tabs-justified > .active > a:hover, +.nav-tabs-justified > .active > a:focus { + border: 1px solid #ddd; +} +@media (min-width: 768px) { + .nav-tabs-justified > li > a { + border-bottom: 1px solid #ddd; + border-radius: 4px 4px 0 0; + } + .nav-tabs-justified > .active > a, + .nav-tabs-justified > .active > a:hover, + .nav-tabs-justified > .active > a:focus { + border-bottom-color: #fff; + } +} +.tab-content > .tab-pane { + display: none; +} +.tab-content > .active { + display: block; +} +.nav-tabs .dropdown-menu { + margin-top: -1px; + border-top-left-radius: 0; + border-top-right-radius: 0; +} +.navbar { + position: relative; + min-height: 50px; + margin-bottom: 20px; + border: 1px solid transparent; +} +@media (min-width: 768px) { + .navbar { + border-radius: 4px; + } +} +@media (min-width: 768px) { + .navbar-header { + float: left; + } +} +.navbar-collapse { + padding-right: 15px; + padding-left: 15px; + overflow-x: visible; + -webkit-overflow-scrolling: touch; + border-top: 1px solid transparent; + -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); + box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); +} +.navbar-collapse.in { + overflow-y: auto; +} +@media (min-width: 768px) { + .navbar-collapse { + width: auto; + border-top: 0; + -webkit-box-shadow: none; + box-shadow: none; + } + .navbar-collapse.collapse { + display: block !important; + height: auto !important; + padding-bottom: 0; + overflow: visible !important; + } + .navbar-collapse.in { + overflow-y: visible; + } + .navbar-fixed-top .navbar-collapse, + .navbar-static-top .navbar-collapse, + .navbar-fixed-bottom .navbar-collapse { + padding-right: 0; + padding-left: 0; + } +} +.navbar-fixed-top 
.navbar-collapse, +.navbar-fixed-bottom .navbar-collapse { + max-height: 340px; +} +@media (max-device-width: 480px) and (orientation: landscape) { + .navbar-fixed-top .navbar-collapse, + .navbar-fixed-bottom .navbar-collapse { + max-height: 200px; + } +} +.container > .navbar-header, +.container-fluid > .navbar-header, +.container > .navbar-collapse, +.container-fluid > .navbar-collapse { + margin-right: -15px; + margin-left: -15px; +} +@media (min-width: 768px) { + .container > .navbar-header, + .container-fluid > .navbar-header, + .container > .navbar-collapse, + .container-fluid > .navbar-collapse { + margin-right: 0; + margin-left: 0; + } +} +.navbar-static-top { + z-index: 1000; + border-width: 0 0 1px; +} +@media (min-width: 768px) { + .navbar-static-top { + border-radius: 0; + } +} +.navbar-fixed-top, +.navbar-fixed-bottom { + position: fixed; + right: 0; + left: 0; + z-index: 1030; +} +@media (min-width: 768px) { + .navbar-fixed-top, + .navbar-fixed-bottom { + border-radius: 0; + } +} +.navbar-fixed-top { + top: 0; + border-width: 0 0 1px; +} +.navbar-fixed-bottom { + bottom: 0; + margin-bottom: 0; + border-width: 1px 0 0; +} +.navbar-brand { + float: left; + height: 50px; + padding: 15px 15px; + font-size: 18px; + line-height: 20px; +} +.navbar-brand:hover, +.navbar-brand:focus { + text-decoration: none; +} +.navbar-brand > img { + display: block; +} +@media (min-width: 768px) { + .navbar > .container .navbar-brand, + .navbar > .container-fluid .navbar-brand { + margin-left: -15px; + } +} +.navbar-toggle { + position: relative; + float: right; + padding: 9px 10px; + margin-top: 8px; + margin-right: 15px; + margin-bottom: 8px; + background-color: transparent; + background-image: none; + border: 1px solid transparent; + border-radius: 4px; +} +.navbar-toggle:focus { + outline: 0; +} +.navbar-toggle .icon-bar { + display: block; + width: 22px; + height: 2px; + border-radius: 1px; +} +.navbar-toggle .icon-bar + .icon-bar { + margin-top: 4px; +} +@media (min-width: 768px) { + .navbar-toggle { + display: none; + } +} +.navbar-nav { + margin: 7.5px -15px; +} +.navbar-nav > li > a { + padding-top: 10px; + padding-bottom: 10px; + line-height: 20px; +} +@media (max-width: 767px) { + .navbar-nav .open .dropdown-menu { + position: static; + float: none; + width: auto; + margin-top: 0; + background-color: transparent; + border: 0; + -webkit-box-shadow: none; + box-shadow: none; + } + .navbar-nav .open .dropdown-menu > li > a, + .navbar-nav .open .dropdown-menu .dropdown-header { + padding: 5px 15px 5px 25px; + } + .navbar-nav .open .dropdown-menu > li > a { + line-height: 20px; + } + .navbar-nav .open .dropdown-menu > li > a:hover, + .navbar-nav .open .dropdown-menu > li > a:focus { + background-image: none; + } +} +@media (min-width: 768px) { + .navbar-nav { + float: left; + margin: 0; + } + .navbar-nav > li { + float: left; + } + .navbar-nav > li > a { + padding-top: 15px; + padding-bottom: 15px; + } +} +.navbar-form { + padding: 10px 15px; + margin-top: 8px; + margin-right: -15px; + margin-bottom: 8px; + margin-left: -15px; + border-top: 1px solid transparent; + border-bottom: 1px solid transparent; + -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); + box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); +} +@media (min-width: 768px) { + .navbar-form .form-group { + display: inline-block; + margin-bottom: 0; + vertical-align: middle; + } + .navbar-form .form-control { + display: inline-block; + width: auto; + 
vertical-align: middle; + } + .navbar-form .form-control-static { + display: inline-block; + } + .navbar-form .input-group { + display: inline-table; + vertical-align: middle; + } + .navbar-form .input-group .input-group-addon, + .navbar-form .input-group .input-group-btn, + .navbar-form .input-group .form-control { + width: auto; + } + .navbar-form .input-group > .form-control { + width: 100%; + } + .navbar-form .control-label { + margin-bottom: 0; + vertical-align: middle; + } + .navbar-form .radio, + .navbar-form .checkbox { + display: inline-block; + margin-top: 0; + margin-bottom: 0; + vertical-align: middle; + } + .navbar-form .radio label, + .navbar-form .checkbox label { + padding-left: 0; + } + .navbar-form .radio input[type="radio"], + .navbar-form .checkbox input[type="checkbox"] { + position: relative; + margin-left: 0; + } + .navbar-form .has-feedback .form-control-feedback { + top: 0; + } +} +@media (max-width: 767px) { + .navbar-form .form-group { + margin-bottom: 5px; + } + .navbar-form .form-group:last-child { + margin-bottom: 0; + } +} +@media (min-width: 768px) { + .navbar-form { + width: auto; + padding-top: 0; + padding-bottom: 0; + margin-right: 0; + margin-left: 0; + border: 0; + -webkit-box-shadow: none; + box-shadow: none; + } +} +.navbar-nav > li > .dropdown-menu { + margin-top: 0; + border-top-left-radius: 0; + border-top-right-radius: 0; +} +.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { + margin-bottom: 0; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.navbar-btn { + margin-top: 8px; + margin-bottom: 8px; +} +.navbar-btn.btn-sm { + margin-top: 10px; + margin-bottom: 10px; +} +.navbar-btn.btn-xs { + margin-top: 14px; + margin-bottom: 14px; +} +.navbar-text { + margin-top: 15px; + margin-bottom: 15px; +} +@media (min-width: 768px) { + .navbar-text { + float: left; + margin-right: 15px; + margin-left: 15px; + } +} +@media (min-width: 768px) { + .navbar-left { + float: left !important; + } + .navbar-right { + float: right !important; + margin-right: -15px; + } + .navbar-right ~ .navbar-right { + margin-right: 0; + } +} +.navbar-default { + background-color: #f8f8f8; + border-color: #e7e7e7; +} +.navbar-default .navbar-brand { + color: #777; +} +.navbar-default .navbar-brand:hover, +.navbar-default .navbar-brand:focus { + color: #5e5e5e; + background-color: transparent; +} +.navbar-default .navbar-text { + color: #777; +} +.navbar-default .navbar-nav > li > a { + color: #777; +} +.navbar-default .navbar-nav > li > a:hover, +.navbar-default .navbar-nav > li > a:focus { + color: #333; + background-color: transparent; +} +.navbar-default .navbar-nav > .active > a, +.navbar-default .navbar-nav > .active > a:hover, +.navbar-default .navbar-nav > .active > a:focus { + color: #555; + background-color: #e7e7e7; +} +.navbar-default .navbar-nav > .disabled > a, +.navbar-default .navbar-nav > .disabled > a:hover, +.navbar-default .navbar-nav > .disabled > a:focus { + color: #ccc; + background-color: transparent; +} +.navbar-default .navbar-toggle { + border-color: #ddd; +} +.navbar-default .navbar-toggle:hover, +.navbar-default .navbar-toggle:focus { + background-color: #ddd; +} +.navbar-default .navbar-toggle .icon-bar { + background-color: #888; +} +.navbar-default .navbar-collapse, +.navbar-default .navbar-form { + border-color: #e7e7e7; +} +.navbar-default .navbar-nav > .open > a, +.navbar-default .navbar-nav > .open > a:hover, +.navbar-default .navbar-nav > .open > a:focus { + 
color: #555; + background-color: #e7e7e7; +} +@media (max-width: 767px) { + .navbar-default .navbar-nav .open .dropdown-menu > li > a { + color: #777; + } + .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, + .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { + color: #333; + background-color: transparent; + } + .navbar-default .navbar-nav .open .dropdown-menu > .active > a, + .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, + .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { + color: #555; + background-color: #e7e7e7; + } + .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, + .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, + .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { + color: #ccc; + background-color: transparent; + } +} +.navbar-default .navbar-link { + color: #777; +} +.navbar-default .navbar-link:hover { + color: #333; +} +.navbar-default .btn-link { + color: #777; +} +.navbar-default .btn-link:hover, +.navbar-default .btn-link:focus { + color: #333; +} +.navbar-default .btn-link[disabled]:hover, +fieldset[disabled] .navbar-default .btn-link:hover, +.navbar-default .btn-link[disabled]:focus, +fieldset[disabled] .navbar-default .btn-link:focus { + color: #ccc; +} +.navbar-inverse { + background-color: #222; + border-color: #080808; +} +.navbar-inverse .navbar-brand { + color: #9d9d9d; +} +.navbar-inverse .navbar-brand:hover, +.navbar-inverse .navbar-brand:focus { + color: #fff; + background-color: transparent; +} +.navbar-inverse .navbar-text { + color: #9d9d9d; +} +.navbar-inverse .navbar-nav > li > a { + color: #9d9d9d; +} +.navbar-inverse .navbar-nav > li > a:hover, +.navbar-inverse .navbar-nav > li > a:focus { + color: #fff; + background-color: transparent; +} +.navbar-inverse .navbar-nav > .active > a, +.navbar-inverse .navbar-nav > .active > a:hover, +.navbar-inverse .navbar-nav > .active > a:focus { + color: #fff; + background-color: #080808; +} +.navbar-inverse .navbar-nav > .disabled > a, +.navbar-inverse .navbar-nav > .disabled > a:hover, +.navbar-inverse .navbar-nav > .disabled > a:focus { + color: #444; + background-color: transparent; +} +.navbar-inverse .navbar-toggle { + border-color: #333; +} +.navbar-inverse .navbar-toggle:hover, +.navbar-inverse .navbar-toggle:focus { + background-color: #333; +} +.navbar-inverse .navbar-toggle .icon-bar { + background-color: #fff; +} +.navbar-inverse .navbar-collapse, +.navbar-inverse .navbar-form { + border-color: #101010; +} +.navbar-inverse .navbar-nav > .open > a, +.navbar-inverse .navbar-nav > .open > a:hover, +.navbar-inverse .navbar-nav > .open > a:focus { + color: #fff; + background-color: #080808; +} +@media (max-width: 767px) { + .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { + border-color: #080808; + } + .navbar-inverse .navbar-nav .open .dropdown-menu .divider { + background-color: #080808; + } + .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { + color: #9d9d9d; + } + .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, + .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { + color: #fff; + background-color: transparent; + } + .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, + .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, + .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { + color: #fff; + background-color: #080808; + } + .navbar-inverse .navbar-nav .open 
.dropdown-menu > .disabled > a, + .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, + .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { + color: #444; + background-color: transparent; + } +} +.navbar-inverse .navbar-link { + color: #9d9d9d; +} +.navbar-inverse .navbar-link:hover { + color: #fff; +} +.navbar-inverse .btn-link { + color: #9d9d9d; +} +.navbar-inverse .btn-link:hover, +.navbar-inverse .btn-link:focus { + color: #fff; +} +.navbar-inverse .btn-link[disabled]:hover, +fieldset[disabled] .navbar-inverse .btn-link:hover, +.navbar-inverse .btn-link[disabled]:focus, +fieldset[disabled] .navbar-inverse .btn-link:focus { + color: #444; +} +.breadcrumb { + padding: 8px 15px; + margin-bottom: 20px; + list-style: none; + background-color: #f5f5f5; + border-radius: 4px; +} +.breadcrumb > li { + display: inline-block; +} +.breadcrumb > li + li:before { + padding: 0 5px; + color: #ccc; + content: "/\00a0"; +} +.breadcrumb > .active { + color: #777; +} +.pagination { + display: inline-block; + padding-left: 0; + margin: 20px 0; + border-radius: 4px; +} +.pagination > li { + display: inline; +} +.pagination > li > a, +.pagination > li > span { + position: relative; + float: left; + padding: 6px 12px; + margin-left: -1px; + line-height: 1.42857143; + color: #337ab7; + text-decoration: none; + background-color: #fff; + border: 1px solid #ddd; +} +.pagination > li:first-child > a, +.pagination > li:first-child > span { + margin-left: 0; + border-top-left-radius: 4px; + border-bottom-left-radius: 4px; +} +.pagination > li:last-child > a, +.pagination > li:last-child > span { + border-top-right-radius: 4px; + border-bottom-right-radius: 4px; +} +.pagination > li > a:hover, +.pagination > li > span:hover, +.pagination > li > a:focus, +.pagination > li > span:focus { + z-index: 2; + color: #23527c; + background-color: #eee; + border-color: #ddd; +} +.pagination > .active > a, +.pagination > .active > span, +.pagination > .active > a:hover, +.pagination > .active > span:hover, +.pagination > .active > a:focus, +.pagination > .active > span:focus { + z-index: 3; + color: #fff; + cursor: default; + background-color: #337ab7; + border-color: #337ab7; +} +.pagination > .disabled > span, +.pagination > .disabled > span:hover, +.pagination > .disabled > span:focus, +.pagination > .disabled > a, +.pagination > .disabled > a:hover, +.pagination > .disabled > a:focus { + color: #777; + cursor: not-allowed; + background-color: #fff; + border-color: #ddd; +} +.pagination-lg > li > a, +.pagination-lg > li > span { + padding: 10px 16px; + font-size: 18px; + line-height: 1.3333333; +} +.pagination-lg > li:first-child > a, +.pagination-lg > li:first-child > span { + border-top-left-radius: 6px; + border-bottom-left-radius: 6px; +} +.pagination-lg > li:last-child > a, +.pagination-lg > li:last-child > span { + border-top-right-radius: 6px; + border-bottom-right-radius: 6px; +} +.pagination-sm > li > a, +.pagination-sm > li > span { + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; +} +.pagination-sm > li:first-child > a, +.pagination-sm > li:first-child > span { + border-top-left-radius: 3px; + border-bottom-left-radius: 3px; +} +.pagination-sm > li:last-child > a, +.pagination-sm > li:last-child > span { + border-top-right-radius: 3px; + border-bottom-right-radius: 3px; +} +.pager { + padding-left: 0; + margin: 20px 0; + text-align: center; + list-style: none; +} +.pager li { + display: inline; +} +.pager li > a, +.pager li > span { + display: inline-block; + 
padding: 5px 14px; + background-color: #fff; + border: 1px solid #ddd; + border-radius: 15px; +} +.pager li > a:hover, +.pager li > a:focus { + text-decoration: none; + background-color: #eee; +} +.pager .next > a, +.pager .next > span { + float: right; +} +.pager .previous > a, +.pager .previous > span { + float: left; +} +.pager .disabled > a, +.pager .disabled > a:hover, +.pager .disabled > a:focus, +.pager .disabled > span { + color: #777; + cursor: not-allowed; + background-color: #fff; +} +.label { + display: inline; + padding: .2em .6em .3em; + font-size: 75%; + font-weight: bold; + line-height: 1; + color: #fff; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: .25em; +} +a.label:hover, +a.label:focus { + color: #fff; + text-decoration: none; + cursor: pointer; +} +.label:empty { + display: none; +} +.btn .label { + position: relative; + top: -1px; +} +.label-default { + background-color: #777; +} +.label-default[href]:hover, +.label-default[href]:focus { + background-color: #5e5e5e; +} +.label-primary { + background-color: #337ab7; +} +.label-primary[href]:hover, +.label-primary[href]:focus { + background-color: #286090; +} +.label-success { + background-color: #5cb85c; +} +.label-success[href]:hover, +.label-success[href]:focus { + background-color: #449d44; +} +.label-info { + background-color: #5bc0de; +} +.label-info[href]:hover, +.label-info[href]:focus { + background-color: #31b0d5; +} +.label-warning { + background-color: #f0ad4e; +} +.label-warning[href]:hover, +.label-warning[href]:focus { + background-color: #ec971f; +} +.label-danger { + background-color: #d9534f; +} +.label-danger[href]:hover, +.label-danger[href]:focus { + background-color: #c9302c; +} +.badge { + display: inline-block; + min-width: 10px; + padding: 3px 7px; + font-size: 12px; + font-weight: bold; + line-height: 1; + color: #fff; + text-align: center; + white-space: nowrap; + vertical-align: middle; + background-color: #777; + border-radius: 10px; +} +.badge:empty { + display: none; +} +.btn .badge { + position: relative; + top: -1px; +} +.btn-xs .badge, +.btn-group-xs > .btn .badge { + top: 0; + padding: 1px 5px; +} +a.badge:hover, +a.badge:focus { + color: #fff; + text-decoration: none; + cursor: pointer; +} +.list-group-item.active > .badge, +.nav-pills > .active > a > .badge { + color: #337ab7; + background-color: #fff; +} +.list-group-item > .badge { + float: right; +} +.list-group-item > .badge + .badge { + margin-right: 5px; +} +.nav-pills > li > a > .badge { + margin-left: 3px; +} +.jumbotron { + padding-top: 30px; + padding-bottom: 30px; + margin-bottom: 30px; + color: inherit; + background-color: #eee; +} +.jumbotron h1, +.jumbotron .h1 { + color: inherit; +} +.jumbotron p { + margin-bottom: 15px; + font-size: 21px; + font-weight: 200; +} +.jumbotron > hr { + border-top-color: #d5d5d5; +} +.container .jumbotron, +.container-fluid .jumbotron { + padding-right: 15px; + padding-left: 15px; + border-radius: 6px; +} +.jumbotron .container { + max-width: 100%; +} +@media screen and (min-width: 768px) { + .jumbotron { + padding-top: 48px; + padding-bottom: 48px; + } + .container .jumbotron, + .container-fluid .jumbotron { + padding-right: 60px; + padding-left: 60px; + } + .jumbotron h1, + .jumbotron .h1 { + font-size: 63px; + } +} +.thumbnail { + display: block; + padding: 4px; + margin-bottom: 20px; + line-height: 1.42857143; + background-color: #fff; + border: 1px solid #ddd; + border-radius: 4px; + -webkit-transition: border .2s ease-in-out; + 
-o-transition: border .2s ease-in-out; + transition: border .2s ease-in-out; +} +.thumbnail > img, +.thumbnail a > img { + margin-right: auto; + margin-left: auto; +} +a.thumbnail:hover, +a.thumbnail:focus, +a.thumbnail.active { + border-color: #337ab7; +} +.thumbnail .caption { + padding: 9px; + color: #333; +} +.alert { + padding: 15px; + margin-bottom: 20px; + border: 1px solid transparent; + border-radius: 4px; +} +.alert h4 { + margin-top: 0; + color: inherit; +} +.alert .alert-link { + font-weight: bold; +} +.alert > p, +.alert > ul { + margin-bottom: 0; +} +.alert > p + p { + margin-top: 5px; +} +.alert-dismissable, +.alert-dismissible { + padding-right: 35px; +} +.alert-dismissable .close, +.alert-dismissible .close { + position: relative; + top: -2px; + right: -21px; + color: inherit; +} +.alert-success { + color: #3c763d; + background-color: #dff0d8; + border-color: #d6e9c6; +} +.alert-success hr { + border-top-color: #c9e2b3; +} +.alert-success .alert-link { + color: #2b542c; +} +.alert-info { + color: #31708f; + background-color: #d9edf7; + border-color: #bce8f1; +} +.alert-info hr { + border-top-color: #a6e1ec; +} +.alert-info .alert-link { + color: #245269; +} +.alert-warning { + color: #8a6d3b; + background-color: #fcf8e3; + border-color: #faebcc; +} +.alert-warning hr { + border-top-color: #f7e1b5; +} +.alert-warning .alert-link { + color: #66512c; +} +.alert-danger { + color: #a94442; + background-color: #f2dede; + border-color: #ebccd1; +} +.alert-danger hr { + border-top-color: #e4b9c0; +} +.alert-danger .alert-link { + color: #843534; +} +@-webkit-keyframes progress-bar-stripes { + from { + background-position: 40px 0; + } + to { + background-position: 0 0; + } +} +@-o-keyframes progress-bar-stripes { + from { + background-position: 40px 0; + } + to { + background-position: 0 0; + } +} +@keyframes progress-bar-stripes { + from { + background-position: 40px 0; + } + to { + background-position: 0 0; + } +} +.progress { + height: 20px; + margin-bottom: 20px; + overflow: hidden; + background-color: #f5f5f5; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); + box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); +} +.progress-bar { + float: left; + width: 0; + height: 100%; + font-size: 12px; + line-height: 20px; + color: #fff; + text-align: center; + background-color: #337ab7; + -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); + box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); + -webkit-transition: width .6s ease; + -o-transition: width .6s ease; + transition: width .6s ease; +} +.progress-striped .progress-bar, +.progress-bar-striped { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + -webkit-background-size: 40px 40px; + background-size: 40px 40px; +} +.progress.active .progress-bar, +.progress-bar.active { + -webkit-animation: progress-bar-stripes 2s linear infinite; + -o-animation: progress-bar-stripes 2s linear infinite; + animation: progress-bar-stripes 2s linear infinite; +} 
+.progress-bar-success { + background-color: #5cb85c; +} +.progress-striped .progress-bar-success { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); +} +.progress-bar-info { + background-color: #5bc0de; +} +.progress-striped .progress-bar-info { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); +} +.progress-bar-warning { + background-color: #f0ad4e; +} +.progress-striped .progress-bar-warning { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); +} +.progress-bar-danger { + background-color: #d9534f; +} +.progress-striped .progress-bar-danger { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); +} +.media { + margin-top: 15px; +} +.media:first-child { + margin-top: 0; +} +.media, +.media-body { + overflow: hidden; + zoom: 1; +} +.media-body { + width: 10000px; +} +.media-object { + display: block; +} +.media-object.img-thumbnail { + max-width: none; +} +.media-right, +.media > .pull-right { + padding-left: 10px; +} +.media-left, +.media > .pull-left { + padding-right: 10px; +} +.media-left, +.media-right, +.media-body { + display: table-cell; + vertical-align: top; +} +.media-middle { + vertical-align: middle; +} +.media-bottom { + vertical-align: bottom; +} +.media-heading { + margin-top: 0; + margin-bottom: 5px; +} +.media-list { + padding-left: 0; + list-style: none; +} +.list-group { + padding-left: 0; + 
margin-bottom: 20px; +} +.list-group-item { + position: relative; + display: block; + padding: 10px 15px; + margin-bottom: -1px; + background-color: #fff; + border: 1px solid #ddd; +} +.list-group-item:first-child { + border-top-left-radius: 4px; + border-top-right-radius: 4px; +} +.list-group-item:last-child { + margin-bottom: 0; + border-bottom-right-radius: 4px; + border-bottom-left-radius: 4px; +} +a.list-group-item, +button.list-group-item { + color: #555; +} +a.list-group-item .list-group-item-heading, +button.list-group-item .list-group-item-heading { + color: #333; +} +a.list-group-item:hover, +button.list-group-item:hover, +a.list-group-item:focus, +button.list-group-item:focus { + color: #555; + text-decoration: none; + background-color: #f5f5f5; +} +button.list-group-item { + width: 100%; + text-align: left; +} +.list-group-item.disabled, +.list-group-item.disabled:hover, +.list-group-item.disabled:focus { + color: #777; + cursor: not-allowed; + background-color: #eee; +} +.list-group-item.disabled .list-group-item-heading, +.list-group-item.disabled:hover .list-group-item-heading, +.list-group-item.disabled:focus .list-group-item-heading { + color: inherit; +} +.list-group-item.disabled .list-group-item-text, +.list-group-item.disabled:hover .list-group-item-text, +.list-group-item.disabled:focus .list-group-item-text { + color: #777; +} +.list-group-item.active, +.list-group-item.active:hover, +.list-group-item.active:focus { + z-index: 2; + color: #fff; + background-color: #337ab7; + border-color: #337ab7; +} +.list-group-item.active .list-group-item-heading, +.list-group-item.active:hover .list-group-item-heading, +.list-group-item.active:focus .list-group-item-heading, +.list-group-item.active .list-group-item-heading > small, +.list-group-item.active:hover .list-group-item-heading > small, +.list-group-item.active:focus .list-group-item-heading > small, +.list-group-item.active .list-group-item-heading > .small, +.list-group-item.active:hover .list-group-item-heading > .small, +.list-group-item.active:focus .list-group-item-heading > .small { + color: inherit; +} +.list-group-item.active .list-group-item-text, +.list-group-item.active:hover .list-group-item-text, +.list-group-item.active:focus .list-group-item-text { + color: #c7ddef; +} +.list-group-item-success { + color: #3c763d; + background-color: #dff0d8; +} +a.list-group-item-success, +button.list-group-item-success { + color: #3c763d; +} +a.list-group-item-success .list-group-item-heading, +button.list-group-item-success .list-group-item-heading { + color: inherit; +} +a.list-group-item-success:hover, +button.list-group-item-success:hover, +a.list-group-item-success:focus, +button.list-group-item-success:focus { + color: #3c763d; + background-color: #d0e9c6; +} +a.list-group-item-success.active, +button.list-group-item-success.active, +a.list-group-item-success.active:hover, +button.list-group-item-success.active:hover, +a.list-group-item-success.active:focus, +button.list-group-item-success.active:focus { + color: #fff; + background-color: #3c763d; + border-color: #3c763d; +} +.list-group-item-info { + color: #31708f; + background-color: #d9edf7; +} +a.list-group-item-info, +button.list-group-item-info { + color: #31708f; +} +a.list-group-item-info .list-group-item-heading, +button.list-group-item-info .list-group-item-heading { + color: inherit; +} +a.list-group-item-info:hover, +button.list-group-item-info:hover, +a.list-group-item-info:focus, +button.list-group-item-info:focus { + color: #31708f; + 
background-color: #c4e3f3; +} +a.list-group-item-info.active, +button.list-group-item-info.active, +a.list-group-item-info.active:hover, +button.list-group-item-info.active:hover, +a.list-group-item-info.active:focus, +button.list-group-item-info.active:focus { + color: #fff; + background-color: #31708f; + border-color: #31708f; +} +.list-group-item-warning { + color: #8a6d3b; + background-color: #fcf8e3; +} +a.list-group-item-warning, +button.list-group-item-warning { + color: #8a6d3b; +} +a.list-group-item-warning .list-group-item-heading, +button.list-group-item-warning .list-group-item-heading { + color: inherit; +} +a.list-group-item-warning:hover, +button.list-group-item-warning:hover, +a.list-group-item-warning:focus, +button.list-group-item-warning:focus { + color: #8a6d3b; + background-color: #faf2cc; +} +a.list-group-item-warning.active, +button.list-group-item-warning.active, +a.list-group-item-warning.active:hover, +button.list-group-item-warning.active:hover, +a.list-group-item-warning.active:focus, +button.list-group-item-warning.active:focus { + color: #fff; + background-color: #8a6d3b; + border-color: #8a6d3b; +} +.list-group-item-danger { + color: #a94442; + background-color: #f2dede; +} +a.list-group-item-danger, +button.list-group-item-danger { + color: #a94442; +} +a.list-group-item-danger .list-group-item-heading, +button.list-group-item-danger .list-group-item-heading { + color: inherit; +} +a.list-group-item-danger:hover, +button.list-group-item-danger:hover, +a.list-group-item-danger:focus, +button.list-group-item-danger:focus { + color: #a94442; + background-color: #ebcccc; +} +a.list-group-item-danger.active, +button.list-group-item-danger.active, +a.list-group-item-danger.active:hover, +button.list-group-item-danger.active:hover, +a.list-group-item-danger.active:focus, +button.list-group-item-danger.active:focus { + color: #fff; + background-color: #a94442; + border-color: #a94442; +} +.list-group-item-heading { + margin-top: 0; + margin-bottom: 5px; +} +.list-group-item-text { + margin-bottom: 0; + line-height: 1.3; +} +.panel { + margin-bottom: 20px; + background-color: #fff; + border: 1px solid transparent; + border-radius: 4px; + -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05); + box-shadow: 0 1px 1px rgba(0, 0, 0, .05); +} +.panel-body { + padding: 15px; +} +.panel-heading { + padding: 10px 15px; + border-bottom: 1px solid transparent; + border-top-left-radius: 3px; + border-top-right-radius: 3px; +} +.panel-heading > .dropdown .dropdown-toggle { + color: inherit; +} +.panel-title { + margin-top: 0; + margin-bottom: 0; + font-size: 16px; + color: inherit; +} +.panel-title > a, +.panel-title > small, +.panel-title > .small, +.panel-title > small > a, +.panel-title > .small > a { + color: inherit; +} +.panel-footer { + padding: 10px 15px; + background-color: #f5f5f5; + border-top: 1px solid #ddd; + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .list-group, +.panel > .panel-collapse > .list-group { + margin-bottom: 0; +} +.panel > .list-group .list-group-item, +.panel > .panel-collapse > .list-group .list-group-item { + border-width: 1px 0; + border-radius: 0; +} +.panel > .list-group:first-child .list-group-item:first-child, +.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child { + border-top: 0; + border-top-left-radius: 3px; + border-top-right-radius: 3px; +} +.panel > .list-group:last-child .list-group-item:last-child, +.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child 
{ + border-bottom: 0; + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child { + border-top-left-radius: 0; + border-top-right-radius: 0; +} +.panel-heading + .list-group .list-group-item:first-child { + border-top-width: 0; +} +.list-group + .panel-footer { + border-top-width: 0; +} +.panel > .table, +.panel > .table-responsive > .table, +.panel > .panel-collapse > .table { + margin-bottom: 0; +} +.panel > .table caption, +.panel > .table-responsive > .table caption, +.panel > .panel-collapse > .table caption { + padding-right: 15px; + padding-left: 15px; +} +.panel > .table:first-child, +.panel > .table-responsive:first-child > .table:first-child { + border-top-left-radius: 3px; + border-top-right-radius: 3px; +} +.panel > .table:first-child > thead:first-child > tr:first-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child, +.panel > .table:first-child > tbody:first-child > tr:first-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child { + border-top-left-radius: 3px; + border-top-right-radius: 3px; +} +.panel > .table:first-child > thead:first-child > tr:first-child td:first-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child, +.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child, +.panel > .table:first-child > thead:first-child > tr:first-child th:first-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child, +.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child { + border-top-left-radius: 3px; +} +.panel > .table:first-child > thead:first-child > tr:first-child td:last-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child, +.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child, +.panel > .table:first-child > thead:first-child > tr:first-child th:last-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child, +.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child { + border-top-right-radius: 3px; +} +.panel > .table:last-child, +.panel > .table-responsive:last-child > .table:last-child { + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .table:last-child > tbody:last-child > tr:last-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child { + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child, +.panel > .table-responsive:last-child > .table:last-child > 
tbody:last-child > tr:last-child td:first-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child, +.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child { + border-bottom-left-radius: 3px; +} +.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child, +.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child { + border-bottom-right-radius: 3px; +} +.panel > .panel-body + .table, +.panel > .panel-body + .table-responsive, +.panel > .table + .panel-body, +.panel > .table-responsive + .panel-body { + border-top: 1px solid #ddd; +} +.panel > .table > tbody:first-child > tr:first-child th, +.panel > .table > tbody:first-child > tr:first-child td { + border-top: 0; +} +.panel > .table-bordered, +.panel > .table-responsive > .table-bordered { + border: 0; +} +.panel > .table-bordered > thead > tr > th:first-child, +.panel > .table-responsive > .table-bordered > thead > tr > th:first-child, +.panel > .table-bordered > tbody > tr > th:first-child, +.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child, +.panel > .table-bordered > tfoot > tr > th:first-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child, +.panel > .table-bordered > thead > tr > td:first-child, +.panel > .table-responsive > .table-bordered > thead > tr > td:first-child, +.panel > .table-bordered > tbody > tr > td:first-child, +.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child, +.panel > .table-bordered > tfoot > tr > td:first-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child { + border-left: 0; +} +.panel > .table-bordered > thead > tr > th:last-child, +.panel > .table-responsive > .table-bordered > thead > tr > th:last-child, +.panel > .table-bordered > tbody > tr > th:last-child, +.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child, +.panel > .table-bordered > tfoot > tr > th:last-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child, +.panel > .table-bordered > thead > tr > td:last-child, +.panel > .table-responsive > .table-bordered > thead > tr > td:last-child, +.panel > .table-bordered > tbody > tr > td:last-child, +.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child, +.panel > .table-bordered > tfoot > tr > td:last-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child { + border-right: 0; +} +.panel > .table-bordered > thead > tr:first-child > td, 
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > td, +.panel > .table-bordered > tbody > tr:first-child > td, +.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td, +.panel > .table-bordered > thead > tr:first-child > th, +.panel > .table-responsive > .table-bordered > thead > tr:first-child > th, +.panel > .table-bordered > tbody > tr:first-child > th, +.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th { + border-bottom: 0; +} +.panel > .table-bordered > tbody > tr:last-child > td, +.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td, +.panel > .table-bordered > tfoot > tr:last-child > td, +.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td, +.panel > .table-bordered > tbody > tr:last-child > th, +.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th, +.panel > .table-bordered > tfoot > tr:last-child > th, +.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th { + border-bottom: 0; +} +.panel > .table-responsive { + margin-bottom: 0; + border: 0; +} +.panel-group { + margin-bottom: 20px; +} +.panel-group .panel { + margin-bottom: 0; + border-radius: 4px; +} +.panel-group .panel + .panel { + margin-top: 5px; +} +.panel-group .panel-heading { + border-bottom: 0; +} +.panel-group .panel-heading + .panel-collapse > .panel-body, +.panel-group .panel-heading + .panel-collapse > .list-group { + border-top: 1px solid #ddd; +} +.panel-group .panel-footer { + border-top: 0; +} +.panel-group .panel-footer + .panel-collapse .panel-body { + border-bottom: 1px solid #ddd; +} +.panel-default { + border-color: #ddd; +} +.panel-default > .panel-heading { + color: #333; + background-color: #f5f5f5; + border-color: #ddd; +} +.panel-default > .panel-heading + .panel-collapse > .panel-body { + border-top-color: #ddd; +} +.panel-default > .panel-heading .badge { + color: #f5f5f5; + background-color: #333; +} +.panel-default > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: #ddd; +} +.panel-primary { + border-color: #337ab7; +} +.panel-primary > .panel-heading { + color: #fff; + background-color: #337ab7; + border-color: #337ab7; +} +.panel-primary > .panel-heading + .panel-collapse > .panel-body { + border-top-color: #337ab7; +} +.panel-primary > .panel-heading .badge { + color: #337ab7; + background-color: #fff; +} +.panel-primary > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: #337ab7; +} +.panel-success { + border-color: #d6e9c6; +} +.panel-success > .panel-heading { + color: #3c763d; + background-color: #dff0d8; + border-color: #d6e9c6; +} +.panel-success > .panel-heading + .panel-collapse > .panel-body { + border-top-color: #d6e9c6; +} +.panel-success > .panel-heading .badge { + color: #dff0d8; + background-color: #3c763d; +} +.panel-success > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: #d6e9c6; +} +.panel-info { + border-color: #bce8f1; +} +.panel-info > .panel-heading { + color: #31708f; + background-color: #d9edf7; + border-color: #bce8f1; +} +.panel-info > .panel-heading + .panel-collapse > .panel-body { + border-top-color: #bce8f1; +} +.panel-info > .panel-heading .badge { + color: #d9edf7; + background-color: #31708f; +} +.panel-info > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: #bce8f1; +} +.panel-warning { + border-color: #faebcc; +} +.panel-warning > .panel-heading { + color: #8a6d3b; + background-color: #fcf8e3; + border-color: #faebcc; +} 
+.panel-warning > .panel-heading + .panel-collapse > .panel-body { + border-top-color: #faebcc; +} +.panel-warning > .panel-heading .badge { + color: #fcf8e3; + background-color: #8a6d3b; +} +.panel-warning > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: #faebcc; +} +.panel-danger { + border-color: #ebccd1; +} +.panel-danger > .panel-heading { + color: #a94442; + background-color: #f2dede; + border-color: #ebccd1; +} +.panel-danger > .panel-heading + .panel-collapse > .panel-body { + border-top-color: #ebccd1; +} +.panel-danger > .panel-heading .badge { + color: #f2dede; + background-color: #a94442; +} +.panel-danger > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: #ebccd1; +} +.embed-responsive { + position: relative; + display: block; + height: 0; + padding: 0; + overflow: hidden; +} +.embed-responsive .embed-responsive-item, +.embed-responsive iframe, +.embed-responsive embed, +.embed-responsive object, +.embed-responsive video { + position: absolute; + top: 0; + bottom: 0; + left: 0; + width: 100%; + height: 100%; + border: 0; +} +.embed-responsive-16by9 { + padding-bottom: 56.25%; +} +.embed-responsive-4by3 { + padding-bottom: 75%; +} +.well { + min-height: 20px; + padding: 19px; + margin-bottom: 20px; + background-color: #f5f5f5; + border: 1px solid #e3e3e3; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); +} +.well blockquote { + border-color: #ddd; + border-color: rgba(0, 0, 0, .15); +} +.well-lg { + padding: 24px; + border-radius: 6px; +} +.well-sm { + padding: 9px; + border-radius: 3px; +} +.close { + float: right; + font-size: 21px; + font-weight: bold; + line-height: 1; + color: #000; + text-shadow: 0 1px 0 #fff; + filter: alpha(opacity=20); + opacity: .2; +} +.close:hover, +.close:focus { + color: #000; + text-decoration: none; + cursor: pointer; + filter: alpha(opacity=50); + opacity: .5; +} +button.close { + -webkit-appearance: none; + padding: 0; + cursor: pointer; + background: transparent; + border: 0; +} +.modal-open { + overflow: hidden; +} +.modal { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1050; + display: none; + overflow: hidden; + -webkit-overflow-scrolling: touch; + outline: 0; +} +.modal.fade .modal-dialog { + -webkit-transition: -webkit-transform .3s ease-out; + -o-transition: -o-transform .3s ease-out; + transition: transform .3s ease-out; + -webkit-transform: translate(0, -25%); + -ms-transform: translate(0, -25%); + -o-transform: translate(0, -25%); + transform: translate(0, -25%); +} +.modal.in .modal-dialog { + -webkit-transform: translate(0, 0); + -ms-transform: translate(0, 0); + -o-transform: translate(0, 0); + transform: translate(0, 0); +} +.modal-open .modal { + overflow-x: hidden; + overflow-y: auto; +} +.modal-dialog { + position: relative; + width: auto; + margin: 10px; +} +.modal-content { + position: relative; + background-color: #fff; + -webkit-background-clip: padding-box; + background-clip: padding-box; + border: 1px solid #999; + border: 1px solid rgba(0, 0, 0, .2); + border-radius: 6px; + outline: 0; + -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5); + box-shadow: 0 3px 9px rgba(0, 0, 0, .5); +} +.modal-backdrop { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1040; + background-color: #000; +} +.modal-backdrop.fade { + filter: alpha(opacity=0); + opacity: 0; +} +.modal-backdrop.in { + filter: alpha(opacity=50); + opacity: .5; +} +.modal-header { + 
padding: 15px; + border-bottom: 1px solid #e5e5e5; +} +.modal-header .close { + margin-top: -2px; +} +.modal-title { + margin: 0; + line-height: 1.42857143; +} +.modal-body { + position: relative; + padding: 15px; +} +.modal-footer { + padding: 15px; + text-align: right; + border-top: 1px solid #e5e5e5; +} +.modal-footer .btn + .btn { + margin-bottom: 0; + margin-left: 5px; +} +.modal-footer .btn-group .btn + .btn { + margin-left: -1px; +} +.modal-footer .btn-block + .btn-block { + margin-left: 0; +} +.modal-scrollbar-measure { + position: absolute; + top: -9999px; + width: 50px; + height: 50px; + overflow: scroll; +} +@media (min-width: 768px) { + .modal-dialog { + width: 600px; + margin: 30px auto; + } + .modal-content { + -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5); + box-shadow: 0 5px 15px rgba(0, 0, 0, .5); + } + .modal-sm { + width: 300px; + } +} +@media (min-width: 992px) { + .modal-lg { + width: 900px; + } +} +.tooltip { + position: absolute; + z-index: 1070; + display: block; + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-size: 12px; + font-style: normal; + font-weight: normal; + line-height: 1.42857143; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + word-wrap: normal; + white-space: normal; + filter: alpha(opacity=0); + opacity: 0; + + line-break: auto; +} +.tooltip.in { + filter: alpha(opacity=90); + opacity: .9; +} +.tooltip.top { + padding: 5px 0; + margin-top: -3px; +} +.tooltip.right { + padding: 0 5px; + margin-left: 3px; +} +.tooltip.bottom { + padding: 5px 0; + margin-top: 3px; +} +.tooltip.left { + padding: 0 5px; + margin-left: -3px; +} +.tooltip-inner { + max-width: 200px; + padding: 3px 8px; + color: #fff; + text-align: center; + background-color: #000; + border-radius: 4px; +} +.tooltip-arrow { + position: absolute; + width: 0; + height: 0; + border-color: transparent; + border-style: solid; +} +.tooltip.top .tooltip-arrow { + bottom: 0; + left: 50%; + margin-left: -5px; + border-width: 5px 5px 0; + border-top-color: #000; +} +.tooltip.top-left .tooltip-arrow { + right: 5px; + bottom: 0; + margin-bottom: -5px; + border-width: 5px 5px 0; + border-top-color: #000; +} +.tooltip.top-right .tooltip-arrow { + bottom: 0; + left: 5px; + margin-bottom: -5px; + border-width: 5px 5px 0; + border-top-color: #000; +} +.tooltip.right .tooltip-arrow { + top: 50%; + left: 0; + margin-top: -5px; + border-width: 5px 5px 5px 0; + border-right-color: #000; +} +.tooltip.left .tooltip-arrow { + top: 50%; + right: 0; + margin-top: -5px; + border-width: 5px 0 5px 5px; + border-left-color: #000; +} +.tooltip.bottom .tooltip-arrow { + top: 0; + left: 50%; + margin-left: -5px; + border-width: 0 5px 5px; + border-bottom-color: #000; +} +.tooltip.bottom-left .tooltip-arrow { + top: 0; + right: 5px; + margin-top: -5px; + border-width: 0 5px 5px; + border-bottom-color: #000; +} +.tooltip.bottom-right .tooltip-arrow { + top: 0; + left: 5px; + margin-top: -5px; + border-width: 0 5px 5px; + border-bottom-color: #000; +} +.popover { + position: absolute; + top: 0; + left: 0; + z-index: 1060; + display: none; + max-width: 276px; + padding: 1px; + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-size: 14px; + font-style: normal; + font-weight: normal; + line-height: 1.42857143; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + 
word-break: normal; + word-spacing: normal; + word-wrap: normal; + white-space: normal; + background-color: #fff; + -webkit-background-clip: padding-box; + background-clip: padding-box; + border: 1px solid #ccc; + border: 1px solid rgba(0, 0, 0, .2); + border-radius: 6px; + -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2); + box-shadow: 0 5px 10px rgba(0, 0, 0, .2); + + line-break: auto; +} +.popover.top { + margin-top: -10px; +} +.popover.right { + margin-left: 10px; +} +.popover.bottom { + margin-top: 10px; +} +.popover.left { + margin-left: -10px; +} +.popover-title { + padding: 8px 14px; + margin: 0; + font-size: 14px; + background-color: #f7f7f7; + border-bottom: 1px solid #ebebeb; + border-radius: 5px 5px 0 0; +} +.popover-content { + padding: 9px 14px; +} +.popover > .arrow, +.popover > .arrow:after { + position: absolute; + display: block; + width: 0; + height: 0; + border-color: transparent; + border-style: solid; +} +.popover > .arrow { + border-width: 11px; +} +.popover > .arrow:after { + content: ""; + border-width: 10px; +} +.popover.top > .arrow { + bottom: -11px; + left: 50%; + margin-left: -11px; + border-top-color: #999; + border-top-color: rgba(0, 0, 0, .25); + border-bottom-width: 0; +} +.popover.top > .arrow:after { + bottom: 1px; + margin-left: -10px; + content: " "; + border-top-color: #fff; + border-bottom-width: 0; +} +.popover.right > .arrow { + top: 50%; + left: -11px; + margin-top: -11px; + border-right-color: #999; + border-right-color: rgba(0, 0, 0, .25); + border-left-width: 0; +} +.popover.right > .arrow:after { + bottom: -10px; + left: 1px; + content: " "; + border-right-color: #fff; + border-left-width: 0; +} +.popover.bottom > .arrow { + top: -11px; + left: 50%; + margin-left: -11px; + border-top-width: 0; + border-bottom-color: #999; + border-bottom-color: rgba(0, 0, 0, .25); +} +.popover.bottom > .arrow:after { + top: 1px; + margin-left: -10px; + content: " "; + border-top-width: 0; + border-bottom-color: #fff; +} +.popover.left > .arrow { + top: 50%; + right: -11px; + margin-top: -11px; + border-right-width: 0; + border-left-color: #999; + border-left-color: rgba(0, 0, 0, .25); +} +.popover.left > .arrow:after { + right: 1px; + bottom: -10px; + content: " "; + border-right-width: 0; + border-left-color: #fff; +} +.carousel { + position: relative; +} +.carousel-inner { + position: relative; + width: 100%; + overflow: hidden; +} +.carousel-inner > .item { + position: relative; + display: none; + -webkit-transition: .6s ease-in-out left; + -o-transition: .6s ease-in-out left; + transition: .6s ease-in-out left; +} +.carousel-inner > .item > img, +.carousel-inner > .item > a > img { + line-height: 1; +} +@media all and (transform-3d), (-webkit-transform-3d) { + .carousel-inner > .item { + -webkit-transition: -webkit-transform .6s ease-in-out; + -o-transition: -o-transform .6s ease-in-out; + transition: transform .6s ease-in-out; + + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + -webkit-perspective: 1000px; + perspective: 1000px; + } + .carousel-inner > .item.next, + .carousel-inner > .item.active.right { + left: 0; + -webkit-transform: translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0); + } + .carousel-inner > .item.prev, + .carousel-inner > .item.active.left { + left: 0; + -webkit-transform: translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0); + } + .carousel-inner > .item.next.left, + .carousel-inner > .item.prev.right, + .carousel-inner > .item.active { + left: 0; + -webkit-transform: translate3d(0, 0, 0); + 
transform: translate3d(0, 0, 0); + } +} +.carousel-inner > .active, +.carousel-inner > .next, +.carousel-inner > .prev { + display: block; +} +.carousel-inner > .active { + left: 0; +} +.carousel-inner > .next, +.carousel-inner > .prev { + position: absolute; + top: 0; + width: 100%; +} +.carousel-inner > .next { + left: 100%; +} +.carousel-inner > .prev { + left: -100%; +} +.carousel-inner > .next.left, +.carousel-inner > .prev.right { + left: 0; +} +.carousel-inner > .active.left { + left: -100%; +} +.carousel-inner > .active.right { + left: 100%; +} +.carousel-control { + position: absolute; + top: 0; + bottom: 0; + left: 0; + width: 15%; + font-size: 20px; + color: #fff; + text-align: center; + text-shadow: 0 1px 2px rgba(0, 0, 0, .6); + background-color: rgba(0, 0, 0, 0); + filter: alpha(opacity=50); + opacity: .5; +} +.carousel-control.left { + background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); + background-image: -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); + background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001))); + background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); + background-repeat: repeat-x; +} +.carousel-control.right { + right: 0; + left: auto; + background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); + background-image: -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); + background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5))); + background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); + background-repeat: repeat-x; +} +.carousel-control:hover, +.carousel-control:focus { + color: #fff; + text-decoration: none; + filter: alpha(opacity=90); + outline: 0; + opacity: .9; +} +.carousel-control .icon-prev, +.carousel-control .icon-next, +.carousel-control .glyphicon-chevron-left, +.carousel-control .glyphicon-chevron-right { + position: absolute; + top: 50%; + z-index: 5; + display: inline-block; + margin-top: -10px; +} +.carousel-control .icon-prev, +.carousel-control .glyphicon-chevron-left { + left: 50%; + margin-left: -10px; +} +.carousel-control .icon-next, +.carousel-control .glyphicon-chevron-right { + right: 50%; + margin-right: -10px; +} +.carousel-control .icon-prev, +.carousel-control .icon-next { + width: 20px; + height: 20px; + font-family: serif; + line-height: 1; +} +.carousel-control .icon-prev:before { + content: '\2039'; +} +.carousel-control .icon-next:before { + content: '\203a'; +} +.carousel-indicators { + position: absolute; + bottom: 10px; + left: 50%; + z-index: 15; + width: 60%; + padding-left: 0; + margin-left: -30%; + text-align: center; + list-style: none; +} +.carousel-indicators li { + display: inline-block; + width: 10px; + height: 10px; + margin: 1px; + text-indent: -999px; + cursor: pointer; + background-color: #000 \9; + background-color: rgba(0, 0, 0, 0); + border: 1px solid #fff; + border-radius: 10px; +} +.carousel-indicators .active { + width: 12px; + height: 12px; + margin: 0; + background-color: #fff; +} +.carousel-caption { + position: absolute; + right: 15%; + 
bottom: 20px; + left: 15%; + z-index: 10; + padding-top: 20px; + padding-bottom: 20px; + color: #fff; + text-align: center; + text-shadow: 0 1px 2px rgba(0, 0, 0, .6); +} +.carousel-caption .btn { + text-shadow: none; +} +@media screen and (min-width: 768px) { + .carousel-control .glyphicon-chevron-left, + .carousel-control .glyphicon-chevron-right, + .carousel-control .icon-prev, + .carousel-control .icon-next { + width: 30px; + height: 30px; + margin-top: -10px; + font-size: 30px; + } + .carousel-control .glyphicon-chevron-left, + .carousel-control .icon-prev { + margin-left: -10px; + } + .carousel-control .glyphicon-chevron-right, + .carousel-control .icon-next { + margin-right: -10px; + } + .carousel-caption { + right: 20%; + left: 20%; + padding-bottom: 30px; + } + .carousel-indicators { + bottom: 20px; + } +} +.clearfix:before, +.clearfix:after, +.dl-horizontal dd:before, +.dl-horizontal dd:after, +.container:before, +.container:after, +.container-fluid:before, +.container-fluid:after, +.row:before, +.row:after, +.form-horizontal .form-group:before, +.form-horizontal .form-group:after, +.btn-toolbar:before, +.btn-toolbar:after, +.btn-group-vertical > .btn-group:before, +.btn-group-vertical > .btn-group:after, +.nav:before, +.nav:after, +.navbar:before, +.navbar:after, +.navbar-header:before, +.navbar-header:after, +.navbar-collapse:before, +.navbar-collapse:after, +.pager:before, +.pager:after, +.panel-body:before, +.panel-body:after, +.modal-header:before, +.modal-header:after, +.modal-footer:before, +.modal-footer:after { + display: table; + content: " "; +} +.clearfix:after, +.dl-horizontal dd:after, +.container:after, +.container-fluid:after, +.row:after, +.form-horizontal .form-group:after, +.btn-toolbar:after, +.btn-group-vertical > .btn-group:after, +.nav:after, +.navbar:after, +.navbar-header:after, +.navbar-collapse:after, +.pager:after, +.panel-body:after, +.modal-header:after, +.modal-footer:after { + clear: both; +} +.center-block { + display: block; + margin-right: auto; + margin-left: auto; +} +.pull-right { + float: right !important; +} +.pull-left { + float: left !important; +} +.hide { + display: none !important; +} +.show { + display: block !important; +} +.invisible { + visibility: hidden; +} +.text-hide { + font: 0/0 a; + color: transparent; + text-shadow: none; + background-color: transparent; + border: 0; +} +.hidden { + display: none !important; +} +.affix { + position: fixed; +} +@-ms-viewport { + width: device-width; +} +.visible-xs, +.visible-sm, +.visible-md, +.visible-lg { + display: none !important; +} +.visible-xs-block, +.visible-xs-inline, +.visible-xs-inline-block, +.visible-sm-block, +.visible-sm-inline, +.visible-sm-inline-block, +.visible-md-block, +.visible-md-inline, +.visible-md-inline-block, +.visible-lg-block, +.visible-lg-inline, +.visible-lg-inline-block { + display: none !important; +} +@media (max-width: 767px) { + .visible-xs { + display: block !important; + } + table.visible-xs { + display: table !important; + } + tr.visible-xs { + display: table-row !important; + } + th.visible-xs, + td.visible-xs { + display: table-cell !important; + } +} +@media (max-width: 767px) { + .visible-xs-block { + display: block !important; + } +} +@media (max-width: 767px) { + .visible-xs-inline { + display: inline !important; + } +} +@media (max-width: 767px) { + .visible-xs-inline-block { + display: inline-block !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm { + display: block !important; + } + table.visible-sm { + 
display: table !important; + } + tr.visible-sm { + display: table-row !important; + } + th.visible-sm, + td.visible-sm { + display: table-cell !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm-block { + display: block !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm-inline { + display: inline !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm-inline-block { + display: inline-block !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md { + display: block !important; + } + table.visible-md { + display: table !important; + } + tr.visible-md { + display: table-row !important; + } + th.visible-md, + td.visible-md { + display: table-cell !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md-block { + display: block !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md-inline { + display: inline !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md-inline-block { + display: inline-block !important; + } +} +@media (min-width: 1200px) { + .visible-lg { + display: block !important; + } + table.visible-lg { + display: table !important; + } + tr.visible-lg { + display: table-row !important; + } + th.visible-lg, + td.visible-lg { + display: table-cell !important; + } +} +@media (min-width: 1200px) { + .visible-lg-block { + display: block !important; + } +} +@media (min-width: 1200px) { + .visible-lg-inline { + display: inline !important; + } +} +@media (min-width: 1200px) { + .visible-lg-inline-block { + display: inline-block !important; + } +} +@media (max-width: 767px) { + .hidden-xs { + display: none !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .hidden-sm { + display: none !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .hidden-md { + display: none !important; + } +} +@media (min-width: 1200px) { + .hidden-lg { + display: none !important; + } +} +.visible-print { + display: none !important; +} +@media print { + .visible-print { + display: block !important; + } + table.visible-print { + display: table !important; + } + tr.visible-print { + display: table-row !important; + } + th.visible-print, + td.visible-print { + display: table-cell !important; + } +} +.visible-print-block { + display: none !important; +} +@media print { + .visible-print-block { + display: block !important; + } +} +.visible-print-inline { + display: none !important; +} +@media print { + .visible-print-inline { + display: inline !important; + } +} +.visible-print-inline-block { + display: none !important; +} +@media print { + .visible-print-inline-block { + display: inline-block !important; + } +} +@media print { + .hidden-print { + display: none !important; + } +} +/*# sourceMappingURL=bootstrap.css.map */ diff --git a/web/gui/css/bootstrap-slate-flat-3.3.7.css b/web/gui/css/bootstrap-slate-flat-3.3.7.css new file mode 100644 index 000000000..7ce384f81 --- /dev/null +++ b/web/gui/css/bootstrap-slate-flat-3.3.7.css @@ -0,0 +1,7101 @@ +/*! + * bootswatch v3.3.7 + * Homepage: http://bootswatch.com + * Copyright 2012-2016 Thomas Park + * Licensed under MIT + * SPDX-License-Identifier: MIT + * Based on Bootstrap +*/ +/*! + * Bootstrap v3.3.7 (http://getbootstrap.com) + * Copyright 2011-2016 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */ +/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; +} +body { + margin: 0; +} +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block; +} +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline; +} +audio:not([controls]) { + display: none; + height: 0; +} +[hidden], +template { + display: none; +} +a { + background-color: transparent; +} +a:active, +a:hover { + outline: 0; +} +abbr[title] { + border-bottom: 1px dotted; +} +b, +strong { + font-weight: bold; +} +dfn { + font-style: italic; +} +h1 { + font-size: 2em; + margin: 0.67em 0; +} +mark { + background: #ff0; + color: #000; +} +small { + font-size: 80%; +} +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} +sup { + top: -0.5em; +} +sub { + bottom: -0.25em; +} +img { + border: 0; +} +svg:not(:root) { + overflow: hidden; +} +figure { + margin: 1em 40px; +} +hr { + -webkit-box-sizing: content-box; + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0; +} +pre { + overflow: auto; +} +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em; +} +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0; +} +button { + overflow: visible; +} +button, +select { + text-transform: none; +} +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; +} +button[disabled], +html input[disabled] { + cursor: default; +} +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; +} +input { + line-height: normal; +} +input[type="checkbox"], +input[type="radio"] { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + padding: 0; +} +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto; +} +input[type="search"] { + -webkit-appearance: textfield; + -webkit-box-sizing: content-box; + -moz-box-sizing: content-box; + box-sizing: content-box; +} +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: 0.35em 0.625em 0.75em; +} +legend { + border: 0; + padding: 0; +} +textarea { + overflow: auto; +} +optgroup { + font-weight: bold; +} +table { + border-collapse: collapse; + border-spacing: 0; +} +td, +th { + padding: 0; +} +/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */ +@media print { + *, + *:before, + *:after { + background: transparent !important; + color: #000 !important; + -webkit-box-shadow: none !important; + box-shadow: none !important; + text-shadow: none !important; + } + a, + a:visited { + text-decoration: underline; + } + a[href]:after { + content: " (" attr(href) ")"; + } + abbr[title]:after { + content: " (" attr(title) ")"; + } + a[href^="#"]:after, + a[href^="javascript:"]:after { + content: ""; + } + pre, + blockquote { + border: 1px solid #999; + page-break-inside: avoid; + } + thead { + display: table-header-group; + } + tr, + img { + page-break-inside: avoid; + } + img { + max-width: 100% !important; + } + p, + h2, + h3 { + orphans: 3; + widows: 3; + } + h2, + h3 { + page-break-after: avoid; + } + .navbar { + display: none; + } + .btn > .caret, + .dropup > .btn > .caret { + border-top-color: #000 !important; + } + .label { + border: 1px solid #000; + } + .table { + border-collapse: collapse !important; + } + .table td, + .table th { + background-color: #fff !important; + } + .table-bordered th, + .table-bordered td { + border: 1px solid #ddd !important; + } +} +@font-face { + font-family: 'Glyphicons Halflings'; + src: url('../fonts/glyphicons-halflings-regular.eot'); + src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); +} +.glyphicon { + position: relative; + top: 1px; + display: inline-block; + font-family: 'Glyphicons Halflings'; + font-style: normal; + font-weight: normal; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} +.glyphicon-asterisk:before { + content: "\002a"; +} +.glyphicon-plus:before { + content: "\002b"; +} +.glyphicon-euro:before, +.glyphicon-eur:before { + content: "\20ac"; +} +.glyphicon-minus:before { + content: "\2212"; +} +.glyphicon-cloud:before { + content: "\2601"; +} +.glyphicon-envelope:before { + content: "\2709"; +} +.glyphicon-pencil:before { + content: "\270f"; +} +.glyphicon-glass:before { + content: "\e001"; +} +.glyphicon-music:before { + content: "\e002"; +} +.glyphicon-search:before { + content: "\e003"; +} +.glyphicon-heart:before { + content: "\e005"; +} +.glyphicon-star:before { + content: "\e006"; +} +.glyphicon-star-empty:before { + content: "\e007"; +} +.glyphicon-user:before { + content: "\e008"; +} +.glyphicon-film:before { + content: "\e009"; +} +.glyphicon-th-large:before { + content: "\e010"; +} +.glyphicon-th:before { + content: "\e011"; +} +.glyphicon-th-list:before { + content: "\e012"; +} +.glyphicon-ok:before { + content: "\e013"; +} +.glyphicon-remove:before { + content: "\e014"; +} +.glyphicon-zoom-in:before { + content: "\e015"; +} +.glyphicon-zoom-out:before { + content: "\e016"; +} +.glyphicon-off:before { + content: "\e017"; +} +.glyphicon-signal:before { + content: "\e018"; +} +.glyphicon-cog:before { + content: "\e019"; +} +.glyphicon-trash:before { + content: "\e020"; +} +.glyphicon-home:before { + content: "\e021"; +} +.glyphicon-file:before { + content: "\e022"; +} +.glyphicon-time:before { + content: "\e023"; +} +.glyphicon-road:before { + content: "\e024"; +} +.glyphicon-download-alt:before { + content: "\e025"; 
+} +.glyphicon-download:before { + content: "\e026"; +} +.glyphicon-upload:before { + content: "\e027"; +} +.glyphicon-inbox:before { + content: "\e028"; +} +.glyphicon-play-circle:before { + content: "\e029"; +} +.glyphicon-repeat:before { + content: "\e030"; +} +.glyphicon-refresh:before { + content: "\e031"; +} +.glyphicon-list-alt:before { + content: "\e032"; +} +.glyphicon-lock:before { + content: "\e033"; +} +.glyphicon-flag:before { + content: "\e034"; +} +.glyphicon-headphones:before { + content: "\e035"; +} +.glyphicon-volume-off:before { + content: "\e036"; +} +.glyphicon-volume-down:before { + content: "\e037"; +} +.glyphicon-volume-up:before { + content: "\e038"; +} +.glyphicon-qrcode:before { + content: "\e039"; +} +.glyphicon-barcode:before { + content: "\e040"; +} +.glyphicon-tag:before { + content: "\e041"; +} +.glyphicon-tags:before { + content: "\e042"; +} +.glyphicon-book:before { + content: "\e043"; +} +.glyphicon-bookmark:before { + content: "\e044"; +} +.glyphicon-print:before { + content: "\e045"; +} +.glyphicon-camera:before { + content: "\e046"; +} +.glyphicon-font:before { + content: "\e047"; +} +.glyphicon-bold:before { + content: "\e048"; +} +.glyphicon-italic:before { + content: "\e049"; +} +.glyphicon-text-height:before { + content: "\e050"; +} +.glyphicon-text-width:before { + content: "\e051"; +} +.glyphicon-align-left:before { + content: "\e052"; +} +.glyphicon-align-center:before { + content: "\e053"; +} +.glyphicon-align-right:before { + content: "\e054"; +} +.glyphicon-align-justify:before { + content: "\e055"; +} +.glyphicon-list:before { + content: "\e056"; +} +.glyphicon-indent-left:before { + content: "\e057"; +} +.glyphicon-indent-right:before { + content: "\e058"; +} +.glyphicon-facetime-video:before { + content: "\e059"; +} +.glyphicon-picture:before { + content: "\e060"; +} +.glyphicon-map-marker:before { + content: "\e062"; +} +.glyphicon-adjust:before { + content: "\e063"; +} +.glyphicon-tint:before { + content: "\e064"; +} +.glyphicon-edit:before { + content: "\e065"; +} +.glyphicon-share:before { + content: "\e066"; +} +.glyphicon-check:before { + content: "\e067"; +} +.glyphicon-move:before { + content: "\e068"; +} +.glyphicon-step-backward:before { + content: "\e069"; +} +.glyphicon-fast-backward:before { + content: "\e070"; +} +.glyphicon-backward:before { + content: "\e071"; +} +.glyphicon-play:before { + content: "\e072"; +} +.glyphicon-pause:before { + content: "\e073"; +} +.glyphicon-stop:before { + content: "\e074"; +} +.glyphicon-forward:before { + content: "\e075"; +} +.glyphicon-fast-forward:before { + content: "\e076"; +} +.glyphicon-step-forward:before { + content: "\e077"; +} +.glyphicon-eject:before { + content: "\e078"; +} +.glyphicon-chevron-left:before { + content: "\e079"; +} +.glyphicon-chevron-right:before { + content: "\e080"; +} +.glyphicon-plus-sign:before { + content: "\e081"; +} +.glyphicon-minus-sign:before { + content: "\e082"; +} +.glyphicon-remove-sign:before { + content: "\e083"; +} +.glyphicon-ok-sign:before { + content: "\e084"; +} +.glyphicon-question-sign:before { + content: "\e085"; +} +.glyphicon-info-sign:before { + content: "\e086"; +} +.glyphicon-screenshot:before { + content: "\e087"; +} +.glyphicon-remove-circle:before { + content: "\e088"; +} +.glyphicon-ok-circle:before { + content: "\e089"; +} +.glyphicon-ban-circle:before { + content: "\e090"; +} +.glyphicon-arrow-left:before { + content: "\e091"; +} +.glyphicon-arrow-right:before { + content: "\e092"; +} +.glyphicon-arrow-up:before { + content: 
"\e093"; +} +.glyphicon-arrow-down:before { + content: "\e094"; +} +.glyphicon-share-alt:before { + content: "\e095"; +} +.glyphicon-resize-full:before { + content: "\e096"; +} +.glyphicon-resize-small:before { + content: "\e097"; +} +.glyphicon-exclamation-sign:before { + content: "\e101"; +} +.glyphicon-gift:before { + content: "\e102"; +} +.glyphicon-leaf:before { + content: "\e103"; +} +.glyphicon-fire:before { + content: "\e104"; +} +.glyphicon-eye-open:before { + content: "\e105"; +} +.glyphicon-eye-close:before { + content: "\e106"; +} +.glyphicon-warning-sign:before { + content: "\e107"; +} +.glyphicon-plane:before { + content: "\e108"; +} +.glyphicon-calendar:before { + content: "\e109"; +} +.glyphicon-random:before { + content: "\e110"; +} +.glyphicon-comment:before { + content: "\e111"; +} +.glyphicon-magnet:before { + content: "\e112"; +} +.glyphicon-chevron-up:before { + content: "\e113"; +} +.glyphicon-chevron-down:before { + content: "\e114"; +} +.glyphicon-retweet:before { + content: "\e115"; +} +.glyphicon-shopping-cart:before { + content: "\e116"; +} +.glyphicon-folder-close:before { + content: "\e117"; +} +.glyphicon-folder-open:before { + content: "\e118"; +} +.glyphicon-resize-vertical:before { + content: "\e119"; +} +.glyphicon-resize-horizontal:before { + content: "\e120"; +} +.glyphicon-hdd:before { + content: "\e121"; +} +.glyphicon-bullhorn:before { + content: "\e122"; +} +.glyphicon-bell:before { + content: "\e123"; +} +.glyphicon-certificate:before { + content: "\e124"; +} +.glyphicon-thumbs-up:before { + content: "\e125"; +} +.glyphicon-thumbs-down:before { + content: "\e126"; +} +.glyphicon-hand-right:before { + content: "\e127"; +} +.glyphicon-hand-left:before { + content: "\e128"; +} +.glyphicon-hand-up:before { + content: "\e129"; +} +.glyphicon-hand-down:before { + content: "\e130"; +} +.glyphicon-circle-arrow-right:before { + content: "\e131"; +} +.glyphicon-circle-arrow-left:before { + content: "\e132"; +} +.glyphicon-circle-arrow-up:before { + content: "\e133"; +} +.glyphicon-circle-arrow-down:before { + content: "\e134"; +} +.glyphicon-globe:before { + content: "\e135"; +} +.glyphicon-wrench:before { + content: "\e136"; +} +.glyphicon-tasks:before { + content: "\e137"; +} +.glyphicon-filter:before { + content: "\e138"; +} +.glyphicon-briefcase:before { + content: "\e139"; +} +.glyphicon-fullscreen:before { + content: "\e140"; +} +.glyphicon-dashboard:before { + content: "\e141"; +} +.glyphicon-paperclip:before { + content: "\e142"; +} +.glyphicon-heart-empty:before { + content: "\e143"; +} +.glyphicon-link:before { + content: "\e144"; +} +.glyphicon-phone:before { + content: "\e145"; +} +.glyphicon-pushpin:before { + content: "\e146"; +} +.glyphicon-usd:before { + content: "\e148"; +} +.glyphicon-gbp:before { + content: "\e149"; +} +.glyphicon-sort:before { + content: "\e150"; +} +.glyphicon-sort-by-alphabet:before { + content: "\e151"; +} +.glyphicon-sort-by-alphabet-alt:before { + content: "\e152"; +} +.glyphicon-sort-by-order:before { + content: "\e153"; +} +.glyphicon-sort-by-order-alt:before { + content: "\e154"; +} +.glyphicon-sort-by-attributes:before { + content: "\e155"; +} +.glyphicon-sort-by-attributes-alt:before { + content: "\e156"; +} +.glyphicon-unchecked:before { + content: "\e157"; +} +.glyphicon-expand:before { + content: "\e158"; +} +.glyphicon-collapse-down:before { + content: "\e159"; +} +.glyphicon-collapse-up:before { + content: "\e160"; +} +.glyphicon-log-in:before { + content: "\e161"; +} +.glyphicon-flash:before { + content: 
"\e162"; +} +.glyphicon-log-out:before { + content: "\e163"; +} +.glyphicon-new-window:before { + content: "\e164"; +} +.glyphicon-record:before { + content: "\e165"; +} +.glyphicon-save:before { + content: "\e166"; +} +.glyphicon-open:before { + content: "\e167"; +} +.glyphicon-saved:before { + content: "\e168"; +} +.glyphicon-import:before { + content: "\e169"; +} +.glyphicon-export:before { + content: "\e170"; +} +.glyphicon-send:before { + content: "\e171"; +} +.glyphicon-floppy-disk:before { + content: "\e172"; +} +.glyphicon-floppy-saved:before { + content: "\e173"; +} +.glyphicon-floppy-remove:before { + content: "\e174"; +} +.glyphicon-floppy-save:before { + content: "\e175"; +} +.glyphicon-floppy-open:before { + content: "\e176"; +} +.glyphicon-credit-card:before { + content: "\e177"; +} +.glyphicon-transfer:before { + content: "\e178"; +} +.glyphicon-cutlery:before { + content: "\e179"; +} +.glyphicon-header:before { + content: "\e180"; +} +.glyphicon-compressed:before { + content: "\e181"; +} +.glyphicon-earphone:before { + content: "\e182"; +} +.glyphicon-phone-alt:before { + content: "\e183"; +} +.glyphicon-tower:before { + content: "\e184"; +} +.glyphicon-stats:before { + content: "\e185"; +} +.glyphicon-sd-video:before { + content: "\e186"; +} +.glyphicon-hd-video:before { + content: "\e187"; +} +.glyphicon-subtitles:before { + content: "\e188"; +} +.glyphicon-sound-stereo:before { + content: "\e189"; +} +.glyphicon-sound-dolby:before { + content: "\e190"; +} +.glyphicon-sound-5-1:before { + content: "\e191"; +} +.glyphicon-sound-6-1:before { + content: "\e192"; +} +.glyphicon-sound-7-1:before { + content: "\e193"; +} +.glyphicon-copyright-mark:before { + content: "\e194"; +} +.glyphicon-registration-mark:before { + content: "\e195"; +} +.glyphicon-cloud-download:before { + content: "\e197"; +} +.glyphicon-cloud-upload:before { + content: "\e198"; +} +.glyphicon-tree-conifer:before { + content: "\e199"; +} +.glyphicon-tree-deciduous:before { + content: "\e200"; +} +.glyphicon-cd:before { + content: "\e201"; +} +.glyphicon-save-file:before { + content: "\e202"; +} +.glyphicon-open-file:before { + content: "\e203"; +} +.glyphicon-level-up:before { + content: "\e204"; +} +.glyphicon-copy:before { + content: "\e205"; +} +.glyphicon-paste:before { + content: "\e206"; +} +.glyphicon-alert:before { + content: "\e209"; +} +.glyphicon-equalizer:before { + content: "\e210"; +} +.glyphicon-king:before { + content: "\e211"; +} +.glyphicon-queen:before { + content: "\e212"; +} +.glyphicon-pawn:before { + content: "\e213"; +} +.glyphicon-bishop:before { + content: "\e214"; +} +.glyphicon-knight:before { + content: "\e215"; +} +.glyphicon-baby-formula:before { + content: "\e216"; +} +.glyphicon-tent:before { + content: "\26fa"; +} +.glyphicon-blackboard:before { + content: "\e218"; +} +.glyphicon-bed:before { + content: "\e219"; +} +.glyphicon-apple:before { + content: "\f8ff"; +} +.glyphicon-erase:before { + content: "\e221"; +} +.glyphicon-hourglass:before { + content: "\231b"; +} +.glyphicon-lamp:before { + content: "\e223"; +} +.glyphicon-duplicate:before { + content: "\e224"; +} +.glyphicon-piggy-bank:before { + content: "\e225"; +} +.glyphicon-scissors:before { + content: "\e226"; +} +.glyphicon-bitcoin:before { + content: "\e227"; +} +.glyphicon-btc:before { + content: "\e227"; +} +.glyphicon-xbt:before { + content: "\e227"; +} +.glyphicon-yen:before { + content: "\00a5"; +} +.glyphicon-jpy:before { + content: "\00a5"; +} +.glyphicon-ruble:before { + content: "\20bd"; +} 
+.glyphicon-rub:before { + content: "\20bd"; +} +.glyphicon-scale:before { + content: "\e230"; +} +.glyphicon-ice-lolly:before { + content: "\e231"; +} +.glyphicon-ice-lolly-tasted:before { + content: "\e232"; +} +.glyphicon-education:before { + content: "\e233"; +} +.glyphicon-option-horizontal:before { + content: "\e234"; +} +.glyphicon-option-vertical:before { + content: "\e235"; +} +.glyphicon-menu-hamburger:before { + content: "\e236"; +} +.glyphicon-modal-window:before { + content: "\e237"; +} +.glyphicon-oil:before { + content: "\e238"; +} +.glyphicon-grain:before { + content: "\e239"; +} +.glyphicon-sunglasses:before { + content: "\e240"; +} +.glyphicon-text-size:before { + content: "\e241"; +} +.glyphicon-text-color:before { + content: "\e242"; +} +.glyphicon-text-background:before { + content: "\e243"; +} +.glyphicon-object-align-top:before { + content: "\e244"; +} +.glyphicon-object-align-bottom:before { + content: "\e245"; +} +.glyphicon-object-align-horizontal:before { + content: "\e246"; +} +.glyphicon-object-align-left:before { + content: "\e247"; +} +.glyphicon-object-align-vertical:before { + content: "\e248"; +} +.glyphicon-object-align-right:before { + content: "\e249"; +} +.glyphicon-triangle-right:before { + content: "\e250"; +} +.glyphicon-triangle-left:before { + content: "\e251"; +} +.glyphicon-triangle-bottom:before { + content: "\e252"; +} +.glyphicon-triangle-top:before { + content: "\e253"; +} +.glyphicon-console:before { + content: "\e254"; +} +.glyphicon-superscript:before { + content: "\e255"; +} +.glyphicon-subscript:before { + content: "\e256"; +} +.glyphicon-menu-left:before { + content: "\e257"; +} +.glyphicon-menu-right:before { + content: "\e258"; +} +.glyphicon-menu-down:before { + content: "\e259"; +} +.glyphicon-menu-up:before { + content: "\e260"; +} +* { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +*:before, +*:after { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +html { + font-size: 10px; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); +} +body { + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-size: 14px; + line-height: 1.42857143; + color: #c8c8c8; + background-color: #272b30; +} +input, +button, +select, +textarea { + font-family: inherit; + font-size: inherit; + line-height: inherit; +} +a { + color: #ffffff; + text-decoration: none; +} +a:hover, +a:focus { + color: #ffffff; + text-decoration: underline; +} +a:focus { + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +figure { + margin: 0; +} +img { + vertical-align: middle; +} +.img-responsive, +.thumbnail > img, +.thumbnail a > img, +.carousel-inner > .item > img, +.carousel-inner > .item > a > img { + display: block; + max-width: 100%; + height: auto; +} +.img-rounded { + border-radius: 6px; +} +.img-thumbnail { + padding: 4px; + line-height: 1.42857143; + background-color: #1c1e22; + border: 1px solid #0c0d0e; + border-radius: 4px; + -webkit-transition: all 0.2s ease-in-out; + -o-transition: all 0.2s ease-in-out; + transition: all 0.2s ease-in-out; + display: inline-block; + max-width: 100%; + height: auto; +} +.img-circle { + border-radius: 50%; +} +hr { + margin-top: 20px; + margin-bottom: 20px; + border: 0; + border-top: 1px solid #1c1e22; +} +.sr-only { + position: absolute; + width: 1px; + height: 1px; + margin: -1px; + padding: 0; + overflow: hidden; + clip: rect(0, 0, 0, 0); + border: 0; +} +.sr-only-focusable:active, 
+.sr-only-focusable:focus { + position: static; + width: auto; + height: auto; + margin: 0; + overflow: visible; + clip: auto; +} +[role="button"] { + cursor: pointer; +} +h1, +h2, +h3, +h4, +h5, +h6, +.h1, +.h2, +.h3, +.h4, +.h5, +.h6 { + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-weight: 500; + line-height: 1.1; + color: inherit; +} +h1 small, +h2 small, +h3 small, +h4 small, +h5 small, +h6 small, +.h1 small, +.h2 small, +.h3 small, +.h4 small, +.h5 small, +.h6 small, +h1 .small, +h2 .small, +h3 .small, +h4 .small, +h5 .small, +h6 .small, +.h1 .small, +.h2 .small, +.h3 .small, +.h4 .small, +.h5 .small, +.h6 .small { + font-weight: normal; + line-height: 1; + color: #7a8288; +} +h1, +.h1, +h2, +.h2, +h3, +.h3 { + margin-top: 20px; + margin-bottom: 10px; +} +h1 small, +.h1 small, +h2 small, +.h2 small, +h3 small, +.h3 small, +h1 .small, +.h1 .small, +h2 .small, +.h2 .small, +h3 .small, +.h3 .small { + font-size: 65%; +} +h4, +.h4, +h5, +.h5, +h6, +.h6 { + margin-top: 10px; + margin-bottom: 10px; +} +h4 small, +.h4 small, +h5 small, +.h5 small, +h6 small, +.h6 small, +h4 .small, +.h4 .small, +h5 .small, +.h5 .small, +h6 .small, +.h6 .small { + font-size: 75%; +} +h1, +.h1 { + font-size: 36px; +} +h2, +.h2 { + font-size: 30px; +} +h3, +.h3 { + font-size: 24px; +} +h4, +.h4 { + font-size: 18px; +} +h5, +.h5 { + font-size: 14px; +} +h6, +.h6 { + font-size: 12px; +} +p { + margin: 0 0 10px; +} +.lead { + margin-bottom: 20px; + font-size: 16px; + font-weight: 300; + line-height: 1.4; +} +@media (min-width: 768px) { + .lead { + font-size: 21px; + } +} +small, +.small { + font-size: 85%; +} +mark, +.mark { + background-color: #f89406; + padding: .2em; +} +.text-left { + text-align: left; +} +.text-right { + text-align: right; +} +.text-center { + text-align: center; +} +.text-justify { + text-align: justify; +} +.text-nowrap { + white-space: nowrap; +} +.text-lowercase { + text-transform: lowercase; +} +.text-uppercase { + text-transform: uppercase; +} +.text-capitalize { + text-transform: capitalize; +} +.text-muted { + color: #7a8288; +} +.text-primary { + color: #7a8288; +} +a.text-primary:hover, +a.text-primary:focus { + color: #62686d; +} +.text-success { + color: #ffffff; +} +a.text-success:hover, +a.text-success:focus { + color: #e6e6e6; +} +.text-info { + color: #ffffff; +} +a.text-info:hover, +a.text-info:focus { + color: #e6e6e6; +} +.text-warning { + color: #ffffff; +} +a.text-warning:hover, +a.text-warning:focus { + color: #e6e6e6; +} +.text-danger { + color: #ffffff; +} +a.text-danger:hover, +a.text-danger:focus { + color: #e6e6e6; +} +.bg-primary { + color: #fff; + background-color: #7a8288; +} +a.bg-primary:hover, +a.bg-primary:focus { + background-color: #62686d; +} +.bg-success { + background-color: #62c462; +} +a.bg-success:hover, +a.bg-success:focus { + background-color: #42b142; +} +.bg-info { + background-color: #5bc0de; +} +a.bg-info:hover, +a.bg-info:focus { + background-color: #31b0d5; +} +.bg-warning { + background-color: #f89406; +} +a.bg-warning:hover, +a.bg-warning:focus { + background-color: #c67605; +} +.bg-danger { + background-color: #ee5f5b; +} +a.bg-danger:hover, +a.bg-danger:focus { + background-color: #e9322d; +} +.page-header { + padding-bottom: 9px; + margin: 40px 0 20px; + border-bottom: 1px solid #1c1e22; +} +ul, +ol { + margin-top: 0; + margin-bottom: 10px; +} +ul ul, +ol ul, +ul ol, +ol ol { + margin-bottom: 0; +} +.list-unstyled { + padding-left: 0; + list-style: none; +} +.list-inline { + padding-left: 0; + list-style: none; + 
margin-left: -5px; +} +.list-inline > li { + display: inline-block; + padding-left: 5px; + padding-right: 5px; +} +dl { + margin-top: 0; + margin-bottom: 20px; +} +dt, +dd { + line-height: 1.42857143; +} +dt { + font-weight: bold; +} +dd { + margin-left: 0; +} +@media (min-width: 768px) { + .dl-horizontal dt { + float: left; + width: 160px; + clear: left; + text-align: right; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + .dl-horizontal dd { + margin-left: 180px; + } +} +abbr[title], +abbr[data-original-title] { + cursor: help; + border-bottom: 1px dotted #7a8288; +} +.initialism { + font-size: 90%; + text-transform: uppercase; +} +blockquote { + padding: 10px 20px; + margin: 0 0 20px; + font-size: 17.5px; + border-left: 5px solid #7a8288; +} +blockquote p:last-child, +blockquote ul:last-child, +blockquote ol:last-child { + margin-bottom: 0; +} +blockquote footer, +blockquote small, +blockquote .small { + display: block; + font-size: 80%; + line-height: 1.42857143; + color: #7a8288; +} +blockquote footer:before, +blockquote small:before, +blockquote .small:before { + content: '\2014 \00A0'; +} +.blockquote-reverse, +blockquote.pull-right { + padding-right: 15px; + padding-left: 0; + border-right: 5px solid #7a8288; + border-left: 0; + text-align: right; +} +.blockquote-reverse footer:before, +blockquote.pull-right footer:before, +.blockquote-reverse small:before, +blockquote.pull-right small:before, +.blockquote-reverse .small:before, +blockquote.pull-right .small:before { + content: ''; +} +.blockquote-reverse footer:after, +blockquote.pull-right footer:after, +.blockquote-reverse small:after, +blockquote.pull-right small:after, +.blockquote-reverse .small:after, +blockquote.pull-right .small:after { + content: '\00A0 \2014'; +} +address { + margin-bottom: 20px; + font-style: normal; + line-height: 1.42857143; +} +code, +kbd, +pre, +samp { + font-family: Menlo, Monaco, Consolas, "Courier New", monospace; +} +code { + padding: 2px 4px; + font-size: 90%; + color: #c7254e; + background-color: #f9f2f4; + border-radius: 4px; +} +kbd { + padding: 2px 4px; + font-size: 90%; + color: #ffffff; + background-color: #333333; + border-radius: 3px; + -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25); + box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25); +} +kbd kbd { + padding: 0; + font-size: 100%; + font-weight: bold; + -webkit-box-shadow: none; + box-shadow: none; +} +pre { + display: block; + padding: 9.5px; + margin: 0 0 10px; + font-size: 13px; + line-height: 1.42857143; + word-break: break-all; + word-wrap: break-word; + color: #3a3f44; + background-color: #f5f5f5; + border: 1px solid #cccccc; + border-radius: 4px; +} +pre code { + padding: 0; + font-size: inherit; + color: inherit; + white-space: pre-wrap; + background-color: transparent; + border-radius: 0; +} +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; +} +.container { + margin-right: auto; + margin-left: auto; + padding-left: 15px; + padding-right: 15px; +} +@media (min-width: 768px) { + .container { + width: 750px; + } +} +@media (min-width: 992px) { + .container { + width: 970px; + } +} +@media (min-width: 1200px) { + .container { + width: 1170px; + } +} +.container-fluid { + margin-right: auto; + margin-left: auto; + padding-left: 15px; + padding-right: 15px; +} +.row { + margin-left: -15px; + margin-right: -15px; +} +.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, 
.col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 { + position: relative; + min-height: 1px; + padding-left: 15px; + padding-right: 15px; +} +.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 { + float: left; +} +.col-xs-12 { + width: 100%; +} +.col-xs-11 { + width: 91.66666667%; +} +.col-xs-10 { + width: 83.33333333%; +} +.col-xs-9 { + width: 75%; +} +.col-xs-8 { + width: 66.66666667%; +} +.col-xs-7 { + width: 58.33333333%; +} +.col-xs-6 { + width: 50%; +} +.col-xs-5 { + width: 41.66666667%; +} +.col-xs-4 { + width: 33.33333333%; +} +.col-xs-3 { + width: 25%; +} +.col-xs-2 { + width: 16.66666667%; +} +.col-xs-1 { + width: 8.33333333%; +} +.col-xs-pull-12 { + right: 100%; +} +.col-xs-pull-11 { + right: 91.66666667%; +} +.col-xs-pull-10 { + right: 83.33333333%; +} +.col-xs-pull-9 { + right: 75%; +} +.col-xs-pull-8 { + right: 66.66666667%; +} +.col-xs-pull-7 { + right: 58.33333333%; +} +.col-xs-pull-6 { + right: 50%; +} +.col-xs-pull-5 { + right: 41.66666667%; +} +.col-xs-pull-4 { + right: 33.33333333%; +} +.col-xs-pull-3 { + right: 25%; +} +.col-xs-pull-2 { + right: 16.66666667%; +} +.col-xs-pull-1 { + right: 8.33333333%; +} +.col-xs-pull-0 { + right: auto; +} +.col-xs-push-12 { + left: 100%; +} +.col-xs-push-11 { + left: 91.66666667%; +} +.col-xs-push-10 { + left: 83.33333333%; +} +.col-xs-push-9 { + left: 75%; +} +.col-xs-push-8 { + left: 66.66666667%; +} +.col-xs-push-7 { + left: 58.33333333%; +} +.col-xs-push-6 { + left: 50%; +} +.col-xs-push-5 { + left: 41.66666667%; +} +.col-xs-push-4 { + left: 33.33333333%; +} +.col-xs-push-3 { + left: 25%; +} +.col-xs-push-2 { + left: 16.66666667%; +} +.col-xs-push-1 { + left: 8.33333333%; +} +.col-xs-push-0 { + left: auto; +} +.col-xs-offset-12 { + margin-left: 100%; +} +.col-xs-offset-11 { + margin-left: 91.66666667%; +} +.col-xs-offset-10 { + margin-left: 83.33333333%; +} +.col-xs-offset-9 { + margin-left: 75%; +} +.col-xs-offset-8 { + margin-left: 66.66666667%; +} +.col-xs-offset-7 { + margin-left: 58.33333333%; +} +.col-xs-offset-6 { + margin-left: 50%; +} +.col-xs-offset-5 { + margin-left: 41.66666667%; +} +.col-xs-offset-4 { + margin-left: 33.33333333%; +} +.col-xs-offset-3 { + margin-left: 25%; +} +.col-xs-offset-2 { + margin-left: 16.66666667%; +} +.col-xs-offset-1 { + margin-left: 8.33333333%; +} +.col-xs-offset-0 { + margin-left: 0%; +} +@media (min-width: 768px) { + .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 { + float: left; + } + .col-sm-12 { + width: 100%; + } + .col-sm-11 { + width: 91.66666667%; + } + .col-sm-10 { + width: 83.33333333%; + } + .col-sm-9 { + width: 75%; + } + .col-sm-8 { + width: 66.66666667%; + } + .col-sm-7 { + width: 58.33333333%; + } + .col-sm-6 { + width: 50%; + } + .col-sm-5 { + width: 41.66666667%; + } + .col-sm-4 { + width: 33.33333333%; + } + .col-sm-3 { + width: 25%; + } + .col-sm-2 { + width: 16.66666667%; + } + .col-sm-1 { + width: 8.33333333%; + } + .col-sm-pull-12 { + right: 100%; + } + .col-sm-pull-11 { + right: 91.66666667%; + } + .col-sm-pull-10 { + right: 83.33333333%; + } + .col-sm-pull-9 { + right: 75%; + } + 
.col-sm-pull-8 { + right: 66.66666667%; + } + .col-sm-pull-7 { + right: 58.33333333%; + } + .col-sm-pull-6 { + right: 50%; + } + .col-sm-pull-5 { + right: 41.66666667%; + } + .col-sm-pull-4 { + right: 33.33333333%; + } + .col-sm-pull-3 { + right: 25%; + } + .col-sm-pull-2 { + right: 16.66666667%; + } + .col-sm-pull-1 { + right: 8.33333333%; + } + .col-sm-pull-0 { + right: auto; + } + .col-sm-push-12 { + left: 100%; + } + .col-sm-push-11 { + left: 91.66666667%; + } + .col-sm-push-10 { + left: 83.33333333%; + } + .col-sm-push-9 { + left: 75%; + } + .col-sm-push-8 { + left: 66.66666667%; + } + .col-sm-push-7 { + left: 58.33333333%; + } + .col-sm-push-6 { + left: 50%; + } + .col-sm-push-5 { + left: 41.66666667%; + } + .col-sm-push-4 { + left: 33.33333333%; + } + .col-sm-push-3 { + left: 25%; + } + .col-sm-push-2 { + left: 16.66666667%; + } + .col-sm-push-1 { + left: 8.33333333%; + } + .col-sm-push-0 { + left: auto; + } + .col-sm-offset-12 { + margin-left: 100%; + } + .col-sm-offset-11 { + margin-left: 91.66666667%; + } + .col-sm-offset-10 { + margin-left: 83.33333333%; + } + .col-sm-offset-9 { + margin-left: 75%; + } + .col-sm-offset-8 { + margin-left: 66.66666667%; + } + .col-sm-offset-7 { + margin-left: 58.33333333%; + } + .col-sm-offset-6 { + margin-left: 50%; + } + .col-sm-offset-5 { + margin-left: 41.66666667%; + } + .col-sm-offset-4 { + margin-left: 33.33333333%; + } + .col-sm-offset-3 { + margin-left: 25%; + } + .col-sm-offset-2 { + margin-left: 16.66666667%; + } + .col-sm-offset-1 { + margin-left: 8.33333333%; + } + .col-sm-offset-0 { + margin-left: 0%; + } +} +@media (min-width: 992px) { + .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 { + float: left; + } + .col-md-12 { + width: 100%; + } + .col-md-11 { + width: 91.66666667%; + } + .col-md-10 { + width: 83.33333333%; + } + .col-md-9 { + width: 75%; + } + .col-md-8 { + width: 66.66666667%; + } + .col-md-7 { + width: 58.33333333%; + } + .col-md-6 { + width: 50%; + } + .col-md-5 { + width: 41.66666667%; + } + .col-md-4 { + width: 33.33333333%; + } + .col-md-3 { + width: 25%; + } + .col-md-2 { + width: 16.66666667%; + } + .col-md-1 { + width: 8.33333333%; + } + .col-md-pull-12 { + right: 100%; + } + .col-md-pull-11 { + right: 91.66666667%; + } + .col-md-pull-10 { + right: 83.33333333%; + } + .col-md-pull-9 { + right: 75%; + } + .col-md-pull-8 { + right: 66.66666667%; + } + .col-md-pull-7 { + right: 58.33333333%; + } + .col-md-pull-6 { + right: 50%; + } + .col-md-pull-5 { + right: 41.66666667%; + } + .col-md-pull-4 { + right: 33.33333333%; + } + .col-md-pull-3 { + right: 25%; + } + .col-md-pull-2 { + right: 16.66666667%; + } + .col-md-pull-1 { + right: 8.33333333%; + } + .col-md-pull-0 { + right: auto; + } + .col-md-push-12 { + left: 100%; + } + .col-md-push-11 { + left: 91.66666667%; + } + .col-md-push-10 { + left: 83.33333333%; + } + .col-md-push-9 { + left: 75%; + } + .col-md-push-8 { + left: 66.66666667%; + } + .col-md-push-7 { + left: 58.33333333%; + } + .col-md-push-6 { + left: 50%; + } + .col-md-push-5 { + left: 41.66666667%; + } + .col-md-push-4 { + left: 33.33333333%; + } + .col-md-push-3 { + left: 25%; + } + .col-md-push-2 { + left: 16.66666667%; + } + .col-md-push-1 { + left: 8.33333333%; + } + .col-md-push-0 { + left: auto; + } + .col-md-offset-12 { + margin-left: 100%; + } + .col-md-offset-11 { + margin-left: 91.66666667%; + } + .col-md-offset-10 { + margin-left: 83.33333333%; + } + .col-md-offset-9 { + margin-left: 75%; + } + 
.col-md-offset-8 { + margin-left: 66.66666667%; + } + .col-md-offset-7 { + margin-left: 58.33333333%; + } + .col-md-offset-6 { + margin-left: 50%; + } + .col-md-offset-5 { + margin-left: 41.66666667%; + } + .col-md-offset-4 { + margin-left: 33.33333333%; + } + .col-md-offset-3 { + margin-left: 25%; + } + .col-md-offset-2 { + margin-left: 16.66666667%; + } + .col-md-offset-1 { + margin-left: 8.33333333%; + } + .col-md-offset-0 { + margin-left: 0%; + } +} +@media (min-width: 1200px) { + .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 { + float: left; + } + .col-lg-12 { + width: 100%; + } + .col-lg-11 { + width: 91.66666667%; + } + .col-lg-10 { + width: 83.33333333%; + } + .col-lg-9 { + width: 75%; + } + .col-lg-8 { + width: 66.66666667%; + } + .col-lg-7 { + width: 58.33333333%; + } + .col-lg-6 { + width: 50%; + } + .col-lg-5 { + width: 41.66666667%; + } + .col-lg-4 { + width: 33.33333333%; + } + .col-lg-3 { + width: 25%; + } + .col-lg-2 { + width: 16.66666667%; + } + .col-lg-1 { + width: 8.33333333%; + } + .col-lg-pull-12 { + right: 100%; + } + .col-lg-pull-11 { + right: 91.66666667%; + } + .col-lg-pull-10 { + right: 83.33333333%; + } + .col-lg-pull-9 { + right: 75%; + } + .col-lg-pull-8 { + right: 66.66666667%; + } + .col-lg-pull-7 { + right: 58.33333333%; + } + .col-lg-pull-6 { + right: 50%; + } + .col-lg-pull-5 { + right: 41.66666667%; + } + .col-lg-pull-4 { + right: 33.33333333%; + } + .col-lg-pull-3 { + right: 25%; + } + .col-lg-pull-2 { + right: 16.66666667%; + } + .col-lg-pull-1 { + right: 8.33333333%; + } + .col-lg-pull-0 { + right: auto; + } + .col-lg-push-12 { + left: 100%; + } + .col-lg-push-11 { + left: 91.66666667%; + } + .col-lg-push-10 { + left: 83.33333333%; + } + .col-lg-push-9 { + left: 75%; + } + .col-lg-push-8 { + left: 66.66666667%; + } + .col-lg-push-7 { + left: 58.33333333%; + } + .col-lg-push-6 { + left: 50%; + } + .col-lg-push-5 { + left: 41.66666667%; + } + .col-lg-push-4 { + left: 33.33333333%; + } + .col-lg-push-3 { + left: 25%; + } + .col-lg-push-2 { + left: 16.66666667%; + } + .col-lg-push-1 { + left: 8.33333333%; + } + .col-lg-push-0 { + left: auto; + } + .col-lg-offset-12 { + margin-left: 100%; + } + .col-lg-offset-11 { + margin-left: 91.66666667%; + } + .col-lg-offset-10 { + margin-left: 83.33333333%; + } + .col-lg-offset-9 { + margin-left: 75%; + } + .col-lg-offset-8 { + margin-left: 66.66666667%; + } + .col-lg-offset-7 { + margin-left: 58.33333333%; + } + .col-lg-offset-6 { + margin-left: 50%; + } + .col-lg-offset-5 { + margin-left: 41.66666667%; + } + .col-lg-offset-4 { + margin-left: 33.33333333%; + } + .col-lg-offset-3 { + margin-left: 25%; + } + .col-lg-offset-2 { + margin-left: 16.66666667%; + } + .col-lg-offset-1 { + margin-left: 8.33333333%; + } + .col-lg-offset-0 { + margin-left: 0%; + } +} +table { + background-color: #2e3338; +} +caption { + padding-top: 8px; + padding-bottom: 8px; + color: #7a8288; + text-align: left; +} +th { + text-align: left; +} +.table { + width: 100%; + max-width: 100%; + margin-bottom: 20px; +} +.table > thead > tr > th, +.table > tbody > tr > th, +.table > tfoot > tr > th, +.table > thead > tr > td, +.table > tbody > tr > td, +.table > tfoot > tr > td { + padding: 8px; + line-height: 1.42857143; + vertical-align: top; + border-top: 1px solid #1c1e22; +} +.table > thead > tr > th { + vertical-align: bottom; + border-bottom: 2px solid #1c1e22; +} +.table > caption + thead > tr:first-child > th, +.table > colgroup + thead > tr:first-child > 
th, +.table > thead:first-child > tr:first-child > th, +.table > caption + thead > tr:first-child > td, +.table > colgroup + thead > tr:first-child > td, +.table > thead:first-child > tr:first-child > td { + border-top: 0; +} +.table > tbody + tbody { + border-top: 2px solid #1c1e22; +} +.table .table { + background-color: #272b30; +} +.table-condensed > thead > tr > th, +.table-condensed > tbody > tr > th, +.table-condensed > tfoot > tr > th, +.table-condensed > thead > tr > td, +.table-condensed > tbody > tr > td, +.table-condensed > tfoot > tr > td { + padding: 5px; +} +.table-bordered { + border: 1px solid #1c1e22; +} +.table-bordered > thead > tr > th, +.table-bordered > tbody > tr > th, +.table-bordered > tfoot > tr > th, +.table-bordered > thead > tr > td, +.table-bordered > tbody > tr > td, +.table-bordered > tfoot > tr > td { + border: 1px solid #1c1e22; +} +.table-bordered > thead > tr > th, +.table-bordered > thead > tr > td { + border-bottom-width: 2px; +} +.table-striped > tbody > tr:nth-of-type(odd) { + background-color: #353a41; +} +.table-hover > tbody > tr:hover { + background-color: #49515a; +} +table col[class*="col-"] { + position: static; + float: none; + display: table-column; +} +table td[class*="col-"], +table th[class*="col-"] { + position: static; + float: none; + display: table-cell; +} +.table > thead > tr > td.active, +.table > tbody > tr > td.active, +.table > tfoot > tr > td.active, +.table > thead > tr > th.active, +.table > tbody > tr > th.active, +.table > tfoot > tr > th.active, +.table > thead > tr.active > td, +.table > tbody > tr.active > td, +.table > tfoot > tr.active > td, +.table > thead > tr.active > th, +.table > tbody > tr.active > th, +.table > tfoot > tr.active > th { + background-color: #49515a; +} +.table-hover > tbody > tr > td.active:hover, +.table-hover > tbody > tr > th.active:hover, +.table-hover > tbody > tr.active:hover > td, +.table-hover > tbody > tr:hover > .active, +.table-hover > tbody > tr.active:hover > th { + background-color: #3e444c; +} +.table > thead > tr > td.success, +.table > tbody > tr > td.success, +.table > tfoot > tr > td.success, +.table > thead > tr > th.success, +.table > tbody > tr > th.success, +.table > tfoot > tr > th.success, +.table > thead > tr.success > td, +.table > tbody > tr.success > td, +.table > tfoot > tr.success > td, +.table > thead > tr.success > th, +.table > tbody > tr.success > th, +.table > tfoot > tr.success > th { + background-color: #62c462; +} +.table-hover > tbody > tr > td.success:hover, +.table-hover > tbody > tr > th.success:hover, +.table-hover > tbody > tr.success:hover > td, +.table-hover > tbody > tr:hover > .success, +.table-hover > tbody > tr.success:hover > th { + background-color: #4fbd4f; +} +.table > thead > tr > td.info, +.table > tbody > tr > td.info, +.table > tfoot > tr > td.info, +.table > thead > tr > th.info, +.table > tbody > tr > th.info, +.table > tfoot > tr > th.info, +.table > thead > tr.info > td, +.table > tbody > tr.info > td, +.table > tfoot > tr.info > td, +.table > thead > tr.info > th, +.table > tbody > tr.info > th, +.table > tfoot > tr.info > th { + background-color: #5bc0de; +} +.table-hover > tbody > tr > td.info:hover, +.table-hover > tbody > tr > th.info:hover, +.table-hover > tbody > tr.info:hover > td, +.table-hover > tbody > tr:hover > .info, +.table-hover > tbody > tr.info:hover > th { + background-color: #46b8da; +} +.table > thead > tr > td.warning, +.table > tbody > tr > td.warning, +.table > tfoot > tr > td.warning, +.table > thead > tr > 
th.warning, +.table > tbody > tr > th.warning, +.table > tfoot > tr > th.warning, +.table > thead > tr.warning > td, +.table > tbody > tr.warning > td, +.table > tfoot > tr.warning > td, +.table > thead > tr.warning > th, +.table > tbody > tr.warning > th, +.table > tfoot > tr.warning > th { + background-color: #f89406; +} +.table-hover > tbody > tr > td.warning:hover, +.table-hover > tbody > tr > th.warning:hover, +.table-hover > tbody > tr.warning:hover > td, +.table-hover > tbody > tr:hover > .warning, +.table-hover > tbody > tr.warning:hover > th { + background-color: #df8505; +} +.table > thead > tr > td.danger, +.table > tbody > tr > td.danger, +.table > tfoot > tr > td.danger, +.table > thead > tr > th.danger, +.table > tbody > tr > th.danger, +.table > tfoot > tr > th.danger, +.table > thead > tr.danger > td, +.table > tbody > tr.danger > td, +.table > tfoot > tr.danger > td, +.table > thead > tr.danger > th, +.table > tbody > tr.danger > th, +.table > tfoot > tr.danger > th { + background-color: #ee5f5b; +} +.table-hover > tbody > tr > td.danger:hover, +.table-hover > tbody > tr > th.danger:hover, +.table-hover > tbody > tr.danger:hover > td, +.table-hover > tbody > tr:hover > .danger, +.table-hover > tbody > tr.danger:hover > th { + background-color: #ec4844; +} +.table-responsive { + overflow-x: auto; + min-height: 0.01%; +} +@media screen and (max-width: 767px) { + .table-responsive { + width: 100%; + margin-bottom: 15px; + overflow-y: hidden; + -ms-overflow-style: -ms-autohiding-scrollbar; + border: 1px solid #1c1e22; + } + .table-responsive > .table { + margin-bottom: 0; + } + .table-responsive > .table > thead > tr > th, + .table-responsive > .table > tbody > tr > th, + .table-responsive > .table > tfoot > tr > th, + .table-responsive > .table > thead > tr > td, + .table-responsive > .table > tbody > tr > td, + .table-responsive > .table > tfoot > tr > td { + white-space: nowrap; + } + .table-responsive > .table-bordered { + border: 0; + } + .table-responsive > .table-bordered > thead > tr > th:first-child, + .table-responsive > .table-bordered > tbody > tr > th:first-child, + .table-responsive > .table-bordered > tfoot > tr > th:first-child, + .table-responsive > .table-bordered > thead > tr > td:first-child, + .table-responsive > .table-bordered > tbody > tr > td:first-child, + .table-responsive > .table-bordered > tfoot > tr > td:first-child { + border-left: 0; + } + .table-responsive > .table-bordered > thead > tr > th:last-child, + .table-responsive > .table-bordered > tbody > tr > th:last-child, + .table-responsive > .table-bordered > tfoot > tr > th:last-child, + .table-responsive > .table-bordered > thead > tr > td:last-child, + .table-responsive > .table-bordered > tbody > tr > td:last-child, + .table-responsive > .table-bordered > tfoot > tr > td:last-child { + border-right: 0; + } + .table-responsive > .table-bordered > tbody > tr:last-child > th, + .table-responsive > .table-bordered > tfoot > tr:last-child > th, + .table-responsive > .table-bordered > tbody > tr:last-child > td, + .table-responsive > .table-bordered > tfoot > tr:last-child > td { + border-bottom: 0; + } +} +fieldset { + padding: 0; + margin: 0; + border: 0; + min-width: 0; +} +legend { + display: block; + width: 100%; + padding: 0; + margin-bottom: 20px; + font-size: 21px; + line-height: inherit; + color: #c8c8c8; + border: 0; + border-bottom: 1px solid #1c1e22; +} +label { + display: inline-block; + max-width: 100%; + margin-bottom: 5px; + font-weight: bold; +} +input[type="search"] { + 
-webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +input[type="radio"], +input[type="checkbox"] { + margin: 4px 0 0; + margin-top: 1px \9; + line-height: normal; +} +input[type="file"] { + display: block; +} +input[type="range"] { + display: block; + width: 100%; +} +select[multiple], +select[size] { + height: auto; +} +input[type="file"]:focus, +input[type="radio"]:focus, +input[type="checkbox"]:focus { + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +output { + display: block; + padding-top: 9px; + font-size: 14px; + line-height: 1.42857143; + color: #272b30; +} +.form-control { + display: block; + width: 100%; + height: 38px; + padding: 8px 12px; + font-size: 14px; + line-height: 1.42857143; + color: #272b30; + background-color: #ffffff; + background-image: none; + border: 1px solid #000000; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; + -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; + transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; +} +.form-control:focus { + border-color: #66afe9; + outline: 0; + -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6); + box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6); +} +.form-control::-moz-placeholder { + color: #7a8288; + opacity: 1; +} +.form-control:-ms-input-placeholder { + color: #7a8288; +} +.form-control::-webkit-input-placeholder { + color: #7a8288; +} +.form-control::-ms-expand { + border: 0; + background-color: transparent; +} +.form-control[disabled], +.form-control[readonly], +fieldset[disabled] .form-control { + background-color: #999999; + opacity: 1; +} +.form-control[disabled], +fieldset[disabled] .form-control { + cursor: not-allowed; +} +textarea.form-control { + height: auto; +} +input[type="search"] { + -webkit-appearance: none; +} +@media screen and (-webkit-min-device-pixel-ratio: 0) { + input[type="date"].form-control, + input[type="time"].form-control, + input[type="datetime-local"].form-control, + input[type="month"].form-control { + line-height: 38px; + } + input[type="date"].input-sm, + input[type="time"].input-sm, + input[type="datetime-local"].input-sm, + input[type="month"].input-sm, + .input-group-sm input[type="date"], + .input-group-sm input[type="time"], + .input-group-sm input[type="datetime-local"], + .input-group-sm input[type="month"] { + line-height: 30px; + } + input[type="date"].input-lg, + input[type="time"].input-lg, + input[type="datetime-local"].input-lg, + input[type="month"].input-lg, + .input-group-lg input[type="date"], + .input-group-lg input[type="time"], + .input-group-lg input[type="datetime-local"], + .input-group-lg input[type="month"] { + line-height: 54px; + } +} +.form-group { + margin-bottom: 15px; +} +.radio, +.checkbox { + position: relative; + display: block; + margin-top: 10px; + margin-bottom: 10px; +} +.radio label, +.checkbox label { + min-height: 20px; + padding-left: 20px; + margin-bottom: 0; + font-weight: normal; + cursor: pointer; +} +.radio input[type="radio"], +.radio-inline input[type="radio"], +.checkbox input[type="checkbox"], +.checkbox-inline input[type="checkbox"] { + position: absolute; + margin-left: -20px; + margin-top: 4px \9; +} +.radio + .radio, +.checkbox + .checkbox { + margin-top: -5px; +} +.radio-inline, 
+.checkbox-inline { + position: relative; + display: inline-block; + padding-left: 20px; + margin-bottom: 0; + vertical-align: middle; + font-weight: normal; + cursor: pointer; +} +.radio-inline + .radio-inline, +.checkbox-inline + .checkbox-inline { + margin-top: 0; + margin-left: 10px; +} +input[type="radio"][disabled], +input[type="checkbox"][disabled], +input[type="radio"].disabled, +input[type="checkbox"].disabled, +fieldset[disabled] input[type="radio"], +fieldset[disabled] input[type="checkbox"] { + cursor: not-allowed; +} +.radio-inline.disabled, +.checkbox-inline.disabled, +fieldset[disabled] .radio-inline, +fieldset[disabled] .checkbox-inline { + cursor: not-allowed; +} +.radio.disabled label, +.checkbox.disabled label, +fieldset[disabled] .radio label, +fieldset[disabled] .checkbox label { + cursor: not-allowed; +} +.form-control-static { + padding-top: 9px; + padding-bottom: 9px; + margin-bottom: 0; + min-height: 34px; +} +.form-control-static.input-lg, +.form-control-static.input-sm { + padding-left: 0; + padding-right: 0; +} +.input-sm { + height: 30px; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +select.input-sm { + height: 30px; + line-height: 30px; +} +textarea.input-sm, +select[multiple].input-sm { + height: auto; +} +.form-group-sm .form-control { + height: 30px; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +.form-group-sm select.form-control { + height: 30px; + line-height: 30px; +} +.form-group-sm textarea.form-control, +.form-group-sm select[multiple].form-control { + height: auto; +} +.form-group-sm .form-control-static { + height: 30px; + min-height: 32px; + padding: 6px 10px; + font-size: 12px; + line-height: 1.5; +} +.input-lg { + height: 54px; + padding: 14px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +select.input-lg { + height: 54px; + line-height: 54px; +} +textarea.input-lg, +select[multiple].input-lg { + height: auto; +} +.form-group-lg .form-control { + height: 54px; + padding: 14px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +.form-group-lg select.form-control { + height: 54px; + line-height: 54px; +} +.form-group-lg textarea.form-control, +.form-group-lg select[multiple].form-control { + height: auto; +} +.form-group-lg .form-control-static { + height: 54px; + min-height: 38px; + padding: 15px 16px; + font-size: 18px; + line-height: 1.3333333; +} +.has-feedback { + position: relative; +} +.has-feedback .form-control { + padding-right: 47.5px; +} +.form-control-feedback { + position: absolute; + top: 0; + right: 0; + z-index: 2; + display: block; + width: 38px; + height: 38px; + line-height: 38px; + text-align: center; + pointer-events: none; +} +.input-lg + .form-control-feedback, +.input-group-lg + .form-control-feedback, +.form-group-lg .form-control + .form-control-feedback { + width: 54px; + height: 54px; + line-height: 54px; +} +.input-sm + .form-control-feedback, +.input-group-sm + .form-control-feedback, +.form-group-sm .form-control + .form-control-feedback { + width: 30px; + height: 30px; + line-height: 30px; +} +.has-success .help-block, +.has-success .control-label, +.has-success .radio, +.has-success .checkbox, +.has-success .radio-inline, +.has-success .checkbox-inline, +.has-success.radio label, +.has-success.checkbox label, +.has-success.radio-inline label, +.has-success.checkbox-inline label { + color: #ffffff; +} +.has-success .form-control { + border-color: #ffffff; + -webkit-box-shadow: inset 
0 1px 1px rgba(0, 0, 0, 0.075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); +} +.has-success .form-control:focus { + border-color: #e6e6e6; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; +} +.has-success .input-group-addon { + color: #ffffff; + border-color: #ffffff; + background-color: #62c462; +} +.has-success .form-control-feedback { + color: #ffffff; +} +.has-warning .help-block, +.has-warning .control-label, +.has-warning .radio, +.has-warning .checkbox, +.has-warning .radio-inline, +.has-warning .checkbox-inline, +.has-warning.radio label, +.has-warning.checkbox label, +.has-warning.radio-inline label, +.has-warning.checkbox-inline label { + color: #ffffff; +} +.has-warning .form-control { + border-color: #ffffff; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); +} +.has-warning .form-control:focus { + border-color: #e6e6e6; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; +} +.has-warning .input-group-addon { + color: #ffffff; + border-color: #ffffff; + background-color: #f89406; +} +.has-warning .form-control-feedback { + color: #ffffff; +} +.has-error .help-block, +.has-error .control-label, +.has-error .radio, +.has-error .checkbox, +.has-error .radio-inline, +.has-error .checkbox-inline, +.has-error.radio label, +.has-error.checkbox label, +.has-error.radio-inline label, +.has-error.checkbox-inline label { + color: #ffffff; +} +.has-error .form-control { + border-color: #ffffff; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); +} +.has-error .form-control:focus { + border-color: #e6e6e6; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff; +} +.has-error .input-group-addon { + color: #ffffff; + border-color: #ffffff; + background-color: #ee5f5b; +} +.has-error .form-control-feedback { + color: #ffffff; +} +.has-feedback label ~ .form-control-feedback { + top: 25px; +} +.has-feedback label.sr-only ~ .form-control-feedback { + top: 0; +} +.help-block { + display: block; + margin-top: 5px; + margin-bottom: 10px; + color: #ffffff; +} +@media (min-width: 768px) { + .form-inline .form-group { + display: inline-block; + margin-bottom: 0; + vertical-align: middle; + } + .form-inline .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .form-inline .form-control-static { + display: inline-block; + } + .form-inline .input-group { + display: inline-table; + vertical-align: middle; + } + .form-inline .input-group .input-group-addon, + .form-inline .input-group .input-group-btn, + .form-inline .input-group .form-control { + width: auto; + } + .form-inline .input-group > .form-control { + width: 100%; + } + .form-inline .control-label { + margin-bottom: 0; + vertical-align: middle; + } + .form-inline .radio, + .form-inline .checkbox { + display: inline-block; + margin-top: 0; + margin-bottom: 0; + vertical-align: middle; + } + .form-inline .radio label, + .form-inline .checkbox label { + padding-left: 0; + } + .form-inline .radio input[type="radio"], + .form-inline .checkbox input[type="checkbox"] { + position: relative; + margin-left: 0; + } + .form-inline .has-feedback .form-control-feedback { + top: 0; + } +} +.form-horizontal .radio, 
+.form-horizontal .checkbox, +.form-horizontal .radio-inline, +.form-horizontal .checkbox-inline { + margin-top: 0; + margin-bottom: 0; + padding-top: 9px; +} +.form-horizontal .radio, +.form-horizontal .checkbox { + min-height: 29px; +} +.form-horizontal .form-group { + margin-left: -15px; + margin-right: -15px; +} +@media (min-width: 768px) { + .form-horizontal .control-label { + text-align: right; + margin-bottom: 0; + padding-top: 9px; + } +} +.form-horizontal .has-feedback .form-control-feedback { + right: 15px; +} +@media (min-width: 768px) { + .form-horizontal .form-group-lg .control-label { + padding-top: 15px; + font-size: 18px; + } +} +@media (min-width: 768px) { + .form-horizontal .form-group-sm .control-label { + padding-top: 6px; + font-size: 12px; + } +} +.btn { + display: inline-block; + margin-bottom: 0; + font-weight: normal; + text-align: center; + vertical-align: middle; + -ms-touch-action: manipulation; + touch-action: manipulation; + cursor: pointer; + background-image: none; + border: 0px solid transparent; + white-space: nowrap; + padding: 8px 12px; + font-size: 14px; + line-height: 1.42857143; + border-radius: 4px; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} +.btn:focus, +.btn:active:focus, +.btn.active:focus, +.btn.focus, +.btn:active.focus, +.btn.active.focus { + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} +.btn:hover, +.btn:focus, +.btn.focus { + color: #ffffff; + text-decoration: none; +} +.btn:active, +.btn.active { + outline: 0; + background-image: none; + -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); +} +.btn.disabled, +.btn[disabled], +fieldset[disabled] .btn { + cursor: not-allowed; + opacity: 0.65; + filter: alpha(opacity=65); + -webkit-box-shadow: none; + box-shadow: none; +} +a.btn.disabled, +fieldset[disabled] a.btn { + pointer-events: none; +} +.btn-default { + color: #ffffff; + background-color: #3a3f44; + border-color: #3a3f44; +} +.btn-default:focus, +.btn-default.focus { + color: #ffffff; + background-color: #232628; + border-color: #000000; +} +.btn-default:hover { + color: #ffffff; + background-color: #232628; + border-color: #1e2023; +} +.btn-default:active, +.btn-default.active, +.open > .dropdown-toggle.btn-default { + color: #ffffff; + background-color: #232628; + border-color: #1e2023; +} +.btn-default:active:hover, +.btn-default.active:hover, +.open > .dropdown-toggle.btn-default:hover, +.btn-default:active:focus, +.btn-default.active:focus, +.open > .dropdown-toggle.btn-default:focus, +.btn-default:active.focus, +.btn-default.active.focus, +.open > .dropdown-toggle.btn-default.focus { + color: #ffffff; + background-color: #121415; + border-color: #000000; +} +.btn-default:active, +.btn-default.active, +.open > .dropdown-toggle.btn-default { + background-image: none; +} +.btn-default.disabled:hover, +.btn-default[disabled]:hover, +fieldset[disabled] .btn-default:hover, +.btn-default.disabled:focus, +.btn-default[disabled]:focus, +fieldset[disabled] .btn-default:focus, +.btn-default.disabled.focus, +.btn-default[disabled].focus, +fieldset[disabled] .btn-default.focus { + background-color: #3a3f44; + border-color: #3a3f44; +} +.btn-default .badge { + color: #3a3f44; + background-color: #ffffff; +} +.btn-primary { + color: #ffffff; + background-color: #7a8288; + border-color: #7a8288; +} +.btn-primary:focus, +.btn-primary.focus { + color: #ffffff; + background-color: #62686d; + border-color: 
#3e4245; +} +.btn-primary:hover { + color: #ffffff; + background-color: #62686d; + border-color: #5d6368; +} +.btn-primary:active, +.btn-primary.active, +.open > .dropdown-toggle.btn-primary { + color: #ffffff; + background-color: #62686d; + border-color: #5d6368; +} +.btn-primary:active:hover, +.btn-primary.active:hover, +.open > .dropdown-toggle.btn-primary:hover, +.btn-primary:active:focus, +.btn-primary.active:focus, +.open > .dropdown-toggle.btn-primary:focus, +.btn-primary:active.focus, +.btn-primary.active.focus, +.open > .dropdown-toggle.btn-primary.focus { + color: #ffffff; + background-color: #51565a; + border-color: #3e4245; +} +.btn-primary:active, +.btn-primary.active, +.open > .dropdown-toggle.btn-primary { + background-image: none; +} +.btn-primary.disabled:hover, +.btn-primary[disabled]:hover, +fieldset[disabled] .btn-primary:hover, +.btn-primary.disabled:focus, +.btn-primary[disabled]:focus, +fieldset[disabled] .btn-primary:focus, +.btn-primary.disabled.focus, +.btn-primary[disabled].focus, +fieldset[disabled] .btn-primary.focus { + background-color: #7a8288; + border-color: #7a8288; +} +.btn-primary .badge { + color: #7a8288; + background-color: #ffffff; +} +.btn-success { + color: #ffffff; + background-color: #62c462; + border-color: #62c462; +} +.btn-success:focus, +.btn-success.focus { + color: #ffffff; + background-color: #42b142; + border-color: #2d792d; +} +.btn-success:hover { + color: #ffffff; + background-color: #42b142; + border-color: #40a940; +} +.btn-success:active, +.btn-success.active, +.open > .dropdown-toggle.btn-success { + color: #ffffff; + background-color: #42b142; + border-color: #40a940; +} +.btn-success:active:hover, +.btn-success.active:hover, +.open > .dropdown-toggle.btn-success:hover, +.btn-success:active:focus, +.btn-success.active:focus, +.open > .dropdown-toggle.btn-success:focus, +.btn-success:active.focus, +.btn-success.active.focus, +.open > .dropdown-toggle.btn-success.focus { + color: #ffffff; + background-color: #399739; + border-color: #2d792d; +} +.btn-success:active, +.btn-success.active, +.open > .dropdown-toggle.btn-success { + background-image: none; +} +.btn-success.disabled:hover, +.btn-success[disabled]:hover, +fieldset[disabled] .btn-success:hover, +.btn-success.disabled:focus, +.btn-success[disabled]:focus, +fieldset[disabled] .btn-success:focus, +.btn-success.disabled.focus, +.btn-success[disabled].focus, +fieldset[disabled] .btn-success.focus { + background-color: #62c462; + border-color: #62c462; +} +.btn-success .badge { + color: #62c462; + background-color: #ffffff; +} +.btn-info { + color: #ffffff; + background-color: #5bc0de; + border-color: #5bc0de; +} +.btn-info:focus, +.btn-info.focus { + color: #ffffff; + background-color: #31b0d5; + border-color: #1f7e9a; +} +.btn-info:hover { + color: #ffffff; + background-color: #31b0d5; + border-color: #2aabd2; +} +.btn-info:active, +.btn-info.active, +.open > .dropdown-toggle.btn-info { + color: #ffffff; + background-color: #31b0d5; + border-color: #2aabd2; +} +.btn-info:active:hover, +.btn-info.active:hover, +.open > .dropdown-toggle.btn-info:hover, +.btn-info:active:focus, +.btn-info.active:focus, +.open > .dropdown-toggle.btn-info:focus, +.btn-info:active.focus, +.btn-info.active.focus, +.open > .dropdown-toggle.btn-info.focus { + color: #ffffff; + background-color: #269abc; + border-color: #1f7e9a; +} +.btn-info:active, +.btn-info.active, +.open > .dropdown-toggle.btn-info { + background-image: none; +} +.btn-info.disabled:hover, +.btn-info[disabled]:hover, 
+fieldset[disabled] .btn-info:hover, +.btn-info.disabled:focus, +.btn-info[disabled]:focus, +fieldset[disabled] .btn-info:focus, +.btn-info.disabled.focus, +.btn-info[disabled].focus, +fieldset[disabled] .btn-info.focus { + background-color: #5bc0de; + border-color: #5bc0de; +} +.btn-info .badge { + color: #5bc0de; + background-color: #ffffff; +} +.btn-warning { + color: #ffffff; + background-color: #f89406; + border-color: #f89406; +} +.btn-warning:focus, +.btn-warning.focus { + color: #ffffff; + background-color: #c67605; + border-color: #7c4a03; +} +.btn-warning:hover { + color: #ffffff; + background-color: #c67605; + border-color: #bc7005; +} +.btn-warning:active, +.btn-warning.active, +.open > .dropdown-toggle.btn-warning { + color: #ffffff; + background-color: #c67605; + border-color: #bc7005; +} +.btn-warning:active:hover, +.btn-warning.active:hover, +.open > .dropdown-toggle.btn-warning:hover, +.btn-warning:active:focus, +.btn-warning.active:focus, +.open > .dropdown-toggle.btn-warning:focus, +.btn-warning:active.focus, +.btn-warning.active.focus, +.open > .dropdown-toggle.btn-warning.focus { + color: #ffffff; + background-color: #a36104; + border-color: #7c4a03; +} +.btn-warning:active, +.btn-warning.active, +.open > .dropdown-toggle.btn-warning { + background-image: none; +} +.btn-warning.disabled:hover, +.btn-warning[disabled]:hover, +fieldset[disabled] .btn-warning:hover, +.btn-warning.disabled:focus, +.btn-warning[disabled]:focus, +fieldset[disabled] .btn-warning:focus, +.btn-warning.disabled.focus, +.btn-warning[disabled].focus, +fieldset[disabled] .btn-warning.focus { + background-color: #f89406; + border-color: #f89406; +} +.btn-warning .badge { + color: #f89406; + background-color: #ffffff; +} +.btn-danger { + color: #ffffff; + background-color: #ee5f5b; + border-color: #ee5f5b; +} +.btn-danger:focus, +.btn-danger.focus { + color: #ffffff; + background-color: #e9322d; + border-color: #b71713; +} +.btn-danger:hover { + color: #ffffff; + background-color: #e9322d; + border-color: #e82924; +} +.btn-danger:active, +.btn-danger.active, +.open > .dropdown-toggle.btn-danger { + color: #ffffff; + background-color: #e9322d; + border-color: #e82924; +} +.btn-danger:active:hover, +.btn-danger.active:hover, +.open > .dropdown-toggle.btn-danger:hover, +.btn-danger:active:focus, +.btn-danger.active:focus, +.open > .dropdown-toggle.btn-danger:focus, +.btn-danger:active.focus, +.btn-danger.active.focus, +.open > .dropdown-toggle.btn-danger.focus { + color: #ffffff; + background-color: #dc1c17; + border-color: #b71713; +} +.btn-danger:active, +.btn-danger.active, +.open > .dropdown-toggle.btn-danger { + background-image: none; +} +.btn-danger.disabled:hover, +.btn-danger[disabled]:hover, +fieldset[disabled] .btn-danger:hover, +.btn-danger.disabled:focus, +.btn-danger[disabled]:focus, +fieldset[disabled] .btn-danger:focus, +.btn-danger.disabled.focus, +.btn-danger[disabled].focus, +fieldset[disabled] .btn-danger.focus { + background-color: #ee5f5b; + border-color: #ee5f5b; +} +.btn-danger .badge { + color: #ee5f5b; + background-color: #ffffff; +} +.btn-link { + color: #ffffff; + font-weight: normal; + border-radius: 0; +} +.btn-link, +.btn-link:active, +.btn-link.active, +.btn-link[disabled], +fieldset[disabled] .btn-link { + background-color: transparent; + -webkit-box-shadow: none; + box-shadow: none; +} +.btn-link, +.btn-link:hover, +.btn-link:focus, +.btn-link:active { + border-color: transparent; +} +.btn-link:hover, +.btn-link:focus { + color: #ffffff; + text-decoration: underline; + 
background-color: transparent; +} +.btn-link[disabled]:hover, +fieldset[disabled] .btn-link:hover, +.btn-link[disabled]:focus, +fieldset[disabled] .btn-link:focus { + color: #7a8288; + text-decoration: none; +} +.btn-lg, +.btn-group-lg > .btn { + padding: 14px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +.btn-sm, +.btn-group-sm > .btn { + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +.btn-xs, +.btn-group-xs > .btn { + padding: 1px 5px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +.btn-block { + display: block; + width: 100%; +} +.btn-block + .btn-block { + margin-top: 5px; +} +input[type="submit"].btn-block, +input[type="reset"].btn-block, +input[type="button"].btn-block { + width: 100%; +} +.fade { + opacity: 0; + -webkit-transition: opacity 0.15s linear; + -o-transition: opacity 0.15s linear; + transition: opacity 0.15s linear; +} +.fade.in { + opacity: 1; +} +.collapse { + display: none; +} +.collapse.in { + display: block; +} +tr.collapse.in { + display: table-row; +} +tbody.collapse.in { + display: table-row-group; +} +.collapsing { + position: relative; + height: 0; + overflow: hidden; + -webkit-transition-property: height, visibility; + -o-transition-property: height, visibility; + transition-property: height, visibility; + -webkit-transition-duration: 0.35s; + -o-transition-duration: 0.35s; + transition-duration: 0.35s; + -webkit-transition-timing-function: ease; + -o-transition-timing-function: ease; + transition-timing-function: ease; +} +.caret { + display: inline-block; + width: 0; + height: 0; + margin-left: 2px; + vertical-align: middle; + border-top: 4px dashed; + border-top: 4px solid \9; + border-right: 4px solid transparent; + border-left: 4px solid transparent; +} +.dropup, +.dropdown { + position: relative; +} +.dropdown-toggle:focus { + outline: 0; +} +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 160px; + padding: 5px 0; + margin: 2px 0 0; + list-style: none; + font-size: 14px; + text-align: left; + background-color: #3a3f44; + border: 1px solid #272b30; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 4px; + -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175); + box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175); + -webkit-background-clip: padding-box; + background-clip: padding-box; +} +.dropdown-menu.pull-right { + right: 0; + left: auto; +} +.dropdown-menu .divider { + height: 1px; + margin: 9px 0; + overflow: hidden; + background-color: #272b30; +} +.dropdown-menu > li > a { + display: block; + padding: 3px 20px; + clear: both; + font-weight: normal; + line-height: 1.42857143; + color: #c8c8c8; + white-space: nowrap; +} +.dropdown-menu > li > a:hover, +.dropdown-menu > li > a:focus { + text-decoration: none; + color: #ffffff; + background-color: #272b30; +} +.dropdown-menu > .active > a, +.dropdown-menu > .active > a:hover, +.dropdown-menu > .active > a:focus { + color: #ffffff; + text-decoration: none; + outline: 0; + background-color: #272b30; +} +.dropdown-menu > .disabled > a, +.dropdown-menu > .disabled > a:hover, +.dropdown-menu > .disabled > a:focus { + color: #7a8288; +} +.dropdown-menu > .disabled > a:hover, +.dropdown-menu > .disabled > a:focus { + text-decoration: none; + background-color: transparent; + background-image: none; + filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); + cursor: not-allowed; +} +.open > .dropdown-menu { + display: block; +} +.open > a { + 
outline: 0; +} +.dropdown-menu-right { + left: auto; + right: 0; +} +.dropdown-menu-left { + left: 0; + right: auto; +} +.dropdown-header { + display: block; + padding: 3px 20px; + font-size: 12px; + line-height: 1.42857143; + color: #7a8288; + white-space: nowrap; +} +.dropdown-backdrop { + position: fixed; + left: 0; + right: 0; + bottom: 0; + top: 0; + z-index: 990; +} +.pull-right > .dropdown-menu { + right: 0; + left: auto; +} +.dropup .caret, +.navbar-fixed-bottom .dropdown .caret { + border-top: 0; + border-bottom: 4px dashed; + border-bottom: 4px solid \9; + content: ""; +} +.dropup .dropdown-menu, +.navbar-fixed-bottom .dropdown .dropdown-menu { + top: auto; + bottom: 100%; + margin-bottom: 2px; +} +@media (min-width: 768px) { + .navbar-right .dropdown-menu { + left: auto; + right: 0; + } + .navbar-right .dropdown-menu-left { + left: 0; + right: auto; + } +} +.btn-group, +.btn-group-vertical { + position: relative; + display: inline-block; + vertical-align: middle; +} +.btn-group > .btn, +.btn-group-vertical > .btn { + position: relative; + float: left; +} +.btn-group > .btn:hover, +.btn-group-vertical > .btn:hover, +.btn-group > .btn:focus, +.btn-group-vertical > .btn:focus, +.btn-group > .btn:active, +.btn-group-vertical > .btn:active, +.btn-group > .btn.active, +.btn-group-vertical > .btn.active { + z-index: 2; +} +.btn-group .btn + .btn, +.btn-group .btn + .btn-group, +.btn-group .btn-group + .btn, +.btn-group .btn-group + .btn-group { + margin-left: -1px; +} +.btn-toolbar { + margin-left: -5px; +} +.btn-toolbar .btn, +.btn-toolbar .btn-group, +.btn-toolbar .input-group { + float: left; +} +.btn-toolbar > .btn, +.btn-toolbar > .btn-group, +.btn-toolbar > .input-group { + margin-left: 5px; +} +.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { + border-radius: 0; +} +.btn-group > .btn:first-child { + margin-left: 0; +} +.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { + border-bottom-right-radius: 0; + border-top-right-radius: 0; +} +.btn-group > .btn:last-child:not(:first-child), +.btn-group > .dropdown-toggle:not(:first-child) { + border-bottom-left-radius: 0; + border-top-left-radius: 0; +} +.btn-group > .btn-group { + float: left; +} +.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { + border-radius: 0; +} +.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child, +.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle { + border-bottom-right-radius: 0; + border-top-right-radius: 0; +} +.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child { + border-bottom-left-radius: 0; + border-top-left-radius: 0; +} +.btn-group .dropdown-toggle:active, +.btn-group.open .dropdown-toggle { + outline: 0; +} +.btn-group > .btn + .dropdown-toggle { + padding-left: 8px; + padding-right: 8px; +} +.btn-group > .btn-lg + .dropdown-toggle { + padding-left: 12px; + padding-right: 12px; +} +.btn-group.open .dropdown-toggle { + -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); +} +.btn-group.open .dropdown-toggle.btn-link { + -webkit-box-shadow: none; + box-shadow: none; +} +.btn .caret { + margin-left: 0; +} +.btn-lg .caret { + border-width: 5px 5px 0; + border-bottom-width: 0; +} +.dropup .btn-lg .caret { + border-width: 0 5px 5px; +} +.btn-group-vertical > .btn, +.btn-group-vertical > .btn-group, +.btn-group-vertical > .btn-group > .btn { + display: block; + float: none; + width: 100%; + max-width: 100%; +} 
+.btn-group-vertical > .btn-group > .btn { + float: none; +} +.btn-group-vertical > .btn + .btn, +.btn-group-vertical > .btn + .btn-group, +.btn-group-vertical > .btn-group + .btn, +.btn-group-vertical > .btn-group + .btn-group { + margin-top: -1px; + margin-left: 0; +} +.btn-group-vertical > .btn:not(:first-child):not(:last-child) { + border-radius: 0; +} +.btn-group-vertical > .btn:first-child:not(:last-child) { + border-top-right-radius: 4px; + border-top-left-radius: 4px; + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn:last-child:not(:first-child) { + border-top-right-radius: 0; + border-top-left-radius: 0; + border-bottom-right-radius: 4px; + border-bottom-left-radius: 4px; +} +.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn { + border-radius: 0; +} +.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child, +.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child { + border-top-right-radius: 0; + border-top-left-radius: 0; +} +.btn-group-justified { + display: table; + width: 100%; + table-layout: fixed; + border-collapse: separate; +} +.btn-group-justified > .btn, +.btn-group-justified > .btn-group { + float: none; + display: table-cell; + width: 1%; +} +.btn-group-justified > .btn-group .btn { + width: 100%; +} +.btn-group-justified > .btn-group .dropdown-menu { + left: auto; +} +[data-toggle="buttons"] > .btn input[type="radio"], +[data-toggle="buttons"] > .btn-group > .btn input[type="radio"], +[data-toggle="buttons"] > .btn input[type="checkbox"], +[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] { + position: absolute; + clip: rect(0, 0, 0, 0); + pointer-events: none; +} +.input-group { + position: relative; + display: table; + border-collapse: separate; +} +.input-group[class*="col-"] { + float: none; + padding-left: 0; + padding-right: 0; +} +.input-group .form-control { + position: relative; + z-index: 2; + float: left; + width: 100%; + margin-bottom: 0; +} +.input-group .form-control:focus { + z-index: 3; +} +.input-group-lg > .form-control, +.input-group-lg > .input-group-addon, +.input-group-lg > .input-group-btn > .btn { + height: 54px; + padding: 14px 16px; + font-size: 18px; + line-height: 1.3333333; + border-radius: 6px; +} +select.input-group-lg > .form-control, +select.input-group-lg > .input-group-addon, +select.input-group-lg > .input-group-btn > .btn { + height: 54px; + line-height: 54px; +} +textarea.input-group-lg > .form-control, +textarea.input-group-lg > .input-group-addon, +textarea.input-group-lg > .input-group-btn > .btn, +select[multiple].input-group-lg > .form-control, +select[multiple].input-group-lg > .input-group-addon, +select[multiple].input-group-lg > .input-group-btn > .btn { + height: auto; +} +.input-group-sm > .form-control, +.input-group-sm > .input-group-addon, +.input-group-sm > .input-group-btn > .btn { + height: 30px; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 3px; +} +select.input-group-sm > .form-control, +select.input-group-sm > .input-group-addon, +select.input-group-sm > .input-group-btn > .btn { + height: 30px; + line-height: 30px; +} +textarea.input-group-sm > .form-control, +textarea.input-group-sm > .input-group-addon, +textarea.input-group-sm > .input-group-btn > .btn, +select[multiple].input-group-sm > 
.form-control, +select[multiple].input-group-sm > .input-group-addon, +select[multiple].input-group-sm > .input-group-btn > .btn { + height: auto; +} +.input-group-addon, +.input-group-btn, +.input-group .form-control { + display: table-cell; +} +.input-group-addon:not(:first-child):not(:last-child), +.input-group-btn:not(:first-child):not(:last-child), +.input-group .form-control:not(:first-child):not(:last-child) { + border-radius: 0; +} +.input-group-addon, +.input-group-btn { + width: 1%; + white-space: nowrap; + vertical-align: middle; +} +.input-group-addon { + padding: 8px 12px; + font-size: 14px; + font-weight: normal; + line-height: 1; + color: #272b30; + text-align: center; + background-color: #3a3f44; + border: 1px solid rgba(0, 0, 0, 0.6); + border-radius: 4px; +} +.input-group-addon.input-sm { + padding: 5px 10px; + font-size: 12px; + border-radius: 3px; +} +.input-group-addon.input-lg { + padding: 14px 16px; + font-size: 18px; + border-radius: 6px; +} +.input-group-addon input[type="radio"], +.input-group-addon input[type="checkbox"] { + margin-top: 0; +} +.input-group .form-control:first-child, +.input-group-addon:first-child, +.input-group-btn:first-child > .btn, +.input-group-btn:first-child > .btn-group > .btn, +.input-group-btn:first-child > .dropdown-toggle, +.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle), +.input-group-btn:last-child > .btn-group:not(:last-child) > .btn { + border-bottom-right-radius: 0; + border-top-right-radius: 0; +} +.input-group-addon:first-child { + border-right: 0; +} +.input-group .form-control:last-child, +.input-group-addon:last-child, +.input-group-btn:last-child > .btn, +.input-group-btn:last-child > .btn-group > .btn, +.input-group-btn:last-child > .dropdown-toggle, +.input-group-btn:first-child > .btn:not(:first-child), +.input-group-btn:first-child > .btn-group:not(:first-child) > .btn { + border-bottom-left-radius: 0; + border-top-left-radius: 0; +} +.input-group-addon:last-child { + border-left: 0; +} +.input-group-btn { + position: relative; + font-size: 0; + white-space: nowrap; +} +.input-group-btn > .btn { + position: relative; +} +.input-group-btn > .btn + .btn { + margin-left: -1px; +} +.input-group-btn > .btn:hover, +.input-group-btn > .btn:focus, +.input-group-btn > .btn:active { + z-index: 2; +} +.input-group-btn:first-child > .btn, +.input-group-btn:first-child > .btn-group { + margin-right: -1px; +} +.input-group-btn:last-child > .btn, +.input-group-btn:last-child > .btn-group { + z-index: 2; + margin-left: -1px; +} +.nav { + margin-bottom: 0; + padding-left: 0; + list-style: none; +} +.nav > li { + position: relative; + display: block; +} +.nav > li > a { + position: relative; + display: block; + padding: 10px 15px; +} +.nav > li > a:hover, +.nav > li > a:focus { + text-decoration: none; + background-color: #3e444c; +} +.nav > li.disabled > a { + color: #7a8288; +} +.nav > li.disabled > a:hover, +.nav > li.disabled > a:focus { + color: #7a8288; + text-decoration: none; + background-color: transparent; + cursor: not-allowed; +} +.nav .open > a, +.nav .open > a:hover, +.nav .open > a:focus { + background-color: #3e444c; + border-color: #ffffff; +} +.nav .nav-divider { + height: 1px; + margin: 9px 0; + overflow: hidden; + background-color: #e5e5e5; +} +.nav > li > a > img { + max-width: none; +} +.nav-tabs { + border-bottom: 1px solid #1c1e22; +} +.nav-tabs > li { + float: left; + margin-bottom: -1px; +} +.nav-tabs > li > a { + margin-right: 2px; + line-height: 1.42857143; + border: 1px solid 
transparent; + border-radius: 4px 4px 0 0; +} +.nav-tabs > li > a:hover { + border-color: #1c1e22 #1c1e22 #1c1e22; +} +.nav-tabs > li.active > a, +.nav-tabs > li.active > a:hover, +.nav-tabs > li.active > a:focus { + color: #ffffff; + background-color: #3e444c; + border: 1px solid #1c1e22; + border-bottom-color: transparent; + cursor: default; +} +.nav-tabs.nav-justified { + width: 100%; + border-bottom: 0; +} +.nav-tabs.nav-justified > li { + float: none; +} +.nav-tabs.nav-justified > li > a { + text-align: center; + margin-bottom: 5px; +} +.nav-tabs.nav-justified > .dropdown .dropdown-menu { + top: auto; + left: auto; +} +@media (min-width: 768px) { + .nav-tabs.nav-justified > li { + display: table-cell; + width: 1%; + } + .nav-tabs.nav-justified > li > a { + margin-bottom: 0; + } +} +.nav-tabs.nav-justified > li > a { + margin-right: 0; + border-radius: 4px; +} +.nav-tabs.nav-justified > .active > a, +.nav-tabs.nav-justified > .active > a:hover, +.nav-tabs.nav-justified > .active > a:focus { + border: 1px solid #1c1e22; +} +@media (min-width: 768px) { + .nav-tabs.nav-justified > li > a { + border-bottom: 1px solid #1c1e22; + border-radius: 4px 4px 0 0; + } + .nav-tabs.nav-justified > .active > a, + .nav-tabs.nav-justified > .active > a:hover, + .nav-tabs.nav-justified > .active > a:focus { + border-bottom-color: #272b30; + } +} +.nav-pills > li { + float: left; +} +.nav-pills > li > a { + border-radius: 4px; +} +.nav-pills > li + li { + margin-left: 2px; +} +.nav-pills > li.active > a, +.nav-pills > li.active > a:hover, +.nav-pills > li.active > a:focus { + color: #ffffff; + background-color: transparent; +} +.nav-stacked > li { + float: none; +} +.nav-stacked > li + li { + margin-top: 2px; + margin-left: 0; +} +.nav-justified { + width: 100%; +} +.nav-justified > li { + float: none; +} +.nav-justified > li > a { + text-align: center; + margin-bottom: 5px; +} +.nav-justified > .dropdown .dropdown-menu { + top: auto; + left: auto; +} +@media (min-width: 768px) { + .nav-justified > li { + display: table-cell; + width: 1%; + } + .nav-justified > li > a { + margin-bottom: 0; + } +} +.nav-tabs-justified { + border-bottom: 0; +} +.nav-tabs-justified > li > a { + margin-right: 0; + border-radius: 4px; +} +.nav-tabs-justified > .active > a, +.nav-tabs-justified > .active > a:hover, +.nav-tabs-justified > .active > a:focus { + border: 1px solid #1c1e22; +} +@media (min-width: 768px) { + .nav-tabs-justified > li > a { + border-bottom: 1px solid #1c1e22; + border-radius: 4px 4px 0 0; + } + .nav-tabs-justified > .active > a, + .nav-tabs-justified > .active > a:hover, + .nav-tabs-justified > .active > a:focus { + border-bottom-color: #272b30; + } +} +.tab-content > .tab-pane { + display: none; +} +.tab-content > .active { + display: block; +} +.nav-tabs .dropdown-menu { + margin-top: -1px; + border-top-right-radius: 0; + border-top-left-radius: 0; +} +.navbar { + position: relative; + min-height: 50px; + margin-bottom: 20px; + border: 1px solid transparent; +} +@media (min-width: 768px) { + .navbar { + border-radius: 4px; + } +} +@media (min-width: 768px) { + .navbar-header { + float: left; + } +} +.navbar-collapse { + overflow-x: visible; + padding-right: 15px; + padding-left: 15px; + border-top: 1px solid transparent; + -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); + box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); + -webkit-overflow-scrolling: touch; +} +.navbar-collapse.in { + overflow-y: auto; +} +@media (min-width: 768px) { + .navbar-collapse { + width: auto; + border-top: 0; 
+ -webkit-box-shadow: none; + box-shadow: none; + } + .navbar-collapse.collapse { + display: block !important; + height: auto !important; + padding-bottom: 0; + overflow: visible !important; + } + .navbar-collapse.in { + overflow-y: visible; + } + .navbar-fixed-top .navbar-collapse, + .navbar-static-top .navbar-collapse, + .navbar-fixed-bottom .navbar-collapse { + padding-left: 0; + padding-right: 0; + } +} +.navbar-fixed-top .navbar-collapse, +.navbar-fixed-bottom .navbar-collapse { + max-height: 340px; +} +@media (max-device-width: 480px) and (orientation: landscape) { + .navbar-fixed-top .navbar-collapse, + .navbar-fixed-bottom .navbar-collapse { + max-height: 200px; + } +} +.container > .navbar-header, +.container-fluid > .navbar-header, +.container > .navbar-collapse, +.container-fluid > .navbar-collapse { + margin-right: -15px; + margin-left: -15px; +} +@media (min-width: 768px) { + .container > .navbar-header, + .container-fluid > .navbar-header, + .container > .navbar-collapse, + .container-fluid > .navbar-collapse { + margin-right: 0; + margin-left: 0; + } +} +.navbar-static-top { + z-index: 1000; + border-width: 0 0 1px; +} +@media (min-width: 768px) { + .navbar-static-top { + border-radius: 0; + } +} +.navbar-fixed-top, +.navbar-fixed-bottom { + position: fixed; + right: 0; + left: 0; + z-index: 1030; +} +@media (min-width: 768px) { + .navbar-fixed-top, + .navbar-fixed-bottom { + border-radius: 0; + } +} +.navbar-fixed-top { + top: 0; + border-width: 0 0 1px; +} +.navbar-fixed-bottom { + bottom: 0; + margin-bottom: 0; + border-width: 1px 0 0; +} +.navbar-brand { + float: left; + padding: 15px 15px; + font-size: 18px; + line-height: 20px; + height: 50px; +} +.navbar-brand:hover, +.navbar-brand:focus { + text-decoration: none; +} +.navbar-brand > img { + display: block; +} +@media (min-width: 768px) { + .navbar > .container .navbar-brand, + .navbar > .container-fluid .navbar-brand { + margin-left: -15px; + } +} +.navbar-toggle { + position: relative; + float: right; + margin-right: 15px; + padding: 9px 10px; + margin-top: 8px; + margin-bottom: 8px; + background-color: transparent; + background-image: none; + border: 1px solid transparent; + border-radius: 4px; +} +.navbar-toggle:focus { + outline: 0; +} +.navbar-toggle .icon-bar { + display: block; + width: 22px; + height: 2px; + border-radius: 1px; +} +.navbar-toggle .icon-bar + .icon-bar { + margin-top: 4px; +} +@media (min-width: 768px) { + .navbar-toggle { + display: none; + } +} +.navbar-nav { + margin: 7.5px -15px; +} +.navbar-nav > li > a { + padding-top: 10px; + padding-bottom: 10px; + line-height: 20px; +} +@media (max-width: 767px) { + .navbar-nav .open .dropdown-menu { + position: static; + float: none; + width: auto; + margin-top: 0; + background-color: transparent; + border: 0; + -webkit-box-shadow: none; + box-shadow: none; + } + .navbar-nav .open .dropdown-menu > li > a, + .navbar-nav .open .dropdown-menu .dropdown-header { + padding: 5px 15px 5px 25px; + } + .navbar-nav .open .dropdown-menu > li > a { + line-height: 20px; + } + .navbar-nav .open .dropdown-menu > li > a:hover, + .navbar-nav .open .dropdown-menu > li > a:focus { + background-image: none; + } +} +@media (min-width: 768px) { + .navbar-nav { + float: left; + margin: 0; + } + .navbar-nav > li { + float: left; + } + .navbar-nav > li > a { + padding-top: 15px; + padding-bottom: 15px; + } +} +.navbar-form { + margin-left: -15px; + margin-right: -15px; + padding: 10px 15px; + border-top: 1px solid transparent; + border-bottom: 1px solid transparent; + 
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); + box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); + margin-top: 6px; + margin-bottom: 6px; +} +@media (min-width: 768px) { + .navbar-form .form-group { + display: inline-block; + margin-bottom: 0; + vertical-align: middle; + } + .navbar-form .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .navbar-form .form-control-static { + display: inline-block; + } + .navbar-form .input-group { + display: inline-table; + vertical-align: middle; + } + .navbar-form .input-group .input-group-addon, + .navbar-form .input-group .input-group-btn, + .navbar-form .input-group .form-control { + width: auto; + } + .navbar-form .input-group > .form-control { + width: 100%; + } + .navbar-form .control-label { + margin-bottom: 0; + vertical-align: middle; + } + .navbar-form .radio, + .navbar-form .checkbox { + display: inline-block; + margin-top: 0; + margin-bottom: 0; + vertical-align: middle; + } + .navbar-form .radio label, + .navbar-form .checkbox label { + padding-left: 0; + } + .navbar-form .radio input[type="radio"], + .navbar-form .checkbox input[type="checkbox"] { + position: relative; + margin-left: 0; + } + .navbar-form .has-feedback .form-control-feedback { + top: 0; + } +} +@media (max-width: 767px) { + .navbar-form .form-group { + margin-bottom: 5px; + } + .navbar-form .form-group:last-child { + margin-bottom: 0; + } +} +@media (min-width: 768px) { + .navbar-form { + width: auto; + border: 0; + margin-left: 0; + margin-right: 0; + padding-top: 0; + padding-bottom: 0; + -webkit-box-shadow: none; + box-shadow: none; + } +} +.navbar-nav > li > .dropdown-menu { + margin-top: 0; + border-top-right-radius: 0; + border-top-left-radius: 0; +} +.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { + margin-bottom: 0; + border-top-right-radius: 4px; + border-top-left-radius: 4px; + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.navbar-btn { + margin-top: 6px; + margin-bottom: 6px; +} +.navbar-btn.btn-sm { + margin-top: 10px; + margin-bottom: 10px; +} +.navbar-btn.btn-xs { + margin-top: 14px; + margin-bottom: 14px; +} +.navbar-text { + margin-top: 15px; + margin-bottom: 15px; +} +@media (min-width: 768px) { + .navbar-text { + float: left; + margin-left: 15px; + margin-right: 15px; + } +} +@media (min-width: 768px) { + .navbar-left { + float: left !important; + } + .navbar-right { + float: right !important; + margin-right: -15px; + } + .navbar-right ~ .navbar-right { + margin-right: 0; + } +} +.navbar-default { + background-color: #3a3f44; + border-color: #2b2e32; +} +.navbar-default .navbar-brand { + color: #c8c8c8; +} +.navbar-default .navbar-brand:hover, +.navbar-default .navbar-brand:focus { + color: #ffffff; + background-color: transparent; +} +.navbar-default .navbar-text { + color: #c8c8c8; +} +.navbar-default .navbar-nav > li > a { + color: #c8c8c8; +} +.navbar-default .navbar-nav > li > a:hover, +.navbar-default .navbar-nav > li > a:focus { + color: #ffffff; + background-color: #272b2e; +} +.navbar-default .navbar-nav > .active > a, +.navbar-default .navbar-nav > .active > a:hover, +.navbar-default .navbar-nav > .active > a:focus { + color: #ffffff; + background-color: #272b2e; +} +.navbar-default .navbar-nav > .disabled > a, +.navbar-default .navbar-nav > .disabled > a:hover, +.navbar-default .navbar-nav > .disabled > a:focus { + color: #cccccc; + background-color: transparent; +} +.navbar-default .navbar-toggle { + 
border-color: #272b2e; +} +.navbar-default .navbar-toggle:hover, +.navbar-default .navbar-toggle:focus { + background-color: #272b2e; +} +.navbar-default .navbar-toggle .icon-bar { + background-color: #c8c8c8; +} +.navbar-default .navbar-collapse, +.navbar-default .navbar-form { + border-color: #2b2e32; +} +.navbar-default .navbar-nav > .open > a, +.navbar-default .navbar-nav > .open > a:hover, +.navbar-default .navbar-nav > .open > a:focus { + background-color: #272b2e; + color: #ffffff; +} +@media (max-width: 767px) { + .navbar-default .navbar-nav .open .dropdown-menu > li > a { + color: #c8c8c8; + } + .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, + .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { + color: #ffffff; + background-color: #272b2e; + } + .navbar-default .navbar-nav .open .dropdown-menu > .active > a, + .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, + .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { + color: #ffffff; + background-color: #272b2e; + } + .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, + .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, + .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { + color: #cccccc; + background-color: transparent; + } +} +.navbar-default .navbar-link { + color: #c8c8c8; +} +.navbar-default .navbar-link:hover { + color: #ffffff; +} +.navbar-default .btn-link { + color: #c8c8c8; +} +.navbar-default .btn-link:hover, +.navbar-default .btn-link:focus { + color: #ffffff; +} +.navbar-default .btn-link[disabled]:hover, +fieldset[disabled] .navbar-default .btn-link:hover, +.navbar-default .btn-link[disabled]:focus, +fieldset[disabled] .navbar-default .btn-link:focus { + color: #cccccc; +} +.navbar-inverse { + background-color: #7a8288; + border-color: #62686d; +} +.navbar-inverse .navbar-brand { + color: #cccccc; +} +.navbar-inverse .navbar-brand:hover, +.navbar-inverse .navbar-brand:focus { + color: #ffffff; + background-color: transparent; +} +.navbar-inverse .navbar-text { + color: #cccccc; +} +.navbar-inverse .navbar-nav > li > a { + color: #cccccc; +} +.navbar-inverse .navbar-nav > li > a:hover, +.navbar-inverse .navbar-nav > li > a:focus { + color: #ffffff; + background-color: #5d6368; +} +.navbar-inverse .navbar-nav > .active > a, +.navbar-inverse .navbar-nav > .active > a:hover, +.navbar-inverse .navbar-nav > .active > a:focus { + color: #ffffff; + background-color: #5d6368; +} +.navbar-inverse .navbar-nav > .disabled > a, +.navbar-inverse .navbar-nav > .disabled > a:hover, +.navbar-inverse .navbar-nav > .disabled > a:focus { + color: #cccccc; + background-color: transparent; +} +.navbar-inverse .navbar-toggle { + border-color: #5d6368; +} +.navbar-inverse .navbar-toggle:hover, +.navbar-inverse .navbar-toggle:focus { + background-color: #5d6368; +} +.navbar-inverse .navbar-toggle .icon-bar { + background-color: #ffffff; +} +.navbar-inverse .navbar-collapse, +.navbar-inverse .navbar-form { + border-color: #697075; +} +.navbar-inverse .navbar-nav > .open > a, +.navbar-inverse .navbar-nav > .open > a:hover, +.navbar-inverse .navbar-nav > .open > a:focus { + background-color: #5d6368; + color: #ffffff; +} +@media (max-width: 767px) { + .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { + border-color: #62686d; + } + .navbar-inverse .navbar-nav .open .dropdown-menu .divider { + background-color: #62686d; + } + .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { + color: #cccccc; + } + 
.navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, + .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { + color: #ffffff; + background-color: #5d6368; + } + .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, + .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, + .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { + color: #ffffff; + background-color: #5d6368; + } + .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a, + .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, + .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { + color: #cccccc; + background-color: transparent; + } +} +.navbar-inverse .navbar-link { + color: #cccccc; +} +.navbar-inverse .navbar-link:hover { + color: #ffffff; +} +.navbar-inverse .btn-link { + color: #cccccc; +} +.navbar-inverse .btn-link:hover, +.navbar-inverse .btn-link:focus { + color: #ffffff; +} +.navbar-inverse .btn-link[disabled]:hover, +fieldset[disabled] .navbar-inverse .btn-link:hover, +.navbar-inverse .btn-link[disabled]:focus, +fieldset[disabled] .navbar-inverse .btn-link:focus { + color: #cccccc; +} +.breadcrumb { + padding: 8px 15px; + margin-bottom: 20px; + list-style: none; + background-color: transparent; + border-radius: 4px; +} +.breadcrumb > li { + display: inline-block; +} +.breadcrumb > li + li:before { + content: "/\00a0"; + padding: 0 5px; + color: #cccccc; +} +.breadcrumb > .active { + color: #7a8288; +} +.pagination { + display: inline-block; + padding-left: 0; + margin: 20px 0; + border-radius: 4px; +} +.pagination > li { + display: inline; +} +.pagination > li > a, +.pagination > li > span { + position: relative; + float: left; + padding: 8px 12px; + line-height: 1.42857143; + text-decoration: none; + color: #ffffff; + background-color: #3a3f44; + border: 1px solid rgba(0, 0, 0, 0.6); + margin-left: -1px; +} +.pagination > li:first-child > a, +.pagination > li:first-child > span { + margin-left: 0; + border-bottom-left-radius: 4px; + border-top-left-radius: 4px; +} +.pagination > li:last-child > a, +.pagination > li:last-child > span { + border-bottom-right-radius: 4px; + border-top-right-radius: 4px; +} +.pagination > li > a:hover, +.pagination > li > span:hover, +.pagination > li > a:focus, +.pagination > li > span:focus { + z-index: 2; + color: #ffffff; + background-color: transparent; + border-color: rgba(0, 0, 0, 0.6); +} +.pagination > .active > a, +.pagination > .active > span, +.pagination > .active > a:hover, +.pagination > .active > span:hover, +.pagination > .active > a:focus, +.pagination > .active > span:focus { + z-index: 3; + color: #ffffff; + background-color: #232628; + border-color: rgba(0, 0, 0, 0.6); + cursor: default; +} +.pagination > .disabled > span, +.pagination > .disabled > span:hover, +.pagination > .disabled > span:focus, +.pagination > .disabled > a, +.pagination > .disabled > a:hover, +.pagination > .disabled > a:focus { + color: #7a8288; + background-color: #ffffff; + border-color: rgba(0, 0, 0, 0.6); + cursor: not-allowed; +} +.pagination-lg > li > a, +.pagination-lg > li > span { + padding: 14px 16px; + font-size: 18px; + line-height: 1.3333333; +} +.pagination-lg > li:first-child > a, +.pagination-lg > li:first-child > span { + border-bottom-left-radius: 6px; + border-top-left-radius: 6px; +} +.pagination-lg > li:last-child > a, +.pagination-lg > li:last-child > span { + border-bottom-right-radius: 6px; + border-top-right-radius: 6px; +} +.pagination-sm > li > a, 
+.pagination-sm > li > span { + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; +} +.pagination-sm > li:first-child > a, +.pagination-sm > li:first-child > span { + border-bottom-left-radius: 3px; + border-top-left-radius: 3px; +} +.pagination-sm > li:last-child > a, +.pagination-sm > li:last-child > span { + border-bottom-right-radius: 3px; + border-top-right-radius: 3px; +} +.pager { + padding-left: 0; + margin: 20px 0; + list-style: none; + text-align: center; +} +.pager li { + display: inline; +} +.pager li > a, +.pager li > span { + display: inline-block; + padding: 5px 14px; + background-color: #3a3f44; + border: 1px solid rgba(0, 0, 0, 0.6); + border-radius: 15px; +} +.pager li > a:hover, +.pager li > a:focus { + text-decoration: none; + background-color: transparent; +} +.pager .next > a, +.pager .next > span { + float: right; +} +.pager .previous > a, +.pager .previous > span { + float: left; +} +.pager .disabled > a, +.pager .disabled > a:hover, +.pager .disabled > a:focus, +.pager .disabled > span { + color: #7a8288; + background-color: #3a3f44; + cursor: not-allowed; +} +.label { + display: inline; + padding: .2em .6em .3em; + font-size: 75%; + font-weight: bold; + line-height: 1; + color: #ffffff; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: .25em; +} +a.label:hover, +a.label:focus { + color: #ffffff; + text-decoration: none; + cursor: pointer; +} +.label:empty { + display: none; +} +.btn .label { + position: relative; + top: -1px; +} +.label-default { + background-color: #3a3f44; +} +.label-default[href]:hover, +.label-default[href]:focus { + background-color: #232628; +} +.label-primary { + background-color: #7a8288; +} +.label-primary[href]:hover, +.label-primary[href]:focus { + background-color: #62686d; +} +.label-success { + background-color: #62c462; +} +.label-success[href]:hover, +.label-success[href]:focus { + background-color: #42b142; +} +.label-info { + background-color: #5bc0de; +} +.label-info[href]:hover, +.label-info[href]:focus { + background-color: #31b0d5; +} +.label-warning { + background-color: #f89406; +} +.label-warning[href]:hover, +.label-warning[href]:focus { + background-color: #c67605; +} +.label-danger { + background-color: #ee5f5b; +} +.label-danger[href]:hover, +.label-danger[href]:focus { + background-color: #e9322d; +} +.badge { + display: inline-block; + min-width: 10px; + padding: 3px 7px; + font-size: 12px; + font-weight: bold; + color: #ffffff; + line-height: 1; + vertical-align: middle; + white-space: nowrap; + text-align: center; + background-color: #7a8288; + border-radius: 10px; +} +.badge:empty { + display: none; +} +.btn .badge { + position: relative; + top: -1px; +} +.btn-xs .badge, +.btn-group-xs > .btn .badge { + top: 0; + padding: 1px 5px; +} +a.badge:hover, +a.badge:focus { + color: #ffffff; + text-decoration: none; + cursor: pointer; +} +.list-group-item.active > .badge, +.nav-pills > .active > a > .badge { + color: #ffffff; + background-color: #7a8288; +} +.list-group-item > .badge { + float: right; +} +.list-group-item > .badge + .badge { + margin-right: 5px; +} +.nav-pills > li > a > .badge { + margin-left: 3px; +} +.jumbotron { + padding-top: 30px; + padding-bottom: 30px; + margin-bottom: 30px; + color: inherit; + background-color: #1c1e22; +} +.jumbotron h1, +.jumbotron .h1 { + color: inherit; +} +.jumbotron p { + margin-bottom: 15px; + font-size: 21px; + font-weight: 200; +} +.jumbotron > hr { + border-top-color: #050506; +} +.container .jumbotron, +.container-fluid 
.jumbotron { + border-radius: 6px; + padding-left: 15px; + padding-right: 15px; +} +.jumbotron .container { + max-width: 100%; +} +@media screen and (min-width: 768px) { + .jumbotron { + padding-top: 48px; + padding-bottom: 48px; + } + .container .jumbotron, + .container-fluid .jumbotron { + padding-left: 60px; + padding-right: 60px; + } + .jumbotron h1, + .jumbotron .h1 { + font-size: 63px; + } +} +.thumbnail { + display: block; + padding: 4px; + margin-bottom: 20px; + line-height: 1.42857143; + background-color: #1c1e22; + border: 1px solid #0c0d0e; + border-radius: 4px; + -webkit-transition: border 0.2s ease-in-out; + -o-transition: border 0.2s ease-in-out; + transition: border 0.2s ease-in-out; +} +.thumbnail > img, +.thumbnail a > img { + margin-left: auto; + margin-right: auto; +} +a.thumbnail:hover, +a.thumbnail:focus, +a.thumbnail.active { + border-color: #ffffff; +} +.thumbnail .caption { + padding: 9px; + color: #c8c8c8; +} +.alert { + padding: 15px; + margin-bottom: 20px; + border: 1px solid transparent; + border-radius: 4px; +} +.alert h4 { + margin-top: 0; + color: inherit; +} +.alert .alert-link { + font-weight: bold; +} +.alert > p, +.alert > ul { + margin-bottom: 0; +} +.alert > p + p { + margin-top: 5px; +} +.alert-dismissable, +.alert-dismissible { + padding-right: 35px; +} +.alert-dismissable .close, +.alert-dismissible .close { + position: relative; + top: -2px; + right: -21px; + color: inherit; +} +.alert-success { + background-color: #62c462; + border-color: #62bd4f; + color: #ffffff; +} +.alert-success hr { + border-top-color: #55b142; +} +.alert-success .alert-link { + color: #e6e6e6; +} +.alert-info { + background-color: #5bc0de; + border-color: #3dced8; + color: #ffffff; +} +.alert-info hr { + border-top-color: #2ac7d2; +} +.alert-info .alert-link { + color: #e6e6e6; +} +.alert-warning { + background-color: #f89406; + border-color: #e96506; + color: #ffffff; +} +.alert-warning hr { + border-top-color: #d05a05; +} +.alert-warning .alert-link { + color: #e6e6e6; +} +.alert-danger { + background-color: #ee5f5b; + border-color: #ed4d63; + color: #ffffff; +} +.alert-danger hr { + border-top-color: #ea364f; +} +.alert-danger .alert-link { + color: #e6e6e6; +} +@-webkit-keyframes progress-bar-stripes { + from { + background-position: 40px 0; + } + to { + background-position: 0 0; + } +} +@-o-keyframes progress-bar-stripes { + from { + background-position: 40px 0; + } + to { + background-position: 0 0; + } +} +@keyframes progress-bar-stripes { + from { + background-position: 40px 0; + } + to { + background-position: 0 0; + } +} +.progress { + overflow: hidden; + height: 20px; + margin-bottom: 20px; + background-color: #1c1e22; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); + box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); +} +.progress-bar { + float: left; + width: 0%; + height: 100%; + font-size: 12px; + line-height: 20px; + color: #ffffff; + text-align: center; + background-color: #7a8288; + -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); + box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); + -webkit-transition: width 0.6s ease; + -o-transition: width 0.6s ease; + transition: width 0.6s ease; +} +.progress-striped .progress-bar, +.progress-bar-striped { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 
25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + -webkit-background-size: 40px 40px; + background-size: 40px 40px; +} +.progress.active .progress-bar, +.progress-bar.active { + -webkit-animation: progress-bar-stripes 2s linear infinite; + -o-animation: progress-bar-stripes 2s linear infinite; + animation: progress-bar-stripes 2s linear infinite; +} +.progress-bar-success { + background-color: #62c462; +} +.progress-striped .progress-bar-success { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); +} +.progress-bar-info { + background-color: #5bc0de; +} +.progress-striped .progress-bar-info { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); +} +.progress-bar-warning { + background-color: #f89406; +} +.progress-striped .progress-bar-warning { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); +} +.progress-bar-danger { + background-color: #ee5f5b; +} +.progress-striped .progress-bar-danger { + background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); +} +.media { + margin-top: 15px; +} +.media:first-child { + 
margin-top: 0; +} +.media, +.media-body { + zoom: 1; + overflow: hidden; +} +.media-body { + width: 10000px; +} +.media-object { + display: block; +} +.media-object.img-thumbnail { + max-width: none; +} +.media-right, +.media > .pull-right { + padding-left: 10px; +} +.media-left, +.media > .pull-left { + padding-right: 10px; +} +.media-left, +.media-right, +.media-body { + display: table-cell; + vertical-align: top; +} +.media-middle { + vertical-align: middle; +} +.media-bottom { + vertical-align: bottom; +} +.media-heading { + margin-top: 0; + margin-bottom: 5px; +} +.media-list { + padding-left: 0; + list-style: none; +} +.list-group { + margin-bottom: 20px; + padding-left: 0; +} +.list-group-item { + position: relative; + display: block; + padding: 10px 15px; + margin-bottom: -1px; + background-color: #32383e; + border: 1px solid rgba(0, 0, 0, 0.6); +} +.list-group-item:first-child { + border-top-right-radius: 4px; + border-top-left-radius: 4px; +} +.list-group-item:last-child { + margin-bottom: 0; + border-bottom-right-radius: 4px; + border-bottom-left-radius: 4px; +} +a.list-group-item, +button.list-group-item { + color: #c8c8c8; +} +a.list-group-item .list-group-item-heading, +button.list-group-item .list-group-item-heading { + color: #ffffff; +} +a.list-group-item:hover, +button.list-group-item:hover, +a.list-group-item:focus, +button.list-group-item:focus { + text-decoration: none; + color: #c8c8c8; + background-color: #3e444c; +} +button.list-group-item { + width: 100%; + text-align: left; +} +.list-group-item.disabled, +.list-group-item.disabled:hover, +.list-group-item.disabled:focus { + background-color: #999999; + color: #7a8288; + cursor: not-allowed; +} +.list-group-item.disabled .list-group-item-heading, +.list-group-item.disabled:hover .list-group-item-heading, +.list-group-item.disabled:focus .list-group-item-heading { + color: inherit; +} +.list-group-item.disabled .list-group-item-text, +.list-group-item.disabled:hover .list-group-item-text, +.list-group-item.disabled:focus .list-group-item-text { + color: #7a8288; +} +.list-group-item.active, +.list-group-item.active:hover, +.list-group-item.active:focus { + z-index: 2; + color: #ffffff; + background-color: #3e444c; + border-color: rgba(0, 0, 0, 0.6); +} +.list-group-item.active .list-group-item-heading, +.list-group-item.active:hover .list-group-item-heading, +.list-group-item.active:focus .list-group-item-heading, +.list-group-item.active .list-group-item-heading > small, +.list-group-item.active:hover .list-group-item-heading > small, +.list-group-item.active:focus .list-group-item-heading > small, +.list-group-item.active .list-group-item-heading > .small, +.list-group-item.active:hover .list-group-item-heading > .small, +.list-group-item.active:focus .list-group-item-heading > .small { + color: inherit; +} +.list-group-item.active .list-group-item-text, +.list-group-item.active:hover .list-group-item-text, +.list-group-item.active:focus .list-group-item-text { + color: #a2aab4; +} +.list-group-item-success { + color: #ffffff; + background-color: #62c462; +} +a.list-group-item-success, +button.list-group-item-success { + color: #ffffff; +} +a.list-group-item-success .list-group-item-heading, +button.list-group-item-success .list-group-item-heading { + color: inherit; +} +a.list-group-item-success:hover, +button.list-group-item-success:hover, +a.list-group-item-success:focus, +button.list-group-item-success:focus { + color: #ffffff; + background-color: #4fbd4f; +} +a.list-group-item-success.active, 
+button.list-group-item-success.active, +a.list-group-item-success.active:hover, +button.list-group-item-success.active:hover, +a.list-group-item-success.active:focus, +button.list-group-item-success.active:focus { + color: #fff; + background-color: #ffffff; + border-color: #ffffff; +} +.list-group-item-info { + color: #ffffff; + background-color: #5bc0de; +} +a.list-group-item-info, +button.list-group-item-info { + color: #ffffff; +} +a.list-group-item-info .list-group-item-heading, +button.list-group-item-info .list-group-item-heading { + color: inherit; +} +a.list-group-item-info:hover, +button.list-group-item-info:hover, +a.list-group-item-info:focus, +button.list-group-item-info:focus { + color: #ffffff; + background-color: #46b8da; +} +a.list-group-item-info.active, +button.list-group-item-info.active, +a.list-group-item-info.active:hover, +button.list-group-item-info.active:hover, +a.list-group-item-info.active:focus, +button.list-group-item-info.active:focus { + color: #fff; + background-color: #ffffff; + border-color: #ffffff; +} +.list-group-item-warning { + color: #ffffff; + background-color: #f89406; +} +a.list-group-item-warning, +button.list-group-item-warning { + color: #ffffff; +} +a.list-group-item-warning .list-group-item-heading, +button.list-group-item-warning .list-group-item-heading { + color: inherit; +} +a.list-group-item-warning:hover, +button.list-group-item-warning:hover, +a.list-group-item-warning:focus, +button.list-group-item-warning:focus { + color: #ffffff; + background-color: #df8505; +} +a.list-group-item-warning.active, +button.list-group-item-warning.active, +a.list-group-item-warning.active:hover, +button.list-group-item-warning.active:hover, +a.list-group-item-warning.active:focus, +button.list-group-item-warning.active:focus { + color: #fff; + background-color: #ffffff; + border-color: #ffffff; +} +.list-group-item-danger { + color: #ffffff; + background-color: #ee5f5b; +} +a.list-group-item-danger, +button.list-group-item-danger { + color: #ffffff; +} +a.list-group-item-danger .list-group-item-heading, +button.list-group-item-danger .list-group-item-heading { + color: inherit; +} +a.list-group-item-danger:hover, +button.list-group-item-danger:hover, +a.list-group-item-danger:focus, +button.list-group-item-danger:focus { + color: #ffffff; + background-color: #ec4844; +} +a.list-group-item-danger.active, +button.list-group-item-danger.active, +a.list-group-item-danger.active:hover, +button.list-group-item-danger.active:hover, +a.list-group-item-danger.active:focus, +button.list-group-item-danger.active:focus { + color: #fff; + background-color: #ffffff; + border-color: #ffffff; +} +.list-group-item-heading { + margin-top: 0; + margin-bottom: 5px; +} +.list-group-item-text { + margin-bottom: 0; + line-height: 1.3; +} +.panel { + margin-bottom: 20px; + background-color: #2e3338; + border: 1px solid transparent; + border-radius: 4px; + -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); + box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); +} +.panel-body { + padding: 15px; +} +.panel-heading { + padding: 10px 15px; + border-bottom: 1px solid transparent; + border-top-right-radius: 3px; + border-top-left-radius: 3px; +} +.panel-heading > .dropdown .dropdown-toggle { + color: inherit; +} +.panel-title { + margin-top: 0; + margin-bottom: 0; + font-size: 16px; + color: inherit; +} +.panel-title > a, +.panel-title > small, +.panel-title > .small, +.panel-title > small > a, +.panel-title > .small > a { + color: inherit; +} +.panel-footer { + padding: 10px 15px; + 
background-color: #3e444c; + border-top: 1px solid rgba(0, 0, 0, 0.6); + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .list-group, +.panel > .panel-collapse > .list-group { + margin-bottom: 0; +} +.panel > .list-group .list-group-item, +.panel > .panel-collapse > .list-group .list-group-item { + border-width: 1px 0; + border-radius: 0; +} +.panel > .list-group:first-child .list-group-item:first-child, +.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child { + border-top: 0; + border-top-right-radius: 3px; + border-top-left-radius: 3px; +} +.panel > .list-group:last-child .list-group-item:last-child, +.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child { + border-bottom: 0; + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child { + border-top-right-radius: 0; + border-top-left-radius: 0; +} +.panel-heading + .list-group .list-group-item:first-child { + border-top-width: 0; +} +.list-group + .panel-footer { + border-top-width: 0; +} +.panel > .table, +.panel > .table-responsive > .table, +.panel > .panel-collapse > .table { + margin-bottom: 0; +} +.panel > .table caption, +.panel > .table-responsive > .table caption, +.panel > .panel-collapse > .table caption { + padding-left: 15px; + padding-right: 15px; +} +.panel > .table:first-child, +.panel > .table-responsive:first-child > .table:first-child { + border-top-right-radius: 3px; + border-top-left-radius: 3px; +} +.panel > .table:first-child > thead:first-child > tr:first-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child, +.panel > .table:first-child > tbody:first-child > tr:first-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child { + border-top-left-radius: 3px; + border-top-right-radius: 3px; +} +.panel > .table:first-child > thead:first-child > tr:first-child td:first-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child, +.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child, +.panel > .table:first-child > thead:first-child > tr:first-child th:first-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child, +.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child { + border-top-left-radius: 3px; +} +.panel > .table:first-child > thead:first-child > tr:first-child td:last-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child, +.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child, +.panel > .table:first-child > thead:first-child > tr:first-child th:last-child, +.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child, +.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child, +.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child 
th:last-child { + border-top-right-radius: 3px; +} +.panel > .table:last-child, +.panel > .table-responsive:last-child > .table:last-child { + border-bottom-right-radius: 3px; + border-bottom-left-radius: 3px; +} +.panel > .table:last-child > tbody:last-child > tr:last-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child { + border-bottom-left-radius: 3px; + border-bottom-right-radius: 3px; +} +.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child, +.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child { + border-bottom-left-radius: 3px; +} +.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child, +.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child, +.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child, +.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child, +.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child { + border-bottom-right-radius: 3px; +} +.panel > .panel-body + .table, +.panel > .panel-body + .table-responsive, +.panel > .table + .panel-body, +.panel > .table-responsive + .panel-body { + border-top: 1px solid #1c1e22; +} +.panel > .table > tbody:first-child > tr:first-child th, +.panel > .table > tbody:first-child > tr:first-child td { + border-top: 0; +} +.panel > .table-bordered, +.panel > .table-responsive > .table-bordered { + border: 0; +} +.panel > .table-bordered > thead > tr > th:first-child, +.panel > .table-responsive > .table-bordered > thead > tr > th:first-child, +.panel > .table-bordered > tbody > tr > th:first-child, +.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child, +.panel > .table-bordered > tfoot > tr > th:first-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child, +.panel > .table-bordered > thead > tr > td:first-child, +.panel > .table-responsive > .table-bordered > thead > tr > td:first-child, +.panel > .table-bordered > tbody > tr > td:first-child, +.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child, +.panel > .table-bordered > tfoot > tr > td:first-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child { + border-left: 0; +} +.panel > .table-bordered > thead > tr > th:last-child, +.panel > .table-responsive > .table-bordered > thead > tr > 
th:last-child, +.panel > .table-bordered > tbody > tr > th:last-child, +.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child, +.panel > .table-bordered > tfoot > tr > th:last-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child, +.panel > .table-bordered > thead > tr > td:last-child, +.panel > .table-responsive > .table-bordered > thead > tr > td:last-child, +.panel > .table-bordered > tbody > tr > td:last-child, +.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child, +.panel > .table-bordered > tfoot > tr > td:last-child, +.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child { + border-right: 0; +} +.panel > .table-bordered > thead > tr:first-child > td, +.panel > .table-responsive > .table-bordered > thead > tr:first-child > td, +.panel > .table-bordered > tbody > tr:first-child > td, +.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td, +.panel > .table-bordered > thead > tr:first-child > th, +.panel > .table-responsive > .table-bordered > thead > tr:first-child > th, +.panel > .table-bordered > tbody > tr:first-child > th, +.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th { + border-bottom: 0; +} +.panel > .table-bordered > tbody > tr:last-child > td, +.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td, +.panel > .table-bordered > tfoot > tr:last-child > td, +.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td, +.panel > .table-bordered > tbody > tr:last-child > th, +.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th, +.panel > .table-bordered > tfoot > tr:last-child > th, +.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th { + border-bottom: 0; +} +.panel > .table-responsive { + border: 0; + margin-bottom: 0; +} +.panel-group { + margin-bottom: 20px; +} +.panel-group .panel { + margin-bottom: 0; + border-radius: 4px; +} +.panel-group .panel + .panel { + margin-top: 5px; +} +.panel-group .panel-heading { + border-bottom: 0; +} +.panel-group .panel-heading + .panel-collapse > .panel-body, +.panel-group .panel-heading + .panel-collapse > .list-group { + border-top: 1px solid rgba(0, 0, 0, 0.6); +} +.panel-group .panel-footer { + border-top: 0; +} +.panel-group .panel-footer + .panel-collapse .panel-body { + border-bottom: 1px solid rgba(0, 0, 0, 0.6); +} +.panel-default { + border-color: rgba(0, 0, 0, 0.6); +} +.panel-default > .panel-heading { + color: #c8c8c8; + background-color: #3e444c; + border-color: rgba(0, 0, 0, 0.6); +} +.panel-default > .panel-heading + .panel-collapse > .panel-body { + border-top-color: rgba(0, 0, 0, 0.6); +} +.panel-default > .panel-heading .badge { + color: #3e444c; + background-color: #c8c8c8; +} +.panel-default > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: rgba(0, 0, 0, 0.6); +} +.panel-primary { + border-color: rgba(0, 0, 0, 0.6); +} +.panel-primary > .panel-heading { + color: #ffffff; + background-color: #7a8288; + border-color: rgba(0, 0, 0, 0.6); +} +.panel-primary > .panel-heading + .panel-collapse > .panel-body { + border-top-color: rgba(0, 0, 0, 0.6); +} +.panel-primary > .panel-heading .badge { + color: #7a8288; + background-color: #ffffff; +} +.panel-primary > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: rgba(0, 0, 0, 0.6); +} +.panel-success { + border-color: rgba(0, 0, 0, 0.6); +} +.panel-success > .panel-heading { + color: #ffffff; + background-color: #62c462; + 
border-color: rgba(0, 0, 0, 0.6); +} +.panel-success > .panel-heading + .panel-collapse > .panel-body { + border-top-color: rgba(0, 0, 0, 0.6); +} +.panel-success > .panel-heading .badge { + color: #62c462; + background-color: #ffffff; +} +.panel-success > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: rgba(0, 0, 0, 0.6); +} +.panel-info { + border-color: rgba(0, 0, 0, 0.6); +} +.panel-info > .panel-heading { + color: #ffffff; + background-color: #5bc0de; + border-color: rgba(0, 0, 0, 0.6); +} +.panel-info > .panel-heading + .panel-collapse > .panel-body { + border-top-color: rgba(0, 0, 0, 0.6); +} +.panel-info > .panel-heading .badge { + color: #5bc0de; + background-color: #ffffff; +} +.panel-info > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: rgba(0, 0, 0, 0.6); +} +.panel-warning { + border-color: rgba(0, 0, 0, 0.6); +} +.panel-warning > .panel-heading { + color: #ffffff; + background-color: #f89406; + border-color: rgba(0, 0, 0, 0.6); +} +.panel-warning > .panel-heading + .panel-collapse > .panel-body { + border-top-color: rgba(0, 0, 0, 0.6); +} +.panel-warning > .panel-heading .badge { + color: #f89406; + background-color: #ffffff; +} +.panel-warning > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: rgba(0, 0, 0, 0.6); +} +.panel-danger { + border-color: rgba(0, 0, 0, 0.6); +} +.panel-danger > .panel-heading { + color: #ffffff; + background-color: #ee5f5b; + border-color: rgba(0, 0, 0, 0.6); +} +.panel-danger > .panel-heading + .panel-collapse > .panel-body { + border-top-color: rgba(0, 0, 0, 0.6); +} +.panel-danger > .panel-heading .badge { + color: #ee5f5b; + background-color: #ffffff; +} +.panel-danger > .panel-footer + .panel-collapse > .panel-body { + border-bottom-color: rgba(0, 0, 0, 0.6); +} +.embed-responsive { + position: relative; + display: block; + height: 0; + padding: 0; + overflow: hidden; +} +.embed-responsive .embed-responsive-item, +.embed-responsive iframe, +.embed-responsive embed, +.embed-responsive object, +.embed-responsive video { + position: absolute; + top: 0; + left: 0; + bottom: 0; + height: 100%; + width: 100%; + border: 0; +} +.embed-responsive-16by9 { + padding-bottom: 56.25%; +} +.embed-responsive-4by3 { + padding-bottom: 75%; +} +.well { + min-height: 20px; + padding: 19px; + margin-bottom: 20px; + background-color: #1c1e22; + border: 1px solid #0c0d0e; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); +} +.well blockquote { + border-color: #ddd; + border-color: rgba(0, 0, 0, 0.15); +} +.well-lg { + padding: 24px; + border-radius: 6px; +} +.well-sm { + padding: 9px; + border-radius: 3px; +} +.close { + float: right; + font-size: 21px; + font-weight: bold; + line-height: 1; + color: #000000; + text-shadow: 0 1px 0 #ffffff; + opacity: 0.2; + filter: alpha(opacity=20); +} +.close:hover, +.close:focus { + color: #000000; + text-decoration: none; + cursor: pointer; + opacity: 0.5; + filter: alpha(opacity=50); +} +button.close { + padding: 0; + cursor: pointer; + background: transparent; + border: 0; + -webkit-appearance: none; +} +.modal-open { + overflow: hidden; +} +.modal { + display: none; + overflow: hidden; + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1050; + -webkit-overflow-scrolling: touch; + outline: 0; +} +.modal.fade .modal-dialog { + -webkit-transform: translate(0, -25%); + -ms-transform: translate(0, -25%); + -o-transform: translate(0, -25%); + transform: 
translate(0, -25%); + -webkit-transition: -webkit-transform 0.3s ease-out; + -o-transition: -o-transform 0.3s ease-out; + transition: transform 0.3s ease-out; +} +.modal.in .modal-dialog { + -webkit-transform: translate(0, 0); + -ms-transform: translate(0, 0); + -o-transform: translate(0, 0); + transform: translate(0, 0); +} +.modal-open .modal { + overflow-x: hidden; + overflow-y: auto; +} +.modal-dialog { + position: relative; + width: auto; + margin: 10px; +} +.modal-content { + position: relative; + background-color: #2e3338; + border: 1px solid #999999; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 6px; + -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); + box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); + -webkit-background-clip: padding-box; + background-clip: padding-box; + outline: 0; +} +.modal-backdrop { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1040; + background-color: #000000; +} +.modal-backdrop.fade { + opacity: 0; + filter: alpha(opacity=0); +} +.modal-backdrop.in { + opacity: 0.5; + filter: alpha(opacity=50); +} +.modal-header { + padding: 15px; + border-bottom: 1px solid #1c1e22; +} +.modal-header .close { + margin-top: -2px; +} +.modal-title { + margin: 0; + line-height: 1.42857143; +} +.modal-body { + position: relative; + padding: 20px; +} +.modal-footer { + padding: 20px; + text-align: right; + border-top: 1px solid #1c1e22; +} +.modal-footer .btn + .btn { + margin-left: 5px; + margin-bottom: 0; +} +.modal-footer .btn-group .btn + .btn { + margin-left: -1px; +} +.modal-footer .btn-block + .btn-block { + margin-left: 0; +} +.modal-scrollbar-measure { + position: absolute; + top: -9999px; + width: 50px; + height: 50px; + overflow: scroll; +} +@media (min-width: 768px) { + .modal-dialog { + width: 600px; + margin: 30px auto; + } + .modal-content { + -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); + box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); + } + .modal-sm { + width: 300px; + } +} +@media (min-width: 992px) { + .modal-lg { + width: 900px; + } +} +.tooltip { + position: absolute; + z-index: 1070; + display: block; + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-style: normal; + font-weight: normal; + letter-spacing: normal; + line-break: auto; + line-height: 1.42857143; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + white-space: normal; + word-break: normal; + word-spacing: normal; + word-wrap: normal; + font-size: 12px; + opacity: 0; + filter: alpha(opacity=0); +} +.tooltip.in { + opacity: 0.9; + filter: alpha(opacity=90); +} +.tooltip.top { + margin-top: -3px; + padding: 5px 0; +} +.tooltip.right { + margin-left: 3px; + padding: 0 5px; +} +.tooltip.bottom { + margin-top: 3px; + padding: 5px 0; +} +.tooltip.left { + margin-left: -3px; + padding: 0 5px; +} +.tooltip-inner { + max-width: 200px; + padding: 3px 8px; + color: #ffffff; + text-align: center; + background-color: #000000; + border-radius: 4px; +} +.tooltip-arrow { + position: absolute; + width: 0; + height: 0; + border-color: transparent; + border-style: solid; +} +.tooltip.top .tooltip-arrow { + bottom: 0; + left: 50%; + margin-left: -5px; + border-width: 5px 5px 0; + border-top-color: #000000; +} +.tooltip.top-left .tooltip-arrow { + bottom: 0; + right: 5px; + margin-bottom: -5px; + border-width: 5px 5px 0; + border-top-color: #000000; +} +.tooltip.top-right .tooltip-arrow { + bottom: 0; + left: 5px; + margin-bottom: -5px; + border-width: 5px 5px 0; + border-top-color: #000000; +} 
+.tooltip.right .tooltip-arrow { + top: 50%; + left: 0; + margin-top: -5px; + border-width: 5px 5px 5px 0; + border-right-color: #000000; +} +.tooltip.left .tooltip-arrow { + top: 50%; + right: 0; + margin-top: -5px; + border-width: 5px 0 5px 5px; + border-left-color: #000000; +} +.tooltip.bottom .tooltip-arrow { + top: 0; + left: 50%; + margin-left: -5px; + border-width: 0 5px 5px; + border-bottom-color: #000000; +} +.tooltip.bottom-left .tooltip-arrow { + top: 0; + right: 5px; + margin-top: -5px; + border-width: 0 5px 5px; + border-bottom-color: #000000; +} +.tooltip.bottom-right .tooltip-arrow { + top: 0; + left: 5px; + margin-top: -5px; + border-width: 0 5px 5px; + border-bottom-color: #000000; +} +.popover { + position: absolute; + top: 0; + left: 0; + z-index: 1060; + display: none; + max-width: 276px; + padding: 1px; + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-style: normal; + font-weight: normal; + letter-spacing: normal; + line-break: auto; + line-height: 1.42857143; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + white-space: normal; + word-break: normal; + word-spacing: normal; + word-wrap: normal; + font-size: 14px; + background-color: #2e3338; + -webkit-background-clip: padding-box; + background-clip: padding-box; + border: 1px solid #999999; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 6px; + -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); + box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); +} +.popover.top { + margin-top: -10px; +} +.popover.right { + margin-left: 10px; +} +.popover.bottom { + margin-top: 10px; +} +.popover.left { + margin-left: -10px; +} +.popover-title { + margin: 0; + padding: 8px 14px; + font-size: 14px; + background-color: #2e3338; + border-bottom: 1px solid #22262a; + border-radius: 5px 5px 0 0; +} +.popover-content { + padding: 9px 14px; +} +.popover > .arrow, +.popover > .arrow:after { + position: absolute; + display: block; + width: 0; + height: 0; + border-color: transparent; + border-style: solid; +} +.popover > .arrow { + border-width: 11px; +} +.popover > .arrow:after { + border-width: 10px; + content: ""; +} +.popover.top > .arrow { + left: 50%; + margin-left: -11px; + border-bottom-width: 0; + border-top-color: #666666; + border-top-color: rgba(0, 0, 0, 0.25); + bottom: -11px; +} +.popover.top > .arrow:after { + content: " "; + bottom: 1px; + margin-left: -10px; + border-bottom-width: 0; + border-top-color: #2e3338; +} +.popover.right > .arrow { + top: 50%; + left: -11px; + margin-top: -11px; + border-left-width: 0; + border-right-color: #666666; + border-right-color: rgba(0, 0, 0, 0.25); +} +.popover.right > .arrow:after { + content: " "; + left: 1px; + bottom: -10px; + border-left-width: 0; + border-right-color: #2e3338; +} +.popover.bottom > .arrow { + left: 50%; + margin-left: -11px; + border-top-width: 0; + border-bottom-color: #666666; + border-bottom-color: rgba(0, 0, 0, 0.25); + top: -11px; +} +.popover.bottom > .arrow:after { + content: " "; + top: 1px; + margin-left: -10px; + border-top-width: 0; + border-bottom-color: #2e3338; +} +.popover.left > .arrow { + top: 50%; + right: -11px; + margin-top: -11px; + border-right-width: 0; + border-left-color: #666666; + border-left-color: rgba(0, 0, 0, 0.25); +} +.popover.left > .arrow:after { + content: " "; + right: 1px; + border-right-width: 0; + border-left-color: #2e3338; + bottom: -10px; +} +.carousel { + position: relative; +} +.carousel-inner { + position: relative; + overflow: hidden; + 
width: 100%; +} +.carousel-inner > .item { + display: none; + position: relative; + -webkit-transition: 0.6s ease-in-out left; + -o-transition: 0.6s ease-in-out left; + transition: 0.6s ease-in-out left; +} +.carousel-inner > .item > img, +.carousel-inner > .item > a > img { + line-height: 1; +} +@media all and (transform-3d), (-webkit-transform-3d) { + .carousel-inner > .item { + -webkit-transition: -webkit-transform 0.6s ease-in-out; + -o-transition: -o-transform 0.6s ease-in-out; + transition: transform 0.6s ease-in-out; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + -webkit-perspective: 1000px; + perspective: 1000px; + } + .carousel-inner > .item.next, + .carousel-inner > .item.active.right { + -webkit-transform: translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0); + left: 0; + } + .carousel-inner > .item.prev, + .carousel-inner > .item.active.left { + -webkit-transform: translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0); + left: 0; + } + .carousel-inner > .item.next.left, + .carousel-inner > .item.prev.right, + .carousel-inner > .item.active { + -webkit-transform: translate3d(0, 0, 0); + transform: translate3d(0, 0, 0); + left: 0; + } +} +.carousel-inner > .active, +.carousel-inner > .next, +.carousel-inner > .prev { + display: block; +} +.carousel-inner > .active { + left: 0; +} +.carousel-inner > .next, +.carousel-inner > .prev { + position: absolute; + top: 0; + width: 100%; +} +.carousel-inner > .next { + left: 100%; +} +.carousel-inner > .prev { + left: -100%; +} +.carousel-inner > .next.left, +.carousel-inner > .prev.right { + left: 0; +} +.carousel-inner > .active.left { + left: -100%; +} +.carousel-inner > .active.right { + left: 100%; +} +.carousel-control { + position: absolute; + top: 0; + left: 0; + bottom: 0; + width: 15%; + opacity: 0.5; + filter: alpha(opacity=50); + font-size: 20px; + color: #ffffff; + text-align: center; + text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); + background-color: rgba(0, 0, 0, 0); +} +.carousel-control.left { + background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%); + background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%); + background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001))); + background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%); + background-repeat: repeat-x; + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); +} +.carousel-control.right { + left: auto; + right: 0; + background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%); + background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%); + background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5))); + background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%); + background-repeat: repeat-x; + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); +} +.carousel-control:hover, +.carousel-control:focus { + outline: 0; + color: #ffffff; + text-decoration: none; + opacity: 0.9; + filter: alpha(opacity=90); +} +.carousel-control .icon-prev, +.carousel-control .icon-next, +.carousel-control .glyphicon-chevron-left, +.carousel-control .glyphicon-chevron-right { + 
position: absolute; + top: 50%; + margin-top: -10px; + z-index: 5; + display: inline-block; +} +.carousel-control .icon-prev, +.carousel-control .glyphicon-chevron-left { + left: 50%; + margin-left: -10px; +} +.carousel-control .icon-next, +.carousel-control .glyphicon-chevron-right { + right: 50%; + margin-right: -10px; +} +.carousel-control .icon-prev, +.carousel-control .icon-next { + width: 20px; + height: 20px; + line-height: 1; + font-family: serif; +} +.carousel-control .icon-prev:before { + content: '\2039'; +} +.carousel-control .icon-next:before { + content: '\203a'; +} +.carousel-indicators { + position: absolute; + bottom: 10px; + left: 50%; + z-index: 15; + width: 60%; + margin-left: -30%; + padding-left: 0; + list-style: none; + text-align: center; +} +.carousel-indicators li { + display: inline-block; + width: 10px; + height: 10px; + margin: 1px; + text-indent: -999px; + border: 1px solid #ffffff; + border-radius: 10px; + cursor: pointer; + background-color: #000 \9; + background-color: rgba(0, 0, 0, 0); +} +.carousel-indicators .active { + margin: 0; + width: 12px; + height: 12px; + background-color: #ffffff; +} +.carousel-caption { + position: absolute; + left: 15%; + right: 15%; + bottom: 20px; + z-index: 10; + padding-top: 20px; + padding-bottom: 20px; + color: #ffffff; + text-align: center; + text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); +} +.carousel-caption .btn { + text-shadow: none; +} +@media screen and (min-width: 768px) { + .carousel-control .glyphicon-chevron-left, + .carousel-control .glyphicon-chevron-right, + .carousel-control .icon-prev, + .carousel-control .icon-next { + width: 30px; + height: 30px; + margin-top: -10px; + font-size: 30px; + } + .carousel-control .glyphicon-chevron-left, + .carousel-control .icon-prev { + margin-left: -10px; + } + .carousel-control .glyphicon-chevron-right, + .carousel-control .icon-next { + margin-right: -10px; + } + .carousel-caption { + left: 20%; + right: 20%; + padding-bottom: 30px; + } + .carousel-indicators { + bottom: 20px; + } +} +.clearfix:before, +.clearfix:after, +.dl-horizontal dd:before, +.dl-horizontal dd:after, +.container:before, +.container:after, +.container-fluid:before, +.container-fluid:after, +.row:before, +.row:after, +.form-horizontal .form-group:before, +.form-horizontal .form-group:after, +.btn-toolbar:before, +.btn-toolbar:after, +.btn-group-vertical > .btn-group:before, +.btn-group-vertical > .btn-group:after, +.nav:before, +.nav:after, +.navbar:before, +.navbar:after, +.navbar-header:before, +.navbar-header:after, +.navbar-collapse:before, +.navbar-collapse:after, +.pager:before, +.pager:after, +.panel-body:before, +.panel-body:after, +.modal-header:before, +.modal-header:after, +.modal-footer:before, +.modal-footer:after { + content: " "; + display: table; +} +.clearfix:after, +.dl-horizontal dd:after, +.container:after, +.container-fluid:after, +.row:after, +.form-horizontal .form-group:after, +.btn-toolbar:after, +.btn-group-vertical > .btn-group:after, +.nav:after, +.navbar:after, +.navbar-header:after, +.navbar-collapse:after, +.pager:after, +.panel-body:after, +.modal-header:after, +.modal-footer:after { + clear: both; +} +.center-block { + display: block; + margin-left: auto; + margin-right: auto; +} +.pull-right { + float: right !important; +} +.pull-left { + float: left !important; +} +.hide { + display: none !important; +} +.show { + display: block !important; +} +.invisible { + visibility: hidden; +} +.text-hide { + font: 0/0 a; + color: transparent; + text-shadow: none; + 
background-color: transparent; + border: 0; +} +.hidden { + display: none !important; +} +.affix { + position: fixed; +} +@-ms-viewport { + width: device-width; +} +.visible-xs, +.visible-sm, +.visible-md, +.visible-lg { + display: none !important; +} +.visible-xs-block, +.visible-xs-inline, +.visible-xs-inline-block, +.visible-sm-block, +.visible-sm-inline, +.visible-sm-inline-block, +.visible-md-block, +.visible-md-inline, +.visible-md-inline-block, +.visible-lg-block, +.visible-lg-inline, +.visible-lg-inline-block { + display: none !important; +} +@media (max-width: 767px) { + .visible-xs { + display: block !important; + } + table.visible-xs { + display: table !important; + } + tr.visible-xs { + display: table-row !important; + } + th.visible-xs, + td.visible-xs { + display: table-cell !important; + } +} +@media (max-width: 767px) { + .visible-xs-block { + display: block !important; + } +} +@media (max-width: 767px) { + .visible-xs-inline { + display: inline !important; + } +} +@media (max-width: 767px) { + .visible-xs-inline-block { + display: inline-block !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm { + display: block !important; + } + table.visible-sm { + display: table !important; + } + tr.visible-sm { + display: table-row !important; + } + th.visible-sm, + td.visible-sm { + display: table-cell !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm-block { + display: block !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm-inline { + display: inline !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .visible-sm-inline-block { + display: inline-block !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md { + display: block !important; + } + table.visible-md { + display: table !important; + } + tr.visible-md { + display: table-row !important; + } + th.visible-md, + td.visible-md { + display: table-cell !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md-block { + display: block !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md-inline { + display: inline !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .visible-md-inline-block { + display: inline-block !important; + } +} +@media (min-width: 1200px) { + .visible-lg { + display: block !important; + } + table.visible-lg { + display: table !important; + } + tr.visible-lg { + display: table-row !important; + } + th.visible-lg, + td.visible-lg { + display: table-cell !important; + } +} +@media (min-width: 1200px) { + .visible-lg-block { + display: block !important; + } +} +@media (min-width: 1200px) { + .visible-lg-inline { + display: inline !important; + } +} +@media (min-width: 1200px) { + .visible-lg-inline-block { + display: inline-block !important; + } +} +@media (max-width: 767px) { + .hidden-xs { + display: none !important; + } +} +@media (min-width: 768px) and (max-width: 991px) { + .hidden-sm { + display: none !important; + } +} +@media (min-width: 992px) and (max-width: 1199px) { + .hidden-md { + display: none !important; + } +} +@media (min-width: 1200px) { + .hidden-lg { + display: none !important; + } +} +.visible-print { + display: none !important; +} +@media print { + .visible-print { + display: block !important; + } + table.visible-print { + display: table !important; + } + tr.visible-print { + display: table-row !important; + } + th.visible-print, + td.visible-print { + 
display: table-cell !important; + } +} +.visible-print-block { + display: none !important; +} +@media print { + .visible-print-block { + display: block !important; + } +} +.visible-print-inline { + display: none !important; +} +@media print { + .visible-print-inline { + display: inline !important; + } +} +.visible-print-inline-block { + display: none !important; +} +@media print { + .visible-print-inline-block { + display: inline-block !important; + } +} +@media print { + .hidden-print { + display: none !important; + } +} +.navbar-default, +.navbar-inverse { + border: 1px solid rgba(0, 0, 0, 0.6); + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); +} +@media (min-width: 768px) { + .navbar-default .navbar-nav > li > a, + .navbar-inverse .navbar-nav > li > a { + border-right: 1px solid rgba(0, 0, 0, 0.2); + border-left: 1px solid rgba(255, 255, 255, 0.1); + } + .navbar-default .navbar-nav > li > a:hover, + .navbar-inverse .navbar-nav > li > a:hover { + border-left-color: transparent; + } + .navbar-default .nav .open > a, + .navbar-inverse .nav .open > a { + border-color: transparent; + } + .navbar-default .navbar-nav > li.active > a, + .navbar-inverse .navbar-nav > li.active > a { + border-left-color: transparent; + } + .navbar-default .navbar-form, + .navbar-inverse .navbar-form { + margin-left: 5px; + margin-right: 5px; + } +} +.navbar-default { + -webkit-filter: none; + filter: none; +} +.navbar-default .navbar-nav > li > a:hover { + -webkit-filter: none; + filter: none; +} +.navbar-inverse { + -webkit-filter: none; + filter: none; +} +.navbar-inverse .badge { + background-color: #5d6368; +} +.navbar-inverse .navbar-nav > li > a:hover { + -webkit-filter: none; + filter: none; +} +.btn, +.btn:hover { + border-color: rgba(0, 0, 0, 0.6); + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); +} +.btn-default { + -webkit-filter: none; + filter: none; +} +.btn-default:hover { + -webkit-filter: none; + filter: none; +} +.btn-primary { + -webkit-filter: none; + filter: none; +} +.btn-primary:hover { + -webkit-filter: none; + filter: none; +} +.btn-success { + -webkit-filter: none; + filter: none; +} +.btn-success:hover { + -webkit-filter: none; + filter: none; +} +.btn-info { + -webkit-filter: none; + filter: none; +} +.btn-info:hover { + -webkit-filter: none; + filter: none; +} +.btn-warning { + -webkit-filter: none; + filter: none; +} +.btn-warning:hover { + -webkit-filter: none; + filter: none; +} +.btn-danger { + -webkit-filter: none; + filter: none; +} +.btn-danger:hover { + -webkit-filter: none; + filter: none; +} +.btn-link, +.btn-link:hover { + border-color: transparent; +} +h1, +h2, +h3, +h4, +h5, +h6 { + text-shadow: -1px -1px 0 rgba(0, 0, 0, 0.3); +} +.text-primary, +.text-primary:hover { + color: #7a8288; +} +.text-success, +.text-success:hover { + color: #62c462; +} +.text-danger, +.text-danger:hover { + color: #ee5f5b; +} +.text-warning, +.text-warning:hover { + color: #f89406; +} +.text-info, +.text-info:hover { + color: #5bc0de; +} +.table .success, +.table .warning, +.table .danger, +.table .info { + color: #fff; +} +.table-bordered tbody tr.success td, +.table-bordered tbody tr.warning td, +.table-bordered tbody tr.danger td, +.table-bordered tbody tr.success:hover td, +.table-bordered tbody tr.warning:hover td, +.table-bordered tbody tr.danger:hover td { + border-color: #1c1e22; +} +.table-responsive > .table { + background-color: #2e3338; +} +input, +textarea { + color: #272b30; +} +.has-warning .help-block, +.has-warning .control-label, +.has-warning .radio, +.has-warning .checkbox, 
+.has-warning .radio-inline, +.has-warning .checkbox-inline, +.has-warning.radio label, +.has-warning.checkbox label, +.has-warning.radio-inline label, +.has-warning.checkbox-inline label, +.has-warning .form-control-feedback { + color: #f89406; +} +.has-warning .form-control, +.has-warning .form-control:focus { + border-color: #f89406; +} +.has-warning .input-group-addon { + border-color: rgba(0, 0, 0, 0.6); +} +.has-error .help-block, +.has-error .control-label, +.has-error .radio, +.has-error .checkbox, +.has-error .radio-inline, +.has-error .checkbox-inline, +.has-error.radio label, +.has-error.checkbox label, +.has-error.radio-inline label, +.has-error.checkbox-inline label, +.has-error .form-control-feedback { + color: #ee5f5b; +} +.has-error .form-control, +.has-error .form-control:focus { + border-color: #ee5f5b; +} +.has-error .input-group-addon { + border-color: rgba(0, 0, 0, 0.6); +} +.has-success .help-block, +.has-success .control-label, +.has-success .radio, +.has-success .checkbox, +.has-success .radio-inline, +.has-success .checkbox-inline, +.has-success.radio label, +.has-success.checkbox label, +.has-success.radio-inline label, +.has-success.checkbox-inline label, +.has-success .form-control-feedback { + color: #62c462; +} +.has-success .form-control, +.has-success .form-control:focus { + border-color: #62c462; +} +.has-success .input-group-addon { + border-color: rgba(0, 0, 0, 0.6); +} +legend { + color: #fff; +} +.input-group-addon { + -webkit-filter: none; + filter: none; + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); + color: #ffffff; +} +.nav .open > a, +.nav .open > a:hover, +.nav .open > a:focus { + border-color: rgba(0, 0, 0, 0.6); +} +.nav-pills > li > a { + -webkit-filter: none; + filter: none; + border: 1px solid rgba(0, 0, 0, 0.6); + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); +} +.nav-pills > li > a:hover { + -webkit-filter: none; + filter: none; + border: 1px solid rgba(0, 0, 0, 0.6); +} +.nav-pills > li.active > a, +.nav-pills > li.active > a:hover { + background-color: transparent; + -webkit-filter: none; + filter: none; + border: 1px solid rgba(0, 0, 0, 0.6); +} +.nav-pills > li.disabled > a, +.nav-pills > li.disabled > a:hover { + -webkit-filter: none; + filter: none; +} +.pagination > li > a, +.pagination > li > span { + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); + -webkit-filter: none; + filter: none; +} +.pagination > li > a:hover, +.pagination > li > span:hover { + -webkit-filter: none; + filter: none; +} +.pagination > li.active > a, +.pagination > li.active > span { + -webkit-filter: none; + filter: none; +} +.pagination > li.disabled > a, +.pagination > li.disabled > a:hover, +.pagination > li.disabled > span, +.pagination > li.disabled > span:hover { + -webkit-filter: none; + filter: none; +} +.pager > li > a { + -webkit-filter: none; + filter: none; + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); +} +.pager > li > a:hover { + -webkit-filter: none; + filter: none; +} +.pager > li.disabled > a, +.pager > li.disabled > a:hover { + -webkit-filter: none; + filter: none; +} +.breadcrumb { + border: 1px solid rgba(0, 0, 0, 0.6); + text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3); + -webkit-filter: none; + filter: none; +} +.alert .alert-link, +.alert a { + color: #fff; + text-decoration: underline; +} +.alert .close { + color: #000000; + text-decoration: none; +} +a.thumbnail:hover, +a.thumbnail:focus, +a.thumbnail.active { + border-color: #0c0d0e; +} +a.list-group-item.active, +a.list-group-item.active:hover, +a.list-group-item.active:focus { + border-color:
rgba(0, 0, 0, 0.6); +} +a.list-group-item-success.active { + background-color: #62c462; +} +a.list-group-item-success.active:hover, +a.list-group-item-success.active:focus { + background-color: #4fbd4f; +} +a.list-group-item-warning.active { + background-color: #f89406; +} +a.list-group-item-warning.active:hover, +a.list-group-item-warning.active:focus { + background-color: #df8505; +} +a.list-group-item-danger.active { + background-color: #ee5f5b; +} +a.list-group-item-danger.active:hover, +a.list-group-item-danger.active:focus { + background-color: #ec4844; +} +.jumbotron { + border: 1px solid rgba(0, 0, 0, 0.6); +} +.panel-primary .panel-heading, +.panel-success .panel-heading, +.panel-danger .panel-heading, +.panel-warning .panel-heading, +.panel-info .panel-heading { + border-color: #000; +}
diff --git a/web/gui/css/bootstrap-slider-10.0.0.min.css b/web/gui/css/bootstrap-slider-10.0.0.min.css
new file mode 100644
index 000000000..095be9514
--- /dev/null
+++ b/web/gui/css/bootstrap-slider-10.0.0.min.css
@@ -0,0 +1,22 @@
+/*! =======================================================
+ VERSION 10.0.0
+========================================================= */
+/*! =========================================================
+ * bootstrap-slider.js
+ *
+ * Maintainers:
+ * Kyle Kemp
+ * - Twitter: @seiyria
+ * - Github: seiyria
+ * Rohit Kalkur
+ * - Twitter: @Rovolutionary
+ * - Github: rovolution
+ *
+ * =========================================================
+ *
+ * bootstrap-slider is released under the MIT License
+ * Copyright (c) 2017 Kyle Kemp, Rohit Kalkur, and contributors
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * ========================================================= */.slider{display:inline-block;vertical-align:middle;position:relative}.slider.slider-horizontal{width:210px;height:20px}.slider.slider-horizontal .slider-track{height:10px;width:100%;margin-top:-5px;top:50%;left:0}.slider.slider-horizontal .slider-selection,.slider.slider-horizontal .slider-track-low,.slider.slider-horizontal .slider-track-high{height:100%;top:0;bottom:0}.slider.slider-horizontal .slider-tick,.slider.slider-horizontal .slider-handle{margin-left:-10px}.slider.slider-horizontal .slider-tick.triangle,.slider.slider-horizontal .slider-handle.triangle{position:relative;top:50%;-ms-transform:translateY(-50%);transform:translateY(-50%);border-width:0 10px 10px 10px;width:0;height:0;border-bottom-color:#2e6da4;margin-top:0}.slider.slider-horizontal .slider-tick-container{white-space:nowrap;position:absolute;top:0;left:0;width:100%}.slider.slider-horizontal .slider-tick-label-container{white-space:nowrap;margin-top:20px}.slider.slider-horizontal .slider-tick-label-container .slider-tick-label{padding-top:4px;display:inline-block;text-align:center}.slider.slider-horizontal .tooltip{-ms-transform:translateX(-50%);transform:translateX(-50%)}.slider.slider-horizontal.slider-rtl .slider-track{left:initial;right:0}.slider.slider-horizontal.slider-rtl .slider-tick,.slider.slider-horizontal.slider-rtl .slider-handle{margin-left:initial;margin-right:-10px}.slider.slider-horizontal.slider-rtl .slider-tick-container{left:initial;right:0}.slider.slider-horizontal.slider-rtl .tooltip{-ms-transform:translateX(50%);transform:translateX(50%)}.slider.slider-vertical{height:210px;width:20px}.slider.slider-vertical .slider-track{width:10px;height:100%;left:25%;top:0}.slider.slider-vertical .slider-selection{width:100%;left:0;top:0;bottom:0}.slider.slider-vertical .slider-track-low,.slider.slider-vertical
.slider-track-high{width:100%;left:0;right:0}.slider.slider-vertical .slider-tick,.slider.slider-vertical .slider-handle{margin-top:-10px}.slider.slider-vertical .slider-tick.triangle,.slider.slider-vertical .slider-handle.triangle{border-width:10px 0 10px 10px;width:1px;height:1px;border-left-color:#2e6da4;border-right-color:#2e6da4;margin-left:0;margin-right:0}.slider.slider-vertical .slider-tick-label-container{white-space:nowrap}.slider.slider-vertical .slider-tick-label-container .slider-tick-label{padding-left:4px}.slider.slider-vertical .tooltip{-ms-transform:translateY(-50%);transform:translateY(-50%)}.slider.slider-vertical.slider-rtl .slider-track{left:initial;right:25%}.slider.slider-vertical.slider-rtl .slider-selection{left:initial;right:0}.slider.slider-vertical.slider-rtl .slider-tick.triangle,.slider.slider-vertical.slider-rtl .slider-handle.triangle{border-width:10px 10px 10px 0}.slider.slider-vertical.slider-rtl .slider-tick-label-container .slider-tick-label{padding-left:initial;padding-right:4px}.slider.slider-disabled .slider-handle{background-image:-webkit-linear-gradient(top,#dfdfdf 0,#bebebe 100%);background-image:-o-linear-gradient(top,#dfdfdf 0,#bebebe 100%);background-image:linear-gradient(to bottom,#dfdfdf 0,#bebebe 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdfdfdf',endColorstr='#ffbebebe',GradientType=0)}.slider.slider-disabled .slider-track{background-image:-webkit-linear-gradient(top,#e5e5e5 0,#e9e9e9 100%);background-image:-o-linear-gradient(top,#e5e5e5 0,#e9e9e9 100%);background-image:linear-gradient(to bottom,#e5e5e5 0,#e9e9e9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe5e5e5',endColorstr='#ffe9e9e9',GradientType=0);cursor:not-allowed}.slider input{display:none}.slider .tooltip.top{margin-top:-36px}.slider .tooltip-inner{white-space:nowrap;max-width:none}.slider .hide{display:none}.slider-track{position:absolute;cursor:pointer;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#f9f9f9 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#f9f9f9 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#f9f9f9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);border-radius:4px}.slider-selection{position:absolute;background-image:-webkit-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#f9f9f9 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;border-radius:4px}.slider-selection.tick-slider-selection{background-image:-webkit-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:-o-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:linear-gradient(to bottom,#8ac1ef 0,#82b3de 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff8ac1ef',endColorstr='#ff82b3de',GradientType=0)}.slider-track-low,.slider-track-high{position:absolute;background:transparent;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;border-radius:4px}.slider-handle{position:absolute;top:0;width:20px;height:20px;background-color:#337ab7;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7',endColorstr='#ff2e6da4',GradientType=0);filter:none;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05);border:0 solid transparent}.slider-handle.round{border-radius:50%}.slider-handle.triangle{background:transparent none}.slider-handle.custom{background:transparent none}.slider-handle.custom::before{line-height:20px;font-size:20px;content:'\2605';color:#726204}.slider-tick{position:absolute;width:20px;height:20px;background-image:-webkit-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#f9f9f9 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;filter:none;opacity:.8;border:0 solid transparent}.slider-tick.round{border-radius:50%}.slider-tick.triangle{background:transparent none}.slider-tick.custom{background:transparent none}.slider-tick.custom::before{line-height:20px;font-size:20px;content:'\2605';color:#726204}.slider-tick.in-selection{background-image:-webkit-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:-o-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:linear-gradient(to bottom,#8ac1ef 0,#82b3de 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff8ac1ef',endColorstr='#ff82b3de',GradientType=0);opacity:1}
diff --git a/web/gui/css/bootstrap-theme-3.3.7.min.css b/web/gui/css/bootstrap-theme-3.3.7.min.css
new file mode 100644
index 000000000..ba77cff5d
--- /dev/null
+++ b/web/gui/css/bootstrap-theme-3.3.7.min.css
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + * SPDX-License-Identifier: MIT + */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} +/*# sourceMappingURL=bootstrap-theme.min.css.map */ diff --git a/web/gui/css/bootstrap-toggle-2.2.2.min.css b/web/gui/css/bootstrap-toggle-2.2.2.min.css new file mode 100644 index 000000000..a3daa3721 --- /dev/null +++ b/web/gui/css/bootstrap-toggle-2.2.2.min.css @@ -0,0 +1,29 @@ +/*! 
======================================================================== + * Bootstrap Toggle: bootstrap-toggle.css v2.2.0 + * http://www.bootstraptoggle.com + * ======================================================================== + * Copyright 2014 Min Hur, The New York Times Company + * Licensed under MIT + * SPDX-License-Identifier: MIT + * ======================================================================== */ +.checkbox label .toggle,.checkbox-inline .toggle{margin-left:-20px;margin-right:5px} +.toggle{position:relative;overflow:hidden} +.toggle input[type=checkbox]{display:none} +.toggle-group{position:absolute;width:200%;top:0;bottom:0;left:0;transition:left .35s;-webkit-transition:left .35s;-moz-user-select:none;-webkit-user-select:none} +.toggle.off .toggle-group{left:-100%} +.toggle-on{position:absolute;top:0;bottom:0;left:0;right:50%;margin:0;border:0;border-radius:0} +.toggle-off{position:absolute;top:0;bottom:0;left:50%;right:0;margin:0;border:0;border-radius:0} +.toggle-handle{position:relative;margin:0 auto;padding-top:0;padding-bottom:0;height:100%;width:0;border-width:0 1px} +.toggle.btn{min-width:59px;min-height:34px} +.toggle-on.btn{padding-right:24px} +.toggle-off.btn{padding-left:24px} +.toggle.btn-lg{min-width:79px;min-height:45px} +.toggle-on.btn-lg{padding-right:31px} +.toggle-off.btn-lg{padding-left:31px} +.toggle-handle.btn-lg{width:40px} +.toggle.btn-sm{min-width:50px;min-height:30px} +.toggle-on.btn-sm{padding-right:20px} +.toggle-off.btn-sm{padding-left:20px} +.toggle.btn-xs{min-width:35px;min-height:22px} +.toggle-on.btn-xs{padding-right:12px} +.toggle-off.btn-xs{padding-left:12px} diff --git a/web/gui/css/c3-0.4.18.min.css b/web/gui/css/c3-0.4.18.min.css new file mode 100644 index 000000000..a033d7203 --- /dev/null +++ b/web/gui/css/c3-0.4.18.min.css @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: MIT */ +.c3 svg{font:10px sans-serif;-webkit-tap-highlight-color:transparent}.c3 line,.c3 path{fill:none;stroke:#000}.c3 text{-webkit-user-select:none;-moz-user-select:none;user-select:none}.c3-bars path,.c3-event-rect,.c3-legend-item-tile,.c3-xgrid-focus,.c3-ygrid{shape-rendering:crispEdges}.c3-chart-arc path{stroke:#fff}.c3-chart-arc text{fill:#fff;font-size:13px}.c3-grid line{stroke:#aaa}.c3-grid text{fill:#aaa}.c3-xgrid,.c3-ygrid{stroke-dasharray:3 3}.c3-text.c3-empty{fill:grey;font-size:2em}.c3-line{stroke-width:1px}.c3-circle._expanded_{stroke-width:1px;stroke:#fff}.c3-selected-circle{fill:#fff;stroke-width:2px}.c3-bar{stroke-width:0}.c3-bar._expanded_{fill-opacity:1;fill-opacity:.75}.c3-target.c3-focused{opacity:1}.c3-target.c3-focused path.c3-line,.c3-target.c3-focused path.c3-step{stroke-width:2px}.c3-target.c3-defocused{opacity:.3!important}.c3-region{fill:#4682b4;fill-opacity:.1}.c3-brush .extent{fill-opacity:.1}.c3-legend-item{font-size:12px}.c3-legend-item-hidden{opacity:.15}.c3-legend-background{opacity:.75;fill:#fff;stroke:#d3d3d3;stroke-width:1}.c3-title{font:14px sans-serif}.c3-tooltip-container{z-index:10}.c3-tooltip{border-collapse:collapse;border-spacing:0;background-color:#fff;empty-cells:show;-webkit-box-shadow:7px 7px 12px -9px #777;-moz-box-shadow:7px 7px 12px -9px #777;box-shadow:7px 7px 12px -9px #777;opacity:.9}.c3-tooltip tr{border:1px solid #ccc}.c3-tooltip th{background-color:#aaa;font-size:14px;padding:2px 5px;text-align:left;color:#fff}.c3-tooltip td{font-size:13px;padding:3px 6px;background-color:#fff;border-left:1px dotted #999}.c3-tooltip td>span{display:inline-block;width:10px;height:10px;margin-right:6px}.c3-tooltip 
td.value{text-align:right}.c3-area{stroke-width:0;opacity:.2}.c3-chart-arcs-title{dominant-baseline:middle;font-size:1.3em}.c3-chart-arcs .c3-chart-arcs-background{fill:#e0e0e0;stroke:none}.c3-chart-arcs .c3-chart-arcs-gauge-unit{fill:#000;font-size:16px}.c3-chart-arcs .c3-chart-arcs-gauge-max{fill:#777}.c3-chart-arcs .c3-chart-arcs-gauge-min{fill:#777}.c3-chart-arc .c3-gauge-value{fill:#000}.c3-chart-arc.c3-target g path{opacity:1}.c3-chart-arc.c3-target.c3-focused g path{opacity:1} diff --git a/web/gui/css/morris-0.5.1.css b/web/gui/css/morris-0.5.1.css new file mode 100644 index 000000000..39203d314 --- /dev/null +++ b/web/gui/css/morris-0.5.1.css @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +.morris-hover{position:absolute;z-index:1000}.morris-hover.morris-default-style{border-radius:10px;padding:6px;color:#666;background:rgba(255,255,255,0.8);border:solid 2px rgba(230,230,230,0.8);font-family:sans-serif;font-size:12px;text-align:center}.morris-hover.morris-default-style .morris-hover-row-label{font-weight:bold;margin:0.25em 0} +.morris-hover.morris-default-style .morris-hover-point{white-space:nowrap;margin:0.1em 0} diff --git a/web/gui/dashboard.css b/web/gui/dashboard.css new file mode 100644 index 000000000..8062497d0 --- /dev/null +++ b/web/gui/dashboard.css @@ -0,0 +1,739 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +html, +body { + /*font-family: Calibri,"Segoe UI","Helvetica Neue",Helvetica,Arial,sans-serif;*/ + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-style: normal; + font-variant: normal; +} + +.morelink { + color: #765d9c; + text-decoration: none; +} + +.morelink:hover { + color: #563d7c; + text-decoration: none; +} + +.morelink:focus { + color: #765d9c; + text-decoration: none; +} + +.netdata-chart-alignment { + margin-left: 55px; +} + +.netdata-chart-row { + width: 100%; + text-align: center; + display: flex; + display: -webkit-flex; + display: -moz-flex; + align-items: baseline; + -moz-align-items: baseline; + -webkit-align-items: baseline; + justify-content: center; + -webkit-justify-content: center; + -moz-justify-content: center; + padding-top: 10px; +} + +.netdata-container { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-container-gauge { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-container-gauge:after { + padding-top: 60%; + display: block; + content: ''; +} + +.netdata-container-easypiechart { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-container-easypiechart:after { + padding-top: 100%; + display: block; + content: ''; +} + +.netdata-aspect { + position: relative; + width: 100%; + padding: 0px; + margin: 0px; +} + +.netdata-container-with-legend { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* fix minimum scrollbar issue in firefox */ + min-height: 99px; + + /* required for child elements to have absolute position */ + position: relative; + + /* 
width and height is given per chart with data-width and data-height */ +} + +.netdata-legend-resize-handler { + display: block; + position: absolute; + bottom: 0px; + right: 0px; + height: 15px; + width: 20px; + background-color: White; + font-size: 15px; + vertical-align: middle; + line-height: 15px; + cursor: ns-resize; + color: #DDDDDD; + text-align: center; + overflow: hidden; + z-index: 20; + padding: 0px; + margin: 0px; +} + +.netdata-legend-toolbox { + display: block; + position: absolute; + bottom: 0px; + right: 30px; + height: 15px; + width: 110px; + background-color: White; + font-size: 12px; + vertical-align: middle; + line-height: 15px; + color: #DDDDDD; + text-align: center; + overflow: hidden; + z-index: 20; + padding: 0px; + margin: 0px; + + /* prevent text selection after double click */ + -webkit-user-select: none; /* webkit (safari, chrome) browsers */ + -moz-user-select: none; /* mozilla browsers */ + -khtml-user-select: none; /* webkit (konqueror) browsers */ + -ms-user-select: none; /* IE10+ */ +} + +.netdata-legend-toolbox-button { + display: inline-block; + position: relative; + height: 15px; + width: 18px; + background-color: White; + font-size: 12px; + vertical-align: middle; + line-height: 15px; + color: #CDCDCD; + text-align: center; + overflow: hidden; + z-index: 21; + padding: 0px; + margin: 0px; + cursor: pointer; + + /* prevent text selection after double click */ + -webkit-user-select: none; /* webkit (safari, chrome) browsers */ + -moz-user-select: none; /* mozilla browsers */ + -khtml-user-select: none; /* webkit (konqueror) browsers */ + -ms-user-select: none; /* IE10+ */ +} + +.netdata-message { + display: inline-block; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + text-align: left; + vertical-align: top; + font-weight: bold; + font-size: x-small; + overflow: hidden; + background: inherit; + z-index: 0; +} + +.netdata-message.hidden { + display: none; +} + +.netdata-message.icon { + color: #F8F8F8; + text-align: center; + vertical-align: middle; +} + +.netdata-chart-legend { + position: absolute; /* within .netdata-container */ + top: 0; + right: 0; + overflow: hidden; + text-overflow: ellipsis; + line-height: 14px; + display: block; + width: 140px; /* --legend-width */ + height: calc(100% - 15px); /* 10px for the resize handler and 5px for the top margin */ + font-size: 10px; + margin-top: 5px; + text-align: left; + /* width and height is calculated (depends on the appearance of the legend) */ +} + +.netdata-legend-title-date { + font-size: 10px; + font-weight: normal; + margin-top: 0px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.netdata-legend-title-time { + font-size: 11px; + font-weight: bold; + margin-top: 0px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.netdata-legend-title-units { + position: absolute; + right: 10px; + float: right; + font-size: 11px; + vertical-align: top; + font-weight: normal; + margin-top: 0px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.netdata-legend-series { + position: absolute; + width: 140px; /* legend-width */ + height: calc(100% - 50px); + overflow: hidden; + text-overflow: ellipsis; + line-height: 14.5px; /* line spacing at the legend */ + display: block; + font-size: 10px; + margin-top: 0px; +} + +.netdata-legend-name-table-line { + display: inline-block; + width: 13px; + height: 4px; + border-width: 0px; + border-bottom-width: 2px; + border-bottom-style: solid; + border-bottom-color: white; +} 
+ +.netdata-legend-name-table-area { + display: inline-block; + width: 13px; + height: 5px; + border-width: 1px; + border-top-width: 1px; + border-top-style: solid; + border-top-color: inherit; +} + +.netdata-legend-name-table-stacked { + display: inline-block; + width: 13px; + height: 5px; + border-width: 1px; + border-top-width: 1px; + border-top-style: solid; + border-top-color: inherit; +} + +.netdata-legend-name-tr { +} + +.netdata-legend-name-td { +} + +.netdata-legend-name { + text-align: left; + font-size: 11px; /* legend: dimension name size */ + font-weight: bold; + vertical-align: bottom; + margin-top: 0px; + z-index: 9; + padding: 0px; + width: 80px !important; + max-width: 80px !important; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + display: inline-block; + cursor: pointer; + -webkit-print-color-adjust: exact; +} + +.netdata-legend-value { + /*margin-left: 14px;*/ + position: absolute; + right: 10px; + float: right; + text-align: right; + font-size: 11px; /* legend: dimension value size */ + font-weight: bold; + vertical-align: bottom; + background-color: White; + margin-top: 0px; + z-index: 10; + padding: 0px; + padding-left: 15px; + cursor: pointer; + /* -webkit-font-smoothing: none; */ +} + +.netdata-legend-name.not-selected { + font-weight: normal; + opacity: 0.3; +} + +.netdata-chart { + position: absolute; /* within .netdata-container */ + top: 0; /* within .netdata-container */ + left: 0; /* within .netdata-container */ + display: inline-block; + overflow: hidden; + width: 100%; + height: 100%; + z-index: 5; + + /* width and height is calculated (depends on the appearance of the legend) */ +} + +.netdata-chart-with-legend-right { + position: absolute; /* within .netdata-container */ + top: 0; /* within .netdata-container */ + left: 0; /* within .netdata-container */ + display: block; + overflow: hidden; + margin-right: 140px; /* --legend-width */ + width: calc(100% - 140px); /* --legend-width */ + height: 100%; + z-index: 5; + flex-grow: 1; + + /* width and height is calculated (depends on the appearance of the legend) */ +} + +.netdata-peity-chart { + +} + +.netdata-sparkline-chart { + +} + +.netdata-dygraph-chart { + +} + +.netdata-morris-chart { + +} + +.netdata-google-chart { + +} + +.dygraph-ylabel { +} + +.dygraph-axis-label-x { + overflow-x: hidden; +} + +.dygraph-label-rotate-left { + text-align: center; + /* See http://caniuse.com/#feat=transforms2d */ + transform: rotate(90deg); + -webkit-transform: rotate(90deg); + -moz-transform: rotate(90deg); + -o-transform: rotate(90deg); + -ms-transform: rotate(90deg); +} + +/* For y2-axis label */ +.dygraph-label-rotate-right { + text-align: center; + /* See http://caniuse.com/#feat=transforms2d */ + transform: rotate(-90deg); + -webkit-transform: rotate(-90deg); + -moz-transform: rotate(-90deg); + -o-transform: rotate(-90deg); + -ms-transform: rotate(-90deg); +} + +.dygraph-title { + text-indent: 56px; + text-align: left; + position: absolute; + left: 0px; + top: 4px; + font-size: 11px; + font-weight: bold; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; +} + +/* fix for sparkline tooltip under bootstrap */ +.jqstooltip { + width: auto !important; + height: auto !important; +} + +.easyPieChart { + position: relative; + text-align: center; +} + +.easyPieChart canvas { + position: absolute; + top: 0; + left: 0; +} + +.easyPieChartLabel { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 100%; + text-align: center; + color: #666; + 
font-weight: normal; + text-shadow: #BBB 0px 0px 1px; + /* -webkit-font-smoothing: none; */ +} + +.easyPieChartTitle { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 64%; + margin-left: 18% !important; + text-align: center; + color: #999999; + font-weight: bold; +} + +.easyPieChartUnits { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 60%; + margin-left: 20% !important; + text-align: center; + color: #999999; + font-weight: normal; +} + +.gaugeChart { + position: relative; + text-align: center; +} + +.gaugeChart canvas { + position: absolute; + top: 0; + left: 0; + bottom: 0; + right: 0; + z-index: 0; +} + +.gaugeChartLabel { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 100%; + text-align: center; + color: #FFFFFF; + font-weight: bold; + z-index: 1; + text-shadow: #777 0px 0px 1px; + /* text-shadow: #CCC 1px 1px 0px, #CCC -1px -1px 0px, #CCC 1px -1px 0px, #CCC -1px 1px 0px; */ + /* -webkit-text-stroke: 1px #777; */ + /* -webkit-font-smoothing: none; */ +} + +.gaugeChartTitle { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 100%; + text-align: center; + color: #999999; + font-weight: bold; +} + +.gaugeChartUnits { + display: inline-block; + position: absolute; + float: left; + left: 0; + bottom: 0; + width: 100%; + text-align: left; + margin-left: 5%; + color: #999999; + font-weight: normal; +} + +.gaugeChartMin { + display: inline-block; + position: absolute; + float: left; + left: 0; + bottom: 8%; + width: 92%; + margin-left: 8%; + text-align: left; + color: #999999; + font-weight: normal; +} + +.gaugeChartMax { + display: inline-block; + position: absolute; + float: left; + left: 0; + bottom: 8%; + width: 95%; + margin-right: 5%; + text-align: right; + color: #999999; + font-weight: normal; +} + +.popover-title { + font-weight: bold; + font-size: 12px; +} + +.popover-content { + font-size: 11px; +} + +/* ---------------------------------------------------------------------------- + perfect-scrollbar settings + */ + +.ps-container { + -ms-touch-action: auto; + touch-action: auto; + overflow: hidden !important; + -ms-overflow-style: none; +} + +@supports (-ms-overflow-style: none) { + .ps-container { + overflow: auto !important; + } +} + +@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { + .ps-container { + overflow: auto !important; + } +} + +.ps-container.ps-active-x > .ps-scrollbar-x-rail, +.ps-container.ps-active-y > .ps-scrollbar-y-rail { + display: block; + background-color: transparent; +} + +.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { + background-color: transparent; /* background color when dragged away */ + opacity: 0.9; +} + +.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { + background-color: #aaa; /* scrollbar color when dragged away */ + height: 5px; +} + +.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { + background-color: transparent; /* background color when dragged away */ + opacity: 0.9; +} + +.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { + background-color: #aaa; /* scrollbar color when dragged away */ + width: 5px; +} + +.ps-container > .ps-scrollbar-x-rail { + display: none; + position: absolute; + /* please don't change 'position' */ + opacity: 0.2; /* the opacity when not on hover of the content */ + -webkit-transition: background-color .2s linear, opacity .2s linear; + -o-transition: background-color .2s linear, 
opacity .2s linear; + -moz-transition: background-color .2s linear, opacity .2s linear; + transition: background-color .2s linear, opacity .2s linear; + bottom: 0px; + /* there must be 'bottom' for ps-scrollbar-x-rail */ + height: 15px; +} + +.ps-container > .ps-scrollbar-x-rail > .ps-scrollbar-x { + position: absolute; + /* please don't change 'position' */ + background-color: #666; /* #aaa; the color on content hover */ + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; + -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + bottom: 2px; + /* there must be 'bottom' for ps-scrollbar-x */ + height: 5px; /* the width of the scrollbar */ +} + +.ps-container > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x, .ps-container > .ps-scrollbar-x-rail:active > .ps-scrollbar-x { + height: 4px; +} + +.ps-container > .ps-scrollbar-y-rail { + display: none; + position: absolute; + /* please don't change 'position' */ + opacity: 0.2; /* the opacity when not on hover of the content */ + -webkit-transition: background-color .2s linear, opacity .2s linear; + -o-transition: background-color .2s linear, opacity .2s linear; + -moz-transition: background-color .2s linear, opacity .2s linear; + transition: background-color .2s linear, opacity .2s linear; + right: 0; + /* there must be 'right' for ps-scrollbar-y-rail */ + width: 15px; +} + +.ps-container > .ps-scrollbar-y-rail > .ps-scrollbar-y { + position: absolute; + /* please don't change 'position' */ + background-color: #666; /* #aaa; the color on content hover */ + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; + -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + right: 2px; + /* there must be 'right' for ps-scrollbar-y */ + width: 5px; /* the width of the scrollbar */ +} + +.ps-container > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y, .ps-container > .ps-scrollbar-y-rail:active > .ps-scrollbar-y { + width: 5px; +} + +.ps-container:hover.ps-in-scrolling.ps-x > 
.ps-scrollbar-x-rail { + background-color: transparent; /* background color when dragged */ + opacity: 0.9; +} + +.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { + background-color: #bbb; /* scrollbar color when dragged */ + height: 5px; +} + +.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { + background-color: transparent; /* background color when dragged */ + opacity: 0.9; +} + +.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { + background-color: #bbb; /* scrollbar color when dragged */ + width: 5px; +} + +.ps-container:hover > .ps-scrollbar-x-rail, +.ps-container:hover > .ps-scrollbar-y-rail { + opacity: 0.6; +} + +.ps-container:hover > .ps-scrollbar-x-rail:hover { + background-color: transparent; /* the background color on hover of the scrollbar */ + opacity: 0.9; +} + +.ps-container:hover > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x { + background-color: #999; /* scrollbar color on hover */ +} + +.ps-container:hover > .ps-scrollbar-y-rail:hover { + background-color: transparent; /* the background color on hover of the scrollbar */ + opacity: 0.9; +} + +.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y { + background-color: #999; /* scrollbar color on hover */ +} diff --git a/web/gui/dashboard.html b/web/gui/dashboard.html new file mode 100644 index 000000000..4d0685b08 --- /dev/null +++ b/web/gui/dashboard.html @@ -0,0 +1,701 @@

[dashboard.html head stripped in extraction; page title: NetData Dashboard]

NetData Custom Dashboard

This is a template for building custom dashboards. To build a dashboard you just do this:
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <meta name="apple-mobile-web-app-capable" content="yes">
+    <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+</head>
+<body>
+    <div data-netdata="system.processes"
+        data-chart-library="dygraph"
+        data-width="600"
+        data-height="200"
+        data-after="-600"
+        ></div>
+</body>
+<script type="text/javascript" src="http://netdata.server:19999/dashboard.js"></script>
+</html>

  • You can host your dashboard anywhere.
  • You can add as many charts as you like.
  • You can have charts from many different netdata servers (add data-host="http://another.netdata.server:19999/" to each chart - see the example below).
  • You can use different chart libraries on the same page: peity, sparkline, dygraph, google, morris.
  • You can customize each chart to your preferences. For each chart library, most of its attributes can be given in data- attributes.
  • Each chart can have its own duration - it is controlled with the data-after attribute, which requests that many seconds of data.
  • Depending on the width of the chart and the data-after attribute, netdata will automatically refresh the chart when it needs to be updated. For example, a chart 600 pixels wide showing -600 seconds of data, using a chart library that needs 3 pixels per point, will yield a chart updated once every 3 seconds.
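As a sketch of the above, the following page fragment pulls the same chart twice: once with dygraph from the server hosting the page, and once as a sparkline from a second netdata server via data-host. The host URL and chart ID are placeholders - substitute your own:

<div data-netdata="system.processes"
    data-chart-library="dygraph"
    data-width="600"
    data-height="200"
    data-after="-600"
    ></div>

<div data-netdata="system.processes"
    data-host="http://another.netdata.server:19999/"
    data-chart-library="sparkline"
    data-width="200"
    data-height="30"
    data-after="-120"
    ></div>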
Sparkline Charts

Sparkline charts support 'NULL' values, so the charts can indicate that values are missing. Sparkline charts stretch the values to show the variations between values in more detail. They also have mouse-hover support.

Sparklines are fantastic. You can inline charts in text. For example, this [inline sparkline] is my current cpu usage (last 30 seconds), while this [inline sparkline] is the bandwidth my netdata server is currently transmitting (last minute) and this [inline sparkline] is the requests/sec it serves (last 3 minutes).

[three sparkline chart containers follow, each with a "rendered in X ms" counter]
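Since every chart is just an element with data- attributes, an inline sparkline is simply a small container dropped into the middle of the text. A minimal sketch (the chart ID is a placeholder):

My server is currently at
<div data-netdata="system.cpu"
    data-chart-library="sparkline"
    data-width="80"
    data-height="20"
    data-after="-30"
    ></div>
cpu usage (last 30 seconds).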
Peity Charts

Peity charts do not support 'NULL' values, so the charts cannot indicate that values are missing. Peity charts cannot have multiple dimensions on the charts - so netdata will use 'min2max' to show the total of all dimensions.

[three peity chart containers follow, each with a "rendered in X ms" counter]
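A peity chart is embedded like any other; the 'min2max' summing of dimensions happens automatically, with no extra markup needed. A minimal sketch (the chart ID is a placeholder):

<div data-netdata="system.io"
    data-chart-library="peity"
    data-width="200"
    data-height="30"
    data-after="-120"
    ></div>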
Dygraph Charts

The fastest charting engine that can draw complete charts (not just sparklines). The charts are zoomable (drag their contents to pan, shift + mouse wheel to zoom in or out, double-click to reset). Netdata magic! Realtime charts on your web page!

Sparklines using dygraphs [inline chart] are also possible! This [inline chart] is an area chart, while this [inline chart] is a stacked area chart!

[twelve dygraph chart containers follow, each with a "rendered in X ms" counter]
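A dygraph chart is embedded like the others, and library-specific options can be passed through additional data- attributes. The data-dygraph-type attribute below is an assumption based on that convention, not something this page confirms:

<!-- data-dygraph-type (assumed) would select line/area/stacked rendering -->
<div data-netdata="system.cpu"
    data-chart-library="dygraph"
    data-dygraph-type="stacked"
    data-width="600"
    data-height="200"
    data-after="-300"
    ></div>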
EasyPieChart

[six easypiechart containers follow, each with a "rendered in X ms" counter]
Gauge.js

[six gauge.js chart containers follow, each with a "rendered in X ms" counter]
Google Charts

NetData was originally developed with Google Charts. NetData is a complete Google Visualization API provider.

[three google chart containers follow, each with a "rendered in X ms" counter]
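Since netdata itself answers the Google Visualization API queries the library makes, a google chart embed needs nothing special. A minimal sketch (the chart ID is a placeholder, and data-points - used here to cap the number of points requested - is an assumption, not an attribute shown on this page):

<div data-netdata="system.ipv4"
    data-chart-library="google"
    data-width="400"
    data-height="200"
    data-after="-600"
    data-points="300"
    ></div>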
Morris Charts

Unfortunately, Morris Charts are very slow. Here we force them to lower their detail to get acceptable results.

[three morris chart containers follow, each with a "rendered in X ms" counter]
C3 Charts

C3 charts are not usable at large scale. They suffer from the following issues:

  • extreme use of transitions (implemented with D3 instead of CSS, meaning they are javascript rendered) that cannot be disabled - even opacity is hardcoded in the javascript library
  • rendering is done with SVG instead of canvas, so they use DOM elements for every point, becoming useless if more than 500 points are drawn
  • lack of a raw data format, so every time a chart is updated, data conversion in javascript is required
  • lack of stacked charts support

So, to avoid flashing the charts, we destroy and re-create the charts on each update. Also, since they manipulate the data with javascript, we were forced to lower the detail they render to get acceptable speeds.

[three C3 chart containers follow, each with a "rendered in X ms" counter]
d3pie Charts

[three d3pie chart containers follow, each with a "rendered in X ms" counter]
diff --git a/web/gui/dashboard.js b/web/gui/dashboard.js new file mode 100644 index 000000000..16fbf88d0 --- /dev/null +++ b/web/gui/dashboard.js @@ -0,0 +1,9512 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +// ---------------------------------------------------------------------------- +// You can set the following variables before loading this script: + +/*global netdataNoDygraphs *//* boolean, disable dygraph charts + * (default: false) */ +/*global netdataNoSparklines *//* boolean, disable sparkline charts + * (default: false) */ +/*global netdataNoPeitys *//* boolean, disable peity charts + * (default: false) */ +/*global netdataNoGoogleCharts *//* boolean, disable google charts + * (default: false) */ +/*global netdataNoMorris *//* boolean, disable morris charts + * (default: false) */ +/*global netdataNoEasyPieChart *//* boolean, disable easypiechart charts + * (default: false) */ +/*global netdataNoGauge *//* boolean, disable gauge.js charts + * (default: false) */ +/*global netdataNoD3 *//* boolean, disable d3 charts + * (default: false) */ +/*global netdataNoC3 *//* boolean, disable c3 charts + * (default: false) */ +/*global netdataNoD3pie *//* boolean, disable d3pie charts + * (default: false) */ +/*global netdataNoBootstrap *//* boolean, disable bootstrap - disables help too + * (default: false) */ +/*global netdataNoFontAwesome *//* boolean, disable fontawesome (do not load it) + * (default: false) */ +/*global netdataIcons *//* object, overwrite netdata fontawesome icons + * (default: null) */ +/*global netdataDontStart *//* boolean, do not start the thread to process the charts + * (default: false) */ +/*global netdataErrorCallback *//* function, callback to be called when the dashboard encounters an error + * (default: null) */ +/*global netdataRegistry:true *//* boolean, use the netdata registry + * (default: false) */ +/*global netdataNoRegistry *//* boolean, included only for compatibility with existing custom dashboards + * (obsolete - do not use this any more) */ +/*global netdataRegistryCallback *//* function, callback that will be invoked with one param: the URLs from the registry + * (default: null) */ +/*global netdataShowHelp:true *//* boolean, disable charts help + * (default: true) */ +/*global netdataShowAlarms:true *//* boolean, enable alarms checks and notifications + * (default: false) */ +/*global netdataRegistryAfterMs:true *//* ms, delay registry use at startup + * (default: 1500) */ +/*global netdataCallback *//* function, callback to be called when netdata is ready to start + * (default: null) + * netdata will be running while this is called + * (call NETDATA.pause to stop it) */ +/*global netdataPrepCallback *//* function, callback to be called before netdata does anything else + * (default: null) */ +/*global netdataServer *//* string, the URL of the netdata server to use + * (default: the URL the page is hosted at) */ +/*global netdataServerStatic *//* string, the URL of the netdata server to use for static files + * (default: netdataServer) */ +/*global netdataSnapshotData *//* object, a netdata snapshot loaded + * (default: null) */ +/*global netdataAlarmsRecipients *//* array, an array of alarm recipients to show notifications for + * (default: null) */ +/*global netdataAlarmsRemember *//* boolean, keep our position in the alarm log at browser local storage + * (default: true) */ +/*global netdataAlarmsActiveCallback *//* function, a hook for the alarm logs + * (default: undefined) */ +/*global netdataAlarmsNotifCallback 
*//* function, a hook for alarm notifications + * (default: undefined) */ +/*global netdataIntersectionObserver *//* boolean, enable or disable the use of intersection observer + * (default: true) */ +/*global netdataCheckXSS *//* boolean, enable or disable checking for XSS issues + * (default: false) */ + +// ---------------------------------------------------------------------------- +// global namespace + +var NETDATA = window.NETDATA || {}; + +(function(window, document, $, undefined) { + + NETDATA.encodeURIComponent = function(s) { + if(typeof(s) === 'string') + return encodeURIComponent(s); + + return s; + }; + + // ------------------------------------------------------------------------ + // compatibility fixes + + // fix IE issue with console + if(!window.console) { window.console = { log: function(){} }; } + + // if string.endsWith is not defined, define it + if(typeof String.prototype.endsWith !== 'function') { + String.prototype.endsWith = function(s) { + if(s.length > this.length) return false; + return this.slice(-s.length) === s; + }; + } + + // if string.startsWith is not defined, define it + if(typeof String.prototype.startsWith !== 'function') { + String.prototype.startsWith = function(s) { + if(s.length > this.length) return false; + return this.slice(0, s.length) === s; + }; + } + + NETDATA.name2id = function(s) { + return s + .replace(/ /g, '_') + .replace(/\(/g, '_') + .replace(/\)/g, '_') + .replace(/\./g, '_') + .replace(/\//g, '_'); + }; + + // ---------------------------------------------------------------------------------------------------------------- + // XSS checks + + NETDATA.xss = { + enabled: (typeof netdataCheckXSS === 'undefined')?false:netdataCheckXSS, + enabled_for_data: (typeof netdataCheckXSS === 'undefined')?false:netdataCheckXSS, + + string: function (s) { + return s.toString() + .replace(/</g, '&lt;') + .replace(/>/g, '&gt;') + .replace(/"/g, '&quot;') + .replace(/'/g, '&#39;'); + }, + + object: function(name, obj, ignore_regex) { + if(typeof ignore_regex !== 'undefined' && ignore_regex.test(name) === true) { + // console.log('XSS: ignoring "' + name + '"'); + return obj; + } + + switch (typeof(obj)) { + case 'string': + var ret = this.string(obj); + if(ret !== obj) console.log('XSS protection changed string ' + name + ' from "' + obj + '" to "' + ret + '"'); + return ret; + + case 'object': + if(obj === null) return obj; + + if(Array.isArray(obj) === true) { + // console.log('checking array "' + name + '"'); + + var len = obj.length; + while(len--) + obj[len] = this.object(name + '[' + len + ']', obj[len], ignore_regex); + } + else { + // console.log('checking object "' + name + '"'); + + for(var i in obj) { + if(obj.hasOwnProperty(i) === false) continue; + if(this.string(i) !== i) { + console.log('XSS protection removed invalid object member "' + name + '.' + i + '"'); + delete obj[i]; + } + else + obj[i] = this.object(name + '.' 
+ i, obj[i], ignore_regex); + } + } + return obj; + + default: + return obj; + } + }, + + checkOptional: function(name, obj, ignore_regex) { + if(this.enabled === true) { + //console.log('XSS: checking optional "' + name + '"...'); + return this.object(name, obj, ignore_regex); + } + return obj; + }, + + checkAlways: function(name, obj, ignore_regex) { + //console.log('XSS: checking always "' + name + '"...'); + return this.object(name, obj, ignore_regex); + }, + + checkData: function(name, obj, ignore_regex) { + if(this.enabled_for_data === true) { + //console.log('XSS: checking data "' + name + '"...'); + return this.object(name, obj, ignore_regex); + } + return obj; + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Detect the netdata server + + // http://stackoverflow.com/questions/984510/what-is-my-script-src-url + // http://stackoverflow.com/questions/6941533/get-protocol-domain-and-port-from-url + NETDATA._scriptSource = function() { + var script = null; + + if(typeof document.currentScript !== 'undefined') { + script = document.currentScript; + } + else { + var all_scripts = document.getElementsByTagName('script'); + script = all_scripts[all_scripts.length - 1]; + } + + if (typeof script.getAttribute.length !== 'undefined') + script = script.src; + else + script = script.getAttribute('src', -1); + + return script; + }; + + if(typeof netdataServer !== 'undefined') + NETDATA.serverDefault = netdataServer; + else { + var s = NETDATA._scriptSource(); + if(s) NETDATA.serverDefault = s.replace(/\/dashboard.js(\?.*)?$/g, ""); + else { + console.log('WARNING: Cannot detect the URL of the netdata server.'); + NETDATA.serverDefault = null; + } + } + + if(NETDATA.serverDefault === null) + NETDATA.serverDefault = ''; + else if(NETDATA.serverDefault.slice(-1) !== '/') + NETDATA.serverDefault += '/'; + + if(typeof netdataServerStatic !== 'undefined' && netdataServerStatic !== null && netdataServerStatic !== '') { + NETDATA.serverStatic = netdataServerStatic; + if(NETDATA.serverStatic.slice(-1) !== '/') + NETDATA.serverStatic += '/'; + } + else { + NETDATA.serverStatic = NETDATA.serverDefault; + } + + + // default URLs for all the external files we need + // make them RELATIVE so that the whole thing can also be + // installed under a web server + NETDATA.jQuery = NETDATA.serverStatic + 'lib/jquery-2.2.4.min.js'; + NETDATA.peity_js = NETDATA.serverStatic + 'lib/jquery.peity-3.2.0.min.js'; + NETDATA.sparkline_js = NETDATA.serverStatic + 'lib/jquery.sparkline-2.1.2.min.js'; + NETDATA.easypiechart_js = NETDATA.serverStatic + 'lib/jquery.easypiechart-97b5824.min.js'; + NETDATA.gauge_js = NETDATA.serverStatic + 'lib/gauge-1.3.2.min.js'; + NETDATA.dygraph_js = NETDATA.serverStatic + 'lib/dygraph-c91c859.min.js'; + NETDATA.dygraph_smooth_js = NETDATA.serverStatic + 'lib/dygraph-smooth-plotter-c91c859.js'; + NETDATA.raphael_js = NETDATA.serverStatic + 'lib/raphael-2.2.4-min.js'; + NETDATA.c3_js = NETDATA.serverStatic + 'lib/c3-0.4.18.min.js'; + NETDATA.c3_css = NETDATA.serverStatic + 'css/c3-0.4.18.min.css'; + NETDATA.d3pie_js = NETDATA.serverStatic + 'lib/d3pie-0.2.1-netdata-3.js'; + NETDATA.d3_js = NETDATA.serverStatic + 'lib/d3-4.12.2.min.js'; + NETDATA.morris_js = NETDATA.serverStatic + 'lib/morris-0.5.1.min.js'; + NETDATA.morris_css = NETDATA.serverStatic + 'css/morris-0.5.1.css'; + NETDATA.google_js = 'https://www.google.com/jsapi'; + + NETDATA.themes = { + white: { + bootstrap_css: NETDATA.serverStatic + 
'css/bootstrap-3.3.7.css', + dashboard_css: NETDATA.serverStatic + 'dashboard.css?v20180210-1', + background: '#FFFFFF', + foreground: '#000000', + grid: '#F0F0F0', + axis: '#F0F0F0', + highlight: '#F5F5F5', + colors: [ '#3366CC', '#DC3912', '#109618', '#FF9900', '#990099', '#DD4477', + '#3B3EAC', '#66AA00', '#0099C6', '#B82E2E', '#AAAA11', '#5574A6', + '#994499', '#22AA99', '#6633CC', '#E67300', '#316395', '#8B0707', + '#329262', '#3B3EAC' ], + easypiechart_track: '#f0f0f0', + easypiechart_scale: '#dfe0e0', + gauge_pointer: '#C0C0C0', + gauge_stroke: '#F0F0F0', + gauge_gradient: false, + d3pie: { + title: '#333333', + subtitle: '#666666', + footer: '#888888', + other: '#aaaaaa', + mainlabel: '#333333', + percentage: '#dddddd', + value: '#aaaa22', + tooltip_bg: '#000000', + tooltip_fg: '#efefef', + segment_stroke: "#ffffff", + gradient_color: '#000000' + } + }, + slate: { + bootstrap_css: NETDATA.serverStatic + 'css/bootstrap-slate-flat-3.3.7.css?v20161229-1', + dashboard_css: NETDATA.serverStatic + 'dashboard.slate.css?v20180210-1', + background: '#272b30', + foreground: '#C8C8C8', + grid: '#283236', + axis: '#283236', + highlight: '#383838', +/* colors: [ '#55bb33', '#ff2222', '#0099C6', '#faa11b', '#adbce0', '#DDDD00', + '#4178ba', '#f58122', '#a5cc39', '#f58667', '#f5ef89', '#cf93c0', + '#a5d18a', '#b8539d', '#3954a3', '#c8a9cf', '#c7de8a', '#fad20a', + '#a6a479', '#a66da8' ], +*/ + colors: [ '#66AA00', '#FE3912', '#3366CC', '#D66300', '#0099C6', '#DDDD00', + '#5054e6', '#EE9911', '#BB44CC', '#e45757', '#ef0aef', '#CC7700', + '#22AA99', '#109618', '#905bfd', '#f54882', '#4381bf', '#ff3737', + '#329262', '#3B3EFF' ], + easypiechart_track: '#373b40', + easypiechart_scale: '#373b40', + gauge_pointer: '#474b50', + gauge_stroke: '#373b40', + gauge_gradient: false, + d3pie: { + title: '#C8C8C8', + subtitle: '#283236', + footer: '#283236', + other: '#283236', + mainlabel: '#C8C8C8', + percentage: '#dddddd', + value: '#cccc44', + tooltip_bg: '#272b30', + tooltip_fg: '#C8C8C8', + segment_stroke: "#283236", + gradient_color: '#000000' + } + } + }; + + if(typeof netdataTheme !== 'undefined' && typeof NETDATA.themes[netdataTheme] !== 'undefined') + NETDATA.themes.current = NETDATA.themes[netdataTheme]; + else + NETDATA.themes.current = NETDATA.themes.white; + + NETDATA.colors = NETDATA.themes.current.colors; + + // these are the colors Google Charts are using + // we have them here to attempt emulate their look and feel on the other chart libraries + // http://there4.io/2012/05/02/google-chart-color-list/ + //NETDATA.colors = [ '#3366CC', '#DC3912', '#FF9900', '#109618', '#990099', '#3B3EAC', '#0099C6', + // '#DD4477', '#66AA00', '#B82E2E', '#316395', '#994499', '#22AA99', '#AAAA11', + // '#6633CC', '#E67300', '#8B0707', '#329262', '#5574A6', '#3B3EAC' ]; + + // an alternative set + // http://www.mulinblog.com/a-color-palette-optimized-for-data-visualization/ + // (blue) (red) (orange) (green) (pink) (brown) (purple) (yellow) (gray) + //NETDATA.colors = [ '#5DA5DA', '#F15854', '#FAA43A', '#60BD68', '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#4D4D4D' ]; + + NETDATA.icons = { + left: '', + reset: '', + right: '', + zoomIn: '', + zoomOut: '', + resize: '', + lineChart: '', + areaChart: '', + noChart: '', + loading: '', + noData: '' + }; + + if(typeof netdataIcons === 'object') { + for(var icon in NETDATA.icons) { + if(NETDATA.icons.hasOwnProperty(icon) && typeof(netdataIcons[icon]) === 'string') + NETDATA.icons[icon] = netdataIcons[icon]; + } + } + + if(typeof netdataSnapshotData === 'undefined') + 
netdataSnapshotData = null; + + if(typeof netdataShowHelp === 'undefined') + netdataShowHelp = true; + + if(typeof netdataShowAlarms === 'undefined') + netdataShowAlarms = false; + + if(typeof netdataRegistryAfterMs !== 'number' || netdataRegistryAfterMs < 0) + netdataRegistryAfterMs = 1500; + + if(typeof netdataRegistry === 'undefined') { + // backward compatibility + netdataRegistry = (typeof netdataNoRegistry !== 'undefined' && netdataNoRegistry === false); + } + if(netdataRegistry === false && typeof netdataRegistryCallback === 'function') + netdataRegistry = true; + + + // ---------------------------------------------------------------------------------------------------------------- + // detect if this is probably a slow device + + var isSlowDeviceResult = undefined; + var isSlowDevice = function() { + if(isSlowDeviceResult !== undefined) + return isSlowDeviceResult; + + try { + var ua = navigator.userAgent.toLowerCase(); + + var iOS = /ipad|iphone|ipod/.test(ua) && !window.MSStream; + var android = /android/.test(ua) && !window.MSStream; + isSlowDeviceResult = (iOS === true || android === true); + } + catch (e) { + isSlowDeviceResult = false; + } + + return isSlowDeviceResult; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // the defaults for all charts + + // if the user does not specify any of these, the following will be used + + NETDATA.chartDefaults = { + width: '100%', // the chart width - can be null + height: '100%', // the chart height - can be null + min_width: null, // the chart minimum width - can be null + library: 'dygraph', // the graphing library to use + method: 'average', // the grouping method + before: 0, // panning + after: -600, // panning + pixels_per_point: 1, // the detail of the chart + fill_luminance: 0.8 // luminance of colors in solid areas + }; + + // ---------------------------------------------------------------------------------------------------------------- + // global options + + NETDATA.options = { + pauseCallback: null, // a callback when we are really paused + + pause: false, // when enabled we don't auto-refresh the charts + + targets: [], // an array of all the state objects that are + // currently active (independently of their + // viewport visibility) + + updated_dom: true, // when true, the DOM has been updated with + // new elements we have to check. + + auto_refresher_fast_weight: 0, // this is the current time in ms, spent + // rendering charts continuously. + // used with .current.fast_render_timeframe + + page_is_visible: true, // when true, this page is visible + + auto_refresher_stop_until: 0, // timestamp in ms - used internally, to stop the + // auto-refresher for some time (when a chart is + // performing pan or zoom, we need to stop refreshing + // all other charts, to have the maximum speed for + // rendering the chart that is panned or zoomed). 
+ // Used with .current.global_pan_sync_time + + on_scroll_refresher_stop_until: 0, // timestamp in ms - used to stop evaluating + // charts for some time, after a page scroll + + last_page_resize: Date.now(), // the timestamp of the last resize request + + last_page_scroll: 0, // the timestamp the last time the page was scrolled + + browser_timezone: 'unknown', // timezone detected by javascript + server_timezone: 'unknown', // timezone reported by the server + + force_data_points: 0, // force the number of points to be returned for charts + fake_chart_rendering: false, // when set to true, the dashboard will download data but will not render the charts + + passive_events: null, // true if the browser supports passive events + + // the current profile + // we may have many... + current: { + units: 'auto', // can be 'auto' or 'original' + temperature: 'celsius', // can be 'celsius' or 'fahrenheit' + seconds_as_time: true, // show seconds as DDd:HH:MM:SS ? + timezone: 'default', // the timezone to use, or 'default' + user_set_server_timezone: 'default', // as set by the user on the dashboard + + legend_toolbox: true, // show the legend toolbox on charts + resize_charts: true, // show the resize handler on charts + + pixels_per_point: isSlowDevice()?5:1, // the minimum pixels per point for all charts + // increase this to speed javascript up + // each chart library has its own limit too + // the max of this and the chart library is used + // the final is calculated every time, so a change + // here will have immediate effect on the next chart + // update + + idle_between_charts: 100, // ms - how much time to wait between chart updates + + fast_render_timeframe: 200, // ms - render continuously until this time of continuous + // rendering has been reached + // this setting is used to make it render e.g. 10 + // charts at once, sleep idle_between_charts time + // and continue for another 10 charts. + + idle_between_loops: 500, // ms - if all charts have been updated, wait this + // time before starting again. + + idle_parallel_loops: 100, // ms - the time between parallel refresher updates + + idle_lost_focus: 500, // ms - when the window does not have focus, check + // if focus has been regained, every this time + + global_pan_sync_time: 300, // ms - when you pan or zoom a chart, the background + // auto-refreshing of charts is paused for this amount + // of time + + sync_selection_delay: 400, // ms - when you pan or zoom a chart, wait this amount + // of time before setting up synchronized selections + // on hover. 
+ + sync_selection: true, // enable or disable selection sync + + pan_and_zoom_delay: 50, // when panning or zooming, how often to update the chart + + sync_pan_and_zoom: true, // enable or disable pan and zoom sync + + pan_and_zoom_data_padding: true, // fetch more data for the master chart when panning or zooming + + update_only_visible: true, // enable or disable visibility management / used for printing + + parallel_refresher: (isSlowDevice() === false), // enable parallel refresh of charts + + concurrent_refreshes: true, // when parallel_refresher is enabled, sync also the charts + + destroy_on_hide: (isSlowDevice() === true), // destroy charts when they are not visible + + show_help: netdataShowHelp, // when enabled the charts will show some help + show_help_delay_show_ms: 500, + show_help_delay_hide_ms: 0, + + eliminate_zero_dimensions: true, // do not show dimensions with just zeros + + stop_updates_when_focus_is_lost: true, // boolean - shall we stop auto-refreshes when document does not have user focus + stop_updates_while_resizing: 1000, // ms - time to stop auto-refreshes while resizing the charts + + double_click_speed: 500, // ms - time between clicks / taps to detect double click/tap + + smooth_plot: (isSlowDevice() === false), // enable smooth plot, where possible + + color_fill_opacity_line: 1.0, + color_fill_opacity_area: 0.2, + color_fill_opacity_stacked: 0.8, + + pan_and_zoom_factor: 0.25, // the increment when panning and zooming with the toolbox + pan_and_zoom_factor_multiplier_control: 2.0, + pan_and_zoom_factor_multiplier_shift: 3.0, + pan_and_zoom_factor_multiplier_alt: 4.0, + + abort_ajax_on_scroll: false, // kill pending ajax page scroll + async_on_scroll: false, // sync/async onscroll handler + onscroll_worker_duration_threshold: 30, // time in ms, for async scroll handler + + retries_on_data_failures: 3, // how many retries to make if we can't fetch chart data from the server + + setOptionCallback: function() { } + }, + + debug: { + show_boxes: false, + main_loop: false, + focus: false, + visibility: false, + chart_data_url: false, + chart_errors: false, // remember to set it to false before merging + chart_timing: false, + chart_calls: false, + libraries: false, + dygraph: false, + globalSelectionSync:false, + globalPanAndZoom: false + } + }; + + NETDATA.statistics = { + refreshes_total: 0, + refreshes_active: 0, + refreshes_active_max: 0 + }; + + + // ---------------------------------------------------------------------------------------------------------------- + + NETDATA.timeout = { + // by default, these are just wrappers to setTimeout() / clearTimeout() + + step: function(callback) { + return window.setTimeout(callback, 1000 / 60); + }, + + set: function(callback, delay) { + return window.setTimeout(callback, delay); + }, + + clear: function(id) { + return window.clearTimeout(id); + }, + + init: function() { + var custom = true; + + if(window.requestAnimationFrame) { + this.step = function(callback) { + return window.requestAnimationFrame(callback); + }; + + this.clear = function(handle) { + return window.cancelAnimationFrame(handle.value); + }; + } + else if(window.webkitRequestAnimationFrame) { + this.step = function(callback) { + return window.webkitRequestAnimationFrame(callback); + }; + + if(window.webkitCancelAnimationFrame) { + this.clear = function (handle) { + return window.webkitCancelAnimationFrame(handle.value); + }; + } + else if(window.webkitCancelRequestAnimationFrame) { + this.clear = function (handle) { + return window.webkitCancelRequestAnimationFrame(handle.value); + }; + } + } + else if(window.mozRequestAnimationFrame) { + this.step = function(callback) { + return window.mozRequestAnimationFrame(callback); + }; + + this.clear = function(handle) { + return window.mozCancelRequestAnimationFrame(handle.value); + }; + } + else if(window.oRequestAnimationFrame) { + this.step = function(callback) { + return window.oRequestAnimationFrame(callback); + }; + + this.clear = function(handle) { + return window.oCancelRequestAnimationFrame(handle.value); + }; + } + else if(window.msRequestAnimationFrame) { + this.step = function(callback) { + return window.msRequestAnimationFrame(callback); + }; + + this.clear = function(handle) { + return window.msCancelRequestAnimationFrame(handle.value); + }; + } + else + custom = false; + + + if(custom === true) { + // we have installed custom .step() / .clear() functions + // overwrite the .set() too + + this.set = function(callback, delay) { + var that = this; + + var start = Date.now(), + handle = new Object(); + + function loop() { + var current = Date.now(), + delta = current - start; + + if(delta >= delay) { + callback.call(); + } + else { + handle.value = that.step(loop); + } + } + + handle.value = that.step(loop); + return handle; + }; + } + } + }; + + NETDATA.timeout.init();
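+ + // Usage sketch (illustrative - nothing in this file calls it like this): + // NETDATA.timeout mirrors setTimeout()/clearTimeout(), but after init() it + // is backed by requestAnimationFrame where available, so callbacks fire + // aligned to animation frames: + // + //     var handle = NETDATA.timeout.set(function() { + //         console.log('ran ~250 ms later, on an animation frame'); + //     }, 250); + //     // NETDATA.timeout.clear(handle); // cancel it while still pending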
+ + + // ---------------------------------------------------------------------------------------------------------------- + // local storage options + + NETDATA.localStorage = { + default: {}, + current: {}, + callback: {} // only used for resetting back to defaults + }; + + NETDATA.localStorageTested = -1; + NETDATA.localStorageTest = function() { + if(NETDATA.localStorageTested !== -1) + return NETDATA.localStorageTested; + + if(typeof Storage !== "undefined" && typeof localStorage === 'object') { + var test = 'test'; + try { + localStorage.setItem(test, test); + localStorage.removeItem(test); + NETDATA.localStorageTested = true; + } + catch (e) { + NETDATA.localStorageTested = false; + } + } + else + NETDATA.localStorageTested = false; + + return NETDATA.localStorageTested; + }; + + NETDATA.localStorageGet = function(key, def, callback) { + var ret = def; + + if(typeof NETDATA.localStorage.default[key.toString()] === 'undefined') { + NETDATA.localStorage.default[key.toString()] = def; + NETDATA.localStorage.callback[key.toString()] = callback; + } + + if(NETDATA.localStorageTest() === true) { + try { + // console.log('localStorage: loading "' + key.toString() + '"'); + ret = localStorage.getItem(key.toString()); + // console.log('netdata loaded: ' + key.toString() + ' = ' + ret.toString()); + if(ret === null || ret === 'undefined') { + // console.log('localStorage: cannot load it, saving "' + key.toString() + '" with value "' + JSON.stringify(def) + '"'); + localStorage.setItem(key.toString(), JSON.stringify(def)); + ret = def; + } + else { + // console.log('localStorage: got "' + key.toString() + '" with value "' + ret + '"'); + ret = JSON.parse(ret); + // console.log('localStorage: loaded "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret)); + } + } + catch(error) { + console.log('localStorage: failed to read "' + key.toString() + '", using default: "' + def.toString() + '"'); + ret = def; + } + } + + if(typeof ret === 'undefined' || ret === 'undefined') { + console.log('localStorage: LOADED UNDEFINED "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret)); + ret = def; + } + + NETDATA.localStorage.current[key.toString()] = ret; + return ret; + };
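+ + // Usage sketch (illustrative): round-tripping a persistent value; the key + // 'custom.speed' is hypothetical and not used elsewhere in this file: + // + //     var speed = NETDATA.localStorageGet('custom.speed', 250, null); + //     NETDATA.localStorageSet('custom.speed', speed * 2, null); + +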
NETDATA.localStorageSet = function(key, value, callback) { + if(typeof value === 'undefined' || value === 'undefined') { + console.log('localStorage: ATTEMPT TO SET UNDEFINED "' + key.toString() + '" as value ' + value + ' of type ' + typeof(value)); + } + + if(typeof NETDATA.localStorage.default[key.toString()] === 'undefined') { + NETDATA.localStorage.default[key.toString()] = value; + NETDATA.localStorage.current[key.toString()] = value; + NETDATA.localStorage.callback[key.toString()] = callback; + } + + if(NETDATA.localStorageTest() === true) { + // console.log('localStorage: saving "' + key.toString() + '" with value "' + JSON.stringify(value) + '"'); + try { + localStorage.setItem(key.toString(), JSON.stringify(value)); + } + catch(e) { + console.log('localStorage: failed to save "' + key.toString() + '" with value: "' + value.toString() + '"'); + } + } + + NETDATA.localStorage.current[key.toString()] = value; + return value; + }; + + NETDATA.localStorageGetRecursive = function(obj, prefix, callback) { + var keys = Object.keys(obj); + var len = keys.length; + while(len--) { + var i = keys[len]; + + if(typeof obj[i] === 'object') { + //console.log('object ' + prefix + '.' + i.toString()); + NETDATA.localStorageGetRecursive(obj[i], prefix + '.' + i.toString(), callback); + continue; + } + + obj[i] = NETDATA.localStorageGet(prefix + '.' + i.toString(), obj[i], callback); + } + }; + + NETDATA.setOption = function(key, value) { + if(key.toString() === 'setOptionCallback') { + if(typeof NETDATA.options.current.setOptionCallback === 'function') { + NETDATA.options.current[key.toString()] = value; + NETDATA.options.current.setOptionCallback(); + } + } + else if(NETDATA.options.current[key.toString()] !== value) { + var name = 'options.' + key.toString(); + + if(typeof NETDATA.localStorage.default[name.toString()] === 'undefined') + console.log('localStorage: setOption() on unsaved option: "' + name.toString() + '", value: ' + value); + + //console.log(NETDATA.localStorage); + //console.log('setOption: setting "' + key.toString() + '" to "' + value + '" of type ' + typeof(value) + ' original type ' + typeof(NETDATA.options.current[key.toString()])); + //console.log(NETDATA.options); + NETDATA.options.current[key.toString()] = NETDATA.localStorageSet(name.toString(), value, null); + + if(typeof NETDATA.options.current.setOptionCallback === 'function') + NETDATA.options.current.setOptionCallback(); + } + + return true; + }; + + NETDATA.getOption = function(key) { + return NETDATA.options.current[key.toString()]; + }; + + // read settings from local storage + NETDATA.localStorageGetRecursive(NETDATA.options.current, 'options', null); + + // always start with this option enabled. 
+ NETDATA.setOption('stop_updates_when_focus_is_lost', true); + + NETDATA.resetOptions = function() { + var keys = Object.keys(NETDATA.localStorage.default); + var len = keys.length; + while(len--) { + var i = keys[len]; + var a = i.split('.'); + + if(a[0] === 'options') { + if(a[1] === 'setOptionCallback') continue; + if(typeof NETDATA.localStorage.default[i] === 'undefined') continue; + if(NETDATA.options.current[i] === NETDATA.localStorage.default[i]) continue; + + NETDATA.setOption(a[1], NETDATA.localStorage.default[i]); + } + else if(a[0] === 'chart_heights') { + if(typeof NETDATA.localStorage.callback[i] === 'function' && typeof NETDATA.localStorage.default[i] !== 'undefined') { + NETDATA.localStorage.callback[i](NETDATA.localStorage.default[i]); + } + } + } + + NETDATA.dateTime.init(NETDATA.options.current.timezone); + }; + + // ---------------------------------------------------------------------------------------------------------------- + + if(NETDATA.options.debug.main_loop === true) + console.log('welcome to NETDATA'); + + NETDATA.onresizeCallback = null; + NETDATA.onresize = function() { + NETDATA.options.last_page_resize = Date.now(); + NETDATA.onscroll(); + + if(typeof NETDATA.onresizeCallback === 'function') + NETDATA.onresizeCallback(); + }; + + NETDATA.abort_all_refreshes = function() { + var targets = NETDATA.options.targets; + var len = targets.length; + + while (len--) { + if (targets[len].fetching_data === true) { + if (typeof targets[len].xhr !== 'undefined') { + targets[len].xhr.abort(); + targets[len].running = false; + targets[len].fetching_data = false; + } + } + } + }; + + NETDATA.onscroll_start_delay = function() { + NETDATA.options.last_page_scroll = Date.now(); + + NETDATA.options.on_scroll_refresher_stop_until = + NETDATA.options.last_page_scroll + + ((NETDATA.options.current.async_on_scroll === true) ? 1000 : 0); + }; + + NETDATA.onscroll_end_delay = function() { + NETDATA.options.on_scroll_refresher_stop_until = + Date.now() + + ((NETDATA.options.current.async_on_scroll === true) ? 
NETDATA.options.current.onscroll_worker_duration_threshold : 0); + }; + + NETDATA.onscroll_updater_timeout_id = undefined; + NETDATA.onscroll_updater = function() { + NETDATA.globalSelectionSync.stop(); + + if(NETDATA.options.abort_ajax_on_scroll === true) + NETDATA.abort_all_refreshes(); + + // when the user scrolls he sees that we have + // hidden all the not-visible charts + // using this little function we try to switch + // the charts back to visible quickly + + if(NETDATA.intersectionObserver.enabled() === false) { + if (NETDATA.options.current.parallel_refresher === false) { + var targets = NETDATA.options.targets; + var len = targets.length; + + while (len--) + if (targets[len].running === false) + targets[len].isVisible(); + } + } + + NETDATA.onscroll_end_delay(); + }; + + NETDATA.scrollUp = false; + NETDATA.scrollY = window.scrollY; + NETDATA.onscroll = function() { + //console.log('onscroll() begin'); + + NETDATA.onscroll_start_delay(); + NETDATA.chartRefresherReschedule(); + + NETDATA.scrollUp = (window.scrollY > NETDATA.scrollY); + NETDATA.scrollY = window.scrollY; + + if(NETDATA.onscroll_updater_timeout_id) + NETDATA.timeout.clear(NETDATA.onscroll_updater_timeout_id); + + NETDATA.onscroll_updater_timeout_id = NETDATA.timeout.set(NETDATA.onscroll_updater, 0); + //console.log('onscroll() end'); + }; + + NETDATA.supportsPassiveEvents = function() { + if(NETDATA.options.passive_events === null) { + var supportsPassive = false; + try { + var opts = Object.defineProperty({}, 'passive', { + get: function () { + supportsPassive = true; + } + }); + window.addEventListener("test", null, opts); + } catch (e) { + console.log('browser does not support passive events'); + } + + NETDATA.options.passive_events = supportsPassive; + } + + // console.log('passive ' + NETDATA.options.passive_events); + return NETDATA.options.passive_events; + }; + + window.addEventListener('resize', NETDATA.onresize, NETDATA.supportsPassiveEvents() ? { passive: true } : false); + window.addEventListener('scroll', NETDATA.onscroll, NETDATA.supportsPassiveEvents() ? 
{ passive: true } : false); + // window.onresize = NETDATA.onresize; + // window.onscroll = NETDATA.onscroll; + + // ---------------------------------------------------------------------------------------------------------------- + // Error Handling + + NETDATA.errorCodes = { + 100: { message: "Cannot load chart library", alert: true }, + 101: { message: "Cannot load jQuery", alert: true }, + 402: { message: "Chart library not found", alert: false }, + 403: { message: "Chart library not enabled or failed to load", alert: false }, + 404: { message: "Chart not found", alert: false }, + 405: { message: "Cannot download charts index from server", alert: true }, + 406: { message: "Invalid charts index downloaded from server", alert: true }, + 407: { message: "Cannot HELLO netdata server", alert: false }, + 408: { message: "Netdata server sent invalid response to HELLO", alert: false }, + 409: { message: "Cannot ACCESS netdata registry", alert: false }, + 410: { message: "Netdata registry ACCESS failed", alert: false }, + 411: { message: "Netdata registry server sent invalid response to DELETE", alert: false }, + 412: { message: "Netdata registry DELETE failed", alert: false }, + 413: { message: "Netdata registry server sent invalid response to SWITCH", alert: false }, + 414: { message: "Netdata registry SWITCH failed", alert: false }, + 415: { message: "Netdata alarms download failed", alert: false }, + 416: { message: "Netdata alarms log download failed", alert: false }, + 417: { message: "Netdata registry server sent invalid response to SEARCH", alert: false }, + 418: { message: "Netdata registry SEARCH failed", alert: false } + }; + NETDATA.errorLast = { + code: 0, + message: "", + datetime: 0 + }; + + NETDATA.error = function(code, msg) { + NETDATA.errorLast.code = code; + NETDATA.errorLast.message = msg; + NETDATA.errorLast.datetime = Date.now(); + + console.log("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg); + + var ret = true; + if(typeof netdataErrorCallback === 'function') { + ret = netdataErrorCallback('system', code, msg); + } + + if(ret && NETDATA.errorCodes[code].alert) + alert("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg); + }; + + NETDATA.errorReset = function() { + NETDATA.errorLast.code = 0; + NETDATA.errorLast.message = "You are doing fine!"; + NETDATA.errorLast.datetime = 0; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // fast numbers formatting + + NETDATA.fastNumberFormat = { + formatters_fixed: [], + formatters_zero_based: [], + + // this is the fastest and the preferred + getIntlNumberFormat: function(min, max) { + var key = max; + if(min === max) { + if(typeof this.formatters_fixed[key] === 'undefined') + this.formatters_fixed[key] = new Intl.NumberFormat(undefined, { + // style: 'decimal', + // minimumIntegerDigits: 1, + // minimumSignificantDigits: 1, + // maximumSignificantDigits: 1, + useGrouping: true, + minimumFractionDigits: min, + maximumFractionDigits: max + }); + + return this.formatters_fixed[key]; + } + else if(min === 0) { + if(typeof this.formatters_zero_based[key] === 'undefined') + this.formatters_zero_based[key] = new Intl.NumberFormat(undefined, { + // style: 'decimal', + // minimumIntegerDigits: 1, + // minimumSignificantDigits: 1, + // maximumSignificantDigits: 1, + useGrouping: true, + minimumFractionDigits: min, + maximumFractionDigits: max + }); + + return this.formatters_zero_based[key]; + } + else { + // this is never
used + // it is added just for completeness + return new Intl.NumberFormat(undefined, { + // style: 'decimal', + // minimumIntegerDigits: 1, + // minimumSignificantDigits: 1, + // maximumSignificantDigits: 1, + useGrouping: true, + minimumFractionDigits: min, + maximumFractionDigits: max + }); + } + }, + + // this respects locale + getLocaleString: function(min, max) { + var key = max; + if(min === max) { + if(typeof this.formatters_fixed[key] === 'undefined') + this.formatters_fixed[key] = { + format: function (value) { + return value.toLocaleString(undefined, { + // style: 'decimal', + // minimumIntegerDigits: 1, + // minimumSignificantDigits: 1, + // maximumSignificantDigits: 1, + useGrouping: true, + minimumFractionDigits: min, + maximumFractionDigits: max + }); + } + }; + + return this.formatters_fixed[key]; + } + else if(min === 0) { + if(typeof this.formatters_zero_based[key] === 'undefined') + this.formatters_zero_based[key] = { + format: function (value) { + return value.toLocaleString(undefined, { + // style: 'decimal', + // minimumIntegerDigits: 1, + // minimumSignificantDigits: 1, + // maximumSignificantDigits: 1, + useGrouping: true, + minimumFractionDigits: min, + maximumFractionDigits: max + }); + } + }; + + return this.formatters_zero_based[key]; + } + else { + return { + format: function (value) { + return value.toLocaleString(undefined, { + // style: 'decimal', + // minimumIntegerDigits: 1, + // minimumSignificantDigits: 1, + // maximumSignificantDigits: 1, + useGrouping: true, + minimumFractionDigits: min, + maximumFractionDigits: max + }); + } + }; + } + }, + + // the fallback + getFixed: function(min, max) { + var key = max; + if(min === max) { + if(typeof this.formatters_fixed[key] === 'undefined') + this.formatters_fixed[key] = { + format: function (value) { + if(value === 0) return "0"; + return value.toFixed(max); + } + }; + + return this.formatters_fixed[key]; + } + else if(min === 0) { + if(typeof this.formatters_zero_based[key] === 'undefined') + this.formatters_zero_based[key] = { + format: function (value) { + if(value === 0) return "0"; + return value.toFixed(max); + } + }; + + return this.formatters_zero_based[key]; + } + else { + return { + format: function (value) { + if(value === 0) return "0"; + return value.toFixed(max); + } + }; + } + }, + + testIntlNumberFormat: function() { + var value = 1.12345; + var e1 = "1.12", e2 = "1,12"; + var s = ""; + + try { + var x = new Intl.NumberFormat(undefined, { + useGrouping: true, + minimumFractionDigits: 2, + maximumFractionDigits: 2 + }); + + s = x.format(value); + } + catch(e) { + s = ""; + } + + // console.log('NumberFormat: ', s); + return (s === e1 || s === e2); + }, + + testLocaleString: function() { + var value = 1.12345; + var e1 = "1.12", e2 = "1,12"; + var s = ""; + + try { + s = value.toLocaleString(undefined, { + useGrouping: true, + minimumFractionDigits: 2, + maximumFractionDigits: 2 + }); + } + catch(e) { + s = ""; + } + + // console.log('localeString: ', s); + return (s === e1 || s === e2); + }, + + // on first run we decide which formatter to use + get: function(min, max) { + if(this.testIntlNumberFormat()) { + // console.log('numberformat'); + this.get = this.getIntlNumberFormat; + } + else if(this.testLocaleString()) { + // console.log('localestring'); + this.get = this.getLocaleString; + } + else { + // console.log('fixed'); + this.get = this.getFixed; + } + return this.get(min, max); + } + }; + + // 
---------------------------------------------------------------------------------------------------------------- + // element data attributes + + NETDATA.dataAttribute = function(element, attribute, def) { + var key = 'data-' + attribute.toString(); + if(element.hasAttribute(key) === true) { + var data = element.getAttribute(key); + + if(data === 'true') return true; + if(data === 'false') return false; + if(data === 'null') return null; + + // Only convert to a number if it doesn't change the string + if(data === +data + '') return +data; + + if(/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/.test(data)) + return JSON.parse(data); + + return data; + } + else return def; + }; + + NETDATA.dataAttributeBoolean = function(element, attribute, def) { + var value = NETDATA.dataAttribute(element, attribute, def); + + if(value === true || value === false) + return value; + + if(typeof(value) === 'string') { + if(value === 'yes' || value === 'on') + return true; + + if(value === '' || value === 'no' || value === 'off' || value === 'null') + return false; + + return def; + } + + if(typeof(value) === 'number') + return value !== 0; + + return def; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // commonMin & commonMax + + NETDATA.commonMin = { + keys: {}, + latest: {}, + + globalReset: function() { + this.keys = {}; + this.latest = {}; + }, + + get: function(state) { + if(typeof state.tmp.__commonMin === 'undefined') { + // get the commonMin setting + state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null); + } + + var min = state.data.min; + var name = state.tmp.__commonMin; + + if(name === null) { + // we don't need commonMin + //state.log('no need for commonMin'); + return min; + } + + var t = this.keys[name]; + if(typeof t === 'undefined') { + // add our commonMin + this.keys[name] = {}; + t = this.keys[name]; + } + + var uuid = state.uuid; + if(typeof t[uuid] !== 'undefined') { + if(t[uuid] === min) { + //state.log('commonMin ' + state.tmp.__commonMin + ' not changed: ' + this.latest[name]); + return this.latest[name]; + } + else if(min < this.latest[name]) { + //state.log('commonMin ' + state.tmp.__commonMin + ' increased: ' + min); + t[uuid] = min; + this.latest[name] = min; + return min; + } + } + + // add our min + t[uuid] = min; + + // find the common min + var m = min; + for(var i in t) + if(t.hasOwnProperty(i) && t[i] < m) m = t[i]; + + //state.log('commonMin ' + state.tmp.__commonMin + ' updated: ' + m); + this.latest[name] = m; + return m; + } + }; + + NETDATA.commonMax = { + keys: {}, + latest: {}, + + globalReset: function() { + this.keys = {}; + this.latest = {}; + }, + + get: function(state) { + if(typeof state.tmp.__commonMax === 'undefined') { + // get the commonMax setting + state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null); + } + + var max = state.data.max; + var name = state.tmp.__commonMax; + + if(name === null) { + // we don't need commonMax + //state.log('no need for commonMax'); + return max; + } + + var t = this.keys[name]; + if(typeof t === 'undefined') { + // add our commonMax + this.keys[name] = {}; + t = this.keys[name]; + } + + var uuid = state.uuid; + if(typeof t[uuid] !== 'undefined') { + if(t[uuid] === max) { + //state.log('commonMax ' + state.tmp.__commonMax + ' not changed: ' + this.latest[name]); + return this.latest[name]; + } + else if(max > this.latest[name]) { + //state.log('commonMax ' + state.tmp.__commonMax + ' increased: ' + max); + 
t[uuid] = max; + this.latest[name] = max; + return max; + } + } + + // add our max + t[uuid] = max; + + // find the common max + var m = max; + for(var i in t) + if(t.hasOwnProperty(i) && t[i] > m) m = t[i]; + + //state.log('commonMax ' + state.tmp.__commonMax + ' updated: ' + m); + this.latest[name] = m; + return m; + } + }; + + NETDATA.commonColors = { + keys: {}, + + globalReset: function() { + this.keys = {}; + }, + + get: function(state, label) { + var ret = this.refill(state); + + if(typeof ret.assigned[label] === 'undefined') + ret.assigned[label] = ret.available.shift(); + + return ret.assigned[label]; + }, + + refill: function(state) { + var ret, len; + + if(typeof state.tmp.__commonColors === 'undefined') + ret = this.prepare(state); + else { + ret = this.keys[state.tmp.__commonColors]; + if(typeof ret === 'undefined') + ret = this.prepare(state); + } + + if(ret.available.length === 0) { + if(ret.copy_theme === true || ret.custom.length === 0) { + // copy the theme colors + len = NETDATA.themes.current.colors.length; + while (len--) + ret.available.unshift(NETDATA.themes.current.colors[len]); + } + + // copy the custom colors + len = ret.custom.length; + while (len--) + ret.available.unshift(ret.custom[len]); + } + + state.colors_assigned = ret.assigned; + state.colors_available = ret.available; + state.colors_custom = ret.custom; + + return ret; + }, + + __read_custom_colors: function(state, ret) { + // add the user supplied colors + var c = NETDATA.dataAttribute(state.element, 'colors', undefined); + if (typeof c === 'string' && c.length > 0) { + c = c.split(' '); + var len = c.length; + + if (len > 0 && c[len - 1] === 'ONLY') { + len--; + ret.copy_theme = false; + } + + while (len--) + ret.custom.unshift(c[len]); + } + }, + + prepare: function(state) { + var has_custom_colors = false; + + if(typeof state.tmp.__commonColors === 'undefined') { + var defname = state.chart.context; + + // if this chart has data-colors="" + // we should use the chart uuid as the default key (private palette) + // (data-common-colors="NAME" will be used anyway) + var c = NETDATA.dataAttribute(state.element, 'colors', undefined); + if (typeof c === 'string' && c.length > 0) { + defname = state.uuid; + has_custom_colors = true; + } + + // get the commonColors setting + state.tmp.__commonColors = NETDATA.dataAttribute(state.element, 'common-colors', defname); + } + + var name = state.tmp.__commonColors; + var ret = this.keys[name]; + + if(typeof ret === 'undefined') { + // add our commonColors + this.keys[name] = { + assigned: {}, // name-value of dimensions and their colors + available: [], // an array of colors available to be used + custom: [], // the array of colors defined by the user + charts: {}, // the charts linked to this + copy_theme: true + }; + ret = this.keys[name]; + } + + if(typeof ret.charts[state.uuid] === 'undefined') { + ret.charts[state.uuid] = state; + + if(has_custom_colors === true) + this.__read_custom_colors(state, ret); + } + + return ret; + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Chart Registry + + // When multiple charts need the same chart definition, we avoid downloading it + // multiple times (and having it in browser memory multiple times) + // by using this registry. + + // Every time we download a chart definition, we save it here with .add() + // Then we try to get it back with .get(). If that fails, we download it.
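+ + // Usage sketch (illustrative) of the cache-aside flow implemented below: + // + //     var chart = NETDATA.chartRegistry.get(host, id); // null on a cache miss + //     if(chart === null) { + //         // download the chart definition, then cache it for all other charts: + //         // NETDATA.chartRegistry.add(host, id, data); + //     }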
+ + NETDATA.fixHost = function(host) { + while(host.slice(-1) === '/') + host = host.substring(0, host.length - 1); + + return host; + }; + + NETDATA.chartRegistry = { + charts: {}, + + globalReset: function() { + this.charts = {}; + }, + + add: function(host, id, data) { + if(typeof this.charts[host] === 'undefined') + this.charts[host] = {}; + + //console.log('added ' + host + '/' + id); + this.charts[host][id] = data; + }, + + get: function(host, id) { + if(typeof this.charts[host] === 'undefined') + return null; + + if(typeof this.charts[host][id] === 'undefined') + return null; + + //console.log('cached ' + host + '/' + id); + return this.charts[host][id]; + }, + + downloadAll: function(host, callback) { + host = NETDATA.fixHost(host); + + var self = this; + + function got_data(h, data, callback) { + if(data !== null) { + self.charts[h] = data.charts; + + // update the server timezone in our options + if(typeof data.timezone === 'string') + NETDATA.options.server_timezone = data.timezone; + } + else NETDATA.error(406, h + '/api/v1/charts'); + + if(typeof callback === 'function') + callback(data); + } + + if(netdataSnapshotData !== null) { + got_data(host, netdataSnapshotData.charts, callback); + } + else { + $.ajax({ + url: host + '/api/v1/charts', + async: true, + cache: false, + xhrFields: {withCredentials: true} // required for the cookie + }) + .done(function (data) { + data = NETDATA.xss.checkOptional('/api/v1/charts', data); + got_data(host, data, callback); + }) + .fail(function () { + NETDATA.error(405, host + '/api/v1/charts'); + + if (typeof callback === 'function') + callback(null); + }); + } + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Global Pan and Zoom on charts + + // Using this structure, all the charts are synchronized, so that + // when you pan or zoom one, all others are automatically refreshed + // to the same timespan.
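+ + // Sketch (illustrative; state, after_ms and before_ms are placeholders): + // when one chart is panned or zoomed, its state becomes the "master" and + // all other charts re-fetch data for the same time window: + // + //     NETDATA.globalPanAndZoom.setMaster(state, after_ms, before_ms); + //     // other charts test shouldBeAutoRefreshed(state) and redraw themselves + //     NETDATA.globalPanAndZoom.clearMaster(); // back to normal auto-refresh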
+ + NETDATA.globalPanAndZoom = { + seq: 0, // timestamp ms + // every time a chart is panned or zoomed + // we set the timestamp here + // then we use it as a sequence number + // to find if other charts are synchronized + // to this time-range + + master: null, // the master chart (state), to which all others + // are synchronized + + force_before_ms: null, // the timespan to sync all other charts + force_after_ms: null, + + callback: null, + + globalReset: function() { + this.clearMaster(); + this.seq = 0; + this.master = null; + this.force_after_ms = null; + this.force_before_ms = null; + this.callback = null; + }, + + delay: function() { + if(NETDATA.options.debug.globalPanAndZoom === true) + console.log('globalPanAndZoom.delay()'); + + NETDATA.options.auto_refresher_stop_until = Date.now() + NETDATA.options.current.global_pan_sync_time; + }, + + // set a new master + setMaster: function(state, after, before) { + this.delay(); + + if(NETDATA.options.current.sync_pan_and_zoom === false) + return; + + if(this.master === null) { + if(NETDATA.options.debug.globalPanAndZoom === true) + console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') SET MASTER'); + } + else if(this.master !== state) { + if(NETDATA.options.debug.globalPanAndZoom === true) + console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') CHANGED MASTER'); + + this.master.resetChart(true, true); + } + + var now = Date.now(); + this.master = state; + this.seq = now; + this.force_after_ms = after; + this.force_before_ms = before; + + if(typeof this.callback === 'function') + this.callback(true, after, before); + }, + + // clear the master + clearMaster: function() { + if(NETDATA.options.debug.globalPanAndZoom === true) + console.log('globalPanAndZoom.clearMaster()'); + + if(this.master !== null) { + var st = this.master; + this.master = null; + st.resetChart(); + } + + this.master = null; + this.seq = 0; + this.force_after_ms = null; + this.force_before_ms = null; + NETDATA.options.auto_refresher_stop_until = 0; + + if(typeof this.callback === 'function') + this.callback(false, 0, 0); + }, + + // is the given state the master of the global + // pan and zoom sync? + isMaster: function(state) { + return (this.master === state); + }, + + // do we currently have a global pan and zoom sync?
+ isActive: function() { + return (this.master !== null && this.force_before_ms !== null && this.force_after_ms !== null && this.seq !== 0); + }, + + // check if a chart, other than the master + // needs to be refreshed, due to the global pan and zoom + shouldBeAutoRefreshed: function(state) { + if(this.master === null || this.seq === 0) + return false; + + //if(state.needsRecreation()) + // return true; + + return (state.tm.pan_and_zoom_seq !== this.seq); + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // global chart underlay (time-frame highlighting) + + NETDATA.globalChartUnderlay = { + callback: null, // what to call when a highlighted range is set up + after: null, // highlight after this time + before: null, // highlight before this time + view_after: null, // the chart's after_ms viewport when the highlight was set up + view_before: null, // the chart's before_ms viewport, when the highlight was set up + state: null, // the chart on which the highlight was set up + + isActive: function() { + return (this.after !== null && this.before !== null); + }, + + hasViewport: function() { + return (this.state !== null && this.view_after !== null && this.view_before !== null); + }, + + init: function(state, after, before, view_after, view_before) { + this.state = (typeof state !== 'undefined') ? state : null; + this.after = (typeof after !== 'undefined' && after !== null && after > 0) ? after : null; + this.before = (typeof before !== 'undefined' && before !== null && before > 0) ? before : null; + this.view_after = (typeof view_after !== 'undefined' && view_after !== null && view_after > 0) ? view_after : null; + this.view_before = (typeof view_before !== 'undefined' && view_before !== null && view_before > 0) ?
view_before : null; + }, + + setup: function() { + if(this.isActive() === true) { + if (this.state === null) + this.state = NETDATA.options.targets[0]; + + if (typeof this.callback === 'function') + this.callback(true, this.after, this.before); + } + else { + if (typeof this.callback === 'function') + this.callback(false, 0, 0); + } + }, + + set: function(state, after, before, view_after, view_before) { + if(after > before) { + var t = after; + after = before; + before = t; + } + + this.init(state, after, before, view_after, view_before); + + if (this.hasViewport() === true) + NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before); + + this.setup(); + }, + + clear: function() { + this.after = null; + this.before = null; + this.state = null; + this.view_after = null; + this.view_before = null; + + if(typeof this.callback === 'function') + this.callback(false, 0, 0); + }, + + focus: function() { + if(this.isActive() === true && this.hasViewport() === true) { + if(this.state === null) + this.state = NETDATA.options.targets[0]; + + if(NETDATA.globalPanAndZoom.isMaster(this.state) === true) + NETDATA.globalPanAndZoom.clearMaster(); + + NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before, true); + } + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // dimensions selection + + // TODO + // move color assignment to dimensions, here + + var dimensionStatus = function(parent, label, name_div, value_div, color) { + this.enabled = false; + this.parent = parent; + this.label = label; + this.name_div = null; + this.value_div = null; + this.color = NETDATA.themes.current.foreground; + this.selected = (parent.unselected_count === 0); + + this.setOptions(name_div, value_div, color); + }; + + dimensionStatus.prototype.invalidate = function() { + this.name_div = null; + this.value_div = null; + this.enabled = false; + }; + + dimensionStatus.prototype.setOptions = function(name_div, value_div, color) { + this.color = color; + + if(this.name_div !== name_div) { + this.name_div = name_div; + this.name_div.title = this.label; + this.name_div.style.setProperty('color', this.color, 'important'); + if(this.selected === false) + this.name_div.className = 'netdata-legend-name not-selected'; + else + this.name_div.className = 'netdata-legend-name selected'; + } + + if(this.value_div !== value_div) { + this.value_div = value_div; + this.value_div.title = this.label; + this.value_div.style.setProperty('color', this.color, 'important'); + if(this.selected === false) + this.value_div.className = 'netdata-legend-value not-selected'; + else + this.value_div.className = 'netdata-legend-value selected'; + } + + this.enabled = true; + this.setHandler(); + }; + + dimensionStatus.prototype.setHandler = function() { + if(this.enabled === false) return; + + var ds = this; + + // this.name_div.onmousedown = this.value_div.onmousedown = function(e) { + this.name_div.onclick = this.value_div.onclick = function(e) { + e.preventDefault(); + if(ds.isSelected()) { + // this is selected + if(e.shiftKey === true || e.ctrlKey === true) { + // control or shift key is pressed -> unselect this (except is none will remain selected, in which case select all) + ds.unselect(); + + if(ds.parent.countSelected() === 0) + ds.parent.selectAll(); + } + else { + // no key is pressed -> select only this (except if it is the only selected already, in which case select all) + if(ds.parent.countSelected() === 1) { + 
ds.parent.selectAll(); + } + else { + ds.parent.selectNone(); + ds.select(); + } + } + } + else { + // this is not selected + if(e.shiftKey === true || e.ctrlKey === true) { + // control or shift key is pressed -> select this too + ds.select(); + } + else { + // no key is pressed -> select only this + ds.parent.selectNone(); + ds.select(); + } + } + + ds.parent.state.redrawChart(); + } + }; + + dimensionStatus.prototype.select = function() { + if(this.enabled === false) return; + + this.name_div.className = 'netdata-legend-name selected'; + this.value_div.className = 'netdata-legend-value selected'; + this.selected = true; + }; + + dimensionStatus.prototype.unselect = function() { + if(this.enabled === false) return; + + this.name_div.className = 'netdata-legend-name not-selected'; + this.value_div.className = 'netdata-legend-value hidden'; + this.selected = false; + }; + + dimensionStatus.prototype.isSelected = function() { + return(this.enabled === true && this.selected === true); + }; + + // ---------------------------------------------------------------------------------------------------------------- + + var dimensionsVisibility = function(state) { + this.state = state; + this.len = 0; + this.dimensions = {}; + this.selected_count = 0; + this.unselected_count = 0; + }; + + dimensionsVisibility.prototype.dimensionAdd = function(label, name_div, value_div, color) { + if(typeof this.dimensions[label] === 'undefined') { + this.len++; + this.dimensions[label] = new dimensionStatus(this, label, name_div, value_div, color); + } + else + this.dimensions[label].setOptions(name_div, value_div, color); + + return this.dimensions[label]; + }; + + dimensionsVisibility.prototype.dimensionGet = function(label) { + return this.dimensions[label]; + }; + + dimensionsVisibility.prototype.invalidateAll = function() { + var keys = Object.keys(this.dimensions); + var len = keys.length; + while(len--) + this.dimensions[keys[len]].invalidate(); + }; + + dimensionsVisibility.prototype.selectAll = function() { + var keys = Object.keys(this.dimensions); + var len = keys.length; + while(len--) + this.dimensions[keys[len]].select(); + }; + + dimensionsVisibility.prototype.countSelected = function() { + var selected = 0; + var keys = Object.keys(this.dimensions); + var len = keys.length; + while(len--) + if(this.dimensions[keys[len]].isSelected()) selected++; + + return selected; + }; + + dimensionsVisibility.prototype.selectNone = function() { + var keys = Object.keys(this.dimensions); + var len = keys.length; + while(len--) + this.dimensions[keys[len]].unselect(); + }; + + dimensionsVisibility.prototype.selected2BooleanArray = function(array) { + var ret = []; + this.selected_count = 0; + this.unselected_count = 0; + + var len = array.length; + while(len--) { + var ds = this.dimensions[array[len]]; + if(typeof ds === 'undefined') { + // console.log(array[i] + ' is not found'); + ret.unshift(false); + } + else if(ds.isSelected()) { + ret.unshift(true); + this.selected_count++; + } + else { + ret.unshift(false); + this.unselected_count++; + } + } + + if(this.selected_count === 0 && this.unselected_count !== 0) { + this.selectAll(); + return this.selected2BooleanArray(array); + } + + return ret; + }; + + + // ---------------------------------------------------------------------------------------------------------------- + // date/time conversion + + NETDATA.dateTime = { + using_timezone: false, + + // these are the old netdata functions + // we fallback to these, if the new ones fail + + localeDateStringNative: 
function(d) { + return d.toLocaleDateString(); + }, + + localeTimeStringNative: function(d) { + return d.toLocaleTimeString(); + }, + + xAxisTimeStringNative: function(d) { + return NETDATA.zeropad(d.getHours()) + ":" + + NETDATA.zeropad(d.getMinutes()) + ":" + + NETDATA.zeropad(d.getSeconds()); + }, + + // initialize the new date/time conversion + // functions. + // if this fails, we fallback to the above + init: function(timezone) { + //console.log('init with timezone: ' + timezone); + + // detect browser timezone + try { + NETDATA.options.browser_timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; + } + catch(e) { + console.log('failed to detect browser timezone: ' + e.toString()); + NETDATA.options.browser_timezone = 'cannot-detect-it'; + } + + var ret = false; + + try { + var dateOptions ={ + localeMatcher: 'best fit', + formatMatcher: 'best fit', + weekday: 'short', + year: 'numeric', + month: 'short', + day: '2-digit' + }; + + var timeOptions = { + localeMatcher: 'best fit', + hour12: false, + formatMatcher: 'best fit', + hour: '2-digit', + minute: '2-digit', + second: '2-digit' + }; + + var xAxisOptions = { + localeMatcher: 'best fit', + hour12: false, + formatMatcher: 'best fit', + hour: '2-digit', + minute: '2-digit', + second: '2-digit' + }; + + if(typeof timezone === 'string' && timezone !== '' && timezone !== 'default') { + dateOptions.timeZone = timezone; + timeOptions.timeZone = timezone; + timeOptions.timeZoneName = 'short'; + xAxisOptions.timeZone = timezone; + this.using_timezone = true; + } + else { + timezone = 'default'; + this.using_timezone = false; + } + + this.dateFormat = new Intl.DateTimeFormat(navigator.language, dateOptions); + this.timeFormat = new Intl.DateTimeFormat(navigator.language, timeOptions); + this.xAxisFormat = new Intl.DateTimeFormat(navigator.language, xAxisOptions); + + this.localeDateString = function(d) { + return this.dateFormat.format(d); + }; + + this.localeTimeString = function(d) { + return this.timeFormat.format(d); + }; + + this.xAxisTimeString = function(d) { + return this.xAxisFormat.format(d); + }; + + //var d = new Date(); + //var t = this.dateFormat.format(d) + ' ' + this.timeFormat.format(d) + ' ' + this.xAxisFormat.format(d); + + ret = true; + } + catch(e) { + console.log('Cannot setup Date/Time formatting: ' + e.toString()); + + timezone = 'default'; + this.localeDateString = this.localeDateStringNative; + this.localeTimeString = this.localeTimeStringNative; + this.xAxisTimeString = this.xAxisTimeStringNative; + this.using_timezone = false; + + ret = false; + } + + // save it + //console.log('init setOption timezone: ' + timezone); + NETDATA.setOption('timezone', timezone); + + return ret; + } + }; + NETDATA.dateTime.init(NETDATA.options.current.timezone); + + + // ---------------------------------------------------------------------------------------------------------------- + // units conversion + + NETDATA.unitsConversion = { + keys: {}, // keys for data-common-units + latest: {}, // latest selected units for data-common-units + + globalReset: function() { + this.keys = {}; + this.latest = {}; + }, + + scalableUnits: { + 'packets/s': { + 'pps': 1, + 'Kpps': 1000, + 'Mpps': 1000000 + }, + 'pps': { + 'pps': 1, + 'Kpps': 1000, + 'Mpps': 1000000 + }, + 'kilobits/s': { + 'bits/s': 1 / 1000, + 'kilobits/s': 1, + 'megabits/s': 1000, + 'gigabits/s': 1000000, + 'terabits/s': 1000000000 + }, + 'kilobytes/s': { + 'bytes/s': 1 / 1024, + 'kilobytes/s': 1, + 'megabytes/s': 1024, + 'gigabytes/s': 1024 * 1024, + 'terabytes/s': 1024 * 
1024 * 1024 + }, + 'KB/s': { + 'B/s': 1 / 1024, + 'KB/s': 1, + 'MB/s': 1024, + 'GB/s': 1024 * 1024, + 'TB/s': 1024 * 1024 * 1024 + }, + 'KB': { + 'B': 1 / 1024, + 'KB': 1, + 'MB': 1024, + 'GB': 1024 * 1024, + 'TB': 1024 * 1024 * 1024 + }, + 'MB': { + 'B': 1 / (1024 * 1024), + 'KB': 1 / 1024, + 'MB': 1, + 'GB': 1024, + 'TB': 1024 * 1024, + 'PB': 1024 * 1024 * 1024 + }, + 'GB': { + 'B': 1 / (1024 * 1024 * 1024), + 'KB': 1 / (1024 * 1024), + 'MB': 1 / 1024, + 'GB': 1, + 'TB': 1024, + 'PB': 1024 * 1024, + 'EB': 1024 * 1024 * 1024 + } + /* + 'milliseconds': { + 'seconds': 1000 + }, + 'seconds': { + 'milliseconds': 0.001, + 'seconds': 1, + 'minutes': 60, + 'hours': 3600, + 'days': 86400 + } + */ + }, + + convertibleUnits: { + 'Celsius': { + 'Fahrenheit': { + check: function(max) { void(max); return NETDATA.options.current.temperature === 'fahrenheit'; }, + convert: function(value) { return value * 9 / 5 + 32; } + } + }, + 'celsius': { + 'fahrenheit': { + check: function(max) { void(max); return NETDATA.options.current.temperature === 'fahrenheit'; }, + convert: function(value) { return value * 9 / 5 + 32; } + } + }, + 'seconds': { + 'time': { + check: function (max) { void(max); return NETDATA.options.current.seconds_as_time; }, + convert: function(seconds) { return NETDATA.unitsConversion.seconds2time(seconds); } + } + }, + 'milliseconds': { + 'milliseconds': { + check: function (max) { return NETDATA.options.current.seconds_as_time && max < 1000; }, + convert: function(milliseconds) { + var tms = Math.round(milliseconds * 10); + milliseconds = Math.floor(tms / 10); + + tms -= milliseconds * 10; + + return (milliseconds).toString() + '.' + tms.toString(); + } + }, + 'seconds': { + check: function (max) { return NETDATA.options.current.seconds_as_time && max >= 1000 && max < 60000; }, + convert: function(milliseconds) { + milliseconds = Math.round(milliseconds); + + var seconds = Math.floor(milliseconds / 1000); + milliseconds -= seconds * 1000; + + milliseconds = Math.round(milliseconds / 10); + + return seconds.toString() + '.' + + NETDATA.zeropad(milliseconds); + } + }, + 'M:SS.ms': { + check: function (max) { return NETDATA.options.current.seconds_as_time && max >= 60000; }, + convert: function(milliseconds) { + milliseconds = Math.round(milliseconds); + + var minutes = Math.floor(milliseconds / 60000); + milliseconds -= minutes * 60000; + + var seconds = Math.floor(milliseconds / 1000); + milliseconds -= seconds * 1000; + + milliseconds = Math.round(milliseconds / 10); + + return minutes.toString() + ':' + + NETDATA.zeropad(seconds) + '.' + + NETDATA.zeropad(milliseconds); + } + } + } + }, + + seconds2time: function(seconds) { + seconds = Math.abs(seconds); + + var days = Math.floor(seconds / 86400); + seconds -= days * 86400; + + var hours = Math.floor(seconds / 3600); + seconds -= hours * 3600; + + var minutes = Math.floor(seconds / 60); + seconds -= minutes * 60; + + seconds = Math.round(seconds); + + var ms_txt = ''; + /* + var ms = seconds - Math.floor(seconds); + seconds -= ms; + ms = Math.round(ms * 1000); + + if(ms > 1) { + if(ms < 10) + ms_txt = '.00' + ms.toString(); + else if(ms < 100) + ms_txt = '.0' + ms.toString(); + else + ms_txt = '.' 
+ ms.toString(); + } + */ + + return ((days > 0)?days.toString() + 'd:':'').toString() + + NETDATA.zeropad(hours) + ':' + + NETDATA.zeropad(minutes) + ':' + + NETDATA.zeropad(seconds) + + ms_txt; + }, + + // get a function that converts the units + // + every time units are switched call the callback + get: function(uuid, min, max, units, desired_units, common_units_name, switch_units_callback) { + // validate the parameters + if(typeof units === 'undefined') + units = 'undefined'; + + // check if we support units conversion + if(typeof this.scalableUnits[units] === 'undefined' && typeof this.convertibleUnits[units] === 'undefined') { + // we can't convert these units + //console.log('DEBUG: ' + uuid.toString() + ' can\'t convert units: ' + units.toString()); + return function(value) { return value; }; + } + + // check if the caller wants the original units + if(typeof desired_units === 'undefined' || desired_units === null || desired_units === 'original' || desired_units === units) { + //console.log('DEBUG: ' + uuid.toString() + ' original units wanted'); + switch_units_callback(units); + return function(value) { return value; }; + } + + // now we know we can convert the units + // and the caller wants some kind of conversion + + var tunits = null; + var tdivider = 0; + var x; + + if(typeof this.scalableUnits[units] !== 'undefined') { + // units that can be scaled + // we decide a divider + + // console.log('NETDATA.unitsConversion.get(' + units.toString() + ', ' + desired_units.toString() + ', function()) decide divider with min = ' + min.toString() + ', max = ' + max.toString()); + + if (desired_units === 'auto') { + // the caller wants to auto-scale the units + + // find the absolute maximum value that is rendered on the chart + // based on this we decide the scale + min = Math.abs(min); + max = Math.abs(max); + if (min > max) max = min; + + // find the smallest scale that provides integers + for (x in this.scalableUnits[units]) { + if (this.scalableUnits[units].hasOwnProperty(x)) { + var m = this.scalableUnits[units][x]; + if (m <= max && m > tdivider) { + tunits = x; + tdivider = m; + } + } + } + + if(tunits === null || tdivider <= 0) { + // we couldn't find one + //console.log('DEBUG: ' + uuid.toString() + ' cannot find an auto-scaling candidate for units: ' + units.toString() + ' (max: ' + max.toString() + ')'); + switch_units_callback(units); + return function(value) { return value; }; + } + + if(typeof common_units_name === 'string' && typeof uuid === 'string') { + // the caller wants several charts to have the same units + // data-common-units + + var common_units_key = common_units_name + '-' + units; + + // add our divider into the list of keys + var t = this.keys[common_units_key]; + if(typeof t === 'undefined') { + this.keys[common_units_key] = {}; + t = this.keys[common_units_key]; + } + t[uuid] = { + units: tunits, + divider: tdivider + }; + + // find the max divider of all charts + var common_units = t[uuid]; + for(x in t) { + if (t.hasOwnProperty(x) && t[x].divider > common_units.divider) + common_units = t[x]; + } + + // save our common_max to the latest keys + var latest = this.latest[common_units_key]; + if(typeof latest === 'undefined') { + this.latest[common_units_key] = {}; + latest = this.latest[common_units_key]; + } + latest.units = common_units.units; + latest.divider = common_units.divider; + + tunits = latest.units; + tdivider = latest.divider; + + //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + 
tunits.toString() + ' with divider ' + tdivider.toString() + ', common-units=' + common_units_name.toString() + ((t[uuid].divider !== tdivider)?' USED COMMON, mine was ' + t[uuid].units:' set common').toString()); + + // apply it to this chart + switch_units_callback(tunits); + return function(value) { + if(tdivider !== latest.divider) { + // another chart switched our common units + // we should switch them too + //console.log('DEBUG: ' + uuid + ' switching units due to a common-units change, from ' + tunits.toString() + ' to ' + latest.units.toString()); + tunits = latest.units; + tdivider = latest.divider; + switch_units_callback(tunits); + } + + return value / tdivider; + }; + } + else { + // the caller did not give data-common-units + // this chart auto-scales independently of all others + //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', autonomously'); + + switch_units_callback(tunits); + return function (value) { return value / tdivider; }; + } + } + else { + // the caller wants specific units + + if(typeof this.scalableUnits[units][desired_units] !== 'undefined') { + // all good, set the new units + tdivider = this.scalableUnits[units][desired_units]; + // console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + desired_units.toString() + ' with divider ' + tdivider.toString() + ', by reference'); + switch_units_callback(desired_units); + return function (value) { return value / tdivider; }; + } + else { + // oops! switch back to original units + console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.'); + switch_units_callback(units); + return function (value) { return value; }; + } + } + } + else if(typeof this.convertibleUnits[units] !== 'undefined') { + // units that can be converted + if(desired_units === 'auto') { + for(x in this.convertibleUnits[units]) { + if (this.convertibleUnits[units].hasOwnProperty(x)) { + if (this.convertibleUnits[units][x].check(max)) { + //console.log('DEBUG: ' + uuid.toString() + ' converting ' + units.toString() + ' to: ' + x.toString()); + switch_units_callback(x); + return this.convertibleUnits[units][x].convert; + } + } + } + + // none checked ok + //console.log('DEBUG: ' + uuid.toString() + ' no conversion available for ' + units.toString() + ' to: ' + desired_units.toString()); + switch_units_callback(units); + return function (value) { return value; }; + } + else if(typeof this.convertibleUnits[units][desired_units] !== 'undefined') { + switch_units_callback(desired_units); + return this.convertibleUnits[units][desired_units].convert; + } + else { + console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.'); + switch_units_callback(units); + return function (value) { return value; }; + } + } + else { + // hm... did we forget to implement the new type? 
+ console.log('Unmatched unit conversion method for units ' + units.toString()); + switch_units_callback(units); + return function (value) { return value; }; + } + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // global selection sync + + NETDATA.globalSelectionSync = { + state: null, + dont_sync_before: 0, + last_t: 0, + slaves: [], + timeout_id: undefined, + + globalReset: function() { + this.stop(); + this.state = null; + this.dont_sync_before = 0; + this.last_t = 0; + this.slaves = []; + this.timeout_id = undefined; + }, + + active: function() { + return (this.state !== null); + }, + + // return true if global selection sync can be enabled now + enabled: function() { + // console.log('enabled()'); + // can we globally apply selection sync? + if(NETDATA.options.current.sync_selection === false) + return false; + + return (this.dont_sync_before <= Date.now()); + }, + + // set the global selection sync master + setMaster: function(state) { + if(this.enabled() === false) { + this.stop(); + return; + } + + if(this.state === state) + return; + + if(this.state !== null) + this.stop(); + + if(NETDATA.options.debug.globalSelectionSync === true) + console.log('globalSelectionSync.setMaster(' + state.id + ')'); + + state.selected = true; + this.state = state; + this.last_t = 0; + + // find all slaves + var targets = NETDATA.intersectionObserver.targets(); + this.slaves = []; + var len = targets.length; + while(len--) { + var st = targets[len]; + if (this.state !== st && st.globalSelectionSyncIsEligible() === true) + this.slaves.push(st); + } + + // this.delay(100); + }, + + // stop global selection sync + stop: function() { + if(this.state !== null) { + if(NETDATA.options.debug.globalSelectionSync === true) + console.log('globalSelectionSync.stop()'); + + var len = this.slaves.length; + while (len--) + this.slaves[len].clearSelection(); + + this.state.clearSelection(); + + this.last_t = 0; + this.slaves = []; + this.state = null; + } + }, + + // delay global selection sync for some time + delay: function(ms) { + if(NETDATA.options.current.sync_selection === true) { + if(NETDATA.options.debug.globalSelectionSync === true) + console.log('globalSelectionSync.delay()'); + + if(typeof ms === 'number') + this.dont_sync_before = Date.now() + ms; + else + this.dont_sync_before = Date.now() + NETDATA.options.current.sync_selection_delay; + } + }, + + __syncSlaves: function() { + if(NETDATA.globalSelectionSync.enabled() === true) { + if(NETDATA.options.debug.globalSelectionSync === true) + console.log('globalSelectionSync.__syncSlaves()'); + + var t = NETDATA.globalSelectionSync.last_t; + var len = NETDATA.globalSelectionSync.slaves.length; + while (len--) + NETDATA.globalSelectionSync.slaves[len].setSelection(t); + + this.timeout_id = undefined; + } + }, + + // sync all the visible charts to the given time + // this is to be called from the chart libraries + sync: function(state, t) { + if(NETDATA.options.current.sync_selection === true) { + if(NETDATA.options.debug.globalSelectionSync === true) + console.log('globalSelectionSync.sync(' + state.id + ', ' + t.toString() + ')'); + + this.setMaster(state); + + if(t === this.last_t) + return; + + this.last_t = t; + + if (state.foreign_element_selection !== null) + state.foreign_element_selection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t); + + if (this.timeout_id) + NETDATA.timeout.clear(this.timeout_id); + + this.timeout_id = 
NETDATA.timeout.set(this.__syncSlaves, 0); + } + } + }; + + NETDATA.intersectionObserver = { + observer: null, + visible_targets: [], + + options: { + root: null, + rootMargin: "0px", + threshold: null + }, + + enabled: function() { + return this.observer !== null; + }, + + globalReset: function() { + if(this.observer !== null) { + this.visible_targets = []; + this.observer.disconnect(); + this.init(); + } + }, + + targets: function() { + if(this.enabled() === true && this.visible_targets.length > 0) + return this.visible_targets; + else + return NETDATA.options.targets; + }, + + switchChartVisibility: function() { + var old = this.__visibilityRatioOld; + + if(old !== this.__visibilityRatio) { + if (old === 0 && this.__visibilityRatio > 0) + this.unhideChart(); + else if (old > 0 && this.__visibilityRatio === 0) + this.hideChart(); + + this.__visibilityRatioOld = this.__visibilityRatio; + } + }, + + handler: function(entries, observer) { + entries.forEach(function(entry) { + var state = NETDATA.chartState(entry.target); + + var idx; + if(entry.intersectionRatio > 0) { + idx = NETDATA.intersectionObserver.visible_targets.indexOf(state); + if(idx === -1) { + if(NETDATA.scrollUp === true) + NETDATA.intersectionObserver.visible_targets.push(state); + else + NETDATA.intersectionObserver.visible_targets.unshift(state); + } + else if(state.__visibilityRatio === 0) + state.log("was not visible until now, but was already in visible_targets"); + } + else { + idx = NETDATA.intersectionObserver.visible_targets.indexOf(state); + if(idx !== -1) + NETDATA.intersectionObserver.visible_targets.splice(idx, 1); + else if(state.__visibilityRatio > 0) + state.log("was visible, but not found in visible_targets"); + } + + state.__visibilityRatio = entry.intersectionRatio; + + if(NETDATA.options.current.async_on_scroll === false) { + if(window.requestIdleCallback) + window.requestIdleCallback(function() { + NETDATA.intersectionObserver.switchChartVisibility.call(state); + }, {timeout: 100}); + else + NETDATA.intersectionObserver.switchChartVisibility.call(state); + } + }); + }, + + observe: function(state) { + if(this.enabled() === true) { + state.__visibilityRatioOld = 0; + state.__visibilityRatio = 0; + this.observer.observe(state.element); + + state.isVisible = function() { + if(NETDATA.options.current.update_only_visible === false) + return true; + + NETDATA.intersectionObserver.switchChartVisibility.call(this); + + return this.__visibilityRatio > 0; + } + } + }, + + init: function() { + if(typeof netdataIntersectionObserver === 'undefined' || netdataIntersectionObserver === true) { + try { + this.observer = new IntersectionObserver(this.handler, this.options); + } + catch (e) { + console.log("IntersectionObserver is not supported on this browser"); + this.observer = null; + } + } + //else { + // console.log("IntersectionObserver is disabled"); + //} + } + }; + NETDATA.intersectionObserver.init(); + + // ---------------------------------------------------------------------------------------------------------------- + // Our state object, where all per-chart values are stored + + var chartState = function(element) { + this.element = element; + + // IMPORTANT: + // all private functions should use 'that', instead of 'this' + var that = this; + + // ============================================================================================================ + // ERROR HANDLING + + /* error() - private + * show an error instead of the chart + */ + var error = function(msg) { + var ret = true; + + if(typeof 
netdataErrorCallback === 'function') { + ret = netdataErrorCallback('chart', that.id, msg); + } + + if(ret) { + that.element.innerHTML = that.id + ': ' + msg; + that.enabled = false; + that.current = that.pan; + } + }; + + // console logging + this.log = function(msg) { + console.log(this.id + ' (' + this.library_name + ' ' + this.uuid + '): ' + msg); + }; + + + // ============================================================================================================ + // EARLY INITIALIZATION + + // These are variables that should exist even if the chart is never to be rendered. + // Be careful what you add here - there may be thousands of charts on the page. + + // GUID - a unique identifier for the chart + this.uuid = NETDATA.guid(); + + // string - the name of the chart + this.id = NETDATA.dataAttribute(this.element, 'netdata', undefined); + if(typeof this.id === 'undefined') { + error("netdata elements need data-netdata"); + return; + } + + // string - the key for localStorage settings + this.settings_id = NETDATA.dataAttribute(this.element, 'id', null); + + // the user-given dimensions of the element + this.width = NETDATA.dataAttribute(this.element, 'width', NETDATA.chartDefaults.width); + this.height = NETDATA.dataAttribute(this.element, 'height', NETDATA.chartDefaults.height); + this.height_original = this.height; + + if(this.settings_id !== null) { + this.height = NETDATA.localStorageGet('chart_heights.' + this.settings_id, this.height, function(height) { + // this is the callback that will be called + // if and when the user resets all localStorage variables + // to their defaults + + resizeChartToHeight(height); + }); + } + + // the chart library requested by the user + this.library_name = NETDATA.dataAttribute(this.element, 'chart-library', NETDATA.chartDefaults.library); + + // check that the requested library is available + // we don't initialize it here - it will be initialized when + // this chart is first used + if(typeof NETDATA.chartLibraries[this.library_name] === 'undefined') { + NETDATA.error(402, this.library_name); + error('chart library "' + this.library_name + '" is not found'); + this.enabled = false; + } + else if(NETDATA.chartLibraries[this.library_name].enabled === false) { + NETDATA.error(403, this.library_name); + error('chart library "' + this.library_name + '" is not enabled'); + this.enabled = false; + } + else + this.library = NETDATA.chartLibraries[this.library_name]; + + this.auto = { + name: 'auto', + autorefresh: true, + force_update_at: 0, // the timestamp to force the update at + force_before_ms: null, + force_after_ms: null + }; + this.pan = { + name: 'pan', + autorefresh: false, + force_update_at: 0, // the timestamp to force the update at + force_before_ms: null, + force_after_ms: null + }; + this.zoom = { + name: 'zoom', + autorefresh: false, + force_update_at: 0, // the timestamp to force the update at + force_before_ms: null, + force_after_ms: null + }; + + // this is a pointer to one of the mode objects above + // (auto, pan, zoom) + this.current = this.auto; + + this.running = false; // boolean - true when the chart is being refreshed now + this.enabled = true; // boolean - is the chart enabled for refresh?
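+ + // Example markup (illustrative only - the chart id and sizes are + // hypothetical sample values for the data attributes read above): + // + //     <div data-netdata="system.cpu" + //          data-chart-library="dygraph" + //          data-width="100%" + //          data-height="200px"></div>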
+ + this.force_update_every = null; // number - overwrite the visualization update frequency of the chart + + this.tmp = {}; + + this.foreign_element_before = null; + this.foreign_element_after = null; + this.foreign_element_duration = null; + this.foreign_element_update_every = null; + this.foreign_element_selection = null; + + // ============================================================================================================ + // PRIVATE FUNCTIONS + + // reset the runtime status variables to their defaults + var runtimeInit = function() { + that.paused = false; // boolean - is the chart paused for any reason? + that.selected = false; // boolean - is the chart showing a selection? + + that.chart_created = false; // boolean - has library.create() been called? + that.dom_created = false; // boolean - has the chart DOM been created? + that.fetching_data = false; // boolean - true while we fetch data via ajax + + that.updates_counter = 0; // numeric - the number of refreshes made so far + that.updates_since_last_unhide = 0; // numeric - the number of refreshes made since the last time the chart was unhidden + that.updates_since_last_creation = 0; // numeric - the number of refreshes made since the last time the chart was created + + that.tm = { + last_initialized: 0, // milliseconds - the timestamp it was last initialized + last_dom_created: 0, // milliseconds - the timestamp its DOM was last created + last_mode_switch: 0, // milliseconds - the timestamp it switched modes + + last_info_downloaded: 0, // milliseconds - the timestamp we downloaded the chart + last_updated: 0, // the timestamp the chart last updated with data + pan_and_zoom_seq: 0, // the sequence number of the global synchronization + // between charts. + // Used with NETDATA.globalPanAndZoom.seq + last_visible_check: 0, // the time we last checked if it is visible + last_resized: 0, // the time the chart was resized + last_hidden: 0, // the time the chart was hidden + last_unhidden: 0, // the time the chart was unhidden + last_autorefreshed: 0 // the time the chart was last refreshed + }; + + that.data = null; // the last data as downloaded from the netdata server + that.data_url = 'invalid://'; // string - the last url used to update the chart + that.data_points = 0; // number - the number of points returned from netdata + that.data_after = 0; // milliseconds - the first timestamp of the data + that.data_before = 0; // milliseconds - the last timestamp of the data + that.data_update_every = 0; // milliseconds - the frequency to update the data + + that.tmp = {}; // members that can be destroyed to save memory + }; + + // initialize all the variables that are required for the chart to be rendered + var lateInitialization = function() { + if(typeof that.host !== 'undefined') + return; + + // string - the netdata server URL, without any path + that.host = NETDATA.dataAttribute(that.element, 'host', NETDATA.serverDefault); + + // make sure the host does not end with / + // all netdata API requests use absolute paths + while(that.host.slice(-1) === '/') + that.host = that.host.substring(0, that.host.length - 1); + + // string - the grouping method requested by the user + that.method = NETDATA.dataAttribute(that.element, 'method', NETDATA.chartDefaults.method); + that.gtime = NETDATA.dataAttribute(that.element, 'gtime', 0); + + // the time-range requested by the user + that.after = NETDATA.dataAttribute(that.element, 'after', NETDATA.chartDefaults.after); + that.before = NETDATA.dataAttribute(that.element, 'before',
NETDATA.chartDefaults.before); + + // the pixels per point requested by the user + that.pixels_per_point = NETDATA.dataAttribute(that.element, 'pixels-per-point', 1); + that.points = NETDATA.dataAttribute(that.element, 'points', null); + + // the forced update_every + that.force_update_every = NETDATA.dataAttribute(that.element, 'update-every', null); + if(typeof that.force_update_every !== 'number' || that.force_update_every <= 1) { + if(that.force_update_every !== null) + that.log('ignoring invalid value of property data-update-every'); + + that.force_update_every = null; + } + else + that.force_update_every *= 1000; + + // the dimensions requested by the user + that.dimensions = NETDATA.encodeURIComponent(NETDATA.dataAttribute(that.element, 'dimensions', null)); + + that.title = NETDATA.dataAttribute(that.element, 'title', null); // the title of the chart + that.units = NETDATA.dataAttribute(that.element, 'units', null); // the units of the chart dimensions + that.units_desired = NETDATA.dataAttribute(that.element, 'desired-units', NETDATA.options.current.units); // the units desired by the user + that.units_current = that.units; + that.units_common = NETDATA.dataAttribute(that.element, 'common-units', null); + + // additional options to pass to netdata + that.append_options = NETDATA.encodeURIComponent(NETDATA.dataAttribute(that.element, 'append-options', null)); + + // override options to pass to netdata + that.override_options = NETDATA.encodeURIComponent(NETDATA.dataAttribute(that.element, 'override-options', null)); + + that.debug = NETDATA.dataAttributeBoolean(that.element, 'debug', false); + + that.value_decimal_detail = -1; + var d = NETDATA.dataAttribute(that.element, 'decimal-digits', -1); + if(typeof d === 'number') + that.value_decimal_detail = d; + else if(typeof d !== 'undefined') + that.log('ignoring decimal-digits value: ' + d.toString()); + + // if we need to report the rendering speed + // find the element that needs to be updated + var refresh_dt_element_name = NETDATA.dataAttribute(that.element, 'dt-element-name', null); // string - the element to print refresh_dt_ms + + if(refresh_dt_element_name !== null) { + that.refresh_dt_element = document.getElementById(refresh_dt_element_name) || null; + } + else + that.refresh_dt_element = null; + + that.dimensions_visibility = new dimensionsVisibility(that); + + that.netdata_first = 0; // milliseconds - the first timestamp in netdata + that.netdata_last = 0; // milliseconds - the last timestamp in netdata + that.requested_after = null; // milliseconds - the timestamp of the request after param + that.requested_before = null; // milliseconds - the timestamp of the request before param + that.requested_padding = null; + that.view_after = 0; + that.view_before = 0; + + that.refresh_dt_ms = 0; // milliseconds - the time the last refresh took + + // how many retries we have made to load chart data from the server + that.retries_on_data_failures = 0; + + // color management + that.colors = null; + that.colors_assigned = null; + that.colors_available = null; + that.colors_custom = null; + + that.element_message = null; // the element already created by the user + that.element_chart = null; // the element with the chart + that.element_legend = null; // the element with the legend of the chart (if created by us) + that.element_legend_childs = { + content: null, + hidden: null, + title_date: null, + title_time: null, + title_units: null, + perfect_scroller: null, // the container to apply perfect scroller to + series: null + }; +
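+ // Example (illustrative only - 'my-before' is a hypothetical element id): + // the 'show-*-at' data attributes resolved below mirror chart state into + // arbitrary page elements: + // + //     <div data-netdata="system.cpu" data-show-before-at="my-before"></div> + //     <span id="my-before"></span> +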
+ that.chart_url = null; // string - the url to download chart info + that.chart = null; // object - the chart as downloaded from the server + + function get_foreign_element_by_id(opt) { + var id = NETDATA.dataAttribute(that.element, opt, null); + if(id === null) { + //that.log('option "' + opt + '" is undefined'); + return null; + } + + var el = document.getElementById(id); + if(el === null) { + that.log('cannot find an element with id "' + id.toString() + '"'); + return null; + } + + return el; + } + + that.foreign_element_before = get_foreign_element_by_id('show-before-at'); + that.foreign_element_after = get_foreign_element_by_id('show-after-at'); + that.foreign_element_duration = get_foreign_element_by_id('show-duration-at'); + that.foreign_element_update_every = get_foreign_element_by_id('show-update-every-at'); + that.foreign_element_selection = get_foreign_element_by_id('show-selection-at'); + }; + + var destroyDOM = function() { + if(that.enabled === false) return; + + if(that.debug === true) + that.log('destroyDOM()'); + + // that.element.className = 'netdata-message icon'; + // that.element.innerHTML = ' netdata'; + that.element.innerHTML = ''; + that.element_message = null; + that.element_legend = null; + that.element_chart = null; + that.element_legend_childs.series = null; + + that.chart_created = false; + that.dom_created = false; + + that.tm.last_resized = 0; + that.tm.last_dom_created = 0; + }; + + var createDOM = function() { + if(that.enabled === false) return; + lateInitialization(); + + destroyDOM(); + + if(that.debug === true) + that.log('createDOM()'); + + that.element_message = document.createElement('div'); + that.element_message.className = 'netdata-message icon hidden'; + that.element.appendChild(that.element_message); + + that.dom_created = true; + that.chart_created = false; + + that.tm.last_dom_created = + that.tm.last_resized = Date.now(); + + showLoading(); + }; + + var initDOM = function() { + that.element.className = that.library.container_class(that); + + if(typeof(that.width) === 'string') + that.element.style.width = that.width; + else if(typeof(that.width) === 'number') + that.element.style.width = that.width.toString() + 'px'; + + if(typeof(that.library.aspect_ratio) === 'undefined') { + if(typeof(that.height) === 'string') + that.element.style.height = that.height; + else if(typeof(that.height) === 'number') + that.element.style.height = that.height.toString() + 'px'; + } + + if(NETDATA.chartDefaults.min_width !== null) + that.element.style.min_width = NETDATA.chartDefaults.min_width; + }; + + var invisibleSearchableText = function() { + return '' + that.id + ''; + }; + + /* init() private + * initialize state variables + * destroy all (possibly) created state elements + * create the basic DOM for a chart + */ + var init = function(opt) { + if(that.enabled === false) return; + + runtimeInit(); + that.element.innerHTML = invisibleSearchableText(); + + that.tm.last_initialized = Date.now(); + that.setMode('auto'); + + if(opt !== 'fast') { + if (that.isVisible(true) || opt === 'force') + createDOM(); + } + }; + + var maxMessageFontSize = function() { + var screenHeight = screen.height; + var el = that.element; + + // normally we want a font size, as tall as the element + var h = el.clientHeight; + + // but give it some air, 20% let's say, or 5 pixels min + var lost = Math.max(h * 0.2, 5); + h -= lost; + + // center the text, vertically + var paddingTop = (lost - 5) / 2; + + // but check the width too + // it should fit 10 characters in
it + var w = el.clientWidth / 10; + if(h > w) { + paddingTop += (h - w) / 2; + h = w; + } + + // and don't make it too huge + // 5% of the screen size is good + if(h > screenHeight / 20) { + paddingTop += (h - (screenHeight / 20)) / 2; + h = screenHeight / 20; + } + + // set it + that.element_message.style.fontSize = h.toString() + 'px'; + that.element_message.style.paddingTop = paddingTop.toString() + 'px'; + }; + + var showMessageIcon = function(icon) { + that.element_message.innerHTML = icon; + maxMessageFontSize(); + $(that.element_message).removeClass('hidden'); + that.tmp.___messageHidden___ = undefined; + }; + + var hideMessage = function() { + if(typeof that.tmp.___messageHidden___ === 'undefined') { + that.tmp.___messageHidden___ = true; + $(that.element_message).addClass('hidden'); + } + }; + + var showRendering = function() { + var icon; + if(that.chart !== null) { + if(that.chart.chart_type === 'line') + icon = NETDATA.icons.lineChart; + else + icon = NETDATA.icons.areaChart; + } + else + icon = NETDATA.icons.noChart; + + showMessageIcon(icon + ' netdata' + invisibleSearchableText()); + }; + + var showLoading = function() { + if(that.chart_created === false) { + showMessageIcon(NETDATA.icons.loading + ' netdata'); + return true; + } + return false; + }; + + var isHidden = function() { + return (typeof that.tmp.___chartIsHidden___ !== 'undefined'); + }; + + // hide the chart, when it is not visible - called from isVisible() + this.hideChart = function() { + // hide it, if it is not already hidden + if(isHidden() === true) return; + + if(this.chart_created === true) { + if(NETDATA.options.current.show_help === true) { + if(this.element_legend_childs.toolbox !== null) { + if(this.debug === true) + this.log('hideChart(): hiding legend popovers'); + + $(this.element_legend_childs.toolbox_left).popover('hide'); + $(this.element_legend_childs.toolbox_reset).popover('hide'); + $(this.element_legend_childs.toolbox_right).popover('hide'); + $(this.element_legend_childs.toolbox_zoomin).popover('hide'); + $(this.element_legend_childs.toolbox_zoomout).popover('hide'); + } + + if(this.element_legend_childs.resize_handler !== null) + $(this.element_legend_childs.resize_handler).popover('hide'); + + if(this.element_legend_childs.content !== null) + $(this.element_legend_childs.content).popover('hide'); + } + + if(NETDATA.options.current.destroy_on_hide === true) { + if(this.debug === true) + this.log('hideChart(): initializing chart'); + + // we should destroy it + init('force'); + } + else { + if(this.debug === true) + this.log('hideChart(): hiding chart'); + + showRendering(); + this.element_chart.style.display = 'none'; + this.element.style.willChange = 'auto'; + if(this.element_legend !== null) this.element_legend.style.display = 'none'; + if(this.element_legend_childs.toolbox !== null) this.element_legend_childs.toolbox.style.display = 'none'; + if(this.element_legend_childs.resize_handler !== null) this.element_legend_childs.resize_handler.style.display = 'none'; + + this.tm.last_hidden = Date.now(); + + // de-allocate data + // This works, but I'm not sure there are no corner cases somewhere + // so it is commented - if the user has memory issues they can + // set Destroy on Hide for all charts + // this.data = null; + } + } + + this.tmp.___chartIsHidden___ = true; + }; + + // unhide the chart, when it is visible - called from isVisible() + this.unhideChart = function() { + if(isHidden() === false) return; + + this.tmp.___chartIsHidden___ = undefined; + this.updates_since_last_unhide = 0;
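+ + // note: resetting updates_since_last_unhide above is what lets + // canBeRendered() below allow one refresh for a freshly unhidden chart, + // even when the page has lost focus and stop_updates_when_focus_is_lost + // is enabled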
+ + if(this.chart_created === false) { + if(this.debug === true) + this.log('unhideChart(): initializing chart'); + + // we need to re-initialize it, to show our background + // logo in bootstrap tabs, until the chart loads + init('force'); + } + else { + if(this.debug === true) + this.log('unhideChart(): unhiding chart'); + + this.element.style.willChange = 'transform'; + this.tm.last_unhidden = Date.now(); + this.element_chart.style.display = ''; + if(this.element_legend !== null) this.element_legend.style.display = ''; + if(this.element_legend_childs.toolbox !== null) this.element_legend_childs.toolbox.style.display = ''; + if(this.element_legend_childs.resize_handler !== null) this.element_legend_childs.resize_handler.style.display = ''; + resizeChart(); + hideMessage(); + } + + if(this.__redraw_on_unhide === true) { + + if(this.debug === true) + this.log("redrawing chart on unhide"); + + this.__redraw_on_unhide = undefined; + this.redrawChart(); + } + }; + + var canBeRendered = function(uncached_visibility) { + if(that.debug === true) + that.log('canBeRendered() called'); + + if(NETDATA.options.current.update_only_visible === false) + return true; + + var ret = ( + ( + NETDATA.options.page_is_visible === true || + NETDATA.options.current.stop_updates_when_focus_is_lost === false || + that.updates_since_last_unhide === 0 + ) + && isHidden() === false && that.isVisible(uncached_visibility) === true + ); + + if(that.debug === true) + that.log('canBeRendered(): ' + ret); + + return ret; + }; + + // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers + var callChartLibraryUpdateSafely = function(data) { + var status; + + // we should not do this here + // if we prevent rendering the chart then: + // 1. globalSelectionSync will be wrong + // 2. globalPanAndZoom will be wrong + //if(canBeRendered(true) === false) + // return false; + + if(NETDATA.options.fake_chart_rendering === true) + return true; + + that.updates_counter++; + that.updates_since_last_unhide++; + that.updates_since_last_creation++; + + if(NETDATA.options.debug.chart_errors === true) + status = that.library.update(that, data); + else { + try { + status = that.library.update(that, data); + } + catch(err) { + status = false; + } + } + + if(status === false) { + error('chart failed to be updated as ' + that.library_name); + return false; + } + + return true; + }; + + // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers + var callChartLibraryCreateSafely = function(data) { + var status; + + // we should not do this here + // if we prevent rendering the chart then: + // 1. globalSelectionSync will be wrong + // 2. 
globalPanAndZoom will be wrong + //if(canBeRendered(true) === false) + // return false; + + if(NETDATA.options.fake_chart_rendering === true) + return true; + + that.updates_counter++; + that.updates_since_last_unhide++; + that.updates_since_last_creation++; + + if(NETDATA.options.debug.chart_errors === true) + status = that.library.create(that, data); + else { + try { + status = that.library.create(that, data); + } + catch(err) { + status = false; + } + } + + if(status === false) { + error('chart failed to be created as ' + that.library_name); + return false; + } + + that.chart_created = true; + that.updates_since_last_creation = 0; + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Chart Resize + + // resizeChart() - private + // to be called just before the chart library to make sure that + // a properly sized dom is available + var resizeChart = function() { + if(that.tm.last_resized < NETDATA.options.last_page_resize) { + if(that.chart_created === false) return; + + if(that.needsRecreation()) { + if(that.debug === true) + that.log('resizeChart(): initializing chart'); + + init('force'); + } + else if(typeof that.library.resize === 'function') { + if(that.debug === true) + that.log('resizeChart(): resizing chart'); + + that.library.resize(that); + + if(that.element_legend_childs.perfect_scroller !== null) + Ps.update(that.element_legend_childs.perfect_scroller); + + maxMessageFontSize(); + } + + that.tm.last_resized = Date.now(); + } + }; + + // this is the actual chart resize algorithm + // it will: + // - resize the entire container + // - update the internal states + // - resize the chart as the div changes height + // - update the scrollbar of the legend + var resizeChartToHeight = function(h) { + // console.log(h); + that.element.style.height = h; + + if(that.settings_id !== null) + NETDATA.localStorageSet('chart_heights.' 
+ that.settings_id, h); + + var now = Date.now(); + NETDATA.options.last_page_scroll = now; + NETDATA.options.auto_refresher_stop_until = now + NETDATA.options.current.stop_updates_while_resizing; + + // force a resize + that.tm.last_resized = 0; + resizeChart(); + }; + + this.resizeForPrint = function() { + if(typeof this.element_legend_childs !== 'undefined' && this.element_legend_childs.perfect_scroller !== null) { + var current = this.element.clientHeight; + var optimal = current + + this.element_legend_childs.perfect_scroller.scrollHeight + - this.element_legend_childs.perfect_scroller.clientHeight; + + if(optimal > current) { + // this.log('resized'); + this.element.style.height = optimal + 'px'; + this.library.resize(this); + } + } + }; + + this.resizeHandler = function(e) { + e.preventDefault(); + + if(typeof this.event_resize === 'undefined' + || typeof this.event_resize.chart_original_w === 'undefined' + || typeof this.event_resize.chart_original_h === 'undefined') + this.event_resize = { + chart_original_w: this.element.clientWidth, + chart_original_h: this.element.clientHeight, + last: 0 + }; + + if(e.type === 'touchstart') { + this.event_resize.mouse_start_x = e.touches.item(0).pageX; + this.event_resize.mouse_start_y = e.touches.item(0).pageY; + } + else { + this.event_resize.mouse_start_x = e.clientX; + this.event_resize.mouse_start_y = e.clientY; + } + + this.event_resize.chart_start_w = this.element.clientWidth; + this.event_resize.chart_start_h = this.element.clientHeight; + this.event_resize.chart_last_w = this.element.clientWidth; + this.event_resize.chart_last_h = this.element.clientHeight; + + var now = Date.now(); + if(now - this.event_resize.last <= NETDATA.options.current.double_click_speed && this.element_legend_childs.perfect_scroller !== null) { + // double click / double tap event + + // console.dir(this.element_legend_childs.content); + // console.dir(this.element_legend_childs.perfect_scroller); + + // the optimal height of the chart + // showing the entire legend + var optimal = this.event_resize.chart_last_h + + this.element_legend_childs.perfect_scroller.scrollHeight + - this.element_legend_childs.perfect_scroller.clientHeight; + + // if we are not optimal, be optimal + if(this.event_resize.chart_last_h !== optimal) { + // this.log('resize to optimal, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString()); + resizeChartToHeight(optimal.toString() + 'px'); + } + + // else if the current height is not the original/saved height + // reset to the original/saved height + else if(this.event_resize.chart_last_h !== this.event_resize.chart_original_h) { + // this.log('resize to original, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString()); + resizeChartToHeight(this.event_resize.chart_original_h.toString() + 'px'); + } + + // else if the current height is not the internal default height + // reset to the internal default height + else if((this.event_resize.chart_last_h.toString() + 'px') !== this.height_original) { + // this.log('resize to internal default, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' +
this.height_original.toString()); + resizeChartToHeight(this.height_original.toString()); + } + + // else if the current height is not the firstChild's clientHeight + // resize to it + else if(typeof this.element_legend_childs.perfect_scroller.firstChild !== 'undefined') { + var parent_rect = this.element.getBoundingClientRect(); + var content_rect = this.element_legend_childs.perfect_scroller.firstElementChild.getBoundingClientRect(); + var wanted = content_rect.top - parent_rect.top + this.element_legend_childs.perfect_scroller.firstChild.clientHeight + 18; // 18 = 15 (toolbox) + 3 (spacing) + + // console.log(parent_rect); + // console.log(content_rect); + // console.log(wanted); + + // this.log('resize to firstChild, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString() + 'px, firstChild = ' + wanted.toString() + 'px' ); + if(this.event_resize.chart_last_h !== wanted) + resizeChartToHeight(wanted.toString() + 'px'); + } + } + else { + this.event_resize.last = now; + + // process movement event + document.onmousemove = + document.ontouchmove = + this.element_legend_childs.resize_handler.onmousemove = + this.element_legend_childs.resize_handler.ontouchmove = + function(e) { + var y = null; + + switch(e.type) { + case 'mousemove': y = e.clientY; break; + case 'touchmove': y = e.touches.item(e.touches.length - 1).pageY; break; + } + + if(y !== null) { + var newH = that.event_resize.chart_start_h + y - that.event_resize.mouse_start_y; + + if(newH >= 70 && newH !== that.event_resize.chart_last_h) { + resizeChartToHeight(newH.toString() + 'px'); + that.event_resize.chart_last_h = newH; + } + } + }; + + // process end event + document.onmouseup = + document.ontouchend = + this.element_legend_childs.resize_handler.onmouseup = + this.element_legend_childs.resize_handler.ontouchend = + function(e) { + void(e); + + // remove all the hooks + document.onmouseup = + document.onmousemove = + document.ontouchmove = + document.ontouchend = + that.element_legend_childs.resize_handler.onmousemove = + that.element_legend_childs.resize_handler.ontouchmove = + that.element_legend_childs.resize_handler.onmouseout = + that.element_legend_childs.resize_handler.onmouseup = + that.element_legend_childs.resize_handler.ontouchend = + null; + + // allow auto-refreshes + NETDATA.options.auto_refresher_stop_until = 0; + }; + } + }; + + + var noDataToShow = function() { + showMessageIcon(NETDATA.icons.noData + ' empty'); + that.legendUpdateDOM(); + that.tm.last_autorefreshed = Date.now(); + // that.data_update_every = 30 * 1000; + //that.element_chart.style.display = 'none'; + //if(that.element_legend !== null) that.element_legend.style.display = 'none'; + //that.tmp.___chartIsHidden___ = true; + }; + + // ============================================================================================================ + // PUBLIC FUNCTIONS + + this.error = function(msg) { + error(msg); + }; + + this.setMode = function(m) { + if(this.current !== null && this.current.name === m) return; + + if(m === 'auto') + this.current = this.auto; + else if(m === 'pan') + this.current = this.pan; + else if(m === 'zoom') + this.current = this.zoom; + else + this.current = this.auto; + + this.current.force_update_at = 0; + this.current.force_before_ms = null; + this.current.force_after_ms = null; + + this.tm.last_mode_switch = Date.now(); + }; + + //
---------------------------------------------------------------------------------------------------------------- + // global selection sync for slaves + + // can the chart participate in the global selection sync as a slave? + this.globalSelectionSyncIsEligible = function() { + return (this.enabled === true + && this.library !== null + && typeof this.library.setSelection === 'function' + && this.isVisible() === true + && this.chart_created === true); + }; + + this.setSelection = function(t) { + if(typeof this.library.setSelection === 'function') + this.selected = (this.library.setSelection(this, t) === true); + else + this.selected = true; + + if(this.selected === true && this.debug === true) + this.log('selection set to ' + t.toString()); + + if (this.foreign_element_selection !== null) + this.foreign_element_selection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t); + + return this.selected; + }; + + this.clearSelection = function() { + if(this.selected === true) { + if(typeof this.library.clearSelection === 'function') + this.selected = (this.library.clearSelection(this) !== true); + else + this.selected = false; + + if(this.selected === false && this.debug === true) + this.log('selection cleared'); + + if (this.foreign_element_selection !== null) + this.foreign_element_selection.innerText = ''; + + this.legendReset(); + } + + return this.selected; + }; + + // ---------------------------------------------------------------------------------------------------------------- + + // find if a timestamp (ms) is shown in the current chart + this.timeIsVisible = function(t) { + return (t >= this.data_after && t <= this.data_before); + }; + + this.calculateRowForTime = function(t) { + if(this.timeIsVisible(t) === false) return -1; + return Math.floor((t - this.data_after) / this.data_update_every); + }; + + // ---------------------------------------------------------------------------------------------------------------- + + this.pauseChart = function() { + if(this.paused === false) { + if(this.debug === true) + this.log('pauseChart()'); + + this.paused = true; + } + }; + + this.unpauseChart = function() { + if(this.paused === true) { + if(this.debug === true) + this.log('unpauseChart()'); + + this.paused = false; + } + }; + + this.resetChart = function(dont_clear_master, dont_update) { + if(this.debug === true) + this.log('resetChart(' + dont_clear_master + ', ' + dont_update + ') called'); + + if(typeof dont_clear_master === 'undefined') + dont_clear_master = false; + + if(typeof dont_update === 'undefined') + dont_update = false; + + if(dont_clear_master !== true && NETDATA.globalPanAndZoom.isMaster(this) === true) { + if(this.debug === true) + this.log('resetChart() diverting to clearMaster().'); + // this will call us back with master === true + NETDATA.globalPanAndZoom.clearMaster(); + return; + } + + this.clearSelection(); + + this.tm.pan_and_zoom_seq = 0; + + this.setMode('auto'); + this.current.force_update_at = 0; + this.current.force_before_ms = null; + this.current.force_after_ms = null; + this.tm.last_autorefreshed = 0; + this.paused = false; + this.selected = false; + this.enabled = true; + // this.debug = false; + + // do not update the chart here + // or the chart will flip-flop when it is the master + // of a selection sync and another chart becomes + // the new master + + if(dont_update !== true && this.isVisible() === true) { + this.updateChart(); + } + }; + + this.updateChartPanOrZoom = function(after, before, callback) { + var
logme = 'updateChartPanOrZoom(' + after + ', ' + before + '): '; + var ret = true; + + NETDATA.globalPanAndZoom.delay(); + NETDATA.globalSelectionSync.delay(); + + if(this.debug === true) + this.log(logme); + + if(before < after) { + if(this.debug === true) + this.log(logme + 'flipped parameters, rejecting it.'); + + return false; + } + + if(typeof this.fixed_min_duration === 'undefined') + this.fixed_min_duration = Math.round((this.chartWidth() / 30) * this.chart.update_every * 1000); + + var min_duration = this.fixed_min_duration; + var current_duration = Math.round(this.view_before - this.view_after); + + // round the numbers + after = Math.round(after); + before = Math.round(before); + + // align them to update_every + // stretching them further away + after -= after % this.data_update_every; + before += this.data_update_every - (before % this.data_update_every); + + // the final wanted duration + var wanted_duration = before - after; + + // to allow panning, accept just a point below our minimum + if((current_duration - this.data_update_every) < min_duration) + min_duration = current_duration - this.data_update_every; + + // we do it, but we adjust to minimum size and return false + // when the wanted size is below the current and the minimum + // and we zoom + if(wanted_duration < current_duration && wanted_duration < min_duration) { + if(this.debug === true) + this.log(logme + 'too small: min_duration: ' + (min_duration / 1000).toString() + ', wanted: ' + (wanted_duration / 1000).toString()); + + min_duration = this.fixed_min_duration; + + var dt = (min_duration - wanted_duration) / 2; + before += dt; + after -= dt; + wanted_duration = before - after; + ret = false; + } + + var tolerance = this.data_update_every * 2; + var movement = Math.abs(before - this.view_before); + + if(Math.abs(current_duration - wanted_duration) <= tolerance && movement <= tolerance && ret === true) { + if(this.debug === true) + this.log(logme + 'REJECTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + false); + return false; + } + + if(this.current.name === 'auto') { + this.log(logme + 'caller called me with mode: ' + this.current.name); + this.setMode('pan'); + } + + if(this.debug === true) + this.log(logme + 'ACCEPTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + ret); + + this.current.force_update_at = Date.now() + NETDATA.options.current.pan_and_zoom_delay; + this.current.force_after_ms = after; + this.current.force_before_ms = before; + NETDATA.globalPanAndZoom.setMaster(this, after, before); + + if(ret === true && typeof callback === 'function') + callback(); + + return ret; + }; + + this.updateChartPanOrZoomAsyncTimeOutId = undefined; + this.updateChartPanOrZoomAsync = function(after, before, callback) { + NETDATA.globalPanAndZoom.delay(); + NETDATA.globalSelectionSync.delay(); + + 
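// note: the actual pan/zoom work is deferred below via + // NETDATA.timeout.set(..., 0), so a burst of mouse/touch events results + // in at most one pending updateChartPanOrZoom() call at any time + +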
if(NETDATA.globalPanAndZoom.isMaster(this) === false) { + this.pauseChart(); + NETDATA.globalPanAndZoom.setMaster(this, after, before); + // NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.setMaster(this); + } + + if(this.updateChartPanOrZoomAsyncTimeOutId) + NETDATA.timeout.clear(this.updateChartPanOrZoomAsyncTimeOutId); + + NETDATA.timeout.set(function() { + that.updateChartPanOrZoomAsyncTimeOutId = undefined; + that.updateChartPanOrZoom(after, before, callback); + }, 0); + }; + + var __unitsConversionLastUnits = undefined; + var __unitsConversionLastUnitsDesired = undefined; + var __unitsConversionLastMin = undefined; + var __unitsConversionLastMax = undefined; + var __unitsConversion = function(value) { return value; }; + this.unitsConversionSetup = function(min, max) { + if(this.units !== __unitsConversionLastUnits + || this.units_desired !== __unitsConversionLastUnitsDesired + || min !== __unitsConversionLastMin + || max !== __unitsConversionLastMax) { + + __unitsConversionLastUnits = this.units; + __unitsConversionLastUnitsDesired = this.units_desired; + __unitsConversionLastMin = min; + __unitsConversionLastMax = max; + + __unitsConversion = NETDATA.unitsConversion.get(this.uuid, min, max, this.units, this.units_desired, this.units_common, function (units) { + // console.log('switching units from ' + that.units.toString() + ' to ' + units.toString()); + that.units_current = units; + that.legendSetUnitsString(that.units_current); + }); + } + }; + + var __legendFormatValueChartDecimalsLastMin = undefined; + var __legendFormatValueChartDecimalsLastMax = undefined; + var __legendFormatValueChartDecimals = -1; + var __intlNumberFormat = null; + this.legendFormatValueDecimalsFromMinMax = function(min, max) { + if(min === __legendFormatValueChartDecimalsLastMin && max === __legendFormatValueChartDecimalsLastMax) + return; + + this.unitsConversionSetup(min, max); + if(__unitsConversion !== null) { + min = __unitsConversion(min); + max = __unitsConversion(max); + + if(typeof min !== 'number' || typeof max !== 'number') + return; + } + + __legendFormatValueChartDecimalsLastMin = min; + __legendFormatValueChartDecimalsLastMax = max; + + var old = __legendFormatValueChartDecimals; + + if(this.data !== null && this.data.min === this.data.max) + // it is a fixed number, let the visualizer decide based on the value + __legendFormatValueChartDecimals = -1; + + else if(this.value_decimal_detail !== -1) + // there is an override + __legendFormatValueChartDecimals = this.value_decimal_detail; + + else { + // ok, let's calculate the proper number of decimal points + var delta; + + if (min === max) + delta = Math.abs(min); + else + delta = Math.abs(max - min); + + if (delta > 1000) __legendFormatValueChartDecimals = 0; + else if (delta > 10) __legendFormatValueChartDecimals = 1; + else if (delta > 1) __legendFormatValueChartDecimals = 2; + else if (delta > 0.1) __legendFormatValueChartDecimals = 2; + else if (delta > 0.01) __legendFormatValueChartDecimals = 4; + else if (delta > 0.001) __legendFormatValueChartDecimals = 5; + else if (delta > 0.0001) __legendFormatValueChartDecimals = 6; + else __legendFormatValueChartDecimals = 7; + } + + if(__legendFormatValueChartDecimals !== old) { + if(__legendFormatValueChartDecimals < 0) + __intlNumberFormat = null; + else + __intlNumberFormat = NETDATA.fastNumberFormat.get( + __legendFormatValueChartDecimals, + __legendFormatValueChartDecimals + ); + } + }; + + this.legendFormatValue = function(value) { + if(typeof value !== 'number') + return 
'-'; + + value = __unitsConversion(value); + + if(typeof value !== 'number') + return value; + + if(__intlNumberFormat !== null) + return __intlNumberFormat.format(value); + + var dmin, dmax; + if(this.value_decimal_detail !== -1) { + dmin = dmax = this.value_decimal_detail; + } + else { + dmin = 0; + var abs = (value < 0) ? -value : value; + if (abs > 1000) dmax = 0; + else if (abs > 10) dmax = 1; + else if (abs > 1) dmax = 2; + else if (abs > 0.1) dmax = 2; + else if (abs > 0.01) dmax = 4; + else if (abs > 0.001) dmax = 5; + else if (abs > 0.0001) dmax = 6; + else dmax = 7; + } + + return NETDATA.fastNumberFormat.get(dmin, dmax).format(value); + }; + + this.legendSetLabelValue = function(label, value) { + var series = this.element_legend_childs.series[label]; + if(typeof series === 'undefined') return; + if(series.value === null && series.user === null) return; + + /* + // this slows down firefox and edge significantly + // since it requires innerHTML(), instead of innerText() + + // if the value has not changed, skip DOM update + //if(series.last === value) return; + + var s, r; + if(typeof value === 'number') { + var v = Math.abs(value); + s = r = this.legendFormatValue(value); + + if(typeof series.last === 'number') { + if(v > series.last) s += ''; + else if(v < series.last) s += ''; + else s += ''; + } + else s += ''; + + series.last = v; + } + else { + if(value === null) + s = r = ''; + else + s = r = value; + + series.last = value; + } + */ + + var s = this.legendFormatValue(value); + + // caching: do not update the DOM to show the same value again + if(s === series.last_shown_value) return; + series.last_shown_value = s; + + if(series.value !== null) series.value.innerText = s; + if(series.user !== null) series.user.innerText = s; + }; + + this.legendSetDateString = function(date) { + if(this.element_legend_childs.title_date !== null && date !== this.tmp.__last_shown_legend_date) { + this.element_legend_childs.title_date.innerText = date; + this.tmp.__last_shown_legend_date = date; + } + }; + + this.legendSetTimeString = function(time) { + if(this.element_legend_childs.title_time !== null && time !== this.tmp.__last_shown_legend_time) { + this.element_legend_childs.title_time.innerText = time; + this.tmp.__last_shown_legend_time = time; + } + }; + + this.legendSetUnitsString = function(units) { + if(this.element_legend_childs.title_units !== null && units !== this.tmp.__last_shown_legend_units) { + this.element_legend_childs.title_units.innerText = units; + this.tmp.__last_shown_legend_units = units; + } + }; + + this.legendSetDateLast = { + ms: 0, + date: undefined, + time: undefined + }; + + this.legendSetDate = function(ms) { + if(typeof ms !== 'number') { + this.legendShowUndefined(); + return; + } + + if(this.legendSetDateLast.ms !== ms) { + var d = new Date(ms); + this.legendSetDateLast.ms = ms; + this.legendSetDateLast.date = NETDATA.dateTime.localeDateString(d); + this.legendSetDateLast.time = NETDATA.dateTime.localeTimeString(d); + } + + this.legendSetDateString(this.legendSetDateLast.date); + this.legendSetTimeString(this.legendSetDateLast.time); + this.legendSetUnitsString(this.units_current); + }; + + this.legendShowUndefined = function() { + this.legendSetDateString(this.legendPluginModuleString(false)); + this.legendSetTimeString(this.chart.context.toString()); + // this.legendSetUnitsString(' '); + + if(this.data && this.element_legend_childs.series !== null) { + var labels = this.data.dimension_names; + var i = labels.length; + while(i--) { + var label =
labels[i]; + + if(typeof label === 'undefined' || typeof this.element_legend_childs.series[label] === 'undefined') continue; + this.legendSetLabelValue(label, null); + } + } + }; + + this.legendShowLatestValues = function() { + if(this.chart === null) return; + if(this.selected) return; + + if(this.data === null || this.element_legend_childs.series === null) { + this.legendShowUndefined(); + return; + } + + var show_undefined = true; + if(Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) + show_undefined = false; + + if(show_undefined) { + this.legendShowUndefined(); + return; + } + + this.legendSetDate(this.view_before); + + var labels = this.data.dimension_names; + var i = labels.length; + while(i--) { + var label = labels[i]; + + if(typeof label === 'undefined') continue; + if(typeof this.element_legend_childs.series[label] === 'undefined') continue; + + this.legendSetLabelValue(label, this.data.view_latest_values[i]); + } + }; + + this.legendReset = function() { + this.legendShowLatestValues(); + }; + + // this should be called just ONCE per dimension per chart + this.__chartDimensionColor = function(label) { + var c = NETDATA.commonColors.get(this, label); + + // it is important to maintain a list of colors + // for this chart only, since the chart library + // uses this to assign colors to dimensions in the same + // order the dimensions are given to it + this.colors.push(c); + + return c; + }; + + this.chartPrepareColorPalette = function() { + NETDATA.commonColors.refill(this); + }; + + // get the ordered list of chart colors + // this includes user defined colors + this.chartCustomColors = function() { + this.chartPrepareColorPalette(); + + var colors; + if(this.colors_custom.length) + colors = this.colors_custom; + else + colors = this.colors; + + if(this.debug === true) { + this.log("chartCustomColors() returns:"); + this.log(colors); + } + + return colors; + }; + + // get the ordered list of chart ASSIGNED colors + // (this returns only the colors that have been + // assigned to dimensions, prepended with any + // custom colors defined) + this.chartColors = function() { + this.chartPrepareColorPalette(); + + if(this.debug === true) { + this.log("chartColors() returns:"); + this.log(this.colors); + } + + return this.colors; + }; + + this.legendPluginModuleString = function(withContext) { + var str = ' '; + var context = ''; + + if(typeof this.chart !== 'undefined') { + if(withContext && typeof this.chart.context === 'string') + context = this.chart.context; + + if (typeof this.chart.plugin === 'string' && this.chart.plugin !== '') { + str = this.chart.plugin; + + if(str.endsWith(".plugin")) + str = str.substring(0, str.length - 7); + + if (typeof this.chart.module === 'string' && this.chart.module !== '') + str += ':' + this.chart.module; + + if (withContext && context !== '') + str += ', ' + context; + } + else if (withContext && context !== '') + str = context; + } + + return str; + }; + + this.legendResolutionTooltip = function () { + if(!this.chart) return ''; + + var collected = this.chart.update_every; + var viewed = (this.data)?this.data.view_update_every:collected; + + if(collected === viewed) + return "resolution " + NETDATA.seconds4human(collected); + + return "resolution " + NETDATA.seconds4human(viewed) + ", collected every " + NETDATA.seconds4human(collected); + }; + + this.legendUpdateDOM = function() { + var needed = false, dim, keys, len, i; + + // check that the legend DOM is up to date for the downloaded dimensions + if(typeof 
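+ // legendPluginModuleString() above condenses a chart's origin into a
+ // short label: the plugin name (any ".plugin" suffix stripped), an
+ // optional ":module" part and, when requested, a ", context" part.
+ // For example, for a chart with plugin "python.d.plugin", module
+ // "nginx" and context "nginx.requests" (illustrative values):
+ //
+ //     legendPluginModuleString(false);  // "python.d:nginx"
+ //     legendPluginModuleString(true);   // "python.d:nginx, nginx.requests"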
this.element_legend_childs.series !== 'object' || this.element_legend_childs.series === null) { + // this.log('the legend does not have any series - requesting legend update'); + needed = true; + } + else if(this.data === null) { + // this.log('the chart does not have any data - requesting legend update'); + needed = true; + } + else if(typeof this.element_legend_childs.series.labels_key === 'undefined') { + needed = true; + } + else { + var labels = this.data.dimension_names.toString(); + if(labels !== this.element_legend_childs.series.labels_key) { + needed = true; + + if(this.debug === true) + this.log('NEW LABELS: "' + labels + '" NOT EQUAL OLD LABELS: "' + this.element_legend_childs.series.labels_key + '"'); + } + } + + if(needed === false) { + // make sure colors available + this.chartPrepareColorPalette(); + + // do we have to update the current values? + // we do this, only when the visible chart is current + if(Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) { + if(this.debug === true) + this.log('chart is in latest position... updating values on legend...'); + + //var labels = this.data.dimension_names; + //var i = labels.length; + //while(i--) + // this.legendSetLabelValue(labels[i], this.data.view_latest_values[i]); + } + return; + } + + if(this.colors === null) { + // this is the first time we update the chart + // let's assign colors to all dimensions + if(this.library.track_colors() === true) { + this.colors = []; + keys = Object.keys(this.chart.dimensions); + len = keys.length; + for(i = 0; i < len ;i++) + NETDATA.commonColors.get(this, this.chart.dimensions[keys[i]].name); + } + } + + // we will re-generate the colors for the chart + // based on the dimensions this result has data for + this.colors = []; + + if(this.debug === true) + this.log('updating Legend DOM'); + + // mark all dimensions as invalid + this.dimensions_visibility.invalidateAll(); + + var genLabel = function(state, parent, dim, name, count) { + var color = state.__chartDimensionColor(name); + + var user_element = null; + var user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + name.toLowerCase() + '-at', null); + if(user_id === null) + user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + dim.toLowerCase() + '-at', null); + if(user_id !== null) { + user_element = document.getElementById(user_id) || null; + if (user_element === null) + state.log('Cannot find element with id: ' + user_id); + } + + state.element_legend_childs.series[name] = { + name: document.createElement('span'), + value: document.createElement('span'), + user: user_element, + last: null, + last_shown_value: null + }; + + var label = state.element_legend_childs.series[name]; + + // create the dimension visibility tracking for this label + state.dimensions_visibility.dimensionAdd(name, label.name, label.value, color); + + var rgb = NETDATA.colorHex2Rgb(color); + label.name.innerHTML = '
'; + + var text = document.createTextNode(' ' + name); + label.name.appendChild(text); + + if(count > 0) + parent.appendChild(document.createElement('br')); + + parent.appendChild(label.name); + parent.appendChild(label.value); + }; + + var content = document.createElement('div'); + + if(this.element_chart === null) { + this.element_chart = document.createElement('div'); + this.element_chart.id = this.library_name + '-' + this.uuid + '-chart'; + this.element.appendChild(this.element_chart); + + if(this.hasLegend() === true) + this.element_chart.className = 'netdata-chart-with-legend-right netdata-' + this.library_name + '-chart-with-legend-right'; + else + this.element_chart.className = ' netdata-chart netdata-' + this.library_name + '-chart'; + } + + if(this.hasLegend() === true) { + if(this.element_legend === null) { + this.element_legend = document.createElement('div'); + this.element_legend.className = 'netdata-chart-legend netdata-' + this.library_name + '-legend'; + this.element.appendChild(this.element_legend); + } + else + this.element_legend.innerHTML = ''; + + this.element_legend_childs = { + content: content, + resize_handler: null, + toolbox: null, + toolbox_left: null, + toolbox_right: null, + toolbox_reset: null, + toolbox_zoomin: null, + toolbox_zoomout: null, + toolbox_volume: null, + title_date: document.createElement('span'), + title_time: document.createElement('span'), + title_units: document.createElement('span'), + perfect_scroller: document.createElement('div'), + series: {} + }; + + if(NETDATA.options.current.legend_toolbox === true && this.library.toolboxPanAndZoom !== null) { + this.element_legend_childs.toolbox = document.createElement('div'); + this.element_legend_childs.toolbox_left = document.createElement('div'); + this.element_legend_childs.toolbox_right = document.createElement('div'); + this.element_legend_childs.toolbox_reset = document.createElement('div'); + this.element_legend_childs.toolbox_zoomin = document.createElement('div'); + this.element_legend_childs.toolbox_zoomout = document.createElement('div'); + this.element_legend_childs.toolbox_volume = document.createElement('div'); + + var get_pan_and_zoom_step = function(event) { + if (event.ctrlKey) + return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_control; + + else if (event.shiftKey) + return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_shift; + + else if (event.altKey) + return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_alt; + + else + return NETDATA.options.current.pan_and_zoom_factor; + }; + + this.element_legend_childs.toolbox.className += ' netdata-legend-toolbox'; + this.element.appendChild(this.element_legend_childs.toolbox); + + this.element_legend_childs.toolbox_left.className += ' netdata-legend-toolbox-button'; + this.element_legend_childs.toolbox_left.innerHTML = NETDATA.icons.left; + this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_left); + this.element_legend_childs.toolbox_left.onclick = function(e) { + e.preventDefault(); + + var step = (that.view_before - that.view_after) * get_pan_and_zoom_step(e); + var before = that.view_before - step; + var after = that.view_after - step; + if(after >= that.netdata_first) + that.library.toolboxPanAndZoom(that, after, before); + }; + if(NETDATA.options.current.show_help === true) + $(this.element_legend_childs.toolbox_left).popover({ + 
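+ // get_pan_and_zoom_step() above scales the base pan_and_zoom_factor by a
+ // modifier-key multiplier (Ctrl, Shift or Alt), and the toolbox handlers
+ // shift or shrink the visible window by that fraction of its width.
+ // Worked example for the pan-left handler above and the zoom-in handler
+ // below, assuming a 10-minute window (view_after = 0, view_before =
+ // 600000 ms) and a resulting step of 0.25:
+ //
+ //     var step = (600000 - 0) * 0.25;    // 150000 ms
+ //     // pan left: new window is -150000 .. 450000 (shifted by 25%)
+ //     // zoom in:  dt = (600000 * (0.25 * 0.8)) / 2 = 60000 ms,
+ //     //           new window is 60000 .. 540000 (20% narrower)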
container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, + title: 'Pan Left', + content: 'Pan the chart to the left. You can also drag it with your mouse or your finger (on touch devices).
Help can be disabled from the settings.' + }); + + + this.element_legend_childs.toolbox_reset.className += ' netdata-legend-toolbox-button'; + this.element_legend_childs.toolbox_reset.innerHTML = NETDATA.icons.reset; + this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_reset); + this.element_legend_childs.toolbox_reset.onclick = function(e) { + e.preventDefault(); + NETDATA.resetAllCharts(that); + }; + if(NETDATA.options.current.show_help === true) + $(this.element_legend_childs.toolbox_reset).popover({ + container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, + title: 'Chart Reset', + content: 'Reset all the charts to their default auto-refreshing state. You can also double click the chart contents with your mouse or your finger (on touch devices).
Help can be disabled from the settings.' + }); + + this.element_legend_childs.toolbox_right.className += ' netdata-legend-toolbox-button'; + this.element_legend_childs.toolbox_right.innerHTML = NETDATA.icons.right; + this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_right); + this.element_legend_childs.toolbox_right.onclick = function(e) { + e.preventDefault(); + var step = (that.view_before - that.view_after) * get_pan_and_zoom_step(e); + var before = that.view_before + step; + var after = that.view_after + step; + if(before <= that.netdata_last) + that.library.toolboxPanAndZoom(that, after, before); + }; + if(NETDATA.options.current.show_help === true) + $(this.element_legend_childs.toolbox_right).popover({ + container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, + title: 'Pan Right', + content: 'Pan the chart to the right. You can also drag it with your mouse or your finger (on touch devices).
Help can be disabled from the settings.' }); + + + this.element_legend_childs.toolbox_zoomin.className += ' netdata-legend-toolbox-button'; + this.element_legend_childs.toolbox_zoomin.innerHTML = NETDATA.icons.zoomIn; + this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomin); + this.element_legend_childs.toolbox_zoomin.onclick = function(e) { + e.preventDefault(); + var dt = ((that.view_before - that.view_after) * (get_pan_and_zoom_step(e) * 0.8) / 2); + var before = that.view_before - dt; + var after = that.view_after + dt; + that.library.toolboxPanAndZoom(that, after, before); + }; + if(NETDATA.options.current.show_help === true) + $(this.element_legend_childs.toolbox_zoomin).popover({ + container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, + title: 'Chart Zoom In', + content: 'Zoom in the chart. You can also press SHIFT and select an area of the chart, or press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out.
Help can be disabled from the settings.' }); + + this.element_legend_childs.toolbox_zoomout.className += ' netdata-legend-toolbox-button'; + this.element_legend_childs.toolbox_zoomout.innerHTML = NETDATA.icons.zoomOut; + this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomout); + this.element_legend_childs.toolbox_zoomout.onclick = function(e) { + e.preventDefault(); + var dt = (((that.view_before - that.view_after) / (1.0 - (get_pan_and_zoom_step(e) * 0.8)) - (that.view_before - that.view_after)) / 2); + var before = that.view_before + dt; + var after = that.view_after - dt; + + that.library.toolboxPanAndZoom(that, after, before); + }; + if(NETDATA.options.current.show_help === true) + $(this.element_legend_childs.toolbox_zoomout).popover({ + container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, + title: 'Chart Zoom Out', + content: 'Zoom out the chart. You can also press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out.
Help can be disabled from the settings.' }); + + //this.element_legend_childs.toolbox_volume.className += ' netdata-legend-toolbox-button'; + //this.element_legend_childs.toolbox_volume.innerHTML = ''; + //this.element_legend_childs.toolbox_volume.title = 'Visible Volume'; + //this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_volume); + //this.element_legend_childs.toolbox_volume.onclick = function(e) { + //e.preventDefault(); + //alert('clicked toolbox_volume on ' + that.id); + //} + } + + if(NETDATA.options.current.resize_charts === true) { + this.element_legend_childs.resize_handler = document.createElement('div'); + + this.element_legend_childs.resize_handler.className += " netdata-legend-resize-handler"; + this.element_legend_childs.resize_handler.innerHTML = NETDATA.icons.resize; + this.element.appendChild(this.element_legend_childs.resize_handler); + if (NETDATA.options.current.show_help === true) + $(this.element_legend_childs.resize_handler).popover({ + container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + delay: { + show: NETDATA.options.current.show_help_delay_show_ms, + hide: NETDATA.options.current.show_help_delay_hide_ms + }, + title: 'Chart Resize', + content: 'Drag this point with your mouse or your finger (on touch devices) to resize the chart vertically. You can also double click it or double tap it to reset between 2 states: the default and the one that fits all the values.
Help can be disabled from the settings.' }); + + // mousedown event + this.element_legend_childs.resize_handler.onmousedown = + function (e) { + that.resizeHandler(e); + }; + + // touchstart event + this.element_legend_childs.resize_handler.addEventListener('touchstart', function (e) { + that.resizeHandler(e); + }, false); + } + + if(this.chart) { + this.element_legend_childs.title_date.title = this.legendPluginModuleString(true); + this.element_legend_childs.title_time.title = this.legendResolutionTooltip(); + } + + this.element_legend_childs.title_date.className += " netdata-legend-title-date"; + this.element_legend.appendChild(this.element_legend_childs.title_date); + this.tmp.__last_shown_legend_date = undefined; + + this.element_legend.appendChild(document.createElement('br')); + + this.element_legend_childs.title_time.className += " netdata-legend-title-time"; + this.element_legend.appendChild(this.element_legend_childs.title_time); + this.tmp.__last_shown_legend_time = undefined; + + this.element_legend.appendChild(document.createElement('br')); + + this.element_legend_childs.title_units.className += " netdata-legend-title-units"; + this.element_legend_childs.title_units.innerText = this.units_current; + this.element_legend.appendChild(this.element_legend_childs.title_units); + this.tmp.__last_shown_legend_units = undefined; + + this.element_legend.appendChild(document.createElement('br')); + + this.element_legend_childs.perfect_scroller.className = 'netdata-legend-series'; + this.element_legend.appendChild(this.element_legend_childs.perfect_scroller); + + content.className = 'netdata-legend-series-content'; + this.element_legend_childs.perfect_scroller.appendChild(content); + + this.element_legend_childs.content = content; + + if(NETDATA.options.current.show_help === true) + $(content).popover({ + container: "body", + animation: false, + html: true, + trigger: 'hover', + placement: 'bottom', + title: 'Chart Legend', + delay: { show: NETDATA.options.current.show_help_delay_show_ms, hide: NETDATA.options.current.show_help_delay_hide_ms }, + content: 'You can click or tap on the values or the labels to select dimensions. By pressing SHIFT or CONTROL, you can enable or disable multiple dimensions.
Help can be disabled from the settings.' }); + } + else { + this.element_legend_childs = { + content: content, + resize_handler: null, + toolbox: null, + toolbox_left: null, + toolbox_right: null, + toolbox_reset: null, + toolbox_zoomin: null, + toolbox_zoomout: null, + toolbox_volume: null, + title_date: null, + title_time: null, + title_units: null, + perfect_scroller: null, + series: {} + }; + } + + if(this.data) { + this.element_legend_childs.series.labels_key = this.data.dimension_names.toString(); + if(this.debug === true) + this.log('labels from data: "' + this.element_legend_childs.series.labels_key + '"'); + + for(i = 0, len = this.data.dimension_names.length; i < len ;i++) { + genLabel(this, content, this.data.dimension_ids[i], this.data.dimension_names[i], i); + } + } + else { + var tmp = []; + keys = Object.keys(this.chart.dimensions); + for(i = 0, len = keys.length; i < len ;i++) { + dim = keys[i]; + tmp.push(this.chart.dimensions[dim].name); + genLabel(this, content, dim, this.chart.dimensions[dim].name, i); + } + this.element_legend_childs.series.labels_key = tmp.toString(); + if(this.debug === true) + this.log('labels from chart: "' + this.element_legend_childs.series.labels_key + '"'); + } + + // create a hidden div to be used for hiding + // the original legend of the chart library + var el = document.createElement('div'); + if(this.element_legend !== null) + this.element_legend.appendChild(el); + el.style.display = 'none'; + + this.element_legend_childs.hidden = document.createElement('div'); + el.appendChild(this.element_legend_childs.hidden); + + if(this.element_legend_childs.perfect_scroller !== null) { + Ps.initialize(this.element_legend_childs.perfect_scroller, { + wheelSpeed: 0.2, + wheelPropagation: true, + swipePropagation: true, + minScrollbarLength: null, + maxScrollbarLength: null, + useBothWheelAxes: false, + suppressScrollX: true, + suppressScrollY: false, + scrollXMarginOffset: 0, + scrollYMarginOffset: 0, + theme: 'default' + }); + Ps.update(this.element_legend_childs.perfect_scroller); + } + + this.legendShowLatestValues(); + }; + + this.hasLegend = function() { + if(typeof this.tmp.___hasLegendCache___ !== 'undefined') + return this.tmp.___hasLegendCache___; + + var leg = false; + if(this.library && this.library.legend(this) === 'right-side') + leg = true; + + this.tmp.___hasLegendCache___ = leg; + return leg; + }; + + this.legendWidth = function() { + return (this.hasLegend())?140:0; + }; + + this.legendHeight = function() { + return $(this.element).height(); + }; + + this.chartWidth = function() { + return $(this.element).width() - this.legendWidth(); + }; + + this.chartHeight = function() { + return $(this.element).height(); + }; + + this.chartPixelsPerPoint = function() { + // enforce the options-provided detail + var px = this.pixels_per_point; + + if(this.library && px < this.library.pixels_per_point(this)) + px = this.library.pixels_per_point(this); + + if(px < NETDATA.options.current.pixels_per_point) + px = NETDATA.options.current.pixels_per_point; + + return px; + }; + + this.needsRecreation = function() { + var ret = ( + this.chart_created === true + && this.library + && this.library.autoresize() === false + && this.tm.last_resized < NETDATA.options.last_page_resize + ); + + if(this.debug === true) + this.log('needsRecreation(): ' + ret.toString() + ', chart_created = ' + this.chart_created.toString()); + + return ret; + }; + + this.chartDataUniqueID = function() { + return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + 
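+ // id + library + dimensions + URL options together form the cache key
+ // under which snapshot data is stored and looked up later (see
+ // getSnapshotData() further down).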
this.chartURLOptions(); + }; + + this.chartURLOptions = function() { + var ret = ''; + + if(this.override_options !== null) + ret = this.override_options.toString(); + else + ret = this.library.options(this); + + if(this.append_options !== null) + ret += '%7C' + this.append_options.toString(); + + ret += '%7C' + 'jsonwrap'; + + if(NETDATA.options.current.eliminate_zero_dimensions === true) + ret += '%7C' + 'nonzero'; + + return ret; + }; + + this.chartURL = function() { + var after, before, points_multiplier = 1; + if(NETDATA.globalPanAndZoom.isActive()) { + if(this.current.force_before_ms !== null && this.current.force_after_ms !== null) { + this.tm.pan_and_zoom_seq = 0; + + before = Math.round(this.current.force_before_ms / 1000); + after = Math.round(this.current.force_after_ms / 1000); + this.view_after = after * 1000; + this.view_before = before * 1000; + + if(NETDATA.options.current.pan_and_zoom_data_padding === true) { + this.requested_padding = Math.round((before - after) / 2); + after -= this.requested_padding; + before += this.requested_padding; + this.requested_padding *= 1000; + points_multiplier = 2; + } + + this.current.force_before_ms = null; + this.current.force_after_ms = null; + } + else { + this.tm.pan_and_zoom_seq = NETDATA.globalPanAndZoom.seq; + + after = Math.round(NETDATA.globalPanAndZoom.force_after_ms / 1000); + before = Math.round(NETDATA.globalPanAndZoom.force_before_ms / 1000); + this.view_after = after * 1000; + this.view_before = before * 1000; + + this.requested_padding = null; + points_multiplier = 1; + } + } + else { + this.tm.pan_and_zoom_seq = 0; + + before = this.before; + after = this.after; + this.view_after = after * 1000; + this.view_before = before * 1000; + + this.requested_padding = null; + points_multiplier = 1; + } + + this.requested_after = after * 1000; + this.requested_before = before * 1000; + + var data_points; + if(NETDATA.options.force_data_points !== 0) { + data_points = NETDATA.options.force_data_points; + this.data_points = data_points; + } + else { + this.data_points = this.points || Math.round(this.chartWidth() / this.chartPixelsPerPoint()); + data_points = this.data_points * points_multiplier; + } + + // build the data URL + this.data_url = this.host + this.chart.data_url; + this.data_url += "&format=" + this.library.format(); + this.data_url += "&points=" + (data_points).toString(); + this.data_url += "&group=" + this.method; + this.data_url += "&gtime=" + this.gtime; + this.data_url += "&options=" + this.chartURLOptions(); + + if(after) + this.data_url += "&after=" + after.toString(); + + if(before) + this.data_url += "&before=" + before.toString(); + + if(this.dimensions) + this.data_url += "&dimensions=" + this.dimensions; + + if(NETDATA.options.debug.chart_data_url === true || this.debug === true) + this.log('chartURL(): ' + this.data_url + ' WxH:' + this.chartWidth() + 'x' + this.chartHeight() + ' points: ' + data_points.toString() + ' library: ' + this.library_name); + }; + + this.redrawChart = function() { + if(this.data !== null) + this.updateChartWithData(this.data); + }; + + this.updateChartWithData = function(data) { + if(this.debug === true) + this.log('updateChartWithData() called.'); + + // this may force the chart to be re-created + resizeChart(); + + this.data = data; + + var started = Date.now(); + var view_update_every = data.view_update_every * 1000; + + + if(this.data_update_every !== view_update_every) { + if(this.element_legend_childs.title_time) + this.element_legend_childs.title_time.title = 
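+ // chartURL() above yields a standard /api/v1/data request; %7C is the
+ // URL-encoded '|' separating the options. Assuming a chart whose
+ // data_url is "/api/v1/data?chart=system.cpu", the json format, 300
+ // points, default grouping (group=average, gtime=0) and a relative
+ // 10-minute window (after = -600, before = 0), the result looks like
+ // (wrapped here for readability):
+ //
+ //     /api/v1/data?chart=system.cpu&format=json&points=300
+ //         &group=average&gtime=0
+ //         &options=<library-options>%7Cjsonwrap%7Cnonzero
+ //         &after=-600
+ //
+ // before is 0 and therefore falsy, so the code above omits it.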
this.legendResolutionTooltip(); + } + + // if the result is JSON, find the latest update-every + this.data_update_every = view_update_every; + this.data_after = data.after * 1000; + this.data_before = data.before * 1000; + this.netdata_first = data.first_entry * 1000; + this.netdata_last = data.last_entry * 1000; + this.data_points = data.points; + + data.state = this; + + if(NETDATA.options.current.pan_and_zoom_data_padding === true && this.requested_padding !== null) { + if(this.view_after < this.data_after) { + // console.log('adjusting view_after from ' + this.view_after + ' to ' + this.data_after); + this.view_after = this.data_after; + } + + if(this.view_before > this.data_before) { + // console.log('adjusting view_before from ' + this.view_before + ' to ' + this.data_before); + this.view_before = this.data_before; + } + } + else { + this.view_after = this.data_after; + this.view_before = this.data_before; + } + + if(this.debug === true) { + this.log('UPDATE No ' + this.updates_counter + ' COMPLETED'); + + if(this.current.force_after_ms) + this.log('STATUS: forced : ' + (this.current.force_after_ms / 1000).toString() + ' - ' + (this.current.force_before_ms / 1000).toString()); + else + this.log('STATUS: forced : unset'); + + this.log('STATUS: requested : ' + (this.requested_after / 1000).toString() + ' - ' + (this.requested_before / 1000).toString()); + this.log('STATUS: downloaded: ' + (this.data_after / 1000).toString() + ' - ' + (this.data_before / 1000).toString()); + this.log('STATUS: rendered : ' + (this.view_after / 1000).toString() + ' - ' + (this.view_before / 1000).toString()); + this.log('STATUS: points : ' + (this.data_points).toString()); + } + + if(this.data_points === 0) { + noDataToShow(); + return; + } + + if(this.updates_since_last_creation >= this.library.max_updates_to_recreate()) { + if(this.debug === true) + this.log('max updates of ' + this.updates_since_last_creation.toString() + ' reached. 
Forcing re-generation.'); + + init('force'); + return; + } + + // check and update the legend + this.legendUpdateDOM(); + + if(this.chart_created === true + && typeof this.library.update === 'function') { + + if(this.debug === true) + this.log('updating chart...'); + + if(callChartLibraryUpdateSafely(data) === false) + return; + } + else { + if(this.debug === true) + this.log('creating chart...'); + + if(callChartLibraryCreateSafely(data) === false) + return; + } + if(this.isVisible() === true) { + hideMessage(); + this.legendShowLatestValues(); + } + else { + this.__redraw_on_unhide = true; + + if(this.debug === true) + this.log("drawn while not visible"); + } + + if(this.selected === true) + NETDATA.globalSelectionSync.stop(); + + // update the performance counters + var now = Date.now(); + this.tm.last_updated = now; + + // don't update last_autorefreshed if this chart is + // forced to be updated with global PanAndZoom + if(NETDATA.globalPanAndZoom.isActive()) + this.tm.last_autorefreshed = 0; + else { + if(NETDATA.options.current.parallel_refresher === true && NETDATA.options.current.concurrent_refreshes === true && typeof this.force_update_every !== 'number') + this.tm.last_autorefreshed = now - (now % this.data_update_every); + else + this.tm.last_autorefreshed = now; + } + + this.refresh_dt_ms = now - started; + NETDATA.options.auto_refresher_fast_weight += this.refresh_dt_ms; + + if(this.refresh_dt_element !== null) + this.refresh_dt_element.innerText = this.refresh_dt_ms.toString(); + + if(this.foreign_element_before !== null) + this.foreign_element_before.innerText = NETDATA.dateTime.localeDateString(this.view_before) + ' ' + NETDATA.dateTime.localeTimeString(this.view_before); + + if(this.foreign_element_after !== null) + this.foreign_element_after.innerText = NETDATA.dateTime.localeDateString(this.view_after) + ' ' + NETDATA.dateTime.localeTimeString(this.view_after); + + if(this.foreign_element_duration !== null) + this.foreign_element_duration.innerText = NETDATA.seconds4human(Math.floor((this.view_before - this.view_after) / 1000) + 1); + + if(this.foreign_element_update_every !== null) + this.foreign_element_update_every.innerText = NETDATA.seconds4human(Math.floor(this.data_update_every / 1000)); + }; + + this.getSnapshotData = function(key) { + if(this.debug === true) + this.log('updating from snapshot: ' + key); + + if(typeof netdataSnapshotData.data[key] === 'undefined') { + this.log('snapshot does not include data for key "' + key + '"'); + return null; + } + + if(typeof netdataSnapshotData.data[key] !== 'string') { + this.log('snapshot data for key "' + key + '" is not string'); + return null; + } + + var uncompressed; + try { + uncompressed = netdataSnapshotData.uncompress(netdataSnapshotData.data[key]); + + if(uncompressed === null) { + this.log('uncompressed snapshot data for key ' + key + ' is null'); + return null; + } + + if(typeof uncompressed === 'undefined') { + this.log('uncompressed snapshot data for key ' + key + ' is undefined'); + return null; + } + } + catch(e) { + this.log('decompression of snapshot data for key ' + key + ' failed'); + console.log(e); + uncompressed = null; + } + + if(typeof uncompressed !== 'string') { + this.log('uncompressed snapshot data for key ' + key + ' is not string'); + return null; + } + + var data; + try { + data = JSON.parse(uncompressed); + } + catch(e) { + this.log('parsing snapshot data for key ' + key + ' failed'); + console.log(e); + data = null; + } + + return data; + }; + + this.updateChart = function(callback) { + 
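+ // Note on the bookkeeping above: when parallel and concurrent refreshes
+ // are enabled, last_autorefreshed is rounded down to a multiple of the
+ // update interval, aligning every chart's next refresh to the same tick.
+ // Worked example with data_update_every = 2000 ms:
+ //
+ //     var now = 10500;                   // illustrative timestamp, ms
+ //     var aligned = now - (now % 2000);  // 10000
+ //     // the next refresh becomes due at 12000, in lock-step with the
+ //     // other charts on the page, instead of at 12500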
if (this.debug === true) + this.log('updateChart()'); + + if (this.fetching_data === true) { + if (this.debug === true) + this.log('updateChart(): I am already updating...'); + + if (typeof callback === 'function') + return callback(false, 'already running'); + + return; + } + + // due to late initialization of charts and libraries + // we need to check this too + if (this.enabled === false) { + if (this.debug === true) + this.log('updateChart(): I am not enabled'); + + if (typeof callback === 'function') + return callback(false, 'not enabled'); + + return; + } + + if (canBeRendered() === false) { + if (this.debug === true) + this.log('updateChart(): cannot be rendered'); + + if (typeof callback === 'function') + return callback(false, 'cannot be rendered'); + + return; + } + + if (that.dom_created !== true) { + if (this.debug === true) + this.log('updateChart(): creating DOM'); + + createDOM(); + } + + if (this.chart === null) { + if (this.debug === true) + this.log('updateChart(): getting chart'); + + return this.getChart(function () { + return that.updateChart(callback); + }); + } + + if(this.library.initialized === false) { + if(this.library.enabled === true) { + if(this.debug === true) + this.log('updateChart(): initializing chart library'); + + return this.library.initialize(function () { + return that.updateChart(callback); + }); + } + else { + error('chart library "' + this.library_name + '" is not available.'); + + if(typeof callback === 'function') + return callback(false, 'library not available'); + + return; + } + } + + this.clearSelection(); + this.chartURL(); + + NETDATA.statistics.refreshes_total++; + NETDATA.statistics.refreshes_active++; + + if(NETDATA.statistics.refreshes_active > NETDATA.statistics.refreshes_active_max) + NETDATA.statistics.refreshes_active_max = NETDATA.statistics.refreshes_active; + + var ok = false; + this.fetching_data = true; + + if(netdataSnapshotData !== null) { + var key = this.chartDataUniqueID(); + var data = this.getSnapshotData(key); + if (data !== null) { + ok = true; + data = NETDATA.xss.checkData('/api/v1/data', data, this.library.xssRegexIgnore); + this.updateChartWithData(data); + } + else { + ok = false; + error('cannot get data from snapshot for key: "' + key + '"'); + that.tm.last_autorefreshed = Date.now(); + } + + NETDATA.statistics.refreshes_active--; + this.fetching_data = false; + + if(typeof callback === 'function') + callback(ok, 'snapshot'); + + return; + } + + if(this.debug === true) + this.log('updating from ' + this.data_url); + + this.xhr = $.ajax( { + url: this.data_url, + cache: false, + async: true, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkData('/api/v1/data', data, that.library.xssRegexIgnore); + + that.xhr = undefined; + that.retries_on_data_failures = 0; + ok = true; + + if(that.debug === true) + that.log('data received. 
updating chart.'); + + that.updateChartWithData(data); + }) + .fail(function(msg) { + that.xhr = undefined; + + if(msg.statusText !== 'abort') { + that.retries_on_data_failures++; + if(that.retries_on_data_failures > NETDATA.options.current.retries_on_data_failures) { + // that.log('failed ' + that.retries_on_data_failures.toString() + ' times - giving up'); + that.retries_on_data_failures = 0; + error('data download failed for url: ' + that.data_url); + } + else { + that.tm.last_autorefreshed = Date.now(); + // that.log('failed ' + that.retries_on_data_failures.toString() + ' times, but I will retry'); + } + } + }) + .always(function() { + that.xhr = undefined; + + NETDATA.statistics.refreshes_active--; + that.fetching_data = false; + + if(typeof callback === 'function') + return callback(ok, 'download'); + }); + }; + + var __isVisible = function() { + var ret = true; + + if(NETDATA.options.current.update_only_visible !== false) { + // tolerance is the number of pixels a chart can be off-screen + // to consider it as visible and refresh it as if was visible + var tolerance = 0; + + that.tm.last_visible_check = Date.now(); + + var rect = that.element.getBoundingClientRect(); + + var screenTop = window.scrollY; + var screenBottom = screenTop + window.innerHeight; + + var chartTop = rect.top + screenTop; + var chartBottom = chartTop + rect.height; + + ret = !(rect.width === 0 || rect.height === 0 || chartBottom + tolerance < screenTop || chartTop - tolerance > screenBottom); + } + + if(that.debug === true) + that.log('__isVisible(): ' + ret); + + return ret; + }; + + this.isVisible = function(nocache) { + // this.log('last_visible_check: ' + this.tm.last_visible_check + ', last_page_scroll: ' + NETDATA.options.last_page_scroll); + + // caching - we do not evaluate the charts visibility + // if the page has not been scrolled since the last check + if((typeof nocache !== 'undefined' && nocache === true) + || typeof this.tmp.___isVisible___ === 'undefined' + || this.tm.last_visible_check <= NETDATA.options.last_page_scroll) { + this.tmp.___isVisible___ = __isVisible(); + if (this.tmp.___isVisible___ === true) this.unhideChart(); + else this.hideChart(); + } + + if(this.debug === true) + this.log('isVisible(' + nocache + '): ' + this.tmp.___isVisible___); + + return this.tmp.___isVisible___; + }; + + this.isAutoRefreshable = function() { + return (this.current.autorefresh); + }; + + this.canBeAutoRefreshed = function() { + if(this.enabled === false) { + if(this.debug === true) + this.log('canBeAutoRefreshed() -> not enabled'); + + return false; + } + + if(this.running === true) { + if(this.debug === true) + this.log('canBeAutoRefreshed() -> already running'); + + return false; + } + + if(this.library === null || this.library.enabled === false) { + error('charting library "' + this.library_name + '" is not available'); + if(this.debug === true) + this.log('canBeAutoRefreshed() -> chart library ' + this.library_name + ' is not available'); + + return false; + } + + if(this.isVisible() === false) { + if(NETDATA.options.debug.visibility === true || this.debug === true) + this.log('canBeAutoRefreshed() -> not visible'); + + return false; + } + + var now = Date.now(); + + if(this.current.force_update_at !== 0 && this.current.force_update_at < now) { + if(this.debug === true) + this.log('canBeAutoRefreshed() -> timed force update - allowing this update'); + + this.current.force_update_at = 0; + return true; + } + + if(this.isAutoRefreshable() === false) { + if(this.debug === true) + 
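+ // __isVisible() above treats a chart as visible when its bounding box
+ // has a non-zero size and overlaps the scrolled viewport; `tolerance`
+ // (currently 0) would widen the viewport so charts slightly off-screen
+ // could be refreshed too. Worked example with window.scrollY = 1000,
+ // window.innerHeight = 800 and a chart rect of top = -100, height = 150:
+ //
+ //     var screenTop = 1000, screenBottom = 1800;
+ //     var chartTop = -100 + 1000;    // 900
+ //     var chartBottom = 900 + 150;   // 1050
+ //     // chartBottom >= screenTop and chartTop <= screenBottom,
+ //     // so the chart overlaps the viewport top edge: visible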
this.log('canBeAutoRefreshed() -> not auto-refreshable'); + + return false; + } + + // allow the first update, even if the page is not visible + if(NETDATA.options.page_is_visible === false && this.updates_counter && this.updates_since_last_unhide) { + if(NETDATA.options.debug.focus === true || this.debug === true) + this.log('canBeAutoRefreshed() -> not the first update, and page does not have focus'); + + return false; + } + + if(this.needsRecreation() === true) { + if(this.debug === true) + this.log('canBeAutoRefreshed() -> needs re-creation.'); + + return true; + } + + if(NETDATA.options.auto_refresher_stop_until >= now) { + if(this.debug === true) + this.log('canBeAutoRefreshed() -> stopped until is in future.'); + + return false; + } + + // options valid only for autoRefresh() + if(NETDATA.globalPanAndZoom.isActive()) { + if(NETDATA.globalPanAndZoom.shouldBeAutoRefreshed(this)) { + if(this.debug === true) + this.log('canBeAutoRefreshed(): global panning: I need an update.'); + + return true; + } + else { + if(this.debug === true) + this.log('canBeAutoRefreshed(): global panning: I am already up to date.'); + + return false; + } + } + + if(this.selected === true) { + if(this.debug === true) + this.log('canBeAutoRefreshed(): I have a selection in place.'); + + return false; + } + + if(this.paused === true) { + if(this.debug === true) + this.log('canBeAutoRefreshed(): I am paused.'); + + return false; + } + + var data_update_every = this.data_update_every; + if(typeof this.force_update_every === 'number') + data_update_every = this.force_update_every; + + if(now - this.tm.last_autorefreshed >= data_update_every) { + if(this.debug === true) + this.log('canBeAutoRefreshed(): It is time to update me. Now: ' + now.toString() + ', last_autorefreshed: ' + this.tm.last_autorefreshed + ', data_update_every: ' + data_update_every + ', delta: ' + (now - this.tm.last_autorefreshed).toString()); + + return true; + } + + return false; + }; + + this.autoRefresh = function(callback) { + var state = that; + + if(state.canBeAutoRefreshed() === true && state.running === false) { + + state.running = true; + state.updateChart(function() { + state.running = false; + + if(typeof callback === 'function') + return callback(); + }); + } + else { + if(typeof callback === 'function') + return callback(); + } + }; + + this.__defaultsFromDownloadedChart = function(chart) { + this.chart = chart; + this.chart_url = chart.url; + this.data_update_every = chart.update_every * 1000; + this.data_points = Math.round(this.chartWidth() / this.chartPixelsPerPoint()); + this.tm.last_info_downloaded = Date.now(); + + if(this.title === null) + this.title = chart.title; + + if(this.units === null) { + this.units = chart.units; + this.units_current = this.units; + } + }; + + // fetch the chart description from the netdata server + this.getChart = function(callback) { + this.chart = NETDATA.chartRegistry.get(this.host, this.id); + if(this.chart) { + this.__defaultsFromDownloadedChart(this.chart); + + if(typeof callback === 'function') + return callback(); + } + else if(netdataSnapshotData !== null) { + // console.log(this); + // console.log(NETDATA.chartRegistry); + NETDATA.error(404, 'host: ' + this.host + ', chart: ' + this.id); + error('chart not found in snapshot'); + + if(typeof callback === 'function') + return callback(); + } + else { + this.chart_url = "/api/v1/chart?chart=" + this.id; + + if(this.debug === true) + this.log('downloading ' + this.chart_url); + + $.ajax( { + url: this.host + this.chart_url, + cache: false, + 
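+ // cache: false makes jQuery append a throwaway "_=<timestamp>" query
+ // parameter, so chart metadata is never served from a stale HTTP cache;
+ // withCredentials (below) attaches cookies to the request, as the
+ // inline comment notes.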
async: true, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(chart) { + chart = NETDATA.xss.checkOptional('/api/v1/chart', chart); + + chart.url = that.chart_url; + that.__defaultsFromDownloadedChart(chart); + NETDATA.chartRegistry.add(that.host, that.id, chart); + }) + .fail(function() { + NETDATA.error(404, that.chart_url); + error('chart not found on url "' + that.chart_url + '"'); + }) + .always(function() { + if(typeof callback === 'function') + return callback(); + }); + } + }; + + // ============================================================================================================ + // INITIALIZATION + + initDOM(); + init('fast'); + }; + + NETDATA.resetAllCharts = function(state) { + // first clear the global selection sync + // to make sure no chart is in selected state + NETDATA.globalSelectionSync.stop(); + + // there are 2 possibilities here + // a. state is the global Pan and Zoom master + // b. state is not the global Pan and Zoom master + var master = true; + if(NETDATA.globalPanAndZoom.isMaster(state) === false) + master = false; + + // clear the global Pan and Zoom + // this will also refresh the master + // and unblock any charts currently mirroring the master + NETDATA.globalPanAndZoom.clearMaster(); + + // if we were not the master, reset our status too + // this is required because most probably the mouse + // is over this chart, blocking it from auto-refreshing + if(master === false && (state.paused === true || state.selected === true)) + state.resetChart(); + }; + + // get or create a chart state, given a DOM element + NETDATA.chartState = function(element) { + var self = $(element); + + var state = self.data('netdata-state-object') || null; + if(state === null) { + state = new chartState(element); + self.data('netdata-state-object', state); + } + return state; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Library functions + + // Load a script without jquery + // This is used to load jquery - after it is loaded, we use jquery + NETDATA._loadjQuery = function(callback) { + if(typeof jQuery === 'undefined') { + if(NETDATA.options.debug.main_loop === true) + console.log('loading ' + NETDATA.jQuery); + + var script = document.createElement('script'); + script.type = 'text/javascript'; + script.async = true; + script.src = NETDATA.jQuery; + + // script.onabort = onError; + script.onerror = function() { NETDATA.error(101, NETDATA.jQuery); }; + if(typeof callback === "function") { + script.onload = function () { + $ = jQuery; + return callback(); + }; + } + + var s = document.getElementsByTagName('script')[0]; + s.parentNode.insertBefore(script, s); + } + else if(typeof callback === "function") { + $ = jQuery; + return callback(); + } + }; + + NETDATA._loadCSS = function(filename) { + // don't use jQuery here + // styles are loaded before jQuery + // to eliminate showing an unstyled page to the user + + var fileref = document.createElement("link"); + fileref.setAttribute("rel", "stylesheet"); + fileref.setAttribute("type", "text/css"); + fileref.setAttribute("href", filename); + + if (typeof fileref !== 'undefined') + document.getElementsByTagName("head")[0].appendChild(fileref); + }; + + NETDATA.colorHex2Rgb = function(hex) { + // Expand shorthand form (e.g. "03F") to full form (e.g. 
"0033FF") + var shorthandRegex = /^#?([a-f\d])([a-f\d])([a-f\d])$/i; + hex = hex.replace(shorthandRegex, function(m, r, g, b) { + return r + r + g + g + b + b; + }); + + var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex); + return result ? { + r: parseInt(result[1], 16), + g: parseInt(result[2], 16), + b: parseInt(result[3], 16) + } : null; + }; + + NETDATA.colorLuminance = function(hex, lum) { + // validate hex string + hex = String(hex).replace(/[^0-9a-f]/gi, ''); + if (hex.length < 6) + hex = hex[0]+hex[0]+hex[1]+hex[1]+hex[2]+hex[2]; + + lum = lum || 0; + + // convert to decimal and change luminosity + var rgb = "#", c, i; + for (i = 0; i < 3; i++) { + c = parseInt(hex.substr(i*2,2), 16); + c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16); + rgb += ("00"+c).substr(c.length); + } + + return rgb; + }; + + NETDATA.guid = function() { + function s4() { + return Math.floor((1 + Math.random()) * 0x10000) + .toString(16) + .substring(1); + } + + return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4(); + }; + + NETDATA.zeropad = function(x) { + if(x > -10 && x < 10) return '0' + x.toString(); + else return x.toString(); + }; + + // user function to signal us the DOM has been + // updated. + NETDATA.updatedDom = function() { + NETDATA.options.updated_dom = true; + }; + + NETDATA.ready = function(callback) { + NETDATA.options.pauseCallback = callback; + }; + + NETDATA.pause = function(callback) { + if(typeof callback === 'function') { + if (NETDATA.options.pause === true) + return callback(); + else + NETDATA.options.pauseCallback = callback; + } + }; + + NETDATA.unpause = function() { + NETDATA.options.pauseCallback = null; + NETDATA.options.updated_dom = true; + NETDATA.options.pause = false; + }; + + NETDATA.seconds4human = function (seconds, options) { + var default_options = { + now: 'now', + space: ' ', + negative_suffix: 'ago', + day: 'day', + days: 'days', + hour: 'hour', + hours: 'hours', + minute: 'min', + minutes: 'mins', + second: 'sec', + seconds: 'secs', + and: 'and' + }; + + if(typeof options !== 'object') + options = default_options; + else { + var x; + for(x in default_options) { + if(typeof options[x] !== 'string') + options[x] = default_options[x]; + } + } + + if(typeof seconds === 'string') + seconds = parseInt(seconds, 10); + + if(seconds === 0) + return options.now; + + var suffix = ''; + if(seconds < 0) { + seconds = -seconds; + if(options.negative_suffix !== '') suffix = options.space + options.negative_suffix; + } + + var days = Math.floor(seconds / 86400); + seconds -= (days * 86400); + + var hours = Math.floor(seconds / 3600); + seconds -= (hours * 3600); + + var minutes = Math.floor(seconds / 60); + seconds -= (minutes * 60); + + var strings = []; + + if(days > 1) strings.push(days.toString() + options.space + options.days); + else if(days === 1) strings.push(days.toString() + options.space + options.day); + + if(hours > 1) strings.push(hours.toString() + options.space + options.hours); + else if(hours === 1) strings.push(hours.toString() + options.space + options.hour); + + if(minutes > 1) strings.push(minutes.toString() + options.space + options.minutes); + else if(minutes === 1) strings.push(minutes.toString() + options.space + options.minute); + + if(seconds > 1) strings.push(Math.floor(seconds).toString() + options.space + options.seconds); + else if(seconds === 1) strings.push(Math.floor(seconds).toString() + options.space + options.second); + + if(strings.length === 1) + return strings.pop() + 
suffix; + + var last = strings.pop(); + return strings.join(", ") + " " + options.and + " " + last + suffix; + }; + + // ---------------------------------------------------------------------------------------------------------------- + + // this is purely sequential charts refresher + // it is meant to be autonomous + NETDATA.chartRefresherNoParallel = function(index, callback) { + var targets = NETDATA.intersectionObserver.targets(); + + if(NETDATA.options.debug.main_loop === true) + console.log('NETDATA.chartRefresherNoParallel(' + index + ')'); + + if(NETDATA.options.updated_dom === true) { + // the dom has been updated + // get the dom parts again + NETDATA.parseDom(callback); + return; + } + if(index >= targets.length) { + if(NETDATA.options.debug.main_loop === true) + console.log('waiting to restart main loop...'); + + NETDATA.options.auto_refresher_fast_weight = 0; + callback(); + } + else { + var state = targets[index]; + + if(NETDATA.options.auto_refresher_fast_weight < NETDATA.options.current.fast_render_timeframe) { + if(NETDATA.options.debug.main_loop === true) + console.log('fast rendering...'); + + if(state.isVisible() === true) + NETDATA.timeout.set(function() { + state.autoRefresh(function () { + NETDATA.chartRefresherNoParallel(++index, callback); + }); + }, 0); + else + NETDATA.chartRefresherNoParallel(++index, callback); + } + else { + if(NETDATA.options.debug.main_loop === true) console.log('waiting for next refresh...'); + NETDATA.options.auto_refresher_fast_weight = 0; + + NETDATA.timeout.set(function() { + state.autoRefresh(function() { + NETDATA.chartRefresherNoParallel(++index, callback); + }); + }, NETDATA.options.current.idle_between_charts); + } + } + }; + + NETDATA.chartRefresherWaitTime = function() { + return NETDATA.options.current.idle_parallel_loops; + }; + + // the default refresher + NETDATA.chartRefresherLastRun = 0; + NETDATA.chartRefresherRunsAfterParseDom = 0; + NETDATA.chartRefresherTimeoutId = undefined; + + NETDATA.chartRefresherReschedule = function() { + if(NETDATA.options.current.async_on_scroll === true) { + if(NETDATA.chartRefresherTimeoutId) + NETDATA.timeout.clear(NETDATA.chartRefresherTimeoutId); + NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(NETDATA.chartRefresher, NETDATA.options.current.onscroll_worker_duration_threshold); + //console.log('chartRefresherReschedule()'); + } + }; + + NETDATA.chartRefresher = function() { + // console.log('chartRefresher() begin ' + (Date.now() - NETDATA.chartRefresherLastRun).toString() + ' ms since last run'); + + if(NETDATA.options.page_is_visible === false + && NETDATA.options.current.stop_updates_when_focus_is_lost === true + && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_resize + && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_scroll + && NETDATA.chartRefresherRunsAfterParseDom > 10 + ) { + setTimeout( + NETDATA.chartRefresher, + NETDATA.options.current.idle_lost_focus + ); + + // console.log('chartRefresher() page without focus, will run in ' + NETDATA.options.current.idle_lost_focus.toString() + ' ms, ' + NETDATA.chartRefresherRunsAfterParseDom.toString()); + return; + } + NETDATA.chartRefresherRunsAfterParseDom++; + + var now = Date.now(); + NETDATA.chartRefresherLastRun = now; + + if( now < NETDATA.options.on_scroll_refresher_stop_until ) { + NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( + NETDATA.chartRefresher, + NETDATA.chartRefresherWaitTime() + ); + + // console.log('chartRefresher() end1 will run in ' + NETDATA.chartRefresherWaitTime().toString() 
+ ' ms'); + return; + } + + if( now < NETDATA.options.auto_refresher_stop_until ) { + NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( + NETDATA.chartRefresher, + NETDATA.chartRefresherWaitTime() + ); + + // console.log('chartRefresher() end2 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms'); + return; + } + + if(NETDATA.options.pause === true) { + // console.log('auto-refresher is paused'); + NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( + NETDATA.chartRefresher, + NETDATA.chartRefresherWaitTime() + ); + + // console.log('chartRefresher() end3 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms'); + return; + } + + if(typeof NETDATA.options.pauseCallback === 'function') { + // console.log('auto-refresher is calling pauseCallback'); + + NETDATA.options.pause = true; + NETDATA.options.pauseCallback(); + NETDATA.chartRefresher(); + + // console.log('chartRefresher() end4 (nested)'); + return; + } + + if(NETDATA.options.current.parallel_refresher === false) { + // console.log('auto-refresher is calling chartRefresherNoParallel(0)'); + NETDATA.chartRefresherNoParallel(0, function() { + NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( + NETDATA.chartRefresher, + NETDATA.options.current.idle_between_loops + ); + }); + // console.log('chartRefresher() end5 (no parallel, nested)'); + return; + } + + if(NETDATA.options.updated_dom === true) { + // the dom has been updated + // get the dom parts again + // console.log('auto-refresher is calling parseDom()'); + NETDATA.parseDom(NETDATA.chartRefresher); + // console.log('chartRefresher() end6 (parseDom)'); + return; + } + + if(NETDATA.globalSelectionSync.active() === false) { + var parallel = []; + var targets = NETDATA.intersectionObserver.targets(); + var len = targets.length; + var state; + while(len--) { + state = targets[len]; + if(state.running === true || state.isVisible() === false) + continue; + + if(state.library.initialized === false) { + if(state.library.enabled === true) { + state.library.initialize(NETDATA.chartRefresher); + //console.log('chartRefresher() end6 (library init)'); + return; + } + else { + state.error('chart library "' + state.library_name + '" is not enabled.'); + } + } + + if(NETDATA.scrollUp === true) + parallel.unshift(state); + else + parallel.push(state); + } + + len = parallel.length; + while (len--) { + state = parallel[len]; + // console.log('auto-refresher executing in parallel for ' + parallel.length.toString() + ' charts'); + // this will execute the jobs in parallel + + if (state.running === false) + NETDATA.timeout.set(state.autoRefresh, 0); + } + //else { + // console.log('auto-refresher nothing to do'); + //} + } + + // run the next refresh iteration + NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set( + NETDATA.chartRefresher, + NETDATA.chartRefresherWaitTime() + ); + + //console.log('chartRefresher() completed in ' + (Date.now() - now).toString() + ' ms'); + }; + + NETDATA.parseDom = function(callback) { + //console.log('parseDom()'); + + NETDATA.options.last_page_scroll = Date.now(); + NETDATA.options.updated_dom = false; + NETDATA.chartRefresherRunsAfterParseDom = 0; + + var targets = $('div[data-netdata]'); //.filter(':visible'); + + if(NETDATA.options.debug.main_loop === true) + console.log('DOM updated - there are ' + targets.length + ' charts on page.'); + + NETDATA.intersectionObserver.globalReset(); + NETDATA.options.targets = []; + var len = targets.length; + while(len--) { + // the initialization will take care of sizing + // and the 
"loading..." message + var state = NETDATA.chartState(targets[len]); + NETDATA.options.targets.push(state); + NETDATA.intersectionObserver.observe(state); + } + + if(NETDATA.globalChartUnderlay.isActive() === true) + NETDATA.globalChartUnderlay.setup(); + else + NETDATA.globalChartUnderlay.clear(); + + if(typeof callback === 'function') + return callback(); + }; + + // this is the main function - where everything starts + NETDATA.started = false; + NETDATA.start = function() { + // this should be called only once + + if(NETDATA.started === true) { + console.log('netdata is already started'); + return; + } + + NETDATA.started = true; + NETDATA.options.page_is_visible = true; + + $(window).blur(function() { + if(NETDATA.options.current.stop_updates_when_focus_is_lost === true) { + NETDATA.options.page_is_visible = false; + if(NETDATA.options.debug.focus === true) + console.log('Lost Focus!'); + } + }); + + $(window).focus(function() { + if(NETDATA.options.current.stop_updates_when_focus_is_lost === true) { + NETDATA.options.page_is_visible = true; + if(NETDATA.options.debug.focus === true) + console.log('Focus restored!'); + } + }); + + if(typeof document.hasFocus === 'function' && !document.hasFocus()) { + if(NETDATA.options.current.stop_updates_when_focus_is_lost === true) { + NETDATA.options.page_is_visible = false; + if(NETDATA.options.debug.focus === true) + console.log('Document has no focus!'); + } + } + + // bootstrap tab switching + $('a[data-toggle="tab"]').on('shown.bs.tab', NETDATA.onscroll); + + // bootstrap modal switching + var $modal = $('.modal'); + $modal.on('hidden.bs.modal', NETDATA.onscroll); + $modal.on('shown.bs.modal', NETDATA.onscroll); + + // bootstrap collapse switching + var $collapse = $('.collapse'); + $collapse.on('hidden.bs.collapse', NETDATA.onscroll); + $collapse.on('shown.bs.collapse', NETDATA.onscroll); + + NETDATA.parseDom(NETDATA.chartRefresher); + + // Alarms initialization + setTimeout(NETDATA.alarms.init, 1000); + + // Registry initialization + setTimeout(NETDATA.registry.init, netdataRegistryAfterMs); + + if(typeof netdataCallback === 'function') + netdataCallback(); + }; + + NETDATA.globalReset = function() { + NETDATA.intersectionObserver.globalReset(); + NETDATA.globalSelectionSync.globalReset(); + NETDATA.globalPanAndZoom.globalReset(); + NETDATA.chartRegistry.globalReset(); + NETDATA.commonMin.globalReset(); + NETDATA.commonMax.globalReset(); + NETDATA.commonColors.globalReset(); + NETDATA.unitsConversion.globalReset(); + NETDATA.options.targets = []; + NETDATA.parseDom(); + NETDATA.unpause(); + }; + + // ---------------------------------------------------------------------------------------------------------------- + // peity + + NETDATA.peityInitialize = function(callback) { + if(typeof netdataNoPeitys === 'undefined' || !netdataNoPeitys) { + $.ajax({ + url: NETDATA.peity_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('peity', NETDATA.peity_js); + }) + .fail(function() { + NETDATA.chartLibraries.peity.enabled = false; + NETDATA.error(100, NETDATA.peity_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + else { + NETDATA.chartLibraries.peity.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.peityChartUpdate = function(state, data) { + state.peity_instance.innerHTML = data.result; + + if(state.peity_options.stroke !== 
state.chartCustomColors()[0]) { + state.peity_options.stroke = state.chartCustomColors()[0]; + if(state.chart.chart_type === 'line') + state.peity_options.fill = NETDATA.themes.current.background; + else + state.peity_options.fill = NETDATA.colorLuminance(state.chartCustomColors()[0], NETDATA.chartDefaults.fill_luminance); + } + + $(state.peity_instance).peity('line', state.peity_options); + return true; + }; + + NETDATA.peityChartCreate = function(state, data) { + state.peity_instance = document.createElement('div'); + state.element_chart.appendChild(state.peity_instance); + + state.peity_options = { + stroke: NETDATA.themes.current.foreground, + strokeWidth: NETDATA.dataAttribute(state.element, 'peity-strokewidth', 1), + width: state.chartWidth(), + height: state.chartHeight(), + fill: NETDATA.themes.current.foreground + }; + + NETDATA.peityChartUpdate(state, data); + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // sparkline + + NETDATA.sparklineInitialize = function(callback) { + if(typeof netdataNoSparklines === 'undefined' || !netdataNoSparklines) { + $.ajax({ + url: NETDATA.sparkline_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('sparkline', NETDATA.sparkline_js); + }) + .fail(function() { + NETDATA.chartLibraries.sparkline.enabled = false; + NETDATA.error(100, NETDATA.sparkline_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + else { + NETDATA.chartLibraries.sparkline.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.sparklineChartUpdate = function(state, data) { + state.sparkline_options.width = state.chartWidth(); + state.sparkline_options.height = state.chartHeight(); + + $(state.element_chart).sparkline(data.result, state.sparkline_options); + return true; + }; + + NETDATA.sparklineChartCreate = function(state, data) { + var type = NETDATA.dataAttribute(state.element, 'sparkline-type', 'line'); + var lineColor = NETDATA.dataAttribute(state.element, 'sparkline-linecolor', state.chartCustomColors()[0]); + var fillColor = NETDATA.dataAttribute(state.element, 'sparkline-fillcolor', ((state.chart.chart_type === 'line')?NETDATA.themes.current.background:NETDATA.colorLuminance(lineColor, NETDATA.chartDefaults.fill_luminance))); + var chartRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemin', undefined); + var chartRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemax', undefined); + var composite = NETDATA.dataAttribute(state.element, 'sparkline-composite', undefined); + var enableTagOptions = NETDATA.dataAttribute(state.element, 'sparkline-enabletagoptions', undefined); + var tagOptionPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tagoptionprefix', undefined); + var tagValuesAttribute = NETDATA.dataAttribute(state.element, 'sparkline-tagvaluesattribute', undefined); + var disableHiddenCheck = NETDATA.dataAttribute(state.element, 'sparkline-disablehiddencheck', undefined); + var defaultPixelsPerValue = NETDATA.dataAttribute(state.element, 'sparkline-defaultpixelspervalue', undefined); + var spotColor = NETDATA.dataAttribute(state.element, 'sparkline-spotcolor', undefined); + var minSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-minspotcolor', undefined); + var maxSpotColor = NETDATA.dataAttribute(state.element, 
'sparkline-maxspotcolor', undefined); + var spotRadius = NETDATA.dataAttribute(state.element, 'sparkline-spotradius', undefined); + var valueSpots = NETDATA.dataAttribute(state.element, 'sparkline-valuespots', undefined); + var highlightSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightspotcolor', undefined); + var highlightLineColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightlinecolor', undefined); + var lineWidth = NETDATA.dataAttribute(state.element, 'sparkline-linewidth', undefined); + var normalRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemin', undefined); + var normalRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemax', undefined); + var drawNormalOnTop = NETDATA.dataAttribute(state.element, 'sparkline-drawnormalontop', undefined); + var xvalues = NETDATA.dataAttribute(state.element, 'sparkline-xvalues', undefined); + var chartRangeClip = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeclip', undefined); + var chartRangeMinX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeminx', undefined); + var chartRangeMaxX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemaxx', undefined); + var disableInteraction = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disableinteraction', false); + var disableTooltips = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disabletooltips', false); + var disableHighlight = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disablehighlight', false); + var highlightLighten = NETDATA.dataAttribute(state.element, 'sparkline-highlightlighten', 1.4); + var highlightColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightcolor', undefined); + var tooltipContainer = NETDATA.dataAttribute(state.element, 'sparkline-tooltipcontainer', undefined); + var tooltipClassname = NETDATA.dataAttribute(state.element, 'sparkline-tooltipclassname', undefined); + var tooltipFormat = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformat', undefined); + var tooltipPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipprefix', undefined); + var tooltipSuffix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipsuffix', ' ' + state.units_current); + var tooltipSkipNull = NETDATA.dataAttributeBoolean(state.element, 'sparkline-tooltipskipnull', true); + var tooltipValueLookups = NETDATA.dataAttribute(state.element, 'sparkline-tooltipvaluelookups', undefined); + var tooltipFormatFieldlist = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlist', undefined); + var tooltipFormatFieldlistKey = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlistkey', undefined); + var numberFormatter = NETDATA.dataAttribute(state.element, 'sparkline-numberformatter', function(n){ return n.toFixed(2); }); + var numberDigitGroupSep = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupsep', undefined); + var numberDecimalMark = NETDATA.dataAttribute(state.element, 'sparkline-numberdecimalmark', undefined); + var numberDigitGroupCount = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupcount', undefined); + var animatedZooms = NETDATA.dataAttributeBoolean(state.element, 'sparkline-animatedzooms', false); + + if(spotColor === 'disable') spotColor=''; + if(minSpotColor === 'disable') minSpotColor=''; + if(maxSpotColor === 'disable') maxSpotColor=''; + + // state.log('sparkline type ' + type + ', lineColor: ' + lineColor + ', fillColor: ' + fillColor); + + state.sparkline_options = { + 
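+            // every value here was read from an optional data-sparkline-*
+            // attribute of the chart element; attributes that were not set
+            // remain undefined and are left for jquery.sparkline to replace
+            // with its own defaults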
type: type, + lineColor: lineColor, + fillColor: fillColor, + chartRangeMin: chartRangeMin, + chartRangeMax: chartRangeMax, + composite: composite, + enableTagOptions: enableTagOptions, + tagOptionPrefix: tagOptionPrefix, + tagValuesAttribute: tagValuesAttribute, + disableHiddenCheck: disableHiddenCheck, + defaultPixelsPerValue: defaultPixelsPerValue, + spotColor: spotColor, + minSpotColor: minSpotColor, + maxSpotColor: maxSpotColor, + spotRadius: spotRadius, + valueSpots: valueSpots, + highlightSpotColor: highlightSpotColor, + highlightLineColor: highlightLineColor, + lineWidth: lineWidth, + normalRangeMin: normalRangeMin, + normalRangeMax: normalRangeMax, + drawNormalOnTop: drawNormalOnTop, + xvalues: xvalues, + chartRangeClip: chartRangeClip, + chartRangeMinX: chartRangeMinX, + chartRangeMaxX: chartRangeMaxX, + disableInteraction: disableInteraction, + disableTooltips: disableTooltips, + disableHighlight: disableHighlight, + highlightLighten: highlightLighten, + highlightColor: highlightColor, + tooltipContainer: tooltipContainer, + tooltipClassname: tooltipClassname, + tooltipChartTitle: state.title, + tooltipFormat: tooltipFormat, + tooltipPrefix: tooltipPrefix, + tooltipSuffix: tooltipSuffix, + tooltipSkipNull: tooltipSkipNull, + tooltipValueLookups: tooltipValueLookups, + tooltipFormatFieldlist: tooltipFormatFieldlist, + tooltipFormatFieldlistKey: tooltipFormatFieldlistKey, + numberFormatter: numberFormatter, + numberDigitGroupSep: numberDigitGroupSep, + numberDecimalMark: numberDecimalMark, + numberDigitGroupCount: numberDigitGroupCount, + animatedZooms: animatedZooms, + width: state.chartWidth(), + height: state.chartHeight() + }; + + $(state.element_chart).sparkline(data.result, state.sparkline_options); + + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // dygraph + + NETDATA.dygraph = { + smooth: false + }; + + NETDATA.dygraphToolboxPanAndZoom = function(state, after, before) { + if(after < state.netdata_first) + after = state.netdata_first; + + if(before > state.netdata_last) + before = state.netdata_last; + + state.setMode('zoom'); + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + state.tmp.dygraph_user_action = true; + state.tmp.dygraph_force_zoom = true; + // state.log('toolboxPanAndZoom'); + state.updateChartPanOrZoom(after, before); + NETDATA.globalPanAndZoom.setMaster(state, after, before); + }; + + NETDATA.dygraphSetSelection = function(state, t) { + if(typeof state.tmp.dygraph_instance !== 'undefined') { + var r = state.calculateRowForTime(t); + if(r !== -1) { + state.tmp.dygraph_instance.setSelection(r); + return true; + } + else { + state.tmp.dygraph_instance.clearSelection(); + state.legendShowUndefined(); + } + } + + return false; + }; + + NETDATA.dygraphClearSelection = function(state) { + if(typeof state.tmp.dygraph_instance !== 'undefined') { + state.tmp.dygraph_instance.clearSelection(); + } + return true; + }; + + NETDATA.dygraphSmoothInitialize = function(callback) { + $.ajax({ + url: NETDATA.dygraph_smooth_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.dygraph.smooth = true; + smoothPlotter.smoothing = 0.3; + }) + .fail(function() { + NETDATA.dygraph.smooth = false; + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + }; + + NETDATA.dygraphInitialize = function(callback) { + if(typeof netdataNoDygraphs 
=== 'undefined' || !netdataNoDygraphs) { + $.ajax({ + url: NETDATA.dygraph_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('dygraph', NETDATA.dygraph_js); + }) + .fail(function() { + NETDATA.chartLibraries.dygraph.enabled = false; + NETDATA.error(100, NETDATA.dygraph_js); + }) + .always(function() { + if(NETDATA.chartLibraries.dygraph.enabled === true && NETDATA.options.current.smooth_plot === true) + NETDATA.dygraphSmoothInitialize(callback); + else if(typeof callback === "function") + return callback(); + }); + } + else { + NETDATA.chartLibraries.dygraph.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.dygraphChartUpdate = function(state, data) { + var dygraph = state.tmp.dygraph_instance; + + if(typeof dygraph === 'undefined') + return NETDATA.dygraphChartCreate(state, data); + + // when the chart is not visible, and hidden + // if there is a window resize, dygraph detects + // its element size as 0x0. + // this will make it re-appear properly + + if(state.tm.last_unhidden > state.tmp.dygraph_last_rendered) + dygraph.resize(); + + var options = { + file: data.result.data, + colors: state.chartColors(), + labels: data.result.labels, + //labelsDivWidth: state.chartWidth() - 70, + includeZero: state.tmp.dygraph_include_zero, + visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names) + }; + + if(state.tmp.dygraph_chart_type === 'stacked') { + if(options.includeZero === true && state.dimensions_visibility.countSelected() < options.visibility.length) + options.includeZero = 0; + } + + if(!NETDATA.chartLibraries.dygraph.isSparkline(state)) { + options.ylabel = state.units_current; // (state.units_desired === 'auto')?"":state.units_current; + } + + if(state.tmp.dygraph_force_zoom === true) { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('dygraphChartUpdate() forced zoom update'); + + options.dateWindow = (state.requested_padding !== null)?[ state.view_after, state.view_before ]:null; + //options.isZoomedIgnoreProgrammaticZoom = true; + state.tmp.dygraph_force_zoom = false; + } + else if(state.current.name !== 'auto') { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('dygraphChartUpdate() loose update'); + } + else { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('dygraphChartUpdate() strict update'); + + options.dateWindow = (state.requested_padding !== null)?[ state.view_after, state.view_before ]:null; + //options.isZoomedIgnoreProgrammaticZoom = true; + } + + options.valueRange = state.tmp.dygraph_options.valueRange; + + var oldMax = null, oldMin = null; + if (state.tmp.__commonMin !== null) { + state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0]; + oldMin = options.valueRange[0] = NETDATA.commonMin.get(state); + } + if (state.tmp.__commonMax !== null) { + state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1]; + oldMax = options.valueRange[1] = NETDATA.commonMax.get(state); + } + + if(state.tmp.dygraph_smooth_eligible === true) { + if((NETDATA.options.current.smooth_plot === true && state.tmp.dygraph_options.plotter !== smoothPlotter) + || (NETDATA.options.current.smooth_plot === false && state.tmp.dygraph_options.plotter === smoothPlotter)) { + NETDATA.dygraphChartCreate(state, data); + return; + } + } + + if(netdataSnapshotData !== null && 
NETDATA.globalPanAndZoom.isActive() === true && NETDATA.globalPanAndZoom.isMaster(state) === false) { + // pan and zoom on snapshots + options.dateWindow = [ NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms ]; + //options.isZoomedIgnoreProgrammaticZoom = true; + } + + if(NETDATA.chartLibraries.dygraph.isLogScale(state) === true) { + if(Array.isArray(options.valueRange) && options.valueRange[0] <= 0) + options.valueRange[0] = null; + } + + dygraph.updateOptions(options); + + var redraw = false; + if(oldMin !== null && oldMin > state.tmp.dygraph_instance.axes_[0].extremeRange[0]) { + state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0]; + options.valueRange[0] = NETDATA.commonMin.get(state); + redraw = true; + } + if(oldMax !== null && oldMax < state.tmp.dygraph_instance.axes_[0].extremeRange[1]) { + state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1]; + options.valueRange[1] = NETDATA.commonMax.get(state); + redraw = true; + } + + if(redraw === true) { + // state.log('forcing redraw to adapt to common- min/max'); + dygraph.updateOptions(options); + } + + state.tmp.dygraph_last_rendered = Date.now(); + return true; + }; + + NETDATA.dygraphChartCreate = function(state, data) { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('dygraphChartCreate()'); + + state.tmp.dygraph_chart_type = NETDATA.dataAttribute(state.element, 'dygraph-type', state.chart.chart_type); + if(state.tmp.dygraph_chart_type === 'stacked' && data.dimensions === 1) state.tmp.dygraph_chart_type = 'area'; + if(state.tmp.dygraph_chart_type === 'stacked' && NETDATA.chartLibraries.dygraph.isLogScale(state) === true) state.tmp.dygraph_chart_type = 'area'; + + var highlightCircleSize = (NETDATA.chartLibraries.dygraph.isSparkline(state) === true)?3:4; + + var smooth = (NETDATA.dygraph.smooth === true) + ?(NETDATA.dataAttributeBoolean(state.element, 'dygraph-smooth', (state.tmp.dygraph_chart_type === 'line' && NETDATA.chartLibraries.dygraph.isSparkline(state) === false))) + :false; + + state.tmp.dygraph_include_zero = NETDATA.dataAttribute(state.element, 'dygraph-includezero', (state.tmp.dygraph_chart_type === 'stacked')); + var drawAxis = NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawaxis', true); + + state.tmp.dygraph_options = { + colors: NETDATA.dataAttribute(state.element, 'dygraph-colors', state.chartColors()), + + // leave a few pixels empty on the right of the chart + rightGap: NETDATA.dataAttribute(state.element, 'dygraph-rightgap', 5), + showRangeSelector: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showrangeselector', false), + showRoller: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showroller', false), + title: NETDATA.dataAttribute(state.element, 'dygraph-title', state.title), + titleHeight: NETDATA.dataAttribute(state.element, 'dygraph-titleheight', 19), + legend: NETDATA.dataAttribute(state.element, 'dygraph-legend', 'always'), // we need this to get selection events + labels: data.result.labels, + labelsDiv: NETDATA.dataAttribute(state.element, 'dygraph-labelsdiv', state.element_legend_childs.hidden), + //labelsDivStyles: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivstyles', { 'fontSize':'1px' }), + //labelsDivWidth: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivwidth', state.chartWidth() - 70), + labelsSeparateLines: NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsseparatelines', true), + labelsShowZeroValues: (NETDATA.chartLibraries.dygraph.isLogScale(state) === 
true)?false:NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsshowzerovalues', true),
+            labelsKMB: false,
+            labelsKMG2: false,
+            showLabelsOnHighlight: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showlabelsonhighlight', true),
+            hideOverlayOnMouseOut: NETDATA.dataAttributeBoolean(state.element, 'dygraph-hideoverlayonmouseout', true),
+            includeZero: state.tmp.dygraph_include_zero,
+            xRangePad: NETDATA.dataAttribute(state.element, 'dygraph-xrangepad', 0),
+            yRangePad: NETDATA.dataAttribute(state.element, 'dygraph-yrangepad', 1),
+            valueRange: NETDATA.dataAttribute(state.element, 'dygraph-valuerange', [ null, null ]),
+            ylabel: state.units_current, // (state.units_desired === 'auto')?"":state.units_current,
+            yLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-ylabelwidth', 12),
+
+            // the function to plot the chart
+            plotter: null,
+
+            // The width of the lines connecting data points.
+            // This can be used to increase the contrast of some graphs.
+            strokeWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokewidth', ((state.tmp.dygraph_chart_type === 'stacked')?0.1:((smooth === true)?1.5:0.7))),
+            strokePattern: NETDATA.dataAttribute(state.element, 'dygraph-strokepattern', undefined),
+
+            // The size of the dot to draw on each point in pixels (see drawPoints).
+            // A dot is always drawn when a point is "isolated",
+            // i.e. there is a missing point on either side of it.
+            // This also controls the size of those dots.
+            drawPoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawpoints', false),
+
+            // Draw points at the edges of gaps in the data.
+            // This improves visibility of small data segments or other data irregularities.
+            drawGapEdgePoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgapedgepoints', true),
+            connectSeparatedPoints: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?false:NETDATA.dataAttributeBoolean(state.element, 'dygraph-connectseparatedpoints', false),
+            pointSize: NETDATA.dataAttribute(state.element, 'dygraph-pointsize', 1),
+
+            // enabling this draws the chart with stepped, staircase-style lines
+            stepPlot: NETDATA.dataAttributeBoolean(state.element, 'dygraph-stepplot', false),
+
+            // Draw a border around graph lines to make crossing lines more easily
+            // distinguishable. Useful for graphs with many lines.
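+            // like every 'dygraph-*' option in this object, the two options below
+            // can be overridden per chart with a data attribute on the chart
+            // element: data-dygraph-strokebordercolor and data-dygraph-strokeborderwidth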
+ strokeBorderColor: NETDATA.dataAttribute(state.element, 'dygraph-strokebordercolor', NETDATA.themes.current.background), + strokeBorderWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokeborderwidth', (state.tmp.dygraph_chart_type === 'stacked')?0.0:0.0), + fillGraph: NETDATA.dataAttribute(state.element, 'dygraph-fillgraph', (state.tmp.dygraph_chart_type === 'area' || state.tmp.dygraph_chart_type === 'stacked')), + fillAlpha: NETDATA.dataAttribute(state.element, 'dygraph-fillalpha', + ((state.tmp.dygraph_chart_type === 'stacked') + ?NETDATA.options.current.color_fill_opacity_stacked + :NETDATA.options.current.color_fill_opacity_area) + ), + stackedGraph: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraph', (state.tmp.dygraph_chart_type === 'stacked')), + stackedGraphNaNFill: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraphnanfill', 'none'), + drawAxis: drawAxis, + axisLabelFontSize: NETDATA.dataAttribute(state.element, 'dygraph-axislabelfontsize', 10), + axisLineColor: NETDATA.dataAttribute(state.element, 'dygraph-axislinecolor', NETDATA.themes.current.axis), + axisLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-axislinewidth', 1.0), + drawGrid: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgrid', true), + gridLinePattern: NETDATA.dataAttribute(state.element, 'dygraph-gridlinepattern', null), + gridLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-gridlinewidth', 1.0), + gridLineColor: NETDATA.dataAttribute(state.element, 'dygraph-gridlinecolor', NETDATA.themes.current.grid), + maxNumberWidth: NETDATA.dataAttribute(state.element, 'dygraph-maxnumberwidth', 8), + sigFigs: NETDATA.dataAttribute(state.element, 'dygraph-sigfigs', null), + digitsAfterDecimal: NETDATA.dataAttribute(state.element, 'dygraph-digitsafterdecimal', 2), + valueFormatter: NETDATA.dataAttribute(state.element, 'dygraph-valueformatter', undefined), + highlightCircleSize: NETDATA.dataAttribute(state.element, 'dygraph-highlightcirclesize', highlightCircleSize), + highlightSeriesOpts: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesopts', null), // TOO SLOW: { strokeWidth: 1.5 }, + highlightSeriesBackgroundAlpha: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesbackgroundalpha', null), // TOO SLOW: (state.tmp.dygraph_chart_type === 'stacked')?0.7:0.5, + pointClickCallback: NETDATA.dataAttribute(state.element, 'dygraph-pointclickcallback', undefined), + visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names), + logscale: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?'y':undefined, + + axes: { + x: { + pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-xpixelsperlabel', 50), + ticker: Dygraph.dateTicker, + axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-xaxislabelwidth', 60), + drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawxaxis', drawAxis), + axisLabelFormatter: function (d, gran) { + void(gran); + return NETDATA.dateTime.xAxisTimeString(d); + } + }, + y: { + logscale: (NETDATA.chartLibraries.dygraph.isLogScale(state) === true)?true:undefined, + pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-ypixelsperlabel', 15), + axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-yaxislabelwidth', 50), + drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawyaxis', drawAxis), + axisLabelFormatter: function (y) { + + // unfortunately, we have to call this every single time + state.legendFormatValueDecimalsFromMinMax( + 
this.axes_[0].extremeRange[0], + this.axes_[0].extremeRange[1] + ); + + var old_units = this.user_attrs_.ylabel; + var v = state.legendFormatValue(y); + var new_units = state.units_current; + + if(state.units_desired === 'auto' && typeof old_units !== 'undefined' && new_units !== old_units && !NETDATA.chartLibraries.dygraph.isSparkline(state)) { + // console.log(this); + // state.log('units discrepancy: old = ' + old_units + ', new = ' + new_units); + var len = this.plugins_.length; + while(len--) { + // console.log(this.plugins_[len]); + if(typeof this.plugins_[len].plugin.ylabel_div_ !== 'undefined' + && this.plugins_[len].plugin.ylabel_div_ !== null + && typeof this.plugins_[len].plugin.ylabel_div_.children !== 'undefined' + && this.plugins_[len].plugin.ylabel_div_.children !== null + && typeof this.plugins_[len].plugin.ylabel_div_.children[0].children !== 'undefined' + && this.plugins_[len].plugin.ylabel_div_.children[0].children !== null + ) { + this.plugins_[len].plugin.ylabel_div_.children[0].children[0].innerHTML = new_units; + this.user_attrs_.ylabel = new_units; + break; + } + } + + if(len < 0) + state.log('units discrepancy, but cannot find dygraphs div to change: old = ' + old_units + ', new = ' + new_units); + } + + return v; + } + } + }, + legendFormatter: function(data) { + if(state.tmp.dygraph_mouse_down === true) + return; + + var elements = state.element_legend_childs; + + // if the hidden div is not there + // we are not managing the legend + if(elements.hidden === null) return; + + if (typeof data.x !== 'undefined') { + state.legendSetDate(data.x); + var i = data.series.length; + while(i--) { + var series = data.series[i]; + if(series.isVisible === true) + state.legendSetLabelValue(series.label, series.y); + else + state.legendSetLabelValue(series.label, null); + } + } + + return ''; + }, + drawCallback: function(dygraph, is_initial) { + + // the user has panned the chart and this is called to re-draw the chart + // 1. refresh this chart by adding data to it + // 2. notify all the other charts about the update they need + + // to prevent an infinite loop (feedback), we use + // state.tmp.dygraph_user_action + // - when true, this is initiated by a user + // - when false, this is feedback + + if(state.current.name !== 'auto' && state.tmp.dygraph_user_action === true) { + state.tmp.dygraph_user_action = false; + + var x_range = dygraph.xAxisRange(); + var after = Math.round(x_range[0]); + var before = Math.round(x_range[1]); + + if(NETDATA.options.debug.dygraph === true) + state.log('dygraphDrawCallback(dygraph, ' + is_initial + '): mode ' + state.current.name + ' ' + (after / 1000).toString() + ' - ' + (before / 1000).toString()); + //console.log(state); + + if(before <= state.netdata_last && after >= state.netdata_first) + // update only when we are within the data limits + state.updateChartPanOrZoom(after, before); + } + }, + zoomCallback: function(minDate, maxDate, yRanges) { + + // the user has selected a range on the chart + // 1. refresh this chart by adding data to it + // 2. 
notify all the other charts about the update they need + + void(yRanges); + + if(NETDATA.options.debug.dygraph === true) + state.log('dygraphZoomCallback(): ' + state.current.name); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + state.setMode('zoom'); + + // refresh it to the greatest possible zoom level + state.tmp.dygraph_user_action = true; + state.tmp.dygraph_force_zoom = true; + state.updateChartPanOrZoom(minDate, maxDate); + }, + highlightCallback: function(event, x, points, row, seriesName) { + void(seriesName); + + state.pauseChart(); + + // there is a bug in dygraph when the chart is zoomed enough + // the time it thinks is selected is wrong + // here we calculate the time t based on the row number selected + // which is ok + // var t = state.data_after + row * state.data_update_every; + // console.log('row = ' + row + ', x = ' + x + ', t = ' + t + ' ' + ((t === x)?'SAME':(Math.abs(x-t)<=state.data_update_every)?'SIMILAR':'DIFFERENT') + ', rows in db: ' + state.data_points + ' visible(x) = ' + state.timeIsVisible(x) + ' visible(t) = ' + state.timeIsVisible(t) + ' r(x) = ' + state.calculateRowForTime(x) + ' r(t) = ' + state.calculateRowForTime(t) + ' range: ' + state.data_after + ' - ' + state.data_before + ' real: ' + state.data.after + ' - ' + state.data.before + ' every: ' + state.data_update_every); + + if(state.tmp.dygraph_mouse_down !== true) + NETDATA.globalSelectionSync.sync(state, x); + + // fix legend zIndex using the internal structures of dygraph legend module + // this works, but it is a hack! + // state.tmp.dygraph_instance.plugins_[0].plugin.legend_div_.style.zIndex = 10000; + }, + unhighlightCallback: function(event) { + void(event); + + if(state.tmp.dygraph_mouse_down === true) + return; + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('dygraphUnhighlightCallback()'); + + state.unpauseChart(); + NETDATA.globalSelectionSync.stop(); + }, + underlayCallback: function(canvas, area, g) { + + // the chart is about to be drawn + // this function renders global highlighted time-frame + + if(NETDATA.globalChartUnderlay.isActive()) { + var after = NETDATA.globalChartUnderlay.after; + var before = NETDATA.globalChartUnderlay.before; + + if(after < state.view_after) + after = state.view_after; + + if(before > state.view_before) + before = state.view_before; + + if(after < before) { + var bottom_left = g.toDomCoords(after, -20); + var top_right = g.toDomCoords(before, +20); + + var left = bottom_left[0]; + var right = top_right[0]; + + canvas.fillStyle = NETDATA.themes.current.highlight; + canvas.fillRect(left, area.y, right - left, area.h); + } + } + }, + interactionModel : { + mousedown: function(event, dygraph, context) { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.mousedown()'); + + state.tmp.dygraph_user_action = true; + + if(NETDATA.options.debug.dygraph === true) + state.log('dygraphMouseDown()'); + + // Right-click should not initiate anything. 
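+                        // gesture map implemented below:
+                        //   left drag              -> pan
+                        //   SHIFT + left drag      -> zoom selection
+                        //   ALT/CTRL/META + drag   -> highlight a time-frame
+                        //   middle drag            -> zoom selection
+                        //   SHIFT + middle drag    -> pan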
+ if(event.button && event.button === 2) return; + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_mouse_down = true; + context.initializeMouseDown(event, dygraph, context); + + //console.log(event); + if(event.button && event.button === 1) { + if (event.shiftKey) { + //console.log('middle mouse button dragging (PAN)'); + + state.setMode('pan'); + // NETDATA.globalSelectionSync.delay(); + state.tmp.dygraph_highlight_after = null; + Dygraph.startPan(event, dygraph, context); + } + else if(event.altKey || event.ctrlKey || event.metaKey) { + //console.log('middle mouse button highlight'); + + if (!(event.offsetX && event.offsetY)) { + event.offsetX = event.layerX - event.target.offsetLeft; + event.offsetY = event.layerY - event.target.offsetTop; + } + state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX); + Dygraph.startZoom(event, dygraph, context); + } + else { + //console.log('middle mouse button selection for zoom (ZOOM)'); + + state.setMode('zoom'); + // NETDATA.globalSelectionSync.delay(); + state.tmp.dygraph_highlight_after = null; + Dygraph.startZoom(event, dygraph, context); + } + } + else { + if (event.shiftKey) { + //console.log('left mouse button selection for zoom (ZOOM)'); + + state.setMode('zoom'); + // NETDATA.globalSelectionSync.delay(); + state.tmp.dygraph_highlight_after = null; + Dygraph.startZoom(event, dygraph, context); + } + else if(event.altKey || event.ctrlKey || event.metaKey) { + //console.log('left mouse button highlight'); + + if (!(event.offsetX && event.offsetY)) { + event.offsetX = event.layerX - event.target.offsetLeft; + event.offsetY = event.layerY - event.target.offsetTop; + } + state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX); + Dygraph.startZoom(event, dygraph, context); + } + else { + //console.log('left mouse button dragging (PAN)'); + + state.setMode('pan'); + // NETDATA.globalSelectionSync.delay(); + state.tmp.dygraph_highlight_after = null; + Dygraph.startPan(event, dygraph, context); + } + } + }, + mousemove: function(event, dygraph, context) { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.mousemove()'); + + if(state.tmp.dygraph_highlight_after !== null) { + //console.log('highlight selection...'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + Dygraph.moveZoom(event, dygraph, context); + event.preventDefault(); + } + else if(context.isPanning) { + //console.log('panning...'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + //NETDATA.globalSelectionSync.stop(); + //NETDATA.globalSelectionSync.delay(); + state.setMode('pan'); + context.is2DPan = false; + Dygraph.movePan(event, dygraph, context); + } + else if(context.isZooming) { + //console.log('zooming...'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + //NETDATA.globalSelectionSync.stop(); + //NETDATA.globalSelectionSync.delay(); + state.setMode('zoom'); + Dygraph.moveZoom(event, dygraph, context); + } + }, + mouseup: function(event, dygraph, context) { + state.tmp.dygraph_mouse_down = false; + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.mouseup()'); + + if(state.tmp.dygraph_highlight_after !== null) { + //console.log('done highlight selection'); + + 
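+                            // a highlight selection has just been completed:
+                            // store the selected time-frame in the global chart
+                            // underlay (drawn by underlayCallback on every chart)
+                            // and let the auto-refresher run again immediately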
NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + if (!(event.offsetX && event.offsetY)){ + event.offsetX = event.layerX - event.target.offsetLeft; + event.offsetY = event.layerY - event.target.offsetTop; + } + + NETDATA.globalChartUnderlay.set(state + , state.tmp.dygraph_highlight_after + , dygraph.toDataXCoord(event.offsetX) + , state.view_after + , state.view_before + ); + + state.tmp.dygraph_highlight_after = null; + + context.isZooming = false; + dygraph.clearZoomRect_(); + dygraph.drawGraph_(false); + + // refresh all the charts immediately + NETDATA.options.auto_refresher_stop_until = 0; + } + else if (context.isPanning) { + //console.log('done panning'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + Dygraph.endPan(event, dygraph, context); + + // refresh all the charts immediately + NETDATA.options.auto_refresher_stop_until = 0; + } + else if (context.isZooming) { + //console.log('done zomming'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + Dygraph.endZoom(event, dygraph, context); + + // refresh all the charts immediately + NETDATA.options.auto_refresher_stop_until = 0; + } + }, + click: function(event, dygraph, context) { + void(dygraph); + void(context); + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.click()'); + + event.preventDefault(); + }, + dblclick: function(event, dygraph, context) { + void(event); + void(dygraph); + void(context); + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.dblclick()'); + NETDATA.resetAllCharts(state); + }, + wheel: function(event, dygraph, context) { + void(context); + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.wheel()'); + + // Take the offset of a mouse event on the dygraph canvas and + // convert it to a pair of percentages from the bottom left. + // (Not top left, bottom is where the lower value is.) + function offsetToPercentage(g, offsetX, offsetY) { + // This is calculating the pixel offset of the leftmost date. + var xOffset = g.toDomCoords(g.xAxisRange()[0], null)[0]; + var yar0 = g.yAxisRange(0); + + // This is calculating the pixel of the highest value. (Top pixel) + var yOffset = g.toDomCoords(null, yar0[1])[1]; + + // x y w and h are relative to the corner of the drawing area, + // so that the upper corner of the drawing area is (0, 0). + var x = offsetX - xOffset; + var y = offsetY - yOffset; + + // This is computing the rightmost pixel, effectively defining the + // width. + var w = g.toDomCoords(g.xAxisRange()[1], null)[0] - xOffset; + + // This is computing the lowest pixel, effectively defining the height. + var h = g.toDomCoords(null, yar0[0])[1] - yOffset; + + // Percentage from the left. + var xPct = w === 0 ? 0 : (x / w); + // Percentage from the top. + var yPct = h === 0 ? 0 : (y / h); + + // The (1-) part below changes it from "% distance down from the top" + // to "% distance up from the bottom". + return [xPct, (1-yPct)]; + } + + // Adjusts [x, y] toward each other by zoomInPercentage% + // Split it so the left/bottom axis gets xBias/yBias of that change and + // tight/top gets (1-xBias)/(1-yBias) of that change. + // + // If a bias is missing it splits it down the middle. 
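+                    // worked example: with the x axis at [0, 100],
+                    // zoomInPercentage 0.1 and xBias 0.25, the axis shrinks by
+                    // 10 units total, 2.5 from the left and 7.5 from the right,
+                    // giving [2.5, 92.5]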
+ function zoomRange(g, zoomInPercentage, xBias, yBias) { + xBias = xBias || 0.5; + yBias = yBias || 0.5; + + function adjustAxis(axis, zoomInPercentage, bias) { + var delta = axis[1] - axis[0]; + var increment = delta * zoomInPercentage; + var foo = [increment * bias, increment * (1-bias)]; + + return [ axis[0] + foo[0], axis[1] - foo[1] ]; + } + + var yAxes = g.yAxisRanges(); + var newYAxes = []; + for (var i = 0; i < yAxes.length; i++) { + newYAxes[i] = adjustAxis(yAxes[i], zoomInPercentage, yBias); + } + + return adjustAxis(g.xAxisRange(), zoomInPercentage, xBias); + } + + if(event.altKey || event.shiftKey) { + state.tmp.dygraph_user_action = true; + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + // http://dygraphs.com/gallery/interaction-api.js + var normal_def; + if(typeof event.wheelDelta === 'number' && !isNaN(event.wheelDelta)) + // chrome + normal_def = event.wheelDelta / 40; + else + // firefox + normal_def = event.deltaY * -1.2; + + var normal = (event.detail) ? event.detail * -1 : normal_def; + var percentage = normal / 50; + + if (!(event.offsetX && event.offsetY)){ + event.offsetX = event.layerX - event.target.offsetLeft; + event.offsetY = event.layerY - event.target.offsetTop; + } + + var percentages = offsetToPercentage(dygraph, event.offsetX, event.offsetY); + var xPct = percentages[0]; + var yPct = percentages[1]; + + var new_x_range = zoomRange(dygraph, percentage, xPct, yPct); + var after = new_x_range[0]; + var before = new_x_range[1]; + + var first = state.netdata_first + state.data_update_every; + var last = state.netdata_last + state.data_update_every; + + if(before > last) { + after -= (before - last); + before = last; + } + if(after < first) { + after = first; + } + + state.setMode('zoom'); + state.updateChartPanOrZoom(after, before, function() { + dygraph.updateOptions({ dateWindow: [ after, before ] }); + }); + + event.preventDefault(); + } + }, + touchstart: function(event, dygraph, context) { + state.tmp.dygraph_mouse_down = true; + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.touchstart()'); + + state.tmp.dygraph_user_action = true; + state.setMode('zoom'); + state.pauseChart(); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + Dygraph.defaultInteractionModel.touchstart(event, dygraph, context); + + // we overwrite the touch directions at the end, to overwrite + // the internal default of dygraph + context.touchDirections = { x: true, y: false }; + + state.dygraph_last_touch_start = Date.now(); + state.dygraph_last_touch_move = 0; + + if(typeof event.touches[0].pageX === 'number') + state.dygraph_last_touch_page_x = event.touches[0].pageX; + else + state.dygraph_last_touch_page_x = 0; + }, + touchmove: function(event, dygraph, context) { + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.touchmove()'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + Dygraph.defaultInteractionModel.touchmove(event, dygraph, context); + + state.dygraph_last_touch_move = Date.now(); + }, + touchend: function(event, dygraph, context) { + state.tmp.dygraph_mouse_down = false; + + if(NETDATA.options.debug.dygraph === true || state.debug === true) + state.log('interactionModel.touchend()'); + + NETDATA.globalSelectionSync.stop(); + NETDATA.globalSelectionSync.delay(); + + state.tmp.dygraph_user_action = true; + 
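+                        // delegate the gesture to dygraph's default touch model;
+                        // the code below then turns a motionless tap into a
+                        // selection sync, and a quick double tap into a reset
+                        // of all charts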
Dygraph.defaultInteractionModel.touchend(event, dygraph, context);
+
+                        // if it didn't move, it is a selection
+                        if(state.dygraph_last_touch_move === 0 && state.dygraph_last_touch_page_x !== 0) {
+                            NETDATA.globalSelectionSync.dont_sync_before = 0;
+                            NETDATA.globalSelectionSync.setMaster(state);
+
+                            // internal api of dygraph
+                            var pct = (state.dygraph_last_touch_page_x - (dygraph.plotter_.area.x + state.element.getBoundingClientRect().left)) / dygraph.plotter_.area.w;
+                            // console.log('pct: ' + pct.toString());
+
+                            var t = Math.round(state.view_after + (state.view_before - state.view_after) * pct);
+                            if(NETDATA.dygraphSetSelection(state, t) === true) {
+                                NETDATA.globalSelectionSync.sync(state, t);
+                            }
+                        }
+
+                        // if it was double tap within double click time, reset the charts
+                        var now = Date.now();
+                        if(typeof state.dygraph_last_touch_end !== 'undefined') {
+                            if(state.dygraph_last_touch_move === 0) {
+                                var dt = now - state.dygraph_last_touch_end;
+                                if(dt <= NETDATA.options.current.double_click_speed)
+                                    NETDATA.resetAllCharts(state);
+                            }
+                        }
+
+                        // remember the timestamp of the last touch end
+                        state.dygraph_last_touch_end = now;
+
+                        // refresh all the charts immediately
+                        NETDATA.options.auto_refresher_stop_until = 0;
+                    }
+                }
+            };
+
+            if(NETDATA.chartLibraries.dygraph.isLogScale(state) === true) {
+                if(Array.isArray(state.tmp.dygraph_options.valueRange) && state.tmp.dygraph_options.valueRange[0] <= 0)
+                    state.tmp.dygraph_options.valueRange[0] = null;
+            }
+
+            if(NETDATA.chartLibraries.dygraph.isSparkline(state) === true) {
+                state.tmp.dygraph_options.drawGrid = false;
+                state.tmp.dygraph_options.drawAxis = false;
+                state.tmp.dygraph_options.title = undefined;
+                state.tmp.dygraph_options.ylabel = undefined;
+                state.tmp.dygraph_options.yLabelWidth = 0;
+                //state.tmp.dygraph_options.labelsDivWidth = 120;
+                //state.tmp.dygraph_options.labelsDivStyles.width = '120px';
+                state.tmp.dygraph_options.labelsSeparateLines = true;
+                state.tmp.dygraph_options.rightGap = 0;
+                state.tmp.dygraph_options.yRangePad = 1;
+                state.tmp.dygraph_options.axes.x.drawAxis = false;
+                state.tmp.dygraph_options.axes.y.drawAxis = false;
+            }
+
+            if(smooth === true) {
+                state.tmp.dygraph_smooth_eligible = true;
+
+                if(NETDATA.options.current.smooth_plot === true)
+                    state.tmp.dygraph_options.plotter = smoothPlotter;
+            }
+            else state.tmp.dygraph_smooth_eligible = false;
+
+            if(netdataSnapshotData !== null && NETDATA.globalPanAndZoom.isActive() === true && NETDATA.globalPanAndZoom.isMaster(state) === false) {
+                // pan and zoom on snapshots
+                state.tmp.dygraph_options.dateWindow = [ NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms ];
+                //state.tmp.dygraph_options.isZoomedIgnoreProgrammaticZoom = true;
+            }
+
+            state.tmp.dygraph_instance = new Dygraph(state.element_chart,
+                data.result.data, state.tmp.dygraph_options);
+
+            state.tmp.dygraph_force_zoom = false;
+            state.tmp.dygraph_user_action = false;
+            state.tmp.dygraph_last_rendered = Date.now();
+            state.tmp.dygraph_highlight_after = null;
+
+            if(state.tmp.dygraph_options.valueRange[0] === null && state.tmp.dygraph_options.valueRange[1] === null) {
+                if (typeof state.tmp.dygraph_instance.axes_[0].extremeRange !== 'undefined') {
+                    state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
+                    state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
+                }
+                else {
+                    state.log('incompatible version of Dygraph detected');
+                    state.tmp.__commonMin = null;
+                    state.tmp.__commonMax = null;
+                }
+            }
+            else {
+                // if
the user gave a valueRange, respect it + state.tmp.__commonMin = null; + state.tmp.__commonMax = null; + } + + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // morris + + NETDATA.morrisInitialize = function(callback) { + if(typeof netdataNoMorris === 'undefined' || !netdataNoMorris) { + + // morris requires raphael + if(!NETDATA.chartLibraries.raphael.initialized) { + if(NETDATA.chartLibraries.raphael.enabled) { + NETDATA.raphaelInitialize(function() { + NETDATA.morrisInitialize(callback); + }); + } + else { + NETDATA.chartLibraries.morris.enabled = false; + if(typeof callback === "function") + return callback(); + } + } + else { + NETDATA._loadCSS(NETDATA.morris_css); + + $.ajax({ + url: NETDATA.morris_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('morris', NETDATA.morris_js); + }) + .fail(function() { + NETDATA.chartLibraries.morris.enabled = false; + NETDATA.error(100, NETDATA.morris_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + } + else { + NETDATA.chartLibraries.morris.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.morrisChartUpdate = function(state, data) { + state.morris_instance.setData(data.result.data); + return true; + }; + + NETDATA.morrisChartCreate = function(state, data) { + + state.morris_options = { + element: state.element_chart.id, + data: data.result.data, + xkey: 'time', + ykeys: data.dimension_names, + labels: data.dimension_names, + lineWidth: 2, + pointSize: 3, + smooth: true, + hideHover: 'auto', + parseTime: true, + continuousLine: false, + behaveLikeLine: false + }; + + if(state.chart.chart_type === 'line') + state.morris_instance = new Morris.Line(state.morris_options); + + else if(state.chart.chart_type === 'area') { + state.morris_options.behaveLikeLine = true; + state.morris_instance = new Morris.Area(state.morris_options); + } + else // stacked + state.morris_instance = new Morris.Area(state.morris_options); + + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // raphael + + NETDATA.raphaelInitialize = function(callback) { + if(typeof netdataStopRaphael === 'undefined' || !netdataStopRaphael) { + $.ajax({ + url: NETDATA.raphael_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('raphael', NETDATA.raphael_js); + }) + .fail(function() { + NETDATA.chartLibraries.raphael.enabled = false; + NETDATA.error(100, NETDATA.raphael_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + else { + NETDATA.chartLibraries.raphael.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.raphaelChartUpdate = function(state, data) { + $(state.element_chart).raphael(data.result, { + width: state.chartWidth(), + height: state.chartHeight() + }); + + return false; + }; + + NETDATA.raphaelChartCreate = function(state, data) { + $(state.element_chart).raphael(data.result, { + width: state.chartWidth(), + height: state.chartHeight() + }); + + return false; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // C3 
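+    // c3Initialize follows the lazy-loading pattern used by every renderer in
+    // this file: a global opt-out flag (netdataNoC3), a one-off script fetch
+    // with $.ajax({ dataType: "script" }), registerChartLibrary() on success,
+    // disabling the library on failure, and the callback always invoked so
+    // chart creation can continue; C3 additionally requires D3, so D3 is
+    // initialized first if needed.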
+ + NETDATA.c3Initialize = function(callback) { + if(typeof netdataNoC3 === 'undefined' || !netdataNoC3) { + + // C3 requires D3 + if(!NETDATA.chartLibraries.d3.initialized) { + if(NETDATA.chartLibraries.d3.enabled) { + NETDATA.d3Initialize(function() { + NETDATA.c3Initialize(callback); + }); + } + else { + NETDATA.chartLibraries.c3.enabled = false; + if(typeof callback === "function") + return callback(); + } + } + else { + NETDATA._loadCSS(NETDATA.c3_css); + + $.ajax({ + url: NETDATA.c3_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('c3', NETDATA.c3_js); + }) + .fail(function() { + NETDATA.chartLibraries.c3.enabled = false; + NETDATA.error(100, NETDATA.c3_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + } + else { + NETDATA.chartLibraries.c3.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.c3ChartUpdate = function(state, data) { + state.c3_instance.destroy(); + return NETDATA.c3ChartCreate(state, data); + + //state.c3_instance.load({ + // rows: data.result, + // unload: true + //}); + + //return true; + }; + + NETDATA.c3ChartCreate = function(state, data) { + + state.element_chart.id = 'c3-' + state.uuid; + // console.log('id = ' + state.element_chart.id); + + state.c3_instance = c3.generate({ + bindto: '#' + state.element_chart.id, + size: { + width: state.chartWidth(), + height: state.chartHeight() + }, + color: { + pattern: state.chartColors() + }, + data: { + x: 'time', + rows: data.result, + type: (state.chart.chart_type === 'line')?'spline':'area-spline' + }, + axis: { + x: { + type: 'timeseries', + tick: { + format: function(x) { + return NETDATA.dateTime.xAxisTimeString(x); + } + } + } + }, + grid: { + x: { + show: true + }, + y: { + show: true + } + }, + point: { + show: false + }, + line: { + connectNull: false + }, + transition: { + duration: 0 + }, + interaction: { + enabled: true + } + }); + + // console.log(state.c3_instance); + + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // d3pie + + NETDATA.d3pieInitialize = function(callback) { + if(typeof netdataNoD3pie === 'undefined' || !netdataNoD3pie) { + + // d3pie requires D3 + if(!NETDATA.chartLibraries.d3.initialized) { + if(NETDATA.chartLibraries.d3.enabled) { + NETDATA.d3Initialize(function() { + NETDATA.d3pieInitialize(callback); + }); + } + else { + NETDATA.chartLibraries.d3pie.enabled = false; + if(typeof callback === "function") + return callback(); + } + } + else { + $.ajax({ + url: NETDATA.d3pie_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('d3pie', NETDATA.d3pie_js); + }) + .fail(function() { + NETDATA.chartLibraries.d3pie.enabled = false; + NETDATA.error(100, NETDATA.d3pie_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + } + else { + NETDATA.chartLibraries.d3pie.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.d3pieSetContent = function(state, data, index) { + state.legendFormatValueDecimalsFromMinMax( + data.min, + data.max + ); + + var content = []; + var colors = state.chartColors(); + var len = data.result.labels.length; + for(var i = 1; i < len ; i++) { + var label = data.result.labels[i]; 
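+            // labels[0] is the 'time' column, so dimensions start at i = 1;
+            // the color palette is shifted by one (colors[i - 1]) to match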
+ var value = data.result.data[index][label]; + var color = colors[i - 1]; + + if(value !== null && value > 0) { + content.push({ + label: label, + value: value, + color: color + }); + } + } + + if(content.length === 0) + content.push({ + label: 'no data', + value: 100, + color: '#666666' + }); + + state.tmp.d3pie_last_slot = index; + return content; + }; + + NETDATA.d3pieDateRange = function(state, data, index) { + var dt = Math.round((data.before - data.after + 1) / data.points); + var dt_str = NETDATA.seconds4human(dt); + + var before = data.result.data[index].time; + var after = before - (dt * 1000); + + var d1 = NETDATA.dateTime.localeDateString(after); + var t1 = NETDATA.dateTime.localeTimeString(after); + var d2 = NETDATA.dateTime.localeDateString(before); + var t2 = NETDATA.dateTime.localeTimeString(before); + + if(d1 === d2) + return d1 + ' ' + t1 + ' to ' + t2 + ', ' + dt_str; + + return d1 + ' ' + t1 + ' to ' + d2 + ' ' + t2 + ', ' + dt_str; + }; + + NETDATA.d3pieSetSelection = function(state, t) { + if(state.timeIsVisible(t) !== true) + return NETDATA.d3pieClearSelection(state, true); + + var slot = state.calculateRowForTime(t); + slot = state.data.result.data.length - slot - 1; + + if(slot < 0 || slot >= state.data.result.length) + return NETDATA.d3pieClearSelection(state, true); + + if(state.tmp.d3pie_last_slot === slot) { + // we already show this slot, don't do anything + return true; + } + + if(state.tmp.d3pie_timer === undefined) { + state.tmp.d3pie_timer = NETDATA.timeout.set(function() { + state.tmp.d3pie_timer = undefined; + NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, state.data, slot), NETDATA.d3pieDateRange(state, state.data, slot)); + }, 0); + } + + return true; + }; + + NETDATA.d3pieClearSelection = function(state, force) { + if(typeof state.tmp.d3pie_timer !== 'undefined') { + NETDATA.timeout.clear(state.tmp.d3pie_timer); + state.tmp.d3pie_timer = undefined; + } + + if(state.isAutoRefreshable() === true && state.data !== null && force !== true) { + NETDATA.d3pieChartUpdate(state, state.data); + } + else { + if(state.tmp.d3pie_last_slot !== -1) { + state.tmp.d3pie_last_slot = -1; + NETDATA.d3pieChange(state, [{label: 'no data', value: 1, color: '#666666'}], 'no data available'); + } + } + + return true; + }; + + NETDATA.d3pieChange = function(state, content, footer) { + if(state.d3pie_forced_subtitle === null) { + //state.d3pie_instance.updateProp("header.subtitle.text", state.units_current); + state.d3pie_instance.options.header.subtitle.text = state.units_current; + } + + if(state.d3pie_forced_footer === null) { + //state.d3pie_instance.updateProp("footer.text", footer); + state.d3pie_instance.options.footer.text = footer; + } + + //state.d3pie_instance.updateProp("data.content", content); + state.d3pie_instance.options.data.content = content; + state.d3pie_instance.destroy(); + state.d3pie_instance.recreate(); + return true; + }; + + NETDATA.d3pieChartUpdate = function(state, data) { + return NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, data, 0), NETDATA.d3pieDateRange(state, data, 0)); + }; + + NETDATA.d3pieChartCreate = function(state, data) { + + state.element_chart.id = 'd3pie-' + state.uuid; + // console.log('id = ' + state.element_chart.id); + + var content = NETDATA.d3pieSetContent(state, data, 0); + + state.d3pie_forced_title = NETDATA.dataAttribute(state.element, 'd3pie-title', null); + state.d3pie_forced_subtitle = NETDATA.dataAttribute(state.element, 'd3pie-subtitle', null); + state.d3pie_forced_footer = 
NETDATA.dataAttribute(state.element, 'd3pie-footer', null); + + state.d3pie_options = { + header: { + title: { + text: (state.d3pie_forced_title !== null) ? state.d3pie_forced_title : state.title, + color: NETDATA.dataAttribute(state.element, 'd3pie-title-color', NETDATA.themes.current.d3pie.title), + fontSize: NETDATA.dataAttribute(state.element, 'd3pie-title-fontsize', 12), + fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-title-fontweight', "bold"), + font: NETDATA.dataAttribute(state.element, 'd3pie-title-font', "arial") + }, + subtitle: { + text: (state.d3pie_forced_subtitle !== null) ? state.d3pie_forced_subtitle : state.units_current, + color: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-color', NETDATA.themes.current.d3pie.subtitle), + fontSize: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontsize', 10), + fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontweight', "normal"), + font: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-font', "arial") + }, + titleSubtitlePadding: 1 + }, + footer: { + text: (state.d3pie_forced_footer !== null) ? state.d3pie_forced_footer : NETDATA.d3pieDateRange(state, data, 0), + color: NETDATA.dataAttribute(state.element, 'd3pie-footer-color', NETDATA.themes.current.d3pie.footer), + fontSize: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontsize', 9), + fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontweight', "bold"), + font: NETDATA.dataAttribute(state.element, 'd3pie-footer-font', "arial"), + location: NETDATA.dataAttribute(state.element, 'd3pie-footer-location', "bottom-center") // bottom-left, bottom-center, bottom-right + }, + size: { + canvasHeight: state.chartHeight(), + canvasWidth: state.chartWidth(), + pieInnerRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieinnerradius', "45%"), + pieOuterRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieouterradius', "80%") + }, + data: { + // none, random, value-asc, value-desc, label-asc, label-desc + sortOrder: NETDATA.dataAttribute(state.element, 'd3pie-sortorder', "value-desc"), + smallSegmentGrouping: { + enabled: NETDATA.dataAttributeBoolean(state.element, "d3pie-smallsegmentgrouping-enabled", false), + value: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-value', 1), + // percentage, value + valueType: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-valuetype', "percentage"), + label: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-label', "other"), + color: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-color', NETDATA.themes.current.d3pie.other) + }, + + // REQUIRED! 
This is where you enter your pie data; it needs to be an array of objects + // of this form: { label: "label", value: 1.5, color: "#000000" } - color is optional + content: content + }, + labels: { + outer: { + // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2 + format: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-format', "label-value1"), + hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-hidewhenlessthanpercentage', null), + pieDistance: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-piedistance', 15) + }, + inner: { + // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2 + format: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-format', "percentage"), + hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-hidewhenlessthanpercentage', 2) + }, + mainLabel: { + color: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-color', NETDATA.themes.current.d3pie.mainlabel), // or 'segment' for dynamic color + font: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-font', "arial"), + fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontsize', 10), + fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontweight', "normal") + }, + percentage: { + color: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-color', NETDATA.themes.current.d3pie.percentage), + font: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-font', "arial"), + fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontsize', 10), + fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontweight', "bold"), + decimalPlaces: 0 + }, + value: { + color: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-color', NETDATA.themes.current.d3pie.value), + font: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-font', "arial"), + fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontsize', 10), + fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontweight', "bold") + }, + lines: { + enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-lines-enabled', true), + style: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-style', "curved"), + color: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-color', "segment") // "segment" or a hex color + }, + truncation: { + enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-truncation-enabled', false), + truncateLength: NETDATA.dataAttribute(state.element, 'd3pie-labels-truncation-truncatelength', 30) + }, + formatter: function(context) { + // console.log(context); + if(context.part === 'value') + return state.legendFormatValue(context.value); + if(context.part === 'percentage') + return context.label + '%'; + + return context.label; + } + }, + effects: { + load: { + effect: "none", // none / default + speed: 0 // commented in the d3pie code to speed it up + }, + pullOutSegmentOnClick: { + effect: "bounce", // none / linear / bounce / elastic / back + speed: 400, + size: 5 + }, + highlightSegmentOnMouseover: true, + highlightLuminosity: -0.2 + }, + tooltips: { + enabled: false, + type: "placeholder", // caption|placeholder + string: "", + placeholderParser: null, // function + styles: { + fadeInSpeed: 250, + backgroundColor: NETDATA.themes.current.d3pie.tooltip_bg, + backgroundOpacity: 0.5, + color: 
NETDATA.themes.current.d3pie.tooltip_fg, + borderRadius: 2, + font: "arial", + fontSize: 12, + padding: 4 + } + }, + misc: { + colors: { + background: 'transparent', // transparent or color # + // segments: state.chartColors(), + segmentStroke: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-segmentstroke', NETDATA.themes.current.d3pie.segment_stroke) + }, + gradient: { + enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-misc-gradient-enabled', false), + percentage: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-percentage', 95), + color: NETDATA.dataAttribute(state.element, 'd3pie-misc-gradient-color', NETDATA.themes.current.d3pie.gradient_color) + }, + canvasPadding: { + top: 5, + right: 5, + bottom: 5, + left: 5 + }, + pieCenterOffset: { + x: 0, + y: 0 + }, + cssPrefix: NETDATA.dataAttribute(state.element, 'd3pie-cssprefix', null) + }, + callbacks: { + onload: null, + onMouseoverSegment: null, + onMouseoutSegment: null, + onClickSegment: null + } + }; + + state.d3pie_instance = new d3pie(state.element_chart, state.d3pie_options); + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // D3 + + NETDATA.d3Initialize = function(callback) { + if(typeof netdataStopD3 === 'undefined' || !netdataStopD3) { + $.ajax({ + url: NETDATA.d3_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('d3', NETDATA.d3_js); + }) + .fail(function() { + NETDATA.chartLibraries.d3.enabled = false; + NETDATA.error(100, NETDATA.d3_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }); + } + else { + NETDATA.chartLibraries.d3.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.d3ChartUpdate = function(state, data) { + void(state); + void(data); + + return false; + }; + + NETDATA.d3ChartCreate = function(state, data) { + void(state); + void(data); + + return false; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // google charts + + NETDATA.googleInitialize = function(callback) { + if(typeof netdataNoGoogleCharts === 'undefined' || !netdataNoGoogleCharts) { + $.ajax({ + url: NETDATA.google_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('google', NETDATA.google_js); + google.load('visualization', '1.1', { + 'packages': ['corechart', 'controls'], + 'callback': callback + }); + }) + .fail(function() { + NETDATA.chartLibraries.google.enabled = false; + NETDATA.error(100, NETDATA.google_js); + if(typeof callback === "function") + return callback(); + }); + } + else { + NETDATA.chartLibraries.google.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.googleChartUpdate = function(state, data) { + var datatable = new google.visualization.DataTable(data.result); + state.google_instance.draw(datatable, state.google_options); + return true; + }; + + NETDATA.googleChartCreate = function(state, data) { + var datatable = new google.visualization.DataTable(data.result); + + state.google_options = { + colors: state.chartColors(), + + // do not set width, height - the chart resizes itself + //width: state.chartWidth(), + //height: state.chartHeight(), + lineWidth: 1, + title: state.title, + 
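+ // the remaining options tune fonts, axes and layout for the Google
+ // Visualization corechart (loaded above); the switch further below
+ // instantiates either an AreaChart or a LineChart with them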
fontSize: 11, + hAxis: { + // title: "Time of Day", + // format:'HH:mm:ss', + viewWindowMode: 'maximized', + slantedText: false, + format:'HH:mm:ss', + textStyle: { + fontSize: 9 + }, + gridlines: { + color: '#EEE' + } + }, + vAxis: { + title: state.units_current, + viewWindowMode: 'pretty', + minValue: -0.1, + maxValue: 0.1, + direction: 1, + textStyle: { + fontSize: 9 + }, + gridlines: { + color: '#EEE' + } + }, + chartArea: { + width: '65%', + height: '80%' + }, + focusTarget: 'category', + annotation: { + '1': { + style: 'line' + } + }, + pointsVisible: 0, + titlePosition: 'out', + titleTextStyle: { + fontSize: 11 + }, + tooltip: { + isHtml: false, + ignoreBounds: true, + textStyle: { + fontSize: 9 + } + }, + curveType: 'function', + areaOpacity: 0.3, + isStacked: false + }; + + switch(state.chart.chart_type) { + case "area": + state.google_options.vAxis.viewWindowMode = 'maximized'; + state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_area; + state.google_instance = new google.visualization.AreaChart(state.element_chart); + break; + + case "stacked": + state.google_options.isStacked = true; + state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_stacked; + state.google_options.vAxis.viewWindowMode = 'maximized'; + state.google_options.vAxis.minValue = null; + state.google_options.vAxis.maxValue = null; + state.google_instance = new google.visualization.AreaChart(state.element_chart); + break; + + default: + case "line": + state.google_options.lineWidth = 2; + state.google_instance = new google.visualization.LineChart(state.element_chart); + break; + } + + state.google_instance.draw(datatable, state.google_options); + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + + NETDATA.easypiechartPercentFromValueMinMax = function(state, value, min, max) { + if(typeof value !== 'number') value = 0; + if(typeof min !== 'number') min = 0; + if(typeof max !== 'number') max = 0; + + if(min > max) { + var t = min; + min = max; + max = t; + } + + if(min > value) min = value; + if(max < value) max = value; + + state.legendFormatValueDecimalsFromMinMax(min, max); + + if(state.tmp.easyPieChartMin === null && min > 0) min = 0; + if(state.tmp.easyPieChartMax === null && max < 0) max = 0; + + var pcent; + + if(min < 0 && max > 0) { + // it is both positive and negative + // zero at the top center of the chart + max = (-min > max)? 
-min : max; + pcent = Math.round(value * 100 / max); + } + else if(value >= 0 && min >= 0 && max >= 0) { + // clockwise + pcent = Math.round((value - min) * 100 / (max - min)); + if(pcent === 0) pcent = 0.1; + } + else { + // counter clockwise + pcent = Math.round((value - max) * 100 / (max - min)); + if(pcent === 0) pcent = -0.1; + } + + return pcent; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // easy-pie-chart + + NETDATA.easypiechartInitialize = function(callback) { + if(typeof netdataNoEasyPieChart === 'undefined' || !netdataNoEasyPieChart) { + $.ajax({ + url: NETDATA.easypiechart_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('easypiechart', NETDATA.easypiechart_js); + }) + .fail(function() { + NETDATA.chartLibraries.easypiechart.enabled = false; + NETDATA.error(100, NETDATA.easypiechart_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }) + } + else { + NETDATA.chartLibraries.easypiechart.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.easypiechartClearSelection = function(state, force) { + if(typeof state.tmp.easyPieChartEvent !== 'undefined' && typeof state.tmp.easyPieChartEvent.timer !== 'undefined') { + NETDATA.timeout.clear(state.tmp.easyPieChartEvent.timer); + state.tmp.easyPieChartEvent.timer = undefined; + } + + if(state.isAutoRefreshable() === true && state.data !== null && force !== true) { + NETDATA.easypiechartChartUpdate(state, state.data); + } + else { + state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(null); + state.tmp.easyPieChart_instance.update(0); + } + state.tmp.easyPieChart_instance.enableAnimation(); + + return true; + }; + + NETDATA.easypiechartSetSelection = function(state, t) { + if(state.timeIsVisible(t) !== true) + return NETDATA.easypiechartClearSelection(state, true); + + var slot = state.calculateRowForTime(t); + if(slot < 0 || slot >= state.data.result.length) + return NETDATA.easypiechartClearSelection(state, true); + + if(typeof state.tmp.easyPieChartEvent === 'undefined') { + state.tmp.easyPieChartEvent = { + timer: undefined, + value: 0, + pcent: 0 + }; + } + + var value = state.data.result[state.data.result.length - 1 - slot]; + var min = (state.tmp.easyPieChartMin === null)?NETDATA.commonMin.get(state):state.tmp.easyPieChartMin; + var max = (state.tmp.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.tmp.easyPieChartMax; + var pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max); + + state.tmp.easyPieChartEvent.value = value; + state.tmp.easyPieChartEvent.pcent = pcent; + state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value); + + if(state.tmp.easyPieChartEvent.timer === undefined) { + state.tmp.easyPieChart_instance.disableAnimation(); + + state.tmp.easyPieChartEvent.timer = NETDATA.timeout.set(function() { + state.tmp.easyPieChartEvent.timer = undefined; + state.tmp.easyPieChart_instance.update(state.tmp.easyPieChartEvent.pcent); + }, 0); + } + + return true; + }; + + NETDATA.easypiechartChartUpdate = function(state, data) { + var value, min, max, pcent; + + if(NETDATA.globalPanAndZoom.isActive() === true || state.isAutoRefreshable() === false) { + value = null; + pcent = 0; + } + else { + value = data.result[0]; + min = (state.tmp.easyPieChartMin === 
null)?NETDATA.commonMin.get(state):state.tmp.easyPieChartMin; + max = (state.tmp.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.tmp.easyPieChartMax; + pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max); + } + + state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value); + state.tmp.easyPieChart_instance.update(pcent); + return true; + }; + + NETDATA.easypiechartChartCreate = function(state, data) { + var chart = $(state.element_chart); + + var value = data.result[0]; + var min = NETDATA.dataAttribute(state.element, 'easypiechart-min-value', null); + var max = NETDATA.dataAttribute(state.element, 'easypiechart-max-value', null); + + if(min === null) { + min = NETDATA.commonMin.get(state); + state.tmp.easyPieChartMin = null; + } + else + state.tmp.easyPieChartMin = min; + + if(max === null) { + max = NETDATA.commonMax.get(state); + state.tmp.easyPieChartMax = null; + } + else + state.tmp.easyPieChartMax = max; + + var size = state.chartWidth(); + var stroke = Math.floor(size / 22); + if(stroke < 3) stroke = 2; + + var valuefontsize = Math.floor((size * 2 / 3) / 5); + var valuetop = Math.round((size - valuefontsize - (size / 40)) / 2); + state.tmp.easyPieChartLabel = document.createElement('span'); + state.tmp.easyPieChartLabel.className = 'easyPieChartLabel'; + state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value); + state.tmp.easyPieChartLabel.style.fontSize = valuefontsize + 'px'; + state.tmp.easyPieChartLabel.style.top = valuetop.toString() + 'px'; + state.element_chart.appendChild(state.tmp.easyPieChartLabel); + + var titlefontsize = Math.round(valuefontsize * 1.6 / 3); + var titletop = Math.round(valuetop - (titlefontsize * 2) - (size / 40)); + state.tmp.easyPieChartTitle = document.createElement('span'); + state.tmp.easyPieChartTitle.className = 'easyPieChartTitle'; + state.tmp.easyPieChartTitle.innerText = state.title; + state.tmp.easyPieChartTitle.style.fontSize = titlefontsize + 'px'; + state.tmp.easyPieChartTitle.style.lineHeight = titlefontsize + 'px'; + state.tmp.easyPieChartTitle.style.top = titletop.toString() + 'px'; + state.element_chart.appendChild(state.tmp.easyPieChartTitle); + + var unitfontsize = Math.round(titlefontsize * 0.9); + var unittop = Math.round(valuetop + (valuefontsize + unitfontsize) + (size / 40)); + state.tmp.easyPieChartUnits = document.createElement('span'); + state.tmp.easyPieChartUnits.className = 'easyPieChartUnits'; + state.tmp.easyPieChartUnits.innerText = state.units_current; + state.tmp.easyPieChartUnits.style.fontSize = unitfontsize + 'px'; + state.tmp.easyPieChartUnits.style.top = unittop.toString() + 'px'; + state.element_chart.appendChild(state.tmp.easyPieChartUnits); + + var barColor = NETDATA.dataAttribute(state.element, 'easypiechart-barcolor', undefined); + if(typeof barColor === 'undefined' || barColor === null) + barColor = state.chartCustomColors()[0]; + else { + //
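+ // the attribute may hold a plain color, or a javascript expression that
+ // evaluates to a function(percent) - easy-pie-chart accepts both forms
+ // for barColor - so we eval() it and pass the result through unchanged
+ // when it turns out to be a function.
+ // a minimal sketch of the dynamic form (hypothetical markup):
+ // <div data-netdata="system.cpu"
+ //      data-chart-library="easypiechart"
+ //      data-easypiechart-barcolor="(function(percent) { return (percent > 80) ? '#DD4444' : '#44AA44'; })"
+ // ></div>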
+ var tmp = eval(barColor); + if(typeof tmp === 'function') + barColor = tmp; + } + + var pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max); + chart.data('data-percent', pcent); + + chart.easyPieChart({ + barColor: barColor, + trackColor: NETDATA.dataAttribute(state.element, 'easypiechart-trackcolor', NETDATA.themes.current.easypiechart_track), + scaleColor: NETDATA.dataAttribute(state.element, 'easypiechart-scalecolor', NETDATA.themes.current.easypiechart_scale), + scaleLength: NETDATA.dataAttribute(state.element, 'easypiechart-scalelength', 5), + lineCap: NETDATA.dataAttribute(state.element, 'easypiechart-linecap', 'round'), + lineWidth: NETDATA.dataAttribute(state.element, 'easypiechart-linewidth', stroke), + trackWidth: NETDATA.dataAttribute(state.element, 'easypiechart-trackwidth', undefined), + size: NETDATA.dataAttribute(state.element, 'easypiechart-size', size), + rotate: NETDATA.dataAttribute(state.element, 'easypiechart-rotate', 0), + animate: NETDATA.dataAttribute(state.element, 'easypiechart-animate', {duration: 500, enabled: true}), + easing: NETDATA.dataAttribute(state.element, 'easypiechart-easing', undefined) + }); + + // when we just re-create the chart + // do not animate the first update + var animate = true; + if(typeof state.tmp.easyPieChart_instance !== 'undefined') + animate = false; + + state.tmp.easyPieChart_instance = chart.data('easyPieChart'); + if(animate === false) state.tmp.easyPieChart_instance.disableAnimation(); + state.tmp.easyPieChart_instance.update(pcent); + if(animate === false) state.tmp.easyPieChart_instance.enableAnimation(); + + state.legendSetUnitsString = function(units) { + if(typeof state.tmp.easyPieChartUnits !== 'undefined' && state.tmp.units !== units) { + state.tmp.easyPieChartUnits.innerText = units; + state.tmp.units = units; + } + }; + state.legendShowUndefined = function() { + if(typeof state.tmp.easyPieChart_instance !== 'undefined') + NETDATA.easypiechartClearSelection(state); + }; + + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // gauge.js + + NETDATA.gaugeInitialize = function(callback) { + if(typeof netdataNoGauge === 'undefined' || !netdataNoGauge) { + $.ajax({ + url: NETDATA.gauge_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('gauge', NETDATA.gauge_js); + }) + .fail(function() { + NETDATA.chartLibraries.gauge.enabled = false; + NETDATA.error(100, NETDATA.gauge_js); + }) + .always(function() { + if(typeof callback === "function") + return callback(); + }) + } + else { + NETDATA.chartLibraries.gauge.enabled = false; + if(typeof callback === "function") + return callback(); + } + }; + + NETDATA.gaugeAnimation = function(state, status) { + var speed = 32; + + if(typeof status === 'boolean' && status === false) + speed = 1000000000; + else if(typeof status === 'number') + speed = status; + + // console.log('gauge speed ' + speed); + state.tmp.gauge_instance.animationSpeed = speed; + state.tmp.___gaugeOld__.speed = speed; + }; + + NETDATA.gaugeSet = function(state, value, min, max) { + if(typeof value !== 'number') value = 0; + if(typeof min !== 'number') min = 0; + if(typeof max !== 'number') max = 0; + if(value > max) max = value; + if(value < min) min = value; + if(min > max) { + var t = min; + min = max; + max = t; + } + else if(min === max) + max = min + 1; + + 
state.legendFormatValueDecimalsFromMinMax(min, max); + + // gauge.js has an issue if the needle + // is smaller than min or larger than max + // when we set the new values + // the needle will go crazy + + // to prevent it, we always feed it + // with a percentage, so that the needle + // is always between min and max + var pcent = (value - min) * 100 / (max - min); + + // bug fix for gauge.js 1.3.1 + // if the value is the absolute min or max, the chart is broken + if(pcent < 0.001) pcent = 0.001; + if(pcent > 99.999) pcent = 99.999; + + state.tmp.gauge_instance.set(pcent); + // console.log('gauge set ' + pcent + ', value ' + value + ', min ' + min + ', max ' + max); + + state.tmp.___gaugeOld__.value = value; + state.tmp.___gaugeOld__.min = min; + state.tmp.___gaugeOld__.max = max; + }; + + NETDATA.gaugeSetLabels = function(state, value, min, max) { + if(state.tmp.___gaugeOld__.valueLabel !== value) { + state.tmp.___gaugeOld__.valueLabel = value; + state.tmp.gaugeChartLabel.innerText = state.legendFormatValue(value); + } + if(state.tmp.___gaugeOld__.minLabel !== min) { + state.tmp.___gaugeOld__.minLabel = min; + state.tmp.gaugeChartMin.innerText = state.legendFormatValue(min); + } + if(state.tmp.___gaugeOld__.maxLabel !== max) { + state.tmp.___gaugeOld__.maxLabel = max; + state.tmp.gaugeChartMax.innerText = state.legendFormatValue(max); + } + }; + + NETDATA.gaugeClearSelection = function(state, force) { + if(typeof state.tmp.gaugeEvent !== 'undefined' && typeof state.tmp.gaugeEvent.timer !== 'undefined') { + NETDATA.timeout.clear(state.tmp.gaugeEvent.timer); + state.tmp.gaugeEvent.timer = undefined; + } + + if(state.isAutoRefreshable() === true && state.data !== null && force !== true) { + NETDATA.gaugeChartUpdate(state, state.data); + } + else { + NETDATA.gaugeAnimation(state, false); + NETDATA.gaugeSetLabels(state, null, null, null); + NETDATA.gaugeSet(state, null, null, null); + } + + NETDATA.gaugeAnimation(state, true); + return true; + }; + + NETDATA.gaugeSetSelection = function(state, t) { + if(state.timeIsVisible(t) !== true) + return NETDATA.gaugeClearSelection(state, true); + + var slot = state.calculateRowForTime(t); + if(slot < 0 || slot >= state.data.result.length) + return NETDATA.gaugeClearSelection(state, true); + + if(typeof state.tmp.gaugeEvent === 'undefined') { + state.tmp.gaugeEvent = { + timer: undefined, + value: 0, + min: 0, + max: 0 + }; + } + + var value = state.data.result[state.data.result.length - 1 - slot]; + var min = (state.tmp.gaugeMin === null)?NETDATA.commonMin.get(state):state.tmp.gaugeMin; + var max = (state.tmp.gaugeMax === null)?NETDATA.commonMax.get(state):state.tmp.gaugeMax; + + // make sure it is zero based + // but only if it has not been set by the user + if(state.tmp.gaugeMin === null && min > 0) min = 0; + if(state.tmp.gaugeMax === null && max < 0) max = 0; + + state.tmp.gaugeEvent.value = value; + state.tmp.gaugeEvent.min = min; + state.tmp.gaugeEvent.max = max; + NETDATA.gaugeSetLabels(state, value, min, max); + + if(state.tmp.gaugeEvent.timer === undefined) { + NETDATA.gaugeAnimation(state, false); + + state.tmp.gaugeEvent.timer = NETDATA.timeout.set(function() { + state.tmp.gaugeEvent.timer = undefined; + NETDATA.gaugeSet(state, state.tmp.gaugeEvent.value, state.tmp.gaugeEvent.min, state.tmp.gaugeEvent.max); + }, 0); + } + + return true; + }; + + NETDATA.gaugeChartUpdate = function(state, data) { + var value, min, max; + + if(NETDATA.globalPanAndZoom.isActive() === true || state.isAutoRefreshable() === false) { + 
NETDATA.gaugeSetLabels(state, null, null, null); + state.tmp.gauge_instance.set(0); + } + else { + value = data.result[0]; + min = (state.tmp.gaugeMin === null)?NETDATA.commonMin.get(state):state.tmp.gaugeMin; + max = (state.tmp.gaugeMax === null)?NETDATA.commonMax.get(state):state.tmp.gaugeMax; + if(value < min) min = value; + if(value > max) max = value; + + // make sure it is zero based + // but only if it has not been set by the user + if(state.tmp.gaugeMin === null && min > 0) min = 0; + if(state.tmp.gaugeMax === null && max < 0) max = 0; + + NETDATA.gaugeSet(state, value, min, max); + NETDATA.gaugeSetLabels(state, value, min, max); + } + + return true; + }; + + NETDATA.gaugeChartCreate = function(state, data) { + // var chart = $(state.element_chart); + + var value = data.result[0]; + var min = NETDATA.dataAttribute(state.element, 'gauge-min-value', null); + var max = NETDATA.dataAttribute(state.element, 'gauge-max-value', null); + // var adjust = NETDATA.dataAttribute(state.element, 'gauge-adjust', null); + var pointerColor = NETDATA.dataAttribute(state.element, 'gauge-pointer-color', NETDATA.themes.current.gauge_pointer); + var strokeColor = NETDATA.dataAttribute(state.element, 'gauge-stroke-color', NETDATA.themes.current.gauge_stroke); + var startColor = NETDATA.dataAttribute(state.element, 'gauge-start-color', state.chartCustomColors()[0]); + var stopColor = NETDATA.dataAttribute(state.element, 'gauge-stop-color', void 0); + var generateGradient = NETDATA.dataAttribute(state.element, 'gauge-generate-gradient', false); + + if(min === null) { + min = NETDATA.commonMin.get(state); + state.tmp.gaugeMin = null; + } + else + state.tmp.gaugeMin = min; + + if(max === null) { + max = NETDATA.commonMax.get(state); + state.tmp.gaugeMax = null; + } + else + state.tmp.gaugeMax = max; + + // make sure it is zero based + // but only if it has not been set by the user + if(state.tmp.gaugeMin === null && min > 0) min = 0; + if(state.tmp.gaugeMax === null && max < 0) max = 0; + + var width = state.chartWidth(), height = state.chartHeight(); //, ratio = 1.5; + // console.log('gauge width: ' + width.toString() + ', height: ' + height.toString()); + //switch(adjust) { + // case 'width': width = height * ratio; break; + // case 'height': + // default: height = width / ratio; break; + //} + //state.element.style.width = width.toString() + 'px'; + //state.element.style.height = height.toString() + 'px'; + + var lum_d = 0.05; + + var options = { + lines: 12, // The number of lines to draw + angle: 0.14, // The span of the gauge arc + lineWidth: 0.57, // The line thickness + radiusScale: 1.0, // Relative radius + pointer: { + length: 0.85, // The length of the pointer, relative to the gauge radius + strokeWidth: 0.045, // The thickness of the pointer + color: pointerColor // The fill color of the pointer + }, + limitMax: true, // When true, the max value of the gauge stays fixed (the needle is clamped to it) + limitMin: true, // When true, the min value of the gauge stays fixed + colorStart: startColor, // The color at the start of the arc + colorStop: stopColor, // The color at the end of the arc + strokeColor: strokeColor, // The color of the unfilled part of the arc + generateGradient: (generateGradient === true), + gradientType: 0, + highDpiSupport: true // High resolution support + }; + + if (generateGradient.constructor === Array) { + // example options: + // data-gauge-generate-gradient="[0, 50, 100]" + // data-gauge-gradient-percent-color-0="#FFFFFF" + // data-gauge-gradient-percent-color-50="#999900" + // data-gauge-gradient-percent-color-100="#000000" + +
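+ // with the example attributes above, the loop below would build
+ // (illustrative):
+ //   options.percentColors = [
+ //       [ 0.0, '#FFFFFF' ],
+ //       [ 0.5, '#999900' ],
+ //       [ 1.0, '#000000' ]
+ //   ];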
options.percentColors = []; + var len = generateGradient.length; + while(len--) { + var pcent = generateGradient[len]; + var color = NETDATA.dataAttribute(state.element, 'gauge-gradient-percent-color-' + pcent.toString(), false); + if(color !== false) { + var a = []; + a[0] = pcent / 100; + a[1] = color; + options.percentColors.unshift(a); + } + } + if(options.percentColors.length === 0) + delete options.percentColors; + } + else if(generateGradient === false && NETDATA.themes.current.gauge_gradient === true) { + //noinspection PointlessArithmeticExpressionJS + options.percentColors = [ + [0.0, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 0))], + [0.1, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 1))], + [0.2, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 2))], + [0.3, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 3))], + [0.4, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 4))], + [0.5, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 5))], + [0.6, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 6))], + [0.7, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 7))], + [0.8, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 8))], + [0.9, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 9))], + [1.0, NETDATA.colorLuminance(startColor, 0.0)]]; + } + + state.tmp.gauge_canvas = document.createElement('canvas'); + state.tmp.gauge_canvas.id = 'gauge-' + state.uuid + '-canvas'; + state.tmp.gauge_canvas.className = 'gaugeChart'; + state.tmp.gauge_canvas.width = width; + state.tmp.gauge_canvas.height = height; + state.element_chart.appendChild(state.tmp.gauge_canvas); + + var valuefontsize = Math.floor(height / 5); + var valuetop = Math.round((height - valuefontsize) / 3.2); + state.tmp.gaugeChartLabel = document.createElement('span'); + state.tmp.gaugeChartLabel.className = 'gaugeChartLabel'; + state.tmp.gaugeChartLabel.style.fontSize = valuefontsize + 'px'; + state.tmp.gaugeChartLabel.style.top = valuetop.toString() + 'px'; + state.element_chart.appendChild(state.tmp.gaugeChartLabel); + + var titlefontsize = Math.round(valuefontsize / 2.1); + var titletop = 0; + state.tmp.gaugeChartTitle = document.createElement('span'); + state.tmp.gaugeChartTitle.className = 'gaugeChartTitle'; + state.tmp.gaugeChartTitle.innerText = state.title; + state.tmp.gaugeChartTitle.style.fontSize = titlefontsize + 'px'; + state.tmp.gaugeChartTitle.style.lineHeight = titlefontsize + 'px'; + state.tmp.gaugeChartTitle.style.top = titletop.toString() + 'px'; + state.element_chart.appendChild(state.tmp.gaugeChartTitle); + + var unitfontsize = Math.round(titlefontsize * 0.9); + state.tmp.gaugeChartUnits = document.createElement('span'); + state.tmp.gaugeChartUnits.className = 'gaugeChartUnits'; + state.tmp.gaugeChartUnits.innerText = state.units_current; + state.tmp.gaugeChartUnits.style.fontSize = unitfontsize + 'px'; + state.element_chart.appendChild(state.tmp.gaugeChartUnits); + + state.tmp.gaugeChartMin = document.createElement('span'); + state.tmp.gaugeChartMin.className = 'gaugeChartMin'; + state.tmp.gaugeChartMin.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px'; + state.element_chart.appendChild(state.tmp.gaugeChartMin); + + state.tmp.gaugeChartMax = document.createElement('span'); + state.tmp.gaugeChartMax.className = 'gaugeChartMax'; + state.tmp.gaugeChartMax.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px'; + state.element_chart.appendChild(state.tmp.gaugeChartMax); + 
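+ // illustrative (hypothetical) markup tying together the data attributes
+ // read at the top of this function:
+ // <div data-netdata="system.load"
+ //      data-chart-library="gauge"
+ //      data-gauge-min-value="0"
+ //      data-gauge-max-value="100"
+ //      data-gauge-pointer-color="#444444"
+ //      data-gauge-start-color="#44AA44"
+ //      data-gauge-stop-color="#DD4444"
+ // ></div>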
+ // when we just re-create the chart + // do not animate the first update + var animate = true; + if(typeof state.tmp.gauge_instance !== 'undefined') + animate = false; + + state.tmp.gauge_instance = new Gauge(state.tmp.gauge_canvas).setOptions(options); // create sexy gauge! + + state.tmp.___gaugeOld__ = { + value: value, + min: min, + max: max, + valueLabel: null, + minLabel: null, + maxLabel: null + }; + + // we will always feed a percentage + state.tmp.gauge_instance.minValue = 0; + state.tmp.gauge_instance.maxValue = 100; + + NETDATA.gaugeAnimation(state, animate); + NETDATA.gaugeSet(state, value, min, max); + NETDATA.gaugeSetLabels(state, value, min, max); + NETDATA.gaugeAnimation(state, true); + + state.legendSetUnitsString = function(units) { + if(typeof state.tmp.gaugeChartUnits !== 'undefined' && state.tmp.units !== units) { + state.tmp.gaugeChartUnits.innerText = units; + state.tmp.___gaugeOld__.valueLabel = null; + state.tmp.___gaugeOld__.minLabel = null; + state.tmp.___gaugeOld__.maxLabel = null; + state.tmp.units = units; + } + }; + state.legendShowUndefined = function() { + if(typeof state.tmp.gauge_instance !== 'undefined') + NETDATA.gaugeClearSelection(state); + }; + + return true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Charts Libraries Registration + + NETDATA.chartLibraries = { + "dygraph": { + initialize: NETDATA.dygraphInitialize, + create: NETDATA.dygraphChartCreate, + update: NETDATA.dygraphChartUpdate, + resize: function(state) { + if(typeof state.tmp.dygraph_instance !== 'undefined' && typeof state.tmp.dygraph_instance.resize === 'function') + state.tmp.dygraph_instance.resize(); + }, + setSelection: NETDATA.dygraphSetSelection, + clearSelection: NETDATA.dygraphClearSelection, + toolboxPanAndZoom: NETDATA.dygraphToolboxPanAndZoom, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), + format: function(state) { void(state); return 'json'; }, + options: function(state) { return 'ms' + '%7C' + 'flip' + (this.isLogScale(state)?('%7C' + 'abs'):'').toString(); }, + legend: function(state) { + return (this.isSparkline(state) === false && NETDATA.dataAttributeBoolean(state.element, 'legend', true) === true) ? 
'right-side' : null; + }, + autoresize: function(state) { void(state); return true; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return true; }, + pixels_per_point: function(state) { + return (this.isSparkline(state) === false)?3:2; + }, + isSparkline: function(state) { + if(typeof state.tmp.dygraph_sparkline === 'undefined') { + state.tmp.dygraph_sparkline = (this.theme(state) === 'sparkline'); + } + return state.tmp.dygraph_sparkline; + }, + isLogScale: function(state) { + if(typeof state.tmp.dygraph_logscale === 'undefined') { + state.tmp.dygraph_logscale = (this.theme(state) === 'logscale'); + } + return state.tmp.dygraph_logscale; + }, + theme: function(state) { + if(typeof state.tmp.dygraph_theme === 'undefined') + state.tmp.dygraph_theme = NETDATA.dataAttribute(state.element, 'dygraph-theme', 'default'); + return state.tmp.dygraph_theme; + }, + container_class: function(state) { + if(this.legend(state) !== null) + return 'netdata-container-with-legend'; + return 'netdata-container'; + } + }, + "sparkline": { + initialize: NETDATA.sparklineInitialize, + create: NETDATA.sparklineChartCreate, + update: NETDATA.sparklineChartUpdate, + resize: null, + setSelection: undefined, // function(state, t) { void(state); return true; }, + clearSelection: undefined, // function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), + format: function(state) { void(state); return 'array'; }, + options: function(state) { void(state); return 'flip' + '%7C' + 'abs'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 3; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "peity": { + initialize: NETDATA.peityInitialize, + create: NETDATA.peityChartCreate, + update: NETDATA.peityChartUpdate, + resize: null, + setSelection: undefined, // function(state, t) { void(state); return true; }, + clearSelection: undefined, // function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), + format: function(state) { void(state); return 'ssvcomma'; }, + options: function(state) { void(state); return 'null2zero' + '%7C' + 'flip' + '%7C' + 'abs'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 3; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "morris": { + initialize: NETDATA.morrisInitialize, + create: NETDATA.morrisChartCreate, + update: NETDATA.morrisChartUpdate, + resize: null, + setSelection: undefined, // function(state, t) { void(state); return true; }, + clearSelection: undefined, // function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), + format: function(state) { void(state); return 'json'; }, + 
options: function(state) { void(state); return 'objectrows' + '%7C' + 'ms'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 50; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 15; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "google": { + initialize: NETDATA.googleInitialize, + create: NETDATA.googleChartCreate, + update: NETDATA.googleChartUpdate, + resize: null, + setSelection: undefined, //function(state, t) { void(state); return true; }, + clearSelection: undefined, //function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result.rows$'), + format: function(state) { void(state); return 'datatable'; }, + options: function(state) { void(state); return ''; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 300; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 4; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "raphael": { + initialize: NETDATA.raphaelInitialize, + create: NETDATA.raphaelChartCreate, + update: NETDATA.raphaelChartUpdate, + resize: null, + setSelection: undefined, // function(state, t) { void(state); return true; }, + clearSelection: undefined, // function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), + format: function(state) { void(state); return 'json'; }, + options: function(state) { void(state); return ''; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 3; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "c3": { + initialize: NETDATA.c3Initialize, + create: NETDATA.c3ChartCreate, + update: NETDATA.c3ChartUpdate, + resize: null, + setSelection: undefined, // function(state, t) { void(state); return true; }, + clearSelection: undefined, // function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), + format: function(state) { void(state); return 'csvjsonarray'; }, + options: function(state) { void(state); return 'milliseconds'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 15; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "d3pie": { + initialize: NETDATA.d3pieInitialize, + create: NETDATA.d3pieChartCreate, + update: NETDATA.d3pieChartUpdate, + resize: null, + setSelection: NETDATA.d3pieSetSelection, + clearSelection: 
NETDATA.d3pieClearSelection, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), + format: function(state) { void(state); return 'json'; }, + options: function(state) { void(state); return 'objectrows' + '%7C' + 'ms'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 15; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "d3": { + initialize: NETDATA.d3Initialize, + create: NETDATA.d3ChartCreate, + update: NETDATA.d3ChartUpdate, + resize: null, + setSelection: undefined, // function(state, t) { void(state); return true; }, + clearSelection: undefined, // function(state) { void(state); return true; }, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'), + format: function(state) { void(state); return 'json'; }, + options: function(state) { void(state); return ''; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return false; }, + pixels_per_point: function(state) { void(state); return 3; }, + container_class: function(state) { void(state); return 'netdata-container'; } + }, + "easypiechart": { + initialize: NETDATA.easypiechartInitialize, + create: NETDATA.easypiechartChartCreate, + update: NETDATA.easypiechartChartUpdate, + resize: null, + setSelection: NETDATA.easypiechartSetSelection, + clearSelection: NETDATA.easypiechartClearSelection, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), + format: function(state) { void(state); return 'array'; }, + options: function(state) { void(state); return 'absolute'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return true; }, + pixels_per_point: function(state) { void(state); return 3; }, + aspect_ratio: 100, + container_class: function(state) { void(state); return 'netdata-container-easypiechart'; } + }, + "gauge": { + initialize: NETDATA.gaugeInitialize, + create: NETDATA.gaugeChartCreate, + update: NETDATA.gaugeChartUpdate, + resize: null, + setSelection: NETDATA.gaugeSetSelection, + clearSelection: NETDATA.gaugeClearSelection, + toolboxPanAndZoom: null, + initialized: false, + enabled: true, + xssRegexIgnore: new RegExp('^/api/v1/data\.result$'), + format: function(state) { void(state); return 'array'; }, + options: function(state) { void(state); return 'absolute'; }, + legend: function(state) { void(state); return null; }, + autoresize: function(state) { void(state); return false; }, + max_updates_to_recreate: function(state) { void(state); return 5000; }, + track_colors: function(state) { void(state); return true; }, + pixels_per_point: function(state) { void(state); return 3; }, + aspect_ratio: 60, + container_class: function(state) { void(state); return 'netdata-container-gauge'; } + } + }; + + NETDATA.registerChartLibrary = 
function(library, url) { + if(NETDATA.options.debug.libraries === true) + console.log("registering chart library: " + library); + + NETDATA.chartLibraries[library].url = url; + NETDATA.chartLibraries[library].initialized = true; + NETDATA.chartLibraries[library].enabled = true; + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Load required JS libraries and CSS + + NETDATA.requiredJs = [ + { + url: NETDATA.serverStatic + 'lib/bootstrap-3.3.7.min.js', + async: false, + isAlreadyLoaded: function() { + // check if bootstrap is loaded + if(typeof $().emulateTransitionEnd === 'function') + return true; + else { + return (typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap === true); + } + } + }, + { + url: NETDATA.serverStatic + 'lib/fontawesome-all-5.0.1.min.js', + async: true, + isAlreadyLoaded: function() { + return (typeof netdataNoFontAwesome !== 'undefined' && netdataNoFontAwesome === true); + } + }, + { + url: NETDATA.serverStatic + 'lib/perfect-scrollbar-0.6.15.min.js', + isAlreadyLoaded: function() { return false; } + } + ]; + + NETDATA.requiredCSS = [ + { + url: NETDATA.themes.current.bootstrap_css, + isAlreadyLoaded: function() { + return (typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap === true); + } + }, + { + url: NETDATA.themes.current.dashboard_css, + isAlreadyLoaded: function() { return false; } + } + ]; + + NETDATA.loadedRequiredJs = 0; + NETDATA.loadRequiredJs = function(index, callback) { + if(index >= NETDATA.requiredJs.length) { + if(typeof callback === 'function') + return callback(); + return; + } + + if(NETDATA.requiredJs[index].isAlreadyLoaded()) { + NETDATA.loadedRequiredJs++; + NETDATA.loadRequiredJs(++index, callback); + return; + } + + if(NETDATA.options.debug.main_loop === true) + console.log('loading ' + NETDATA.requiredJs[index].url); + + var async = true; + if(typeof NETDATA.requiredJs[index].async !== 'undefined' && NETDATA.requiredJs[index].async === false) + async = false; + + $.ajax({ + url: NETDATA.requiredJs[index].url, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + if(NETDATA.options.debug.main_loop === true) + console.log('loaded ' + NETDATA.requiredJs[index].url); + }) + .fail(function() { + alert('Cannot load required JS library: ' + NETDATA.requiredJs[index].url); + }) + .always(function() { + NETDATA.loadedRequiredJs++; + + if(async === false) + NETDATA.loadRequiredJs(++index, callback); + }); + + if(async === true) + NETDATA.loadRequiredJs(++index, callback); + }; + + NETDATA.loadRequiredCSS = function(index) { + if(index >= NETDATA.requiredCSS.length) + return; + + if(NETDATA.requiredCSS[index].isAlreadyLoaded()) { + NETDATA.loadRequiredCSS(++index); + return; + } + + if(NETDATA.options.debug.main_loop === true) + console.log('loading ' + NETDATA.requiredCSS[index].url); + + NETDATA._loadCSS(NETDATA.requiredCSS[index].url); + NETDATA.loadRequiredCSS(++index); + }; + + + // ---------------------------------------------------------------------------------------------------------------- + // Alarms + + NETDATA.alarms = { + onclick: null, // the callback to handle the click - it will be called with the alarm log entry + chart_div_offset: -50, // give that space above the chart when scrolling to it + chart_div_id_prefix: 'chart_', // the chart DIV IDs have this prefix (they should be NETDATA.name2id(chart.id)) + chart_div_animation_duration:
0,// the duration of the animation while scrolling to a chart + + ms_penalty: 0, // the time penalty of the next alarm + ms_between_notifications: 500, // firefox moves the alarms off-screen (above, outside the top of the screen) + // if alarms are shown faster than: one per 500ms + + update_every: 10000, // the time in ms between alarm checks + + notifications: false, // when true, the browser supports notifications (may not be granted though) + last_notification_id: 0, // the id of the last alarm_log we have raised an alarm for + first_notification_id: 0, // the id of the first alarm_log entry for this session + // this is used to prevent CLEAR notifications for past events + // notifications_shown: [], + + server: null, // the server to connect to for fetching alarms + current: null, // the list of raised alarms - updated in the background + + // a callback function to call every time the list of raised alarms is refreshed + callback: (typeof netdataAlarmsActiveCallback === 'function')?netdataAlarmsActiveCallback:null, + + // a callback function to call every time a notification is shown + // the return value is used to decide if the notification will be shown + notificationCallback: (typeof netdataAlarmsNotifCallback === 'function')?netdataAlarmsNotifCallback:null, + + recipients: null, // the list (array) of recipients to show alarms for, or null + + recipientMatches: function(to_string, wanted_array) { + if(typeof wanted_array === 'undefined' || wanted_array === null || Array.isArray(wanted_array) === false) + return true; + + var r = ' ' + to_string.toString() + ' '; + var len = wanted_array.length; + while(len--) { + if(r.indexOf(' ' + wanted_array[len] + ' ') >= 0) + return true; + } + + return false; + }, + + activeForRecipients: function() { + var active = {}; + var data = NETDATA.alarms.current; + + if(typeof data === 'undefined' || data === null) + return active; + + for(var x in data.alarms) { + if(!data.alarms.hasOwnProperty(x)) continue; + + var alarm = data.alarms[x]; + if((alarm.status === 'WARNING' || alarm.status === 'CRITICAL') && NETDATA.alarms.recipientMatches(alarm.recipient, NETDATA.alarms.recipients)) + active[x] = alarm; + } + + return active; + }, + + notify: function(entry) { + // console.log('alarm ' + entry.unique_id); + + if(entry.updated === true) { + // console.log('alarm ' + entry.unique_id + ' has been updated by another alarm'); + return; + } + + var value_string = entry.value_string; + + if(NETDATA.alarms.current !== null) { + // get the current value_string + var t = NETDATA.alarms.current.alarms[entry.chart + '.' + entry.name]; + if(typeof t !== 'undefined' && entry.status === t.status && typeof t.value_string !== 'undefined') + value_string = t.value_string; + } + + var name = entry.name.replace(/_/g, ' '); + var status = entry.status.toLowerCase(); + var title = name + ' = ' + value_string.toString(); + var tag = entry.alarm_id; + var icon = 'images/seo-performance-128.png'; + var interaction = false; + var data = entry; + var show = true; + + // console.log('alarm ' + entry.unique_id + ' ' + entry.chart + '.' 
+ entry.name + ' is ' + entry.status); + + switch(entry.status) { + case 'REMOVED': + show = false; + break; + + case 'UNDEFINED': + return; + + case 'UNINITIALIZED': + return; + + case 'CLEAR': + if(entry.unique_id < NETDATA.alarms.first_notification_id) { + // console.log('alarm ' + entry.unique_id + ' is not current'); + return; + } + if(entry.old_status === 'UNINITIALIZED' || entry.old_status === 'UNDEFINED') { + // console.log('alarm' + entry.unique_id + ' switch to CLEAR from ' + entry.old_status); + return; + } + if(entry.no_clear_notification === true) { + // console.log('alarm' + entry.unique_id + ' is CLEAR but has no_clear_notification flag'); + return; + } + title = name + ' back to normal (' + value_string.toString() + ')'; + icon = 'images/check-mark-2-128-green.png'; + interaction = false; + break; + + case 'WARNING': + if(entry.old_status === 'CRITICAL') + status = 'demoted to ' + entry.status.toLowerCase(); + + icon = 'images/alert-128-orange.png'; + interaction = false; + break; + + case 'CRITICAL': + if(entry.old_status === 'WARNING') + status = 'escalated to ' + entry.status.toLowerCase(); + + icon = 'images/alert-128-red.png'; + interaction = true; + break; + + default: + console.log('invalid alarm status ' + entry.status); + return; + } + + // filter recipients + if(show === true) + show = NETDATA.alarms.recipientMatches(entry.recipient, NETDATA.alarms.recipients); + + /* + // cleanup old notifications with the same alarm_id as this one + // it does not seem to work on any web browser - so notifications cannot be removed + + var len = NETDATA.alarms.notifications_shown.length; + while(len--) { + var n = NETDATA.alarms.notifications_shown[len]; + if(n.data.alarm_id === entry.alarm_id) { + console.log('removing old alarm ' + n.data.unique_id); + + // close the notification + n.close.bind(n); + + // remove it from the array + NETDATA.alarms.notifications_shown.splice(len, 1); + len = NETDATA.alarms.notifications_shown.length; + } + } + */ + + if(show === true) { + if(typeof NETDATA.alarms.notificationCallback === 'function') + show = NETDATA.alarms.notificationCallback(entry); + + if(show === true) { + setTimeout(function() { + // show this notification + // console.log('new notification: ' + title); + var n = new Notification(title, { + body: entry.hostname + ' - ' + entry.chart + ' (' + entry.family + ') - ' + status + ': ' + entry.info, + tag: tag, + requireInteraction: interaction, + icon: NETDATA.serverStatic + icon, + data: data + }); + + n.onclick = function(event) { + event.preventDefault(); + NETDATA.alarms.onclick(event.target.data); + }; + + // console.log(n); + // NETDATA.alarms.notifications_shown.push(n); + // console.log(entry); + }, NETDATA.alarms.ms_penalty); + + NETDATA.alarms.ms_penalty += NETDATA.alarms.ms_between_notifications; + } + } + }, + + scrollToChart: function(chart_id) { + if(typeof chart_id === 'string') { + var offset = $('#' + NETDATA.alarms.chart_div_id_prefix + NETDATA.name2id(chart_id)).offset(); + if(typeof offset !== 'undefined') { + $('html, body').animate({ scrollTop: offset.top + NETDATA.alarms.chart_div_offset }, NETDATA.alarms.chart_div_animation_duration); + return true; + } + } + return false; + }, + + scrollToAlarm: function(alarm) { + if(typeof alarm === 'object') { + var ret = NETDATA.alarms.scrollToChart(alarm.chart); + + if(ret === true && NETDATA.options.page_is_visible === false) + window.focus(); + // alert('netdata dashboard will now scroll to chart: ' + alarm.chart + '\n\nThis alarm opened to bring the browser window 
in front of the screen. Click on the dashboard to prevent it from appearing again.'); + } + + }, + + notifyAll: function() { + // console.log('FETCHING ALARM LOG'); + NETDATA.alarms.get_log(NETDATA.alarms.last_notification_id, function(data) { + // console.log('ALARM LOG FETCHED'); + + if(data === null || typeof data !== 'object') { + console.log('invalid alarms log response'); + return; + } + + if(data.length === 0) { + console.log('received empty alarm log'); + return; + } + + // console.log('received alarm log of ' + data.length + ' entries, from ' + data[data.length - 1].unique_id.toString() + ' to ' + data[0].unique_id.toString()); + + data.sort(function(a, b) { + if(a.unique_id > b.unique_id) return -1; + if(a.unique_id < b.unique_id) return 1; + return 0; + }); + + NETDATA.alarms.ms_penalty = 0; + + var len = data.length; + while(len--) { + if(data[len].unique_id > NETDATA.alarms.last_notification_id) { + NETDATA.alarms.notify(data[len]); + } + //else + // console.log('ignoring alarm (older) with id ' + data[len].unique_id.toString()); + } + + NETDATA.alarms.last_notification_id = data[0].unique_id; + + if(typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember === true) + NETDATA.localStorageSet('last_notification_id', NETDATA.alarms.last_notification_id, null); + // console.log('last notification id = ' + NETDATA.alarms.last_notification_id); + }) + }, + + check_notifications: function() { + // returns true if we should fire 1+ notifications + + if(NETDATA.alarms.notifications !== true) { + // console.log('web notifications are not available'); + return false; + } + + if(Notification.permission !== 'granted') { + // console.log('web notifications are not granted'); + return false; + } + + if(typeof NETDATA.alarms.current !== 'undefined' && typeof NETDATA.alarms.current.alarms === 'object') { + // console.log('can do alarms: old id = ' + NETDATA.alarms.last_notification_id + ' new id = ' + NETDATA.alarms.current.latest_alarm_log_unique_id); + + if(NETDATA.alarms.current.latest_alarm_log_unique_id > NETDATA.alarms.last_notification_id) { + // console.log('new alarms detected'); + return true; + } + //else console.log('no new alarms'); + } + // else console.log('cannot process alarms'); + + return false; + }, + + get: function(what, callback) { + $.ajax({ + url: NETDATA.alarms.server + '/api/v1/alarms?' 
+ what.toString(), + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkOptional('/api/v1/alarms', data /*, '.*\.(calc|calc_parsed|warn|warn_parsed|crit|crit_parsed)$' */); + + if(NETDATA.alarms.first_notification_id === 0 && typeof data.latest_alarm_log_unique_id === 'number') + NETDATA.alarms.first_notification_id = data.latest_alarm_log_unique_id; + + if(typeof callback === 'function') + return callback(data); + }) + .fail(function() { + NETDATA.error(415, NETDATA.alarms.server); + + if(typeof callback === 'function') + return callback(null); + }); + }, + + update_forever: function() { + if(netdataShowAlarms !== true || netdataSnapshotData !== null) + return; + + NETDATA.alarms.get('active', function(data) { + if(data !== null) { + NETDATA.alarms.current = data; + + if(NETDATA.alarms.check_notifications() === true) { + NETDATA.alarms.notifyAll(); + } + + if (typeof NETDATA.alarms.callback === 'function') { + NETDATA.alarms.callback(data); + } + + // Health monitoring is disabled on this netdata + if(data.status === false) return; + } + + setTimeout(NETDATA.alarms.update_forever, NETDATA.alarms.update_every); + }); + }, + + get_log: function(last_id, callback) { + // console.log('fetching all log after ' + last_id.toString()); + $.ajax({ + url: NETDATA.alarms.server + '/api/v1/alarm_log?after=' + last_id.toString(), + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkOptional('/api/v1/alarm_log', data); + + if(typeof callback === 'function') + return callback(data); + }) + .fail(function() { + NETDATA.error(416, NETDATA.alarms.server); + + if(typeof callback === 'function') + return callback(null); + }); + }, + + init: function() { + NETDATA.alarms.server = NETDATA.fixHost(NETDATA.serverDefault); + + if(typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember === true) { + NETDATA.alarms.last_notification_id = + NETDATA.localStorageGet('last_notification_id', NETDATA.alarms.last_notification_id, null); + } + + if(NETDATA.alarms.onclick === null) + NETDATA.alarms.onclick = NETDATA.alarms.scrollToAlarm; + + if(typeof netdataAlarmsRecipients !== 'undefined' && Array.isArray(netdataAlarmsRecipients)) + NETDATA.alarms.recipients = netdataAlarmsRecipients; + + if(netdataShowAlarms === true) { + NETDATA.alarms.update_forever(); + + if('Notification' in window) { + // console.log('notifications available'); + NETDATA.alarms.notifications = true; + + if(Notification.permission === 'default') + Notification.requestPermission(); + } + } + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Registry of netdata hosts + + NETDATA.registry = { + server: null, // the netdata registry server + person_guid: null, // the unique ID of this browser / user + machine_guid: null, // the unique ID the netdata server that served dashboard.js + hostname: 'unknown', // the hostname of the netdata server that served dashboard.js + machines: null, // the user's other URLs + machines_array: null, // the user's other URLs in an array + person_urls: null, + + parsePersonUrls: function(person_urls) { + // console.log(person_urls); + NETDATA.registry.person_urls = 
person_urls; + + if(person_urls) { + NETDATA.registry.machines = {}; + NETDATA.registry.machines_array = []; + + var apu = person_urls; + var i = apu.length; + while(i--) { + if(typeof NETDATA.registry.machines[apu[i][0]] === 'undefined') { + // console.log('adding: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString()); + + var obj = { + guid: apu[i][0], + url: apu[i][1], + last_t: apu[i][2], + accesses: apu[i][3], + name: apu[i][4], + alternate_urls: [] + }; + obj.alternate_urls.push(apu[i][1]); + + NETDATA.registry.machines[apu[i][0]] = obj; + NETDATA.registry.machines_array.push(obj); + } + else { + // console.log('appending: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString()); + + var pu = NETDATA.registry.machines[apu[i][0]]; + if(pu.last_t < apu[i][2]) { + pu.url = apu[i][1]; + pu.last_t = apu[i][2]; + pu.name = apu[i][4]; + } + pu.accesses += apu[i][3]; + pu.alternate_urls.push(apu[i][1]); + } + } + } + + if(typeof netdataRegistryCallback === 'function') + netdataRegistryCallback(NETDATA.registry.machines_array); + }, + + init: function() { + if(netdataRegistry !== true) return; + + NETDATA.registry.hello(NETDATA.serverDefault, function(data) { + if(data) { + NETDATA.registry.server = data.registry; + NETDATA.registry.machine_guid = data.machine_guid; + NETDATA.registry.hostname = data.hostname; + + NETDATA.registry.access(2, function (person_urls) { + NETDATA.registry.parsePersonUrls(person_urls); + + }); + } + }); + }, + + hello: function(host, callback) { + host = NETDATA.fixHost(host); + + // send HELLO to a netdata server: + // 1. verifies the server is reachable + // 2. responds with the registry URL, the machine GUID of this netdata server and its hostname + $.ajax({ + url: host + '/api/v1/registry?action=hello', + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkOptional('/api/v1/registry?action=hello', data); + + if(typeof data.status !== 'string' || data.status !== 'ok') { + NETDATA.error(408, host + ' response: ' + JSON.stringify(data)); + data = null; + } + + if(typeof callback === 'function') + return callback(data); + }) + .fail(function() { + NETDATA.error(407, host); + + if(typeof callback === 'function') + return callback(null); + }); + }, + + access: function(max_redirects, callback) { + // send ACCESS to a netdata registry: + // 1. it lets it know we are accessing a netdata server (its machine GUID and its URL) + // 2. 
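Note that parsePersonUrls() receives the person's URLs as an array of positional arrays, not objects: index 0 is the machine GUID, 1 the URL, 2 the last access timestamp, 3 the access counter and 4 the hostname. A sketch with made-up values, showing how duplicate GUIDs are merged:

// Sketch: the positional format parsePersonUrls() expects.
// Each entry is [machine_guid, url, last_t, accesses, name]; values are made up.
var examplePersonUrls = [
    ['1111-aaaa', 'http://box1:19999/', 1541595000, 12, 'box1'],
    ['1111-aaaa', 'http://10.0.0.5:19999/', 1541598600, 3, 'box1'],
    ['2222-bbbb', 'http://box2:19999/', 1541590000, 7, 'box2']
];

NETDATA.registry.parsePersonUrls(examplePersonUrls);
// machines_array now has 2 entries; the '1111-aaaa' machine keeps the URL
// with the newest last_t, sums accesses to 15, and lists both URLs in
// alternate_urls.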
it responds with a list of netdata servers we know + // the registry identifies us using a cookie it sets the first time we access it + // the registry may respond with a redirect URL to send us to another registry + $.ajax({ + url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault), // + '&visible_url=' + encodeURIComponent(document.location), + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkAlways('/api/v1/registry?action=access', data); + + var redirect = null; + if(typeof data.registry === 'string') + redirect = data.registry; + + if(typeof data.status !== 'string' || data.status !== 'ok') { + NETDATA.error(409, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); + data = null; + } + + if(data === null) { + if(redirect !== null && max_redirects > 0) { + NETDATA.registry.server = redirect; + NETDATA.registry.access(max_redirects - 1, callback); + } + else { + if(typeof callback === 'function') + return callback(null); + } + } + else { + if(typeof data.person_guid === 'string') + NETDATA.registry.person_guid = data.person_guid; + + if(typeof callback === 'function') + return callback(data.urls); + } + }) + .fail(function() { + NETDATA.error(410, NETDATA.registry.server); + + if(typeof callback === 'function') + return callback(null); + }); + }, + + delete: function(delete_url, callback) { + // send DELETE to a netdata registry: + $.ajax({ + url: NETDATA.registry.server + '/api/v1/registry?action=delete&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&delete_url=' + encodeURIComponent(delete_url), + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkAlways('/api/v1/registry?action=delete', data); + + if(typeof data.status !== 'string' || data.status !== 'ok') { + NETDATA.error(411, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); + data = null; + } + + if(typeof callback === 'function') + return callback(data); + }) + .fail(function() { + NETDATA.error(412, NETDATA.registry.server); + + if(typeof callback === 'function') + return callback(null); + }); + }, + + search: function(machine_guid, callback) { + // SEARCH for the URLs of a machine: + $.ajax({ + url: NETDATA.registry.server + '/api/v1/registry?action=search&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&for=' + machine_guid, + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkAlways('/api/v1/registry?action=search', data); + + if(typeof data.status !== 'string' || data.status !== 'ok') { + NETDATA.error(417, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); + data = null; + } + + if(typeof callback === 'function') + return callback(data); + }) + 
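The redirect handling in access() above is worth isolating: the registry either answers with status 'ok' or points the client at another registry, and the recursion is bounded by max_redirects so two misconfigured registries cannot bounce the browser forever. The same control flow as a standalone sketch — askRegistry is a hypothetical transport:

// Sketch of the bounded-redirect flow in NETDATA.registry.access().
// askRegistry(url, cb) is a hypothetical transport; it calls back with
// either { status: 'ok', urls: [...] } or { registry: 'http://elsewhere/' }.
function accessWithRedirects(server, maxRedirects, askRegistry, callback) {
    askRegistry(server, function(response) {
        if (response && response.status === 'ok')
            return callback(response.urls);

        if (response && typeof response.registry === 'string' && maxRedirects > 0)
            return accessWithRedirects(response.registry, maxRedirects - 1,
                                       askRegistry, callback);

        callback(null); // failure, or redirect budget exhausted
    });
}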
.fail(function() { + NETDATA.error(418, NETDATA.registry.server); + + if(typeof callback === 'function') + return callback(null); + }); + }, + + switch: function(new_person_guid, callback) { + // impersonate + $.ajax({ + url: NETDATA.registry.server + '/api/v1/registry?action=switch&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&to=' + new_person_guid, + async: true, + cache: false, + headers: { + 'Cache-Control': 'no-cache, no-store', + 'Pragma': 'no-cache' + }, + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function(data) { + data = NETDATA.xss.checkAlways('/api/v1/registry?action=switch', data); + + if(typeof data.status !== 'string' || data.status !== 'ok') { + NETDATA.error(413, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data)); + data = null; + } + + if(typeof callback === 'function') + return callback(data); + }) + .fail(function() { + NETDATA.error(414, NETDATA.registry.server); + + if(typeof callback === 'function') + return callback(null); + }); + } + }; + + // ---------------------------------------------------------------------------------------------------------------- + // Boot it! + + if(typeof netdataPrepCallback === 'function') + netdataPrepCallback(); + + NETDATA.errorReset(); + NETDATA.loadRequiredCSS(0); + + NETDATA._loadjQuery(function() { + NETDATA.loadRequiredJs(0, function() { + if(typeof $().emulateTransitionEnd !== 'function') { + // bootstrap is not available + NETDATA.options.current.show_help = false; + } + + if(typeof netdataDontStart === 'undefined' || !netdataDontStart) { + if(NETDATA.options.debug.main_loop === true) + console.log('starting chart refresh thread'); + + NETDATA.start(); + } + }); + }); +})(window, document, (typeof jQuery === 'function')?jQuery:undefined); diff --git a/web/gui/dashboard.slate.css b/web/gui/dashboard.slate.css new file mode 100644 index 000000000..f1c9c4101 --- /dev/null +++ b/web/gui/dashboard.slate.css @@ -0,0 +1,757 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +html, +body { + /*font-family: Calibri,"Segoe UI","Helvetica Neue",Helvetica,Arial,sans-serif;*/ + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-style: normal; + font-variant: normal; + color: #878b90; +} + +/* fixes for default slate theme */ +code { + color: #bbb; /*#c7254e;*/ + background-color: #555; /* #f9f2f4; */ +} + +.dashboard-sidebar .nav > .active > a, +.dashboard-sidebar .nav > .active:hover > a, +.dashboard-sidebar .nav > .active:focus > a { + color: #765d9c; + border-left: 2px solid #765d9c; +} + +.morelink { + color: #765d9c; + text-decoration: none; +} + +.morelink:hover { + color: #563d7c; + text-decoration: none; +} + +.morelink:focus { + color: #765d9c; + text-decoration: none; +} + +.netdata-chart-alignment { + margin-left: 55px; +} + +.netdata-chart-row { + width: 100%; + text-align: center; + display: flex; + display: -webkit-flex; + display: -moz-flex; + align-items: flex-end; + -moz-align-items: flex-end; + -webkit-align-items: flex-end; + justify-content: center; + -webkit-justify-content: center; + -moz-justify-content: center; + padding-top: 10px; +} + +.netdata-container { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-container-gauge {
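The boot block above leaves two hooks to the embedding page: netdataPrepCallback runs before any dependency is loaded, and netdataDontStart suppresses the automatic NETDATA.start(). A sketch of a page deferring the start until its own setup is done:

// Sketch: defer the chart refresh loop. Both globals are read by the
// boot block of dashboard.js, so they must be set before it loads.
var netdataDontStart = true; // load CSS/JS dependencies, but do not start

var netdataPrepCallback = function() {
    console.log('dashboard.js is about to load its dependencies');
};

// later, once the page has built its own DOM:
// NETDATA.start();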
+ display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-container-gauge:after { + padding-top: 60%; + display: block; + content: ''; +} + +.netdata-container-easypiechart { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-container-easypiechart:after { + padding-top: 100%; + display: block; + content: ''; +} + +.netdata-aspect { + position: relative; + width: 100%; + padding: 0px; + margin: 0px; +} + +.netdata-container-with-legend { + display: inline-block; + overflow: hidden; + + transform: translate3d(0,0,0); + + /* fix minimum scrollbar issue in firefox */ + min-height: 99px; + + /* required for child elements to have absolute position */ + position: relative; + + /* width and height is given per chart with data-width and data-height */ +} + +.netdata-legend-resize-handler { + display: block; + position: absolute; + bottom: 0px; + right: 0px; + height: 15px; + width: 20px; + background-color: #272b30; + font-size: 15px; + vertical-align: middle; + line-height: 15px; + cursor: ns-resize; + color: #373b40; + text-align: center; + overflow: hidden; + z-index: 20; + padding: 0px; + margin: 0px; +} + +.netdata-legend-toolbox { + display: block; + position: absolute; + bottom: 0px; + right: 30px; + height: 15px; + width: 110px; + background-color: #272b30; + font-size: 12px; + vertical-align: middle; + line-height: 15px; + color: #373b40; + text-align: center; + overflow: hidden; + z-index: 20; + padding: 0px; + margin: 0px; + + /* prevent text selection after double click */ + -webkit-user-select: none; /* webkit (safari, chrome) browsers */ + -moz-user-select: none; /* mozilla browsers */ + -khtml-user-select: none; /* webkit (konqueror) browsers */ + -ms-user-select: none; /* IE10+ */ +} + +.netdata-legend-toolbox-button { + display: inline-block; + position: relative; + height: 15px; + width: 18px; + background-color: #272b30; + font-size: 12px; + vertical-align: middle; + line-height: 15px; + color: #474b50; + text-align: center; + overflow: hidden; + z-index: 21; + padding: 0px; + margin: 0px; + cursor: pointer; + + /* prevent text selection after double click */ + -webkit-user-select: none; /* webkit (safari, chrome) browsers */ + -moz-user-select: none; /* mozilla browsers */ + -khtml-user-select: none; /* webkit (konqueror) browsers */ + -ms-user-select: none; /* IE10+ */ +} + +.netdata-message { + display: inline-block; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + text-align: left; + vertical-align: top; + font-weight: bold; + font-size: x-small; + overflow: hidden; + background: inherit; + z-index: 0; +} + +.netdata-message.hidden { + display: none; +} + +.netdata-message.icon { + color: #2f3338; + text-align: center; + vertical-align: middle; +} + +.netdata-chart-legend { + position: absolute; /* within .netdata-container */ + top: 0; + right: 0; + overflow: hidden; + text-overflow: ellipsis; + line-height: 14px; + display: block; + width: 140px; /* --legend-width */ + height: calc(100% - 15px); /* 10px for the resize handler and 5px for the top margin */ + font-size: 10px; + margin-top: 5px; + text-align: left; + /* width and height is calculated 
(depends on the appearance of the legend) */ +} + +.netdata-legend-title-date { + font-size: 10px; + font-weight: normal; + margin-top: 0px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.netdata-legend-title-time { + font-size: 11px; + font-weight: bold; + margin-top: 0px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.netdata-legend-title-units { + position: absolute; + right: 10px; + float: right; + font-size: 11px; + vertical-align: top; + font-weight: normal; + margin-top: 0px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.netdata-legend-series { + position: absolute; + width: 140px; /* legend-width */ + height: calc(100% - 50px); + overflow: hidden; + text-overflow: ellipsis; + line-height: 14.5px; /* line spacing at the legend */ + display: block; + font-size: 10px; + margin-top: 0px; +} + +.netdata-legend-name-table-line { + display: inline-block; + width: 13px; + height: 4px; + border-width: 0px; + border-bottom-width: 2px; + border-bottom-style: solid; + border-bottom-color: #272b30; +} + +.netdata-legend-name-table-area { + display: inline-block; + width: 13px; + height: 5px; + border-width: 1px; + border-top-width: 1px; + border-top-style: solid; + border-top-color: inherit; +} + +.netdata-legend-name-table-stacked { + display: inline-block; + width: 13px; + height: 5px; + border-width: 1px; + border-top-width: 1px; + border-top-style: solid; + border-top-color: inherit; +} + +.netdata-legend-name-tr { +} + +.netdata-legend-name-td { +} + +.netdata-legend-name { + text-align: left; + font-size: 11px; /* legend: dimension name size */ + font-weight: bold; + vertical-align: bottom; + margin-top: 0px; + z-index: 9; + padding: 0px; + width: 80px !important; + max-width: 80px !important; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + display: inline-block; + cursor: pointer; + -webkit-print-color-adjust: exact; +} + +.netdata-legend-value { + /*margin-left: 14px;*/ + position: absolute; + right: 10px; + float: right; + text-align: right; + font-size: 11px; /* legend: dimension value size */ + font-weight: bold; + vertical-align: bottom; + background-color: #272b30; + margin-top: 0px; + z-index: 10; + padding: 0px; + padding-left: 15px; + cursor: pointer; + /* -webkit-font-smoothing: none; */ +} + +.netdata-legend-name.not-selected { + font-weight: normal; + opacity: 0.3; +} + +.netdata-chart { + position: absolute; /* within .netdata-container */ + top: 0; /* within .netdata-container */ + left: 0; /* within .netdata-container */ + display: inline-block; + overflow: hidden; + width: 100%; + height: 100%; + z-index: 5; + + /* width and height is calculated (depends on the appearance of the legend) */ +} + +.netdata-chart-with-legend-right { + position: absolute; /* within .netdata-container */ + top: 0; /* within .netdata-container */ + left: 0; /* within .netdata-container */ + display: block; + overflow: hidden; + margin-right: 140px; /* --legend-width */ + width: calc(100% - 140px); /* --legend-width */ + height: 100%; + z-index: 5; + flex-grow: 1; + + /* width and height is calculated (depends on the appearance of the legend) */ +} + +.netdata-peity-chart { + +} + +.netdata-sparkline-chart { + +} + +.netdata-dygraph-chart { + +} + +.netdata-morris-chart { + +} + +.netdata-google-chart { + +} + +.dygraph-ylabel { +} + +.dygraph-axis-label-x { + overflow-x: hidden; +} + +.dygraph-axis-label { + color: #6c7075; +} + +.dygraph-label-rotate-left { + text-align: center; + 
/* See http://caniuse.com/#feat=transforms2d */ + transform: rotate(90deg); + -webkit-transform: rotate(90deg); + -moz-transform: rotate(90deg); + -o-transform: rotate(90deg); + -ms-transform: rotate(90deg); +} + +/* For y2-axis label */ +.dygraph-label-rotate-right { + text-align: center; + /* See http://caniuse.com/#feat=transforms2d */ + transform: rotate(-90deg); + -webkit-transform: rotate(-90deg); + -moz-transform: rotate(-90deg); + -o-transform: rotate(-90deg); + -ms-transform: rotate(-90deg); +} + +.dygraph-title { + text-indent: 56px; + text-align: left; + position: absolute; + left: 0px; + top: 4px; + font-size: 11px; + font-weight: bold; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; +} + +/* fix for sparkline tooltip under bootstrap */ +.jqstooltip { + width: auto !important; + height: auto !important; +} + +.easyPieChart { + position: relative; + text-align: center; +} + +.easyPieChart canvas { + position: absolute; + top: 0; + left: 0; +} + +.easyPieChartLabel { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 100%; + text-align: center; + color: #BBB; + font-weight: normal; + text-shadow: #272b30 0px 0px 1px; + /* -webkit-font-smoothing: none; */ +} + +.easyPieChartTitle { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 64%; + margin-left: 18% !important; + text-align: center; + color: #676b70; + font-weight: bold; +} + +.easyPieChartUnits { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 60%; + margin-left: 20% !important; + text-align: center; + color: #676b70; + font-weight: normal; +} + +.gaugeChart { + position: relative; + text-align: center; +} + +.gaugeChart canvas { + position: absolute; + top: 0; + left: 0; + bottom: 0; + right: 0; + z-index: 0; +} + +.gaugeChartLabel { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 100%; + text-align: center; + color: #BBB; + font-weight: bold; + z-index: 1; + text-shadow: #272b30 0px 0px 1px; + /* text-shadow: #CCC 1px 1px 0px, #CCC -1px -1px 0px, #CCC 1px -1px 0px, #CCC -1px 1px 0px; */ + /* -webkit-text-stroke: 1px #777; */ + /* -webkit-font-smoothing: none; */ +} + +.gaugeChartTitle { + display: inline-block; + position: absolute; + float: left; + left: 0; + width: 100%; + text-align: center; + color: #676b70; + font-weight: bold; +} + +.gaugeChartUnits { + display: inline-block; + position: absolute; + float: left; + left: 0; + bottom: 0; + width: 100%; + text-align: left; + margin-left: 5%; + color: #676b70; + font-weight: normal; +} + +.gaugeChartMin { + display: inline-block; + position: absolute; + float: left; + left: 0; + bottom: 8%; + width: 92%; + margin-left: 8%; + text-align: left; + color: #676b70; + font-weight: normal; +} + +.gaugeChartMax { + display: inline-block; + position: absolute; + float: left; + left: 0; + bottom: 8%; + width: 95%; + margin-right: 5%; + text-align: right; + color: #676b70; + font-weight: normal; +} + +.popover-title { + font-weight: bold; + font-size: 12px; +} + +.popover-content { + font-size: 11px; +} + +/* ---------------------------------------------------------------------------- + perfect-scrollbar settings + */ + +.ps-container { + -ms-touch-action: auto; + touch-action: auto; + overflow: hidden !important; + -ms-overflow-style: none; +} + +@supports (-ms-overflow-style: none) { + .ps-container { + overflow: auto !important; + } +} + +@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { + .ps-container 
{ + overflow: auto !important; + } +} + +.ps-container.ps-active-x > .ps-scrollbar-x-rail, +.ps-container.ps-active-y > .ps-scrollbar-y-rail { + display: block; + background-color: transparent; +} + +.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { + background-color: transparent; /* background color when dragged away */ + opacity: 0.9; +} + +.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { + background-color: #aaa; /* scrollbar color when dragged away */ + height: 5px; +} + +.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { + background-color: transparent; /* background color when dragged away */ + opacity: 0.9; +} + +.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { + background-color: #aaa; /* scrollbar color when dragged away */ + width: 5px; +} + +.ps-container > .ps-scrollbar-x-rail { + display: none; + position: absolute; + /* please don't change 'position' */ + opacity: 0.2; /* the opacity when not on hover of the content */ + -webkit-transition: background-color .2s linear, opacity .2s linear; + -o-transition: background-color .2s linear, opacity .2s linear; + -moz-transition: background-color .2s linear, opacity .2s linear; + transition: background-color .2s linear, opacity .2s linear; + bottom: 0px; + /* there must be 'bottom' for ps-scrollbar-x-rail */ + height: 15px; +} + +.ps-container > .ps-scrollbar-x-rail > .ps-scrollbar-x { + position: absolute; + /* please don't change 'position' */ + background-color: #666; /* #aaa; the color on content hover */ + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; + -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + bottom: 2px; + /* there must be 'bottom' for ps-scrollbar-x */ + height: 5px; /* the width of the scrollbar */ +} + +.ps-container > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x, .ps-container > .ps-scrollbar-x-rail:active > .ps-scrollbar-x { + height: 5px; +} + +.ps-container > .ps-scrollbar-y-rail { + display: none; + position: absolute; + /* please don't change 'position' */ + opacity: 0.2; /* the opacity when not on hover of the content */ + -webkit-transition: background-color .2s linear, opacity .2s linear; + -o-transition: background-color .2s linear, opacity .2s linear; + -moz-transition: background-color .2s linear, opacity .2s linear; + transition: background-color .2s linear, opacity .2s linear; + right: 0; + /* there must be 'right' for ps-scrollbar-y-rail */ + width: 15px; +} + +.ps-container > .ps-scrollbar-y-rail > .ps-scrollbar-y { + position: absolute; + /* please don't change 'position' */ + background-color: #666; /* #aaa; the color on content hover */ + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; + 
-webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out; + -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out; + transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out; + right: 2px; + /* there must be 'right' for ps-scrollbar-y */ + width: 5px; /* the width of the scrollbar */ +} + +.ps-container > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y, .ps-container > .ps-scrollbar-y-rail:active > .ps-scrollbar-y { + width: 5px; +} + +.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail { + background-color: transparent; /* background color when dragged */ + opacity: 0.9; +} + +.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x { + background-color: #bbb; /* scrollbar color when dragged */ + height: 5px; +} + +.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail { + background-color: transparent; /* background color when dragged */ + opacity: 0.9; +} + +.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y { + background-color: #bbb; /* scrollbar color when dragged */ + width: 5px; +} + +.ps-container:hover > .ps-scrollbar-x-rail, +.ps-container:hover > .ps-scrollbar-y-rail { + opacity: 0.6; +} + +.ps-container:hover > .ps-scrollbar-x-rail:hover { + background-color: transparent; /* the background color on hover of the scrollbar */ + opacity: 0.9; +} + +.ps-container:hover > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x { + background-color: #999; /* scrollbar color on hover */ +} + +.ps-container:hover > .ps-scrollbar-y-rail:hover { + background-color: transparent; /* the background color on hover of the scrollbar */ + opacity: 0.9; +} + +.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y { + background-color: #999; /* scrollbar color on hover */ +} diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js new file mode 100644 index 000000000..139ac9340 --- /dev/null +++ b/web/gui/dashboard_info.js @@ -0,0 +1,2321 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +var netdataDashboard = window.netdataDashboard || {}; + +// ---------------------------------------------------------------------------- +// menus + +// information about the main menus + +netdataDashboard.menu = { + 'system': { + title: 'System Overview', + icon: '', + info: 'Overview of the key system metrics.' + }, + + 'services': { + title: 'systemd Services', + icon: '', + info: 'Resource utilization of systemd services. netdata monitors all systemd services via CGROUPS ' + + '(the resource accounting used by containers). ' + }, + + 'ap': { + title: 'Access Points', + icon: '', + info: 'Performance metrics for the access points (i.e. wireless interfaces in AP mode) found on the system.' + }, + + 'tc': { + title: 'Quality of Service', + icon: '', + info: 'Netdata collects and visualizes tc class utilization using its ' + + 'tc-helper plugin. 
' + 'If you also use FireQOS for setting up QoS, ' + 'netdata automatically collects interface and class names. If your QoS configuration includes overheads ' + 'calculation, the values shown here will include these overheads (the total bandwidth for the same ' + 'interface as reported in the Network Interfaces section will be lower than the total bandwidth ' + 'reported here). QoS data collection may have a slight time difference compared to the interface ' + '(QoS data collection uses a BASH script, so a shift in data collection of a few milliseconds ' + 'is to be expected).' + }, + + 'net': { + title: 'Network Interfaces', + icon: '', + info: 'Performance metrics for network interfaces.' + }, + + 'ip': { + title: 'Networking Stack', + icon: '', + info: function (os) { + if(os === "linux") + return 'Metrics for the networking stack of the system. These metrics are collected from /proc/net/netstat, apply to both IPv4 and IPv6 traffic and are related to operation of the kernel networking stack.'; + else + return 'Metrics for the networking stack of the system.'; + } + }, + + 'ipv4': { + title: 'IPv4 Networking', + icon: '', + info: 'Metrics for the IPv4 stack of the system. ' + + 'Internet Protocol version 4 (IPv4) is ' + + 'the fourth version of the Internet Protocol (IP). It is one of the core protocols of standards-based ' + + 'internetworking methods in the Internet. IPv4 is a connectionless protocol for use on packet-switched ' + + 'networks. It operates on a best effort delivery model, in that it does not guarantee delivery, nor does ' + + 'it assure proper sequencing or avoidance of duplicate delivery. These aspects, including data integrity, ' + + 'are addressed by an upper layer transport protocol, such as the Transmission Control Protocol (TCP).' + }, + + 'ipv6': { + title: 'IPv6 Networking', + icon: '', + info: 'Metrics for the IPv6 stack of the system. Internet Protocol version 6 (IPv6) is the most recent version of the Internet Protocol (IP), the communications protocol that provides an identification and location system for computers on networks and routes traffic across the Internet. IPv6 was developed by the Internet Engineering Task Force (IETF) to deal with the long-anticipated problem of IPv4 address exhaustion. IPv6 is intended to replace IPv4.' + }, + + 'sctp': { + title: 'SCTP Networking', + icon: '', + info: 'Stream Control Transmission Protocol (SCTP) is a computer network protocol which operates at the transport layer and serves a role similar to the popular protocols TCP and UDP. SCTP provides some of the features of both UDP and TCP: it is message-oriented like UDP and ensures reliable, in-sequence transport of messages with congestion control like TCP. It differs from those protocols by providing multi-homing and redundant paths to increase resilience and reliability.' + }, + + 'ipvs': { + title: 'IP Virtual Server', + icon: '', + info: 'IPVS (IP Virtual Server) implements transport-layer load balancing inside the Linux kernel, so-called Layer-4 switching. IPVS running on a host acts as a load balancer at the front of a cluster of real servers; it can direct requests for TCP/UDP based services to the real servers, and make the services of the real servers appear as a virtual service on a single IP address.' + }, + + 'netfilter': { + title: 'Firewall (netfilter)', + icon: '', + info: 'Performance metrics of the netfilter components.' + }, + + 'ipfw': { + title: 'Firewall (ipfw)', + icon: '', + info: 'Counters and memory usage for the ipfw rules.'
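Each key in netdataDashboard.menu is a chart-type prefix, and info may be a plain string or, as in the 'ip' entry above, a function of the detected OS. A sketch of how a custom dashboard could register an additional entry — the 'myapp' prefix is hypothetical:

// Sketch: registering an extra menu entry from a custom page.
// 'myapp' is a hypothetical chart type prefix, not shipped by netdata.
var netdataDashboard = window.netdataDashboard || {};
netdataDashboard.menu = netdataDashboard.menu || {};

netdataDashboard.menu['myapp'] = {
    title: 'My Application',
    icon: '',
    info: function (os) { // like 'ip', the text may depend on the OS
        return (os === 'linux')
            ? 'Metrics for my application, collected by its Linux-only plugin.'
            : 'Metrics for my application.';
    }
};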
+ }, + + 'cpu': { + title: 'CPUs', + icon: '', + info: 'Detailed information for each CPU of the system. A summary of the system for all CPUs can be found at the System Overview section.' + }, + + 'mem': { + title: 'Memory', + icon: '', + info: 'Detailed information about the memory management of the system.' + }, + + 'disk': { + title: 'Disks', + icon: '', + info: 'Charts with performance information for all the system disks. Special care has been given to present disk performance metrics in a way compatible with iostat -x. netdata by default prevents rendering performance charts for individual partitions and unmounted virtual disks. Disabled charts can still be enabled by configuring the relevant settings in the netdata configuration file.' + }, + + 'sensors': { + title: 'Sensors', + icon: '', + info: 'Readings of the configured system sensors.' + }, + + 'ipmi': { + title: 'IPMI', + icon: '', + info: 'The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system\'s CPU, firmware (BIOS or UEFI) and operating system.' + }, + + 'samba': { + title: 'Samba', + icon: '', + info: 'Performance metrics of the Samba file share operations of this system. Samba is an implementation of Windows services, including Windows SMB protocol file shares.' + }, + + 'nfsd': { + title: 'NFS Server', + icon: '', + info: 'Performance metrics of the Network File Server. NFS is a distributed file system protocol, allowing a user on a client computer to access files over a network, much like local storage is accessed. NFS, like many other protocols, builds on the Open Network Computing Remote Procedure Call (ONC RPC) system. NFS is an open standard defined in Request for Comments (RFC).' + }, + + 'nfs': { + title: 'NFS Client', + icon: '', + info: 'Performance metrics of the NFS operations of this system, acting as an NFS client.' + }, + + 'zfs': { + title: 'ZFS filesystem', + icon: '', + info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by arcstat.py and arc_summary.py.' + }, + + 'btrfs': { + title: 'BTRFS filesystem', + icon: '', + info: 'Disk space metrics for the BTRFS filesystem.' + }, + + 'apps': { + title: 'Applications', + icon: '', + info: 'Per application statistics are collected using netdata\'s apps.plugin. This plugin walks through all processes and aggregates statistics for applications of interest, defined in /etc/netdata/apps_groups.conf (the default is here). The plugin internally builds a process tree (much like ps fax does), and groups processes together (evaluating both child and parent processes) so that the result is always a chart with a predefined set of dimensions (of course, only application groups found running are reported). The reported values are compatible with top, although the netdata plugin also counts the resources of exited children (unlike top which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.', + height: 1.5 + }, + + 'users': { + title: 'Users', + icon: '', + info: 'Per user statistics are collected using netdata\'s apps.plugin. This plugin walks through all processes and aggregates statistics per user. 
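The grouping rule described for apps.plugin is essentially: match a process against the configured groups, and if it does not match, try its parent, walking up the tree until something matches. A toy JavaScript sketch of that idea — the process table and groups are invented, and the real plugin does this in C against /proc:

// Toy sketch of apps.plugin-style grouping: account each process to the
// first group matched on itself or any ancestor. All data here is invented.
var groups = { web: ['nginx', 'php-fpm'], sql: ['mysqld', 'postgres'] };

var procs = {
    1:   { name: 'init',    ppid: 0 },
    100: { name: 'nginx',   ppid: 1 },
    101: { name: 'php-fpm', ppid: 100 },
    102: { name: 'sh',      ppid: 101 } // helper script spawned by php-fpm
};

function groupOf(pid) {
    while (pid && procs[pid]) {
        for (var g in groups)
            if (groups[g].indexOf(procs[pid].name) !== -1) return g;
        pid = procs[pid].ppid; // no match: inherit from the parent chain
    }
    return 'other';
}

// groupOf(102) === 'web': the shell script is charged to its php-fpm ancestor.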
The reported values are compatible with top, although the netdata plugin also counts the resources of exited children (unlike top which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.', + height: 1.5 + }, + + 'groups': { + title: 'User Groups', + icon: '', + info: 'Per user group statistics are collected using netdata\'s apps.plugin. This plugin walks through all processes and aggregates statistics per user group. The reported values are compatible with top, although the netdata plugin also counts the resources of exited children (unlike top which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.', + height: 1.5 + }, + + 'netdata': { + title: 'Netdata Monitoring', + icon: '', + info: 'Performance metrics for the operation of netdata itself and its plugins.' + }, + + 'example': { + title: 'Example Charts', + info: 'Example charts, demonstrating the external plugin architecture.' + }, + + 'cgroup': { + title: '', + icon: '', + info: 'Container resource utilization metrics. Netdata reads this information from cgroups (abbreviated from control groups), a Linux kernel feature that limits and accounts for the resource usage (CPU, memory, disk I/O, network, etc.) of a collection of processes. cgroups together with namespaces (that offer isolation between processes) provide what we usually call: containers.' + }, + + 'cgqemu': { + title: '', + icon: '', + info: 'QEMU virtual machine resource utilization metrics. QEMU (short for Quick Emulator) is a free and open-source hosted hypervisor that performs hardware virtualization.' + }, + + 'fping': { + title: 'fping', + icon: '', + info: 'Network latency statistics, via fping. fping is a program to send ICMP echo probes to network hosts, similar to ping, but much better performing when pinging multiple hosts. fping versions after 3.15 can be directly used as netdata plugins.' + }, + + 'httpcheck': { + title: 'Http Check', + icon: '', + info: 'Web Service availability and latency monitoring using HTTP checks. This plugin is a specialized version of the port check plugin.' + }, + + 'memcached': { + title: 'memcached', + icon: '', + info: 'Performance metrics for memcached. Memcached is a general-purpose distributed memory caching system. It is often used to speed up dynamic database-driven websites by caching data and objects in RAM to reduce the number of times an external data source (such as a database or API) must be read.' + }, + + 'monit': { + title: 'monit', + icon: '', + info: 'Statuses of checks in monit. Monit is a utility for managing and monitoring processes, programs, files, directories and filesystems on a Unix system. Monit conducts automatic maintenance and repair and can execute meaningful causal actions in error situations.' + }, + + 'mysql': { + title: 'MySQL', + icon: '', + info: 'Performance metrics for mysql, the open-source relational database management system (RDBMS).' + }, + + 'postgres': { + title: 'Postgres', + icon: '', + info: 'Performance metrics for PostgreSQL, the object-relational database (ORDBMS).' + }, + + 'redis': { + title: 'Redis', + icon: '', + info: 'Performance metrics for redis. Redis (REmote DIctionary Server) is a software project that implements data structure servers. 
It is open-source, networked, in-memory, and stores keys with optional durability.' + }, + + 'rethinkdbs': { + title: 'RethinkDB', + icon: '', + info: 'Performance metrics for rethinkdb. RethinkDB is the first open-source scalable database built for realtime applications.' + }, + + 'retroshare': { + title: 'RetroShare', + icon: '', + info: 'Performance metrics for RetroShare. RetroShare is open source software for encrypted filesharing, serverless email, instant messaging, online chat, and BBS, based on a friend-to-friend network built on GNU Privacy Guard (GPG).' + }, + + 'ipfs': { + title: 'IPFS', + icon: '', + info: 'Performance metrics for the InterPlanetary File System (IPFS), a content-addressable, peer-to-peer hypermedia distribution protocol.' + }, + + 'phpfpm': { + title: 'PHP-FPM', + icon: '', + info: 'Performance metrics for PHP-FPM, an alternative FastCGI implementation for PHP.' + }, + + 'portcheck': { + title: 'Port Check', + icon: '', + info: 'Service availability and latency monitoring using port checks.' + }, + + 'postfix': { + title: 'postfix', + icon: '', + info: undefined + }, + + 'dovecot': { + title: 'Dovecot', + icon: '', + info: undefined + }, + + 'hddtemp': { + title: 'HDD Temp', + icon: '', + info: undefined + }, + + 'nginx': { + title: 'nginx', + icon: '', + info: undefined + }, + + 'apache': { + title: 'Apache', + icon: '', + info: undefined + }, + + 'lighttpd': { + title: 'Lighttpd', + icon: '', + info: undefined + }, + + 'web_log': { + title: undefined, + icon: '', + info: 'Information extracted from a server log file. web_log plugin incrementally parses the server log file to provide, in real-time, a breakdown of key server performance metrics. For web servers, an extended log file format may optionally be used (for nginx and apache) offering timing information and bandwidth for both requests and responses. web_log plugin may also be configured to provide a breakdown of requests per URL pattern (check /etc/netdata/python.d/web_log.conf).' + }, + + 'named': { + title: 'named', + icon: '', + info: undefined + }, + + 'squid': { + title: 'squid', + icon: '', + info: undefined + }, + + 'nut': { + title: 'UPS', + icon: '', + info: undefined + }, + + 'apcupsd': { + title: 'UPS', + icon: '', + info: undefined + }, + + 'smawebbox': { + title: 'Solar Power', + icon: '', + info: undefined + }, + + 'fronius': { + title: 'Fronius', + icon: '', + info: undefined + }, + + 'stiebeleltron': { + title: 'Stiebel Eltron', + icon: '', + info: undefined + }, + + 'snmp': { + title: 'SNMP', + icon: '', + info: undefined + }, + + 'go_expvar': { + title: 'Go - expvars', + icon: '', + info: 'Statistics about running Go applications exposed by the expvar package.' + }, + + 'chrony': { + icon: '', + info: 'chronyd parameters about the system’s clock performance.' + }, + + 'couchdb': { + icon: '', + info: 'Performance metrics for CouchDB, the open-source, JSON document-based database with an HTTP API and multi-master replication.' + }, + + 'beanstalk': { + title: 'Beanstalkd', + icon: '', + info: 'Provides statistics on the beanstalkd server and any tubes available on that server using data pulled from beanstalkc.' + }, + + 'rabbitmq': { + title: 'RabbitMQ', + icon: '', + info: 'Performance data for the RabbitMQ open-source message broker.' + }, + + 'ceph': { + title: 'Ceph', + icon: '', + info: 'Provides statistics on the ceph cluster server, the open-source distributed storage system.' 
+ }, + + 'ntpd': { + title: 'ntpd', + icon: '', + info: 'Provides statistics for the internal variables of the Network Time Protocol daemon ntpd and, optionally, the configured peers (if enabled in the module configuration). The module presents the performance metrics as shown by ntpq (the standard NTP query program) using NTP mode 6 UDP packets to communicate with the NTP server.' + }, + + 'spigotmc': { + title: 'Spigot MC', + icon: '', + info: 'Provides basic performance statistics for the Spigot Minecraft server.' + }, + + 'unbound': { + title: 'Unbound', + icon: '', + info: undefined + }, + + 'boinc': { + title: 'BOINC', + icon: '', + info: 'Provides task counts for BOINC distributed computing clients.' + }, + + 'w1sensor': { + title: '1-Wire Sensors', + icon: '', + info: 'Data derived from 1-Wire sensors. Currently temperature sensors are automatically detected.' + }, + + 'logind': { + title: 'Logind', + icon: '', + info: undefined + }, + + 'linux_power_supply': { + title: 'Power Supply', + icon: '', + info: 'Statistics for the various system power supplies.' + } +}; + + +// ---------------------------------------------------------------------------- +// submenus + +// information to be shown, just below each submenu + +// information about the submenus +netdataDashboard.submenu = { + 'web_log.squid_bandwidth': { + title: 'bandwidth', + info: 'Bandwidth of responses (sent) by squid. This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. FireQOS) for accurate accounting of the server bandwidth.' + }, + + 'web_log.squid_responses': { + title: 'responses', + info: 'Information related to the responses sent by squid.' + }, + + 'web_log.squid_requests': { + title: 'requests', + info: 'Information related to the requests squid has received.' + }, + + 'web_log.squid_hierarchy': { + title: 'hierarchy', + info: 'Performance metrics for the squid hierarchy used to serve the requests.' + }, + + 'web_log.squid_squid_transport': { + title: 'transport' + }, + + 'web_log.squid_squid_cache': { + title: 'cache', + info: 'Performance metrics of the squid cache.' + }, + + 'web_log.squid_timings': { + title: 'timings', + info: 'Duration of squid requests. Unrealistic spikes may be reported, since squid logs the total time of the requests when they complete. Especially for HTTPS, the clients get a tunnel from the proxy and exchange requests directly with the upstream servers, so squid cannot evaluate the individual requests and reports the total time the tunnel was open.' + }, + + 'web_log.squid_clients': { + title: 'clients' + }, + + 'web_log.bandwidth': { + info: 'Bandwidth of requests (received) and responses (sent). received requires an extended log format (without it, the web server log does not have this information). This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the web server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. FireQOS) for accurate accounting of the web server bandwidth.' + }, + + 'web_log.urls': { + info: 'Number of requests for each URL pattern defined in /etc/netdata/python.d/web_log.conf. This chart counts all requests matching the URL patterns defined, independently of the web server response codes (i.e. both successful and unsuccessful).' 
+ }, + + 'web_log.clients': { + info: 'Charts showing the number of unique client IPs accessing the web server.' + }, + + 'web_log.timings': { + info: 'Web server response timings - the time the web server needed to prepare and respond to requests. This requires an extended log format and its meaning is web server specific. For most web servers this accounts for the time from the reception of a complete request to the dispatch of the last byte of the response. So, it includes the network delays of responses, but it does not include the network delays of requests.' + }, + + 'mem.ksm': { + title: 'deduper (ksm)', + info: 'Kernel Same-page Merging (KSM) performance monitoring, read from several files in /sys/kernel/mm/ksm/. KSM is a memory-saving de-duplication feature in the Linux kernel (since version 2.6.32). The KSM daemon ksmd periodically scans those areas of user memory which have been registered with it, looking for pages of identical content which can be replaced by a single write-protected page (which is automatically copied if a process later wants to update its content). KSM was originally developed for use with KVM (where it was known as Kernel Shared Memory), to fit more virtual machines into physical memory, by sharing the data common between them. But it can be useful to any application which generates many instances of the same data.' + }, + + 'mem.hugepages': { + info: 'Hugepages is a feature that allows the kernel to utilize the multiple page size capabilities of modern hardware architectures. The kernel creates multiple pages of virtual memory, mapped from both physical RAM and swap. There is a mechanism in the CPU architecture called "Translation Lookaside Buffers" (TLB) to manage the mapping of virtual memory pages to actual physical memory addresses. The TLB is a limited hardware resource, so utilizing a large amount of physical memory with the default page size consumes the TLB and adds processing overhead. By utilizing Huge Pages, the kernel is able to create pages of much larger sizes, each page consuming a single resource in the TLB. Huge Pages are pinned to physical RAM and cannot be swapped/paged out.' + }, + + 'mem.numa': { + info: 'Non-Uniform Memory Access (NUMA) is a hierarchical memory design in which the memory access time depends on locality. Under NUMA, a processor can access its own local memory faster than non-local memory (memory local to another processor or memory shared between processors). The individual metrics are described in the Linux kernel documentation.' + }, + + 'ip.ecn': { + info: 'Explicit Congestion Notification (ECN) is a TCP extension that allows end-to-end notification of network congestion without dropping packets. ECN is an optional feature that may be used between two ECN-enabled endpoints when the underlying network infrastructure also supports it.' + }, + + 'netfilter.conntrack': { + title: 'connection tracker', + info: 'Netfilter Connection Tracker performance metrics. The connection tracker keeps track of all connections of the machine, inbound and outbound. It works by keeping a database with all open connections, tracking network and address translation and connection expectations.' + }, + + 'netfilter.nfacct': { + title: 'bandwidth accounting', + info: 'The following information is read using the nfacct.plugin.' + }, + + 'netfilter.synproxy': { + title: 'DDoS protection', + info: 'DDoS protection performance metrics. SYNPROXY is a TCP SYN packet proxy. 
It is used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks. It is a netfilter module in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections. It can be used for any kind of TCP traffic (even encrypted), since it does not interfere with the content itself.' + }, + + 'ipfw.dynamic_rules': { + title: 'dynamic rules', + info: 'Number of dynamic rules, created by the corresponding stateful firewall rules.' + }, + + 'system.softnet_stat': { + title: 'softnet', + info: function (os) { + if (os === 'linux') + return 'Statistics for CPU SoftIRQs related to network receive work. A breakdown per CPU core can be found at CPU / softnet statistics. processed states the number of packets processed, dropped is the number of packets dropped because the network device backlog was full (to fix them on Linux use sysctl to increase net.core.netdev_max_backlog), squeezed is the number of packets dropped because the network device budget ran out (to fix them on Linux use sysctl to increase net.core.netdev_budget and/or net.core.netdev_budget_usecs). More information about identifying and troubleshooting network driver related issues can be found at Red Hat Enterprise Linux Network Performance Tuning Guide.'; + else + return 'Statistics for CPU SoftIRQs related to network receive work.'; + } + }, + + 'cpu.softnet_stat': { + title: 'softnet', + info: function (os) { + if (os === 'linux') + return 'Statistics for per CPU core SoftIRQs related to network receive work. Total for all CPU cores can be found at System / softnet statistics. processed states the number of packets processed, dropped is the number of packets dropped because the network device backlog was full (to fix them on Linux use sysctl to increase net.core.netdev_max_backlog), squeezed is the number of packets dropped because the network device budget ran out (to fix them on Linux use sysctl to increase net.core.netdev_budget and/or net.core.netdev_budget_usecs). More information about identifying and troubleshooting network driver related issues can be found at Red Hat Enterprise Linux Network Performance Tuning Guide.'; + else + return 'Statistics for per CPU core SoftIRQs related to network receive work. Total for all CPU cores can be found at System / softnet statistics.'; + } + }, + + 'go_expvar.memstats': { + title: 'memory statistics', + info: 'Go runtime memory statistics. See runtime.MemStats documentation for more info about each chart and the values.' + }, + + 'couchdb.dbactivity': { + title: 'db activity', + info: 'Overall database reads and writes for the entire server. This includes any external HTTP traffic, as well as internal replication traffic performed in a cluster to ensure node consistency.' + }, + + 'couchdb.httptraffic': { + title: 'http traffic breakdown', + info: 'All HTTP traffic, broken down by type of request (GET, PUT, POST, etc.) and response status code (200, 201, 4xx, etc.)
Any 5xx errors here indicate a likely CouchDB bug; check the logfile for further information.' + }, + + 'couchdb.ops': { + title: 'server operations' + }, + + 'couchdb.perdbstats': { + title: 'per db statistics', + info: 'Statistics per database. This includes 3 size graphs per database: active (the size of live data in the database), external (the uncompressed size of the database contents), and file (the size of the file on disk, exclusive of any views and indexes). It also includes the number of documents and number of deleted documents per database.' + }, + + 'couchdb.erlang': { + title: 'erlang statistics', + info: 'Detailed information about the status of the Erlang VM that hosts CouchDB. These are intended for advanced users only. High values of the peak message queue (>10e6) generally indicate an overload condition.' + }, + + 'ntpd.system': { + title: 'system', + info: 'Statistics of the system variables as shown by the readlist billboard ntpq -c rl. System variables are assigned an association ID of zero and can also be shown in the readvar billboard ntpq -c "rv 0". These variables are used in the Clock Discipline Algorithm, to calculate the lowest and most stable offset.' + }, + + 'ntpd.peers': { + title: 'peers', + info: 'Statistics of the peer variables for each peer configured in /etc/ntp.conf as shown by the readvar billboard ntpq -c "rv <association>", while each peer is assigned a nonzero association ID as shown by ntpq -c "apeers". The module periodically scans for new/changed peers (default: every 60s). ntpd selects the best possible peer from the available peers to synchronize the clock. A minimum of 3 peers is required to properly identify the best possible peer.' + } +}; + + +// ---------------------------------------------------------------------------- +// chart + +// information works on the context of a chart +// Its purpose is to set: +// +// info: the text above the charts +// heads: the representation of the chart at the top of the subsection (second level menu) +// mainheads: the representation of the chart at the top of the section (first level menu) +// colors: the dimension colors of the chart (the default colors are appended) +// height: the ratio of the chart height relative to the default +// +netdataDashboard.context = { + 'system.cpu': { + info: function (os) { + void(os); + return 'Total CPU utilization (all cores). 100% here means there is no CPU idle time at all. You can get per core usage at the CPUs section and per application usage at the Applications Monitoring section.' + + netdataDashboard.sparkline('
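A context entry may combine any of the keys listed in the comment block above, plus per-chart options such as valueRange, commonMin and commonMax that appear throughout this file. A sketch of a single entry exercising several of them — the 'myapp.requests' context and all values are hypothetical:

// Sketch: a context entry using several of the keys documented above.
// 'myapp.requests' is a hypothetical context; the values are arbitrary.
netdataDashboard.context = netdataDashboard.context || {};

netdataDashboard.context['myapp.requests'] = {
    info: 'Requests per second served by my application.',
    colors: '#22AA99',       // custom dimension colors; defaults are appended
    height: 0.5,             // half of the default chart height
    valueRange: "[0, 100]",  // fixed y-axis range, as used by system.cpu
    commonMin: true,         // share min/max with charts of the same context
    commonMax: true
};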
Keep an eye on iowait ', 'system.cpu', 'iowait', '%', '. If it is constantly high, your disks are a bottleneck and they slow your system down.') + + netdataDashboard.sparkline('
An important metric worth monitoring is softirq ', 'system.cpu', 'softirq', '%', '. A constantly high percentage of softirq may indicate network driver issues.'); + }, + valueRange: "[0, 100]" + }, + + 'system.load': { + info: 'Current system load, i.e. the number of processes using CPU or waiting for system resources (usually CPU and disk). The 3 metrics refer to 1, 5 and 15 minute averages. The system calculates this once every 5 seconds. For more information check this Wikipedia article', + height: 0.7 + }, + + 'system.io': { + info: function (os) { + var s = 'Total Disk I/O, for all physical disks. You can get detailed information about each disk at the Disks section and per application Disk usage at the Applications Monitoring section.'; + + if (os === 'linux') + return s + ' Physical are all the disks that are listed in /sys/block, but do not exist in /sys/devices/virtual/block.'; + else + return s; + } + }, + + 'system.pgpgio': { + info: 'Memory paged from/to disk. This is usually the total disk I/O of the system.' + }, + + 'system.swapio': { + info: 'Total Swap I/O. (netdata measures both in and out. If either of them is not shown in the chart, it is because it is zero - you can change the page settings to always render all the available dimensions on all charts).' + }, + + 'system.pgfaults': { + info: 'Total page faults. Major page faults indicate that the system is using its swap. You can find which applications use the swap at the Applications Monitoring section.' + }, + + 'system.entropy': { + colors: '#CC22AA', + info: 'Entropy is a pool of random numbers (/dev/random) that is mainly used in cryptography. If the pool of entropy gets empty, processes requiring random numbers may run a lot slower (it depends on the interface each program uses), waiting for the pool to be replenished. Ideally a system with high entropy demands should have a hardware device for that purpose (TPM is one such device). There are also several software-only options you may install, like haveged, although these are generally useful only in servers.' + }, + + 'system.forks': { + colors: '#5555DD', + info: 'Number of new processes created.' + }, + + 'system.intr': { + colors: '#DD5555', + info: 'Total number of CPU interrupts. Check system.interrupts, which gives more detail about each interrupt, and also the CPUs section where interrupts are analyzed per CPU core.' + }, + + 'system.interrupts': { + info: 'CPU interrupts in detail. At the CPUs section, interrupts are analyzed per CPU core.' + }, + + 'system.softirqs': { + info: 'CPU softirqs in detail. At the CPUs section, softirqs are analyzed per CPU core.' + }, + + 'system.processes': { + info: 'System processes. Running are the processes in the CPU. Blocked are processes that are willing to enter the CPU, but cannot, e.g. because they are waiting for disk activity.' + }, + + 'system.active_processes': { + info: 'All system processes.' + }, + + 'system.ctxt': { + info: 'A context switch is the switching of the CPU from one process, task or thread to another. If there are many processes or threads willing to execute and very few CPU cores available to handle them, the system performs more context switches to balance the CPU resources among them. The whole process is computationally intensive. The more context switches, the slower the system gets.' + }, + + 'system.idlejitter': { + info: 'Idle jitter is calculated by netdata. A thread is spawned that requests to sleep for a few microseconds. 
When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. This number is useful in real-time environments, where CPU jitter can affect the quality of the service (like VoIP media gateways).' + }, + + 'system.net': { + info: function (os) { + var s = 'Total bandwidth of all physical network interfaces. This does not include lo, VPNs, network bridges, IFB devices, bond interfaces, etc. Only the bandwidth of physical network interfaces is aggregated.'; + + if (os === 'linux') + return s + ' Physical are all the network interfaces that are listed in /proc/net/dev, but do not exist in /sys/devices/virtual/net.'; + else + return s; + } + }, + + 'system.ip': { + info: 'Total IP traffic in the system.' + }, + + 'system.ipv4': { + info: 'Total IPv4 Traffic.' + }, + + 'system.ipv6': { + info: 'Total IPv6 Traffic.' + }, + + 'system.ram': { + info: 'System Random Access Memory (i.e. physical memory) usage.' + }, + + 'system.swap': { + info: 'System swap memory usage. Swap space is used when physical memory (RAM) is full. When the system needs more memory resources and the RAM is full, inactive pages in memory are moved to the swap space (usually a disk, a disk partition or a file).' + }, + + // ------------------------------------------------------------------------ + // CPU charts + + 'cpu.cpu': { + commonMin: true, + commonMax: true, + valueRange: "[0, 100]" + }, + + 'cpu.interrupts': { + commonMin: true, + commonMax: true + }, + + 'cpu.softirqs': { + commonMin: true, + commonMax: true + }, + + 'cpu.softnet_stat': { + commonMin: true, + commonMax: true + }, + + // ------------------------------------------------------------------------ + // MEMORY + + 'mem.ksm_savings': { + heads: [ + netdataDashboard.gaugeChart('Saved', '12%', 'savings', '#0099CC') + ] + }, + + 'mem.ksm_ratios': { + heads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'mem.pgfaults': { + info: 'A page fault is a type of interrupt, called trap, raised by computer hardware when a running program accesses a memory page that is mapped into the virtual address space, but not actually loaded into main memory. If the page is loaded in memory at the time the fault is generated, but is not marked in the memory management unit as being loaded in memory, then it is called a minor or soft page fault. A major page fault is generated when the system needs to load the memory page from disk or swap memory.' + }, + + 'mem.committed': { + colors: NETDATA.colors[3], + info: 'Committed Memory is the sum of all memory which has been allocated by processes.' + }, + + 'mem.available': { + info: 'Available Memory is estimated by the kernel as the amount of RAM that can be used by userspace processes without causing swapping.' + }, + + 'mem.writeback': { + info: 'Dirty is the amount of memory waiting to be written to disk. Writeback is how much memory is actively being written to disk.' + }, + + 'mem.kernel': { + info: 'The total amount of memory being used by the kernel. Slab is the amount of memory used by the kernel to cache data structures for its own use. KernelStack is the amount of memory allocated for each task done by the kernel. PageTables is the amount of memory dedicated to the lowest level of page tables (a page table is used to turn a virtual address into a physical memory address). VmallocUsed is the amount of memory being used as virtual address space.' + }, + + 'mem.slab': { + info: 'Reclaimable is the amount of memory which the kernel can reuse. Unreclaimable cannot be reused even when the kernel is lacking memory.' + }, + + 'mem.hugepages': { + info: 'Dedicated (or Direct) HugePages is memory reserved for applications configured to utilize huge pages. Hugepages count as used memory, even if there are free hugepages available.' + }, + + 'mem.transparent_hugepages': { + info: 'Transparent HugePages (THP) is backing virtual memory with huge pages, supporting automatic promotion and demotion of page sizes. It works for all applications for anonymous memory mappings and tmpfs/shmem.' + }, + + // ------------------------------------------------------------------------ + // network interfaces + + 'net.drops': { + info: 'Packets that have been dropped at the network interface level. These are the same counters reported by ifconfig as RX dropped (inbound) and TX dropped (outbound). Inbound packets can be dropped at the network interface level due to softnet backlog overflow, bad / unintended VLAN tags, unknown or unregistered protocols, or IPv6 frames when the server is not configured for IPv6. Check this document for more information.' + }, + + // ------------------------------------------------------------------------ + // IP + + 'ip.inerrors': { + info: 'Errors encountered during the reception of IP packets. ' + + 'noroutes (InNoRoutes) counts packets that were dropped because there was no route to send them. ' + + 'truncated (InTruncatedPkts) counts packets that were discarded because the datagram frame didn\'t carry enough data. ' + + 'checksum (InCsumErrors) counts packets that were dropped because they had a wrong checksum. ' + }, + + 'ip.tcpmemorypressures': { + info: 'Number of times a socket was put in memory pressure due to a non-fatal memory allocation failure (the kernel attempts to work around this situation by reducing the send buffers, etc).' + }, + + 'ip.tcpconnaborts': { + info: 'TCP connection aborts. 
baddata (TCPAbortOnData) happens while the connection is on FIN_WAIT1 and the kernel receives a packet with a sequence number beyond the last one for this connection - the kernel responds with RST (closes the connection). userclosed (TCPAbortOnClose) happens when the kernel receives data on an already closed connection and responds with RST. nomemory (TCPAbortOnMemory) happens when there are too many orphaned sockets (not attached to an fd) and the kernel has to drop a connection - sometimes it will send an RST, sometimes it won\'t. timeout (TCPAbortOnTimeout) happens when a connection times out. linger (TCPAbortOnLinger) happens when the kernel killed a socket that was already closed by the application and lingered around for long enough. failed (TCPAbortFailed) happens when the kernel attempted to send an RST but failed because there was no memory available.' + }, + + 'ip.tcp_syn_queue': { + info: 'The SYN queue of the kernel tracks TCP handshakes until connections get fully established. ' + + 'It overflows when too many incoming TCP connection requests hang in the half-open state and the server ' + + 'is not configured to fall back to SYN cookies. Overflows are usually caused by SYN flood DoS attacks ' + + '(i.e. someone sends lots of SYN packets and never completes the handshakes). ' + + 'drops (or TcpExtTCPReqQFullDrop) is the number of connections dropped because the ' + + 'SYN queue was full and SYN cookies were disabled. ' + + 'cookies (or TcpExtTCPReqQFullDoCookies) is the number of SYN cookies sent because the ' + + 'SYN queue was full.' + }, + + 'ip.tcp_accept_queue': { + info: 'The accept queue of the kernel holds the fully established TCP connections, waiting to be handled ' + + 'by the listening application. overflows (or ListenOverflows) is the number of ' + + 'established connections that could not be handled because the receive queue of the listening application ' + + 'was full. drops (or ListenDrops) is the number of incoming ' + + 'connections that could not be handled, including SYN floods, overflows, out of memory, security issues, ' + + 'no route to destination, reception of related ICMP messages, socket is broadcast or multicast.' + }, + + + // ------------------------------------------------------------------------ + // IPv4 + + 'ipv4.tcpsock': { + info: 'The number of established TCP connections (known as CurrEstab). This is a snapshot of the established connections at the time of measurement (i.e. a connection established and a connection disconnected within the same iteration will not affect this metric).' + }, + + 'ipv4.tcpopens': { + info: 'active or ActiveOpens is the number of outgoing TCP connections attempted by this host.' + + ' passive or PassiveOpens is the number of incoming TCP connections accepted by this host.' + }, + + 'ipv4.tcperrors': { + info: 'InErrs is the number of TCP segments received in error (including header too small, checksum errors, sequence errors, bad packets - for both IPv4 and IPv6).' + + ' InCsumErrors is the number of TCP segments received with checksum errors (for both IPv4 and IPv6).' + + ' RetransSegs is the number of TCP segments retransmitted.' + }, + + 'ipv4.tcphandshake': { + info: 'EstabResets is the number of established connection resets (i.e. connections that made a direct transition from ESTABLISHED or CLOSE_WAIT to CLOSED).' + + ' OutRsts is the number of TCP segments sent with the RST flag set (for both IPv4 and IPv6).' 
+ + ' AttemptFails is the number of times TCP connections made a direct transition from either SYN_SENT or SYN_RECV to CLOSED, plus the number of times TCP connections made a direct transition from SYN_RECV to LISTEN.' + + ' TCPSynRetrans shows retries for new outbound TCP connections, which can indicate general connectivity issues or backlog on the remote host.' + }, + + // ------------------------------------------------------------------------ + // APPS + + 'apps.cpu': { + height: 2.0 + }, + + 'apps.mem': { + info: 'Real memory (RAM) used by applications. This does not include shared memory.' + }, + + 'apps.vmem': { + info: 'Virtual memory allocated by applications. Please check this article for more information.' + }, + + 'apps.preads': { + height: 2.0 + }, + + 'apps.pwrites': { + height: 2.0 + }, + + // ------------------------------------------------------------------------ + // USERS + + 'users.cpu': { + height: 2.0 + }, + + 'users.mem': { + info: 'Real memory (RAM) used per user. This does not include shared memory.' + }, + + 'users.vmem': { + info: 'Virtual memory allocated per user. Please check this article for more information.' + }, + + 'users.preads': { + height: 2.0 + }, + + 'users.pwrites': { + height: 2.0 + }, + + // ------------------------------------------------------------------------ + // GROUPS + + 'groups.cpu': { + height: 2.0 + }, + + 'groups.mem': { + info: 'Real memory (RAM) used per user group. This does not include shared memory.' + }, + + 'groups.vmem': { + info: 'Virtual memory allocated per user group. Please check this article for more information.' + }, + + 'groups.preads': { + height: 2.0 + }, + + 'groups.pwrites': { + height: 2.0 + }, + + // ------------------------------------------------------------------------ + // NETWORK QoS + + 'tc.qos': { + heads: [ + function (os, id) { + void(os); + + if (id.match(/.*-ifb$/)) + return netdataDashboard.gaugeChart('Inbound', '12%', '', '#5555AA'); + else + return netdataDashboard.gaugeChart('Outbound', '12%', '', '#AA9900'); + } + ] + }, + + // ------------------------------------------------------------------------ + // NETWORK INTERFACES + + 'net.net': { + mainheads: [ + function (os, id) { + void(os); + if (id.match(/^cgroup_.*/)) { + var iface; + try { + iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length); + } + catch (e) { + iface = ''; + } + return netdataDashboard.gaugeChart('Received' + iface, '12%', 'received'); + } + else + return ''; + }, + function (os, id) { + void(os); + if (id.match(/^cgroup_.*/)) { + var iface; + try { + iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length); + } + catch (e) { + iface = ''; + } + return netdataDashboard.gaugeChart('Sent' + iface, '12%', 'sent'); + } + else + return ''; + } + ], + heads: [ + function (os, id) { + void(os); + if (!id.match(/^cgroup_.*/)) + return netdataDashboard.gaugeChart('Received', '12%', 'received'); + else + return ''; + }, + function (os, id) { + void(os); + if (!id.match(/^cgroup_.*/)) + return netdataDashboard.gaugeChart('Sent', '12%', 'sent'); + else + return ''; + } + ] + }, + + // ------------------------------------------------------------------------ + // NETFILTER + + 'netfilter.sockets': { + colors: '#88AA00', + heads: [ + netdataDashboard.gaugeChart('Active Connections', '12%', '', '#88AA00') + ] + }, + + 'netfilter.new': { + heads: [ + netdataDashboard.gaugeChart('New Connections', '12%', 'new', '#5555AA') + ] + }, + + // ------------------------------------------------------------------------ + // 
DISKS + + 'disk.util': { + colors: '#FF5588', + heads: [ + netdataDashboard.gaugeChart('Utilization', '12%', '', '#FF5588') + ], + info: 'Disk Utilization measures the amount of time the disk was busy with something. This is not related to its performance. 100% means that the system always had an outstanding operation on the disk. Keep in mind that depending on the underlying technology of the disk, 100% here may or may not be an indication of congestion.' + }, + + 'disk.backlog': { + colors: '#0099CC', + info: 'Backlog is an indication of the duration of pending disk operations. On every I/O event the system is multiplying the time spent doing I/O since the last update of this field with the number of pending operations. While not accurate, this metric can provide an indication of the expected completion time of the operations in progress.' + }, + + 'disk.io': { + heads: [ + netdataDashboard.gaugeChart('Read', '12%', 'reads'), + netdataDashboard.gaugeChart('Write', '12%', 'writes') + ], + info: 'Amount of data transferred to and from disk.' + }, + + 'disk.ops': { + info: 'Completed disk I/O operations. Keep in mind the number of operations requested might be higher, since the system is able to merge operations that are adjacent to each other (see merged operations chart).' + }, + + 'disk.qops': { + info: 'I/O operations currently in progress. This metric is a snapshot - it is not an average over the last interval.' + }, + + 'disk.iotime': { + height: 0.5, + info: 'The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute I/O operations in parallel.' + }, + 'disk.mops': { + height: 0.5, + info: 'The number of merged disk operations. The system is able to merge adjacent I/O operations, for example two 4KB reads can become one 8KB read before being given to the disk.' + }, + 'disk.svctm': { + height: 0.5, + info: 'The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations, the reported average service time will be misleading.' + }, + 'disk.avgsz': { + height: 0.5, + info: 'The average I/O operation size.' + }, + 'disk.await': { + height: 0.5, + info: 'The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.' + }, + + 'disk.space': { + info: 'Disk space utilization. reserved for root is automatically reserved by the system to prevent the root user from running out of space.' + }, + 'disk.inodes': { + info: 'inodes (or index nodes) are filesystem objects (e.g. files and directories). On many types of file system implementations, the maximum number of inodes is fixed at filesystem creation, limiting the maximum number of files the filesystem can hold. It is possible for a device to run out of inodes. When this happens, new files cannot be created on the device, even though there may be free space available.' + }, + + 'mysql.net': { + info: 'The amount of data sent to mysql clients (out) and received from mysql clients (in).' + }, + + // ------------------------------------------------------------------------ + // MYSQL + + 'mysql.queries': { + info: 'The number of statements executed by the server.
  • queries counts the statements executed within stored SQL programs.
  • questions counts the statements sent to the mysql server by mysql clients.
  • slow queries counts the number of statements that took more than long_query_time seconds to be executed.' + + ' For more information about slow queries check the mysql slow query log.
' }, + + 'mysql.handlers': { + info: 'Usage of the internal handlers of mysql. This chart provides very good insights into what the mysql server is actually doing.' + + ' (if the chart is not showing all these dimensions, it is because they are zero - set Which dimensions to show? to All from the dashboard settings, to render even the zero values)
  • commit, the number of internal COMMIT statements.
  • delete, the number of times that rows have been deleted from tables.
  • prepare, a counter for the prepare phase of two-phase commit operations.
  • read first, the number of times the first entry in an index was read. A high value suggests that the server is doing a lot of full index scans; e.g. SELECT col1 FROM foo, with col1 indexed.
  • read key, the number of requests to read a row based on a key. If this value is high, it is a good indication that your tables are properly indexed for your queries.
  • read next, the number of requests to read the next row in key order. This value is incremented if you are querying an index column with a range constraint or if you are doing an index scan.
  • read prev, the number of requests to read the previous row in key order. This read method is mainly used to optimize ORDER BY ... DESC.
  • read rnd, the number of requests to read a row based on a fixed position. A high value indicates you are doing a lot of queries that require sorting of the result. You probably have a lot of queries that require MySQL to scan entire tables or you have joins that do not use keys properly.
  • read rnd next, the number of requests to read the next row in the data file. This value is high if you are doing a lot of table scans. Generally this suggests that your tables are not properly indexed or that your queries are not written to take advantage of the indexes you have.
  • rollback, the number of requests for a storage engine to perform a rollback operation.
  • savepoint, the number of requests for a storage engine to place a savepoint.
  • savepoint rollback, the number of requests for a storage engine to roll back to a savepoint.
  • update, the number of requests to update a row in a table.
  • write, the number of requests to insert a row in a table.
' + }, + + 'mysql.table_locks': { + info: 'MySQL table locks counters:
  • immediate, the number of times that a request for a table lock could be granted immediately.
  • waited, the number of times that a request for a table lock could not be granted immediately and a wait was needed. If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.
' + }, + + // ------------------------------------------------------------------------ + // POSTGRESQL + + + 'postgres.db_stat_blks': { + info: 'Blocks reads from disk or cache.
  • blks_read: number of disk blocks read in this database.
  • blks_hit: number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache)
' + }, + 'postgres.db_stat_tuple_write': { + info: '
  • Number of rows inserted/updated/deleted.
  • conflicts: number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)
' + }, + 'postgres.db_stat_temp_bytes': { + info: 'Temporary files can be created on disk for sorts, hashes, and temporary query results.' + }, + 'postgres.db_stat_temp_files': { + info: '
  • files: number of temporary files created by queries. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing).
' + }, + 'postgres.archive_wal': { + info: 'WAL archiving.' + }, + 'postgres.checkpointer': { + info: 'Number of checkpoints.
  • scheduled: when checkpoint_timeout is reached.
  • requested: when max_wal_size is reached.
' + + 'For more information see WAL Configuration.' + }, + 'postgres.autovacuum': { + info: 'PostgreSQL databases require periodic maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum daemon. ' + + 'For more information see The Autovacuum Daemon.' + }, + 'postgres.standby_delta': { + info: 'Streaming replication delta.
  • sent_delta: replication delta sent to standby.
  • write_delta: replication delta written to disk by this standby.
  • flush_delta: replication delta flushed to disk by this standby server.
  • replay_delta: replication delta replayed into the database on this standby server.
' + + 'For more information see Synchronous Replication.' + }, + 'postgres.replication_slot': { + info: 'Replication slot files.
  • wal_keeped: WAL files retained by each replication slot.
  • pg_replslot_files: files present in pg_replslot.
' + + 'For more information see Replication Slots.' + }, + + + // ------------------------------------------------------------------------ + // APACHE + + 'apache.connections': { + colors: NETDATA.colors[4], + mainheads: [ + netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4]) + ] + }, + + 'apache.requests': { + colors: NETDATA.colors[0], + mainheads: [ + netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0]) + ] + }, + + 'apache.net': { + colors: NETDATA.colors[3], + mainheads: [ + netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3]) + ] + }, + + 'apache.workers': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'apache.bytesperreq': { + colors: NETDATA.colors[3], + height: 0.5 + }, + + 'apache.reqpersec': { + colors: NETDATA.colors[4], + height: 0.5 + }, + + 'apache.bytespersec': { + colors: NETDATA.colors[6], + height: 0.5 + }, + + + // ------------------------------------------------------------------------ + // LIGHTTPD + + 'lighttpd.connections': { + colors: NETDATA.colors[4], + mainheads: [ + netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4]) + ] + }, + + 'lighttpd.requests': { + colors: NETDATA.colors[0], + mainheads: [ + netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0]) + ] + }, + + 'lighttpd.net': { + colors: NETDATA.colors[3], + mainheads: [ + netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3]) + ] + }, + + 'lighttpd.workers': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'lighttpd.bytesperreq': { + colors: NETDATA.colors[3], + height: 0.5 + }, + + 'lighttpd.reqpersec': { + colors: NETDATA.colors[4], + height: 0.5 + }, + + 'lighttpd.bytespersec': { + colors: NETDATA.colors[6], + height: 0.5 + }, + + // ------------------------------------------------------------------------ + // NGINX + + 'nginx.connections': { + colors: NETDATA.colors[4], + mainheads: [ + netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4]) + ] + }, + + 'nginx.requests': { + colors: NETDATA.colors[0], + mainheads: [ + netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0]) + ] + }, + + // ------------------------------------------------------------------------ + // HTTP check + + 'httpcheck.responsetime': { + info: 'The response time describes the time passed between request and response. ' + + 'Currently, the accuracy of the response time is low and should be used as reference only.' + }, + + 'httpcheck.responselength': { + info: 'The response length counts the number of characters in the response body. For static pages, this should be mostly constant.' + }, + + 'httpcheck.status': { + valueRange: "[0, 1]", + info: 'This chart verifies the response of the webserver. Each status dimension will have a value of 1 if triggered. ' + + 'Dimension success is 1 only if all constraints are satisfied. ' + + 'This chart is most useful for alarms or third-party apps.' + }, + + // ------------------------------------------------------------------------ + // NETDATA + + 'netdata.response_time': { + info: 'The netdata API response time measures the time netdata needed to serve requests. This time includes everything, from the reception of the first byte of a request, to the dispatch of the last byte of its reply, therefore it includes all network latencies involved (i.e. a client over a slow network will influence these metrics).' + }, + + // ------------------------------------------------------------------------ + // RETROSHARE + + 'retroshare.bandwidth': { + info: 'RetroShare inbound and outbound traffic.', + mainheads: [ + netdataDashboard.gaugeChart('Received', '12%', 'bandwidth_down_kb'), + netdataDashboard.gaugeChart('Sent', '12%', 'bandwidth_up_kb') + ] + }, + + 'retroshare.peers': { + info: 'Number of (connected) RetroShare friends.', + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'retroshare.dht': { + info: 'Statistics about RetroShare\'s DHT. These values are estimated!' + }, + + // ------------------------------------------------------------------------ + // fping + + 'fping.quality': { + colors: NETDATA.colors[10], + height: 0.5 + }, + + 'fping.packets': { + height: 0.5 + }, + + + // ------------------------------------------------------------------------ + // containers + + 'cgroup.cpu': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'cgroup.mem_usage': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'cgroup.throttle_io': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + }, + function (os, id) { + void(os); + return '
'; + } + ] + }, + + // ------------------------------------------------------------------------ + // beanstalkd + // system charts + 'beanstalk.cpu_usage': { + info: 'Amount of CPU Time for user and system used by beanstalkd.' + }, + + // This is also a per-tube stat + 'beanstalk.jobs_rate': { + info: 'The rate of jobs processed by the beanstalkd server.' + }, + + 'beanstalk.connections_rate': { + info: 'The rate of connections opened to beanstalkd.' + }, + + 'beanstalk.commands_rate': { + info: 'The rate of commands received by beanstalkd.' + }, + + 'beanstalk.current_tubes': { + info: 'Total number of current tubes on the server including the default tube (which always exists).' + }, + + 'beanstalk.current_jobs': { + info: 'Current number of jobs in all tubes grouped by status: urgent, ready, reserved, delayed and buried.' + }, + + 'beanstalk.current_connections': { + info: 'Current number of connections grouped by connection type: written, producers, workers, waiting.' + }, + + 'beanstalk.binlog': { + info: 'The rate of records written to binlog and migrated as part of compaction.' + }, + + 'beanstalk.uptime': { + info: 'Total time the beanstalkd server has been up.' + }, + + // tube charts + 'beanstalk.jobs': { + info: 'Number of jobs currently in the tube grouped by status: urgent, ready, reserved, delayed and buried.' + }, + + 'beanstalk.connections': { + info: 'The current number of connections to this tube grouped by connection type: using, waiting and watching.' + }, + + 'beanstalk.commands': { + info: 'The rate of delete and pause commands executed by beanstalkd.' + }, + + 'beanstalk.pause': { + info: 'Shows how long the tube has been paused, and how long remains on the pause.' + }, + + // ------------------------------------------------------------------------ + // ceph + + 'ceph.general_usage': { + info: 'Used and available space in the whole ceph cluster.' + }, + + 'ceph.general_objects': { + info: 'Total number of objects stored in the ceph cluster.' + }, + + 'ceph.general_bytes': { + info: 'Cluster read and write data per second.' + }, + + 'ceph.general_operations': { + info: 'Number of read and write operations per second.' + }, + + 'ceph.general_latency': { + info: 'Total of apply and commit latency in all OSDs. The apply latency is the total time taken to flush an update to disk. The commit latency is the total time taken to commit an operation to the journal.' + }, + + 'ceph.pool_usage': { + info: 'The used space in each pool.' + }, + + 'ceph.pool_objects': { + info: 'Number of objects present in each pool.' + }, + + 'ceph.pool_read_bytes': { + info: 'The rate of read data per second in each pool.' + }, + + 'ceph.pool_write_bytes': { + info: 'The rate of write data per second in each pool.' + }, + + 'ceph.pool_read_objects': { + info: 'Number of read objects per second in each pool.' + }, + + 'ceph.pool_write_objects': { + info: 'Number of write objects per second in each pool.' + }, + + 'ceph.osd_usage': { + info: 'The used space in each OSD.' + }, + + 'ceph.apply_latency': { + info: 'Time taken to flush an update in each OSD.' + }, + + 'ceph.commit_latency': { + info: 'Time taken to commit an operation to the journal in each OSD.' + }, + + // ------------------------------------------------------------------------ + // web_log + + 'web_log.response_statuses': { + info: 'Web server responses by type. 
success includes 1xx, 2xx and 304, error includes 5xx, redirect includes 3xx except 304, bad includes 4xx, other are all the other responses.', + mainheads: [ + function (os, id) { + void(os); + return '
'; + }, + + function (os, id) { + void(os); + return '
'; + }, + + function (os, id) { + void(os); + return '
'; + }, + + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'web_log.response_codes': { + info: 'Web server responses by code family. ' + + 'According to the standards 1xx are informational responses, ' + + '2xx are successful responses, ' + + '3xx are redirects (although they include 304 which is used as "not modified"), ' + + '4xx are bad requests, ' + + '5xx are internal server errors, ' + + 'other are non-standard responses, ' + + 'unmatched counts the lines in the log file that are not matched by the plugin (let us know if you have any unmatched).' + }, + + 'web_log.response_time': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'web_log.detailed_response_codes': { + info: 'Number of responses for each response code individually.' + }, + + 'web_log.requests_per_ipproto': { + info: 'Web server requests received per IP protocol version.' + }, + + 'web_log.clients': { + info: 'Unique client IPs accessing the web server, within each data collection iteration. If data collection is per second, this chart shows unique client IPs per second.' + }, + + 'web_log.clients_all': { + info: 'Unique client IPs accessing the web server since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the web server. On very busy web servers (several millions of unique IPs) you may want to disable this chart (check /etc/netdata/python.d/web_log.conf).' + }, + + // ------------------------------------------------------------------------ + // web_log for squid + + 'web_log.squid_response_statuses': { + info: 'Squid responses by type. ' + + 'success includes 1xx, 2xx, 000, 304, ' + + 'error includes 5xx and 6xx, ' + + 'redirect includes 3xx except 304, ' + + 'bad includes 4xx, ' + + 'other are all the other responses.', + mainheads: [ + function (os, id) { + void(os); + return '
'; + }, + + function (os, id) { + void(os); + return '
'; + }, + + function (os, id) { + void(os); + return '
'; + }, + + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'web_log.squid_response_codes': { + info: 'Web server responses by code family. ' + + 'According to HTTP standards 1xx are informational responses, ' + + '2xx are successful responses, ' + + '3xx are redirects (although they include 304 which is used as "not modified"), ' + + '4xx are bad requests, ' + + '5xx are internal server errors. ' + + 'Squid also defines 000 mostly for UDP requests, and ' + + '6xx for broken upstream servers sending wrong headers. ' + + 'Finally, other are non-standard responses, and ' + + 'unmatched counts the lines in the log file that are not matched by the plugin (let us know if you have any unmatched).' + }, + + 'web_log.squid_duration': { + mainheads: [ + function (os, id) { + void(os); + return '
'; + } + ] + }, + + 'web_log.squid_detailed_response_codes': { + info: 'Number of responses for each response code individually.' + }, + + 'web_log.squid_clients': { + info: 'Unique client IPs accessing squid, within each data collection iteration. If data collection is per second, this chart shows unique client IPs per second.' + }, + + 'web_log.squid_clients_all': { + info: 'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several millions of unique IPs) you may want to disable this chart (check /etc/netdata/python.d/web_log.conf).' + }, + + 'web_log.squid_transport_methods': { + info: 'Break down per delivery method: TCP are requests on the HTTP port (usually 3128), ' + + 'UDP are requests on the ICP port (usually 3130), or HTCP port (usually 4128). ' + + 'If ICP logging was disabled using the log_icp_queries option, no ICP replies will be logged. ' + + 'NONE is used to state that squid delivered an unusual response or no response at all. ' + + 'Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. ' + + 'Also seen with responses to CONNECT requests.' + }, + + 'web_log.squid_code': { + info: 'These are combined squid result status codes. A break down per component is given in the following charts. ' + + 'Check the squid documentation about them.' + }, + + 'web_log.squid_handling_opts': { + info: 'These tags are optional and describe why the particular handling was performed or where the request came from. ' + + 'CLIENT means that the client request placed limits affecting the response. Usually seen when the client issued a no-cache, or analogous cache control command, along with the request. Thus, the cache has to validate the object.' + + 'IMS states that the client sent a revalidation (conditional) request. ' + + 'ASYNC is used when the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from stale-while-revalidate cache controls, or ESI sub-objects being loaded. ' + + 'SWAPFAIL is assigned when the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server. ' + + 'REFRESH when a revalidation (conditional) request was sent to the server. ' + + 'SHARED when this request was combined with an existing transaction by collapsed forwarding. NOTE: the existing request is not marked as SHARED. ' + + 'REPLY when particular handling was requested in the HTTP reply from server or peer. Usually seen on DENIED due to http_reply_access ACLs preventing delivery of the server\'s response object to the client.' + }, + + 'web_log.squid_object_types': { + info: 'These tags are optional and describe what type of object was produced. ' + + 'NEGATIVE is only seen on HIT responses, indicating the response was a cached error response. e.g. 404 not found. ' + + 'STALE means the object was cached and served stale. This is usually caused by stale-while-revalidate or stale-if-error cache controls. ' + + 'OFFLINE when the requested object was retrieved from the cache during offline_mode. The offline mode never validates any object. ' + + 'INVALID when an invalid request was received. An error response was delivered indicating what the problem was. ' + + 'FAIL is only seen on REFRESH to indicate the revalidation request failed. 
The response object may be the server-provided network error or the stale object which was being revalidated, depending on stale-if-error cache control. ' + + 'MODIFIED is only seen on REFRESH responses to indicate revalidation produced a new modified object. ' + + 'UNMODIFIED is only seen on REFRESH responses to indicate revalidation produced a 304 (Not Modified) status, which was relayed to the client. ' + + 'REDIRECT when squid generated an HTTP redirect response to this request.' + }, + + 'web_log.squid_cache_events': { + info: 'These tags are optional and describe whether the response was loaded from cache, network, or otherwise. ' + + 'HIT when the response object delivered was the local cache object. ' + + 'MEM when the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses. ' + + 'MISS when the response object delivered was the network response object. ' + + 'DENIED when the request was denied by access controls. ' + + 'NOFETCH, an ICP specific type, indicating service is alive, but not to be used for this request (sent during "-Y" startup, or during frequent failures, a cache in hit only mode will return either UDP_HIT or UDP_MISS_NOFETCH. Neighbours will thus only fetch hits). ' + + 'TUNNEL when a binary tunnel was established for this transaction.' + }, + + 'web_log.squid_transport_errors': { + info: 'These tags are optional and describe some error conditions which occurred during response delivery (if any). ' + + 'ABORTED when the response was not completed due to the connection being aborted (usually by the client). ' + + 'TIMEOUT when the response was not completed due to a connection timeout.' + }, + + // ------------------------------------------------------------------------ + // Fronius Solar Power + + 'fronius.power': { + info: 'Positive Grid values mean that power is coming from the grid. Negative values are excess power that is going back into the grid, possibly being sold. ' + + 'Photovoltaics is the power generated from the solar panels. ' + + 'Accumulator is the stored power in the accumulator, if one is present.' + }, + + 'fronius.autonomy': { + commonMin: true, + commonMax: true, + valueRange: "[0, 100]", + info: 'The Autonomy is the percentage of how autonomous the installation is. An autonomy of 100 % means that the installation is producing more energy than is needed. ' + + 'The Self consumption indicates the ratio between the current power generated and the current load. When it reaches 100 %, the Autonomy declines, since the solar panels cannot produce enough energy and need support from the grid.' + }, + + 'fronius.energy.today': { + commonMin: true, + commonMax: true, + valueRange: "[0, null]" + }, + + // ------------------------------------------------------------------------ + // Stiebel Eltron Heat pump installation + + 'stiebeleltron.system.roomtemp': { + commonMin: true, + commonMax: true, + valueRange: "[0, null]" + }, + + // ------------------------------------------------------------------------ + // Port check + + 'portcheck.latency': { + info: 'The latency describes the time spent connecting to a TCP port. No data is sent or received. ' + + 'Currently, the accuracy of the latency is low and should be used as reference only.' + }, + + 'portcheck.status': { + valueRange: "[0, 1]", + info: 'The status chart verifies the availability of the service. ' + + 'Each status dimension will have a value of 1 if triggered. Dimension success is 1 only if connection could be established. 
' + + 'This chart is most useful for alarms and third-party apps.' + }, + + // ------------------------------------------------------------------------ + + 'chrony.system': { + info: 'In normal operation, chronyd never steps the system clock, because any jump in the timescale can have adverse consequences for certain application programs. Instead, any error in the system clock is corrected by slightly speeding up or slowing down the system clock until the error has been removed, and then returning to the system clock\'s normal speed. A consequence of this is that there will be a period when the system clock (as read by other programs using the gettimeofday() system call, or by the date command in the shell) will be different from chronyd\'s estimate of the current true time (which it reports to NTP clients when it is operating in server mode). The value reported on this line is the difference due to this effect.', + colors: NETDATA.colors[3] + }, + + 'chrony.offsets': { + info: 'last offset is the estimated local offset on the last clock update. RMS offset is a long-term average of the offset value.', + height: 0.5 + }, + + 'chrony.stratum': { + info: 'The stratum indicates how many hops away from a computer with an attached reference clock we are. Such a computer is a stratum-1 computer.', + decimalDigits: 0, + height: 0.5 + }, + + 'chrony.root': { + info: 'Estimated delays against the root time server this system is synchronised with. delay is the total of the network path delays to the stratum-1 computer from which the computer is ultimately synchronised. dispersion is the total dispersion accumulated through all the computers back to the stratum-1 computer from which the computer is ultimately synchronised. Dispersion is due to system clock resolution, statistical measurement variations, etc.' + }, + + 'chrony.frequency': { + info: 'The frequency is the rate by which the system\'s clock would be wrong if chronyd was not correcting it. It is expressed in ppm (parts per million). For example, a value of 1ppm would mean that when the system\'s clock thinks it has advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.', + colors: NETDATA.colors[0] + }, + + 'chrony.residualfreq': { + info: 'This shows the residual frequency for the currently selected reference source. ' + + 'It reflects any difference between what the measurements from the reference source indicate the ' + + 'frequency should be and the frequency currently being used. The reason this is not always zero is ' + + 'that a smoothing procedure is applied to the frequency. Each time a measurement from the reference ' + + 'source is obtained and a new residual frequency computed, the estimated accuracy of this residual ' + + 'is compared with the estimated accuracy (see skew) of the existing frequency value. ' + + 'A weighted average is computed for the new frequency, with weights depending on these accuracies. ' + + 'If the measurements from the reference source follow a consistent trend, the residual will be ' + + 'driven to zero over time.', + height: 0.5, + colors: NETDATA.colors[3] + }, + + 'chrony.skew': { + info: 'The estimated error bound on the frequency.', + height: 0.5, + colors: NETDATA.colors[5] + }, + + 'couchdb.active_tasks': { + info: 'Active tasks running on this CouchDB cluster. Four types of tasks currently exist: indexer (view building), replication, database compaction and view compaction.' 
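+        // The task types listed above are what CouchDB reports through its
+        // /_active_tasks endpoint. As a minimal illustrative sketch (not part
+        // of this dashboard; `tasks` is assumed to be the parsed JSON array
+        // returned by that endpoint), the per-type counts could be derived
+        // like this:
+        //
+        //   var countsByType = tasks.reduce(function (acc, task) {
+        //       acc[task.type] = (acc[task.type] || 0) + 1; // e.g. 'indexer'
+        //       return acc;
+        //   }, {});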
+ }, + + 'couchdb.replicator_jobs': { + info: 'Detailed breakdown of any replication jobs in progress on this node. For more information, see the replicator documentation.' + }, + + 'couchdb.open_files': { + info: 'Count of all files held open by CouchDB. If this value seems pegged at 1024 or 4096, your server process is probably hitting the open file handle limit, which needs to be increased.' + }, + + 'btrfs.disk': { + info: 'Physical disk usage of BTRFS. The disk space reported here is the raw physical disk space assigned to the BTRFS volume (i.e. before any RAID levels). BTRFS uses a two-stage allocator, first allocating large regions of disk space for one type of block (data, metadata, or system), and then using a regular block allocator inside those regions. unallocated is the physical disk space that is not allocated yet and is available to become data, metadata or system on demand. When unallocated is zero, all available disk space has been allocated to a specific function. Healthy volumes should ideally have at least five percent of their total space unallocated. You can keep your volume healthy by running the btrfs balance command on it regularly (check man btrfs-balance for more info). Note that some of the space listed as unallocated may not actually be usable if the volume uses devices of different sizes.', + colors: [NETDATA.colors[12]] + }, + + 'btrfs.data': { + info: 'Logical disk usage for BTRFS data. Data chunks are used to store the actual file data (file contents). The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running btrfs balance can help here.' + }, + + 'btrfs.metadata': { + info: 'Logical disk usage for BTRFS metadata. Metadata chunks store most of the filesystem internal structures, as well as information like directory structure and file names. The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running btrfs balance can help here.' + }, + + 'btrfs.system': { + info: 'Logical disk usage for BTRFS system. System chunks store information about the allocation of other chunks. The disk space reported here is the usable allocation (i.e. after any striping or replication). The values reported here should be relatively small compared to Data and Metadata, and will scale with the volume size and overall space usage.' + }, + + // ------------------------------------------------------------------------ + // RabbitMQ + + // info: the text above the charts + // heads: the representation of the chart at the top of the subsection (second level menu) + // mainheads: the representation of the chart at the top of the section (first level menu) + // colors: the dimension colors of the chart (the default colors are appended) + // height: the ratio of the chart height relative to the default + + 'rabbitmq.queued_messages': { + info: 'Overall total of ready and unacknowledged queued messages. Messages that are delivered immediately are not counted here.' + }, + + 'rabbitmq.message_rates': { + info: 'Overall messaging rates including acknowledgements, deliveries, redeliveries, and publishes.' + }, + + 'rabbitmq.global_counts': { + info: 'Overall totals for channels, consumers, connections, queues and exchanges.' + }, + + 'rabbitmq.file_descriptors': { + info: 'Total number of used file descriptors. 
See Open File Limits for further details.', + colors: NETDATA.colors[3] + }, + + 'rabbitmq.sockets': { + info: 'Total number of used socket descriptors. Each used socket also counts as a used file descriptor. See Open File Limits for further details.', + colors: NETDATA.colors[3] + }, + + 'rabbitmq.processes': { + info: 'Total number of processes running within the Erlang VM. This is not the same as the number of processes running on the host.', + colors: NETDATA.colors[3] + }, + + 'rabbitmq.erlang_run_queue': { + info: 'Number of Erlang processes the Erlang schedulers have queued to run.', + colors: NETDATA.colors[3] + }, + + 'rabbitmq.memory': { + info: 'Total amount of memory used by RabbitMQ. This is a complex statistic that can be further analyzed in the management UI. See Memory for further details.', + colors: NETDATA.colors[3] + }, + + 'rabbitmq.disk_space': { + info: 'Total amount of disk space consumed by the message store(s). See Disk Space Limits for further details.', + colors: NETDATA.colors[3] + }, + + // ------------------------------------------------------------------------ + // ntpd + + 'ntpd.sys_offset': { + info: 'For hosts without any time critical services an offset of < 100 ms should be acceptable even with high network latencies. For hosts with time critical services an offset of about 0.01 ms or less can be achieved by using peers with low delays and configuring optimal poll exponent values.', + colors: NETDATA.colors[4] + }, + + 'ntpd.sys_jitter': { + info: 'The jitter statistics are exponentially-weighted RMS averages. The system jitter is defined in the NTPv4 specification; the clock jitter statistic is computed by the clock discipline module.' + }, + + 'ntpd.sys_frequency': { + info: 'The frequency offset is shown in ppm (parts per million) relative to the frequency of the system. The frequency correction needed for the clock can vary significantly between boots and also due to external influences like temperature or radiation.', + colors: NETDATA.colors[2], + height: 0.6 + }, + + 'ntpd.sys_wander': { + info: 'The wander statistics are exponentially-weighted RMS averages.', + colors: NETDATA.colors[3], + height: 0.6 + }, + + 'ntpd.sys_rootdelay': { + info: 'The rootdelay is the round-trip delay to the primary reference clock, similar to the delay shown by the ping command. A lower delay should result in a lower clock offset.', + colors: NETDATA.colors[1] + }, + + 'ntpd.sys_stratum': { + info: 'The distance in "hops" to the primary reference clock.', + colors: NETDATA.colors[5], + height: 0.3 + }, + + 'ntpd.sys_tc': { + info: 'Time constants and poll intervals are expressed as exponents of 2. The default poll exponent of 6 corresponds to a poll interval of 64 s. For typical Internet paths, the optimum poll interval is about 64 s. For fast LANs with modern computers, a poll exponent of 4 (16 s) is appropriate. The poll process sends NTP packets at intervals determined by the clock discipline algorithm.', + height: 0.5 + }, + + 'ntpd.sys_precision': { + colors: NETDATA.colors[6], + height: 0.2 + }, + + 'ntpd.peer_offset': { + info: 'The offset of the peer clock relative to the system clock in milliseconds. Smaller values here weight peers more heavily for selection after the initial synchronization of the local clock. For a system providing time service to other systems, these should be as low as possible.' + }, + + 'ntpd.peer_delay': { + info: 'The round-trip time (RTT) for communication with the peer, similar to the delay shown by the ping command. 
Not as critical as either the offset or jitter, but still factored into the selection algorithm (because as a general rule, lower delay means more accurate time). In most cases, it should be below 100ms.' + }, + + 'ntpd.peer_dispersion': { + info: 'This is a measure of the estimated error between the peer and the local system. Lower values here are better.' + }, + + 'ntpd.peer_jitter': { + info: 'This is essentially a remote estimate of the peer\'s system_jitter value. Lower values here weight highly in favor of peer selection, and this is a good indicator of overall quality of a given time server (good servers will have values not exceeding single digit milliseconds here, with high quality stratum one servers regularly having sub-millisecond jitter).' + }, + + 'ntpd.peer_xleave': { + info: 'This variable is used in interleaved mode (used only in NTP symmetric and broadcast modes). See NTP Interleaved Modes.' + }, + + 'ntpd.peer_rootdelay': { + info: 'For a stratum 1 server, this is the access latency for the reference clock. For lower stratum servers, it is the sum of the peer_delay and peer_rootdelay for the system they are syncing off of. Similarly to peer_delay, lower values here are technically better, but have limited influence in peer selection.' + }, + + 'ntpd.peer_rootdisp': { + info: 'Is the same as peer_rootdelay, but measures accumulated peer_dispersion instead of accumulated peer_delay.' + }, + + 'ntpd.peer_hmode': { + info: 'The peer_hmode and peer_pmode variables indicate the mode of the packets being sent to and received from a given peer. Mode 1 is symmetric active (both the local system and the remote peer have each other declared as peers in /etc/ntp.conf), Mode 2 is symmetric passive (only one side has the other declared as a peer), Mode 3 is client, Mode 4 is server, and Mode 5 is broadcast (also used for multicast and manycast operation).', + height: 0.2 + }, + + 'ntpd.peer_pmode': { + height: 0.2 + }, + + 'ntpd.peer_hpoll': { + info: 'The peer_hpoll and peer_ppoll variables are log2 representations of the polling interval in seconds.', + height: 0.5 + }, + + 'ntpd.peer_ppoll': { + height: 0.5 + }, + + 'ntpd.peer_precision': { + height: 0.2 + }, + + 'spigotmc.tps': { + info: 'The running 1, 5, and 15 minute average number of server ticks per second. An idealized server will show 20.0 for all values, but in practice this almost never happens. Typical servers should show approximately 19.98-20.0 here. Lower values indicate progressively more server-side lag (and thus that you need better hardware for your server or a lower user limit). For every 0.05 ticks below 20, redstone clocks will lag behind by approximately 0.25%. Values below approximately 19.50 may interfere with complex free-running redstone circuits and will noticeably slow down growth.' + }, + + 'spigotmc.users': { + info: 'The number of currently connected users on the monitored Spigot server.' + }, + + 'unbound.queries': { + info: 'Shows the number of queries being processed of each type. Note that Recursive queries are also counted as cache misses.' + }, + + 'unbound.reqlist': { + info: 'Shows various stats about Unbound\'s internal request list.' + }, + + 'unbound.recursion': { + info: 'Average and median time to complete recursive name resolution.' + }, + + 'unbound.cache': { + info: 'The number of items in each of the various caches.' 
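+        // These counters map to `unbound-control stats` style output (for
+        // example the msg.cache.count and rrset.cache.count lines). A minimal
+        // sketch of pulling one value out of such 'key=value' text
+        // (illustrative only; `statsText` is a hypothetical string, not
+        // something this file actually reads):
+        //
+        //   function statValue(statsText, key) {
+        //       var line = statsText.split('\n').find(function (l) {
+        //           return l.indexOf(key + '=') === 0;
+        //       });
+        //       return line ? parseInt(line.split('=')[1], 10) : null;
+        //   }
+        //   var msgCacheCount = statValue(statsText, 'msg.cache.count');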
+ }, + + 'unbound.threads.queries': { + height: 0.2 + }, + + 'unbound.threads.reqlist': { + height: 0.2 + }, + + 'unbound.threads.recursion': { + height: 0.2 + }, + + 'boinc.tasks': { + info: 'The total number of tasks and the number of active tasks. Active tasks are those which are either currently being processed, or are partially processed but suspended.' + }, + + 'boinc.states': { + info: 'Counts of tasks in each task state. The normal sequence of states is New, Downloading, Ready to Run, Uploading, Uploaded. Tasks which are marked Ready to Run may be actively running, or may be waiting to be scheduled. Compute Errors are tasks which failed for some reason during execution. Aborted tasks were manually cancelled, and will not be processed. Failed Uploads are otherwise finished tasks which failed to upload to the server, and usually indicate networking issues.' + }, + + 'boinc.sched': { + info: 'Counts of active tasks in each scheduling state. Scheduled tasks are the ones which will run if the system is permitted to process tasks. Preempted tasks are on standby, and will run if a Scheduled task stops running for some reason. Uninitialized tasks should never be present, and indicate that the scheduler has not tried to schedule them yet.' + }, + + 'boinc.process': { + info: 'Counts of active tasks in each process state. Executing tasks are running right now. Suspended tasks have an associated process, but are not currently running (either because the system isn\'t processing any tasks right now, or because they have been preempted by higher priority tasks). Quit tasks are exiting gracefully. Aborted tasks exceeded some resource limit, and are being shut down. Copy Pending tasks are waiting on a background file transfer to finish. Uninitialized tasks do not have an associated process yet.' + }, + + 'w1sensor.temp': { + info: 'Temperature derived from 1-Wire temperature sensors.' + }, + + 'logind.sessions': { + info: 'Shows the number of active sessions of each type tracked by logind.' + }, + + 'logind.users': { + info: 'Shows the number of active users of each type tracked by logind.' + }, + + 'logind.seats': { + info: 'Shows the number of active seats tracked by logind. Each seat corresponds to a combination of a display device and input device providing a physical presence for the system.' + }, + + // ------------------------------------------------------------------------ + // ProxySQL + + 'proxysql.pool_status': { + info: 'The status of the backend servers. ' + + '1=ONLINE backend server is fully operational, ' + + '2=SHUNNED backend server is temporarily taken out of use because of either too many connection errors in a time that was too short, or replication lag exceeded the allowed threshold, ' + + '3=OFFLINE_SOFT when a server is put into OFFLINE_SOFT mode, new incoming connections aren\'t accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be gracefully detached, ' + + '4=OFFLINE_HARD when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren\'t accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work, ' + + '-1 Unknown status.' 
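+        // A minimal sketch of mapping these numeric states to their labels,
+        // taken directly from the description above (illustrative only; the
+        // dashboard itself does not use this helper):
+        //
+        //   var PROXYSQL_POOL_STATUS = {
+        //       1: 'ONLINE', 2: 'SHUNNED', 3: 'OFFLINE_SOFT',
+        //       4: 'OFFLINE_HARD', '-1': 'Unknown'
+        //   };
+        //   function poolStatusLabel(value) {
+        //       return PROXYSQL_POOL_STATUS[value] || 'Unknown';
+        //   }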
+ }, + + 'proxysql.pool_net': { + info: 'The amount of data sent to/received from the backend ' + + '(this does not include metadata: packets\' headers, OK/ERR packets, fields\' descriptions, etc.).' + }, + + 'proxysql.pool_overall_net': { + info: 'The amount of data sent to/received from all the backends ' + + '(this does not include metadata: packets\' headers, OK/ERR packets, fields\' descriptions, etc.).' + }, + + 'proxysql.questions': { + info: 'questions the total number of queries sent from frontends, ' + + 'slow_queries the number of queries that ran for longer than the threshold in milliseconds defined in the global variable mysql-long_query_time. ' + }, + + 'proxysql.connections': { + info: 'aborted the number of frontend connections aborted due to invalid credentials or max_connections reached, ' + + 'connected the number of frontend connections currently connected, ' + + 'created the number of frontend connections created, ' + + 'non_idle the number of frontend connections that are not currently idle. ' + }, + + 'proxysql.pool_latency': { + info: 'The current ping time in microseconds, as reported by Monitor.' + }, + + 'proxysql.queries': { + info: 'The number of queries routed towards this particular backend server.' + }, + + 'proxysql.pool_used_connections': { + info: 'The number of connections currently used by ProxySQL for sending queries to the backend server.' + }, + + 'proxysql.pool_free_connections': { + info: 'The number of connections currently free. They are kept open in order to minimize the time cost of sending a query to the backend server.' + }, + + 'proxysql.pool_ok_connections': { + info: 'The number of connections that were established successfully.' + }, + + 'proxysql.pool_error_connections': { + info: 'The number of connections that failed to be established.' + }, + + 'proxysql.commands_count': { + info: 'The total number of commands of that type executed.' + }, + + 'proxysql.commands_duration': { + info: 'The total time spent executing commands of that type, in milliseconds.' + } + + // ------------------------------------------------------------------------ +}; diff --git a/web/gui/dashboard_info_custom_example.js b/web/gui/dashboard_info_custom_example.js new file mode 100644 index 000000000..51ce0be22 --- /dev/null +++ b/web/gui/dashboard_info_custom_example.js @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +/* + * Custom netdata information file + * ------------------------------- + * + * Use this file to add custom information on netdata dashboards: + * + * 1. Copy it to a new filename (so that it will not be overwritten by netdata updates) + * 2. Edit it to fit your needs + * 3. Set the following option in /etc/netdata/netdata.conf : + * + * [web] + * custom dashboard_info.js = your_filename.js + * + * Using this file you can: + * + * 1. Overwrite or add messages to menus, submenus and charts. + *    Use dashboard_info.js to find out what you can define. + * + * 2. Inject javascript code into the default netdata dashboard.
+ * + */ + +// ---------------------------------------------------------------------------- +// MENU +// +// - title the menu title as to be rendered at the charts menu +// - icon html fragment of the icon to display +// - info html fragment for the description above all the menu charts + +customDashboard.menu = { + +}; + + +// ---------------------------------------------------------------------------- +// SUBMENU +// +// - title the submenu title as to be rendered at the charts menu +// - info html fragment for the description above all the submenu charts + +customDashboard.submenu = { + +}; + + +// ---------------------------------------------------------------------------- +// CONTEXT (the template each chart is based on) +// +// - info html fragment for the description above the chart +// - height a ratio to the default as a decimal number: 1.0 = 100% +// - colors a single color or an array of colors to use for the dimensions +// - valuerange the y-range of the chart as an array [min, max] +// - heads an array of gauge charts to render above the submenu section +// - mainheads an array of gauge charts to render at the menu section + +customDashboard.context = { + +}; diff --git a/web/gui/favicon.ico b/web/gui/favicon.ico new file mode 100644 index 000000000..821f7c402 Binary files /dev/null and b/web/gui/favicon.ico differ diff --git a/web/gui/goto-host-from-alarm.html b/web/gui/goto-host-from-alarm.html new file mode 100644 index 000000000..5eb66b5d0 --- /dev/null +++ b/web/gui/goto-host-from-alarm.html @@ -0,0 +1,244 @@ + + + + + Goto a host you know... + + + + + + + + + + + + + + + +
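To make the customization hooks above concrete, a filled-in copy of web/gui/dashboard_info_custom_example.js might look like the sketch below. The file name my_dashboard_info.js, the 'system' menu and the 'system.cpu' context are examples only, and every value shown is a placeholder:

    // my_dashboard_info.js -- a hypothetical customized copy, enabled by setting,
    // in the [web] section of /etc/netdata/netdata.conf:
    //     custom dashboard_info.js = my_dashboard_info.js
    customDashboard.menu = {
        'system': {
            info: 'Core system metrics for this production host.'
        }
    };

    customDashboard.submenu = {};

    customDashboard.context = {
        'system.cpu': {
            info: 'Total CPU utilization across all cores.',
            height: 1.5,            // render at 150% of the default chart height
            colors: '#22AA99',      // a single color applied to all dimensions
            valuerange: [0, 100]    // pin the y-axis to 0-100
        }
    };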
+
+ Please wait... + +
+ +
+
+

+ This page can only find netdata URLs that you have already visited and that are linked to your account on this netdata registry. +

+
+
+ +
+ + diff --git a/web/gui/images/alert-128-orange.png b/web/gui/images/alert-128-orange.png new file mode 100644 index 000000000..c6182bfad Binary files /dev/null and b/web/gui/images/alert-128-orange.png differ diff --git a/web/gui/images/alert-128-red.png b/web/gui/images/alert-128-red.png new file mode 100644 index 000000000..90b9c73e6 Binary files /dev/null and b/web/gui/images/alert-128-red.png differ diff --git a/web/gui/images/alert-multi-size-orange.ico b/web/gui/images/alert-multi-size-orange.ico new file mode 100644 index 000000000..edca43871 Binary files /dev/null and b/web/gui/images/alert-multi-size-orange.ico differ diff --git a/web/gui/images/alert-multi-size-red.ico b/web/gui/images/alert-multi-size-red.ico new file mode 100644 index 000000000..8f7cbd069 Binary files /dev/null and b/web/gui/images/alert-multi-size-red.ico differ diff --git a/web/gui/images/animated.gif b/web/gui/images/animated.gif new file mode 100644 index 000000000..0e94a20ba Binary files /dev/null and b/web/gui/images/animated.gif differ diff --git a/web/gui/images/check-mark-2-128-green.png b/web/gui/images/check-mark-2-128-green.png new file mode 100644 index 000000000..e04ddca12 Binary files /dev/null and b/web/gui/images/check-mark-2-128-green.png differ diff --git a/web/gui/images/check-mark-2-multi-size-green.ico b/web/gui/images/check-mark-2-multi-size-green.ico new file mode 100644 index 000000000..2fc414113 Binary files /dev/null and b/web/gui/images/check-mark-2-multi-size-green.ico differ diff --git a/web/gui/images/netdata.svg b/web/gui/images/netdata.svg new file mode 100644 index 000000000..f8ddbda19 --- /dev/null +++ b/web/gui/images/netdata.svg @@ -0,0 +1,18 @@ + + + + + + Created by potrace 1.15, written by Peter Selinger 2001-2017 + + + + + + + + + + diff --git a/web/gui/images/post.png b/web/gui/images/post.png new file mode 100644 index 000000000..6bad54742 Binary files /dev/null and b/web/gui/images/post.png differ diff --git a/web/gui/images/seo-performance-114.png b/web/gui/images/seo-performance-114.png new file mode 100644 index 000000000..3f3862b3b Binary files /dev/null and b/web/gui/images/seo-performance-114.png differ diff --git a/web/gui/images/seo-performance-128.png b/web/gui/images/seo-performance-128.png new file mode 100644 index 000000000..2a212a475 Binary files /dev/null and b/web/gui/images/seo-performance-128.png differ diff --git a/web/gui/images/seo-performance-16.png b/web/gui/images/seo-performance-16.png new file mode 100644 index 000000000..6d7f075ec Binary files /dev/null and b/web/gui/images/seo-performance-16.png differ diff --git a/web/gui/images/seo-performance-24.png b/web/gui/images/seo-performance-24.png new file mode 100644 index 000000000..32d077ef1 Binary files /dev/null and b/web/gui/images/seo-performance-24.png differ diff --git a/web/gui/images/seo-performance-256.png b/web/gui/images/seo-performance-256.png new file mode 100644 index 000000000..07abfa01c Binary files /dev/null and b/web/gui/images/seo-performance-256.png differ diff --git a/web/gui/images/seo-performance-32.png b/web/gui/images/seo-performance-32.png new file mode 100644 index 000000000..a39543cfb Binary files /dev/null and b/web/gui/images/seo-performance-32.png differ diff --git a/web/gui/images/seo-performance-48.png b/web/gui/images/seo-performance-48.png new file mode 100644 index 000000000..6dab89e92 Binary files /dev/null and b/web/gui/images/seo-performance-48.png differ diff --git a/web/gui/images/seo-performance-512.png b/web/gui/images/seo-performance-512.png new 
file mode 100644 index 000000000..1f8c16410 Binary files /dev/null and b/web/gui/images/seo-performance-512.png differ diff --git a/web/gui/images/seo-performance-64.png b/web/gui/images/seo-performance-64.png new file mode 100644 index 000000000..e79f3b35b Binary files /dev/null and b/web/gui/images/seo-performance-64.png differ diff --git a/web/gui/images/seo-performance-72.png b/web/gui/images/seo-performance-72.png new file mode 100644 index 000000000..a4c9efb30 Binary files /dev/null and b/web/gui/images/seo-performance-72.png differ diff --git a/web/gui/images/seo-performance-multi-size.icns b/web/gui/images/seo-performance-multi-size.icns new file mode 100644 index 000000000..2e1a884fb Binary files /dev/null and b/web/gui/images/seo-performance-multi-size.icns differ diff --git a/web/gui/images/seo-performance-multi-size.ico b/web/gui/images/seo-performance-multi-size.ico new file mode 100644 index 000000000..821f7c402 Binary files /dev/null and b/web/gui/images/seo-performance-multi-size.ico differ diff --git a/web/gui/index.html b/web/gui/index.html new file mode 100644 index 000000000..0a01b1df9 --- /dev/null +++ b/web/gui/index.html @@ -0,0 +1,5791 @@ + + + + + netdata dashboard + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ netdata
Real-time performance monitoring, done right!
+
+ + + + + + +
+
+
+
+
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/web/gui/infographic.html b/web/gui/infographic.html new file mode 100644 index 000000000..b3112781b --- /dev/null +++ b/web/gui/infographic.html @@ -0,0 +1,171 @@ + + + + + + NetData: Get control of your Linux Servers. Simple. Effective. Awesome. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+

+ Interactive infographic of netdata features and functions +

+

+ Hover over and click on the infographic to open the related wiki page. +
+ + The links and the docs are still a work in progress. + The interactive infographic is a feature of draw.io. + +

+
+ +
+

+ New to netdata? Have a look at a netdata demo. You will love it! +

+

+ + + +

+
+
+
+
+
+
+ + + + + + + + + +
+ + + + + + diff --git a/web/gui/lib/bootstrap-3.3.7.min.js b/web/gui/lib/bootstrap-3.3.7.min.js new file mode 100644 index 000000000..03a97168a --- /dev/null +++ b/web/gui/lib/bootstrap-3.3.7.min.js @@ -0,0 +1,8 @@ +/*! + * Bootstrap v3.3.7 (http://getbootstrap.com) + * Copyright 2011-2016 Twitter, Inc. + * Licensed under the MIT license + * SPDX-License-Identifier: MIT + */ +if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1||b[0]>3)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b.target).is(this))return b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a("#"===f?[]:f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c).prop(c,!0)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c).prop(c,!1))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var 
c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target).closest(".btn");b.call(d,"toggle"),a(c.target).is('input[type="radio"], input[type="checkbox"]')||(c.preventDefault(),d.is("input,button")?d.trigger("focus"):d.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(a>this.$items.length-1||a<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(b,d){var 
e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.7",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var 
i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass("open")&&(c&&"click"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event("hide.bs.dropdown",f)),c.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",f)))))}))}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.7",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=b(e),g=f.hasClass("open");if(c(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",c);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger(a.Event("shown.bs.dropdown",h))}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(".disabled, :disabled")){var e=b(d),g=e.hasClass("open");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.disabled):visible a",i=e.find(".dropdown-menu"+h);if(i.length){var 
j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&jdocument.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth
',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when initializing "+this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusin"==b.type?"focus":"hover"]=!0),c.tip().hasClass("in")||"in"==c.hoverState?void(c.hoverState="in"):(clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);if(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusout"==b.type?"focus":"hover"]=!1),!c.isInStateTrue())return clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var 
k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.getPosition(this.$viewport);h="bottom"==h&&k.bottom+m>o.bottom?"top":"top"==h&&k.top-mo.width?"left":"left"==h&&k.left-lg.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;jg.right&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){if(!this.$tip&&(this.$tip=a(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),b?(c.inState.click=!c.inState.click,c.isInStateTrue()?c.enter(c):c.leave(c)):c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type),a.$tip&&a.$tip.detach(),a.$tip=null,a.$arrow=null,a.$viewport=null,a.$element=null})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.7",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:''}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > 
a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.7",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b=e[a]&&(void 0===e[a+1]||b .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.7",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return e=a-d&&"bottom"},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var 
b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=Math.max(a(document).height(),a(document.body).height());"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery); diff --git a/web/gui/lib/bootstrap-slider-10.0.0.min.js b/web/gui/lib/bootstrap-slider-10.0.0.min.js new file mode 100644 index 000000000..87e834908 --- /dev/null +++ b/web/gui/lib/bootstrap-slider-10.0.0.min.js @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +/*! ======================================================= + VERSION 10.0.0 +========================================================= */ +"use strict";var _typeof="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(a){return typeof a}:function(a){return a&&"function"==typeof Symbol&&a.constructor===Symbol&&a!==Symbol.prototype?"symbol":typeof a},windowIsDefined="object"===("undefined"==typeof window?"undefined":_typeof(window));!function(a){if("function"==typeof define&&define.amd)define(["jquery"],a);else if("object"===("undefined"==typeof module?"undefined":_typeof(module))&&module.exports){var b;try{b=require("jquery")}catch(c){b=null}module.exports=a(b)}else window&&(window.Slider=a(window.jQuery))}(function(a){var b="slider",c="bootstrapSlider";windowIsDefined&&!window.console&&(window.console={}),windowIsDefined&&!window.console.log&&(window.console.log=function(){}),windowIsDefined&&!window.console.warn&&(window.console.warn=function(){});var d;return function(a){function b(){}function c(a){function c(b){b.prototype.option||(b.prototype.option=function(b){a.isPlainObject(b)&&(this.options=a.extend(!0,this.options,b))})}function e(b,c){a.fn[b]=function(e){if("string"==typeof e){for(var g=d.call(arguments,1),h=0,i=this.length;i>h;h++){var j=this[h],k=a.data(j,b);if(k)if(a.isFunction(k[e])&&"_"!==e.charAt(0)){var l=k[e].apply(k,g);if(void 0!==l&&l!==k)return l}else f("no such method '"+e+"' for "+b+" instance");else f("cannot call methods on "+b+" prior to initialization; attempted to call '"+e+"'")}return this}var m=this.map(function(){var d=a.data(this,b);return d?(d.option(e),d._init()):(d=new c(this,e),a.data(this,b,d)),a(this)});return!m||m.length>1?m:m[0]}}if(a){var f="undefined"==typeof console?b:function(a){console.error(a)};return a.bridget=function(a,b){c(b),e(a,b)},a.bridget}}var d=Array.prototype.slice;c(a)}(a),function(a){function e(b,c){function d(a,b){var c="data-slider-"+b.replace(/_/g,"-"),d=a.getAttribute(c);try{return JSON.parse(d)}catch(e){return d}}this._state={value:null,enabled:null,offset:null,size:null,percentage:null,inDrag:!1,over:!1},this.ticksCallbackMap={},this.handleCallbackMap={},"string"==typeof b?this.element=document.querySelector(b):b instanceof 
HTMLElement&&(this.element=b),c=c?c:{};for(var e=Object.keys(this.defaultOptions),f=0;f0)for(var t=0;t0){for(this.ticksContainer=document.createElement("div"),this.ticksContainer.className="slider-tick-container",f=0;f0)for(this.tickLabelContainer=document.createElement("div"),this.tickLabelContainer.className="slider-tick-label-container",f=0;f0&&(this.options.max=Math.max.apply(Math,this.options.ticks),this.options.min=Math.min.apply(Math,this.options.ticks)),Array.isArray(this.options.value)?(this.options.range=!0,this._state.value=this.options.value):this.options.range?this._state.value=[this.options.value,this.options.max]:this._state.value=this.options.value,this.trackLow=k||this.trackLow,this.trackSelection=j||this.trackSelection,this.trackHigh=l||this.trackHigh,"none"===this.options.selection?(this._addClass(this.trackLow,"hide"),this._addClass(this.trackSelection,"hide"),this._addClass(this.trackHigh,"hide")):("after"===this.options.selection||"before"===this.options.selection)&&(this._removeClass(this.trackLow,"hide"),this._removeClass(this.trackSelection,"hide"),this._removeClass(this.trackHigh,"hide")),this.handle1=m||this.handle1,this.handle2=n||this.handle2,p===!0)for(this._removeClass(this.handle1,"round triangle"),this._removeClass(this.handle2,"round triangle hide"),f=0;f0){for(var d,e,f,g=0,h=1;hthis.options.max?this.options.max:k},toPercentage:function(a){if(this.options.max===this.options.min)return 0;if(this.options.ticks_positions.length>0){for(var b,c,d,e=0,f=0;f0?this.options.ticks[f-1]:0,d=f>0?this.options.ticks_positions[f-1]:0,c=this.options.ticks[f],e=this.options.ticks_positions[f];break}if(f>0){var g=(a-b)/(c-b);return d+g*(e-d)}}return 100*(a-this.options.min)/(this.options.max-this.options.min)}},logarithmic:{toValue:function(a){var b=0===this.options.min?0:Math.log(this.options.min),c=Math.log(this.options.max),d=Math.exp(b+(c-b)*a/100);return Math.round(d)===this.options.max?this.options.max:(d=this.options.min+Math.round((d-this.options.min)/this.options.step)*this.options.step,dthis.options.max?this.options.max:d)},toPercentage:function(a){if(this.options.max===this.options.min)return 0;var b=Math.log(this.options.max),c=0===this.options.min?0:Math.log(this.options.min),d=0===a?0:Math.log(a);return 100*(d-c)/(b-c)}}};if(d=function(a,b){return e.call(this,a,b),this},d.prototype={_init:function(){},constructor:d,defaultOptions:{id:"",min:0,max:10,step:1,precision:0,orientation:"horizontal",value:5,range:!1,selection:"before",tooltip:"show",tooltip_split:!1,handle:"round",reversed:!1,rtl:"auto",enabled:!0,formatter:function(a){return Array.isArray(a)?a[0]+" : "+a[1]:a},natural_arrow_keys:!1,ticks:[],ticks_positions:[],ticks_labels:[],ticks_snap_bounds:0,ticks_tooltip:!1,scale:"linear",focus:!1,tooltip_position:null,labelledby:null,rangeHighlights:[]},getElement:function(){return this.sliderElem},getValue:function(){return this.options.range?this._state.value:this._state.value[0]},setValue:function(a,b,c){a||(a=0);var d=this.getValue();this._state.value=this._validateInputValue(a);var 
e=this._applyPrecision.bind(this);this.options.range?(this._state.value[0]=e(this._state.value[0]),this._state.value[1]=e(this._state.value[1]),this._state.value[0]=Math.max(this.options.min,Math.min(this.options.max,this._state.value[0])),this._state.value[1]=Math.max(this.options.min,Math.min(this.options.max,this._state.value[1]))):(this._state.value=e(this._state.value),this._state.value=[Math.max(this.options.min,Math.min(this.options.max,this._state.value))],this._addClass(this.handle2,"hide"),"after"===this.options.selection?this._state.value[1]=this.options.max:this._state.value[1]=this.options.min),this.options.max>this.options.min?this._state.percentage=[this._toPercentage(this._state.value[0]),this._toPercentage(this._state.value[1]),100*this.options.step/(this.options.max-this.options.min)]:this._state.percentage=[0,0,100],this._layout();var f=this.options.range?this._state.value:this._state.value[0];return this._setDataVal(f),b===!0&&this._trigger("slide",f),d!==f&&c===!0&&this._trigger("change",{oldValue:d,newValue:f}),this},destroy:function(){this._removeSliderEventHandlers(),this.sliderElem.parentNode.removeChild(this.sliderElem),this.element.style.display="",this._cleanUpEventCallbacksMap(),this.element.removeAttribute("data"),a&&(this._unbindJQueryEventHandlers(),this.$element.removeData("slider"))},disable:function(){return this._state.enabled=!1,this.handle1.removeAttribute("tabindex"),this.handle2.removeAttribute("tabindex"),this._addClass(this.sliderElem,"slider-disabled"),this._trigger("slideDisabled"),this},enable:function(){return this._state.enabled=!0,this.handle1.setAttribute("tabindex",0),this.handle2.setAttribute("tabindex",0),this._removeClass(this.sliderElem,"slider-disabled"),this._trigger("slideEnabled"),this},toggle:function(){return this._state.enabled?this.disable():this.enable(),this},isEnabled:function(){return this._state.enabled},on:function(a,b){return this._bindNonQueryEventHandler(a,b),this},off:function(b,c){a?(this.$element.off(b,c),this.$sliderElem.off(b,c)):this._unbindNonQueryEventHandler(b,c)},getAttribute:function(a){return a?this.options[a]:this.options},setAttribute:function(a,b){return this.options[a]=b,this},refresh:function(){return this._removeSliderEventHandlers(),e.call(this,this.element,this.options),a&&a.data(this.element,"slider",this),this},relayout:function(){return this._resize(),this._layout(),this},_removeSliderEventHandlers:function(){if(this.handle1.removeEventListener("keydown",this.handle1Keydown,!1),this.handle2.removeEventListener("keydown",this.handle2Keydown,!1),this.options.ticks_tooltip){for(var a=this.ticksContainer.getElementsByClassName("slider-tick"),b=0;b=0?c:this.attributes["aria-valuenow"].value,e=parseInt(d,10);b.value[0]=e,b.percentage[0]=a.options.ticks_positions[e],a._setToolTipOnMouseOver(b),a._showTooltip()};return b.addEventListener("mouseenter",d,!1),d},addMouseLeave:function(a,b){var c=function(){a._hideTooltip()};return b.addEventListener("mouseleave",c,!1),c}}},_layout:function(){var 
a;if(a=this.options.reversed?[100-this._state.percentage[0],this.options.range?100-this._state.percentage[1]:this._state.percentage[1]]:[this._state.percentage[0],this._state.percentage[1]],this.handle1.style[this.stylePos]=a[0]+"%",this.handle1.setAttribute("aria-valuenow",this._state.value[0]),isNaN(this.options.formatter(this._state.value[0]))&&this.handle1.setAttribute("aria-valuetext",this.options.formatter(this._state.value[0])),this.handle2.style[this.stylePos]=a[1]+"%",this.handle2.setAttribute("aria-valuenow",this._state.value[1]),isNaN(this.options.formatter(this._state.value[1]))&&this.handle2.setAttribute("aria-valuetext",this.options.formatter(this._state.value[1])),this.rangeHighlightElements.length>0&&Array.isArray(this.options.rangeHighlights)&&this.options.rangeHighlights.length>0)for(var b=0;b0){var g,h="vertical"===this.options.orientation?"height":"width";g="vertical"===this.options.orientation?"marginTop":this.options.rtl?"marginRight":"marginLeft";var i=this._state.size/(this.options.ticks.length-1);if(this.tickLabelContainer){var j=0;if(0===this.options.ticks_positions.length)"vertical"!==this.options.orientation&&(this.tickLabelContainer.style[g]=-i/2+"px"),j=this.tickLabelContainer.offsetHeight;else for(k=0;kj&&(j=this.tickLabelContainer.childNodes[k].offsetHeight);"horizontal"===this.options.orientation&&(this.sliderElem.style.marginBottom=j+"px")}for(var k=0;k=a[0]&&l<=a[1]&&this._addClass(this.ticks[k],"in-selection"):"after"===this.options.selection&&l>=a[0]?this._addClass(this.ticks[k],"in-selection"):"before"===this.options.selection&&l<=a[0]&&this._addClass(this.ticks[k],"in-selection"),this.tickLabels[k]&&(this.tickLabels[k].style[h]=i+"px","vertical"!==this.options.orientation&&void 0!==this.options.ticks_positions[k]?(this.tickLabels[k].style.position="absolute",this.tickLabels[k].style[this.stylePos]=l+"%",this.tickLabels[k].style[g]=-i/2+"px"):"vertical"===this.options.orientation&&(this.options.rtl?this.tickLabels[k].style.marginRight=this.sliderElem.offsetWidth+"px":this.tickLabels[k].style.marginLeft=this.sliderElem.offsetWidth+"px",this.tickLabelContainer.style[g]=this.sliderElem.offsetWidth/2*-1+"px"))}}var m;if(this.options.range){m=this.options.formatter(this._state.value),this._setText(this.tooltipInner,m),this.tooltip.style[this.stylePos]=(a[1]+a[0])/2+"%";var n=this.options.formatter(this._state.value[0]);this._setText(this.tooltipInner_min,n);var o=this.options.formatter(this._state.value[1]);this._setText(this.tooltipInner_max,o),this.tooltip_min.style[this.stylePos]=a[0]+"%",this.tooltip_max.style[this.stylePos]=a[1]+"%"}else 
m=this.options.formatter(this._state.value[0]),this._setText(this.tooltipInner,m),this.tooltip.style[this.stylePos]=a[0]+"%";if("vertical"===this.options.orientation)this.trackLow.style.top="0",this.trackLow.style.height=Math.min(a[0],a[1])+"%",this.trackSelection.style.top=Math.min(a[0],a[1])+"%",this.trackSelection.style.height=Math.abs(a[0]-a[1])+"%",this.trackHigh.style.bottom="0",this.trackHigh.style.height=100-Math.min(a[0],a[1])-Math.abs(a[0]-a[1])+"%";else{"right"===this.stylePos?this.trackLow.style.right="0":this.trackLow.style.left="0",this.trackLow.style.width=Math.min(a[0],a[1])+"%","right"===this.stylePos?this.trackSelection.style.right=Math.min(a[0],a[1])+"%":this.trackSelection.style.left=Math.min(a[0],a[1])+"%",this.trackSelection.style.width=Math.abs(a[0]-a[1])+"%","right"===this.stylePos?this.trackHigh.style.left="0":this.trackHigh.style.right="0",this.trackHigh.style.width=100-Math.min(a[0],a[1])-Math.abs(a[0]-a[1])+"%";var p=this.tooltip_min.getBoundingClientRect(),q=this.tooltip_max.getBoundingClientRect();"bottom"===this.options.tooltip_position?p.right>q.left?(this._removeClass(this.tooltip_max,"bottom"),this._addClass(this.tooltip_max,"top"),this.tooltip_max.style.top="",this.tooltip_max.style.bottom="22px"):(this._removeClass(this.tooltip_max,"top"),this._addClass(this.tooltip_max,"bottom"),this.tooltip_max.style.top=this.tooltip_min.style.top,this.tooltip_max.style.bottom=""):p.right>q.left?(this._removeClass(this.tooltip_max,"top"),this._addClass(this.tooltip_max,"bottom"),this.tooltip_max.style.top="18px"):(this._removeClass(this.tooltip_max,"bottom"),this._addClass(this.tooltip_max,"top"),this.tooltip_max.style.top=this.tooltip_min.style.top)}},_createHighlightRange:function(a,b){return this._isHighlightRange(a,b)?a>b?{start:b,size:a-b}:{start:a,size:b-a}:null},_isHighlightRange:function(a,b){return a>=0&&100>=a&&b>=0&&100>=b?!0:!1},_resize:function(a){this._state.offset=this._offset(this.sliderElem),this._state.size=this.sliderElem[this.sizePos],this._layout()},_removeProperty:function(a,b){a.style.removeProperty?a.style.removeProperty(b):a.style.removeAttribute(b)},_mousedown:function(a){if(!this._state.enabled)return!1;this._state.offset=this._offset(this.sliderElem),this._state.size=this.sliderElem[this.sizePos];var b=this._getPercentage(a);if(this.options.range){var c=Math.abs(this._state.percentage[0]-b),d=Math.abs(this._state.percentage[1]-b);this._state.dragged=d>c?0:1,this._adjustPercentageForRangeSliders(b)}else this._state.dragged=0;this._state.percentage[this._state.dragged]=b,this._layout(),this.touchCapable&&(document.removeEventListener("touchmove",this.mousemove,!1),document.removeEventListener("touchend",this.mouseup,!1)),this.mousemove&&document.removeEventListener("mousemove",this.mousemove,!1),this.mouseup&&document.removeEventListener("mouseup",this.mouseup,!1),this.mousemove=this._mousemove.bind(this),this.mouseup=this._mouseup.bind(this),this.touchCapable&&(document.addEventListener("touchmove",this.mousemove,!1),document.addEventListener("touchend",this.mouseup,!1)),document.addEventListener("mousemove",this.mousemove,!1),document.addEventListener("mouseup",this.mouseup,!1),this._state.inDrag=!0;var e=this._calculateValue();return this._trigger("slideStart",e),this._setDataVal(e),this.setValue(e,!1,!0),a.returnValue=!1,this.options.focus&&this._triggerFocusOnHandle(this._state.dragged),!0},_touchstart:function(a){if(void 0===a.changedTouches)return void this._mousedown(a);var 
[single-line minified payload omitted: the remaining "+" lines here carried the rest of the bundled bootstrap-slider plugin (keyboard and touch handlers, range-percentage adjustment, value calculation) and the whole bundled bootstrap-table plugin (defaults, en-US locale, column defaults, event map, header/body/toolbar/pagination initialization, server-side loading, check/expand/row APIs, and the table-export dropdown extension); extraction stripped every <...> span from the minified code, so these lines cannot be faithfully reconstructed]
diff --git a/web/gui/lib/bootstrap-toggle-2.2.2.min.js b/web/gui/lib/bootstrap-toggle-2.2.2.min.js
new file mode 100644
index 000000000..a11e156f8
--- /dev/null
+++ b/web/gui/lib/bootstrap-toggle-2.2.2.min.js
@@ -0,0 +1,10 @@
+/*! ========================================================================
+ * Bootstrap Toggle: bootstrap-toggle.js v2.2.0
+ * http://www.bootstraptoggle.com
+ * ========================================================================
+ * Copyright 2014 Min Hur, The New York Times Company
+ * Licensed under MIT
+ * SPDX-License-Identifier: MIT
+ * ======================================================================== */
++function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.toggle"),f="object"==typeof b&&b;e||d.data("bs.toggle",e=new c(this,f)),"string"==typeof b&&e[b]&&e[b]()})}var c=function(b,c){this.$element=a(b),this.options=a.extend({},this.defaults(),c),this.render()};c.VERSION="2.2.0",c.DEFAULTS={on:"On",off:"Off",onstyle:"primary",offstyle:"default",size:"normal",style:"",width:null,height:null},c.prototype.defaults=function(){return{on:this.$element.attr("data-on")||c.DEFAULTS.on,off:this.$element.attr("data-off")||c.DEFAULTS.off,onstyle:this.$element.attr("data-onstyle")||c.DEFAULTS.onstyle,offstyle:this.$element.attr("data-offstyle")||c.DEFAULTS.offstyle,size:this.$element.attr("data-size")||c.DEFAULTS.size,style:this.$element.attr("data-style")||c.DEFAULTS.style,width:this.$element.attr("data-width")||c.DEFAULTS.width,height:this.$element.attr("data-height")||c.DEFAULTS.height}},c.prototype.render=function(){this._onstyle="btn-"+this.options.onstyle,this._offstyle="btn-"+this.options.offstyle;var b="large"===this.options.size?"btn-lg":"small"===this.options.size?"btn-sm":"mini"===this.options.size?"btn-xs":"",c=a('